repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
stringlengths 5-100 | stringlengths 4-299 | stringclasses 990 values | stringlengths 4-7 | stringlengths 666-1.03M | stringclasses 15 values | int64 -9,223,351,895,964,839,000 to 9,223,297,778B | float64 3.17-100 | int64 7-1k | float64 0.25-0.98 | bool 1 class
---|---|---|---|---|---|---|---|---|---|---
iModels/foyer | foyer/tests/utils.py | 1 | 3192 | import glob
from os.path import join, split, abspath
import urllib.parse as parseurl
import numpy as np
def atomtype(structure, forcefield, **kwargs):
"""Compare known atomtypes to those generated by foyer.
Parameters
----------
structure : parmed.Structure
A parmed structure with `atom.type` attributes.
forcefield : foyer.Forcefield
A forcefield to use for atomtyping.
Raises
------
AssertionError
"""
known_types = [atom.type for atom in structure.atoms]
typed_structure = forcefield.apply(structure, **kwargs)
generated_atom_types = list()
for i, atom in enumerate(typed_structure.atoms):
message = ('Found multiple or no atom types for atom {} in {}: {}\n'
'Should be atomtype: {}'.format(
i, structure.title, atom.type, known_types[i]))
assert atom.type, message
generated_atom_types.append(atom.type)
both = zip(generated_atom_types, known_types)
n_types = np.array(range(len(generated_atom_types)))
known_types = np.array(known_types)
generated_atom_types = np.array(generated_atom_types)
non_matches = np.array([a != b for a, b in both])
message = "Found inconsistent atom types in {}: {}".format(
structure.title,
list(zip(n_types[non_matches],
generated_atom_types[non_matches],
known_types[non_matches])))
assert not non_matches.any(), message
def get_fn(filename):
"""Gets the full path of the file name for a particular test file.
Parameters
----------
filename : str
Name of the file to get
Returns
-------
path: str
Name of the test file with the full path location
"""
return join(split(abspath(__file__))[0], 'files', filename)
def glob_fn(pattern):
"""Gets the full paths for test files adhering to the glob pattern.
Parameters
----------
pattern : str
the pattern for the files(expanded using globbing)
Returns
-------
list of file absolute paths matching the pattern.
"""
return glob.glob(join(split(abspath(__file__))[0], 'files', pattern))
def register_mock_request(mocker,
url='http://api.crossref.org/',
http_verb='GET',
text='',
path=None,
headers=None,
status_code=200):
"""Registers the mocker for the given uri.
Parameters
----------
mocker : request_mock's mocker object
url: url to register the mocker for
http_verb: One of the many http verbs, default GET
text: the fake text response, default ''
path: (str) path of the resource that forms the uri, default None
headers: (dict) the http headers to match (optional), default None
status_code: (int), the status code of the response, default 200
"""
uri = url
if headers is None:
headers = {}
if path is not None:
uri = parseurl.urljoin(url, path, allow_fragments=False)
mocker.register_uri(http_verb, uri, headers=headers, text=text, status_code=status_code)
| mit | -5,376,610,217,032,805,000 | 29.692308 | 92 | 0.605263 | false |
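The test helpers above are easiest to read with a concrete call: load a pre-typed structure from the package's test files, apply a forcefield, and let `atomtype` assert that every generated type matches the stored one. The following is only a usage sketch, not part of the original file; the test file name and forcefield name are assumptions.

import parmed as pmd
from foyer import Forcefield

structure = pmd.load_file(get_fn('ethane.mol2'), structure=True)  # hypothetical test file
oplsaa = Forcefield(name='oplsaa')                                 # assumed forcefield name
atomtype(structure, oplsaa)  # raises AssertionError if any generated atom type disagrees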
carsongee/edx-platform | lms/djangoapps/shoppingcart/urls.py | 12 | 1078 | from django.conf.urls import patterns, url
from django.conf import settings
urlpatterns = patterns('shoppingcart.views', # nopep8
url(r'^postpay_callback/$', 'postpay_callback'), # Both the ~accept and ~reject callback pages are handled here
url(r'^receipt/(?P<ordernum>[0-9]*)/$', 'show_receipt'),
url(r'^csv_report/$', 'csv_report', name='payment_csv_report'),
)
if settings.FEATURES['ENABLE_SHOPPING_CART']:
urlpatterns += patterns(
'shoppingcart.views',
url(r'^$', 'show_cart'),
url(r'^clear/$', 'clear_cart'),
url(r'^remove_item/$', 'remove_item'),
url(r'^add/course/{}/$'.format(settings.COURSE_ID_PATTERN), 'add_course_to_cart', name='add_course_to_cart'),
url(r'^use_code/$', 'use_code'),
url(r'^register_courses/$', 'register_courses'),
)
if settings.FEATURES.get('ENABLE_PAYMENT_FAKE'):
from shoppingcart.tests.payment_fake import PaymentFakeView
urlpatterns += patterns(
'shoppingcart.tests.payment_fake',
url(r'^payment_fake', PaymentFakeView.as_view()),
)
| agpl-3.0 | 2,950,699,554,577,574,400 | 40.461538 | 117 | 0.641002 | false |
knossos-project/PythonQt | examples/NicePyConsole/module_completion.py | 3 | 4932 | # -*- coding: utf-8 -*-
""" provide completions for modules 'import',
copied code with some modifications from IPython!"""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import inspect
import os
import re
import sys
from time import time
from zipimport import zipimporter
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
# Time in seconds after which the rootmodules will be stored in (nonpermanent) cache
TIMEOUT_STORAGE = 2
# Time in seconds after which we give up
TIMEOUT_GIVEUP = 20
# Regular expression for the python import statement
import_re = re.compile(r'.*(\.so|\.py[cod]?)$')
# global cache
cache = dict()
def module_list(path):
"""
Return the list containing the names of the modules available in the given
folder.
"""
# sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
if path == '':
path = '.'
if os.path.isdir(path):
folder_list = os.listdir(path)
elif path.endswith('.egg'):
try:
folder_list = [f for f in zipimporter(path)._files]
except:
folder_list = []
else:
folder_list = []
if not folder_list:
return []
# A few local constants to be used in loops below
isfile = os.path.isfile
pjoin = os.path.join
basename = os.path.basename
def is_importable_file(path):
"""Returns True if the provided path is a valid importable module"""
name, extension = os.path.splitext(path)
return import_re.match(path) # and py3compat.isidentifier(name)
# Now find actual path matches for packages or modules
folder_list = [p for p in folder_list
if isfile(pjoin(path, p, '__init__.py'))
or is_importable_file(p)]
return [basename(p).split('.')[0] for p in folder_list]
def get_root_modules():
"""
Returns a list containing the names of all the modules available in the
folders of the pythonpath.
"""
global cache
if 'rootmodules' in cache:
return cache['rootmodules']
t = time()
store = False
modules = list(sys.builtin_module_names)
for path in sys.path:
modules += module_list(path)
if time() - t >= TIMEOUT_STORAGE and not store:
store = True
print("\nCaching the list of root modules, please wait!")
print("(This will only be done once.)\n")
sys.stdout.flush()
if time() - t > TIMEOUT_GIVEUP:
print("This is taking too long, we give up.\n")
cache['rootmodules'] = []
return []
modules = set(modules)
if '__init__' in modules:
modules.remove('__init__')
modules = list(modules)
if store:
cache['rootmodules'] = modules
return modules
def is_importable(module, attr, only_modules):
if only_modules:
return inspect.ismodule(getattr(module, attr))
else:
return not(attr[:2] == '__' and attr[-2:] == '__')
def try_import(mod, only_modules=False):
try:
m = __import__(mod)
except:
return []
mods = mod.split('.')
for module in mods[1:]:
m = getattr(m, module)
m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
completions = []
if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
completions.extend([attr for attr in dir(m) if
is_importable(m, attr, only_modules)])
completions.extend(getattr(m, '__all__', []))
if m_is_init:
completions.extend(module_list(os.path.dirname(m.__file__)))
completions = set(completions)
if '__init__' in completions:
completions.remove('__init__')
return list(completions)
def module_completion(line):
"""
Returns a list containing the completion possibilities for an import line.
The line looks like this :
'import xml.d'
'from xml.dom import'
"""
words = line.split(' ')
nwords = len(words)
# from whatever <tab> -> 'import '
if nwords == 3 and words[0] == 'from':
return ['import ']
# 'from xy<tab>' or 'import xy<tab>'
if nwords < 3 and (words[0] in ['import', 'from']):
if nwords == 1:
return get_root_modules()
mod = words[1].split('.')
if len(mod) < 2:
return get_root_modules()
completion_list = try_import('.'.join(mod[:-1]), True)
return ['.'.join(mod[:-1] + [el]) for el in completion_list]
# 'from xyz import abc<tab>'
if nwords >= 3 and words[0] == 'from':
mod = words[1]
return try_import(mod)
| lgpl-2.1 | -2,445,425,459,961,235,000 | 27.674419 | 84 | 0.550081 | false |
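A quick way to see what the module does is to feed `module_completion` the kinds of lines mentioned in its docstring. The calls below are only a sketch; the completions returned depend on the local Python installation, so the results in the comments are indicative.

print(module_completion('import '))          # full list of root modules (filtering is left to the caller)
print(module_completion('import xml.'))      # e.g. ['xml.dom', 'xml.sax', 'xml.parsers', ...]
print(module_completion('from os import '))  # names importable from os, e.g. ['path', 'getcwd', ...]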
tiagocoutinho/bliss | tests/continuous_scan/test_p201.py | 1 | 2719 | """
Continuous acquisition test using icepap motor and P201 card
"""
import os
import sys
import time
import logging
import argparse
import gevent
from bliss.config import static
try:
import bliss
except ImportError:
__this_dir = os.path.dirname(__file__)
__juyo_dir = os.path.realpath(os.path.join(__this_dir, os.path.pardir))
sys.path.insert(0, __juyo_dir)
from bliss.common.continuous_scan import Scan, AcquisitionChain
from bliss.common.data_manager import ScanRecorder
from bliss.acquisition.p201 import P201AcquisitionMaster, P201AcquisitionDevice
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--log-level', type=str, default='info',
help='log level (debug, info, warning, error) [default: info]')
parser.add_argument('--nb-points', type=int,
help='number of points [default: 1000]', default=1000)
parser.add_argument('--acq-time', type=float, default=1e-3,
help='acquisition time [default: 0.001]')
parser.add_argument('--p201', type=str, default="p201",
help='P201 card configuration name [default: p201]')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format="%(asctime)s %(levelname)s %(name)s: %(message)s")
nb_points = args.nb_points
acq_time = args.acq_time
config = static.get_config()
p201 = config.get(args.p201)
p201_config = config.get_config(args.p201)
channels = {}
for counter in p201_config.get("counters", []):
addr = counter['address']
enabled = counter.get('software enable', False)
# skip internal masters and disabled channels
if addr >= p201.COUNTERS[-2] or not enabled:
continue
name = counter.get('name', "c%d" % addr)
channels[name] = addr
print "The following counters (which have software enable=True) will be used: ",
print ", ".join(sorted(channels.keys()))
p201_master = P201AcquisitionMaster(p201, nb_points=nb_points,
acq_expo_time=acq_time)
p201_counters = P201AcquisitionDevice(p201, nb_points=nb_points,
acq_expo_time=acq_time,
channels=channels)
chain = AcquisitionChain()
chain.add(p201_master, p201_counters)
scan = Scan(chain, ScanRecorder())
chain._tree.show()
print("Prepare")
scan.prepare()
print("Start!")
t = time.time()
scan.start()
dt = time.time() - t
print("Finished (took %fs)!" % dt)
if __name__ == '__main__':
main()
| lgpl-3.0 | 5,842,952,628,930,979,000 | 31.759036 | 87 | 0.613461 | false |

schmidtfederico/PRinde | core/modules/data_updater/sync.py | 1 | 5697 | from pymongo.errors import BulkWriteError
from core.lib.jobs.base import BaseJob
from core.lib.jobs.monitor import ProgressMonitor, JOB_STATUS_WAITING, JOB_STATUS_RUNNING
__author__ = 'Federico Schmidt'
class YieldDatabaseSync(BaseJob):
def __init__(self, system_config):
super(YieldDatabaseSync, self).__init__(progress_monitor=ProgressMonitor(end_value=4))
self.system_config = system_config
def run(self):
self.progress_monitor.update_progress(job_status=JOB_STATUS_WAITING)
# Acquire a read lock (parallel job).
with self.system_config.jobs_lock.parallel_job():
self.progress_monitor.update_progress(job_status=JOB_STATUS_RUNNING)
if 'yield_sync_db' in self.system_config.database:
source_db = self.system_config.database['yield_db']
target_db = self.system_config.database['yield_sync_db']
new_forecasts = self.__find_collection_diff__(collection_name='forecasts', source_db=source_db,
target_db=target_db)
if new_forecasts.count() > 0:
forecasts_insert_monitor = ProgressMonitor(end_value=new_forecasts.count())
self.progress_monitor.add_subjob(forecasts_insert_monitor, job_name='Synchronize forecasts')
inserted_forecasts_count = 0
# Sync new forecasts.
for f in new_forecasts:
simulations_ids = f['simulations']
bulk_op = target_db.simulations.initialize_unordered_bulk_op()
# Fetch this forecast' simulations.
for simulation in source_db.simulations.find({'_id': {'$in': simulations_ids}}):
bulk_op.insert(simulation)
try:
bulk_op.execute()
except BulkWriteError as bwe:
# Check if every error that was raised was a duplicate key error (11000).
for err in bwe.details['writeErrors']:
if err['code'] != 11000:
raise RuntimeError('Non recoverable error found while trying to sync yield '
'databases. Details: %s' % bwe.details)
target_db.forecasts.insert(f)
inserted_forecasts_count += 1
forecasts_insert_monitor.update_progress(inserted_forecasts_count)
# Notify we finished syncing forecasts (the first part of the job).
self.progress_monitor.update_progress(new_value=1)
# Sync new reference simulations.
self.__insert_missing_documents__(collection_name='reference_simulations',
source_db=source_db,
target_db=target_db)
self.progress_monitor.update_progress(new_value=2)
# Sync new locations.
self.__insert_missing_documents__(collection_name='locations',
source_db=source_db,
target_db=target_db)
self.progress_monitor.update_progress(new_value=3)
# Sync new reference rainfalls.
self.__insert_missing_documents__(collection_name='reference_rainfall',
id_field='omm_id',
source_db=source_db,
target_db=target_db)
self.progress_monitor.update_progress(new_value=4)
# Sync new soils.
self.__insert_missing_documents__(collection_name='soils',
source_db=source_db,
target_db=target_db)
def __insert_missing_documents__(self, collection_name, source_db, target_db, id_field='_id'):
"""
Finds documents inside the given collection that are present in the source database but not in the target
database and inserts them.
:param collection_name: A collection name.
:param source_db: Source database Pymongo connection.
:param target_db: Target database Pymongo connection.
"""
new_documents = self.__find_collection_diff__(collection_name, source_db, target_db, id_field)
if new_documents.count() > 0:
bulk_operator = target_db[collection_name].initialize_unordered_bulk_op()
for document in new_documents:
bulk_operator.insert(document)
bulk_operator.execute()
def __find_collection_diff__(self, collection_name, source_db, target_db, id_field='_id'):
"""
Finds documents inside the given collection that are present in the source database but not in the target
database.
:param collection_name: A collection name.
:param source_db: Source database Pymongo connection.
:param target_db: Target database Pymongo connection.
:returns The list of documents that are missing in the target_db's collection.
"""
found_ids = target_db[collection_name].distinct(id_field)
# Find documents in source DB with id's that are NOT in the "found_ids" list.
return source_db[collection_name].find({
id_field: {'$nin': found_ids}
})
| gpl-2.0 | 7,426,628,131,308,822,000 | 48.112069 | 113 | 0.54713 | false |
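The whole synchronisation above rests on one query pattern: collect the ids that already exist in the target collection with `distinct()`, then pull only the missing documents from the source with `$nin` and bulk-insert them. A standalone sketch of that pattern (host, port and collection name are assumptions):

from pymongo import MongoClient

client = MongoClient('localhost', 27017)   # assumed connection details
source_db = client['yield_db']
target_db = client['yield_sync_db']

# Ids already present in the target collection.
found_ids = target_db['locations'].distinct('_id')
# Only the documents the target is missing.
missing = list(source_db['locations'].find({'_id': {'$nin': found_ids}}))

if missing:
    bulk_op = target_db['locations'].initialize_unordered_bulk_op()
    for document in missing:
        bulk_op.insert(document)
    bulk_op.execute()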
cjluo/money-monkey | email_sender.py | 1 | 1506 | import os
import smtplib
import logging
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
class EmailSender:
def __init__(self, address):
self._address = address
def send_email(self, title, image_pairs, delete=True):
from_address = '[email protected]'
to_address = self._address
msg = MIMEMultipart('related')
msg['Subject'] = title
msg['From'] = from_address
msg['To'] = to_address
msg_alt = MIMEMultipart('alternative')
msg.attach(msg_alt)
i = 0
text = ''
for symbol in image_pairs:
text += '<img src="cid:image' + str(i) + '"><br>'
image_file = open(image_pairs[symbol], 'rb').read()
image = MIMEImage(image_file, name=symbol)
image.add_header('Content-ID', '<image' + str(i) + '>')
msg.attach(image)
i += 1
text = MIMEText(text, 'html')
msg_alt.attach(text)
logger = logging.getLogger()
s = smtplib.SMTP('localhost')
try:
s.sendmail(from_address, to_address.split(','), msg.as_string())
logger.info("mail sent, subject %s" % title)
except Exception as exception:
logger.error("mail failed %s" % str(exception))
finally:
s.quit()
if delete:
for symbol in image_pairs:
os.remove(image_pairs[symbol])
| mit | -1,089,241,163,424,332,400 | 28.529412 | 76 | 0.563081 | false |
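Usage of the class above is a one-liner once the charts exist on disk: the keys of `image_pairs` become the inline image names and each value is embedded by its Content-ID. A hedged example (address and file paths are made up, and it assumes an SMTP server is listening on localhost):

sender = EmailSender('[email protected]')                     # assumed recipient address
charts = {'AAPL': '/tmp/aapl.png', 'GOOG': '/tmp/goog.png'}    # assumed chart files
sender.send_email('Daily summary', charts, delete=False)       # delete=False keeps the PNGs afterwards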
anthonyalmarza/trex | tests/test_scan.py | 1 | 1942 | from trex.redis import Connection
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from .mixins import RedisVersionCheckMixin, REDIS_HOST, REDIS_PORT
class TestScan(unittest.TestCase, RedisVersionCheckMixin):
KEYS = ['_scan_test_' + str(v).zfill(4) for v in range(100)]
SKEY = ['_scan_test_set']
SUFFIX = '12'
PATTERN = '_scan_test_*' + SUFFIX
FILTERED_KEYS = [k for k in KEYS if k.endswith(SUFFIX)]
@inlineCallbacks
def setUp(self):
self.db = yield Connection(REDIS_HOST, REDIS_PORT, reconnect=False)
self.redis_2_8_0 = yield self.checkVersion(2, 8, 0)
yield self.db.delete(*self.KEYS)
yield self.db.delete(self.SKEY)
@inlineCallbacks
def tearDown(self):
yield self.db.delete(*self.KEYS)
yield self.db.delete(self.SKEY)
yield self.db.disconnect()
@inlineCallbacks
def test_scan(self):
self._skipCheck()
yield self.db.mset(dict((k, 'value') for k in self.KEYS))
cursor, result = yield self.db.scan(pattern=self.PATTERN)
while cursor != 0:
cursor, keys = yield self.db.scan(cursor, pattern=self.PATTERN)
result.extend(keys)
self.assertEqual(set(result), set(self.FILTERED_KEYS))
@inlineCallbacks
def test_sscan(self):
self._skipCheck()
yield self.db.sadd(self.SKEY, self.KEYS)
cursor, result = yield self.db.sscan(self.SKEY, pattern=self.PATTERN)
while cursor != 0:
cursor, keys = yield self.db.sscan(self.SKEY, cursor,
pattern=self.PATTERN)
result.extend(keys)
self.assertEqual(set(result), set(self.FILTERED_KEYS))
def _skipCheck(self):
if not self.redis_2_8_0:
skipMsg = "Redis version < 2.8.0 (found version: %s)"
raise unittest.SkipTest(skipMsg % self.redis_version)
| mit | -1,663,686,892,160,437,500 | 31.915254 | 77 | 0.625644 | false |
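Both tests use the same cursor idiom: keep calling SCAN (or SSCAN) with the cursor returned by the previous call until the server hands back cursor 0, extending the result list on every round trip. Outside of trex/Twisted, the same loop with the synchronous redis-py client would look roughly like this (host, port and pattern are assumptions):

import redis

r = redis.Redis('localhost', 6379)                     # assumed connection details
cursor, keys = r.scan(0, match='_scan_test_*12')
while cursor != 0:
    cursor, batch = r.scan(cursor, match='_scan_test_*12')
    keys.extend(batch)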
ericfrederich/graphviz | tests/test_dot.py | 2 | 3365 | # test_dot.py
import unittest2 as unittest
from graphviz.dot import Graph, Digraph
class TestDot(unittest.TestCase):
def test_repr_svg(self):
self.assertRegexpMatches(Graph('spam')._repr_svg_(),
r'(?s)^<\?xml .+</svg>\s*$')
def test_attr(self):
with self.assertRaises(ValueError):
Graph().attr('spam')
def test_subgraph_invalid(self):
with self.assertRaises(ValueError):
Graph().subgraph(Digraph())
with self.assertRaises(ValueError):
Digraph().subgraph(Graph())
def test_subgraph_recursive(self): # guard against potential infinite loop
dot = Graph()
dot.subgraph(dot)
self.assertEqual(dot.source, 'graph {\n\tsubgraph {\n\t}\n}')
def test_subgraph(self):
s1 = Graph()
s1.node('A')
s1.node('B')
s1.node('C')
s1.edge('A', 'B', constraint='false')
s1.edges(['AC', 'BC'])
s2 = Graph()
s2.node('D')
s2.node('E')
s2.node('F')
s2.edge('D', 'E', constraint='false')
s2.edges(['DF', 'EF'])
dot = Graph()
dot.subgraph(s1)
dot.subgraph(s2)
dot.attr('edge', style='dashed')
dot.edges(['AD', 'BE', 'CF'])
self.assertEqual(dot.source, '''graph {
subgraph {
A
B
C
A -- B [constraint=false]
A -- C
B -- C
}
subgraph {
D
E
F
D -- E [constraint=false]
D -- F
E -- F
}
edge [style=dashed]
A -- D
B -- E
C -- F
}''')
class TestHTML(unittest.TestCase):
"""http://www.graphviz.org/doc/info/shapes.html#html"""
def test_label_html(self):
dot = Digraph('structs', node_attr={'shape': 'plaintext'})
dot.node('struct1', '''<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<TD>left</TD>
<TD PORT="f1">middle</TD>
<TD PORT="f2">right</TD>
</TR>
</TABLE>>''')
dot.node('struct2', '''<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<TD PORT="f0">one</TD>
<TD>two</TD>
</TR>
</TABLE>>''')
dot.node('struct3', '''<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">
<TR>
<TD ROWSPAN="3">hello<BR/>world</TD>
<TD COLSPAN="3">b</TD>
<TD ROWSPAN="3">g</TD>
<TD ROWSPAN="3">h</TD>
</TR>
<TR>
<TD>c</TD>
<TD PORT="here">d</TD>
<TD>e</TD>
</TR>
<TR>
<TD COLSPAN="3">f</TD>
</TR>
</TABLE>>''')
dot.edge('struct1:f1', 'struct2:f0')
dot.edge('struct1:f2', 'struct3:here')
self.assertEqual(dot.source, '''digraph structs {
node [shape=plaintext]
struct1 [label=<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<TD>left</TD>
<TD PORT="f1">middle</TD>
<TD PORT="f2">right</TD>
</TR>
</TABLE>>]
struct2 [label=<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<TD PORT="f0">one</TD>
<TD>two</TD>
</TR>
</TABLE>>]
struct3 [label=<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">
<TR>
<TD ROWSPAN="3">hello<BR/>world</TD>
<TD COLSPAN="3">b</TD>
<TD ROWSPAN="3">g</TD>
<TD ROWSPAN="3">h</TD>
</TR>
<TR>
<TD>c</TD>
<TD PORT="here">d</TD>
<TD>e</TD>
</TR>
<TR>
<TD COLSPAN="3">f</TD>
</TR>
</TABLE>>]
struct1:f1 -> struct2:f0
struct1:f2 -> struct3:here
}''')
dot.render('test-output/html.gv')
| mit | -3,727,539,746,753,796,000 | 21.284768 | 79 | 0.532838 | false |
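The expected sources asserted in these tests come straight from the Graph/Digraph builders. A minimal sketch of the same subgraph pattern outside the test harness, using only calls exercised above:

from graphviz import Graph

inner = Graph()
inner.edge('A', 'B', constraint='false')

outer = Graph()
outer.subgraph(inner)              # inlined as an anonymous subgraph
outer.attr('edge', style='dashed')
outer.edge('A', 'C')
print(outer.source)                # graph { subgraph { A -- B [constraint=false] } ... }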
teeple/pns_server | work/install/Python-2.7.4/Demo/tkinter/guido/paint.py | 42 | 1898 | """"Paint program by Dave Michell.
Subject: tkinter "paint" example
From: Dave Mitchell <[email protected]>
To: [email protected]
Date: Fri, 23 Jan 1998 12:18:05 -0500 (EST)
Not too long ago (last week maybe?) someone posted a request
for an example of a paint program using Tkinter. Try as I might
I can't seem to find it in the archive, so i'll just post mine
here and hope that the person who requested it sees this!
All this does is put up a canvas and draw a smooth black line
whenever you have the mouse button down, but hopefully it will
be enough to start with.. It would be easy enough to add some
options like other shapes or colors...
yours,
dave mitchell
[email protected]
"""
from Tkinter import *
"""paint.py: not exactly a paint program.. just a smooth line drawing demo."""
b1 = "up"
xold, yold = None, None
def main():
root = Tk()
drawing_area = Canvas(root)
drawing_area.pack()
drawing_area.bind("<Motion>", motion)
drawing_area.bind("<ButtonPress-1>", b1down)
drawing_area.bind("<ButtonRelease-1>", b1up)
root.mainloop()
def b1down(event):
global b1
b1 = "down" # you only want to draw when the button is down
# because "Motion" events happen -all the time-
def b1up(event):
global b1, xold, yold
b1 = "up"
xold = None # reset the line when you let go of the button
yold = None
def motion(event):
if b1 == "down":
global xold, yold
if xold is not None and yold is not None:
event.widget.create_line(xold,yold,event.x,event.y,smooth=TRUE)
# here's where you draw it. smooth. neat.
xold = event.x
yold = event.y
if __name__ == "__main__":
main()
| gpl-2.0 | 9,145,878,009,647,141,000 | 30.633333 | 78 | 0.596944 | false |
JioCloud/nova | nova/api/openstack/compute/contrib/floating_ip_dns.py | 24 | 9936 | # Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_utils import netutils
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
from nova import network
authorize = extensions.extension_authorizer('compute', 'floating_ip_dns')
def _translate_dns_entry_view(dns_entry):
result = {}
result['ip'] = dns_entry.get('ip')
result['id'] = dns_entry.get('id')
result['type'] = dns_entry.get('type')
result['domain'] = dns_entry.get('domain')
result['name'] = dns_entry.get('name')
return {'dns_entry': result}
def _translate_dns_entries_view(dns_entries):
return {'dns_entries': [_translate_dns_entry_view(entry)['dns_entry']
for entry in dns_entries]}
def _translate_domain_entry_view(domain_entry):
result = {}
result['domain'] = domain_entry.get('domain')
result['scope'] = domain_entry.get('scope')
result['project'] = domain_entry.get('project')
result['availability_zone'] = domain_entry.get('availability_zone')
return {'domain_entry': result}
def _translate_domain_entries_view(domain_entries):
return {'domain_entries':
[_translate_domain_entry_view(entry)['domain_entry']
for entry in domain_entries]}
def _unquote_domain(domain):
"""Unquoting function for receiving a domain name in a URL.
Domain names tend to have .'s in them. Urllib doesn't quote dots,
but Routes tends to choke on them, so we need an extra level of
by-hand quoting here.
"""
return urllib.unquote(domain).replace('%2E', '.')
def _create_dns_entry(ip, name, domain):
return {'ip': ip, 'name': name, 'domain': domain}
def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
return {'domain': domain, 'scope': scope, 'project': project,
'availability_zone': av_zone}
class FloatingIPDNSDomainController(object):
"""DNS domain controller for OpenStack API."""
def __init__(self):
self.network_api = network.API()
super(FloatingIPDNSDomainController, self).__init__()
def index(self, req):
"""Return a list of available DNS domains."""
context = req.environ['nova.context']
authorize(context)
try:
domains = self.network_api.get_dns_domains(context)
except NotImplementedError:
msg = _("Unable to get dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
domainlist = [_create_domain_entry(domain['domain'],
domain.get('scope'),
domain.get('project'),
domain.get('availability_zone'))
for domain in domains]
return _translate_domain_entries_view(domainlist)
def update(self, req, id, body):
"""Add or modify domain entry."""
context = req.environ['nova.context']
authorize(context)
fqdomain = _unquote_domain(id)
try:
entry = body['domain_entry']
scope = entry['scope']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
project = entry.get('project', None)
av_zone = entry.get('availability_zone', None)
if (scope not in ('private', 'public') or
project and av_zone or
scope == 'private' and project or
scope == 'public' and av_zone):
raise webob.exc.HTTPUnprocessableEntity()
if scope == 'private':
create_dns_domain = self.network_api.create_private_dns_domain
area_name, area = 'availability_zone', av_zone
else:
create_dns_domain = self.network_api.create_public_dns_domain
area_name, area = 'project', project
try:
create_dns_domain(context, fqdomain, area)
except NotImplementedError:
msg = _("Unable to create dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return _translate_domain_entry_view({'domain': fqdomain,
'scope': scope,
area_name: area})
def delete(self, req, id):
"""Delete the domain identified by id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(id)
# Delete the whole domain
try:
self.network_api.delete_dns_domain(context, domain)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to delete dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
class FloatingIPDNSEntryController(object):
"""DNS Entry controller for OpenStack API."""
def __init__(self):
self.network_api = network.API()
super(FloatingIPDNSEntryController, self).__init__()
def show(self, req, domain_id, id):
"""Return the DNS entry that corresponds to domain_id and id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
floating_ip = None
# Check whether id is a valid ipv4/ipv6 address.
if netutils.is_valid_ip(id):
floating_ip = id
try:
if floating_ip:
entries = self.network_api.get_dns_entries_by_address(
context, floating_ip, domain)
else:
entries = self.network_api.get_dns_entries_by_name(
context, id, domain)
except NotImplementedError:
msg = _("Unable to get dns entry")
raise webob.exc.HTTPNotImplemented(explanation=msg)
if not entries:
explanation = _("DNS entries not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
if floating_ip:
entrylist = [_create_dns_entry(floating_ip, entry, domain)
for entry in entries]
dns_entries = _translate_dns_entries_view(entrylist)
return wsgi.ResponseObject(dns_entries)
entry = _create_dns_entry(entries[0], id, domain)
return _translate_dns_entry_view(entry)
def update(self, req, domain_id, id, body):
"""Add or modify dns entry."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
entry = body['dns_entry']
address = entry['ip']
dns_type = entry['dns_type']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
try:
entries = self.network_api.get_dns_entries_by_name(
context, name, domain)
if not entries:
# create!
self.network_api.add_dns_entry(context, address, name,
dns_type, domain)
else:
# modify!
self.network_api.modify_dns_entry(context, name, address,
domain)
except NotImplementedError:
msg = _("Unable to create dns entry")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return _translate_dns_entry_view({'ip': address,
'name': name,
'type': dns_type,
'domain': domain})
def delete(self, req, domain_id, id):
"""Delete the entry identified by req and id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
self.network_api.delete_dns_entry(context, name, domain)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to delete dns entry")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
class Floating_ip_dns(extensions.ExtensionDescriptor):
"""Floating IP DNS support."""
name = "FloatingIpDns"
alias = "os-floating-ip-dns"
namespace = "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1"
updated = "2011-12-23T00:00:00Z"
def __init__(self, ext_mgr):
self.network_api = network.API()
super(Floating_ip_dns, self).__init__(ext_mgr)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ip-dns',
FloatingIPDNSDomainController())
resources.append(res)
res = extensions.ResourceExtension('entries',
FloatingIPDNSEntryController(),
parent={'member_name': 'domain',
'collection_name': 'os-floating-ip-dns'})
resources.append(res)
return resources
| apache-2.0 | 2,142,448,356,065,491,700 | 35.395604 | 78 | 0.585648 | false |
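The by-hand dot quoting described in `_unquote_domain` is the subtle part of this extension: Routes mishandles literal dots in path segments, so clients send them as `%2E` and the controller restores them. A tiny illustration (the domain value is made up):

_unquote_domain('example%2Ecom')   # -> 'example.com'
_unquote_domain('example.com')     # already plain, returned unchanged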
user-none/calibre | src/calibre/library/server/browse.py | 13 | 40620 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import operator, os, json, re, time
from binascii import hexlify, unhexlify
from collections import OrderedDict
import cherrypy
from calibre.constants import filesystem_encoding, config_dir
from calibre import (isbytestring, force_unicode, fit_image,
prepare_string_for_xml, sanitize_file_name2)
from calibre.utils.filenames import ascii_filename
from calibre.utils.config import prefs, JSONConfig
from calibre.utils.icu import sort_key
from calibre.utils.magick import Image
from calibre.library.comments import comments_to_html
from calibre.library.server import custom_fields_to_display
from calibre.library.field_metadata import category_icon_map
from calibre.library.server.utils import quote, unquote
from calibre.db.categories import Tag
from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
def xml(*args, **kwargs):
ans = prepare_string_for_xml(*args, **kwargs)
return ans.replace(''', ''')
def render_book_list(ids, prefix, suffix=''): # {{{
pages = []
num = len(ids)
pos = 0
delta = 25
while ids:
page = list(ids[:delta])
pages.append((page, pos))
ids = ids[delta:]
pos += len(page)
page_template = u'''\
<div class="page" id="page{0}">
<div class="load_data" title="{1}">
<span class="url" title="{prefix}/browse/booklist_page"></span>
<span class="start" title="{start}"></span>
<span class="end" title="{end}"></span>
</div>
<div class="loading"><img src="{prefix}/static/loading.gif" /> {2}</div>
<div class="loaded"></div>
</div>
'''
pagelist_template = u'''\
<div class="pagelist">
<ul>
{pages}
</ul>
</div>
'''
rpages, lpages = [], []
for i, x in enumerate(pages):
pg, pos = x
ld = xml(json.dumps(pg), True)
start, end = pos+1, pos+len(pg)
rpages.append(page_template.format(i, ld,
xml(_('Loading, please wait')) + '…',
start=start, end=end, prefix=prefix))
lpages.append(' '*20 + (u'<li><a href="#" title="Books {start} to {end}"'
' onclick="gp_internal(\'{id}\'); return false;"> '
'{start} to {end}</a></li>').format(start=start, end=end,
id='page%d'%i))
rpages = u'\n\n'.join(rpages)
lpages = u'\n'.join(lpages)
pagelist = pagelist_template.format(pages=lpages)
templ = u'''\
<h3>{0} {suffix}</h3>
<div id="booklist">
<div id="pagelist" title="{goto}">{pagelist}</div>
<div class="listnav topnav">
{navbar}
</div>
{pages}
<div class="listnav bottomnav">
{navbar}
</div>
</div>
'''
gp_start = gp_end = ''
if len(pages) > 1:
gp_start = '<a href="#" onclick="goto_page(); return false;" title="%s">' % \
(_('Go to') + '…')
gp_end = '</a>'
navbar = u'''\
<div class="navleft">
<a href="#" onclick="first_page(); return false;">{first}</a>
<a href="#" onclick="previous_page(); return false;">{previous}</a>
</div>
<div class="navmiddle">
{gp_start}
<span class="start">0</span> to <span class="end">0</span>
{gp_end}of {num}
</div>
<div class="navright">
<a href="#" onclick="next_page(); return false;">{next}</a>
<a href="#" onclick="last_page(); return false;">{last}</a>
</div>
'''.format(first=_('First'), last=_('Last'), previous=_('Previous'),
next=_('Next'), num=num, gp_start=gp_start, gp_end=gp_end)
return templ.format(_('Browsing %d books')%num, suffix=suffix,
pages=rpages, navbar=navbar, pagelist=pagelist,
goto=xml(_('Go to'), True) + '…')
# }}}
def utf8(x): # {{{
if isinstance(x, unicode):
x = x.encode('utf-8')
return x
# }}}
def render_rating(rating, url_prefix, container='span', prefix=None): # {{{
if rating < 0.1:
return '', ''
added = 0
if prefix is None:
prefix = _('Average rating')
rstring = xml(_('%(prefix)s: %(rating).1f stars')%dict(
prefix=prefix, rating=rating if rating else 0.0),
True)
ans = ['<%s class="rating">' % (container)]
for i in range(5):
n = rating - added
x = 'half'
if n <= 0.1:
x = 'off'
elif n >= 0.9:
x = 'on'
ans.append(
u'<img alt="{0}" title="{0}" src="{2}/static/star-{1}.png" />'.format(
rstring, x, url_prefix))
added += 1
ans.append('</%s>'%container)
return u''.join(ans), rstring
# }}}
def get_category_items(category, items, datatype, prefix): # {{{
def item(i):
templ = (u'<div title="{4}" class="category-item">'
'<div class="category-name">'
'<a href="{5}{3}" title="{4}">{0}</a></div>'
'<div>{1}</div>'
'<div>{2}</div></div>')
rating, rstring = render_rating(i.avg_rating, prefix)
orig_name = i.sort if i.use_sort_as_name else i.name
name = xml(orig_name)
if datatype == 'rating':
name = xml(_('%d stars')%int(i.avg_rating))
id_ = i.id
if id_ is None:
id_ = hexlify(force_unicode(orig_name).encode('utf-8'))
id_ = xml(str(id_))
desc = ''
if i.count > 0:
desc += '[' + _('%d books')%i.count + ']'
q = i.category
if not q:
q = category
href = '/browse/matches/%s/%s'%(quote(q), quote(id_))
return templ.format(xml(name), rating,
xml(desc), xml(href, True), rstring, prefix)
items = list(map(item, items))
return '\n'.join(['<div class="category-container">'] + items + ['</div>'])
# }}}
class Endpoint(object): # {{{
'Manage encoding, mime-type, last modified, cookies, etc.'
def __init__(self, mimetype='text/html; charset=utf-8', sort_type='category'):
self.mimetype = mimetype
self.sort_type = sort_type
self.sort_kwarg = sort_type + '_sort'
self.sort_cookie_name = 'calibre_browse_server_sort_'+self.sort_type
def __call__(eself, func):
def do(self, *args, **kwargs):
if 'json' not in eself.mimetype:
sort_val = None
cookie = cherrypy.request.cookie
if eself.sort_cookie_name in cookie:
sort_val = cookie[eself.sort_cookie_name].value
kwargs[eself.sort_kwarg] = sort_val
# Remove AJAX caching disabling jquery workaround arg
kwargs.pop('_', None)
ans = func(self, *args, **kwargs)
cherrypy.response.headers['Content-Type'] = eself.mimetype
updated = self.db.last_modified()
cherrypy.response.headers['Last-Modified'] = \
self.last_modified(max(updated, self.build_time))
ans = utf8(ans)
return ans
do.__name__ = func.__name__
return do
# }}}
class BrowseServer(object):
def add_routes(self, connect):
base_href = '/browse'
connect('browse', base_href, self.browse_catalog)
connect('browse_catalog', base_href+'/category/{category}',
self.browse_catalog)
connect('browse_category_group',
base_href+'/category_group/{category}/{group}',
self.browse_category_group)
connect('browse_matches',
base_href+'/matches/{category}/{cid}',
self.browse_matches)
connect('browse_booklist_page',
base_href+'/booklist_page',
self.browse_booklist_page)
connect('browse_search', base_href+'/search',
self.browse_search)
connect('browse_details', base_href+'/details/{id}',
self.browse_details)
connect('browse_book', base_href+'/book/{id}',
self.browse_book)
connect('browse_random', base_href+'/random',
self.browse_random)
connect('browse_category_icon', base_href+'/icon/{name}',
self.browse_icon)
self.icon_map = JSONConfig('gui').get('tags_browser_category_icons', {})
# Templates {{{
def browse_template(self, sort, category=True, initial_search=''):
if not hasattr(self, '__browse_template__') or \
self.opts.develop:
self.__browse_template__ = \
P('content_server/browse/browse.html', data=True).decode('utf-8')
ans = self.__browse_template__
scn = 'calibre_browse_server_sort_'
if category:
sort_opts = [('rating', _('Average rating')), ('name',
_('Name')), ('popularity', _('Popularity'))]
scn += 'category'
else:
scn += 'list'
fm = self.db.field_metadata
sort_opts, added = [], set([])
displayed_custom_fields = custom_fields_to_display(self.db)
for x in fm.sortable_field_keys():
if x in ('ondevice', 'formats', 'sort'):
continue
if fm.is_ignorable_field(x) and x not in displayed_custom_fields:
continue
if x == 'comments' or fm[x]['datatype'] == 'comments':
continue
n = fm[x]['name']
if n not in added:
added.add(n)
sort_opts.append((x, n))
ans = ans.replace('{sort_select_label}', xml(_('Sort by')+':'))
ans = ans.replace('{sort_cookie_name}', scn)
ans = ans.replace('{prefix}', self.opts.url_prefix)
ans = ans.replace('{library}', _('library'))
ans = ans.replace('{home}', _('home'))
ans = ans.replace('{Search}', _('Search'))
opts = ['<option %svalue="%s">%s</option>' % (
'selected="selected" ' if k==sort else '',
xml(k), xml(nl), ) for k, nl in
sorted(sort_opts, key=lambda x: sort_key(operator.itemgetter(1)(x))) if k and nl]
ans = ans.replace('{sort_select_options}', ('\n'+' '*20).join(opts))
lp = self.db.library_path
if isbytestring(lp):
lp = force_unicode(lp, filesystem_encoding)
ans = ans.replace('{library_name}', xml(os.path.basename(lp)))
ans = ans.replace('{library_path}', xml(lp, True))
ans = ans.replace('{initial_search}', xml(initial_search, attribute=True))
return ans
@property
def browse_summary_template(self):
if not hasattr(self, '__browse_summary_template__') or \
self.opts.develop:
self.__browse_summary_template__ = \
P('content_server/browse/summary.html', data=True).decode('utf-8')
return self.__browse_summary_template__.replace('{prefix}',
self.opts.url_prefix)
@property
def browse_details_template(self):
if not hasattr(self, '__browse_details_template__') or \
self.opts.develop:
self.__browse_details_template__ = \
P('content_server/browse/details.html', data=True).decode('utf-8')
return self.__browse_details_template__.replace('{prefix}',
self.opts.url_prefix)
# }}}
# Catalogs {{{
def browse_icon(self, name='blank.png'):
cherrypy.response.headers['Content-Type'] = 'image/png'
cherrypy.response.headers['Last-Modified'] = self.last_modified(self.build_time)
if not hasattr(self, '__browse_icon_cache__'):
self.__browse_icon_cache__ = {}
if name not in self.__browse_icon_cache__:
if name.startswith('_'):
name = sanitize_file_name2(name[1:])
try:
with open(os.path.join(config_dir, 'tb_icons', name), 'rb') as f:
data = f.read()
except:
raise cherrypy.HTTPError(404, 'no icon named: %r'%name)
else:
try:
data = I(name, data=True)
except:
raise cherrypy.HTTPError(404, 'no icon named: %r'%name)
img = Image()
img.load(data)
width, height = img.size
scaled, width, height = fit_image(width, height, 48, 48)
if scaled:
img.size = (width, height)
self.__browse_icon_cache__[name] = img.export('png')
return self.__browse_icon_cache__[name]
def browse_toplevel(self):
categories = self.categories_cache()
category_meta = self.db.field_metadata
cats = [
(_('Newest'), 'newest', 'forward.png'),
(_('All books'), 'allbooks', 'book.png'),
(_('Random book'), 'randombook', 'random.png'),
]
virt_libs = self.db.prefs.get('virtual_libraries', {})
if virt_libs:
cats.append((_('Virtual Libs.'), 'virt_libs', 'lt.png'))
def getter(x):
try:
return category_meta[x]['name'].lower()
except KeyError:
return x
displayed_custom_fields = custom_fields_to_display(self.db)
uc_displayed = set()
for category in sorted(categories, key=lambda x: sort_key(getter(x))):
if len(categories[category]) == 0:
continue
if category in ('formats', 'identifiers'):
continue
meta = category_meta.get(category, None)
if meta is None:
continue
if self.db.field_metadata.is_ignorable_field(category) and \
category not in displayed_custom_fields:
continue
# get the icon files
main_cat = (category.partition('.')[0]) if hasattr(category,
'partition') else category
if main_cat in self.icon_map:
icon = '_'+quote(self.icon_map[main_cat])
elif category in category_icon_map:
icon = category_icon_map[category]
elif meta['is_custom']:
icon = category_icon_map['custom:']
elif meta['kind'] == 'user':
icon = category_icon_map['user:']
else:
icon = 'blank.png'
if meta['kind'] == 'user':
dot = category.find('.')
if dot > 0:
cat = category[:dot]
if cat not in uc_displayed:
cats.append((meta['name'][:dot-1], cat, icon))
uc_displayed.add(cat)
else:
cats.append((meta['name'], category, icon))
uc_displayed.add(category)
else:
cats.append((meta['name'], category, icon))
cats = [(u'<li><a title="{2} {0}" href="{3}/browse/category/{1}"> </a>'
u'<img src="{3}{src}" alt="{0}" />'
u'<span class="label">{0}</span>'
u'</li>')
.format(xml(x, True), xml(quote(y)), xml(_('Browse books by')),
self.opts.url_prefix, src='/browse/icon/'+z)
for x, y, z in cats]
main = u'<div class="toplevel"><h3>{0}</h3><ul>{1}</ul></div>'\
.format(_('Choose a category to browse by:'), u'\n\n'.join(cats))
return self.browse_template('name').format(title='',
script='toplevel();', main=main)
def browse_sort_categories(self, items, sort):
if sort not in ('rating', 'name', 'popularity'):
sort = 'name'
items.sort(key=lambda x: sort_key(getattr(x, 'sort', x.name)))
if sort == 'popularity':
items.sort(key=operator.attrgetter('count'), reverse=True)
elif sort == 'rating':
items.sort(key=operator.attrgetter('avg_rating'), reverse=True)
return sort
def browse_category(self, category, sort):
categories = self.categories_cache()
categories['virt_libs'] = {}
if category not in categories:
raise cherrypy.HTTPError(404, 'category not found')
category_meta = self.db.field_metadata
category_name = _('Virtual Libraries') if category == 'virt_libs' else category_meta[category]['name']
datatype = 'text' if category == 'virt_libs' else category_meta[category]['datatype']
# See if we have any sub-categories to display. As we find them, add
# them to the displayed set to avoid showing the same item twice
uc_displayed = set()
cats = []
for ucat in sorted(categories.keys(), key=sort_key):
if len(categories[ucat]) == 0:
continue
if category == 'formats':
continue
meta = category_meta.get(ucat, None)
if meta is None:
continue
if meta['kind'] != 'user':
continue
cat_len = len(category)
if not (len(ucat) > cat_len and ucat.startswith(category+'.')):
continue
if ucat in self.icon_map:
icon = '_'+quote(self.icon_map[ucat])
else:
icon = category_icon_map['user:']
# we have a subcategory. Find any further dots (further subcats)
cat_len += 1
cat = ucat[cat_len:]
dot = cat.find('.')
if dot > 0:
# More subcats
cat = cat[:dot]
if cat not in uc_displayed:
cats.append((cat, ucat[:cat_len+dot], icon))
uc_displayed.add(cat)
else:
# This is the end of the chain
cats.append((cat, ucat, icon))
uc_displayed.add(cat)
cats = u'\n\n'.join(
[(u'<li><a title="{2} {0}" href="{3}/browse/category/{1}"> </a>'
u'<img src="{3}{src}" alt="{0}" />'
u'<span class="label">{0}</span>'
u'</li>')
.format(xml(x, True), xml(quote(y)), xml(_('Browse books by')),
self.opts.url_prefix, src='/browse/icon/'+z)
for x, y, z in cats])
if cats:
cats = (u'\n<div class="toplevel">\n'
'{0}</div>').format(cats)
script = 'toplevel();'
else:
script = 'true'
# Now do the category items
vls = self.db.prefs.get('virtual_libraries', {})
categories['virt_libs'] = sorted([Tag(k) for k, v in vls.iteritems()], key=lambda x:sort_key(x.name))
items = categories[category]
sort = self.browse_sort_categories(items, sort)
if not cats and len(items) == 1:
# Only one item in category, go directly to book list
html = get_category_items(category, items,
datatype, self.opts.url_prefix)
href = re.search(r'<a href="([^"]+)"', html)
if href is not None:
# cherrypy does not auto unquote params when using
# InternalRedirect
raise cherrypy.InternalRedirect(unquote(href.group(1)))
if len(items) <= self.opts.max_opds_ungrouped_items:
script = 'false'
items = get_category_items(category, items,
datatype, self.opts.url_prefix)
else:
getter = lambda x: unicode(getattr(x, 'sort', None) or x.name)
starts = set([])
for x in items:
val = getter(x)
if not val:
val = u'A'
starts.add(val[0].upper())
category_groups = OrderedDict()
for x in sorted(starts):
category_groups[x] = len([y for y in items if
getter(y).upper().startswith(x)])
items = [(u'<h3 title="{0}"><a class="load_href" title="{0}"'
u' href="{4}{3}"><strong>{0}</strong> [{2}]</a></h3><div>'
u'<div class="loaded" style="display:none"></div>'
u'<div class="loading"><img alt="{1}" src="{4}/static/loading.gif" /><em>{1}</em></div>'
u'</div>').format(
xml(s, True),
xml(_('Loading, please wait'))+'…',
unicode(c),
xml(u'/browse/category_group/%s/%s'%(
hexlify(category.encode('utf-8')),
hexlify(s.encode('utf-8'))), True),
self.opts.url_prefix)
for s, c in category_groups.items()]
items = '\n\n'.join(items)
items = u'<div id="groups">\n{0}</div>'.format(items)
if cats:
script = 'toplevel();category(%s);'%script
else:
script = 'category(%s);'%script
main = u'''
<div class="category">
<h3>{0}</h3>
<a class="navlink" href="{3}/browse"
title="{2}">{2} ↑</a>
{1}
</div>
'''.format(
xml(_('Browsing by')+': ' + category_name), cats + items,
xml(_('Up'), True), self.opts.url_prefix)
return self.browse_template(sort).format(title=category_name,
script=script, main=main)
@Endpoint(mimetype='application/json; charset=utf-8')
def browse_category_group(self, category=None, group=None, sort=None):
if sort == 'null':
sort = None
if sort not in ('rating', 'name', 'popularity'):
sort = 'name'
try:
category = unhexlify(category)
if isbytestring(category):
category = category.decode('utf-8')
except:
raise cherrypy.HTTPError(404, 'invalid category')
categories = self.categories_cache()
if category not in categories:
raise cherrypy.HTTPError(404, 'category not found')
category_meta = self.db.field_metadata
try:
datatype = category_meta[category]['datatype']
except KeyError:
datatype = 'text'
try:
group = unhexlify(group)
if isbytestring(group):
group = group.decode('utf-8')
except:
raise cherrypy.HTTPError(404, 'invalid group')
items = categories[category]
entries = []
getter = lambda x: unicode(getattr(x, 'sort', None) or x.name)
for x in items:
val = getter(x)
if not val:
val = u'A'
if val.upper().startswith(group):
entries.append(x)
sort = self.browse_sort_categories(entries, sort)
entries = get_category_items(category, entries,
datatype, self.opts.url_prefix)
return json.dumps(entries, ensure_ascii=True)
@Endpoint()
def browse_catalog(self, category=None, category_sort=None):
'Entry point for top-level, categories and sub-categories'
prefix = '' if self.is_wsgi else self.opts.url_prefix
if category is None:
ans = self.browse_toplevel()
# The following are fake categories used for the top-level view
elif category == 'newest':
raise cherrypy.InternalRedirect(prefix +
'/browse/matches/newest/dummy')
elif category == 'allbooks':
raise cherrypy.InternalRedirect(prefix +
'/browse/matches/allbooks/dummy')
elif category == 'randombook':
raise cherrypy.InternalRedirect(prefix +
'/browse/random')
else:
ans = self.browse_category(category, category_sort)
return ans
# }}}
# Book Lists {{{
def browse_sort_book_list(self, items, sort):
fm = self.db.field_metadata
keys = frozenset(fm.sortable_field_keys())
if sort not in keys:
sort = 'title'
self.sort(items, 'title', True)
if sort != 'title':
ascending = fm[sort]['datatype'] not in ('rating', 'datetime',
'series')
self.sort(items, sort, ascending)
return sort
@Endpoint(sort_type='list')
def browse_matches(self, category=None, cid=None, list_sort=None):
if list_sort:
list_sort = unquote(list_sort)
if not cid:
raise cherrypy.HTTPError(404, 'invalid category id: %r'%cid)
categories = self.categories_cache()
if category not in categories and \
category not in ('newest', 'allbooks', 'virt_libs'):
raise cherrypy.HTTPError(404, 'category not found')
fm = self.db.field_metadata
try:
category_name = fm[category]['name']
dt = fm[category]['datatype']
except:
if category not in ('newest', 'allbooks', 'virt_libs'):
raise
category_name = {
'newest' : _('Newest'),
'allbooks' : _('All books'),
'virt_libs': _('Virtual Libraries'),
}[category]
dt = None
hide_sort = 'true' if dt == 'series' else 'false'
if category == 'search':
which = unhexlify(cid).decode('utf-8')
try:
ids = self.search_cache('search:"%s"'%which)
except:
raise cherrypy.HTTPError(404, 'Search: %r not understood'%which)
else:
all_ids = self.search_cache('')
if category == 'newest':
ids = all_ids
hide_sort = 'true'
elif category == 'allbooks':
ids = all_ids
elif category == 'virt_libs':
which = unhexlify(cid).decode('utf-8')
vls = self.db.prefs.get('virtual_libraries', {})
ids = self.search_cache(vls[which])
category_name = _('virtual library: ') + xml(which)
if not ids:
msg = _('The virtual library <b>%s</b> has no books.') % prepare_string_for_xml(which)
if self.search_restriction:
msg += ' ' + _(
'This is probably because you have applied a virtual library'
' to the content server in Preferences->Sharing over the net.'
' This virtual library is applied globally and combined with'
' the current virtual library.')
return self.browse_template('name').format(title='',
script='', main='<p>%s</p>'%msg)
else:
if fm.get(category, {'datatype':None})['datatype'] == 'composite':
cid = cid.decode('utf-8')
q = category
if q == 'news':
q = 'tags'
ids = self.db.get_books_for_category(q, cid)
ids = [x for x in ids if x in all_ids]
items = [self.db.data.tablerow_for_id(x) for x in ids]
if category == 'newest':
list_sort = 'timestamp'
if dt == 'series':
list_sort = category
sort = self.browse_sort_book_list(items, list_sort)
ids = [x[0] for x in items]
html = render_book_list(ids, self.opts.url_prefix,
suffix=_('in') + ' ' + category_name)
return self.browse_template(sort, category=False).format(
title=_('Books in') + " " +category_name,
script='booklist(%s);'%hide_sort, main=html)
def browse_get_book_args(self, mi, id_, add_category_links=False):
fmts = self.db.formats(id_, index_is_id=True)
if not fmts:
fmts = ''
fmts = [x.lower() for x in fmts.split(',') if x]
pf = prefs['output_format'].lower()
try:
fmt = pf if pf in fmts else fmts[0]
except:
fmt = None
args = {'id':id_, 'mi':mi,
}
ccache = self.categories_cache() if add_category_links else {}
ftitle = fauthors = ''
for key in mi.all_field_keys():
val = mi.format_field(key)[1]
if not val:
val = ''
if key == 'title':
ftitle = xml(val, True)
elif key == 'authors':
fauthors = xml(val, True)
if add_category_links:
added_key = False
fm = mi.metadata_for_field(key)
if val and fm and fm['is_category'] and not fm['is_csp'] and\
key != 'formats' and fm['datatype'] not in ['rating']:
categories = mi.get(key)
if isinstance(categories, basestring):
categories = [categories]
dbtags = []
for category in categories:
dbtag = None
for tag in ccache[key]:
if tag.name == category:
dbtag = tag
break
dbtags.append(dbtag)
if None not in dbtags:
vals = []
for tag in dbtags:
tval = ('<a title="Browse books by {3}: {0}"'
' href="{1}" class="details_category_link">{2}</a>')
href='%s/browse/matches/%s/%s' % \
(self.opts.url_prefix, quote(tag.category), quote(str(tag.id)))
vals.append(tval.format(xml(tag.name, True),
xml(href, True),
xml(val if len(dbtags) == 1 else tag.name),
xml(key, True)))
join = ' & ' if key == 'authors' or \
(fm['is_custom'] and
fm['display'].get('is_names', False)) \
else ', '
args[key] = join.join(vals)
added_key = True
if not added_key:
args[key] = xml(val, True)
else:
args[key] = xml(val, True)
fname = quote(ascii_filename(ftitle) + ' - ' +
ascii_filename(fauthors))
return args, fmt, fmts, fname
@Endpoint(mimetype='application/json; charset=utf-8')
def browse_booklist_page(self, ids=None, sort=None):
if sort == 'null':
sort = None
if ids is None:
ids = json.dumps('[]')
try:
ids = json.loads(ids)
except:
raise cherrypy.HTTPError(404, 'invalid ids')
summs = []
for id_ in ids:
try:
id_ = int(id_)
mi = self.db.get_metadata(id_, index_is_id=True)
except:
continue
args, fmt, fmts, fname = self.browse_get_book_args(mi, id_)
args['other_formats'] = ''
args['fmt'] = fmt
if fmts and fmt:
other_fmts = [x for x in fmts if x.lower() != fmt.lower()]
if other_fmts:
ofmts = [u'<a href="{4}/get/{0}/{1}_{2}.{0}" title="{3}">{3}</a>'
.format(f, fname, id_, f.upper(),
self.opts.url_prefix) for f in
other_fmts]
ofmts = ', '.join(ofmts)
args['other_formats'] = u'<strong>%s: </strong>' % \
_('Other formats') + ofmts
args['details_href'] = self.opts.url_prefix + '/browse/details/'+str(id_)
if fmt:
href = self.opts.url_prefix + '/get/%s/%s_%d.%s'%(
fmt, fname, id_, fmt)
rt = xml(_('Read %(title)s in the %(fmt)s format')%
{'title':args['title'], 'fmt':fmt.upper()}, True)
args['get_button'] = \
'<a href="%s" class="read" title="%s">%s</a>' % \
(xml(href, True), rt, xml(_('Get')))
args['get_url'] = xml(href, True)
else:
args['get_button'] = ''
args['get_url'] = 'javascript:alert(\'%s\')' % xml(_(
'This book has no available formats to view'), True)
args['comments'] = comments_to_html(mi.comments)
args['stars'] = ''
if mi.rating:
args['stars'] = render_rating(mi.rating/2.0,
self.opts.url_prefix, prefix=_('Rating'))[0]
if args['tags']:
args['tags'] = u'<strong>%s: </strong>'%xml(_('Tags')) + \
args['tags']
if args['series']:
args['series'] = args['series']
args['details'] = xml(_('Details'), True)
args['details_tt'] = xml(_('Show book details'), True)
args['permalink'] = xml(_('Permalink'), True)
args['permalink_tt'] = xml(_('A permanent link to this book'), True)
summs.append(self.browse_summary_template.format(**args))
raw = json.dumps('\n'.join(summs), ensure_ascii=True)
return raw
def browse_render_details(self, id_, add_random_button=False, add_title=False):
try:
mi = self.db.get_metadata(id_, index_is_id=True)
except:
return _('This book has been deleted')
else:
args, fmt, fmts, fname = self.browse_get_book_args(mi, id_,
add_category_links=True)
args['fmt'] = fmt
if fmt:
args['get_url'] = xml(self.opts.url_prefix + '/get/%s/%s_%d.%s'%(
fmt, fname, id_, fmt), True)
else:
args['get_url'] = 'javascript:alert(\'%s\')' % xml(_(
'This book has no available formats to view'), True)
args['formats'] = ''
if fmts:
ofmts = [u'<a href="{4}/get/{0}/{1}_{2}.{0}" title="{3}">{3}</a>'
.format(xfmt, fname, id_, xfmt.upper(),
self.opts.url_prefix) for xfmt in fmts]
ofmts = ', '.join(ofmts)
args['formats'] = ofmts
fields, comments = [], []
displayed_custom_fields = custom_fields_to_display(self.db)
for field, m in list(mi.get_all_standard_metadata(False).items()) + \
list(mi.get_all_user_metadata(False).items()):
if self.db.field_metadata.is_ignorable_field(field) and \
field not in displayed_custom_fields:
continue
if m['datatype'] == 'comments' or field == 'comments' or (
m['datatype'] == 'composite' and
m['display'].get('contains_html', False)):
val = mi.get(field, '')
if val and val.strip():
comments.append((m['name'], comments_to_html(val)))
continue
if field in ('title', 'formats') or not args.get(field, False) \
or not m['name']:
continue
if field == 'identifiers':
urls = urls_from_identifiers(mi.get(field, {}))
links = [u'<a class="details_category_link" target="_new" href="%s" title="%s:%s">%s</a>' % (url, id_typ, id_val, name)
for name, id_typ, id_val, url in urls]
links = u', '.join(links)
if links:
fields.append((field, m['name'], u'<strong>%s: </strong>%s'%(
_('Ids'), links)))
continue
if m['datatype'] == 'rating':
r = u'<strong>%s: </strong>'%xml(m['name']) + \
render_rating(mi.get(field)/2.0, self.opts.url_prefix,
prefix=m['name'])[0]
else:
r = u'<strong>%s: </strong>'%xml(m['name']) + \
args[field]
fields.append((field, m['name'], r))
def fsort(x):
num = {'authors':0, 'series':1, 'tags':2}.get(x[0], 100)
return (num, sort_key(x[-1]))
fields.sort(key=fsort)
if add_title:
fields.insert(0, ('title', 'Title', u'<strong>%s: </strong>%s' % (xml(_('Title')), xml(mi.title))))
fields = [u'<div class="field">{0}</div>'.format(f[-1]) for f in
fields]
fields = u'<div class="fields">%s</div>'%('\n\n'.join(fields))
comments.sort(key=lambda x: x[0].lower())
comments = [(u'<div class="field"><strong>%s: </strong>'
u'<div class="comment">%s</div></div>') % (xml(c[0]),
c[1]) for c in comments]
comments = u'<div class="comments">%s</div>'%('\n\n'.join(comments))
random = ''
if add_random_button:
href = '%s/browse/random?v=%s'%(
self.opts.url_prefix, time.time())
random = '<a href="%s" id="random_button" title="%s">%s</a>' % (
xml(href, True), xml(_('Choose another random book'), True),
xml(_('Another random book')))
return self.browse_details_template.format(
id=id_, title=xml(mi.title, True), fields=fields,
get_url=args['get_url'], fmt=args['fmt'],
formats=args['formats'], comments=comments, random=random)
@Endpoint(mimetype='application/json; charset=utf-8')
def browse_details(self, id=None):
try:
id_ = int(id)
except:
raise cherrypy.HTTPError(404, 'invalid id: %r'%id)
ans = self.browse_render_details(id_)
return json.dumps(ans, ensure_ascii=True)
@Endpoint()
def browse_random(self, *args, **kwargs):
import random
try:
book_id = random.choice(self.search_for_books(''))
except IndexError:
raise cherrypy.HTTPError(404, 'This library has no books')
ans = self.browse_render_details(book_id, add_random_button=True, add_title=True)
return self.browse_template('').format(
title=prepare_string_for_xml(self.db.title(book_id, index_is_id=True)), script='book();', main=ans)
@Endpoint()
def browse_book(self, id=None, category_sort=None):
try:
id_ = int(id)
except:
raise cherrypy.HTTPError(404, 'invalid id: %r'%id)
ans = self.browse_render_details(id_, add_title=True)
return self.browse_template('').format(
title=prepare_string_for_xml(self.db.title(id_, index_is_id=True)), script='book();', main=ans)
# }}}
# Search {{{
@Endpoint(sort_type='list')
def browse_search(self, query='', list_sort=None):
if isbytestring(query):
query = query.decode('UTF-8')
ids = self.search_for_books(query)
items = [self.db.data.tablerow_for_id(x) for x in ids]
sort = self.browse_sort_book_list(items, list_sort)
ids = [x[0] for x in items]
html = render_book_list(ids, self.opts.url_prefix,
suffix=_('in search')+': '+xml(query))
return self.browse_template(sort, category=False, initial_search=query).format(
title=_('Matching books'),
script='search_result();', main=html)
# }}}
| gpl-3.0 | -7,050,573,765,135,154,000 | 40.280488 | 139 | 0.488602 | false |
emschorsch/scrapy | scrapy/settings/default_settings.py | 2 | 7437 | """
This module contains the default values for all settings used by Scrapy.
For more information about these settings you can read the settings
documentation in docs/topics/settings.rst
Scrapy developers, if you add a setting here remember to:
* add it in alphabetical order
* group similar settings without leaving blank lines
* add its documentation to the available settings documentation
(docs/topics/settings.rst)
"""
import sys, os
from os.path import join, abspath, dirname
BOT_NAME = 'scrapybot'
CLOSESPIDER_TIMEOUT = 0
CLOSESPIDER_PAGECOUNT = 0
CLOSESPIDER_ITEMCOUNT = 0
CLOSESPIDER_ERRORCOUNT = 0
COMMANDS_MODULE = ''
CONCURRENT_ITEMS = 100
CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 8
CONCURRENT_REQUESTS_PER_IP = 0
COOKIES_ENABLED = True
COOKIES_DEBUG = False
DEFAULT_ITEM_CLASS = 'scrapy.item.Item'
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
DEPTH_LIMIT = 0
DEPTH_STATS = True
DEPTH_PRIORITY = 0
DNSCACHE_ENABLED = True
DOWNLOAD_DELAY = 0
DOWNLOAD_HANDLERS = {}
DOWNLOAD_HANDLERS_BASE = {
'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
'http': 'scrapy.core.downloader.handlers.http.HttpDownloadHandler',
'https': 'scrapy.core.downloader.handlers.http.HttpDownloadHandler',
's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
}
DOWNLOAD_TIMEOUT = 180 # 3mins
DOWNLOADER_DEBUG = False
DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.webclient.ScrapyClientContextFactory'
DOWNLOADER_MIDDLEWARES = {}
DOWNLOADER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 800,
'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
# Downloader side
}
DOWNLOADER_STATS = True
DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
try:
EDITOR = os.environ['EDITOR']
except KeyError:
if sys.platform == 'win32':
EDITOR = '%s -m idlelib.idle'
else:
EDITOR = 'vi'
EXTENSIONS = {}
EXTENSIONS_BASE = {
'scrapy.contrib.corestats.CoreStats': 0,
'scrapy.webservice.WebService': 0,
'scrapy.telnet.TelnetConsole': 0,
'scrapy.contrib.memusage.MemoryUsage': 0,
'scrapy.contrib.memdebug.MemoryDebugger': 0,
'scrapy.contrib.closespider.CloseSpider': 0,
'scrapy.contrib.feedexport.FeedExporter': 0,
'scrapy.contrib.logstats.LogStats': 0,
'scrapy.contrib.spiderstate.SpiderState': 0,
'scrapy.contrib.throttle.AutoThrottle': 0,
}
FEED_URI = None
FEED_URI_PARAMS = None # a function to extend uri arguments
FEED_FORMAT = 'jsonlines'
FEED_STORE_EMPTY = False
FEED_STORAGES = {}
FEED_STORAGES_BASE = {
'': 'scrapy.contrib.feedexport.FileFeedStorage',
'file': 'scrapy.contrib.feedexport.FileFeedStorage',
'stdout': 'scrapy.contrib.feedexport.StdoutFeedStorage',
's3': 'scrapy.contrib.feedexport.S3FeedStorage',
'ftp': 'scrapy.contrib.feedexport.FTPFeedStorage',
}
FEED_EXPORTERS = {}
FEED_EXPORTERS_BASE = {
'json': 'scrapy.contrib.exporter.JsonItemExporter',
'jsonlines': 'scrapy.contrib.exporter.JsonLinesItemExporter',
'csv': 'scrapy.contrib.exporter.CsvItemExporter',
'xml': 'scrapy.contrib.exporter.XmlItemExporter',
'marshal': 'scrapy.contrib.exporter.MarshalItemExporter',
'pickle': 'scrapy.contrib.exporter.PickleItemExporter',
}
HTTPCACHE_ENABLED = False
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_MISSING = False
HTTPCACHE_STORAGE = 'scrapy.contrib.httpcache.DbmCacheStorage'
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_IGNORE_SCHEMES = ['file']
HTTPCACHE_DBM_MODULE = 'anydbm'
ITEM_PROCESSOR = 'scrapy.contrib.pipeline.ItemPipelineManager'
# Item pipelines are typically set in specific commands settings
ITEM_PIPELINES = []
LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
LOG_STDOUT = False
LOG_LEVEL = 'DEBUG'
LOG_FILE = None
LOG_UNSERIALIZABLE_REQUESTS = False
LOGSTATS_INTERVAL = 60.0
MAIL_DEBUG = False
MAIL_HOST = 'localhost'
MAIL_PORT = 25
MAIL_FROM = 'scrapy@localhost'
MAIL_PASS = None
MAIL_USER = None
MEMDEBUG_ENABLED = False # enable memory debugging
MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown
MEMUSAGE_ENABLED = False
MEMUSAGE_LIMIT_MB = 0
MEMUSAGE_NOTIFY_MAIL = []
MEMUSAGE_REPORT = False
MEMUSAGE_WARNING_MB = 0
NEWSPIDER_MODULE = ''
RANDOMIZE_DOWNLOAD_DELAY = True
REDIRECT_ENABLED = True
REDIRECT_MAX_METAREFRESH_DELAY = 100
REDIRECT_MAX_TIMES = 20 # uses Firefox default setting
REDIRECT_PRIORITY_ADJUST = +2
REFERER_ENABLED = True
RETRY_ENABLED = True
RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
RETRY_HTTP_CODES = [500, 503, 504, 400, 408]
RETRY_PRIORITY_ADJUST = -1
ROBOTSTXT_OBEY = False
SCHEDULER = 'scrapy.core.scheduler.Scheduler'
SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'
SPIDER_MANAGER_CLASS = 'scrapy.spidermanager.SpiderManager'
SPIDER_MIDDLEWARES = {}
SPIDER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
# Spider side
}
SPIDER_MODULES = []
STATS_CLASS = 'scrapy.statscol.MemoryStatsCollector'
STATS_DUMP = True
STATSMAILER_RCPTS = []
TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))
URLLENGTH_LIMIT = 2083
USER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % __import__('scrapy').__version__
TELNETCONSOLE_ENABLED = 1
TELNETCONSOLE_PORT = [6023, 6073]
TELNETCONSOLE_HOST = '0.0.0.0'
WEBSERVICE_ENABLED = True
WEBSERVICE_LOGFILE = None
WEBSERVICE_PORT = [6080, 7030]
WEBSERVICE_HOST = '0.0.0.0'
WEBSERVICE_RESOURCES = {}
WEBSERVICE_RESOURCES_BASE = {
'scrapy.contrib.webservice.crawler.CrawlerResource': 1,
'scrapy.contrib.webservice.enginestatus.EngineStatusResource': 1,
'scrapy.contrib.webservice.stats.StatsResource': 1,
}
SPIDER_CONTRACTS = {}
SPIDER_CONTRACTS_BASE = {
'scrapy.contracts.default.UrlContract' : 1,
'scrapy.contracts.default.ReturnsContract': 2,
'scrapy.contracts.default.ScrapesContract': 3,
}
| bsd-3-clause | -2,283,552,007,354,715,000 | 29.231707 | 95 | 0.751244 | false |
C00kiie/Youtube-Mp3-telegram-bot | youtube_dl/extractor/crackle.py | 23 | 4916 | # coding: utf-8
from __future__ import unicode_literals, division
from .common import InfoExtractor
from ..utils import int_or_none
class CrackleIE(InfoExtractor):
_GEO_COUNTRIES = ['US']
_VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
_TEST = {
'url': 'http://www.crackle.com/comedians-in-cars-getting-coffee/2498934',
'info_dict': {
'id': '2498934',
'ext': 'mp4',
'title': 'Everybody Respects A Bloody Nose',
'description': 'Jerry is kaffeeklatsching in L.A. with funnyman J.B. Smoove (Saturday Night Live, Real Husbands of Hollywood). They’re headed for brew at 10 Speed Coffee in a 1964 Studebaker Avanti.',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 906,
'series': 'Comedians In Cars Getting Coffee',
'season_number': 8,
'episode_number': 4,
'subtitles': {
'en-US': [
{'ext': 'vtt'},
{'ext': 'tt'},
]
},
},
'params': {
# m3u8 download
'skip_download': True,
}
}
_THUMBNAIL_RES = [
(120, 90),
(208, 156),
(220, 124),
(220, 220),
(240, 180),
(250, 141),
(315, 236),
(320, 180),
(360, 203),
(400, 300),
(421, 316),
(460, 330),
(460, 460),
(462, 260),
(480, 270),
(587, 330),
(640, 480),
(700, 330),
(700, 394),
(854, 480),
(1024, 1024),
(1920, 1080),
]
# extracted from http://legacyweb-us.crackle.com/flash/ReferrerRedirect.ashx
_MEDIA_FILE_SLOTS = {
'c544.flv': {
'width': 544,
'height': 306,
},
'360p.mp4': {
'width': 640,
'height': 360,
},
'480p.mp4': {
'width': 852,
'height': 478,
},
'480p_1mbps.mp4': {
'width': 852,
'height': 478,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
config_doc = self._download_xml(
'http://legacyweb-us.crackle.com/flash/QueryReferrer.ashx?site=16',
video_id, 'Downloading config')
item = self._download_xml(
'http://legacyweb-us.crackle.com/app/revamp/vidwallcache.aspx?flags=-1&fm=%s' % video_id,
video_id, headers=self.geo_verification_headers()).find('i')
title = item.attrib['t']
subtitles = {}
formats = self._extract_m3u8_formats(
'http://content.uplynk.com/ext/%s/%s.m3u8' % (config_doc.attrib['strUplynkOwnerId'], video_id),
video_id, 'mp4', m3u8_id='hls', fatal=None)
thumbnails = []
path = item.attrib.get('p')
if path:
for width, height in self._THUMBNAIL_RES:
res = '%dx%d' % (width, height)
thumbnails.append({
'id': res,
'url': 'http://images-us-am.crackle.com/%stnl_%s.jpg' % (path, res),
'width': width,
'height': height,
'resolution': res,
})
http_base_url = 'http://ahttp.crackle.com/' + path
for mfs_path, mfs_info in self._MEDIA_FILE_SLOTS.items():
formats.append({
'url': http_base_url + mfs_path,
'format_id': 'http-' + mfs_path.split('.')[0],
'width': mfs_info['width'],
'height': mfs_info['height'],
})
for cc in item.findall('cc'):
locale = cc.attrib.get('l')
v = cc.attrib.get('v')
if locale and v:
if locale not in subtitles:
subtitles[locale] = []
for url_ext, ext in (('vtt', 'vtt'), ('xml', 'tt')):
subtitles.setdefault(locale, []).append({
'url': '%s/%s%s_%s.%s' % (config_doc.attrib['strSubtitleServer'], path, locale, v, url_ext),
'ext': ext,
})
self._sort_formats(formats, ('width', 'height', 'tbr', 'format_id'))
return {
'id': video_id,
'title': title,
'description': item.attrib.get('d'),
'duration': int(item.attrib.get('r'), 16) / 1000 if item.attrib.get('r') else None,
'series': item.attrib.get('sn'),
'season_number': int_or_none(item.attrib.get('se')),
'episode_number': int_or_none(item.attrib.get('ep')),
'thumbnails': thumbnails,
'subtitles': subtitles,
'formats': formats,
}
| mit | 6,306,601,350,509,899,000 | 33.851064 | 212 | 0.452584 | false |
PFWhite/qipr_approver | qipr_approver/approver/workflows/user_crud.py | 1 | 6581 | from approver.models import Person, Speciality, Expertise, QI_Interest, Suffix, Address, Organization, ClinicalArea, Self_Classification
from approver.constants import SESSION_VARS, ADDRESS_TYPE
from approver.utils import extract_tags, update_tags, true_false_to_bool, extract_model
from django.contrib.auth.models import User
from django.utils import timezone
from approver import utils
from approver.workflows.contact_person import add_contact_for_person
def create_new_user_from_current_session(session):
"""
This function creates a user from the session after shib validates
"""
now = timezone.now()
new_user = User(username=session.get(SESSION_VARS['gatorlink']),
email=session.get(SESSION_VARS['email']),
last_login=now,
last_name=session.get(SESSION_VARS['last_name']),
first_name=session.get(SESSION_VARS['first_name']))
new_user.save()
new_person = Person(user=new_user,
gatorlink=new_user.username,
first_name=new_user.first_name,
last_name=new_user.last_name,
email_address=new_user.email,
last_login_time=now,
account_expiration_time=utils.get_account_expiration_date(now))
new_person.save(last_modified_by=new_user)
return new_user
def check_changed_contact(person, form):
new_email_address = form.get('email')
new_first_name = form.get('first_name')
new_last_name = form.get('last_name')
if new_email_address != person.email_address:
return True
if new_first_name != person.first_name:
return True
if new_last_name != person.last_name:
return True
return False
def update_user_from_about_you_form(person, about_you_form, editing_user):
"""
This function changes an existing (user,person) entry
based on the information in the about_you_form.
This will not work if the (user,person) does not yet
exist.
"""
now = timezone.now()
changed_contact = check_changed_contact(person, about_you_form)
person.business_phone = about_you_form.get('business_phone') or None
person.contact_phone = about_you_form.get('contact_phone') or None
person.email_address = about_you_form.get('email')
person.first_name = about_you_form.get('first_name')
person.last_name = about_you_form.get('last_name')
person.webpage_url = about_you_form.get('webpage_url')
person.title = about_you_form.get('title')
person.department = about_you_form.get('department')
person.qi_required = about_you_form.get('qi_required')
person.training = about_you_form.get('training_program')
person.self_classification = extract_model(Self_Classification, "name", about_you_form.get('select-self_classification') or '')
if (about_you_form.get('select-self_classification') == 'other'):
person.other_self_classification = about_you_form.get('other_classification')
else:
person.other_self_classification = None
clinical_area = extract_tags(about_you_form, 'clinical_area')
expertises = extract_tags(about_you_form, 'expertise')
qi_interest = extract_tags(about_you_form, 'qi_interest')
specialities = extract_tags(about_you_form, 'speciality')
suffixes = extract_tags(about_you_form, 'suffix')
person = update_tags(model=person,
tag_property='expertise',
tags=expertises,
tag_model=Expertise,
tagging_user=editing_user)
person = update_tags(model=person,
tag_property='qi_interest',
tags=qi_interest,
tag_model=QI_Interest,
tagging_user=editing_user)
person = update_tags(model=person,
tag_property='speciality',
tags=specialities,
tag_model=Speciality,
tagging_user=editing_user)
person = update_tags(model=person,
tag_property='suffix',
tags=suffixes,
tag_model=Suffix,
tagging_user=editing_user)
person = update_tags(model=person,
tag_property='clinical_area',
tags=clinical_area,
tag_model=ClinicalArea,
tagging_user=editing_user)
save_address_from_form(about_you_form, editing_user, ADDRESS_TYPE['business'], person)
person.save(last_modified_by=editing_user)
if changed_contact:
add_contact_for_person(person, editing_user)
return person
def save_address_from_form(form, user, address_type, person=None, organization=None):
"""
This function will take a form and save address data found in it.
This function uses the following values:
* form: the form from the address.html template
* user: the current user
* address_type: the type of address found in constants.ADDRESS_TYPE
* person: the person who is assigned this address
    * organization: the organization which is assigned this address
"""
address1_list = form.getlist('address1_' + address_type)
address2_list = form.getlist('address2_' + address_type)
city_list = form.getlist('city_' + address_type)
state_list = form.getlist('state_' + address_type)
zip_code_list = form.getlist('zip_code_' + address_type)
country_list = form.getlist('country_' + address_type)
address_id_list = form.getlist('address_id_' + address_type)
zipped_address_values = zip(
address1_list,
address2_list,
city_list,
state_list,
zip_code_list,
country_list,
address_id_list
)
__save_each_address_tuple(zipped_address_values, user, person, organization)
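# Illustrative note (field names inferred from the getlist() calls above; the values
# are hypothetical): for an address_type such as 'business', the submitted form is
# expected to carry parallel lists like
#   address1_business = ['123 Main St'], city_business = ['Gainesville'],
#   state_business = ['FL'], zip_code_business = ['32611'], ...
# which are zipped into per-address tuples and saved one Address at a time below.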
def __save_each_address_tuple(address_values, user, person=None, organization=None):
"""
This function will save the addresses generated in the save_address_from_list function
"""
ADDRESS1 = 0
ADDRESS2 = 1
CITY = 2
STATE = 3
ZIP_CODE = 4
COUNTRY = 5
ADDRESS_ID = 6
for values in address_values:
address=Address.objects.get(id=values[ADDRESS_ID]) if values[ADDRESS_ID] else Address()
address.address1=values[ADDRESS1]
address.address2=values[ADDRESS2]
address.city=values[CITY]
address.state=values[STATE]
address.zip_code=values[ZIP_CODE]
address.country=values[COUNTRY]
address.person=person
address.organization=organization
address.save(user)
| apache-2.0 | 378,675,038,706,688,800 | 37.261628 | 136 | 0.645647 | false |
alexvanboxel/airflow | airflow/hooks/S3_hook.py | 10 | 17040 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from future import standard_library
standard_library.install_aliases()
import logging
import re
import fnmatch
import configparser
import math
import os
from urllib.parse import urlparse
import warnings
import boto
from boto.s3.connection import S3Connection
from boto.sts import STSConnection
boto.set_stream_logger('boto')
logging.getLogger("boto").setLevel(logging.INFO)
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
def _parse_s3_config(config_file_name, config_format='boto', profile=None):
"""
Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str
"""
Config = configparser.ConfigParser()
if Config.read(config_file_name): # pragma: no cover
sections = Config.sections()
else:
raise AirflowException("Couldn't read {0}".format(config_file_name))
# Setting option names depending on file format
if config_format is None:
config_format = 'boto'
conf_format = config_format.lower()
if conf_format == 'boto': # pragma: no cover
if profile is not None and 'profile ' + profile in sections:
cred_section = 'profile ' + profile
else:
cred_section = 'Credentials'
elif conf_format == 'aws' and profile is not None:
cred_section = profile
else:
cred_section = 'default'
# Option names
if conf_format in ('boto', 'aws'): # pragma: no cover
key_id_option = 'aws_access_key_id'
secret_key_option = 'aws_secret_access_key'
# security_token_option = 'aws_security_token'
else:
key_id_option = 'access_key'
secret_key_option = 'secret_key'
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = Config.get(cred_section, key_id_option)
secret_key = Config.get(cred_section, secret_key_option)
calling_format = None
if Config.has_option(cred_section, 'calling_format'):
calling_format = Config.get(cred_section, 'calling_format')
except:
logging.warning("Option Error in parsing s3 config file")
raise
return (access_key, secret_key, calling_format)
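# Illustrative examples (not part of the original module) of the credential files
# that _parse_s3_config() accepts; the key values shown are placeholders.
#
#   # boto / aws style ("Credentials" or "profile <name>" section)
#   [Credentials]
#   aws_access_key_id = AKIAXXXXXXXXXXXXXXXX
#   aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
#   # s3cmd style ("default" section)
#   [default]
#   access_key = AKIAXXXXXXXXXXXXXXXX
#   secret_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx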
class S3Hook(BaseHook):
"""
Interact with S3. This class is a wrapper around the boto library.
"""
def __init__(
self,
s3_conn_id='s3_default'):
self.s3_conn_id = s3_conn_id
self.s3_conn = self.get_connection(s3_conn_id)
self.extra_params = self.s3_conn.extra_dejson
self.profile = self.extra_params.get('profile')
self.calling_format = None
self._creds_in_conn = 'aws_secret_access_key' in self.extra_params
self._creds_in_config_file = 's3_config_file' in self.extra_params
self._default_to_boto = False
if self._creds_in_conn:
self._a_key = self.extra_params['aws_access_key_id']
self._s_key = self.extra_params['aws_secret_access_key']
if 'calling_format' in self.extra_params:
self.calling_format = self.extra_params['calling_format']
elif self._creds_in_config_file:
self.s3_config_file = self.extra_params['s3_config_file']
# The format can be None and will default to boto in the parser
self.s3_config_format = self.extra_params.get('s3_config_format')
else:
self._default_to_boto = True
# STS support for cross account resource access
self._sts_conn_required = ('aws_account_id' in self.extra_params or
'role_arn' in self.extra_params)
if self._sts_conn_required:
self.role_arn = (self.extra_params.get('role_arn') or
"arn:aws:iam::" +
self.extra_params['aws_account_id'] +
":role/" +
self.extra_params['aws_iam_role'])
self.connection = self.get_conn()
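    # Sketch (not from the original source) of the connection "extra" JSON the
    # constructor above understands; all keys are optional and the values here
    # are placeholders:
    #   {"aws_access_key_id": "AKIA...",
    #    "aws_secret_access_key": "...",
    #    "profile": "dev",
    #    "calling_format": "boto.s3.connection.OrdinaryCallingFormat",
    #    "role_arn": "arn:aws:iam::123456789012:role/example-role"}
    # Alternatively, "s3_config_file" (plus optional "s3_config_format") points at
    # a boto/aws/s3cmd credentials file parsed by _parse_s3_config() above.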
def __getstate__(self):
pickled_dict = dict(self.__dict__)
del pickled_dict['connection']
return pickled_dict
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['connection'] = self.get_conn()
def _parse_s3_url(self, s3url):
warnings.warn(
'Please note: S3Hook._parse_s3_url() is now '
'S3Hook.parse_s3_url() (no leading underscore).',
DeprecationWarning)
return self.parse_s3_url(s3url)
@staticmethod
def parse_s3_url(s3url):
parsed_url = urlparse(s3url)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return (bucket_name, key)
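    # For example (bucket and key are hypothetical):
    #   parse_s3_url('s3://my-bucket/some/key.txt') -> ('my-bucket', 'some/key.txt')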
def get_conn(self):
"""
Returns the boto S3Connection object.
"""
if self._default_to_boto:
return S3Connection(profile_name=self.profile)
a_key = s_key = None
if self._creds_in_config_file:
a_key, s_key, calling_format = _parse_s3_config(self.s3_config_file,
self.s3_config_format,
self.profile)
elif self._creds_in_conn:
a_key = self._a_key
s_key = self._s_key
calling_format = self.calling_format
if calling_format is None:
calling_format = 'boto.s3.connection.SubdomainCallingFormat'
if self._sts_conn_required:
sts_connection = STSConnection(aws_access_key_id=a_key,
aws_secret_access_key=s_key,
profile_name=self.profile)
assumed_role_object = sts_connection.assume_role(
role_arn=self.role_arn,
role_session_name="Airflow_" + self.s3_conn_id
)
creds = assumed_role_object.credentials
connection = S3Connection(
aws_access_key_id=creds.access_key,
aws_secret_access_key=creds.secret_key,
calling_format=calling_format,
security_token=creds.session_token
)
else:
connection = S3Connection(aws_access_key_id=a_key,
aws_secret_access_key=s_key,
calling_format=calling_format,
profile_name=self.profile)
return connection
    def get_credentials(self):
        # Guard against neither credential source being configured.
        a_key = s_key = None
        if self._creds_in_config_file:
a_key, s_key, calling_format = _parse_s3_config(self.s3_config_file,
self.s3_config_format,
self.profile)
elif self._creds_in_conn:
a_key = self._a_key
s_key = self._s_key
return a_key, s_key
def check_for_bucket(self, bucket_name):
"""
Check if bucket_name exists.
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
return self.connection.lookup(bucket_name) is not None
def get_bucket(self, bucket_name):
"""
Returns a boto.s3.bucket.Bucket object
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
return self.connection.get_bucket(bucket_name)
def list_keys(self, bucket_name, prefix='', delimiter=''):
"""
Lists keys in a bucket under prefix and not containing delimiter
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
"""
b = self.get_bucket(bucket_name)
keylist = list(b.list(prefix=prefix, delimiter=delimiter))
return [k.name for k in keylist] if keylist != [] else None
def list_prefixes(self, bucket_name, prefix='', delimiter=''):
"""
Lists prefixes in a bucket under prefix
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
"""
b = self.get_bucket(bucket_name)
plist = b.list(prefix=prefix, delimiter=delimiter)
prefix_names = [p.name for p in plist
if isinstance(p, boto.s3.prefix.Prefix)]
return prefix_names if prefix_names != [] else None
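    # For instance (layout hypothetical): with objects 'data/a.csv' and
    # 'data/sub/b.csv', list_prefixes(bucket, prefix='data/', delimiter='/')
    # returns ['data/sub/'] -- the "directories" one level below the prefix.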
def check_for_key(self, key, bucket_name=None):
"""
Checks that a key exists in a bucket
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
bucket = self.get_bucket(bucket_name)
return bucket.get_key(key) is not None
def get_key(self, key, bucket_name=None):
"""
Returns a boto.s3.key.Key object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
bucket = self.get_bucket(bucket_name)
return bucket.get_key(key)
def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
"""
Checks that a key matching a wildcard expression exists in a bucket
"""
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
"""
        Returns a boto.s3.key.Key object matching the wildcard expression
        :param wildcard_key: the path to the key
        :type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
bucket = self.get_bucket(bucket_name)
prefix = re.split(r'[*]', wildcard_key, 1)[0]
klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
if not klist:
return None
key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
return bucket.get_key(key_matches[0]) if key_matches else None
def check_for_prefix(self, bucket_name, prefix, delimiter):
"""
Checks that a prefix exists in a bucket
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return False if plist is None else prefix in plist
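    # Worked example (names hypothetical): for prefix 'logs/2017/' and delimiter '/',
    # the split above yields previous_level 'logs/', so the method returns True only
    # if list_prefixes(bucket_name, 'logs/', '/') contains 'logs/2017/'.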
def load_file(
self,
filename,
key,
bucket_name=None,
replace=False,
multipart_bytes=5 * (1024 ** 3),
encrypt=False):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param multipart_bytes: If provided, the file is uploaded in parts of
this size (minimum 5242880). The default value is 5GB, since S3
cannot accept non-multipart uploads for files larger than 5GB. If
the file is smaller than the specified limit, the option will be
ignored.
:type multipart_bytes: int
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
bucket = self.get_bucket(bucket_name)
key_obj = bucket.get_key(key)
if not replace and key_obj:
raise ValueError("The key {key} already exists.".format(
**locals()))
key_size = os.path.getsize(filename)
if multipart_bytes and key_size >= multipart_bytes:
# multipart upload
from filechunkio import FileChunkIO
mp = bucket.initiate_multipart_upload(key_name=key,
encrypt_key=encrypt)
total_chunks = int(math.ceil(key_size / multipart_bytes))
sent_bytes = 0
try:
for chunk in range(total_chunks):
offset = chunk * multipart_bytes
bytes = min(multipart_bytes, key_size - offset)
with FileChunkIO(
filename, 'r', offset=offset, bytes=bytes) as fp:
logging.info('Sending chunk {c} of {tc}...'.format(
c=chunk + 1, tc=total_chunks))
mp.upload_part_from_file(fp, part_num=chunk + 1)
except:
mp.cancel_upload()
raise
mp.complete_upload()
else:
# regular upload
if not key_obj:
key_obj = bucket.new_key(key_name=key)
key_size = key_obj.set_contents_from_filename(filename,
replace=replace,
encrypt_key=encrypt)
logging.info("The key {key} now contains"
" {key_size} bytes".format(**locals()))
def load_string(self, string_data,
key, bucket_name=None,
replace=False,
encrypt=False):
"""
        Loads a string to S3
        This is provided as a convenience to drop a string in S3. It uses the
        boto infrastructure to ship the data to s3. It is currently using only
        a single part upload, and should not be used to move large amounts of data.
:param string_data: string to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
bucket = self.get_bucket(bucket_name)
key_obj = bucket.get_key(key)
if not replace and key_obj:
raise ValueError("The key {key} already exists.".format(
**locals()))
if not key_obj:
key_obj = bucket.new_key(key_name=key)
key_size = key_obj.set_contents_from_string(string_data,
replace=replace,
encrypt_key=encrypt)
logging.info("The key {key} now contains"
" {key_size} bytes".format(**locals()))
| apache-2.0 | -5,699,005,850,409,829,000 | 39.094118 | 82 | 0.569073 | false |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/python/mxnet/context.py | 4 | 9068 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Context management API of mxnet."""
from __future__ import absolute_import
import threading
import warnings
import ctypes
from .base import classproperty, with_metaclass, _MXClassPropertyMetaClass
from .base import _LIB
from .base import check_call
class Context(with_metaclass(_MXClassPropertyMetaClass, object)):
"""Constructs a context.
MXNet can run operations on CPU and different GPUs.
A context describes the device type and ID on which computation should be carried on.
One can use mx.cpu and mx.gpu for short.
See also
----------
`How to run MXNet on multiple CPU/GPUs <http://mxnet.io/faq/multi_devices.html>`
for more details.
Parameters
----------
device_type : {'cpu', 'gpu'} or Context.
String representing the device type.
device_id : int (default=0)
The device id of the device, needed for GPU.
Note
----
Context can also be used as a way to change the default context.
Examples
--------
>>> # array on cpu
>>> cpu_array = mx.nd.ones((2, 3))
>>> # switch default context to GPU(2)
>>> with mx.Context(mx.gpu(2)):
... gpu_array = mx.nd.ones((2, 3))
>>> gpu_array.context
gpu(2)
One can also explicitly specify the context when creating an array.
>>> gpu_array = mx.nd.ones((2, 3), mx.gpu(1))
>>> gpu_array.context
gpu(1)
"""
# static class variable
_default_ctx = threading.local()
devtype2str = {1: 'cpu', 2: 'gpu', 3: 'cpu_pinned', 5: 'cpu_shared'}
devstr2type = {'cpu': 1, 'gpu': 2, 'cpu_pinned': 3, 'cpu_shared': 5}
def __init__(self, device_type, device_id=0):
if isinstance(device_type, Context):
self.device_typeid = device_type.device_typeid
self.device_id = device_type.device_id
else:
self.device_typeid = Context.devstr2type[device_type]
self.device_id = device_id
self._old_ctx = None
@property
def device_type(self):
"""Returns the device type of current context.
Examples
-------
>>> mx.context.current_context().device_type
'cpu'
>>> mx.current_context().device_type
'cpu'
Returns
-------
device_type : str
"""
return Context.devtype2str[self.device_typeid]
def __hash__(self):
"""Compute hash value of context for dictionary lookup"""
return hash((self.device_typeid, self.device_id))
def __eq__(self, other):
"""Compares two contexts. Two contexts are equal if they
have the same device type and device id.
"""
return isinstance(other, Context) and \
self.device_typeid == other.device_typeid and \
self.device_id == other.device_id
def __str__(self):
return '%s(%d)' % (self.device_type, self.device_id)
def __repr__(self):
return self.__str__()
def __enter__(self):
if not hasattr(Context._default_ctx, "value"):
Context._default_ctx.value = Context('cpu', 0)
self._old_ctx = Context._default_ctx.value
Context._default_ctx.value = self
return self
def __exit__(self, ptype, value, trace):
Context._default_ctx.value = self._old_ctx
#pylint: disable=no-self-argument
@classproperty
def default_ctx(cls):
warnings.warn("Context.default_ctx has been deprecated. "
"Please use Context.current_context() instead. "
"Please use test_utils.set_default_context to set a default context",
DeprecationWarning)
if not hasattr(Context._default_ctx, "value"):
cls._default_ctx.value = Context('cpu', 0)
return cls._default_ctx.value
@default_ctx.setter
def default_ctx(cls, val):
warnings.warn("Context.default_ctx has been deprecated. "
"Please use Context.current_context() instead. "
"Please use test_utils.set_default_context to set a default context",
DeprecationWarning)
cls._default_ctx.value = val
#pylint: enable=no-self-argument
# initialize the default context in Context
Context._default_ctx.value = Context('cpu', 0)
def cpu(device_id=0):
"""Returns a CPU context.
This function is a short cut for ``Context('cpu', device_id)``.
For most operations, when no context is specified, the default context is `cpu()`.
Examples
----------
>>> with mx.cpu():
... cpu_array = mx.nd.ones((2, 3))
>>> cpu_array.context
cpu(0)
>>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu())
>>> cpu_array.context
cpu(0)
Parameters
----------
device_id : int, optional
The device id of the device. `device_id` is not needed for CPU.
This is included to make interface compatible with GPU.
Returns
-------
context : Context
The corresponding CPU context.
"""
return Context('cpu', device_id)
def cpu_pinned(device_id=0):
"""Returns a CPU pinned memory context. Copying from CPU pinned memory to GPU
is faster than from normal CPU memory.
This function is a short cut for ``Context('cpu_pinned', device_id)``.
Examples
----------
>>> with mx.cpu_pinned():
... cpu_array = mx.nd.ones((2, 3))
>>> cpu_array.context
cpu_pinned(0)
>>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu_pinned())
>>> cpu_array.context
cpu_pinned(0)
Parameters
----------
device_id : int, optional
The device id of the device. `device_id` is not needed for CPU.
This is included to make interface compatible with GPU.
Returns
-------
context : Context
The corresponding CPU pinned memory context.
"""
return Context('cpu_pinned', device_id)
def gpu(device_id=0):
"""Returns a GPU context.
This function is a short cut for Context('gpu', device_id).
The K GPUs on a node are typically numbered as 0,...,K-1.
Examples
----------
>>> cpu_array = mx.nd.ones((2, 3))
>>> cpu_array.context
cpu(0)
>>> with mx.gpu(1):
... gpu_array = mx.nd.ones((2, 3))
>>> gpu_array.context
gpu(1)
>>> gpu_array = mx.nd.ones((2, 3), ctx=mx.gpu(1))
>>> gpu_array.context
gpu(1)
Parameters
----------
device_id : int, optional
The device id of the device, needed for GPU.
Returns
-------
context : Context
The corresponding GPU context.
"""
return Context('gpu', device_id)
def num_gpus():
"""Query CUDA for the number of GPUs present.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
count : int
The number of GPUs.
"""
count = ctypes.c_int()
check_call(_LIB.MXGetGPUCount(ctypes.byref(count)))
return count.value
def gpu_memory_info(device_id=0):
"""Query CUDA for the free and total bytes of GPU global memory.
Parameters
----------
device_id : int, optional
The device id of the GPU device.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
(free, total) : (int, int)
        The free and total bytes of GPU global memory, respectively.
"""
free = ctypes.c_uint64()
total = ctypes.c_uint64()
dev_id = ctypes.c_int(device_id)
check_call(_LIB.MXGetGPUMemoryInformation64(dev_id, ctypes.byref(free), ctypes.byref(total)))
return (free.value, total.value)
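# Illustrative usage (the numbers are hypothetical; a CUDA-enabled build with at
# least one visible GPU is assumed):
#   >>> mx.context.num_gpus()
#   2
#   >>> mx.context.gpu_memory_info(0)
#   (10842275840, 11996954624)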
def current_context():
"""Returns the current context.
By default, `mx.cpu()` is used for all the computations
and it can be overridden by using `with mx.Context(x)` statement where
x can be cpu(device_id) or gpu(device_id).
Examples
-------
>>> mx.current_context()
cpu(0)
>>> with mx.Context('gpu', 1): # Context changed in `with` block.
... mx.current_context() # Computation done here will be on gpu(1).
...
gpu(1)
>>> mx.current_context() # Back to default context.
cpu(0)
Returns
-------
default_ctx : Context
"""
if not hasattr(Context._default_ctx, "value"):
Context._default_ctx.value = Context('cpu', 0)
return Context._default_ctx.value
| apache-2.0 | -4,114,525,253,454,067,000 | 28.346278 | 97 | 0.606308 | false |
1tush/reviewboard | reviewboard/webapi/tests/mixins_review.py | 1 | 8138 | from __future__ import unicode_literals
from reviewboard.webapi.tests.mixins import test_template
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
class ReviewListMixin(ExtraDataListMixin):
@test_template
def test_post_with_text_type_markdown(self):
"""Testing the POST <URL> API with text_type=markdown"""
self._test_post_with_text_type('markdown')
@test_template
def test_post_with_text_type_plain(self):
"""Testing the POST <URL> API with text_type=plain"""
self._test_post_with_text_type('plain')
def _test_post_with_text_type(self, text_type):
body_top = '`This` is **body_top**'
body_bottom = '`This` is **body_bottom**'
url, mimetype, data, objs = \
self.setup_basic_post_test(self.user, False, None, True)
data['body_top'] = body_top
data['body_bottom'] = body_bottom
data['text_type'] = text_type
rsp = self.api_post(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_rsp = rsp[self.resource.item_result_key]
self.assertEqual(review_rsp['body_top'], body_top)
self.assertEqual(review_rsp['body_bottom'], body_bottom)
self.assertEqual(review_rsp['text_type'], text_type)
self.compare_item(review_rsp,
self.resource.model.objects.get(pk=review_rsp['id']))
class ReviewItemMixin(ExtraDataItemMixin):
@test_template
def test_put_with_text_type_markdown_all_fields(self):
"""Testing the PUT <URL> API
with text_type=markdown and all fields specified
"""
self._test_put_with_text_type_all_fields('markdown')
def test_put_with_text_type_plain_all_fields(self):
"""Testing the PUT <URL> API
with text_type=plain and all fields specified
"""
self._test_put_with_text_type_all_fields('plain')
@test_template
def test_put_with_text_type_markdown_escaping_all_fields(self):
"""Testing the PUT <URL> API
with changing text_type to markdown and escaping all fields
"""
self._test_put_with_text_type_escaping_all_fields(
'markdown',
'`This` is **body_top**',
'`This` is **body_bottom**',
r'\`This\` is \*\*body\_top\*\*',
r'\`This\` is \*\*body\_bottom\*\*')
@test_template
def test_put_with_text_type_plain_escaping_all_fields(self):
"""Testing the PUT <URL> API
with changing text_type to plain and unescaping all fields
"""
self._test_put_with_text_type_escaping_all_fields(
'plain',
r'\`This\` is \*\*body_top\*\*',
r'\`This\` is \*\*body_bottom\*\*',
'`This` is **body_top**',
'`This` is **body_bottom**')
@test_template
def test_put_with_text_type_markdown_escaping_unspecified_fields(self):
"""Testing the PUT <URL> API
with changing text_type to markdown and escaping unspecified fields
"""
self._test_put_with_text_type_escaping_unspecified_fields(
'markdown',
'`This` is **body_top**',
r'\`This\` is \*\*body\_top\*\*')
@test_template
def test_put_with_text_type_plain_escaping_unspecified_fields(self):
"""Testing the PUT <URL> API
with changing text_type to plain and unescaping unspecified fields
"""
self._test_put_with_text_type_escaping_unspecified_fields(
'plain',
r'\`This\` is \*\*body_top\*\*',
'`This` is **body_top**')
@test_template
def test_put_without_text_type_and_escaping_provided_fields(self):
"""Testing the PUT <URL> API
without changing text_type and with escaping provided fields
"""
url, mimetype, data, review, objs = \
self.setup_basic_put_test(self.user, False, None, True)
review.rich_text = True
review.save()
if 'text_type' in data:
del data['text_type']
data.update({
'body_top': '`This` is **body_top**',
'body_bottom': '`This` is **body_bottom**',
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_rsp = rsp[self.resource.item_result_key]
self.assertEqual(review_rsp['text_type'], 'markdown')
self.assertEqual(review_rsp['body_top'],
r'\`This\` is \*\*body\_top\*\*')
self.assertEqual(review_rsp['body_bottom'],
r'\`This\` is \*\*body\_bottom\*\*')
self.compare_item(review_rsp,
self.resource.model.objects.get(pk=review_rsp['id']))
def _test_put_with_text_type_all_fields(self, text_type):
body_top = '`This` is **body_top**'
body_bottom = '`This` is **body_bottom**'
url, mimetype, data, review, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data.update({
'text_type': text_type,
'body_top': body_top,
'body_bottom': body_bottom,
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_rsp = rsp[self.resource.item_result_key]
self.assertEqual(review_rsp['text_type'], text_type)
self.assertEqual(review_rsp['body_top'], body_top)
self.assertEqual(review_rsp['body_bottom'], body_bottom)
self.compare_item(review_rsp,
self.resource.model.objects.get(pk=review_rsp['id']))
def _test_put_with_text_type_escaping_all_fields(
self, text_type, body_top, body_bottom,
expected_body_top, expected_body_bottom):
self.assertIn(text_type, ('markdown', 'plain'))
url, mimetype, data, review, objs = \
self.setup_basic_put_test(self.user, False, None, True)
review.body_top = body_top
review.body_bottom = body_bottom
if text_type == 'markdown':
review.rich_text = False
elif text_type == 'plain':
review.rich_text = True
review.save()
for field in ('body_top', 'body_bottom'):
if field in data:
del data[field]
data['text_type'] = text_type
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_rsp = rsp[self.resource.item_result_key]
self.assertEqual(review_rsp['text_type'], text_type)
self.assertEqual(review_rsp['body_top'], expected_body_top)
self.assertEqual(review_rsp['body_bottom'], expected_body_bottom)
self.compare_item(review_rsp,
self.resource.model.objects.get(pk=review_rsp['id']))
def _test_put_with_text_type_escaping_unspecified_fields(
self, text_type, body_top, expected_body_top):
self.assertIn(text_type, ('markdown', 'plain'))
body_bottom = '`This` is **body_bottom**'
url, mimetype, data, review, objs = \
self.setup_basic_put_test(self.user, False, None, True)
review.body_top = body_top
if text_type == 'markdown':
review.rich_text = False
elif text_type == 'plain':
review.rich_text = True
review.save()
data['text_type'] = text_type
data['body_bottom'] = body_bottom
if 'body_top' in data:
del data['body_top']
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_rsp = rsp[self.resource.item_result_key]
self.assertEqual(review_rsp['text_type'], text_type)
self.assertEqual(review_rsp['body_top'], expected_body_top)
self.assertEqual(review_rsp['body_bottom'], body_bottom)
self.compare_item(review_rsp,
self.resource.model.objects.get(pk=review_rsp['id']))
| mit | -4,623,619,825,217,934,000 | 36.675926 | 79 | 0.577906 | false |
alexm92/sentry | src/sentry/plugins/sentry_mail/activity/release.py | 2 | 2095 | from __future__ import absolute_import
from sentry import features
from sentry.db.models.query import in_iexact
from sentry.models import GroupSubscriptionReason, Release, ReleaseCommit, User
from .base import ActivityEmail
class ReleaseActivityEmail(ActivityEmail):
def __init__(self, activity):
super(ReleaseActivityEmail, self).__init__(activity)
try:
self.release = Release.objects.get(
project=self.project,
version=activity.data['version'],
)
except Release.DoesNotExist:
self.release = None
self.commit_list = []
else:
self.commit_list = [
rc.commit
for rc in ReleaseCommit.objects.filter(
release=self.release,
).select_related('commit', 'commit__author')
]
def should_email(self):
return bool(self.release)
def get_participants(self):
project = self.project
email_list = set([
c.author.email for c in self.commit_list
if c.author
])
if not email_list:
return {}
# identify members which have been seen in the commit log and have
# verified the matching email address
return {
user: GroupSubscriptionReason.committed
for user in User.objects.filter(
in_iexact('emails__email', email_list),
emails__is_verified=True,
sentry_orgmember_set__teams=project.team,
is_active=True,
).distinct()
if features.has('workflow:release-emails', actor=user)
}
def get_context(self):
return {
'commit_list': self.commit_list,
'release': self.release,
}
def get_subject(self):
return u'Released {}'.format(self.release.short_version)
def get_template(self):
return 'sentry/emails/activity/release.txt'
def get_html_template(self):
return 'sentry/emails/activity/release.html'
| bsd-3-clause | -3,922,186,275,890,436,600 | 29.362319 | 79 | 0.577566 | false |
notestaff/cosi | tests/make_cosi_tests.py | 1 | 3662 | #!/usr/bin/env python
#
# Script: make_cosi_tests.py
#
# Generate the scripts to call cosi test cases.
#
import sys, os, stat, logging
def SystemSucceed( cmd, dbg = False, exitCodesOk = ( 0, ) ):
"""Run a shell command, and raise a fatal error if the command fails."""
logging.info( 'Running command ' + cmd + ' ; called from ' + sys._getframe(1).f_code.co_filename + ':' +
str( sys._getframe(1).f_lineno ) )
exitCode = os.system( cmd )
logging.info( 'Finished command ' + cmd + ' with exit code ' + str( exitCode ) )
if exitCodesOk != 'any' and exitCode not in exitCodesOk:
raise IOError( "Command %s failed with exit code %d" % ( cmd, exitCode ) )
logging.basicConfig( level = logging.DEBUG, format='%(process)d %(asctime)s %(levelname)-8s %(filename)s:%(lineno)s %(message)s' )
ntests = 11
include_xfail_tests = False
suffixes = ( '', )
xfail = [ ]
tests = [ i for i in range( 1, ntests+1 ) if i not in xfail ]
if not include_xfail_tests: xfail = []
for testNum in range( 1, ntests+1 ):
for sfx in suffixes:
scriptFN = 'tests/t%d/run_%d%s' % ( testNum, testNum, sfx )
with open( scriptFN, 'w' ) as out:
out.write( '''#!/bin/sh
set -e -v
TN=%d
TS=%s
if [ "$(expr substr $(uname -s) 1 6)" == "CYGWIN" -o "$(expr substr $(uname -s) 1 5)" == "MINGW" ]; then
PS=win
else
PS=
fi
TD=t${TN}test${TS}
rm -rf $TD
mkdir $TD
srcdirabs=$(cd $srcdir && pwd)
pf=$srcdirabs/tests/t${TN}/0_simple
cp $pf.cosiParams $pf.model $TD/
pushd $TD
$COSI_TEST_VALGRIND ../coalescent$TS -p 0_simple.cosiParams -o 0_simple_test
pwd
diff -q 0_simple_test.hap-1 ${pf}$TS$PS.hap-1
diff -q 0_simple_test.hap-4 ${pf}$TS$PS.hap-4
diff -q 0_simple_test.hap-5 ${pf}$TS$PS.hap-5
diff -q 0_simple_test.pos-1 ${pf}$TS$PS.pos-1
diff -q 0_simple_test.pos-4 ${pf}$TS$PS.pos-4
diff -q 0_simple_test.pos-5 ${pf}$TS$PS.pos-5
#if [ -x ../sample_stats_extra ]; then
# $COSI_TEST_VALGRIND ../coalescent$TS -p 0_simple.cosiParams -n 10 -m | ../sample_stats_extra -a 1,2,3-100,101-360 -l .200-.201 > sample_stats_out.txt
# diff -q sample_stats_out.txt $srcdirabs/tests/t${TN}/sample_stats_out.txt
#fi
popd
rm -rf $TD
''' % ( testNum, sfx ) )
os.fchmod( out.fileno(), stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO )
testData = []
with open( 'cositests.am', 'w' ) as mf:
is_first = [ True, True ]
for sfx in suffixes:
if sfx: mf.write( 'if ' + sfx[1:].upper() + '\n' )
for in_xfail in ( False, True ):
mf.write( ( '%sTESTS %s= ' % ( ( 'XFAIL_' if in_xfail else '' ),'' if is_first[ in_xfail ] else '+' ) ) + ' '.join([ 'tests/t%d/run_%d%s' % ( testNum, testNum,
sfx )
for testNum in ( xfail if in_xfail else tests + xfail ) ]) + '\n' )
is_first[ in_xfail ] = False
if sfx: mf.write( 'endif\n' )
for platformSuffix in ( '', 'win' ):
testData += [ 'tests/t%d/0_simple%s.%s' % ( testNum, ( sfx + platformSuffix) if ext not in ( 'cosiParams', 'model' ) else '', ext )
for testNum in tests+xfail for sfx in suffixes for ext in ( 'cosiParams', 'model', 'hap-1', 'hap-4', 'hap-5', 'pos-1', 'pos-4', 'pos-5' ) ]
testData = sorted( set( testData ) )
# for f in testData: SystemSucceed( 'svn add --force ' + f )
mf.write( 'COSI_TESTDATA = ' + ' '.join( testData ) + '\n' )
| gpl-3.0 | 5,908,847,216,820,035,000 | 41.091954 | 196 | 0.53905 | false |
ZHAW-INES/rioxo-uClinux-dist | user/python/python-2.4.4/Lib/bsddb/test/test_thread.py | 11 | 15508 | """TestCases for multi-threaded access to a DB.
"""
import os
import sys
import time
import errno
import shutil
import tempfile
from pprint import pprint
from random import random
try:
True, False
except NameError:
True = 1
False = 0
DASH = '-'
try:
from threading import Thread, currentThread
have_threads = True
except ImportError:
have_threads = False
import unittest
from test_all import verbose
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbutils
except ImportError:
# For Python 2.3
from bsddb import db, dbutils
#----------------------------------------------------------------------
class BaseThreadedTestCase(unittest.TestCase):
dbtype = db.DB_UNKNOWN # must be set in derived class
dbopenflags = 0
dbsetflags = 0
envflags = 0
def setUp(self):
if verbose:
dbutils._deadlock_VerboseFile = sys.stdout
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
self.homeDir = homeDir
try:
os.mkdir(homeDir)
except OSError, e:
if e.errno <> errno.EEXIST: raise
self.env = db.DBEnv()
self.setEnvOpts()
self.env.open(homeDir, self.envflags | db.DB_CREATE)
self.filename = self.__class__.__name__ + '.db'
self.d = db.DB(self.env)
if self.dbsetflags:
self.d.set_flags(self.dbsetflags)
self.d.open(self.filename, self.dbtype, self.dbopenflags|db.DB_CREATE)
def tearDown(self):
self.d.close()
self.env.close()
shutil.rmtree(self.homeDir)
def setEnvOpts(self):
pass
def makeData(self, key):
return DASH.join([key] * 5)
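    # e.g. makeData('0001') -> '0001-0001-0001-0001-0001'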
#----------------------------------------------------------------------
class ConcurrentDataStoreBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL
readers = 0 # derived class should set
writers = 0
records = 1000
def test01_1WriterMultiReaders(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_1WriterMultiReaders..." % \
self.__class__.__name__
threads = []
for x in range(self.writers):
wt = Thread(target = self.writerThread,
args = (self.d, self.records, x),
name = 'writer %d' % x,
)#verbose = verbose)
threads.append(wt)
for x in range(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
threads.append(rt)
for t in threads:
t.start()
for t in threads:
t.join()
def writerThread(self, d, howMany, writerNum):
#time.sleep(0.01 * writerNum + 0.01)
name = currentThread().getName()
start = howMany * writerNum
stop = howMany * (writerNum + 1) - 1
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
for x in range(start, stop):
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
if verbose:
print "%s: finished creating records" % name
## # Each write-cursor will be exclusive, the only one that can update the DB...
## if verbose: print "%s: deleting a few records" % name
## c = d.cursor(flags = db.DB_WRITECURSOR)
## for x in range(10):
## key = int(random() * howMany) + start
## key = '%04d' % key
## if d.has_key(key):
## c.set(key)
## c.delete()
## c.close()
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
time.sleep(0.01 * readerNum)
name = currentThread().getName()
for loop in range(5):
c = d.cursor()
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose:
print "%s: found %d records" % (name, count)
c.close()
time.sleep(0.05)
if verbose:
print "%s: thread finished" % name
class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
dbtype = db.DB_BTREE
writers = 2
readers = 10
records = 1000
class HashConcurrentDataStore(ConcurrentDataStoreBase):
dbtype = db.DB_HASH
writers = 2
readers = 10
records = 1000
#----------------------------------------------------------------------
class SimpleThreadedBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
readers = 5
writers = 3
records = 1000
def setEnvOpts(self):
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
def test02_SimpleLocks(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_SimpleLocks..." % self.__class__.__name__
threads = []
for x in range(self.writers):
wt = Thread(target = self.writerThread,
args = (self.d, self.records, x),
name = 'writer %d' % x,
)#verbose = verbose)
threads.append(wt)
for x in range(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
threads.append(rt)
for t in threads:
t.start()
for t in threads:
t.join()
def writerThread(self, d, howMany, writerNum):
name = currentThread().getName()
start = howMany * writerNum
stop = howMany * (writerNum + 1) - 1
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
# create a bunch of records
for x in xrange(start, stop):
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
            # do a bit of reading too
            if random() <= 0.05:
                for y in xrange(start, x):
                    key = '%04d' % y
data = dbutils.DeadlockWrap(d.get, key, max_retries=12)
self.assertEqual(data, self.makeData(key))
# flush them
try:
dbutils.DeadlockWrap(d.sync, max_retries=12)
except db.DBIncompleteError, val:
if verbose:
print "could not complete sync()..."
# read them back, deleting a few
for x in xrange(start, stop):
key = '%04d' % x
data = dbutils.DeadlockWrap(d.get, key, max_retries=12)
if verbose and x % 100 == 0:
print "%s: fetched record (%s, %s)" % (name, key, data)
self.assertEqual(data, self.makeData(key))
if random() <= 0.10:
dbutils.DeadlockWrap(d.delete, key, max_retries=12)
if verbose:
print "%s: deleted record %s" % (name, key)
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
time.sleep(0.01 * readerNum)
name = currentThread().getName()
for loop in range(5):
c = d.cursor()
count = 0
rec = dbutils.DeadlockWrap(c.first, max_retries=10)
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = dbutils.DeadlockWrap(c.next, max_retries=10)
if verbose:
print "%s: found %d records" % (name, count)
c.close()
time.sleep(0.05)
if verbose:
print "%s: thread finished" % name
class BTreeSimpleThreaded(SimpleThreadedBase):
dbtype = db.DB_BTREE
class HashSimpleThreaded(SimpleThreadedBase):
dbtype = db.DB_HASH
#----------------------------------------------------------------------
class ThreadedTransactionsBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
envflags = (db.DB_THREAD |
db.DB_INIT_MPOOL |
db.DB_INIT_LOCK |
db.DB_INIT_LOG |
db.DB_INIT_TXN
)
readers = 0
writers = 0
records = 2000
txnFlag = 0
def setEnvOpts(self):
#self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
pass
def test03_ThreadedTransactions(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_ThreadedTransactions..." % \
self.__class__.__name__
threads = []
for x in range(self.writers):
wt = Thread(target = self.writerThread,
args = (self.d, self.records, x),
name = 'writer %d' % x,
)#verbose = verbose)
threads.append(wt)
for x in range(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
threads.append(rt)
dt = Thread(target = self.deadlockThread)
dt.start()
for t in threads:
t.start()
for t in threads:
t.join()
self.doLockDetect = False
dt.join()
def doWrite(self, d, name, start, stop):
finished = False
while not finished:
try:
txn = self.env.txn_begin(None, self.txnFlag)
for x in range(start, stop):
key = '%04d' % x
d.put(key, self.makeData(key), txn)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
txn.commit()
finished = True
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name, val[1])
txn.abort()
time.sleep(0.05)
def writerThread(self, d, howMany, writerNum):
name = currentThread().getName()
start = howMany * writerNum
stop = howMany * (writerNum + 1) - 1
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
step = 100
for x in range(start, stop, step):
self.doWrite(d, name, x, min(stop, x+step))
if verbose:
print "%s: finished creating records" % name
if verbose:
print "%s: deleting a few records" % name
finished = False
while not finished:
try:
recs = []
txn = self.env.txn_begin(None, self.txnFlag)
for x in range(10):
key = int(random() * howMany) + start
key = '%04d' % key
data = d.get(key, None, txn, db.DB_RMW)
if data is not None:
d.delete(key, txn)
recs.append(key)
txn.commit()
finished = True
if verbose:
print "%s: deleted records %s" % (name, recs)
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name, val[1])
txn.abort()
time.sleep(0.05)
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
time.sleep(0.01 * readerNum + 0.05)
name = currentThread().getName()
for loop in range(5):
finished = False
while not finished:
try:
txn = self.env.txn_begin(None, self.txnFlag)
c = d.cursor(txn)
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose: print "%s: found %d records" % (name, count)
c.close()
txn.commit()
finished = True
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name, val[1])
c.close()
txn.abort()
time.sleep(0.05)
time.sleep(0.05)
if verbose:
print "%s: thread finished" % name
def deadlockThread(self):
self.doLockDetect = True
while self.doLockDetect:
time.sleep(0.5)
try:
aborted = self.env.lock_detect(
db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
if verbose and aborted:
print "deadlock: Aborted %d deadlocked transaction(s)" \
% aborted
except db.DBError:
pass
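# Summary of the transactional pattern above: every batch of puts/deletes is
# wrapped in env.txn_begin() ... txn.commit(); on DBLockDeadlockError or
# DBLockNotGrantedError the transaction is aborted and retried after a short
# sleep, while deadlockThread() periodically calls env.lock_detect() to break
# any deadlock cycles between the worker threads.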
class BTreeThreadedTransactions(ThreadedTransactionsBase):
dbtype = db.DB_BTREE
writers = 3
readers = 5
records = 2000
class HashThreadedTransactions(ThreadedTransactionsBase):
dbtype = db.DB_HASH
writers = 1
readers = 5
records = 2000
class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
dbtype = db.DB_BTREE
writers = 3
readers = 5
records = 2000
txnFlag = db.DB_TXN_NOWAIT
class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
dbtype = db.DB_HASH
writers = 1
readers = 5
records = 2000
txnFlag = db.DB_TXN_NOWAIT
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if have_threads:
suite.addTest(unittest.makeSuite(BTreeConcurrentDataStore))
suite.addTest(unittest.makeSuite(HashConcurrentDataStore))
suite.addTest(unittest.makeSuite(BTreeSimpleThreaded))
suite.addTest(unittest.makeSuite(HashSimpleThreaded))
suite.addTest(unittest.makeSuite(BTreeThreadedTransactions))
suite.addTest(unittest.makeSuite(HashThreadedTransactions))
suite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions))
suite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions))
else:
print "Threads not available, skipping thread tests."
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| gpl-2.0 | 8,036,978,964,572,095,000 | 30.016 | 88 | 0.501161 | false |
saurabh1e/FlaskStructure | manager.py | 3 | 1152 | import os
from flask_migrate import Migrate, MigrateCommand
import urllib.parse as up
from flask_script import Manager
from flask import url_for
from src import api, db, ma, create_app, configs, bp, security, admin
config = os.environ.get('PYTH_SRVR')
config = configs.get(config, 'default')
extensions = [api, db, ma, security, admin]
bps = [bp]
app = create_app(__name__, config, extensions=extensions, blueprints=bps)
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.shell
def _shell_context():
return dict(
app=app,
db=db,
ma=ma,
config=config
)
@manager.command
def list_routes():
output = []
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
url = url_for(rule.endpoint, **options)
line = up.unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, url))
output.append(line)
for line in sorted(output):
print(line)
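# Each printed line pairs an endpoint with its HTTP methods and a URL whose
# arguments are rendered as "[arg]", e.g. (hypothetical route):
#   "user.detail        GET,HEAD,OPTIONS     /users/[user_id]"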
if __name__ == "__main__":
manager.run()
| mit | 3,009,530,823,375,237,600 | 22.04 | 81 | 0.625 | false |
schocco/mds-web | apps/trails/gis_math.py | 1 | 5200 | from bisect import bisect
from django.contrib.gis.geos.point import Point
import math
def haversine(origin, destination):
'''
:param origin: start position
:param destination: end position
:return: length in meters
.. See::
http://www.movable-type.co.uk/scripts/gis-faq-5.1.html
'''
lat1, lon1 = origin
lat2, lon2 = destination
    # Earth radius varies from 6356.752 km at the poles
    # to 6378.137 km at the equator; use the radius at
    # the starting latitude.
    radius = radius_for_lat(lat1)  # m
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
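# Minimal illustrative sketch (not part of the original module): the distance
# between two (lat, lon) pairs given in degrees, e.g. Berlin and Munich, should
# come out on the order of 500 km (~5.0e5 m) with haversine() above.
def _haversine_example():
    berlin = (52.5200, 13.4050)
    munich = (48.1351, 11.5820)
    return haversine(berlin, munich)  # roughly 5.0e5 metres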
def radius_for_lat(lat):
    '''
    Rt = radius of earth at geodetic latitude t
    maxr = major (equatorial) radius of earth = 6,378,137 meters
    minr = minor (polar) radius of earth = 6,356,752.31420 meters
    Rt = SQRT( ((maxr^2 * cos(t))^2 + (minr^2 * sin(t))^2)
               / ((maxr * cos(t))^2 + (minr * sin(t))^2) )
    :param lat: geodetic latitude in degrees
    :return: radius for given latitude in m
    .. See::
        http://en.wikipedia.org/wiki/Earth_radius#Radius_at_a_given_geodetic_latitude
    '''
    maxr = 6378137.0  # m
    minr = 6356752.0  # m
    lat = math.radians(lat)  # callers pass degrees, the formula expects radians
    d = (maxr**2 * math.cos(lat))**2 + (minr**2 * math.sin(lat))**2
    div = (maxr * math.cos(lat))**2 + (minr * math.sin(lat))**2
    rlat = math.sqrt(d/div)
    return rlat
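# Expected behaviour per the cited reference: the radius at latitude 0 is the
# equatorial radius (~6378.1 km) and at +/-90 degrees the polar radius (~6356.8 km).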
class RasterRow:
"""
Representation of one data point of the rastered data.
"""
def __init__(self):
self.length_degree = 0
self.length_meters = 0
self.length_degree_cum = 0
self.length_meters_cum = 0
self.altitude = 0
self.slope = 0
class RasterMap:
'''
Class to calculate approximated information about a trail object.
Uses a few data points of the original data to speed up calculation.
'''
def __init__(self, trail):
#flatten multilinestring to linestring
self.linestring = [point for linestring in trail.waypoints for point in linestring]
self.length = trail.waypoints.length
self.length_m = trail.trail_length or 0
self.rasterRows = []
self.distances = [] #4th dimension of linestring with cumulative distance to the start point
self.build()
self.raster()
def build(self):
#calculate distance at each point in the linestring
b = Point(self.linestring[0])
distance_cum = 0
for p in self.linestring:
a = b
b = Point(p)
distance = a.distance(b)
distance_cum += distance
self.distances.append(distance_cum)
def raster(self):
'''
Divide a track into equally long sections and get the altitude at each point.
According to the MDS document a section is a part of the track of 5-20 meters.
'''
# the size of the segments should be chosen so that the calculation effort is not too cpu intensive
steps = 0
if self.length_m <= 1000:
#5m is the minimum section length according to the mds document
steps = self.length_m/5
elif self.length_m > 30000:
#50m segments for tracks longer than 30km
steps = self.length_m/50
elif self.length_m > 1000:
# use 20m segments for tracks between 1 and 30km
steps = self.length_m/20
row = None
for step in range(int(steps)):
prev_row = row
row = RasterRow()
row.length_degree = self.length / steps
row.length_degree_cum = row.length_degree * step
row.length_meters = self.length_m / steps
row.length_meters_cum = row.length_meters * step
if(row.length_degree_cum in self.distances):
row.altitude = self.linestring[self.distances.index(row.length_degree_cum)][2]
else:
# get index of element closest to the needed value
right_idx = bisect(self.distances, row.length_degree_cum)
# distances[i] is lower than the value, so i+1 is the right neighbour
left_idx = right_idx - 1
if(right_idx >= len(self.linestring)):
# the right index can be out of range
# in that case we can simply use the last value instead of interpolating
row.altitude = self.linestring[-1][2]
else:
# now interpolate
h0 = self.linestring[left_idx][2]
h1 = self.linestring[right_idx][2]
x0 = self.distances[left_idx]
x1 = self.distances[right_idx]
row.altitude = h0 + (h1-h0)/(x1-x0) * (row.length_degree_cum - x0)
self.rasterRows.append(row)
if(prev_row is not None and row.length_meters != 0):
row.slope = float((row.altitude - prev_row.altitude)/row.length_meters) | mit | 8,821,322,964,885,942,000 | 36.15 | 107 | 0.572885 | false |
themrmax/scikit-learn | sklearn/preprocessing/data.py | 2 | 68159 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
StandardScaler: Performs scaling to unit variance using the``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
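# A minimal usage sketch (illustrative only, not part of the public API):
# column-wise standardization of a toy array with the scale() function above.
def _scale_demo():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)
    # each column of X_scaled now has zero mean and unit variance
    return X_scaled.mean(axis=0), X_scaled.std(axis=0)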
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
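# A minimal usage sketch (illustrative only, not part of the public API):
# fitting the MinMaxScaler above on a toy two-feature array maps every column
# into the default feature_range of [0, 1].
def _minmax_scaler_demo():
    X = np.array([[-1., 2.], [-0.5, 6.], [0., 10.], [1., 18.]])
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(X)
    # each column of X_scaled now spans exactly [0, 1]; the fitted statistics
    # are available as scaler.data_min_, scaler.data_max_ and scaler.scale_
    return X_scaled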
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MinMaxScaler: Performs scaling to a given range using the``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
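# A minimal sketch (illustrative only) of the incremental path described in
# partial_fit above: statistics accumulated over two mini-batches match a
# single fit on the concatenated data, up to floating point rounding.
def _standard_scaler_partial_fit_demo():
    X1 = np.array([[0., 1.], [2., 3.]])
    X2 = np.array([[4., 5.], [6., 7.]])
    inc = StandardScaler().partial_fit(X1).partial_fit(X2)
    full = StandardScaler().fit(np.vstack([X1, X2]))
    return inc.mean_, inc.var_, full.mean_, full.var_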
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MaxAbsScaler: Performs scaling to the [-1, 1] range using the``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
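# A minimal sketch (illustrative only): the RobustScaler above centers on the
# median and scales by the quantile range, so a single extreme outlier barely
# moves the fitted statistics, unlike mean/variance based scaling.
def _robust_scaler_demo():
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])  # one extreme outlier
    scaler = RobustScaler().fit(X)
    # center_ is the median (3.0); scale_ is the 25th-75th percentile range (2.0)
    return scaler.center_, scaler.scale_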
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
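# A minimal sketch (illustrative only) of the exponent bookkeeping described
# above: powers_ and get_feature_names() for two input features and degree 2.
def _polynomial_features_demo():
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    # powers_ rows: [0 0], [1 0], [0 1], [2 0], [1 1], [0 2]
    # names:        ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']
    return poly.powers_, poly.get_feature_names()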
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
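# A minimal sketch (illustrative only): l2-normalizing the rows of a toy array
# with the normalize() function above; every non-zero row ends up with unit norm.
def _normalize_demo():
    X = np.array([[3., 4.], [1., 0.], [0., 0.]])
    X_norm = normalize(X, norm='l2')
    # rows become [0.6, 0.8], [1.0, 0.0] and [0.0, 0.0] (all-zero rows are kept)
    return X_norm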
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
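# A minimal sketch (illustrative only): for a linear kernel K = X.dot(X.T),
# centering K with the KernelCenterer above is equivalent to computing the
# kernel of the column-centered data, i.e. Xc.dot(Xc.T) with Xc = X - X.mean(0).
def _kernel_centerer_demo():
    X = np.array([[1., 2.], [2., 0.], [0., 1.]])
    K = X.dot(X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    Xc = X - X.mean(axis=0)
    # K_centered and Xc.dot(Xc.T) agree up to floating point rounding
    return K_centered, Xc.dot(Xc.T)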
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
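# A quick sketch of _transform_selected (illustrative only; the helper name
# below is hypothetical): the selected columns are transformed and stacked
# to the left of the untouched columns, as used by OneHotEncoder below.
def _transform_selected_sketch():
    import numpy as np
    X = np.array([[1., 10.], [2., 20.]])
    doubled = _transform_selected(X, lambda A: 2 * A, selected=[0])
    # expected: [[2., 10.],
    #            [4., 20.]]
    return doubled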
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit,
# i.e. those that are less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
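# A short sketch of the handle_unknown behaviour described above
# (illustrative only; _one_hot_unknown_sketch is a hypothetical helper).
# With handle_unknown='ignore', a category never seen during fit is encoded
# as an all-zero row instead of raising a ValueError.
def _one_hot_unknown_sketch():
    import numpy as np
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit(np.array([[0], [1]]))
    return enc.transform(np.array([[2]]))
    # expected: [[0., 0.]]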
| bsd-3-clause | 3,821,227,525,569,790,500 | 34.835436 | 84 | 0.591118 | false |
bstaint/pr0bescan | libs/DNS/Lib.py | 2 | 23853 | # -*- encoding: utf-8 -*-
"""
$Id: Lib.py,v 1.11.2.11 2011/11/23 17:07:14 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License. See LICENSE for details.
Library code. Largely this is packers and unpackers for various types.
"""
#
#
# See RFC 1035:
# ------------------------------------------------------------------------
# Network Working Group P. Mockapetris
# Request for Comments: 1035 ISI
# November 1987
# Obsoletes: RFCs 882, 883, 973
#
# DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION
# ------------------------------------------------------------------------
import types
import Type
import Class
import Opcode
import Status
from Base import DNSError
LABEL_UTF8 = False
LABEL_ENCODING = 'idna'
class UnpackError(DNSError): pass
class PackError(DNSError): pass
# Low-level 16 and 32 bit integer packing and unpacking
from struct import pack as struct_pack
from struct import unpack as struct_unpack
from socket import inet_ntoa, inet_aton
def pack16bit(n):
return struct_pack('!H', n)
def pack32bit(n):
return struct_pack('!L', n)
def unpack16bit(s):
return struct_unpack('!H', s)[0]
def unpack32bit(s):
return struct_unpack('!L', s)[0]
def addr2bin(addr):
return struct_unpack('!l', inet_aton(addr))[0]
def bin2addr(n):
return inet_ntoa(struct_pack('!L', n))
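# A tiny round-trip sketch for the helpers above (illustrative only; the
# function name is hypothetical): packing and unpacking are inverses, and
# addresses survive the addr <-> 32-bit integer conversion.
def _pack_roundtrip_sketch():
    assert unpack16bit(pack16bit(0x1234)) == 0x1234
    assert unpack32bit(pack32bit(0xdeadbeef)) == 0xdeadbeef
    assert bin2addr(addr2bin('10.0.0.1')) == '10.0.0.1'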
# Packing class
class Packer:
" packer base class. supports basic byte/16bit/32bit/addr/string/name "
def __init__(self):
self.buf = ''
self.index = {}
def getbuf(self):
return self.buf
def addbyte(self, c):
if len(c) != 1: raise TypeError, 'one character expected'
self.buf = self.buf + c
def addbytes(self, bytes):
self.buf = self.buf + bytes
def add16bit(self, n):
self.buf = self.buf + pack16bit(n)
def add32bit(self, n):
self.buf = self.buf + pack32bit(n)
def addaddr(self, addr):
n = addr2bin(addr)
self.buf = self.buf + pack32bit(n)
def addstring(self, s):
if len(s) > 255:
raise ValueError, "Can't encode string of length "+ \
"%s (> 255)"%(len(s))
self.addbyte(chr(len(s)))
self.addbytes(s)
def addname(self, name):
# Domain name packing (section 4.1.4)
# Add a domain name to the buffer, possibly using pointers.
# The case of the first occurrence of a name is preserved.
# One trailing dot is ignored.
if name.endswith('.'):
name = name[:-1]
if name:
nlist = name.split('.')
for label in nlist:
if not label:
raise PackError, 'empty label'
else:
nlist = []
keys = []
for i in range(len(nlist)):
key = '.'.join(nlist[i:]).upper()
keys.append(key)
if self.index.has_key(key):
pointer = self.index[key]
break
else:
i = len(nlist)
pointer = None
# Do it into temporaries first so exceptions don't
# mess up self.index and self.buf
buf = ''
offset = len(self.buf)
index = []
if LABEL_UTF8:
enc = 'utf8'
else:
enc = LABEL_ENCODING
for j in range(i):
label = nlist[j]
try:
label = label.encode(enc)
except UnicodeEncodeError:
if not LABEL_UTF8: raise
if not label.startswith('\ufeff'):
label = '\ufeff'+label
label = label.encode(enc)
n = len(label)
if n > 63:
raise PackError, 'label too long'
if offset + len(buf) < 0x3FFF:
index.append((keys[j], offset + len(buf)))
else:
print 'DNS.Lib.Packer.addname:',
print 'warning: pointer too big'
buf = buf + (chr(n) + label)
if pointer:
buf = buf + pack16bit(pointer | 0xC000)
else:
buf = buf + '\0'
self.buf = self.buf + buf
for key, value in index:
self.index[key] = value
def dump(self):
keys = self.index.keys()
keys.sort()
print '-'*40
for key in keys:
print '%20s %3d' % (key, self.index[key])
print '-'*40
space = 1
for i in range(0, len(self.buf)+1, 2):
if self.buf[i:i+2] == '**':
if not space: print
space = 1
continue
space = 0
print '%4d' % i,
for c in self.buf[i:i+2]:
if ' ' < c < '\177':
print ' %c' % c,
else:
print '%2d' % ord(c),
print
print '-'*40
# Unpacking class
class Unpacker:
def __init__(self, buf):
self.buf = buf
self.offset = 0
def getbyte(self):
if self.offset >= len(self.buf):
raise UnpackError, "Ran off end of data"
c = self.buf[self.offset]
self.offset = self.offset + 1
return c
def getbytes(self, n):
s = self.buf[self.offset : self.offset + n]
if len(s) != n: raise UnpackError, 'not enough data left'
self.offset = self.offset + n
return s
def get16bit(self):
return unpack16bit(self.getbytes(2))
def get32bit(self):
return unpack32bit(self.getbytes(4))
def getaddr(self):
return bin2addr(self.get32bit())
def getstring(self):
return self.getbytes(ord(self.getbyte()))
def getname(self):
# Domain name unpacking (section 4.1.4)
c = self.getbyte()
i = ord(c)
if i & 0xC0 == 0xC0:
d = self.getbyte()
j = ord(d)
pointer = ((i<<8) | j) & ~0xC000
save_offset = self.offset
try:
self.offset = pointer
domain = self.getname()
finally:
self.offset = save_offset
return domain
if i == 0:
return ''
domain = self.getbytes(i)
remains = self.getname()
if not remains:
return domain
else:
return domain + '.' + remains
# Test program for packing/unpacking (section 4.1.4)
def testpacker():
N = 2500
R = range(N)
import timing
# See section 4.1.4 of RFC 1035
timing.start()
for i in R:
p = Packer()
p.addaddr('192.168.0.1')
p.addbytes('*' * 20)
p.addname('f.ISI.ARPA')
p.addbytes('*' * 8)
p.addname('Foo.F.isi.arpa')
p.addbytes('*' * 18)
p.addname('arpa')
p.addbytes('*' * 26)
p.addname('')
timing.finish()
print timing.milli(), "ms total for packing"
print round(timing.milli() / i, 4), 'ms per packing'
#p.dump()
u = Unpacker(p.buf)
u.getaddr()
u.getbytes(20)
u.getname()
u.getbytes(8)
u.getname()
u.getbytes(18)
u.getname()
u.getbytes(26)
u.getname()
timing.start()
for i in R:
u = Unpacker(p.buf)
res = (u.getaddr(),
u.getbytes(20),
u.getname(),
u.getbytes(8),
u.getname(),
u.getbytes(18),
u.getname(),
u.getbytes(26),
u.getname())
timing.finish()
print timing.milli(), "ms total for unpacking"
print round(timing.milli() / i, 4), 'ms per unpacking'
#for item in res: print item
# Pack/unpack RR toplevel format (section 3.2.1)
class RRpacker(Packer):
def __init__(self):
Packer.__init__(self)
self.rdstart = None
def addRRheader(self, name, type, klass, ttl, *rest):
self.addname(name)
self.add16bit(type)
self.add16bit(klass)
self.add32bit(ttl)
if rest:
if rest[1:]: raise TypeError, 'too many args'
rdlength = rest[0]
else:
rdlength = 0
self.add16bit(rdlength)
self.rdstart = len(self.buf)
def patchrdlength(self):
rdlength = unpack16bit(self.buf[self.rdstart-2:self.rdstart])
if rdlength == len(self.buf) - self.rdstart:
return
rdata = self.buf[self.rdstart:]
save_buf = self.buf
ok = 0
try:
self.buf = self.buf[:self.rdstart-2]
self.add16bit(len(rdata))
self.buf = self.buf + rdata
ok = 1
finally:
if not ok: self.buf = save_buf
def endRR(self):
if self.rdstart is not None:
self.patchrdlength()
self.rdstart = None
def getbuf(self):
if self.rdstart is not None: self.patchrdlength()
return Packer.getbuf(self)
# Standard RRs (section 3.3)
def addCNAME(self, name, klass, ttl, cname):
self.addRRheader(name, Type.CNAME, klass, ttl)
self.addname(cname)
self.endRR()
def addHINFO(self, name, klass, ttl, cpu, os):
self.addRRheader(name, Type.HINFO, klass, ttl)
self.addstring(cpu)
self.addstring(os)
self.endRR()
def addMX(self, name, klass, ttl, preference, exchange):
self.addRRheader(name, Type.MX, klass, ttl)
self.add16bit(preference)
self.addname(exchange)
self.endRR()
def addNS(self, name, klass, ttl, nsdname):
self.addRRheader(name, Type.NS, klass, ttl)
self.addname(nsdname)
self.endRR()
def addPTR(self, name, klass, ttl, ptrdname):
self.addRRheader(name, Type.PTR, klass, ttl)
self.addname(ptrdname)
self.endRR()
def addSOA(self, name, klass, ttl,
mname, rname, serial, refresh, retry, expire, minimum):
self.addRRheader(name, Type.SOA, klass, ttl)
self.addname(mname)
self.addname(rname)
self.add32bit(serial)
self.add32bit(refresh)
self.add32bit(retry)
self.add32bit(expire)
self.add32bit(minimum)
self.endRR()
def addTXT(self, name, klass, ttl, tlist):
self.addRRheader(name, Type.TXT, klass, ttl)
if type(tlist) is types.StringType:
tlist = [tlist]
for txtdata in tlist:
self.addstring(txtdata)
self.endRR()
# Internet specific RRs (section 3.4) -- class = IN
def addA(self, name, klass, ttl, address):
self.addRRheader(name, Type.A, klass, ttl)
self.addaddr(address)
self.endRR()
def addWKS(self, name, ttl, address, protocol, bitmap):
self.addRRheader(name, Type.WKS, Class.IN, ttl)
self.addaddr(address)
self.addbyte(chr(protocol))
self.addbytes(bitmap)
self.endRR()
def addSRV(self):
raise NotImplementedError
def prettyTime(seconds):
if seconds<60:
return seconds,"%d seconds"%(seconds)
if seconds<3600:
return seconds,"%d minutes"%(seconds/60)
if seconds<86400:
return seconds,"%d hours"%(seconds/3600)
if seconds<604800:
return seconds,"%d days"%(seconds/86400)
else:
return seconds,"%d weeks"%(seconds/604800)
class RRunpacker(Unpacker):
def __init__(self, buf):
Unpacker.__init__(self, buf)
self.rdend = None
def getRRheader(self):
name = self.getname()
rrtype = self.get16bit()
klass = self.get16bit()
ttl = self.get32bit()
rdlength = self.get16bit()
self.rdend = self.offset + rdlength
return (name, rrtype, klass, ttl, rdlength)
def endRR(self):
if self.offset != self.rdend:
raise UnpackError, 'end of RR not reached'
def getCNAMEdata(self):
return self.getname()
def getHINFOdata(self):
return self.getstring(), self.getstring()
def getMXdata(self):
return self.get16bit(), self.getname()
def getNSdata(self):
return self.getname()
def getPTRdata(self):
return self.getname()
def getSOAdata(self):
return self.getname(), \
self.getname(), \
('serial',)+(self.get32bit(),), \
('refresh ',)+prettyTime(self.get32bit()), \
('retry',)+prettyTime(self.get32bit()), \
('expire',)+prettyTime(self.get32bit()), \
('minimum',)+prettyTime(self.get32bit())
def getTXTdata(self):
txt = []
while self.offset != self.rdend:
txt.append(self.getstring())
return txt
getSPFdata = getTXTdata
def getAdata(self):
return self.getaddr()
def getWKSdata(self):
address = self.getaddr()
protocol = ord(self.getbyte())
bitmap = self.getbytes(self.rdend - self.offset)
return address, protocol, bitmap
def getSRVdata(self):
"""
_Service._Proto.Name TTL Class SRV Priority Weight Port Target
"""
priority = self.get16bit()
weight = self.get16bit()
port = self.get16bit()
target = self.getname()
#print '***priority, weight, port, target', priority, weight, port, target
return priority, weight, port, target
# Pack/unpack Message Header (section 4.1)
class Hpacker(Packer):
def addHeader(self, id, qr, opcode, aa, tc, rd, ra, z, rcode,
qdcount, ancount, nscount, arcount):
self.add16bit(id)
self.add16bit((qr&1)<<15 | (opcode&0xF)<<11 | (aa&1)<<10
| (tc&1)<<9 | (rd&1)<<8 | (ra&1)<<7
| (z&7)<<4 | (rcode&0xF))
self.add16bit(qdcount)
self.add16bit(ancount)
self.add16bit(nscount)
self.add16bit(arcount)
class Hunpacker(Unpacker):
def getHeader(self):
id = self.get16bit()
flags = self.get16bit()
qr, opcode, aa, tc, rd, ra, z, rcode = (
(flags>>15)&1,
(flags>>11)&0xF,
(flags>>10)&1,
(flags>>9)&1,
(flags>>8)&1,
(flags>>7)&1,
(flags>>4)&7,
(flags>>0)&0xF)
qdcount = self.get16bit()
ancount = self.get16bit()
nscount = self.get16bit()
arcount = self.get16bit()
return (id, qr, opcode, aa, tc, rd, ra, z, rcode,
qdcount, ancount, nscount, arcount)
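# A small sketch of the header flag layout (RFC 1035 section 4.1.1);
# illustrative only, the helper name is hypothetical. Packing a header and
# unpacking it again preserves every field, including the packed flag bits.
def _header_roundtrip_sketch():
    p = Hpacker()
    p.addHeader(0x1234, 1, 0, 1, 0, 1, 1, 0, 0, 1, 2, 0, 0)
    u = Hunpacker(p.getbuf())
    return u.getHeader()
    # expected: (4660, 1, 0, 1, 0, 1, 1, 0, 0, 1, 2, 0, 0)  # 4660 == 0x1234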
# Pack/unpack Question (section 4.1.2)
class Qpacker(Packer):
def addQuestion(self, qname, qtype, qclass):
self.addname(qname)
self.add16bit(qtype)
self.add16bit(qclass)
class Qunpacker(Unpacker):
def getQuestion(self):
return self.getname(), self.get16bit(), self.get16bit()
# Pack/unpack Message(section 4)
# NB the order of the base classes is important for __init__()!
class Mpacker(RRpacker, Qpacker, Hpacker):
pass
class Munpacker(RRunpacker, Qunpacker, Hunpacker):
pass
# Routines to print an unpacker to stdout, for debugging.
# These affect the unpacker's current position!
def dumpM(u):
print 'HEADER:',
(id, qr, opcode, aa, tc, rd, ra, z, rcode,
qdcount, ancount, nscount, arcount) = u.getHeader()
print 'id=%d,' % id,
print 'qr=%d, opcode=%d, aa=%d, tc=%d, rd=%d, ra=%d, z=%d, rcode=%d,' \
% (qr, opcode, aa, tc, rd, ra, z, rcode)
if tc: print '*** response truncated! ***'
if rcode: print '*** nonzero error code! (%d) ***' % rcode
print ' qdcount=%d, ancount=%d, nscount=%d, arcount=%d' \
% (qdcount, ancount, nscount, arcount)
for i in range(qdcount):
print 'QUESTION %d:' % i,
dumpQ(u)
for i in range(ancount):
print 'ANSWER %d:' % i,
dumpRR(u)
for i in range(nscount):
print 'AUTHORITY RECORD %d:' % i,
dumpRR(u)
for i in range(arcount):
print 'ADDITIONAL RECORD %d:' % i,
dumpRR(u)
class DnsResult:
def __init__(self,u,args):
self.header={}
self.questions=[]
self.answers=[]
self.authority=[]
self.additional=[]
self.args=args
self.storeM(u)
def show(self):
import time
print '; <<>> PDG.py 1.0 <<>> %s %s'%(self.args['name'],
self.args['qtype'])
opt=""
if self.args['rd']:
opt=opt+'recurs '
h=self.header
print ';; options: '+opt
print ';; got answer:'
print ';; ->>HEADER<<- opcode %s, status %s, id %d'%(
h['opcode'],h['status'],h['id'])
flags=filter(lambda x,h=h:h[x],('qr','aa','rd','ra','tc'))
print ';; flags: %s; Ques: %d, Ans: %d, Auth: %d, Addit: %d'%(
''.join(flags),h['qdcount'],h['ancount'],h['nscount'],
h['arcount'])
print ';; QUESTIONS:'
for q in self.questions:
print ';; %s, type = %s, class = %s'%(q['qname'],q['qtypestr'],
q['qclassstr'])
print
print ';; ANSWERS:'
for a in self.answers:
print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'],
a['data'])
print
print ';; AUTHORITY RECORDS:'
for a in self.authority:
print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'],
a['data'])
print
print ';; ADDITIONAL RECORDS:'
for a in self.additional:
print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'],
a['data'])
print
if self.args.has_key('elapsed'):
print ';; Total query time: %d msec'%self.args['elapsed']
print ';; To SERVER: %s'%(self.args['server'])
print ';; WHEN: %s'%time.ctime(time.time())
def storeM(self,u):
(self.header['id'], self.header['qr'], self.header['opcode'],
self.header['aa'], self.header['tc'], self.header['rd'],
self.header['ra'], self.header['z'], self.header['rcode'],
self.header['qdcount'], self.header['ancount'],
self.header['nscount'], self.header['arcount']) = u.getHeader()
self.header['opcodestr']=Opcode.opcodestr(self.header['opcode'])
self.header['status']=Status.statusstr(self.header['rcode'])
for i in range(self.header['qdcount']):
#print 'QUESTION %d:' % i,
self.questions.append(self.storeQ(u))
for i in range(self.header['ancount']):
#print 'ANSWER %d:' % i,
self.answers.append(self.storeRR(u))
for i in range(self.header['nscount']):
#print 'AUTHORITY RECORD %d:' % i,
self.authority.append(self.storeRR(u))
for i in range(self.header['arcount']):
#print 'ADDITIONAL RECORD %d:' % i,
self.additional.append(self.storeRR(u))
def storeQ(self,u):
q={}
q['qname'], q['qtype'], q['qclass'] = u.getQuestion()
q['qtypestr']=Type.typestr(q['qtype'])
q['qclassstr']=Class.classstr(q['qclass'])
return q
def storeRR(self,u):
r={}
r['name'],r['type'],r['class'],r['ttl'],r['rdlength'] = u.getRRheader()
r['typename'] = Type.typestr(r['type'])
r['classstr'] = Class.classstr(r['class'])
#print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
# % (name,
# type, typename,
# klass, Class.classstr(class),
# ttl)
mname = 'get%sdata' % r['typename']
if hasattr(u, mname):
r['data']=getattr(u, mname)()
else:
r['data']=u.getbytes(r['rdlength'])
return r
def dumpQ(u):
qname, qtype, qclass = u.getQuestion()
print 'qname=%s, qtype=%d(%s), qclass=%d(%s)' \
% (qname,
qtype, Type.typestr(qtype),
qclass, Class.classstr(qclass))
def dumpRR(u):
name, type, klass, ttl, rdlength = u.getRRheader()
typename = Type.typestr(type)
print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
% (name,
type, typename,
klass, Class.classstr(klass),
ttl)
mname = 'get%sdata' % typename
if hasattr(u, mname):
print ' formatted rdata:', getattr(u, mname)()
else:
print ' binary rdata:', u.getbytes(rdlength)
if __name__ == "__main__":
testpacker()
#
# $Log: Lib.py,v $
# Revision 1.11.2.11 2011/11/23 17:07:14 customdesigned
# Rename some args in DNS.Lib to match py3dns.
#
# Revision 1.11.2.10 2011/08/04 22:23:55 customdesigned
# Allow empty label at end (trailing dot), so now '.' can lookup the
# root zone again.
#
# Revision 1.11.2.9 2011/03/21 21:03:22 customdesigned
# Get rid of obsolete string module
#
# Revision 1.11.2.8 2011/03/16 20:06:39 customdesigned
# Refer to explicit LICENSE file.
#
# Revision 1.11.2.7 2009/06/09 18:39:06 customdesigned
# Built-in SPF support
#
# Revision 1.11.2.6 2008/10/15 22:34:06 customdesigned
# Default to idna encoding.
#
# Revision 1.11.2.5 2008/09/17 17:35:14 customdesigned
# Use 7-bit ascii encoding, because case folding needs to be disabled
# before utf8 is safe to use, even experimentally.
#
# Revision 1.11.2.4 2008/09/17 16:09:53 customdesigned
# Encode unicode labels as UTF-8
#
# Revision 1.11.2.3 2007/05/22 20:27:40 customdesigned
# Fix unpacker underflow.
#
# Revision 1.11.2.2 2007/05/22 20:25:53 customdesigned
# Use socket.inetntoa,inetaton.
#
# Revision 1.11.2.1 2007/05/22 20:20:39 customdesigned
# Mark utf-8 encoding
#
# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.9 2002/03/19 10:30:33 anthonybaxter
# first round of major bits and pieces. The major stuff here (summarised
# from my local, off-net CVS server :/ this will cause some oddities with
# the
#
# tests/testPackers.py:
# a large slab of unit tests for the packer and unpacker code in DNS.Lib
#
# DNS/Lib.py:
# placeholder for addSRV.
# added 'klass' to addA, make it the same as the other A* records.
# made addTXT check for being passed a string, turn it into a length 1 list.
# explicitly check for adding a string of length > 255 (prohibited).
# a bunch of cleanups from a first pass with pychecker
# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
# (disappointly small, actually) improvement, while addr2bin is much
# much faster now.
#
# DNS/Base.py:
# added DiscoverNameServers. This automatically does the right thing
# on unix/ win32. No idea how MacOS handles this. *sigh*
# Incompatible change: Don't use ParseResolvConf on non-unix, use this
# function, instead!
# a bunch of cleanups from a first pass with pychecker
#
# Revision 1.8 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.7 2001/07/19 07:50:44 anthony
# Added SRV (RFC 2782) support. Code from Michael Ströder.
#
# Revision 1.6 2001/07/19 07:39:18 anthony
# 'type' -> 'rrtype' in getRRheader(). Fix from Michael Ströder.
#
# Revision 1.5 2001/07/19 07:34:19 anthony
# oops. glitch in storeRR (fixed now).
# Reported by Bastian Kleineidam and by greg lin.
#
# Revision 1.4 2001/07/19 07:16:42 anthony
# Changed (opcode*0xF)<<11 to (opcode&0xF)<<11.
# Patch from Timothy J. Miller.
#
# Revision 1.3 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
| gpl-2.0 | 1,438,570,080,887,518,500 | 31.321138 | 82 | 0.550623 | false |
SamuelMarks/crow | amalgamate/merge_all.py | 6 | 1290 | """Merges all the header files."""
from glob import glob
from os import path as pt
import re
from collections import defaultdict
import sys
header_path = "../include"
if len(sys.argv) > 1:
header_path = sys.argv[1]
OUTPUT = 'crow_all.h'
re_depends = re.compile('^#include "(.*)"', re.MULTILINE)
headers = [x.rsplit('/', 1)[-1] for x in glob(pt.join(header_path, '*.h'))]
print(headers)
edges = defaultdict(list)
for header in headers:
d = open(pt.join(header_path, header)).read()
match = re_depends.findall(d)
for m in match:
# m should be included before header
edges[m].append(header)
visited = defaultdict(bool)
order = []
def dfs(x):
"""Ensure all header files are visited."""
visited[x] = True
for y in edges[x]:
if not visited[y]:
dfs(y)
order.append(x)
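# A toy sketch of the ordering idea (illustrative only; the data and helper
# name below are hypothetical and not used by this script): if a.h must be
# included before b.h, the post-order append puts b.h first and a.h last,
# so the reversed list starts with a.h.
def _toy_order_sketch():
    toy_edges = {'a.h': ['b.h'], 'b.h': []}
    seen, post = {}, []
    def visit(node):
        seen[node] = True
        for nxt in toy_edges[node]:
            if not seen.get(nxt):
                visit(nxt)
        post.append(node)
    for node in ('a.h', 'b.h'):
        if not seen.get(node):
            visit(node)
    return post[::-1]  # ['a.h', 'b.h']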
for header in headers:
if not visited[header]:
dfs(header)
order = order[::-1]
for x in edges:
print(x, edges[x])
for x in edges:
for y in edges[x]:
assert order.index(x) < order.index(y), 'cyclic include detected'
print(order)
build = []
for header in order:
d = open(pt.join(header_path, header)).read()
build.append(re_depends.sub(lambda x: '\n', d))
build.append('\n')
open(OUTPUT, 'w').write('\n'.join(build))
| bsd-3-clause | -9,064,107,557,558,154,000 | 22.888889 | 75 | 0.627907 | false |
varunarya10/nova_test_latest | nova/network/linux_net.py | 12 | 76610 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import calendar
import inspect
import os
import re
import time
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import objects
from nova import paths
from nova.pci import utils as pci_utils
from nova import utils
LOG = logging.getLogger(__name__)
linux_net_opts = [
cfg.MultiStrOpt('dhcpbridge_flagfile',
default=['/etc/nova/nova-dhcpbridge.conf'],
help='Location of flagfiles for dhcpbridge'),
cfg.StrOpt('networks_path',
default=paths.state_path_def('networks'),
help='Location to keep network config files'),
cfg.StrOpt('public_interface',
default='eth0',
help='Interface for public IP addresses'),
cfg.StrOpt('dhcpbridge',
default=paths.bindir_def('nova-dhcpbridge'),
help='Location of nova-dhcpbridge'),
cfg.StrOpt('routing_source_ip',
default='$my_ip',
help='Public IP of network host'),
cfg.IntOpt('dhcp_lease_time',
default=86400,
help='Lifetime of a DHCP lease in seconds'),
cfg.MultiStrOpt('dns_server',
default=[],
help='If set, uses specific DNS server for dnsmasq. Can'
' be specified multiple times.'),
cfg.BoolOpt('use_network_dns_servers',
default=False,
help='If set, uses the dns1 and dns2 from the network ref.'
' as dns servers.'),
cfg.ListOpt('dmz_cidr',
default=[],
help='A list of dmz ranges that should be accepted'),
cfg.MultiStrOpt('force_snat_range',
default=[],
help='Traffic to this range will always be snatted to the '
'fallback ip, even if it would normally be bridged out '
'of the node. Can be specified multiple times.'),
cfg.StrOpt('dnsmasq_config_file',
default='',
help='Override the default dnsmasq settings with this file'),
cfg.StrOpt('linuxnet_interface_driver',
default='nova.network.linux_net.LinuxBridgeInterfaceDriver',
help='Driver used to create ethernet devices.'),
cfg.StrOpt('linuxnet_ovs_integration_bridge',
default='br-int',
help='Name of Open vSwitch bridge used with linuxnet'),
cfg.BoolOpt('send_arp_for_ha',
default=False,
help='Send gratuitous ARPs for HA setup'),
cfg.IntOpt('send_arp_for_ha_count',
default=3,
help='Send this many gratuitous ARPs for HA setup'),
cfg.BoolOpt('use_single_default_gateway',
default=False,
help='Use single default gateway. Only first nic of vm will '
'get default gateway from dhcp server'),
cfg.MultiStrOpt('forward_bridge_interface',
default=['all'],
help='An interface that bridges can forward to. If this '
'is set to all then all traffic will be forwarded. '
'Can be specified multiple times.'),
cfg.StrOpt('metadata_host',
default='$my_ip',
help='The IP address for the metadata API server'),
cfg.IntOpt('metadata_port',
default=8775,
help='The port for the metadata API port'),
cfg.StrOpt('iptables_top_regex',
default='',
help='Regular expression to match the iptables rule that '
'should always be on the top.'),
cfg.StrOpt('iptables_bottom_regex',
default='',
help='Regular expression to match the iptables rule that '
'should always be on the bottom.'),
cfg.StrOpt('iptables_drop_action',
default='DROP',
help='The table that iptables to jump to when a packet is '
'to be dropped.'),
cfg.IntOpt('ovs_vsctl_timeout',
default=120,
help='Amount of time, in seconds, that ovs_vsctl should wait '
'for a response from the database. 0 is to wait forever.'),
cfg.BoolOpt('fake_network',
default=False,
help='If passed, use fake network devices and addresses'),
cfg.IntOpt('ebtables_exec_attempts',
default=3,
help='Number of times to retry ebtables commands on failure.'),
cfg.FloatOpt('ebtables_retry_interval',
default=1.0,
help='Number of seconds to wait between ebtables retries.'),
]
CONF = cfg.CONF
CONF.register_opts(linux_net_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('network_device_mtu', 'nova.objects.network')
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
self.wrap = wrap
self.top = top
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __repr__(self):
if self.wrap:
chain = '%s-%s' % (binary_name, self.chain)
else:
chain = self.chain
# new rules should have a zero [packet: byte] count
return '[0:0] -A %s %s' % (chain, self.rule)
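# A tiny sketch of how a rule renders (illustrative only; the helper name is
# hypothetical): an unwrapped rule keeps its chain name as-is, while a
# wrapped rule would get the '<binary_name>-' prefix described above.
def _iptables_rule_repr_sketch():
    rule = IptablesRule('INPUT', '-j ACCEPT', wrap=False)
    return repr(rule)
    # expected: '[0:0] -A INPUT -j ACCEPT'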
class IptablesTable(object):
"""An iptables table."""
def __init__(self):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.dirty = True
def has_chain(self, name, wrap=True):
if wrap:
return name in self.chains
else:
return name in self.unwrapped_chains
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
self.dirty = True
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
if wrap:
chain_set = self.chains
else:
chain_set = self.unwrapped_chains
if name not in chain_set:
LOG.warning(_LW('Attempted to remove chain %s which does not '
'exist'), name)
return
self.dirty = True
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
if not wrap:
self.remove_chains.add(name)
chain_set.remove(name)
if not wrap:
self.remove_rules += filter(lambda r: r.chain == name, self.rules)
self.rules = filter(lambda r: r.chain != name, self.rules)
if wrap:
jump_snippet = '-j %s-%s' % (binary_name, name)
else:
jump_snippet = '-j %s' % (name,)
if not wrap:
self.remove_rules += filter(lambda r: jump_snippet in r.rule,
self.rules)
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
if wrap and chain not in self.chains:
raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
rule_obj = IptablesRule(chain, rule, wrap, top)
if rule_obj in self.rules:
LOG.debug("Skipping duplicate iptables rule addition. "
"%(rule)r already in %(rules)r",
{'rule': rule_obj, 'rules': self.rules})
else:
self.rules.append(IptablesRule(chain, rule, wrap, top))
self.dirty = True
def _wrap_target_chain(self, s):
if s.startswith('$'):
return '%s-%s' % (binary_name, s[1:])
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top))
self.dirty = True
except ValueError:
LOG.warning(_LW('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def remove_rules_regex(self, regex):
"""Remove all rules matching regex."""
if isinstance(regex, six.string_types):
regex = re.compile(regex)
num_rules = len(self.rules)
self.rules = filter(lambda r: not regex.match(str(r)), self.rules)
removed = num_rules - len(self.rules)
if removed > 0:
self.dirty = True
return removed
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
if chained_rules:
self.dirty = True
for rule in chained_rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from nova-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, execute=None):
if not execute:
self.execute = _execute
else:
self.execute = execute
self.ipv4 = {'filter': IptablesTable(),
'nat': IptablesTable(),
'mangle': IptablesTable()}
self.ipv6 = {'filter': IptablesTable()}
self.iptables_apply_deferred = False
# Add a nova-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('nova-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('nova-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING'],
'mangle': ['POSTROUTING']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in six.iteritems(builtin_chains[ip_version]):
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' % (chain,),
wrap=False)
# Add a nova-postrouting-bottom chain. It's intended to be shared
# among the various nova components. We set it as the last chain
# of POSTROUTING chain.
self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared nova-postrouting-bottom chain
# so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self.apply()
def dirty(self):
for table in six.itervalues(self.ipv4):
if table.dirty:
return True
if CONF.use_ipv6:
for table in six.itervalues(self.ipv6):
if table.dirty:
return True
return False
def apply(self):
if self.iptables_apply_deferred:
return
if self.dirty():
self._apply()
else:
LOG.debug("Skipping apply due to lack of new rules")
@utils.synchronized('iptables', external=True)
def _apply(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if CONF.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
all_tables, _err = self.execute('%s-save' % (cmd,), '-c',
run_as_root=True,
attempts=5)
all_lines = all_tables.split('\n')
for table_name, table in six.iteritems(tables):
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
table.dirty = False
self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
process_input='\n'.join(all_lines),
attempts=5)
LOG.debug("IPTablesManager.apply completed with success")
def _find_table(self, lines, table_name):
if len(lines) < 3:
# length only <2 when fake iptables
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = sorted(table.chains)
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['#Generated by nova',
'*' + table_name, 'COMMIT',
'#Completed by nova']
current_lines = fake_table
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
current_lines)
top_rules = []
bottom_rules = []
if CONF.iptables_top_regex:
regex = re.compile(CONF.iptables_top_regex)
temp_filter = filter(lambda line: regex.search(line), new_filter)
for rule_str in temp_filter:
new_filter = filter(lambda s: s.strip() != rule_str.strip(),
new_filter)
top_rules = temp_filter
if CONF.iptables_bottom_regex:
regex = re.compile(CONF.iptables_bottom_regex)
temp_filter = filter(lambda line: regex.search(line), new_filter)
for rule_str in temp_filter:
new_filter = filter(lambda s: s.strip() != rule_str.strip(),
new_filter)
bottom_rules = temp_filter
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(new_filter):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
our_rules = top_rules
bot_rules = []
for rule in rules:
rule_str = str(rule)
if rule.top:
# rule.top == True means we want this rule to be at the top.
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
# We don't want to remove an entry if it has non-zero
# [packet:byte] counts and replace it with [0:0], so let's
# go look for a duplicate, and over-ride our table rule if
# found.
# ignore [packet:byte] counts at beginning of line
if rule_str.startswith('['):
rule_str = rule_str.split(']', 1)[1]
dup_filter = filter(lambda s: rule_str.strip() in s.strip(),
new_filter)
new_filter = filter(lambda s:
rule_str.strip() not in s.strip(),
new_filter)
# if no duplicates, use original rule
if dup_filter:
# grab the last entry, if there is one
dup = dup_filter[-1]
rule_str = str(dup)
else:
rule_str = str(rule)
rule_str.strip()
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = [':%s - [0:0]' % (name,)
for name in unwrapped_chains]
new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' %
(binary_name, name,)
for name in chains]
commit_index = new_filter.index('COMMIT')
new_filter[commit_index:commit_index] = bottom_rules
seen_lines = set()
def _weed_out_duplicates(line):
# ignore [packet:byte] counts at beginning of lines
if line.startswith('['):
line = line.split(']', 1)[1]
line = line.strip()
if line in seen_lines:
return False
else:
seen_lines.add(line)
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
# it's a chain, for example, ":nova-billing - [0:0]"
# strip off everything except the chain name
line = line.split(':')[1]
line = line.split('- [')[0]
line = line.strip()
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
# it's a rule
# ignore [packet:byte] counts at beginning of lines
line = line.split(']', 1)[1]
line = line.strip()
for rule in remove_rules:
# ignore [packet:byte] counts at beginning of rules
rule_str = str(rule)
rule_str = rule_str.split(' ', 1)[1]
rule_str = rule_str.strip()
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates, letting the *last* occurrence take
# precedence. We also filter out anything in the "remove"
# lists.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
new_filter = filter(_weed_out_removes, new_filter)
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
for rule in remove_rules:
remove_rules.remove(rule)
return new_filter
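# A minimal usage sketch for IptablesManager (illustrative only; the chain
# and rule below are made-up examples, and apply() needs root plus working
# iptables-save/iptables-restore binaries). The pattern is: build chains and
# rules in memory, then flush them out atomically with apply(). The shared
# module-level iptables_manager used elsewhere in this file follows the
# same pattern.
def _iptables_manager_usage_sketch():
    manager = IptablesManager()
    manager.ipv4['filter'].add_chain('example')
    manager.ipv4['filter'].add_rule('example', '-p tcp --dport 22 -j ACCEPT')
    # '$example' is expanded to the wrapped chain name on output.
    manager.ipv4['filter'].add_rule('local', '-j $example')
    manager.apply()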
# NOTE(jkoelker) This is just a nice little stub point since mocking
# builtins with mox is a nightmare
def write_to_file(file, data, mode='w'):
with open(file, mode) as f:
f.write(data)
def is_pid_cmdline_correct(pid, match):
"""Ensure that the cmdline for a pid seems sane
Because pids are recycled, blindly killing by pid is something to
avoid. This provides the ability to include a substring that is
expected in the cmdline as a safety check.
"""
try:
with open('/proc/%d/cmdline' % pid) as f:
cmdline = f.read()
return match in cmdline
except EnvironmentError:
return False
def metadata_forward():
"""Create forwarding rule for metadata."""
if CONF.metadata_host != '127.0.0.1':
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j DNAT '
'--to-destination %s:%s' %
(CONF.metadata_host,
CONF.metadata_port))
else:
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 '
'-j REDIRECT --to-ports %s' %
CONF.metadata_port)
iptables_manager.apply()
def _iptables_dest(ip):
if ((netaddr.IPAddress(ip).version == 4 and ip == '127.0.0.1')
or ip == '::1'):
return '-m addrtype --dst-type LOCAL'
else:
return '-d %s' % ip
def metadata_accept():
"""Create the filter accept rule for metadata."""
rule = ('-p tcp -m tcp --dport %s %s -j ACCEPT' %
(CONF.metadata_port, _iptables_dest(CONF.metadata_host)))
if netaddr.IPAddress(CONF.metadata_host).version == 4:
iptables_manager.ipv4['filter'].add_rule('INPUT', rule)
else:
iptables_manager.ipv6['filter'].add_rule('INPUT', rule)
iptables_manager.apply()
def add_snat_rule(ip_range, is_external=False):
if CONF.routing_source_ip:
if is_external:
if CONF.force_snat_range:
snat_range = CONF.force_snat_range
else:
snat_range = []
else:
snat_range = ['0.0.0.0/0']
for dest_range in snat_range:
rule = ('-s %s -d %s -j SNAT --to-source %s'
% (ip_range, dest_range, CONF.routing_source_ip))
if not is_external and CONF.public_interface:
rule += ' -o %s' % CONF.public_interface
iptables_manager.ipv4['nat'].add_rule('snat', rule)
iptables_manager.apply()
def init_host(ip_range, is_external=False):
"""Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
add_snat_rule(ip_range, is_external)
rules = []
if is_external:
for snat_range in CONF.force_snat_range:
rules.append('PREROUTING -p ipv4 --ip-src %s --ip-dst %s '
'-j redirect --redirect-target ACCEPT' %
(ip_range, snat_range))
if rules:
ensure_ebtables_rules(rules, 'nat')
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s/32 -j ACCEPT' %
(ip_range, CONF.metadata_host))
for dmz in CONF.dmz_cidr:
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s -j ACCEPT' %
(ip_range, dmz))
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %(range)s -d %(range)s '
'-m conntrack ! --ctstate DNAT '
'-j ACCEPT' %
{'range': ip_range})
iptables_manager.apply()
def send_arp_for_ip(ip, device, count):
out, err = _execute('arping', '-U', ip,
'-A', '-I', device,
'-c', str(count),
run_as_root=True, check_exit_code=False)
if err:
LOG.debug('arping error for ip %s', ip)
def bind_floating_ip(floating_ip, device):
"""Bind ip to public interface."""
_execute('ip', 'addr', 'add', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
if CONF.send_arp_for_ha and CONF.send_arp_for_ha_count > 0:
send_arp_for_ip(floating_ip, device, CONF.send_arp_for_ha_count)
def unbind_floating_ip(floating_ip, device):
"""Unbind a public ip from public interface."""
_execute('ip', 'addr', 'del', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_metadata_ip():
"""Sets up local metadata ip."""
_execute('ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo',
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_vpn_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
'--dport 1194 '
'-j ACCEPT' % private_ip)
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.ipv4['nat'].add_rule('OUTPUT',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.apply()
def ensure_floating_forward(floating_ip, fixed_ip, device, network):
"""Ensure floating ip forwarding rule."""
# NOTE(vish): Make sure we never have duplicate rules for the same ip
regex = '.*\s+%s(/32|\s+|$)' % floating_ip
num_rules = iptables_manager.ipv4['nat'].remove_rules_regex(regex)
if num_rules:
msg = _LW('Removed %(num)d duplicate rules for floating ip %(float)s')
LOG.warn(msg, {'num': num_rules, 'float': floating_ip})
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
if device != network['bridge']:
ensure_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
def remove_floating_forward(floating_ip, fixed_ip, device, network):
"""Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
if device != network['bridge']:
remove_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
def floating_ebtables_rules(fixed_ip, network):
"""Makes sure only in-network traffic is bridged."""
return (['PREROUTING --logical-in %s -p ipv4 --ip-src %s '
'! --ip-dst %s -j redirect --redirect-target ACCEPT' %
(network['bridge'], fixed_ip, network['cidr'])], 'nat')
def floating_forward_rules(floating_ip, fixed_ip, device):
rules = []
rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
if device:
rules.append(('float-snat', rule + ' -d %s' % fixed_ip))
rules.append(('float-snat', rule + ' -o %s' % device))
else:
rules.append(('float-snat', rule))
rules.append(
('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
rules.append(
('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
rules.append(('POSTROUTING', '-s %s -m conntrack --ctstate DNAT -j SNAT '
'--to-source %s' %
(fixed_ip, floating_ip)))
return rules
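# A short sketch of the rules generated for one association (illustrative
# only; the addresses and device are example values, and the helper name is
# hypothetical):
def _floating_forward_rules_sketch():
    return floating_forward_rules('198.51.100.10', '10.0.0.5', 'eth0')
    # expected:
    # [('float-snat', '-s 10.0.0.5 -j SNAT --to 198.51.100.10 -d 10.0.0.5'),
    #  ('float-snat', '-s 10.0.0.5 -j SNAT --to 198.51.100.10 -o eth0'),
    #  ('PREROUTING', '-d 198.51.100.10 -j DNAT --to 10.0.0.5'),
    #  ('OUTPUT', '-d 198.51.100.10 -j DNAT --to 10.0.0.5'),
    #  ('POSTROUTING', '-s 10.0.0.5 -m conntrack --ctstate DNAT -j SNAT '
    #   '--to-source 198.51.100.10')]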
def clean_conntrack(fixed_ip):
try:
_execute('conntrack', '-D', '-r', fixed_ip, run_as_root=True,
check_exit_code=[0, 1])
except processutils.ProcessExecutionError:
LOG.exception(_LE('Error deleting conntrack entries for %s'), fixed_ip)
def _enable_ipv4_forwarding():
sysctl_key = 'net.ipv4.ip_forward'
stdout, stderr = _execute('sysctl', '-n', sysctl_key)
if stdout.strip() != '1':
_execute('sysctl', '-w', '%s=1' % sysctl_key, run_as_root=True)
@utils.synchronized('lock_gateway', external=True)
def initialize_gateway_device(dev, network_ref):
if not network_ref:
return
_enable_ipv4_forwarding()
# NOTE(vish): The ip for dnsmasq has to be the first address on the
# bridge for it to respond to requests properly
try:
prefix = network_ref.cidr.prefixlen
except AttributeError:
prefix = network_ref['cidr'].rpartition('/')[2]
full_ip = '%s/%s' % (network_ref['dhcp_server'], prefix)
new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
old_ip_params = []
out, err = _execute('ip', 'addr', 'show', 'dev', dev,
'scope', 'global')
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
if fields[-2] in ('secondary', 'dynamic'):
ip_params = fields[1:-2]
else:
ip_params = fields[1:-1]
old_ip_params.append(ip_params)
if ip_params[0] != full_ip:
new_ip_params.append(ip_params)
if not old_ip_params or old_ip_params[0][0] != full_ip:
old_routes = []
result = _execute('ip', 'route', 'show', 'dev', dev)
if result:
out, err = result
for line in out.split('\n'):
fields = line.split()
if fields and 'via' in fields:
old_routes.append(fields)
_execute('ip', 'route', 'del', fields[0],
'dev', dev, run_as_root=True)
for ip_params in old_ip_params:
_execute(*_ip_bridge_cmd('del', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for ip_params in new_ip_params:
_execute(*_ip_bridge_cmd('add', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for fields in old_routes:
_execute('ip', 'route', 'add', *fields,
run_as_root=True)
if CONF.send_arp_for_ha and CONF.send_arp_for_ha_count > 0:
send_arp_for_ip(network_ref['dhcp_server'], dev,
CONF.send_arp_for_ha_count)
if CONF.use_ipv6:
_execute('ip', '-f', 'inet6', 'addr',
'change', network_ref['cidr_v6'],
'dev', dev, run_as_root=True)
def get_dhcp_leases(context, network_ref):
"""Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
host = None
if network_ref['multi_host']:
host = CONF.host
for fixedip in objects.FixedIPList.get_by_network(context,
network_ref,
host=host):
# NOTE(cfb): Don't return a lease entry if the IP isn't
# already leased
if fixedip.leased:
hosts.append(_host_lease(fixedip))
return '\n'.join(hosts)
def get_dhcp_hosts(context, network_ref, fixedips):
"""Get network's hosts config in dhcp-host format."""
hosts = []
macs = set()
for fixedip in fixedips:
if fixedip.allocated:
if fixedip.virtual_interface.address not in macs:
hosts.append(_host_dhcp(fixedip))
macs.add(fixedip.virtual_interface.address)
return '\n'.join(hosts)
def get_dns_hosts(context, network_ref):
"""Get network's DNS hosts in hosts format."""
hosts = []
for fixedip in objects.FixedIPList.get_by_network(context, network_ref):
if fixedip.allocated:
hosts.append(_host_dns(fixedip))
return '\n'.join(hosts)
def _add_dnsmasq_accept_rules(dev):
"""Allow DHCP and DNS traffic through to dnsmasq."""
table = iptables_manager.ipv4['filter']
for port in [67, 53]:
for proto in ['udp', 'tcp']:
args = {'dev': dev, 'port': port, 'proto': proto}
table.add_rule('INPUT',
'-i %(dev)s -p %(proto)s -m %(proto)s '
'--dport %(port)s -j ACCEPT' % args)
iptables_manager.apply()
def _remove_dnsmasq_accept_rules(dev):
"""Remove DHCP and DNS traffic allowed through to dnsmasq."""
table = iptables_manager.ipv4['filter']
for port in [67, 53]:
for proto in ['udp', 'tcp']:
args = {'dev': dev, 'port': port, 'proto': proto}
table.remove_rule('INPUT',
'-i %(dev)s -p %(proto)s -m %(proto)s '
'--dport %(port)s -j ACCEPT' % args)
iptables_manager.apply()
# NOTE(russellb) Curious why this is needed? Check out this explanation from
# markmc: https://bugzilla.redhat.com/show_bug.cgi?id=910619#c6
def _add_dhcp_mangle_rule(dev):
table = iptables_manager.ipv4['mangle']
table.add_rule('POSTROUTING',
'-o %s -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill' % dev)
iptables_manager.apply()
def _remove_dhcp_mangle_rule(dev):
table = iptables_manager.ipv4['mangle']
table.remove_rule('POSTROUTING',
'-o %s -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill' % dev)
iptables_manager.apply()
def get_dhcp_opts(context, network_ref, fixedips):
"""Get network's hosts config in dhcp-opts format."""
gateway = network_ref['gateway']
# NOTE(vish): if we are in multi-host mode and we are not sharing
# addresses, then we actually need to hand out the
# dhcp server address as the gateway.
if network_ref['multi_host'] and not (network_ref['share_address'] or
CONF.share_dhcp_address):
gateway = network_ref['dhcp_server']
hosts = []
if CONF.use_single_default_gateway:
for fixedip in fixedips:
if fixedip.allocated:
vif_id = fixedip.virtual_interface_id
if fixedip.default_route:
hosts.append(_host_dhcp_opts(vif_id, gateway))
else:
hosts.append(_host_dhcp_opts(vif_id))
else:
hosts.append(_host_dhcp_opts(None, gateway))
return '\n'.join(hosts)
def release_dhcp(dev, address, mac_address):
if device_exists(dev):
try:
utils.execute('dhcp_release', dev, address, mac_address,
run_as_root=True)
except processutils.ProcessExecutionError:
raise exception.NetworkDhcpReleaseFailed(address=address,
mac_address=mac_address)
def update_dhcp(context, dev, network_ref):
conffile = _dhcp_file(dev, 'conf')
host = None
if network_ref['multi_host']:
host = CONF.host
fixedips = objects.FixedIPList.get_by_network(context,
network_ref,
host=host)
write_to_file(conffile, get_dhcp_hosts(context, network_ref, fixedips))
restart_dhcp(context, dev, network_ref, fixedips)
def update_dns(context, dev, network_ref):
hostsfile = _dhcp_file(dev, 'hosts')
host = None
if network_ref['multi_host']:
host = CONF.host
fixedips = objects.FixedIPList.get_by_network(context,
network_ref,
host=host)
write_to_file(hostsfile, get_dns_hosts(context, network_ref))
restart_dhcp(context, dev, network_ref, fixedips)
def update_dhcp_hostfile_with_text(dev, hosts_text):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, hosts_text)
def kill_dhcp(dev):
pid = _dnsmasq_pid_for(dev)
if pid:
# Check that the process exists and looks like a dnsmasq process
conffile = _dhcp_file(dev, 'conf')
if is_pid_cmdline_correct(pid, conffile.split('/')[-1]):
_execute('kill', '-9', pid, run_as_root=True)
else:
LOG.debug('Pid %d is stale, skip killing dnsmasq', pid)
_remove_dnsmasq_accept_rules(dev)
_remove_dhcp_mangle_rule(dev)
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
#           configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def restart_dhcp(context, dev, network_ref, fixedips):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
"""
conffile = _dhcp_file(dev, 'conf')
optsfile = _dhcp_file(dev, 'opts')
write_to_file(optsfile, get_dhcp_opts(context, network_ref, fixedips))
os.chmod(optsfile, 0o644)
_add_dhcp_mangle_rule(dev)
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0o644)
pid = _dnsmasq_pid_for(dev)
# if dnsmasq is already running, then tell it to reload
if pid:
if is_pid_cmdline_correct(pid, conffile.split('/')[-1]):
try:
_execute('kill', '-HUP', pid, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
return
except Exception as exc:
LOG.error(_LE('kill -HUP dnsmasq threw %s'), exc)
else:
LOG.debug('Pid %d is stale, relaunching dnsmasq', pid)
cmd = ['env',
'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile),
'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
'--bind-interfaces',
'--conf-file=%s' % CONF.dnsmasq_config_file,
'--pid-file=%s' % _dhcp_file(dev, 'pid'),
'--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=set:%s,%s,static,%s,%ss' %
(network_ref['label'],
network_ref['dhcp_start'],
network_ref['netmask'],
CONF.dhcp_lease_time),
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % CONF.dhcpbridge,
'--no-hosts',
'--leasefile-ro']
# dnsmasq currently gives an error for an empty domain,
# rather than ignoring. So only specify it if defined.
if CONF.dhcp_domain:
cmd.append('--domain=%s' % CONF.dhcp_domain)
dns_servers = CONF.dns_server
if CONF.use_network_dns_servers:
if network_ref.get('dns1'):
dns_servers.append(network_ref.get('dns1'))
if network_ref.get('dns2'):
dns_servers.append(network_ref.get('dns2'))
if network_ref['multi_host']:
cmd.append('--addn-hosts=%s' % _dhcp_file(dev, 'hosts'))
if dns_servers:
cmd.append('--no-resolv')
for dns_server in dns_servers:
cmd.append('--server=%s' % dns_server)
_execute(*cmd, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
@utils.synchronized('radvd_start')
def update_ra(context, dev, network_ref):
conffile = _ra_file(dev, 'conf')
conf_str = """
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
};
};
""" % (dev, network_ref['cidr_v6'])
write_to_file(conffile, conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0o644)
pid = _ra_pid_for(dev)
# if radvd is already running, then tell it to reload
if pid:
if is_pid_cmdline_correct(pid, conffile):
try:
_execute('kill', pid, run_as_root=True)
except Exception as exc:
LOG.error(_LE('killing radvd threw %s'), exc)
else:
LOG.debug('Pid %d is stale, relaunching radvd', pid)
cmd = ['radvd',
'-C', '%s' % _ra_file(dev, 'conf'),
'-p', '%s' % _ra_file(dev, 'pid')]
_execute(*cmd, run_as_root=True)
def _host_lease(fixedip):
"""Return a host string for an address in leasefile format."""
timestamp = timeutils.utcnow()
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return '%d %s %s %s *' % (seconds_since_epoch + CONF.dhcp_lease_time,
fixedip.virtual_interface.address,
fixedip.address,
fixedip.instance.hostname or '*')
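# Illustrative sketch only (values are made up, not from the original module):
# a line produced by _host_lease() follows the dnsmasq leasefile layout
#   1400000120 fa:16:3e:12:34:56 10.0.0.3 my-instance *
# i.e. lease expiry (epoch seconds), MAC, fixed IP, hostname, client-id placeholder.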
def _host_dhcp_network(vif_id):
return 'NW-%s' % vif_id
def _host_dhcp(fixedip):
"""Return a host string for an address in dhcp-host format."""
# NOTE(cfb): dnsmasq on linux only supports 64 characters in the hostname
# field (LP #1238910). Since the . counts as a character we need
# to truncate the hostname to only 63 characters.
hostname = fixedip.instance.hostname
if len(hostname) > 63:
LOG.warning(_LW('hostname %s too long, truncating.') % (hostname))
hostname = fixedip.instance.hostname[:2] + '-' +\
fixedip.instance.hostname[-60:]
if CONF.use_single_default_gateway:
net = _host_dhcp_network(fixedip.virtual_interface_id)
return '%s,%s.%s,%s,net:%s' % (fixedip.virtual_interface.address,
hostname,
CONF.dhcp_domain,
fixedip.address,
net)
else:
return '%s,%s.%s,%s' % (fixedip.virtual_interface.address,
hostname,
CONF.dhcp_domain,
fixedip.address)
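# Illustrative sketch (assumed values): with use_single_default_gateway disabled,
# _host_dhcp() emits a dnsmasq --dhcp-hostsfile entry such as
#   fa:16:3e:12:34:56,my-instance.novalocal,10.0.0.3
# and with it enabled the entry gains a trailing ',net:NW-<vif_id>' tag.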
def _host_dns(fixedip):
return '%s\t%s.%s' % (fixedip.address,
fixedip.instance.hostname,
CONF.dhcp_domain)
def _host_dhcp_opts(vif_id=None, gateway=None):
"""Return an empty gateway option."""
values = []
if vif_id is not None:
values.append(_host_dhcp_network(vif_id))
# NOTE(vish): 3 is the dhcp option for gateway.
values.append('3')
if gateway:
values.append('%s' % gateway)
return ','.join(values)
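# Illustrative sketch (hypothetical vif id / gateway): _host_dhcp_opts('42', '10.0.0.1')
# returns 'NW-42,3,10.0.0.1', while _host_dhcp_opts() returns just '3', i.e. an empty
# router option that suppresses the default gateway for the tagged hosts.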
def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network."""
if CONF.fake_network:
LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def device_exists(device):
"""Check if ethernet device exists."""
return os.path.exists('/sys/class/net/%s' % device)
def _dhcp_file(dev, kind):
"""Return path to a pid, leases, hosts or conf file for a bridge/device."""
fileutils.ensure_tree(CONF.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (CONF.networks_path,
dev,
kind))
def _ra_file(dev, kind):
"""Return path to a pid or conf file for a bridge/device."""
fileutils.ensure_tree(CONF.networks_path)
return os.path.abspath('%s/nova-ra-%s.%s' % (CONF.networks_path,
dev,
kind))
def _dnsmasq_pid_for(dev):
"""Returns the pid for prior dnsmasq instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _dhcp_file(dev, 'pid')
if os.path.exists(pid_file):
try:
with open(pid_file, 'r') as f:
return int(f.read())
except (ValueError, IOError):
return None
def _ra_pid_for(dev):
"""Returns the pid for prior radvd instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _ra_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
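# Illustrative sketch: _ip_bridge_cmd('add', ['10.0.0.1/24', 'brd', '10.0.0.255'], 'br100')
# builds ['ip', 'addr', 'add', '10.0.0.1/24', 'brd', '10.0.0.255', 'dev', 'br100'].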
def _set_device_mtu(dev, mtu=None):
"""Set the device MTU."""
if not mtu:
mtu = CONF.network_device_mtu
if mtu:
utils.execute('ip', 'link', 'set', dev, 'mtu',
mtu, run_as_root=True,
check_exit_code=[0, 2, 254])
def _create_veth_pair(dev1_name, dev2_name):
"""Create a pair of veth devices with the specified names,
deleting any previous devices with those names.
"""
for dev in [dev1_name, dev2_name]:
delete_net_dev(dev)
utils.execute('ip', 'link', 'add', dev1_name, 'type', 'veth', 'peer',
'name', dev2_name, run_as_root=True)
for dev in [dev1_name, dev2_name]:
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'promisc', 'on',
run_as_root=True)
_set_device_mtu(dev)
def _ovs_vsctl(args):
full_args = ['ovs-vsctl', '--timeout=%s' % CONF.ovs_vsctl_timeout] + args
try:
return utils.execute(*full_args, run_as_root=True)
except Exception as e:
LOG.error(_LE("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
raise exception.AgentError(method=full_args)
def create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id):
_ovs_vsctl(['--', '--if-exists', 'del-port', dev, '--',
'add-port', bridge, dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % iface_id,
'external-ids:iface-status=active',
'external-ids:attached-mac=%s' % mac,
'external-ids:vm-uuid=%s' % instance_id])
_set_device_mtu(dev)
def delete_ovs_vif_port(bridge, dev):
_ovs_vsctl(['--', '--if-exists', 'del-port', bridge, dev])
delete_net_dev(dev)
def ovs_set_vhostuser_port_type(dev):
_ovs_vsctl(['--', 'set', 'Interface', dev, 'type=dpdkvhostuser'])
def create_ivs_vif_port(dev, iface_id, mac, instance_id):
utils.execute('ivs-ctl', 'add-port',
dev, run_as_root=True)
def delete_ivs_vif_port(dev):
utils.execute('ivs-ctl', 'del-port', dev,
run_as_root=True)
utils.execute('ip', 'link', 'delete', dev,
run_as_root=True)
def create_tap_dev(dev, mac_address=None):
if not device_exists(dev):
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
run_as_root=True, check_exit_code=[0, 2, 254])
except processutils.ProcessExecutionError:
# Second option: tunctl
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
if mac_address:
utils.execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True, check_exit_code=[0, 2, 254])
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
def delete_net_dev(dev):
"""Delete a network device only if it exists."""
if device_exists(dev):
try:
utils.execute('ip', 'link', 'delete', dev, run_as_root=True,
check_exit_code=[0, 2, 254])
LOG.debug("Net device removed: '%s'", dev)
except processutils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed removing net device: '%s'"), dev)
def delete_bridge_dev(dev):
"""Delete a network bridge."""
if device_exists(dev):
try:
utils.execute('ip', 'link', 'set', dev, 'down', run_as_root=True)
utils.execute('brctl', 'delbr', dev, run_as_root=True)
except processutils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed removing bridge device: '%s'"), dev)
# Similar to compute virt layers, the Linux network node
# code uses a flexible driver model to support different ways
# of creating ethernet interfaces and attaching them to the network.
# In the case of a network host, these interfaces
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
interface_driver = None
def _get_interface_driver():
global interface_driver
if not interface_driver:
interface_driver = importutils.import_object(
CONF.linuxnet_interface_driver)
return interface_driver
def plug(network, mac_address, gateway=True):
return _get_interface_driver().plug(network, mac_address, gateway)
def unplug(network):
return _get_interface_driver().unplug(network)
def get_dev(network):
return _get_interface_driver().get_dev(network)
class LinuxNetInterfaceDriver(object):
"""Abstract class that defines generic network host API
for all Linux interface drivers.
"""
def plug(self, network, mac_address):
"""Create Linux device, return device name."""
raise NotImplementedError()
def unplug(self, network):
"""Destroy Linux device, return device name."""
raise NotImplementedError()
def get_dev(self, network):
"""Get device name."""
raise NotImplementedError()
# plugs interfaces using Linux Bridge
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
vlan = network.get('vlan')
if vlan is not None:
iface = CONF.vlan_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
vlan,
network['bridge'],
iface,
network,
mac_address,
network.get('mtu'))
iface = 'vlan%s' % vlan
else:
iface = CONF.flat_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
iface,
network, gateway)
if network['share_address'] or CONF.share_dhcp_address:
isolate_dhcp_address(iface, network['dhcp_server'])
# NOTE(vish): applying here so we don't get a lock conflict
iptables_manager.apply()
return network['bridge']
def unplug(self, network, gateway=True):
vlan = network.get('vlan')
if vlan is not None:
iface = 'vlan%s' % vlan
LinuxBridgeInterfaceDriver.remove_vlan_bridge(vlan,
network['bridge'])
else:
iface = CONF.flat_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.remove_bridge(network['bridge'],
gateway)
if network['share_address'] or CONF.share_dhcp_address:
remove_isolate_dhcp_address(iface, network['dhcp_server'])
iptables_manager.apply()
return self.get_dev(network)
def get_dev(self, network):
return network['bridge']
@staticmethod
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface,
net_attrs=None, mac_address=None,
mtu=None):
"""Create a vlan and bridge unless they already exist."""
interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
bridge_interface, mac_address,
mtu)
LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
return interface
@staticmethod
def remove_vlan_bridge(vlan_num, bridge):
"""Delete a bridge and vlan."""
LinuxBridgeInterfaceDriver.remove_bridge(bridge)
LinuxBridgeInterfaceDriver.remove_vlan(vlan_num)
@staticmethod
@utils.synchronized('lock_vlan', external=True)
def ensure_vlan(vlan_num, bridge_interface, mac_address=None, mtu=None,
interface=None):
"""Create a vlan unless it already exists."""
if interface is None:
interface = 'vlan%s' % vlan_num
if not device_exists(interface):
LOG.debug('Starting VLAN interface %s', interface)
_execute('ip', 'link', 'add', 'link', bridge_interface,
'name', interface, 'type', 'vlan',
'id', vlan_num, run_as_root=True,
check_exit_code=[0, 2, 254])
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
_execute('ip', 'link', 'set', interface, 'address',
mac_address, run_as_root=True,
check_exit_code=[0, 2, 254])
_execute('ip', 'link', 'set', interface, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
# NOTE(vish): set mtu every time to ensure that changes to mtu get
        #             propagated
_set_device_mtu(interface, mtu)
return interface
@staticmethod
@utils.synchronized('lock_vlan', external=True)
def remove_vlan(vlan_num):
"""Delete a vlan."""
vlan_interface = 'vlan%s' % vlan_num
delete_net_dev(vlan_interface)
@staticmethod
@utils.synchronized('lock_bridge', external=True)
def ensure_bridge(bridge, interface, net_attrs=None, gateway=True,
filtering=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
:param gateway: whether or not the bridge is a gateway.
:param filtering: whether or not to create filters on the bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
The code will attempt to move any ips that already exist on the
interface onto the bridge and reset the default gateway if necessary.
"""
if not device_exists(bridge):
LOG.debug('Starting Bridge %s', bridge)
out, err = _execute('brctl', 'addbr', bridge,
check_exit_code=False, run_as_root=True)
if (err and err != "device %s already exists; can't create "
"bridge with the same name\n" % (bridge)):
msg = _('Failed to add bridge: %s') % err
raise exception.NovaException(msg)
_execute('brctl', 'setfd', bridge, 0, run_as_root=True)
# _execute('brctl setageing %s 10' % bridge, run_as_root=True)
_execute('brctl', 'stp', bridge, 'off', run_as_root=True)
# (danwent) bridge device MAC address can't be set directly.
# instead it inherits the MAC address of the first device on the
# bridge, which will either be the vlan interface, or a
# physical NIC.
_execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
if interface:
LOG.debug('Adding interface %(interface)s to bridge %(bridge)s',
{'interface': interface, 'bridge': bridge})
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
if (err and err != "device %s is already a member of a bridge; "
"can't enslave it to bridge %s.\n" % (interface, bridge)):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
out, err = _execute('ip', 'link', 'set', interface, 'up',
check_exit_code=False, run_as_root=True)
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
# NOTE(danms): We also need to copy routes to the bridge so as
# not to break existing connectivity on the interface
old_routes = []
out, err = _execute('ip', 'route', 'show', 'dev', interface)
for line in out.split('\n'):
fields = line.split()
if fields and 'via' in fields:
old_routes.append(fields)
_execute('ip', 'route', 'del', *fields,
run_as_root=True)
out, err = _execute('ip', 'addr', 'show', 'dev', interface,
'scope', 'global')
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
if fields[-2] in ('secondary', 'dynamic', ):
params = fields[1:-2]
else:
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]),
run_as_root=True, check_exit_code=[0, 2, 254])
_execute(*_ip_bridge_cmd('add', params, bridge),
run_as_root=True, check_exit_code=[0, 2, 254])
for fields in old_routes:
_execute('ip', 'route', 'add', *fields,
run_as_root=True)
if filtering:
# Don't forward traffic unless we were told to be a gateway
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
for rule in get_gateway_rules(bridge):
ipv4_filter.add_rule(*rule)
else:
ipv4_filter.add_rule('FORWARD',
('--in-interface %s -j %s'
% (bridge, CONF.iptables_drop_action)))
ipv4_filter.add_rule('FORWARD',
('--out-interface %s -j %s'
% (bridge, CONF.iptables_drop_action)))
@staticmethod
@utils.synchronized('lock_bridge', external=True)
def remove_bridge(bridge, gateway=True, filtering=True):
"""Delete a bridge."""
if not device_exists(bridge):
return
else:
if filtering:
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
for rule in get_gateway_rules(bridge):
ipv4_filter.remove_rule(*rule)
else:
drop_actions = ['DROP']
if CONF.iptables_drop_action != 'DROP':
drop_actions.append(CONF.iptables_drop_action)
for drop_action in drop_actions:
ipv4_filter.remove_rule('FORWARD',
('--in-interface %s -j %s'
% (bridge, drop_action)))
ipv4_filter.remove_rule('FORWARD',
('--out-interface %s -j %s'
% (bridge, drop_action)))
delete_bridge_dev(bridge)
# NOTE(cfb): This is a temporary fix to LP #1316621. We really want to call
# ebtables with --concurrent. In order to do that though we need
# libvirt to support this. Additionally since ebtables --concurrent
# will hang indefinitely waiting on the lock we need to teach
# oslo_concurrency.processutils how to timeout a long running
# process first. Once those are complete we can replace all of this
# with calls to ebtables --concurrent and a reasonable timeout.
def _exec_ebtables(*cmd, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', True)
# List of error strings to re-try.
retry_strings = (
'Multiple ebtables programs',
)
# We always try at least once
attempts = CONF.ebtables_exec_attempts
if attempts <= 0:
attempts = 1
count = 1
while count <= attempts:
        # Update our counters if needed
sleep = CONF.ebtables_retry_interval * count
count += 1
# NOTE(cfb): ebtables reports all errors with a return code of 255.
# As such we can't know if we hit a locking error, or some
        #            other error (like a rule doesn't exist) so we have
        #            to parse stderr.
try:
_execute(*cmd, check_exit_code=[0], **kwargs)
except processutils.ProcessExecutionError as exc:
# See if we can retry the error.
if any(error in exc.stderr for error in retry_strings):
if count > attempts and check_exit_code:
LOG.warning(_LW('%s failed. Not Retrying.'), ' '.join(cmd))
raise
else:
# We need to sleep a bit before retrying
LOG.warning(_LW("%(cmd)s failed. Sleeping %(time)s "
"seconds before retry."),
{'cmd': ' '.join(cmd), 'time': sleep})
time.sleep(sleep)
else:
# Not eligible for retry
if check_exit_code:
LOG.warning(_LW('%s failed. Not Retrying.'), ' '.join(cmd))
raise
else:
return
else:
# Success
return
@utils.synchronized('ebtables', external=True)
def ensure_ebtables_rules(rules, table='filter'):
for rule in rules:
cmd = ['ebtables', '-t', table, '-D'] + rule.split()
_exec_ebtables(*cmd, check_exit_code=False, run_as_root=True)
cmd[3] = '-I'
_exec_ebtables(*cmd, run_as_root=True)
@utils.synchronized('ebtables', external=True)
def remove_ebtables_rules(rules, table='filter'):
for rule in rules:
cmd = ['ebtables', '-t', table, '-D'] + rule.split()
_exec_ebtables(*cmd, check_exit_code=False, run_as_root=True)
def isolate_dhcp_address(interface, address):
# block arp traffic to address across the interface
rules = []
rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP'
% (interface, address))
rules.append('OUTPUT -p ARP -o %s --arp-ip-src %s -j DROP'
% (interface, address))
rules.append('FORWARD -p IPv4 -i %s --ip-protocol udp '
'--ip-destination-port 67:68 -j DROP'
% interface)
rules.append('FORWARD -p IPv4 -o %s --ip-protocol udp '
'--ip-destination-port 67:68 -j DROP'
% interface)
# NOTE(vish): the above is not possible with iptables/arptables
ensure_ebtables_rules(rules)
def remove_isolate_dhcp_address(interface, address):
# block arp traffic to address across the interface
rules = []
rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP'
% (interface, address))
rules.append('OUTPUT -p ARP -o %s --arp-ip-src %s -j DROP'
% (interface, address))
rules.append('FORWARD -p IPv4 -i %s --ip-protocol udp '
'--ip-destination-port 67:68 -j DROP'
% interface)
rules.append('FORWARD -p IPv4 -o %s --ip-protocol udp '
'--ip-destination-port 67:68 -j DROP'
% interface)
remove_ebtables_rules(rules)
# NOTE(vish): the above is not possible with iptables/arptables
def get_gateway_rules(bridge):
interfaces = CONF.forward_bridge_interface
if 'all' in interfaces:
return [('FORWARD', '-i %s -j ACCEPT' % bridge),
('FORWARD', '-o %s -j ACCEPT' % bridge)]
rules = []
for iface in CONF.forward_bridge_interface:
if iface:
rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (bridge,
iface)))
rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (iface,
bridge)))
rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (bridge, bridge)))
rules.append(('FORWARD', '-i %s -j %s' % (bridge,
CONF.iptables_drop_action)))
rules.append(('FORWARD', '-o %s -j %s' % (bridge,
CONF.iptables_drop_action)))
return rules
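# Illustrative sketch: with CONF.forward_bridge_interface = ['all'],
# get_gateway_rules('br100') returns
#   [('FORWARD', '-i br100 -j ACCEPT'), ('FORWARD', '-o br100 -j ACCEPT')]
# otherwise it emits per-interface ACCEPT pairs followed by the drop-action rules.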
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
if not device_exists(dev):
bridge = CONF.linuxnet_ovs_integration_bridge
_ovs_vsctl(['--', '--may-exist', 'add-port', bridge, dev,
'--', 'set', 'Interface', dev, 'type=internal',
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-status=active',
'--', 'set', 'Interface', dev,
'external-ids:attached-mac=%s' % mac_address])
_execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True)
_set_device_mtu(dev, network.get('mtu'))
_execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
_execute('ovs-ofctl',
'add-flow', bridge, 'priority=1,actions=drop',
run_as_root=True)
_execute('ovs-ofctl', 'add-flow', bridge,
'udp,tp_dst=67,dl_dst=%s,priority=2,actions=normal' %
mac_address, run_as_root=True)
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j %s' % (bridge,
CONF.iptables_drop_action))
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j %s' % (bridge,
CONF.iptables_drop_action))
else:
for rule in get_gateway_rules(bridge):
iptables_manager.ipv4['filter'].add_rule(*rule)
return dev
def unplug(self, network):
dev = self.get_dev(network)
bridge = CONF.linuxnet_ovs_integration_bridge
_ovs_vsctl(['--', '--if-exists', 'del-port', bridge, dev])
return dev
def get_dev(self, network):
dev = 'gw-' + str(network['uuid'][0:11])
return dev
# plugs interfaces using Linux Bridge when using NeutronManager
class NeutronLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
BRIDGE_NAME_PREFIX = 'brq'
GATEWAY_INTERFACE_PREFIX = 'gw-'
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
bridge = self.get_bridge(network)
if not gateway:
            # If we weren't instructed to act as a gateway then make sure
            # iptables won't forward any traffic on this bridge.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
('--in-interface %s -j %s'
% (bridge, CONF.iptables_drop_action)))
iptables_manager.ipv4['filter'].add_rule('FORWARD',
('--out-interface %s -j %s'
% (bridge, CONF.iptables_drop_action)))
return bridge
else:
for rule in get_gateway_rules(bridge):
iptables_manager.ipv4['filter'].add_rule(*rule)
create_tap_dev(dev, mac_address)
if not device_exists(bridge):
LOG.debug("Starting bridge %s ", bridge)
utils.execute('brctl', 'addbr', bridge, run_as_root=True)
utils.execute('brctl', 'setfd', bridge, str(0), run_as_root=True)
utils.execute('brctl', 'stp', bridge, 'off', run_as_root=True)
utils.execute('ip', 'link', 'set', bridge, 'address', mac_address,
run_as_root=True, check_exit_code=[0, 2, 254])
utils.execute('ip', 'link', 'set', bridge, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
LOG.debug("Done starting bridge %s", bridge)
full_ip = '%s/%s' % (network['dhcp_server'],
network['cidr'].rpartition('/')[2])
utils.execute('ip', 'address', 'add', full_ip, 'dev', bridge,
run_as_root=True, check_exit_code=[0, 2, 254])
return dev
def unplug(self, network):
dev = self.get_dev(network)
if not device_exists(dev):
return None
else:
delete_net_dev(dev)
return dev
def get_dev(self, network):
dev = self.GATEWAY_INTERFACE_PREFIX + str(network['uuid'][0:11])
return dev
def get_bridge(self, network):
bridge = self.BRIDGE_NAME_PREFIX + str(network['uuid'][0:11])
return bridge
# provide compatibility with existing configs
QuantumLinuxBridgeInterfaceDriver = NeutronLinuxBridgeInterfaceDriver
iptables_manager = IptablesManager()
def set_vf_interface_vlan(pci_addr, mac_addr, vlan=0):
pf_ifname = pci_utils.get_ifname_by_pci_address(pci_addr,
pf_interface=True)
vf_ifname = pci_utils.get_ifname_by_pci_address(pci_addr)
vf_num = pci_utils.get_vf_num_by_pci_address(pci_addr)
# Set the VF's mac address and vlan
exit_code = [0, 2, 254]
port_state = 'up' if vlan > 0 else 'down'
utils.execute('ip', 'link', 'set', pf_ifname,
'vf', vf_num,
'mac', mac_addr,
'vlan', vlan,
run_as_root=True,
check_exit_code=exit_code)
# Bring up/down the VF's interface
utils.execute('ip', 'link', 'set', vf_ifname,
port_state,
run_as_root=True,
check_exit_code=exit_code)
| apache-2.0 | -827,402,703,517,651,200 | 38.007128 | 79 | 0.539394 | false |
exa-analytics/atomic | exatomic/core/tests/test_basis.py | 2 | 5093 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic.core.basis import BasisSet
class TestBasisSet(TestCase):
def setUp(self):
adict = {col: [0] for col in BasisSet._columns}
adict['frame'] = 0
# Trivial basis set
self.bs = BasisSet(adict)
self.bs['alpha'] = self.bs['alpha'].astype(np.float64)
self.bs['d'] = self.bs['d'].astype(np.float64)
# Medium basis set
self.mbs = BasisSet({'frame': 0,
'alpha': [5., 1., 1.],
'd': [1., 1., 1.],
'shell': [0, 1, 0],
'set': [0, 0, 1],
'L': [0, 1, 0],
'n': [1, 2, 1]})
# Large basis set
self.lbs = BasisSet({'frame': 0,
'alpha': [5., 3., 1., 3., 1., 1., 3., 1., 1.],
'd': [1., 1., 1., 1., 1., 1., 1., 1., 1.],
'shell': [0, 0, 0, 1, 1, 2, 0, 0, 1],
'set': [0, 0, 0, 0, 0, 0, 1, 1, 1],
'L': [0, 0, 0, 1, 1, 2, 0, 0, 1]})
def test_lmax(self):
self.assertEqual(self.bs.lmax, 0)
self.assertEqual(self.mbs.lmax, 1)
self.assertEqual(self.lbs.lmax, 2)
def test_shells(self):
self.bs.shells()
self.mbs.shells()
self.lbs.shells()
def test_functions_by_shell(self):
n = ['set', 'L']
mfp = pd.MultiIndex.from_product
mfa = pd.MultiIndex.from_arrays
self.assertTrue((self.bs.functions_by_shell() ==
pd.Series([1], index=mfp([[0], [0]], names=n))).all())
s = self.mbs.functions_by_shell()
self.assertTrue((s[s != 0] ==
pd.Series([1, 1, 1], index=mfa([[0, 0, 1], [0, 1, 0]], names=n))).all())
s = self.lbs.functions_by_shell()
self.assertTrue((s[s != 0] ==
pd.Series([1, 1, 1, 1, 1], index=mfa([[0, 0, 0, 1, 1],
[0, 1, 2, 0, 1]], names=n))).all())
def test_primitives_by_shell(self):
n = ['set', 'L']
mfp = pd.MultiIndex.from_product
mfa = pd.MultiIndex.from_arrays
self.assertTrue((self.bs.primitives_by_shell() ==
pd.Series([1], index=mfp([[0], [0]], names=n))).all())
s = self.mbs.primitives_by_shell()
self.assertTrue((s[s != 0] ==
pd.Series([1, 1, 1], index=mfa([[0, 0, 1], [0, 1, 0]], names=n))).all())
s = self.lbs.primitives_by_shell()
self.assertTrue((s[s != 0] ==
pd.Series([3, 2, 1, 2, 1], index=mfa([[0, 0, 0, 1, 1],
[0, 1, 2, 0, 1]], names=n))).all())
def test_functions(self):
n = ['set', 'L']
mfp = pd.MultiIndex.from_product
mfa = pd.MultiIndex.from_arrays
self.assertTrue((self.bs.functions(False) ==
pd.Series([1], index=mfp([[0], [0]], names=n))).all())
self.assertTrue((self.bs.functions(True) ==
pd.Series([1], index=mfp([[0], [0]], names=n))).all())
self.assertTrue((self.mbs.functions(False) ==
pd.Series([1, 3, 1], index=mfa([[0, 0, 1], [0, 1, 0]], names=n))).all())
self.assertTrue((self.mbs.functions(True) ==
pd.Series([1, 3, 1], index=mfa([[0, 0, 1], [0, 1, 0]], names=n))).all())
self.assertTrue((self.lbs.functions(False) ==
pd.Series([1, 3, 6, 1, 3], index=mfa([[0, 0, 0, 1, 1],
[0, 1, 2, 0, 1]], names=n))).all())
self.assertTrue((self.lbs.functions(True) ==
pd.Series([1, 3, 5, 1, 3], index=mfa([[0, 0, 0, 1, 1],
[0, 1, 2, 0, 1]], names=n))).all())
def test_primitives(self):
n = ['set', 'L']
mfp = pd.MultiIndex.from_product
mfa = pd.MultiIndex.from_arrays
self.assertTrue((self.bs.primitives(False) ==
pd.Series([1], index=mfp([[0], [0]], names=n))).all())
self.assertTrue((self.bs.primitives(True) ==
pd.Series([1], index=mfp([[0], [0]], names=n))).all())
self.assertTrue((self.mbs.primitives(False) ==
pd.Series([1, 3, 1], index=mfa([[0, 0, 1], [0, 1, 0]], names=n))).all())
self.assertTrue((self.mbs.primitives(True) ==
pd.Series([1, 3, 1], index=mfa([[0, 0, 1], [0, 1, 0]], names=n))).all())
self.assertTrue((self.lbs.primitives(False) ==
pd.Series([3, 6, 6, 2, 3], index=mfa([[0, 0, 0, 1, 1],
[0, 1, 2, 0, 1]], names=n))).all())
self.assertTrue((self.lbs.primitives(True) ==
pd.Series([3, 6, 5, 2, 3], index=mfa([[0, 0, 0, 1, 1],
[0, 1, 2, 0, 1]], names=n))).all())
| apache-2.0 | 894,215,249,751,153,900 | 45.724771 | 85 | 0.445906 | false |
tierney/mustached-bear | toolchain/mac_x86_pnacl/newlib/bin/pydir/pnacl-dis.py | 2 | 2746 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
#
import driver_tools
from driver_env import env
from driver_log import Log, DriverOpen, DriverClose
EXTRA_ENV = {
'INPUTS' : '',
'OUTPUT' : '',
'FLAGS' : '',
}
DISPatterns = [
( ('-o','(.*)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '(-.*)', "env.append('FLAGS', $0)"),
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]
def main(argv):
env.update(EXTRA_ENV)
driver_tools.ParseArgs(argv, DISPatterns)
inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if len(inputs) == 0:
Log.Fatal("No input files given")
if len(inputs) > 1 and output != '':
Log.Fatal("Cannot have -o with multiple inputs")
for infile in inputs:
env.push()
env.set('input', infile)
env.set('output', output)
# When we output to stdout, set redirect_stdout and set log_stdout
# to False to bypass the driver's line-by-line handling of stdout
# which is extremely slow when you have a lot of output
if driver_tools.IsBitcode(infile):
if output == '':
# LLVM by default outputs to a file if -o is missing
# Let's instead output to stdout
env.set('output', '-')
env.append('FLAGS', '-f')
driver_tools.Run('${LLVM_DIS} ${FLAGS} ${input} -o ${output}')
elif driver_tools.IsELF(infile):
flags = env.get('FLAGS')
if len(flags) == 0:
env.append('FLAGS', '-d')
if output == '':
# objdump to stdout
driver_tools.Run('"${OBJDUMP}" ${FLAGS} ${input}')
else:
# objdump always outputs to stdout, and doesn't recognize -o
# Let's add this feature to be consistent.
fp = driver_tools.DriverOpen(output, 'w')
driver_tools.Run('${OBJDUMP} ${FLAGS} ${input}', redirect_stdout=fp)
driver_tools.DriverClose(fp)
else:
Log.Fatal('Unknown file type')
env.pop()
# only reached in case of no errors
return 0
def get_help(unused_argv):
return """Usage: pnacl-dis [options] <input binary file> -o <output.txt>
Disassembler for PNaCl. Converts either bitcode to text or
native code to assembly. For native code, this just a wrapper around objdump
so this accepts the usual objdump flags.
OPTIONS:
-o <file> Output to file
-help | -h Output this help.
"""
| bsd-3-clause | -3,004,215,988,635,176,400 | 30.930233 | 79 | 0.616169 | false |
a4fr/FelfeliDict | v0.1/felfeliDict.py | 1 | 1961 | #! python3
from bottle import Bottle, run, app, template, route, get, post, request, static_file
import felfeliDict_lib as felfeliDict
import time
import os
app = '/dict'
#Dictionary DataBase
start = time.time()
f_dict = felfeliDict.load_db()
print('* DataBase loaded in', format(time.time()-start, '.2f'))
###############################################################################
@route('/static/<filename>')
def server_static(filename):
return static_file(filename, root='template/')
@route('/static/img/<filename>')
def server_static(filename):
return static_file(filename, root='template/img')
###############################################################################
def split_words(words_str):
if words_str=='':
return []
word_tmp = words_str.split('-')
words = []
for i in word_tmp:
if i.strip() != '':
words.append(i.strip())
return words
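# Illustrative sketch: split_words('hello - world') -> ['hello', 'world'];
# fragments are split on '-' and empty pieces are dropped.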
def translate(words):
result = []
    # store results in the order of the input words
for word in words:
for item in felfeliDict.search(word, f_dict).items():
result.append(item)
    # show the definition of the last word first
result.reverse()
return result
###############################################################################
@get(app)
def form():
output = {}
return template('template/doc', words=output, text='')
@route(app+'/help')
def help():
return template('template/help')
@post(app)
def from_post():
words_from_get = request.forms.get('words')
output = translate(split_words(words_from_get))
return template('template/doc', words=output, text=words_from_get)
@route(app+'/<path>')
def translate_url(path):
output = translate(split_words(path))
return template('template/doc', words=output, text=path)
###############################################################################
port = 2020
host = 'localhost'
run(host=host, port=port, debug=True, reloader=True)
#os.system('gnome-open http://{host}:{port}{app}'.format(host=host, port=port, app=app))
| gpl-2.0 | -3,933,762,045,450,733,600 | 23.822785 | 88 | 0.579296 | false |
enthought/traitsgui | enthought/pyface/ui/null/action/tool_bar_manager.py | 1 | 2973 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" The 'null' backend specific implementation of the tool bar manager.
"""
# Enthought library imports.
from enthought.traits.api import Bool, Enum, Instance, Tuple
# Local imports.
from enthought.pyface.image_cache import ImageCache
from enthought.pyface.action.action_manager import ActionManager
class ToolBarManager(ActionManager):
""" A tool bar manager realizes itself in errr, a tool bar control. """
#### 'ToolBarManager' interface ###########################################
# The size of tool images (width, height).
image_size = Tuple((16, 16))
# The orientation of the toolbar.
orientation = Enum('horizontal', 'vertical')
# Should we display the name of each tool bar tool under its image?
show_tool_names = Bool(True)
# Should we display the horizontal divider?
show_divider = Bool(True)
#### Private interface ####################################################
# Cache of tool images (scaled to the appropriate size).
_image_cache = Instance(ImageCache)
###########################################################################
# 'object' interface.
###########################################################################
def __init__(self, *args, **traits):
""" Creates a new tool bar manager. """
        # Base class constructor.
super(ToolBarManager, self).__init__(*args, **traits)
# An image cache to make sure that we only load each image used in the
# tool bar exactly once.
self._image_cache = ImageCache(self.image_size[0], self.image_size[1])
return
###########################################################################
# 'ToolBarManager' interface.
###########################################################################
def create_tool_bar(self, parent, controller=None):
""" Creates a tool bar. """
# If a controller is required it can either be set as a trait on the
# tool bar manager (the trait is part of the 'ActionManager' API), or
# passed in here (if one is passed in here it takes precedence over the
# trait).
if controller is None:
controller = self.controller
return None
#### EOF ######################################################################
| bsd-3-clause | -5,176,776,958,236,423,000 | 36.1625 | 79 | 0.532795 | false |
sethkontny/blaze | blaze/io/sql/constructors.py | 3 | 1743 | """
SQL array constructors.
"""
from __future__ import absolute_import, division, print_function
from ... import Array
from .datadescriptor import SQL_DDesc
from datashape import dshape, Record, DataShape, coretypes
class TableSelection(object):
"""
Table and column name
Attributes
==========
table: str
table name
colname: str
column name
"""
def __init__(self, table_name, colname):
self.table_name = table_name
self.col_name = colname
def __repr__(self):
return "TableSelection(%s)" % (self,)
def __str__(self):
return "%s.%s" % (self.table_name, self.col_name)
def sql_table(table_name, colnames, measures, conn):
"""
Create a new blaze Array from an SQL table description. This returns
a Record array.
Parameters
==========
table_name: str
table name
colnames: [str]
column names
measures: [DataShape]
measure (element type) for each column
conn: pyodbc/whatever Connection
"""
dtype = Record(list(zip(colnames, measures)))
record_dshape = DataShape(coretypes.Var(), dtype)
table = TableSelection(table_name, '*')
return Array(SQL_DDesc(record_dshape, table, conn))
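# Illustrative usage sketch (the table/column names and the pyodbc connection
# `conn` are hypothetical, not part of this module):
#
#     arr = sql_table('employees',
#                     ['id', 'name'],
#                     [dshape('int32'), dshape('string')],
#                     conn)
#
# which yields a blaze Array over a var * {id: int32, name: string} record dshape.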
def sql_column(table_name, colname, dshape, conn):
"""
Create a new blaze Array from a single column description.
Parameters
==========
table_name: str
table name
colname: str
column
dshape: DataShape
type for the column. This should include the dimension, which may be
a TypeVar
conn: pyodbc/whatever Connection
"""
col = TableSelection(table_name, colname)
return Array(SQL_DDesc(dshape, col, conn))
| bsd-3-clause | -1,251,931,157,297,576,400 | 20 | 76 | 0.621343 | false |
sergio-incaser/odoo | openerp/addons/base/ir/ir_ui_view.py | 7 | 51880 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import collections
import copy
import datetime
import dateutil
from dateutil.relativedelta import relativedelta
import fnmatch
import logging
import os
import time
from operator import itemgetter
import simplejson
import werkzeug
import HTMLParser
from lxml import etree
import openerp
from openerp import tools, api
from openerp.http import request
from openerp.osv import fields, osv, orm
from openerp.tools import graph, SKIPPED_ELEMENT_TYPES, SKIPPED_ELEMENTS
from openerp.tools.parse_version import parse_version
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.view_validation import valid_view
from openerp.tools import misc
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MOVABLE_BRANDING = ['data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-xpath', 'data-oe-source-id']
def keep_query(*keep_params, **additional_params):
"""
Generate a query string keeping the current request querystring's parameters specified
in ``keep_params`` and also adds the parameters specified in ``additional_params``.
    Multiple-value query string parameters will be merged into a single one with
    comma-separated values.
The ``keep_params`` arguments can use wildcards too, eg:
keep_query('search', 'shop_*', page=4)
"""
if not keep_params and not additional_params:
keep_params = ('*',)
params = additional_params.copy()
qs_keys = request.httprequest.args.keys()
for keep_param in keep_params:
for param in fnmatch.filter(qs_keys, keep_param):
if param not in additional_params and param in qs_keys:
params[param] = request.httprequest.args.getlist(param)
return werkzeug.urls.url_encode(params)
class view_custom(osv.osv):
_name = 'ir.ui.view.custom'
_order = 'create_date desc' # search(limit=1) should return the last customization
_columns = {
'ref_id': fields.many2one('ir.ui.view', 'Original View', select=True, required=True, ondelete='cascade'),
'user_id': fields.many2one('res.users', 'User', select=True, required=True, ondelete='cascade'),
'arch': fields.text('View Architecture', required=True),
}
def name_get(self, cr, uid, ids, context=None):
return [(rec.id, rec.user_id.name) for rec in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if name:
ids = self.search(cr, user, [('user_id', operator, name)] + args, limit=limit)
return self.name_get(cr, user, ids, context=context)
return super(view_custom, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
def _auto_init(self, cr, context=None):
super(view_custom, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_ui_view_custom_user_id_ref_id\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_ui_view_custom_user_id_ref_id ON ir_ui_view_custom (user_id, ref_id)')
def _hasclass(context, *cls):
""" Checks if the context node has all the classes passed as arguments
"""
node_classes = set(context.context_node.attrib.get('class', '').split())
return node_classes.issuperset(cls)
xpath_utils = etree.FunctionNamespace(None)
xpath_utils['hasclass'] = _hasclass
class view(osv.osv):
_name = 'ir.ui.view'
def _get_model_data(self, cr, uid, ids, fname, args, context=None):
result = dict.fromkeys(ids, False)
IMD = self.pool['ir.model.data']
data_ids = IMD.search_read(cr, uid, [('res_id', 'in', ids), ('model', '=', 'ir.ui.view')], ['res_id'], context=context)
result.update(map(itemgetter('res_id', 'id'), data_ids))
return result
_columns = {
'name': fields.char('View Name', required=True),
'model': fields.char('Object', select=True),
'priority': fields.integer('Sequence', required=True),
'type': fields.selection([
('tree','Tree'),
('form','Form'),
('graph', 'Graph'),
('calendar', 'Calendar'),
('diagram','Diagram'),
('gantt', 'Gantt'),
('kanban', 'Kanban'),
('search','Search'),
('qweb', 'QWeb')], string='View Type'),
'arch': fields.text('View Architecture', required=True),
'inherit_id': fields.many2one('ir.ui.view', 'Inherited View', ondelete='restrict', select=True),
'inherit_children_ids': fields.one2many('ir.ui.view','inherit_id', 'Inherit Views'),
'field_parent': fields.char('Child Field'),
'model_data_id': fields.function(_get_model_data, type='many2one', relation='ir.model.data', string="Model Data", store=True),
'xml_id': fields.function(osv.osv.get_xml_id, type='char', size=128, string="External ID",
help="ID of the view defined in xml file"),
'groups_id': fields.many2many('res.groups', 'ir_ui_view_group_rel', 'view_id', 'group_id',
string='Groups', help="If this field is empty, the view applies to all users. Otherwise, the view applies to the users of those groups only."),
'model_ids': fields.one2many('ir.model.data', 'res_id', domain=[('model','=','ir.ui.view')], auto_join=True),
'create_date': fields.datetime('Create Date', readonly=True),
'write_date': fields.datetime('Last Modification Date', readonly=True),
'mode': fields.selection(
[('primary', "Base view"), ('extension', "Extension View")],
string="View inheritance mode", required=True,
help="""Only applies if this view inherits from an other one (inherit_id is not False/Null).
* if extension (default), if this view is requested the closest primary view
is looked up (via inherit_id), then all views inheriting from it with this
view's model are applied
* if primary, the closest primary view is fully resolved (even if it uses a
different model than this one), then this view's inheritance specs
(<xpath/>) are applied, and the result is used as if it were this view's
actual arch.
"""),
'active': fields.boolean("Active",
help="""If this view is inherited,
* if True, the view always extends its parent
* if False, the view currently does not extend its parent but can be enabled
"""),
}
_defaults = {
'mode': 'primary',
'active': True,
'priority': 16,
}
_order = "priority,name"
# Holds the RNG schema
_relaxng_validator = None
def _relaxng(self):
if not self._relaxng_validator:
frng = tools.file_open(os.path.join('base','rng','view.rng'))
try:
relaxng_doc = etree.parse(frng)
self._relaxng_validator = etree.RelaxNG(relaxng_doc)
except Exception:
_logger.exception('Failed to load RelaxNG XML schema for views validation')
finally:
frng.close()
return self._relaxng_validator
def _check_xml(self, cr, uid, ids, context=None):
if context is None:
context = {}
context = dict(context, check_view_ids=ids)
# Sanity checks: the view should not break anything upon rendering!
# Any exception raised below will cause a transaction rollback.
for view in self.browse(cr, uid, ids, context):
view_def = self.read_combined(cr, uid, view.id, ['arch'], context=context)
view_arch_utf8 = view_def['arch']
if view.type != 'qweb':
view_doc = etree.fromstring(view_arch_utf8)
# verify that all fields used are valid, etc.
self.postprocess_and_fields(cr, uid, view.model, view_doc, view.id, context=context)
# RNG-based validation is not possible anymore with 7.0 forms
view_docs = [view_doc]
if view_docs[0].tag == 'data':
# A <data> element is a wrapper for multiple root nodes
view_docs = view_docs[0]
validator = self._relaxng()
for view_arch in view_docs:
version = view_arch.get('version', '7.0')
if parse_version(version) < parse_version('7.0') and validator and not validator.validate(view_arch):
for error in validator.error_log:
_logger.error(tools.ustr(error))
return False
if not valid_view(view_arch):
return False
return True
_sql_constraints = [
('inheritance_mode',
"CHECK (mode != 'extension' OR inherit_id IS NOT NULL)",
"Invalid inheritance mode: if the mode is 'extension', the view must"
" extend an other view"),
]
_constraints = [
(_check_xml, 'Invalid view definition', ['arch']),
]
def _auto_init(self, cr, context=None):
super(view, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_ui_view_model_type_inherit_id\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_ui_view_model_type_inherit_id ON ir_ui_view (model, inherit_id)')
def _compute_defaults(self, cr, uid, values, context=None):
if 'inherit_id' in values:
values.setdefault(
'mode', 'extension' if values['inherit_id'] else 'primary')
return values
def create(self, cr, uid, values, context=None):
if not values.get('type'):
if values.get('inherit_id'):
values['type'] = self.browse(cr, uid, values['inherit_id'], context).type
else:
values['type'] = etree.fromstring(values['arch']).tag
if not values.get('name'):
values['name'] = "%s %s" % (values.get('model'), values['type'])
self.clear_cache()
return super(view, self).create(
cr, uid,
self._compute_defaults(cr, uid, values, context=context),
context=context)
def write(self, cr, uid, ids, vals, context=None):
if not isinstance(ids, (list, tuple)):
ids = [ids]
if context is None:
context = {}
# drop the corresponding view customizations (used for dashboards for example), otherwise
# not all users would see the updated views
custom_view_ids = self.pool.get('ir.ui.view.custom').search(cr, uid, [('ref_id', 'in', ids)])
if custom_view_ids:
self.pool.get('ir.ui.view.custom').unlink(cr, uid, custom_view_ids)
self.clear_cache()
ret = super(view, self).write(
cr, uid, ids,
self._compute_defaults(cr, uid, vals, context=context),
context)
return ret
def toggle(self, cr, uid, ids, context=None):
""" Switches between enabled and disabled statuses
"""
for view in self.browse(cr, uid, ids, context=dict(context or {}, active_test=False)):
view.write({'active': not view.active})
# default view selection
def default_view(self, cr, uid, model, view_type, context=None):
""" Fetches the default view for the provided (model, view_type) pair:
primary view with the lowest priority.
:param str model:
        :param str view_type:
        :return: id of the default view or False if none found
:rtype: int
"""
domain = [
['model', '=', model],
['type', '=', view_type],
['mode', '=', 'primary'],
]
ids = self.search(cr, uid, domain, limit=1, context=context)
if not ids:
return False
return ids[0]
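    # Illustrative sketch (model name is hypothetical): default_view(cr, uid,
    # 'res.partner', 'form') returns the id of the lowest-priority primary form
    # view registered for res.partner, or False when none exists.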
#------------------------------------------------------
    # Inheritance mechanism
#------------------------------------------------------
def get_inheriting_views_arch(self, cr, uid, view_id, model, context=None):
"""Retrieves the architecture of views that inherit from the given view, from the sets of
views that should currently be used in the system. During the module upgrade phase it
may happen that a view is present in the database but the fields it relies on are not
fully loaded yet. This method only considers views that belong to modules whose code
is already loaded. Custom views defined directly in the database are loaded only
after the module initialization phase is completely finished.
:param int view_id: id of the view whose inheriting views should be retrieved
:param str model: model identifier of the inheriting views.
:rtype: list of tuples
:return: [(view_arch,view_id), ...]
"""
if not context:
context = {}
user = self.pool['res.users'].browse(cr, 1, uid, context=context)
user_groups = frozenset(user.groups_id or ())
conditions = [
['inherit_id', '=', view_id],
['model', '=', model],
['mode', '=', 'extension'],
['active', '=', True],
]
if self.pool._init and not context.get('load_all_views'):
# Module init currently in progress, only consider views from
# modules whose code is already loaded
conditions.extend([
'|',
['model_ids.module', 'in', tuple(self.pool._init_modules)],
['id', 'in', context.get('check_view_ids') or (0,)],
])
view_ids = self.search(cr, uid, conditions, context=context)
return [(view.arch, view.id)
for view in self.browse(cr, 1, view_ids, context)
if not (view.groups_id and user_groups.isdisjoint(view.groups_id))]
def raise_view_error(self, cr, uid, message, view_id, context=None):
view = self.browse(cr, uid, view_id, context)
not_avail = _('n/a')
message = ("%(msg)s\n\n" +
_("Error context:\nView `%(view_name)s`") +
"\n[view_id: %(viewid)s, xml_id: %(xmlid)s, "
"model: %(model)s, parent_id: %(parent)s]") % \
{
'view_name': view.name or not_avail,
'viewid': view_id or not_avail,
'xmlid': view.xml_id or not_avail,
'model': view.model or not_avail,
'parent': view.inherit_id.id or not_avail,
'msg': message,
}
_logger.error(message)
raise AttributeError(message)
def locate_node(self, arch, spec):
""" Locate a node in a source (parent) architecture.
Given a complete source (parent) architecture (i.e. the field
`arch` in a view), and a 'spec' node (a node in an inheriting
view that specifies the location in the source view of what
should be changed), return (if it exists) the node in the
source view matching the specification.
:param arch: a parent architecture to modify
:param spec: a modifying node in an inheriting view
:return: a node in the source matching the spec
"""
if spec.tag == 'xpath':
nodes = arch.xpath(spec.get('expr'))
return nodes[0] if nodes else None
elif spec.tag == 'field':
# Only compare the field name: a field can be only once in a given view
# at a given level (and for multilevel expressions, we should use xpath
# inheritance spec anyway).
for node in arch.iter('field'):
if node.get('name') == spec.get('name'):
return node
return None
for node in arch.iter(spec.tag):
if isinstance(node, SKIPPED_ELEMENT_TYPES):
continue
if all(node.get(attr) == spec.get(attr) for attr in spec.attrib
if attr not in ('position','version')):
# Version spec should match parent's root element's version
if spec.get('version') and spec.get('version') != arch.get('version'):
return None
return node
return None
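    # Two illustrative spec nodes this method can resolve (the field name is
    # made up): an xpath spec returns the first XPath match over the parent
    # arch, while a field spec matches on the field's name attribute only.
    #
    #   <xpath expr="//field[@name='partner_id']" position="after"/>
    #   <field name="partner_id" position="after"/>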
def inherit_branding(self, specs_tree, view_id, root_id):
for node in specs_tree.iterchildren(tag=etree.Element):
xpath = node.getroottree().getpath(node)
if node.tag == 'data' or node.tag == 'xpath':
self.inherit_branding(node, view_id, root_id)
else:
node.set('data-oe-id', str(view_id))
node.set('data-oe-source-id', str(root_id))
node.set('data-oe-xpath', xpath)
node.set('data-oe-model', 'ir.ui.view')
node.set('data-oe-field', 'arch')
return specs_tree
def apply_inheritance_specs(self, cr, uid, source, specs_tree, inherit_id, context=None):
""" Apply an inheriting view (a descendant of the base view)
Apply to a source architecture all the spec nodes (i.e. nodes
describing where and what changes to apply to some parent
architecture) given by an inheriting view.
:param Element source: a parent architecture to modify
        :param Element specs_tree: a modifying architecture in an inheriting view
:param inherit_id: the database id of specs_arch
:return: a modified source where the specs are applied
:rtype: Element
"""
        # Queue of specification nodes (i.e. nodes describing where and
        # what changes to apply to some parent architecture).
specs = [specs_tree]
while len(specs):
spec = specs.pop(0)
if isinstance(spec, SKIPPED_ELEMENT_TYPES):
continue
if spec.tag == 'data':
specs += [c for c in spec]
continue
node = self.locate_node(source, spec)
if node is not None:
pos = spec.get('position', 'inside')
if pos == 'replace':
if node.getparent() is None:
source = copy.deepcopy(spec[0])
else:
for child in spec:
node.addprevious(child)
node.getparent().remove(node)
elif pos == 'attributes':
for child in spec.getiterator('attribute'):
attribute = (child.get('name'), child.text or None)
if attribute[1]:
node.set(attribute[0], attribute[1])
elif attribute[0] in node.attrib:
del node.attrib[attribute[0]]
else:
sib = node.getnext()
for child in spec:
if pos == 'inside':
node.append(child)
elif pos == 'after':
if sib is None:
node.addnext(child)
node = child
else:
sib.addprevious(child)
elif pos == 'before':
node.addprevious(child)
else:
self.raise_view_error(cr, uid, _("Invalid position attribute: '%s'") % pos, inherit_id, context=context)
else:
attrs = ''.join([
' %s="%s"' % (attr, spec.get(attr))
for attr in spec.attrib
if attr != 'position'
])
tag = "<%s%s>" % (spec.tag, attrs)
self.raise_view_error(cr, uid, _("Element '%s' cannot be located in parent view") % tag, inherit_id, context=context)
return source
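    # Illustrative extension spec (field names invented): applied to a parent
    # arch containing <field name="partner_id"/>, this inserts a new field
    # right after it; position="attributes" would instead rewrite attributes
    # of the located node, and position="replace" swaps it out entirely.
    #
    #   <field name="partner_id" position="after">
    #       <field name="user_id"/>
    #   </field>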
def apply_view_inheritance(self, cr, uid, source, source_id, model, root_id=None, context=None):
""" Apply all the (directly and indirectly) inheriting views.
:param source: a parent architecture to modify (with parent modifications already applied)
:param source_id: the database view_id of the parent view
:param model: the original model for which we create a view (not
necessarily the same as the source's model); only the inheriting
views with that specific model will be applied.
:return: a modified source where all the modifying architecture are applied
"""
if context is None: context = {}
if root_id is None:
root_id = source_id
sql_inherit = self.get_inheriting_views_arch(cr, uid, source_id, model, context=context)
for (specs, view_id) in sql_inherit:
specs_tree = etree.fromstring(specs.encode('utf-8'))
if context.get('inherit_branding'):
self.inherit_branding(specs_tree, view_id, root_id)
source = self.apply_inheritance_specs(cr, uid, source, specs_tree, view_id, context=context)
source = self.apply_view_inheritance(cr, uid, source, view_id, model, root_id=root_id, context=context)
return source
def read_combined(self, cr, uid, view_id, fields=None, context=None):
"""
Utility function to get a view combined with its inherited views.
* Gets the top of the view tree if a sub-view is requested
* Applies all inherited archs on the root view
* Returns the view with all requested fields
.. note:: ``arch`` is always added to the fields list even if not
requested (similar to ``id``)
"""
if context is None: context = {}
context = context.copy()
# if view_id is not a root view, climb back to the top.
base = v = self.browse(cr, uid, view_id, context=context)
check_view_ids = context.setdefault('check_view_ids', [])
while v.mode != 'primary':
# Add inherited views to the list of loading forced views
# Otherwise, inherited views could not find elements created in their direct parents if that parent is defined in the same module
check_view_ids.append(v.id)
v = v.inherit_id
root_id = v.id
# arch and model fields are always returned
if fields:
fields = list({'arch', 'model'}.union(fields))
# read the view arch
[view] = self.read(cr, uid, [root_id], fields=fields, context=context)
view_arch = etree.fromstring(view['arch'].encode('utf-8'))
if not v.inherit_id:
arch_tree = view_arch
else:
parent_view = self.read_combined(
cr, uid, v.inherit_id.id, fields=fields, context=context)
arch_tree = etree.fromstring(parent_view['arch'])
arch_tree = self.apply_inheritance_specs(
cr, uid, arch_tree, view_arch, parent_view['id'], context=context)
if context.get('inherit_branding'):
arch_tree.attrib.update({
'data-oe-model': 'ir.ui.view',
'data-oe-id': str(root_id),
'data-oe-field': 'arch',
})
# and apply inheritance
arch = self.apply_view_inheritance(
cr, uid, arch_tree, root_id, base.model, context=context)
return dict(view, arch=etree.tostring(arch, encoding='utf-8'))
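    # Usage sketch (the id is hypothetical): callers receive the primary root
    # view with every extension already applied, so they never merge archs
    # by hand.
    #
    #   view = self.read_combined(cr, uid, 42, fields=['name', 'model'])
    #   arch_tree = etree.fromstring(view['arch'])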
#------------------------------------------------------
# Postprocessing: translation, groups and modifiers
#------------------------------------------------------
# TODO:
# - split postprocess so that it can be used instead of translate_qweb
# - remove group processing from ir_qweb
#------------------------------------------------------
def postprocess(self, cr, user, model, node, view_id, in_tree_view, model_fields, context=None):
"""Return the description of the fields in the node.
In a normal call to this method, node is a complete view architecture
but it is actually possible to give some sub-node (this is used so
that the method can call itself recursively).
Originally, the field descriptions are drawn from the node itself.
But there is now some code calling fields_get() in order to merge some
        of that information into the architecture.
"""
if context is None:
context = {}
result = False
fields = {}
children = True
modifiers = {}
Model = self.pool.get(model)
if Model is None:
self.raise_view_error(cr, user, _('Model not found: %(model)s') % dict(model=model),
view_id, context)
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
def check_group(node):
"""Apply group restrictions, may be set at view level or model level::
* at view level this means the element should be made invisible to
people who are not members
* at model level (exclusively for fields, obviously), this means
the field should be completely removed from the view, as it is
completely unavailable for non-members
:return: True if field should be included in the result of fields_view_get
"""
if node.tag == 'field' and node.get('name') in Model._fields:
field = Model._fields[node.get('name')]
if field.groups and not self.user_has_groups(
cr, user, groups=field.groups, context=context):
node.getparent().remove(node)
fields.pop(node.get('name'), None)
# no point processing view-level ``groups`` anymore, return
return False
if node.get('groups'):
can_see = self.user_has_groups(
cr, user, groups=node.get('groups'), context=context)
if not can_see:
node.set('invisible', '1')
modifiers['invisible'] = True
if 'attrs' in node.attrib:
del(node.attrib['attrs']) #avoid making field visible later
del(node.attrib['groups'])
return True
if node.tag in ('field', 'node', 'arrow'):
if node.get('object'):
attrs = {}
views = {}
xml = "<form>"
for f in node:
if f.tag == 'field':
xml += etree.tostring(f, encoding="utf-8")
xml += "</form>"
new_xml = etree.fromstring(encode(xml))
ctx = context.copy()
ctx['base_model_name'] = model
xarch, xfields = self.postprocess_and_fields(cr, user, node.get('object'), new_xml, view_id, ctx)
views['form'] = {
'arch': xarch,
'fields': xfields
}
attrs = {'views': views}
fields = xfields
if node.get('name'):
attrs = {}
field = Model._fields.get(node.get('name'))
if field:
children = False
views = {}
for f in node:
if f.tag in ('form', 'tree', 'graph', 'kanban', 'calendar'):
node.remove(f)
ctx = context.copy()
ctx['base_model_name'] = model
xarch, xfields = self.postprocess_and_fields(cr, user, field.comodel_name, f, view_id, ctx)
views[str(f.tag)] = {
'arch': xarch,
'fields': xfields
}
attrs = {'views': views}
fields[node.get('name')] = attrs
field = model_fields.get(node.get('name'))
if field:
orm.transfer_field_to_modifiers(field, modifiers)
elif node.tag in ('form', 'tree'):
result = Model.view_header_get(cr, user, False, node.tag, context=context)
if result:
node.set('string', result)
in_tree_view = node.tag == 'tree'
elif node.tag == 'calendar':
for additional_field in ('date_start', 'date_delay', 'date_stop', 'color', 'all_day', 'attendee'):
if node.get(additional_field):
fields[node.get(additional_field)] = {}
if not check_group(node):
# node must be removed, no need to proceed further with its children
return fields
        # The view architecture overrides the python model.
# Get the attrs before they are (possibly) deleted by check_group below
orm.transfer_node_to_modifiers(node, modifiers, context, in_tree_view)
# TODO remove attrs counterpart in modifiers when invisible is true ?
# translate view
if 'lang' in context:
Translations = self.pool['ir.translation']
if node.text and node.text.strip():
term = node.text.strip()
trans = Translations._get_source(cr, user, model, 'view', context['lang'], term)
if trans:
node.text = node.text.replace(term, trans)
if node.tail and node.tail.strip():
term = node.tail.strip()
trans = Translations._get_source(cr, user, model, 'view', context['lang'], term)
if trans:
node.tail = node.tail.replace(term, trans)
if node.get('string') and node.get('string').strip() and not result:
term = node.get('string').strip()
trans = Translations._get_source(cr, user, model, 'view', context['lang'], term)
if trans == term:
if 'base_model_name' in context:
# If translation is same as source, perhaps we'd have more luck with the alternative model name
                        # (in case we are in a mixed situation, such as an inherited view where parent_view.model != model)
trans = Translations._get_source(cr, user, context['base_model_name'], 'view', context['lang'], term)
else:
inherit_model = self.browse(cr, user, view_id, context=context).inherit_id.model or model
if inherit_model != model:
                            # parent view has a different model, if the term belongs to the parent view, the translation
# should be checked on the parent model as well
trans = Translations._get_source(cr, user, inherit_model, 'view', context['lang'], term)
if trans:
node.set('string', trans)
for attr_name in ('confirm', 'sum', 'avg', 'help', 'placeholder'):
attr_value = node.get(attr_name)
if attr_value and attr_value.strip():
trans = Translations._get_source(cr, user, model, 'view', context['lang'], attr_value.strip())
if trans:
node.set(attr_name, trans)
for f in node:
if children or (node.tag == 'field' and f.tag in ('filter','separator')):
fields.update(self.postprocess(cr, user, model, f, view_id, in_tree_view, model_fields, context))
orm.transfer_modifiers_to_node(modifiers, node)
return fields
def add_on_change(self, cr, user, model_name, arch):
""" Add attribute on_change="1" on fields that are dependencies of
computed fields on the same view.
"""
# map each field object to its corresponding nodes in arch
field_nodes = collections.defaultdict(list)
def collect(node, model):
if node.tag == 'field':
field = model._fields.get(node.get('name'))
if field:
field_nodes[field].append(node)
if field.relational:
model = self.pool.get(field.comodel_name)
for child in node:
collect(child, model)
collect(arch, self.pool[model_name])
for field, nodes in field_nodes.iteritems():
# if field should trigger an onchange, add on_change="1" on the
# nodes referring to field
model = self.pool[field.model_name]
if model._has_onchange(field, field_nodes):
for node in nodes:
if not node.get('on_change'):
node.set('on_change', '1')
return arch
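    # Sketch of the effect (field names invented): when a view shows both a
    # computed field such as 'amount_total' and a field it depends on such as
    # 'order_line', the <field name="order_line"/> nodes get on_change="1" so
    # clients trigger a recompute when that dependency is edited.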
def _disable_workflow_buttons(self, cr, user, model, node):
""" Set the buttons in node to readonly if the user can't activate them. """
if model is None or user == 1:
# admin user can always activate workflow buttons
return node
# TODO handle the case of more than one workflow for a model or multiple
# transitions with different groups and same signal
usersobj = self.pool.get('res.users')
buttons = (n for n in node.getiterator('button') if n.get('type') != 'object')
for button in buttons:
user_groups = usersobj.read(cr, user, [user], ['groups_id'])[0]['groups_id']
cr.execute("""SELECT DISTINCT t.group_id
FROM wkf
INNER JOIN wkf_activity a ON a.wkf_id = wkf.id
INNER JOIN wkf_transition t ON (t.act_to = a.id)
WHERE wkf.osv = %s
AND t.signal = %s
AND t.group_id is NOT NULL
""", (model, button.get('name')))
group_ids = [x[0] for x in cr.fetchall() if x[0]]
can_click = not group_ids or bool(set(user_groups).intersection(group_ids))
button.set('readonly', str(int(not can_click)))
return node
def postprocess_and_fields(self, cr, user, model, node, view_id, context=None):
""" Return an architecture and a description of all the fields.
The field description combines the result of fields_get() and
postprocess().
        :param node: the architecture as an etree
:return: a tuple (arch, fields) where arch is the given node as a
string and fields is the description of all the fields.
"""
fields = {}
Model = self.pool.get(model)
if Model is None:
self.raise_view_error(cr, user, _('Model not found: %(model)s') % dict(model=model), view_id, context)
if node.tag == 'diagram':
if node.getchildren()[0].tag == 'node':
node_model = self.pool[node.getchildren()[0].get('object')]
node_fields = node_model.fields_get(cr, user, None, context=context)
fields.update(node_fields)
if not node.get("create") and not node_model.check_access_rights(cr, user, 'create', raise_exception=False):
node.set("create", 'false')
if node.getchildren()[1].tag == 'arrow':
arrow_fields = self.pool[node.getchildren()[1].get('object')].fields_get(cr, user, None, context=context)
fields.update(arrow_fields)
else:
fields = Model.fields_get(cr, user, None, context=context)
node = self.add_on_change(cr, user, model, node)
fields_def = self.postprocess(cr, user, model, node, view_id, False, fields, context=context)
node = self._disable_workflow_buttons(cr, user, model, node)
if node.tag in ('kanban', 'tree', 'form', 'gantt'):
for action, operation in (('create', 'create'), ('delete', 'unlink'), ('edit', 'write')):
if not node.get(action) and not Model.check_access_rights(cr, user, operation, raise_exception=False):
node.set(action, 'false')
if node.tag in ('kanban'):
group_by_name = node.get('default_group_by')
if group_by_name in Model._fields:
group_by_field = Model._fields[group_by_name]
if group_by_field.type == 'many2one':
group_by_model = Model.pool[group_by_field.comodel_name]
for action, operation in (('group_create', 'create'), ('group_delete', 'unlink'), ('group_edit', 'write')):
if not node.get(action) and not group_by_model.check_access_rights(cr, user, operation, raise_exception=False):
node.set(action, 'false')
arch = etree.tostring(node, encoding="utf-8").replace('\t', '')
for k in fields.keys():
if k not in fields_def:
del fields[k]
for field in fields_def:
if field in fields:
fields[field].update(fields_def[field])
else:
message = _("Field `%(field_name)s` does not exist") % \
dict(field_name=field)
self.raise_view_error(cr, user, message, view_id, context)
return arch, fields
#------------------------------------------------------
# QWeb template views
#------------------------------------------------------
@tools.ormcache_context(accepted_keys=('lang','inherit_branding', 'editable', 'translatable'))
def read_template(self, cr, uid, xml_id, context=None):
if isinstance(xml_id, (int, long)):
view_id = xml_id
else:
if '.' not in xml_id:
raise ValueError('Invalid template id: %r' % (xml_id,))
view_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, xml_id, raise_if_not_found=True)
arch = self.read_combined(cr, uid, view_id, fields=['arch'], context=context)['arch']
arch_tree = etree.fromstring(arch)
if 'lang' in context:
arch_tree = self.translate_qweb(cr, uid, view_id, arch_tree, context['lang'], context)
self.distribute_branding(arch_tree)
root = etree.Element('templates')
root.append(arch_tree)
arch = etree.tostring(root, encoding='utf-8', xml_declaration=True)
return arch
def clear_cache(self):
self.read_template.clear_cache(self)
def _contains_branded(self, node):
return node.tag == 't'\
or 't-raw' in node.attrib\
or any(self.is_node_branded(child) for child in node.iterdescendants())
def _pop_view_branding(self, element):
distributed_branding = dict(
(attribute, element.attrib.pop(attribute))
for attribute in MOVABLE_BRANDING
if element.get(attribute))
return distributed_branding
def distribute_branding(self, e, branding=None, parent_xpath='',
index_map=misc.ConstantMapping(1)):
if e.get('t-ignore') or e.tag == 'head':
# remove any view branding possibly injected by inheritance
attrs = set(MOVABLE_BRANDING)
for descendant in e.iterdescendants(tag=etree.Element):
if not attrs.intersection(descendant.attrib): continue
self._pop_view_branding(descendant)
# TODO: find a better name and check if we have a string to boolean helper
return
node_path = e.get('data-oe-xpath')
if node_path is None:
node_path = "%s/%s[%d]" % (parent_xpath, e.tag, index_map[e.tag])
if branding and not (e.get('data-oe-model') or e.get('t-field')):
e.attrib.update(branding)
e.set('data-oe-xpath', node_path)
if not e.get('data-oe-model'): return
if {'t-esc', 't-raw'}.intersection(e.attrib):
# nodes which fully generate their content and have no reason to
# be branded because they can not sensibly be edited
self._pop_view_branding(e)
elif self._contains_branded(e):
# if a branded element contains branded elements distribute own
# branding to children unless it's t-raw, then just remove branding
# on current element
distributed_branding = self._pop_view_branding(e)
if 't-raw' not in e.attrib:
# TODO: collections.Counter if remove p2.6 compat
# running index by tag type, for XPath query generation
indexes = collections.defaultdict(lambda: 0)
for child in e.iterchildren(tag=etree.Element):
if child.get('data-oe-xpath'):
# injected by view inheritance, skip otherwise
# generated xpath is incorrect
self.distribute_branding(child)
else:
indexes[child.tag] += 1
self.distribute_branding(
child, distributed_branding,
parent_xpath=node_path, index_map=indexes)
def is_node_branded(self, node):
""" Finds out whether a node is branded or qweb-active (bears a
@data-oe-model or a @t-* *which is not t-field* as t-field does not
section out views)
:param node: an etree-compatible element to test
:type node: etree._Element
:rtype: boolean
"""
return any(
(attr in ('data-oe-model', 'group') or (attr != 't-field' and attr.startswith('t-')))
for attr in node.attrib
)
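    # Illustrative markup and the outcome of this check:
    #
    #   <div data-oe-model="ir.ui.view"/>  -> True (branded by inheritance)
    #   <t t-esc="record.name"/>           -> True (qweb-active directive)
    #   <span t-field="record.name"/>      -> False (t-field alone is allowed)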
def _translate_qweb(self, cr, uid, arch, translate_func, context=None):
# TODO: this should be moved in a place before inheritance is applied
# but process() is only called on fields_view_get()
h = HTMLParser.HTMLParser()
def get_trans(text):
if not text or not text.strip():
return None
text = text.strip()
if len(text) < 2 or (text.startswith('<!') and text.endswith('>')):
return None
return translate_func(text)
if type(arch) not in SKIPPED_ELEMENT_TYPES and arch.tag not in SKIPPED_ELEMENTS:
text = get_trans(arch.text)
if text:
arch.text = arch.text.replace(arch.text.strip(), text)
tail = get_trans(arch.tail)
if tail:
arch.tail = arch.tail.replace(arch.tail.strip(), tail)
for attr_name in ('title', 'alt', 'label', 'placeholder'):
attr = get_trans(arch.get(attr_name))
if attr:
arch.set(attr_name, attr)
for node in arch.iterchildren("*"):
self._translate_qweb(cr, uid, node, translate_func, context)
def translate_qweb(self, cr, uid, id_, arch, lang, context=None):
view_ids = []
view = self.browse(cr, uid, id_, context=context)
if view:
view_ids.append(view.id)
if view.mode == 'primary' and view.inherit_id.mode == 'primary':
# template is `cloned` from parent view
view_ids.append(view.inherit_id.id)
Translations = self.pool['ir.translation']
def translate_func(term):
trans = Translations._get_source(cr, uid, 'website', 'view', lang, term, view_ids)
return trans
self._translate_qweb(cr, uid, arch, translate_func, context=context)
return arch
@openerp.tools.ormcache()
def get_view_xmlid(self, cr, uid, id):
imd = self.pool['ir.model.data']
domain = [('model', '=', 'ir.ui.view'), ('res_id', '=', id)]
xmlid = imd.search_read(cr, uid, domain, ['module', 'name'])[0]
return '%s.%s' % (xmlid['module'], xmlid['name'])
@api.cr_uid_ids_context
def render(self, cr, uid, id_or_xml_id, values=None, engine='ir.qweb', context=None):
if isinstance(id_or_xml_id, list):
id_or_xml_id = id_or_xml_id[0]
if not context:
context = {}
if values is None:
values = dict()
qcontext = dict(
env=api.Environment(cr, uid, context),
keep_query=keep_query,
request=request, # might be unbound if we're not in an httprequest context
debug=request.debug if request else False,
json=simplejson,
quote_plus=werkzeug.url_quote_plus,
time=time,
datetime=datetime,
relativedelta=relativedelta,
)
qcontext.update(values)
        # TODO: This helper can be used by any template that wants to embed the backend.
# It is currently necessary because the ir.ui.view bundle inheritance does not
# match the module dependency graph.
def get_modules_order():
if request:
from openerp.addons.web.controllers.main import module_boot
return simplejson.dumps(module_boot())
return '[]'
qcontext['get_modules_order'] = get_modules_order
def loader(name):
return self.read_template(cr, uid, name, context=context)
return self.pool[engine].render(cr, uid, id_or_xml_id, qcontext, loader=loader, context=context)
#------------------------------------------------------
# Misc
#------------------------------------------------------
def graph_get(self, cr, uid, id, model, node_obj, conn_obj, src_node, des_node, label, scale, context=None):
nodes=[]
nodes_name=[]
transitions=[]
start=[]
tres={}
labels={}
no_ancester=[]
blank_nodes = []
_Model_Obj = self.pool[model]
_Node_Obj = self.pool[node_obj]
_Arrow_Obj = self.pool[conn_obj]
for model_key,model_value in _Model_Obj._columns.items():
if model_value._type=='one2many':
if model_value._obj==node_obj:
_Node_Field=model_key
_Model_Field=model_value._fields_id
for node_key,node_value in _Node_Obj._columns.items():
if node_value._type=='one2many':
if node_value._obj==conn_obj:
# _Source_Field = "Incoming Arrows" (connected via des_node)
if node_value._fields_id == des_node:
_Source_Field=node_key
# _Destination_Field = "Outgoing Arrows" (connected via src_node)
if node_value._fields_id == src_node:
_Destination_Field=node_key
datas = _Model_Obj.read(cr, uid, id, [],context)
for a in _Node_Obj.read(cr,uid,datas[_Node_Field],[]):
if a[_Source_Field] or a[_Destination_Field]:
nodes_name.append((a['id'],a['name']))
nodes.append(a['id'])
else:
blank_nodes.append({'id': a['id'],'name':a['name']})
if a.has_key('flow_start') and a['flow_start']:
start.append(a['id'])
else:
if not a[_Source_Field]:
no_ancester.append(a['id'])
for t in _Arrow_Obj.read(cr,uid, a[_Destination_Field],[]):
transitions.append((a['id'], t[des_node][0]))
tres[str(t['id'])] = (a['id'],t[des_node][0])
label_string = ""
if label:
for lbl in eval(label):
if t.has_key(tools.ustr(lbl)) and tools.ustr(t[lbl])=='False':
label_string += ' '
else:
label_string = label_string + " " + tools.ustr(t[lbl])
labels[str(t['id'])] = (a['id'],label_string)
g = graph(nodes, transitions, no_ancester)
g.process(start)
g.scale(*scale)
result = g.result_get()
results = {}
for node in nodes_name:
results[str(node[0])] = result[node[0]]
results[str(node[0])]['name'] = node[1]
return {'nodes': results,
'transitions': tres,
'label' : labels,
'blank_nodes': blank_nodes,
'node_parent_field': _Model_Field,}
def _validate_custom_views(self, cr, uid, model):
"""Validate architecture of custom views (= without xml id) for a given model.
This method is called at the end of registry update.
"""
cr.execute("""SELECT max(v.id)
FROM ir_ui_view v
LEFT JOIN ir_model_data md ON (md.model = 'ir.ui.view' AND md.res_id = v.id)
WHERE md.module IS NULL
AND v.model = %s
AND v.active = true
GROUP BY coalesce(v.inherit_id, v.id)
""", (model,))
ids = map(itemgetter(0), cr.fetchall())
context = dict(load_all_views=True)
return self._check_xml(cr, uid, ids, context=context)
def _validate_module_views(self, cr, uid, module):
"""Validate architecture of all the views of a given module"""
assert not self.pool._init or module in self.pool._init_modules
xmlid_filter = ''
params = (module,)
if self.pool._init:
# only validate the views that are still existing...
xmlid_filter = "AND md.name IN %s"
names = tuple(name for (xmod, name), (model, res_id) in self.pool.model_data_reference_ids.items() if xmod == module and model == self._name)
if not names:
# no views for this module, nothing to validate
return
params += (names,)
cr.execute("""SELECT max(v.id)
FROM ir_ui_view v
LEFT JOIN ir_model_data md ON (md.model = 'ir.ui.view' AND md.res_id = v.id)
WHERE md.module = %s
{0}
GROUP BY coalesce(v.inherit_id, v.id)
""".format(xmlid_filter), params)
for vid, in cr.fetchall():
if not self._check_xml(cr, uid, [vid]):
self.raise_view_error(cr, uid, "Can't validate view", vid)
# vim:et:
| agpl-3.0 | 4,882,319,257,160,424,000 | 44.628848 | 155 | 0.54653 | false |
glemaitre/protoclass | doc/source/conf.py | 1 | 9511 | # -*- coding: utf-8 -*-
#
# protoclass documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 4 13:55:31 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc'
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'protoclass'
copyright = u'2016, Guillaume Lemaitre'
author = u'Guillaume Lemaitre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.dev0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'protoclassdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'protoclass.tex', u'protoclass Documentation',
u'Guillaume Lemaitre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'protoclass', u'protoclass Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'protoclass', u'protoclass Documentation',
author, 'protoclass', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-2.0 | -4,905,088,611,089,797,000 | 31.35034 | 79 | 0.707602 | false |
OpenKore/OpenKore | src/scons-local-3.0.1/scons-local-3.0.1/SCons/Tool/ar.py | 5 | 2227 | """SCons.Tool.ar
Tool-specific initialization for ar (library archive).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ar.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
env['AR'] = 'ar'
env['ARFLAGS'] = SCons.Util.CLVar('rc')
env['ARCOM'] = '$AR $ARFLAGS $TARGET $SOURCES'
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
if env.get('RANLIB',env.Detect('ranlib')) :
env['RANLIB'] = env.get('RANLIB','ranlib')
env['RANLIBFLAGS'] = SCons.Util.CLVar('')
env['RANLIBCOM'] = '$RANLIB $RANLIBFLAGS $TARGET'
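# Usage sketch (project files are illustrative): once this tool is loaded, the
# StaticLibrary builder archives objects with $AR and runs ranlib when found.
#
#   env = Environment(tools=['ar', 'gcc'])
#   env.StaticLibrary('mylib', ['a.c', 'b.c'])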
def exists(env):
return env.Detect('ar')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | -7,688,123,544,123,417,000 | 34.349206 | 114 | 0.719353 | false |
eklitzke/spitfire | spitfire/compiler/walker.py | 1 | 4304 | class TreeWalkError(Exception):
pass
def print_tree(root):
print TreeVisitor(root).get_text()
# perform an in-order traversal of the AST and call the generate methods
class TreeVisitor(object):
def __init__(self, root):
self.root = root
def walk(self, node=None):
if node is None:
node = self.root
method_name = 'visitAST%s' % node.__class__.__name__
getattr(self, method_name, self.visitDefault)(node)
def visitDefault(self, node):
pass
def visitASTTemplateNode(self, node):
self.visitDefault(node)
if node.import_nodes:
for n in node.import_nodes:
self.walk(n)
if node.from_nodes:
for n in node.from_nodes:
self.walk(n)
if node.extends_nodes:
for n in node.extends_nodes:
self.walk(n)
for n in node.attr_nodes:
self.walk(n)
for n in node.child_nodes:
self.walk(n)
# if we aren't extending a template, build out the main function
self.walk(node.main_function)
def visitASTExtendsNode(self, node):
self.visitDefault(node)
for n in node.module_name_list:
self.walk(n)
visitASTImportNode = visitASTExtendsNode
visitASTFromNode = visitASTExtendsNode
visitASTAbsoluteExtendsNode = visitASTExtendsNode
def visitASTCallFunctionNode(self, node):
self.visitDefault(node)
self.walk(node.expression)
if node.arg_list:
self.walk(node.arg_list)
def visitASTForNode(self, node):
self.visitDefault(node)
self.walk(node.target_list)
self.walk(node.expression_list)
for n in node.child_nodes:
self.walk(n)
def visitASTIfNode(self, node):
self.visitDefault(node)
self.walk(node.test_expression)
for n in node.child_nodes:
self.walk(n)
if node.else_.child_nodes:
self.visitDefault(node.else_)
for n in node.else_.child_nodes:
self.walk(n)
def visitASTAttributeNode(self, node):
self.visitDefault(node)
self.walk(node.default)
def visitASTParameterListNode(self, node):
self.visitDefault(node)
for n in node.child_nodes:
self.walk(n)
visitASTArgListNode = visitASTParameterListNode
visitASTTargetListNode = visitASTParameterListNode
visitASTExpressionListNode = visitASTParameterListNode
visitASTListLiteralNode = visitASTParameterListNode
visitASTTupleLiteralNode = visitASTParameterListNode
def visitASTDictLiteralNode(self, node):
self.visitDefault(node)
for key_expression, value_expression in node.child_nodes:
self.walk(key_expression)
self.walk(value_expression)
def visitASTParameterNode(self, node):
self.visitDefault(node)
if node.default:
self.walk(node.default)
def visitASTGetUDNNode(self, node):
self.visitDefault(node)
self.walk(node.expression)
visitASTGetAttrNode = visitASTGetUDNNode
visitASTReturnNode = visitASTGetUDNNode
visitASTPlaceholderSubstitutionNode = visitASTGetUDNNode
visitASTBufferWrite = visitASTGetUDNNode
visitASTFilterNode = visitASTGetUDNNode
visitASTUnaryOpNode = visitASTGetUDNNode
def visitASTSliceNode(self, node):
self.visitDefault(node)
self.walk(node.expression)
self.walk(node.slice_expression)
def visitASTBinOpExpressionNode(self, node):
self.visitDefault(node)
self.walk(node.left)
self.walk(node.right)
visitASTBinOpNode = visitASTBinOpExpressionNode
visitASTAssignNode = visitASTBinOpNode
def visitASTFunctionNode(self, node):
self.visitDefault(node)
if node.parameter_list:
      self.walk(node.parameter_list)
for n in node.child_nodes:
self.walk(n)
visitASTDefNode = visitASTFunctionNode
def visitASTFragmentNode(self, node):
self.visitDefault(node)
for n in node.child_nodes:
self.walk(n)
def visitASTLiteralNode(self, node):
self.visitDefault(node)
visitASTTextNode = visitASTLiteralNode
visitASTWhitespaceNode = visitASTLiteralNode
visitASTOptionalWhitespaceNode = visitASTLiteralNode
# flatten a tree into an in-order list
class ClearCutter(TreeVisitor):
def __init__(self, *pargs):
TreeVisitor.__init__(self, *pargs)
self.node_list = []
def visitDefault(self, node):
self.node_list.append(node)
def flatten_tree(node):
cc = ClearCutter(node)
cc.walk()
return cc.node_list
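# Usage sketch (assumes a template AST produced elsewhere by the parser):
#
#   for node in flatten_tree(template_ast):
#     print node.__class__.__name__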
| bsd-3-clause | 730,874,781,039,876,200 | 26.240506 | 72 | 0.713987 | false |
envoyproxy/envoy-wasm | tools/protodoc/protodoc.py | 1 | 24954 | # protoc plugin to map from FileDescriptorProtos to Envoy doc style RST.
# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto
# for the underlying protos mentioned in this file. See
# https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax.
from collections import defaultdict
import json
import functools
import os
import pathlib
import re
import string
import sys
from google.protobuf import json_format
from bazel_tools.tools.python.runfiles import runfiles
import yaml
# We have to do some evil things to sys.path due to the way that Python module
# resolution works; we have both tools/ trees in bazel_tools and envoy. By
# default, Bazel leaves us with a sys.path in which the @bazel_tools repository
# takes precedence. Now that we're done with importing runfiles above, we can
# just remove it from the sys.path.
sys.path = [p for p in sys.path if not p.endswith('bazel_tools')]
from tools.api_proto_plugin import annotations
from tools.api_proto_plugin import plugin
from tools.api_proto_plugin import visitor
from tools.config_validation import validate_fragment
from tools.protodoc import manifest_pb2
from udpa.annotations import security_pb2
from udpa.annotations import status_pb2
from validate import validate_pb2
# Namespace prefix for Envoy core APIs.
ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.'
# Namespace prefix for Envoy top-level APIs.
ENVOY_PREFIX = '.envoy.'
# Namespace prefix for WKTs.
WKT_NAMESPACE_PREFIX = '.google.protobuf.'
# Namespace prefix for RPCs.
RPC_NAMESPACE_PREFIX = '.google.rpc.'
# http://www.fileformat.info/info/unicode/char/2063/index.htm
UNICODE_INVISIBLE_SEPARATOR = u'\u2063'
# Template for data plane API URLs.
DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format(
os.environ['ENVOY_BLOB_SHA'])
# Template for formatting extension descriptions.
EXTENSION_TEMPLATE = string.Template("""$anchor
This extension may be referenced by the qualified name *$extension*
.. note::
$status
$security_posture
""")
# A map from the extension security postures (as defined in the
# envoy_cc_extension build macro) to human readable text for extension docs.
EXTENSION_SECURITY_POSTURES = {
'robust_to_untrusted_downstream':
'This extension is intended to be robust against untrusted downstream traffic. It '
'assumes that the upstream is trusted.',
'robust_to_untrusted_downstream_and_upstream':
'This extension is intended to be robust against both untrusted downstream and '
'upstream traffic.',
'requires_trusted_downstream_and_upstream':
'This extension is not hardened and should only be used in deployments'
' where both the downstream and upstream are trusted.',
'unknown':
'This extension has an unknown security posture and should only be '
'used in deployments where both the downstream and upstream are '
'trusted.',
'data_plane_agnostic':
'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.',
}
# A map from the extension status value to a human readable text for extension
# docs.
EXTENSION_STATUS_VALUES = {
'alpha':
'This extension is functional but has not had substantial production burn time, use only with this caveat.',
'wip':
'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.',
}
class ProtodocError(Exception):
"""Base error class for the protodoc module."""
def HideNotImplemented(comment):
"""Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?"""
return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations
def GithubUrl(type_context):
"""Obtain data plane API Github URL by path from a TypeContext.
Args:
type_context: type_context.TypeContext for node.
Returns:
A string with a corresponding data plane API GitHub Url.
"""
if type_context.location is not None:
return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name,
type_context.location.span[0])
return ''
def FormatCommentWithAnnotations(comment, type_name=''):
"""Format a comment string with additional RST for annotations.
Args:
comment: comment string.
type_name: optional, 'message' or 'enum' may be specified for additional
message/enum specific annotations.
Returns:
A string with additional RST from annotations.
"""
formatted_extension = ''
if annotations.EXTENSION_ANNOTATION in comment.annotations:
extension = comment.annotations[annotations.EXTENSION_ANNOTATION]
formatted_extension = FormatExtension(extension)
return annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') + formatted_extension
def MapLines(f, s):
"""Apply a function across each line in a flat string.
Args:
f: A string transform function for a line.
s: A string consisting of potentially multiple lines.
Returns:
A flat string with f applied to each line.
"""
return '\n'.join(f(line) for line in s.split('\n'))
def Indent(spaces, line):
"""Indent a string."""
return ' ' * spaces + line
def IndentLines(spaces, lines):
"""Indent a list of strings."""
return map(functools.partial(Indent, spaces), lines)
def FormatInternalLink(text, ref):
return ':ref:`%s <%s>`' % (text, ref)
def FormatExternalLink(text, ref):
return '`%s <%s>`_' % (text, ref)
def FormatHeader(style, text):
"""Format RST header.
Args:
style: underline style, e.g. '=', '-'.
text: header text
Returns:
RST formatted header.
"""
return '%s\n%s\n\n' % (text, style * len(text))
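# Illustrative output, assuming a section named 'Listener':
#
#   FormatHeader('-', 'Listener')  # -> 'Listener\n--------\n\n'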
def FormatExtension(extension):
"""Format extension metadata as RST.
Args:
extension: the name of the extension, e.g. com.acme.foo.
Returns:
RST formatted extension description.
"""
try:
extension_metadata = json.loads(pathlib.Path(
os.getenv('EXTENSION_DB_PATH')).read_text())[extension]
anchor = FormatAnchor('extension_' + extension)
status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '')
security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']]
return EXTENSION_TEMPLATE.substitute(anchor=anchor,
extension=extension,
status=status,
security_posture=security_posture)
except KeyError as e:
sys.stderr.write(
'\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n')
exit(1) # Raising the error buries the above message in tracebacks.
def FormatHeaderFromFile(style, source_code_info, proto_name):
"""Format RST header based on special file level title
Args:
style: underline style, e.g. '=', '-'.
source_code_info: SourceCodeInfo object.
proto_name: If the file_level_comment does not contain a user specified
title, use this as page title.
Returns:
RST formatted header, and file level comment without page title strings.
"""
anchor = FormatAnchor(FileCrossRefLabel(proto_name))
stripped_comment = annotations.WithoutAnnotations(
StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments)))
formatted_extension = ''
if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations:
extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION]
formatted_extension = FormatExtension(extension)
if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations:
return anchor + FormatHeader(
style, source_code_info.file_level_annotations[
annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment
return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment
def FormatFieldTypeAsJson(type_context, field):
"""Format FieldDescriptorProto.Type as a pseudo-JSON string.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted pseudo-JSON string representation of field type.
"""
if TypeNameFromFQN(field.type_name) in type_context.map_typenames:
return '"{...}"'
if field.label == field.LABEL_REPEATED:
return '[]'
if field.type == field.TYPE_MESSAGE:
return '"{...}"'
return '"..."'
def FormatMessageAsJson(type_context, msg):
"""Format a message definition DescriptorProto as a pseudo-JSON block.
Args:
type_context: contextual information for message/enum/field.
msg: message definition DescriptorProto.
Return: RST formatted pseudo-JSON string representation of message definition.
"""
lines = []
for index, field in enumerate(msg.field):
field_type_context = type_context.ExtendField(index, field.name)
leading_comment = field_type_context.leading_comment
if HideNotImplemented(leading_comment):
continue
lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field)))
if lines:
return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n'
else:
return '.. code-block:: json\n\n {}\n\n'
def NormalizeFieldTypeName(field_fqn):
"""Normalize a fully qualified field type name, e.g.
.envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.
Return: Normalized type name.
"""
if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX):
return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):]
if field_fqn.startswith(ENVOY_PREFIX):
return field_fqn[len(ENVOY_PREFIX):]
return field_fqn
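# Illustrative normalizations (type names chosen for demonstration):
#
#   '.envoy.api.v2.Cluster'      -> 'Cluster'
#   '.envoy.config.foo.v3.Bar'   -> 'config.foo.v3.Bar'
#   '.google.protobuf.Duration'  -> unchanged (not an Envoy namespace)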
def NormalizeTypeContextName(type_name):
"""Normalize a type name, e.g.
envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
type_name: a name from a TypeContext.
Return: Normalized type name.
"""
return NormalizeFieldTypeName(QualifyTypeName(type_name))
def QualifyTypeName(type_name):
return '.' + type_name
def TypeNameFromFQN(fqn):
return fqn[1:]
def FormatEmph(s):
"""RST format a string for emphasis."""
return '*%s*' % s
def FormatFieldType(type_context, field):
"""Format a FieldDescriptorProto type description.
Adds cross-refs for message types.
TODO(htuch): Add cross-refs for enums as well.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted field type.
"""
if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(
ENVOY_PREFIX):
type_name = NormalizeFieldTypeName(field.type_name)
if field.type == field.TYPE_MESSAGE:
if type_context.map_typenames and TypeNameFromFQN(
field.type_name) in type_context.map_typenames:
return 'map<%s, %s>' % tuple(
map(functools.partial(FormatFieldType, type_context),
type_context.map_typenames[TypeNameFromFQN(field.type_name)]))
return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))
if field.type == field.TYPE_ENUM:
return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))
elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
return FormatExternalLink(
wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' %
wkt.lower())
elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
return FormatExternalLink(
rpc,
'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower())
elif field.type_name:
return field.type_name
pretty_type_names = {
field.TYPE_DOUBLE: 'double',
field.TYPE_FLOAT: 'float',
field.TYPE_INT32: 'int32',
field.TYPE_SFIXED32: 'int32',
field.TYPE_SINT32: 'int32',
field.TYPE_FIXED32: 'uint32',
field.TYPE_UINT32: 'uint32',
field.TYPE_INT64: 'int64',
field.TYPE_SFIXED64: 'int64',
field.TYPE_SINT64: 'int64',
field.TYPE_FIXED64: 'uint64',
field.TYPE_UINT64: 'uint64',
field.TYPE_BOOL: 'bool',
field.TYPE_STRING: 'string',
field.TYPE_BYTES: 'bytes',
}
if field.type in pretty_type_names:
return FormatExternalLink(pretty_type_names[field.type],
'https://developers.google.com/protocol-buffers/docs/proto#scalar')
raise ProtodocError('Unknown field type ' + str(field.type))
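# Illustrative renderings (the message name 'Foo' is made up):
#
#   .envoy.* message field     -> ':ref:`Foo <envoy_api_msg_Foo>`'
#   google.protobuf WKT field  -> external link to the protobuf reference docs
#   scalar string field        -> external link to the proto scalar type table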
def StripLeadingSpace(s):
"""Remove leading space in flat comment strings."""
return MapLines(lambda s: s[1:], s)
def FileCrossRefLabel(msg_name):
"""File cross reference label."""
return 'envoy_api_file_%s' % msg_name
def MessageCrossRefLabel(msg_name):
"""Message cross reference label."""
return 'envoy_api_msg_%s' % msg_name
def EnumCrossRefLabel(enum_name):
"""Enum cross reference label."""
return 'envoy_api_enum_%s' % enum_name
def FieldCrossRefLabel(field_name):
"""Field cross reference label."""
return 'envoy_api_field_%s' % field_name
def EnumValueCrossRefLabel(enum_value_name):
"""Enum value cross reference label."""
return 'envoy_api_enum_value_%s' % enum_value_name
def FormatAnchor(label):
"""Format a label as an Envoy API RST anchor."""
return '.. _%s:\n\n' % label
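# Illustrative anchor output for a hypothetical message name:
#
#   FormatAnchor(MessageCrossRefLabel('config.bootstrap.v3.Bootstrap'))
#   # -> '.. _envoy_api_msg_config.bootstrap.v3.Bootstrap:\n\n'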
def FormatSecurityOptions(security_option, field, type_context, edge_config):
sections = []
if security_option.configure_for_untrusted_downstream:
sections.append(
Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.'))
if security_option.configure_for_untrusted_upstream:
sections.append(
Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.'))
if edge_config.note:
sections.append(Indent(4, edge_config.note))
example_dict = json_format.MessageToDict(edge_config.example)
validate_fragment.ValidateFragment(field.type_name[1:], example_dict)
field_name = type_context.name.split('.')[-1]
example = {field_name: example_dict}
sections.append(
Indent(4, 'Example configuration for untrusted environments:\n\n') +
Indent(4, '.. code-block:: yaml\n\n') +
'\n'.join(IndentLines(6,
yaml.dump(example).split('\n'))))
return '.. attention::\n' + '\n\n'.join(sections)
def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest):
"""Format a FieldDescriptorProto as RST definition list item.
Args:
outer_type_context: contextual information for enclosing message.
type_context: contextual information for message/enum/field.
field: FieldDescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item.
"""
field_annotations = []
anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name)))
if field.options.HasExtension(validate_pb2.rules):
rule = field.options.Extensions[validate_pb2.rules]
if ((rule.HasField('message') and rule.message.required) or
(rule.HasField('duration') and rule.duration.required) or
(rule.HasField('string') and rule.string.min_len > 0) or
(rule.HasField('string') and rule.string.min_bytes > 0) or
(rule.HasField('repeated') and rule.repeated.min_items > 0)):
field_annotations = ['*REQUIRED*']
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
if HideNotImplemented(leading_comment):
return ''
if field.HasField('oneof_index'):
oneof_context = outer_type_context.ExtendOneof(field.oneof_index,
type_context.oneof_names[field.oneof_index])
oneof_comment = oneof_context.leading_comment
formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment)
if HideNotImplemented(oneof_comment):
return ''
# If the oneof only has one field and marked required, mark the field as required.
if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[
field.oneof_index]:
field_annotations = ['*REQUIRED*']
if len(type_context.oneof_fields[field.oneof_index]) > 1:
      # Fields in a oneof shouldn't be marked as required when the oneof comment below covers them.
field_annotations = []
oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[
field.oneof_index] else '\nOnly one of %s may be set.\n'
formatted_oneof_comment += oneof_template % ', '.join(
FormatInternalLink(
f,
FieldCrossRefLabel(NormalizeTypeContextName(
outer_type_context.ExtendField(i, f).name)))
for i, f in type_context.oneof_fields[field.oneof_index])
else:
formatted_oneof_comment = ''
# If there is a udpa.annotations.security option, include it after the comment.
if field.options.HasExtension(security_pb2.security):
manifest_description = protodoc_manifest.fields.get(type_context.name)
if not manifest_description:
raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name)
formatted_security_options = FormatSecurityOptions(
field.options.Extensions[security_pb2.security], field, type_context,
manifest_description.edge_config)
else:
formatted_security_options = ''
comment = '(%s) ' % ', '.join([FormatFieldType(type_context, field)] +
field_annotations) + formatted_leading_comment
return anchor + field.name + '\n' + MapLines(functools.partial(
Indent, 2), comment + formatted_oneof_comment) + formatted_security_options
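# Rough shape of the RST produced by FormatFieldAsDefinitionListItem() above
# (illustrative only):
#
#   .. _envoy_api_field_<message>.<field>:
#
#   <field name>
#     (<type>, *REQUIRED*) <leading comment, optional oneof note and security note>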
def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):
"""Format a DescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
msg: DescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item.
"""
type_context.oneof_fields = defaultdict(list)
type_context.oneof_required = defaultdict(bool)
type_context.oneof_names = defaultdict(list)
for index, field in enumerate(msg.field):
if field.HasField('oneof_index'):
leading_comment = type_context.ExtendField(index, field.name).leading_comment
if HideNotImplemented(leading_comment):
continue
type_context.oneof_fields[field.oneof_index].append((index, field.name))
for index, oneof_decl in enumerate(msg.oneof_decl):
if oneof_decl.options.HasExtension(validate_pb2.required):
type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required]
type_context.oneof_names[index] = oneof_decl.name
return '\n'.join(
FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name),
field, protodoc_manifest)
for index, field in enumerate(msg.field)) + '\n'
def FormatEnumValueAsDefinitionListItem(type_context, enum_value):
"""Format a EnumValueDescriptorProto as RST definition list item.
Args:
type_context: contextual information for message/enum/field.
enum_value: EnumValueDescriptorProto.
Returns:
RST formatted definition list item.
"""
anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name)))
default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else ''
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
if HideNotImplemented(leading_comment):
return ''
comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment
return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment)
def FormatEnumAsDefinitionList(type_context, enum):
"""Format a EnumDescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
    enum: EnumDescriptorProto.
Returns:
RST formatted definition list item.
"""
return '\n'.join(
FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name),
enum_value)
for index, enum_value in enumerate(enum.value)) + '\n'
def FormatProtoAsBlockComment(proto):
"""Format a proto as a RST block comment.
Useful in debugging, not usually referenced.
"""
return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n'
class RstFormatVisitor(visitor.Visitor):
"""Visitor to generate a RST representation from a FileDescriptor proto.
See visitor.Visitor for visitor method docs comments.
"""
def __init__(self):
r = runfiles.Create()
with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f:
# Load as YAML, emit as JSON and then parse as proto to provide type
# checking.
protodoc_manifest_untyped = yaml.safe_load(f.read())
self.protodoc_manifest = manifest_pb2.Manifest()
json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest)
def VisitEnum(self, enum_proto, type_context):
normal_enum_type = NormalizeTypeContextName(type_context.name)
anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type))
header = FormatHeader('-', 'Enum %s' % normal_enum_type)
github_url = GithubUrl(type_context)
proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n'
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum')
if HideNotImplemented(leading_comment):
return ''
return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList(
type_context, enum_proto)
def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
# Skip messages synthesized to represent map types.
if msg_proto.options.map_entry:
return ''
normal_msg_type = NormalizeTypeContextName(type_context.name)
anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type))
header = FormatHeader('-', normal_msg_type)
github_url = GithubUrl(type_context)
proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n'
leading_comment = type_context.leading_comment
formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message')
if HideNotImplemented(leading_comment):
return ''
return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson(
type_context, msg_proto) + FormatMessageAsDefinitionList(
type_context, msg_proto,
self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums)
def VisitFile(self, file_proto, type_context, services, msgs, enums):
has_messages = True
if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums):
has_messages = False
# TODO(mattklein123): The logic in both the doc and transform tool around files without messages
# is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs
# in the common case.
if (has_messages and
not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations
and file_proto.name.startswith('envoy')):
raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format(
file_proto.name))
# Find the earliest detached comment, attribute it to file level.
# Also extract file level titles if any.
header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name)
# If there are no messages, we don't include in the doc tree (no support for
# service rendering yet). We allow these files to be missing from the
# toctrees.
if not has_messages:
header = ':orphan:\n\n' + header
warnings = ''
if file_proto.options.HasExtension(status_pb2.file_status):
if file_proto.options.Extensions[status_pb2.file_status].work_in_progress:
warnings += ('.. warning::\n This API is work-in-progress and is '
'subject to breaking changes.\n\n')
debug_proto = FormatProtoAsBlockComment(file_proto)
return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto
def Main():
plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)])
if __name__ == '__main__':
Main()
| apache-2.0 | -3,480,274,841,590,263,000 | 36.581325 | 122 | 0.703575 | false |
psas/reaction-control | controller_scripts/MPU_6050_roll_V3.py | 1 | 3633 | import mraa
import time
import sys
import numpy as np
#important values:
A_GAIN = 0.932 # [deg/LSB]
G_GAIN = 1.466 # [deg/s/LSB]
DT = 0.002 # [s/loop] loop period
A = 0.98 # complementary filter constant
KP = 1.8 # proportional controller gain [LSB/deg/loop]
KD = 0.4 # derivative controller gain [LSB/deg/loop]
KI = 0.5 # Integral controller gain
previous_Time = 0
interval = 0 # current pulse interval [s], updated by blinkwd()
ledState = 0 # current valve drive state (0 = off, 1 = on), updated by blinkwd()
onTime = 0.1 # pulse period [s]; assumed value
timestart = time.time() #for timer()
timeend = 10 # how long the program should run in seconds (set to zero or less to run indefinitely)
rateD = 0.00 # desired roll rate
kp = .25 # proportional gain for duty cycle
a = 2.0 * kp # (I/(r*.1s))/Ftot equation to dc from radian error
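# Note: A, A_GAIN and G_GAIN above are complementary-filter/scale constants that are
# not used directly below (only the raw gyro rate is used). A minimal sketch of how a
# complementary filter would combine them (illustrative only; `angle` and
# `accel_angle` are hypothetical names, not defined in this script):
#   angle = A * (angle + rateX * DT) + (1 - A) * accel_angle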
#Pin assignments
Roll_cw_pin = 2
Roll_ccw_pin = 3
estop_pin = 14 #A0
#########################################################################
#GPIO initializations
cw = mraa.Gpio(Roll_cw_pin)
cw.dir(mraa.DIR_OUT)
ccw = mraa.Gpio(Roll_ccw_pin)
ccw.dir(mraa.DIR_OUT)
estop = mraa.Gpio(estop_pin)
estop.dir(mraa.DIR_IN)
cw.write(0)
ccw.write(0)
#########################################################################
#i2c declarations and commands
x = mraa.I2c(6) #i2c bus
MPU = 0x68 #I2C address
x.address(MPU)
x.writeReg(0x6B, 0)
def IMU():
x.address(MPU)
AcX = np.int16(x.readReg(0x3C) | x.readReg(0x3B)<<8)
AcY = np.int16(x.readReg(0x3E) | x.readReg(0x3D)<<8)
AcZ = np.int16(x.readReg(0x40) | x.readReg(0x3F)<<8)
TMP = np.int16(x.readReg(0x42) | x.readReg(0x41)<<8)
GyX = np.int16(x.readReg(0x44) | x.readReg(0x43)<<8)
GyY = np.int16(x.readReg(0x46) | x.readReg(0x45)<<8)
GyZ = np.int16(x.readReg(0x48) | x.readReg(0x47)<<8)
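    # 131 LSB per deg/s is the MPU-6050 gyro sensitivity at its default +/-250 deg/s full-scale range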
rateX = GyX / 131
return rateX
def timer():
if timeend <= 0:
return True
elif (time.time()-timestart) >= timeend:
return False
else:
return True
return
def blinkwd():
    global previous_Time, interval, ledState
    rateX = IMU()
u = a*abs(rateX)
current_Time = time.time()
if u >= 1:
ledState = 1
elif abs(u) < .1:
ledState = 0
elif (current_Time - previous_Time) >= interval:
if ledState == 1:
interval = onTime - u * onTime
ledState = 0
elif ledState == 0:
interval = u * onTime
ledState = 1
else:
print "fix your shit... something is brok'd"
sys.exit()
previous_Time = current_Time
return
def case1():
    rateX = IMU()
    u = a*abs(rateX) # convert roll rate to a duty-cycle command (same scaling as blinkwd)
    if abs(u) <= 0.175:
cw.write(0)
ccw.write(0)
return
elif u >= 1:
u = 1
return
return
def case2():
rateX = IMU()
if rateX >= 0.175:
blinkwd()
cw.write(ledState)
return
elif rateX <= -0.175:
blinkwd()
        ccw.write(ledState)
return
else:
cw.write(0)
ccw.write(0)
return
return
def case3():
rateX = IMU()
cw.write(0)
ccw.write(0)
return
while estop.read() < 1 and timer():
rateX = IMU()
if rateX >= 0.175:
blinkwd()
        cw.write(ledState)
elif rateX <= -0.175:
blinkwd()
        ccw.write(ledState)
else:
cw.write(0)
ccw.write(0)
| gpl-3.0 | -2,088,870,879,097,940,500 | 26.522727 | 80 | 0.466832 | false |
addition-it-solutions/project-all | addons/crm/wizard/__init__.py | 8 | 1102 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_partner_binding
import crm_phonecall_to_phonecall
import crm_lead_to_opportunity
import crm_merge_opportunities
| agpl-3.0 | -5,529,893,088,458,077,000 | 43.08 | 78 | 0.626134 | false |
mlperf/training_results_v0.6 | NVIDIA/benchmarks/ssd/implementations/pytorch/nhwc/mnist_nhwc.py | 5 | 5689 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from nhwc.conv import Conv2d_NHWC
from nhwc.max_pool import MaxPool2d_NHWC
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = Conv2d_NHWC(1, 10, kernel_size=5)
self.pool1 = MaxPool2d_NHWC(2)
self.conv2 = Conv2d_NHWC(10, 20, kernel_size=5)
self.pool2 = MaxPool2d_NHWC(2)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(self.pool1(self.conv1(x)))
x = F.relu(self.pool2(self.conv2_drop(self.conv2(x))))
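        # NHWC activations go back to NCHW before flattening for the fully-connected layers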
x = x.permute(0, 3, 1, 2).contiguous()
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
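        # convert the NCHW loader output to NHWC and cast to fp16 to match the NHWC half-precision model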
data = data.permute(0, 2, 3, 1).contiguous().half()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
data = data.permute(0, 2, 3, 1).contiguous().half()
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device).half()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
torch.backends.cudnn.benchmark = True
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if __name__ == '__main__':
main()
| apache-2.0 | -3,352,556,964,942,353,400 | 41.774436 | 95 | 0.603094 | false |
BellScurry/gem5-fault-injection | src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_minimum_or_maximum.py | 91 | 2158 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# PFMAX
# PFMIN
'''
| bsd-3-clause | -7,545,165,371,231,788,000 | 51.634146 | 72 | 0.793327 | false |
m4rx9/rna-pdb-tools | rna_tools/tools/webserver-engine/cleanup.py | 2 | 4553 | #!/usr/bin/python
"""
Remove old jobs from the VM. Jobs owned by the addresses in MAILS_TO_KEEP_DATA keep their
data, and REAL_REMOVE controls whether files are actually deleted.
RNA models kept for: 260c8ff6-f24e-4eff-9760-1831407fc770 (1kh6 revised without restraints)
RNA models kept for: f3c68edc-99c2-4950-9efd-0486812f15ae (3l0u ss + rst)
RNA models kept for: 4371aff4-cea1-4deb-88ce-fbb9dc6ae5a4 (tetraloop)
Remember that the age cutoff (OLD_IS_MORE days) is measured from the last modification time of a job's files.
"""
import os
import subprocess
import sys
import time
USE_TZ = False
import datetime
import argparse
from os import listdir
import shutil
import glob
import datetime
import warnings
from django.core.wsgi import get_wsgi_application
warnings.filterwarnings(
'error', r"DateTimeField .* received a naive datetime",
RuntimeWarning, r'DateTimeField received a naive datetime')
sys.path.append('/home/simrna/simrnaweb_env/simrnaweb')
JOB_DIR = '/home/simrna/simrnaweb_env/simrnaweb/media/jobs/' # the trailing slash must be there
os.environ['DJANGO_SETTINGS_MODULE'] = 'web.settings'
application = get_wsgi_application()
from app import models
JOB_STATUSES = dict(map(reversed, models.JOB_STATUS))
# Settings
# number of days to keep the data on the server
OLD_IS_MORE = 14
# jobs to be kept
MAILS_TO_KEEP_DATA = ['[email protected]', '[email protected]', '[email protected]','[email protected]']
# don't remove these jobs
JOBS_TO_KEEP = ['260c8ff6-f24e-4eff-9760-1831407fc770', 'f3c68edc-99c2-4950-9efd-0486812f15ae', '4371aff4-cea1-4deb-88ce-fbb9dc6ae5a4']
from web import settings
from django.utils import timezone
def remove_old_jobs(args):
    """The age of a job is calculated based on its files!
    If there is no file then the age cannot be estimated.
    args.debug vs args.prod: --prod switches on REAL_REMOVE.
    Jobs on the JOBS_TO_KEEP list are never removed."""
    def del_job(j, REAL_REMOVE=True, not_in_db=False):
        """Low-level removal helper.
        j is a job_id string (the job_id of a Job object).
        With not_in_db=True the directory exists on disk but not in the db; either way
        the whole job directory is removed when REAL_REMOVE is True."""
if True:
try:
if True:
try:
if not_in_db: # remove all files, this job is not in the db
print 'not_in_db', j
if REAL_REMOVE: shutil.rmtree(JOB_DIR + j)
return
else:
print 'full removing!!!!', j,
if REAL_REMOVE: shutil.rmtree(JOB_DIR + j)
return
except OSError:
print 'no such file or directory -- skip (it means that there is demo job in the db , but not on the drive -- its fine! :-)',
else:
print 'removing (moving to be removed later!)', j,
#shutil.move('/home/rpdock/web/media/jobs/' + j, '/home/magnus/to-remove-dir')
except IOError:
print 'IOError -- problem',
else:
print 'to be removed', j,
if args.debug:
REAL_REMOVE=False
if args.prod:
REAL_REMOVE=True
jobs = models.Job.objects.filter()
jobs_job_ids = []
d = datetime.timedelta(days=OLD_IS_MORE)
for j in jobs:
#jobs_job_ids.append(j.job_id)
fn = JOB_DIR + '/' + str(j) #+ '/' + 'log.txt'
try:
t = datetime.date.fromtimestamp(os.path.getmtime(fn))
#tt = datetime.datetime.strptime(t)
#print 't - d', t - d,
today = datetime.date.today()
delta = today - t
if delta > d:
print j.job_id, j.email, '-- old',
if j.job_id in JOBS_TO_KEEP:
print '-- to KEEP (JOBS_TO_KEEP)'
continue
if j.email not in MAILS_TO_KEEP_DATA:
if j.job_id not in JOB_STATUSES:
print '-- to remove',
del_job(j.job_id, REAL_REMOVE)
else:
print '-- to keep it',
pass
print
#print 'fn', fn, "last modified: %s" % time.ctime(os.path.getmtime(fn)), j.email
pass
except OSError:
#print
pass
#main
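# Typical invocations (flags defined in the argparse section below):
#   python cleanup.py --debug   # dry run -- nothing is deleted
#   python cleanup.py --prod    # really delete old job directories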
if __name__ == '__main__':
parser = argparse.ArgumentParser("")
parser.add_argument("--debug", help="Debug mode", action='store_true')
parser.add_argument("--prod", help="Production mode", action='store_true')
args = parser.parse_args()
remove_old_jobs(args)
| mit | 8,560,003,576,654,588,000 | 33.492424 | 154 | 0.567977 | false |
drewandersonnz/openshift-tools | ansible/roles/lib_openshift_3.2/build/ansible/oc_secret.py | 13 | 4319 | # pylint: skip-file
# pylint: disable=too-many-branches
def main():
'''
ansible oc module for secrets
'''
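    # Illustrative task parameters only (names taken from the argument_spec below; the
    # exact shape of each "contents" entry depends on Utils.create_files_from_contents):
    #   - oc_secret:
    #       state: present
    #       namespace: default
    #       name: my-secret
    #       contents: "{{ my_secret_contents }}"
    #       delete_after: True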
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
content_type=dict(default='raw', choices=['yaml', 'json', 'raw'], type='str'),
force=dict(default=False, type='bool'),
decode=dict(default=False, type='bool'),
),
mutually_exclusive=[["contents", "files"]],
supports_check_mode=True,
)
occmd = Secret(module.params['namespace'],
module.params['name'],
module.params['decode'],
kubeconfig=module.params['kubeconfig'],
verbose=module.params['debug'])
state = module.params['state']
api_rval = occmd.get()
#####
# Get
#####
if state == 'list':
module.exit_json(changed=False, results=api_rval, state="list")
if not module.params['name']:
module.fail_json(msg='Please specify a name when state is absent|present.')
########
# Delete
########
if state == 'absent':
if not Utils.exists(api_rval['results'], module.params['name']):
module.exit_json(changed=False, state="absent")
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = occmd.delete()
module.exit_json(changed=True, results=api_rval, state="absent")
if state == 'present':
if module.params['files']:
files = module.params['files']
elif module.params['contents']:
files = Utils.create_files_from_contents(module.params['contents'])
else:
module.fail_json(msg='Either specify files or contents.')
########
# Create
########
if not Utils.exists(api_rval['results'], module.params['name']):
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
api_rval = occmd.create(module.params['files'], module.params['contents'])
# Remove files
if files and module.params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
secret = occmd.prep_secret(module.params['files'], module.params['contents'])
if secret['returncode'] != 0:
module.fail_json(msg=secret)
if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
# Remove files
if files and module.params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
module.exit_json(changed=False, results=secret['results'], state="present")
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed an update.')
api_rval = occmd.update(files, force=module.params['force'])
# Remove files
if secret and module.params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| apache-2.0 | -660,355,043,803,292,000 | 33.007874 | 94 | 0.567029 | false |
scripnichenko/nova | nova/tests/unit/virt/xenapi/test_vmops.py | 12 | 64857 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
import mock
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent as xenapi_agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(VMOpsTestBase, self).setUp()
self._setup_mock_vmops()
self.vms = []
def _setup_mock_vmops(self, product_brand=None, product_version=None):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
self._session = xenapi_session.XenAPISession('test_url', 'root',
'test_pass')
self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
def create_vm(self, name, state="Running"):
vm_ref = xenapi_fake.create_vm(name, state)
self.vms.append(vm_ref)
vm = xenapi_fake.get_record("VM", vm_ref)
return vm, vm_ref
def tearDown(self):
super(VMOpsTestBase, self).tearDown()
for vm in self.vms:
xenapi_fake.destroy_vm(vm)
class VMOpsTestCase(VMOpsTestBase):
def setUp(self):
super(VMOpsTestCase, self).setUp()
self._setup_mock_vmops()
self.context = context.RequestContext('user', 'project')
self.instance = fake_instance.fake_instance_obj(self.context)
def _setup_mock_vmops(self, product_brand=None, product_version=None):
self._session = self._get_mock_session(product_brand, product_version)
self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
def _get_mock_session(self, product_brand, product_version):
class Mock(object):
pass
mock_session = Mock()
mock_session.product_brand = product_brand
mock_session.product_version = product_version
return mock_session
def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
vm_shutdown=True):
instance = {'name': 'foo',
'task_state': task_states.RESIZE_MIGRATING}
context = 'fake_context'
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(self._vmops, '_destroy')
self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
self.mox.StubOutWithMock(self._vmops, '_start')
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.lookup(self._session, 'foo-orig').AndReturn(
backup_made and 'foo' or None)
vm_utils.lookup(self._session, 'foo').AndReturn(
(not backup_made or new_made) and 'foo' or None)
if backup_made:
if new_made:
self._vmops._destroy(instance, 'foo')
vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
self._vmops._attach_mapped_block_devices(instance, [])
vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
if vm_shutdown:
self._vmops._start(instance, 'foo')
self.mox.ReplayAll()
self._vmops.finish_revert_migration(context, instance, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(True, False)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(False, False)
def test_xsm_sr_check_relaxed_cached(self):
self.make_plugin_call_count = 0
def fake_make_plugin_call(plugin, method, **args):
self.make_plugin_call_count = self.make_plugin_call_count + 1
return "true"
self.stubs.Set(self._vmops, "_make_plugin_call",
fake_make_plugin_call)
self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
self.assertEqual(self.make_plugin_call_count, 1)
def test_get_vm_opaque_ref_raises_instance_not_found(self):
instance = {"name": "dummy"}
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceNotFound,
self._vmops._get_vm_opaque_ref, instance)
@mock.patch.object(vm_utils, 'destroy_vm')
@mock.patch.object(vm_utils, 'clean_shutdown_vm')
@mock.patch.object(vm_utils, 'hard_shutdown_vm')
def test_clean_shutdown_no_bdm_on_destroy(self, hard_shutdown_vm,
clean_shutdown_vm, destroy_vm):
vm_ref = 'vm_ref'
self._vmops._destroy(self.instance, vm_ref, destroy_disks=False)
hard_shutdown_vm.assert_called_once_with(self._vmops._session,
self.instance, vm_ref)
self.assertEqual(0, clean_shutdown_vm.call_count)
@mock.patch.object(vm_utils, 'destroy_vm')
@mock.patch.object(vm_utils, 'clean_shutdown_vm')
@mock.patch.object(vm_utils, 'hard_shutdown_vm')
def test_clean_shutdown_with_bdm_on_destroy(self, hard_shutdown_vm,
clean_shutdown_vm, destroy_vm):
vm_ref = 'vm_ref'
block_device_info = {'block_device_mapping': ['fake']}
self._vmops._destroy(self.instance, vm_ref, destroy_disks=False,
block_device_info=block_device_info)
clean_shutdown_vm.assert_called_once_with(self._vmops._session,
self.instance, vm_ref)
self.assertEqual(0, hard_shutdown_vm.call_count)
@mock.patch.object(vm_utils, 'destroy_vm')
@mock.patch.object(vm_utils, 'clean_shutdown_vm', return_value=False)
@mock.patch.object(vm_utils, 'hard_shutdown_vm')
def test_clean_shutdown_with_bdm_failed_on_destroy(self, hard_shutdown_vm,
clean_shutdown_vm, destroy_vm):
vm_ref = 'vm_ref'
block_device_info = {'block_device_mapping': ['fake']}
self._vmops._destroy(self.instance, vm_ref, destroy_disks=False,
block_device_info=block_device_info)
clean_shutdown_vm.assert_called_once_with(self._vmops._session,
self.instance, vm_ref)
hard_shutdown_vm.assert_called_once_with(self._vmops._session,
self.instance, vm_ref)
@mock.patch.object(vm_utils, 'try_auto_configure_disk')
@mock.patch.object(vm_utils, 'create_vbd',
side_effect=test.TestingException)
def test_attach_disks_rescue_auto_disk_config_false(self, create_vbd,
try_auto_config):
ctxt = context.RequestContext('user', 'project')
instance = fake_instance.fake_instance_obj(ctxt)
image_meta = objects.ImageMeta.from_dict(
{'properties': {'auto_disk_config': 'false'}})
vdis = {'root': {'ref': 'fake-ref'}}
self.assertRaises(test.TestingException, self._vmops._attach_disks,
instance, image_meta=image_meta, vm_ref=None,
name_label=None, vdis=vdis, disk_image_type='fake',
network_info=[], rescue=True)
self.assertFalse(try_auto_config.called)
@mock.patch.object(vm_utils, 'try_auto_configure_disk')
@mock.patch.object(vm_utils, 'create_vbd',
side_effect=test.TestingException)
def test_attach_disks_rescue_auto_disk_config_true(self, create_vbd,
try_auto_config):
ctxt = context.RequestContext('user', 'project')
instance = fake_instance.fake_instance_obj(ctxt)
image_meta = objects.ImageMeta.from_dict(
{'properties': {'auto_disk_config': 'true'}})
vdis = {'root': {'ref': 'fake-ref'}}
self.assertRaises(test.TestingException, self._vmops._attach_disks,
instance, image_meta=image_meta, vm_ref=None,
name_label=None, vdis=vdis, disk_image_type='fake',
network_info=[], rescue=True)
try_auto_config.assert_called_once_with(self._vmops._session,
'fake-ref', instance.flavor.root_gb)
class InjectAutoDiskConfigTestCase(VMOpsTestBase):
def test_inject_auto_disk_config_when_present(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True}
self.vmops._inject_auto_disk_config(instance, vm_ref)
xenstore_data = vm['xenstore_data']
self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True')
def test_inject_auto_disk_config_none_as_false(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
self.vmops._inject_auto_disk_config(instance, vm_ref)
xenstore_data = vm['xenstore_data']
self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False')
class GetConsoleOutputTestCase(VMOpsTestBase):
def test_get_console_output_works(self):
self.mox.StubOutWithMock(self.vmops, '_get_last_dom_id')
instance = {"name": "dummy"}
self.vmops._get_last_dom_id(instance, check_rescue=True).AndReturn(42)
self.mox.ReplayAll()
self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))
def test_get_console_output_throws_nova_exception(self):
self.mox.StubOutWithMock(self.vmops, '_get_last_dom_id')
instance = {"name": "dummy"}
# dom_id=0 used to trigger exception in fake XenAPI
self.vmops._get_last_dom_id(instance, check_rescue=True).AndReturn(0)
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.vmops.get_console_output, instance)
def test_get_dom_id_works(self):
instance = {"name": "dummy"}
vm, vm_ref = self.create_vm("dummy")
self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))
def test_get_dom_id_works_with_rescue_vm(self):
instance = {"name": "dummy"}
vm, vm_ref = self.create_vm("dummy-rescue")
self.assertEqual(vm["domid"],
self.vmops._get_dom_id(instance, check_rescue=True))
def test_get_dom_id_raises_not_found(self):
instance = {"name": "dummy"}
self.create_vm("not-dummy")
self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)
def test_get_dom_id_works_with_vmref(self):
vm, vm_ref = self.create_vm("dummy")
self.assertEqual(vm["domid"],
self.vmops._get_dom_id(vm_ref=vm_ref))
class SpawnTestCase(VMOpsTestBase):
def _stub_out_common(self):
self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance')
self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
self.mox.StubOutWithMock(self.vmops._volumeops,
'safe_cleanup_from_vdis')
self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
self.mox.StubOutWithMock(vm_utils,
'create_kernel_and_ramdisk')
self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
self.mox.StubOutWithMock(self.vmops, '_destroy')
self.mox.StubOutWithMock(self.vmops, '_attach_disks')
self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
self.mox.StubOutWithMock(self.vmops, '_create_vifs')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'setup_basic_filtering')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'prepare_instance_filter')
self.mox.StubOutWithMock(self.vmops, '_start')
self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
self.mox.StubOutWithMock(self.vmops,
'_configure_new_instance_with_agent')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'apply_instance_filter')
self.mox.StubOutWithMock(self.vmops, '_update_last_dom_id')
def _test_spawn(self, name_label_param=None, block_device_info_param=None,
rescue=False, include_root_vdi=True, throw_exception=None,
attach_pci_dev=False):
self._stub_out_common()
instance = {"name": "dummy", "uuid": "fake_uuid"}
name_label = name_label_param
if name_label is None:
name_label = "dummy"
image_meta = objects.ImageMeta.from_dict({"id": "image_id"})
context = "context"
session = self.vmops._session
injected_files = "fake_files"
admin_password = "password"
network_info = "net_info"
steps = 10
if rescue:
steps += 1
block_device_info = block_device_info_param
if block_device_info and not block_device_info['root_device_name']:
block_device_info = dict(block_device_info_param)
block_device_info['root_device_name'] = \
self.vmops.default_root_dev
di_type = "di_type"
vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
step = 1
self.vmops._update_instance_progress(context, instance, step, steps)
vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
if include_root_vdi:
vdis["root"] = {"ref": "fake_ref"}
self.vmops._get_vdis_for_instance(context, instance,
name_label, image_meta, di_type,
block_device_info).AndReturn(vdis)
self.vmops._resize_up_vdis(instance, vdis)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
kernel_file = "kernel"
ramdisk_file = "ramdisk"
vm_utils.create_kernel_and_ramdisk(context, session,
instance, name_label).AndReturn((kernel_file, ramdisk_file))
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
vm_ref = "fake_vm_ref"
self.vmops._ensure_instance_name_unique(name_label)
self.vmops._ensure_enough_free_mem(instance)
self.vmops._create_vm_record(context, instance, name_label,
di_type, kernel_file,
ramdisk_file, image_meta, rescue).AndReturn(vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._attach_disks(instance, image_meta, vm_ref, name_label,
vdis, di_type, network_info, rescue,
admin_password, injected_files)
if attach_pci_dev:
fake_dev = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': '00:00.0',
'vendor_id': '1234',
'product_id': 'abcd',
'dev_type': fields.PciDeviceType.STANDARD,
'status': 'available',
'dev_id': 'devid',
'label': 'label',
'instance_uuid': None,
'extra_info': '{}',
}
pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
vm_utils.set_other_config_pci(self.vmops._session,
vm_ref,
"0/0000:00:00.0")
else:
pci_manager.get_instance_pci_devs(instance).AndReturn([])
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._inject_instance_metadata(instance, vm_ref)
self.vmops._inject_auto_disk_config(instance, vm_ref)
self.vmops._inject_hostname(instance, vm_ref, rescue)
self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
network_info)
self.vmops.inject_network_info(instance, network_info, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._create_vifs(instance, vm_ref, network_info)
self.vmops.firewall_driver.setup_basic_filtering(instance,
network_info).AndRaise(NotImplementedError)
self.vmops.firewall_driver.prepare_instance_filter(instance,
network_info)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
if rescue:
self.vmops._attach_orig_disks(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step,
steps)
self.vmops._start(instance, vm_ref)
self.vmops._wait_for_instance_to_start(instance, vm_ref)
self.vmops._update_last_dom_id(vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._configure_new_instance_with_agent(instance, vm_ref,
injected_files, admin_password)
self.vmops._remove_hostname(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops.firewall_driver.apply_instance_filter(instance,
network_info)
step += 1
last_call = self.vmops._update_instance_progress(context, instance,
step, steps)
if throw_exception:
last_call.AndRaise(throw_exception)
self.vmops._destroy(instance, vm_ref, network_info=network_info)
vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
kernel_file, ramdisk_file)
vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])
self.mox.ReplayAll()
self.vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info,
block_device_info_param, name_label_param, rescue)
def test_spawn(self):
self._test_spawn()
def test_spawn_with_alternate_options(self):
self._test_spawn(include_root_vdi=False, rescue=True,
name_label_param="bob",
block_device_info_param={"root_device_name": ""})
def test_spawn_with_pci_available_on_the_host(self):
self._test_spawn(attach_pci_dev=True)
def test_spawn_performs_rollback_and_throws_exception(self):
self.assertRaises(test.TestingException, self._test_spawn,
throw_exception=test.TestingException())
def _test_finish_migration(self, power_on=True, resize_instance=True,
throw_exception=None, booted_from_volume=False):
self._stub_out_common()
self.mox.StubOutWithMock(volumeops.VolumeOps, "connect_volume")
self.mox.StubOutWithMock(self.vmops._session, 'call_xenapi')
self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
context = "context"
migration = {}
name_label = "dummy"
instance = {"name": name_label, "uuid": "fake_uuid",
"root_device_name": "/dev/xvda"}
disk_info = "disk_info"
network_info = "net_info"
image_meta = objects.ImageMeta.from_dict({"id": "image_id"})
block_device_info = {}
import_root = True
if booted_from_volume:
block_device_info = {'block_device_mapping': [
{'mount_device': '/dev/xvda',
'connection_info': {'data': 'fake-data'}}]}
import_root = False
volumeops.VolumeOps.connect_volume(
{'data': 'fake-data'}).AndReturn(('sr', 'vol-vdi-uuid'))
self.vmops._session.call_xenapi('VDI.get_by_uuid',
'vol-vdi-uuid').AndReturn('vol-vdi-ref')
session = self.vmops._session
self.vmops._ensure_instance_name_unique(name_label)
self.vmops._ensure_enough_free_mem(instance)
di_type = "di_type"
vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
root_vdi = {"ref": "fake_ref"}
ephemeral_vdi = {"ref": "fake_ref_e"}
vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
vm_utils.import_all_migrated_disks(self.vmops._session, instance,
import_root=import_root).AndReturn(vdis)
kernel_file = "kernel"
ramdisk_file = "ramdisk"
vm_utils.create_kernel_and_ramdisk(context, session,
instance, name_label).AndReturn((kernel_file, ramdisk_file))
vm_ref = "fake_vm_ref"
rescue = False
self.vmops._create_vm_record(context, instance, name_label,
di_type, kernel_file,
ramdisk_file, image_meta, rescue).AndReturn(vm_ref)
if resize_instance:
self.vmops._resize_up_vdis(instance, vdis)
self.vmops._attach_disks(instance, image_meta, vm_ref, name_label,
vdis, di_type, network_info, False, None, None)
self.vmops._attach_mapped_block_devices(instance, block_device_info)
pci_manager.get_instance_pci_devs(instance).AndReturn([])
self.vmops._inject_instance_metadata(instance, vm_ref)
self.vmops._inject_auto_disk_config(instance, vm_ref)
self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
network_info)
self.vmops.inject_network_info(instance, network_info, vm_ref)
self.vmops._create_vifs(instance, vm_ref, network_info)
self.vmops.firewall_driver.setup_basic_filtering(instance,
network_info).AndRaise(NotImplementedError)
self.vmops.firewall_driver.prepare_instance_filter(instance,
network_info)
if power_on:
self.vmops._start(instance, vm_ref)
self.vmops._wait_for_instance_to_start(instance, vm_ref)
self.vmops._update_last_dom_id(vm_ref)
self.vmops.firewall_driver.apply_instance_filter(instance,
network_info)
last_call = self.vmops._update_instance_progress(context, instance,
step=5, total_steps=5)
if throw_exception:
last_call.AndRaise(throw_exception)
self.vmops._destroy(instance, vm_ref, network_info=network_info)
vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
kernel_file, ramdisk_file)
vm_utils.safe_destroy_vdis(self.vmops._session,
["fake_ref_e", "fake_ref"])
self.mox.ReplayAll()
self.vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def test_finish_migration(self):
self._test_finish_migration()
def test_finish_migration_no_power_on(self):
self._test_finish_migration(power_on=False, resize_instance=False)
def test_finish_migration_booted_from_volume(self):
self._test_finish_migration(booted_from_volume=True)
def test_finish_migrate_performs_rollback_on_error(self):
self.assertRaises(test.TestingException, self._test_finish_migration,
power_on=False, resize_instance=False,
throw_exception=test.TestingException())
def test_remove_hostname(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
self.mox.StubOutWithMock(self._session, 'call_xenapi')
self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
"vm-data/hostname")
self.mox.ReplayAll()
self.vmops._remove_hostname(instance, vm_ref)
self.mox.VerifyAll()
def test_reset_network(self):
class mock_agent(object):
def __init__(self):
self.called = False
def resetnetwork(self):
self.called = True
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
agent = mock_agent()
self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
self.mox.StubOutWithMock(self.vmops, '_get_agent')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.vmops.agent_enabled(instance).AndReturn(True)
self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
self.vmops._inject_hostname(instance, vm_ref, False)
self.vmops._remove_hostname(instance, vm_ref)
self.mox.ReplayAll()
self.vmops.reset_network(instance)
self.assertTrue(agent.called)
self.mox.VerifyAll()
def test_inject_hostname(self):
instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=False)
def test_inject_hostname_with_rescue_prefix(self):
instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
'RESCUE-dummy')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=True)
def test_inject_hostname_with_windows_name_truncation(self):
instance = {"hostname": "dummydummydummydummydummy",
"os_type": "windows", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
'RESCUE-dummydum')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=True)
def test_wait_for_instance_to_start(self):
instance = {"uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(vm_utils, 'get_power_state')
self.mox.StubOutWithMock(greenthread, 'sleep')
vm_utils.get_power_state(self._session, vm_ref).AndReturn(
power_state.SHUTDOWN)
greenthread.sleep(0.5)
vm_utils.get_power_state(self._session, vm_ref).AndReturn(
power_state.RUNNING)
self.mox.ReplayAll()
self.vmops._wait_for_instance_to_start(instance, vm_ref)
def test_attach_orig_disks(self):
instance = {"name": "dummy"}
vm_ref = "vm_ref"
vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"}
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
self.mox.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
vbd_refs)
vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
vmops.DEVICE_RESCUE, bootable=False)
self.mox.ReplayAll()
self.vmops._attach_orig_disks(instance, vm_ref)
def test_agent_update_setup(self):
# agent updates need to occur after networking is configured
instance = {'name': 'betelgeuse',
'uuid': '1-2-3-4-5-6'}
vm_ref = 'vm_ref'
agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
self.vmops._virtapi, instance, vm_ref)
self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
self.mox.StubOutWithMock(self.vmops, '_get_agent')
self.mox.StubOutWithMock(agent, 'get_version')
self.mox.StubOutWithMock(agent, 'resetnetwork')
self.mox.StubOutWithMock(agent, 'update_if_needed')
xenapi_agent.should_use_agent(instance).AndReturn(True)
self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
agent.get_version().AndReturn('1.2.3')
agent.resetnetwork()
agent.update_if_needed('1.2.3')
self.mox.ReplayAll()
self.vmops._configure_new_instance_with_agent(instance, vm_ref,
None, None)
class DestroyTestCase(VMOpsTestBase):
def setUp(self):
super(DestroyTestCase, self).setUp()
self.context = context.RequestContext(user_id=None, project_id=None)
self.instance = fake_instance.fake_instance_obj(self.context)
@mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
@mock.patch.object(vm_utils, 'hard_shutdown_vm')
@mock.patch.object(volume_utils, 'find_sr_by_uuid')
@mock.patch.object(volume_utils, 'forget_sr')
def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm,
lookup):
self.vmops.destroy(self.instance, 'network_info',
{'block_device_mapping': []})
self.assertEqual(0, find_sr_by_uuid.call_count)
self.assertEqual(0, forget_sr.call_count)
self.assertEqual(0, hard_shutdown_vm.call_count)
@mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
@mock.patch.object(vm_utils, 'hard_shutdown_vm')
@mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None)
@mock.patch.object(volume_utils, 'forget_sr')
def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid,
hard_shutdown_vm, lookup):
self.vmops.destroy(self.instance, 'network_info',
{'block_device_mapping': [{'connection_info':
{'data': {'volume_id': 'fake-uuid'}}}]})
find_sr_by_uuid.assert_called_once_with(self.vmops._session,
'FA15E-D15C-fake-uuid')
self.assertEqual(0, forget_sr.call_count)
self.assertEqual(0, hard_shutdown_vm.call_count)
@mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
@mock.patch.object(vm_utils, 'hard_shutdown_vm')
@mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
@mock.patch.object(volume_utils, 'forget_sr')
def test_no_vm_orphaned_volume(self, forget_sr, find_sr_by_uuid,
hard_shutdown_vm, lookup):
self.vmops.destroy(self.instance, 'network_info',
{'block_device_mapping': [{'connection_info':
{'data': {'volume_id': 'fake-uuid'}}}]})
find_sr_by_uuid.assert_called_once_with(self.vmops._session,
'FA15E-D15C-fake-uuid')
forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
self.assertEqual(0, hard_shutdown_vm.call_count)
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
def setUp(self):
super(MigrateDiskAndPowerOffTestCase, self).setUp()
self.context = context.RequestContext('user', 'project')
def test_migrate_disk_and_power_off_works_down(self,
migrate_up, migrate_down, *mocks):
instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=1,
ephemeral_gb=0)
self.vmops.migrate_disk_and_power_off(None, instance, None,
flavor, None)
self.assertFalse(migrate_up.called)
self.assertTrue(migrate_down.called)
def test_migrate_disk_and_power_off_works_up(self,
migrate_up, migrate_down, *mocks):
instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=2,
ephemeral_gb=2)
self.vmops.migrate_disk_and_power_off(None, instance, None,
flavor, None)
self.assertFalse(migrate_down.called)
self.assertTrue(migrate_up.called)
def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
migrate_up, migrate_down, *mocks):
instance = {"ephemeral_gb": 2}
flavor = fake_flavor.fake_flavor_obj(self.context, ephemeral_gb=1)
self.assertRaises(exception.ResizeError,
self.vmops.migrate_disk_and_power_off,
None, instance, None, flavor, None)
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
class MigrateDiskResizingUpTestCase(VMOpsTestBase):
def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
userdevice, post_snapshot_callback):
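        # Stand-in for vm_utils._snapshot_attached_here_impl (patched in below):
        # yields a fake VHD chain per userdevice so the tests can assert exactly
        # which VHDs get migrated.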
self.assertIsInstance(instance, dict)
if userdevice == '0':
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("fake-snapshot", label)
yield ["leaf", "parent", "grandp"]
else:
leaf = userdevice + "-leaf"
parent = userdevice + "-parent"
yield [leaf, parent]
@mock.patch.object(volume_utils, 'is_booted_from_volume',
return_value=False)
def test_migrate_disk_resizing_up_works_no_ephemeral(self,
mock_is_booted_from_volume,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = None
mock_get_vdi_for_vm.return_value = ({}, {"uuid": "root"})
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
dest, sr_path, 1),
mock.call(self.vmops._session, instance, "grandp",
dest, sr_path, 2),
mock.call(self.vmops._session, instance, "root",
dest, sr_path, 0)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
@mock.patch.object(volume_utils, 'is_booted_from_volume',
return_value=False)
def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
mock_is_booted_from_volume,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "root"}),
({}, {"uuid": "4-root"}),
({}, {"uuid": "5-root"})]
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance,
"parent", dest, sr_path, 1),
mock.call(self.vmops._session, instance,
"grandp", dest, sr_path, 2),
mock.call(self.vmops._session, instance,
"4-parent", dest, sr_path, 1, 1),
mock.call(self.vmops._session, instance,
"5-parent", dest, sr_path, 1, 2),
mock.call(self.vmops._session, instance,
"root", dest, sr_path, 0),
mock.call(self.vmops._session, instance,
"4-root", dest, sr_path, 0, 1),
mock.call(self.vmops._session, instance,
"5-root", dest, sr_path, 0, 2)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
@mock.patch.object(volume_utils, 'is_booted_from_volume',
return_value=True)
def test_migrate_disk_resizing_up_booted_from_volume(self,
mock_is_booted_from_volume,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "4-root"}),
({}, {"uuid": "5-root"})]
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance,
"4-parent", dest, sr_path, 1, 1),
mock.call(self.vmops._session, instance,
"5-parent", dest, sr_path, 1, 2),
mock.call(self.vmops._session, instance,
"4-root", dest, sr_path, 0, 1),
mock.call(self.vmops._session, instance,
"5-root", dest, sr_path, 0, 2)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
@mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
@mock.patch.object(volume_utils, 'is_booted_from_volume',
return_value=False)
def test_migrate_disk_resizing_up_rollback(self,
mock_is_booted_from_volume,
mock_restore,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
context = "ctxt"
instance = {"name": "fake", "uuid": "fake"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_migrate_vhd.side_effect = test.TestingException
mock_restore.side_effect = test.TestingException
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.assertRaises(exception.InstanceFaultRollback,
self.vmops._migrate_disk_resizing_up,
context, instance, dest, vm_ref, sr_path)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_restore.assert_called_once_with(instance)
mock_migrate_vhd.assert_called_once_with(self.vmops._session,
instance, "parent", dest, sr_path, 1)
class CreateVMRecordTestCase(VMOpsTestBase):
@mock.patch.object(vm_utils, 'determine_vm_mode')
@mock.patch.object(vm_utils, 'get_vm_device_id')
@mock.patch.object(vm_utils, 'create_vm')
def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
mock_get_vm_device_id, mock_determine_vm_mode):
context = "context"
instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123")
name_label = "dummy"
disk_image_type = "vhd"
kernel_file = "kernel"
ramdisk_file = "ram"
device_id = "0002"
image_properties = {"xenapi_device_id": device_id}
image_meta = objects.ImageMeta.from_dict(
{"properties": image_properties})
rescue = False
session = "session"
self.vmops._session = session
mock_get_vm_device_id.return_value = device_id
mock_determine_vm_mode.return_value = "vm_mode"
self.vmops._create_vm_record(context, instance, name_label,
disk_image_type, kernel_file, ramdisk_file, image_meta, rescue)
mock_get_vm_device_id.assert_called_with(session, image_meta)
mock_create_vm.assert_called_with(session, instance, name_label,
kernel_file, ramdisk_file, False, device_id)
class BootableTestCase(VMOpsTestBase):
def setUp(self):
super(BootableTestCase, self).setUp()
self.instance = {"name": "test", "uuid": "fake"}
vm_rec, self.vm_ref = self.create_vm('test')
# sanity check bootlock is initially disabled:
self.assertEqual({}, vm_rec['blocked_operations'])
def _get_blocked(self):
vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
return vm_rec['blocked_operations']
def test_acquire_bootlock(self):
self.vmops._acquire_bootlock(self.vm_ref)
blocked = self._get_blocked()
self.assertIn('start', blocked)
def test_release_bootlock(self):
self.vmops._acquire_bootlock(self.vm_ref)
self.vmops._release_bootlock(self.vm_ref)
blocked = self._get_blocked()
self.assertNotIn('start', blocked)
def test_set_bootable(self):
self.vmops.set_bootable(self.instance, True)
blocked = self._get_blocked()
self.assertNotIn('start', blocked)
def test_set_not_bootable(self):
self.vmops.set_bootable(self.instance, False)
blocked = self._get_blocked()
self.assertIn('start', blocked)
@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
class ResizeVdisTestCase(VMOpsTestBase):
def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
instance = fake_instance.fake_db_instance(root_gb=20)
vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}
self.vmops._resize_up_vdis(instance, vdis)
self.assertTrue(mock_resize.called)
def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
instance = fake_instance.fake_db_instance(root_gb=20)
vdis = {'root': {'osvol': True}}
self.vmops._resize_up_vdis(instance, vdis)
self.assertFalse(mock_resize.called)
def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
instance = fake_instance.fake_db_instance(root_gb=20)
vdis = {'root': {}}
self.vmops._resize_up_vdis(instance, vdis)
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
mock_resize):
mock_sizes.return_value = [2000, 1000]
instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
'ephemerals': ephemerals}
with mock.patch.object(vm_utils, 'generate_single_ephemeral',
autospec=True) as g:
self.vmops._resize_up_vdis(instance, vdis)
self.assertEqual([mock.call(self.vmops._session, instance, 4,
2000),
mock.call(self.vmops._session, instance, 5,
1000)],
mock_resize.call_args_list)
self.assertFalse(g.called)
def test_resize_up_vdis_root(self, mock_resize):
instance = {"root_gb": 20, "ephemeral_gb": 0}
self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
mock_resize.assert_called_once_with(self.vmops._session, instance,
"vdi_ref", 20)
def test_resize_up_vdis_zero_disks(self, mock_resize):
instance = {"root_gb": 0, "ephemeral_gb": 0}
self.vmops._resize_up_vdis(instance, {"root": {}})
self.assertFalse(mock_resize.called)
def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
instance = {"root_gb": 0, "ephemeral_gb": 3000}
vdis = {}
self.vmops._resize_up_vdis(instance, vdis)
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
mock_sizes.return_value = [2000, 1000]
instance = {"root_gb": 0, "ephemeral_gb": 3000}
ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
vdis = {"ephemerals": ephemerals}
self.vmops._resize_up_vdis(instance, vdis)
mock_sizes.assert_called_once_with(3000)
expected = [mock.call(self.vmops._session, instance, 4, 2000),
mock.call(self.vmops._session, instance, 5, 1000)]
self.assertEqual(expected, mock_resize.call_args_list)
@mock.patch.object(vm_utils, 'generate_single_ephemeral')
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
mock_generate,
mock_resize):
mock_sizes.return_value = [2000, 1000]
instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
ephemerals = {"4": {"ref": 4}}
vdis = {"ephemerals": ephemerals}
self.vmops._resize_up_vdis(instance, vdis)
mock_sizes.assert_called_once_with(3000)
mock_resize.assert_called_once_with(self.vmops._session, instance,
4, 2000)
mock_generate.assert_called_once_with(self.vmops._session, instance,
None, 5, 1000)
@mock.patch.object(vm_utils, 'remove_old_snapshots')
class CleanupFailedSnapshotTestCase(VMOpsTestBase):
def test_post_interrupted_snapshot_cleanup(self, mock_remove):
self.vmops._get_vm_opaque_ref = mock.Mock()
self.vmops._get_vm_opaque_ref.return_value = "vm_ref"
self.vmops.post_interrupted_snapshot_cleanup("context", "instance")
mock_remove.assert_called_once_with(self.vmops._session,
"instance", "vm_ref")
class XenstoreCallsTestCase(VMOpsTestBase):
"""Test cases for Read/Write/Delete/Update xenstore calls
from vmops.
"""
@mock.patch.object(vmops.VMOps, '_make_plugin_call')
def test_read_from_xenstore(self, fake_xapi_call):
fake_xapi_call.return_value = "fake_xapi_return"
fake_instance = {"name": "fake_instance"}
path = "attr/PVAddons/MajorVersion"
self.assertEqual("fake_xapi_return",
self.vmops._read_from_xenstore(fake_instance, path,
vm_ref="vm_ref"))
@mock.patch.object(vmops.VMOps, '_make_plugin_call')
def test_read_from_xenstore_ignore_missing_path(self, fake_xapi_call):
fake_instance = {"name": "fake_instance"}
path = "attr/PVAddons/MajorVersion"
self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref")
fake_xapi_call.assert_called_once_with('xenstore.py', 'read_record',
fake_instance, vm_ref="vm_ref",
path=path,
ignore_missing_path='True')
@mock.patch.object(vmops.VMOps, '_make_plugin_call')
def test_read_from_xenstore_missing_path(self, fake_xapi_call):
fake_instance = {"name": "fake_instance"}
path = "attr/PVAddons/MajorVersion"
self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref",
ignore_missing_path=False)
fake_xapi_call.assert_called_once_with('xenstore.py', 'read_record',
fake_instance, vm_ref="vm_ref",
path=path,
ignore_missing_path='False')
class LiveMigrateFakeVersionTestCase(VMOpsTestBase):
@mock.patch.object(vmops.VMOps, '_pv_device_reported')
@mock.patch.object(vmops.VMOps, '_pv_driver_version_reported')
@mock.patch.object(vmops.VMOps, '_write_fake_pv_version')
def test_ensure_pv_driver_info_for_live_migration(
self,
mock_write_fake_pv_version,
mock_pv_driver_version_reported,
mock_pv_device_reported):
mock_pv_device_reported.return_value = True
mock_pv_driver_version_reported.return_value = False
fake_instance = {"name": "fake_instance"}
self.vmops._ensure_pv_driver_info_for_live_migration(fake_instance,
"vm_rec")
mock_write_fake_pv_version.assert_called_once_with(fake_instance,
"vm_rec")
@mock.patch.object(vmops.VMOps, '_read_from_xenstore')
def test_pv_driver_version_reported_None(self, fake_read_from_xenstore):
fake_read_from_xenstore.return_value = '"None"'
fake_instance = {"name": "fake_instance"}
self.assertFalse(self.vmops._pv_driver_version_reported(fake_instance,
"vm_ref"))
@mock.patch.object(vmops.VMOps, '_read_from_xenstore')
def test_pv_driver_version_reported(self, fake_read_from_xenstore):
fake_read_from_xenstore.return_value = '6.2.0'
fake_instance = {"name": "fake_instance"}
self.assertTrue(self.vmops._pv_driver_version_reported(fake_instance,
"vm_ref"))
@mock.patch.object(vmops.VMOps, '_read_from_xenstore')
def test_pv_device_reported(self, fake_read_from_xenstore):
with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec:
fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'}
with mock.patch.object(self._session, 'call_xenapi') as fake_call:
fake_call.return_value = {'device': '0'}
fake_read_from_xenstore.return_value = '4'
fake_instance = {"name": "fake_instance"}
self.assertTrue(self.vmops._pv_device_reported(fake_instance,
"vm_ref"))
@mock.patch.object(vmops.VMOps, '_read_from_xenstore')
def test_pv_device_not_reported(self, fake_read_from_xenstore):
with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec:
fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'}
with mock.patch.object(self._session, 'call_xenapi') as fake_call:
fake_call.return_value = {'device': '0'}
fake_read_from_xenstore.return_value = '0'
fake_instance = {"name": "fake_instance"}
self.assertFalse(self.vmops._pv_device_reported(fake_instance,
"vm_ref"))
@mock.patch.object(vmops.VMOps, '_read_from_xenstore')
def test_pv_device_None_reported(self, fake_read_from_xenstore):
with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec:
fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'}
with mock.patch.object(self._session, 'call_xenapi') as fake_call:
fake_call.return_value = {'device': '0'}
fake_read_from_xenstore.return_value = '"None"'
fake_instance = {"name": "fake_instance"}
self.assertFalse(self.vmops._pv_device_reported(fake_instance,
"vm_ref"))
@mock.patch.object(vmops.VMOps, '_write_to_xenstore')
def test_write_fake_pv_version(self, fake_write_to_xenstore):
fake_write_to_xenstore.return_value = 'fake_return'
fake_instance = {"name": "fake_instance"}
with mock.patch.object(self._session, 'product_version') as version:
version.return_value = ('6', '2', '0')
self.assertIsNone(self.vmops._write_fake_pv_version(fake_instance,
"vm_ref"))
class LiveMigrateHelperTestCase(VMOpsTestBase):
def test_connect_block_device_volumes_none(self):
self.assertEqual({}, self.vmops.connect_block_device_volumes(None))
@mock.patch.object(volumeops.VolumeOps, "connect_volume")
def test_connect_block_device_volumes_calls_connect(self, mock_connect):
with mock.patch.object(self.vmops._session,
"call_xenapi") as mock_session:
mock_connect.return_value = ("sr_uuid", None)
mock_session.return_value = "sr_ref"
bdm = {"connection_info": "c_info"}
bdi = {"block_device_mapping": [bdm]}
result = self.vmops.connect_block_device_volumes(bdi)
self.assertEqual({'sr_uuid': 'sr_ref'}, result)
mock_connect.assert_called_once_with("c_info")
mock_session.assert_called_once_with("SR.get_by_uuid",
"sr_uuid")
class RollbackLiveMigrateDestinationTestCase(VMOpsTestBase):
@mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
@mock.patch.object(volume_utils, 'forget_sr')
def test_rollback_dest_calls_sr_forget(self, forget_sr, sr_ref):
block_device_info = {'block_device_mapping': [{'connection_info':
{'data': {'volume_id': 'fake-uuid',
'target_iqn': 'fake-iqn',
'target_portal': 'fake-portal'}}}]}
self.vmops.rollback_live_migration_at_destination('instance',
block_device_info)
forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
@mock.patch.object(volume_utils, 'forget_sr')
@mock.patch.object(volume_utils, 'find_sr_by_uuid',
side_effect=test.TestingException)
def test_rollback_dest_handles_exception(self, find_sr_ref, forget_sr):
block_device_info = {'block_device_mapping': [{'connection_info':
{'data': {'volume_id': 'fake-uuid',
'target_iqn': 'fake-iqn',
'target_portal': 'fake-portal'}}}]}
self.vmops.rollback_live_migration_at_destination('instance',
block_device_info)
self.assertFalse(forget_sr.called)
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'resize_disk')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vm_utils, 'destroy_vdi')
class MigrateDiskResizingDownTestCase(VMOpsTestBase):
def test_migrate_disk_resizing_down_works_no_ephemeral(
self,
mock_destroy_vdi,
mock_migrate_vhd,
mock_resize_disk,
mock_get_vdi_for_vm_safely,
mock_update_instance_progress,
mock_apply_orig_vm_name_label,
mock_resize_ensure_vm_is_shutdown):
context = "ctx"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
instance_type = dict(root_gb=1)
old_vdi_ref = "old_ref"
new_vdi_ref = "new_ref"
new_vdi_uuid = "new_uuid"
mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)
self.vmops._migrate_disk_resizing_down(context, instance, dest,
instance_type, vm_ref, sr_path)
mock_get_vdi_for_vm_safely.assert_called_once_with(
self.vmops._session,
vm_ref)
mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
instance, vm_ref)
mock_apply_orig_vm_name_label.assert_called_once_with(
instance, vm_ref)
mock_resize_disk.assert_called_once_with(
self.vmops._session,
instance,
old_vdi_ref,
instance_type)
mock_migrate_vhd.assert_called_once_with(
self.vmops._session,
instance,
new_vdi_uuid,
dest,
sr_path, 0)
mock_destroy_vdi.assert_called_once_with(
self.vmops._session,
new_vdi_ref)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected,
mock_update_instance_progress.call_args_list)
class GetVdisForInstanceTestCase(VMOpsTestBase):
"""Tests get_vdis_for_instance utility method."""
def setUp(self):
super(GetVdisForInstanceTestCase, self).setUp()
self.context = context.get_admin_context()
self.context.auth_token = 'auth_token'
self.session = mock.Mock()
self.vmops._session = self.session
self.instance = fake_instance.fake_instance_obj(self.context)
self.name_label = 'name'
self.image = 'fake_image_id'
@mock.patch.object(volumeops.VolumeOps, "connect_volume",
return_value=("sr", "vdi_uuid"))
def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock):
# setup fake data
data = {'name_label': self.name_label,
'sr_uuid': 'fake',
'auth_password': 'scrubme'}
bdm = [{'mount_device': '/dev/vda',
'connection_info': {'data': data}}]
bdi = {'root_device_name': 'vda',
'block_device_mapping': bdm}
        # Tests that the block device connection_info parameters are sanitized
        # for passwords when logged.
def fake_debug(*args, **kwargs):
if 'auth_password' in args[0]:
self.assertNotIn('scrubme', args[0])
fake_debug.matched = True
fake_debug.matched = False
with mock.patch.object(vmops.LOG, 'debug',
side_effect=fake_debug) as debug_mock:
vdis = self.vmops._get_vdis_for_instance(self.context,
self.instance, self.name_label, self.image,
image_type=4, block_device_info=bdi)
self.assertEqual(1, len(vdis))
get_uuid_mock.assert_called_once_with({"data": data})
# we don't care what the log message is, we just want to make sure
# our stub method is called which asserts the password is scrubbed
self.assertTrue(debug_mock.called)
self.assertTrue(fake_debug.matched)
| apache-2.0 | -187,607,245,181,147,360 | 44.545646 | 79 | 0.586922 | false |
cloudant/python-cloudant | src/cloudant/view.py | 1 | 14295 | #!/usr/bin/env python
# Copyright (C) 2015, 2019 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
API module for interacting with a view in a design document.
"""
import contextlib
from ._2to3 import STRTYPE
from ._common_util import codify, get_docs, response_to_json_dict
from .result import Result
from .error import CloudantArgumentError, CloudantViewException
class View(dict):
"""
Encapsulates a view as a dictionary based object, exposing the map and
reduce functions as attributes and supporting query/data access through
the view. A View object is instantiated with a reference to a
DesignDocument and is typically used as part of the
:class:`~cloudant.design_document.DesignDocument` view management API.
A View object provides a key accessible, sliceable, and iterable default
result collection that can be used to query the view data through the
``result`` attribute.
For example:
.. code-block:: python
# Access result collection through individual keys
view.result[100]
view.result['foo']
# Access result collection through index slicing:
view.result[100: 200]
view.result[: 200]
view.result[100: ]
view.result[: ]
# Access result collection through key slicing:
view.result['bar': 'foo']
view.result['bar': ]
view.result[: 'foo']
# Iterate over the result collection:
for doc in view.result:
print doc
The default result collection provides basic functionality,
which can be customized with other arguments using the
:func:`~cloudant.view.View.custom_result` context manager.
For example:
.. code-block:: python
# Including documents as part of a custom result
with view.custom_result(include_docs=True) as rslt:
rslt[100: 200] # slice by result
rslt[['2013', '10']: ['2013', '11']] # slice by startkey/endkey
# Iteration
for doc in rslt:
print doc
# Iteration over a view within startkey/endkey range:
with view.custom_result(startkey='2013', endkey='2014') as rslt:
for doc in rslt:
print doc
Note: A view must exist as part of a design document remotely in order to
access result content as depicted in the above examples.
:param DesignDocument ddoc: DesignDocument instance used in part to
identify the view.
:param str view_name: Name used in part to identify the view.
:param str map_func: Optional Javascript map function.
:param str reduce_func: Optional Javascript reduce function.
:param str partition_key: Optional. Specify a view partition key. Defaults
to ``None`` resulting in global queries.
"""
def __init__(
self,
ddoc,
view_name,
map_func=None,
reduce_func=None,
partition_key=None,
**kwargs
):
super(View, self).__init__()
self.design_doc = ddoc
self._r_session = self.design_doc.r_session
self.view_name = view_name
if map_func is not None:
self['map'] = codify(map_func)
if reduce_func is not None:
self['reduce'] = codify(reduce_func)
self._partition_key = partition_key
self.update(kwargs)
self.result = Result(self)
@property
def map(self):
"""
Provides an map property accessor and setter.
For example:
.. code-block:: python
# Set the View map property
view.map = 'function (doc) {\\n emit(doc._id, 1);\\n}'
print view.map
:param str js_func: Javascript function.
:returns: Codified map function
"""
return self.get('map')
@map.setter
def map(self, js_func):
"""
Provides a map property setter.
"""
self['map'] = codify(js_func)
@property
def reduce(self):
"""
Provides an reduce property accessor and setter.
For example:
.. code-block:: python
# Set the View reduce property
view.reduce = '_count'
# Get and print the View reduce property
print view.reduce
:param str js_func: Javascript function.
:returns: Codified reduce function
"""
return self.get('reduce')
@reduce.setter
def reduce(self, js_func):
"""
Provides a reduce property setter.
"""
self['reduce'] = codify(js_func)
@property
def url(self):
"""
Constructs and returns the View URL.
:returns: View URL
"""
if self._partition_key:
base_url = self.design_doc.document_partition_url(
self._partition_key)
else:
base_url = self.design_doc.document_url
return '/'.join((
base_url,
'_view',
self.view_name
))
def __call__(self, **kwargs):
"""
Makes the View object callable and retrieves the raw JSON content
from the remote database based on the View definition on the server,
using the kwargs provided as query parameters.
For example:
.. code-block:: python
# Construct a View
view = View(ddoc, 'view001')
# Assuming that 'view001' exists as part of the
# design document ddoc in the remote database...
# Use view as a callable
for row in view(include_docs=True, limit=100, skip=100)['rows']:
# Process view data (in JSON format).
Note: Rather than using the View callable directly, if you wish to
retrieve view results in raw JSON format use ``raw_result=True`` with
the provided database API of
:func:`~cloudant.database.CouchDatabase.get_view_result` instead.
:param bool descending: Return documents in descending key order.
:param endkey: Stop returning records at this specified key.
:param str endkey_docid: Stop returning records when the specified
document id is reached.
:param bool group: Using the reduce function, group the results to a
group or single row.
:param group_level: Only applicable if the view uses complex keys: keys
that are JSON arrays. Groups reduce results for the specified number
of array fields.
:param bool include_docs: Include the full content of the documents.
:param bool inclusive_end: Include rows with the specified endkey.
:param str key: Return only documents that match the specified key.
:param list keys: Return only documents that match the specified keys.
:param int limit: Limit the number of returned documents to the
specified count.
:param bool reduce: True to use the reduce function, false otherwise.
:param int skip: Skip this number of rows from the start.
:param str stale: Allow the results from a stale view to be used. This
makes the request return immediately, even if the view has not been
completely built yet. If this parameter is not given, a response is
returned only after the view has been built.
:param startkey: Return records starting with the specified key.
:param str startkey_docid: Return records starting with the specified
document ID.
:returns: View result data in JSON format
"""
resp = get_docs(self._r_session,
self.url,
self.design_doc.encoder,
**kwargs)
return response_to_json_dict(resp)
@contextlib.contextmanager
def custom_result(self, **options):
"""
Customizes the :class:`~cloudant.result.Result` behavior and provides
a convenient context manager for the Result. Result customizations
can be made by providing extra options to the result call using this
context manager. Depending on how you are accessing, slicing or
iterating through your result collection certain query parameters are
not permitted. See :class:`~cloudant.result.Result` for additional
details.
For example:
.. code-block:: python
with view.custom_result(include_docs=True, reduce=False) as rslt:
data = rslt[100: 200]
:param bool descending: Return documents in descending key order.
:param endkey: Stop returning records at this specified key.
Not valid when used with :class:`~cloudant.result.Result` key
access and key slicing.
:param str endkey_docid: Stop returning records when the specified
document id is reached.
:param bool group: Using the reduce function, group the results to a
group or single row.
:param group_level: Only applicable if the view uses complex keys: keys
that are JSON arrays. Groups reduce results for the specified number
of array fields.
:param bool include_docs: Include the full content of the documents.
:param bool inclusive_end: Include rows with the specified endkey.
:param key: Return only documents that match the specified key.
Not valid when used with :class:`~cloudant.result.Result` key
access and key slicing.
:param list keys: Return only documents that match the specified keys.
Not valid when used with :class:`~cloudant.result.Result` key
access and key slicing.
:param int limit: Limit the number of returned documents to the
specified count. Not valid when used with
:class:`~cloudant.result.Result` iteration.
:param int page_size: Sets the page size for result iteration.
:param bool reduce: True to use the reduce function, false otherwise.
:param int skip: Skip this number of rows from the start.
Not valid when used with :class:`~cloudant.result.Result` iteration.
:param str stale: Allow the results from a stale view to be used. This
makes the request return immediately, even if the view has not been
completely built yet. If this parameter is not given, a response is
returned only after the view has been built.
:param startkey: Return records starting with the specified key.
Not valid when used with :class:`~cloudant.result.Result` key
access and key slicing.
:param str startkey_docid: Return records starting with the specified
document ID.
:returns: View result data wrapped in a Result instance
"""
rslt = Result(self, **options)
yield rslt
del rslt
class QueryIndexView(View):
"""
A view that defines a JSON query index in a design document.
    If you wish to manage a view that represents a JSON query index, it
    is strongly recommended that
    :func:`~cloudant.database.CloudantDatabase.create_query_index`
    and :func:`~cloudant.database.CloudantDatabase.delete_query_index` be used.
"""
def __init__(self, ddoc, view_name, map_fields, reduce_func, **kwargs):
if not isinstance(map_fields, dict):
raise CloudantArgumentError(132)
if not isinstance(reduce_func, STRTYPE):
raise CloudantArgumentError(133)
super(QueryIndexView, self).__init__(
ddoc,
view_name,
map_fields,
reduce_func,
**kwargs
)
self['map'] = map_fields
self['reduce'] = reduce_func
self.result = None
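        # No Result wrapper is exposed: query index views are not queried
        # directly (see __call__ below).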
@property
def map(self):
"""
Provides a map property accessor and setter.
:param dict map_func: A dictionary of fields defining the index.
:returns: Fields defining the index
"""
return self.get('map')
@map.setter
def map(self, map_func):
"""
Provides a map property setter.
"""
if isinstance(map_func, dict):
self['map'] = map_func
else:
raise CloudantArgumentError(132)
@property
def reduce(self):
"""
Provides a reduce property accessor and setter.
:param str reduce_func: A string representation of the reduce function
used in part to define the index.
:returns: Reduce function as a string
"""
return self.get('reduce')
@reduce.setter
def reduce(self, reduce_func):
"""
Provides a reduce property setter.
"""
if isinstance(reduce_func, STRTYPE):
self['reduce'] = reduce_func
else:
raise CloudantArgumentError(133)
def __call__(self, **kwargs):
"""
QueryIndexView objects are not callable. If you wish to execute a query
using a query index, use
:func:`~cloudant.database.CloudantDatabase.get_query_result` instead.
"""
raise CloudantViewException(101)
def custom_result(self, **options):
"""
This method overrides the View base class
:func:`~cloudant.view.View.custom_result` method with the sole purpose of
disabling it. Since QueryIndexView objects are not callable, there is
no reason to wrap their output in a Result. If you wish to execute a
query using a query index, use
:func:`~cloudant.database.CloudantDatabase.get_query_result` instead.
"""
raise CloudantViewException(102)
| apache-2.0 | -4,179,084,407,348,044,000 | 35.937984 | 81 | 0.624344 | false |
nightjean/Deep-Learning | tensorflow/contrib/rnn/python/tools/checkpoint_convert.py | 5 | 10153 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert checkpoints using RNNCells to new name convention.
Usage:
python checkpoint_convert.py [--write_v1_checkpoint] \
'/path/to/checkpoint' '/path/to/new_checkpoint'
For example, if there is a V2 checkpoint to be converted and the files include:
/tmp/my_checkpoint/model.ckpt.data-00000-of-00001
/tmp/my_checkpoint/model.ckpt.index
/tmp/my_checkpoint/model.ckpt.meta
use the following command:
mkdir /tmp/my_converted_checkpoint &&
python checkpoint_convert.py \
/tmp/my_checkpoint/model.ckpt /tmp/my_converted_checkpoint/model.ckpt
This will generate three converted checkpoint files corresponding to the three
old ones in the new directory:
/tmp/my_converted_checkpoint/model.ckpt.data-00000-of-00001
/tmp/my_converted_checkpoint/model.ckpt.index
/tmp/my_converted_checkpoint/model.ckpt.meta
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import re
import sys
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
_RNN_NAME_REPLACEMENTS = collections.OrderedDict([
############################################################################
# contrib/rnn/python/ops/core_rnn_cell_impl.py
# BasicRNNCell
('basic_rnn_cell/weights', 'basic_rnn_cell/kernel'),
('basic_rnn_cell/biases', 'basic_rnn_cell/bias'),
# GRUCell
('gru_cell/weights', 'gru_cell/kernel'),
('gru_cell/biases', 'gru_cell/bias'),
('gru_cell/gates/weights', 'gru_cell/gates/kernel'),
('gru_cell/gates/biases', 'gru_cell/gates/bias'),
('gru_cell/candidate/weights', 'gru_cell/candidate/kernel'),
('gru_cell/candidate/biases', 'gru_cell/candidate/bias'),
# BasicLSTMCell
('basic_lstm_cell/weights', 'basic_lstm_cell/kernel'),
('basic_lstm_cell/biases', 'basic_lstm_cell/bias'),
# LSTMCell
('lstm_cell/weights', 'lstm_cell/kernel'),
('lstm_cell/biases', 'lstm_cell/bias'),
('lstm_cell/projection/weights', 'lstm_cell/projection/kernel'),
('lstm_cell/projection/biases', 'lstm_cell/projection/bias'),
# OutputProjectionWrapper
('output_projection_wrapper/weights', 'output_projection_wrapper/kernel'),
('output_projection_wrapper/biases', 'output_projection_wrapper/bias'),
# InputProjectionWrapper
('input_projection_wrapper/weights', 'input_projection_wrapper/kernel'),
('input_projection_wrapper/biases', 'input_projection_wrapper/bias'),
############################################################################
# contrib/rnn/python/ops/lstm_ops.py
# LSTMBlockFusedCell ??
('lstm_block_wrapper/weights', 'lstm_block_wrapper/kernel'),
('lstm_block_wrapper/biases', 'lstm_block_wrapper/bias'),
############################################################################
# contrib/rnn/python/ops/rnn_cell.py
# LayerNormBasicLSTMCell
('layer_norm_basic_lstm_cell/weights', 'layer_norm_basic_lstm_cell/kernel'),
('layer_norm_basic_lstm_cell/biases', 'layer_norm_basic_lstm_cell/bias'),
# UGRNNCell, not found in g3, but still need it?
('ugrnn_cell/weights', 'ugrnn_cell/kernel'),
('ugrnn_cell/biases', 'ugrnn_cell/bias'),
# NASCell
('nas_rnn/weights', 'nas_rnn/kernel'),
('nas_rnn/recurrent_weights', 'nas_rnn/recurrent_kernel'),
# IntersectionRNNCell
('intersection_rnn_cell/weights', 'intersection_rnn_cell/kernel'),
('intersection_rnn_cell/biases', 'intersection_rnn_cell/bias'),
('intersection_rnn_cell/in_projection/weights',
'intersection_rnn_cell/in_projection/kernel'),
('intersection_rnn_cell/in_projection/biases',
'intersection_rnn_cell/in_projection/bias'),
# PhasedLSTMCell
('phased_lstm_cell/mask_gates/weights',
'phased_lstm_cell/mask_gates/kernel'),
('phased_lstm_cell/mask_gates/biases', 'phased_lstm_cell/mask_gates/bias'),
('phased_lstm_cell/new_input/weights', 'phased_lstm_cell/new_input/kernel'),
('phased_lstm_cell/new_input/biases', 'phased_lstm_cell/new_input/bias'),
('phased_lstm_cell/output_gate/weights',
'phased_lstm_cell/output_gate/kernel'),
('phased_lstm_cell/output_gate/biases',
'phased_lstm_cell/output_gate/bias'),
# AttentionCellWrapper
('attention_cell_wrapper/weights', 'attention_cell_wrapper/kernel'),
('attention_cell_wrapper/biases', 'attention_cell_wrapper/bias'),
('attention_cell_wrapper/attn_output_projection/weights',
'attention_cell_wrapper/attn_output_projection/kernel'),
('attention_cell_wrapper/attn_output_projection/biases',
'attention_cell_wrapper/attn_output_projection/bias'),
('attention_cell_wrapper/attention/weights',
'attention_cell_wrapper/attention/kernel'),
('attention_cell_wrapper/attention/biases',
'attention_cell_wrapper/attention/bias'),
])
_RNN_SHARDED_NAME_REPLACEMENTS = collections.OrderedDict([
('LSTMCell/W_', 'lstm_cell/weights/part_'),
('BasicLSTMCell/Linear/Matrix_', 'basic_lstm_cell/weights/part_'),
('GRUCell/W_', 'gru_cell/weights/part_'),
('MultiRNNCell/Cell', 'multi_rnn_cell/cell_'),
])
def _rnn_name_replacement(var_name):
for pattern in _RNN_NAME_REPLACEMENTS:
if pattern in var_name:
old_var_name = var_name
var_name = var_name.replace(pattern, _RNN_NAME_REPLACEMENTS[pattern])
logging.info('Converted: %s --> %s' % (old_var_name, var_name))
break
return var_name
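# For example, a variable named "rnn/lstm_cell/weights" is rewritten to
# "rnn/lstm_cell/kernel" by the substring replacements above (illustrative).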
def _rnn_name_replacement_sharded(var_name):
for pattern in _RNN_SHARDED_NAME_REPLACEMENTS:
if pattern in var_name:
old_var_name = var_name
var_name = var_name.replace(pattern,
_RNN_SHARDED_NAME_REPLACEMENTS[pattern])
logging.info('Converted: %s --> %s' % (old_var_name, var_name))
return var_name
def _split_sharded_vars(name_shape_map):
"""Split shareded variables.
Args:
name_shape_map: A dict from variable name to variable shape.
Returns:
not_sharded: Names of the non-sharded variables.
    sharded: Names of the sharded variables.
"""
sharded = []
not_sharded = []
for name in name_shape_map:
    if re.search('_[0-9]+$', name):
if re.sub('_[0-9]+$', '_1', name) in name_shape_map:
sharded.append(name)
else:
not_sharded.append(name)
else:
not_sharded.append(name)
return not_sharded, sharded
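# Illustrative example: given names {"w_0", "w_1", "b"}, "w_0" and "w_1" are
# reported as shards of one logical variable while "b" is not.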
def convert_names(checkpoint_from_path,
checkpoint_to_path,
write_v1_checkpoint=False):
"""Migrates the names of variables within a checkpoint.
Args:
checkpoint_from_path: Path to source checkpoint to be read in.
checkpoint_to_path: Path to checkpoint to be written out.
write_v1_checkpoint: Whether the output checkpoint will be in V1 format.
Returns:
A dictionary that maps the new variable names to the Variable objects.
A dictionary that maps the old variable names to the new variable names.
"""
with ops.Graph().as_default():
logging.info('Reading checkpoint_from_path %s' % checkpoint_from_path)
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_from_path)
name_shape_map = reader.get_variable_to_shape_map()
not_sharded, sharded = _split_sharded_vars(name_shape_map)
new_variable_map = {}
conversion_map = {}
for var_name in not_sharded:
new_var_name = _rnn_name_replacement(var_name)
tensor = reader.get_tensor(var_name)
var = variables.Variable(tensor, name=var_name)
new_variable_map[new_var_name] = var
if new_var_name != var_name:
conversion_map[var_name] = new_var_name
for var_name in sharded:
      new_var_name = _rnn_name_replacement_sharded(var_name)
      tensor = reader.get_tensor(var_name)
      var = variables.Variable(tensor, name=var_name)
new_variable_map[new_var_name] = var
if new_var_name != var_name:
conversion_map[var_name] = new_var_name
write_version = (saver_pb2.SaverDef.V1
if write_v1_checkpoint else saver_pb2.SaverDef.V2)
saver = saver_lib.Saver(new_variable_map, write_version=write_version)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
logging.info('Writing checkpoint_to_path %s' % checkpoint_to_path)
saver.save(sess, checkpoint_to_path)
logging.info('Summary:')
logging.info(' Converted %d variable name(s).' % len(new_variable_map))
return new_variable_map, conversion_map
def main(_):
convert_names(
FLAGS.checkpoint_from_path,
FLAGS.checkpoint_to_path,
write_v1_checkpoint=FLAGS.write_v1_checkpoint)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument('checkpoint_from_path', type=str,
help='Path to source checkpoint to be read in.')
parser.add_argument('checkpoint_to_path', type=str,
help='Path to checkpoint to be written out.')
parser.add_argument('--write_v1_checkpoint', action='store_true',
help='Write v1 checkpoint')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | -504,070,315,371,197,400 | 40.105263 | 80 | 0.671723 | false |
spodkowinski/cassandra-dtest | tools/data.py | 2 | 7104 | import time
from cassandra import ConsistencyLevel
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import SimpleStatement
from nose.tools import assert_equal, assert_true
import assertions
from dtest import debug, create_cf, DtestTimeoutError
from tools.funcutils import get_rate_limited_function
def create_c1c2_table(tester, session, read_repair=None):
create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}, read_repair=read_repair)
def insert_c1c2(session, keys=None, n=None, consistency=ConsistencyLevel.QUORUM):
if (keys is None and n is None) or (keys is not None and n is not None):
raise ValueError("Expected exactly one of 'keys' or 'n' arguments to not be None; "
"got keys={keys}, n={n}".format(keys=keys, n=n))
if n:
keys = list(range(n))
statement = session.prepare("INSERT INTO cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
statement.consistency_level = consistency
execute_concurrent_with_args(session, statement, [['k{}'.format(k)] for k in keys])
def query_c1c2(session, key, consistency=ConsistencyLevel.QUORUM, tolerate_missing=False, must_be_missing=False):
query = SimpleStatement('SELECT c1, c2 FROM cf WHERE key=\'k%d\'' % key, consistency_level=consistency)
rows = list(session.execute(query))
if not tolerate_missing:
assertions.assert_length_equal(rows, 1)
res = rows[0]
assert_true(len(res) == 2 and res[0] == 'value1' and res[1] == 'value2', res)
if must_be_missing:
assertions.assert_length_equal(rows, 0)
def insert_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):
upds = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%06d\'" % (i, key, i) for i in xrange(offset * columns_count, columns_count * (offset + 1))]
query = 'BEGIN BATCH %s; APPLY BATCH' % '; '.join(upds)
simple_query = SimpleStatement(query, consistency_level=consistency)
session.execute(simple_query)
def query_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):
query = SimpleStatement('SELECT c, v FROM cf WHERE key=\'k%s\' AND c >= \'c%06d\' AND c <= \'c%06d\'' % (key, offset, columns_count + offset - 1), consistency_level=consistency)
res = list(session.execute(query))
assertions.assert_length_equal(res, columns_count)
for i in xrange(0, columns_count):
assert_equal(res[i][1], 'value{}'.format(i + offset))
# Simple puts and get (on one row), testing both reads by names and by slice,
# with overwrites and flushes between inserts to make sure we hit multiple
# sstables on reads
def putget(cluster, session, cl=ConsistencyLevel.QUORUM):
_put_with_overwrite(cluster, session, 1, cl)
# reads by name
# We do not support proper IN queries yet
# if cluster.version() >= "1.2":
# session.execute('SELECT * FROM cf USING CONSISTENCY %s WHERE key=\'k0\' AND c IN (%s)' % (cl, ','.join(ks)))
# else:
# session.execute('SELECT %s FROM cf USING CONSISTENCY %s WHERE key=\'k0\'' % (','.join(ks), cl))
# _validate_row(cluster, session)
# slice reads
query = SimpleStatement('SELECT * FROM cf WHERE key=\'k0\'', consistency_level=cl)
rows = list(session.execute(query))
_validate_row(cluster, rows)
def _put_with_overwrite(cluster, session, nb_keys, cl=ConsistencyLevel.QUORUM):
for k in xrange(0, nb_keys):
kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i, k, i) for i in xrange(0, 100)]
query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
session.execute(query)
time.sleep(.01)
cluster.flush()
for k in xrange(0, nb_keys):
kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i * 4, k, i * 2) for i in xrange(0, 50)]
query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
session.execute(query)
time.sleep(.01)
cluster.flush()
for k in xrange(0, nb_keys):
kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i * 20, k, i * 5) for i in xrange(0, 20)]
query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
session.execute(query)
time.sleep(.01)
cluster.flush()
def _validate_row(cluster, res):
assertions.assert_length_equal(res, 100)
for i in xrange(0, 100):
if i % 5 == 0:
assert_equal(res[i][2], 'value{}'.format(i * 4), 'for {}, expecting value{}, got {}'.format(i, i * 4, res[i][2]))
elif i % 2 == 0:
assert_equal(res[i][2], 'value{}'.format(i * 2), 'for {}, expecting value{}, got {}'.format(i, i * 2, res[i][2]))
else:
assert_equal(res[i][2], 'value{}'.format(i), 'for {}, expecting value{}, got {}'.format(i, i, res[i][2]))
# Simple puts and range gets, with overwrites and flushes between inserts to
# make sure we hit multiple sstables on reads
def range_putget(cluster, session, cl=ConsistencyLevel.QUORUM):
keys = 100
_put_with_overwrite(cluster, session, keys, cl)
paged_results = session.execute('SELECT * FROM cf LIMIT 10000000')
rows = [result for result in paged_results]
assertions.assert_length_equal(rows, keys * 100)
for k in xrange(0, keys):
res = rows[:100]
del rows[:100]
_validate_row(cluster, res)
def get_keyspace_metadata(session, keyspace_name):
cluster = session.cluster
cluster.refresh_keyspace_metadata(keyspace_name)
return cluster.metadata.keyspaces[keyspace_name]
def get_schema_metadata(session):
cluster = session.cluster
cluster.refresh_schema_metadata()
return cluster.metadata
def get_table_metadata(session, keyspace_name, table_name):
cluster = session.cluster
cluster.refresh_table_metadata(keyspace_name, table_name)
return cluster.metadata.keyspaces[keyspace_name].tables[table_name]
def rows_to_list(rows):
new_list = [list(row) for row in rows]
return new_list
def index_is_built(node, session, keyspace, table_name, idx_name):
# checks if an index has been built
full_idx_name = idx_name if node.get_cassandra_version() > '3.0' else '{}.{}'.format(table_name, idx_name)
index_query = """SELECT * FROM system."IndexInfo" WHERE table_name = '{}' AND index_name = '{}'""".format(keyspace, full_idx_name)
return len(list(session.execute(index_query))) == 1
def block_until_index_is_built(node, session, keyspace, table_name, idx_name):
"""
Waits up to 30 seconds for a secondary index to be built, and raises
DtestTimeoutError if it is not.
"""
start = time.time()
rate_limited_debug = get_rate_limited_function(debug, 5)
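    # Poll once a second for up to 30s, logging at most once every 5 seconds.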
while time.time() < start + 30:
rate_limited_debug("waiting for index to build")
time.sleep(1)
if index_is_built(node, session, keyspace, table_name, idx_name):
break
else:
raise DtestTimeoutError() | apache-2.0 | 1,160,953,948,285,309,700 | 41.54491 | 181 | 0.653857 | false |
edx/edx-platform | lms/djangoapps/instructor/tests/views/test_instructor_dashboard.py | 2 | 28290 | """
Unit tests for instructor_dashboard.py.
"""
import datetime
import re
from unittest.mock import patch
import ddt
from django.conf import settings
from django.contrib.sites.models import Site
from django.test.utils import override_settings
from django.urls import reverse
from edx_toggles.toggles.testutils import override_waffle_flag
from pyquery import PyQuery as pq
from pytz import UTC
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseFinanceAdminRole # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.student.tests.factories import AdminFactory, CourseAccessRoleFactory, CourseEnrollmentFactory
from common.djangoapps.student.tests.factories import StaffFactory
from common.djangoapps.student.tests.factories import UserFactory
from common.test.utils import XssTestMixin
from lms.djangoapps.courseware.tabs import get_course_tab_list
from lms.djangoapps.courseware.tests.factories import StudentModuleFactory
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.grades.config.waffle import WRITABLE_GRADEBOOK, waffle_flags
from lms.djangoapps.instructor.toggles import DATA_DOWNLOAD_V2
from lms.djangoapps.instructor.views.gradebook_api import calculate_page_info
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted
from openedx.core.djangoapps.discussions.config.waffle import (
ENABLE_PAGES_AND_RESOURCES_MICROFRONTEND,
OVERRIDE_DISCUSSION_LEGACY_SETTINGS_FLAG
)
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
def intercept_renderer(path, context):
"""
Intercept calls to `render_to_response` and attach the context dict to the
response for examination in unit tests.
"""
# I think Django already does this for you in their TestClient, except
# we're bypassing that by using edxmako. Probably edxmako should be
# integrated better with Django's rendering and event system.
response = render_to_response(path, context)
response.mako_context = context
response.mako_template = path
return response
@ddt.ddt
class TestInstructorDashboard(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin):
"""
Tests for the instructor dashboard (not legacy).
"""
def setUp(self):
"""
Set up tests
"""
super().setUp()
self.course = CourseFactory.create(
grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}},
display_name='<script>alert("XSS")</script>'
)
self.course_mode = CourseMode(
course_id=self.course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE.name,
min_price=40
)
self.course_info = CourseFactory.create(
org="ACME",
number="001",
run="2017",
name="How to defeat the Road Runner"
)
self.course_mode.save()
# Create instructor account
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
# URL for instructor dash
self.url = reverse('instructor_dashboard', kwargs={'course_id': str(self.course.id)})
def get_dashboard_enrollment_message(self):
"""
Returns expected dashboard enrollment message with link to Insights.
"""
return 'Enrollment data is now available in <a href="http://example.com/courses/{}" ' \
'rel="noopener" target="_blank">Example</a>.'.format(str(self.course.id))
def get_dashboard_analytics_message(self):
"""
Returns expected dashboard demographic message with link to Insights.
"""
return 'For analytics about your course, go to <a href="http://example.com/courses/{}" ' \
'rel="noopener" target="_blank">Example</a>.'.format(str(self.course.id))
def test_instructor_tab(self):
"""
Verify that the instructor tab appears for staff only.
"""
def has_instructor_tab(user, course):
"""Returns true if the "Instructor" tab is shown."""
tabs = get_course_tab_list(user, course)
return len([tab for tab in tabs if tab.name == 'Instructor']) == 1
assert has_instructor_tab(self.instructor, self.course)
staff = StaffFactory(course_key=self.course.id)
assert has_instructor_tab(staff, self.course)
student = UserFactory.create()
assert not has_instructor_tab(student, self.course)
researcher = UserFactory.create()
CourseAccessRoleFactory(
course_id=self.course.id,
user=researcher,
role='data_researcher',
org=self.course.id.org
)
assert has_instructor_tab(researcher, self.course)
org_researcher = UserFactory.create()
CourseAccessRoleFactory(
course_id=None,
user=org_researcher,
role='data_researcher',
org=self.course.id.org
)
assert has_instructor_tab(org_researcher, self.course)
@ddt.data(
('staff', False, False, True),
('staff', True, False, False),
('staff', True, True, True),
('staff', False, True, True),
('instructor', False, False, True),
('instructor', True, False, False),
('instructor', True, True, True),
('instructor', False, True, True)
)
@ddt.unpack
def test_discussion_tab_for_course_staff_role(self, access_role, is_pages_and_resources_enabled,
is_legacy_discussion_setting_enabled, is_discussion_tab_available):
"""
Verify that the Discussion tab is available for course for course staff role.
"""
discussion_section = ('<li class="nav-item"><button type="button" class="btn-link discussions_management" '
'data-section="discussions_management">Discussions</button></li>')
with override_waffle_flag(ENABLE_PAGES_AND_RESOURCES_MICROFRONTEND, is_pages_and_resources_enabled):
with override_waffle_flag(OVERRIDE_DISCUSSION_LEGACY_SETTINGS_FLAG, is_legacy_discussion_setting_enabled):
user = UserFactory.create()
CourseAccessRoleFactory(
course_id=self.course.id,
user=user,
role=access_role,
org=self.course.id.org
)
set_course_cohorted(self.course.id, True)
self.client.login(username=self.user.username, password='test')
response = self.client.get(self.url).content.decode('utf-8')
self.assertEqual(discussion_section in response, is_discussion_tab_available)
@ddt.data(
(False, False, True),
(True, False, False),
(True, True, True),
(False, True, True),
)
@ddt.unpack
def test_discussion_tab_for_global_user(self, is_pages_and_resources_enabled,
is_legacy_discussion_setting_enabled, is_discussion_tab_available):
"""
Verify that the Discussion tab is available for course for global user.
"""
discussion_section = ('<li class="nav-item"><button type="button" class="btn-link discussions_management" '
'data-section="discussions_management">Discussions</button></li>')
with override_waffle_flag(ENABLE_PAGES_AND_RESOURCES_MICROFRONTEND, is_pages_and_resources_enabled):
with override_waffle_flag(OVERRIDE_DISCUSSION_LEGACY_SETTINGS_FLAG, is_legacy_discussion_setting_enabled):
user = UserFactory.create(is_staff=True)
set_course_cohorted(self.course.id, True)
self.client.login(username=user.username, password='test')
response = self.client.get(self.url).content.decode('utf-8')
self.assertEqual(discussion_section in response, is_discussion_tab_available)
@ddt.data(
('staff', False, False),
('instructor', False, False),
('data_researcher', True, False),
('global_staff', True, False),
('staff', False, True),
('instructor', False, True),
('data_researcher', True, True),
('global_staff', True, True),
)
@ddt.unpack
def test_data_download(self, access_role, can_access, waffle_status):
"""
Verify that the Data Download tab only shows up for certain roles
"""
with override_waffle_flag(DATA_DOWNLOAD_V2, waffle_status):
download_section = '<li class="nav-item"><button type="button" class="btn-link data_download" ' \
'data-section="data_download">Data Download</button></li>'
if waffle_status:
download_section = '<li class="nav-item"><button type="button" class="btn-link data_download_2" ' \
'data-section="data_download_2">Data Download</button></li>'
user = UserFactory.create(is_staff=access_role == 'global_staff')
CourseAccessRoleFactory(
course_id=self.course.id,
user=user,
role=access_role,
org=self.course.id.org
)
self.client.login(username=user.username, password="test")
response = self.client.get(self.url)
if can_access:
self.assertContains(response, download_section)
else:
self.assertNotContains(response, download_section)
@override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
@override_settings(ANALYTICS_DASHBOARD_NAME='Example')
def test_data_download_only(self):
"""
Verify that only the data download tab is visible for data researchers.
"""
user = UserFactory.create()
CourseAccessRoleFactory(
course_id=self.course.id,
user=user,
role='data_researcher',
org=self.course.id.org
)
self.client.login(username=user.username, password="test")
response = self.client.get(self.url)
matches = re.findall(
rb'<li class="nav-item"><button type="button" class="btn-link .*" data-section=".*">.*',
response.content
)
assert len(matches) == 1
@ddt.data(
("How to defeat the Road Runner", "2017", "001", "ACME"),
)
@ddt.unpack
def test_instructor_course_info(self, display_name, run, number, org):
"""
Verify that it shows the correct course information
"""
url = reverse(
'instructor_dashboard',
kwargs={
'course_id': str(self.course_info.id)
}
)
response = self.client.get(url)
content = pq(response.content)
assert display_name == content('#field-course-display-name b').contents()[0].strip()
assert run == content('#field-course-name b').contents()[0].strip()
assert number == content('#field-course-number b').contents()[0].strip()
assert org == content('#field-course-organization b').contents()[0].strip()
@ddt.data(True, False)
    def test_membership_reason_field_visibility(self, enable_reason_field):
"""
Verify that reason field is enabled by site configuration flag 'ENABLE_MANUAL_ENROLLMENT_REASON_FIELD'
"""
configuration_values = {
"ENABLE_MANUAL_ENROLLMENT_REASON_FIELD": enbale_reason_field
}
site = Site.objects.first()
SiteConfiguration.objects.create(
site=site,
site_values=configuration_values,
enabled=True
)
url = reverse(
'instructor_dashboard',
kwargs={
'course_id': str(self.course_info.id)
}
)
response = self.client.get(url)
reason_field = '<textarea rows="2" id="reason-field-id" name="reason-field" ' \
'placeholder="Reason" spellcheck="false"></textarea>'
        if enable_reason_field:
self.assertContains(response, reason_field)
else:
self.assertNotContains(response, reason_field)
def test_student_admin_staff_instructor(self):
"""
Verify that staff users are not able to see course-wide options, while still
seeing individual learner options.
"""
# Original (instructor) user can see both specific grades, and course-wide grade adjustment tools
response = self.client.get(self.url)
self.assertContains(response, '<h4 class="hd hd-4">Adjust all enrolled learners')
        self.assertContains(response, '<h4 class="hd hd-4">View a specific learner\'s grades and progress')
# But staff user can only see specific grades
staff = StaffFactory(course_key=self.course.id)
self.client.login(username=staff.username, password="test")
response = self.client.get(self.url)
self.assertNotContains(response, '<h4 class="hd hd-4">Adjust all enrolled learners')
        self.assertContains(response, '<h4 class="hd hd-4">View a specific learner\'s grades and progress')
@patch(
'lms.djangoapps.instructor.views.instructor_dashboard.settings.WRITABLE_GRADEBOOK_URL',
'http://gradebook.local.edx.org'
)
def test_staff_can_see_writable_gradebook(self):
"""
Test that, when the writable gradebook feature is enabled and
deployed in another domain, a staff member can see it.
"""
waffle_flag = waffle_flags()[WRITABLE_GRADEBOOK]
with override_waffle_flag(waffle_flag, active=True):
response = self.client.get(self.url)
expected_gradebook_url = f'http://gradebook.local.edx.org/{self.course.id}'
self.assertContains(response, expected_gradebook_url)
self.assertContains(response, 'View Gradebook')
GRADEBOOK_LEARNER_COUNT_MESSAGE = (
'Note: This feature is available only to courses with a small number ' +
'of enrolled learners.'
)
@patch(
'lms.djangoapps.instructor.views.instructor_dashboard.settings.WRITABLE_GRADEBOOK_URL',
settings.LMS_ROOT_URL + '/gradebook'
)
def test_staff_can_see_writable_gradebook_as_subdirectory(self):
"""
Test that, when the writable gradebook feature is enabled and
deployed in a subdirectory, a staff member can see it.
"""
waffle_flag = waffle_flags()[WRITABLE_GRADEBOOK]
with override_waffle_flag(waffle_flag, active=True):
response = self.client.get(self.url)
expected_gradebook_url = f'{settings.WRITABLE_GRADEBOOK_URL}/{self.course.id}'
self.assertContains(response, expected_gradebook_url)
self.assertContains(response, 'View Gradebook')
def test_gradebook_learner_count_message(self):
"""
        Test that, when the writable gradebook feature is NOT enabled, there IS
a message that the feature is only available for courses with small
numbers of learners.
"""
response = self.client.get(self.url)
self.assertContains(
response,
self.GRADEBOOK_LEARNER_COUNT_MESSAGE,
)
self.assertContains(response, 'View Gradebook')
@patch(
'lms.djangoapps.instructor.views.instructor_dashboard.settings.WRITABLE_GRADEBOOK_URL',
'http://gradebook.local.edx.org'
)
def test_no_gradebook_learner_count_message(self):
"""
        Test that, when the writable gradebook feature IS enabled, there is NOT
a message that the feature is only available for courses with small
numbers of learners.
"""
waffle_flag = waffle_flags()[WRITABLE_GRADEBOOK]
with override_waffle_flag(waffle_flag, active=True):
response = self.client.get(self.url)
assert TestInstructorDashboard.GRADEBOOK_LEARNER_COUNT_MESSAGE not in response.content.decode('utf-8')
self.assertContains(response, 'View Gradebook')
def test_course_name_xss(self):
"""Test that the instructor dashboard correctly escapes course names
with script tags.
"""
response = self.client.get(self.url)
self.assert_no_xss(response, '<script>alert("XSS")</script>')
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': False})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_no_enrollments(self):
"""
Test enrollment section is hidden.
"""
response = self.client.get(self.url)
# no enrollment information should be visible
self.assertNotContains(response, '<h3 class="hd hd-3">Enrollment Information</h3>')
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': True})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_show_enrollments_data(self):
"""
Test enrollment data is shown.
"""
response = self.client.get(self.url)
# enrollment information visible
self.assertContains(response, '<h4 class="hd hd-4">Enrollment Information</h4>')
self.assertContains(response, '<th scope="row">Verified</th>')
self.assertContains(response, '<th scope="row">Audit</th>')
self.assertContains(response, '<th scope="row">Honor</th>')
self.assertContains(response, '<th scope="row">Professional</th>')
# dashboard link hidden
self.assertNotContains(response, self.get_dashboard_enrollment_message())
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': True})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_show_enrollment_data_for_prof_ed(self):
# Create both "professional" (meaning professional + verification)
# and "no-id-professional" (meaning professional without verification)
# These should be aggregated for display purposes.
users = [UserFactory() for _ in range(2)]
CourseEnrollment.enroll(users[0], self.course.id, mode="professional")
CourseEnrollment.enroll(users[1], self.course.id, mode="no-id-professional")
response = self.client.get(self.url)
# Check that the number of professional enrollments is two
self.assertContains(response, '<th scope="row">Professional</th><td>2</td>')
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': False})
@override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
@override_settings(ANALYTICS_DASHBOARD_NAME='Example')
def test_show_dashboard_enrollment_message(self):
"""
Test enrollment dashboard message is shown and data is hidden.
"""
response = self.client.get(self.url)
# enrollment information hidden
self.assertNotContains(response, '<th scope="row">Verified</th>')
self.assertNotContains(response, '<th scope="row">Audit</th>')
self.assertNotContains(response, '<th scope="row">Honor</th>')
self.assertNotContains(response, '<th scope="row">Professional</th>')
# link to dashboard shown
expected_message = self.get_dashboard_enrollment_message()
assert expected_message in response.content.decode(response.charset)
@override_settings(ANALYTICS_DASHBOARD_URL='')
@override_settings(ANALYTICS_DASHBOARD_NAME='')
def test_dashboard_analytics_tab_not_shown(self):
"""
Test dashboard analytics tab isn't shown if insights isn't configured.
"""
response = self.client.get(self.url)
analytics_section = '<li class="nav-item"><a href="" data-section="instructor_analytics">Analytics</a></li>'
self.assertNotContains(response, analytics_section)
@override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
@override_settings(ANALYTICS_DASHBOARD_NAME='Example')
def test_dashboard_analytics_points_at_insights(self):
"""
Test analytics dashboard message is shown
"""
response = self.client.get(self.url)
analytics_section = '<li class="nav-item"><button type="button" class="btn-link instructor_analytics"' \
' data-section="instructor_analytics">Analytics</button></li>'
self.assertContains(response, analytics_section)
# link to dashboard shown
expected_message = self.get_dashboard_analytics_message()
assert expected_message in response.content.decode(response.charset)
@ddt.data(
(True, True, True),
(True, False, False),
(False, True, False),
(False, False, False),
)
@ddt.unpack
def test_ccx_coaches_option_on_admin_list_management_instructor(
self, ccx_feature_flag, enable_ccx, expected_result
):
"""
Test whether the "CCX Coaches" option is visible or hidden depending on the value of course.enable_ccx.
"""
with patch.dict(settings.FEATURES, {'CUSTOM_COURSES_EDX': ccx_feature_flag}):
self.course.enable_ccx = enable_ccx
self.store.update_item(self.course, self.instructor.id)
response = self.client.get(self.url)
assert expected_result == ('CCX Coaches are able to create their own Custom Courses based on this course'
in response.content.decode('utf-8'))
def test_grade_cutoffs(self):
"""
Verify that grade cutoffs are displayed in the correct order.
"""
response = self.client.get(self.url)
self.assertContains(response, 'D: 0.5, C: 0.57, B: 0.63, A: 0.75')
@patch('lms.djangoapps.instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 2)
def test_calculate_page_info(self):
page = calculate_page_info(offset=0, total_students=2)
assert page['offset'] == 0
assert page['page_num'] == 1
assert page['next_offset'] is None
assert page['previous_offset'] is None
assert page['total_pages'] == 1
@patch('lms.djangoapps.instructor.views.gradebook_api.render_to_response', intercept_renderer)
@patch('lms.djangoapps.instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 1)
def test_spoc_gradebook_pages(self):
for i in range(2):
username = "user_%d" % i
student = UserFactory.create(username=username)
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
url = reverse(
'spoc_gradebook',
kwargs={'course_id': self.course.id}
)
response = self.client.get(url)
assert response.status_code == 200
# Max number of student per page is one. Patched setting MAX_STUDENTS_PER_PAGE_GRADE_BOOK = 1
assert len(response.mako_context['students']) == 1
def test_open_response_assessment_page(self):
"""
Test that Open Responses is available only if course contains at least one ORA block
"""
ora_section = (
'<li class="nav-item">'
'<button type="button" class="btn-link open_response_assessment" data-section="open_response_assessment">'
'Open Responses'
'</button>'
'</li>'
)
response = self.client.get(self.url)
self.assertNotContains(response, ora_section)
ItemFactory.create(parent_location=self.course.location, category="openassessment")
response = self.client.get(self.url)
self.assertContains(response, ora_section)
def test_open_response_assessment_page_orphan(self):
"""
Tests that the open responses tab loads if the course contains an
orphaned openassessment block
"""
# create non-orphaned openassessment block
ItemFactory.create(
parent_location=self.course.location,
category="openassessment",
)
# create orphan
self.store.create_item(
self.user.id, self.course.id, 'openassessment', "orphan"
)
response = self.client.get(self.url)
# assert we don't get a 500 error
assert 200 == response.status_code
@ddt.ddt
class TestInstructorDashboardPerformance(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin):
"""
Tests for the instructor dashboard from the performance point of view.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""
Set up tests
"""
super().setUp()
self.course = CourseFactory.create(
grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}},
display_name='<script>alert("XSS")</script>',
default_store=ModuleStoreEnum.Type.split
)
self.course_mode = CourseMode(
course_id=self.course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE.name,
min_price=40
)
self.course_mode.save()
# Create instructor account
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
def test_spoc_gradebook_mongo_calls(self):
"""
Test that the MongoDB cache is used in API to return grades
"""
# prepare course structure
course = ItemFactory.create(
parent_location=self.course.location,
category="course",
display_name="Test course",
)
students = []
for i in range(20):
username = "user_%d" % i
student = UserFactory.create(username=username)
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
students.append(student)
chapter = ItemFactory.create(
parent=course,
category='chapter',
display_name="Chapter",
publish_item=True,
start=datetime.datetime(2015, 3, 1, tzinfo=UTC),
)
sequential = ItemFactory.create(
parent=chapter,
category='sequential',
display_name="Lesson",
publish_item=True,
start=datetime.datetime(2015, 3, 1, tzinfo=UTC),
metadata={'graded': True, 'format': 'Homework'},
)
vertical = ItemFactory.create(
parent=sequential,
category='vertical',
display_name='Subsection',
publish_item=True,
start=datetime.datetime(2015, 4, 1, tzinfo=UTC),
)
for i in range(10):
problem = ItemFactory.create(
category="problem",
parent=vertical,
display_name="A Problem Block %d" % i,
weight=1,
publish_item=False,
metadata={'rerandomize': 'always'},
)
for j in students:
grade = i % 2
StudentModuleFactory.create(
grade=grade,
max_grade=1,
student=j,
course_id=self.course.id,
module_state_key=problem.location
)
# check MongoDB calls count
url = reverse('spoc_gradebook', kwargs={'course_id': self.course.id})
with check_mongo_calls(9):
response = self.client.get(url)
assert response.status_code == 200
| agpl-3.0 | 232,834,636,607,615,360 | 40.725664 | 118 | 0.62743 | false |
geekosphere/zgeist | zg/objects/item/model.py | 1 | 1165 | """
Item Model
"""
from __future__ import division, print_function, absolute_import, unicode_literals
from sqlalchemy import Table, ForeignKey, Column, Enum, Integer, String, Text, LargeBinary
from sqlalchemy.orm import relationship
from zg.database import Base, UTCDateTime
from zg.objects.tag.model import TagModel
TaggingModel = Table('tagging', Base.metadata,
Column('item_id', Integer, ForeignKey('item.id')),
Column('tag_id', Integer, ForeignKey('tag.id')))
class ItemModel(Base):
__tablename__ = 'item'
id = Column(Integer, primary_key=True)
title = Column(String(2048))
description = Column(Text)
created_at = Column(UTCDateTime, nullable=False)
updated_at = Column(UTCDateTime)
tags = relationship('TagModel', secondary=TaggingModel)
#: item stauts either pending, failed or available
status = Column(String(32))
class FragmentModel(Base):
__tablename__ = 'fragment'
id = Column(Integer, primary_key=True)
item_id = Column(Integer, ForeignKey('item.id'))
type = Column(Enum('image'))
tempfile = Column(String(255))
    data = Column(LargeBinary)  # assumed payload type; the original left this Column untyped
| agpl-3.0 | -5,373,538,348,094,051,000 | 24.888889 | 82 | 0.660086 | false |
danmergens/mi-instrument | mi/dataset/parser/test/test_fdchp_a_dcl.py | 5 | 6846 | """
@package mi.dataset.parser.test
@file mi/dataset/parser/test/test_fdchp_a_dcl.py
@author Emily Hahn
@brief Parser test for the fdchp series a instrument through a DCL
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import UnexpectedDataException, SampleException
from mi.dataset.driver.fdchp_a.dcl.resource import RESOURCE_PATH
from mi.dataset.parser.utilities import particle_to_yml
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.parser.fdchp_a_dcl import FdchpADclParser
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
@attr('UNIT', group='mi')
class FdchpADclParserUnitTestCase(ParserUnitTestCase):
def create_yml(self, particles, filename):
particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))
def test_simple_telem(self):
"""
Test a simple telemetered case
"""
with open(os.path.join(RESOURCE_PATH, 'start.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=True)
particles = parser.get_records(2)
self.assert_particles(particles, "start_telem.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_simple_recov(self):
"""
Test a simple recovered case
"""
with open(os.path.join(RESOURCE_PATH, 'start.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=False)
particles = parser.get_records(2)
self.assert_particles(particles, "start_recov.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_long_telem(self):
"""
Test a longer telemetered case
"""
with open(os.path.join(RESOURCE_PATH, 'long.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=True)
particles = parser.get_records(8)
self.assert_particles(particles, "long_telem.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_long_recov(self):
"""
Test a longer telemetered case
"""
with open(os.path.join(RESOURCE_PATH, 'long.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=False)
particles = parser.get_records(8)
self.assert_particles(particles, "long_recov.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_full_recov(self):
"""
Test with the full file, just compare that the number of particles is correct
and there have been no exceptions
"""
with open(os.path.join(RESOURCE_PATH, '20141215.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=False)
# request a few more particles than are available, should only get the number in the file
particles = parser.get_records(25)
self.assertEquals(len(particles), 22)
self.assertEqual(self.exception_callback_value, [])
def test_unexpected(self):
"""
Test with a file that has an unexpected data line in it
Confirm we get all expected particles and call an exception for the unexpected data
"""
with open(os.path.join(RESOURCE_PATH, 'unexpected_line.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=True)
particles = parser.get_records(2)
self.assert_particles(particles, "start_telem.yml", RESOURCE_PATH)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], UnexpectedDataException)
def test_missing_vals(self):
"""
        Test that a file with missing data values is parsed correctly.
        The first line is missing a value but keeps its comma separator, so it still has the right number of fields.
        The second line is missing a value entirely, so it does not have the right number of values to be parsed.
        Neither line should produce a particle.
"""
with open(os.path.join(RESOURCE_PATH, 'missing_vals.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=True)
particles = parser.get_records(2)
            # neither line produces a particle; each missing value raises a SampleException instead
self.assertEqual(len(particles), 0)
self.assertEqual(len(self.exception_callback_value), 2)
self.assertIsInstance(self.exception_callback_value[0], SampleException)
self.assertIsInstance(self.exception_callback_value[1], SampleException)
def test_logs_ignored(self):
"""
Test with a real file that has additional logs which should be ignored in it
"""
# file was obtained from the acquisition server CP01CNSM deployment 2
with open(os.path.join(RESOURCE_PATH, '20141119.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=True)
particles = parser.get_records(4)
self.assert_particles(particles, "20141119_telem.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_instrument_stop_start(self):
"""
Test with a real file where the instrument stops and starts in the middle
"""
# file was obtained from the acquisition server CP01CNSM deployment 2
with open(os.path.join(RESOURCE_PATH, '20141211.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=True)
particles = parser.get_records(3)
self.assertEquals(len(particles), 3)
self.assertEqual(self.exception_callback_value, [])
def test_bug_10002(self):
"""
        Redmine Ticket 10002 found that files from early deployments had incorrect firmware
        that omitted commas between some parameters. We still get 66 parameters, but some are
        separated only by spaces.
Verify we get particles from files from early deployments
"""
with open(os.path.join(RESOURCE_PATH, '20140912.fdchp.log'), 'r') as file_handle:
parser = FdchpADclParser(file_handle, self.exception_callback, is_telemetered=True)
particles = parser.get_records(30)
self.assertEquals(len(particles), 23)
self.assertEqual(self.exception_callback_value, [])
| bsd-2-clause | 6,473,076,581,250,910,000 | 42.056604 | 106 | 0.663015 | false |
lym/allura-git | ForgeTracker/forgetracker/tests/test_app.py | 2 | 3100 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
import json
import operator
from nose.tools import assert_equal, assert_true
from pylons import tmpl_context as c
from allura import model as M
from allura.tests import decorators as td
from forgetracker import model as TM
from forgetracker.tests.functional.test_root import TrackerTestController
class TestBulkExport(TrackerTestController):
@td.with_tracker
def setup_with_tools(self):
super(TestBulkExport, self).setup_with_tools()
self.project = M.Project.query.get(shortname='test')
self.tracker = self.project.app_instance('bugs')
self.new_ticket(summary='foo', _milestone='1.0')
self.new_ticket(summary='bar', _milestone='2.0')
ticket = TM.Ticket.query.find(dict(summary='foo')).first()
ticket.discussion_thread.add_post(text='silly comment')
def test_bulk_export(self):
# Clear out some context vars, to properly simulate how this is run from the export task
# Besides, core functionality shouldn't need the c context vars
c.app = c.project = None
f = tempfile.TemporaryFile()
self.tracker.bulk_export(f)
f.seek(0)
tracker = json.loads(f.read())
tickets = sorted(tracker['tickets'],
key=operator.itemgetter('summary'))
assert_equal(len(tickets), 2)
ticket_foo = tickets[1]
assert_equal(ticket_foo['summary'], 'foo')
assert_equal(ticket_foo['custom_fields']['_milestone'], '1.0')
posts_foo = ticket_foo['discussion_thread']['posts']
assert_equal(len(posts_foo), 1)
assert_equal(posts_foo[0]['text'], 'silly comment')
tracker_config = tracker['tracker_config']
assert_true('options' in tracker_config.keys())
assert_equal(tracker_config['options']['mount_point'], 'bugs')
milestones = sorted(tracker['milestones'],
key=operator.itemgetter('name'))
assert_equal(milestones[0]['name'], '1.0')
assert_equal(milestones[1]['name'], '2.0')
saved_bins_summaries = [bin['summary']
for bin in tracker['saved_bins']]
assert_true('Closed Tickets' in saved_bins_summaries)
| apache-2.0 | -6,845,210,833,465,340,000 | 40.891892 | 96 | 0.654839 | false |
eunchong/build | third_party/sqlalchemy_0_7_1/sqlalchemy/orm/dynamic.py | 8 | 11427 | # orm/dynamic.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Dynamic collection API.
Dynamic collections act like Query() objects for read operations and support
basic add/delete mutation.
"""
from sqlalchemy import log, util
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.sql import operators
from sqlalchemy.orm import (
attributes, object_session, util as mapperutil, strategies, object_mapper
)
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.util import has_identity
from sqlalchemy.orm import attributes, collections
class DynaLoader(strategies.AbstractRelationshipLoader):
def init_class_attribute(self, mapper):
self.is_class_level = True
strategies._register_attribute(self,
mapper,
useobject=True,
impl_class=DynamicAttributeImpl,
target_mapper=self.parent_property.mapper,
order_by=self.parent_property.order_by,
query_class=self.parent_property.query_class
)
log.class_logger(DynaLoader)
class DynamicAttributeImpl(attributes.AttributeImpl):
uses_objects = True
accepts_scalar_loader = False
supports_population = False
def __init__(self, class_, key, typecallable,
dispatch,
target_mapper, order_by, query_class=None, **kw):
super(DynamicAttributeImpl, self).\
__init__(class_, key, typecallable, dispatch, **kw)
self.target_mapper = target_mapper
self.order_by = order_by
if not query_class:
self.query_class = AppenderQuery
elif AppenderMixin in query_class.mro():
self.query_class = query_class
else:
self.query_class = mixin_user_query(query_class)
def get(self, state, dict_, passive=attributes.PASSIVE_OFF):
if passive is not attributes.PASSIVE_OFF:
return self._get_collection_history(state,
attributes.PASSIVE_NO_INITIALIZE).added_items
else:
return self.query_class(self, state)
def get_collection(self, state, dict_, user_data=None,
passive=attributes.PASSIVE_NO_INITIALIZE):
if passive is not attributes.PASSIVE_OFF:
return self._get_collection_history(state,
passive).added_items
else:
history = self._get_collection_history(state, passive)
return history.added_items + history.unchanged_items
def fire_append_event(self, state, dict_, value, initiator):
collection_history = self._modified_event(state, dict_)
collection_history.added_items.append(value)
for fn in self.dispatch.append:
value = fn(state, value, initiator or self)
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), True)
def fire_remove_event(self, state, dict_, value, initiator):
collection_history = self._modified_event(state, dict_)
collection_history.deleted_items.append(value)
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self)
def _modified_event(self, state, dict_):
if self.key not in state.committed_state:
state.committed_state[self.key] = CollectionHistory(self, state)
state.modified_event(dict_,
self,
attributes.NEVER_SET)
# this is a hack to allow the fixtures.ComparableEntity fixture
# to work
dict_[self.key] = True
return state.committed_state[self.key]
def set(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator and initiator.parent_token is self.parent_token:
return
self._set_iterable(state, dict_, value)
def _set_iterable(self, state, dict_, iterable, adapter=None):
collection_history = self._modified_event(state, dict_)
new_values = list(iterable)
if state.has_identity:
old_collection = list(self.get(state, dict_))
else:
old_collection = []
collections.bulk_replace(new_values, DynCollectionAdapter(self,
state, old_collection),
DynCollectionAdapter(self, state,
new_values))
def delete(self, *args, **kwargs):
raise NotImplementedError()
def set_committed_value(self, state, dict_, value):
raise NotImplementedError("Dynamic attributes don't support "
"collection population.")
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
c = self._get_collection_history(state, passive)
return attributes.History(c.added_items, c.unchanged_items,
c.deleted_items)
def get_all_pending(self, state, dict_):
c = self._get_collection_history(state, True)
return [
(attributes.instance_state(x), x)
for x in
c.added_items + c.unchanged_items + c.deleted_items
]
def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF):
if self.key in state.committed_state:
c = state.committed_state[self.key]
else:
c = CollectionHistory(self, state)
if passive is attributes.PASSIVE_OFF:
return CollectionHistory(self, state, apply_to=c)
else:
return c
def append(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator is not self:
self.fire_append_event(state, dict_, value, initiator)
def remove(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator is not self:
self.fire_remove_event(state, dict_, value, initiator)
class DynCollectionAdapter(object):
"""the dynamic analogue to orm.collections.CollectionAdapter"""
def __init__(self, attr, owner_state, data):
self.attr = attr
self.state = owner_state
self.data = data
def __iter__(self):
return iter(self.data)
def append_with_event(self, item, initiator=None):
self.attr.append(self.state, self.state.dict, item, initiator)
def remove_with_event(self, item, initiator=None):
self.attr.remove(self.state, self.state.dict, item, initiator)
def append_without_event(self, item):
pass
def remove_without_event(self, item):
pass
class AppenderMixin(object):
query_class = None
def __init__(self, attr, state):
Query.__init__(self, attr.target_mapper, None)
self.instance = instance = state.obj()
self.attr = attr
mapper = object_mapper(instance)
prop = mapper._props[self.attr.key]
self._criterion = prop.compare(
operators.eq,
instance,
value_is_parent=True,
alias_secondary=False)
if self.attr.order_by:
self._order_by = self.attr.order_by
def __session(self):
sess = object_session(self.instance)
if sess is not None and self.autoflush and sess.autoflush \
and self.instance in sess:
sess.flush()
if not has_identity(self.instance):
return None
else:
return sess
def session(self):
return self.__session()
session = property(session, lambda s, x:None)
def __iter__(self):
sess = self.__session()
if sess is None:
return iter(self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE).added_items)
else:
return iter(self._clone(sess))
def __getitem__(self, index):
sess = self.__session()
if sess is None:
return self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE).added_items.\
__getitem__(index)
else:
return self._clone(sess).__getitem__(index)
def count(self):
sess = self.__session()
if sess is None:
return len(self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE).added_items)
else:
return self._clone(sess).count()
def _clone(self, sess=None):
# note we're returning an entirely new Query class instance
# here without any assignment capabilities; the class of this
# query is determined by the session.
instance = self.instance
if sess is None:
sess = object_session(instance)
if sess is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session, and no "
"contextual session is established; lazy load operation "
"of attribute '%s' cannot proceed" % (
mapperutil.instance_str(instance), self.attr.key))
if self.query_class:
query = self.query_class(self.attr.target_mapper, session=sess)
else:
query = sess.query(self.attr.target_mapper)
query._criterion = self._criterion
query._order_by = self._order_by
return query
def append(self, item):
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)
def remove(self, item):
self.attr.remove(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)
class AppenderQuery(AppenderMixin, Query):
"""A dynamic query that supports basic collection storage operations."""
def mixin_user_query(cls):
"""Return a new class with AppenderQuery functionality layered over."""
name = 'Appender' + cls.__name__
return type(name, (AppenderMixin, cls), {'query_class': cls})
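# Illustrative note (added): given a user-supplied query class, e.g.
#     class MyQuery(Query): ...
# mixin_user_query(MyQuery) returns a new class named "AppenderMyQuery" whose
# MRO is (AppenderMixin, MyQuery), so custom query classes pick up the dynamic
# append()/remove() behaviour defined above.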
class CollectionHistory(object):
"""Overrides AttributeHistory to receive append/remove events directly."""
def __init__(self, attr, state, apply_to=None):
if apply_to:
deleted = util.IdentitySet(apply_to.deleted_items)
added = apply_to.added_items
coll = AppenderQuery(attr, state).autoflush(False)
self.unchanged_items = [o for o in util.IdentitySet(coll)
if o not in deleted]
self.added_items = apply_to.added_items
self.deleted_items = apply_to.deleted_items
else:
self.deleted_items = []
self.added_items = []
self.unchanged_items = []
| bsd-3-clause | 7,028,947,768,777,470,000 | 35.507987 | 84 | 0.602783 | false |
h2oai/h2o-3 | h2o-py/tests/testdir_algos/stackedensemble/pyunit_stackedensemble_gaussian.py | 3 | 6514 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import h2o
import sys
sys.path.insert(1,"../../../") # allow us to run this standalone
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
from tests import pyunit_utils as pu
from tests.pyunit_utils import assert_warn
seed = 1
def prepare_data(blending=False):
col_types = ["numeric", "numeric", "numeric", "enum", "enum", "numeric", "numeric", "numeric", "numeric"]
dat = h2o.upload_file(path=pu.locate("smalldata/extdata/prostate.csv"),
destination_frame="prostate_hex",
col_types=col_types)
train, test = dat.split_frame(ratios=[.8], seed=1)
x = ["CAPSULE", "GLEASON", "RACE", "DPROS", "DCAPS", "PSA", "VOL"]
y = "AGE"
ds = pu.ns(x=x, y=y, train=train, test=test)
if blending:
train, blend = train.split_frame(ratios=[.7], seed=seed)
return ds.extend(train=train, blend=blend)
else:
return ds
def train_base_models(dataset, **kwargs):
model_args = kwargs if hasattr(dataset, 'blend') else dict(nfolds=3, fold_assignment="Modulo", keep_cross_validation_predictions=True, **kwargs)
gbm = H2OGradientBoostingEstimator(distribution="gaussian",
ntrees=10,
max_depth=3,
min_rows=2,
learn_rate=0.2,
seed=seed,
**model_args)
gbm.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
rf = H2ORandomForestEstimator(ntrees=10,
seed=seed,
**model_args)
rf.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
xrf = H2ORandomForestEstimator(ntrees=20,
histogram_type="Random",
seed=seed,
**model_args)
xrf.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
return [gbm, rf, xrf]
def train_stacked_ensemble(dataset, base_models, **kwargs):
se = H2OStackedEnsembleEstimator(base_models=base_models, seed=seed)
se.train(x=dataset.x, y=dataset.y,
training_frame=dataset.train,
blending_frame=dataset.blend if hasattr(dataset, 'blend') else None,
**kwargs)
return se
def test_suite_stackedensemble_gaussian(blending=False):
def test_predict_on_se_model():
ds = prepare_data(blending)
models = train_base_models(ds)
se = train_stacked_ensemble(ds, models)
for i in range(2): # repeat predict to verify consistency
pred = se.predict(test_data=ds.test)
assert pred.nrow == ds.test.nrow, "expected " + str(pred.nrow) + " to be equal to " + str(ds.test.nrow)
assert pred.ncol == 1, "expected " + str(pred.ncol) + " to be equal to 1 but it was equal to " + str(pred.ncol)
def test_se_performance_is_better_than_individual_models():
ds = prepare_data(blending)
base_models = train_base_models(ds)
def compute_perf(model):
perf = pu.ns(
train=model.model_performance(train=True),
test=model.model_performance(test_data=ds.test)
)
print("{} training performance: ".format(model.model_id))
print(perf.train)
print("{} test performance: ".format(model.model_id))
print(perf.test)
return perf
base_perfs = {}
for model in base_models:
base_perfs[model.model_id] = compute_perf(model)
se = train_stacked_ensemble(ds, base_models)
perf_se = compute_perf(se)
# Check that stack perf is better (smaller) than the best (smaller) base learner perf:
# Training RMSE for each base learner
baselearner_best_rmse_train = min([perf.train.rmse() for perf in base_perfs.values()])
stack_rmse_train = perf_se.train.rmse()
print("Best Base-learner Training RMSE: {}".format(baselearner_best_rmse_train))
print("Ensemble Training RMSE: {}".format(stack_rmse_train))
assert_warn(stack_rmse_train < baselearner_best_rmse_train,
"expected SE training RMSE would be smaller than the best of base learner training RMSE, but obtained: " \
"RMSE (SE) = {}, RMSE (best base learner) = {}".format(stack_rmse_train, baselearner_best_rmse_train))
# Test RMSE for each base learner
baselearner_best_rmse_test = min([perf.test.rmse() for perf in base_perfs.values()])
stack_rmse_test = perf_se.test.rmse()
print("Best Base-learner Test RMSE: {}".format(baselearner_best_rmse_test))
print("Ensemble Test RMSE: {}".format(stack_rmse_test))
assert_warn(stack_rmse_test < baselearner_best_rmse_test,
"expected SE test RMSE would be smaller than the best of base learner test RMSE, but obtained: " \
"RMSE (SE) = {}, RMSE (best base learner) = {}".format(stack_rmse_test, baselearner_best_rmse_test))
def test_validation_frame_produces_same_metric_as_perf_test():
ds = prepare_data(blending)
models = train_base_models(ds)
se = train_stacked_ensemble(ds, models, validation_frame=ds.test)
se_perf = se.model_performance(test_data=ds.test)
se_perf_validation_frame = se.model_performance(valid=True)
# since the metrics object is not exactly the same, we can just test that RSME is the same
assert se_perf.rmse() == se_perf_validation_frame.rmse(), \
"expected SE test RMSE to be the same as SE validation frame RMSE, but obtained: " \
"RMSE (perf on test) = {}, RMSE (test passed as validation frame) = {}".format(se_perf.rmse(), se_perf_validation_frame.rmse())
return [pu.tag_test(test, 'blending' if blending else None) for test in [
test_predict_on_se_model,
test_se_performance_is_better_than_individual_models,
test_validation_frame_produces_same_metric_as_perf_test
]]
pu.run_tests([
test_suite_stackedensemble_gaussian(),
test_suite_stackedensemble_gaussian(blending=True)
])
| apache-2.0 | 1,637,608,106,587,428,600 | 42.139073 | 148 | 0.603623 | false |
tthtlc/volatility | volatility/plugins/mac/mount.py | 45 | 1762 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.common as common
class mac_mount(common.AbstractMacCommand):
""" Prints mounted device information """
def calculate(self):
common.set_plugin_members(self)
mountlist_addr = self.addr_space.profile.get_symbol("_mountlist")
mount = obj.Object("mount", offset = mountlist_addr, vm = self.addr_space)
mount = mount.mnt_list.tqe_next
while mount:
yield mount
mount = mount.mnt_list.tqe_next
def render_text(self, outfd, data):
self.table_header(outfd, [("Device", "30"), ("Mount Point", "60"), ("Type", "")])
for mount in data:
self.table_row(outfd,
                           mount.mnt_vfsstat.f_mntfromname,
                           mount.mnt_vfsstat.f_mntonname,
mount.mnt_vfsstat.f_fstypename)
| gpl-2.0 | -4,060,370,142,997,711,000 | 34.24 | 89 | 0.659478 | false |
watonyweng/nova | nova/tests/functional/v3/test_networks_associate.py | 3 | 3375 | # Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.network import api as network_api
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class NetworksAssociateJsonTests(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-networks-associate"
extra_extensions_to_load = ["os-networks"]
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
_sentinel = object()
def _get_flags(self):
f = super(NetworksAssociateJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Networks_associate requires Networks to be update
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.os_networks.Os_networks')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.networks_associate.'
'Networks_associate')
return f
def setUp(self):
super(NetworksAssociateJsonTests, self).setUp()
def fake_associate(self, context, network_id,
host=NetworksAssociateJsonTests._sentinel,
project=NetworksAssociateJsonTests._sentinel):
return True
self.stubs.Set(network_api.API, "associate", fake_associate)
def test_disassociate(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-req',
{})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_disassociate_host(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_disassociate_project(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_associate_host(self):
response = self._do_post('os-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
| apache-2.0 | 2,519,083,785,427,579,400 | 39.178571 | 78 | 0.623407 | false |
alanudg/SmartCheckIn | app/models/Lugar.py | 2 | 1549 | from app.config import db_sql as db
from app.utils.key_utils import generate_key
from geoalchemy2.types import Geometry
class Lugar(db.Model):
id = db.Column(db.Integer, primary_key=True)
nombre = db.Column(db.String(20), nullable=False, unique=True)
asignacion_automatica = db.Column(db.Boolean, nullable=False,
default=False)
coordenadas = db.Column(Geometry("POLYGON"))
key = db.Column(db.String(15), default=generate_key)
hora_apertura = db.Column(db.Time)
hora_cierre = db.Column(db.Time)
privado = db.Column(db.Boolean, nullable=False, default=False)
id_lugar_padre = db.Column(db.Integer, db.ForeignKey('lugar.id'),
nullable=True)
computadoras = db.relationship('Computadora', backref='Lugar', lazy=True)
lugar_padre = db.relationship('Lugar', remote_side=[id])
registros = db.relationship('Registro', backref='Lugar', lazy=True)
usuarios = db.relationship("Usuario",
secondary='lugares_usuarios')
def __unicode__(self):
return self.nombre
def __hash__(self):
return hash(self.nombre)
@property
def serialize(self):
return {
'id': self.id,
'nombre': self.nombre,
'asignacion_automatica': self.asignacion_automatica,
'coordenadas': str(self.coordenadas),
'hora_apertura': self.hora_apertura,
'hora_cierre': self.hora_cierre,
'privado': self.privado,
}
| mit | 6,522,296,970,790,014,000 | 35.880952 | 77 | 0.613299 | false |
google/skia | tools/skpbench/_hardware_pixel.py | 12 | 3172 | # Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from _hardware import Expectation
from _hardware_android import HardwareAndroid
CPU_CLOCK_RATE = 1670400
GPU_CLOCK_RATE = 315000000
class HardwarePixel(HardwareAndroid):
def __init__(self, adb):
HardwareAndroid.__init__(self, adb)
def __enter__(self):
HardwareAndroid.__enter__(self)
if not self._adb.is_root():
return self
self._adb.shell('\n'.join([
# enable and lock the two fast cores.
'''
stop thermal-engine
stop perfd
for N in 3 2; do
echo 1 > /sys/devices/system/cpu/cpu$N/online
echo userspace > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_governor
echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_max_freq
echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_min_freq
echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_setspeed
done''' % tuple(CPU_CLOCK_RATE for _ in range(3)),
# turn off the two slow cores
'''
for N in 1 0; do
echo 0 > /sys/devices/system/cpu/cpu$N/online
done''',
# pylint: disable=line-too-long
# Set GPU bus and idle timer
# Set DDR frequency to max
# Set GPU to performance mode, 315 MHZ
# See https://android.googlesource.com/platform/frameworks/base/+/master/libs/hwui/tests/scripts/prep_marlfish.sh
'''
echo 0 > /sys/class/kgsl/kgsl-3d0/bus_split
echo 1 > /sys/class/kgsl/kgsl-3d0/force_clk_on
echo 10000 > /sys/class/kgsl/kgsl-3d0/idle_timer
echo 13763 > /sys/class/devfreq/soc:qcom,gpubw/min_freq
echo performance > /sys/class/kgsl/kgsl-3d0/devfreq/governor
echo %i > /sys/class/kgsl/kgsl-3d0/devfreq/max_freq
echo %i > /sys/class/kgsl/kgsl-3d0/devfreq/min_freq
echo 4 > /sys/class/kgsl/kgsl-3d0/max_pwrlevel
echo 4 > /sys/class/kgsl/kgsl-3d0/min_pwrlevel''' %
tuple(GPU_CLOCK_RATE for _ in range(2))]))
return self
def sanity_check(self):
HardwareAndroid.sanity_check(self)
if not self._adb.is_root():
return
result = self._adb.check(' '.join(
['cat',
'/sys/class/power_supply/battery/capacity',
'/sys/devices/system/cpu/online'] + \
['/sys/devices/system/cpu/cpu%i/cpufreq/scaling_cur_freq' % i
for i in range(2, 4)] + \
['/sys/kernel/debug/clk/bimc_clk/measure',
'/sys/class/thermal/thermal_zone22/temp',
'/sys/class/thermal/thermal_zone23/temp']))
expectations = \
[Expectation(int, min_value=30, name='battery', sleeptime=30*60),
Expectation(str, exact_value='2-3', name='online cpus')] + \
[Expectation(int, exact_value=CPU_CLOCK_RATE, name='cpu_%i clock rate' %i)
for i in range(2, 4)] + \
[Expectation(long, min_value=902390000, max_value=902409999,
name='measured ddr clock', sleeptime=10),
Expectation(int, max_value=41000, name='pm8994_tz temperature'),
Expectation(int, max_value=40, name='msm_therm temperature')]
Expectation.check_all(expectations, result.splitlines())
| bsd-3-clause | 4,039,787,392,599,664,600 | 34.244444 | 119 | 0.640921 | false |
NaohiroTamura/ironic | ironic/tests/unit/objects/test_fields.py | 6 | 5020 | # Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import inspect
from ironic.common import exception
from ironic.objects import fields
from ironic.tests import base as test_base
class TestMacAddressField(test_base.TestCase):
def setUp(self):
super(TestMacAddressField, self).setUp()
self.field = fields.MACAddressField()
def test_coerce(self):
values = {'aa:bb:cc:dd:ee:ff': 'aa:bb:cc:dd:ee:ff',
'AA:BB:CC:DD:EE:FF': 'aa:bb:cc:dd:ee:ff',
'AA:bb:cc:11:22:33': 'aa:bb:cc:11:22:33'}
for k in values:
self.assertEqual(values[k], self.field.coerce('obj', 'attr', k))
def test_coerce_bad_values(self):
for v in ('invalid-mac', 'aa-bb-cc-dd-ee-ff'):
self.assertRaises(exception.InvalidMAC,
self.field.coerce, 'obj', 'attr', v)
class TestFlexibleDictField(test_base.TestCase):
def setUp(self):
super(TestFlexibleDictField, self).setUp()
self.field = fields.FlexibleDictField()
def test_coerce(self):
d = {'foo_1': 'bar', 'foo_2': 2, 'foo_3': [], 'foo_4': {}}
self.assertEqual(d, self.field.coerce('obj', 'attr', d))
self.assertEqual({'foo': 'bar'},
self.field.coerce('obj', 'attr', '{"foo": "bar"}'))
def test_coerce_bad_values(self):
self.assertRaises(TypeError, self.field.coerce, 'obj', 'attr', 123)
self.assertRaises(TypeError, self.field.coerce, 'obj', 'attr', True)
def test_coerce_nullable_translation(self):
# non-nullable
self.assertRaises(ValueError, self.field.coerce, 'obj', 'attr', None)
# nullable
self.field = fields.FlexibleDictField(nullable=True)
self.assertEqual({}, self.field.coerce('obj', 'attr', None))
class TestStringFieldThatAcceptsCallable(test_base.TestCase):
def setUp(self):
super(TestStringFieldThatAcceptsCallable, self).setUp()
def test_default_function():
return "default value"
self.test_default_function_hash = hashlib.md5(
inspect.getsource(test_default_function).encode()).hexdigest()
self.field = fields.StringFieldThatAcceptsCallable(
default=test_default_function)
def test_coerce_string(self):
self.assertEqual("value", self.field.coerce('obj', 'attr', "value"))
def test_coerce_function(self):
def test_function():
return "value"
self.assertEqual("value",
self.field.coerce('obj', 'attr', test_function))
def test_coerce_invalid_type(self):
self.assertRaises(ValueError, self.field.coerce,
'obj', 'attr', ('invalid', 'tuple'))
def test_coerce_function_invalid_type(self):
def test_function():
return ('invalid', 'tuple',)
self.assertRaises(ValueError,
self.field.coerce, 'obj', 'attr', test_function)
def test_coerce_default_as_function(self):
self.assertEqual("default value",
self.field.coerce('obj', 'attr', None))
def test__repr__includes_default_function_name_and_source_hash(self):
expected = ('StringAcceptsCallable(default=test_default_function-%s,'
'nullable=False)' % self.test_default_function_hash)
self.assertEqual(expected, repr(self.field))
class TestNotificationLevelField(test_base.TestCase):
def setUp(self):
super(TestNotificationLevelField, self).setUp()
self.field = fields.NotificationLevelField()
def test_coerce_good_value(self):
self.assertEqual(fields.NotificationLevel.WARNING,
self.field.coerce('obj', 'attr', 'warning'))
def test_coerce_bad_value(self):
self.assertRaises(ValueError, self.field.coerce, 'obj', 'attr',
'not_a_priority')
class TestNotificationStatusField(test_base.TestCase):
def setUp(self):
super(TestNotificationStatusField, self).setUp()
self.field = fields.NotificationStatusField()
def test_coerce_good_value(self):
self.assertEqual(fields.NotificationStatus.START,
self.field.coerce('obj', 'attr', 'start'))
def test_coerce_bad_value(self):
self.assertRaises(ValueError, self.field.coerce, 'obj', 'attr',
'not_a_priority')
| apache-2.0 | -2,773,689,864,405,893,000 | 35.642336 | 78 | 0.625896 | false |
yaroslav-tarasov/avango | avango-skelanim/python/__init__.py | 3 | 1696 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2008 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
from ._skelanim import *
import avango.nodefactory
# nodes = avango.nodefactory.NodeFactory(module=__name__)
nodes = avango.nodefactory.NodeFactory('av::gua::skelanim::')
| lgpl-3.0 | -359,051,954,670,236,900 | 59.571429 | 74 | 0.40625 | false |
aquamatt/Peloton | src/peloton/utils/tests/testConfig.py | 1 | 3102 | # $Id: testConfig.py 106 2008-04-04 10:47:50Z mp $
#
# Copyright (c) 2007-2008 ReThought Limited and Peloton Contributors
# All Rights Reserved
# See LICENSE for details
""" Test the peloton.utils.config code """
from unittest import TestCase
from peloton.utils.config import findTemplateTargetsFor
from peloton.utils.config import PelotonSettings
from peloton.utils.structs import FilteredOptionParser
import os
class Test_templateTools(TestCase):
def setUp(self):
def touch(root, file):
o = open("%s/%s" % (root, file), 'wt')
o.write("hello")
o.close()
cwd = os.getcwd()
root = "%s/resource/templates/MyService" % cwd
os.makedirs(root)
for i in ['m1.xml.genshi','m1.html.genshi','m1.rss.genshi']:
touch(root, i)
for i in ['m2.xml.genshi','m2.html.genshi']:
touch(root, i)
def tearDown(self):
cwd = os.getcwd()
root = "%s/resource/templates/MyService" % cwd
for i in os.listdir(root):
os.unlink("%s/%s" % (root, i) )
os.removedirs(root)
def test_findTemplateTargetsFor(self):
cwd = os.getcwd()
templates = findTemplateTargetsFor(cwd+'/resource','MyService', 'm1')
self.assertEquals(len(templates), 3)
targets = [i[0] for i in templates]
self.assertTrue('xml' in targets)
self.assertTrue('html' in targets)
self.assertTrue('rss' in targets)
templates = findTemplateTargetsFor(cwd+'/resource', 'MyService', 'm2')
targets = [i[0] for i in templates]
self.assertEquals(len(templates), 2)
self.assertTrue('xml' in targets)
self.assertTrue('html' in targets)
class Test_pelotonSettings(TestCase):
def setUp(self):
fdir = os.path.split(__file__)[0]+'/testConfigs'
self.config = \
PelotonSettings(initFile=os.path.abspath(fdir+'/example_conf.pcfg'))
def test_values(self):
self.assertEquals(self.config['a'], 10)
self.assertEquals(self.config['c']['value'],'mango')
def test_repr(self):
newconfig = eval(repr(self.config))
self.assertEquals(newconfig['a'], 10)
self.assertEquals(newconfig['c']['value'],'mango')
def test_attrToItem(self):
self.assertEquals(self.config.a, 10)
self.assertEquals(self.config.c.value,'mango')
def test_assignment(self):
self.config.c['index_value'] = 'index'
self.config.c.attr_value = 'attr' # want this to go as index
self.assertEquals(self.config.c.index_value, 'index')
self.assertEquals(self.config.c.attr_value, 'attr')
self.assertEquals(self.config.c['index_value'], 'index')
self.assertEquals(self.config.c['attr_value'], 'attr')
self.config['d'] = PelotonSettings()
self.config['d'].name='hello'
self.assertEquals(self.config.d.name, 'hello')
self.assertEquals(self.config['d'].name, 'hello')
self.assertEquals(self.config['d']['name'], 'hello')
| bsd-3-clause | 444,120,789,223,818,200 | 36.385542 | 80 | 0.610574 | false |
ArcherSys/ArcherSys | Lib/lib2to3/fixes/fix_nonzero.py | 1 | 1931 | """Fixer for __nonzero__ -> __bool__ methods."""
# Author: Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Name, syms
class FixNonzero(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def' name='__nonzero__'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    """
    def transform(self, node, results):
        name = results["name"]
        new = Name("__bool__", prefix=name.prefix)
        name.replace(new)
| mit | 4,875,776,425,556,398,000 | 26.985507 | 63 | 0.517866 | false |
mKeRix/home-assistant | homeassistant/components/trafikverket_train/sensor.py | 19 | 6918 | """Train information for departures and delays, provided by Trafikverket."""
from datetime import date, datetime, timedelta
import logging
from pytrafikverket import TrafikverketTrain
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY,
CONF_NAME,
CONF_WEEKDAY,
DEVICE_CLASS_TIMESTAMP,
WEEKDAYS,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_TRAINS = "trains"
CONF_FROM = "from"
CONF_TO = "to"
CONF_TIME = "time"
ATTR_DEPARTURE_STATE = "departure_state"
ATTR_CANCELED = "canceled"
ATTR_DELAY_TIME = "number_of_minutes_delayed"
ATTR_PLANNED_TIME = "planned_time"
ATTR_ESTIMATED_TIME = "estimated_time"
ATTR_ACTUAL_TIME = "actual_time"
ATTR_OTHER_INFORMATION = "other_information"
ATTR_DEVIATIONS = "deviations"
ICON = "mdi:train"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_TRAINS): [
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_TO): cv.string,
vol.Required(CONF_FROM): cv.string,
vol.Optional(CONF_TIME): cv.time,
vol.Optional(CONF_WEEKDAY, default=WEEKDAYS): vol.All(
cv.ensure_list, [vol.In(WEEKDAYS)]
),
}
],
}
)
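# An illustrative configuration.yaml entry matching the schema above; the API
# key, station names, time and weekdays are placeholders, not values taken from
# this file:
#
#   sensor:
#     - platform: trafikverket_train
#       api_key: YOUR_API_KEY
#       trains:
#         - name: Morning commute
#           from: Stockholm C
#           to: Uppsala C
#           time: "07:32"
#           weekday: [mon, tue, wed, thu, fri]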
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the departure sensor."""
httpsession = async_get_clientsession(hass)
train_api = TrafikverketTrain(httpsession, config[CONF_API_KEY])
sensors = []
station_cache = {}
for train in config[CONF_TRAINS]:
try:
trainstops = [train[CONF_FROM], train[CONF_TO]]
for station in trainstops:
if station not in station_cache:
station_cache[station] = await train_api.async_get_train_station(
station
)
except ValueError as station_error:
if "Invalid authentication" in station_error.args[0]:
_LOGGER.error("Unable to set up up component: %s", station_error)
return
_LOGGER.error(
"Problem when trying station %s to %s. Error: %s ",
train[CONF_FROM],
train[CONF_TO],
station_error,
)
continue
sensor = TrainSensor(
train_api,
train[CONF_NAME],
station_cache[train[CONF_FROM]],
station_cache[train[CONF_TO]],
train[CONF_WEEKDAY],
train.get(CONF_TIME),
)
sensors.append(sensor)
async_add_entities(sensors, update_before_add=True)
def next_weekday(fromdate, weekday):
"""Return the date of the next time a specific weekday happen."""
days_ahead = weekday - fromdate.weekday()
if days_ahead <= 0:
days_ahead += 7
return fromdate + timedelta(days_ahead)
def next_departuredate(departure):
"""Calculate the next departuredate from an array input of short days."""
today_date = date.today()
today_weekday = date.weekday(today_date)
if WEEKDAYS[today_weekday] in departure:
return today_date
for day in departure:
next_departure = WEEKDAYS.index(day)
if next_departure > today_weekday:
return next_weekday(today_date, next_departure)
return next_weekday(today_date, WEEKDAYS.index(departure[0]))
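# For example, assuming today is a Wednesday:
#   next_departuredate(["mon", "fri"]) -> the date of the coming Friday
#   next_departuredate(["wed"])        -> today's date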
class TrainSensor(Entity):
"""Contains data about a train depature."""
def __init__(self, train_api, name, from_station, to_station, weekday, time):
"""Initialize the sensor."""
self._train_api = train_api
self._name = name
self._from_station = from_station
self._to_station = to_station
self._weekday = weekday
self._time = time
self._state = None
self._departure_state = None
self._delay_in_minutes = None
async def async_update(self):
"""Retrieve latest state."""
if self._time is not None:
departure_day = next_departuredate(self._weekday)
when = datetime.combine(departure_day, self._time)
try:
self._state = await self._train_api.async_get_train_stop(
self._from_station, self._to_station, when
)
except ValueError as output_error:
_LOGGER.error(
"Departure %s encountered a problem: %s", when, output_error
)
else:
when = datetime.now()
self._state = await self._train_api.async_get_next_train_stop(
self._from_station, self._to_station, when
)
self._departure_state = self._state.get_state().name
self._delay_in_minutes = self._state.get_delay_time()
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._state is None:
return None
state = self._state
other_information = None
if state.other_information is not None:
other_information = ", ".join(state.other_information)
deviations = None
if state.deviations is not None:
deviations = ", ".join(state.deviations)
if self._delay_in_minutes is not None:
self._delay_in_minutes = self._delay_in_minutes.total_seconds() / 60
return {
ATTR_DEPARTURE_STATE: self._departure_state,
ATTR_CANCELED: state.canceled,
ATTR_DELAY_TIME: self._delay_in_minutes,
ATTR_PLANNED_TIME: state.advertised_time_at_location,
ATTR_ESTIMATED_TIME: state.estimated_time_at_location,
ATTR_ACTUAL_TIME: state.time_at_location,
ATTR_OTHER_INFORMATION: other_information,
ATTR_DEVIATIONS: deviations,
}
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_TIMESTAMP
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
return ICON
@property
def state(self):
"""Return the departure state."""
state = self._state
if state is not None:
if state.time_at_location is not None:
return state.time_at_location
if state.estimated_time_at_location is not None:
return state.estimated_time_at_location
return state.advertised_time_at_location
return None
| mit | -4,189,729,032,214,131,700 | 32.746341 | 86 | 0.600752 | false |
meteorcloudy/tensorflow | tensorflow/contrib/autograph/operators/control_flow_test.py | 14 | 3204 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.operators import control_flow
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ForLoopTest(test.TestCase):
def test_tensor(self):
s = control_flow.for_stmt(
constant_op.constant([1, 2, 3, 4]),
extra_test=lambda s: True,
body=lambda i, s: (s + i,),
init_state=(0,))
with self.test_session() as sess:
self.assertEqual((10,), sess.run(s))
def test_python(self):
s = control_flow.for_stmt(
range(5),
extra_test=lambda s: True,
body=lambda i, s: (s + i,),
init_state=(0,))
self.assertEqual(10, s)
def test_dataset(self):
to_int32 = lambda i: math_ops.cast(i, dtypes.int32)
s = control_flow.for_stmt(
dataset_ops.Dataset.range(5).map(to_int32),
extra_test=lambda s: True,
body=lambda i, s: (s + i,),
init_state=(0,))
with self.test_session() as sess:
self.assertEqual((10,), sess.run(s))
class WhileLoopTest(test.TestCase):
def test_tensor(self):
n = constant_op.constant(5)
results = control_flow.while_stmt(
test=lambda i, s: i < n,
body=lambda i, s: (i + 1, s + i,),
init_state=(0, 0),
extra_deps=(n,))
with self.test_session() as sess:
self.assertEqual((5, 10), sess.run(results))
def test_python(self):
n = 5
results = control_flow.while_stmt(
test=lambda i, s: i < n,
body=lambda i, s: (i + 1, s + i),
init_state=(0, 0),
extra_deps=(n,))
self.assertEqual((5, 10), results)
class IfStmtTest(test.TestCase):
def test_tensor(self):
def test_if_stmt(cond):
return control_flow.if_stmt(
cond=cond,
body=lambda: 1,
orelse=lambda: -1)
with self.test_session() as sess:
self.assertEqual(1, sess.run(test_if_stmt(constant_op.constant(True))))
self.assertEqual(-1, sess.run(test_if_stmt(constant_op.constant(False))))
def test_python(self):
self.assertEqual(1, control_flow.if_stmt(True, lambda: 1, lambda: -1))
self.assertEqual(-1, control_flow.if_stmt(False, lambda: 1, lambda: -1))
if __name__ == '__main__':
test.main()
| apache-2.0 | 8,365,276,159,819,342,000 | 31.363636 | 80 | 0.634207 | false |
minorua/QGIS | tests/src/python/test_processing_alg_decorator.py | 23 | 5963 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the @alg processing algorithm.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nathan Woodrow'
__date__ = '10.12.2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import sys
import os
import qgis # NOQA
from qgis.testing import unittest, start_app
from qgis.processing import alg
from qgis.core import QgsSettings
from qgis.PyQt.QtCore import QCoreApplication
start_app()
ARGNAME = "TEST_ALG{0}"
HELPSTRING = "TEST_HELP STRING{0}"
def define_new_no_inputs(newid=1):
@alg(name="noinputs", label=alg.tr("Test func"), group="unittest",
group_label=alg.tr("Test label"))
@alg.output(type=str, name="DISTANCE_OUT", label="Distance out")
def testalg(instance, parameters, context, feedback, inputs):
"""
Test doc string text
"""
def define_new_no_outputs_but_sink_instead(newid=1):
@alg(name=ARGNAME.format(newid), label=alg.tr("Test func"), group="unittest",
group_label=alg.tr("Test label"))
@alg.help(HELPSTRING.format(newid))
@alg.input(type=alg.SOURCE, name="INPUT", label="Input layer")
@alg.input(type=alg.DISTANCE, name="DISTANCE", label="Distance", default=30)
@alg.input(type=alg.SINK, name="SINK", label="Output layer")
def testalg(instance, parameters, context, feedback, inputs):
"""
Given a distance will split a line layer into segments of the distance
"""
def define_new_doc_string(newid=1):
@alg(name=ARGNAME.format(newid), label=alg.tr("Test func"), group="unittest",
group_label=alg.tr("Test label"))
@alg.input(type=alg.SOURCE, name="INPUT", label="Input layer")
@alg.output(type=str, name="DISTANCE_OUT", label="Distance out")
def testalg(instance, parameters, context, feedback, inputs):
"""
Test doc string text
"""
def define_new(newid=1):
@alg(name=ARGNAME.format(newid), label=alg.tr("Test func"), group="unittest",
group_label=alg.tr("Test label"))
@alg.help(HELPSTRING.format(newid))
@alg.input(type=alg.SOURCE, name="INPUT", label="Input layer")
@alg.input(type=alg.DISTANCE, name="DISTANCE", label="Distance", default=30)
@alg.input(type=alg.SINK, name="SINK", label="Output layer")
@alg.output(type=str, name="DISTANCE_OUT", label="Distance out")
def testalg(instance, parameters, context, feedback, inputs):
"""
Given a distance will split a line layer into segments of the distance
"""
def cleanup():
alg.instances.clear()
class AlgNoInputs(unittest.TestCase):
def setUp(self):
cleanup()
def test_can_have_no_inputs(self):
define_new_no_inputs()
class AlgNoOutputsButSinkInstead(unittest.TestCase):
def setUp(self):
cleanup()
def test_can_have_no_outputs_if_there_is_destination(self):
define_new_no_outputs_but_sink_instead()
class AlgInstanceTests(unittest.TestCase):
"""
Tests to check the createInstance method will work as expected.
"""
def setUp(self):
cleanup()
define_new()
self.current = alg.instances.pop().createInstance()
def test_correct_number_of_inputs_and_outputs(self):
self.assertEqual(3, len(self.current.inputs))
self.assertEqual(1, len(self.current.outputs))
def test_correct_number_of_inputs_and_outputs_after_init(self):
self.current.initAlgorithm()
defs = self.current.parameterDefinitions()
self.assertEqual(3, len(defs))
inputs = [
("INPUT", "Input layer"),
("DISTANCE", "Distance"),
("SINK", "Output layer"),
]
for count, data in enumerate(inputs):
parmdef = defs[count]
self.assertEqual(data[0], parmdef.name())
self.assertEqual(data[1], parmdef.description())
def test_func_is_set(self):
self.assertIsNotNone(self.current._func)
def test_has_help_from_help_decorator(self):
self.assertEqual(HELPSTRING.format(1), self.current.shortHelpString())
def test_name_and_label(self):
self.assertEqual(ARGNAME.format(1), self.current.name())
self.assertEqual("Test func", self.current.displayName())
def test_group(self):
self.assertEqual("Test label", self.current.group())
self.assertEqual("unittest", self.current.groupId())
class AlgHelpTests(unittest.TestCase):
def test_has_help_from_help_decorator(self):
cleanup()
define_new()
current = alg.instances.pop()
self.assertEqual(HELPSTRING.format(1), current.shortHelpString())
def test_has_help_from_docstring(self):
define_new_doc_string()
current = alg.instances.pop()
self.assertEqual("Test doc string text", current.shortHelpString())
class TestAlg(unittest.TestCase):
def setUp(self):
cleanup()
define_new()
def test_correct_number_of_inputs_and_outputs(self):
current = alg.instances.pop()
self.assertEqual(3, len(current.inputs))
self.assertEqual(1, len(current.outputs))
self.assertTrue(current.has_inputs)
self.assertTrue(current.has_outputs)
def test_correct_number_defined_in_stack_before_and_after(self):
self.assertEqual(1, len(alg.instances))
alg.instances.pop()
self.assertEqual(0, len(alg.instances))
def test_current_has_correct_name(self):
alg.instances.pop()
for i in range(3):
define_new(i)
self.assertEqual(3, len(alg.instances))
        for i in reversed(range(3)):
current = alg.instances.pop()
self.assertEqual(ARGNAME.format(i), current.name())
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -2,386,405,209,042,353,000 | 31.232432 | 81 | 0.650176 | false |
jean/python-docx | docx/section.py | 12 | 5424 | # encoding: utf-8
"""
The |Section| object and related proxy classes.
"""
from __future__ import absolute_import, print_function, unicode_literals
from collections import Sequence
class Sections(Sequence):
"""
Sequence of |Section| objects corresponding to the sections in the
document. Supports ``len()``, iteration, and indexed access.
"""
def __init__(self, document_elm):
super(Sections, self).__init__()
self._document_elm = document_elm
def __getitem__(self, key):
if isinstance(key, slice):
sectPr_lst = self._document_elm.sectPr_lst[key]
return [Section(sectPr) for sectPr in sectPr_lst]
sectPr = self._document_elm.sectPr_lst[key]
return Section(sectPr)
def __iter__(self):
for sectPr in self._document_elm.sectPr_lst:
yield Section(sectPr)
def __len__(self):
return len(self._document_elm.sectPr_lst)
class Section(object):
"""
Document section, providing access to section and page setup settings.
"""
def __init__(self, sectPr):
super(Section, self).__init__()
self._sectPr = sectPr
@property
def bottom_margin(self):
"""
|Length| object representing the bottom margin for all pages in this
section in English Metric Units.
"""
return self._sectPr.bottom_margin
@bottom_margin.setter
def bottom_margin(self, value):
self._sectPr.bottom_margin = value
@property
def footer_distance(self):
"""
|Length| object representing the distance from the bottom edge of the
page to the bottom edge of the footer. |None| if no setting is present
in the XML.
"""
return self._sectPr.footer
@footer_distance.setter
def footer_distance(self, value):
self._sectPr.footer = value
@property
def gutter(self):
"""
|Length| object representing the page gutter size in English Metric
Units for all pages in this section. The page gutter is extra spacing
added to the *inner* margin to ensure even margins after page
binding.
"""
return self._sectPr.gutter
@gutter.setter
def gutter(self, value):
self._sectPr.gutter = value
@property
def header_distance(self):
"""
|Length| object representing the distance from the top edge of the
page to the top edge of the header. |None| if no setting is present
in the XML.
"""
return self._sectPr.header
@header_distance.setter
def header_distance(self, value):
self._sectPr.header = value
@property
def left_margin(self):
"""
|Length| object representing the left margin for all pages in this
section in English Metric Units.
"""
return self._sectPr.left_margin
@left_margin.setter
def left_margin(self, value):
self._sectPr.left_margin = value
@property
def orientation(self):
"""
Member of the :ref:`WdOrientation` enumeration specifying the page
orientation for this section, one of ``WD_ORIENT.PORTRAIT`` or
``WD_ORIENT.LANDSCAPE``.
"""
return self._sectPr.orientation
@orientation.setter
def orientation(self, value):
self._sectPr.orientation = value
@property
def page_height(self):
"""
Total page height used for this section, inclusive of all edge spacing
values such as margins. Page orientation is taken into account, so
for example, its expected value would be ``Inches(8.5)`` for
letter-sized paper when orientation is landscape.
"""
return self._sectPr.page_height
@page_height.setter
def page_height(self, value):
self._sectPr.page_height = value
@property
def page_width(self):
"""
Total page width used for this section, inclusive of all edge spacing
values such as margins. Page orientation is taken into account, so
for example, its expected value would be ``Inches(11)`` for
letter-sized paper when orientation is landscape.
"""
return self._sectPr.page_width
@page_width.setter
def page_width(self, value):
self._sectPr.page_width = value
@property
def right_margin(self):
"""
|Length| object representing the right margin for all pages in this
section in English Metric Units.
"""
return self._sectPr.right_margin
@right_margin.setter
def right_margin(self, value):
self._sectPr.right_margin = value
@property
def start_type(self):
"""
The member of the :ref:`WdSectionStart` enumeration corresponding to
the initial break behavior of this section, e.g.
``WD_SECTION.ODD_PAGE`` if the section should begin on the next odd
page.
"""
return self._sectPr.start_type
@start_type.setter
def start_type(self, value):
self._sectPr.start_type = value
@property
def top_margin(self):
"""
|Length| object representing the top margin for all pages in this
section in English Metric Units.
"""
return self._sectPr.top_margin
@top_margin.setter
def top_margin(self, value):
self._sectPr.top_margin = value
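# A minimal usage sketch; it assumes a file named "demo.docx" exists, and uses
# Document and Inches from the public python-docx API rather than this module:
#
#   from docx import Document
#   from docx.shared import Inches
#
#   document = Document("demo.docx")
#   section = document.sections[0]     # a Section proxy as defined above
#   section.left_margin = Inches(1.5)
#   section.top_margin = Inches(1.0)
#   document.save("demo.docx")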
| mit | -9,065,056,653,536,695,000 | 28.318919 | 78 | 0.620391 | false |
misty-/addons | plugin.video.nratv/default.py | 1 | 4223 | import os
import sys
import xbmc,xbmcaddon
import xbmcplugin
import xbmcgui
import plugintools
addon01 = xbmcaddon.Addon('plugin.video.nratv')
addonname = addon01.getAddonInfo('name')
addon_id = 'plugin.video.nratv'
from addon.common.addon import Addon
addon = Addon(addon_id, sys.argv)
icon = addon01.getAddonInfo('icon') # icon.png in addon directory
fanart = addon01.getAddonInfo('fanart') # fanart.jpg in addon directory
# main menu
def CATEGORIES():
media_item_list('NRA TV Live Stream', 'https://stream1.nra.tv/nratv/ngrp:nratvall/chunklist_b2749440.m3u8','' , icon, fanart)
IDX_YOUTUBE1()
# Create content list
def addDir(name,url,mode,iconimage):
params = {'url':url, 'mode':mode, 'name':name}
addon.add_directory(params, {'title': str(name)}, img = iconimage, fanart = fanart)
# Youtube videos
def IDX_YOUTUBE1():
plugintools.log("nratv1.run")
# Get params
params = plugintools.get_params()
if params.get("action") is None:
main_list1(params)
else:
pass
plugintools.close_item_list()
# Youtube menu
def main_list1(params):
plugintools.log("nratv1.main_list "+repr(params))
plugintools.add_item(
#action="",
title="Youtube Search for 'National Rifle Association'",
url='plugin://plugin.video.youtube/search/?q=National Rifle Association',
thumbnail=icon,
fanart=fanart,
folder=True )
plugintools.add_item(
#action="",
title="NRA TV",
url="plugin://plugin.video.youtube/user/NRANews/",
thumbnail="https://yt3.ggpht.com/-F2_HD0G9laQ/AAAAAAAAAAI/AAAAAAAAAAA/EqzbJJh6MuU/s288-c-k-no-mo-rj-c0xffffff/photo.jpg",
fanart=fanart,
folder=True )
plugintools.add_item(
#action="",
title="NRA",
url="plugin://plugin.video.youtube/user/NRAVideos/",
thumbnail="http://www.southeastradio.ie/wp-content/uploads/2017/10/NRA.jpeg",
fanart=fanart,
folder=True )
plugintools.add_item(
#action="",
title="NRA Pubs",
url="plugin://plugin.video.youtube/user/NRApubs/",
thumbnail="https://yt3.ggpht.com/-K7UP-3Nvibs/AAAAAAAAAAI/AAAAAAAAAAA/XbY5XdSScPg/s288-c-k-no-mo-rj-c0xffffff/photo.jpg",
fanart=fanart,
folder=True )
plugintools.add_item(
#action="",
title="NRA National Firearms Museum",
url="plugin://plugin.video.youtube/user/NFMCurator/",
thumbnail="https://yt3.ggpht.com/-FQ_ClCpa64Q/AAAAAAAAAAI/AAAAAAAAAAA/Do1Cs4h29q8/s288-c-k-no-mo-rj-c0xffffff/photo.jpg",
fanart=fanart,
folder=True )
plugintools.add_item(
#action="",
title="NRA Women",
url="plugin://plugin.video.youtube/user/NRAWomen/",
thumbnail="https://yt3.ggpht.com/-GqGKJRTuZw4/AAAAAAAAAAI/AAAAAAAAAAA/QTfGMN93j0I/s288-c-k-no-mo-rj-c0xffffff/photo.jpg",
fanart=fanart,
folder=True )
plugintools.add_item(
#action="",
title="NRA Freestyle",
url="plugin://plugin.video.youtube/user/nrafreestyle/",
thumbnail="https://yt3.ggpht.com/-mx9RJ3bJfFQ/AAAAAAAAAAI/AAAAAAAAAAA/C7N8I66dj8k/s288-c-k-no-mo-rj-c0xffffff/photo.jpg",
fanart=fanart,
folder=True )
plugintools.add_item(
#action="",
title="NRA General Operations",
url="plugin://plugin.video.youtube/user/GOnraMedia/",
thumbnail="https://yt3.ggpht.com/-c0JMaaNvfWE/AAAAAAAAAAI/AAAAAAAAAAA/PAP-cswAjPw/s288-c-k-no-mo-rj-c0xffffff/photo.jpg",
fanart=fanart,
folder=True )
# Create media items list
def media_item_list(name,url,plot,img,fanart):
    addon.add_video_item({'url': url}, {'title': name, 'plot': plot}, img = img, fanart = fanart, playlist=False)
# Query play, mode, url and name
play = addon.queries.get('play', None)
mode = addon.queries['mode']
url = addon.queries.get('url', '')
name = addon.queries.get('name', '')
# Program flow control
if play:
addon.resolve_url(url.encode('UTF-8')) # <<< Play resolved media url
if mode=='main':
print ""
CATEGORIES()
elif mode=='youtube1':
print ""+url
IDX_YOUTUBE1()
if not play:
addon.end_of_directory() | gpl-3.0 | -2,627,433,504,491,137,000 | 30.522388 | 129 | 0.649065 | false |
jamesyli/solum | solum/tests/common/test_trace_data.py | 2 | 2892 | # Copyright 2014 - Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import solum
from solum.common import context
from solum.common import trace_data
from solum.tests import base
solum.TLS.trace = trace_data.TraceData()
# Just putting highly recognizable values in context
CONTEXT = context.RequestContext(
'_auth_token_', '_user_', '_tenant_', '_domain_', '_user_domain_',
'_project_domain_', '_is_admin_', '_read_only_', '_request_id_',
'_user_name_', '_roles_', '_auth_url_')
class TestTraceData(base.BaseTestCase):
"""Tests the TraceData class."""
def test_auto_clear(self):
"""auto_clear success and then a failure case."""
solum.TLS.trace.auto_clear = True
solum.TLS.trace.auto_clear = False
try:
solum.TLS.trace.auto_clear = 'fail'
except AssertionError:
pass
else:
self.assertTrue(False)
def test_import_context(self):
"""Test importing Oslo RequestContext."""
solum.TLS.trace.clear()
solum.TLS.trace.import_context(CONTEXT)
self.assertEqual(
solum.TLS.trace._user_data,
{'user': '_user_', 'tenant': '_tenant_'})
self.assertEqual(({
'domain': '_domain_',
'instance_uuid': None,
'is_admin': '_is_admin_',
'project_domain': '_project_domain_',
'read_only': '_read_only_',
'roles': '_roles_',
'show_deleted': False,
'user_domain': '_user_domain_',
'user_identity': '_user_ _tenant_ _domain_ '
'_user_domain_ _project_domain_',
'user_name': '_user_name_',
'auth_url': '_auth_url_'
}), solum.TLS.trace._support_data)
def test_info_commands(self):
"""Test trace setting functions."""
solum.TLS.trace.clear()
solum.TLS.trace.request_id = '98765'
solum.TLS.trace.user_info(ip_addr="1.2.3.4", user_id=12345)
solum.TLS.trace.support_info(confidential_data={"a": "b", "c": "d"})
self.assertEqual(
solum.TLS.trace._user_data,
{'ip_addr': '1.2.3.4', 'user_id': 12345})
self.assertEqual(
solum.TLS.trace._support_data,
{'confidential_data': {'a': 'b', 'c': 'd'}})
self.assertEqual(
solum.TLS.trace.request_id,
'98765')
| apache-2.0 | 7,002,892,351,298,845,000 | 36.076923 | 76 | 0.593015 | false |
enableiot/iotkit-samples | api/python/iotkit_client.py | 2 | 12878 | #!/usr/bin/env python
# Copyright (c) 2015, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###################################################################
# This program will:
# * Authenticate a user using existing credentials
# * Create a device
# * Activate the device (but currently does not persist the token)
# * Register 2 time series for the device - one for temperature and
# one for humidity (The component types for these are already
# defined in the account catalog)
# * Send observations for both time series. As configured it will
# send one per hour for the last 25 hours for each time series.
# * Read the observations back and display the count.
#
# To use:
# * On the web:
# * Go to https://dashboard.us.enableiot.com
# * Register - be sure to click the "Sign Up Here" link. Do not use
#      any of the OAuth options, or you will not be able to use the API.
# * Verify your email address
# * Enter a name for your account
# * Below line 39 in this file:
# * Update your username, password and account_name
# * Update the proxy address if required
# * Update the device id below. The device id MUST be unique or
# the step to create the device will fail
# * Install the python "requests" library. You can use Python
# virtual environments, or install it globally:
# $ pip install requests
# * Run the program
# $ python iotkit_client.py
#
import sys
import requests
import json
import uuid
import time
import random
#####################################
# Set these values first
#####################################
host = "dashboard.us.enableiot.com"
proxies = {
# "https": "http://proxy.example.com:8080"
}
username = "[email protected]"
password = "myPassword"
account_name = "myAccountName"
#this will create a device with this id - error if it already exists
device_id = "57798f4b-2b1c-4cea-84f1-ac45bf6ae9a2"
# this will create {observations_per_hour} observations per hour for {days_of_data} days
observations_per_hour = 1
days_of_data = 1
verify = True # whether to verify certs
#####################################
api_root = "/v1/api"
base_url = "https://{0}{1}".format(host, api_root)
device_name = "Device-{0}".format(device_id)
g_user_token = ""
g_device_token = ""
def main():
global g_user_token, g_device_token
# get an authentication token for use in the following API calls.
# It will be put in every header by get_user_headers()
g_user_token = get_token(username, password)
# get my user_id (uid) within the Intel IoT Analytics Platform
uid = get_user_id()
print "UserId: {0}".format(uid)
# for all the accounts I have access to, find the first account
# with the name {account_name} and return the account_id (aid)
aid = get_account_id(uid, account_name)
print "AccountId: {0}".format(aid)
# create a new device within the account - error if a device with
# device_id already exists in the system even if it's in another account
create_device(aid, device_id, device_name)
# refresh the activation code. It can be used any number of times
# in the next 60 minutes to activate devices.
ac = generate_activation_code(aid)
print "Activation code: {0}".format(ac)
# activate the device. This returns an authentication token that the device
# can use to register time series and send observations. It will be put in
# every header for device calls by get_device_headers(). You MUST persist
    # this if you want to send additional observations at a later time.
g_device_token = activate(aid, device_id, ac)
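    # A minimal sketch of persisting that token for later runs; the file name
    # below is illustrative only and not part of the original sample:
    # with open("device_token.json", "w") as f:
    #     json.dump({"device_id": device_id, "device_token": g_device_token}, f)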
# this registers a time series for this device. The component will have a
# Component Type of "temperature.v1.0" which defines the data type of the
# value, the format, the unit of measure, etc. This way, we don't need to
# specify all of that here.
# Within the scope of the device, this time series will be named "temp".
# This call returns the component_id (cid) which is globally unique
# within the Intel IoT Analytics platform.
cid = create_component(aid, device_id, "temperature.v1.0", "temp")
print "ComponentID (cid): {0}".format(cid)
# create some random observations around 23 +/- 1 in the new {cid} time series.
create_observations(aid, device_id, cid, 23, 1)
# read back the observation we just created.
o = get_observations(aid, device_id, cid)
print_observation_counts(o)
# create a second time series for humidity.
cid2 = create_component(aid, device_id, "humidity.v1.0", "humidity")
print "ComponentID (cid): {0}".format(cid2)
# create some random observations around 21 +/- 1
create_observations(aid, device_id, cid2, 21, 1)
o2 = get_observations(aid, device_id, cid2)
print_observation_counts(o2)
def get_user_headers():
headers = {
'Authorization': 'Bearer ' + g_user_token,
'content-type': 'application/json'
}
#print "Headers = " + str(headers)
return headers
def get_device_headers():
headers = {
'Authorization': 'Bearer ' + g_device_token,
'content-type': 'application/json'
}
#print "Headers = " + str(headers)
return headers
def check(resp, code):
if resp.status_code != code:
print "Expected {0}. Got {1} {2}".format(code, resp.status_code, resp.text)
sys.exit(1)
# Given a username and password, get the user token
def get_token(username, password):
url = "{0}/auth/token".format(base_url)
headers = {'content-type': 'application/json'}
payload = {"username": username, "password": password}
data = json.dumps(payload)
resp = requests.post(url, data=data, headers=headers, proxies=proxies, verify=verify)
check(resp, 200)
js = resp.json()
token = js['token']
return token
# given a user token, get the user_id
def get_user_id():
url = "{0}/auth/tokenInfo".format(base_url)
resp = requests.get(url, headers=get_user_headers(), proxies=proxies, verify=verify)
check(resp, 200)
js = resp.json()
#print js
user_id = js["payload"]["sub"]
return user_id
# given a user_id, get the account_id of the associated account with account_name
# if there are multiple accounts with the same name, return one of them
def get_account_id(user_id, account_name):
url = "{0}/users/{1}".format(base_url, user_id)
resp = requests.get(url, headers=get_user_headers(), proxies=proxies, verify=verify)
check(resp, 200)
js = resp.json()
if 'accounts' in js:
accounts = js["accounts"]
for k, v in accounts.iteritems():
if 'name' in v and v["name"] == account_name:
return k
print "Account name {0} not found.".format(account_name)
print "Available accounts are: {0}".format([v["name"] for k, v in accounts.iteritems()])
return None
# create a device
def create_device(account, device_id, device_name):
url = "{0}/accounts/{1}/devices".format(base_url, account)
device = {
"deviceId": str(device_id),
"gatewayId": str(device_id),
"name": device_name,
"tags": ["US", "California", "San Francisco"],
# if the device will be static, use this
# to remember where you put it
#"loc": [37.783944, -122.401289, 17],
"attributes": {
"vendor": "intel",
"platform": "x86",
"os": "linux"
}
}
data = json.dumps(device)
resp = requests.post(url, data=data, headers=get_user_headers(), proxies=proxies, verify=verify)
check(resp, 201)
return resp
# Generate an activation code and return it
# This activation code will be good for 60 minutes
def generate_activation_code(account_id):
url = "{0}/accounts/{1}/activationcode/refresh".format(base_url, account_id)
resp = requests.put(url, headers=get_user_headers(), proxies=proxies, verify=verify)
check(resp, 200)
js = resp.json()
activation_code = js["activationCode"]
return activation_code
# Activate a device using a valid activation code
def activate(account_id, device_id, activation_code):
url = "{0}/accounts/{1}/devices/{2}/activation".format(base_url, account_id, device_id)
activation = {
"activationCode": activation_code
}
data = json.dumps(activation)
resp = requests.put(url, data=data, headers=get_user_headers(), proxies=proxies, verify=verify)
check(resp, 200)
js = resp.json()
if "deviceToken" in js:
token = js["deviceToken"]
return token
else:
print js
sys.exit(1)
# Given an account_id and device_id, and a component type name and name - create a component and return the cid
def create_component(account_id, device_id, component_type_name, name):
url = "{0}/accounts/{1}/devices/{2}/components".format(base_url, account_id, device_id)
component = {
"type": component_type_name,
"name": name,
"cid": str(uuid.uuid4())
}
data = json.dumps(component)
resp = requests.post(url, data=data, headers=get_device_headers(), proxies=proxies, verify=verify)
check(resp, 201)
js = resp.json()
return js["cid"]
# Create several observations and submit them
# Create {observations_per_hour} observations per hour for {days_of_data} days
def create_observations(account_id, device_id, cid, mid, rang):
url = "{0}/data/{1}".format(base_url, device_id)
now = int(time.time()) * 1000;
start = now - days_of_data * 24 * 60 * 60 * 1000
body = {
"on": start,
"accountId": account_id,
"data": []
}
# n observation per hour per day
for i in range(int(days_of_data * 24 * observations_per_hour) + 1):
val = round(mid - rang + (random.random() * rang * 2), 1) # random number from mid-range to mid+range
#print "val={0}".format(val)
o = {
"componentId": cid,
"on": start + i * (60 / observations_per_hour) * 60 * 1000,
# if the device is mobile, you can record where it was when
# this observation was captured
#"loc": [ 45.5434085, -122.654422, 124.3 ],
"value": str(val),
"attributes": {
"i": i
}
}
body["data"].append(o)
data = json.dumps(body)
#print "data={0}".format(data)
resp = requests.post(url, data=data, headers=get_device_headers(), proxies=proxies, verify=verify)
check(resp, 201)
#get_observations
def get_observations(account_id, device_id, component_id):
url = "{0}/accounts/{1}/data/search".format(base_url, account_id)
search = {
"from": 0,
"targetFilter": {
"deviceList": [device_id]
},
"metrics": [
{
"id": component_id
}
]
# This will include lat, lon and alt keys
#,"queryMeasureLocation": True
}
data = json.dumps(search)
resp = requests.post(url, data=data, headers=get_user_headers(), proxies=proxies, verify=verify)
check(resp, 200)
js = resp.json()
return js
# print all of the device names and observation counts, sorted by device name
def print_observation_counts(js): # js is result of /accounts/{account}/data/search
if 'series' in js:
series = js["series"]
series = sorted(series, key=lambda v: v["deviceName"])
for v in series:
print "Device: {0} Count: {1}".format(v["deviceName"], len(v["points"]))
if __name__ == "__main__":
main() | bsd-2-clause | -6,109,433,356,895,974,000 | 35.797143 | 111 | 0.645364 | false |
messagebird/python-rest-api | messagebird/base_list.py | 1 | 1174 | from messagebird.base import Base
class Links(Base):
def __init__(self):
self.first = None
self.previous = None
self.next = None
self.last = None
class BaseList(Base):
def __init__(self, item_type):
"""When setting items, they are instantiated as objects of type item_type."""
self.limit = None
self.offset = None
self.count = None
self.totalCount = None
self._links = None
self._items = None
self.itemType = item_type
@property
def links(self):
return self._links
@links.setter
def links(self, value):
self._links = Links().load(value)
@property
def items(self):
return self._items
@items.setter
def items(self, value):
"""Create typed objects from the dicts."""
items = []
if value is not None:
for item in value:
items.append(self.itemType().load(item))
self._items = items
def __str__(self):
items_count = 0 if self.items is None else len(self.items)
return "%s with %d items.\n" % (str(self.__class__), items_count)
| bsd-2-clause | 3,953,238,563,114,596,400 | 22.48 | 85 | 0.560477 | false |
bioothod/zbuilder | zbuilder.py | 1 | 7249 | #!/usr/bin/python
import argparse
import docker
import json
import logging
import os
import shutil
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO)
class zbuilder():
def __init__(self, config):
js = json.load(config)
self.docker_files = []
self.build_succeeded_file = "/tmp/build_succeeded"
packages = js.get("packages")
if not packages:
logging.error("core: there is no 'packages' object, nothing to build")
return
logging.info("Starting parse different build types")
for package_type, package in packages.items():
images = []
if package_type == "deb":
img = js.get("deb-images")
if img:
images += img
elif package_type == "rpm":
img = js.get("rpm-images")
if img:
images += img
else:
logging.error("%s: unsupported package type", package_type)
continue
logging.info("%s: starting to parse commands", package_type)
pre_build_commands = package.get("pre-build-commands")
build_commands = package.get("build-commands")
if build_commands:
build_commands.append("echo success > %s" % (self.build_succeeded_file))
post_build = package.get("post-build-commands")
final_commands = {}
if post_build:
pbs = post_build.get("success")
if pbs:
final_commands["success"] = pbs
pbf = post_build.get("fail")
if pbf:
final_commands["fail"] = pbf
pba = post_build.get("always")
if pba:
final_commands["always"] = pba
sources = package.get("sources")
if not sources:
logging.error("%s: there is no 'sources' object, nothing to build", package_type)
break
for name, source in sources.items():
logging.info("%s/%s: starting to parse source", package_type, name)
include_images = source.get("include-images")
if include_images:
images += include_images
exclude_images = source.get("exclude-images")
if exclude_images:
tmp = []
for x in images:
if x in exclude_images:
continue
tmp.append(x)
images = tmp
logging.info("%s/%s: images: %s", package_type, name, ', '.join(images))
fetch_commands = []
try:
stype = source["type"]
repo = source["repository"]
branch = source.get("branch", "master")
if stype == "git":
fetch_commands.append("rm -rf %s" % (name))
fetch_commands.append("git clone %s %s" % (repo, name))
fetch_commands.append("cd %s" % (name))
fetch_commands.append("git checkout %s" % (branch))
build_commands.append("cd %s" % (name))
else:
logging.error("%s/%s: unsupported source type '%s'", package_type, name, stype)
continue
except Exception as e:
logging.error("%s/%s: invalid source: %s", package_type, name, e)
continue
logging.info("%s/%s: fetch commands: %s", package_type, name, ', '.join(fetch_commands))
commands = []
try:
commands.append(pre_build_commands)
commands.append(fetch_commands)
commands.append(build_commands)
except Exception as e:
                    logging.warning("%s/%s: could not append command: %s", package_type, name, e)
for image in images:
df = self.generate_dockerfile(name, image, commands, final_commands)
self.docker_files.append(df)
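    # The configuration shape consumed above, with illustrative values (only
    # the key names are taken from the lookups in __init__):
    #
    # {
    #   "deb-images": ["ubuntu:14.04"],
    #   "rpm-images": ["centos:7"],
    #   "packages": {
    #     "deb": {
    #       "pre-build-commands": ["apt-get update"],
    #       "build-commands": ["dpkg-buildpackage -b"],
    #       "post-build-commands": {"success": ["echo ok"],
    #                               "fail": ["cat build.log"],
    #                               "always": ["uname -a"]},
    #       "sources": {
    #         "mypkg": {"type": "git",
    #                   "repository": "https://example.com/mypkg.git",
    #                   "branch": "master"}
    #       }
    #     }
    #   }
    # }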
def generate_dockerfile(self, name, image, commands, final_commands):
df = "Dockerfile.%s.%s" % (name, image)
with open(df, 'w+') as f:
f.write("FROM %s\n" % (image))
f.write("ENV ZBUILDER_IMAGE=%s ZBUILDER_NAME=%s DEBIAN_FRONTEND=noninteractive\n" % (image, name))
f.write("ADD conf.d conf.d\n")
for cmd_set in commands:
cs = "RUN %s\n" % (' && \\\n'.join(cmd_set))
f.write(cs)
success = final_commands.get("success")
if success:
cs = "RUN test -f %s && \\\n %s\n" % (self.build_succeeded_file, ' && \\\n'.join(success))
f.write(cs)
fail = final_commands.get("fail")
if fail:
cs = "RUN test -f %s || \\\n %s\n" % (self.build_succeeded_file, ' && \\\n'.join(fail))
f.write(cs)
always = final_commands.get("always")
if always:
cs = "RUN %s\n" % ' && \\\n'.join(always)
f.write(cs)
return df
def run(self, name = None, build_dir = '.'):
c = docker.Client(base_url='unix://var/run/docker.sock')
for path in self.docker_files:
if name and not name in path:
continue
try:
shutil.rmtree(path="%s/" % build_dir, ignore_errors=True)
os.mkdir("%s/" % build_dir)
shutil.copy(path, "%s/" % build_dir)
shutil.copytree("conf.d", "%s/conf.d" % build_dir)
except Exception as e:
logging.error("Could not copy local content to destination build dir %s: %s",
build_dir, e)
continue
with open("%s.build.log" % (path), "w+") as out:
response = c.build(path=build_dir, dockerfile=path, rm=False, pull=False, forcerm=False)
for r in response:
out.write(r)
logging.info("%s: %s", path, r)
if __name__ == '__main__':
bparser = argparse.ArgumentParser(description='Builder arguments.', add_help=True)
bparser.add_argument("--conf", dest='conf', action='store', type=argparse.FileType('r'),
required=True, help='Input config file.')
bparser.add_argument("--build-dir", dest='build_dir', action='store', default=".",
help='Local directory where build process will run.')
bparser.add_argument("--image", dest='image', action='store',
help='Build only images containing this substring.')
args = bparser.parse_args()
try:
zb = zbuilder(config=args.conf)
try:
zb.run(name=args.image, build_dir=args.build_dir)
except Exception as e:
logging.error("Could not run build, name: %s: %s", args.image, e)
except Exception as e:
logging.error("Could not create zbuilder object: %s", e)
| apache-2.0 | 5,666,204,978,273,247,000 | 37.973118 | 110 | 0.497448 | false |
hippysurfer/scout-records | subs_report.py | 1 | 10965 | # coding: utf-8
"""Online Scout Manager Interface - generate report from subs.
Usage:
subs_report.py [-d] [-u] [--term=<term>] [--email=<address>]
<apiid> <token> <outdir>
subs_report.py (-h | --help)
subs_report.py --version
Options:
<outdir> Output directory for vcard files.
-d,--debug Turn on debug output.
-u, --upload Upload to Drive.
--email=<email> Send to only this email address.
--term=<term> Which OSM term to use [default: current].
-h,--help Show this screen.
--version Show version.
"""
# Setup the OSM access
# In[1]:
import os.path
import osm
from group import Group
import update
import json
import traceback
import logging
import itertools
import smtplib
from docopt import docopt
from datetime import date
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dateutil.tz import tzutc
from pandas.io.json import json_normalize
import pandas as pd
from email.encoders import encode_base64
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
log = logging.getLogger(__name__)
try:
from gdrive_upload import upload
except:
log.warning("Failed to import gdrive_upload.")
DEF_CACHE = "osm.cache"
DEF_CREDS = "osm.creds"
FROM = "Richard Taylor <[email protected]>"
MONTH_MAP = {1: "10", 2: "11", 3: "12", 4: "01", 5: "02", 6: "03", 7: "04", 8: "05", 9: "06", 10: "07", 11: "08",
12: "09"}
def send(to, subject, report_path, fro=FROM):
for dest in to:
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = fro
msg['To'] = dest
fp = open(report_path, 'rb')
file1 = MIMEBase('application', 'vnd.ms-excel')
file1.set_payload(fp.read())
fp.close()
encode_base64(file1)
file1.add_header('Content-Disposition',
'attachment;filename=output.xlsx')
msg.attach(file1)
hostname = 'localhost'
s = smtplib.SMTP(hostname)
try:
s.sendmail(fro, dest, msg.as_string())
except:
log.error(msg.as_string(),
exc_info=True)
s.quit()
def get_status(d):
if not d:
return "Payment Required?"
detail = [_ for _ in d if _['latest'] == '1']
return detail[0]['status']
def fetch_scheme(group, acc, section, scheme, term):
def set_subs_type(d, group=group):
try:
members = group.find_by_scoutid(d['scoutid'])
if len(members) == 0:
print(f"Can't find {d['scoutid']} {d} in OSM")
return members[0]['customisable_data.cf_subs_type_n_g_d_']
except:
if len(members) > 0:
print("failed to find sub type for: {} {} {}".format(
d['scoutid'],
                    repr(members),
traceback.format_exc()))
else:
print("Failed to find scoutid: {} {} (member has probably left the section)".format(
d,
traceback.format_exc()))
return "Unknown"
schedules = acc("ext/finances/onlinepayments/?action=getPaymentSchedule"
"§ionid={}&schemeid={}&termid={}".format(
section['id'], scheme['schemeid'], term))
status = acc("ext/finances/onlinepayments/?action="
"getPaymentStatus§ionid={}&schemeid={}&termid={}".format(
section['id'], scheme['schemeid'], term))
# Fix up wrongly named payment schedule in the Group Subs
if (scheme['name'] == 'Discounted Subscriptions for 7th Lichfield Scout Group' and
section['name'] == 'Subs'):
for payment in schedules['payments']:
if payment['date'] == '2017-02-20':
payment['name'] = '2017 - Spring Term - Part 2'
schedules = [_ for _ in schedules['payments'] if _['archived'] == '0']
try:
data = json_normalize(status['items'])
except:
return pd.DataFrame()
for schedule in schedules:
data[schedule['paymentid']] = data[schedule['paymentid']].apply(
lambda x: get_status(json.loads(x)['status']))
data['subs_type'] = data.apply(set_subs_type, axis=1)
data['section'] = section['name']
data['scheme'] = (
"General Subscriptions"
if scheme['name'].startswith("General Subscriptions")
else "Discounted Subscriptions")
for schedule in schedules:
data.rename(columns={schedule['paymentid']: schedule['name']},
inplace=True)
return data
# In[3]:
def fetch_section(group, acc, section, term):
schemes = acc(
"ext/finances/onlinepayments/?action=getSchemes§ionid={}".format(
section['id']))
# filter only General and Discounted Subscriptions
schemes = [_ for _ in schemes['items'] if (
_['name'].startswith("General Subscriptions") or
_['name'].startswith("Discounted Subscriptions"))]
# Check that we only have two subscriptions remaining. If there is
# more, the rest of the report is going to barf.
if len(schemes) > 2:
log.error("Found more than 2 matching schemes in {}."
"Matching schemes were: {}".format(section['name'],
",".join(schemes)))
c = pd.concat([fetch_scheme(group, acc, section, scheme, term)
for scheme in schemes
if scheme['name'] != 'Camps and Events'],
ignore_index=True)
return c
def _main(osm, auth, outdir, email, term, do_upload):
assert os.path.exists(outdir) and os.path.isdir(outdir)
group = Group(osm, auth, update.MAPPING.keys(), term)
# Nasty hack to pick up the current term if the user did not
# pass in a specific term.
actual_term = list(group._sections.sections.values())[0].term['termid']
acc = group._sections._accessor
sections = [
{'name': 'Paget', 'id': '9960'},
{'name': 'Swinfen', 'id': '17326'},
{'name': 'Maclean', 'id': '14324'},
{'name': 'Rowallan', 'id': '12700'},
{'name': 'Johnson', 'id': '5882'},
{'name': 'Garrick', 'id': '20711'},
{'name': 'Erasmus', 'id': '20707'},
{'name': 'Somers', 'id': '20706'},
{'name': 'Boswell', 'id': '10363'},
{'name': 'Subs', 'id': '33593'}
]
subs_names = ['General Subscriptions', 'Discounted Subscriptions']
subs_types = ['G', 'D']
subs_names_and_types = list(zip(subs_names, subs_types))
all_types = subs_types + ['N', ]
al = pd.concat([fetch_section(group, acc, section, actual_term)
for section in sections], ignore_index=True)
# al[(al['scheme'] == 'Discounted Subscriptions') & (
# al['subs_type'] == 'D')].dropna(axis=1, how='all')
# find all members that do not have at least one subscription to either
# 'Discounted Subscriptions' or 'General Subscriptions'
# filtered by those that have a 'N' in their subscription type.
#
# all_yp_members = group.all_yp_members_without_leaders()
all = [[[_['member_id'],
_['first_name'],
_['last_name'],
_['customisable_data.cf_subs_type_n_g_d_'],
section] for _ in
group.section_yp_members_without_leaders(section)]
for section in group.YP_SECTIONS]
all_in_one = list(itertools.chain.from_iterable(all))
all_members_df = pd.DataFrame(all_in_one, columns=(
'scoutid', 'firstname', 'lastname', 'subs_type', 'section'))
al_only_subs = al[al['scheme'].isin(subs_names)]
# only those that are paying more than one subscription.
members_paying_multiple_subs = al_only_subs[
al_only_subs.duplicated('scoutid', take_last=True) |
al_only_subs.duplicated('scoutid')]
# Calculate file name
frm = datetime((date.today() - relativedelta(months=+1)).year,
(date.today() - relativedelta(months=+1)).month,
4, 0, 0, 0, tzinfo=tzutc())
to = frm + relativedelta(months=+1)
filename = os.path.join(outdir, "{} {} {} {} Subs Report.xls".format(MONTH_MAP[to.month],
to.day - 1,
to.strftime("%b"),
to.year))
with pd.ExcelWriter(filename,
engine='xlsxwriter') as writer:
# Status of all subs.
for scheme in subs_names:
al[al['scheme'] == scheme].dropna(
axis=1, how='all').to_excel(writer, scheme)
# All subs with the correct subs_type
for scheme in subs_names_and_types:
al[(al['scheme'] == scheme[0]) &
(al['subs_type'] == scheme[1])].dropna(
axis=1, how='all').to_excel(writer, scheme[0] + "_OK")
# All subs with the wrong subs type
for scheme in subs_names_and_types:
al[(al['scheme'] == scheme[0]) &
(al['subs_type'] != scheme[1])].dropna(
axis=1, how='all').to_excel(writer, scheme[0] + "_BAD")
# Members not in the subs that their sub_type says they should be.
for scheme in subs_names_and_types:
gen = al[al['scheme'] == scheme[0]].dropna(axis=1, how='all')
all_gen_members = all_members_df[
all_members_df['subs_type'] == scheme[1]]
all_gen_members['scoutid'] = all_gen_members['scoutid'].astype(str)
all_gen_members[~all_gen_members['scoutid'].isin(
gen['scoutid'].values)].to_excel(writer, "Not in " + scheme[0])
# All YP members without their subs_type set to anything.
all_members_df[~all_members_df['subs_type'].isin(
all_types)].to_excel(writer, "Unknown Subs Type")
# Members paying multiple subs
members_paying_multiple_subs.dropna(
axis=1, how='all').to_excel(writer, "Multiple payers")
if email:
send([email, ], "OSM Subs Report", filename)
if do_upload:
from gc_accounts import SECTION_MAP, DRIVE_FOLDERS
if filename is not None:
upload(filename, DRIVE_FOLDERS['Group'],
filename=os.path.splitext(os.path.split(filename)[1])[0])
if __name__ == '__main__':
args = docopt(__doc__, version='OSM 2.0')
if args['--debug']:
level = logging.DEBUG
else:
level = logging.WARN
if args['--term'] in [None, 'current']:
args['--term'] = None
logging.basicConfig(level=level)
log.debug("Debug On\n")
auth = osm.Authorisor(args['<apiid>'], args['<token>'])
auth.load_from_file(open(DEF_CREDS, 'r'))
_main(osm, auth,
args['<outdir>'], args['--email'], args['--term'], args['--upload'])
| gpl-2.0 | 5,661,807,540,519,932,000 | 32.842593 | 113 | 0.560876 | false |
mirror/vbox | src/VBox/ValidationKit/tests/shutdown/tdGuestOsShutdown1.py | 2 | 12973 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
VMM Guest OS shutdown tests.
"""
__copyright__ = \
"""
Copyright (C) 2010-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Standard Python imports.
import os
import sys
import time
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0]
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(g_ksValidationKitDir)
# Validation Kit imports.
from testdriver import vbox
from testdriver import base
from testdriver import reporter
from testdriver import vboxcon
class tdGuestOsBootTest1(vbox.TestDriver):
"""
VMM Unit Tests Set.
Scenario:
- Create VM that corresponds to Guest OS pre-installed on selected HDD
- Start VM and wait for TXS server connection (which is started after Guest successfully booted)
"""
ksSataController = 'SATA Controller'
ksIdeController = 'IDE Controller'
# VM parameters required to run HDD image.
# Format: { HDD image filename: (sKind, HDD controller type) }
kaoVmParams = {
't-win80.vdi': ( 'Windows 8 (64 bit)', ksSataController ),
}
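    # Illustrative (hypothetical) extra entry -- the image would have to exist
    # under sHddPathBase and the first tuple element must be a VM kind that
    # VirtualBox recognises, e.g.:
    #   't-ubuntu.vdi': ( 'Ubuntu (64 bit)', ksSataController ),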
# List of platforms which are able to suspend and resume host automatically.
# In order to add new platform, self._SuspendResume() should be adapted.
    kasSuspendAllowedPlatforms = ( 'darwin', )
kcMsVmStartLimit = 5 * 60000
kcMsVmShutdownLimit = 1 * 60000
def __init__(self):
"""
Reinitialize child class instance.
"""
vbox.TestDriver.__init__(self)
self.sVmName = 'TestVM'
self.sHddName = None
self.sHddPathBase = os.path.join(self.sResourcePath, '4.2', 'nat', 'win80')
self.oVM = None
# TODO: that should be moved to some common place
self.fEnableIOAPIC = True
self.cCpus = 1
self.fEnableNestedPaging = True
self.fEnablePAE = False
self.fSuspendHost = False
self.cSecSuspendTime = 60
self.cShutdownIters = 1
self.fExtraVm = False
self.sExtraVmName = "TestVM-Extra"
self.oExtraVM = None
self.fLocalCatch = False
#
# Overridden methods.
#
def showUsage(self):
"""
Extend usage info
"""
rc = vbox.TestDriver.showUsage(self)
reporter.log(' --boot-hdd <HDD image file name>')
reporter.log(' --cpus <# CPUs>')
reporter.log(' --no-ioapic')
reporter.log(' --no-nested-paging')
reporter.log(' --pae')
reporter.log(' --suspend-host')
reporter.log(' --suspend-time <sec>')
reporter.log(' --shutdown-iters <# iters>')
reporter.log(' --extra-vm')
reporter.log(' --local-catch')
return rc
def parseOption(self, asArgs, iArg):
"""
Extend standard options set
"""
if asArgs[iArg] == '--boot-hdd':
iArg += 1
if iArg >= len(asArgs): raise base.InvalidOption('The "--boot-hdd" option requires an argument')
self.sHddName = asArgs[iArg]
elif asArgs[iArg] == '--cpus':
iArg += 1
if iArg >= len(asArgs): raise base.InvalidOption('The "--cpus" option requires an argument')
self.cCpus = int(asArgs[iArg])
elif asArgs[iArg] == '--no-ioapic':
self.fEnableIOAPIC = False
elif asArgs[iArg] == '--no-nested-paging':
self.fEnableNestedPaging = False
elif asArgs[iArg] == '--pae':
self.fEnablePAE = True
elif asArgs[iArg] == '--suspend-host':
self.fSuspendHost = True
elif asArgs[iArg] == '--suspend-time':
iArg += 1
if iArg >= len(asArgs): raise base.InvalidOption('The "--suspend-time" option requires an argument')
self.cSecSuspendTime = int(asArgs[iArg])
elif asArgs[iArg] == '--shutdown-iters':
iArg += 1
if iArg >= len(asArgs): raise base.InvalidOption('The "--shutdown-iters" option requires an argument')
self.cShutdownIters = int(asArgs[iArg])
elif asArgs[iArg] == '--extra-vm':
self.fExtraVm = True
elif asArgs[iArg] == '--local-catch':
self.fLocalCatch = True
else:
return vbox.TestDriver.parseOption(self, asArgs, iArg)
return iArg + 1
def getResourceSet(self):
"""
Returns a set of file and/or directory names relative to
TESTBOX_PATH_RESOURCES.
"""
return [os.path.join(self.sHddPathBase, sRsrc) for sRsrc in self.kaoVmParams];
def _addVM(self, sVmName, sNicTraceFile=None):
"""
Create VM
"""
# Get VM params specific to HDD image
sKind, sController = self.kaoVmParams[self.sHddName]
# Create VM itself
eNic0AttachType = vboxcon.NetworkAttachmentType_NAT
sHddPath = os.path.join(self.sHddPathBase, self.sHddName)
assert os.path.isfile(sHddPath)
oVM = \
self.createTestVM(sVmName, 1, sKind=sKind, cCpus=self.cCpus,
eNic0AttachType=eNic0AttachType, sDvdImage = self.sVBoxValidationKitIso)
assert oVM is not None
oSession = self.openSession(oVM)
# Attach an HDD
fRc = oSession.attachHd(sHddPath, sController, fImmutable=True)
# Enable HW virt
fRc = fRc and oSession.enableVirtEx(True)
# Enable I/O APIC
fRc = fRc and oSession.enableIoApic(self.fEnableIOAPIC)
# Enable Nested Paging
fRc = fRc and oSession.enableNestedPaging(self.fEnableNestedPaging)
# Enable PAE
fRc = fRc and oSession.enablePae(self.fEnablePAE)
if (sNicTraceFile is not None):
fRc = fRc and oSession.setNicTraceEnabled(True, sNicTraceFile)
# Remote desktop
oSession.setupVrdp(True)
fRc = fRc and oSession.saveSettings()
fRc = fRc and oSession.close()
assert fRc is True
return oVM
def actionConfig(self):
"""
Configure pre-conditions.
"""
if not self.importVBoxApi():
return False
# Save time: do not start VM if there is no way to suspend host
if (self.fSuspendHost is True and sys.platform not in self.kasSuspendAllowedPlatforms):
reporter.log('Platform [%s] is not in the list of supported platforms' % sys.platform)
return False
assert self.sHddName is not None
if self.sHddName not in self.kaoVmParams:
reporter.log('Error: unknown HDD image specified: %s' % self.sHddName)
return False
if (self.fExtraVm is True):
self.oExtraVM = self._addVM(self.sExtraVmName)
self.oVM = self._addVM(self.sVmName)
return vbox.TestDriver.actionConfig(self)
def _SuspendResume(self, cSecTimeout):
"""
Put host into sleep and automatically resume it after specified timeout.
"""
fRc = False
if (sys.platform == 'darwin'):
tsStart = time.time()
fRc = os.system("/usr/bin/pmset relative wake %d" % self.cSecSuspendTime)
fRc |= os.system("/usr/bin/pmset sleepnow")
# Wait for host to wake up
while ((time.time() - tsStart) < self.cSecSuspendTime):
self.sleep(0.1)
return fRc == 0
def _waitKeyboardInterrupt(self):
"""
Idle loop until user press CTRL+C
"""
reporter.log('[LOCAL CATCH]: waiting for keyboard interrupt')
while (True):
try:
self.sleep(1)
except KeyboardInterrupt:
reporter.log('[LOCAL CATCH]: keyboard interrupt occurred')
break
def actionExecute(self):
"""
Execute the testcase itself.
"""
#self.logVmInfo(self.oVM)
reporter.testStart('SHUTDOWN GUEST')
cIter = 0
fRc = True
if (self.fExtraVm is True):
oExtraSession, oExtraTxsSession = self.startVmAndConnectToTxsViaTcp(self.sExtraVmName,
fCdWait=False,
cMsTimeout=self.kcMsVmStartLimit)
if oExtraSession is None or oExtraTxsSession is None:
reporter.error('Unable to start extra VM.')
if (self.fLocalCatch is True):
self._waitKeyboardInterrupt()
reporter.testDone()
return False
while (cIter < self.cShutdownIters):
cIter += 1
reporter.log("Starting iteration #%d." % cIter)
oSession, oTxsSession = self.startVmAndConnectToTxsViaTcp(self.sVmName,
fCdWait=False,
cMsTimeout=self.kcMsVmStartLimit)
if oSession is not None and oTxsSession is not None:
# Wait until guest reported success
reporter.log('Guest started. Connection to TXS service established.')
if (self.fSuspendHost is True):
reporter.log("Disconnect form TXS.")
fRc = fRc and self.txsDisconnect(oSession, oTxsSession)
if (fRc is not True):
reporter.log("Disconnect form TXS failed.")
else:
reporter.log('Put host to sleep and resume it automatically after %d seconds.' % self.cSecSuspendTime)
fRc = fRc and self._SuspendResume(self.cSecSuspendTime)
if (fRc is True):
reporter.log("Sleep/resume success.")
else:
reporter.log("Sleep/resume failed.")
reporter.log("Re-connect to TXS in 10 seconds.")
self.sleep(10)
(fRc, oTxsSession) = self.txsDoConnectViaTcp(oSession, 2 * 60 * 10000)
if (fRc is not True):
reporter.log("Re-connect to TXS failed.")
if (fRc is True):
reporter.log('Attempt to shutdown guest.')
fRc = fRc and oTxsSession.syncShutdown(cMsTimeout=(4 * 60 * 1000))
if (fRc is True):
reporter.log('Shutdown request issued successfully.')
self.waitOnDirectSessionClose(self.oVM, self.kcMsVmShutdownLimit)
reporter.log('Shutdown %s.' % ('success' if fRc is True else 'failed'))
else:
reporter.error('Shutdown request failed.')
# Do not terminate failing VM in order to catch it.
if (fRc is not True and self.fLocalCatch is True):
self._waitKeyboardInterrupt()
break
fRc = fRc and self.terminateVmBySession(oSession)
reporter.log('VM terminated.')
else:
reporter.error('Guest did not start (iteration %d of %d)' % (cIter, self.cShutdownIters))
fRc = False
# Stop if fail
if (fRc is not True):
break
# Local catch at the end.
if (self.fLocalCatch is True):
reporter.log("Test completed. Waiting for user to press CTRL+C.")
self._waitKeyboardInterrupt()
if (self.fExtraVm is True):
fRc = fRc and self.terminateVmBySession(oExtraSession)
reporter.testDone()
return fRc is True
if __name__ == '__main__':
sys.exit(tdGuestOsBootTest1().main(sys.argv))
| gpl-2.0 | 2,482,712,481,723,269,600 | 35.338936 | 126 | 0.57003 | false |
SDSG-Invenio/invenio | invenio/base/scripts/demosite.py | 6 | 5190 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Perform demosite operations."""
from __future__ import print_function
import warnings
warnings.warn("Use of `inveniomanage demosite populate` is being deprecated. "
"Please use `uploader` module to insert demo records.",
PendingDeprecationWarning)
import os
import pkg_resources
import sys
from invenio.ext.script import Manager
manager = Manager(usage=__doc__)
# Shortcuts for manager options to keep code DRY.
option_yes_i_know = manager.option('--yes-i-know', action='store_true',
dest='yes_i_know', help='use with care!')
option_default_data = manager.option('--no-data', action='store_false',
dest='default_data',
help='do not populate tables with '
'default data')
option_file = manager.option('-f', '--file', dest='files',
action='append', help='data file to use')
option_jobid = manager.option('-j', '--job-id', dest='job_id', type=int,
default=0, help='bibsched starting job id')
option_extrainfo = manager.option('-e', '--extra-info', dest='extra_info',
action='append',
help='extraneous parameters')
option_packages = manager.option('-p', '--packages', dest='packages',
action='append',
default=[],
help='package import name (repeteable)')
@option_packages
@option_default_data
@option_file
@option_jobid
@option_extrainfo
@option_yes_i_know
def populate(packages=[], default_data=True, files=None,
job_id=0, extra_info=None, yes_i_know=False):
"""Load demo records. Useful for testing purposes."""
from invenio.utils.text import wrap_text_in_a_box, wait_for_user
## Step 0: confirm deletion
wait_for_user(wrap_text_in_a_box(
"WARNING: You are going to override data in tables!"
))
if not default_data:
        print('>>> Default data has been skipped (--no-data).')
return
if not packages:
packages = ['invenio_demosite.base']
from werkzeug.utils import import_string
from invenio.config import CFG_PREFIX
map(import_string, packages)
from invenio.ext.sqlalchemy import db
print(">>> Going to load demo records...")
db.session.execute("TRUNCATE schTASK")
db.session.commit()
if files is None:
files = [pkg_resources.resource_filename(
'invenio',
os.path.join('testsuite', 'data', 'demo_record_marc_data.xml'))]
# upload demo site files:
bibupload_flags = '-i'
if extra_info is not None and 'force-recids' in extra_info:
bibupload_flags = '-i -r --force'
for f in files:
job_id += 1
for cmd in ["%s/bin/bibupload -u admin %s %s" % (CFG_PREFIX, bibupload_flags, f),
"%s/bin/bibupload %d" % (CFG_PREFIX, job_id)]:
if os.system(cmd):
print("ERROR: failed execution of", cmd)
sys.exit(1)
for cmd in ["bin/bibdocfile --textify --with-ocr --recid 97",
"bin/bibdocfile --textify --all",
"bin/bibindex -u admin",
"bin/bibindex %d" % (job_id + 1,),
"bin/bibindex -u admin -w global",
"bin/bibindex %d" % (job_id + 2,),
"bin/bibreformat -u admin -o HB",
"bin/bibreformat %d" % (job_id + 3,),
"bin/webcoll -u admin",
"bin/webcoll %d" % (job_id + 4,),
"bin/bibrank -u admin",
"bin/bibrank %d" % (job_id + 5,),
"bin/bibsort -u admin -R",
"bin/bibsort %d" % (job_id + 6,),
"bin/oairepositoryupdater -u admin",
"bin/oairepositoryupdater %d" % (job_id + 7,),
"bin/bibupload %d" % (job_id + 8,)]:
cmd = os.path.join(CFG_PREFIX, cmd)
if os.system(cmd):
print("ERROR: failed execution of", cmd)
sys.exit(1)
print(">>> Demo records loaded successfully.")
def main():
"""Start the commandline manager."""
from invenio.base.factory import create_app
app = create_app()
manager.app = app
manager.run()
if __name__ == '__main__':
main()
| gpl-2.0 | -8,919,417,447,294,634,000 | 36.883212 | 89 | 0.574566 | false |
muricoca/crab | setup.py | 10 | 2537 | #!/usr/bin/env python
import os
descr = """Crab is a flexible, fast recommender engine for Python. The engine
aims to provide a rich set of components from which you can construct a
customized recommender system from a set of algorithms."""
DISTNAME = 'scikits.crab'
DESCRIPTION = 'A recommender engine for Python.'
LONG_DESCRIPTION = open('README.md').read()
MAINTAINER = 'Marcel Caraciolo'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://muricoca.github.com/crab/'
LICENSE = 'new BSD'
DOWNLOAD_URL = "http://pypi.python.org/pypi/crab"
VERSION = '0.1'
from numpy.distutils.core import setup
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path,
namespace_packages=['scikits'])
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True,
)
subpackages = ['.'.join(i[0].split('/')) for i in os.walk('scikits') if '__init__.py' in i[2]]
[config.add_subpackage(sub_package) for sub_package in subpackages]
config.add_data_files('scikits/__init__.py')
return config
if __name__ == "__main__":
setup(configuration=configuration,
name=DISTNAME,
version=VERSION,
include_package_data=True,
package_data={
'scikits': [
'crab/datasets/data/*.*',
'crab/datasets/descr/*.*',
]
},
install_requires='numpy',
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
])
| bsd-3-clause | 4,826,715,628,353,942,000 | 32.826667 | 98 | 0.597556 | false |
laurentb/weboob | modules/binck/browser.py | 1 | 9712 | # -*- coding: utf-8 -*-
# Copyright(C) 2016 Edouard Lambert
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from lxml import etree
from io import StringIO
from weboob.browser import LoginBrowser, URL, need_login
from weboob.exceptions import BrowserIncorrectPassword, ActionNeeded
from weboob.browser.exceptions import HTTPNotFound, ServerError
from weboob.tools.capabilities.bank.investments import create_french_liquidity
from .pages import (
LoginPage, HomePage, AccountsPage, OldAccountsPage, HistoryPage, InvestmentPage, InvestDetailPage,
InvestmentListPage, QuestionPage, ChangePassPage, LogonFlowPage, ViewPage, SwitchPage,
HandlePasswordsPage, PostponePasswords,
)
class BinckBrowser(LoginBrowser):
BASEURL = 'https://web.binck.fr'
old_website_connection = False
unique_account = False
login = URL(r'/Logon', LoginPage)
view = URL('/PersonIntroduction/Index', ViewPage)
logon_flow = URL(r'/AmlQuestionnairesOverview/LogonFlow$', LogonFlowPage)
account = URL(r'/PortfolioOverview/Index', AccountsPage)
accounts = URL(r'/PersonAccountOverview/Index', AccountsPage)
old_accounts = URL(r'/AccountsOverview/Index', OldAccountsPage)
account_switch = URL('/Header/SwitchAccount', SwitchPage)
home_page = URL(r'/$',
r'/Home/Index', HomePage)
investment = URL(r'/PortfolioOverview/GetPortfolioOverview', InvestmentPage)
investment_list = URL(r'PortfolioOverview$', InvestmentListPage)
invest_detail = URL(r'/SecurityInformation/Get', InvestDetailPage)
history = URL(r'/TransactionsOverview/GetTransactions',
r'/TransactionsOverview/FilteredOverview', HistoryPage)
questions = URL(r'/FDL_Complex_FR_Compte',
r'/FDL_NonComplex_FR_Compte',
r'FsmaMandatoryQuestionnairesOverview', QuestionPage)
change_pass = URL(r'/ChangePassword/Index',
r'/EditSetting/GetSetting\?code=MutationPassword', ChangePassPage)
handle_passwords = URL(r'/PersonalCredentials/Index', HandlePasswordsPage)
postpone_passwords = URL(r'/PersonalCredentials/PostPone', PostponePasswords)
def deinit(self):
if self.page and self.page.logged:
self.location('https://www.binck.fr/deconnexion-site-client')
super(BinckBrowser, self).deinit()
def do_login(self):
self.login.go().login(self.username, self.password)
if self.login.is_here():
error = self.page.get_error()
# The message for the second error is :
# Vous ne pouvez plus vous servir de cet identifiant pour vous connecter,
# Nous vous prions d'utiliser celui que vous avez récemment créé.
if error and any((
'mot de passe' in error,
'Vous ne pouvez plus vous servir de cet identifiant' in error,
)):
raise BrowserIncorrectPassword(error)
elif error and any((
'Votre compte a été bloqué / clôturé' in error,
'Votre compte est bloqué, veuillez contacter le Service Clients' in error,
)):
raise ActionNeeded(error)
raise AssertionError('Unhandled behavior at login: error is "{}"'.format(error))
@need_login
def switch_account(self, account_id):
self.accounts.stay_or_go()
if self.accounts.is_here():
token = self.page.get_token()
data = {'accountNumber': account_id}
# Important: the "switch" request without the token will return a 500 error
self.account_switch.go(data=data, headers=token)
# We should be automatically redirected to the accounts page:
assert self.accounts.is_here(), 'switch_account did not redirect to AccountsPage properly'
@need_login
def iter_accounts(self):
# If we already know that it is an old website connection,
# we can call old_website_connection() right away.
if self.old_website_connection:
for account in self.iter_old_accounts():
yield account
return
if self.unique_account:
self.account.stay_or_go()
else:
self.accounts.stay_or_go()
if self.page.has_accounts_table():
for a in self.page.iter_accounts():
a._invpage = None
a._histpages = None
self.switch_account(a.id)
# We must get the new token almost everytime we get a new page:
if self.accounts.is_here():
token = self.page.get_token()
# Get valuation_diff from the investment page
try:
data = {'grouping': 'SecurityCategory'}
a.valuation_diff = self.investment.go(data=data, headers=token).get_valuation_diff()
except HTTPNotFound:
# if it is not an invest account, the portfolio link may be present but hidden and return a 404
a.valuation_diff = None
yield a
# Some Binck connections don't have any accounts on the new AccountsPage,
# so we need to fetch them on the OldAccountsPage for now:
else:
self.old_website_connection = True
for account in self.iter_old_accounts():
yield account
@need_login
def iter_old_accounts(self):
self.old_accounts.go()
for a in self.page.iter_accounts():
try:
self.old_accounts.stay_or_go().go_to_account(a.id)
except ServerError as exception:
# get html error to parse
parser = etree.HTMLParser()
html_error = etree.parse(StringIO(exception.response.text), parser)
account_error = html_error.xpath('//p[contains(text(), "Votre compte est")]/text()')
if account_error:
raise ActionNeeded(account_error[0])
else:
raise
a.iban = self.page.get_iban()
# Get token
token = self.page.get_token()
# Get investment page
data = {'grouping': "SecurityCategory"}
try:
a._invpage = self.investment.go(data=data, headers=token) \
if self.page.is_investment() else None
except HTTPNotFound:
# if it's not an invest account, the portfolio link may be present but hidden and return a 404
a._invpage = None
if a._invpage:
a.valuation_diff = a._invpage.get_valuation_diff()
# Get history page
data = [('currencyCode', a.currency), ('startDate', ""), ('endDate', "")]
a._histpages = [self.history.go(data=data, headers=token)]
while self.page.doc['EndOfData'] is False:
a._histpages.append(self.history.go(data=self.page.get_nextpage_data(data[:]), headers=token))
yield a
@need_login
def iter_investment(self, account):
if account.balance == 0:
return
# Start with liquidities:
if account._liquidity:
yield create_french_liquidity(account._liquidity)
if self.old_website_connection:
self.old_accounts.stay_or_go().go_to_account(account.id)
if account._invpage:
for inv in account._invpage.iter_investment(currency=account.currency):
if not inv.code:
params = {'securityId': inv._security_id}
self.invest_detail.go(params=params)
if self.invest_detail.is_here():
inv.code, inv.code_type = self.page.get_isin_code_and_type()
yield inv
return
self.switch_account(account.id)
token = self.page.get_token()
try:
data = {'grouping': 'SecurityCategory'}
self.investment.go(data=data, headers=token)
except HTTPNotFound:
return
for inv in self.page.iter_investment(currency=account.currency):
yield inv
@need_login
def iter_history(self, account):
if self.old_website_connection:
if account._histpages:
for page in account._histpages:
for tr in page.iter_history():
yield tr
return
self.switch_account(account.id)
token = self.page.get_token()
data = [('currencyCode', account.currency), ('startDate', ''), ('endDate', '')]
history_pages = [self.history.go(data=data, headers=token)]
while self.page.doc['EndOfData'] is False:
history_pages.append(self.history.go(data=self.page.get_nextpage_data(data[:]), headers=token))
for page in history_pages:
for tr in page.iter_history():
yield tr
| lgpl-3.0 | -8,604,345,800,985,341,000 | 40.823276 | 115 | 0.610945 | false |
cryptobanana/sdnctrlsim | plot/plot_defaults.py | 2 | 1263 |
from matplotlib import rc, rcParams
DEF_AXIS_LEFT = 0.15
DEF_AXIS_RIGHT = 0.95
DEF_AXIS_BOTTOM = 0.1
DEF_AXIS_TOP = 0.95
DEF_AXIS_WIDTH = DEF_AXIS_RIGHT - DEF_AXIS_LEFT
DEF_AXIS_HEIGHT = DEF_AXIS_TOP - DEF_AXIS_BOTTOM
# add_axes takes [left, bottom, width, height]
DEF_AXES = [DEF_AXIS_LEFT, DEF_AXIS_BOTTOM, DEF_AXIS_WIDTH, DEF_AXIS_HEIGHT]
AXIS_2Y_RIGHT = 0.8
AXIS_2Y_WIDTH = AXIS_2Y_RIGHT - DEF_AXIS_LEFT
AXES_2Y = [DEF_AXIS_LEFT, DEF_AXIS_BOTTOM, AXIS_2Y_WIDTH, DEF_AXIS_HEIGHT]
AXES_LABELSIZE = 24
TICK_LABELSIZE = 24
TEXT_LABELSIZE = 24
COLOR_LIGHTGRAY = '#cccccc'
#COLOR_HLINES = '#606060'
COLOR_HLINES = 'black'
HLINE_LABELSIZE = 24
HLINE_LINEWIDTH = 2
rc('axes', **{'labelsize' : 'large',
'titlesize' : 'large',
'grid' : True})
rc('legend', **{'fontsize': 'large'})
rcParams['axes.labelsize'] = AXES_LABELSIZE
rcParams['xtick.labelsize'] = TICK_LABELSIZE
rcParams['ytick.labelsize'] = TICK_LABELSIZE
rcParams['xtick.major.pad'] = 4
rcParams['ytick.major.pad'] = 6
rcParams['figure.subplot.bottom'] = DEF_AXIS_LEFT
rcParams['figure.subplot.left'] = DEF_AXIS_LEFT
rcParams['figure.subplot.right'] = DEF_AXIS_RIGHT
rcParams['lines.linewidth'] = 2
rcParams['grid.color'] = COLOR_LIGHTGRAY
rcParams['grid.linewidth'] = 0.6
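# Illustrative usage sketch (assumes this module is importable as
# plot_defaults): importing it is enough to apply the rc/rcParams above, and
# DEF_AXES can be handed straight to Figure.add_axes().
#
#   import matplotlib.pyplot as plt
#   import plot_defaults
#   fig = plt.figure()
#   ax = fig.add_axes(plot_defaults.DEF_AXES)
#   ax.plot(range(10))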
| bsd-3-clause | 1,908,442,648,773,870,000 | 28.372093 | 76 | 0.693587 | false |
bukun/maplet | extor/model/ext_category_model.py | 2 | 1423 | # -*- coding:utf-8 -*-
'''
Model for Posts.
'''
from torcms.model.core_tab import TabPost,TabTag
from torcms.model.core_tab import TabPost2Tag
from torcms.model.abc_model import Mabc
from torcms.model.category_model import MCategory
class MExtCategory(Mabc):
'''
Model for Posts.
'''
def __init__(self):
super(MExtCategory, self).__init__()
@staticmethod
def query_by_slug(slug):
'''
        Query all chapters (posts) belonging to the category given by its slug.
'''
cat_rec = MCategory.get_by_slug(slug)
if cat_rec:
cat_id = cat_rec.uid
else:
return None
if cat_id.endswith('00'):
cat_con = TabPost2Tag.par_id == cat_id
else:
cat_con = TabPost2Tag.tag_id == cat_id
recs = TabPost.select().join(
TabPost2Tag,
on=(TabPost.uid == TabPost2Tag.post_id)
).where(
cat_con
).order_by(
TabPost.order.asc()
)
return recs
@staticmethod
def query_all():
'''
        Query all top-level category records.
'''
recs = TabTag.select().where( TabTag.uid.endswith('00')).order_by(TabTag.uid)
return recs
@staticmethod
def query_by_kind(kind):
'''
        Query top-level category records of the given kind.
'''
        recs = TabTag.select().where(TabTag.uid.endswith('00') & (TabTag.kind == kind)).order_by(TabTag.uid)
return recs | mit | -8,194,644,219,696,702,000 | 19.411765 | 108 | 0.534968 | false |
TheCodingMonkeys/checkin-at-fmi | checkinatfmi_project/lends/models.py | 1 | 1143 | from datetime import datetime
from django.db import models
import checkinatfmi.translations_bg as translate
from identifications.models import Book
from identifications.models import Cardowner
class LendRequest(models.Model):
WAITING = 'W'
FOR_LEND = 'L'
RETURNED = 'R'
DELAYED = 'D'
CANCELED = 'C'
REQUEST_STATUSES = (
(WAITING, translate.waiting),
(FOR_LEND, translate.for_lend),
(RETURNED, translate.returned),
(DELAYED, translate.delayed),
(CANCELED, translate.canceled),
)
date = models.DateTimeField(default=datetime.now, verbose_name = translate.date)
book = models.ForeignKey('identifications.Book', verbose_name = translate.book)
requester = models.ForeignKey('identifications.Cardowner', verbose_name = translate.requester)
status = models.CharField(choices=REQUEST_STATUSES, max_length=2, default=WAITING, verbose_name = translate.state)
def __unicode__(self):
return u"%s -> %s" % (self.requester, self.book)
class Meta:
verbose_name = translate.lend_request
verbose_name_plural = translate.lend_requests
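# Illustrative sketch (hypothetical objects -- assumes a Book and a Cardowner
# row already exist in the database):
#
#   req = LendRequest.objects.create(book=some_book, requester=some_owner)
#   req.status = LendRequest.FOR_LEND
#   req.save()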
| agpl-3.0 | 1,781,926,424,923,768,600 | 29.891892 | 118 | 0.692038 | false |
jodal/pyspotify | tests/test_utils.py | 3 | 15680 | # encoding: utf-8
from __future__ import unicode_literals
import unittest
import spotify
from spotify import utils
import tests
from tests import mock
class EventEmitterTest(unittest.TestCase):
def test_listener_receives_event_args(self):
listener_mock = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock)
emitter.emit('some_event', 'abc', 'def')
listener_mock.assert_called_with('abc', 'def')
def test_listener_receives_both_user_and_event_args(self):
listener_mock = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock, 1, 2, 3)
emitter.emit('some_event', 'abc')
listener_mock.assert_called_with('abc', 1, 2, 3)
def test_multiple_listeners_for_same_event(self):
listener_mock1 = mock.Mock()
listener_mock2 = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock1, 1, 2, 3)
emitter.on('some_event', listener_mock2, 4, 5)
emitter.emit('some_event', 'abc')
listener_mock1.assert_called_with('abc', 1, 2, 3)
listener_mock2.assert_called_with('abc', 4, 5)
def test_removing_a_listener(self):
listener_mock1 = mock.Mock()
listener_mock2 = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock1, 123)
emitter.on('some_event', listener_mock1, 456)
emitter.on('some_event', listener_mock2, 78)
emitter.off('some_event', listener_mock1)
emitter.emit('some_event')
self.assertEqual(listener_mock1.call_count, 0)
listener_mock2.assert_called_with(78)
def test_removing_all_listeners_for_an_event(self):
listener_mock1 = mock.Mock()
listener_mock2 = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock1)
emitter.on('some_event', listener_mock2)
emitter.off('some_event')
emitter.emit('some_event')
self.assertEqual(listener_mock1.call_count, 0)
self.assertEqual(listener_mock2.call_count, 0)
def test_removing_all_listeners_for_all_events(self):
listener_mock1 = mock.Mock()
listener_mock2 = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock1)
emitter.on('another_event', listener_mock2)
emitter.off()
emitter.emit('some_event')
emitter.emit('another_event')
self.assertEqual(listener_mock1.call_count, 0)
self.assertEqual(listener_mock2.call_count, 0)
def test_listener_returning_false_is_removed(self):
listener_mock1 = mock.Mock(return_value=False)
listener_mock2 = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock1)
emitter.on('some_event', listener_mock2)
emitter.emit('some_event')
emitter.emit('some_event')
self.assertEqual(listener_mock1.call_count, 1)
self.assertEqual(listener_mock2.call_count, 2)
def test_num_listeners_returns_total_number_of_listeners(self):
listener_mock1 = mock.Mock()
listener_mock2 = mock.Mock()
emitter = utils.EventEmitter()
self.assertEqual(emitter.num_listeners(), 0)
emitter.on('some_event', listener_mock1)
self.assertEqual(emitter.num_listeners(), 1)
emitter.on('another_event', listener_mock1)
emitter.on('another_event', listener_mock2)
self.assertEqual(emitter.num_listeners(), 3)
def test_num_listeners_returns_number_of_listeners_for_event(self):
listener_mock1 = mock.Mock()
listener_mock2 = mock.Mock()
emitter = utils.EventEmitter()
self.assertEqual(emitter.num_listeners('unknown_event'), 0)
emitter.on('some_event', listener_mock1)
self.assertEqual(emitter.num_listeners('some_event'), 1)
emitter.on('another_event', listener_mock1)
emitter.on('another_event', listener_mock2)
self.assertEqual(emitter.num_listeners('another_event'), 2)
def test_call_fails_if_zero_listeners_for_event(self):
emitter = utils.EventEmitter()
with self.assertRaises(AssertionError):
emitter.call('some_event')
def test_call_fails_if_multiple_listeners_for_event(self):
listener_mock1 = mock.Mock()
listener_mock2 = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock1)
emitter.on('some_event', listener_mock2)
with self.assertRaises(AssertionError):
emitter.call('some_event')
def test_call_calls_and_returns_result_of_a_single_listener(self):
listener_mock = mock.Mock()
emitter = utils.EventEmitter()
emitter.on('some_event', listener_mock, 1, 2, 3)
result = emitter.call('some_event', 'abc')
listener_mock.assert_called_with('abc', 1, 2, 3)
self.assertEqual(result, listener_mock.return_value)
class IntEnumTest(unittest.TestCase):
def setUp(self):
class Foo(utils.IntEnum):
pass
self.Foo = Foo
self.Foo.add('bar', 1)
self.Foo.add('baz', 2)
def test_has_pretty_repr(self):
self.assertEqual(repr(self.Foo.bar), '<Foo.bar: 1>')
self.assertEqual(repr(self.Foo.baz), '<Foo.baz: 2>')
def test_is_equal_to_the_int_value(self):
self.assertEqual(self.Foo.bar, 1)
self.assertEqual(self.Foo.baz, 2)
def test_two_instances_with_same_value_is_identical(self):
self.assertIs(self.Foo(1), self.Foo.bar)
self.assertIs(self.Foo(2), self.Foo.baz)
self.assertIsNot(self.Foo(2), self.Foo.bar)
self.assertIsNot(self.Foo(1), self.Foo.baz)
@mock.patch('spotify.search.lib', spec=spotify.lib)
class SequenceTest(unittest.TestCase):
def test_adds_ref_to_sp_obj_when_created(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=None,
getitem_func=None)
self.assertEqual(lib_mock.sp_search_add_ref.call_count, 1)
def test_releases_sp_obj_when_sequence_dies(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=None,
getitem_func=None)
seq = None # noqa
tests.gc_collect()
self.assertEqual(lib_mock.sp_search_release.call_count, 1)
def test_len_calls_len_func(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
len_func = mock.Mock()
len_func.return_value = 0
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=len_func,
getitem_func=None)
result = len(seq)
self.assertEqual(result, 0)
len_func.assert_called_with(sp_search)
def test_getitem_calls_getitem_func(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
getitem_func = mock.Mock()
getitem_func.return_value = mock.sentinel.item_one
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=lambda x: 1,
getitem_func=getitem_func)
result = seq[0]
self.assertEqual(result, mock.sentinel.item_one)
getitem_func.assert_called_with(sp_search, 0)
def test_getitem_with_negative_index(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
getitem_func = mock.Mock()
getitem_func.return_value = mock.sentinel.item_one
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=lambda x: 1,
getitem_func=getitem_func)
result = seq[-1]
self.assertEqual(result, mock.sentinel.item_one)
getitem_func.assert_called_with(sp_search, 0)
def test_getitem_with_slice(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
getitem_func = mock.Mock()
getitem_func.side_effect = [
mock.sentinel.item_one,
mock.sentinel.item_two,
mock.sentinel.item_three,
]
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=lambda x: 3,
getitem_func=getitem_func)
result = seq[0:2]
# Entire collection of length 3 is created as a list
self.assertEqual(getitem_func.call_count, 3)
# Only a subslice of length 2 is returned
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertEqual(result[0], mock.sentinel.item_one)
self.assertEqual(result[1], mock.sentinel.item_two)
def test_getitem_raises_index_error_on_too_low_index(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=lambda x: 1,
getitem_func=None)
with self.assertRaises(IndexError):
seq[-3]
def test_getitem_raises_index_error_on_too_high_index(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=lambda x: 1,
getitem_func=None)
with self.assertRaises(IndexError):
seq[1]
def test_getitem_raises_type_error_on_non_integral_index(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=lambda x: 1,
getitem_func=None)
with self.assertRaises(TypeError):
seq['abc']
def test_repr(self, lib_mock):
sp_search = spotify.ffi.cast('sp_search *', 42)
seq = utils.Sequence(
sp_obj=sp_search,
add_ref_func=lib_mock.sp_search_add_ref,
release_func=lib_mock.sp_search_release,
len_func=lambda x: 1,
getitem_func=lambda s, i: 123)
result = repr(seq)
self.assertEqual(result, 'Sequence([123])')
class ToBytesTest(unittest.TestCase):
def test_unicode_to_bytes_is_encoded_as_utf8(self):
self.assertEqual(utils.to_bytes('æøå'), 'æøå'.encode('utf-8'))
def test_bytes_to_bytes_is_passed_through(self):
self.assertEqual(
utils.to_bytes('æøå'.encode('utf-8')), 'æøå'.encode('utf-8'))
def test_cdata_to_bytes_is_unwrapped(self):
cdata = spotify.ffi.new('char[]', 'æøå'.encode('utf-8'))
self.assertEqual(utils.to_bytes(cdata), 'æøå'.encode('utf-8'))
def test_anything_else_to_bytes_fails(self):
with self.assertRaises(ValueError):
utils.to_bytes([])
with self.assertRaises(ValueError):
utils.to_bytes(123)
class ToBytesOrNoneTest(unittest.TestCase):
def test_null_becomes_none(self):
self.assertEqual(utils.to_bytes_or_none(spotify.ffi.NULL), None)
def test_char_becomes_bytes(self):
result = utils.to_bytes_or_none(spotify.ffi.new('char[]', b'abc'))
self.assertEqual(result, b'abc')
def test_anything_else_fails(self):
with self.assertRaises(ValueError):
utils.to_bytes_or_none(b'abc')
class ToUnicodeTest(unittest.TestCase):
def test_unicode_to_unicode_is_passed_through(self):
self.assertEqual(utils.to_unicode('æøå'), 'æøå')
def test_bytes_to_unicode_is_decoded_as_utf8(self):
self.assertEqual(utils.to_unicode('æøå'.encode('utf-8')), 'æøå')
def test_cdata_to_unicode_is_unwrapped_and_decoded_as_utf8(self):
cdata = spotify.ffi.new('char[]', 'æøå'.encode('utf-8'))
self.assertEqual(utils.to_unicode(cdata), 'æøå')
def test_anything_else_to_unicode_fails(self):
with self.assertRaises(ValueError):
utils.to_unicode([])
with self.assertRaises(ValueError):
utils.to_unicode(123)
class ToUnicodeOrNoneTest(unittest.TestCase):
def test_null_becomes_none(self):
self.assertEqual(utils.to_unicode_or_none(spotify.ffi.NULL), None)
def test_char_becomes_bytes(self):
result = utils.to_unicode_or_none(
spotify.ffi.new('char[]', 'æøå'.encode('utf-8')))
self.assertEqual(result, 'æøå')
def test_anything_else_fails(self):
with self.assertRaises(ValueError):
utils.to_unicode_or_none('æøå')
class ToCharTest(unittest.TestCase):
def test_bytes_becomes_char(self):
result = utils.to_char(b'abc')
self.assertIsInstance(result, spotify.ffi.CData)
self.assertEqual(spotify.ffi.string(result), b'abc')
def test_unicode_becomes_char(self):
result = utils.to_char('æøå')
self.assertIsInstance(result, spotify.ffi.CData)
self.assertEqual(spotify.ffi.string(result).decode('utf-8'), 'æøå')
def test_anything_else_fails(self):
with self.assertRaises(ValueError):
utils.to_char(None)
with self.assertRaises(ValueError):
utils.to_char(123)
class ToCharOrNullTest(unittest.TestCase):
def test_none_becomes_null(self):
self.assertEqual(utils.to_char_or_null(None), spotify.ffi.NULL)
def test_bytes_becomes_char(self):
result = utils.to_char_or_null(b'abc')
self.assertIsInstance(result, spotify.ffi.CData)
self.assertEqual(spotify.ffi.string(result), b'abc')
def test_unicode_becomes_char(self):
result = utils.to_char_or_null('æøå')
self.assertIsInstance(result, spotify.ffi.CData)
self.assertEqual(spotify.ffi.string(result).decode('utf-8'), 'æøå')
def test_anything_else_fails(self):
with self.assertRaises(ValueError):
utils.to_char_or_null(123)
class ToCountryCodeTest(unittest.TestCase):
def test_unicode_to_country_code(self):
self.assertEqual(utils.to_country_code('NO'), 20047)
self.assertEqual(utils.to_country_code('SE'), 21317)
def test_bytes_to_country_code(self):
self.assertEqual(utils.to_country_code(b'NO'), 20047)
self.assertEqual(utils.to_country_code(b'SE'), 21317)
def test_fails_if_not_exactly_two_chars(self):
with self.assertRaises(ValueError):
utils.to_country_code('NOR')
def test_fails_if_not_in_uppercase(self):
with self.assertRaises(ValueError):
utils.to_country_code('no')
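# The expected integers above are just the two ASCII bytes of the country code
# packed big-endian: ord('N') * 256 + ord('O') = 78 * 256 + 79 = 20047 and
# ord('S') * 256 + ord('E') = 83 * 256 + 69 = 21317.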
class ToCountryTest(unittest.TestCase):
def test_to_country(self):
self.assertEqual(utils.to_country(20047), 'NO')
self.assertEqual(utils.to_country(21317), 'SE')
| apache-2.0 | -4,640,322,048,668,185,000 | 32.099576 | 77 | 0.62216 | false |
jfelectron/python-driver | cassandra/murmur3.py | 15 | 2387 | from six.moves import range
import struct
def body_and_tail(data):
l = len(data)
nblocks = l // 16
tail = l % 16
if nblocks:
return struct.unpack_from('qq' * nblocks, data), struct.unpack_from('b' * tail, data, -tail), l
else:
return tuple(), struct.unpack_from('b' * tail, data, -tail), l
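# Worked example: for 20 bytes of input, nblocks == 1 (one 16-byte block read
# back as two signed 64-bit ints) and tail == 4, so the last 4 bytes come back
# as individual signed bytes.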
def rotl64(x, r):
# note: not a general-purpose function because it leaves the high-order bits intact
# suitable for this use case without wasting cycles
mask = 2 ** r - 1
rotated = (x << r) | ((x >> 64 - r) & mask)
return rotated
def fmix(k):
# masking off the 31s bits that would be leftover after >> 33 a 64-bit number
k ^= (k >> 33) & 0x7fffffff
k *= 0xff51afd7ed558ccd
k ^= (k >> 33) & 0x7fffffff
k *= 0xc4ceb9fe1a85ec53
k ^= (k >> 33) & 0x7fffffff
return k
INT64_MAX = int(2 ** 63 - 1)
INT64_MIN = -INT64_MAX - 1
INT64_OVF_OFFSET = INT64_MAX + 1
INT64_OVF_DIV = 2 * INT64_OVF_OFFSET
def truncate_int64(x):
if not INT64_MIN <= x <= INT64_MAX:
x = (x + INT64_OVF_OFFSET) % INT64_OVF_DIV - INT64_OVF_OFFSET
return x
def _murmur3(data):
h1 = h2 = 0
c1 = -8663945395140668459 # 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
body, tail, total_len = body_and_tail(data)
# body
for i in range(0, len(body), 2):
k1 = body[i]
k2 = body[i + 1]
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
h1 = rotl64(h1, 27)
h1 += h2
h1 = h1 * 5 + 0x52dce729
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
h2 = rotl64(h2, 31)
h2 += h1
h2 = h2 * 5 + 0x38495ab5
# tail
k1 = k2 = 0
len_tail = len(tail)
if len_tail > 8:
for i in range(len_tail - 1, 7, -1):
k2 ^= tail[i] << (i - 8) * 8
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
if len_tail:
for i in range(min(7, len_tail - 1), -1, -1):
k1 ^= tail[i] << i * 8
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
# finalization
h1 ^= total_len
h2 ^= total_len
h1 += h2
h2 += h1
h1 = fmix(h1)
h2 = fmix(h2)
h1 += h2
return truncate_int64(h1)
try:
from cassandra.cmurmur3 import murmur3
except ImportError:
murmur3 = _murmur3
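# Illustrative usage sketch: the input must be bytes and the result is a signed
# 64-bit integer (the token space used by Cassandra's Murmur3Partitioner).
#
#   from cassandra.murmur3 import murmur3
#   token = murmur3(b"my_partition_key")  # some value in [-2**63, 2**63 - 1]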
| apache-2.0 | -2,561,335,672,573,787,600 | 20.123894 | 103 | 0.518224 | false |
teonlamont/mne-python | examples/inverse/plot_mixed_source_space_inverse.py | 5 | 5418 | """
=======================================================================
Compute MNE inverse solution on evoked data in a mixed source space
=======================================================================
Create a mixed source space and compute MNE inverse solution on evoked dataset.
"""
# Author: Annalisa Pascarella <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne import setup_volume_source_space
from mne import make_forward_solution
from mne.minimum_norm import make_inverse_operator, apply_inverse
from nilearn import plotting
# Set dir
data_path = sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_evoked = data_dir + '/sample_audvis-ave.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'
fname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'
###############################################################################
# Set up our source space.
# List substructures we are interested in. We select only the
# sub structures we want to include in the source space
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Get a surface-based source space. We could set one up like this::
#
# >>> src = setup_source_space(subject, fname=None, spacing='oct6',
# add_dist=False, subjects_dir=subjects_dir)
#
# But we already have one saved:
src = mne.read_source_spaces(op.join(bem_dir, 'sample-oct-6-src.fif'))
# Now we create a mixed src space by adding the volume regions specified in the
# list labels_vol. First, read the aseg file and the source space bounds
# using the inner skull surface (here using 10mm spacing to save time):
vol_src = setup_volume_source_space(
subject, mri=fname_aseg, pos=7.0, bem=fname_model,
volume_label=labels_vol, subjects_dir=subjects_dir, verbose=True)
# Generate the mixed source space
src += vol_src
# Visualize the source space.
src.plot(subjects_dir=subjects_dir)
n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))
# We could write the mixed source space with::
#
# >>> write_source_spaces(fname_mixed_src, src, overwrite=True)
#
###############################################################################
# Export source positions to nift file:
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True)
plotting.plot_img(nii_fname, cmap='nipy_spectral')
plt.show()
# Compute the fwd matrix
fwd = make_forward_solution(fname_evoked, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False, n_jobs=1)
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
src_fwd = fwd['src']
n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd)))
print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n))
# Load data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname_evoked, condition=condition,
baseline=(None, 0))
noise_cov = mne.read_cov(fname_cov)
# Compute inverse solution and for each epoch
snr = 3.0 # use smaller SNR for raw data
inv_method = 'MNE' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
# Compute inverse operator
inverse_operator = make_inverse_operator(evoked.info, fwd, noise_cov,
depth=None, fixed=False)
stcs = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori=None)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(subject, parc=parc,
subjects_dir=subjects_dir)
# Average the source estimates within each label of the cortical parcellation
# and each sub structure contained in the src space
# If mode = 'mean_flip' this option is used only for the surface cortical label
src = inverse_operator['src']
label_ts = mne.extract_label_time_course([stcs], labels_parc, src,
mode='mean',
allow_empty=True,
return_generator=False)
# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stcs.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stcs.times, label_ts[0][71, :].T, 'r',
label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
axes.legend()
mne.viz.tight_layout()
plt.show()
| bsd-3-clause | -7,522,093,241,928,322,000 | 35.608108 | 79 | 0.622001 | false |
Carson-Shook/bpm | dlanimotes.py | 4 | 3755 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
################################################################################
##
## This file is part of BetterPonymotes.
## Copyright (c) 2012-2015 Typhos.
##
## This program is free software: you can redistribute it and/or modify it
## under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at your
## option) any later version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
## FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
## for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import argparse
import hashlib
import os
import subprocess
import time
import urllib.request
import bplib
import bplib.objects
import bplib.resolve
AutogenHeader = """
/*
* This file is AUTOMATICALLY GENERATED. DO NOT EDIT.
* Generated at %s.
*/
""" % (time.strftime("%c"))
TempFilename = "animote-temp.png"
AnimoteUrlPrefix = "https://ponymotes.net/"
def find_animotes(emotes):
images = {}
for (name, emote) in emotes.items():
if emote.source.variant_matches is None:
emote.source.match_variants()
root = emote.source.variant_matches[emote]
#if root is None:
# print("ERROR:", emote)
if "+animote" in root.tags:
images.setdefault(emote.base_variant().image_url, []).append(emote)
return images
def image_path(url):
clean = bplib.clean_image_url(url)
assert clean.endswith(".png")
filename = "animotes/" + clean[:-4] + ".gif"
return filename
def update_cache(images):
for (i, url) in enumerate(images):
gif_filename = image_path(url)
if os.path.exists(gif_filename):
continue
download_url = bplib.image_download_url(url)
print("[%s/%s] Original URL: %s" % (i + 1, len(images), url))
print("[%s/%s] Download URL: %s" % (i + 1, len(images), download_url))
print("[%s/%s] Target file: %s" % (i + 1, len(images), gif_filename))
print()
req = urllib.request.Request(download_url)
with urllib.request.urlopen(req) as stream:
data = stream.read()
open(TempFilename, "wb").write(data)
subprocess.call(["apng2gif", TempFilename, gif_filename])
os.remove(TempFilename)
def dump_css(file, images):
file.write(AutogenHeader)
for (url, emotes) in images.items():
selectors = []
for emote in emotes:
for variant in emote.variants.values():
if hasattr(variant, "image_url") and variant.image_url == url:
selectors.append(".bpm-emote" + variant.selector())
selector = ",".join(selectors)
new_url = AnimoteUrlPrefix + image_path(url)
s = "%s{background-image:url(%s)!important}\n" % (selector, new_url)
file.write(s)
def main():
parser = argparse.ArgumentParser(description="Download and convert APNG animotes to GIF")
parser.add_argument("-c", "--css", help="Output CSS file", default="build/gif-animotes.css")
args = parser.parse_args()
context = bplib.objects.Context()
context.load_config()
context.load_sources()
emotes, all_emotes = bplib.resolve.resolve_emotes(context)
images = find_animotes(emotes)
update_cache(images)
with open(args.css, "w") as file:
dump_css(file, images)
if __name__ == "__main__":
main()
| agpl-3.0 | -4,976,498,514,931,275,000 | 33.136364 | 96 | 0.616511 | false |
cedrick-f/pySequence | src/rapport.py | 1 | 86883 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##This file is part of pySequence
#############################################################################
#############################################################################
## ##
## rapport ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2012 Cédrick FAURY
##
## pySéquence : aide à la construction
## de Séquences et Progressions pédagogiques
## et à la validation de Projets
# pySequence is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# pySequence is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pySequence; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
Module rapport
**************
Generation of a detailed report of the tasks to be carried out by students during a Project
"""
import os, sys
# if sys.platform != "win32":
# import wxversion
# wxversion.select('2.8')
import wx
import wx.richtext as rt
import richtext
import PyRTFParser
from widgets import messageErreur, scaleImage
from util_path import toSystemEncoding
import images
import io
from draw_cairo2 import getHoraireTxt
import draw_cairo_seq2 as draw_cairo_seq
import draw_cairo_prj2 as draw_cairo_prj
# from draw_cairo_prj2 import ICoulTache, BCoulTache
# from draw_cairo_seq2 import ICoulSeance#, BCoulSeance
# from xml.dom.minidom import parse, parseString
#from wx import ImageFromStream, BitmapFromImage, EmptyIcon
from wx.lib.embeddedimage import PyEmbeddedImage
#from constantes import NOM_PHASE_TACHE#, TypesSeanceCourt
# Module used to test clipboard availability
# (Windows only)
#if sys.platform == "win32":
# import win32clipboard
import random
import time
Styles = {"Titre" : rt.RichTextAttr(),
"Titre 1" : rt.RichTextAttr(),
"Titre 2" : rt.RichTextAttr(),
"Message" : rt.RichTextAttr(),
"MessSens" : rt.RichTextAttr(),
"Sous titre": rt.RichTextAttr(),
"Tableau" : rt.RichTextAttr()}
Styles["Titre"].SetParagraphStyleName("Titre")
Styles["Titre"].SetFontSize(18)
Styles["Titre"].SetTextColour((0,0,0))
Styles["Titre"].SetParagraphSpacingBefore(40)
Styles["Titre"].SetAlignment(wx.TEXT_ALIGNMENT_CENTRE)
Styles["Titre"].SetPageBreak(pageBreak=True)
Styles["Titre 1"].SetParagraphStyleName("Titre 1")
Styles["Titre 1"].SetFontSize(12)
Styles["Titre 1"].SetFontWeight(wx.FONTWEIGHT_BOLD)
Styles["Titre 1"].SetTextColour((0,0,180))
Styles["Titre 1"].SetParagraphSpacingBefore(10)
Styles["Titre 1"].SetAlignment(wx.TEXT_ALIGNMENT_LEFT)
Styles["Titre 1"].SetParagraphSpacingAfter(10)
if int(wx.version()[0]) > 2:
Styles["Titre 1"].SetBulletStyle(wx.TEXT_ATTR_BULLET_STYLE_RIGHT_PARENTHESIS)
#Styles["Titre 1"].SetFontUnderlined(True)
Styles["Titre 2"].SetParagraphStyleName("Titre 2")
Styles["Titre 2"].SetFontSize(11)
Styles["Titre 2"].SetTextColour((0,0,120))
Styles["Titre 2"].SetParagraphSpacingAfter(0)
Styles["Titre 2"].SetParagraphSpacingBefore(10)
Styles["Titre 2"].SetAlignment(wx.TEXT_ALIGNMENT_LEFT)
Styles["Titre 2"].SetFontUnderlined(True)
Styles["Message"].SetParagraphStyleName("Message")
Styles["Message"].SetFontSize(10)
Styles["Message"].SetLeftIndent(80)
#Styles["Message"].SetFontStyle(wx.BOLD)
Styles["Message"].SetParagraphSpacingAfter(10)
Styles["Message"].SetParagraphSpacingBefore(10)
Styles["Message"].SetAlignment(wx.TEXT_ALIGNMENT_LEFT)
Styles["MessSens"].SetParagraphStyleName("MessSens")
Styles["MessSens"].SetFontSize(10)
Styles["MessSens"].SetTextColour((0,0,0))
#Styles["Message"].SetFontStyle(wx.BOLD)
Styles["MessSens"].SetParagraphSpacingAfter(10)
Styles["MessSens"].SetParagraphSpacingBefore(10)
Styles["MessSens"].SetAlignment(wx.TEXT_ALIGNMENT_LEFT)
Styles["MessSens"].SetTabs((800, 2000))
Styles["Tableau"].SetParagraphStyleName("Tableau")
Styles["Tableau"].SetFontSize(10)
#Styles["Tableau"].SetTextColour((0,0,0))
#Styles["Message"].SetFontStyle(wx.BOLD)
#Styles["Tableau"].SetParagraphSpacingAfter(10)
#Styles["Tableau"].SetParagraphSpacingBefore(10)
Styles["Sous titre"].SetFontSize(8)
Styles["Sous titre"].SetFontStyle(wx.ITALIC)#wx.TEXT_ATTR_FONT_ITALIC)
Styles["Sous titre"].SetAlignment(wx.TEXT_ALIGNMENT_CENTRE)
#########################################################################################
class StyleDeTexte:
def __init__(self, font, color):
self.font = font
self.color = color
def applique(self, win, color = None):
if color != None:
self.color = color
win.SetFont(self.font)
win.SetForegroundColour(self.color)
##########################################################################################
#class PanelRapport(wx.Panel):
# def __init__(self, parent, fichierCourant, doc, typ):
# wx.Panel.__init__(self, parent, -1)#,
## style = wx.DEFAULT_FRAME_STYLE)
#
# #
# # Instanciation du rapport RichTextCtrl
# #
# self.rtc = RapportRTF(self, rt.RE_READONLY)#rt.RichTextCtrl(self, style=wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER)
#
# #
# # On rempli le rapport
# #
# self.rtc.Remplir(fichierCourant, doc, typ)
#
# sizer = wx.BoxSizer(wx.VERTICAL)
# sizer.Add(self.rtc, flag = wx.EXPAND)
# self.SetSizer(sizer)
#########################################################################################
class FrameRapport(wx.Frame):
def __init__(self, parent, fichierCourant, doc, typ, eleve = None, hide = False, scale = 1.0):
wx.Frame.__init__(self, parent, -1, "Tâches élèves détaillées",
size=(700, 500))#,
# style = wx.DEFAULT_FRAME_STYLE)
if hide:
self.Hide()
self.SetMinSize((700, -1))
self.parent = parent
self.scale = scale
self.SetIcon(images.getlogoIcon())
self.MakeMenuBar()
self.MakeToolBar()
self.CreateStatusBar()
#
        # Instantiate the RTF report control
#
self.rtc = RapportRTF(self)#rt.RichTextCtrl(self, style=wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER)
wx.CallAfter(self.rtc.SetFocus)
self.rtp = RTPrinting(self)
#
        # Fill in the report
#
self.rtc.Remplir(fichierCourant, doc, typ, eleve)
# def AddRTCHandlers(self):
# # make sure we haven't already added them.
# if rt.RichTextBuffer.FindHandlerByType(rt.RICHTEXT_TYPE_HTML) is not None:
# return
#
# rt.RichTextBuffer.GetHandlers()[0].SetEncoding("iso-8859-1")
## f = rt.RichTextFileHandler()#.SetEncoding("iso-8859-1")
# # This would normally go in your app's OnInit method. I'm
# # not sure why these file handlers are not loaded by
# # default by the C++ richtext code, I guess it's so you
# # can change the name or extension if you wanted...
# HTML = rt.RichTextHTMLHandler()
# HTML.SetEncoding("latin_1")#"iso-8859-1")
# rt.RichTextBuffer.AddHandler(HTML)
# rt.RichTextBuffer.AddHandler(rt.RichTextXMLHandler())
## rt.RichTextBuffer.AddHandler(rt.RICHTEXT_TYPE_RTF)
# # This is needed for the view as HTML option since we tell it
# # to store the images in the memory file system.
# wx.FileSystem.AddHandler(wx.MemoryFSHandler())
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
######################################################################################################
def OnEnter(self, event):
self.SetFocus()
event.Skip()
######################################################################################################
def OnFileSave(self, evt):
self.rtc.Enregistrer("Enregistrer les détails")
# if not self.rtc.GetFilename():
# self.OnFileSaveAs(evt)
# return
# self.rtc.SaveFile()
######################################################################################################
def OnFileSaveAs(self, evt):
self.rtc.EnregistrerSous("Enregistrer les détails")
# wildcard = u"Rich Text Format (.rtf)|*.rtf|" \
# u"Format HTML (.html)|*.html|" \
# u"Fichier texte (.txt)|*.txt"
# types = [0, 3, 2]
# dlg = wx.FileDialog(self, u"Enregistrer les détails",
# wildcard=wildcard,
# style=wx.SAVE)
#
# if dlg.ShowModal() == wx.ID_OK:
# path = dlg.GetPath()
# ext = os.path.splitext(path)[1].lstrip('.')
#
# if os.path.exists(path):
# dlg = wx.MessageDialog(self, u"Le fichier existe déja !\n\n"\
# u"Voulez-vous l'écraser ?",
# u"Fichier existant",
# wx.ICON_WARNING | wx.YES_NO | wx.CANCEL)
# res = dlg.ShowModal()
# dlg.Destroy()
# if res != wx.ID_YES:
# return
#
# if path:
# if ext == 'txt':
# wildcard, types = rt.RichTextBuffer.GetExtWildcard(save=True)
# fileType = 1
# ext = rt.RichTextBuffer.FindHandlerByType(fileType).GetExtension()
# if not path.endswith(ext):
# path += '.' + ext
# self.rtc.SaveFile(path, 1)
# elif ext == 'html':
# handler = rt.RichTextHTMLHandler()
# handler.SetFlags(rt.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY)
# handler.SetFontSizeMapping([7,9,11,12,14,22,100])
# stream = cStringIO.StringIO()
# if handler.SaveStream(self.rtc.GetBuffer(), stream):
# f = open(path, 'w')
# f.write(prefixeHTML+stream.getvalue())#.encode(sys.getdefaultencoding()))
# f.close()
# elif ext == 'rtf':
# import PyRTFParser_
# # Use the custom RTF Handler
# handler = PyRTFParser_.PyRichTextRTFHandler()
# # Save the file with the custom RTF Handler.
# # The custom RTF Handler can take either a wxRichTextCtrl or a wxRichTextBuffer argument.
# handler.SaveFile(self.rtc.GetBuffer(), path)
# dlg.Destroy()
def OnApplyStyle(self, evt):
# self.rtc.ApplyStyle(Styles[evt.GetString()])
self.rtc.SetStyle(self.rtc.GetSelectionRange(), Styles[evt.GetString()])
def OnFileExit(self, evt):
self.Close(True)
def OnBold(self, evt):
self.rtc.ApplyBoldToSelection()
def OnItalic(self, evt):
self.rtc.ApplyItalicToSelection()
def OnUnderline(self, evt):
self.rtc.ApplyUnderlineToSelection()
def OnAlignLeft(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_LEFT)
def OnAlignRight(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_RIGHT)
def OnAlignCenter(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_CENTRE)
def OnIndentMore(self, evt):
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetLeftIndent(attr.GetLeftIndent() + 100)
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
self.rtc.SetStyle(r, attr)
def OnIndentLess(self, evt):
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
if attr.GetLeftIndent() >= 100:
attr.SetLeftIndent(attr.GetLeftIndent() - 100)
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
self.rtc.SetStyle(r, attr)
def OnParagraphSpacingMore(self, evt):
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetParagraphSpacingAfter(attr.GetParagraphSpacingAfter() + 20);
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
self.rtc.SetStyle(r, attr)
def OnParagraphSpacingLess(self, evt):
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
if attr.GetParagraphSpacingAfter() >= 20:
attr.SetParagraphSpacingAfter(attr.GetParagraphSpacingAfter() - 20);
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
self.rtc.SetStyle(r, attr)
def OnLineSpacingSingle(self, evt):
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(10)
self.rtc.SetStyle(r, attr)
def OnLineSpacingHalf(self, evt):
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(15)
self.rtc.SetStyle(r, attr)
def OnLineSpacingDouble(self, evt):
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(20)
self.rtc.SetStyle(r, attr)
def OnFont(self, evt):
if not self.rtc.HasSelection():
return
r = self.rtc.GetSelectionRange()
fontData = wx.FontData()
fontData.EnableEffects(False)
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_FONT)
if self.rtc.GetStyle(self.rtc.GetInsertionPoint(), attr):
fontData.SetInitialFont(attr.GetFont())
dlg = wx.FontDialog(self, fontData)
if dlg.ShowModal() == wx.ID_OK:
fontData = dlg.GetFontData()
font = fontData.GetChosenFont()
if font:
attr.SetFlags(wx.TEXT_ATTR_FONT)
attr.SetFont(font)
self.rtc.SetStyle(r, attr)
dlg.Destroy()
def OnColour(self, evt):
colourData = wx.ColourData()
attr = rt.RichTextAttr()
attr.SetFlags(wx.TEXT_ATTR_TEXT_COLOUR)
if self.rtc.GetStyle(self.rtc.GetInsertionPoint(), attr):
colourData.SetColour(attr.GetTextColour())
dlg = wx.ColourDialog(self, colourData)
if dlg.ShowModal() == wx.ID_OK:
colourData = dlg.GetColourData()
colour = colourData.GetColour()
if colour:
if not self.rtc.HasSelection():
self.rtc.BeginTextColour(colour)
else:
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_TEXT_COLOUR)
attr.SetTextColour(colour)
self.rtc.SetStyle(r, attr)
dlg.Destroy()
def OnUpdateBold(self, evt):
evt.Check(self.rtc.IsSelectionBold())
def OnUpdateItalic(self, evt):
evt.Check(self.rtc.IsSelectionItalics())
def OnUpdateUnderline(self, evt):
evt.Check(self.rtc.IsSelectionUnderlined())
def OnUpdateAlignLeft(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_LEFT))
def OnUpdateAlignCenter(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_CENTRE))
def OnUpdateAlignRight(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_RIGHT))
def ForwardEvent(self, evt):
# The RichTextCtrl can handle menu and update events for undo,
# redo, cut, copy, paste, delete, and select all, so just
# forward the event to it.
self.rtc.ProcessEvent(evt)
def MakeMenuBar(self):
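        """Build the menu bar (Fichier, Edition); a format menu is also created here but not attached to the bar."""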
def doBind(item, handler, updateUI=None):
self.Bind(wx.EVT_MENU, handler, item)
if updateUI is not None:
self.Bind(wx.EVT_UPDATE_UI, updateUI, item)
fileMenu = wx.Menu()
doBind( fileMenu.Append(-1, "&Enregistrer\tCtrl+S", "Enregistrer le rapport"),
self.OnFileSave )
doBind( fileMenu.Append(-1, "&Enregistrer sous...\tF12", "Enregistrer le rapport"),
self.OnFileSaveAs )
fileMenu.AppendSeparator()
doBind( fileMenu.Append(-1, "&Mise en Page...\tCtrl+M", "Règle la mise en page de l'impression"),
self.OnPageSetup )
doBind( fileMenu.Append(-1, "Aperç&u avant impression...\tCtrl+U", "Affiche un aperçu de ce qui sera imprimé"),
self.OnPrintPreview )
doBind( fileMenu.Append(-1, "&Imprimer\tCtrl+P", "Imprime le document"),
self.OnDoPrint )
# doBind( fileMenu.Append(-1, u"&Aperçu HTML", u"Affiche un aperçu HTML"),
# self.OnFileViewHTML )
fileMenu.AppendSeparator()
doBind( fileMenu.Append(-1, "&Quitter\tCtrl+Q", "Quitter le visualisateur de rapport"),
self.OnFileExit )
fileMenu.AppendSeparator()
editMenu = wx.Menu()
doBind( editMenu.Append(wx.ID_UNDO, "&Annuler\tCtrl+Z"),
self.ForwardEvent, self.ForwardEvent)
        doBind( editMenu.Append(wx.ID_REDO, "&Rétablir\tCtrl+Y"),
self.ForwardEvent, self.ForwardEvent )
editMenu.AppendSeparator()
doBind( editMenu.Append(wx.ID_CUT, "Co&uper\tCtrl+X"),
self.ForwardEvent, self.ForwardEvent )
doBind( editMenu.Append(wx.ID_COPY, "&Copier\tCtrl+C"),
self.ForwardEvent, self.ForwardEvent)
doBind( editMenu.Append(wx.ID_PASTE, "Co&ller\tCtrl+V"),
self.ForwardEvent, self.ForwardEvent)
        doBind( editMenu.Append(wx.ID_CLEAR, "&Effacer\tDel"),
self.ForwardEvent, self.ForwardEvent)
editMenu.AppendSeparator()
        doBind( editMenu.Append(wx.ID_SELECTALL, "Sélectionner tout\tCtrl+A"),
self.ForwardEvent, self.ForwardEvent )
#doBind( editMenu.AppendSeparator(), )
#doBind( editMenu.Append(-1, "&Find...\tCtrl+F"), )
#doBind( editMenu.Append(-1, "&Replace...\tCtrl+R"), )
formatMenu = wx.Menu()
doBind( formatMenu.AppendCheckItem(-1, "&Gras\tCtrl+B"),
self.OnBold, self.OnUpdateBold)
doBind( formatMenu.AppendCheckItem(-1, "&Italic\tCtrl+I"),
self.OnItalic, self.OnUpdateItalic)
doBind( formatMenu.AppendCheckItem(-1, "&Soulign�\tCtrl+U"),
self.OnUnderline, self.OnUpdateUnderline)
formatMenu.AppendSeparator()
doBind( formatMenu.AppendCheckItem(-1, "Aligner � &gauche"),
self.OnAlignLeft, self.OnUpdateAlignLeft)
doBind( formatMenu.AppendCheckItem(-1, "&Centrer"),
self.OnAlignCenter, self.OnUpdateAlignCenter)
doBind( formatMenu.AppendCheckItem(-1, "Aligner � &droite"),
self.OnAlignRight, self.OnUpdateAlignRight)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "&Indenter"), self.OnIndentMore)
doBind( formatMenu.Append(-1, "&Desindenter"), self.OnIndentLess)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "&Augmenter l'espace entre paragraphe"), self.OnParagraphSpacingMore)
doBind( formatMenu.Append(-1, "&Diminuer l'espace entre paragraphe"), self.OnParagraphSpacingLess)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "Interligne &simple"), self.OnLineSpacingSingle)
doBind( formatMenu.Append(-1, "Interligne &x1.5"), self.OnLineSpacingHalf)
doBind( formatMenu.Append(-1, "Interligne &double"), self.OnLineSpacingDouble)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "&Police..."), self.OnFont)
mb = wx.MenuBar()
mb.Append(fileMenu, "&Fichier")
mb.Append(editMenu, "&Edition")
self.SetMenuBar(mb)
def MakeToolBar(self):
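        """Build the tool bar: save/print, undo/redo, clipboard, character and paragraph formatting, and a style selector."""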
def doBind(item, handler, updateUI=None):
self.Bind(wx.EVT_TOOL, handler, item)
if updateUI is not None:
self.Bind(wx.EVT_UPDATE_UI, updateUI, item)
# Passage momentané en Anglais (bug de wxpython)
# loc = wx.GetApp().locale.GetSystemLanguage()
# wx.GetApp().locale = wx.Locale(wx.LANGUAGE_ENGLISH)
tbar = self.CreateToolBar()
doBind( tbar.AddTool(-1, "", _rt_save.GetBitmap(),
shortHelp="Enregistrer"), self.OnFileSave)
# bmp = wx.ArtProvider.GetBitmap(wx.ART_PRINT).ConvertToImage().Rescale(17,17,wx.IMAGE_QUALITY_HIGH).ConvertToBitmap()
bmp = scaleImage(images.Icone_print.GetBitmap(), 16*self.scale, 16*self.scale)
doBind( tbar.AddTool(-1, "", bmp,
shortHelp="Imprimer le rapport"), self.OnDoPrint)
tbar.AddSeparator()
doBind( tbar.AddTool(wx.ID_UNDO, "", _rt_undo.GetBitmap(),
shortHelp="Annuler"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_REDO, "", _rt_redo.GetBitmap(),
shortHelp="R�tablir"), self.ForwardEvent, self.ForwardEvent)
tbar.AddSeparator()
doBind( tbar.AddTool(wx.ID_CUT, "", _rt_cut.GetBitmap(),
shortHelp="Couper dans le presse-papier"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_COPY, "", _rt_copy.GetBitmap(),
shortHelp="Copier dans le presse-papier"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_PASTE, "", _rt_paste.GetBitmap(),
shortHelp="Coller depuis le presse-papier"), self.ForwardEvent, self.ForwardEvent)
tbar.AddSeparator()
doBind( tbar.AddTool(-1, "", _rt_bold.GetBitmap(), kind = wx.ITEM_CHECK,
shortHelp="Gras"), self.OnBold, self.OnUpdateBold)
doBind( tbar.AddTool(-1, "", _rt_italic.GetBitmap(), kind = wx.ITEM_CHECK,
shortHelp="Italic"), self.OnItalic, self.OnUpdateItalic)
doBind( tbar.AddTool(-1, "", _rt_underline.GetBitmap(), kind = wx.ITEM_CHECK,
shortHelp="Soulign�"), self.OnUnderline, self.OnUpdateUnderline)
tbar.AddSeparator()
doBind( tbar.AddTool(-1, "", _rt_alignleft.GetBitmap(), kind = wx.ITEM_CHECK,
shortHelp="Aligner � gauche"), self.OnAlignLeft, self.OnUpdateAlignLeft)
doBind( tbar.AddTool(-1, "", _rt_centre.GetBitmap(), kind = wx.ITEM_CHECK,
shortHelp="Centrer"), self.OnAlignCenter, self.OnUpdateAlignCenter)
doBind( tbar.AddTool(-1, "", _rt_alignright.GetBitmap(), kind = wx.ITEM_CHECK,
shortHelp="Aligner � droite"), self.OnAlignRight, self.OnUpdateAlignRight)
tbar.AddSeparator()
doBind( tbar.AddTool(-1, "", _rt_indentless.GetBitmap(),
shortHelp="Indenter"), self.OnIndentLess)
doBind( tbar.AddTool(-1, "", _rt_indentmore.GetBitmap(),
shortHelp="Desindenter"), self.OnIndentMore)
tbar.AddSeparator()
doBind( tbar.AddTool(-1, "", _rt_font.GetBitmap(),
shortHelp="Police"), self.OnFont)
doBind( tbar.AddTool(-1, "", _rt_colour.GetBitmap(),
shortHelp="Couleur de police"), self.OnColour)
tbar.AddSeparator()
tbar.AddControl(
wx.ComboBox(
tbar, -1, "Styles", choices = list(Styles.keys()),
size=(150,-1), style=wx.CB_DROPDOWN
))
self.Bind(wx.EVT_COMBOBOX, self.OnApplyStyle)
# wx.GetApp().locale = wx.Locale(loc)
tbar.Realize()
def OnPageSetup(self, evt):
self.rtp.PageSetup()
# psdd = wx.PageSetupDialogData(self.printData)
# psdd.CalculatePaperSizeFromId()
# dlg = wx.PageSetupDialog(self, psdd)
# dlg.ShowModal()
#
# # this makes a copy of the wx.PrintData instead of just saving
# # a reference to the one inside the PrintDialogData that will
# # be destroyed when the dialog is destroyed
# self.printData = wx.PrintData( dlg.GetPageSetupData().GetPrintData() )
#
# dlg.Destroy()
def OnPrintPreview(self, event = None):
self.rtp.PreviewBuffer(self.rtc.GetBuffer())
# data = wx.PrintDialogData(self.printData)
# printout = MyPrintout(self.rtc)
# printout2 = MyPrintout(self.rtc)
# self.preview = wx.PrintPreview(printout, printout2, data)
## rtp = rt.RichTextPrinting()
## rt.RichTextPrinting(self, "test").PreviewBuffer(self.rtc.GetBuffer())
# if not self.preview.Ok():
# print "Erreur Preview"
# return
#
# pfrm = wx.PreviewFrame(self.preview, self, "This is a print preview")
#
# pfrm.Initialize()
# pfrm.SetPosition(self.rtc.GetPosition())
# pfrm.SetSize(self.rtc.GetSize())
# pfrm.Show(True)
def OnDoPrint(self, event = None):
self.rtp.PrintBuffer(self.rtc.GetBuffer())
# pdd = wx.PrintDialogData(self.printData)
# pdd.SetToPage(2)
# printer = wx.Printer(pdd)
# printout = MyPrintout(self.rtc)
#
# if not printer.Print(self.rtc, printout, True):
# wx.MessageBox("There was a problem printing.\nPerhaps your current printer is not set correctly?", "Printing", wx.OK)
# else:
# self.printData = wx.PrintData( printer.GetPrintDialogData().GetPrintData() )
# printout.Destroy()
def OnFileViewHTML(self, evt):
# Get an instance of the html file handler, use it to save the
# document to a StringIO stream, and then display the
# resulting html text in a dialog with a HtmlWindow.
handler = rt.RichTextHTMLHandler()
handler.SetFlags(rt.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY)
handler.SetFontSizeMapping([7,9,11,12,14,22,100])
import io
stream = io.StringIO()
if not handler.SaveStream(self.rtc.GetBuffer(), stream):
return
print(stream.getvalue())
import wx.html
dlg = wx.Dialog(self, title="HTML", style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
html = wx.html.HtmlWindow(dlg, size=(500,400), style=wx.BORDER_SUNKEN)
html.SetPage(stream.getvalue())
btn = wx.Button(dlg, wx.ID_CANCEL)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(html, 1, wx.ALL|wx.EXPAND, 5)
sizer.Add(btn, 0, wx.ALL|wx.CENTER, 10)
dlg.SetSizer(sizer)
sizer.Fit(dlg)
dlg.ShowModal()
handler.DeleteTemporaryImages()
class RapportRTF2(richtext.RichTextPanel):
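    """Report variant built on richtext.RichTextPanel; note that FrameRapport instantiates the RapportRTF class defined further below."""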
def __init__(self, parent, objet, toolbar = True):
# print "RapportRTF"
richtext.RichTextPanel.__init__(self, parent, objet, toolbar)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
######################################################################################################
def OnEnter(self, event):
self.SetFocus()
event.Skip()
######################################################################################################
def Remplir(self, fichierCourant, doc, typ, eleve = None):
isEditable = self.IsEditable()
self.SetEditable(True)
self.MoveEnd()
self.Clear()
#
        # On remplit le rapport
#
phase = ''
if typ == 'prj':
# for e in doc.eleves:
self.AddTitreProjet(eleve, doc.GetProjetRef().attributs["FIC"][0])
for t in doc.OrdonnerListeTaches(eleve.GetTaches(revues = True)):
if t.phase != phase and t.phase != '':
phase = t.phase
self.AddPhase(t, doc.GetTypeEnseignement(simple = True))
self.AddTache(t, revue = t.phase in ["R1", "R2", "R3", "Rev"])
self.eleve = eleve
self.projet = doc
else:
self.AddTitreSeance(doc)
for s in doc.seances:
self.AddSeance(s)
self.AddPieds(fichierCourant)
self.SetEditable(isEditable)
self.ScrollIntoView(0, wx.WXK_HOME)
######################################################################################################
def AddPieds(self, fichierCourant):
self.Newline()
self.BeginFontSize(8)
self.BeginItalic()
self.WriteText(os.path.basename(os.path.splitext(fichierCourant)[0]))
self.EndItalic()
self.EndFontSize()
self.Newline()
self.EndAlignment()
######################################################################################################
def AddTitreProjet(self, eleve, titre):
# print self.GetCaretPosition()
if self.GetCaretPosition() == -1:
Styles["Titre"].SetPageBreak(pageBreak=False)
else:
Styles["Titre"].SetPageBreak(pageBreak=True)
#
parag = self.AddParagraph(titre + "\n")
self.MoveEnd()
self.Newline()
self.BeginBold()
self.BeginFontSize(14)
self.WriteText(eleve.GetNomPrenom())
self.EndFontSize()
self.EndBold()
self.BeginAlignment(wx.TEXT_ALIGNMENT_CENTRE)
self.Newline()
self.SetStyle(parag, Styles["Titre"])
self.EndAlignment()
######################################################################################################
def AddTitreSeance(self, doc):
parag = self.AddParagraph("Détail des séances\n")
self.MoveEnd()
self.Newline()
self.BeginBold()
self.BeginFontSize(14)
self.BeginItalic()
self.WriteText(doc.intitule)
self.EndItalic()
self.EndFontSize()
self.EndBold()
self.BeginAlignment(wx.TEXT_ALIGNMENT_CENTRE)
self.Newline()
self.SetStyle(parag, Styles["Titre"])
self.EndAlignment()
######################################################################################################
def AddPhase(self, tache, typ):
if tache.phase != '':
r,v,b,a = self.fiche.ICoulTache[tache.phase]
else:
r,v,b, a = 1,1,1,1
bgCoul = wx.Colour(r*255,v*255,b*255)
if tache.phase != '':
r,v,b = self.fiche.BCoulTache[tache.phase]
else:
r,v,b, a = 0,0,0,1
fgCoul = wx.Colour(r*255,v*255,b*255)
Styles["Titre 1"].SetBackgroundColour(bgCoul)
Styles["Titre 1"].SetTextColour(fgCoul)
self.BeginStyle(Styles["Titre 1"])
phase = tache.GetProjetRef().phases[tache.phase][1]
self.WriteText(phase)
self.EndStyle()
# self.EndAlignment()
self.Newline()
# self.EndLeftIndent()
# self.EndStyle()
######################################################################################################
def AddTache(self, tache, revue = False):
if tache.phase != '':
r,v,b, a = self.fiche.ICoulTache[tache.phase]
else:
r,v,b, a = 1,1,1,1
bgCoul = wx.Colour(r*255,v*255,b*255)
if tache.phase != '':
r,v,b = self.fiche.BCoulTache[tache.phase]
else:
r,v,b, a = 0,0,0,1
fgCoul = wx.Colour(r*255,v*255,b*255)
if not revue:
Styles["Titre 2"].SetBackgroundColour(bgCoul)
self.BeginStyle(Styles["Titre 2"])
self.WriteText("Tache : " + tache.code+"\t\t\t"+getHoraireTxt(tache.GetDuree()))
self.EndStyle()
self.Newline()
self.EndAlignment()
self.BeginStyle(Styles["Message"])
# self.BeginLeftIndent(60)
self.BeginUnderline()
self.WriteText("Intitulé :")
self.EndUnderline()
self.WriteText(" " + tache.intitule)
self.EndStyle()
# self.Newline()
if tache.description != None:
# self.BeginUnderline()
# self.WriteText(u"Description :")
# self.BeginLeftIndent(60)
# self.EndUnderline()
self.Newline()
rtc = richtext.RichTextPanel(self.parent, tache, toolBar = False)
rtc.Show(False)
self.AddDescription(rtc.rtc)
rtc.Destroy()
self.EndStyle()
## self.BeginLeftIndent(60)
# self.Newline()
# self.EndStyle()
# self.MoveEnd()
# tache.panelPropriete.rtc.rtc.SelectAll()
#
# if sys.platform == "win32":
# #
# # Procédure pour vérifier que le clipboard est disponible
# # (source http://teachthe.net/?cat=56&paged=2)
# #
# cbOpened = False
# n = 0
# while not cbOpened and n < 10:
# n += 1
# try:
# win32clipboard.OpenClipboard(0)
# cbOpened = True
# win32clipboard.CloseClipboard()
# except Exception, err:
## print "error", err
# # If access is denied, that means that the clipboard is in use.
# # Keep trying until it's available.
# if err[0] == 5: #Access Denied
# pass
# print 'waiting on clipboard...'
# # wait on clipboard because something else has it. we're waiting a
# # random amount of time before we try again so we don't collide again
# time.sleep( random.random()/50 )
# elif err[0] == 1418: #doesn't have board open
# pass
# elif err[0] == 0: #open failure
# pass
# else:
# print 'ERROR in Clipboard section of readcomments: %s' %err
# pass
#
# tache.panelPropriete.rtc.rtc.Copy()
# self.Paste()
self.Newline()
# self.EndLeftIndent()
self.EndAlignment()
# self.BeginUnderline()
# self.WriteText(u"Volume horaire :")
# self.EndUnderline()
# self.WriteText(u" " + getHoraireTxt(tache.GetDuree()))
self.EndStyle()
def AddDescription(self, rtc):
""" Ajoute une description contenue dans un RichTextCtrl
"""
# print "AddDescription"
# par = rtc.GetFocusObject()
# par = rtc.GetSelectionAnchorObject()
par = rtc.GetBuffer()
pos = self.GetInsertionPoint()
# print " ", rtc.GetBuffer()
# print " ", pos
self.GetBuffer().InsertParagraphsWithUndo(pos, par, self)
self.MoveEnd()
# self.Newline()
# return
# pos = self.GetLastPosition()
#
# self.
#
#
#
#
#
#
# bufS = cStringIO.StringIO()
# handlerS = rt.RichTextXMLHandler()
# handlerS.SetFlags(rt.RICHTEXT_HANDLER_INCLUDE_STYLESHEET)
# handlerS.SaveStream(rtc.GetBuffer(), bufS)
## print " ", bufS.getvalue()
# domS = parseString(bufS.getvalue())
#
#
# bufT = cStringIO.StringIO()
# handlerT = rt.RichTextXMLHandler()
# handlerT.SetFlags(rt.RICHTEXT_HANDLER_INCLUDE_STYLESHEET)
# handlerT.SaveStream(self.GetBuffer(), bufT)
## print " ", bufT.getvalue()
# domT = parseString(bufT.getvalue())
#
# parS = domS.getElementsByTagName("paragraphlayout")[0]
# parT = domT.getElementsByTagName("paragraphlayout")[0]
#
# for c in parS.childNodes:
## print ">>>> ", c.toxml()
# parT.appendChild(domT.importNode(c, True))
## print " T : ", parT.toxml()
#
# print "resultat :"
## print domT.toxml()
# bufT = cStringIO.StringIO()
# bufT.write(domT.toxml())
# bufT.seek(0)
#
# try:
# for line in bufT:
# print line,
# finally:
# pass
#
# bufT.seek(0)
## print " >>", bufT.getvalue()
#
## rt_buffer = self.GetBuffer()
## rt_buffer.AddHandler(handlerT)
#
## handlerT.LoadStream(self.GetBuffer(), bufT)
#
# # add the handler (where you create the control)
# self.GetBuffer().AddHandler(rt.RichTextXMLHandler())
#
# # you have to specify the type of data to load and the control
# # must already have an instance of the handler to parse it
# self.GetBuffer().LoadStream(bufT, rt.RICHTEXT_TYPE_XML)
# bufT.close()
#
## self.MoveToParagraphEnd()
# self.MoveEnd()
# self.Newline()
# self.MoveEnd()
#
# self.EndStyle()
# self.EndLeftIndent()
# self.EndAlignment()
# self.Newline()
# self.Refresh()
######################################################################################################
def AddSeance(self, seance, indent = 1):
# print "Add", seance
if seance.typeSeance == '':
return
r,v,b = self.fiche.ICoulSeance[seance.typeSeance]
bgCoul = wx.Colour(r*255,v*255,b*255)
# self.Newline()
# if not isinstance(fgCoul, wx.Colour):
# fgCoul = wx.NamedColour(fgCoul)
# self.BeginTextColour(fgCoul)
if not isinstance(bgCoul, wx.Colour):
bgCoul = wx.NamedColour(bgCoul)
Styles["Titre 1"].SetBackgroundColour(bgCoul)
self.BeginStyle(Styles["Titre 1"])
self.WriteText(seance.GetReferentiel().seances[seance.typeSeance][0] + " : " + seance.code+"\t\t\t"+getHoraireTxt(seance.GetDuree()))
self.EndStyle()
# self.BeginLeftIndent(60*(indent-1))
self.Newline()
self.EndLeftIndent()
self.BeginStyle(Styles["Message"])
self.BeginUnderline()
self.WriteText("Intitulé :")
self.EndUnderline()
self.WriteText(" " + seance.intitule)
# self.EndStyle()
# self.BeginLeftIndent(60*indent)
self.Newline()
self.EndLeftIndent()
if seance.description != None:
self.Newline()
rtc = richtext.RichTextPanel(self.parent, seance, toolBar = False)
rtc.Show(False)
self.AddDescription(rtc.rtc)
rtc.Destroy()
self.EndStyle()
# if seance.description != None and hasattr(seance, 'panelPropriete'):
# # self.BeginUnderline()
# # self.WriteText(u"Description :")
# # self.EndUnderline()
# # self.Newline()
# # self.BeginLeftIndent(60*indent)
# seance.panelPropriete.rtc.rtc.SelectAll()
# seance.panelPropriete.rtc.rtc.Copy()
# self.Paste()
#
# self.Newline()
# self.EndLeftIndent()
self.EndStyle()
if seance.typeSeance in ["R", "S"]:
for sseance in seance.seances:
self.AddSeance(sseance, indent + 1)
self.Newline()
# ######################################################################################################
# # Analyse
# ######################################################################################################
# def AddTitreAnImmob(self):
# self.AddParagraphStyled(u"Structure du Montage :", "Titre 1")
#
# def AddAnImmob(self, analyse, zoneMtg):
# self.AddParagraphStyled(u"Mise en position axiale :", "Titre 2")
#
# # Message principal
# self.AddParagraphStyled(analyse.messageImmobilisation.mess, "Message", analyse.messageImmobilisation.coul)
# self.AppendText("\n")
#
# # Message par sens
# for s in [1,0]: # différents sens ...
# self.BeginStyle(Styles["MessSens"])
# self.BeginTextColour(Couleur[analyse.resultatImmobilisation[s][0].coul])
# mess = self.AppendText(analyse.resultatImmobilisation[s][0].mess)
# if s == 1: self.WriteText("\t")
# self.AppendText("\n")
#
# # Image par sens
# for s in [1,0]: # différents sens ...
# if analyse.resultatImmobilisation[s][0].clef == 'ArretArbreSens':
# img = self.GetImageArret(s, analyse, zoneMtg)
# elif analyse.resultatImmobilisation[s][0].clef == 'ImmobCorrect':
# img = self.GetImageChaine(s, analyse, zoneMtg)
# self.WriteImage(img)
# self.WriteText("\t")
# self.AppendText("\n")
#
# def AddAnStruc(self, analyse, zoneMtg):
# titre = self.AddParagraph(u"Schéma de Structure :")
# self.SetStyle(titre, Styles["Titre 2"])
# img = analyse.schemaStructure.bitmap().ConvertToImage()
# self.AddImage(img)
# self.AppendText("\n")
#
# ######################################################################################################
# def AddTitreAnCharg(self):
# self.AddParagraphStyled(u"Résistance aux charges :", "Titre 1")
#
# def AddAnResistMtg(self, analyse, zoneMtg):
# self.AddParagraphStyled(u"Résistance axiale du montage :", "Titre 2")
#
# # Message principal
# self.AddParagraphStyled(analyse.messageResistanceAxiale.mess, "Message", analyse.messageResistanceAxiale.coul)
# self.AppendText("\n")
#
# # Message par sens
# for s in [1,0]: # différents sens ...
# self.BeginStyle(Styles["MessSens"])
# self.BeginTextColour(Couleur[analyse.resultatEffortAxialMtg[s][0].coul])
# mess = self.AppendText(analyse.resultatEffortAxialMtg[s][0].mess)
# if s == 1: self.WriteText("\t")
# self.AppendText("\n")
#
# # Image par sens
# for s in [1,0]: # différents sens ...
# if analyse.resultatEffortAxialMtg[s][0].clef == 'ElemResistPas':
# img = self.GetImageChaineSurbrill(s, analyse, zoneMtg)
# elif analyse.resultatEffortAxialMtg[s][0].clef == 'ChargeAxOk':
# img = self.GetImageChaine(s, analyse, zoneMtg)
# elif analyse.resultatEffortAxialMtg[s][0].clef == 'ArretArbreSens':
# img = self.GetImageArret(s, analyse, zoneMtg)
# self.WriteImage(img)
# if s == 1: self.WriteText("\t")
# self.AppendText("\n")
#
#
# def AddAnResistRlt(self, analyse, zoneMtg, panelResist):
# self.AddParagraphStyled(u"Résistance des roulements :", "Titre 2")
#
# # Message principal
# self.AddParagraphStyled(analyse.messageResistanceAxiale.mess, "Message", analyse.messageResistanceAxiale.coul)
# self.AppendText("\n")
#
# # Schéma de structure
# img = analyse.imageSchemaCharges.ConvertToImage()
# self.AddImage(img)
#
# # Tableau
# self.AddGrid(panelResist.tableResist)
#
# self.AppendText("\n")
#
#
# ######################################################################################################
# def AddTitreAnMontab(self, analyse):
# self.AddParagraphStyled(u"Montabilité :", "Titre 1")
#
# self.AddParagraphStyled(analyse.resultatMontabilite.mess, "Message", analyse.resultatMontabilite.coul)
#
# def AddAnMontabEns(self, analyse, zoneMtg):
# if analyse.cdcf.bagueTournante == "I": ens = u"""arbre"""
# else: ens = u"""alésage"""
# self.AddParagraphStyled(u"Montabilité de l'ensemble "+ens+" :", "Titre 2")
# self.AppendText("")
#
# # Images pour "Montabilité"
# imagMontabiliteEns = self.GetImagesDemontageEns(analyse, zoneMtg)
# for img in imagMontabiliteEns:
# self.WriteImage(img)
# self.WriteText("\t")
#
# def AddAnMontabRlt(self, analyse, zoneMtg):
# self.AddParagraphStyled(u"Montabilité des Roulements :", "Titre 2")
# self.AppendText("")
#
# # Images pour "Montabilité"
# imagMontabiliteRlt = self.GetImagesDemontageRlt(analyse, zoneMtg)
# for img in imagMontabiliteRlt:
# self.WriteImage(img)
# self.WriteText("\t")
#
#
# ######################################################################################################
# def AddAnEtanch(self, analyse, panelEtanch, CdCF):
# self.AddParagraphStyled(u"Etanchéité :", "Titre 1")
#
# #
# # Etanchéité statique
# #
# self.AddParagraphStyled(u"Etanchéité Statique :", "Titre 2")
#
# # CdCF
# self.AddCdCFEtanchStat(CdCF)
#
# # Resultat principal
# message = analyse.resultatEtancheite["SB"]
# self.AddParagraphStyled(message.mess, "Message", message.coul)
#
# # D�tails
# if "SB+" in analyse.resultatEtancheite.keys():
# for mess in analyse.resultatEtancheite["SB+"]:
# self.AddParagraphStyled(mess.mess, "MessSens", mess.coul)
#
# self.AppendText("\n")
# self.AddGrid(panelEtanch.tableStat)
#
# #
# # Etanchéité Dynamique
# #
# if "DB" in analyse.resultatEtancheite:
# self.AddParagraphStyled(u"Etanchéité Dynamique :", "Titre 2")
#
# # CdCF
# self.AddCdCFEtanchDyn(CdCF)
#
# mess = analyse.resultatEtancheite["DB"]
# self.AddParagraphStyled(mess.mess, "Message", mess.coul)
#
# if "DB+" in analyse.resultatEtancheite.keys():
# for mess in analyse.resultatEtancheite["DB+"]:
# self.AddParagraphStyled(mess.mess, "MessSens", mess.coul)
#
# self.AppendText("\n")
# self.AddGrid(panelEtanch.tableDyn)
#
# #
# # Compatibilité lubrifiant
# #
# self.AddParagraphStyled(u"Compatibilité lubrifiant :", "Titre 2")
#
# # CdCF
# self.AddCdCFEtanchLub(CdCF)
#
# mess = analyse.resultatEtancheite["C"]
# self.AddParagraphStyled(mess.mess, "Message", mess.coul)
#
# self.AppendText("\n")
#
#
#
# ######################################################################################################
# def AddAnCout(self, analyse, panelDevis, CdCF):
# self.AddParagraphStyled(u"Devis (coût indicatif) :", "Titre 1")
#
# # CdCF
# self.AddCdCFCoutMax(CdCF)
#
# # Devis
# self.AppendText("\n")
# self.AddGrid(panelDevis.devis)
#
#
# def AddGrid(self, grid):
#
# debut = self.GetInsertionPoint()
#
# def SsRc(s):
# return s.replace("\n", " ")
#
# # Définition des tabs
# coef = 5
# tabs = [max(coef*grid.GetRowLabelSize(), 30)]
# for c in range(grid.GetNumberCols()):
# tabs.append(tabs[-1:][0]+coef*grid.GetColSize(c))
# Styles["Tableau"].SetTabs(tabs)
#
# # Affichage du contenu
# for l in range(1+grid.GetNumberRows()):
# ll = l-1
# for c in range(1+grid.GetNumberCols()):
# # Titres colonnes
# cc = c-1
# if l == 0 and c > 0:
# self.BeginTextColour(wx.BLACK)
# self.AppendText(SsRc(grid.GetColLabelValue(cc)))
#
# # Titres lignes
# elif c == 0 and l > 0:
# self.BeginTextColour(wx.BLACK)
# self.AppendText(SsRc(grid.GetRowLabelValue(ll)))
#
# elif c == 0 and l == 0:
# pass
#
# # Valeurs
# else:
# self.BeginTextColour(grid.GetCellTextColour(ll,cc))
# self.AppendText(SsRc(grid.GetCellValue(ll,cc)))
#
# self.AppendText("\t")
# self.AppendText("\n")
#
# fin = self.GetInsertionPoint()
# tout = self.GetRange(debut, fin)
#
# self.SetStyle((debut, fin), Styles["Tableau"])
#
# self.EndTextColour()
def AddParagraphStyled(self, texte, style, couleur = None, bgCoul = "WHITE", souligne = False):
# if style == "MessSens":
# print Styles[style].GetTextColour(), texte.encode('cp437','replace')
if couleur is not None:
if isinstance(couleur, wx.Colour):
c = couleur
else:
c = wx.Colour(0,0,0)#Couleur[couleur]
# cs = Styles[style].GetTextColour()
# Styles[style].SetTextColour(c)
self.BeginTextColour(c)
if not isinstance(bgCoul, wx.Colour):
bgCoul = wx.NamedColour(bgCoul)
if souligne:
self.BeginUnderline()
# Styles[style].SetFlags(wx.TEXT_ATTR_BACKGROUND_COLOUR)
Styles[style].SetBackgroundColour(bgCoul)
parag = self.AddParagraph(texte)
self.SetStyle(parag, Styles[style])
self.EndTextColour()
self.EndUnderline()
self.EndParagraphStyle()
def AddTextStyled(self, texte, style,
fgCoul = "BLACK", bgCoul = "WHITE", souligne = False):
# if style == "MessSens":
# print Styles[style].GetTextColour(), texte.encode('cp437','replace')
if not isinstance(fgCoul, wx.Colour):
fgCoul = wx.NamedColour(fgCoul)
self.BeginTextColour(fgCoul)
if not isinstance(bgCoul, wx.Colour):
bgCoul = wx.NamedColour(bgCoul)
Styles[style].SetBackgroundColour(bgCoul)
if souligne:
self.BeginUnderline()
self.BeginStyle(Styles[style])
self.AppendText(texte)
self.EndStyle()
self.EndUnderline()
self.EndTextColour()
# if couleur is not None:
# Styles[style].SetTextColour(cs)
######################################################################################################
def getNomFichierDefaut(self):
f = "Tâches détaillées _ " + self.eleve.GetNomPrenom() + ".rtf"
return os.path.join(self.projet.GetPath(), f)
######################################################################################################
def Enregistrer(self, titre, nomFichierDefaut = ""):
if not self.GetFilename():
if self.Ecraser(nomFichierDefaut):
self.SetFilename(nomFichierDefaut)
self.SaveFile()
else:
return
else:
self.SaveFile()
if nomFichierDefaut:
ext = os.path.splitext(nomFichierDefaut)[1].lstrip('.')
if ext == 'txt':
wildcard, types = rt.RichTextBuffer.GetExtWildcard(save=True)
fileType = 1
ext = rt.RichTextBuffer.FindHandlerByType(fileType).GetExtension()
if not nomFichierDefaut.endswith(ext):
nomFichierDefaut += '.' + ext
self.SaveFile(nomFichierDefaut, 1)
elif ext == 'html':
handler = rt.RichTextHTMLHandler()
handler.SetFlags(rt.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY)
handler.SetFontSizeMapping([7,9,11,12,14,22,100])
stream = io.StringIO()
if handler.SaveStream(self.GetBuffer(), stream):
f = open(nomFichierDefaut, 'w')
f.write(prefixeHTML+stream.getvalue())#.encode(sys.getdefaultencoding()))
f.close()
elif ext == 'rtf':
# Use the custom RTF Handler
handler = PyRTFParser.PyRichTextRTFHandler()
# Save the file with the custom RTF Handler.
# The custom RTF Handler can take either a wxRichTextCtrl or a wxRichTextBuffer argument.
handler.SaveFile(self.GetBuffer(), nomFichierDefaut)
dlg = wx.MessageDialog(self, "Le fichier a bien été enregistré\n\n%s\n\n"\
"Voulez-vous l'ouvrir ?" %self.GetFilename(),
"Fichier enregistré",
wx.ICON_INFORMATION | wx.YES_NO | wx.CANCEL)
res = dlg.ShowModal()
if res == wx.ID_YES:
try:
os.startfile(self.GetFilename())
except:
messageErreur(None, "Ouverture impossible",
"Impossible d'ouvrir le fichier\n\n%s\n" %toSystemEncoding(self.GetFilename()))
dlg.Destroy()
######################################################################################################
def Ecraser(self, nomFichier):
if os.path.exists(nomFichier):
dlg = wx.MessageDialog(self, "Le fichier existe déja !\n\n%s\n\n"\
"Voulez-vous l'écraser ?" %nomFichier,
"Fichier existant",
wx.ICON_WARNING | wx.YES_NO | wx.CANCEL)
res = dlg.ShowModal()
dlg.Destroy()
return res == wx.ID_YES
return True
######################################################################################################
def EnregistrerSous(self, titre, nomFichierDefaut = ""):
wildcard = "Rich Text Format (.rtf)|*.rtf|" \
"Format HTML (.html)|*.html|" \
"Fichier texte (.txt)|*.txt"
types = [0, 3, 2]
dlg = wx.FileDialog(self, titre,
wildcard=wildcard,
defaultFile = nomFichierDefaut,
style=wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.Enregistrer(titre, path)
dlg.Destroy()
class RapportRTF(rt.RichTextCtrl):
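    """RichTextCtrl that builds the detailed report (a pupil's tasks for a project, or the sessions of a sequence)
    and can save it to RTF, HTML or plain text."""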
def __init__(self, parent, style = 0):
# print "RapportRTF"
rt.RichTextCtrl.__init__(self, parent, style = rt.RE_MULTILINE | wx.WANTS_CHARS |style)
self.style = style
self.parent = parent
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
self.Bind(wx.EVT_KEY_DOWN, self.OnKey)
# p = draw_cairo_prj.Projet()
# ICoulTache = p.ICoulTache
# ICoulTache = p.ICoulTache
######################################################################################################
def OnKey(self, event = None):
wx.GetApp().GetTopWindow().OnKey(event)
######################################################################################################
def OnEnter(self, event):
self.SetFocus()
event.Skip()
######################################################################################################
def Remplir(self, fichierCourant, doc, typ, eleve = None):
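        """Clear the control, then fill it: one block per task of `eleve` when typ == 'prj',
        otherwise one block per session of `doc`, followed by the footer."""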
isEditable = self.IsEditable()
self.SetEditable(True)
self.MoveEnd()
self.Clear()
#
        # On remplit le rapport
#
phase = ''
if typ == 'prj':
self.eleve = eleve
self.projet = doc
self.fiche = self.projet.GetApp().fiche.fiche
# for e in doc.eleves:
self.AddTitreProjet(eleve, doc.GetProjetRef().attributs["FIC"][0])
for t in doc.OrdonnerListeTaches(eleve.GetTaches(revues = True)):
if t.phase != phase and t.phase != '':
phase = t.phase
self.AddPhase(t, doc.GetTypeEnseignement(simple = True))
self.AddTache(t, eleve, revue = t.phase in ["R1", "R2", "R3", "Rev"])
else:
self.doc = doc
self.fiche = self.doc.GetApp().fiche.fiche
self.AddTitreSeance(doc)
for s in doc.seances:
self.AddSeance(s)
self.AddPieds(fichierCourant)
self.SetEditable(isEditable)
self.ScrollIntoView(0, wx.WXK_HOME)
######################################################################################################
def AddPieds(self, fichierCourant):
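        """Append the report footer: the base name of the current file (without extension), in small italic text."""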
self.Newline()
self.BeginFontSize(8)
self.BeginItalic()
self.WriteText(os.path.basename(os.path.splitext(fichierCourant)[0]))
self.EndItalic()
self.EndFontSize()
self.Newline()
self.EndAlignment()
######################################################################################################
def AddTitreProjet(self, eleve, titre):
# print self.GetCaretPosition()
if self.GetCaretPosition() == -1:
Styles["Titre"].SetPageBreak(pageBreak=False)
else:
Styles["Titre"].SetPageBreak(pageBreak=True)
#
parag = self.AddParagraph(titre + "\n")
self.MoveEnd()
self.Newline()
self.BeginBold()
self.BeginFontSize(14)
self.WriteText(eleve.GetNomPrenom())
self.EndFontSize()
self.EndBold()
self.BeginAlignment(wx.TEXT_ALIGNMENT_CENTRE)
self.Newline()
self.SetStyle(parag, Styles["Titre"])
self.EndAlignment()
######################################################################################################
def AddTitreSeance(self, doc):
parag = self.AddParagraph("Détail des séances\n")
self.MoveEnd()
self.Newline()
self.BeginBold()
self.BeginFontSize(14)
self.BeginItalic()
self.WriteText(doc.intitule)
self.EndItalic()
self.EndFontSize()
self.EndBold()
self.BeginAlignment(wx.TEXT_ALIGNMENT_CENTRE)
self.Newline()
self.SetStyle(parag, Styles["Titre"])
self.EndAlignment()
######################################################################################################
def AddPhase(self, tache, typ):
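        """Insert a phase heading coloured from the project sheet's ICoulTache / BCoulTache tables."""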
if tache.phase != '':
r,v,b,a = self.projet.GetApp().fiche.fiche.ICoulTache[tache.phase]
else:
r,v,b, a = 1,1,1,1
bgCoul = wx.Colour(r*255,v*255,b*255)
if tache.phase != '':
r,v,b = self.fiche.BCoulTache[tache.phase]
else:
r,v,b, a = 0,0,0,1
fgCoul = wx.Colour(r*255,v*255,b*255)
Styles["Titre 1"].SetBackgroundColour(bgCoul)
Styles["Titre 1"].SetTextColour(fgCoul)
self.BeginStyle(Styles["Titre 1"])
phase = tache.GetProjetRef().phases[tache.phase][1]
self.WriteText(phase)
self.EndStyle()
# self.EndAlignment()
self.Newline()
# self.EndLeftIndent()
# self.EndStyle()
######################################################################################################
def AddTache(self, tache, eleve, revue = False):
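        """Insert a task block: code, duration weighted by the pupil's implication percentage, title and optional description."""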
if tache.phase != '':
r,v,b, a = self.fiche.ICoulTache[tache.phase]
else:
r,v,b, a = 1,1,1,1
bgCoul = wx.Colour(r*255,v*255,b*255)
if tache.phase != '':
r,v,b = self.fiche.BCoulTache[tache.phase]
else:
r,v,b, a = 0,0,0,1
fgCoul = wx.Colour(r*255,v*255,b*255)
if not revue:
Styles["Titre 2"].SetBackgroundColour(bgCoul)
self.BeginStyle(Styles["Titre 2"])
# self.WriteText("Tache : " + tache.code+"\t\t\t"+getHoraireTxt(tache.GetDuree()))
# print(tache.impEleves, eleve.id)
self.WriteText("Tache : " + tache.code+"\t\t\t"+getHoraireTxt(tache.GetDuree()*tache.impEleves[tache.eleves.index(eleve.id)]*0.01))
self.EndStyle()
self.Newline()
self.EndAlignment()
self.BeginStyle(Styles["Message"])
# self.BeginLeftIndent(60)
self.BeginUnderline()
self.WriteText("Intitulé :")
self.EndUnderline()
self.WriteText(" " + tache.intitule)
self.EndStyle()
# self.Newline()
if tache.description != None:
# self.BeginUnderline()
# self.WriteText(u"Description :")
# self.BeginLeftIndent(60)
# self.EndUnderline()
self.Newline()
rtc = richtext.RichTextPanel(self.parent, tache, toolBar = False)
rtc.Show(False)
self.AddDescription(rtc.rtc)
rtc.Destroy()
self.EndStyle()
## self.BeginLeftIndent(60)
# self.Newline()
# self.EndStyle()
# self.MoveEnd()
# tache.panelPropriete.rtc.rtc.SelectAll()
#
# if sys.platform == "win32":
# #
# # Procédure pour vérifier que le clipboard est disponible
# # (source http://teachthe.net/?cat=56&paged=2)
# #
# cbOpened = False
# n = 0
# while not cbOpened and n < 10:
# n += 1
# try:
# win32clipboard.OpenClipboard(0)
# cbOpened = True
# win32clipboard.CloseClipboard()
# except Exception, err:
## print "error", err
# # If access is denied, that means that the clipboard is in use.
# # Keep trying until it's available.
# if err[0] == 5: #Access Denied
# pass
# print 'waiting on clipboard...'
# # wait on clipboard because something else has it. we're waiting a
# # random amount of time before we try again so we don't collide again
# time.sleep( random.random()/50 )
# elif err[0] == 1418: #doesn't have board open
# pass
# elif err[0] == 0: #open failure
# pass
# else:
# print 'ERROR in Clipboard section of readcomments: %s' %err
# pass
#
# tache.panelPropriete.rtc.rtc.Copy()
# self.Paste()
self.Newline()
# self.EndLeftIndent()
self.EndAlignment()
# self.BeginUnderline()
# self.WriteText(u"Volume horaire :")
# self.EndUnderline()
# self.WriteText(u" " + getHoraireTxt(tache.GetDuree()))
self.EndStyle()
def AddDescription(self, rtc):
""" Ajoute une description contenue dans un RichTextCtrl
"""
# print "AddDescription"
# par = rtc.GetFocusObject()
# par = rtc.GetSelectionAnchorObject()
par = rtc.GetBuffer()
pos = self.GetInsertionPoint()
# print " ", rtc.GetBuffer()
# print " ", pos
self.GetBuffer().InsertParagraphsWithUndo(pos, par, self)
self.MoveEnd()
######################################################################################################
def AddSeance(self, seance, indent = 1):
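        """Insert a session block (heading, title, optional description), then recurse into its sub-sessions for types 'R' and 'S'."""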
# print "Add", seance
if seance.typeSeance == '':
return
r,v,b = self.fiche.p_Icol_Sea_[seance.typeSeance]
bgCoul = wx.Colour(r*255,v*255,b*255)
# self.Newline()
# if not isinstance(fgCoul, wx.Colour):
# fgCoul = wx.NamedColour(fgCoul)
# self.BeginTextColour(fgCoul)
if not isinstance(bgCoul, wx.Colour):
bgCoul = wx.NamedColour(bgCoul)
Styles["Titre 1"].SetBackgroundColour(bgCoul)
self.BeginStyle(Styles["Titre 1"])
self.WriteText(seance.GetReferentiel().seances[seance.typeSeance][0] + " : " + seance.code+"\t\t\t"+getHoraireTxt(seance.GetDuree()))
self.EndStyle()
# self.BeginLeftIndent(60*(indent-1))
self.Newline()
self.EndLeftIndent()
self.BeginStyle(Styles["Message"])
self.BeginUnderline()
self.WriteText("Intitulé :")
self.EndUnderline()
self.WriteText(" " + seance.intitule)
# self.EndStyle()
# self.BeginLeftIndent(60*indent)
self.Newline()
self.EndLeftIndent()
if seance.description != None:
self.Newline()
rtc = richtext.RichTextPanel(self.parent, seance, toolBar = False)
rtc.Show(False)
self.AddDescription(rtc.rtc)
rtc.Destroy()
self.EndStyle()
# if seance.description != None and hasattr(seance, 'panelPropriete'):
# # self.BeginUnderline()
# # self.WriteText(u"Description :")
# # self.EndUnderline()
# # self.Newline()
# # self.BeginLeftIndent(60*indent)
# seance.panelPropriete.rtc.rtc.SelectAll()
# seance.panelPropriete.rtc.rtc.Copy()
# self.Paste()
#
# self.Newline()
# self.EndLeftIndent()
self.EndStyle()
if seance.typeSeance in ["R", "S"]:
for sseance in seance.seances:
self.AddSeance(sseance, indent + 1)
self.Newline()
def AddParagraphStyled(self, texte, style, couleur = None, bgCoul = "WHITE", souligne = False):
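        """Add a paragraph using the named style, with optional text colour, background colour and underlining."""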
# if style == "MessSens":
# print Styles[style].GetTextColour(), texte.encode('cp437','replace')
if couleur is not None:
if isinstance(couleur, wx.Colour):
c = couleur
else:
c = wx.Colour(0,0,0)#Couleur[couleur]
# cs = Styles[style].GetTextColour()
# Styles[style].SetTextColour(c)
self.BeginTextColour(c)
if not isinstance(bgCoul, wx.Colour):
bgCoul = wx.NamedColour(bgCoul)
if souligne:
self.BeginUnderline()
# Styles[style].SetFlags(wx.TEXT_ATTR_BACKGROUND_COLOUR)
Styles[style].SetBackgroundColour(bgCoul)
parag = self.AddParagraph(texte)
self.SetStyle(parag, Styles[style])
self.EndTextColour()
self.EndUnderline()
self.EndParagraphStyle()
def AddTextStyled(self, texte, style,
fgCoul = "BLACK", bgCoul = "WHITE", souligne = False):
# if style == "MessSens":
# print Styles[style].GetTextColour(), texte.encode('cp437','replace')
if not isinstance(fgCoul, wx.Colour):
fgCoul = wx.NamedColour(fgCoul)
self.BeginTextColour(fgCoul)
if not isinstance(bgCoul, wx.Colour):
bgCoul = wx.NamedColour(bgCoul)
Styles[style].SetBackgroundColour(bgCoul)
if souligne:
self.BeginUnderline()
self.BeginStyle(Styles[style])
self.AppendText(texte)
self.EndStyle()
self.EndUnderline()
self.EndTextColour()
# if couleur is not None:
# Styles[style].SetTextColour(cs)
######################################################################################################
def getNomFichierDefaut(self):
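        """Default file name: the pupil's details file for a project report, otherwise the document path with an .rtf extension."""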
if hasattr(self, 'eleve'): # type fiche détaillée de Projet
return self.eleve.GetNomDetails()
else:
return os.path.splitext(self.doc.GetPath())[0] + ".rtf"
# f = "Tâches détaillées _ " + self.eleve.GetNomPrenom() + ".rtf"
# return os.path.join(self.projet.GetPath(), f)
######################################################################################################
def Enregistrer(self, titre, nomFichierDefaut = "", dialog = True):
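        """Save the report in the format implied by the file extension (.txt, .html or .rtf);
        when `dialog` is True, offer to open the saved file afterwards."""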
if nomFichierDefaut == "":
nomFichierDefaut = self.getNomFichierDefaut()
# print("Enregistrer", nomFichierDefaut)
if not self.GetFilename():
if self.Ecraser(nomFichierDefaut):
self.SetFilename(nomFichierDefaut)
# self.SaveFile()
else:
return
# else:
# self.SaveFile()
ext = os.path.splitext(nomFichierDefaut)[1].lstrip('.')
if ext == 'txt':
wildcard, types = rt.RichTextBuffer.GetExtWildcard(save=True)
fileType = 1
ext = rt.RichTextBuffer.FindHandlerByType(fileType).GetExtension()
if not nomFichierDefaut.endswith(ext):
nomFichierDefaut += '.' + ext
self.SaveFile(nomFichierDefaut, 1)
elif ext == 'html':
handler = rt.RichTextHTMLHandler()
handler.SetFlags(rt.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY)
handler.SetFontSizeMapping([7,9,11,12,14,22,100])
handler.SaveFile(self.GetBuffer(), nomFichierDefaut)
# stream = io.StringIO()
# if handler.SaveFile(self.GetBuffer(), stream):
# f = open(nomFichierDefaut, 'w')
# f.write(prefixeHTML+stream.getvalue())#.encode(sys.getdefaultencoding()))
# f.close()
elif ext == 'rtf':
# Use the custom RTF Handler
handler = PyRTFParser.PyRichTextRTFHandler()
# Save the file with the custom RTF Handler.
# The custom RTF Handler can take either a wxRichTextCtrl or a wxRichTextBuffer argument.
print(nomFichierDefaut)
handler.SaveFile(self.GetBuffer(), nomFichierDefaut)
if dialog:
dlg = wx.MessageDialog(self, "Le fichier a bien été enregistré\n\n%s\n\n"\
"Voulez-vous l'ouvrir ?" %self.GetFilename(),
"Fichier enregistré",
wx.ICON_INFORMATION | wx.YES_NO | wx.CANCEL)
res = dlg.ShowModal()
if res == wx.ID_YES:
try:
os.startfile(self.GetFilename())
except:
messageErreur(None, "Ouverture impossible",
"Impossible d'ouvrir le fichier\n\n%s\n" %toSystemEncoding(self.GetFilename()))
dlg.Destroy()
######################################################################################################
def Ecraser(self, nomFichier):
if os.path.exists(nomFichier):
dlg = wx.MessageDialog(self, "Le fichier existe déja !\n\n%s\n\n"\
"Voulez-vous l'écraser ?" %nomFichier,
"Fichier existant",
wx.ICON_WARNING | wx.YES_NO | wx.CANCEL)
res = dlg.ShowModal()
dlg.Destroy()
return res == wx.ID_YES
return True
######################################################################################################
def EnregistrerSous(self, titre, nomFichierDefaut = ""):
wildcard = "Rich Text Format (.rtf)|*.rtf|" \
"Format HTML (.html)|*.html|" \
"Fichier texte (.txt)|*.txt"
types = [0, 3, 2]
dlg = wx.FileDialog(self, titre,
wildcard=wildcard,
defaultFile = nomFichierDefaut,
style=wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.Enregistrer(titre, path)
dlg.Destroy()
class RTPrinting(rt.RichTextPrinting):
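    """Printing configuration for the report: A4 paper, printer mode, high print quality and reduced page margins."""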
def __init__(self, parent):
rt.RichTextPrinting.__init__(self, "", parent)
self.SetTitle("Tâches")
printData = wx.PrintData()
printData.SetPaperId(wx.PAPER_A4)
printData.SetPrintMode(wx.PRINT_MODE_PRINTER)
printData.SetQuality(wx.PRINT_QUALITY_HIGH)
self.SetPrintData(printData)
pageSetupData = self.GetPageSetupData()
pageSetupData.SetMarginBottomRight(wx.Point(10,10))
pageSetupData.SetMarginTopLeft(wx.Point(10,10))
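# HTML header (UTF-8 charset declaration) prepended when the report is written out as an .html file.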
prefixeHTML = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
"""
#----------------------------------------------------------------------
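# Tool bar icons embedded as base64-encoded PNG data (PyEmbeddedImage).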
_rt_save = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAPCAYAAADtc08vAAAABHNCSVQICAgIfAhkiAAAAQ1J"
"REFUKJFjjE/L/c9AJlg0ZxojCwMDA8Oee78YvOzNGGbVJBHUFFc7hWHfiSsMLkpsDAwMDAws"
"DAwMDPE+rgyvP39kYGBgYNi7bz9Ozc5Ojgww9U+vHUQYgE0RsQDDAGJcgNcAsl0gysvPEFc7"
"haAGWRFJ3C5AlyTJBTCw7fxVvBq8DLVxG7Dt/FWG0kBLOF+In5eBn5eHgYeXl4Gfl49BTlKQ"
"wTChCcUQDBcwMDAwlE1Zy6CppsrAwMDA0JTkjtdFTHhlGRgYfv3+x8D89wfD7z9/yDOA+d93"
"hq9/WBh+/f2LVR7DC3KifAwrGhMZhKXkGTQVJAiZz8DIyMTMEJeSRXKOXDRnGiMDAwMDALeo"
"P7cp9rvcAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
_rt_undo = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAPCAYAAADtc08vAAAABHNCSVQICAgIfAhkiAAAAhVJ"
"REFUKJGNkstrE1EYxX8zmcSZZDJp2rSNfSg22CANYhYijWjAjcviwkVxW2hBVyZ/gZu6aOtK"
"aLC7dicqwcdGiIrUoCIhpUVDsPZhq4GENqE2aUu5LuqkLxv94Fvce885995zPkmSLRxVffce"
"ikQ6W123N7i41XOR65fPSeaeFH3wTAz390h7ib2D4+J9ZhGXajskWqxscq27C5MjP0nOEInF"
"hQkIDgyJpeUCvjoVjyrjtCoAOK0KHlXGV6eSSGUZefxaACgu1cbH6W/0Do6LL/M5WjQNpyqz"
"tb3NbKnClaCPwMlmpudzJFJZ/G4Hhm2b+OQMAApAp8fOykoRv9uBrlpYq+yQU6NRKbXn+ZFY"
"XCzNLeN22Jj9UdoV0FU7umoHYK2yTmblF6nR6D5fAFobXRR/5tBVO07r+o6A06pgGM59QMOx"
"9ddU4pMzhDu8ICtAHgAZwDhmrXZbYz3hDi/BgSFxUMBjkzA0jbXNMucDp3YEJJsVQ9cwdI1S"
"uczCaoFsLl+N0ySHI/fF1eAZDF3j00KhGqOyWCgy8TZNa0sDXSeauNTuqw6KaWD37Zi4caGT"
"ekPnXeYrp9uaePPnTKo1iSb5ZjjA8WY333N5JpKfeXm3f9dgSbYc2aHomHj6Ki2mMnPiUWJK"
"hKJj4iBGrnV7yO/lrL+dfHGD4RcfSI70H4q25hdME0vlDZ7f6TtE/i8P/lW/AfYJsF99ZciZ"
"AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
_rt_redo = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAPCAYAAADtc08vAAAABHNCSVQICAgIfAhkiAAAAg5J"
"REFUKJGdkr9rU1EcxT/3vbz2vfS924qRpmopVmIsFCWDiFkCAXHs5CDoJqSrJIP+BS5tXCw0"
"EHDo4FBUguDgULVQImJJLe0Qu2hqWyKNMT9q0p/XofRHmtrBA9/l3HPPPffcK4Sms4fxyRn1"
"NDXFYqG0z4Wv+kg+uC34B8SeQTSRUq/S87SbLU2iUn2D6/5unj+612AUTaSUEJpO/OV7Nfb2"
"Mx5TA2B9W6OyuYVjuGjVdxq4zGhMHD5QCE0nFB1RHl1h6DrZ4hrhgI/+nk7mvueZyCzQK00M"
"XadS32G5VuNyTydLywUqm1u4AMprNXxdkmp9m3DAx3BkoPHOg0PKf6qNrg4Dx9TYKJa45HEz"
"vVJGA3AMF7bpxjZ1zp1pb+ogMxoT2eIaAN4Oh+7THdimG2A3AYCUDtK2SE3NH9u2bLOwTTdS"
"OvucY6zuGlzrv0C1XuOsI/G0NL9YYHBIhXq9SMtqWtMAhiMDYjpXQNoWtwJ9hKIjak9w5/GY"
"AljIr5L7XaBcqyFtC2lbiBbj4B/cfzKupLZN0H+RX+Uqzz5+JR2PNMQZn5xR2cU887mfLC0X"
"+FH5c2AAcPNhQt290cf5Tg8r+SIjH+aaTJogNL1hgrGkejExq2az39Trd19UMJZURzWHRztq"
"mI5HxPCbT6yW1rni7ybo954YwHUcmY5HRNxOKmm1nrgZaOzgf/AXUUy2DjrCDG0AAAAASUVO"
"RK5CYII=")
#----------------------------------------------------------------------
_rt_copy = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAPCAYAAADtc08vAAAABHNCSVQICAgIfAhkiAAAATlJ"
"REFUKJGFk71OwzAURo/tpE2AdihiAAmQWNiKWICpDIhnQEi8A0+ASsXAzDsgIcTEA3QANsZu"
"XTMQBiqkUkFF04aB2sRJSO90bV+f+33+EUIqzq7bMam471UA6JzuiPRaMqROltc2KS9tMFhY"
"JVArAJw31qlfPWfguYCqp6j5Lou+S81XpmAWRGgLe1t13r8i+sMxYdAtasrFyYGx5eik4v11"
"DYHW8T6dl0/6w4i3wYjXjxFh0KV51ADasYYYQNUzKXlQDQYsiNnluzLJA6CsBKQgrdtHa2x2"
"zJdkeoq5koLvsYEc7m5bdqxqRwk8V5C4GFwlDCRKKdR2Egq01IkpUhJgCsmKtkdKJiHTOSFA"
"xoWQ7NFbgF8F+ZAU4PLuKbMopYBJXAhxwH5ZgPW5ZkH+tdC8eShyZ+IHUNNZHhrzal0AAAAA"
"SUVORK5CYII=")
#----------------------------------------------------------------------
_rt_alignleft = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAEJJ"
"REFUOI1jZGRiZqAEMFGkm4GBgYWBgYHBx9vrPzmat2zdxshIqRdIcsGWrdsY0cXo6wJsLhoN"
"g2ERBhRnJooNAACQdhhQZbXeGAAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_rt_alignright = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAADxJ"
"REFUOI1jZGRiZqAEMFGkm4GBgYWBgYHBx9vrPzmat2zdxshIqRdYsAkS6yLquWA0DEZ8GFCc"
"mSg2AADQZxhQFG0xxgAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_rt_bold = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAEtJ"
"REFUOI3NUkEKACAMyq3//7jWNQwWY0HzKNOJCIi2DCSlfmHQmbA5zBNAFG4CPoAodo4fFOyA"
"wZGvHTDqdwCecnQHh0EU/ztIGyy1dBRJuH/9MwAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_rt_centre = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAEJJ"
"REFUOI1jZGRiZqAEMFGkm4GBgYWBgYHBx9vrPzmat2zdxshIqRdYkDnEumTL1m2MMDZ1XDAa"
"BiM+DCjOTBQbAAAwdhhQDziCqAAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_rt_colour = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAPZJ"
"REFUOI1jZGRiZqAEsOCS+Mcu9h+bONPPV4wofEKa37Lz4zWYEd0LuGzG5RKsLiAFDEIDllTz"
"MWxtyGJ4yiWKofgfCyTSkGMCJRDd/hr/Z2BgYGCZ5cAg8v0jg++C9wy6zx8ysP37zfCYXYFh"
"g1gww+VfUSiGwg2AaRZ/JcPw6v0fhv/qLxg4vv1jCOv5zPBvZgrDSukghp8/ZRkY/rFiGgDT"
"jBV84mX4572WgekzL8O/v5hBxoRXMwMDw/+3QgwM/3CHNeFY+MvMwMDyE6vtRBnAKPqWgUH2"
"OQUu4P/IwGh8HrcFBAORgYFhF/NZRhetP1jVAACsCFJPHjA77wAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_rt_cut = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAPCAYAAADtc08vAAAABHNCSVQICAgIfAhkiAAAAbBJ"
"REFUKJGdk0FLG1EQx3/vpRdv7sG49CKYxvSmVDwkpd78ALbSShQkbU81guAH8BN4EE0KGlCQ"
"5iAIoiaIwWAP3bi0WXZLW1q2WfGmJ8mhV19Pu+xqWsSBx/Bm/vObmQcPIWP4Jz83r96vb6pw"
"LJxzXfdWThKyuJR8/2rjOI4Kxz8ZDQUwkHosuGERwOLKsohLydpaKSIqfyjfrOsM8C2VSlKr"
"1RRAtVJRAK8mJ+8GWFxZFldui93dPTzvTFWqhwCMPnt6a3yAB52CWjLBSCLBwcH+P0f/7wpX"
"bouLywvys+/uB9CSCfRendVCkezMm/tN8PnwiKHBQX59axKXHWUACCFjAHyp15VX2gIgbdg0"
"MkO8LG+I7WxO+XeARwt5ngwPBw8q/eLe1wtI75y25QTCsG9bDtI7p+fFW6xmU0UAXmkLU9eY"
"OK0LNf0cIOji+4ezOSZO68LUNX4vrUbfIG3YXPf3AdD9o4Wpa5E9TV3jT8MC4Lq/j7RhRwGm"
"rtG2HPx9u6bGI4CuqXHShs12NqfalhNtIGSMn8cnaiczpnYyY6paKHb8jdVCMdA0Tz4Gmr9P"
"zKg0oZ3GfwAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_rt_font = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAIpJ"
"REFUOI21k8ENgCAMRSmMpwzAgenUsgDMhweCgUpRJDYhJG362v8DAFKJmZBT3W8A67LFz4Bj"
"T835HgY4V99DADqV24IF5Kk+WOht0QTkabm5twW03kHPeQoVIFV1EDFqjZHmtU55xLp2k8Bp"
"NaZdrwCcdhqlF5cHVHcJ4TzxwULTxJH4/zM9xQmi7UCACkKFWgAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_rt_idea = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAAVtJ"
"REFUWIXtl2FOwkAQhd8u3gAJp1EXuQEBrmOU24DxBtoWjmA8BAlXsOsPXNjadjtvRImJ708T"
"pnS+fTudnRpjezinLjR/Wq5K//W3+cwazbMM40BIPJ3c1GKPT4UKRASQShxruyuwWYMC6QRY"
"rkpfTZwBGCUhAGCzlkFYCeUxcTW5Ma521/Ay7RIFcFx9VouF5E0QAHB13VysFEBd7dbHYlxo"
"BUitXgohcYFwQLZ6VoJGpE+834oieQ9ZA5zCK3kWAEnyJMB8Zk1or1pJmpHaAe/zylUrRSvu"
"VjgTJK1YdRwD1Q4YuyDd+6DOLWBqgT2IAGIekGwFY30QVYQpJ+JZgJEYILUqzSASRBXh2+sd"
"Bn3XGBv0gTzPASyYR/JvwT7J6UQDOOdaYxq4fwcogPuHhQHQOuF8xilRHyaxspfnA8jodqz6"
"KvoWgC/fDwDG9n4f4FT60ZHsTwB8AA6FjDfFEDh8AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
_rt_indentless = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAHRJ"
"REFUOI3Nkm8KgCAMxfe2TlftCJ0u6ATa9eyTIqZiKdEDYfj25+eQwEKlo6qu5oOFABbq0eSD"
"dZldbBh7Ir3LaSTB7ozdEJstBOyL3xJA9bgVpyTVBmAJBK1PMPYMefx0YpagR7/6B2WCeGnD"
"CbhmfrKDC/GuLg9MR0atAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
_rt_indentmore = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAHlJ"
"REFUOI3NkmEKgCAMhbfZ6aododMJncB5PftlTE2TkuiBID7f9jkEJAO1xcyh5SMZQCQDbzTF"
"zbrMQRtOPOZnVxpJYIOTDbXZQ0BpwN4GciHzXoRykmaBOIPYXYdrT3DizzuUGv2dC4Kn+tU/"
"qBPooQ0noJb5yQwOwa4uD/KzgEcAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
_rt_italic = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAGdJ"
"REFUOI3Vk1EOgDAIQwt4/2P0lopfS6YOgsEfl+xntK8kMBE1dI623F8Atqzox+73N1GTcgez"
"mOTDPEThJekAHIBHmhQwzCTfAyrpKaCSHgKq6SGgmi5qkHmVV3Nfzf5S+/9faANOrocplI0e"
"xSoAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
_rt_paste = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAPCAYAAADtc08vAAAABHNCSVQICAgIfAhkiAAAAXNJ"
"REFUKJGFkzsvREEYhp/vzDnWWHuxdnsJjd+wRKPYgkIUKqHVKtYlQoi4FX6BiGQTolEpFBIU"
"/gUJtbXWdSMuo1jGHueceJvJN5nvmff9JiPiKH6UL5YMITrfGJWwfQARR5EvlsxY8pqr6gvL"
"60u+A3NT8wCcOd2hICdfLJmT/k+AQPPPXke6hcP241CHbmOxtboW5TRS0jO9a06HM5j7MgAf"
"lRsAzE2N15cLBm77A02NURxLSmUBUJlcvc5pYi1dAGxODDI7WgDgaHHEF8UBkERbJAQgrV2y"
"rZ510AixM5BEG+bxDkllMfdlVCZn46T071MXFvZ9cVwAiScxzw+hEIAm5ZDSsD05RLX2Tvnp"
"jZXS0S8AnUAgFALQ7AlQh/yVHSI6gcSTNo5vJiI0e/LtRJHWrh8gno6EAHhKLCTepHwzqaNi"
"McRVmNpTIA5U6J3ZC3r3AZz6IroV3j8wYCFn4532cN/OZeA/uAC98weRN/ynL78NdulpYuMM"
"AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
_rt_sample = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAAMNJ"
"REFUWIXtl0sawiAMhGcoN2mvqIeoV6RHUVwoC5VqiOkXFsyahJ+8ADJM8FRw3X0A9AAQfy3I"
"t2vWOGaYaAIAAPN8atp82y7ite4pEAOktCKl1Q/gKLkDiIpQovfCk3aPGQAA5MaGJYGo7XMr"
"RQD4RiCaJi8q3mSWHRVhSSDr5MtyPgTAPQJEOftOBFpq4OlIbElKbsOaIT5vO203uafAHcB0"
"Ej7UNjk6isBO/7dI48IsBdI3YBXg/7PrxfE1GwDeAHen2yjnZJXsxQAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_rt_underline = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAFdJ"
"REFUOI1jZGRiZqAEMFGkmxoGsKAL/P/39z8yn5GJmRGbGE4XIEvC2NjEcBpAKhg1gIABS5cs"
"/o9MYwOMuJIyetwzMGBGIV4DiAUEUyI2gJKwBjw3AgDOdhYrghF5ggAAAABJRU5ErkJggg==")
| gpl-3.0 | 2,687,847,378,589,196,000 | 38.374489 | 143 | 0.542851 | false |
qbuat/rootpy | rootpy/utils/extras.py | 1 | 3415 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from urllib2 import urlopen
import xml.dom.minidom as minidom
from itertools import chain, izip
from .. import log; log = log[__name__]
from . import quickroot as QROOT
__all__ = [
'iter_ROOT_classes',
'humanize_bytes',
'print_table',
'izip_exact',
'LengthMismatch',
]
def iter_ROOT_classes():
"""
Iterator over all available ROOT classes
"""
class_index = "http://root.cern.ch/root/html/ClassIndex.html"
for s in minidom.parse(urlopen(class_index)).getElementsByTagName("span"):
if ("class", "typename") in s.attributes.items():
class_name = s.childNodes[0].nodeValue
try:
yield getattr(QROOT, class_name)
except AttributeError:
pass
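# A minimal usage sketch: listing the wrapped ROOT classes by name. This assumes
# network access to the root.cern.ch class index and a ROOT installation that
# QROOT can load.
#
#     for cls in iter_ROOT_classes():
#         log.debug("wrapped ROOT class: {0}".format(cls.__name__))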
def humanize_bytes(bytes, precision=1):
abbrevs = (
(1<<50L, 'PB'),
(1<<40L, 'TB'),
(1<<30L, 'GB'),
(1<<20L, 'MB'),
(1<<10L, 'kB'),
(1, 'bytes')
)
if bytes == 1:
return '1 byte'
for factor, suffix in abbrevs:
if bytes >= factor:
break
    return '%.*f %s' % (precision, float(bytes) / factor, suffix)
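# A quick sketch of the expected formatting (values chosen so the division is
# exact): humanize_bytes(1) -> '1 byte', humanize_bytes(1024) -> '1.0 kB',
# humanize_bytes(20 * (1 << 20)) -> '20.0 MB'.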
def print_table(table, sep=' '):
# Reorganize data by columns
cols = zip(*table)
# Compute column widths by taking maximum length of values per column
col_widths = [ max(len(value) for value in col) for col in cols ]
# Create a suitable format string
format = sep.join(['%%-%ds' % width for width in col_widths ])
# Print each row using the computed format
for row in table:
print format % tuple(row)
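# A minimal sketch: rows must already be sequences of strings, since the column
# widths are computed with len().
#
#     print_table([('file', 'size'),
#                  ('data.root', '2 GB')])
#
# prints two left-aligned columns separated by a single space.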
class LengthMismatch(Exception):
pass
def _throw():
raise LengthMismatch
yield None # unreachable
def _check(rest):
for i in rest:
try:
i.next()
except LengthMismatch:
pass
else:
raise LengthMismatch
return
yield None # unreachable
def izip_exact(*iterables):
"""
A lazy izip() that ensures that all iterables have the same length.
A LengthMismatch exception is raised if the iterables' lengths differ.
Examples
--------
    >>> list(izip_exact([]))
    []
    >>> list(izip_exact((), (), ()))
    []
    >>> list(izip_exact("abc", range(3)))
    [('a', 0), ('b', 1), ('c', 2)]
    >>> try:
    ...     list(izip_exact("", range(3)))
    ... except LengthMismatch:
    ...     print "mismatch"
    mismatch
    >>> try:
    ...     list(izip_exact(range(3), ()))
    ... except LengthMismatch:
    ...     print "mismatch"
    mismatch
    >>> try:
    ...     list(izip_exact(range(3), range(2), range(4)))
    ... except LengthMismatch:
    ...     print "mismatch"
    mismatch
    >>> items = izip_exact(range(3), range(2), range(4))
>>> items.next()
(0, 0, 0)
>>> items.next()
(1, 1, 1)
>>> try: items.next()
... except LengthMismatch: print "mismatch"
mismatch
References
----------
[1] http://code.activestate.com/recipes/497006-zip_exc-a-lazy-zip-that-ensures-that-all-iterables/
"""
rest = [chain(i, _throw()) for i in iterables[1:]]
first = chain(iterables[0], _check(rest))
return izip(*[first] + rest)
| gpl-3.0 | 3,772,350,570,441,238,000 | 24.485075 | 102 | 0.54817 | false |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_StringIO.py | 1 | 4641 | # Tests StringIO and cStringIO
import unittest
import StringIO
import cStringIO
import types
from test import test_support
class TestGenericStringIO(unittest.TestCase):
# use a class variable MODULE to define which module is being tested
# Line of data to test as string
_line = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!'
# Constructor to use for the test data (._line is passed to this
# constructor)
constructor = str
def setUp(self):
self._line = self.constructor(self._line)
self._lines = self.constructor((self._line + '\n') * 5)
self._fp = self.MODULE.StringIO(self._lines)
def test_reads(self):
eq = self.assertEqual
self.assertRaises(TypeError, self._fp.seek)
eq(self._fp.read(10), self._line[:10])
eq(self._fp.readline(), self._line[10:] + '\n')
eq(len(self._fp.readlines(60)), 2)
self._fp.seek(0)
eq(self._fp.readline(-1), self._line + '\n')
def test_writes(self):
f = self.MODULE.StringIO()
self.assertRaises(TypeError, f.seek)
f.write(self._line[:6])
f.seek(3)
f.write(self._line[20:26])
f.write(self._line[52])
self.assertEqual(f.getvalue(), 'abcuvwxyz!')
def test_writelines(self):
f = self.MODULE.StringIO()
f.writelines([self._line[0], self._line[1], self._line[2]])
f.seek(0)
self.assertEqual(f.getvalue(), 'abc')
def test_writelines_error(self):
def errorGen():
yield 'a'
raise KeyboardInterrupt()
f = self.MODULE.StringIO()
self.assertRaises(KeyboardInterrupt, f.writelines, errorGen())
def test_truncate(self):
eq = self.assertEqual
f = self.MODULE.StringIO()
f.write(self._lines)
f.seek(10)
f.truncate()
eq(f.getvalue(), 'abcdefghij')
f.truncate(5)
eq(f.getvalue(), 'abcde')
f.write('xyz')
eq(f.getvalue(), 'abcdexyz')
self.assertRaises(IOError, f.truncate, -1)
f.close()
self.assertRaises(ValueError, f.write, 'frobnitz')
def test_closed_flag(self):
f = self.MODULE.StringIO()
self.assertEqual(f.closed, False)
f.close()
self.assertEqual(f.closed, True)
f = self.MODULE.StringIO("abc")
self.assertEqual(f.closed, False)
f.close()
self.assertEqual(f.closed, True)
def test_isatty(self):
f = self.MODULE.StringIO()
self.assertRaises(TypeError, f.isatty, None)
self.assertEqual(f.isatty(), False)
f.close()
self.assertRaises(ValueError, f.isatty)
def test_iterator(self):
eq = self.assertEqual
unless = self.failUnless
eq(iter(self._fp), self._fp)
# Does this object support the iteration protocol?
unless(hasattr(self._fp, '__iter__'))
unless(hasattr(self._fp, 'next'))
i = 0
for line in self._fp:
eq(line, self._line + '\n')
i += 1
eq(i, 5)
self._fp.close()
self.assertRaises(ValueError, self._fp.next)
class TestStringIO(TestGenericStringIO):
MODULE = StringIO
def test_unicode(self):
if not test_support.have_unicode: return
# The StringIO module also supports concatenating Unicode
# snippets to larger Unicode strings. This is tested by this
# method. Note that cStringIO does not support this extension.
f = self.MODULE.StringIO()
f.write(self._line[:6])
f.seek(3)
f.write(unicode(self._line[20:26]))
f.write(unicode(self._line[52]))
s = f.getvalue()
self.assertEqual(s, unicode('abcuvwxyz!'))
self.assertEqual(type(s), types.UnicodeType)
class TestcStringIO(TestGenericStringIO):
MODULE = cStringIO
import sys
if sys.platform.startswith('java'):
# Jython doesn't have a buffer object, so we just do a useless
# fake of the buffer tests.
buffer = str
class TestBufferStringIO(TestStringIO):
constructor = buffer
class TestBuffercStringIO(TestcStringIO):
constructor = buffer
def test_main():
test_support.run_unittest(TestStringIO, TestcStringIO)
with test_support._check_py3k_warnings(("buffer.. not supported",
DeprecationWarning)):
test_support.run_unittest(TestBufferStringIO, TestBuffercStringIO)
if __name__ == '__main__':
test_main()
| mit | 64,293,950,995,065,250 | 29.787671 | 74 | 0.588882 | false |
translate/terminator | project/terminator_comments_app/models.py | 1 | 4026 | # -*- coding: UTF-8 -*-
#
# Copyright 2011, 2013 Leandro Regueiro
#
# This file is part of Terminator.
#
# Terminator is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Terminator is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Terminator. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.core.mail import send_mail, EmailMessage
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_comments.models import Comment
class TerminatorComment(Comment):
mail_me = models.BooleanField(default=True)
def comment_thread(self):
return self.content_type.get_object_for_this_type(pk=self.object_pk)
comment_thread.short_description = _('comment thread')
def save(self, *args, **kwargs):
# First of all test if object is in the database.
try:
object_in_bd = TerminatorComment.objects.get(pk=self.pk)
except TerminatorComment.DoesNotExist:
changed_or_new = _("New")
else:
changed_or_new = _("Changed")
# Call the super implementation.
super(TerminatorComment, self).save(*args, **kwargs)
# Send email messages only if allowed in the settings.
if settings.SEND_NOTIFICATION_EMAILS:
# Get the set of emails from all other users that commented in the
# current thread and want to keep updated.
thread_comments = TerminatorComment.objects.filter(content_type=self.content_type, object_pk=self.object_pk).exclude(user=self.user)
emails_to_notify_set = set()
for comment in thread_comments:
if comment.user.email and comment.mail_me:
emails_to_notify_set.add(comment.user.email)
# Get the set of emails from users that subscribed to glossary
# updates.
for subscriber in self.comment_thread().concept.glossary.subscribers.exclude(pk=self.user.pk):
emails_to_notify_set.add(subscriber.email)
#TODO Add the language mailing list address to emails_to_notify_set
# Now send an email to all other users that commented in the
# current thread or have subscribed to the glossary.
if emails_to_notify_set:
thread = self.comment_thread()
subject_data = {
'changed_or_new': changed_or_new,
'language': thread.language.name,
'concept': thread.concept.pk
}
mail_subject = _('[Terminator] %(changed_or_new)s message in '
'%(language)s thread for concept '
'#%(concept)s') % subject_data
email = EmailMessage(mail_subject, self.comment,
'[email protected]',
bcc=list(emails_to_notify_set))
email.send(fail_silently=False)
#TODO the two lines of code above try to send the message using
# only BCC field in order to avoid all the recipients to see
# the email addresses of the other recipients. If it fails
# comment the two lines above and uncomment the following one
# that send the email message without using the BCC field.
#send_mail(mail_subject, self.comment,
# '[email protected]',
# list(emails_to_notify_set), fail_silently=False)
| gpl-3.0 | -5,319,126,717,350,898,000 | 46.364706 | 144 | 0.622951 | false |
googleapis/googleapis-gen | google/cloud/domains/v1beta1/domains-v1beta1-py/google/cloud/domains_v1beta1/services/domains/transports/__init__.py | 2 | 1131 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import DomainsTransport
from .grpc import DomainsGrpcTransport
from .grpc_asyncio import DomainsGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[DomainsTransport]]
_transport_registry['grpc'] = DomainsGrpcTransport
_transport_registry['grpc_asyncio'] = DomainsGrpcAsyncIOTransport
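# A small sketch of how the registry is meant to be used: a transport class is
# looked up by name, e.g. _transport_registry['grpc'] gives DomainsGrpcTransport
# and _transport_registry['grpc_asyncio'] gives DomainsGrpcAsyncIOTransport.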
__all__ = (
'DomainsTransport',
'DomainsGrpcTransport',
'DomainsGrpcAsyncIOTransport',
)
| apache-2.0 | -404,207,965,977,427,400 | 33.272727 | 78 | 0.76481 | false |
Zac-HD/home-assistant | homeassistant/components/binary_sensor/homematic.py | 2 | 2130 | """
Support for Homematic binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.homematic/
"""
import logging
from homeassistant.const import STATE_UNKNOWN
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.homematic import HMDevice
from homeassistant.loader import get_component
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematic']
SENSOR_TYPES_CLASS = {
"Remote": None,
"ShutterContact": "opening",
"MaxShutterContact": "opening",
"IPShutterContact": "opening",
"Smoke": "smoke",
"SmokeV2": "smoke",
"Motion": "motion",
"MotionV2": "motion",
"RemoteMotion": None,
"WeatherSensor": None,
"TiltSensor": None,
}
def setup_platform(hass, config, add_callback_devices, discovery_info=None):
"""Setup the Homematic binary sensor platform."""
if discovery_info is None:
return
homematic = get_component("homematic")
return homematic.setup_hmdevice_discovery_helper(
hass,
HMBinarySensor,
discovery_info,
add_callback_devices
)
class HMBinarySensor(HMDevice, BinarySensorDevice):
"""Representation of a binary Homematic device."""
@property
def is_on(self):
"""Return true if switch is on."""
if not self.available:
return False
return bool(self._hm_get_state())
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
# If state is MOTION (RemoteMotion works only)
if self._state == "MOTION":
return "motion"
return SENSOR_TYPES_CLASS.get(self._hmdevice.__class__.__name__, None)
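    # A short sketch of the lookup above: a pyhomematic "MotionV2" device whose
    # state attribute is not "MOTION" resolves through the class mapping, i.e.
    # SENSOR_TYPES_CLASS["MotionV2"] -> "motion"; unmapped device types yield None.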
def _init_data_struct(self):
"""Generate a data struct (self._data) from the Homematic metadata."""
# add state to data struct
if self._state:
_LOGGER.debug("%s init datastruct with main node '%s'", self._name,
self._state)
self._data.update({self._state: STATE_UNKNOWN})
| apache-2.0 | 808,940,468,253,477,100 | 29.428571 | 79 | 0.652582 | false |
tmap/MashPI | old_mashpi.py | 1 | 3801 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# mashpi 2014-08-13
# it should work
import RPi.GPIO as GPIO
import time
import os
import sys
import glob
import re
from threading import Timer
# Define thermometer specs
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
# Define recipe properties
mash_temp=65
mash_time=5400 #90 MINUTES
element_cycle=10
thermo_cycle=60
strike_temp=68
boil_temp=97
boil_time=3600
class Element(object):
state = 0
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(17,GPIO.OUT)
def setState(self,x):
self.state = x
GPIO.output(17,x)
def printState(self):
if self.state is True:
print "Current state: off"
elif self.state is False:
print "Current state: on"
else:
pass
class Thermo(object):
def read_temp(self):
f = open(device_file, 'r')
lines = f.readlines()
f.close()
lines=str(lines)
match = re.search(r't=?([^\\n>]+)', lines)
temp_c=match.group(1)
temp_c = float(temp_c) / 1000.00
self.temp_c=temp_c
return temp_c
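    # A typical DS18B20 w1_slave file looks roughly like
    #   72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
    #   72 01 4b 46 7f ff 0e 10 57 t=23125
    # for which read_temp() above returns 23.125 (degrees Celsius).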
##############
###
###
### brewing steps
def finish_boil():
try:
'''switch off'''
Element1.setState(True)
print 'BOILING STEP HAS FINISHED'
sys.exit(1)
except:
print 'finish_boil error'
'''switch off'''
Element1.setState(True)
def boil():
try:
curr_temp=Thermo1.read_temp()
if curr_temp < boil_temp:
'''switch on'''
Element1.setState(False)
pass
elif curr_temp>boil_temp:
            start_boil=raw_input('Already boiling? start boil timer? [y/n]')
'''switch on'''
Element1.setState(False)
if start_boil=='y' or start_boil=='Y':
boil_timer = Timer(boil_time, finish_boil)
boil_timer.start()
elif start_boil=='n' or start_boil=='N':
'''switch off'''
Element1.setState(True)
except:
print 'boil error'
'''switch off'''
Element1.setState(True)
def finish_mash():
try:
remove_the_bag=raw_input('Mash finished, you should remove the bag, type Y when done [y/n]: ')
if remove_the_bag=='n' or remove_the_bag=='N':
finish_mash()
elif remove_the_bag=='y' or remove_the_bag=='Y':
'''switch off'''
Element1.setState(True)
            start_boiling_procedure=raw_input('Start boiling procedure? [y/n]: ')
if start_boiling_procedure=='n' or start_boiling_procedure=='N':
'''switch off'''
Element1.setState(True)
elif start_boiling_procedure=='y' or start_boiling_procedure=='Y':
boil()
except:
print 'boil error'
'''switch off'''
Element1.setState(True)
def mashing():
mash_timer = Timer(mash_time, finish_mash)
mash_timer.start()
try:
curr_temp=Thermo1.read_temp()
if curr_temp < mash_temp:
'''switch on'''
Element1.setState(False)
            time.sleep(element_cycle)
'''switch off'''
Element1.setState(True)
pass
elif curr_temp>=mash_temp:
'''switch off'''
Element1.setState(True)
pass
        time.sleep(thermo_cycle)
except:
print 'mash error'
'''switch off'''
Element1.setState(True)
def water_heating():
try:
curr_temp=Thermo1.read_temp()
if curr_temp < strike_temp:
'''switch on'''
Element1.setState(False)
pass
        elif curr_temp>=strike_temp:
'''switch off'''
Element1.setState(True)
ask_if_add_grain=raw_input('Add grains? [y/n]: ')
if ask_if_add_grain=='n' or ask_if_add_grain=='N':
water_heating()
elif ask_if_add_grain=='y' or ask_if_add_grain=='Y':
print 'It\'s time to add the grains'
ask_if_start_mash=raw_input('Start Mash? [y/n]: ')
if ask_if_start_mash=='n' or ask_if_start_mash=='N':
water_heating()
elif ask_if_start_mash=='y' or ask_if_start_mash=='Y':
mashing()
except:
print 'water_heating error'
'''switch off'''
Element1.setState(True)
# The element and thermometer instances used by all the brewing steps above;
# without them every step would fail with a NameError.
Element1 = Element()
Thermo1 = Thermo()

def main():
    water_heating()

main()
| gpl-3.0 | 4,749,828,075,881,721,000 | 21.358824 | 96 | 0.653512 | false |
yrobla/nova | nova/tests/test_api.py | 9 | 23344 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the API endpoint."""
import random
import StringIO
import boto
import boto.connection
from boto.ec2 import regioninfo
from boto import exception as boto_exc
# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
if hasattr(boto.connection, 'HTTPResponse'):
httplib = boto.connection
else:
import httplib
import fixtures
import webob
from nova.api import auth
from nova.api import ec2
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova import block_device
from nova import context
from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova.tests import matchers
class FakeHttplibSocket(object):
"""a fake socket implementation for httplib.HTTPResponse, trivial."""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
"""Returns the socket's internal buffer."""
return self._buffer
class FakeHttplibConnection(object):
"""A fake httplib.HTTPConnection for boto to use
requests made via this connection actually get translated and routed into
our WSGI app, we then wait for the response and turn it back into
the HTTPResponse that boto expects.
"""
def __init__(self, app, host, is_secure=False):
self.app = app
self.host = host
def request(self, method, path, data, headers):
req = webob.Request.blank(path)
req.method = method
req.body = data
req.headers = headers
req.headers['Accept'] = 'text/html'
req.host = self.host
# Call the WSGI app, get the HTTP response
resp = str(req.get_response(self.app))
# For some reason, the response doesn't have "HTTP/1.0 " prepended; I
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
self.sock = FakeHttplibSocket(resp)
self.http_response = httplib.HTTPResponse(self.sock)
# NOTE(vish): boto is accessing private variables for some reason
self._HTTPConnection__response = self.http_response
self.http_response.begin()
def getresponse(self):
return self.http_response
def getresponsebody(self):
return self.sock.response_string
def close(self):
"""Required for compatibility with boto/tornado."""
pass
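# ApiEc2TestCase.expect_http() below wires this fake into boto by stubbing
# new_http_connection, roughly:
#
#     http = FakeHttplibConnection(self.app, '127.0.0.1:8773', False)
#     self.ec2.new_http_connection('127.0.0.1:8773', False).AndReturn(http)
#
# so every EC2 call is routed through the WSGI app and handed back to boto as a
# real httplib.HTTPResponse parsed from the fake socket.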
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion."""
def test_number_conversion(self):
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True)
self.assertEqual(conv('TRUE'), True)
self.assertEqual(conv('true'), True)
self.assertEqual(conv('False'), False)
self.assertEqual(conv('FALSE'), False)
self.assertEqual(conv('false'), False)
self.assertEqual(conv('0'), 0)
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('3.14'), 3.14)
self.assertEqual(conv('-57.12'), -57.12)
self.assertEqual(conv('0x57'), 0x57)
self.assertEqual(conv('-0x57'), -0x57)
self.assertEqual(conv('-'), '-')
self.assertEqual(conv('-0'), 0)
self.assertEqual(conv('0.0'), 0.0)
self.assertEqual(conv('1e-8'), 0.0)
self.assertEqual(conv('-1e-8'), 0.0)
self.assertEqual(conv('0xDD8G'), '0xDD8G')
self.assertEqual(conv('0XDD8G'), '0XDD8G')
self.assertEqual(conv('-stringy'), '-stringy')
self.assertEqual(conv('stringy'), 'stringy')
self.assertEqual(conv('add'), 'add')
self.assertEqual(conv('remove'), 'remove')
self.assertEqual(conv(''), '')
class Ec2utilsTestCase(test.TestCase):
def test_ec2_id_to_id(self):
self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
def test_bad_ec2_id(self):
self.assertRaises(exception.InvalidEc2Id,
ec2utils.ec2_id_to_id,
'badone')
def test_id_to_ec2_id(self):
self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
def test_dict_from_dotted_str(self):
in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
expected_dict = {
'block_device_mapping': {
'1': {'device_name': '/dev/sda1',
'ebs': {'snapshot_id': 'snap-0000001c',
'volume_size': 80,
'delete_on_termination': False}},
'2': {'device_name': '/dev/sdc',
'virtual_name': 'ephemeral0'}}}
out_dict = ec2utils.dict_from_dotted_str(in_str)
self.assertThat(out_dict, matchers.DictMatches(expected_dict))
    def test_properties_root_device_name(self):
mappings = [{"device": "/dev/sda1", "virtual": "root"}]
properties0 = {'mappings': mappings}
properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
root_device_name = block_device.properties_root_device_name(
properties0)
self.assertEqual(root_device_name, '/dev/sda1')
root_device_name = block_device.properties_root_device_name(
properties1)
self.assertEqual(root_device_name, '/dev/sdb')
def test_mapping_prepend_dev(self):
mappings = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': 'sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': 'sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
expected_result = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': '/dev/sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': '/dev/sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
self.assertThat(block_device.mappings_prepend_dev(mappings),
matchers.DictListMatches(expected_result))
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API."""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.host = '127.0.0.1'
# NOTE(vish): skipping the Authorizer
roles = ['sysadmin', 'netadmin']
ctxt = context.RequestContext('fake', 'fake', roles=roles)
self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
), 'nova.api.ec2.cloud.CloudController'))))
self.useFixture(fixtures.FakeLogger('boto'))
def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection."""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
is_secure=False,
region=regioninfo.RegionInfo(None, 'test', self.host),
port=8773,
path='/services/Cloud')
if api_version:
self.ec2.APIVersion = api_version
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable=E1103
if boto.Version >= '2':
self.ec2.new_http_connection(host or '%s:8773' % (self.host),
is_secure).AndReturn(self.http)
else:
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
def test_return_valid_isoformat(self):
"""
Ensure that the ec2 api returns datetime in xs:dateTime
(which apparently isn't datetime.isoformat())
NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
"""
conv = apirequest._database_to_isoformat
# sqlite database representation with microseconds
time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
"%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
        # mysql database representation
time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
"%Y-%m-%d %H:%M:%S")
self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
def test_xmlns_version_matches_request_version(self):
self.expect_http(api_version='2010-10-30')
self.mox.ReplayAll()
# Any request should be fine
self.ec2.get_all_instances()
self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
'The version in the xmlns of the response does '
'not match the API version given in the request.')
def test_describe_instances(self):
"""Test that, after creating a user and a project, the describe
instances call to the API works properly"""
self.expect_http()
self.mox.ReplayAll()
self.assertEqual(self.ec2.get_all_instances(), [])
def test_terminate_invalid_instance(self):
# Attempt to terminate an invalid instance.
self.expect_http()
self.mox.ReplayAll()
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.terminate_instances, "i-00000005")
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
a key pair, that the API call to list key pairs works properly"""
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
self.expect_http()
self.mox.ReplayAll()
self.ec2.create_key_pair(keyname)
rv = self.ec2.get_all_key_pairs()
results = [k for k in rv if k.name == keyname]
self.assertEquals(len(results), 1)
def test_create_duplicate_key_pair(self):
"""Test that, after successfully generating a keypair,
requesting a second keypair with the same name fails sanely"""
self.expect_http()
self.mox.ReplayAll()
self.ec2.create_key_pair('test')
try:
self.ec2.create_key_pair('test')
except boto_exc.EC2ResponseError, e:
if e.code == 'InvalidKeyPair.Duplicate':
pass
else:
self.assertEqual('InvalidKeyPair.Duplicate', e.code)
else:
self.fail('Exception not raised.')
def test_get_all_security_groups(self):
# Test that we can retrieve security groups.
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 1)
self.assertEquals(rv[0].name, 'default')
def test_create_delete_security_group(self):
# Test that we can create a security group.
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
self.ec2.create_security_group(security_group_name, 'test group')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 2)
self.assertTrue(security_group_name in [group.name for group in rv])
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
def test_group_name_valid_chars_security_group(self):
"""Test that we sanely handle invalid security group names.
EC2 API Spec states we should only accept alphanumeric characters,
spaces, dashes, and underscores. Amazon implementation
accepts more characters - so, [:print:] is ok. """
bad_strict_ec2 = "aa \t\x01\x02\x7f"
bad_amazon_ec2 = "aa #^% -=99"
test_raise = [
(True, bad_amazon_ec2, "test desc"),
(True, "test name", bad_amazon_ec2),
(False, bad_strict_ec2, "test desc"),
]
for test in test_raise:
self.expect_http()
self.mox.ReplayAll()
self.flags(ec2_strict_validation=test[0])
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
test[1],
test[2])
test_accept = [
(False, bad_amazon_ec2, "test desc"),
(False, "test name", bad_amazon_ec2),
]
for test in test_accept:
self.expect_http()
self.mox.ReplayAll()
self.flags(ec2_strict_validation=test[0])
self.ec2.create_security_group(test[1], test[2])
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(test[1])
def test_group_name_valid_length_security_group(self):
"""Test that we sanely handle invalid security group names.
API Spec states that the length should not exceed 255 chars """
self.expect_http()
self.mox.ReplayAll()
# Test block group_name > 255 chars
security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
for x in range(random.randint(256, 266)))
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
security_group_name,
'test group')
def test_authorize_revoke_security_group_cidr(self):
"""
Test that we can add and remove CIDR based rules
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '0.0.0.0/0')
group.authorize('icmp', -1, -1, '0.0.0.0/0')
group.authorize('udp', 80, 81, '0.0.0.0/0')
group.authorize('tcp', 1, 65535, '0.0.0.0/0')
group.authorize('udp', 1, 65535, '0.0.0.0/0')
group.authorize('icmp', 1, 0, '0.0.0.0/0')
group.authorize('icmp', 0, 1, '0.0.0.0/0')
group.authorize('icmp', 0, 0, '0.0.0.0/0')
def _assert(message, *args):
try:
group.authorize(*args)
except boto_exc.EC2ResponseError as e:
self.assertEqual(e.status, 400, 'Expected status to be 400')
self.assertIn(message, e.error_message)
else:
raise self.failureException, 'EC2ResponseError not raised'
# Invalid CIDR address
_assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
# Missing ports
_assert('Not enough parameters', 'tcp', '0.0.0.0/0')
# from port cannot be greater than to port
_assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
# For tcp, negative values are not allowed
_assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
# For tcp, valid port range 1-65535
_assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
# Invalid Cidr for ICMP type
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
# Invalid protocol
_assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
# Invalid port
_assert('An unknown error has occurred', 'tcp', " ", "81", '0.0.0.0/0')
# Invalid icmp port
_assert('An unknown error has occurred', 'icmp', " ", "81",
'0.0.0.0/0')
# Invalid CIDR Address
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
# Invalid CIDR Address
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
# Invalid Cidr ports
_assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
group = [grp for grp in rv if grp.name == security_group_name][0]
self.assertEquals(len(group.rules), 8)
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '0.0.0.0/0')
group.revoke('icmp', -1, -1, '0.0.0.0/0')
group.revoke('udp', 80, 81, '0.0.0.0/0')
group.revoke('tcp', 1, 65535, '0.0.0.0/0')
group.revoke('udp', 1, 65535, '0.0.0.0/0')
group.revoke('icmp', 1, 0, '0.0.0.0/0')
group.revoke('icmp', 0, 1, '0.0.0.0/0')
group.revoke('icmp', 0, 0, '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
def test_authorize_revoke_security_group_cidr_v6(self):
"""
Test that we can add and remove CIDR based rules
to a security group for IPv6
"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
group = [grp for grp in rv if grp.name == security_group_name][0]
self.assertEquals(len(group.rules), 1)
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '::/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
def test_authorize_revoke_security_group_foreign_group(self):
"""
Test that we can grant and revoke another security group access
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
rand_string = 'sdiuisudfsdcnpaqwertasd'
security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
other_security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
other_group = self.ec2.create_security_group(other_security_group_name,
'some other group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize(src_group=other_group)
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
        # I don't bother checking that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
self.assertEquals(len(group.rules), 3)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' %
(other_security_group_name, 'fake'))
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
for group in rv:
if group.name == security_group_name:
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke(src_group=other_group)
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.ec2.delete_security_group(other_security_group_name)
| apache-2.0 | -3,937,147,327,687,546,400 | 36.957724 | 79 | 0.578093 | false |
Donkyhotay/MoonPy | zope/app/interface/__init__.py | 1 | 4380 | ##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Code about interfaces.
This module contains code for interfaces in persistent modules.
$Id: __init__.py 73069 2007-03-08 18:36:12Z rossp $
"""
__docformat__ = 'restructuredtext'
from persistent import Persistent
from zodbcode.patch import registerWrapper, Wrapper, NameFinder
from zope.interface.interface import InterfaceClass
from zope.interface import Interface
from zope.security.proxy import removeSecurityProxy
from wref import FlexibleWeakKeyDictionary
class PersistentInterfaceClass(Persistent, InterfaceClass):
def __init__(self, *args, **kw):
Persistent.__init__(self)
InterfaceClass.__init__(self, *args, **kw)
self.dependents = FlexibleWeakKeyDictionary()
# PersistentInterface is equivalent to the zope.interface.Interface object
# except that it is also persistent. It is used in conjunction with
# zodb.code to support interfaces in persistent modules.
PersistentInterface = PersistentInterfaceClass("PersistentInterface",
(Interface, ))
class PersistentInterfaceWrapper(Wrapper):
def unwrap(self):
return PersistentInterfaceClass(self._obj.__name__)
def getInterfaceStateForPersistentInterfaceCreation(iface):
# Need to convert the dependents weakref dict to a persistent dict
dict = iface.__dict__.copy()
dependents = FlexibleWeakKeyDictionary()
for k, v in dict['dependents'].iteritems():
dependents[k] = v
dict['dependents'] = dependents
return dict
registerWrapper(InterfaceClass, PersistentInterfaceWrapper,
lambda iface: (),
getInterfaceStateForPersistentInterfaceCreation,
)
NameFinder.classTypes[InterfaceClass] = True
NameFinder.types[InterfaceClass] = True
NameFinder.classTypes[PersistentInterfaceClass] = True
NameFinder.types[PersistentInterfaceClass] = True
from zope.interface.declarations import providedBy
def queryType(object, interface):
"""Returns the object's interface which implements interface.
>>> from zope.app.content.interfaces import IContentType
>>> from zope.interface import Interface, implements, directlyProvides
>>> class I(Interface):
... pass
>>> class J(Interface):
... pass
>>> directlyProvides(I, IContentType)
>>> class C(object):
... implements(I)
>>> class D(object):
... implements(J,I)
>>> obj = C()
>>> c1_ctype = queryType(obj, IContentType)
>>> c1_ctype.__name__
'I'
>>> class I1(I):
... pass
>>> class I2(I1):
... pass
>>> class I3(Interface):
... pass
>>> class C1(object):
... implements(I1)
>>> obj1 = C1()
>>> c1_ctype = queryType(obj1, IContentType)
>>> c1_ctype.__name__
'I'
>>> class C2(object):
... implements(I2)
>>> obj2 = C2()
>>> c2_ctype = queryType(obj2, IContentType)
>>> c2_ctype.__name__
'I'
>>> class C3(object):
... implements(I3)
>>> obj3 = C3()
If Interface doesn't provide `IContentType`, `queryType` returns ``None``.
>>> c3_ctype = queryType(obj3, IContentType)
>>> c3_ctype
>>> c3_ctype is None
True
>>> class I4(I):
... pass
>>> directlyProvides(I4, IContentType)
>>> class C4(object):
... implements(I4)
>>> obj4 = C4()
>>> c4_ctype = queryType(obj4, IContentType)
>>> c4_ctype.__name__
'I4'
"""
# Remove the security proxy, so that we can introspect the type of the
# object's interfaces.
naked = removeSecurityProxy(object)
object_iro = providedBy(naked).__iro__
for iface in object_iro:
if interface.providedBy(iface):
return iface
return None
| gpl-3.0 | -8,777,955,528,858,111,000 | 30.285714 | 78 | 0.634247 | false |
icereval/raven-python | raven/contrib/celery/__init__.py | 3 | 2299 | """
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~
>>> class CeleryClient(CeleryMixin, Client):
>>> def send_encoded(self, *args, **kwargs):
>>> "Errors through celery"
>>> self.send_raw.delay(*args, **kwargs)
>>> @task(routing_key='sentry')
>>> def send_raw(*args, **kwargs):
>>> return super(client, self).send_encoded(*args, **kwargs)
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from celery.signals import after_setup_logger, task_failure
from raven.handlers.logging import SentryHandler
class CeleryFilter(logging.Filter):
def filter(self, record):
# Context is fixed in Celery 3.x so use internal flag instead
extra_data = getattr(record, 'data', {})
if not isinstance(extra_data, dict):
return record.funcName != '_log_error'
# Fallback to funcName for Celery 2.5
return extra_data.get('internal', record.funcName != '_log_error')
def register_signal(client):
def process_failure_signal(sender, task_id, args, kwargs, **kw):
# This signal is fired inside the stack so let raven do its magic
client.captureException(
extra={
'task_id': task_id,
'task': sender,
'args': args,
'kwargs': kwargs,
})
task_failure.connect(process_failure_signal, weak=False)
def register_logger_signal(client, logger=None):
filter_ = CeleryFilter()
if logger is None:
logger = logging.getLogger()
handler = SentryHandler(client)
handler.setLevel(logging.ERROR)
handler.addFilter(filter_)
def process_logger_event(sender, logger, loglevel, logfile, format,
colorize, **kw):
# Attempt to find an existing SentryHandler, and if it exists ensure
# that the CeleryFilter is installed.
# If one is found, we do not attempt to install another one.
for h in logger.handlers:
if type(h) == SentryHandler:
h.addFilter(filter_)
return False
logger.addHandler(handler)
after_setup_logger.connect(process_logger_event, weak=False)
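# A minimal wiring sketch for a Celery worker, assuming a configured raven
# Client; the DSN below is only a placeholder.
#
#     from raven import Client
#     from raven.contrib.celery import register_logger_signal, register_signal
#
#     client = Client('https://<key>@sentry.example.com/<project>')
#     register_logger_signal(client)  # forward ERROR log records to Sentry
#     register_signal(client)         # capture task_failure signals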
| bsd-3-clause | 8,558,829,466,970,187,000 | 32.318841 | 76 | 0.621575 | false |
piMoll/SEILAPLAN | lib/reportlab/graphics/renderPS.py | 1 | 38053 | #Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://hg.reportlab.com/hg-public/reportlab/log/tip/src/reportlab/graphics/renderPS.py
__version__='3.3.0'
__doc__="""Render drawing objects in Postscript"""
from reportlab.pdfbase.pdfmetrics import getFont, stringWidth, unicode2T1 # for font info
from reportlab.lib.utils import getBytesIO, getStringIO, asBytes, char2int, rawBytes, asNative, isUnicode
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.colors import black
from reportlab.graphics.renderbase import Renderer, StateTracker, getStateDelta, renderScaledDrawing
from reportlab.graphics.shapes import STATE_DEFAULTS
import math
from operator import getitem
from reportlab import rl_config, xrange, ascii
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
_ESCAPEDICT={}
for c in xrange(256):
if c<32 or c>=127:
_ESCAPEDICT[c]= '\\%03o' % c
elif c in (ord('\\'),ord('('),ord(')')):
_ESCAPEDICT[c] = '\\'+chr(c)
else:
_ESCAPEDICT[c] = chr(c)
del c
def _escape_and_limit(s):
s = asBytes(s)
R = []
aR = R.append
n = 0
for c in s:
c = _ESCAPEDICT[char2int(c)]
aR(c)
n += len(c)
if n>=200:
n = 0
aR('\\\n')
return ''.join(R)
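# A quick sketch of the escaping: parentheses and backslashes are protected for
# PostScript string literals and runs longer than ~200 characters get a '\'
# line continuation, e.g. the text 50% (approx) becomes 50% \(approx\).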
# we need to create encoding vectors for each font we use, or they will
# come out in Adobe's old StandardEncoding, which NOBODY uses.
PS_WinAnsiEncoding="""
/RE { %def
findfont begin
currentdict dup length dict begin
{ %forall
1 index /FID ne { def } { pop pop } ifelse
} forall
/FontName exch def dup length 0 ne { %if
/Encoding Encoding 256 array copy def
0 exch { %forall
dup type /nametype eq { %ifelse
Encoding 2 index 2 index put
pop 1 add
}{ %else
exch pop
} ifelse
} forall
} if pop
currentdict dup end end
/FontName get exch definefont pop
} bind def
/WinAnsiEncoding [
39/quotesingle 96/grave 128/euro 130/quotesinglbase/florin/quotedblbase
/ellipsis/dagger/daggerdbl/circumflex/perthousand
/Scaron/guilsinglleft/OE 145/quoteleft/quoteright
/quotedblleft/quotedblright/bullet/endash/emdash
/tilde/trademark/scaron/guilsinglright/oe/dotlessi
159/Ydieresis 164/currency 166/brokenbar 168/dieresis/copyright
/ordfeminine 172/logicalnot 174/registered/macron/ring
177/plusminus/twosuperior/threesuperior/acute/mu
183/periodcentered/cedilla/onesuperior/ordmasculine
188/onequarter/onehalf/threequarters 192/Agrave/Aacute
/Acircumflex/Atilde/Adieresis/Aring/AE/Ccedilla
/Egrave/Eacute/Ecircumflex/Edieresis/Igrave/Iacute
/Icircumflex/Idieresis/Eth/Ntilde/Ograve/Oacute
/Ocircumflex/Otilde/Odieresis/multiply/Oslash
/Ugrave/Uacute/Ucircumflex/Udieresis/Yacute/Thorn
/germandbls/agrave/aacute/acircumflex/atilde/adieresis
/aring/ae/ccedilla/egrave/eacute/ecircumflex
/edieresis/igrave/iacute/icircumflex/idieresis
/eth/ntilde/ograve/oacute/ocircumflex/otilde
/odieresis/divide/oslash/ugrave/uacute/ucircumflex
/udieresis/yacute/thorn/ydieresis
] def
"""
class PSCanvas:
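    # A minimal sketch of driving PSCanvas directly to produce a small EPS file
    # (file name and drawing content are arbitrary):
    #
    #     c = PSCanvas((200, 100))
    #     c.setFillColor(black)
    #     c.setFont('Helvetica', 12)
    #     c.drawString(10, 80, 'Hello EPS')
    #     c.rect(5, 5, 195, 95, fill=0)
    #     c.save('hello.eps')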
def __init__(self,size=(300,300), PostScriptLevel=2):
self.width, self.height = size
xtraState = []
self._xtraState_push = xtraState.append
self._xtraState_pop = xtraState.pop
self.comments = 0
self.code = []
self.code_append = self.code.append
self._sep = '\n'
self._strokeColor = self._fillColor = self._lineWidth = \
self._font = self._fontSize = self._lineCap = \
self._lineJoin = self._color = None
self._fontsUsed = [] # track them as we go
self.setFont(STATE_DEFAULTS['fontName'],STATE_DEFAULTS['fontSize'])
self.setStrokeColor(STATE_DEFAULTS['strokeColor'])
self.setLineCap(2)
self.setLineJoin(0)
self.setLineWidth(1)
self.PostScriptLevel=PostScriptLevel
self._fillMode = FILL_EVEN_ODD
def comment(self,msg):
if self.comments: self.code_append('%'+msg)
def drawImage(self, image, x1,y1, width=None,height=None): # Postscript Level2 version
# select between postscript level 1 or level 2
if self.PostScriptLevel==1:
self._drawImageLevel1(image, x1,y1, width, height)
elif self.PostScriptLevel==2:
self._drawImageLevel2(image, x1, y1, width, height)
else :
raise ValueError('Unsupported Postscript Level %s' % self.PostScriptLevel)
def clear(self):
self.code_append('showpage') # ugh, this makes no sense oh well.
def _t1_re_encode(self):
if not self._fontsUsed: return
# for each font used, reencode the vectors
C = []
for fontName in self._fontsUsed:
fontObj = getFont(fontName)
if not fontObj._dynamicFont and fontObj.encName=='WinAnsiEncoding':
C.append('WinAnsiEncoding /%s /%s RE' % (fontName, fontName))
if C:
C.insert(0,PS_WinAnsiEncoding)
self.code.insert(1, self._sep.join(C))
def save(self,f=None):
if not hasattr(f,'write'):
_f = open(f,'wb')
else:
_f = f
if self.code[-1]!='showpage': self.clear()
self.code.insert(0,'''\
%%!PS-Adobe-3.0 EPSF-3.0
%%%%BoundingBox: 0 0 %d %d
%%%% Initialization:
/m {moveto} bind def
/l {lineto} bind def
/c {curveto} bind def
''' % (self.width,self.height))
self._t1_re_encode()
_f.write(rawBytes(self._sep.join(self.code)))
if _f is not f:
_f.close()
from reportlab.lib.utils import markfilename
markfilename(f,creatorcode='XPR3',filetype='EPSF')
def saveState(self):
self._xtraState_push((self._fontCodeLoc,))
self.code_append('gsave')
def restoreState(self):
self.code_append('grestore')
self._fontCodeLoc, = self._xtraState_pop()
def stringWidth(self, s, font=None, fontSize=None):
"""Return the logical width of the string if it were drawn
in the current font (defaults to self.font)."""
font = font or self._font
fontSize = fontSize or self._fontSize
return stringWidth(s, font, fontSize)
def setLineCap(self,v):
if self._lineCap!=v:
self._lineCap = v
self.code_append('%d setlinecap'%v)
def setLineJoin(self,v):
if self._lineJoin!=v:
self._lineJoin = v
self.code_append('%d setlinejoin'%v)
def setDash(self, array=[], phase=0):
"""Two notations. pass two numbers, or an array and phase"""
# copied and modified from reportlab.canvas
psoperation = "setdash"
if isinstance(array,(float,int)):
self.code_append('[%s %s] 0 %s' % (array, phase, psoperation))
elif isinstance(array,(tuple,list)):
assert phase >= 0, "phase is a length in user space"
textarray = ' '.join(map(str, array))
self.code_append('[%s] %s %s' % (textarray, phase, psoperation))
def setStrokeColor(self, color):
self._strokeColor = color
self.setColor(color)
def setColor(self, color):
if self._color!=color:
self._color = color
if color:
if hasattr(color, "cyan"):
self.code_append('%s setcmykcolor' % fp_str(color.cyan, color.magenta, color.yellow, color.black))
else:
self.code_append('%s setrgbcolor' % fp_str(color.red, color.green, color.blue))
def setFillColor(self, color):
self._fillColor = color
self.setColor(color)
def setFillMode(self, v):
self._fillMode = v
def setLineWidth(self, width):
if width != self._lineWidth:
self._lineWidth = width
self.code_append('%s setlinewidth' % width)
def setFont(self,font,fontSize,leading=None):
if self._font!=font or self._fontSize!=fontSize:
self._fontCodeLoc = len(self.code)
self._font = font
self._fontSize = fontSize
self.code_append('')
def line(self, x1, y1, x2, y2):
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.code_append('%s m %s l stroke' % (fp_str(x1, y1), fp_str(x2, y2)))
def _escape(self, s):
'''
return a copy of string s with special characters in postscript strings
escaped with backslashes.
'''
try:
return _escape_and_limit(s)
except:
raise ValueError("cannot escape %s" % ascii(s))
def _textOut(self, x, y, s, textRenderMode=0):
if textRenderMode==3: return
xy = fp_str(x,y)
s = self._escape(s)
if textRenderMode==0: #the standard case
self.setColor(self._fillColor)
self.code_append('%s m (%s) show ' % (xy,s))
return
fill = textRenderMode==0 or textRenderMode==2 or textRenderMode==4 or textRenderMode==6
stroke = textRenderMode==1 or textRenderMode==2 or textRenderMode==5 or textRenderMode==6
addToClip = textRenderMode>=4
if fill and stroke:
if self._fillColor is None:
op = ''
else:
op = 'fill '
self.setColor(self._fillColor)
self.code_append('%s m (%s) true charpath gsave %s' % (xy,s,op))
self.code_append('grestore ')
if self._strokeColor is not None:
self.setColor(self._strokeColor)
self.code_append('stroke ')
else: #can only be stroke alone
self.setColor(self._strokeColor)
self.code_append('%s m (%s) true charpath stroke ' % (xy,s))
def _issueT1String(self,fontObj,x,y,s, textRenderMode=0):
fc = fontObj
code_append = self.code_append
fontSize = self._fontSize
fontsUsed = self._fontsUsed
escape = self._escape
if not isUnicode(s):
try:
s = s.decode('utf8')
except UnicodeDecodeError as e:
i,j = e.args[2:4]
raise UnicodeDecodeError(*(e.args[:4]+('%s\n%s-->%s<--%s' % (e.args[4],s[i-10:i],s[i:j],s[j:j+10]),)))
for f, t in unicode2T1(s,[fontObj]+fontObj.substitutionFonts):
if f!=fc:
psName = asNative(f.face.name)
code_append('(%s) findfont %s scalefont setfont' % (psName,fp_str(fontSize)))
if psName not in fontsUsed:
fontsUsed.append(psName)
fc = f
self._textOut(x,y,t,textRenderMode)
x += f.stringWidth(t.decode(f.encName),fontSize)
if fontObj!=fc:
self._font = None
self.setFont(fontObj.face.name,fontSize)
def drawString(self, x, y, s, angle=0, text_anchor='left', textRenderMode=0):
needFill = textRenderMode in (0,2,4,6)
needStroke = textRenderMode in (1,2,5,6)
if needFill or needStroke:
if text_anchor!='left':
textLen = stringWidth(s, self._font,self._fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2.
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,s,textLen,self._font,self._fontSize)
fontObj = getFont(self._font)
if not self.code[self._fontCodeLoc]:
psName = asNative(fontObj.face.name)
self.code[self._fontCodeLoc]='(%s) findfont %s scalefont setfont' % (psName,fp_str(self._fontSize))
if psName not in self._fontsUsed:
self._fontsUsed.append(psName)
if angle!=0:
self.code_append('gsave %s translate %s rotate' % (fp_str(x,y),fp_str(angle)))
x = y = 0
oldColor = self._color
if fontObj._dynamicFont:
self._textOut(x, y, s, textRenderMode=textRenderMode)
else:
self._issueT1String(fontObj,x,y,s, textRenderMode=textRenderMode)
self.setColor(oldColor)
if angle!=0:
self.code_append('grestore')
def drawCentredString(self, x, y, text, text_anchor='middle', textRenderMode=0):
self.drawString(x,y,text, text_anchor=text_anchor, textRenderMode=textRenderMode)
def drawRightString(self, text, x, y, text_anchor='end', textRenderMode=0):
self.drawString(text,x,y,text_anchor=text_anchor, textRenderMode=textRenderMode)
def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4, closed=0):
codeline = '%s m %s curveto'
data = (fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
if self._fillColor != None:
self.setColor(self._fillColor)
self.code_append((codeline % data) + ' eofill')
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.code_append((codeline % data)
+ ((closed and ' closepath') or '')
+ ' stroke')
########################################################################################
def rect(self, x1,y1, x2,y2, stroke=1, fill=1):
"Draw a rectangle between x1,y1, and x2,y2"
        # Path is drawn in counter-clockwise direction
x1, x2 = min(x1,x2), max(x1, x2) # from piddle.py
y1, y2 = min(y1,y2), max(y1, y2)
self.polygon(((x1,y1),(x2,y1),(x2,y2),(x1,y2)), closed=1, stroke=stroke, fill = fill)
def roundRect(self, x1,y1, x2,y2, rx=8, ry=8):
"""Draw a rounded rectangle between x1,y1, and x2,y2,
with corners inset as ellipses with x radius rx and y radius ry.
These should have x1<x2, y1<y2, rx>0, and ry>0."""
# Path is drawn in counter-clockwise direction
x1, x2 = min(x1,x2), max(x1, x2) # from piddle.py
y1, y2 = min(y1,y2), max(y1, y2)
# Note: arcto command draws a line from current point to beginning of arc
# save current matrix, translate to center of ellipse, scale by rx ry, and draw
# a circle of unit radius in counterclockwise dir, return to original matrix
# arguments are (cx, cy, rx, ry, startAngle, endAngle)
ellipsePath = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s arc setmatrix'
# choice between newpath and moveTo beginning of arc
# go with newpath for precision, does this violate any assumptions in code???
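        # e.g. (illustrative) with x1 = y1 = 0 and rx = ry = 8 the first corner emits:
        #   'matrix currentmatrix 8 8 translate 8 -8 scale 0 0 1 90 180 arc setmatrix'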
rr = ['newpath'] # Round Rect code path
a = rr.append
# upper left corner ellipse is first
a(ellipsePath % (x1+rx, y1+ry, rx, -ry, 90, 180))
a(ellipsePath % (x1+rx, y2-ry, rx, -ry, 180, 270))
a(ellipsePath % (x2-rx, y2-ry, rx, -ry, 270, 360))
a(ellipsePath % (x2-rx, y1+ry, rx, -ry, 0, 90) )
a('closepath')
self._fillAndStroke(rr)
def ellipse(self, x1,y1, x2,y2):
"""Draw an orthogonal ellipse inscribed within the rectangle x1,y1,x2,y2.
These should have x1<x2 and y1<y2."""
#Just invoke drawArc to actually draw the ellipse
self.drawArc(x1,y1, x2,y2)
def circle(self, xc, yc, r):
self.ellipse(xc-r,yc-r, xc+r,yc+r)
def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, fromcenter=0):
"""Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,
starting at startAng degrees and covering extent degrees. Angles
start with 0 to the right (+x) and increase counter-clockwise.
These should have x1<x2 and y1<y2."""
#calculate centre of ellipse
#print "x1,y1,x2,y2,startAng,extent,fromcenter", x1,y1,x2,y2,startAng,extent,fromcenter
cx, cy = (x1+x2)/2.0, (y1+y2)/2.0
rx, ry = (x2-x1)/2.0, (y2-y1)/2.0
codeline = self._genArcCode(x1, y1, x2, y2, startAng, extent)
startAngleRadians = math.pi*startAng/180.0
extentRadians = math.pi*extent/180.0
endAngleRadians = startAngleRadians + extentRadians
codelineAppended = 0
# fill portion
if self._fillColor != None:
self.setColor(self._fillColor)
self.code_append(codeline)
codelineAppended = 1
if self._strokeColor!=None: self.code_append('gsave')
self.lineTo(cx,cy)
self.code_append('eofill')
if self._strokeColor!=None: self.code_append('grestore')
# stroke portion
if self._strokeColor != None:
# this is a bit hacked up. There is certainly a better way...
self.setColor(self._strokeColor)
(startx, starty) = (cx+rx*math.cos(startAngleRadians), cy+ry*math.sin(startAngleRadians))
if not codelineAppended:
self.code_append(codeline)
if fromcenter:
# move to center
self.lineTo(cx,cy)
self.lineTo(startx, starty)
self.code_append('closepath')
self.code_append('stroke')
def _genArcCode(self, x1, y1, x2, y2, startAng, extent):
"Calculate the path for an arc inscribed in rectangle defined by (x1,y1),(x2,y2)"
#calculate semi-minor and semi-major axes of ellipse
xScale = abs((x2-x1)/2.0)
yScale = abs((y2-y1)/2.0)
#calculate centre of ellipse
x, y = (x1+x2)/2.0, (y1+y2)/2.0
codeline = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s %s setmatrix'
if extent >= 0:
arc='arc'
else:
arc='arcn'
data = (x,y, xScale, yScale, startAng, startAng+extent, arc)
return codeline % data
def polygon(self, p, closed=0, stroke=1, fill=1):
assert len(p) >= 2, 'Polygon must have 2 or more points'
start = p[0]
p = p[1:]
poly = []
a = poly.append
a("%s m" % fp_str(start))
for point in p:
a("%s l" % fp_str(point))
if closed:
a("closepath")
self._fillAndStroke(poly,stroke=stroke,fill=fill)
def lines(self, lineList, color=None, width=None):
if self._strokeColor != None:
self._setColor(self._strokeColor)
codeline = '%s m %s l stroke'
for line in lineList:
self.code_append(codeline % (fp_str(line[0]),fp_str(line[1])))
def moveTo(self,x,y):
self.code_append('%s m' % fp_str(x, y))
def lineTo(self,x,y):
self.code_append('%s l' % fp_str(x, y))
def curveTo(self,x1,y1,x2,y2,x3,y3):
self.code_append('%s c' % fp_str(x1,y1,x2,y2,x3,y3))
def closePath(self):
self.code_append('closepath')
def polyLine(self, p):
assert len(p) >= 1, 'Polyline must have 1 or more points'
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.moveTo(p[0][0], p[0][1])
for t in p[1:]:
self.lineTo(t[0], t[1])
self.code_append('stroke')
def drawFigure(self, partList, closed=0):
figureCode = []
a = figureCode.append
first = 1
for part in partList:
op = part[0]
args = list(part[1:])
if op == figureLine:
if first:
first = 0
a("%s m" % fp_str(args[:2]))
else:
a("%s l" % fp_str(args[:2]))
a("%s l" % fp_str(args[2:]))
elif op == figureArc:
first = 0
x1,y1,x2,y2,startAngle,extent = args[:6]
a(self._genArcCode(x1,y1,x2,y2,startAngle,extent))
elif op == figureCurve:
if first:
first = 0
a("%s m" % fp_str(args[:2]))
else:
a("%s l" % fp_str(args[:2]))
a("%s curveto" % fp_str(args[2:]))
else:
raise TypeError("unknown figure operator: "+op)
if closed:
a("closepath")
self._fillAndStroke(figureCode)
def _fillAndStroke(self,code,clip=0,fill=1,stroke=1,fillMode=None):
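        # Emit the accumulated PostScript path in `code`, then fill and/or stroke it
        # according to the current colours; fillMode selects even-odd vs non-zero
        # filling, and clip=1 additionally installs the path as a clipping region.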
fill = self._fillColor and fill
stroke = self._strokeColor and stroke
if fill or stroke or clip:
self.code.extend(code)
if fill:
if fillMode is None:
fillMode = self._fillMode
if stroke or clip: self.code_append("gsave")
self.setColor(self._fillColor)
self.code_append("eofill" if fillMode==FILL_EVEN_ODD else "fill")
if stroke or clip: self.code_append("grestore")
if stroke:
if clip: self.code_append("gsave")
self.setColor(self._strokeColor)
self.code_append("stroke")
if clip: self.code_append("grestore")
if clip:
self.code_append("clip")
self.code_append("newpath")
def translate(self,x,y):
self.code_append('%s translate' % fp_str(x,y))
def scale(self,x,y):
self.code_append('%s scale' % fp_str(x,y))
def transform(self,a,b,c,d,e,f):
self.code_append('[%s] concat' % fp_str(a,b,c,d,e,f))
def _drawTimeResize(self,w,h):
'''if this is used we're probably in the wrong world'''
self.width, self.height = w, h
def _drawImageLevel1(self, image, x1, y1, width=None, height=None):
# Postscript Level1 version available for fallback mode when Level2 doesn't work
# For now let's start with 24 bit RGB images (following piddlePDF again)
component_depth = 8
myimage = image.convert('RGB')
imgwidth, imgheight = myimage.size
if not width:
width = imgwidth
if not height:
height = imgheight
#print 'Image size (%d, %d); Draw size (%d, %d)' % (imgwidth, imgheight, width, height)
# now I need to tell postscript how big image is
# "image operators assume that they receive sample data from
# their data source in x-axis major index order. The coordinate
# of the lower-left corner of the first sample is (0,0), of the
# second (1,0) and so on" -PS2 ref manual p. 215
#
        # The ImageMatrix maps the unit square of user space to the boundary of the source image
#
# The CurrentTransformationMatrix (CTM) maps the unit square of
# user space to the rect...on the page that is to receive the
# image. A common ImageMatrix is [width 0 0 -height 0 height]
# (for a left to right, top to bottom image )
# first let's map the user coordinates start at offset x1,y1 on page
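        # e.g. (illustrative) a 200x100 pixel source image yields the ImageMatrix
        # [200 0 0 -100 0 100] below, which flips the y axis so the top scanline
        # of the source maps to the top of the target rectangle.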
self.code.extend([
'gsave',
            '%s %s translate' % (x1,y1), # need to start at lower left of image
'%s %s scale' % (width,height),
'/scanline %d 3 mul string def' % imgwidth # scanline by multiples of image width
])
# now push the dimensions and depth info onto the stack
# and push the ImageMatrix to map the source to the target rectangle (see above)
        # finally specify source (PS2 pp. 225) and by example
self.code.extend([
'%s %s %s' % (imgwidth, imgheight, component_depth),
'[%s %s %s %s %s %s]' % (imgwidth, 0, 0, -imgheight, 0, imgheight),
'{ currentfile scanline readhexstring pop } false 3',
'colorimage '
])
        # data source output -- now we just need to deliver a hex encoded
        # series of lines of the right overall size (following piddlePDF again)
rawimage = (myimage.tobytes if hasattr(myimage,'tobytes') else myimage.tostring)()
hex_encoded = self._AsciiHexEncode(rawimage)
# write in blocks of 78 chars per line
outstream = getStringIO(hex_encoded)
dataline = outstream.read(78)
while dataline != "":
self.code_append(dataline)
dataline= outstream.read(78)
self.code_append('% end of image data') # for clarity
self.code_append('grestore') # return coordinates to normal
# end of drawImage
def _AsciiHexEncode(self, input): # also based on piddlePDF
"Helper function used by images"
output = getStringIO()
for char in asBytes(input):
output.write('%02x' % char2int(char))
return output.getvalue()
def _drawImageLevel2(self, image, x1,y1, width=None,height=None): # Postscript Level2 version
'''At present we're handling only PIL'''
### what sort of image are we to draw
if image.mode=='L' :
imBitsPerComponent = 8
imNumComponents = 1
myimage = image
        elif image.mode == '1':
            myimage = image.convert('L')  # promote 1-bit images to 8-bit greyscale
            imNumComponents = 1
            imBitsPerComponent = 8
else :
myimage = image.convert('RGB')
imNumComponents = 3
imBitsPerComponent = 8
imwidth, imheight = myimage.size
if not width:
width = imwidth
if not height:
height = imheight
self.code.extend([
'gsave',
            '%s %s translate' % (x1,y1), # need to start at lower left of image
'%s %s scale' % (width,height)])
if imNumComponents == 3 :
self.code_append('/DeviceRGB setcolorspace')
elif imNumComponents == 1 :
self.code_append('/DeviceGray setcolorspace')
# create the image dictionary
self.code_append("""
<<
/ImageType 1
/Width %d /Height %d %% dimensions of source image
/BitsPerComponent %d""" % (imwidth, imheight, imBitsPerComponent) )
if imNumComponents == 1:
self.code_append('/Decode [0 1]')
if imNumComponents == 3:
self.code_append('/Decode [0 1 0 1 0 1] %% decode color values normally')
self.code.extend([ '/ImageMatrix [%s 0 0 %s 0 %s]' % (imwidth, -imheight, imheight),
'/DataSource currentfile /ASCIIHexDecode filter',
'>> % End image dictionary',
'image'])
        # after the image operator we just need to dump the image data to file as a hexstring
rawimage = (myimage.tobytes if hasattr(myimage,'tobytes') else myimage.tostring)()
hex_encoded = self._AsciiHexEncode(rawimage)
# write in blocks of 78 chars per line
outstream = getStringIO(hex_encoded)
dataline = outstream.read(78)
while dataline != "":
self.code_append(dataline)
dataline= outstream.read(78)
        self.code_append('> % end of image data') # > is EOD for hex encoded filter, for clarity
self.code_append('grestore') # return coordinates to normal
# renderPS - draws drawings onto an EPS canvas
"""Usage:
    from reportlab.graphics import renderPS
    renderPS.draw(drawing, canvas, x, y)
Execute the script to see some test drawings."""
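# A minimal, illustrative end-to-end sketch using the module-level helpers defined
# further down (drawToFile / drawToString); the Drawing contents are made up:
#   from reportlab.graphics.shapes import Drawing, Rect
#   d = Drawing(100, 100)
#   d.add(Rect(10, 10, 80, 80))
#   drawToFile(d, 'out.eps')      # write an EPS file to disk
#   eps_bytes = drawToString(d)   # or keep the generated EPS in memory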
from reportlab.graphics.shapes import *
# hack so we only get warnings once each
#warnOnce = WarnOnce()
# the main entry point for users...
def draw(drawing, canvas, x=0, y=0, showBoundary=rl_config.showBoundary):
"""As it says"""
R = _PSRenderer()
R.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
def _pointsFromList(L):
'''
given a list of coordinates [x0, y0, x1, y1....]
    produce a list of points [(x0,y0), (x1,y1),....]
'''
P=[]
a = P.append
for i in range(0,len(L),2):
a((L[i],L[i+1]))
return P
class _PSRenderer(Renderer):
"""This draws onto a EPS document. It needs to be a class
rather than a function, as some EPS-specific state tracking is
needed outside of the state info in the SVG model."""
def drawNode(self, node):
"""This is the recursive method called for each node
in the tree"""
self._canvas.comment('begin node %r'%node)
color = self._canvas._color
if not (isinstance(node, Path) and node.isClipPath):
self._canvas.saveState()
#apply state changes
deltas = getStateDelta(node)
self._tracker.push(deltas)
self.applyStateChanges(deltas, {})
#draw the object, or recurse
self.drawNodeDispatcher(node)
rDeltas = self._tracker.pop()
if not (isinstance(node, Path) and node.isClipPath):
self._canvas.restoreState()
self._canvas.comment('end node %r'%node)
self._canvas._color = color
#restore things we might have lost (without actually doing anything).
for k, v in rDeltas.items():
if k in self._restores:
setattr(self._canvas,self._restores[k],v)
## _restores = {'stroke':'_stroke','stroke_width': '_lineWidth','stroke_linecap':'_lineCap',
## 'stroke_linejoin':'_lineJoin','fill':'_fill','font_family':'_font',
## 'font_size':'_fontSize'}
_restores = {'strokeColor':'_strokeColor','strokeWidth': '_lineWidth','strokeLineCap':'_lineCap',
'strokeLineJoin':'_lineJoin','fillColor':'_fillColor','fontName':'_font',
'fontSize':'_fontSize'}
def drawRect(self, rect):
if rect.rx == rect.ry == 0:
#plain old rectangle
self._canvas.rect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height)
else:
#cheat and assume ry = rx; better to generalize
#pdfgen roundRect function. TODO
self._canvas.roundRect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height, rect.rx, rect.ry
)
def drawLine(self, line):
if self._canvas._strokeColor:
self._canvas.line(line.x1, line.y1, line.x2, line.y2)
def drawCircle(self, circle):
self._canvas.circle( circle.cx, circle.cy, circle.r)
def drawWedge(self, wedge):
yradius, radius1, yradius1 = wedge._xtraRadii()
if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None) and not wedge.annular:
startangledegrees = wedge.startangledegrees
endangledegrees = wedge.endangledegrees
centerx= wedge.centerx
centery = wedge.centery
radius = wedge.radius
extent = endangledegrees - startangledegrees
self._canvas.drawArc(centerx-radius, centery-yradius, centerx+radius, centery+yradius,
startangledegrees, extent, fromcenter=1)
else:
P = wedge.asPolygon()
if isinstance(P,Path):
self.drawPath(P)
else:
self.drawPolygon(P)
def drawPolyLine(self, p):
if self._canvas._strokeColor:
self._canvas.polyLine(_pointsFromList(p.points))
def drawEllipse(self, ellipse):
#need to convert to pdfgen's bounding box representation
x1 = ellipse.cx - ellipse.rx
x2 = ellipse.cx + ellipse.rx
y1 = ellipse.cy - ellipse.ry
y2 = ellipse.cy + ellipse.ry
self._canvas.ellipse(x1,y1,x2,y2)
def drawPolygon(self, p):
self._canvas.polygon(_pointsFromList(p.points), closed=1)
def drawString(self, stringObj):
textRenderMode = getattr(stringObj,'textRenderMode',0)
if self._canvas._fillColor or textRenderMode:
S = self._tracker.getState()
text_anchor, x, y, text = S['textAnchor'], stringObj.x,stringObj.y,stringObj.text
if not text_anchor in ['start','inherited']:
font, fontSize = S['fontName'], S['fontSize']
textLen = stringWidth(text, font,fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,text,textLen,font,fontSize,encoding='winansi')
else:
raise ValueError('bad value for text_anchor '+str(text_anchor))
self._canvas.drawString(x,y,text, textRenderMode=textRenderMode)
def drawPath(self, path, fillMode=None):
from reportlab.graphics.shapes import _renderPath
c = self._canvas
drawFuncs = (c.moveTo, c.lineTo, c.curveTo, c.closePath)
autoclose = getattr(path,'autoclose','')
def rP(**kwds):
return _renderPath(path, drawFuncs, **kwds)
if fillMode is None:
fillMode = getattr(path,'fillMode',c._fillMode)
fill = c._fillColor is not None
stroke = c._strokeColor is not None
clip = path.isClipPath
fas = lambda **kwds: c._fillAndStroke([], fillMode=fillMode, **kwds)
pathFill = lambda : c._fillAndStroke([], stroke=0, fillMode=fillMode)
pathStroke = lambda : c._fillAndStroke([], fill=0)
if autoclose=='svg':
rP()
fas(stroke=stroke,fill=fill,clip=clip)
elif autoclose=='pdf':
if fill:
rP(forceClose=True)
fas(stroke=stroke,fill=fill,clip=clip)
elif stroke or clip:
rP()
fas(stroke=stroke,fill=0,clip=clip)
else:
if fill and rP(countOnly=True):
rP()
elif stroke or clip:
rP()
fas(stroke=stroke,fill=0,clip=clip)
def applyStateChanges(self, delta, newState):
"""This takes a set of states, and outputs the operators
needed to set those properties"""
for key, value in delta.items():
if key == 'transform':
self._canvas.transform(value[0], value[1], value[2],
value[3], value[4], value[5])
elif key == 'strokeColor':
#this has different semantics in PDF to SVG;
#we always have a color, and either do or do
#not apply it; in SVG one can have a 'None' color
self._canvas.setStrokeColor(value)
elif key == 'strokeWidth':
self._canvas.setLineWidth(value)
elif key == 'strokeLineCap': #0,1,2
self._canvas.setLineCap(value)
elif key == 'strokeLineJoin':
self._canvas.setLineJoin(value)
elif key == 'strokeDashArray':
if value:
if isinstance(value,(list,tuple)) and len(value)==2 and isinstance(value[1],(tuple,list)):
phase = value[0]
value = value[1]
else:
phase = 0
self._canvas.setDash(value,phase)
else:
self._canvas.setDash()
## elif key == 'stroke_opacity':
## warnOnce('Stroke Opacity not supported yet')
elif key == 'fillColor':
#this has different semantics in PDF to SVG;
#we always have a color, and either do or do
#not apply it; in SVG one can have a 'None' color
self._canvas.setFillColor(value)
## elif key == 'fill_rule':
## warnOnce('Fill rules not done yet')
## elif key == 'fill_opacity':
## warnOnce('Fill opacity not done yet')
elif key in ['fontSize', 'fontName']:
# both need setting together in PDF
# one or both might be in the deltas,
# so need to get whichever is missing
fontname = delta.get('fontName', self._canvas._font)
fontsize = delta.get('fontSize', self._canvas._fontSize)
self._canvas.setFont(fontname, fontsize)
def drawImage(self, image):
from reportlab.lib.utils import ImageReader
im = ImageReader(image.path)
self._canvas.drawImage(im._image,image.x,image.y,image.width,image.height)
def drawToFile(d,fn, showBoundary=rl_config.showBoundary,**kwd):
d = renderScaledDrawing(d)
c = PSCanvas((d.width,d.height))
draw(d, c, 0, 0, showBoundary=showBoundary)
c.save(fn)
def drawToString(d, showBoundary=rl_config.showBoundary):
"Returns a PS as a string in memory, without touching the disk"
s = getBytesIO()
drawToFile(d, s, showBoundary=showBoundary)
return s.getvalue()
#########################################################
#
# test code. First, define a bunch of drawings.
# Routine to draw them comes at the end.
#
#########################################################
def test(outDir='epsout',shout=False):
from reportlab.graphics import testshapes
from reportlab.rl_config import verbose
OLDFONTS = testshapes._FONTS[:]
testshapes._FONTS[:] = ['Times-Roman','Times-Bold','Times-Italic', 'Times-BoldItalic','Courier']
try:
import os
# save all drawings and their doc strings from the test file
if not os.path.isdir(outDir):
os.mkdir(outDir)
#grab all drawings from the test module
drawings = []
for funcname in dir(testshapes):
if funcname[0:10] == 'getDrawing':
func = getattr(testshapes,funcname)
drawing = func()
docstring = getattr(func,'__doc__','')
drawings.append((drawing, docstring))
i = 0
for (d, docstring) in drawings:
filename = outDir + os.sep + 'renderPS_%d.eps'%i
drawToFile(d,filename)
if shout or verbose>2: print('renderPS test saved %s' % ascii(filename))
i += 1
finally:
testshapes._FONTS[:] = OLDFONTS
if __name__=='__main__':
import sys
if len(sys.argv)>1:
outdir = sys.argv[1]
else:
outdir = 'epsout'
test(outdir,shout=True)
| gpl-2.0 | 4,931,666,620,803,069,000 | 38.108941 | 118 | 0.574225 | false |
jeanlinux/calibre | src/calibre/ebooks/oeb/iterator/spine.py | 14 | 5674 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from future_builtins import map
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import re, os
from functools import partial
from operator import attrgetter
from collections import namedtuple
from calibre import guess_type, replace_entities
from calibre.ebooks.chardet import xml_to_unicode
def character_count(html):
    ''' Return the number of "significant" text characters in an HTML string. '''
count = 0
strip_space = re.compile(r'\s+')
for match in re.finditer(r'>[^<]+<', html):
count += len(strip_space.sub(' ', match.group()))-2
return count
def anchor_map(html):
''' Return map of all anchor names to their offsets in the html '''
ans = {}
for match in re.finditer(
r'''(?:id|name)\s*=\s*['"]([^'"]+)['"]''', html):
anchor = match.group(1)
ans[anchor] = ans.get(anchor, match.start())
return ans
def all_links(html):
''' Return set of all links in the file '''
ans = set()
for match in re.finditer(
r'''<\s*[Aa]\s+.*?[hH][Rr][Ee][Ff]\s*=\s*(['"])(.+?)\1''', html, re.MULTILINE|re.DOTALL):
ans.add(replace_entities(match.group(2)))
return ans
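# Illustrative example of the two helpers above (offsets depend on the input):
#   html = '<a href="ch1.html#s1">one</a> <p id="intro">...</p>'
#   anchor_map(html) -> {'intro': <offset of the id attribute>}
#   all_links(html)  -> {'ch1.html#s1'}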
class SpineItem(unicode):
def __new__(cls, path, mime_type=None, read_anchor_map=True,
run_char_count=True, from_epub=False, read_links=True):
ppath = path.partition('#')[0]
if not os.path.exists(path) and os.path.exists(ppath):
path = ppath
obj = super(SpineItem, cls).__new__(cls, path)
with open(path, 'rb') as f:
raw = f.read()
if from_epub:
# According to the spec, HTML in EPUB must be encoded in utf-8 or
# utf-16. Furthermore, there exist epub files produced by the usual
# incompetents that have utf-8 encoded HTML files that contain
# incorrect encoding declarations. See
# http://www.idpf.org/epub/20/spec/OPS_2.0.1_draft.htm#Section1.4.1.2
# http://www.idpf.org/epub/30/spec/epub30-publications.html#confreq-xml-enc
# https://bugs.launchpad.net/bugs/1188843
# So we first decode with utf-8 and only if that fails we try xml_to_unicode. This
# is the same algorithm as that used by the conversion pipeline (modulo
# some BOM based detection). Sigh.
try:
raw, obj.encoding = raw.decode('utf-8'), 'utf-8'
except UnicodeDecodeError:
raw, obj.encoding = xml_to_unicode(raw)
else:
raw, obj.encoding = xml_to_unicode(raw)
obj.character_count = character_count(raw) if run_char_count else 10000
obj.anchor_map = anchor_map(raw) if read_anchor_map else {}
obj.all_links = all_links(raw) if read_links else set()
obj.verified_links = set()
obj.start_page = -1
obj.pages = -1
obj.max_page = -1
obj.index_entries = []
if mime_type is None:
mime_type = guess_type(obj)[0]
obj.mime_type = mime_type
obj.is_single_page = None
return obj
class IndexEntry(object):
def __init__(self, spine, toc_entry, num):
self.num = num
self.text = toc_entry.text or _('Unknown')
self.key = toc_entry.abspath
self.anchor = self.start_anchor = toc_entry.fragment or None
try:
self.spine_pos = spine.index(self.key)
except ValueError:
self.spine_pos = -1
self.anchor_pos = 0
if self.spine_pos > -1:
self.anchor_pos = spine[self.spine_pos].anchor_map.get(self.anchor,
0)
self.depth = 0
p = toc_entry.parent
while p is not None:
self.depth += 1
p = p.parent
self.sort_key = (self.spine_pos, self.anchor_pos)
self.spine_count = len(spine)
def find_end(self, all_entries):
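        # An entry ends at the first later entry of equal or shallower depth
        # (same file but a later anchor, or any later spine item); if there is
        # none, it runs to the end of the last spine item.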
potential_enders = [i for i in all_entries if
i.depth <= self.depth and
(
(i.spine_pos == self.spine_pos and i.anchor_pos >
self.anchor_pos)
or
i.spine_pos > self.spine_pos
)]
if potential_enders:
# potential_enders is sorted by (spine_pos, anchor_pos)
end = potential_enders[0]
self.end_spine_pos = end.spine_pos
self.end_anchor = end.anchor
else:
self.end_spine_pos = self.spine_count - 1
self.end_anchor = None
def create_indexing_data(spine, toc):
if not toc:
return
f = partial(IndexEntry, spine)
index_entries = list(map(f,
(t for t in toc.flat() if t is not toc),
(i-1 for i, t in enumerate(toc.flat()) if t is not toc)
))
index_entries.sort(key=attrgetter('sort_key'))
[i.find_end(index_entries) for i in index_entries]
ie = namedtuple('IndexEntry', 'entry start_anchor end_anchor')
for spine_pos, spine_item in enumerate(spine):
for i in index_entries:
if i.end_spine_pos < spine_pos or i.spine_pos > spine_pos:
continue # Does not touch this file
start = i.anchor if i.spine_pos == spine_pos else None
end = i.end_anchor if i.spine_pos == spine_pos else None
spine_item.index_entries.append(ie(i, start, end))
| gpl-3.0 | 5,830,317,173,059,512,000 | 37.080537 | 101 | 0.571907 | false |
qhm123/the10000 | v2ex/templatetags/filters.py | 2 | 6336 | import re
import logging
from django import template
from datetime import timedelta
import urllib, hashlib
register = template.Library()
def timezone(value, offset):
if offset > 12:
offset = 12 - offset
return value + timedelta(hours=offset)
register.filter(timezone)
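# Illustrative template usage (names are made up): assuming "created" is a UTC
# datetime and the member stores an offset of 8, {{ created|timezone:8 }}
# renders the time shifted to UTC+8.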
# auto convert img.ly/abcd links to image tags
def imgly(value):
imgs = re.findall('(http://img.ly/[a-zA-Z0-9]+)\s?', value)
if (len(imgs) > 0):
for img in imgs:
img_id = re.findall('http://img.ly/([a-zA-Z0-9]+)', img)
if (img_id[0] != 'system' and img_id[0] != 'api'):
value = value.replace('http://img.ly/' + img_id[0], '<a href="http://img.ly/' + img_id[0] + '" target="_blank"><img src="http://zdxproxy.appspot.com/img.ly/show/large/' + img_id[0] + '" class="imgly" border="0" /></a>')
return value
else:
return value
register.filter(imgly)
# auto convert cl.ly/abcd links to image tags
def clly(value):
imgs = re.findall('(http://cl.ly/[a-zA-Z0-9]+)\s?', value)
if (len(imgs) > 0):
for img in imgs:
img_id = re.findall('http://cl.ly/([a-zA-Z0-9]+)', img)
if (img_id[0] != 'demo' and img_id[0] != 'whatever'):
value = value.replace('http://cl.ly/' + img_id[0], '<a href="http://cl.ly/' + img_id[0] + '" target="_blank"><img src="http://cl.ly/' + img_id[0] + '/content" class="imgly" border="0" /></a>')
return value
else:
return value
register.filter(clly)
# auto convert youtube.com links to player
def youtube(value):
videos = re.findall('(http://www.youtube.com/watch\?v=[a-zA-Z0-9\-\_]+)\s?', value)
if (len(videos) > 0):
for video in videos:
video_id = re.findall('http://www.youtube.com/watch\?v=([a-zA-Z0-9\-\_]+)', video)
value = value.replace('http://www.youtube.com/watch?v=' + video_id[0], '<object width="480" height="385"><param name="movie" value="http://www.youtube.com/v/' + video_id[0] + '?fs=1&hl=en_US"></param><param name="allowFullScreen" value="true"></param><param name="allowscriptaccess" value="always"></param><embed src="http://www.youtube.com/v/' + video_id[0] + '?fs=1&hl=en_US" type="application/x-shockwave-flash" allowscriptaccess="always" allowfullscreen="true" width="480" height="385"></embed></object>')
return value
else:
return value
register.filter(youtube)
# auto convert youku.com links to player
# example: http://v.youku.com/v_show/id_XMjA1MDU2NTY0.html
def youku(value):
videos = re.findall('(http://v.youku.com/v_show/id_[a-zA-Z0-9\=]+.html)\s?', value)
logging.error(value)
logging.error(videos)
if (len(videos) > 0):
for video in videos:
video_id = re.findall('http://v.youku.com/v_show/id_([a-zA-Z0-9\=]+).html', video)
value = value.replace('http://v.youku.com/v_show/id_' + video_id[0] + '.html', '<embed src="http://player.youku.com/player.php/sid/' + video_id[0] + '/v.swf" quality="high" width="480" height="400" align="middle" allowScriptAccess="sameDomain" type="application/x-shockwave-flash"></embed>')
return value
else:
return value
register.filter(youku)
# auto convert @username to clickable links
def mentions(value):
ms = re.findall('(@[a-zA-Z0-9\_]+\.?)\s?', value)
if (len(ms) > 0):
for m in ms:
m_id = re.findall('@([a-zA-Z0-9\_]+\.?)', m)
if (len(m_id) > 0):
if (m_id[0].endswith('.') != True):
value = value.replace('@' + m_id[0], '@<a href="/member/' + m_id[0] + '">' + m_id[0] + '</a>')
return value
else:
return value
register.filter(mentions)
# gravatar filter
def gravatar(value,arg):
default = "http://v2ex.appspot.com/static/img/avatar_" + str(arg) + ".png"
if type(value).__name__ != 'Member':
return '<img src="' + default + '" border="0" align="absmiddle" />'
if arg == 'large':
number_size = 73
member_avatar_url = value.avatar_large_url
elif arg == 'normal':
number_size = 48
member_avatar_url = value.avatar_normal_url
elif arg == 'mini':
number_size = 24
member_avatar_url = value.avatar_mini_url
if member_avatar_url:
return '<img src="'+ member_avatar_url +'" border="0" alt="' + value.username + '" />'
else:
gravatar_url = "http://www.gravatar.com/avatar/" + hashlib.md5(value.email.lower()).hexdigest() + "?"
gravatar_url += urllib.urlencode({'s' : str(number_size), 'd' : default})
return '<img src="' + gravatar_url + '" border="0" alt="' + value.username + '" align="absmiddle" />'
register.filter(gravatar)
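# The fallback URL built above has the form (parameter order may vary):
# http://www.gravatar.com/avatar/<md5 of lower-cased email>?s=<size>&d=<default image url>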
# avatar filter
def avatar(value, arg):
default = "http://v2ex.appspot.com/static/img/avatar_" + str(arg) + ".png"
if type(value).__name__ != 'Member':
return '<img src="' + default + '" border="0" align="absmiddle" />'
if arg == 'large':
number_size = 73
member_avatar_url = value.avatar_large_url
elif arg == 'normal':
number_size = 48
member_avatar_url = value.avatar_normal_url
elif arg == 'mini':
number_size = 24
member_avatar_url = value.avatar_mini_url
if member_avatar_url:
return '<img src="'+ member_avatar_url +'" border="0" alt="' + value.username + '" align="absmiddle" />'
else:
return '<img src="' + default + '" border="0" alt="' + value.username + '" align="absmiddle" />'
register.filter(avatar)
# github gist script support
def gist(value):
return re.sub(r'(http://gist.github.com/[\d]+)', r'<script src="\1.js"></script>', value)
register.filter(gist)
_base_js_escapes = (
('\\', r'\u005C'),
('\'', r'\u0027'),
('"', r'\u0022'),
('>', r'\u003E'),
('<', r'\u003C'),
('&', r'\u0026'),
('=', r'\u003D'),
('-', r'\u002D'),
(';', r'\u003B'),
(u'\u2028', r'\u2028'),
(u'\u2029', r'\u2029')
)
# Escape every ASCII character with a value less than 32.
_js_escapes = (_base_js_escapes +
tuple([('%c' % z, '\\u%04X' % z) for z in range(32)]))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
for bad, good in _js_escapes:
value = value.replace(bad, good)
return value
register.filter(escapejs) | bsd-3-clause | 2,040,978,807,477,734,100 | 40.149351 | 529 | 0.576862 | false |
John-Hart/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyComplex/autorestcomplextestservice/operations/readonlyproperty.py | 2 | 4268 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Readonlyproperty(object):
"""Readonlyproperty operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get complex types that have readonly properties.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ReadonlyObj
<fixtures.acceptancetestsbodycomplex.models.ReadonlyObj>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/complex/readonlyproperty/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReadonlyObj', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_valid(
self, complex_body, custom_headers=None, raw=False, **operation_config):
"""Put complex types that have readonly properties.
:param complex_body:
:type complex_body: :class:`ReadonlyObj
<fixtures.acceptancetestsbodycomplex.models.ReadonlyObj>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/complex/readonlyproperty/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(complex_body, 'ReadonlyObj')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
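# Illustrative usage sketch -- client and attribute names are assumed, not defined here:
#   client = AutoRestComplexTestService(base_url='http://localhost:3000')
#   obj = client.readonlyproperty.get_valid()    # GET /complex/readonlyproperty/valid
#   client.readonlyproperty.put_valid(obj)       # PUT the payload back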
| mit | -6,717,718,069,507,566,000 | 34.865546 | 84 | 0.635192 | false |
abougouffa/alfanous | interfaces/smart_phones/alfanousS60/ensymble.py | 4 | 86334 | #!/usr/bin/env python
#[SQUEEZE] this is an embedded package
import imp
s="requires python 2.5.2 or bytecode compatible"
if imp.get_magic()!='\xb3\xf2\r\n':raise RuntimeError,s
try:import zlib
except:raise RuntimeError,"requires zlib"
import base64,marshal
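# The block below is presumably the squeezed payload: base64 text that, once decoded
# and zlib-decompressed, yields marshalled code objects -- which is why base64,
# marshal and zlib are imported above.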
s=base64.decodestring("""
eNqFVW1v1EYQnl373ny5KwIkkigIvrS6qj+BFyEoHxBwVI4iyqmSZex1s8Rnn7ybD0F3UkX4Wfw3
mGdsB1QprbU7Nzu78/7sXkbdF/J8wtM9YJLzUHRGtGJG00pRHpAhspoK5kP6THRJ9G4VUD6gfEif
Fa1C6BwvRqxvv/K3tLDnh0xe1Wlumqz3pXk+ha9fmLBZ2CfYXGk4YjOdPS3CAcXHC8SX6U4Z8xkM
HDDZKtoSJexKIUAbYHmJUBTvLkXT32CStGEkybrOz0vjJDR7Wtdnzu8x+1rE7SE/hkJiK+uTZAF/
HnacKQuPDDsTC6QjxE2YPNyk2Vn6t3kc4zBMODCkJioL+DfopBL7CZOLnxGslSQOVN5WXobnUgcY
x2+rXyn0AX2IqL5Lij8OxQ7IDqGL7DVd8rlKk/qTNfu8EWas+siXdWV8xMwf71bJ6ze/n7x6Ljm+
NBfPm6ZuYuQYI0A/ZVLYKu/qtIAwHvSGqnRthNmk/vTa/OEJNXWR5B+oqTpU87YKUQc3gcA/6KCm
vwLa8QiFGdBuSLsRbUdAx4eA6kNS5ofu/pC0IoUKhlLB3ZiuFo/AtVVSgMNuQlvemFDChsdg7JjC
E/7tC4YMYtTkX7UoGRFdLdp6ohYtbJBKmve7HWgKW5ok6Ra5zYCgqK9hPO0tOH9eFFJJKMhxMFf1
df02G28l/mLTMhkkCGV9bQNmvLoF8U1pwL6aqUM9U5G6yWCcquUC4O9ihEsOOJJFm0qSCIRjAZEE
P+sdXesSIlh1IEM9CfdGFrIs7DaiHvpfAP0jXHTGKz8zu7DnFbr/trrDiNeC+KkgHrAWfG8ZDHNu
OQMgoHtpQNWhrNBzjTazCMsB3ifFN5wvzqWgBTCasWiEZ+WTJkFOCBBsGSWT7mBrgUthp0L3yM++
b/GQUORKLOWVc7flKkgJ7lt33zfnVZZ6ky/mKLC8FWnjTtNSOvextO8FVC/Wm7rxcvVkI099Kkxj
0lzwkZW1Mx6eGBRi6MWb9vygA2WLv9xk9XrTGOcEt3LSVs6nZennV89a68800iVpfotEqBSbFnD2
o5E3sS4K9hwf4dDd/+i4aL2va38PW/vyNxKoIzXn3o+Y/qQifUvN9L5eDK6u1bi/WxJJ/Nv/wcpB
4WGLyse6h9eeHuqDg2/wVSVBeNrUvQl8XFlaH3rPvaUqVWnfJa9ld7std8tqL7162j3ttuW2pm3Z
U7LHbnvcmlLdK+m2S1Vy3Vu21Eg9A24ISwgM+zowMDBAEmCGNTBhCXl5eS8sCQQIAcKDR3iPAR5r
wmPL5Pt/3znn3pJku3mQ3y9Ptq7u/c6+fefbznc+K+pwHKey7M+FtYV6cPxYxdE/7fT7Ev1Gy/Tw
6b9ybjnOdfuunOvKvLvOdde8e851z7xnnOsZ897mXG9zfIqZdXyKk3P8dud6uxPknXLB8bPO25R5
B2LOjuepxPAz9DMTvUCvs0v1u8V4KShOn4mK5ZpfrJRXyvNhNYzDICrWF4pU7XC5vBgUF8IqQcan
rk1FE8Uz589Hh6KnFeUlTSsU6efG4cNBrVL3w9riyThoLIe1cnWC061FcbB8ExHuBI35ehTc5AQv
UGoKfnFycrJQ+FtXZXZteT4s14oXZ4uoFafhilFmF1fisF6LTnAxUkqRfw4XL5XjJWSHolqyfJLz
5RSmGZLifL1SrhYrS+VGuULtsqFRcaHeKJqWcvlJazkf3Vpd8kw9LjajwC+EToZ6DrOg4tJjjH5H
6fc0psSnCbI26sQUQTn3eHzHaGhHNlznam3MycSu80bBqXc5in5otAlecx11bW3QiT0nzHAaxPco
/l6K3+ZQkoXNqTydqsuJs06YczYyFL0XwfjO4rumnGt+xtmgqdWGWeTzvFokSNZZzzjryllvc9az
zj3KLufE7c46Rcg7Y/eUsyEfVPWNvBPnnfW8Q1AKfMN16lRKTNOyANDnKBSj1tudkCZoh6NfOh2/
y1l3nXUPNR++5zr0f6PgrI056wUnpOBupH6/i+ZzSw9RSwuplo5wS1FOD5IM67LQ5kXHyWx0oE5n
bu51NjqddfrodEIukeKF3SiXXvpvh+qtDmeD+qjH2eh2/F6uXy+6+I2sszLgKL+vBVQvUNv6EBet
8vu3Cyz3c+DqZ5z1LqrBnzsbPU484Kz3OP4AN5NK6XVCGs4h9OAIurOPQcPISKE7+1A9Cn8jY7tz
MNWda+tOPOKEozri+2k4+52NAWdj0NkYcjaGnY0RjkC9SS99XOqo4w+hEsOvnPOHnfV+fhlx1gf4
hbIa5BdKMsQvO5z1YX7ZiUzWR3mgXjl39fbvKYxE6xjs4pylghRBXaPHVUJes+O7ac7P0Ex3MOPp
0UaPclQJw4iWiBOcuLMUAWUleCVHX3pZcWQ/mG8uxoi8FFRXQmQS7aRHrd66uou18nJQXAzvBLWo
QOEJroqwBg8HEXI7EE0ciGJUZyLaBVRZu1Ouhn6CEKK4gT8HD0QHuWIWo0kud6IcA6VWADXmo92c
0Uoz3lSjuF4vVsuNxSDyuOgTEbAAcMWV6TNHi/bnyOqBI8+tpgOPPSjw+H0DZ4NKswEEu13g+4Ka
T+isNXCnDjydxsUIXC2OH6AtYLdUvFgj1FYu6r5KN3J8B0Y1S48qsGgQ99LrYhD7wUK5WY01ED15
eW0lmGo06o0YvQEkOiRRE5xqRiHuppBTMQ3FfDPWibISub4SxxjexVpzTn9ibvhhJeZ8q0GNw99X
rjZTxVFUnk3RCrWR8/IDKivgVy5WZtvZcpXmHXK8FVCV8HK50QxKyIO/UFl+aQRln+t54dS1qePH
zk6fn5qdvj7FuVSqmL0d3Ezew5pxWI07UZTuO2yrhHQoanlluRzdiusy8caxV/AaWFlcxozmsmgO
3eHszF5ENY67dG1s13HURdNB0RI1fY6+Iu79ar22KF86Q3lhCKq8YkGUk7zM3Th6k1+4Hm2muJJr
Rlz2XV5NC3E7A6QVnKoZ+kfNyzHzclwGP/T57x36m0t1Qkbeo3Gscn5E3a0k1uTKGqdsNGtnEewj
UsbzRlW32+F6mUE1qOiv6lKe2+Vm3X7616sK7oDqpTi9ylNjFGPIO+5l1WOq4A2oQj7nZN0eNaBj
ee4+1e/mVfJvQHUrwjuEoB3nQ844hrGEHublX49KGIhSznQK9UCzEpewcLjjeRyAuriHMAz4KGHh
lYbxQHNKz5kGP7DpEbJ4YbnuN4mowkyJBunRqfQ/tzOX7cjm8C/q1ImXA+CfMLK0acHQpl/990mb
ynuWKVR5zzGd6oJOBc2ad4jUuF4AqQFSlZ6dIBGudzlBt1PuAeUBKrZXqNiOhIp9BEsMjShGAaFi
osNmp2eLK+XKLVpGEdF+cb1YrwXRH1E/msYaarUSNOKTy2v4M0kPJlBXGuEdWtsEpuck/Qq0HEUr
S41yFJw8euz4U0/f/LsRvMUbXBW83yTit/hCvclo7sVC4R20hSnNKFysMQ3bCCJCpNiUEJk3lrth
vFRAGFoWLoSVchwUVxp1Wk+BP1m8vATc3IjiJEUYMWVaLEec5zw1VAKoqIKUslwOa+lSiOCmvaTs
+5KsSR1R80OQ3VTvYHk+4BAbu8C1D+PJ4qnaGte+HNNmFFG9goiQMrWOi95UMZQBpLGyQjV/x3Q9
19/kwXFtf7fENVHGo0O0G6Mx+LN9DnqItilt6whwAnS+2YgPF0+nhoLKaKKDabtFRyDl+KWpCwAs
l+NDnFrPQ1McfSElILrU9NhuTW3nq1Q2on6WT516JZXhOOFlpgwmuFpRTBO6CFxNW9ih/4H8EDWK
xj1YjRtlKrQch1EcViJZAq0zLV4qE31RxZ66VqR5FtNcLNapHY1UJJrDIEOi5soKoVRMl9ABh0o0
s7OJ43qCfncajito/5+b4yK2iTAhMUeLuQeyXp0PYr26nDccyyt0b2W9egzrRRi278GsV/87Zr0G
7s96Eb9SfCj7dUezXxJ/bGsUYrSISiIOK6Q6c6vqh6iJAuoHSL8PgNnSPYT2dKfbPmTaPozNSbfY
dsiIeRlNhfYyv0ZsnefUf4My6jUfP08fPds0Bvn1bgfvBR+HzMY4407ui2GwZynmrxPMXCvzN5Jm
/nakBhTZ9W7Jbsxm178pOx5OCk9lt1Nnd7vuqVqJW4TmEe/+nG0rfTxBLO4gPm7tAHdJrGK4k6tL
L7tQEPg+B1xhrUsy3q0z1j26x7zsTXXtIFhNXcJaumurlAett91OuAeVrxP7He91Qkpb5J4cdG4P
ZEwUanLRNnmQFwJzyARNNXRfeiEMOuE+ZEzRuR/3g1n293MdH0nz/I+0gJitfxRxmed/tDVwhAPL
B5z4MSc86NA7lai4vCGn/klHnfMPOP5jmK28OIZFKiALY9gsDM/MGVcWxpdk3hrGG/jvg5jmWyMh
aBwTqz5mp2RrjJUMVfcQR3wcU4Yj9j4w4jlUnOq5MeKsTqh4nPu1C3Ho/5mbo2pjlKdzFwQCY5vn
4CE7IGOb5uAYD80hHppRGZonTC7DqSEaxRrGxOimD5rSE05xY4eM1uNO+IQjsL00dFQwUBTNx12O
LWF9F4JHUMo4NekwZqcuZXOc0QWPJy5l/dYY57DTCSec8DBjU5Wu6eR2NR1xwkmkwWjfnmq7SgCg
n/BJ5LKx21mnOXrE2diDyU3vc0e4DAIeRaegjE4uYwfV80m9ihB8DNPETKHVpkNjQLVB2hFHcNuZ
m69DmmMK5DqjLiyk0d8aE6JDDZwrRlWimqCc4074VDoZB3Njfjp7FRGe5qWy10HHP+Os7+UlSJ/P
OuFzQCq0nAD0eBiKzsY+jvm84x/hpXBC47F7jtrYz0Hvcvyjzvp+hjzCkBcc/5izvo8hjzLkpOMf
d9YfZciBJMP1IkMeY8iLjv+Us06L6mln/TGCuxsHGf5ux6d6HnQWpdBxBr7k+M8669SHVOdHOPIh
Z/0Qj8gpZ32PHhrTl9Lmx3mKdzv+86np/bKzzkhY6Y/H7YfMchZ2nbDCrhJYrRLeWBhVPnHrxMoJ
lnaxfAB8CTNzhhthVUKKCWkVijHLLYwiWGQWgoWARWAC7zaIqSzWmkSSN0D+ESffXCaKO3qYHKzE
EpGHicGYD4uynBUqLtmUtXRMN0CAt0rMAe/QTTTU6yZh2A7dbkOebgp+UYuc7pYboJwhe2qhhVnE
B0oWFTSUbXG+GVbjw6AaiRfcT1kshxHHSCcl6jVVMPPIadZPGrES7aE/UzVQvulqriRE9gnmu1MA
qT4Nw0NlhpxymoWFCWVbLGqhZJGDLzbjdDgHc0Cax5AfCkCJL2hx24scLc1M2Gi7dLeGLWUn/R5C
cBc9p2O1DMABXxPx82sx2MKFOEWXE3exWKs3Av9QNLl9EZqej7ZhGqOXNw828aHExtK41at+motc
aNSXU/xjaxEsODpTJhajp7WXGIY5OStyqQHIFauL9QYx0MvTPi0SikjR+xHFFGYjsIjNgjkvdDaV
ejYMqn7EcjYbHrHoM1X46SVqNQuJ7s6PnzZCohKWfSlvhEglLPwSy5RYjgShTwmtKCG3Uh8eqF0J
VS9hdEsQm5aeNwLJFWJUWTgXRn7YYNAbdSoXxYDJtULEGVoZJeCLEotsB0V0eXqqdHn67PTpU5en
zk/NvHL5XAkThTuKQi+Vpt9HIa9OvSaBLITUcw0LSyR2eMklyEwErWAyRUoYleN4jSMsBjEWjc39
1OzspXOlU7O6aJ2wEa6IZGwEj1GD+rjvK421lZilqe3M2sUiiEc5q41yjQrQIlkMkhHJctQwWsCg
8YcZwdKLJl8M7pVaGEecVXllJaj5PLynafI26tUqtdHEm675tBy6pD3JFGVI1ALp5LVOrG+1+nK1
XrnFM2RKrwKuBK0BrpGR/7JMnLtgiXjhgJrkr5R9lonnTCtovrOAl8rRiXLSJipjnsvkEanPE5fe
WOPOpsAtE1NP5dmkXHxtXQSdm0JMD55qNMpr8a5NoVuKaVkjR3iI71LGwTh49FIRj314AGeXIOwr
PYrHATwew+MgHpCIlg7h8TgenPZJIyoXOVHE71qKU7qEGO9FjHbdIT7Wb0cyTfFduogYhZa9V+Tb
IgA/dlOWVo06nhF0rXTEhDZDwgC8CS+IQgCqB56jcaMqcy3Pmgl/Vb60dBi9ndfjJx3KU476W0vF
o4Wj+u8x/VdLzRee0n+f1n+fkfLL+vNZzpd6QOsSElFyzyZp8OTKGouc34fA//YgGbqXdfPqflL0
Z71dasA95O1Re9QOd1SNeokc3bwNqh2ZTsjRKceC2+khZq874A0jlttr5fBZr68rpxTFfIbyH1UF
tZd/29ysGnCyFFvyG3W73G5vlzdCsQ54njemslS/Pq8XZdCXR9997jClOOANe/u8TtWjdno7Vb/X
r/Bv1BsiSN5DbNSyi/6lpPt9FjE/b3HyZYu7F/G4gcdVPN6FB8wsSlfwuIDHDB7XHy7d32ZItor3
97SK9/HMZYezuazifxD0p9CClfF3GRn/v/4fJeN3WVqfg2gfL+2Q7uMljzjyP8gy68+CrrdF3p/j
OF1gceQ/pP5UaDdkYAjqgSQs6HUWFORhBCHS+rXrfaIU6LRKgQqWIhZmp5Hx/S491pkyvmfEfBvK
WZuF2GpdGZ7Oc1b2E9GuUP8xqjoAfQLIEKCNxRAFFkMYYQsL46gFLHqDwPARJ0PBKTmYh3aBgUvJ
wQhrhFm0Di9sNYGXdmZAPKclNI9Q8OEUlGkNKrBo0EWfEMcO6wnIIjPSE9F+6oTTjQCEXhmU0NQq
JO1RWK8Vzf4GmbYRCDPZTHScjSRIYrKg5cmNKEgCxycnJw8VD7/Ymq9ERaxi6oftbGyciWJZ5yyx
WytW1PJ0Qns1oQO3qbUknLl4eepE8WKtupbKne2EIAcv7q/Sbt+EFvrkyWKttl8UCUY0PMGS6lr9
7mToGYZJ9G86FTNpTJ6fPCmc1EiaDUoqBQ6IU15cod04ptxlU4+DxaDBSmamOM4HC3HSFCZDS+Hi
UgrGOi3eTar1u0FD6DlL4zEGp92SkYvQnem+iRFniuj6ZWjR31duhOV5IgNQlSn6Oq9bxeXaaDPM
EfKWN/VyWLu4MnW7Wa6Ot5kNDK3kCgVV2t1490T3ECMpGmNqkmxcQIcNtIY/UziskOAeKGZ7RI+c
nkgHDRZzOvoznnqaUPqg6qato1910m+efoHJKmg+tsGMWdAVsRihcYIMR/EKEVmDB0k6rQQsaJeX
Ky2RjTanNk4rOQOMZMAsaa/RAickQIubAvUapdW03gZx9uw4b8r/VgmNQnNXiCbD/wqTWdaLJ80N
QvlB5GKaK9RLKaHL7Coat3s9LQ9DWBVpvz9U2KILMt9WeVOaPQXG9czsqUMTWypIDG+iGtqiGTLf
hksdr9Vrh4nMhbnDIVHmbFIVEQh8gjREirA/rSUzQ3Z3KawscSa2hVDTxPXiPPF95WqlWaWc/aRj
JIr8pDulNXOO3tJRLclTIfNvBBXiBi0nl6AfXlHUaRGWztHJY5PPPXVk8uiRI/R8alIIKupZ5pdt
6NHjTz/1/ORR+vd0BIK4WbtFOKTGPRPTimJsMO7Z/X/JLF+eRkxulu7i8SE8PtvQp+CJKHXpc1op
gNbVw5m9avT+TtdOr4sWC/0StcT7HbBUziyPo3Z5+LxCaAnELBOi6U9tu+d4tP0BksHWts4isVle
+lGGMrtQvqV3DbAgYGoesGe0DI2e5MuUASoOIjq9WZjsJBY4fwiizAyy4gAAt4662TBsnYrbbBg2
EArMcq0SpLcLCYiojGXGQJSkjh0kXKDFioD5oEb1roTl6qQwfyxW0bEJX7Xy/o3yXTQQ5L8vfI8p
nbuyBVmftkUyZMp8n2rGdWZiGRGnGjDuWu5E9wgzApWHIFng/HTvY29jwtHp7GESmDe7CiDAxwNm
xjxDww4qoteiR8U6SxA5imcO6wQhOm1n9NoSMbMpYoYj5oFGEZHIpmOEfdscIarqB0SVk0E4CKeC
g6RtPBcLnLTDqeUo614H6kLHZDLmKFGerrMQnCkmSGA5rsIb8skzNu/kfLoYwlbQeOlgYqyLqalu
hnSCmlpnLes6G6GsdzBB1c0EVRaCX+hTu5jO68ZwStnQmIIqy88yTxD9krdl2QRRhRj1uIXiMkhV
lg4P2qYVQ6m2rJgkp+0XzpbFw+XMnjt1+GjRD4l/iBnbTm6PSZPVQCyqzo3eFsXCoTVfmevSyGRK
J7SZ2GX4wWqx9UdgJrt0Lka0YvOgSsL8gCkozi+G/DHenB+LTYoQb7HpiMTxgzthJUiwfMuGtRwu
B4ymUz8Xpi9MMe6+T5oWI3T9kzI8bwlGn09dmzrM0oYJNGGyCLu1lgzrTCcKsWt+7sK8gTZFv86m
O1qOWQ3SPUHjFnNuOhe2g2npkiRnP4B8SoxqpJt0/O0zbMGrqUm7PW5NRWhFsafr/KknX1IbkEKm
vhUqfx6WL03QRzVNGojorSic7SRndtEmJ6wMmyeaOlMXV7S0bALvpWaN/14OVmN0Mb3ONKvVyaI2
EuJ8pCsoBykqIC44BtY32QsnEKyWl1doXk4h//cFjXBh7WKtRMum3ggmRVcDfrnKfAIzlalxl40C
SP1cOVpKdoh2DbFC5ss8RxmOaQcanAltRGKS2jaaBYH2S7eH8zjP65IF0ldqycLV4LzeglgEOTPO
wuLzViaB6ovhYW/aslTvYkyeY1WnmiaCoyURINWCu2IZywilFBryhdJwC5i50B/UC4R9jorgfMvE
Yb5mnJmgj5u+a0FCWqoGOSpKjJOOM2tY5JjpqrJsy3ZgToxXud86bBbCr3Sl8kkEcTAsFcqwXb4S
eTD1wNElNBEfeJmHJDWvP/42GzL1wTqCjyHmSJdbUP2ePHe09ahhL5vpUmNeliVRea9HZVP/OlUF
swWtyZtd+y820XnY8jKG1GPRBu24Y0Li0aY4Rtsg3nLOGEjANmxhfh9vrZO0tdpkrk3m2mSuTZaV
/bYP1kiahmznnT7PNkm0teYgvCE6YfUgaAY2Z8izXUQbJ8jyDptHbVml/C3OVYBz+gSIHP9APgWO
3o6NFyRHD2/FvUIjeLPjmF3RZ1ro1TMa9VXW7rfnXpk+MwFDtCiNKStrqW04AaY34iRnidlkriP5
4XwJvTVr4EQCv2ULAzdkyjTfhzUAOw8k+tXgcBgHy0Vw20/GTeCj8eXyG3ViWpbDGv5Aiakt8uJ6
Ojt8v4Ps0vsQ9D7p/UO+TVclXYDskAUQsN4faZEVjaDEbh6pjt+6cWwzKmmyPGnMcnkN+wNqiQ3U
h6KAWezamh0z0S+menSSsSjOTDDeuIA+C+1pkgvoOzlhgs+X0Ycs0mG8cJayeZ9kI6cQ6vqLEX7p
jEH4ptpA2oxIGSVSmRxq0rCGCXgtafAM+nWGz90JP5CV3K6EIjihV5MamNpg4XSOpdct9q6nOIqk
DJEdeaJ04GaleoebZXuY+4DHmpEZxRc015kkSoQ6nCrBmfTBOjSBoEerpQmjIOE8H4IKuzUqTGbX
TyAC1DhOT787wAxtFwT5qs+F0X3ezaqck1W7WXWQkgNZRtffyuj6npGjWjhLdP2MEaJaOGM7QnCA
txm0mWU+KMcYh/Ea4xpWyfxKC65Jq+4ewB9j9UTa2lgUwylcowm0eeSRxjbpvCW2MQVIfpKF+QCO
I4JUp7FcrkoG2p5X6xj12ESb8kqUmulstqrnJbNwgWsf3adi0wvpTFJG4uC7pZWBb/Iz2KSla7fi
k5bg7Rh92JjrQZDu14glWF6J14rjIEnLECzNrxW1uvrQJC/20s/j8akWxt1qeNsEMr0gZ4H4tItv
uPTpBa5OJPKfG3a16gWdrrKIgw4bSmzzaMhZGd2rQoKVtcqvrFV+5eP3X2W9epWlpxb0H1EvrzNI
AfSvN6TYHLqgdUK8or5V8YpKtCLE5bfTdt/hzBrrPxcbe8wiJWGG2fpvAPICWlKhZ2zUbj/lXF39
ILI6c/OuIysPogGVpATp8RjrUUREtSmYTZCtVd3qk1tL3alLpTU7RiXDOtoT47bb3+VcvV1i0kKz
7Ns2ps1Z/QCy9b1UzjCjm3ZEFwMyiGUjbOAHmodFED5bYEOu0G7KZ3s2UD+ENFCXnPRCRl1Fkjwj
lTZt0o2I7Un2MNjm7POcPR+EgTzCZI9wNKEAAokREit5vrMVIS0wswXVQDUKphdkP7eakC1soTBY
milPYyUkSa3UFsS0YMkIjm5yj7asfVuumJ4xIoqFzpjgQ951Ri9GOaGxCaWieibILsmPJQe8mieI
Yw4atJrLlaUEDE7Rxi4Ut/kpV4lyqFGr7wQPRpnMEyLzLWQONb9VtdWClhYsMjKdBNFirS4dLIdT
sWMe0AjPSFBpy0js85IepQYd8EUV1Z1STkGldUB26EQFVKoYeoVRmDHfkInA7JCeEzNMLYhqiXnD
1/B43VoTfaNFX3+Mxx8aRFb6HSCcgsnb1pJJB3yZYRNaAm9EKCKg9KZFd3zGkeoRLiyIiVS3BQSr
muPqaI0jKDL5LrNdSYFRpElzf2Q4oJHhphntUadGJ1jvNOC2e2xCoPo80BowGCi6+N7tDngIG6b3
QQWDhT7GmnjvVYLYQ8gNqJ+vwmalYTXsQPCwXYu+BJV3tD4dmvROLOW3FWvPRUvexkD+Hwhm8Vhd
3sZ6861BbRyUN44dCkZv3mEOyXUadXkX8AVeurW6/HqPU5odZ0OkWeqBUzQ1Dy80wmo1SszxFoOa
aDJlzp8Pl8O4rE9dFeTgjggk9M4Kxeqmczf6xJPp7qKmD7bGLNrFL9vxUv0uDr6ZHGZoMqXpjfuX
hSgs2gMOqN4trwEVLDSr1VZUp4/SFcc5Lkt7plPhh0x2l3Q8I7Ya56hnq+XF2aVm7Nfv1k6trESH
tqkPC9a70/vpL7psykA4fs6+KPPimhe2q8BLm4FkzUvOvLSbl7yzui67agwue60PJHjIvDNO/Vyt
vcfJ0FKDEcKLbISgY3TZGLc/ZWNoM4VuJojb7Sn9TznqGtsqiNlE2KNPEqnb12hL7dVHA/AiZhWy
leP0TTdv5dvFaN3NEZJ9QCaes22M1kywgxdYzuCKnKGDjyn1wK1B2K/l+UbOsEgbcR8OA2FzLegT
VrQRo08HOcUQk/ys5gBwmIEjLNnIYDGNiZUFrMGhEshAMgGZCCKPpspsMzmMOUkNsw+vYd69fw13
sPTDsT40duKUDfQfOenOPhx8ERlJuIfFJDk+HdPpjOJIF30UWfHXySXvw0EuAPfzOa0czqz4g/zy
KEfr4mgH+HhUJ85GidoENXnMYXN2mETMsCLUGEbYHYr9IzwlO51ZdCJheAc7XnRcEt4RjwTvOF3C
sgsVzxzzT+PxM8bIo/SzDjN/9PZzxkiepQevBWWh7y8Qub8kyuGymKqeqzcbYjx4Iaw1Y21IOBtg
O4mMabFIci+Hy8E4GyT+qWELjB6O97/Sn5ldl6NX64t1a5CprSLFTlTElaLNkA3V8syQdbJFYgPS
ThZA7zbSDHRI6Md12Gig8FeDNfFx0GWMN2Dti2DZ61+3lMCNFhFD6dfx+CU8/iMev2xkvUaKUOB3
DA/n2NHyKZ0SQ8LKXhmW+VWLirmv+DT88twaOj0r78taaItXv7xmol82SQm+RANho2vrYXqNgorR
nSJzTsGdCuMnVBaD0m36PAqqCzw60kZuXreCUTUevXjsUmZ8QLx8HWL8Fh4Thvah8UwJjpH/Q6gQ
jMXcXFgL47m5KZAerEnw0l4E+B8TIB6RGd0uP9UONejtIoinHnH1X8+IRHpUpzcEDwWql/6moG6X
1+v1qy7VCSmygoUjC05yacnxR6xFnNbJ9oodjEofJfQ0GwgWyxx1tVZoYrv2MgQn7WyCk+FzSA64
l3sseBY5MuJ1MEpsZ+6pjZWursiKleyLksoePcOB2dnxjHGRdcr3QdnTaikuNsorS2GFbUg0pZKi
EDQLk8gB4HIJ+i+9kop+GK1Uy2smN7+prWQSGmCS2WQONQeOI617OE9A8AxasZJVhoLmtfSdxi6e
Fm6LMmMxXhLJH0w2hZCABXJT63U0AcKW0ltDX167QNOLpY7/xi7WNqXliKIFM2sF9eMlPc7GqLuV
1q+UfsImymlNC3QsOfGTwS4+LKaC9vm+0zgnFuzonA8r7V3Caeul+QfT0yzNxCE1Sr9iYpDTkgU7
5RaclGQBXHLOmGK2aztMoj8w51incE9ptno9Y047t7MawYEageafpkckNr6F3WZbUDbZe8lOHTnK
Ai0rbRQRHAGBw8PESLaQ7SfUjPTld7L1OTq0dRQEq79uOKNUn/+47fifxONf4PFTNtm40tsD4aoy
G8k8sNPLvo9PiGbE/CeLDh/ixd2ruKe7dW9zT/9JuqdFqhGqNHnFC33bGFvJK6PM0bHk2DKRoLeE
BK2w+YYYbiimbjowQGKYQbOU4EQ/rOe3G00mSMOcPkOqMCFyTA7hg21bn6WSbi86V9dZD4TQdjvq
HmQqZtQtwnhLj/o2Yg/tTOAhmONUscWTRWLrCd6iGdeXCU9UWHyZZGP0JpzV/zcWf0ZmUK+hHgSr
8GzZNPteN5SEeCdajal5gpIw/bSHIeLN7jsHxSWSJUp+zChik7lZmlAPsEVjrp3mZIrr/wSmJk5m
OLldhAkecaGx7MGeJGbpPFUh3O92e0WPmUmbc+62u1GotDknHBB4m1b0B+yKrrS609hmGC9tssZM
pwDfpk0jx3EI6vAzTx3SRpK04vk0ZU6ZhZ2xC5sN+C4ZK76Hdk+qxJ9R2pzYaau0aflAm2l8Ta/W
zENXa+bBq1X2a7vO7rGBq8aPbdrWCtawsyyjjj5he3OZOggs/1qrHQ10mu90o2Ui3fDDzajJC0Rv
t+D0p8WfyIRWL3L2QjLanJbKhJxXggosNX1tFLNk+IblZhQX52lxaeZAZ/GOmYMZEbpvWl6/zSfu
7FBn7VAzXfirePyaoRUZX/N0f6iVMw1+moL/9xj9TsbbhpgbUv/zzIPvMvOgllLIGK4t0dW9w5kw
W18OttBbyclMdsOnVTNJ3jaPRrBYbvg4+plE4oE2wzkZPWsmgK3Y38ckyL+zSVA6/kDE2CWDn9Tt
t7YdenS7UWWeNipQi/9i1qFYvaUc/mjd5D5kl65mA9eA5ybgga8Z0NiFjYeO1tWlwI43W721utPh
hbrGerPbzaDBQxgFxYTrnGQm+VV9npOPOswIriyYrhSyV/Nml3RFRT/GO5WnjZ+RAdf8/h3bIR1r
WvtH6NYMd+uoYvNn7D19pjsLmVatsDZrYZrBNwSmKF5CdrazLlYpWX7JQeiiDU+0rqZdm0uzKx4c
TGInpdDhiGF1QTsshf0Nq7+IS1p9nHPrEKkPny8gxgexu9igpc1Kfb5I7F84c8iU2B9Pkrl4HpXM
s0k11lkqh8h9Wu3UEtSbCmLiLezTIh/Ie+SsUb41ST8nGUwVPcDiHym6AGtoaRGeQ+z9gMk9eDmi
P4vK2OIuCvE3zBFHWGzZyc6KujjXUXa1w/Y/sMJ14IkUfnbYT5EYIUO1NsoasCEWhI1xVjvg6obl
ULmNHu7mXme1k4nBndTNSsH7TI+Ri/Vz1ru1ixt87oHXCfioxZ+d9KffnBph56iIvhdea+IiBGSI
tFtr9dTGEAfvZ681QwwZZsgjcF8D5zIEGWHIo/BIA38cDvtDMZmuDzBkjCEH4HwG/kz2s98R6vAd
qbHYYbxUuOw1SGbNYw48z9BMfhSeYTRKQGsPIhJPpD9RV9e70QNz45zkkHa96h/gz8e5oJ2tg/4E
D/pjqUGfcPyDZtB3pSLvSmoF4GEHDmYk2m4nnnTCJ9lLyi7twWRjD0c74viH4EoD0faaaLtT0Yoc
7ShHE0ce+xhyzIFfmr3sCmYfnLJQ5mjObhxyg9OQSbgp8Q+zBxZe4+FTPMkegS+P9f26iJHZcZwE
ZnfXp+u1O/DKFi+FovGw6jp4fyuOr9SJ0p+vsq3Rmvg9OGQONpR+wejrxVPqlVpIiHHG+L5kAojR
lATLdy4xDhK7US2TYiFVR3L8+7IxadTfUDWIvjCl0tPGlLNG2SASAi7DmIiymMKc9tKI137nU4FR
6VeMjYOYpZ5JyRrPpGSNpeeUtvC5lAgcP2CYiGn4Ke3iwKAR3G6GURhr+R9LIypGyMpGsiJ4fbcx
py2dwgNqshK7P5jCA247Sx/F49vx+FY8bhrq6/RSULkVNZe5KxLDDAvu1EfwDaA0B1kOyn9zx0tF
FrqNP203ppyxzFioy1bPIkDe5VkO2G2FgX1GNpPWFol5x8C2gxIJecGb4FnkMLQpWjIOTH2C+uC5
xRtlqit5MzWmKKkR+CsrTf7DFuWqNqhnA+DSf7ZiKuZmAnwu2INJoT2n9AYet6wEGCK3EojR0goe
83hgxEsfMyvAnl45EyzgBJcIlxIhLBv0pyS+ZRFRF1gqWzn6TKVCwaZPthnHniSlhXUksfkwTKc5
Y69dI7Ckf/xRS1zk5eR/ncn10heYKVRfSVkBgzzkL2aX96iUbluS/T6y+wNDxazwyCSSX1Aj8sVu
BhYS/bV+twcnw3nJ74PI7y1r80yDmRR/qeVkGJ8GK93D4208PhePz8PjH+Dx+TaPuFGFRC+ph99M
eRegHky8C3BcQVLymgQhog6S16STzNmPJFfu9k0uBDadVsNc+wsQaV/D3gP6W+XcGRgg9WeyqqBl
1nk+f59neXUPH/sEZEjl4GEAx0FTttCQk3NKF5LtorfXnt0Xk6Ye1ev1ZfIsH5c8h6gs/avzyePc
v8fGTxl9IYP4gkyfE6d9bu0YDgmtMx15jw+XvtnHonBXqzfVVTkXBbGY46hrV2tFJ0P4D5rVQdas
tsRmjWrCecGHAFsO3G9HYsGlPsUIiSWzOiJZmiy9wlIjPmnGS5udtzTw2GV8yKSEF/CvzkabEXvG
uD+pzWqVOr7uusZqzOt0PepQT/W5PapbzYz3mvU0N4cqzc3x4p6bkzMc9JnjT79emZsTIfk+VOIJ
ZZBvQcTXGqGWDiPgKB7H8HgKD9wvUXqXxVyx2uolYdOs+2v6PKH0keJsPut1vprvx4zp7OTnjnwu
303/8vm32HhcnDj0oQgYg5R+yJyQELx4w9rDLFkhMLdl1criftui4L8yAjkZLq7Lg6u71Z9DMfHn
oL055B/N78kPwGlF/8v5Q/078s+yKzDdzY1gpW5tTnYarw7/Sv3tvToESksHfJ6o17MGkoPlCUxq
2VVzCzDDwLxDnVNmg7UyXzICs5Ms25zk2GNDO7ttzjvw01BwrvcjfoZodzfocV6Qlz7nhfKAEww6
14f4OQyatDyS1MqWK3a8PnMTqOco+4oe49J3gH0gzoqYJ3FjSuwO8TrwUTnEHMcIO/EcAxPhMy1J
5D2R8z67kCR6nIhxIrEXR5zrO1ENIrRhqLML5DOMZ3aDTMbLHhDIeNkLEhgvRVCveNkHohUv+0G3
4uURkK54eRS0Kl4OgGLFC1H0T/LLQXixw8s4nODh5ZD4tDiWOLpOhv0whp3XJVCFX274hyUgisT/
0h3zzUeXmBqDlFP7pV+p88INVivVph+ITxlKwytWn2gWS7XhZKJFQblRWZrDabvJyoJ4FCqvrDTD
hbtir3X8GBz5ozKy0xBOaJTt6dVyJRZDVoonLgWOH/PnOXixWinX7pSFZlw01q+L1fp8udoMxTn/
igZq3Z8+HTRfX2UscitYq5RXrOshXCnAdLHWsMvJICLGozK8CssNE00/rIvDnaAWEUHG3rDiqF65
FciGHa1F7IOfFbxBNVhZqtcCbUq/cjckCv+ueOlh68FgNea9xNVyndPGVxoh3ZUcexCp9/D5WLEv
FceSIuexQO0cU1wFumxbAXRdEBzuGRN/2nlla4dnqzlqIKN8OS2M8OVokbftFXhXnivX1mANtclt
fAp9GOGKZMeJ4KyGHfo5hOp7lD1dbhV5IKbWerUAcgymqbNXay9jt1sb0TJJQSM4RbRBWxxkAkrd
vuJc25yMYDoEzo+lR0LWMK+zR9gNIw4Ls1pZBftdGiK236VxE4HYjJgcgItCbSd5ArPNfrwjtU7M
9pTI58TPBm8D5/B4jzHZphWhY0dzftgoTacD0HE6lIX9etlo0Li9AUFHSWFsrKAHD0aPqJBNVdGm
G9ZLhvJcuFfyXLg3ytCejK92InoG6Mk7gZuWKGMXg5TEcVbfowVisSt20887WqQl/S0vbcYfDkeq
94mezfS8uj3Mv1DGzY6Lzipnnc2xf5AQft0apY8o05/TplNFtmrP3aS6V2Zt3X9n3ZJO6BjZKm2P
vURJ7idKj2drNi1bbReDdnH5I6ttBGttg4WCRNKt/rZoMn+NbRvSU5B3uI22ZAra0HWeoDS/R6Sv
Vr7UrOdtYmVsrDceEKvNxjpzv1iuHhm4KdIBuc356LHrYG8n1LpLtx+lUXuURs3T/kxmWHJKnerz
/sC3z4Ac0dCKeamPsy/CjxtrA2J/FsLVTYuFx1o78mPPatNWhv5RzIJvU5ozpUGboyKZXJTLSXAI
2IyncC7BCr+3m+gPXSzDbLdQwyUYc0RGIjc9OdaN71JH5d1uJeYxQ+ox/ref/oEg5MnipRWRH4an
wC9EP565+Tl6iWiDBSbiYz5DsAKBp4uehzHeDta4iLx+BJ7gKYgx+W1B/TFPprE3cuwyOxYR9Kao
Ok5qreX4V25JYhTHm/sU79rsCjNCJxZTaOIEQx4vTtdsHFmffKSjR7wuzoESMLhrQMMMLaDhpa8z
4gji8evVOzYgsrmkMKCsbM/gAEN6fLsRCaRq+ODBHBUZ/1xcn7PlmrK+2Nxm4vBAal0y/e1TwzKM
vfqXh/FGRhAfj5+vkk2WO3v1BJz6C6A+kdHGBMLxxRnNq8kBMtG/GGf4isczbDPsHMFp6mMOzPGm
pFOK/yExq3o3r0NKheE/Jh8OfxzgE7dyyJagtgDWSYiKT93ezb+1W1JWu518HiQVKHi35MJVGiG6
e2Td05OPkj0lMQscE8dsOhBtUWYYfdDCXIers5w+MbNbrg8b4WM8XSDd5dq2McKBV2t8l8G6OFPC
Zt0N/UPYC/EYlBkuW3cZ4Dof8gUw56zthfAyZjNTIuwJGA4a6W87ZcwmyLjujKszYhWZ13AYuIvq
180N2LelqRI8kqo7QDm2cV79lFzx9v3b2KfoQRuVDPmGOKqaGQHOp0P3dn0/m6KEwzoDTsFHkKXY
rGuiSy+femgvPzg3+o8/V+MRbfv8fvYxDc0J35PA8/fT4iX+t1qaphv1BTITC/p+hPVu4/LqVQoY
dep7pVDmjkZwPwOxSDFcnxMZ9pvu7Q96ikgm4pmeLbNGQvQvmyL9lGemLntKsVM3z1O3W6bub7qK
guOd6BlGfk86BrBLAHsYoNdGn536BZuefq+uzsg4nuU7FHo2Z9izOcOerRn2SIY/6+EXqjZiBmOl
Vyt3PJB5ks0QW1fjK1nqeiXqa+vE9HkPS8CLGlnNNWsWbdFOBvdyJ6J+QXPwmFw8wUcU8Cr7LsLO
sicMmEvDIIXNpjsscyZovjtB8zoDsd8JVqLSNyht2MxurDQ+JY5rIVxkPo1voLsSwTMqGwIXYdHI
bq85jpYvLRQTnvFEsfT12LffVFoecUU7uUpo5hOt9W3nfWK5Tu0u8nVMV0+VZqZnXjlRPGMtr3T1
OR0MROS6MLS99LVKKwZYCT5VKl0sPSglJ2JBSmi3Ob5FkDet0lcprd9nqnKlvsKbnwjivxlv343H
9+DxvXh8Eo8fwOMblfGivB2Bww7x7pSr+la0oCZektkxc5NNjJNr6KQzeP9k2kSTNbx/ogBxiD2n
PZDP8UBaD1ABsqEhpExLMKoqfRPEXr1G1dA6x5BtxNqJlUa9wgantiS+HqzZaFAnmo36+40JqCG6
CsmkAdn+LYZOAPElM2SO/Ql3bZ5cNhYqr2Gy4xvajXmxwfQmn1Q7ilOihdYWCbcupCFqiImx/HAa
cPsl+LVI8b0sgM66g8pzuli2Nuh20b9OtYNIiEEiIHJMTHjEQ8kbyIuc5ynwWAV3iKhGT+1lx7Hd
FBvwXKZgyRA46xBiJEmNrw6vXxUyiDXAkBy79ci5Bc4n56Jk/OtSQy5imXfA2UCkLU2XQgmzekNO
ucw69nB7KGazLJ6zJ9vlGCzYhD7e/8OMNuvurAtRWdtCZhhCU8QOmiEQKn7Tapg2gl9anxG8FQn5
12aWXLkh0iGaT6D0xcLwSeN9gSZjY+3Bw9mpGb2VNZ57zL5pLk9I+Eepo5nia087Avh9x1B8ckIZ
z3QvCbG3Q7ycSB/xskbfebTnd0tXtemYavXLxXXJF3G/yuE7hZZrHTjbUYV504MoLuusjjOUD2Pd
AwsxwnbOWc6XhZdCZ33YuUrtjNmpGIWOyGFmUKsCdTmbblZf3zPcx+1Z4uF411owCoQZHhgRdvS0
Dta0kfzdLVdvlX7BIEHDilGPAzcF4q/0AgPOCsCQ+Cnph7j4bNbmRNxV+j6rYIsDqC+WRXzH6i5P
jMTFYqe2ZtAc85IS2KgYUzrGX6WfNHjDDLnMF8zF5QWLUFhH9u1qyxWIrdOn9GMUA9pbvuTBEZWR
O6B20IqX6dOlhQVD7rAquiwzUWnb6S+xHiVo2ghfh8XlMBVrJFM+c+JhVlOxZVo/tWGmYtu1gREF
ayKW5nvclmQJ8TVTy+k8yji1N2AyyCZnMK7xsszZk1ZbBFjY5ks/hAaz53O9vdOWnpIVt+z2C2XY
+Z/Qks3lFehvI3Y3Lfz9HPR/k/FqzOW9QiPJPl84TCceL2yDGH4ZU+xH8PhRpS8RkD31U3h8BwZO
GaUlphfvHHz53gPRQS/v9KGVhwF+0B4+yAy5BjcDg4/T71al3qpjBEBqbQBLTuzCZMVptkZQh5ZB
CgqwV5zFniECZfxj9hCyrsza5BBe3TR4cd4SlfSq9XynxXwCO7T4Ias3GxW5x1HOj9tD7nI4I4ar
slgIs8QUY1IuZBlh3xOwTU+ZQCI9sfs8ML9tKZkfs9TNz7I9Ix6/qlpoJdAYImH+dWNpwJd5mOVf
+k+I9ZMPW3fm5EtqZvKKfc4OlZdzscViS6WNkRbkoAyVlxZY9it7KkmEvf+cpeY49dju1KeN20ig
U6V9vfhi4Orqy/jCrLm1akRiG64cBq9l5dz+NkL2L7VmFKsHZtS3KSMteRazQUHHw3q8w4KG0cpW
EInrvDqMjD/LQoHxdAZqmwyUZGC0AbkWqR1jYx4kEU0XDGadD2u8aE/XV9bM9TzQI/Pf7TkEua72
5ww+l1yR6flt5OJy3ACnvgUrw5VP2IhEXMTalCW+e4Nv/aA6lF61kiGD8Jmc1HdoYGPpThf6DgR+
XTrrOZPZewyf4ri9mS4+7DbAxwx66B0n9gfVoIfrCJhkyKfVGE94PM3eTlENkHs4GsGPacZQA7RX
kdU/dZkI+z1XDsXpuLjr7QDP1bQMWWgzESMD3bQ5tXdRpDYWEaXCEhEzS3G12DenVazwwCapZXNY
b9P4xdVnYPRLgUUEbUZGfFImWW6bYJu8c/u0KlFP2TN59Y/K5sTno4e19CvV+rdsCpOX/jRXHcI0
z8qz81q6weKIdu06TqplxEfppcYe7glmV0oPX/nIMKyU2494avUKV6BfF3fm5u/z5ZimTuvikMXV
p81001jCVW9sqTui/t3q275NfdttfX9V8e2TAywPo8F6kuIM8vm3+/Vhu+5Dk1xH7zCAq7efk7I7
0wOUlHBISvBaS0hPUMFanI1E7DAAYQ6w9masnhO/s61nd6zPoDFrxX7PnN2ZYTW27DQD9k54uc3n
eUsaBiuiIupJ6EOjNYvbtfYaOO/huiK+yX2Odtg5LmPBHsBRWovqlzaM5EGuMOMdM6iKexgqQ3zL
nmBZg75IAdhujxhItggm2PPMmbARVHCIhve7MwEhOiBh34BPFKMDKeRs4cVydJgyOJzG1hbXR3tT
/8zV6ywdMFFqrh22wk8pBApj+8nKUHZBDcF4EQI3ZSjSicSSVKjIJEY+jjB/KyzfZpw0X1wq87dK
Th1Kc7hV5/xSZYAo5KlAKqE7dxu2kTStMoqkcct7tyHYEm5puB07EOR82yMiUOb6bgjZdyA43W4p
49m3Su75ePaFKu1f5wvMsO/M68rSs64rS73XlWUnryvPqzsLvhs987qy4pmvKysLrytZ24ElwHnJ
ucDbzAu5m7Oa3Yj+5DzvEStwq2YL05eV3kuP6jwvZY2iQlx61JwW6K/mwOyr2HAOP+7hC80avq68
IK/p7Nr8C46LskK0yRTM+N2v6QIk7eJpvFbs1MBgfQamaLlgij47L5RxFe6+OZ7om7LSyiRLRQAn
NNW9fSyPAqaGqeXpzbPEeNVbOO03uyBzDDfHvbp2ujy5mM/VmZhRgHjH9rQzsVFM5+HTT8Qvu+fU
slLo6lXSCbip9JR1PLeEX89frvu0p0smuhU9uQpXeVdhg9bequKStv8NIgv7rA==
""")
exec marshal.loads(zlib.decompress(s[:873]))
boot("ensymble",s,61930,873)
import cmdmain
| agpl-3.0 | -2,104,151,386,635,089,200 | 75.429596 | 76 | 0.943452 | false |
andrewromanenco/pyjvm | pyjvm/utils.py | 1 | 2811 | # PyJVM (pyjvm.org) Java Virtual Machine implemented in pure Python
# Copyright (C) 2014 Andrew Romanenco ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Common utils"""
def arr_to_string(str_arr):
    '''Convert an array of character codes to a unicode string'''
result_string = ""
for char_ in str_arr:
result_string += str(unichr(char_))
return result_string
def str_to_string(vm, ref):
'''Convert java string reference to unicode'''
if ref is None:
return "NULL"
heap_string = vm.heap[ref[1]]
value_ref = heap_string.fields["value"]
value = vm.heap[value_ref[1]] # this is array of chars
return arr_to_string(value.values)
def args_count(desc):
    '''Get the argument count from a method signature string
e.g. ()V - 0; (II)V - 2 (two int params)
'''
count = _args_count(desc[1:])
return count
def _args_count(desc):
    '''Recursive parsing for method signature'''
char_ = desc[0]
if char_ == ")":
return 0
if char_ in ["B", "C", "F", "I", "S", "Z"]:
return 1 + _args_count(desc[1:])
if char_ in ["J", "D"]:
return 2 + _args_count(desc[1:])
if char_ == "L":
return 1 + _args_count(desc[desc.index(";") + 1:])
if char_ == "[":
return _args_count(desc[1:])
    raise Exception("Unknown type def %s" % str(char_))
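# A few descriptors and the argument counts the parser above yields
# (illustrative only; long and double occupy two slots each):
#   args_count("()V")                      -> 0
#   args_count("(II)V")                    -> 2
#   args_count("(JD)V")                    -> 4
#   args_count("(Ljava/lang/String;[I)V")  -> 2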
def default_for_type(desc):
'''Get default value for specific type'''
if desc == "I":
return 0
elif desc == "J": # long
return ("long", 0)
elif desc[0] == "[": # array
return None
elif desc[0] == 'L': # object
return None
elif desc == 'Z': # boolean
return 0
elif desc == 'D': # double
return ("double", 0.0)
elif desc == 'F': # float
return ("float", 0.0)
elif desc == 'C': # char
return 0
    elif desc == 'B': # byte
return 0
raise Exception("Default value not yet supported for " + desc)
def category_type(value):
'''Get category type of a variable according to jdk specs
long, double are 2, others are 1'''
if type(value) is tuple and value[0] in ('long', 'double'):
return 2
else:
return 1
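if __name__ == '__main__':
    # Minimal self-checks for the helpers above; a sketch only, not part of
    # the pyjvm test suite.
    assert args_count("(IJLjava/lang/String;)V") == 4
    assert default_for_type("J") == ("long", 0)
    assert default_for_type("Ljava/lang/String;") is None
    assert category_type(("double", 0.0)) == 2
    assert category_type(0) == 1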
| gpl-3.0 | 3,142,101,564,049,662,500 | 29.89011 | 71 | 0.610459 | false |
HomeRad/TorCleaner | wc/proxy/HttpProxyClient.py | 1 | 7708 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2009 Bastian Kleineidam
"""
Internal http client.
"""
import urlparse
from . import Headers, ClientServerMatchmaker
from .decoder import UnchunkStream
from .. import log, LOG_PROXY, filter as webfilter, url as urlutil
from ..http import parse_http_response
from ..http.header import WcMessage
def funcname(func):
name = func.func_name
if hasattr(func, 'im_class'):
name = func.im_class.__name__+"."+name
return name
class HttpProxyClient(object):
"""
A class buffering all incoming data from a server for later use.
    Used to fetch javascript content in the background.
On completion the handler function is called.
Buffered data is None on error, else the content string.
"""
def __init__(self, localhost, url, method="GET"):
"""
Args is a tuple (url, JS version).
"""
self.handlers = {}
self.method = method
self.decoders = []
self.url = urlutil.url_norm(url)[0]
self.scheme, self.hostname, self.port, self.document = \
urlutil.url_split(self.url)
# fix missing trailing /
if not self.document:
self.document = '/'
self.connected = True
self.addr = ('#wc_proxy_client#', 80)
self.localhost = localhost
self.isredirect = False
self.headers = WcMessage()
attrs = webfilter.get_filterattrs(self.url, self.localhost,
[webfilter.STAGE_REQUEST])
# note: use HTTP/1.0 for older browsers
request = "%s %s HTTP/1.0" % (self.method, self.url)
for stage in webfilter.ClientFilterStages:
request = webfilter.applyfilter(stage, request, "filter", attrs)
self.request = request
log.debug(LOG_PROXY, '%s init', self)
def add_content_handler(self, handler, args=()):
self.handlers['content'] = (handler, args)
assert self.method != "HEAD", "No content for HEAD method"
def add_header_handler(self, handler, args=()):
self.handlers['headers'] = (handler, args)
def __repr__(self):
"""
Object representation.
"""
slist = []
for key, value in self.handlers.items():
handler, args = value
s = "%s: %s" % (key, funcname(handler))
if args:
s += " %s" % args[0]
slist.append(s)
return '<%s: %s>' % ('proxyclient', "\n ".join(slist))
def flush_coders(self, coders, data=""):
while coders:
log.debug(LOG_PROXY, "flush %s", coders[0])
data = coders[0].process(data)
data += coders[0].flush()
del coders[0]
return data
def finish(self):
"""
Tell handler all data is written and remove handler.
"""
log.debug(LOG_PROXY, '%s finish', self)
data = self.flush_coders(self.decoders)
if "content" in self.handlers:
handler, args = self.handlers['content']
if data:
handler(data, *args)
handler(None, *args)
del self.handlers["content"]
def error(self, status, msg, txt=''):
"""
On error the client finishes.
"""
log.warn(LOG_PROXY, '%s error %s %s %s', self, status, msg, txt)
self.finish()
def write(self, data):
"""
Give data to handler.
"""
for decoder in self.decoders:
data = decoder.process(data)
if "content" in self.handlers and data:
handler, args = self.handlers['content']
handler(data, *args)
def server_response(self, server, response, status, headers):
"""
Follow redirects, and finish on errors. For HTTP status 2xx continue.
"""
self.server = server
assert self.server.connected
log.debug(LOG_PROXY, '%s server_response %r', self, response)
version, status, msg = parse_http_response(response, self.url)
# XXX check version
log.debug(LOG_PROXY, '%s response %s %d %s',
self, version, status, msg)
if status in (302, 301):
self.isredirect = True
else:
if "headers" in self.handlers:
handler, args = self.handlers['headers']
handler(headers, *args)
del self.handlers["headers"]
if not (200 <= status < 300):
log.debug(LOG_PROXY,
"%s got %s status %d %r", self, version, status, msg)
self.finish()
if 'Transfer-Encoding' in headers:
# XXX don't look at value, assume chunked encoding for now
log.debug(LOG_PROXY,
'%s Transfer-encoding %r', self, headers['Transfer-encoding'])
unchunker = UnchunkStream.UnchunkStream(self)
self.decoders.append(unchunker)
def write_trailer(self, data):
pass
def server_content(self, data):
"""
Delegate server content to handler if it is not from a redirect
response.
"""
assert self.server
log.debug(LOG_PROXY,
'%s server_content with %d bytes', self, len(data))
if data and not self.isredirect:
self.write(data)
def server_close(self, server):
"""
The server has closed. Either redirect to new url, or finish.
"""
assert self.server
log.debug(LOG_PROXY, '%s server_close', self)
if self.isredirect:
self.redirect()
else:
self.finish()
def server_abort(self):
"""
The server aborted, so finish.
"""
log.debug(LOG_PROXY, '%s server_abort', self)
self.finish()
def handle_local(self):
"""
Local data is not allowed here, finish.
"""
        log.error(LOG_PROXY, "%s handle_local", self)
self.finish()
def redirect(self):
"""
Handle redirection to new url.
"""
assert self.server
# eg: http://ezpolls.mycomputer.com/ezpoll.html?u=shuochen&p=1
# make a new ClientServerMatchmaker
url = self.server.headers.getheader("Location",
self.server.headers.getheader("Uri", ""))
url = urlparse.urljoin(self.server.url, url)
self.url = urlutil.url_norm(url)[0]
self.isredirect = False
log.debug(LOG_PROXY, "%s redirected", self)
self.scheme, self.hostname, self.port, self.document = \
urlutil.url_split(self.url)
# fix missing trailing /
if not self.document:
self.document = '/'
host = urlutil.stripsite(self.url)[0]
mime_types = self.server.mime_types
content = ''
attrs = webfilter.get_filterattrs(self.url, self.localhost,
[webfilter.STAGE_REQUEST])
# note: use HTTP/1.0 for older browsers
request = "%s %s HTTP/1.0" % (self.method, self.url)
for stage in webfilter.ClientFilterStages:
request = webfilter.applyfilter(stage, request, "filter", attrs)
if self.request == request:
# avoid request loop
self.finish()
return
# close the server and try again
self.server = None
headers = Headers.get_wc_client_headers(host)
headers['Accept-Encoding'] = 'identity\r'
ClientServerMatchmaker.ClientServerMatchmaker(
self, request, headers, content, mime_types=mime_types)
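# Usage sketch (the surrounding proxy setup is assumed; the matchmaker call
# mirrors the one in redirect() above):
#
#   chunks = []
#
#   def got_content(data):
#       # called with content chunks, then once with None when the transfer ends
#       if data is not None:
#           chunks.append(data)
#
#   client = HttpProxyClient(localhost, "http://example.com/script.js")
#   client.add_content_handler(got_content)
#   ClientServerMatchmaker.ClientServerMatchmaker(
#       client, client.request,
#       Headers.get_wc_client_headers(client.hostname), '',
#       mime_types=None)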
| gpl-2.0 | -2,078,656,444,847,312,400 | 34.520737 | 78 | 0.554359 | false |
olapaola/olapaola-android-scripting | python/src/Demo/turtle/tdemo_planet_and_moon.py | 32 | 2835 | #!/usr/bin/python
""" turtle-example-suite:
tdemo_planets_and_moon.py
Gravitational system simulation using the
approximation method from Feynman-lectures,
p.9-8, using turtlegraphics.
Example: heavy central body, light planet,
very light moon!
Planet has a circular orbit, moon a stable
orbit around the planet.
You can hold the movement temporarily by pressing
the left mouse button with the mouse over the
scrollbar of the canvas.
"""
from turtle import Shape, Turtle, mainloop, Vec2D as Vec
from time import sleep
G = 8
class GravSys(object):
def __init__(self):
self.planets = []
self.t = 0
self.dt = 0.01
def init(self):
for p in self.planets:
p.init()
def start(self):
for i in range(10000):
self.t += self.dt
for p in self.planets:
p.step()
class Star(Turtle):
def __init__(self, m, x, v, gravSys, shape):
Turtle.__init__(self, shape=shape)
self.penup()
self.m = m
self.setpos(x)
self.v = v
gravSys.planets.append(self)
self.gravSys = gravSys
self.resizemode("user")
self.pendown()
def init(self):
dt = self.gravSys.dt
self.a = self.acc()
self.v = self.v + 0.5*dt*self.a
def acc(self):
a = Vec(0,0)
for planet in self.gravSys.planets:
if planet != self:
v = planet.pos()-self.pos()
a += (G*planet.m/abs(v)**3)*v
return a
def step(self):
dt = self.gravSys.dt
self.setpos(self.pos() + dt*self.v)
if self.gravSys.planets.index(self) != 0:
self.setheading(self.towards(self.gravSys.planets[0]))
self.a = self.acc()
self.v = self.v + dt*self.a
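# Note on the integration scheme: init() advances v by half a time step, so
# step() above is a leapfrog update:
#     x(t+dt)    = x(t)      + dt * v(t+dt/2)
#     v(t+3dt/2) = v(t+dt/2) + dt * a(t+dt)
# positions use the mid-step velocity and velocities use the acceleration at
# the new position, which keeps the orbits stable.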
## create compound orange/blue turtleshape for planets
def main():
s = Turtle()
s.reset()
s.tracer(0,0)
s.ht()
s.pu()
s.fd(6)
s.lt(90)
s.begin_poly()
s.circle(6, 180)
s.end_poly()
m1 = s.get_poly()
s.begin_poly()
s.circle(6,180)
s.end_poly()
m2 = s.get_poly()
planetshape = Shape("compound")
planetshape.addcomponent(m1,"orange")
planetshape.addcomponent(m2,"blue")
s.getscreen().register_shape("planet", planetshape)
s.tracer(1,0)
## setup gravitational system
gs = GravSys()
sun = Star(1000000, Vec(0,0), Vec(0,-2.5), gs, "circle")
sun.color("yellow")
sun.shapesize(1.8)
sun.pu()
earth = Star(12500, Vec(210,0), Vec(0,195), gs, "planet")
earth.pencolor("green")
earth.shapesize(0.8)
moon = Star(1, Vec(220,0), Vec(0,295), gs, "planet")
moon.pencolor("blue")
moon.shapesize(0.5)
gs.init()
gs.start()
return "Done!"
if __name__ == '__main__':
msg = main()
print msg
mainloop()
| apache-2.0 | -7,343,327,556,414,829,000 | 24.088496 | 66 | 0.571781 | false |
gbitzes/root | tutorials/pyroot/gui_ex.py | 37 | 1870 | ## \file
## \ingroup tutorial_pyroot
## A Simple GUI Example
##
## \macro_code
##
## \author Wim Lavrijsen
import os, sys, ROOT
def pygaus( x, par ):
import math
if (par[2] != 0.0):
arg1 = (x[0]-par[1])/par[2]
arg2 = (0.01*0.39894228)/par[2]
arg3 = par[0]/(1+par[3])
gauss = arg3*arg2*math.exp(-0.5*arg1*arg1)
else:
print 'returning 0'
gauss = 0.
return gauss
tpygaus = ROOT.TF1( 'pygaus', pygaus, -4, 4, 4 )
tpygaus.SetParameters( 1., 0., 1. )
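# ROOT evaluates the wrapped Python callable as pygaus(x, par): x[0] is the
# abscissa and par[0..3] are the four parameters declared in the TF1
# constructor above; SetParameters fills the first three, the fourth is left
# at its default value of 0.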
def MyDraw():
btn = ROOT.BindObject( ROOT.gTQSender, ROOT.TGTextButton )
if btn.WidgetId() == 10:
global tpygaus, window
tpygaus.Draw()
ROOT.gPad.Update()
m = ROOT.TPyDispatcher( MyDraw )
class pMainFrame( ROOT.TGMainFrame ):
def __init__( self, parent, width, height ):
ROOT.TGMainFrame.__init__( self, parent, width, height )
self.Canvas = ROOT.TRootEmbeddedCanvas( 'Canvas', self, 200, 200 )
self.AddFrame( self.Canvas, ROOT.TGLayoutHints() )
self.ButtonsFrame = ROOT.TGHorizontalFrame( self, 200, 40 )
self.DrawButton = ROOT.TGTextButton( self.ButtonsFrame, '&Draw', 10 )
self.DrawButton.Connect( 'Clicked()', "TPyDispatcher", m, 'Dispatch()' )
self.ButtonsFrame.AddFrame( self.DrawButton, ROOT.TGLayoutHints() )
self.ExitButton = ROOT.TGTextButton( self.ButtonsFrame, '&Exit', 20 )
self.ExitButton.SetCommand( 'TPython::Exec( "raise SystemExit" )' )
self.ButtonsFrame.AddFrame( self.ExitButton, ROOT.TGLayoutHints() )
self.AddFrame( self.ButtonsFrame, ROOT.TGLayoutHints() )
self.SetWindowName( 'My first GUI' )
self.MapSubwindows()
self.Resize( self.GetDefaultSize() )
self.MapWindow()
def __del__(self):
self.Cleanup()
if __name__ == '__main__':
window = pMainFrame( ROOT.gClient.GetRoot(), 200, 200 )
| lgpl-2.1 | 6,683,356,999,407,638,000 | 27.769231 | 79 | 0.622995 | false |