repo_name
stringlengths 5
100
| path
stringlengths 4
299
| copies
stringclasses 990
values | size
stringlengths 4
7
| content
stringlengths 666
1.03M
| license
stringclasses 15
values | hash
int64 -9,223,351,895,964,839,000
9,223,297,778B
| line_mean
float64 3.17
100
| line_max
int64 7
1k
| alpha_frac
float64 0.25
0.98
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
majetideepak/arrow | python/benchmarks/microbenchmarks.py | 13 | 1588 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pyarrow.benchmark as pb
from . import common
class PandasObjectIsNull(object):
    """ASV benchmark for pyarrow's PandasObjectIsNull over several list types."""

    size = 10 ** 5
    types = ('int', 'float', 'object', 'decimal')
    param_names = ['type']
    params = [types]

    def setup(self, type_name):
        gen = common.BuiltinsGenerator()
        # Map each benchmark parameter to the generator producing its input.
        makers = {
            'int': lambda: gen.generate_int_list(self.size),
            'float': lambda: gen.generate_float_list(self.size, use_nan=True),
            'object': lambda: gen.generate_object_list(self.size),
            'decimal': lambda: gen.generate_decimal_list(self.size),
        }
        assert type_name in makers
        self.lst = makers[type_name]()

    def time_PandasObjectIsNull(self, *args):
        pb.benchmark_PandasObjectIsNull(self.lst)
| apache-2.0 | -4,802,399,870,697,181,000 | 34.288889 | 66 | 0.676322 | false |
sniemi/SamPy | focus/WFC3ImageExtensionerTinyTim.py | 1 | 3567 | '''
Created on March 3, 2011
:author: Sami-Matias Niemi
:contact: [email protected]
:version: 0.1
'''
import pyfits as PF
import numpy as N
import glob as g
def parse_parameterfile(file='./complete_results/parameters.txt'):
    '''
    Parse a TinyTim parameter file into a dict of FITS-header-like keywords.

    Recognized lines have the form "Name | value". The result may contain:
    CHIP (str), LTV1/LTV2 (floats; negated subsection start positions),
    FILTER (str) and APERTURE (second word of the Camera entry).

    :param file: path to the parameter file
    :return: dictionary of parsed keywords
    '''
    out = {}
    #read data; the with-statement guarantees the handle is closed
    #(the original version leaked the open file object)
    with open(file) as fh:
        pdata = fh.readlines()
    #loop over data
    for line in pdata:
        if 'Chip' in line:
            out['CHIP'] = line.split('|')[1].strip()
        if 'Position' in line:
            pos = line.split('|')[1].split()
            #LTV keywords are the negated subsection offsets
            out['LTV1'] = -float(pos[0])
            out['LTV2'] = -float(pos[1])
        if 'Filter' in line:
            out['FILTER'] = line.split('|')[1].strip()
        if 'Camera' in line:
            out['APERTURE'] = line.split('|')[1].split()[1]
    return out
if __name__ == '__main__':
    # file assignment: TinyTim PSF input and the full-frame output name
    file = 'result00.fits'
    output = 'iTinyTimPSF.fits'

    #try to open the template, will exit if not possible
    #One could change this, actually don't need the data, just the shape...
    template = '/grp/hst/OTA/focus/Data/prop11877/visit09-jan2010/ibcy09usq_flt.fits'
    #yorg, xorg = 4096, 2051
    try:
        tempdata = PF.open(template)[1].data
        yorg, xorg = tempdata.shape
        # only the shape is needed; replace pixel values with zeros
        tempdata = N.zeros(tempdata.shape, dtype=N.float64)
    except:
        print 'Cannot open template file %s' % template
        import sys
        sys.exit('Will exit now')

    #find all suitable wfc3 files
    wfc3file = g.glob('./complete_results/%s' % file)
    #get data from the parameter file
    prms = parse_parameterfile()
    #make full frame data file
    print 'processing file %s' % file

    #open file handler and read data and headers and close the handler
    fh = PF.open(wfc3file[0])
    data = fh[0].data
    hd0 = fh[0].header
    fh.close()

    #check out the shape of the subarray
    y, x = data.shape

    #add the keys parsed from the parameter file to the output header
    print prms
    for key in prms:
        if 'CHIP' in key:
            hd0.update('CCDCHIP', prms[key])
        else:
            hd0.update(key, prms[key])
    hd0.update('EXPTIME', '1')
    hd0.update('EXPSTART', 5.55555e4)
    hd0.update('DATE-OBS', '2015-01-01')
    hd0.update('SIZAXIS1', xorg)
    hd0.update('SIZAXIS2', yorg)
    hd0.update('ORIENTAT', 0.0)

    #set the positions
    xstart = prms['LTV1'] #offset in X to subsection start
    ystart = prms['LTV2'] #offset in Y to subsection start
    apert = prms['APERTURE'] + prms['CHIP']

    #make integers
    xstart = int(round(xstart))
    ystart = int(round(ystart))

    #assign the data to the temp array to a right place
    # LTV offsets are negative, so -ystart/-xstart give the subarray origin;
    # the PSF is also scaled up by 1e5 counts here
    tempdata[-ystart: y - ystart, -xstart: x - xstart] = 1e5 * data

    #check which chip was used
    if 'UVIS1' in apert:
        #is in extension 4!
        #this is so horrible solution... arggggggh
        # pad with three dummy 1x1 SCI extensions so the real data lands in ext 4
        hdu = PF.PrimaryHDU(header=hd0)
        hdu1 = PF.ImageHDU(data=N.zeros((1, 1), dtype=N.float64), header=hd0, name='SCI')
        hdu2 = PF.ImageHDU(data=N.zeros((1, 1), dtype=N.float64), header=hd0, name='SCI')
        hdu3 = PF.ImageHDU(data=N.zeros((1, 1), dtype=N.float64), header=hd0, name='SCI')
        hdu4 = PF.ImageHDU(data=tempdata, header=hd0, name='SCI')
        thdulist = PF.HDUList([hdu, hdu1, hdu2, hdu3, hdu4])
    elif 'UVIS2' in apert:
        # UVIS2 data goes straight into extension 1
        hdu = PF.PrimaryHDU(header=hd0)
        hdu1 = PF.ImageHDU(data=tempdata, header=hd0, name='SCI')
        thdulist = PF.HDUList([hdu, hdu1])
    else: print 'Error with file %s' % file
    # NOTE(review): if neither aperture matches, 'thdulist' below is
    # undefined and writeto raises NameError — confirm intended.

    #write the output
    thdulist.writeto(output)
    print 'All done'
| bsd-2-clause | 6,947,535,516,556,092,000 | 29.758621 | 89 | 0.594057 | false |
surround-io/grpc | src/python/grpcio_test/grpc_test/_adapter/_intermediary_low_test.py | 13 | 17271 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for the old '_low'."""
import Queue
import threading
import time
import unittest
from grpc._adapter import _intermediary_low as _low
# Number of payloads streamed in the "many echoes" tests.
_STREAM_LENGTH = 300
_TIMEOUT = 5
_AFTER_DELAY = 2
# A deadline roughly one day out — effectively "never expires" for these tests.
_FUTURE = time.time() + 60 * 60 * 24
_BYTE_SEQUENCE = b'\abcdefghijklmnopqrstuvwxyz0123456789' * 200
# _STREAM_LENGTH payloads of increasing length: row r contains r bytes.
_BYTE_SEQUENCE_SEQUENCE = tuple(
    bytes(bytearray((row + column) % 256 for column in range(row)))
    for row in range(_STREAM_LENGTH))
class LonelyClientTest(unittest.TestCase):
  """Exercises a client whose RPC can never be served (nonexistent host)."""

  def testLonelyClient(self):
    host = 'nosuchhostexists'
    port = 54321
    method = 'test method'
    deadline = time.time() + _TIMEOUT
    after_deadline = deadline + _AFTER_DELAY
    metadata_tag = object()
    finish_tag = object()

    completion_queue = _low.CompletionQueue()
    channel = _low.Channel('%s:%d' % (host, port), None)
    client_call = _low.Call(channel, completion_queue, method, host, deadline)

    client_call.invoke(completion_queue, metadata_tag, finish_tag)
    # Both terminal events must arrive once the deadline has passed;
    # their relative order is not guaranteed, hence assertItemsEqual below.
    first_event = completion_queue.get(after_deadline)
    self.assertIsNotNone(first_event)
    second_event = completion_queue.get(after_deadline)
    self.assertIsNotNone(second_event)
    kinds = [event.kind for event in (first_event, second_event)]
    self.assertItemsEqual(
        (_low.Event.Kind.METADATA_ACCEPTED, _low.Event.Kind.FINISH),
        kinds)
    # No further events until the queue itself is stopped.
    self.assertIsNone(completion_queue.get(after_deadline))
    completion_queue.stop()
    stop_event = completion_queue.get(_FUTURE)
    self.assertEqual(_low.Event.Kind.STOP, stop_event.kind)
    del client_call
    del channel
    del completion_queue
def _drive_completion_queue(completion_queue, event_queue):
  """Pump events from a completion queue into event_queue until STOP arrives."""
  event = completion_queue.get(_FUTURE)
  while event.kind is not _low.Event.Kind.STOP:
    event_queue.put(event)
    event = completion_queue.get(_FUTURE)
class EchoTest(unittest.TestCase):
  """Verifies that payloads written on one side of an RPC are echoed back
  verbatim, along with leading/trailing (including binary) metadata."""

  def setUp(self):
    self.host = 'localhost'

    # Server side: completion queue, server on an ephemeral port, and a
    # thread draining server events into a plain Queue for the test to read.
    self.server_completion_queue = _low.CompletionQueue()
    self.server = _low.Server(self.server_completion_queue)
    port = self.server.add_http2_addr('[::]:0')
    self.server.start()
    self.server_events = Queue.Queue()
    self.server_completion_queue_thread = threading.Thread(
        target=_drive_completion_queue,
        args=(self.server_completion_queue, self.server_events))
    self.server_completion_queue_thread.start()

    # Client side: its own completion queue, channel and drain thread.
    self.client_completion_queue = _low.CompletionQueue()
    self.channel = _low.Channel('%s:%d' % (self.host, port), None)
    self.client_events = Queue.Queue()
    self.client_completion_queue_thread = threading.Thread(
        target=_drive_completion_queue,
        args=(self.client_completion_queue, self.client_events))
    self.client_completion_queue_thread.start()

  def tearDown(self):
    # Stopping the completion queues produces the STOP events that make the
    # drain threads exit, after which they can be joined.
    self.server.stop()
    self.server_completion_queue.stop()
    self.client_completion_queue.stop()
    self.server_completion_queue_thread.join()
    self.client_completion_queue_thread.join()
    del self.server

  def _perform_echo_test(self, test_data):
    """Run one full echo RPC, streaming each element of test_data."""
    method = 'test method'
    details = 'test details'
    server_leading_metadata_key = 'my_server_leading_key'
    server_leading_metadata_value = 'my_server_leading_value'
    server_trailing_metadata_key = 'my_server_trailing_key'
    server_trailing_metadata_value = 'my_server_trailing_value'
    client_metadata_key = 'my_client_key'
    client_metadata_value = 'my_client_value'
    # Keys ending in '-bin' carry binary metadata values.
    server_leading_binary_metadata_key = 'my_server_leading_key-bin'
    server_leading_binary_metadata_value = b'\0'*2047
    server_trailing_binary_metadata_key = 'my_server_trailing_key-bin'
    server_trailing_binary_metadata_value = b'\0'*2047
    client_binary_metadata_key = 'my_client_key-bin'
    client_binary_metadata_value = b'\0'*2047
    deadline = _FUTURE
    metadata_tag = object()
    finish_tag = object()
    write_tag = object()
    complete_tag = object()
    service_tag = object()
    read_tag = object()
    status_tag = object()
    server_data = []
    client_data = []

    # Client creates the call, attaches its metadata, and invokes.
    client_call = _low.Call(self.channel, self.client_completion_queue,
                            method, self.host, deadline)
    client_call.add_metadata(client_metadata_key, client_metadata_value)
    client_call.add_metadata(client_binary_metadata_key,
                             client_binary_metadata_value)
    client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)

    # Server accepts the incoming call and verifies the client metadata.
    self.server.service(service_tag)
    service_accepted = self.server_events.get()
    self.assertIsNotNone(service_accepted)
    self.assertIs(service_accepted.kind, _low.Event.Kind.SERVICE_ACCEPTED)
    self.assertIs(service_accepted.tag, service_tag)
    self.assertEqual(method, service_accepted.service_acceptance.method)
    self.assertEqual(self.host, service_accepted.service_acceptance.host)
    self.assertIsNotNone(service_accepted.service_acceptance.call)
    metadata = dict(service_accepted.metadata)
    self.assertIn(client_metadata_key, metadata)
    self.assertEqual(client_metadata_value, metadata[client_metadata_key])
    self.assertIn(client_binary_metadata_key, metadata)
    self.assertEqual(client_binary_metadata_value,
                     metadata[client_binary_metadata_key])

    # Server sends its leading metadata; client verifies it arrives.
    server_call = service_accepted.service_acceptance.call
    server_call.accept(self.server_completion_queue, finish_tag)
    server_call.add_metadata(server_leading_metadata_key,
                             server_leading_metadata_value)
    server_call.add_metadata(server_leading_binary_metadata_key,
                             server_leading_binary_metadata_value)
    server_call.premetadata()
    metadata_accepted = self.client_events.get()
    self.assertIsNotNone(metadata_accepted)
    self.assertEqual(_low.Event.Kind.METADATA_ACCEPTED, metadata_accepted.kind)
    self.assertEqual(metadata_tag, metadata_accepted.tag)
    metadata = dict(metadata_accepted.metadata)
    self.assertIn(server_leading_metadata_key, metadata)
    self.assertEqual(server_leading_metadata_value,
                     metadata[server_leading_metadata_key])
    self.assertIn(server_leading_binary_metadata_key, metadata)
    self.assertEqual(server_leading_binary_metadata_value,
                     metadata[server_leading_binary_metadata_key])

    # Echo each payload: client write -> server read -> server write back
    # -> client read, verifying each intermediate event.
    for datum in test_data:
      client_call.write(datum, write_tag)
      write_accepted = self.client_events.get()
      self.assertIsNotNone(write_accepted)
      self.assertIs(write_accepted.kind, _low.Event.Kind.WRITE_ACCEPTED)
      self.assertIs(write_accepted.tag, write_tag)
      self.assertIs(write_accepted.write_accepted, True)

      server_call.read(read_tag)
      read_accepted = self.server_events.get()
      self.assertIsNotNone(read_accepted)
      self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
      self.assertEqual(read_tag, read_accepted.tag)
      self.assertIsNotNone(read_accepted.bytes)
      server_data.append(read_accepted.bytes)

      server_call.write(read_accepted.bytes, write_tag)
      write_accepted = self.server_events.get()
      self.assertIsNotNone(write_accepted)
      self.assertEqual(_low.Event.Kind.WRITE_ACCEPTED, write_accepted.kind)
      self.assertEqual(write_tag, write_accepted.tag)
      self.assertTrue(write_accepted.write_accepted)

      client_call.read(read_tag)
      read_accepted = self.client_events.get()
      self.assertIsNotNone(read_accepted)
      self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
      self.assertEqual(read_tag, read_accepted.tag)
      self.assertIsNotNone(read_accepted.bytes)
      client_data.append(read_accepted.bytes)

    # Client half-closes its side of the stream.
    client_call.complete(complete_tag)
    complete_accepted = self.client_events.get()
    self.assertIsNotNone(complete_accepted)
    self.assertIs(complete_accepted.kind, _low.Event.Kind.COMPLETE_ACCEPTED)
    self.assertIs(complete_accepted.tag, complete_tag)
    self.assertIs(complete_accepted.complete_accepted, True)

    # Server observes end-of-stream as a read with bytes=None.
    server_call.read(read_tag)
    read_accepted = self.server_events.get()
    self.assertIsNotNone(read_accepted)
    self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
    self.assertEqual(read_tag, read_accepted.tag)
    self.assertIsNone(read_accepted.bytes)

    # Server attaches trailing metadata and sends the final status.
    server_call.add_metadata(server_trailing_metadata_key,
                             server_trailing_metadata_value)
    server_call.add_metadata(server_trailing_binary_metadata_key,
                             server_trailing_binary_metadata_value)
    server_call.status(_low.Status(_low.Code.OK, details), status_tag)
    # The server's two terminal events may arrive in either order.
    server_terminal_event_one = self.server_events.get()
    server_terminal_event_two = self.server_events.get()
    if server_terminal_event_one.kind == _low.Event.Kind.COMPLETE_ACCEPTED:
      status_accepted = server_terminal_event_one
      rpc_accepted = server_terminal_event_two
    else:
      status_accepted = server_terminal_event_two
      rpc_accepted = server_terminal_event_one
    self.assertIsNotNone(status_accepted)
    self.assertIsNotNone(rpc_accepted)
    self.assertEqual(_low.Event.Kind.COMPLETE_ACCEPTED, status_accepted.kind)
    self.assertEqual(status_tag, status_accepted.tag)
    self.assertTrue(status_accepted.complete_accepted)
    self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
    self.assertEqual(finish_tag, rpc_accepted.tag)
    self.assertEqual(_low.Status(_low.Code.OK, ''), rpc_accepted.status)

    # Client sees end-of-stream plus the FINISH event (in either order),
    # carrying the OK status and the trailing metadata only.
    client_call.read(read_tag)
    client_terminal_event_one = self.client_events.get()
    client_terminal_event_two = self.client_events.get()
    if client_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
      read_accepted = client_terminal_event_one
      finish_accepted = client_terminal_event_two
    else:
      read_accepted = client_terminal_event_two
      finish_accepted = client_terminal_event_one
    self.assertIsNotNone(read_accepted)
    self.assertIsNotNone(finish_accepted)
    self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
    self.assertEqual(read_tag, read_accepted.tag)
    self.assertIsNone(read_accepted.bytes)
    self.assertEqual(_low.Event.Kind.FINISH, finish_accepted.kind)
    self.assertEqual(finish_tag, finish_accepted.tag)
    self.assertEqual(_low.Status(_low.Code.OK, details), finish_accepted.status)
    metadata = dict(finish_accepted.metadata)
    self.assertIn(server_trailing_metadata_key, metadata)
    self.assertEqual(server_trailing_metadata_value,
                     metadata[server_trailing_metadata_key])
    self.assertIn(server_trailing_binary_metadata_key, metadata)
    self.assertEqual(server_trailing_binary_metadata_value,
                     metadata[server_trailing_binary_metadata_key])
    self.assertSetEqual(set(key for key, _ in finish_accepted.metadata),
                        set((server_trailing_metadata_key,
                             server_trailing_binary_metadata_key,)))

    # Both completion queues must now be drained.
    server_timeout_none_event = self.server_completion_queue.get(0)
    self.assertIsNone(server_timeout_none_event)
    client_timeout_none_event = self.client_completion_queue.get(0)
    self.assertIsNone(client_timeout_none_event)

    self.assertSequenceEqual(test_data, server_data)
    self.assertSequenceEqual(test_data, client_data)

  def testNoEcho(self):
    self._perform_echo_test(())

  def testOneByteEcho(self):
    self._perform_echo_test([b'\x07'])

  def testOneManyByteEcho(self):
    self._perform_echo_test([_BYTE_SEQUENCE])

  def testManyOneByteEchoes(self):
    # Iterating a bytes object yields one-byte payloads (Python 2 semantics).
    self._perform_echo_test(_BYTE_SEQUENCE)

  def testManyManyByteEchoes(self):
    self._perform_echo_test(_BYTE_SEQUENCE_SEQUENCE)
class CancellationTest(unittest.TestCase):
  """Checks client-side cancellation after a full streaming exchange."""

  def setUp(self):
    self.host = 'localhost'

    # Server side: completion queue, ephemeral port, and drain thread.
    self.server_completion_queue = _low.CompletionQueue()
    self.server = _low.Server(self.server_completion_queue)
    port = self.server.add_http2_addr('[::]:0')
    self.server.start()
    self.server_events = Queue.Queue()
    self.server_completion_queue_thread = threading.Thread(
        target=_drive_completion_queue,
        args=(self.server_completion_queue, self.server_events))
    self.server_completion_queue_thread.start()

    # Client side: completion queue, channel, drain thread.
    self.client_completion_queue = _low.CompletionQueue()
    self.channel = _low.Channel('%s:%d' % (self.host, port), None)
    self.client_events = Queue.Queue()
    self.client_completion_queue_thread = threading.Thread(
        target=_drive_completion_queue,
        args=(self.client_completion_queue, self.client_events))
    self.client_completion_queue_thread.start()

  def tearDown(self):
    # Stop the queues so the drain threads see STOP and can be joined.
    self.server.stop()
    self.server_completion_queue.stop()
    self.client_completion_queue.stop()
    self.server_completion_queue_thread.join()
    self.client_completion_queue_thread.join()
    del self.server

  def testCancellation(self):
    method = 'test method'
    deadline = _FUTURE
    metadata_tag = object()
    finish_tag = object()
    write_tag = object()
    service_tag = object()
    read_tag = object()
    test_data = _BYTE_SEQUENCE_SEQUENCE
    server_data = []
    client_data = []

    # Set up the call and let the server accept it.
    client_call = _low.Call(self.channel, self.client_completion_queue,
                            method, self.host, deadline)
    client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)
    self.server.service(service_tag)
    service_accepted = self.server_events.get()
    server_call = service_accepted.service_acceptance.call
    server_call.accept(self.server_completion_queue, finish_tag)
    server_call.premetadata()
    metadata_accepted = self.client_events.get()
    self.assertIsNotNone(metadata_accepted)

    # Echo the whole payload sequence before cancelling.
    for datum in test_data:
      client_call.write(datum, write_tag)
      write_accepted = self.client_events.get()
      server_call.read(read_tag)
      read_accepted = self.server_events.get()
      server_data.append(read_accepted.bytes)
      server_call.write(read_accepted.bytes, write_tag)
      write_accepted = self.server_events.get()
      self.assertIsNotNone(write_accepted)
      client_call.read(read_tag)
      read_accepted = self.client_events.get()
      client_data.append(read_accepted.bytes)

    client_call.cancel()
    # cancel() is idempotent.
    client_call.cancel()
    client_call.cancel()
    client_call.cancel()

    server_call.read(read_tag)
    # The server's terminal events (empty read + FINISH) may arrive in
    # either order.
    server_terminal_event_one = self.server_events.get()
    server_terminal_event_two = self.server_events.get()
    if server_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
      read_accepted = server_terminal_event_one
      rpc_accepted = server_terminal_event_two
    else:
      read_accepted = server_terminal_event_two
      rpc_accepted = server_terminal_event_one
    self.assertIsNotNone(read_accepted)
    self.assertIsNotNone(rpc_accepted)
    self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
    self.assertIsNone(read_accepted.bytes)
    self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
    self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), rpc_accepted.status)

    # The client's FINISH carries the CANCELLED status with details.
    finish_event = self.client_events.get()
    self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind)
    self.assertEqual(_low.Status(_low.Code.CANCELLED, 'Cancelled'),
                     finish_event.status)

    # Both completion queues must be drained afterwards.
    server_timeout_none_event = self.server_completion_queue.get(0)
    self.assertIsNone(server_timeout_none_event)
    client_timeout_none_event = self.client_completion_queue.get(0)
    self.assertIsNone(client_timeout_none_event)

    self.assertSequenceEqual(test_data, server_data)
    self.assertSequenceEqual(test_data, client_data)
class ExpirationTest(unittest.TestCase):
  """Placeholder suite: deadline-expiration behaviour is not yet covered."""

  @unittest.skip('TODO(nathaniel): Expiration test!')
  def testExpiration(self):
    pass
if __name__ == '__main__':
  # Run every test case in this module with per-test output.
  unittest.main(verbosity=2)
| bsd-3-clause | 107,481,459,297,424,960 | 38.794931 | 80 | 0.70986 | false |
corbamico/Garmin-Connect-Downloader | gdownload/main.py | 1 | 6944 | #!/usr/bin/python
###
# Copyright (c) corbamico 01/2014 <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
###
# GPL from gupload.py
# Copyright (c) David Lotton 01/2012 <[email protected]>
#
# All rights reserved.
#
# License: GNU General Public License (GPL)
#
# THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM
# 'AS IS' WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
# IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
# ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
# IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
# THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
#
#
# Name: gupload.py
#
# Brief: gupload.py is a utility to upload Garmin fitness
# GPS files to the connect.garmin.com web site.
# It requires that you have a user account on that
# site. See help (-h option) for more information.
###
# Make sure you have MultipartPostHandler.py in your path as well
def downloader():
    """Command-line entry point: log in to Garmin Connect and download every
    activity as a .tcx file into ./workouts.

    Credentials come from -l on the command line, or from a .guploadrc /
    gupload.ini config file in the current or home directory (in that
    priority order).
    """
    import gdownload
    import argparse
    import os
    import os.path
    import ConfigParser
    import logging
    import platform
    import string
    import sys

    out_directory = "./workouts"

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='A script to download .TCX files from the Garmin Connect web site.',
        epilog="""
Output:
The script will output files into ./workouts as filename({datetime}-{activityid}.tcx):
Credentials:
Username and password credentials may be placed in a configuration file
located either in the current working directory, or in the user's home
directory. WARNING, THIS IS NOT SECURE. USE THIS OPTION AT YOUR OWN
RISK. Username and password are stored as clear text in a file
format that is consistent with Microsoft (r) INI files.
The configuration file must contain a [Credentials] section containing
'username' and 'password' entries.
The name of the config file for non-windows platforms is '.guploadrc'
for windows platforms the config file is named 'gupload.ini'.
Example \'.guploadrc\' (or \'gupload.ini\' for windows):
[Credentials]
username=<myusername>
password=<mypassword>
Replace <myusername> and <mypassword> above with your own login
credentials.
Priority of credentials:
Command line credentials take priority over config files, current
directory config file takes priority over a config file in the user's
home directory.
Examples:
Upload file and set activty name:
gupload.py -l myusername mypassword --download_all
""")
    parser.add_argument('--download_all', action='store_true', required=True, help='Download activities from Garmin Connect.')
    parser.add_argument('-l', type=str, nargs=2, help='Garmin Connect login credentials \'-l username password\'')
    parser.add_argument('-v', type=int, nargs=1, default=[3], choices=[1, 2, 3, 4, 5], help='Verbose - select level of verbosity. 1=DEBUG(most verbose), 2=INFO, 3=WARNING, 4=ERROR, 5= CRITICAL(least verbose). [default=3]')

    myargs = parser.parse_args()
    # logging levels are multiples of 10 (DEBUG=10 ... CRITICAL=50)
    logging.basicConfig(level=(myargs.v[0]*10))

    if platform.system() == 'Windows':
        configFile='gupload.ini'
    else:
        configFile='.guploadrc'

    # ----Login Credentials for Garmin Connect----
    # If credentials are given on command line, use them.
    # If no credentials are given on command line, look in
    # current directory for a .guploadrc file (or gupload.ini
    # for windows). If no .guploadrc/gupload.ini file exists
    # in the current directory look in the user's home directory.
    configCurrentDir=os.path.abspath(os.path.normpath('./' + configFile))
    configHomeDir=os.path.expanduser(os.path.normpath('~/' + configFile))

    if myargs.l:
        logging.debug('Using credentials from command line.')
        username=myargs.l[0]
        password=myargs.l[1]
    elif os.path.isfile(configCurrentDir):
        logging.debug('Using credentials from \'' + configCurrentDir + '\'.')
        config=ConfigParser.RawConfigParser()
        config.read(configCurrentDir)
        username=config.get('Credentials', 'username')
        password=config.get('Credentials', 'password')
    elif os.path.isfile(configHomeDir):
        logging.debug('Using credentials from \'' + configHomeDir + '\'.')
        config=ConfigParser.RawConfigParser()
        config.read(configHomeDir)
        username=config.get('Credentials', 'username')
        password=config.get('Credentials', 'password')
    else:
        cwd = os.path.abspath(os.path.normpath('./'))
        homepath = os.path.expanduser(os.path.normpath('~/'))
        logging.critical('\'' + configFile + '\' file does not exist in current directory (' + cwd + ') or home directory (' + homepath + '). Use -l option.')
        exit(1)

    def obscurePassword(password):
        # Mask all but the first and last character for debug logging.
        # NOTE(review): for length 2 this returns the *second* char + '*';
        # presumably password[0] was intended — confirm.
        length=len(password)
        if length==1:
            return('*')
        elif length == 2:
            return(password[1] + '*')
        else:
            obscured=password[0]
            for letter in range(1, length-1):
                obscured=obscured+'*'
            obscured=obscured+password[length-1]
            return(obscured)

    # interactive confirmation before touching the network
    yes = raw_input("Download All Activities from Garmin Connect\n[Download Garmin]Connect to Garmin Connect&Download all[Y|y]:")
    if yes != "Y" and yes != 'y' :
        exit(1)

    def print_screen_line(string):
        # '\r' rewrites the current status line on stderr
        sys.stderr.write("\r")
        sys.stderr.write(string)

    # Check directory of output
    if os.path.isdir(out_directory):
        pass
    else:
        os.mkdir(out_directory)

    logging.debug('Username: ' + username)
    logging.debug('Password: ' + obscurePassword(password))

    # Create object
    # NOTE(review): DownloadGarmin is not imported in this function;
    # presumably it is meant to come from the 'gdownload' package — confirm.
    g = DownloadGarmin.DownloadGarmin()

    # LOGIN
    print_screen_line ("[Download Garmin]login trying...")
    if not g.login(username, password):
        logging.critical('LOGIN FAILED - please verify your login credentials')
        print_screen_line ("[Download Garmin]login failed.")
        exit(1)
    else:
        logging.info('Login Successful.')
        print_screen_line ("[Download Garmin]login OK.\n")

    # Download All
    counter = 0;
    print_screen_line ("[Download Garmin]try to get all activities id.")
    for activityid,time,tzinfo in g.get_workouts():
        counter += 1
        print_screen_line ("[Download Garmin]downloading file:(%d) total(%d)." %(counter, g.totalFound))
        g.download_activity(activityid,time,tzinfo,out_directory)
    exit()
if __name__ == '__main__':
    # script entry point
    downloader()
| gpl-2.0 | -7,127,054,790,775,405,000 | 33.039216 | 220 | 0.719614 | false |
ubuntu/ubuntu-make | umake/frameworks/scala.py | 1 | 2515 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
# Igor Vuk
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Scala module"""
from contextlib import suppress
from gettext import gettext as _
import logging
import os
import re
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.tools import add_env_to_user
from umake.ui import UI
logger = logging.getLogger(__name__)
class ScalaCategory(umake.frameworks.BaseCategory):
    """Umake category grouping the Scala frameworks."""

    def __init__(self):
        super().__init__(
            name="Scala",
            description=_("The Scala Programming Language"),
            logo_path=None)
class ScalaLang(umake.frameworks.baseinstaller.BaseInstaller):
    """Installer for the official Scala compiler/interpreter tarball."""

    def __init__(self, **kwargs):
        super().__init__(
            name="Scala Lang",
            description=_("Scala compiler and interpreter (default)"),
            is_category_default=True,
            packages_requirements=["openjdk-7-jre | openjdk-8-jre"],
            download_page="http://www.scala-lang.org/download/",
            dir_to_decompress_in_tarball="scala-*",
            required_files_path=[os.path.join("bin", "scala")],
            **kwargs)

    def parse_download_link(self, line, in_download):
        """Parse Scala download link, expect to find a url"""
        no_match = ((None, None), False)
        if 'id="#link-main-unixsys"' not in line:
            return no_match
        found = re.search(r'href="(.*)"', line)
        if found is None:
            # marker present on the line but no href attribute found
            return no_match
        return ((found.group(1), None), True)

    def post_install(self):
        """Add the necessary Scala environment variables"""
        scala_bin = os.path.join(self.install_path, "bin")
        env = {
            "PATH": {"value": scala_bin},
            "SCALA_HOME": {"value": self.install_path},
        }
        add_env_to_user(self.name, env)
        UI.delayed_display(DisplayMessage(self.RELOGIN_REQUIRE_MSG.format(self.name)))
karlnapf/shogun | applications/ocr/Ai.py | 12 | 2394 | # File : $HeadURL$
# Version: $Id$
from shogun import RealFeatures, MulticlassLabels
from shogun import GaussianKernel
from shogun import GMNPSVM
import numpy as np
import gzip as gz
import pickle as pkl
import common as com
class Ai:
    """Gaussian-kernel multiclass SVM (shogun GMNPSVM) for digit recognition.

    Features and labels are loaded from text files; the trained model is
    pickled into a gzip file between runs.
    """

    def __init__(self):
        # training features/labels and the (optional) held-out split
        self.x = None
        self.y = None
        self.x_test = None
        self.y_test = None
        self.svm = None

    def load_train_data(self, x_fname, y_fname):
        """Load features and labels; labels in the file are 1-based."""
        Ai.__init__(self)
        self.x = np.loadtxt(x_fname)
        self.y = np.loadtxt(y_fname) - 1.0
        # until enable_validation() is called, test on the training data
        self.x_test = self.x
        self.y_test = self.y

    def _svm_new(self, kernel_width, c, epsilon):
        """Create a fresh GMNPSVM from the loaded training data."""
        # Bug fix: use 'is None' rather than '== None' — with numpy arrays
        # '==' compares elementwise, which breaks truth-testing here.
        if self.x is None or self.y is None:
            raise Exception("No training data loaded.")
        x = RealFeatures(self.x)
        y = MulticlassLabels(self.y)
        self.svm = GMNPSVM(c, GaussianKernel(x, x, kernel_width), y)
        self.svm.set_epsilon(epsilon)

    def write_svm(self):
        """Pickle the trained SVM into a gzip file (max compression)."""
        # context manager closes the stream even if pickling fails
        with gz.open(com.TRAIN_SVM_FNAME_GZ, 'wb', 9) as gz_stream:
            pkl.dump(self.svm, gz_stream)

    def read_svm(self):
        """Load a previously pickled SVM."""
        with gz.open(com.TRAIN_SVM_FNAME_GZ, 'rb') as gz_stream:
            self.svm = pkl.load(gz_stream)

    def enable_validation(self, train_frac):
        """Randomly split the loaded data into train/validation parts."""
        x = self.x
        y = self.y
        idx = np.arange(len(y))
        np.random.shuffle(idx)
        # Bug fix: int(...) is required, modern numpy rejects float slice
        # bounds. floor for the training end and ceil for the test start
        # preserve the original behaviour (a fractional boundary drops one
        # sample rather than sharing it).
        train_end = int(np.floor(train_frac * len(y)))
        test_start = int(np.ceil(train_frac * len(y)))
        train_idx = idx[:train_end]
        test_idx = idx[test_start:]
        self.x = x[:, train_idx]
        self.y = y[train_idx]
        self.x_test = x[:, test_idx]
        self.y_test = y[test_idx]

    def train(self, kernel_width, c, epsilon):
        """Build and train the SVM on the loaded training data."""
        self._svm_new(kernel_width, c, epsilon)
        x = RealFeatures(self.x)
        self.svm.io.enable_progress()
        self.svm.train(x)
        self.svm.io.disable_progress()

    def load_classifier(self):
        self.read_svm()

    def classify(self, matrix):
        """Classify one flattened image; return the predicted digit (0-9)."""
        cl = self.svm.apply(
            RealFeatures(
                np.reshape(matrix, newshape=(com.FEATURE_DIM, 1),
                           order='F')
            )
        ).get_label(0)
        # undo the -1 shift applied in load_train_data; % 10 folds the
        # top label back onto digit 0
        return int(cl + 1.0) % 10

    def get_test_error(self):
        """Return the error rate on the held-out (or training) set."""
        self.svm.io.enable_progress()
        l = self.svm.apply(RealFeatures(self.x_test)).get_labels()
        self.svm.io.disable_progress()
        return 1.0 - np.mean(l == self.y_test)
googleinterns/deepspeech-reconstruction | src/deep_speaker/utils.py | 1 | 3373 | import logging
import os
import random
import shutil
from glob import glob
import click
import dill
import numpy as np
import pandas as pd
from natsort import natsorted
from constants import TRAIN_TEST_RATIO
logger = logging.getLogger(__name__)
def find_files(directory, ext='wav'):
return sorted(glob(directory + f'/**/*.{ext}', recursive=True))
def init_pandas():
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
def create_new_empty_dir(directory: str):
if os.path.exists(directory):
shutil.rmtree(directory)
os.makedirs(directory)
def ensure_dir_for_filename(filename: str):
ensures_dir(os.path.dirname(filename))
def ensures_dir(directory: str):
if len(directory) > 0 and not os.path.exists(directory):
os.makedirs(directory)
class ClickType:
@staticmethod
def input_file(writable=False):
return click.Path(exists=True, file_okay=True, dir_okay=False,
writable=writable, readable=True, resolve_path=True)
@staticmethod
def input_dir(writable=False):
return click.Path(exists=True, file_okay=False, dir_okay=True,
writable=writable, readable=True, resolve_path=True)
@staticmethod
def output_file():
return click.Path(exists=False, file_okay=True, dir_okay=False,
writable=True, readable=True, resolve_path=True)
@staticmethod
def output_dir():
return click.Path(exists=False, file_okay=False, dir_okay=True,
writable=True, readable=True, resolve_path=True)
def parallel_function(f, sequence, num_threads=None):
from multiprocessing import Pool
pool = Pool(processes=num_threads)
result = pool.map(f, sequence)
cleaned = [x for x in result if x is not None]
pool.close()
pool.join()
return cleaned
def load_best_checkpoint(checkpoint_dir):
checkpoints = natsorted(glob(os.path.join(checkpoint_dir, '*.h5')))
if len(checkpoints) != 0:
return checkpoints[-1]
return None
def delete_older_checkpoints(checkpoint_dir, max_to_keep=5):
assert max_to_keep > 0
checkpoints = natsorted(glob(os.path.join(checkpoint_dir, '*.h5')))
checkpoints_to_keep = checkpoints[-max_to_keep:]
for checkpoint in checkpoints:
if checkpoint not in checkpoints_to_keep:
os.remove(checkpoint)
def enable_deterministic():
print('Deterministic mode enabled.')
np.random.seed(123)
random.seed(123)
def load_pickle(file):
if not os.path.exists(file):
return None
logger.info(f'Loading PKL file: {file}.')
with open(file, 'rb') as r:
return dill.load(r)
def load_npy(file):
if not os.path.exists(file):
return None
logger.info(f'Loading NPY file: {file}.')
return np.load(file)
def train_test_sp_to_utt(audio, is_test):
sp_to_utt = {}
for speaker_id, utterances in audio.speakers_to_utterances.items():
utterances_files = sorted(utterances.values())
train_test_sep = int(len(utterances_files) * TRAIN_TEST_RATIO)
sp_to_utt[speaker_id] = utterances_files[train_test_sep:] if is_test else utterances_files[:train_test_sep]
return sp_to_utt
| apache-2.0 | -1,590,965,533,119,710,700 | 27.108333 | 115 | 0.663801 | false |
TRox1972/youtube-dl | youtube_dl/extractor/rottentomatoes.py | 9 | 1280 | from __future__ import unicode_literals
from .common import InfoExtractor
from .internetvideoarchive import InternetVideoArchiveIE
class RottenTomatoesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rottentomatoes\.com/m/[^/]+/trailers/(?P<id>\d+)'
_TEST = {
'url': 'http://www.rottentomatoes.com/m/toy_story_3/trailers/11028566/',
'info_dict': {
'id': '11028566',
'ext': 'mp4',
'title': 'Toy Story 3',
'description': 'From the creators of the beloved TOY STORY films, comes a story that will reunite the gang in a whole new way.',
'thumbnail': 're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
iva_id = self._search_regex(r'publishedid=(\d+)', webpage, 'internet video archive id')
return {
'_type': 'url_transparent',
'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?domain=www.videodetective.com&customerid=69249&playerid=641&publishedid=' + iva_id,
'ie_key': InternetVideoArchiveIE.ie_key(),
'id': video_id,
'title': self._og_search_title(webpage),
}
| unlicense | -3,198,420,102,580,834,300 | 39 | 169 | 0.598438 | false |
mancoast/CPythonPyc_test | cpython/232_test_bufio.py | 41 | 1987 | from test.test_support import verify, TestFailed, TESTFN
# Simple test to ensure that optimizations in fileobject.c deliver
# the expected results. For best testing, run this under a debug-build
# Python too (to exercise asserts in the C code).
# Repeat string 'pattern' as often as needed to reach total length
# 'length'. Then call try_one with that string, a string one larger
# than that, and a string one smaller than that. The main driver
# feeds this all small sizes and various powers of 2, so we exercise
# all likely stdio buffer sizes, and "off by one" errors on both
# sides.
def drive_one(pattern, length):
q, r = divmod(length, len(pattern))
teststring = pattern * q + pattern[:r]
verify(len(teststring) == length)
try_one(teststring)
try_one(teststring + "x")
try_one(teststring[:-1])
# Write s + "\n" + s to file, then open it and ensure that successive
# .readline()s deliver what we wrote.
def try_one(s):
# Since C doesn't guarantee we can write/read arbitrary bytes in text
# files, use binary mode.
f = open(TESTFN, "wb")
# write once with \n and once without
f.write(s)
f.write("\n")
f.write(s)
f.close()
f = open(TESTFN, "rb")
line = f.readline()
if line != s + "\n":
raise TestFailed("Expected %r got %r" % (s + "\n", line))
line = f.readline()
if line != s:
raise TestFailed("Expected %r got %r" % (s, line))
line = f.readline()
if line:
raise TestFailed("Expected EOF but got %r" % line)
f.close()
# A pattern with prime length, to avoid simple relationships with
# stdio buffer sizes.
primepat = "1234567890\00\01\02\03\04\05\06"
nullpat = "\0" * 1000
try:
for size in range(1, 257) + [512, 1000, 1024, 2048, 4096, 8192, 10000,
16384, 32768, 65536, 1000000]:
drive_one(primepat, size)
drive_one(nullpat, size)
finally:
try:
import os
os.unlink(TESTFN)
except:
pass
| gpl-3.0 | 6,624,645,991,389,826,000 | 32.116667 | 74 | 0.640161 | false |
jykntr/wishapp | app/models.py | 2 | 6544 | from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app, request
from flask.ext.login import UserMixin, AnonymousUserMixin
from . import db, login_manager
class Permission:
FOLLOW = 0x01
MAKE_LISTS = 0x02
ADMINISTER = 0x80
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.FOLLOW |
Permission.MAKE_LISTS, True),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
member_since = db.Column(db.DateTime(), default=datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
avatar_hash = db.Column(db.String(32))
@staticmethod
def generate_dev_users():
from sqlalchemy.exc import IntegrityError
# Add administrator user
admin_role = Role.query.filter_by(permissions=0xff).first()
admin = User(email='[email protected]',
username='admin',
password='admin',
confirmed=True,
name='Trusty Admin')
admin.role = admin_role
db.session.add(admin)
# Add regular users
names = ['john', 'jack']
for name in names:
user = User(email=name + '@example.com',
username=name,
password=name,
confirmed=True,
name=name)
db.session.add(user)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN_USER']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
if self.email is not None and self.avatar_hash is None:
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
db.session.add(self)
return True
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def gravatar(self, size=256):
if request.is_secure:
url = 'https://robohash.org'
else:
url = 'http://robohash.org'
hash = self.avatar_hash or hashlib.md5(
self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?size={size}x{size}&bgset=any&gravatar=hashed'\
.format(url=url, hash=hash, size=size)
def __repr__(self):
return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
| mit | -1,985,243,045,587,933,400 | 31.72 | 78 | 0.592757 | false |
chen0031/Dato-Core | cxxtest/cxxtest/cxxtestgen.py | 28 | 19506 | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v2.1
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
# vim: fileencoding=utf-8
from __future__ import division
# the above import important for forward-compatibility with python3,
# which is already the default in archlinux!
__all__ = ['main']
import __release__
import os
import sys
import re
import glob
from optparse import OptionParser
import cxxtest_parser
try:
import cxxtest_fog
imported_fog=True
except ImportError:
imported_fog=False
from cxxtest_misc import abort
options = []
suites = []
wrotePreamble = 0
wroteWorld = 0
lastIncluded = ''
def main(args=sys.argv):
'''The main program'''
#
# Reset global state
#
global wrotePreamble
wrotePreamble=0
global wroteWorld
wroteWorld=0
global lastIncluded
lastIncluded = ''
global suites
global options
files = parseCommandline(args)
if imported_fog and options.fog:
[options,suites] = cxxtest_fog.scanInputFiles( files, options )
else:
[options,suites] = cxxtest_parser.scanInputFiles( files, options )
writeOutput()
def parseCommandline(args):
'''Analyze command line arguments'''
global imported_fog
global options
parser = OptionParser("%prog [options] [<filename> ...]")
parser.add_option("--version",
action="store_true", dest="version", default=False,
help="Write the CxxTest version.")
parser.add_option("-o", "--output",
dest="outputFileName", default=None, metavar="NAME",
help="Write output to file NAME.")
parser.add_option("-w","--world", dest="world", default="cxxtest",
help="The label of the tests, used to name the XML results.")
parser.add_option("", "--include", action="append",
dest="headers", default=[], metavar="HEADER",
help="Include file HEADER in the test runner before other headers.")
parser.add_option("", "--abort-on-fail",
action="store_true", dest="abortOnFail", default=False,
help="Abort tests on failed asserts (like xUnit).")
parser.add_option("", "--main",
action="store", dest="main", default="main",
help="Specify an alternative name for the main() function.")
parser.add_option("", "--headers",
action="store", dest="header_filename", default=None,
help="Specify a filename that contains a list of header files that are processed to generate a test runner.")
parser.add_option("", "--runner",
dest="runner", default="", metavar="CLASS",
help="Create a test runner that processes test events using the class CxxTest::CLASS.")
parser.add_option("", "--gui",
dest="gui", metavar="CLASS",
help="Create a GUI test runner that processes test events using the class CxxTest::CLASS. (deprecated)")
parser.add_option("", "--error-printer",
action="store_true", dest="error_printer", default=False,
help="Create a test runner using the ErrorPrinter class, and allow the use of the standard library.")
parser.add_option("", "--xunit-printer",
action="store_true", dest="xunit_printer", default=False,
help="Create a test runner using the XUnitPrinter class.")
parser.add_option("", "--xunit-file", dest="xunit_file", default="",
help="The file to which the XML summary is written for test runners using the XUnitPrinter class. The default XML filename is TEST-<world>.xml, where <world> is the value of the --world option. (default: cxxtest)")
parser.add_option("", "--have-std",
action="store_true", dest="haveStandardLibrary", default=False,
help="Use the standard library (even if not found in tests).")
parser.add_option("", "--no-std",
action="store_true", dest="noStandardLibrary", default=False,
help="Do not use standard library (even if found in tests).")
parser.add_option("", "--have-eh",
action="store_true", dest="haveExceptionHandling", default=False,
help="Use exception handling (even if not found in tests).")
parser.add_option("", "--no-eh",
action="store_true", dest="noExceptionHandling", default=False,
help="Do not use exception handling (even if found in tests).")
parser.add_option("", "--longlong",
dest="longlong", default=None, metavar="TYPE",
help="Use TYPE as for long long integers. (default: not supported)")
parser.add_option("", "--no-static-init",
action="store_true", dest="noStaticInit", default=False,
help="Do not rely on static initialization in the test runner.")
parser.add_option("", "--template",
dest="templateFileName", default=None, metavar="TEMPLATE",
help="Generate the test runner using file TEMPLATE to define a template.")
parser.add_option("", "--root",
action="store_true", dest="root", default=False,
help="Write the main() function and global data for a test runner.")
parser.add_option("", "--part",
action="store_true", dest="part", default=False,
help="Write the tester classes for a test runner.")
#parser.add_option("", "--factor",
#action="store_true", dest="factor", default=False,
#help="Declare the _CXXTEST_FACTOR macro. (deprecated)")
if imported_fog:
fog_help = "Use new FOG C++ parser"
else:
fog_help = "Use new FOG C++ parser (disabled)"
parser.add_option("-f", "--fog-parser",
action="store_true",
dest="fog",
default=False,
help=fog_help
)
(options, args) = parser.parse_args(args=args)
if not options.header_filename is None:
if not os.path.exists(options.header_filename):
abort( "ERROR: the file '%s' does not exist!" % options.header_filename )
INPUT = open(options.header_filename)
headers = [line.strip() for line in INPUT]
args.extend( headers )
INPUT.close()
if options.fog and not imported_fog:
abort( "Cannot use the FOG parser. Check that the 'ply' package is installed. The 'ordereddict' package is also required if running Python 2.6")
if options.version:
printVersion()
# the cxxtest builder relies on this behaviour! don't remove
if options.runner == 'none':
options.runner = None
if options.xunit_printer or options.runner == "XUnitPrinter":
options.xunit_printer=True
options.runner="XUnitPrinter"
if len(args) > 1:
if options.xunit_file == "":
if options.world == "":
options.world = "cxxtest"
options.xunit_file="TEST-"+options.world+".xml"
elif options.xunit_file == "":
if options.world == "":
options.world = "cxxtest"
options.xunit_file="TEST-"+options.world+".xml"
if options.error_printer:
options.runner= "ErrorPrinter"
options.haveStandardLibrary = True
if options.noStaticInit and (options.root or options.part):
abort( '--no-static-init cannot be used with --root/--part' )
if options.gui and not options.runner:
options.runner = 'StdioPrinter'
files = setFiles(args[1:])
if len(files) == 0 and not options.root:
sys.stderr.write(parser.error("No input files found"))
return files
def printVersion():
'''Print CxxTest version and exit'''
sys.stdout.write( "This is CxxTest version %s.\n" % __release__.__version__ )
sys.exit(0)
def setFiles(patterns ):
'''Set input files specified on command line'''
files = expandWildcards( patterns )
return files
def expandWildcards( patterns ):
'''Expand all wildcards in an array (glob)'''
fileNames = []
for pathName in patterns:
patternFiles = glob.glob( pathName )
for fileName in patternFiles:
fileNames.append( fixBackslashes( fileName ) )
return fileNames
def fixBackslashes( fileName ):
'''Convert backslashes to slashes in file name'''
return re.sub( r'\\', '/', fileName, 0 )
def writeOutput():
'''Create output file'''
if options.templateFileName:
writeTemplateOutput()
else:
writeSimpleOutput()
def writeSimpleOutput():
'''Create output not based on template'''
output = startOutputFile()
writePreamble( output )
if options.root or not options.part:
writeMain( output )
if len(suites) > 0:
output.write("bool "+suites[0]['object']+"_init = false;\n")
writeWorld( output )
output.close()
include_re = re.compile( r"\s*\#\s*include\s+<cxxtest/" )
preamble_re = re.compile( r"^\s*<CxxTest\s+preamble>\s*$" )
world_re = re.compile( r"^\s*<CxxTest\s+world>\s*$" )
def writeTemplateOutput():
'''Create output based on template file'''
template = open(options.templateFileName)
output = startOutputFile()
while 1:
line = template.readline()
if not line:
break;
if include_re.search( line ):
writePreamble( output )
output.write( line )
elif preamble_re.search( line ):
writePreamble( output )
elif world_re.search( line ):
if len(suites) > 0:
output.write("bool "+suites[0]['object']+"_init = false;\n")
writeWorld( output )
else:
output.write( line )
template.close()
output.close()
def startOutputFile():
'''Create output file and write header'''
if options.outputFileName is not None:
output = open( options.outputFileName, 'w' )
else:
output = sys.stdout
output.write( "/* Generated file, do not edit */\n\n" )
return output
def writePreamble( output ):
'''Write the CxxTest header (#includes and #defines)'''
global wrotePreamble
if wrotePreamble: return
output.write( "#ifndef CXXTEST_RUNNING\n" )
output.write( "#define CXXTEST_RUNNING\n" )
output.write( "#endif\n" )
output.write( "\n" )
if options.xunit_printer:
output.write( "#include <fstream>\n" )
if options.haveStandardLibrary:
output.write( "#define _CXXTEST_HAVE_STD\n" )
if options.haveExceptionHandling:
output.write( "#define _CXXTEST_HAVE_EH\n" )
if options.abortOnFail:
output.write( "#define _CXXTEST_ABORT_TEST_ON_FAIL\n" )
if options.longlong:
output.write( "#define _CXXTEST_LONGLONG %s\n" % options.longlong )
#if options.factor:
#output.write( "#define _CXXTEST_FACTOR\n" )
for header in options.headers:
output.write( "#include \"%s\"\n" % header )
output.write( "#include <cxxtest/TestListener.h>\n" )
output.write( "#include <cxxtest/TestTracker.h>\n" )
output.write( "#include <cxxtest/TestRunner.h>\n" )
output.write( "#include <cxxtest/RealDescriptions.h>\n" )
output.write( "#include <cxxtest/TestMain.h>\n" )
if options.runner:
output.write( "#include <cxxtest/%s.h>\n" % options.runner )
if options.gui:
output.write( "#include <cxxtest/%s.h>\n" % options.gui )
output.write( "\n" )
wrotePreamble = 1
def writeMain( output ):
'''Write the main() function for the test runner'''
if not (options.gui or options.runner):
return
output.write( 'int %s( int argc, char *argv[] ) {\n' % options.main )
output.write( ' int status;\n' )
if options.noStaticInit:
output.write( ' CxxTest::initialize();\n' )
if options.gui:
tester_t = "CxxTest::GuiTuiRunner<CxxTest::%s, CxxTest::%s> " % (options.gui, options.runner)
else:
tester_t = "CxxTest::%s" % (options.runner)
if options.xunit_printer:
output.write( ' std::ofstream ofstr("%s");\n' % options.xunit_file )
output.write( ' %s tmp(ofstr);\n' % tester_t )
output.write( ' CxxTest::RealWorldDescription::_worldName = "%s";\n' % options.world )
else:
output.write( ' %s tmp;\n' % tester_t )
output.write( ' status = CxxTest::Main<%s>( tmp, argc, argv );\n' % tester_t )
output.write( ' return status;\n')
output.write( '}\n' )
def writeWorld( output ):
'''Write the world definitions'''
global wroteWorld
if wroteWorld: return
writePreamble( output )
writeSuites( output )
if options.root or not options.part:
writeRoot( output )
writeWorldDescr( output )
if options.noStaticInit:
writeInitialize( output )
wroteWorld = 1
def writeSuites(output):
'''Write all TestDescriptions and SuiteDescriptions'''
for suite in suites:
writeInclude( output, suite['file'] )
if isGenerated(suite):
generateSuite( output, suite )
if isDynamic(suite):
writeSuitePointer( output, suite )
else:
writeSuiteObject( output, suite )
writeTestList( output, suite )
writeSuiteDescription( output, suite )
writeTestDescriptions( output, suite )
def isGenerated(suite):
'''Checks whether a suite class should be created'''
return suite['generated']
def isDynamic(suite):
'''Checks whether a suite is dynamic'''
return 'create' in suite
def writeInclude(output, file):
'''Add #include "file" statement'''
global lastIncluded
if file == lastIncluded: return
output.writelines( [ '#include "', file, '"\n\n' ] )
lastIncluded = file
def generateSuite( output, suite ):
'''Write a suite declared with CXXTEST_SUITE()'''
output.write( 'class %s : public CxxTest::TestSuite {\n' % suite['name'] )
output.write( 'public:\n' )
for line in suite['lines']:
output.write(line)
output.write( '};\n\n' )
def writeSuitePointer( output, suite ):
'''Create static suite pointer object for dynamic suites'''
if options.noStaticInit:
output.write( 'static %s *%s;\n\n' % (suite['name'], suite['object']) )
else:
output.write( 'static %s *%s = 0;\n\n' % (suite['name'], suite['object']) )
def writeSuiteObject( output, suite ):
'''Create static suite object for non-dynamic suites'''
output.writelines( [ "static ", suite['name'], " ", suite['object'], ";\n\n" ] )
def writeTestList( output, suite ):
'''Write the head of the test linked list for a suite'''
if options.noStaticInit:
output.write( 'static CxxTest::List %s;\n' % suite['tlist'] )
else:
output.write( 'static CxxTest::List %s = { 0, 0 };\n' % suite['tlist'] )
def writeWorldDescr( output ):
'''Write the static name of the world name'''
if options.noStaticInit:
output.write( 'const char* CxxTest::RealWorldDescription::_worldName;\n' )
else:
output.write( 'const char* CxxTest::RealWorldDescription::_worldName = "cxxtest";\n' )
def writeTestDescriptions( output, suite ):
'''Write all test descriptions for a suite'''
for test in suite['tests']:
writeTestDescription( output, suite, test )
def writeTestDescription( output, suite, test ):
'''Write test description object'''
output.write( 'static class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
output.write( 'public:\n' )
if not options.noStaticInit:
output.write( ' %s() : CxxTest::RealTestDescription( %s, %s, %s, "%s" ) {}\n' %
(test['class'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
output.write( ' void runTest() { %s }\n' % runBody( suite, test ) )
output.write( '} %s;\n\n' % test['object'] )
def runBody( suite, test ):
'''Body of TestDescription::run()'''
if isDynamic(suite): return dynamicRun( suite, test )
else: return staticRun( suite, test )
def dynamicRun( suite, test ):
'''Body of TestDescription::run() for test in a dynamic suite'''
return 'if ( ' + suite['object'] + ' ) ' + suite['object'] + '->' + test['name'] + '();'
def staticRun( suite, test ):
'''Body of TestDescription::run() for test in a non-dynamic suite'''
return suite['object'] + '.' + test['name'] + '();'
def writeSuiteDescription( output, suite ):
'''Write SuiteDescription object'''
if isDynamic( suite ):
writeDynamicDescription( output, suite )
else:
writeStaticDescription( output, suite )
def writeDynamicDescription( output, suite ):
'''Write SuiteDescription for a dynamic suite'''
output.write( 'CxxTest::DynamicSuiteDescription<%s> %s' % (suite['name'], suite['dobject']) )
if not options.noStaticInit:
output.write( '( %s, %s, "%s", %s, %s, %s, %s )' %
(suite['cfile'], suite['line'], suite['name'], suite['tlist'],
suite['object'], suite['create'], suite['destroy']) )
output.write( ';\n\n' )
def writeStaticDescription( output, suite ):
'''Write SuiteDescription for a static suite'''
output.write( 'CxxTest::StaticSuiteDescription %s' % suite['dobject'] )
if not options.noStaticInit:
output.write( '( %s, %s, "%s", %s, %s )' %
(suite['cfile'], suite['line'], suite['name'], suite['object'], suite['tlist']) )
output.write( ';\n\n' )
def writeRoot(output):
'''Write static members of CxxTest classes'''
output.write( '#include <cxxtest/Root.cpp>\n' )
def writeInitialize(output):
'''Write CxxTest::initialize(), which replaces static initialization'''
output.write( 'namespace CxxTest {\n' )
output.write( ' void initialize()\n' )
output.write( ' {\n' )
for suite in suites:
output.write( ' %s.initialize();\n' % suite['tlist'] )
if isDynamic(suite):
output.write( ' %s = 0;\n' % suite['object'] )
output.write( ' %s.initialize( %s, %s, "%s", %s, %s, %s, %s );\n' %
(suite['dobject'], suite['cfile'], suite['line'], suite['name'],
suite['tlist'], suite['object'], suite['create'], suite['destroy']) )
else:
output.write( ' %s.initialize( %s, %s, "%s", %s, %s );\n' %
(suite['dobject'], suite['cfile'], suite['line'], suite['name'],
suite['object'], suite['tlist']) )
for test in suite['tests']:
output.write( ' %s.initialize( %s, %s, %s, "%s" );\n' %
(test['object'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
output.write( ' }\n' )
output.write( '}\n' )
| agpl-3.0 | -5,187,205,807,677,835,000 | 39.6375 | 238 | 0.592074 | false |
urbaniak/rjsmin | _setup/py2/make/default_targets.py | 6 | 3317 | # -*- coding: ascii -*-
#
# Copyright 2007, 2008, 2009, 2010, 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
==================
Simple make base
==================
Simple make base.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import os as _os
import sys as _sys
from _setup import make as _make
from _setup import shell as _shell
class MakefileTarget(_make.Target):
""" Create a make file """
NAME = 'makefile'
def run(self):
def escape(value):
""" Escape for make and shell """
return '"%s"' % value.replace(
'\\', '\\\\').replace(
'"', '\\"').replace(
'$', '\\$$')
def decorate(line, prefix='# ', width=78, char='~', padding=' '):
""" Decorate a line """
line = line.center(width - len(prefix))
return '%s%s%s%s%s%s' % (
prefix,
char * (len(line) - len(line.lstrip()) - len(padding)),
padding,
line.strip(),
padding,
char * (len(line) - len(line.rstrip()) - len(padding)),
)
python = escape(_sys.executable)
script = escape(_sys.argv[0])
targets = self.runner.targetinfo()
names = []
for name, info in targets.items():
if not info['hide']:
names.append(name)
names.sort()
fp = open(_shell.native('Makefile'), 'w')
print >> fp, decorate("Generated Makefile, DO NOT EDIT")
print >> fp, decorate("python %s %s" % (
_os.path.basename(script), self.NAME
))
print >> fp
print >> fp, "_default_:"
print >> fp, "\t@%s %s" % (python, script)
for name in names:
print >> fp, "\n"
print >> fp, "# %s" % \
targets[name]['desc'].splitlines()[0].strip()
print >> fp, "%s:" % name
print >> fp, "\t@%s %s %s" % (python, script, escape(name))
print >> fp
extension = self.extend(names)
if extension is not None:
print >> fp, extension
print >> fp
print >> fp, ".PHONY: _default_ %s\n\n" % ' '.join(names)
fp.close()
def extend(self, names):
pass
class CleanTarget(_make.Target):
""" Clean the mess """
NAME = 'clean'
_scm, _dist = True, False
def run(self):
self.runner.run_clean(scm=self._scm, dist=self._dist)
class DistCleanTarget(CleanTarget):
""" Clean as freshly unpacked dist package """
NAME = 'distclean'
_scm, _dist = False, True
class ExtraCleanTarget(CleanTarget):
""" Clean everything """
NAME = 'extraclean'
_scm, _dist = True, True
| apache-2.0 | -4,551,624,158,483,825,000 | 29.154545 | 74 | 0.539343 | false |
stianrh/askbot-nordic | askbot/management/commands/jinja2_makemessages.py | 15 | 2362 | """
Taken from Coffin
http://github.com/miracle2k/coffin/raw/master/coffin/management/commands/makemessages.py
added support for the pluralization
Jinja2's i18n functionality is not exactly the same as Django's.
In particular, the tags names and their syntax are different:
1. The Django ``trans`` tag is replaced by a _() global.
2. The Django ``blocktrans`` tag is called ``trans``.
(1) isn't an issue, since the whole ``makemessages`` process is based on
converting the template tags to ``_()`` calls. However, (2) means that
those Jinja2 ``trans`` tags will not be picked up my Django's
``makemessage`` command.
There aren't any nice solutions here. While Jinja2's i18n extension does
come with extraction capabilities built in, the code behind ``makemessages``
unfortunately isn't extensible, so we can:
* Duplicate the command + code behind it.
* Offer a separate command for Jinja2 extraction.
* Try to get Django to offer hooks into makemessages().
* Monkey-patch.
We are currently doing that last thing. It turns out there we are lucky
for once: It's simply a matter of extending two regular expressions.
Credit for the approach goes to:
http://stackoverflow.com/questions/2090717/getting-translation-strings-for-jinja2-templates-integrated-with-django-1-x
"""
import re
from django.core.management.commands import makemessages
from django.utils.translation import trans_real
class Command(makemessages.Command):
def handle(self, *args, **options):
old_endblock_re = trans_real.endblock_re
old_block_re = trans_real.block_re
old_plural_re = trans_real.plural_re
# Extend the regular expressions that are used to detect
# translation blocks with an "OR jinja-syntax" clause.
trans_real.endblock_re = re.compile(
trans_real.endblock_re.pattern + '|' + r"""^-?\s*endtrans\s*-?$""")
trans_real.block_re = re.compile(
trans_real.block_re.pattern + '|' + r"""^-?\s*trans(?:\s*|$)""")
trans_real.plural_re = re.compile(
trans_real.plural_re.pattern + '|' + r"""^-?\s*pluralize(\s+\w+)?\s*-?$""")
try:
super(Command, self).handle(*args, **options)
finally:
trans_real.endblock_re = old_endblock_re
trans_real.block_re = old_block_re
trans_real.plural_re = old_plural_re
| gpl-3.0 | 561,947,539,413,680,640 | 40.438596 | 118 | 0.686706 | false |
prabhamatta/Analyzing-Open-Data | notebooks/Day_05_B_Geographical_Hierarchies.py | 1 | 13634 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
# our usual pylab import
%pylab --no-import-all inline
# <headingcell level=1>
# Goal
# <markdowncell>
# For background, see [Mapping Census Data](http://www.udel.edu/johnmack/frec682/census/), including the
# [scan of the 10-question form](http://www.udel.edu/johnmack/frec682/census/census_form.png). Keep in mind what people were asked and the range of data available in the census.
#
# Using the census API to get an understanding of some of the geographic entities in the **2010 census**. We'll specifically be using the variable `P0010001`, the total population.
#
# What you will do in this notebook:
#
# * Sum the population of the **states** (or state-like entity like DC) to get the total population of the **nation**
# * Add up the **counties** for each **state** and validate the sums
# * Add up the **census tracts** for each **county** and validate the sums
#
# We will make use of `pandas` in this notebook.
# <markdowncell>
# I often have the following [diagram](http://www.census.gov/geo/reference/pdfs/geodiagram.pdf) in mind to help understand the relationship among entities. Also use the [list of example URLs](http://api.census.gov/data/2010/sf1/geo.html) -- it'll come in handy.
# <markdowncell>
# <a href="http://www.flickr.com/photos/raymondyee/12297467734/" title="Census Geographic Hierarchies by Raymond Yee, on Flickr"><img src="http://farm4.staticflickr.com/3702/12297467734_af8882d310_c.jpg" width="618" height="800" alt="Census Geographic Hierarchies"></a>
# <headingcell level=1>
# Working out the geographical hierarchy for Cafe Milano
# <markdowncell>
# It's helpful to have a concrete instance of a place to work with, especially when dealing with rather intangible entities like census tracts, block groups, and blocks. You can use the [American FactFinder](http://factfinder2.census.gov/faces/nav/jsf/pages/index.xhtml) site to look up for any given US address the corresponding census geographies.
#
# Let's use Cafe Milano in Berkeley as an example. You can verify the following results by typing in the address into http://factfinder2.census.gov/faces/nav/jsf/pages/searchresults.xhtml?refresh=t.
#
# https://www.evernote.com/shard/s1/sh/dc0bfb96-4965-4fbf-bc28-c9d4d0080782/2bd8c92a045d62521723347d62fa2b9d
#
# 2522 Bancroft Way, BERKELEY, CA, 94704
#
# * State: California
# * County: Alameda County
# * County Subdivision: Berkeley CCD, Alameda County, California
# * Census Tract: Census Tract 4228, Alameda County, California
# * Block Group: Block Group 1, Census Tract 4228, Alameda County, California
# * Block: Block 1001, Block Group 1, Census Tract 4228, Alameda County, California
#
# <codecell>
# YouTube video I made on how to use the American Factfinder site to look up addresses
from IPython.display import YouTubeVideo
YouTubeVideo('HeXcliUx96Y')
# <codecell>
# standard numpy, pandas, matplotlib imports
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame, Series, Index
import pandas as pd
# <codecell>
# check that CENSUS_KEY is defined
import census
import us
import requests
import settings
assert settings.CENSUS_KEY is not None
# <markdowncell>
# The census documentation has example URLs but needs your API key to work. In this notebook, we'll use the IPython notebook HTML display mechanism to help out.
# <codecell>
c = census.Census(key=settings.CENSUS_KEY)
# <markdowncell>
# Note: we can use `c.sf1` to access 2010 census (SF1: Census Summary File 1 (2010, 2000, 1990) available in API -- 2010 is the default)
#
# see documentation: [sunlightlabs/census](https://github.com/sunlightlabs/census)
# <headingcell level=1>
# Summing up populations by state
# <markdowncell>
# Let's make a `DataFrame` named `states_df` with columns `NAME`, `P0010001` (for population), and `state` (to hold the FIPS code). **Make sure to exclude Puerto Rico.**
# <codecell>
# call the API and instantiate `df`
df = DataFrame(c.sf1.get('NAME,P0010001', geo={'for':'state:*'}))
# convert the population to integer
df['P0010001'] = df['P0010001'].astype(np.int)
df.head()
# <markdowncell>
# You can filter Puerto Rico (PR) in a number of ways -- use the way you're most comfortable with.
#
# Optional fun: filter PR in the following way
#
# * calculate a `np.array` holding the the fips of the states
# * then use [numpy.in1d](http://docs.scipy.org/doc/numpy/reference/generated/numpy.in1d.html), which is a analogous to the [in](http://stackoverflow.com/a/3437130/7782) operator to test membership in a list
# <codecell>
us.states.STATES
states_fips = np.array([state.fips for state in us.states.STATES])
#states_fips
# <codecell>
states_df = df[np.in1d(df.state,states_fips)]
states_df.head() #first 5 rows of data
states_df.columns #heading of the table
# <markdowncell>
# If `states_df` is calculated properly, the following asserts will pass silently.
# <codecell>
# check that we have three columns
assert set(states_df.columns) == set((u'NAME', u'P0010001', u'state'))
# check that the total 2010 census population is correct
assert np.sum(states_df.P0010001) == 308745538
# check that the number of states+DC is 51
assert len(states_df) == 51
# <headingcell level=1>
# Counties
# <markdowncell>
# Looking at http://api.census.gov/data/2010/sf1/geo.html, we see
#
# state-county: http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*
#
# if we want to grab all counties in one go, or you can grab counties state-by-state:
#
# http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*&in=state:06
#
# for all counties in the state with FIPS code `06` (which is what state?)
# <codecell>
#to find the state whose FIPS code is 06
for state in us.states.STATES:
if state.fips == '06':
print state
# <markdowncell>
# <codecell>
# Here's a way to use translate
# http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*
# into a call using the census.Census object
r = c.sf1.get('NAME,P0010001', geo={'for':'county:*'})
# ask yourself what len(r) means and what it should be
len(r)
# <codecell>
# let's try out one of the `census` object convenience methods
# instead of using `c.sf1.get`
r = c.sf1.state_county('NAME,P0010001',census.ALL,census.ALL)
r
# <codecell>
# convert the json from the API into a DataFrame
# coerce to integer the P0010001 column
df = DataFrame(r)
df['P0010001'] = df['P0010001'].astype('int')
# display the first records
df.head()
# <codecell>
# calculate the total population
# what happens when you google the number you get?
np.sum(df['P0010001'])
# <codecell>
# often you can use dot notation to access a DataFrame column
df.P0010001.head()
# <codecell>
# let's filter out PR -- what's the total population now
sum(df[np.in1d(df.state, states_fips)].P0010001)
# <codecell>
# fall back to non-Pandas solution if you need ton
np.sum([int(county['P0010001']) for county in r if county['state'] in states_fips])
# <codecell>
# construct counties_df with only 50 states + DC
counties_df = df[np.in1d(df.state, states_fips)]
len(counties_df)
# <codecell>
set(counties_df.columns) == set(df.columns)
# <markdowncell>
# Check properties of `counties_df`
# <codecell>
# number of counties
assert len(counties_df) == 3143 #3143 county/county-equivs in US
# <codecell>
# check that the total population by adding all counties == population by adding all states
assert np.sum(counties_df['P0010001']) == np.sum(states_df.P0010001)
# <codecell>
# check we have same columns between counties_df and df
set(counties_df.columns) == set(df.columns)
# <headingcell level=1>
# Using FIPS code as the Index
# <markdowncell>
# From [Mapping Census Data](http://www.udel.edu/johnmack/frec682/census/):
#
# * Each state (SUMLEV = 040) has a 2-digit FIPS ID; Delaware's is 10.
# * Each county (SUMLEV = 050) within a state has a 3-digit FIPS ID, appended to the 2-digit state ID. New Castle County, Delaware, has FIPS ID 10003.
# * Each Census Tract (SUMLEV = 140) within a county has a 6-digit ID, appended to the county code. The Tract in New Castle County DE that contains most of the the UD campus has FIPS ID 10003014502.
# * Each Block Group (SUMLEV = 150) within a Tract has a single digit ID appended to the Tract ID. The center of campus in the northwest corner of the tract is Block Group100030145022.
# * Each Block (SUMLEV = 750) within a Block Group is identified by three more digits appended to the Block Group ID. Pearson Hall is located in Block 100030145022009.
# <codecell>
# take a look at the current structure of counties_df
counties_df.head()
# <codecell>
# reindex states_df by state FIPS
# http://pandas.pydata.org/pandas-docs/dev/generated/pandas.DataFrame.set_index.html
states_df.set_index(keys='state', inplace=True)
states_df.head()
# <codecell>
# display the result of using set_index
counties_df.head()
# <codecell>
# #Prabha: calculate
# counties_df.P0010001.apply(lambda n: n)
# #to apply to a column
# def double(x):
# return 2*x
# counties_df.P0010001.apply(double)
# to do something at a row by using axis=1
#counties_df.apply(lambda s:(s['NAME)'], s['P0010001']), axis=1).head()
# counties_df.apply(lambda s:(s['NAME)'], s['P0010001']), axis=1).head()
# <codecell>
# http://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/#create
counties_df['FIPS'] = counties_df.apply(lambda s:s['state'] + s['county'], axis=1)
counties_df.set_index('FIPS', inplace=True)
# <codecell>
counties_df.head()
# <codecell>
counties_df.groupby('state').sum().head()
# <codecell>
states_df.P0010001.head()
# <codecell>
# now we're ready to compare for each state, if you add all the counties, do you get the same
# population?
# not that you can do .agg('sum') instead of .sum()
# look at http://pandas.pydata.org/pandas-docs/dev/groupby.html to learn more about agg
np.all(states_df.P0010001 == counties_df.groupby('state').agg('sum').P0010001)
# <headingcell level=1>
# Counties in California
# <markdowncell>
# Let's look at home: California state and Alameda County
# <codecell>
# boolean indexing to pull up California
states_df[states_df.NAME == 'California']
# <codecell>
# use .ix -- most general indexing
# http://pandas.pydata.org/pandas-docs/dev/indexing.html#different-choices-for-indexing-loc-iloc-and-ix
states_df.ix['06']
# <codecell>
# California counties
counties_df[counties_df.state=='06']
# <codecell>
counties_df[counties_df.NAME == 'Alameda County']
# <codecell>
counties_df[counties_df.NAME == 'Alameda County']['P0010001']
# <markdowncell>
# Different ways to read off the population of Alameda County -- still looking for the best way
# <codecell>
counties_df[counties_df.NAME == 'Alameda County']['P0010001'].to_dict().values()[0]
# <codecell>
list(counties_df[counties_df.NAME == 'Alameda County']['P0010001'].iteritems())[0][1]
# <codecell>
int(counties_df[counties_df.NAME == 'Alameda County']['P0010001'].values)
# <markdowncell>
# If you know the FIPS code for Alameda County, just read off the population using `.ix`
# <codecell>
# this is like accessing a cell in a spreadsheet -- row, col
ALAMEDA_COUNTY_FIPS = '06001'
counties_df.ix[ALAMEDA_COUNTY_FIPS,'P0010001']
# <headingcell level=1>
# Reading off all the tracts in Alameda County
# <codecell>
counties_df.ix[ALAMEDA_COUNTY_FIPS,'county']
# <codecell>
# http://api.census.gov/data/2010/sf1/geo.html
# state-county-tract
geo = {'for': 'tract:*',
'in': 'state:%s county:%s' % (us.states.CA.fips,
counties_df.ix[ALAMEDA_COUNTY_FIPS,'county'])}
r = c.sf1.get('NAME,P0010001', geo=geo)
# <codecell>
# alameda_county_tracts_df.apply(lambda s: s['state']+s['county']+s['tract'], axis=1)
# <codecell>
#use state_county_tract to make a DataFrame
alameda_county_tracts_df = DataFrame(r)
alameda_county_tracts_df['P0010001'] = alameda_county_tracts_df['P0010001'].astype('int')
alameda_county_tracts_df['FIPS'] = alameda_county_tracts_df.apply(lambda s: s['state']+s['county']+s['tract'], axis=1)
alameda_county_tracts_df.head()
# <codecell>
alameda_county_tracts_df.P0010001.sum()
# <codecell>
# Cafe Milano is in tract 4228
MILANO_TRACT_ID = '422800'
alameda_county_tracts_df[alameda_county_tracts_df.tract==MILANO_TRACT_ID]
# <headingcell level=1>
# Using Generators to yield all the tracts in the country
# <markdowncell>
# http://www.jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/
# <codecell>
import time
import us
from itertools import islice
def census_tracts(variable=('NAME','P0010001'), sleep_time=1.0):
for state in us.states.STATES:
print state
for tract in c.sf1.get(variable,
geo={'for':"tract:*",
'in':'state:{state_fips}'.format(state_fips=state.fips)
}):
yield tract
# don't hit the API more than once a second
time.sleep(sleep_time)
# limit the number of tracts we crawl for until we're reading to get all of them
tracts_df = DataFrame(list(islice(census_tracts(), 100)))
tracts_df['P0010001'] = tracts_df['P0010001'].astype('int')
# <codecell>
tracts_df.head()
# <headingcell level=1>
# Compare with Tabulations
# <markdowncell>
# We can compare the total number of tracts we calculate to:
#
# https://www.census.gov/geo/maps-data/data/tallies/tractblock.html
#
# and
#
# https://www.census.gov/geo/maps-data/data/docs/geo_tallies/Tract_Block2010.txt
| apache-2.0 | 3,533,805,183,281,788,400 | 26.82449 | 352 | 0.711383 | false |
lewismc/podaacpy | podaac/drive.py | 3 | 5645 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import gzip
import os
import requests
from requests.auth import HTTPBasicAuth
class Drive:
def __init__(self, file, username, password, webdav_url='https://podaac-tools.jpl.nasa.gov/drive/files'):
''' In order to access PODAAC Drive, all users are required to be registered \
with NASA Earthdata system. User can login to the PODAAC Drive using the \
following link https://podaac-tools.jpl.nasa.gov/drive/. \
Once you have authenticated, you will be able to view, retrieve and change \
your encrypted password. N.B. The encrypted password must then either be entered \
into `podaac.ini` and passes as an argument to `file`, or alternatively provided \
via the `username` parameter.
'''
config = configparser.ConfigParser()
if file:
config_file_path = os.path.join(os.path.dirname(__file__), file)
config.read_file(open(config_file_path, 'r'))
self.USERNAME = config['drive']['urs_username']
self.PASSWORD = config['drive']['urs_password']
self.URL = config['drive']['webdav_url']
if username:
self.USERNAME = username
if password:
self.PASSWORD = password
if webdav_url:
self.URL = webdav_url
def mine_drive_urls_from_granule_search(self, granule_search_response=''):
''' Convenience function which extracts the PO.DAAC Drive URLs from \
a given granule search obtained using podaac.granule_search(). \
The response of this function is an array of strings denoting the \
PO.DAAC Drive URLs to the granules.
:param granule_search_response: the output response of a podaac.granule_search()
:type path: :mod:`string`
:returns: prints an array of PO.DAAC Drive URLs.
'''
from bs4 import BeautifulSoup
soup = BeautifulSoup(granule_search_response, 'html.parser')
drive_list = []
for drive_link in soup.find_all('link'):
href = drive_link.get('href')
if self.URL in href:
drive_list.append(href)
return drive_list
def download_granules(self, granule_collection=None, path=''):
''' Granule download service downloads a granule collection \
from PO.DAAC Drive to the users' local machine at the given path. Note, as \
of https://github.com/nasa/podaacpy/issues/131 we now maintain the PO.DAAC \
Drive directory structure. This is to say, if the Drive URL was \
https://podaac-tools.jpl.nasa.gov/drive/files/allData/ghrsst/data/GDS2/L2P/AVHRR19_L/NAVO/v1/2019/088/20190329001403-NAVO-L2P_GHRSST-SST1m-AVHRR19_L-v02.0-fv01.0.nc \
then a directory structure would be created as follows \
allData/ghrsst/data/GDS2/L2P/AVHRR19_L/NAVO/v1/2019/088/20190329001403-NAVO-L2P_GHRSST-SST1m-AVHRR19_L-v02.0-fv01.0.nc
:param granule_collection: a populated collection of PO.DAAC Drive Granule URLs. \
These can be obtained by using the drive.mine_drive_urls_from_granule_search() \
function which itself merely wraps a podaac.granule_search() request.
:type granule_collection: :mod:`string`
:param path: path to a directory where you want the data to be stored.
:type path: :mod:`string`
:returns: a zip file downloaded and extracted in the destination \
directory path provided.
'''
if granule_collection is None:
granule_collection = []
for granule_url in granule_collection:
directory_structure, granule = os.path.split(granule_url[46:])
granule_name = os.path.splitext(granule)[0]
if path == '':
granule_path = os.path.join(os.path.dirname(__file__), directory_structure)
else:
granule_path = path + '/' + directory_structure
r = requests.get(granule_url, auth=HTTPBasicAuth(self.USERNAME, self.PASSWORD), stream=True)
if r.status_code != 200:
raise PermissionError("Granule: '%s' not downloaded. Please check authentication configuration and try again." % (granule))
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path # python 2 backport
Path(granule_path).mkdir(parents=True, exist_ok=True)
with open(granule_path + "/" + granule, 'wb') as f:
for chunk in r:
f.write(chunk)
if granule.endswith('.gz'):
gzip_granule = gzip.open(granule_path + "/" + granule, 'rb')
with open(granule_path + "/" + granule_name, 'wb') as uncompressed_granule:
uncompressed_granule.write(gzip_granule.read())
gzip_granule.close()
uncompressed_granule.close()
os.remove(granule_path + "/" + granule)
| apache-2.0 | -5,893,766,555,220,921,000 | 48.955752 | 178 | 0.625155 | false |
FreeON/spammpack | src-charm/analyze_timing.py | 1 | 4752 | #!/usr/bin/env python
import argparse
import matplotlib.pyplot as plt
import numpy
import os
import re
import signal
import sys
def make_graph ():
np = []
for i in timing[PPV]:
np.append(i)
np.sort()
t = []
np_dead = []
global t_0
global number_samples
if iteration < 0:
additional_label = "all"
for i in np:
t.append(timing[PPV][i]["t_total"])
else:
for i in np:
if iteration <= len(timing[PPV][i]["t_multiply"]):
t.append(
timing[PPV][i]["t_multiply"][iteration-1]
+timing[PPV][i]["t_add"][iteration-1]
+timing[PPV][i]["t_setEq"][iteration-1])
else:
np_dead.append(i)
for i in np_dead:
np.remove(i)
additional_label = "iteration {:d}, complexity {:d}".format(
iteration, timing[PPV][np[0]]["complexity"][iteration-1])
plt.loglog(np, t, '-o', label = "PPV{:03d} ({:s})".format(
PPV, additional_label), hold = True)
if options.one_ideal:
number_samples += 1
t_0 += t[0]/np[0]
else:
plt.loglog(
[ np[0], np[-1] ],
[ t[0], t[0]*np[0]/np[-1] ],
'-',
hold = True)
return np, t
parser = argparse.ArgumentParser()
parser.add_argument("OUTFILE",
help = "The output file of a spamm SP2 calculation",
nargs = "+")
parser.add_argument("--iteration",
metavar = "N",
help = "Plot iteration N instead of total time",
type = int,
nargs = "+",
action = "append")
parser.add_argument("--one-graph",
help = "Plot all iterations on one graph",
action = "store_true",
default = False)
parser.add_argument("--one-ideal",
help = "average serial time to get one ideal",
action = "store_true",
default = False)
parser.add_argument("--title",
help = "The graph title",
default = "")
parser.add_argument("--debug",
help = "Print debug information",
action = "store_true",
default = False)
options = parser.parse_args()
while True:
temp = []
flattened = True
for i in options.iteration:
if type(i) != type(1):
flattened = False
for j in i:
temp.append(j)
else:
temp.append(i)
options.iteration = temp
if flattened:
break
timing = {}
for f in options.OUTFILE:
fd = open(f)
np = 0
PPV = 0
t_multiply = []
t_add = []
t_setEq = []
complexity = []
for line in fd:
if np == 0:
result = re.compile("Running on\s+([0-9]+)\s+unique").search(line)
if result:
np = int(result.group(1))
if PPV == 0:
result = re.compile("PP([0-9]+).OrthoF").search(line)
if result:
PPV = int(result.group(1))
if not PPV in timing:
timing[PPV] = {}
result = re.compile(
"iteration\s+([0-9]+), "
+ "multiply: ([0-9.]+) seconds, "
+ "setEq: ([0-9.]+) seconds, "
+ "add: ([0-9.]+) seconds.+"
+ "complexity ([0-9]+)").search(line)
if result:
iteration = int(result.group(1))
t_multiply.append(float(result.group(2)))
t_add.append(float(result.group(3)))
t_setEq.append(float(result.group(4)))
complexity.append(int(result.group(5)))
if iteration != len(t_multiply):
raise Exception("strange...")
t_multiply = numpy.array(t_multiply)
t_add = numpy.array(t_add)
t_setEq = numpy.array(t_setEq)
timing[PPV][np] = {
"t_total" : numpy.sum(t_multiply)+numpy.sum(t_add)+numpy.sum(t_setEq),
"t_multiply" : t_multiply,
"t_add" : t_add,
"t_setEq" : t_setEq,
"complexity" : complexity
}
if options.debug:
print("file {:s}, PPV{:03d}, np = {:d}, {:d} iterations".format(
f, PPV, np, len(timing[PPV][np]["t_multiply"])))
if options.one_graph:
for PPV in timing:
number_samples = 0
t_0 = 0
for iteration in options.iteration:
np, t = make_graph()
if not options.one_ideal:
plt.loglog(
[ np[0], np[-1] ],
[ t[0], t[0]*np[0]/np[-1] ],
'-',
hold = True)
if options.one_ideal:
plt.loglog(
[ np[0], np[-1] ],
[ t_0/number_samples*np[0], t_0/number_samples*np[0]/np[-1] ],
'-',
hold = True)
plt.legend()
plt.xlabel("# PEs")
plt.ylabel("walltime [s]")
plt.title(options.title)
plt.show()
else:
pid = []
for iteration in options.iteration:
pid.append(os.fork())
if(pid[-1]):
pass
else:
plt.figure(iteration)
for PPV in timing:
make_graph()
plt.legend()
plt.xlabel("# PEs")
plt.ylabel("walltime [s]")
plt.title(options.title)
plt.show()
print("done, please press Enter to close all windows")
sys.stdin.readline()
for i in pid:
os.kill(i, signal.SIGHUP)
| bsd-3-clause | 1,658,309,229,996,763,100 | 21.736842 | 76 | 0.552399 | false |
felix1m/pyspotify | setup.py | 3 | 1584 | from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def read_file(filename):
with open(filename) as fh:
return fh.read()
def get_version(filename):
init_py = read_file(filename)
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", init_py))
return metadata['version']
setup(
name='pyspotify',
version=get_version('spotify/__init__.py'),
url='http://pyspotify.mopidy.com/',
license='Apache License, Version 2.0',
author='Stein Magnus Jodal',
author_email='[email protected]',
description='Python wrapper for libspotify',
long_description=read_file('README.rst'),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
setup_requires=['cffi >= 1.0.0'],
cffi_modules=['spotify/_spotify_build.py:ffi'],
install_requires=['cffi >= 1.0.0'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries',
],
)
| apache-2.0 | -6,958,234,335,733,278,000 | 31.326531 | 70 | 0.619949 | false |
willprice/weboob | modules/boursorama/pages/accounts_list.py | 1 | 5980 | # -*- coding: utf-8 -*-
# Copyright(C) 2011 Gabriel Kerneis
# Copyright(C) 2010-2011 Jocelyn Jaubert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from decimal import Decimal
from weboob.capabilities.bank import Account
from weboob.deprecated.browser import Page
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
class AccountsList(Page):
ACCOUNT_TYPES = {u'banque': Account.TYPE_CHECKING,
u'épargne': Account.TYPE_SAVINGS,
u'crédit': Account.TYPE_LOAN,
u'assurance vie': Account.TYPE_LIFE_INSURANCE,
u'bourse': Account.TYPE_MARKET,
}
def get_list(self):
blocks = self.document.xpath('//div[@id="synthese-list"]//div[@class="block"]')
for div in blocks:
block_title = ''.join(div.xpath('.//span[@class="title"]/a/text()')).lower().strip()
for tr in div.getiterator('tr'):
account = Account()
account.id = None
account._link_id = None
account.type = self.ACCOUNT_TYPES.get(block_title, Account.TYPE_UNKNOWN)
if 'assurance vie' in block_title:
# Life insurance accounts are investments
account.type = Account.TYPE_LIFE_INSURANCE
for td in tr.getiterator('td'):
if td.get('class', '') == 'account-cb':
try:
a = td.xpath('./*/a[@class="gras"]')[0]
except IndexError:
# ignore account
break
account.type = Account.TYPE_CARD
account.label, account.id = [s.strip() for s in self.parser.tocleanstring(td).rsplit('-', 1)]
# Sometimes there is text after the card number:
# <a class="gras" href="/comptes/banque/cartes/index.phtml?CompteCourant=ulietuliedtlueditluedt&currentCB=ruisecruicertuci">
# CARTE PREMIER </a>
# <br>MACHIN BIDULE TRUC - 1111********1111
#
# <br>
# <strong><a href="/aide/faq/index.phtml?document_id=472">Son échéance est le <span style="color:#ff8400; font-weight:bold;">31/03/2015</span>.<br>En savoir plus</a></strong>
# So we have to remove all the shit after it.
account.id = account.id.split(' ')[0]
try:
account._link_id = td.xpath('.//a')[0].get('href')
# Try to use account._link_id for account.id to prevent duplicate accounts
currentCB = re.search('currentCB=(.*)', account._link_id)
if currentCB:
account.id = currentCB.group(1)
except KeyError:
pass
elif td.get('class', '') == 'account-name':
try:
span = td.xpath('./span[@class="label"]')[0]
except IndexError:
# ignore account
break
account.label = self.parser.tocleanstring(span)
account.id = self.parser.tocleanstring(td).rsplit('-', 1)[-1].strip()
try:
account._link_id = td.xpath('.//a')[0].get('href')
account._detail_url = account._link_id
except KeyError:
pass
elif td.get('class', '') == 'account-more-actions':
for a in td.getiterator('a'):
# For normal account, two "account-more-actions"
# One for the account, one for the credit card. Take the good one
if 'href' in a.attrib and "mouvements.phtml" in a.get('href') and "/cartes/" not in a.get('href'):
account._link_id = a.get('href')
elif td.get('class', '') == 'account-number':
id = td.text
id = id.strip(u' \n\t')
account.id = id
elif td.get('class', '') == 'account-total':
span = td.find('span')
if span is None:
balance = td.text
else:
balance = span.text
account.currency = account.get_currency(balance)
balance = FrenchTransaction.clean_amount(balance)
if balance != "":
account.balance = Decimal(balance)
else:
account.balance = Decimal(0)
else:
# because of some weird useless <tr>
if account.id is not None and (not account._link_id or not 'moneycenter' in account._link_id):
yield account
| agpl-3.0 | 3,786,853,397,567,203,300 | 48.8 | 200 | 0.483936 | false |
open-homeautomation/home-assistant | tests/components/cover/test_mqtt.py | 2 | 9028 | """The tests for the MQTT cover platform."""
import unittest
from homeassistant.bootstrap import setup_component
from homeassistant.const import STATE_OPEN, STATE_CLOSED, STATE_UNKNOWN
import homeassistant.components.cover as cover
from tests.common import mock_mqtt_component, fire_mqtt_message
from tests.common import get_test_home_assistant
class TestCoverMQTT(unittest.TestCase):
"""Test the MQTT cover."""
    def setUp(self):  # pylint: disable=invalid-name
        """Set up a test Home Assistant instance with a mocked MQTT component.

        ``self.mock_publish`` records outgoing MQTT publish calls so tests
        can assert on the exact (topic, payload, qos, retain) arguments.
        """
        self.hass = get_test_home_assistant()
        self.mock_publish = mock_mqtt_component(self.hass)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop the test Home Assistant instance started in ``setUp``."""
        self.hass.stop()
def test_state_via_state_topic(self):
"""Test the controlling state via topic."""
self.hass.config.components = set(['mqtt'])
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP'
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
fire_mqtt_message(self.hass, 'state-topic', '0')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_CLOSED, state.state)
fire_mqtt_message(self.hass, 'state-topic', '50')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
fire_mqtt_message(self.hass, 'state-topic', '100')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
fire_mqtt_message(self.hass, 'state-topic', STATE_CLOSED)
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_CLOSED, state.state)
fire_mqtt_message(self.hass, 'state-topic', STATE_OPEN)
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
def test_state_via_template(self):
"""Test the controlling state via topic."""
self.hass.config.components = set(['mqtt'])
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'value_template': '{{ (value | multiply(0.01)) | int }}',
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
fire_mqtt_message(self.hass, 'state-topic', '10000')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
fire_mqtt_message(self.hass, 'state-topic', '99')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_CLOSED, state.state)
def test_optimistic_state_change(self):
"""Test changing state optimistically."""
self.hass.config.components = set(['mqtt'])
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'command-topic',
'qos': 0,
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
cover.open_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'OPEN', 0, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
cover.close_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'CLOSE', 0, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_CLOSED, state.state)
def test_send_open_cover_command(self):
"""Test the sending of open_cover."""
self.hass.config.components = set(['mqtt'])
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 2
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
cover.open_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'OPEN', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
def test_send_close_cover_command(self):
"""Test the sending of close_cover."""
self.hass.config.components = set(['mqtt'])
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 2
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
cover.close_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'CLOSE', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
def test_send_stop__cover_command(self):
"""Test the sending of stop_cover."""
self.hass.config.components = set(['mqtt'])
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 2
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
cover.stop_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'STOP', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
def test_current_cover_position(self):
"""Test the current cover position."""
self.hass.config.components = set(['mqtt'])
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP'
}
}))
state_attributes_dict = self.hass.states.get(
'cover.test').attributes
self.assertFalse('current_position' in state_attributes_dict)
fire_mqtt_message(self.hass, 'state-topic', '0')
self.hass.block_till_done()
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_position']
self.assertEqual(0, current_cover_position)
fire_mqtt_message(self.hass, 'state-topic', '50')
self.hass.block_till_done()
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_position']
self.assertEqual(50, current_cover_position)
fire_mqtt_message(self.hass, 'state-topic', '101')
self.hass.block_till_done()
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_position']
self.assertEqual(50, current_cover_position)
fire_mqtt_message(self.hass, 'state-topic', 'non-numeric')
self.hass.block_till_done()
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_position']
self.assertEqual(50, current_cover_position)
| apache-2.0 | -8,960,463,534,008,067,000 | 35.699187 | 73 | 0.568675 | false |
stonestone/stonefreedomsponsors | djangoproject/core/migrations/0014_auto__add_field_userinfo_hide_from_userlist.py | 3 | 15622 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserInfo.hide_from_userlist'
db.add_column('core_userinfo', 'hide_from_userlist',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserInfo.hide_from_userlist'
db.delete_column('core_userinfo', 'hide_from_userlist')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.issue': {
'Meta': {'object_name': 'Issue'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Project']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'core.issuecomment': {
'Meta': {'object_name': 'IssueComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"})
},
'core.issuecommenthistevent': {
'Meta': {'object_name': 'IssueCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.IssueComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.issuewatch': {
'Meta': {'object_name': 'IssueWatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.offer': {
'Meta': {'object_name': 'Offer'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.offercomment': {
'Meta': {'object_name': 'OfferComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"})
},
'core.offercommenthistevent': {
'Meta': {'object_name': 'OfferCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OfferComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.offerhistevent': {
'Meta': {'object_name': 'OfferHistEvent'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.offerwatch': {
'Meta': {'object_name': 'OfferWatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.payment': {
'Meta': {'object_name': 'Payment'},
'confirm_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'fee': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'paykey': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'})
},
'core.paymenthistevent': {
'Meta': {'object_name': 'PaymentHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.paymentpart': {
'Meta': {'object_name': 'PaymentPart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'realprice': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'})
},
'core.project': {
'Meta': {'object_name': 'Project'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'homeURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'core.solution': {
'Meta': {'object_name': 'Solution'},
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.solutionhistevent': {
'Meta': {'object_name': 'SolutionHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'brazilianPaypal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hide_from_userlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_paypal_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_primary_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256'}),
'realName': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'receiveAllEmail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'screenName': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core'] | agpl-3.0 | -8,078,862,925,209,118,000 | 70.665138 | 182 | 0.54148 | false |
econchick/heroku-buildpack-python | vendor/pip-1.2.1/contrib/get-pip.py | 17 | 85929 | #! /usr/bin/env python
sources = """
eNrsvVt7HEmWGDaSba1VuqyllSX704NzwMFmJlmVbHIusjBTzeGQ4DQ17CZFkD2aBTA1iaoEkIuq
ykJmFsDq2fbn3+AnP/hX+E/5we/+7DefS9wjMqtAcnalT5r9tonKjDwRceLEiXNOnMv/+ve/v/5B
8v6frMpVNq+meVtWy+b67737d3/2gx/s7e29ko+i28uiLqLbIppX1VV0XtXRtFqelxfNMCqXTZvP
51HTrs/Ph1HRTuHLwaBcrKq6jZpNI/+smsF5XS0i7Owsn17d5vVsWi1WeRuJFhdFO1lt2stqOZmX
Z4PBYFacR/V6uSyXF5P1clbUk5uybtf5vFjeJOnBIIL/YW/479uiXdfL6F29LqLyHMYaw4jFxzjI
clZEeaS/H0Yv8nlTRFULc7stmyIbWPBqhneZN3nb1glMZBgByHw+WdXFefkhTmGA5fbx3bsXvXj5
H74+PIjKJjpbl/PZQxjHRVXNomW+KJ5QI3ouAEdjQFUGaLnM/roql9hzxm9gANQQusaPmnq6yyfQ
DD4oYK7ugNpLGFLZLOMWMVPUGx4WYD1fz9utA4MfsGLT21mS7jwy+xse2uBeRMiLvs6n0euj6D9E
D4x1ivRkEIPLqo1WdbUq6vkG1qip5jfFDCCULb5tqkUBs4IVn5dXRfQQO37YVg+Zqh6elcuHWTbo
mFR+1uC/ifk6HQTnIpvqlzQNwms0qwrG6XRarZctbZcbnMm8XF4Vs6itoot5dZbDninbYrSCvZBf
FM1ggD8n8if0Zu+HJB2sGyCxWVkbIyk+rPLlDF8k8f8CyASKJITN8xb6XUTjcRTflssfP4559QEF
ALOXYI6mdblqG7GW95gGqnUbQS8NLHS5jKNqGf22XM6q2wamUzERQ9e4OnpkZdM2CXcoNsOOI8Au
uHd7xoDEsq6WSEJJ/PTNm+dP3z2Nh6pRCoN9D1t6X7zaJ9TXVb4AiiBwgrQnTVvVgGQHlzQQCQyG
AZxKDEN+x1xvcl7OC/e7AGgGkZXL0gEzr+4KA76wNvGdkHi3WWefadr40WeaN9Phi6qe4taG/QPD
jR6um/ohnlpz3Ne01HASLWdwrGhOcl4Dj72t6it5TjWSVM1dcnzws1PaKfDxLZI3gDH4zvHBI/H+
4dGmaYvFw1flWZ3Xm4dxgKxje2Dx4Prvv/9HOJebKTC/sr3+r969+Z9+8ANx5gG0FaJEnpJ1oU5J
eSwCoPmkWZ8B25sWjXGKrttyLhvNygZms5kgUodRvWjrQkPCrmVD+HMYfVvUDRzsz6plW1dz3Q7Q
LdvBnxdFve3IXtfzx9glHmVD/LXK66YYwB8wHODJY/Usk8/w5XoZei2eDgbTed400a/LNrEHKrgI
doaIBmTGTF9lLZ9l6mFdrKqJfDydV8uCnzfTy2JB/DUhCEMC9OCybVfm34380TSX8k+jOa6ZoMwz
OL3mhSRsfDui7rL2gxjKxRqlD+zwXvQOD93bvIEjFyZIo/w5kvQihwOrxJMYH0X5RQ5EDcLFwcky
VkSGwIGSy/ZkeUEfL6oWxJrZLKrq8gI+2E8Aj2kTjc65Bcx1eoW8ez+pi5u0OUGeIPdkNJkgrMkk
aYr5OS3f+BsY+DC6n9co292/j0t+0QDe1RDuRb+F7QTDr+Fwm8FOifIVrGABBx3O52x9YTQFwEWE
2Dx4+DCv23I6L7KLRQ64qeqLh/wXElR2w+s8mvJC4yo+fPSTn/30iy9SBQ42LYxQ7zi9mMNoWbSw
44YRk//1GoSZIW7+iwUOjMiMaIvQY0FAVkBA4HSZNbclnOoxL67dE7UFbJUgATZAn5dEQtjf8cEI
BJWE2Nm8aeHwTOKHcZqeet8vi1tsBd+5kB5YO4lgpRkQwjyfFkl8coJkBzBN+B50gMAzFfsoSVzs
iP49BKU+sPy8hRNhNV/jLAWCzuHET+IHcfrgUUfn3PD4QH99+iAwomP9/uB0p+Gp/po1iH4JUNow
QqJNM0XD0I1PuJrWictMgPtNjP0qCB+pDvvRK87TgS0D/8UtoV7gMQNyHGzVpfwso7nhwyZxiEaI
RPgu45VLIwFA/oZ/YWcIwrsXojrsplyui4GD78kib6eXNMysKfJ6epnUyCBOmvvMGOAPYA3wX2YO
SXY/hR+jcyAmHIG3DxTMg27a4gbZBez+VfIolbOwPgC0Bcf2ex6c5Ek4lmv4jxhX16gUNH9UvD6q
Qf+oeIJ0tMMXIWCk8BEZQYOB85wZI9GCoikQvuEQFEQkFWhjBUGXPPzAx/xlodg9iPkV6Fo5P8Ux
AfvHP2dFAwtNQBQ0qY7i/1BWEMKbFBuyxdUM/07iEQ8mZsFpZPAHHF62XqJikUgQ+m1bb8I0KwZg
sMWHIeqU7WBQ6s8HyKyslo4Mk3hgjmmU08UMxi8JZAT8pviAMxrl9N9z+u9IKOAa5adDDx4gB1lM
06LmMibgE34GjOOyupVvyAgAHOB2NvZxAwwPxu3gh2UrA5OKHGjcEyTHakV2E0EZ8GRIi0t/ypc2
nTzDb4kKoEmJZ2Ek2kVnBTCdQh/lQC14ZhbLJofDv71EQsovGgUNKRzE0+UUD5ZFvgG+WkgB4WFE
kgdjMFOfsPGkof6Pvnr6KKrO6W8GgywLekDKOMdDPzMHbuwTHjcdGIhvVF/hM0RIk+D0U79ttl7N
YBaJ+oB7NL4xzv8lSgnFxOxoVk7hVAH0XqXEmq+G0Q2yZt0B6NQL4MupIZUwrpm1oNUgl/PEs1e1
Y4RNmMfEAn37TRztW+wBwBgtoWtvmDb53CMJj+Q2xCmMOdS5wXiOPYDHusNTLWQUczmjncZACsrH
D8HtWyqmmi2g+pDBKbxM9p5V6/mMuAqKENC53W28j8I2iPxrVNSRuBdlm+0xotPQgIxtpLcfcik4
Z3jP8XaTzDyw53o5ksWNSPmNyfqHy5Yx9lFhiamHU2YfNoXvDl5sa2Jt1/EpME9jwCZoNVFjz/Tz
FdCZy7oBrBd4GE8v8yUalkil4/OG1G5Bjp1DN0dLkORQQ/NGBafABWoK4lS3OchIM83UkkV+tomK
G2gjttQiB5W6tsR8YzYHnmQh2aLgMj7TNf4+/uI0hKGdpkpT4APnEvTeO6xPddbmaI1R62MLlkMh
tyieh2IVPEpcHPTNnTag915YAZBhRQlgf79JXX7lb1UHcGytS9zbSWyO2FgOQ5JJwjtxaIJKg7wD
+EV5vkniZ6BMI1/Yb/bR2hgxD04URAFkaNlAxOGR9koh1j4kCwEvshgugjgNyaNBSXQnwuylRr2H
Xi/nm2hWAadUp769LWCu5+fAknkzf3X49LmvrLIcp6hMbkHGjKl22BsmIONtxd3deZig+27ZmTWO
z8KlHfGwQ/xTYxj4GoFSKKzxK4x2TmK6rtEyIoSInScDzUekr+J8cHGR19512Ebf4eEr2axz+ChW
3mXc0J5IYBeU13kJAkWFUgwOd1rNiv5pyeEijzoN6uM43B5lHIWOsaV7D3xl8k6KirlOAP0ui2TO
KMtXIM3PksRYKgKYBmdPMq/87S27fGGvtSlWdy63UhrusOT8Da16fScqNUbUvaJyRD2rSk3lspKR
KR59GafAyIKrLETOMagufPdwfEY9nmF3BpAU+edZ9EM45O7Hp5+VUngIdyUWA19heuEGaReGiWqM
Rx7hGO9s2sFLzrq4XpfAWYGhSLmmxHNMjnVIkj2yFFP+JEu2Legk/uzEQYWNs3l1W9SOUeyibA9c
y4OATKb4mI6bVaVFnIuLCazKXxfTVl4A4GgzfE7m1VRRCizHI6SV0GAOQpqHZQy02bt/0vYyMV9H
3rZJgkqy/sicgzmyUjMFVw182cYNq2PWi/M1kLbEFqJ5vxkJqcvF7VDBPjb6NDYMqaWJMx5zXvly
5mwd460FlHajLZ2mwRmhcnNZ5DM0Y0jl/qMnGJSGAv/rGrU244uhI7OM07RHIg8Nb1bc4Pjc4Q3c
bQxtf7nf3IN2YzEjpOWhSRFDuwNfKiNthMzrB0FTzxsyH8FB0bTrs+j921cN+1vEeI38y8uqaRHu
Af56SNsaL84i3M1R3DSXBw8fxtqG9O4yxzUTHGYWnRXTHO9383l7Wa0vLnE1N3Tje3T0Ff8gN48S
L/GEr4UCRle91FEecU/iTgIWNrvI0AZ7uT5Ls+hXIFLfFmway2UbOAgUJOTWoHVk0VfFckq+TyhY
3vAdHV3O0SUGXok2wqhM+MfXuKcQNWFDmWAxMaIBNwPbZ92brbwB7LXcEi29B1ZbW7oSDyWLQIFV
ER1xSH2ViZ2m7k2C0kjdy5WwdmpdCOieBHSi7h7i3r2/QZdpfjDAu/S6uCiRCSCAdHD9X7//p3hP
XXyYFqx3XP837/7Pv0eObIfqGdLRDIioRspCtUq425DPmrh3fsmeAsRVD+u6qhP1vfY5+3WxLOp8
Hqnuotm6Fq5m6msT6vtl+Ulw18suyM9LlAHO1vj8m6p9gSbaxJuE7uItSt4z9OkDGqaTUX4NMs0S
Ce6sYEMvatsNgGjON2S1VDKA2f2v8tmzarGALRCaj9mZ4dQFEldOhj7cOcKti7pEyNf/4P2/YA8S
et9kYubXf/bu/4mV50QFiiW6FyoPBRifdEsQs3+rRzyMjB9HRRv8jO8IjXk2O7lGKNdJ2cD0IBsa
3nCmN0VTqOnzRwKJug3dfsi3b5hOX+BDo2tN7c7U9cK7dC0XS/zr+FQIVMfC+wq6pPMHjpyL6Pj1
m3cvX39zdBq9efrsN09/fTj55unXh0dZlgnPijVArMkwJPqS26uJDU8JeE/SbocfgnHmMH+wx+3d
+zoXXbSGdZbPZsL+YAvm8YiNPKNiVrb52Rx+We/RTDGO5cvGeZtPEeI4ZgHc+5TMqeNjR/mFsyq/
yetx/O2zowdvD9+8PprAkXn8y7eH357SOS2w6YC7LOarsUJkLlGJzi5w+M83bPrRFqIsOqrWNR5V
JbQ/ExdGsPfsm7iYjElL3Nn19KHoOkpI8B5NgSxTOs0EHRT414gYfJSs8UB0wTXAm1fZagPTvynm
1QrO1t9Va+Qk6L3CPqbAWeAnOQQiBJ5BVW8eKutWwvKDDZp8oMT8RwWNd7GRn0A/5ErDi0z3XDBn
oNObcgajXsBKlCsgNpITMhf0m6ppSljg6Cafr9EmDgwJlifK6+Igam5An7nAU+nygm/Tvquz+K50
VjOdGdwkSGomt/ms1Pbi5atD3J/9ZAX/j9Ky8jqd45E6I50BHl+UN2TM10OM6NY5cvFpLgUuPSwF
nbTOMtwZieLGF5XLEbrPNkEcku4Zev1pGIRdGkQePMeTUfnCK+SBIHvXCZY8QeL1I75Swhvuzark
X4HZUtuJ/7Zn3HKqsfDFQvgZuxaTJ1YDh8e8eBic7a+AJ6CMj8rUG/pEHkZwouAJJd03o33xR3pn
LIyKD22dm1gIMWVsM1Gzb3af/keRAc/+EDtlFac6VzyYBlE00hUVlYEZHBlI/fDIXM47Y2JZSbeH
AAqW1ST0Us4P/WeLSVuvi445spUpxA8uluhjYM0vSiq8hkAyR66dt5G5ERknyJ5B0b77in/NhA7Y
Gy1KFFPCOxveT8LvP33O6CqOTO7N5s3LSHTCuts5sEX0dyUnr1w0W+R0sU4yGSqet8u7s7PeuYbf
/WnI+mhVTMvzcirmzYsJpEuedyytW4uDU0aGHpuueDtN+YwXmsND9J8jEAKcnywTBFHDAjV9EkbO
85dvO+ZPfluh1SeHKM27SSQCMAY7a1gUos7VrXldoFHFjhG5G0YEGpCC5lXu/NJ4MZ/0oEY2+7zY
eS6gBvAjtjyyQyGckVP+ZbG4+3ZQc5zmINf1zy/U5BNm+AzBRRJ6Yc0U53n3yWA0E61cQ3K4+bde
Vf27Z01RY/ysy8keTyhoa8VnC+kjlVvhTXfDxnvB31cXNWA3zNuD77bydbF9+ePIVDKlH+WyuIUO
IkBWOaeZCu/yOy/pSyGc0eE4UipRWCqjRpOuRjtOS5zDOAutgan5JXVhbLmPPntRxpgVHOEgf8AB
UiynZdH0Ta6n3WcXQay+Pk6KYktGpxwVer3bMu0p9ojnA7vWWpQ4hAOiBebCYYBo9AJBSqqyyCr3
PmrN1IkRnlLHezGnPT2nvfCccLQzPbONMZ2/XjetOQFQ6VHfmddAgBuTi+5ZkPcS9FSdFy02XioA
aBggI725UOnenZmM/HTErzu0JWoinVHuIkIxXlgDyOuLNSvAwGRAu23Wq9W85GhOxIcyg8g5OoiQ
xj6MtOPLEnf045M9/ajhGMyxHUt2speC6u1ARjFWqdkuUOVEDMNcoRFQtTReuGP3+3h5Hm2qNVpH
IjYC5RKyvGxRB5oIwSEUISdjDSk/a6r5ui3obfYRK80Rs30LzS0+dZ1FZK6BnZ7Fdtc4J8PbucPB
pZ37I2aNN2idehHFbGLM8EceNXI30wrVI4JkBocJGZd5wOScLM/i8l95dmltvOOq8BlwCAxRU3yd
4WDQWF2t6pJc2CsRim7jy7wihLcY2V3RZcGiyJetWJoKzva6nMFBEZ1tYKHOyNaNLGtZtZafJyyf
NJ12ObDTLZNlaU+0aWksJp3pR1vuhjVyxvrPLd8YCq/q0Hi25Wv3S/HbWFVgve4aigg/53pSglCq
l32L572OxpZS1AVOiLZhYOIl3gzqOxPT0V60M3WeMCR9wAMwzAkRbOXKbG7b0CTdFABem3TQPa2u
j0ULM9bBOrXwUzlm5w0o78enISRZLMIJ8bFhSH8iyXD0MGyeaozCeeEMQhE7enbpgYun6B4qH7o2
vdBEpNGr10mYpEeWisk0dyDchONhzH5eBqdyXHqt0Z4OzOgf5FTiUj3MDkOM0Ly3VobzCfrDj51L
SJvfKwoaezTleFMytYwd6nHOCWOPjEMbp6M5qdn+B/TY/kTob5pN8W+7kbvFxl17L/iZqQS4X5rv
Ust7kO4y0SYLjM114rIWhI5e07XN467+RXKGhiBk/0XC/k+oc6fhASgql9eJf6LRSPhiRDKdws1U
I8145gwWL3TcAZs3Po5rEmbUwBvzZeC2PJGwhmLzjPkfddiM/RiVXZEBf9subuymZ390mTeT7rEb
PEWf4YGRoMtCwAskxnvNBSpCeC2G9vB5kcMvWH/nOkxJcMZYOHIB4182ILagNM1izJ55y7nf7D1J
w7D2Iw6SY/TuRcJ91Z+QG/TwMdMxx+0PJ8apEK2lDYf40yRQrFTP98iARE6n+GCsxp7aq5gEjyvP
KRCTcQgrDrCM8yr6RZQ8HkY/S12/0K7J8tEW0WX0fEMSfVWLm05xqyXgR4+zn5Fej2ak2jR4y5xW
KP23VWWfVjtOJESxij14rZHGL4qWM1CpbodRPFFuO2hnJJtKamZp2AUXhimQAkUUSkilM7tDDc7o
0N+HxlEtT4x+VgeyHSaNoCD8JpFcApjLFDhKVbXknIgLzYTDjiND4UBiPkt7A5vsTslPR/bZOwm5
H4kMZBANd9jfhfgwcSSsoSNJpSFxjOTPOBLyyjGApg1jsNwdXFHdATXrKXqno8PnRh+2flSTehWK
8LYkrSMDojF0FrfUb8cBuBeJhvno0xHQjwHdlY8C/e5uODCGz0jQDxws7ExGU9KaRaaKxPhsIk/Y
tDdijj6fSX8rzAtDA+sHdC96BifAEuS67Ttjy/ARznoldlrfplXxvBaAwWDgOKulg+v/9v1fWE6B
nFbi+h+++3//XPkEGukGW1Atbut8JX+vrtBNna9fVCMEKFfrbs55Zu4lcuIv6gVmCwCu/13xCQmV
Pizm9Wo6L8/Q03W2nmJMyGLFXyELxn7VUSg/OgLOPG1FrqRh9KqqmkL8Ut5/R4Sufuc/RmnQ9+/f
vz98+zvPzY+B0g16vKs7nzWQT/bm63GWCL35BB8Z/P0ZHWTubpHx1RkzyD4+fPv29duD6OuyIQut
8qeXlmuU1Gi16I4/zeJQZL16xI4ABjvmdDqe0mxZJuCZPlURX5PLslWRKyIPjEjuoyBooKJxW+fL
BtOy0deJgmOc2Gq33ZYzyqdkheWI7G4cWZWVmMBz4wUDuhC8TZxQXJCeTl1S9EQD69ckOJ6hA2Rs
/zSWWMycV9mb/4GFMhiK4gHZUVHDVn9TVx822oAxtNhWplpPCHHIEDyUEi2LUfwxxu0eH8iBxGI/
yyffDzHIJvZ4NIISOVmdFaLFsbOxvrssyJWQ3SZoXiXGeNBFWM6vYLsIPtZk0W8LShWg3w04CEPc
fJJI2kqgxpcRWblRjKfgPTYVTyvMV9FSINCAgwTtoeCFN+o5twV5K64x+6s5fCMJ5x+/H0i995KS
rxEi9JoJ3gkPjxmxpwa/k4yS3ko86waSjYsG8pragDDFuwTxekI7oapBVIYNHp/aAiwOA7kEKeY8
+uyq2HjxkvLlMX5xihOU9MCapUEO4i94JrEND4/F36fYFIeHDfHf73skcbvTYw3vVNoexZN04MRz
lecyTS1HdZUXl3i3fyPPOhF9onBNmRtxOK50p3A9lkAm4lHSPbqAncJtrFaV4lno7+3fEN7oCxqq
SHBZ5MCdaxA96hnmHYa9sF6WsCfl4t7CaVkhzZKHtyByzuQBxz9Ttdg/DWtyeCWikCFNl9RgLJqo
yWfsCY2RpEA243m+OJvl0YeD6IMaLwXfYB6YMVrJUzNNsglbMIkQw8SJTKbVfL1YCp75+KceGyWD
GiNeqRNmWtzjVWZFd5ILLtG9KeJlGAqGiZlBnjz97FsYLd529gvnQBExLTgXN0QRpVLhYz/Dc1n2
Aat3XrYKkG3/UOOQcm2G/0nUFnW6H/mohmc/TTuAJvHJEsNo4bSP7keJ/+2D6MdpKpLM8lcalIjA
jvcb6ELEHCKEbI5eAz4woDAPhpeFzNZogonhpNHSJxJ/186Y5m0KwSPfDH9KfMXKGIpYrQfj6LHX
xBu++hb0MOrZ5Tkmw2/cAHNTKcXAZYN3McCDTkXYvRb55ujd01evDp8fkPGRvxamOQ043LvPxnft
aDf4ASivnr47PHp3QPZOgiIGHEjvFsjMFl6wkblgHC+FwXiYcOKQ0k6QVcw9rxop65B6Vhdy4Rp5
Uj0ayiPlsWBWFhHIJByLVWJpZ+pz2AXBF49Tme9YOh9jEPPZEqStfK5OseV6cUYWM3I5QPcj4B6W
3mfM9lvk7M4sjfGZn5nDCz3H0TFeXIKW+BHIUIo96rBJIo6TG8Ta44MAUulFihT+iHjjzSNksfBI
oRnl/4GtxaaD68H7f25ZBdbL78rV9T96/78rPVu9+k6nVP6rciU1eh2f+Z16mOj3joZM4D01mL5F
dl7elLN1boa9DQYWYBjxP37/D1V03/U/efd/FRSi+hZUFUxvARibk+0G1hPV6qG80OyuqqAzR4u/
FiWouZuVNnO0l+inhUnQpd2jasoPeAPdax0BKX9WLVSn1fSqaNWvFoXQnQwdpqnk5fI8/ILpG9Qu
kVWAEjTAUdcX6hgKfN1mX0lE2nraDttj+//9uljDaNAkQDYZma56+5fv377iPqKv3r17I/5cb/8O
RBvYMpwxSyUETrdN63CxajcYvEADpl/6E+XTp5N2VyB0c7bkSVuhUvmYOsQf7L91gfsXVM339Rwm
/vgroB+8dixQvJtM0LQ9oWRhlrcM6iSD54cvnr5/9W7y9Us0SGCoI2ZbgT3WZo5JRccQ2z431RkK
d1qjJJ0O7wgardoMBsoxyPQI4rSzRd4oYzemgyimlyxIU24ZEc8ig/0Nn02sKEH7BB5e8HWM2dxy
WqK7NqU0dqXUNnyFoi4fINPlR6SBkT856SxGnogWE5mj2zSroXFFdwTY1j/sRparQ8Dpghqpe/WN
gmf4dnBOG/KdH8OyXRTkR584CQDR4w8U1zN2yWAwlKj0MserRuE2elYUS+YWM0rpcFndOrPil2oc
IMrbSfIM1B34KRE0ypQNaoIyn/E8kX5VLpKD9nW8kEri92RfM6OGtLOJtJa5/fem+wgMVvqhIEmh
8dNdFJnEiS58jYSPdm2X5hITb6Jb71khPRtJ+8O3Q0xVwS3oTtQEAoQ+J4XECv0USWGlJx8taMOB
RIW2tQgQHhmRfhkYKVB8NUStVoylrVGcAfZWNBNiYUmZFRlfQr4CSGk/uaJLERoUGDWMxV8CJ2jL
KXsQ6q2KGvBEBe+rTDpdroxHfLipWH9Ut/foimMvSlAsKW8KETK0h8u4NzTygujsvIC/skZc4kqA
TE4Qhuw5FOqW3tubMOCsdC9ardngRak5kFvpAF4qTUCZ52nQ+MJgZQYMUt8NgzgZCTifIqWkPwjk
ADEPEW7lKmpKHsku4ABpJvg3p29sSAJmrofmViqygcf+w8t2MY+DeciVu5q3S8MKC+HP+sZyf8FJ
lDrftu8ywun/dDoqTsYStAyRIdk4Rv0hSjcU4QdYNrOyDqHWgSnbowFVr0bwC5wT5jwmd54mwxXd
0gdtJbXOVmEWlgUQXKBEAF1p6plQPv2ebhxS2mHlOhdbKBe8c6iZ5pZ09vnpyuDJUHqs2VkmZZIl
vG6WPwdOKk5KOmBFgJ6LP1C8AU5L+0yDpcPEvCPx7ifs49g1Ecls4DklKaJDU+xsHu20qkUSCOC/
/q291z3yzUQJ/cYRpYdAqXJlB85qy6olJWUMaKqIBQA+DC4KMkuWmMSpuso3athw1OpkSajUeEo2
XTDqcxkfJPbYaeE8hkJfwoB865qzpjLvO5CEfJoQLvzJp3r23GnEXgeaJsyL8mpxxikKBQ2IGwoz
mpZK5qFFdj6vbo1vMVIc5IcFWvaN7+mo8zItGFndQeS25DdXonvgyREDi5UvrnAmdIeAefjsmycW
cqbIzm0qoTVQZOFQxQuY6yL/UC7WC1bp2/KsBL1tw9gwxfBhVCw5IOSSjyIHFFYZIJ9NPP9Lim+j
2ihZFD3VicBUvTqRnwKlDwdQgmcuCWd4s3OZswdESyOi7Pn4GUJOSebk+n+8EO7VA0Yu4VKIA39R
XlyCJAU8+IqvJQrUFygupFq6OsdZAbu2rOqso7bCDmUVuKJCoJiCYH/w0qmNw2TfaXrWsgvIDl6P
AfoYhA4XcWDa9HgqyS+geoSSw+uB4AGVOJ8atmXZUop1QZnPdmhVzhHkV5LJ0CNlbAqd8B7mSHRz
+GgnYrcjl/idvbNs4BltNGkvRXQqgANLFNQCK+9Ls19md51i7WAQHCwxRFxuc3kt0KcDRxGaFWfr
iySWWQOEY4GBfM6os98ccPrxa3uFVKGQbsHL7ui+NAd7GTIpaZhaWjeTrPlOUpCveimnfmWO9HO5
SiRxGRILVQ7x0jUZe67JdKbGcboy+/ET3/LJtnQORWMR+Wzqw9XTZT7ffKf1bjYDIVzGIf6ZdWu4
4UuO4AWHNRdrg/oYpV6FIaQHPb22/R67vsERDORq3rJlicN7IMhr+pcXb2z8oRx0GxM8NZmI1jco
HOPbTKp2ZIOcsf4/NBleoPNT098fGcjHIKgbSTZP2oodlehPD0M62dpbQz4Nrax8Z00niOTzHM7j
JHYLkCy116YwTMk0h1yBR3u/y4Iv1yGn+mDmxfibykqqiGmhKDicUyoyX/SAUnr/64yHASLF5GzT
fd44jE2ng7YBsPecajhE6/vQ60Zd0lmD6UGt9SrD0ybx/QKCxP6Kas6wQaOLzk31WdJ82k/01oBO
XW7iHhD2HnjgNLDkbfMrG8wDh1ofhOhUw8Lw4Sl62vew/sRdLWueqZS5OqUYdTXITkC4zPD/PVwU
VmRPhc1hZ7AYGq/7KrmuMKrvN3u9lxf7UWKNeKhj745jaRbllWyMAWYow+OqpbsVvgvgUm0AG2Pp
FvwLB5jAS8sF5ubAcRsQlQTlvefxI9QdwztApjyc6E4wTraq5szQZbJ7m55Dwy3PeYlKzON5furx
1LXMhwIsJtCrRy2BPkAJhrmIHvrpJj6UuRx1AICcQIJ5k/BOpmpwWKO2Gs1yjoSJJN9pTB4bbyGr
Toa1gwnpToMOji5K3ImolWooT862TdE1/GH3GqTp1tTz0ifYB3GXc1DNxTgDhS3fOQWjhORI1cmW
qfO8hyZjl11pVt7B6oilW+z2NBzTtuPxS+xL5KiKzFGEj+GOVfkivDPuoecq32oZGU71EqObl7DL
nxmOi91n5MudNxQcGBrgthXpJcSOVdqFMR0/Ojg9JVtZvMQCSruRLtbLDQBLoy+jRwfbbt6MEyoR
qZ4Mh+ADkbyqExNJ57b7JERYNCqm3UlKxn2xbZxkY7Vh/rRtktbF1Au6EUfKwhwfZIkmKwYVPRDi
/JBz+emrIGHnwGMdtnerTMlOsg1K98dpizhjeqXNYeSVhHlmimVTtuVN2W6yQH57y7TRZ+66F32b
X6yLOaaOqVt5y0gmvKdvXmZZFt0WZT2jlEo4U9ukpi8Sz3Ia56KalSTIl82TcKA/qVEPxpZdLWyM
7rJD99qgHZbLKdW5BB/be89kwIiMmfMiIQIBGeggI83atrNMYlqpU7eOzhV7hEvt2x4qjcS0+eID
gkmiN985GSWtfWOmGpQ3KgSVbo2ke1uARiDJ1jly6ICVtd7ELiCoaVfxXnwbKtxr7DdtU7HrIDWB
Hfa7spjDFkv4koMkftZGZAVF3lMGiOaqXK3QMcq3tGn/h0K4HODhIG4DpDb68LJaFERatFrmtkIR
F4W6a/TwgUUjT5/kzoY1C0y2WreBsjOzijx6LbePYuk5YLBbWUCVKUm+x2qTyQJ46aMvhsTxtSXS
DdtGD1nlpJa9o7+SNq9hscZ6S9KQZxNeDYxHGrOQYU1pSKMf0ogdQmkzGP/zvFiA2O5rqmI2Uplo
vW/xDtjBt7iIoA+dGfEx4h0JM0WNE9gJaDi5suYOKvKVWajNnbh5ndk9bz2W20u8+3cO1aA1zyjk
bNMIjCDhG3pb4GbnUu1w1lVS22UaJpHiaHeseI5NMRAwCdfHCjNwXZ/q7veIwVEgkiWJ4Oeplx3D
4rl1MRcOOyGHAX8rkn8LkwdGwJ/X+cWCq/ZwLXW8ZcN79jqmygHJ8e//8vS+zACpguYDzZPjfPTd
F6N/O8lOH6Qj/WsEP2PETvZSAFkpA0YAzGi1SY4fPf7x6Ul2DN+fpj+yUobx1UOPg9KerDRdzLVT
ERs+0V+hpjQAIEiOYCrSfo0lbIeRfoDBuRg5yqRdzMsFlRpFLromeastDFMFfEfJwBA5zKqGWxmb
uYyBU1Pq4sLSEqZhTa6Wz5IDIzNXOew3geOWFBeG1e2oL6Yd/lxwJImaB9STsZieRdhY0qG482GZ
4yCIOfM2ikjCJQZ1YadbYxO/Y5a/wz3K/23wrI5ujPGHYelJdEE0D923QpDHuk5FTfYblE9rTKGI
goFpGFLb5qrYDM3oGOiJnV4nyphHwoAolEA5sKbo823LFRaITF/efb1r+juZ9g7eTOcl7rbOglRb
aFEyFi60aLfsueeVnw1xjvJb6dmdpKGrcXhx0LVZ9IZzXEN7w0DEtdiRkMykpfPnbD8hczBdLQa3
VtATtWdby4Lpp+7cJCoMRSgDgSJ09X8vouzsIBfPqjVQ3Ihu1hourRHa6cbqyD+PD0Y/OfUb0ipw
x7jfP7TeMD+0Ess8wOziO1KK8c+z7x7Lv/lf8Q5DItK/rVVbL6+WICxHwuESGcgib+VFgmAVMI30
T7GYOmCX2ZU6cMnGBDqFpniDtwwd4jKM9X0KZCcKbusKXdQ5BpKVpwSRLLJ2B9hl0ChjzIst/HJW
lgQgw8Y9oys6duJn9tg1hsRfxwfUSorPNlHqnuADbndRV+tV8siX1XTTH47NfFDHBz8+7VQzXRzu
N6q+oJP1ifKPCmNINz/wUWd3RDZI7zLFoU7/gkKCTXpvGsRnXgwJQXUfKlcS40T0yJUPwz6iPeig
EUPUlBQiH3l3EwEysbGmzdI0Y22kxfMQhyER6C9KyMAorCQmPX2RyutoM1BBxovgkbvX5OfFHn9L
p7O1FBFn5PGDhummW5VAnFBK9Th1I/NNB2IDx+qKPFhq1h7/MWrQlsByms2FaWYU96fB6rODWAKR
bQERH3717utXGOmRaa1KNh6yJ+hYR4W4qqsZa8E93Sm2BSPjCphkcYPhOCr7g/aYVJVZhOvl82+O
IhBKjCD89RJENfNj00kzp4zzIDHNemt1BmNMZGkUzh0ifiW9cSRwtIsgBzSynuHQZ0B6NECOttt7
Eh1iodJl0QK6kL9jsvAnzu0Htsyay/X5+bxI7CS/emTaUdTVbfT7SGHO80H+Or8SObylC3KOMRzl
DDEXEt2SPZE5Zw+vA+hHo36R1zz8SHWPE6/mqT12DBsTEPebPdj9+lWoe/1Wy1h7orLVXrqto/1G
FsHq6cnAK4kN5jK7u4Zca8wIICPYTUZNuYFuXJMESBS3HMegqeCy87ycw2JMUN1FKe7HO+RbYu9n
/tDIZWI6vl2EX8hwFn6nemorPDuWGwVU7GnbmVhgwO6ezEhkSf8ijb4Ux4g1KbsMscGbeqDTBCRo
g/eUjZzCdhhyrnqEwtpl5O9pJyGIQw5q4kv/gw4UHlPK47GIf7KiusiiLHBgQp1jKcXOhZQAu/H7
gAD4fek+GjZnO5q79v0MhdMxsmXv+EORtDwiPIp+W6xgbGzoWcpogXyO8iu5iOPdKLATReVmABtG
EdbFBRVaQz59CcIZ1Wy8BJ1eDHAi7eUBW9Uv2ssvT5r72AL+wUa2oUvlNu78VLY4aR5waT799T0j
zhEv05oqym+L87XgaBM0/LlwYZa/OP79l6f3Txp4DfDH8P/H8d7pk+T49/Hel6cPUnr9JTS0RnpZ
F+cetBifjpMnB3vw9d7p/XTvb05i+PMkhr9P4r+BP788OWlOTpbwW9r4/gb+cySg4mWJP3ccJL6B
OWMHwVEa4+sKfEXjKZXdJZLEYuyFOPNduhYtYRjiL/u1KjRtPxYQMbMG/2WOBMQinxuaex7BcWQg
0a8TGKjYzxT3iSfssOCCNz1qj7scgAct3decxxRQhkaQJL4Hy/KI8owZhxn1kvms1o/Y8CQ7M3pI
ljTl2ubi/mlWmbleKbh5vcK48dvijA8cLQep+PGbqQq5/3baHPG3FuMQ9dOBd+gGmeg4GFAnBF9L
LqbmHD8JTVje5WenCDl+cBBv0/PEZStV5oTZ7ksADV+3sp6vPMlBJO2+SrSRKpel2ycTfYqQhmnx
FAV5EqBItdofSyBLyS+NaEzvzgajyEwaDBpgtgzaozrjlPPJbZveZTqICvWItoD8HQwnOQMGjFYn
WOFjw7TUYXkyLE6nnVOR/Wk5UHTSMx/Beyg2FZcRJDzSX8zn4ZgYE4dG4xCJG6GtPUOhi3mMMups
0Z+Zp8f8IcIAlA0ERM1nPObROxizY6cwp5Omvf3tTmoW0onmHOmqF8l9pGfP+ddF2xpTxmnZcrqw
AXCZz1nxIcN1ASmF0nCRsiKqYpIxoCrZ5i7rHJVGaB55FTGnGaLiBrxlKMogrfI6XzQq6aO0mad8
DJC1I5N/BJmF4KwYF82WamSPdviwmSiEY2uDRmUQAp3YOqk5CRENZRiAhffWyvUXVO8FB2TAwWmb
+10H1H4nH/MIdFxvvNNRoUVHIMZ6ieJ0yyHAyycjc1VhKspp0rIRMEwNgIYVjwDToyShBvlcKIyc
HyXBT1InJ6wUTPAdO66lwZMCjp81AcjI3SElL2LuQElMqZu5KjGyyegUM5ybJ8Mi29W6Vb8Lfvv6
SDQzc98460N35xuM4plOeMzHj07dpLoYkgiyVeLnYisb8qldToukcIcTdgKaoNBFIY7Kwc9vhmpM
NH7kp3PjscTYwwzrWsYDLzLdHpLE1ccPBkBe5g2l4sc0mbBUDZaUZBdPo6+M3+yAhR1m4mAienSH
y1/5zeNtuFEkxZNBZWBGLOcnX/wkwEmUisY+XE8+cXW98XX4cd8F5KNB6GPL/5n88dgJzzn2cEnS
tAPCbzEGHuUu8S37V5pJglS2JRnPVZvgM+VM6O2hHQ5QPjA904FhNNjF+3ZLR3YnFJGjOdPpkPiX
d3VBsmpfspWgMGUZeuHEJnOuKZFIb1B2tGOPWC5AGH11+PQ5eQwWTWuabMMHsXMAOwcNq2PW+SuL
FijNJqHE4Ch+kmUT/zjn3+f403VqVbuEoswxRTcOG3WlClEDP564H6AduibPKGLcT0JraSQdxZII
40CCrsRSuf8Yf1U1Lab3JXx8b66cddxZDlIhbUNyP/wOZo1MQrA/OsSIafwQtvMXX4gsvIQ5dZvc
gywbYVL2ELH7dcFHeNOlJsXB6Hjj9CXTWCyIb4TEhwOJzbDIQMQpT2peNYX0x/slpsyBFdooqiYT
CsoBjq1BEI/EGJtE9iayuWuOtq/WhF0mM7KGSyOJn1rHv1rT9jrZXf/dapjhehCU3SRoQ5UNuxCl
fcKCHrYYHqm8wGRaESsJEmWaYBdq0aUwilHkNTrn2IgKZSqykRBJkOLJY+/Jj9MAEO4FC1yQ/0Di
yatc50Lggw3PNhz2VNIB5YQVox6Achl00OVGJhcf0NutbCedTobck7loLoxmWuerYrYLCDXAQL/S
KXXejIFNCpMsbjJVkjjdsu4qwTtzesqCif5aW4iAzaw70QCHFOEYPVLQujqdA2mg4tmcQxbnYTOL
eK+7CEtkvlYf3nxBz1ObSUjqF0zCvisOelZ1sIpgX/8p7RefhAP7hu4R6BwS6NPXBkOBUeM6oIM9
E5Cd2PJd8U2r2bvAZndDDNgYM+bRrTO83hrm3+aia2vAnXr9k1LJhAH71zzHv2c36B/95YPhw4Of
j5/8Mru3Pzk5+ZvRaexdcBijC10pAqPCG/OGr8xzFa3LNaIKyjQ9yzC8CrO2YvZFDNPChOvNJejk
0Xpl1rfiUheojVDqIqorDQO6rcu2xXiuKtp//EWUsAs0lWi6KerR9bpqzeCTfVwsTnA0vczJybRu
0ixQ9JhpTuIpa9Zndv4HEQvN5BTF+/v7jz/EBH/m8p6hsKyIq0FaEXUt2HVdREumcz4GL4o6boKM
TJFjA8TW+yCZhU1/ErxciVXsq3Svk4MZut/v4IRj3z7x6Opi1X1dFf/iFTuTfSkLeJmfFtcKg7TM
3dddqM5TE7d3kFEvO3vHl2q+nVKwtLe7QDoExx1uwbwmT3qaGAFqrndWILDNUvJY4xKVT5zSUqTf
1yIfc4FWAzVRZT50XelVmjgfSawOdeDZ10UV0lW0qA+ScmTcHeBjwyVQOWN3XJbK1wFEivpcToTg
x0WvdEzQBOKO0HdHNLqzTs2QWh84F0PGkoAn3iM5w8Xsp4GJwdMxRtecY3TMg5654edqe3XOizvp
nU73VDqUPu+CM0CscCSF9FpZ7aWDGkJ72tvBsoAAGoTMun9UydjOTCiMQrkVGKrc/mCD0ikKo2nQ
LyQTsctojGO7G1KZqCMjv5BnHx3PYyUvMHa2hjnIIAXR7mAQcMFXe8a6+1S8yymY01ZWslLtQyuK
6ISbycdG5ArmSceyXHPKtcEMEF0HoxdVNXqUPUaZAf4cjx9lP5ZI0IKtpLA6/n2S3X+SjpJZcfM3
J7PsPnqOWN0NwnQXcLo1aC7s5f1YoN06LSXrNvoMgBFGH+1K75+c+814bJSj0X7XgU4tBq7IU/p0
XlZNS1WMDbGEPVPxI7MsmfQJVUSKee9lTSdOAFXxF8+/OeJxoLPqgZCMvvzyS+12ITLN6hqTArhq
aQ6RJ3Uc526SfdTCz0IPp/5DghHPvBfiwHhdlxdsKYukICrzTKvSlI9VXMLZJno6x4wY0ddFmwPl
5mu7yBqaFAVi/TgI9WYc+QUFZMGs2lgAxLPw7AU9DYZT1JyYExMJZH6BFqMDcVFzgRbrpj3bUMWi
4oNadrV3xR2cbJ+XRbCoiogOKDAjYAFoqtFLScAS7HAv20N2KCcyK7h+U6GJyaYgjDo12cfx3n6T
sVds0gyNrjB7jM6xxFU6JhyKbTQCLnwq6Nxqgn0Z7qhwqq1bHhO9Rwrn9qCOtLcYOrmX73GZBKzv
YBRlwOpbzITZrRYaIgf6bo/Tng8pmwPlEsY24qV8a9MJNrjSEeUUSY7jdFR8Uu9F9YyER5nlzbQs
J2TdwdApKhdX5O346sEjz5UK65QRY+HI7Q8h9ZJbDTzrD387Jix0hkEPrv/p+39lVaRBsQFWBHjS
9Z+/+7//gVXGdpdqtINfPT06nDx7/fWbV4fvXr7+Bh2ZAWn38Dt0fros5vO0iXQ3ETmjoFsUCGxt
2vS0BGoZkFumBj85evb25Zt3R+gUzKwCxnYZH1CnE4A00d8DQ+I2+PnbwzevfjdOoh8l9HPy29dv
nx+N9370R/3r+P7p93vRyUnIbkatnmGz8Y/03x2N37x8M3n6/t1riZbD8aPoR4+iNEoH3w/E+Ipo
VMl6r9HoReSMHZEyIB/H+Ds1v/P1ckov3cY4TfImQ5qfAQ7xnwE5C8yi0dMpP1YPpkujxWq+CaCF
PgCE+PgwMZEkDGgEk0u9lkE0MOBHpwY2pu08Gv2mCwPfKzX+mXrXX0JYw/CqJD2lerloixDUzCGs
lF8GN7B87IK45PjWcYROlrtWGfbG++mVhonYMb7nzCkDnBNhjGMqRYqXnE3rtKBnY94uoVLFtP+C
xYUP0dvfWBi6ZWO/OYB15zl8J6bw3cdO4bvPN4PvaAJ3qIMMBPkGi0py0iMXpHVTTMOxboXxAVr+
fXYmyqTaTQUCKWMA4C1+QE/5QKO/8HTlBIIMOrV8eGVNZN2WGjle9cSFw2PCK0sLiH1jScGTiIvE
PQL2oz/GDJgKs+IfWKmVlufAHtf3vUVpuIIySDgZCnuFKjD9u2odLdZNS3FT0X6DlSP3o5hSgIks
jyYC0SLgM490cP3fvf+XTuU2kZbv+p+9+x2cheoAxHtuVT6MWrzVqtAwMn4cFe2QQwhNbam5W2V3
v86Y6JUupMhPRteMK+0y9V1F42Re+FBl9ePXb3Dhjk6jN0+f/ebprw8n3zz9+vAoyrIsUGhOQDIr
zO3GEN2xfjI/JEfd0chM5BhiDOZCdDAdzk7RUY79+LSjgvuLl68OEVNBpqMRhf9vVC1qSMKkgtHW
daNTdyGDGdrzJamWMSETN9DJtYBBYjaICD24muzOLHnDaNwUTRB9/nObVWNCtBAG9p5X6EORN1fi
bF2elxgoj8MHTqlIErrhndlke3dixqbppimQi9kb0Z7n2bqczyazsuboCZvT1NOON+pKTr62wxhl
AVgcmSvNW4Mj9Jv2E09w9BlLRvYoLDybOCH02LVyccesNYKnmkR0ELhCvuasQC53SiQsheuxZJ0h
VaJ/Ym6WOrRWud9c5s2ke6ycWtNjeUms+D5uGYyumBdCM7XMc77/Yoy3VYzEBm9PQBdErQPJVD3f
EzVpp1wvmAOKHby781A0nOTrtpoIEpfIy2DnpFh902HRcPL88/d/7hwG13/x7l//a6rD+SvMiPdM
SqLI5WUVLy7LWYs6nWY5Tl2D06mRqct0YoaXAks3qgclWprkoSNPGnkY7lRXE8fP7EW24F9D9KA7
Ly9eEyLeiGfvVzNKkvSc+WrzFaD/BeXOaE2gHbUi+47HX+UzdaZ4dDPUh6RxfurQomJ5E6kCpqSd
wnk0wcfbSl4ekYr/8jVdHsrinOgKN4xu87nK/NPY9SoFhidIash5xcjxT+I02FKugs1X+f1Ut5eg
kEwbtJGZsEXg7D0hq/MEGrNKzmZAUUJ19WGjvfAy9czUvIhwrdBKIVqoywopU6gHSmmiWNbtQoK4
CVP7LnSKoaTqUVbi1LKEcfDupT+HjsRaXYzxQlWYZVHGBOZ9Q7Wn9JYfhjKKjMVJKpPWugyWCXnc
S+aJA9phNRaHF0e9ZNl1xg+owv1ByOWB32czykdbWz8xegPZXRyKyxhdDqPRSOXiLdrprvnwguIF
/6PnYhLlsZqrjB/WhLEo6otCCu0qD23ZlrkS5dUBZVacDOUJALFCVJuUWtQ0r2sqlATztNCM/pEc
+wXsDXcVbn38lzaBfED+hTF53dMZMOFW4TChGD03YatMJCj03lZOa+Qm3cjHFFPTBUd47pMfG5PT
5GZKn1LgnXmGTsg3qBPSsoJttwJQp25cdUsOolrIgl8UqEKPPfzjU/LAuQh9aJyV8hCEERZUzsUB
xS+85rA6ZxUcgIEPxCsrKH69wiSWF7DdXHbCBdQVbeXlUsUFswVugkIbC5UenRlwLOlT3ooKomdR
Cl8kJJvaLMum505KNmoiydgBoOlvKLLBeQVIcZFhNxiNozB6JWgRukA/J0D9QBQt0E2d/GTkeO8r
NOH5Pw7kG0JzzBrmGC5rdJwQvKHU4jH+xKHNRAB6fvir978eWh2KdIGpXWVATi2wm4K5fZw2lv1O
rZJNROkg1KG551XpF70Syxs3c8nLc8psj5KFSuU/EzUcMfiFM+9hRMMcixDzvVyQpQN2lzdlXS3Z
hfzbl2/fvX/6anL4zbdxd2rjrpozpLndkBQJMNt1PschJnKAaRaIbOOQrLJ13ezuRU9B5kV9bkqz
kNoccNLpuq6BJcFDoz9ChkqwbE9W41mzTW+10FsNtyDlLIY/jkePuFrrZDL5FvAxeXt49O7p23fw
M+6KhPhtgelTY4pCz5ebViTlLxv+DyXIiDaFc7yKnU+dHowenwbHLsZsOW/4Ni0iDdb4UfcXRzyV
Oaapc2VchTWx9l7U4z1NS8tKERrDzTHTkDQxofbIIaB4v+dSWBDvIrzzLmRngDJBkH8GVSRoEvN9
RyCaeZBb41cwDXTAyZ6LWo5NKLBbDCvx6tbqca0A5BrlR2tsafTDcW/wbwhiN7LSnjBra7fu/ZpW
z9yXWHXBroCztzWgmc3UwR1oQ852hAVCsomfYbRlut0FfjsYSX8IobO9ejvvy+Zolm+Q1d+D+0xo
BhZNDDoIvadQxNv1csmFXnYGTkgq4QCUeiNXpe7atFbTQPiMBYlYqXPbbKq67hobv0jyHdrwhr20
Y0lY7qnhdCxrrsNacLGHJZYxEhfWfEdP1sN5XmOVhywLVSgYBCNDmZNQndSyUWwFq6CcY+VbFP3P
0TWChiGG1SFueAs9OcfANdSXUXSgwtNGc5DP8zhYhUwLTapilCUEMeg07S1dKnq384MIlxAQZ4Sm
IHQHNS7xG+fNhkzTkZU0fxKFElJ7mrZWZix6METVebFCxXZsSj9SrzDA4SaH4X3RHcpHghcaeG3T
rhdpvpst585x5Bbf5WjynpJxh9LcFOFvYcE+OFnyVuYnII5O3SAIgYZH7qy0seo/qXH3VRhRXe0O
/rEl2Ama7qkbjO9VLKSVLrhsA22X0dhSF3CH2gUIRCbik6W4MjTVj7Rnrkctl6tTjhxkH12q8rDY
eboDu+CWwClu41BzceWJw0x3QYXwykJ0DAbCscpYBElUpv8gYY8f+95v6o1LlNQC+chY2T/FQ2Vh
zuheeKLstMl9+fkwun+fTOyIgjHqhKnpUwYP8FgnMTyRDqgW1vRNxaKaFWNks8pZ7PVKSNXYYEbr
gs1FtaQCqynDN8JF7CXf1VMDKQ+wqArMKCKzdg4rbVScBzWN5kW1iAYyRIYAUAFi+oJyebYsWWAV
crIgC2lMPXWcy3SuooBsqrx3u9rmZw1JoXbDWVk77cQTp51WM01hXbQ1rSBNtgDhHF7ot8IhEb+w
xk4gdDcDbw84q5hqasRPDwbBnRDLe35MNnv/Z2aIodsO7w4jKs8lvrCsvHgOZsA9z/EPaD5VUquq
ao7gBPWZJvfEQAoGVcliSLHlRcCMQFmwMRuUaotuGDAWmPV6bgptRpkS67CcTPiqYTJJFJDUdAR9
Sa8PbSdQNn3p4Zs3CjK61ry9tK4RzPBbf/ICbPAL+iGyvWIaaXTzolJ7V8QgrSsRyhI0VjibULXV
ycRdCcxgSmCh4+t/8f4fI6Ll/cH1f//uf/vLH/zg3g8frpv64Vm5fIhyHPsM+/dicBiQtU5dmTXr
M5BopkXTBC7L6kL+NSvPz+fl2WDbXdDaufOh3FVNNSfHeaC2Xg8Q00I+tLA+9FdwaGN/+23c3RxL
drn3Qz9r0y1cVbOcWHUPBbHg3ag8LRPLt5dwQMYC4dWgHaqkWzdlVhavxeQS/YWQGWWNQ6w+IbBk
txJM/3BJ9TOwTgUm1aZ0GEvlt2U6qZLTFPsuASj0dIvYXyy1mfe9iL0clDSA2QVxkJd5w0k5MXn3
jHzlTO9G8vCQ/CH2XCVjmbRC67tBZjFlh09TLz6OtSdnLGNfUiySqL+I0LjeJt5H5NcZn6Y+I5JH
2Fh0eSwcPy2v9JcYLuDwIv2hiJcIcCThY6TXbhwdTxfCJXMBe2A6b0w+hdskA16/gG/lCYbp8vjG
kYdkOM+dipXSPfjz0+8kUz++pf5vqWNGM3RFP42hnjqO+SEU+LCVkdAclByypArnM/PCqxHVOagY
h7jYMyEB8aFd0AQYBtp1L6hsNY/Szm4pRoDcjrX1Fyh9RvIAZXlQHj4m2nsGor3UlJFdkI+VwnDk
miB152O37skcU1tJIG7+fXkO4pBxXXtYGZXxmk+q5XzjZlo1JoWfoC+nOdz5lDPUyHdyZ4t9BPuy
I4ukHIi0FEgArkGFDBxFTXmAcdV5AdTnw+gclwBPLZRTPU4byg1KHx4Ek2VKXPU0056h2LjnRsEg
LoN4x/Y+RzOfQyzeJSNejB2jMYGcF+DfCUdfJCmZsrKlb4hyLtrtbZ35F+5dBmYET35EPxwrGSM7
ev/mzdvDo6PJV4ev3pwaOwjYPlqcUH1a1cVNWa0b2Dy0r87pclrMh07a/CYv51zwydnI+C1Ok9jb
BxnfOI4xTEhHoigKk7z61MMbou3DMLrhgB3xp3Yowwl+kASrOg1NSLTHOk+C4ZNVKNjhleoQ/3K6
uzL3joCVnnb4RYjvDtwLmQmgja47uYEZtC0yUbI+CCjTewITqZXTS3lDRSapsO0VBFuf6nW3mFxy
HAccpVWTUPzfPQp5VWOp+M5ayjzIeDjR27JAmTU3yktgPrUwm0SZJfjKY6Hm6YubKbSXem2+xj7a
cfN85CZiVMaqSvIHTfDmJJByg6QkNAyTCbGYKgsOoatLXc2lpHpP6FjKjVdGgIp2KpO1pdS5bnU3
00apXfwIPeLgE1aDy0YoSXYMTUBlAkBKW0KbbHFefhAPKWhw8iDOzMU1NEjRm5wvuURIrwSqZWrb
hsxXvn3Iess2ItKwpZxpC/0Dw41KI3gQ8q/wXSvMrpStQBr3iGyktGC74QqAxyzgnJr2Druh6LII
epmqq8gE0xUrv9E9NvoURlUXSXuyGKY+0Ojm+ItTSwCxdGvxr5qd/FTKCsaBaIjWaAKcGEKz0FVp
4/JLWWVJaZOO4m6ZUm14Nn+4WGOBmLHTxmWs4oo+LNRIEHvkfUdxo/RoGO1Fe7yZ5ce99x8STiyl
Sza4uDImraZyhIwBdHwQKTzEBAWe8BA6GJQOY5HWLZ1tQJuU0u87KOmbSi3k2UZZJiMd/Ah/Aoy0
OVlGUdzhuJUs8s1ZEW2QKqns4p71PY1fei/rObsUaPngiR9WNgAp+XhcYaiW1HRZotSDVdXC2ZCv
DKWe/v2VfEEZv1XYIgfbiYqQxCDFCo7QprDaCH3bVrBVIgnk9XLJOWBhvbqo8xk55a0wGzsM694n
/A8+jn5bl5Sf+bwuiu/YuNsoN9gXdfVdsTQc9LcnFWLuTpUbilnZojDHexA99sZW2S3lGClKetkv
ONMm/Nd+LIFi3UXxp5eaiENKxqrbgSwPchOqrlsnJ7MHVFOXWs3yNlTYZJQ8/uJkpv9PF+ENlrOg
QAZUBrieBf6FSW5RBiuW081EFHRFL6VJm1+Ii2/LEKkqQ8udiLXeURFNXEM46UmqQrPthipQYRYL
D5SYgD/JA3KCcSGGj6VVPnFK9W4mqFcVgqHqutDOzZ5epcA9/LUo3+V0ljCWdPlohZ3Uj/6+7q4h
LW6vMP2r6RGm4/3rApOpYH5yjWY4zfZVkQq/0rWJTqmexrB9fvjDaMce4lDVi2s6w2D17PCQgORp
YNT2kQhUgO1qepc+UUPjhOrXGf1t+4ex5zqVC8GXKVozHnEKWvwNZwQqZmjiGDuKgVHOU7Z0Ln2h
hcrLRzUheON2FsWkPYe71vpI7OPerziTifgOVX0FJeBdcrOUpI+FpezNkMTwOg5WnDa+CxsOsIGx
2432SacWQel11VdJmL2kXWUDzB57C8LSFuocRPxbTrB8gK6dyucSgOtdpRNxe6Xg+3aV8GiZ2t6c
CJkKDCy9qQrjObAEoTscxHdxufLHIK8JyHmq0VsKRWCYWnO+sQKw9puD3kmahNZX3+WGa7hOelIH
b5+NBhX/cb/5noK8FGFvAdrDt20OEu83v9xvKKWYDCwxqAolgBuyYlM+JzoqyDTmXd9jRQbiRiKF
zaRXgEj70p3bPXnJjagX1SYd+JlGdUKm0WoDh3x2MvtRzCLNtuxfhhjTWXzXS1jXWzjrWnremyKQ
zJ8oMXMQ4O3xqNB7zkvQpLw/6KbRkpjSB8cNpRq/Tk/TB9jy08XLt2YSMSFb0nUq3ofoC8mELj0o
Axp7zpPJOayksAFOtuPCYNPbWSCU1QqvnFBlZUQCZi7ogS8VB3KhH3se+6xUFM20p0OqLj8RN02O
kcEE4cswgIfJCk04jsyGl8rwmFTjxcyLhcKUpmyHIm83XDvr9571cy/ekz99DkKNgIpAad1DIsLf
utjuHuoeJyd7DntVw5YcFH8E4qFE4Q1pylKfKTOAQQKGokC/zVskm/mp95qgsjcv3xwOjKMM/kms
lQUlXHqsSqWRdXRzsMJs6TriTqvVJjGcOIzVNvyJljfZGiPjisRqEbhrxEE7wyevERfHw4izQ4yN
lkfvnr9+/47elEtZJ4/3Bv/jpBSBnQL/j+mrbsbKGVfc5WmvtsFuPnqWo5jVzx5dCXKxDnQYKj4U
03XrYzsp7F1lnhC4bdm0hqkt1u2KFvlY2bXEugd96BRNIJ5EOI56yUN65HjRoeA+dpwYEv6SqgVR
XHo47zO+8reSn3pc9IH/yAyeNjw9U7mT6JMHtKW9ri1WeNBZmcVqRgCDoqFRoEbskHa9mhdpT6Wb
oZoRFT/tECNx7xnNO+VS0ZzHybtU2fPc8qyBboh3YLhrjSdKuluFG+U9qkcWylJYzAT6htYD2IuS
xJCI18tymremcGFTrgOLWLMw0xKM21xlwsccm/hIH1lWFuXAkeYdCrrvzplzNSHUkoUzhxgrO/Gp
jXoQB/liD0AlZuhBpEzFox3/51B7V4oEbyDKywZYD9bqKWZc54AshZy1CDjPIBD0YU5w6GJ/m4N6
t7pkDugyn32mgWxhf6pMi7cKIICVKHby3YkIJ6MC6jKITPgrkwHS9FQ0T2R5l4T/SQfX//L9X1ge
gWxNvP5X7/7tP1MJ9rSHmeF2trq6ABJmnyH9sFwNdstEtHPOhm0JiHb375KWUZxgfwoiRkJ//iEv
29Br3oDkQ6JCmrS7h8qoA5jPp620pjQpaaZEEDtnJ7Lm8HeQmqgvx05HYiJf5N4xNVFT9GUe4gjC
Syw3mZ/hnsJ7aUDoRbEsyMWZbjKKW+SL3zkA7px86JwRhSaNERkvgngi22fo9aclcHr/9lUYQW9f
iRw7yxlX8JROnewiIMshwLTIG5w9wX1sEDrvjBJxvUEeR0FshN5szckkkUFG0NCkX56Tp7wRJziU
xZrRgIusgDIqzDcjfwfG6Y5h+eJwWFQ3hTj3WbZE4SH52PRPRrBHyFyvqcdoaDyElbbC26WnlxVD
Ag8DYWavz6QTDweZcTAbkAYgwbFJKPu9Z4mmXA7S8OJEdXGaBywdowfTkfvBsouo7xw/D7Mr40pH
tzcCuTy7IqkbekJCGRLqxCDkJmadaBmm5MVUv03hJzAhgxQmhsLdCXpinsRu/1n7oQ2FH3vtREIC
AkkpqgVISqbVdAG2s69w9ZSlQTy+nYEMfmRMwAY7DExrMVd36+1cRjeMzlVYgw3FDItrOPfPJ/g4
6j/TkFUNhQTvOjTT13zbb/i6brPMWRzjdYvMFeMY/gzy9wY4OZeBJkYr3+2TlDWqqQVfHHSqYaid
CuU0El/J35ZT1b2ugsNy7cLaXmd2HXMn846lCyX1U9pnCexn75mKtIq5Go5jRWwiwfQok0bYLix0
gesJj9fmgePHB6dZyEKwm/3fhIS3dPZwFeRsLsqIjLtiyQHAhHdAV7o+CZdwPoyMrD0qntZ4lobx
nviYqjsRb8qQXT56wS//qhNkPr/NN81ovcQq93eCed4Fs7wbnBEZCUdUgGC0rudx+qcg8c66xdvX
mcxf/fYbASSzvfe7zQR7R7C3yWWG6PWsmObocla2cSNCHGDDgwSKQUdtdEuShuCXB0HlOZTMwWRf
6S6DiqIE67bTUfeGpb1vUKUTUi+KyvBnflOVM5aBRKHZdO/uvM5CmXR+s46E7Re0e+4lC/UI2jt6
nXFCgRZRKCRXaTBQZ+J2NO6CxM5ZSnptKLmWedhZkz8NwJ1RicfuT8ICA8iq71A7qebz6haJy1JQ
bilwgNSYsw05rIIuRprwgWViJb9Vo28j+bQ1pIxidxt0vL8qQIzgom0fDqIPmRNR2ocMzNg8sI0K
6eD6f3j/Z8IEcv0/vvs//j+rVoGfB/MSjRh+PB9wNIoF77O4tMWCGxn2lGluxc2dlcvJajNENQWJ
fbIGRlVPtOp0p5i7bRkqLc+kOwfogSy2muebCVehrhct4DXcMm+uhhQoAurbrOwAVzZSfsRjDtuh
+zLddc/ZtWsif4YB1ORACVsRPbjyefldIYaGt9EoJ9LP8LcYg8zOz1ye9abwE5jad6nb4jaTfLnB
O5fVBvEyVGXLVE7PreyAE1LKJKeimAmWeG6MuvbbwbwoipkEQik4KZB1Ekw2GfrfWToI1+3BAlN9
lEx5kdmRvDsJa4JDwj0xURVAYZVlIUxU4WkNe0ZJqwbt6Bv4FmVXWD8C2vfdeomcGoWliShH2dAP
CYcGhT+2w1BN5QMsoz4RtSIxBvP5IQZfTr5++vY3h28n0oYmIrxHlKe6GOEhN+KC4FW9IdVRGUJ9
aWG7jyg5d+gSjkMRM6oTUivPj5DtRiZwEU35itUNUbOvs6hDk07DKp6luZvyD+0PO89zn49qV3VM
OwufmjNVQ5I/7ujt2lGfcyJrmhGRukna7nG5oMsKy0FTnnZr5s8NFTnixVQ5hULGpnsoUagMUzKE
y/HzZa+pEjTws03PeD7LcDAr9byctqLCdb7kcqx2xKYEeeCunfh2Qt86I2Xk4mk5UdnVw21gn57B
ETkvApMFUhUxjNrhStvRmKBnbiFWeoprTX8Y4I6KFuVRgsrpnZr1FA+C87UtPdkAxZsJNS5mFErq
DFSd0W+AiLAbM4m9g8YoIR8VURYwqkH4wuPH2TDm16bhr9uHWmmX5Ectf00cFiJ4gal7OtVs2a2M
twtH2ijINtCgroox7lhn0VDYkLUeeEFl5qY2jgm7Amqg8If5XagaJrrJ8RQ6+Kb+0+Ce5OREvNIo
JduJalLwCM1uTy4yMZcBbPe2wNyUgSNARHMKOHyDsCgvLlu6QgjZs1GOEueL1F5Qco/Jsp6tMCmx
kbK/RiUsM4fjlKi1UGj45nkWFebgVvSB4Jim172VRlF63YtM+faRQ8U0A3I/D2vp5m3UZvWXyNCW
VygItZyi007xT/HOyGdBXAUZZVbkTppN7VbfWeAzcOY5yTsxzExMs2xgRSgCj6O51fybAqtmM1ZF
ShVrY2Se5UKYB3w5msEHKi50XPbvPVdEsl87emxO1UhfoPqrqYadhzHV7K7JH0nbtT0+9foZEl3i
t/lsS+DIiiEsOVmM8GgNN/QCIsxtCkjkwq1UtDQ3thgbDUSyJ8zIyTVYMV1RPPiPFXkGr/RETJsJ
7lCiHCA4PBrlNPQIE289bywpkAViByhCOhIVzWVRcatAd+BMCNXxlt1Y4lRnEjnRr8rVZirGiQcn
80NfttVrt8Vsp2mvyC3cQrWc7Hy8g/dW3+dssqR5euvUNRkDY1bx+cCgZKnugX2ChoqDG/QUDlkK
1cDuprX/TFdk9GUcPdhtJVgwV7ExrHIqaV1qwgGFMSjcd+4t2XHgo93XvkOfkOa4bHE1w7+TeESv
ReCn6xsnNA6QCeYWGH9Adx14OM5AlvCV8ZtemhmfOtxPDC1DCkCNTOqCdW5RjSGXc1RtpiRr0nmk
hvokSi4vSDA5+w6EkKJgW7xXiMlJ0qe+d4h/QnY29dZo51KaBEh+dQZZGan/OF9cjaLKJECNtiAt
0jJSOx04dUseSbg6VY2eYUo6HoorB/THQZeSqNSaL95KkJvSArP9rop6kS/xmJdQTTlZHSRa89hC
6sGyNYKy5XfBNl1EVhmIkbTR1RZmZZF2kOQH5r1Bf4u/PQLHkQeCeYl8rFkFAo0CBGx9kgbdUGfF
2foiiUlDQ1VCa1YiUaMFoncbdDQ11ARjWB2xwDt77sZP1RWVnZ+UBZifA0MosFIYiIZM+MQbxLVT
HHCktcQda4TpIIyzr6sbw/uNXIhZEMCucGeplQRUDjoEYHHkWL2btJ4O+0Zm1kTFiwByHLO+H1o0
lW4zTpmNe0yQ3c3CxkQ2JqzqCvhMu1F8j1NFfAYRyDym8MLPjA/s6hwtLp97AHwpkl2vq7ZQ8li2
Xjb5eTExmL4/GvYKXG382D7/IHHWY2josLaLoFoMQWXnFX5SV1Wr3ni5BExmHLA0S+z4t/aOU7+M
l5JDi2SPIj5Nbhql4NgMrdNv/m7wA1qUt53xOmhJNZge9+Rkl6WAmcoRuXLJOtpxtg4VDDeZHB2+
e/9m8uZ3k0lMde6FxC7BOFfaPYDe/ObXVILWAeRUY2SB6beYUgRTba6Eko5RrSPGE4ZYYp1IMh8B
UQyVU0V7KXJFMwVwAIJ0610AmwXOv3R6omJhRuY7XBBQqwEeccm6gpNz0QT1YGXYRqdin0Z9OR/f
oKe+cBH2ExuGVQ7Fm5iLbdlUKDxLZMWdfizO2WZ20eGTYyaLtprvMk30/YExyXJr1hhtFLhRsx70
Yw7MwFA7DpyOR1OAqUpSy7HFp6BNOUPxb9kwVtDDoR1+y1tHPBN0YgfydtzgBaNsvz18+6vXR4cT
jrbt/Iaib2O+LvaZhenDUS7Re7ujaAmxiJHJImR+hLD9R9/4bbsp9GOA9pNvqObqeLyffCtSUVD8
IzvIXwmengYUuw5NQhwL2oJMd2hkIZ6jLHoJ7BKjPFsY2nxDW/lN3rTFETMhNspf5o2EklNvIyCt
m3JGBWKAYUarCoMiyH2I2IgoG5XfkmPBEt0NOLeOwTwx4xVoGhO+fcYSoCaj5Bt3attW1bzJnNAc
uYwUJi44JJkCpX+82IJccO5Km7LkhyJkSfAspSyYl1XZedFOLwVG4TuVqI5N2xISZtytJ4SICSGi
SdQOYj+euomtuozCdMlEH5V8uzq9zJcXWKkxrjG7M/qD628YDN7voqdCPkssEJbIzU1touRnQhYA
EGy9VYP02aF8NRSNZXJAfEceyoKqgb4UFPUHZpkfu4syQH6TSLd6ckKWS59S3GyS6pPu5KQ+OVly
EPkyBklYNkXmBHDQJ53K/JpJHZibooO8kHlkpv2Dj7SLmvvcsoBa3viBbvrkRVdotQCbPvlOQYId
JDSjCIK1iJNAHYTuc6xjPqG5hAoXxLXBWXEeGFm8EitsfNlVqAM/Cayr2sC96+roIEEhvl+nJ48h
VB/HnWjuljLEhx8vXcipiczlmHyDzEx5U6R3nIQJyhGRJBtjyQ+lkaEQCTmxOmbX5D4POlMDkyVo
SQmORNmLptvNnKJYqD12dtB7M4QtMtbdk6Bo5IygH6JMQ88hARTHpasbYqF0817LKATXVV/QmVXi
FvMzV16hF9b8rCRuxsJInPanjMWMsTvC5WOaCt++Idgo1vUX19sZyYy/p1jiZWYisb2sq/UF/FvA
0LatOQYOY8M4EksmHzTx56ADSeQysuk4jKl0t1tdh6xO0/5ddc6xkByeJJ9jvoUMhiKvufVONyLE
DO6rvtz9fhvzZ9J+fShtdoUwftk3UgkXqFP8Mg0lZ1P9D6M9D27pA94LAB74mddlwWmyMcNWW1JB
CYrPGeLLVYlp1y/RcL/iBBTwPotYPAW91YFY5E3JFWFJVEI+q9K7kzBLYVzn1XxW1EJoVd7tPjAM
oZhSvaK8xsBNzgsGREn66rpewcnUZO5qzY1zrkmjL93MIDZBohN44rp6T6s1l5eQLhJp9GBnhwP6
n/YvmbfoYpE75XG3/88dBQOi6JQvQoqFZ9ezrzYsSsA8fel2C5YN0iMk6+DnyMTOk19IGLa80yE+
iR2Hb4OyzbGZhKtZz1s/u5IMjUMYnF6ex5cedCZwCUVnGZEwwTCfYMhcMHSCB2pmgfHwz000apUa
6RYGx/hb5WydpDvgOH7zm1+PXn7z4nW8A5Yt/xJgNfJb9h4h95s+HwRbFDR6Noh2BSpbMUtowJQ1
xUOGljs7zLFubGaHWdYeUm/4rMora8QlNwWFpQcyyJ4cJ9n9J+nJqWXRNb6VWhwyMSdVbrmc0GPX
48Wk3ODABfxgILEMihbmz/AsMmoViPsK58FzBrsl5WCQ8CkRu4BAdgH5Q0RHMX78bu9FGE1mOc0J
+yV/fkv1QlDc4SZwJOw2oE1ZzLkucxdp5WdNNV+3hYxd8GgLB3INEijWvADpTuZALY37DcqG6ik0
6iOZS9WfNw9PgOwaoQ6AFg37qF+bo45j0Tw+1TTL8oUwFMgETV1wd7x7MDxHgrrdPV3InOuAG3UJ
Gi5nBevZwFID656WVtKUzIFEkgiaa9GhOLvIhpzjXoT6jcez4mZPeTL3XYQbqWwNj2qNYyt7smgr
iDhsYuxOp0b3o0dsuWe5DSsemcUZ9lWuDuWuxxvc9SQN3VyC5BfwDVPLlSqSFWliPjI3kz8DPXph
ZLRd9z/vWDsvoMTtb8/oJG6lUafZitJPGK++zyRff+2pLtKDnKH4G3BgUjYtzwnRTHS294yz9YqI
AiNVdd7SNXsoPTbsmPXyalndLpXR2pjCrt4iXgzLjtxhJ9974P+UFZYcTRvBLC4KOdHZDoOMH8Rq
c1LU1B46WyM2o/16L+S2aWGd+ul1nbmZkuudDETQk+N6Rg9A3TeOye500wKOJZsFk0xj3JgGIz/3
702lVweRln/GGGAybpP00kDYimZCAUaNelQnFB+CWKMvhlGAsbxfAsCCVD+3Wg0iKkpoU6cHXRxF
L7lErbkNZdSK2IBY6QWjAs/LeuFdtptOViqAhpiwFS6jk3uJfNJsubJjmjKtf7+hWuSNLOVO9hOq
5lhjTZ2Sv2db33o5l/cy+L8//MEc7x/+gHsZeYcB/G1xznc7VcQhfxqsgFmt26acUeHIP/wBLxk5
fBKgjfQF++W60bgSfmwlHszaBGfa39AaQGWedCpm7pJOgsAHcFxj+i4ROmUY9Uo9WxTPOUMV526K
gJMVI5m0yZiz4wyndjGJF5PyfCIsdEFvpkD0suaratHtI2LoRN0rsiPzunFMkcVjHLjPgEUJxKgN
rMiVBl3shYfU2IvgEvXp9Bflqt8KYJWWGO5ukHCSbVPuR20xM27MMEFSXpOKiEIyxqcePHwIxzFm
DTkr82VW1RcPpxflCOvdwnPOzJXBkyfwa/yzR//zj3/2bwzPP/zGm5M3T3Uj5WQiW21YS7VveN/8
bvL103/3+u3QUjyLvNnIO2+E7ucYl7OOjfHdFPOKB0MZkMZ2EHjvAnVVnvZml3biowtECG/W6RIa
CBpVg/Ddg1llnlCOJJx6wT1+Ap30eHx0re1dvEYEjND0B24st7G1MJdd0oGs7uxeavojYqpdyb2M
zBOw72VBdy+1Vwe8HuPVrhFutPMdS549oI57jhCWdkVOC7uC7MVdaAl/RsZJoK9CVY3r+l566VLr
Rm5WG6Nd0O6QScfqouN+Ts11IorgHa/YxW1HBhsmBweDFiHsChj2IBldVlJR1dM53YmYAqeGPd1P
oZxtrYnfksvkR3w1jQ2+S3F41jT01Y/L99Od2J351VbWYld0shma2fvKx738IS5vHVDDO90pxNiZ
zH2ZQWcOZgNDxyEl7hjh3MweYgCPh7pBV/ApxTO6p6WHadEgkj48apNdSv8FF4TjxyBFRvbvKepQ
ua3zSyP9ezgJEjRxvR4MlSWxexhHzppE8SFIRiQO7Deu+UYTkV2rKkpIXef7QLODofa9HUY9hBSk
Oxfhn0Z6LrS/K+pzsGBV8xWFqtHVvhGOko300LUKiQcOH9EqNupdq8SbHD+tmxx42RiFO7Bk5kbK
TnYO6fq2a/Hs6zuRrkkUTgyXpkI9DlQYGMyC7Mu35fLHj+ODnblnX4/EVc/yNvbxLUss7IZk0/8u
dLaTYovCt5mWKDvKzwvzgbMz+SPa1+er5EV+VbwghtOVP9UfRBqIYiSYOHpxf5LEzlS7xD3erJRJ
jKufEiTQXBfNTjDutEABl4GPJYq798sq0YciTj8zwGyRL8tzdEj5nJBHwlVfx2GE4AhvFtPO0pv4
xPneuA4UaVMmtskp4BBnAAy4KRtvMwkz2cEQXihLBl6TiQ+l7YKcq0zIe71Gb75KMqt6pVZ85GJR
tp9zngzxzrPkz/4kcxRJHNwQ5N1vxzjwVSaDcIuyyRwRst7AfjPabzJMYGqbljpup1IPUNANRIcn
mv31xQKa4Dxf02YF3AyHmzdXSfxOppjYb0SsXxYlZXoBsheMO7lNSzSWJ2cppeiLelNPOK4F1igA
RlyiL90t/ucsTgMVReXIsM5ygN15a+HX1iQR1gJ0G2/JxvEcza1IcwHnCGsGPlOrlBed1XDLiM7i
UNLuhlPdiSsCTobYA9abxq/gK9o5KxEr2XsjFrgUc1bLeqeGlwawYAZJmkCGelZ2yW9rGW1cwNYB
DIh8mdlflSuSCWyoREGqCWXQe/Hq6bvD50490rLepXKve/3hp+yGp2I6whl3aLokaq/ecGwSpmy3
vJKFB2SPW698K4lrW8yUGCQxoV2B+/YCe5Jpp2VFuatgauDlBBZClnyspcLT4RMKTXlRjPV9KZ2l
OP0unPekpeoffaDIMxV6nU/ytkXAX3x4dPg8+sUvokc/A0n3i+rf/PSnXZ9z9Ahm+GBYtmHZRK1y
/Td9UTvzjetAgXHUnd6xW5brTOLrRCGEV893zNt1+XSUQd/6idy5unF46VJ3Uwd1854wsEBo6lF+
U8x2YNI6u5AzQ7M6Ot+d+WKAm0qL2z0wvUrdy889mv1+TVYDcuXGz9l5gj8XN9eJ1XUaSo5GieJF
gwePDk6DCdTkvYkxqKG9U4JlTu0LVKmcq0InfFcnfzu+b70REVYuQ+UosaWDXocFCqA3MjYEkrHU
xbSqQ9lY+MWka6dYoOFrMUwBzzE1e/HKclJYFQZty/5ZGAr+9GMRZb5oFYL3cxmENd6vf7534n/x
mSO8tAuJiBg8jR74vVKBWmfdYIf705ZoDMw1Ho0wVHlejITQO5JMe74ZgcKYXxSz8GdqiZ0l9X39
uxJih25fjHx/8YLvnNd1odKZksM7ILSplhQ7LUnl5xRrDY82+E0IKuZruzXjwksyLJHbASe0jDTd
5cLZirozrtGbzYKMkOIlgkKOm7tucB4xPuAoZknOl0ASGBq5g7nPdiFXfgW0N6bz9YwiovEKPx7e
0RtehuQAyfjJpYHBnN4pE4G80tcFzQMJDrYkIAhu6lBYt41bj1uG0+9/tlhtN3AtEEK9w/kZDkF0
NlO6tTDlW/pA6YgqnSOXt7aBdURpdh8XZupbr9y5UVGnpxvTATtQUWdbuIAMQMH3wdCisKjmJD0g
1+RQO78AbW8dVNblnlEiYo5gWs6M5BI6j5CuUSEWwdkXaQ/jk0krxIdNtShuMdqn5xORG5lYJCaQ
XXINzSe7LPh5UPijZDdo2fXvaD/LuhsCwLa1t/Ot9uwMC+4DLVmABOa1VfOT8SN+GQFDjLYyVqQ7
oE/hKOwxwApN0DuBFOg0XLdD1YxVo+cq9NtGFGRRvllqO+/RFpXetcdmC1wsS5yzgjqo2J/K3iZc
5QPJ396yo5aRloXd7cgR3vUGHArPtw3GMUSLvL4ynFpJDSdjUjjPm85H7gehcn4EcvybENi6C0um
H3H8VvogNobfdLzNPdhPPBeIj6SSHf0upn4GKysypiufI17UhabvtHPtlsaA3JY7JYd3FSHXp/tO
GtE2YUXeivcKK3fJlOSx4lsMxog8iY+zVINO8eRzpK3xOWBAdYlM3SX6kysq3pgedOopsVgGrm26
rEazYtVQ4h36wlnxdDj4bPl3AkbSThnvrily+gUobf6whySo3CncJ+vTi+6+ef3u5YvfWRyN64Ci
g1Qd/14oWlF2H9Wy+Pd4bKjf8e+nMt+h+H38bHparTbySYf6gHCBMDTcvd8r3+rs/okWxaKTBHkH
DLneyGsijEku25P0R3s94M82bTFimqSU9l1N7wGnEPogVTwzS4uBNsY5uUsk/mKB4Sn1At28u2x5
MA/x7UEkb0Ypu2N9gDai5SyXYjXuNS1Y/2jv9MC/G5GVF2k9hnY1sIA4aa8rBnMO+kVTYTNK6EtB
J8ZdoeOS7Z+jL1BStTKVWc72pNDq0B0qvy5cqHUiHjSZ+UcuVaAs2qA3dm/lEBC86mpVl3lbzDdZ
6Ejelo3QvmDy09cFyqrYbsuo95p4CKST5lpdPRVXgCZfIF3sMMIgLJHs6plAzkEov5VTcWXHSVj3
rp75UbOjQDCklIW6Lp2N6i275X9W7Y3YHZ1qpifLMsLGpl3nLRGksu3Czo9qrKOxw2r4ZWh6c5pw
uD/ZMyW7cGsE3z2hiQS6kXKCAzodbMGmkdQbf0+8cGknxpbu/ESiD4YlPlQ1XvysMtJbU2tUOGzh
wMbg0i5p6GZZNgvfs8uMuhK5dMTPAL/mULC6uHFFWCtsiqchLmktJIcv2sTIh1bQlQGlVxcVa+l0
3KGTioApjlJTXXmlThR+VGopF3xnWUmud+7nmrLBBjVVA8lDgWNjlCAOci0gZyiJ6DXd0arC5YG6
ljbebx7sN79kSTwxQwBpSLtEzgWq2ujQ70AJtoC/IBE5X5l1VQjyJRNRXI1zOJrlhvyd0bv9FO/7
1L33sTP2ReLQXBS7If3dIAiZm4GaGi4x9Lupp47KuAvzF3d+2FTzbIYUTsUSw1vDFmKqmKHWXDDA
bO+shG36CvFJP0uKqYhoFgP/mUwrELumslCqfUmTiHnZ+BoG+3R8hBMX2cZvb0bp6TYbkGFMONjm
yWCkaev9TLJb71rcxE/3tbiNPGmvw487XTS92K2m7Ssqbhp331Ecqs4HHyVGtS2dwTl1MrD/HG+W
yCcdzVXSVIVGKiFdbPP3Mf1+YLgKJ8F8Ajt7JIQvGdR+3ooTtmN1JsoPsyDHFKjy/va3tpK5W5de
5pYShMK03pefeGfKVMbKkB1t93KGIe7hPQt/47KU0OPunD0ho+Qdc4zslG+921sGS6Xab74+PDp6
+muqkhrHJwPKJ8Jheg0XLZmxIUzUl24rYCezciprmIjtgyU+Vut2YDTNBoPXS6x5ghCNct26oKOZ
14SbBdJ5DxhtsyjhUPBoU61l1YRWDjbNBjB6Vcf1rSlZb6ngmjq1KSdXxcY5LPg5TBrZ4h+/14Cw
aV/uGYKlm4vi2s4H/JQCJfwzCgBo+QOhORkR9LeS2erBHkN7P8+Z8YmJEZmHTtW1RX99f1r41B4Q
tTDrkBUtNlJwoI0IBrB1U5yZmT8mMDv9XE4O/k4Dy0IzRWkYu3EHdOEPqGu5FCwTM5jA3qcVRiMu
GIrEB4LJJpTt/io10t4fX52mKa+lmivRTRqduqOILcL9437zPUVEoQWQdzr3moYIHQPitxYrNgQO
LcaIWtHOr2k+vSy4FGmgYvHqos5nSpYuycd5ojZzZ6Vj0VBlICuLxss5QRjqr/zEDE/Jl+Iv+7U5
LYyPNX52NKQZm03pgVu5liZOpWvpL8eW7CAC2rmPvKLLasGhsbX+qRFq9DVsAMqQcQ7CTJkDwdVY
C8hSTF2AE2pJRApMywk1wC9nE6d7l+cFlkvPyHzqrIzB4ScSm4QMtwOrpYk1tyEMlGJMyBFxveIW
26of4kfYEm2TbIC/NtNVqZlngjF3yihooNNJvU6tDrwUmpgIi74wiy15bDiWW/oYHWexOZoyzWEi
8FMzAGNmrZdz14ZfH7hOjsZL24Yh5uUXSemkDsmBzf56glOk8RHjx8xBd6Ug3zXpFjlDPa/WVJbZ
yEhyUd4UywOOJGVZv+RMV9jheL9OwxL9fmROSPjgOmU7EyHFBiLNPEo6Jq86G/Md5ja6FsnPqjWI
wFTRG/3qYcd+R9NvnkTRIaVzm0Q3TRaNnngOSQZ1RT8cB9ayjyccm5+fymq+itbcZWNas2zUtq1S
6plW+JD1S3Z2EJpIcFtGBtyueXRms7dcoCybcod07ky5S66j685uDpKqvDahTdTXt1QhmoAdP19u
iEWoOjQ7cbNwanwLMbvADk1kB9C74Lxs1PHQdXthntsBVuWc8lp5XuXL2boR1SSsZuk2s4r3wW5E
ttUjjTMhui5psietsAeCJ+7EH/ecHjBvKB2zgb5QeI33BjuEoPloSXvWGlfX5aN/J0ykc+06ePeu
gLcyIB9+Ly8+NaQKWuzfFBuRiuubSuJA3usWkYiUQJ8O++bw4/K87cRP3D1/rSNWrWjhcTh0WH7k
Bcsagyb/M9NKbRakUUfnbI15UaFhw5YCzrQqTlz6FvM+zoubHPOyYQv29DFBScO6dEU/AzA5Jamf
YX44AEcFSJtilddo61gU7WWlIQimSC6R8kAIMkpXGN12fugr/MuSZRw6BwVsj+6Dz0V3KC9Lz9ax
bJmtqlXyxS7XNB4EknYDn0vMLzvdj82UdJ6KhE4JQsg2+uupGeI09HLchaR45xvT06Az/sxU+Lqt
r+5oXAeAnp53hen4RQTvVbvXsXOZ/Jjfj0WW5+euBXNpeldAol1s63GCVf5GI6luw14Uf3ZkvQwd
X/Y0Uj/WZVeCC6DEKjTs+1N0LaTp4em8C9RWliaOQBi0vZhb98hdOzbr+KqNq4W1rbgMF2JWW912
E+/89g6KoiFVhRNWRvGuITexLDIpb4a4XIeZzIg4zR0gJiD2XuarJmLXSSmFodEcOprRsY6HDhyY
AFe6Bu4OH4MIYLI3GPmkEUAMiHoUT56kO4MkU6a5LMPuJTYjRVc1Hpj2PTMKoZjJqbO6q7z9DKWf
fcMAI+Fum0XPRP30YrGKjAIw2nKJVPaQjky8dGAfdMtl7b+c3d6h8F8O7f+4D+3/DE/rjz2sndm9
PpMOAXxD09eNebLuIgWgIxNuBP+x1FO3J3rvGPZ7Yl4duVSwMngrKsz4fYcSi3QTmdPvc8FEoeeH
a2cMfaj7mEhR051zV+ruXvdPE9R4DipM/mNls11FMK82/e4coXuUHUUg+/1LbHGvx8uE/O21f4T7
4S7ir1sBIixa9jF2o+HOzN2qM7/zcpnprnrMh7ut2d1G1M+G2H7xy1/KDCnNpsE7eZnil8LpOOGm
FVJnf66MvZTOnq4+sCTREmS4tqgXwDAbLD5BkakdIKrpdF03IOrISuycADJvUTJrKW+SkMDay6ID
BpGPNkYOBp++LbdqLuGRSNOSqHJMmyNH75XqXJQ9BnkFRfSLqsJy0lxaskLxPSrbJ1H0UtTowGia
K1AdsrDnLmMkIG3171LL3cfIuSGj5OK+UpqOA/4tjVOe1XL96iJuuLxUKQoMCo8dHnJBvt9/kF3/
oa+gZuCU3Mp7tV8wawtcvtm0HrvqiHRBMAW+7t3dvz+DYYGhw16IRxi3TZl2n+wwoVfQLnRM934q
vMDM6iwBTAf9tLeeu8E7Ql7mCUBMyJdbE9luW8iJm/nq3bs3pJlv756ciylkcioLdT063fpV151K
h/YvdXrYzsTUdvbx7NOIi3R7KvI73dx4gvVd5oUoV5Mji/77t6/u4svqTY986dNP2VSK34XlO6UI
Y6seJcgQFJ2N5Mcq7Rht4AxBwemfj9N72LPePIN2ExWVR/r6TDt/2CedHzGUbt9apPVLv0CCne7G
DTxfky0fk8iyu4T2SeKxzf9IAaOqU3BE81UdRihRQC89aXZC00cIjJ9HcNxtH30ysj5+iuV5yHC3
HalU5BDN0ygmQX8sfMDfqEdwZHDLkcGFTsvycAewRuaWRmTA5JQtUlsVqaK28IVdMBOyWFKlurvg
ur/CZD8olPObqwI+2US3+QbZfIkatiiRpYR+PCFAAjo7Q1TgbSiFD8+KD9uWVvDVsBVjR9FhWq02
+KXWDfssBR9j5dKL764WRx1j3a0ov8lLYAfnWJZA2n7FIbi6umh6ga6KvHUjs1mDkcGe1VKZt5J7
j36ysx653dD5Kaa8Oxs+/5QG0E80hO7ODXc0jJLYpEQHabHe7ZRytQFUmUDq58wYSGyz4LUbHeSs
wOAR6lVpdlDjvt+mnQJMUTEYmRwqbQd9SmSXD283dm33U2en7Sx67KR1CBdVOz7esEVzMCtiLLWC
43fRPr7FG5k/qfphpAAjpzaZ22JXvYWztscvlzf5vJyZUj1W54wSjFrjRTDrsgopnURzh8duZ7K9
wWcf6bfbjRYubHxuZKf4PAMUIvI4FDZLqLnL2XNX+fjOsrHP04nsnbrJPc5q3Z7GAbinHuO+w0VA
yM1e4mYbTrfZK/uiAnbtwxdYkt2vY7xrmIcPH/bazD5CsLlLckmdDYYRbd2Xd1+GP8PWmJeemg9l
GByNz77jdq516EPOaZ9lWbxT4qyAT6BJG743YFemOMNbRDRhxcWI8VC5SrD+IfIeIdcbqPfcDyUk
K/DNuJAwomfe6UBF7BqDaCQhYdnWfA6yNWVjZKhGkXVFcTv1rhR9O5EA5xDQ32wL7A5fxITT1Sls
Uzf7DS4v3RcG9E2R+M0eYR919ixH2Ds+cYO3ZPSWSN8N2zOUqLDT1O2aU3oCW+16LO7G5Z3lbF89
fGAPnJon4B+l7ko4e5IReSZd/f0ANbtEuUjcJYtJ7EX7SYB7D41RmAV0A9H4uuEwdL7ojxERtOad
BgQ3OttNb3e8x8mG94bR3v7/z97bPcdxZHti8kfEOtobG7t2OMLhj3UJXN6qIhsNkhrtzoUGkiiK
0sAjkVwSHM1cCNMsdBeAumx0Nbu6CUIz4xe/+1/xkyP8Dzj86L/Gb/vgPF+ZJ7OyqhsUdX0dvrt3
RHRVVn6ePHnyfPxOsycGh53bFPm+g9xg58RzPLaaZAuU9lorlOG2Zg+AFpuD7aALhGBu1hjUUrAM
fH0ewKBAJyK59phauadQEAplvR3N/eR+pgEE2NuqBa+ka2GbACo/GLEjwKcVwrgxDgIL5j1dhtzF
kS4PYw22ZupDIOWb04xFPA1TV7EeCVDMMrrrXxTICFWcIaoExWwFqiDRk+T6iPT8o46XdN7dMD4w
evvGuCXwjfKuvgprHZBPbeP7fdDdPAMwPobVMCOS0digYxcffCyRfvr0dm2daN+Qm2Bmcm32LuLV
2uUqY29yvnZhKz8UhGlzxkidoPxGLkUdfcj7+tDlxLLV9Va32REe0a1B7BQhew7rrXuneyb9ulFq
CXfB7tCZqHuAEUBY3UKomkOXgM2IAm+ret045VrP1alj/bT/YTgoB+S53SpF0tR1eLOAKe8GN6rN
I/gAvY9E0/QOtlNG76XBTRiqnXHc6irqpDPKWqVQCy38UQSpWakUMB/EtJxU6LdQQT5ZkNpX5hT4
DPNdmZ9l0VTlEm0OZeFF/CzNjNeXCpOaQEiNLDU6/ekBIpNcQreXlriKJpkBLy10NZBKsliNBoMO
9wrETrEuBAWBnGD2B3PpEMhAdivRdQhXH5pC16clYslzZLDk72Rf5iQ5XCVX2ADYUnQl1UpNAPjy
THAUV6WR+Wy5eGawYAW2Sg4GklMcW4swrRj+DI6LligvmF7DxLs7CTTY/vtkIPslso5BDVvDH8qk
yAcHiQXn6yxLBmr4qwc2kfmJBiXcOk+ZjIJXi0cRAbCLNMhzpxUo/O022kBoRy7auC/EgQaCPIsl
8BLn/LWptjZS4c2sBjzXQzXVejrhQoX3tY0dCfOFDWWWNihhZdItLBp/Nej7wMfK7MC/7K3hfF1N
w2/p2e3kz6mZkXTfzs0N09JoVxUzo1yR+euvnRWFGfkiFN09IXES+Ke8gIli7S4bn2W/7VSA/5Si
771S9EVnNKoO5w2139ez+KC2ApLtG9vPJpGhYxtKT+h914ZWHooqLOKolHuVCLItPvzq5ZOvv3s8
/u3jh18/fi4YdKCwlQRigDmnTpAhOY0IWhkIVNei2hUJynxPhYo56HRV0BJUeL6s1wsjQ/2xXmMJ
ubCg8DarXptThKLDEe3ugvU8kIl4QIrkpZPf6FCznSkwQowwmmae4kFEQCPUI06diMJbIEEv4OAF
AQvn15uvk5iu/megHSnTcYB4FALdLh0gUHq7OTi43fw4F+skKzPhr3aa5nwQrwSntlETdgVAggod
sOaYpGuc1+IU7B6e6czLrfNB5+OXn4uOlCRwBSC1F94YOwMVv2iBS7E6CDv3T3k8kyCP5wDnIohn
CWNgNBpqEOYyRj449vE8DX00hnKzroTjfaYL6VJfvcBqVBJyQRG1AOTyXiWlsmX4IIsCf4ZFLXse
sAOlITU0iowkFc0y/VMG6uG/wH+avxCaOiaXHB3KUAiJ3ONr7hglDxnCFkzczuIHog2GXzzY5rWh
WPSc0/cjfEpJVQ7kqxE/dO1SiYHoae03CnBQ167G6crSHLkBOBx3uDWgdp8fZbqQHZj7k2vihGvj
+fryFEKjJftaaR6UgDcioO2jZjGrVpg/zM+a5L4GReV970VXojbm7ViEe+BZxm+F1+6WeCYzSLOF
BjH7U9KqBHlxuupptb67TPNotyB/qV3PtI39E60pHoFMd2344PjB/kk0k113QLX6GJhSq1+2Pv43
PUhbYaxuT8l09aRwu5U85/x2CYLRvHz+XU/HzH9x14HDAzIfftUlJ6MZC+ih3Sf+sncSPT4nP/iG
4gY1lC/aahmBko2wCtVzgg5Qe8ltMGEUoqxu95YA9qEh33jXppi/66Q9cg7YXc/NqRmSFWQ5Smb1
/BxcThvQcoNGURTcM5RVjFCDsgwCPVTzQcuf1RpV2Kmjqc0HqREgyXNvFIgbTbNpLGedY4Hp3EVP
w6220VlsGykGc8M9pL/kDeR1yO2fWccGUnF6ogE1Av68muAGeV2WRmJfgo2BxXO+GNRnYS0ku8+n
5HbZmgoiu1gmTBebhl+K4Ec5njasS9W5LuiqvQuqmG2WpfrFl0X3Z/OqbJ4wrA92NF5koKWTTZO1
i36u3TMT6XX7iw/b967FDifVMSu40O5iOBZ+BLkI8GbgeJg6x/PNK192UpA4qcRIo6uqDkPsFoTU
r3CNLY3rXneNXX6dI5hKFzXe2WaHyDWUpOHg6CGHxUg92/L87+kbtJ6FXYjlegE4WC0g20HZBALo
w6o7rMRggMOBzxrI1XQXpO/9vb3TWXEB//vSnDq3yvPzg2/q+rRYCoCdOmCwjoxe5yi8oUghPhF0
rOuODAL3NIJrgsM8lnnUEwjw9I6HJ5urvuSoqNA6hQh9qLG5AA2NjbEt5+RFTIh8Az+gD9P/rmoJ
WM0HLiRsO/ANvhHSnUPibNFMQtdzlOiX9YzNJZ6DUlcjt5t92N9BDcFG03mUKImSX5yiLmU86d1U
3IU9fSLYZhyVxFpQr0G9dxfUezqeuE3oW8eqqh19YJgbKy1OSzaOrhjSCsnUUNgwOa9W9MfFOfwL
TOz0pyX8CTOgaY4G7tJwofcu3IEyM4Jhcj8/vncicz9Qlxqwy8FlTOxtXINe8G1Gl35juuaPDoOL
zEiSjIdjhNpvDeVmPCjz8/tyOVkvq2KWZDRA2iBfFT8VZidmPNQcA9bWS7B7QZXrBeR/LafxKVAX
UpHK0+yL/Vt/uTW688Xf5LDTs+M//c3JnTwdRj4305IRDARUwx5JdE9DDWh2P6dObp44q4GkJLwL
vKiqBmV9btn14Qd75rgy/Vh4fr5Y2/HugxO0Cqer4rwBc8TpsphDYBz8bZ65R2kbdxS3P9bySSBH
8OP7J2CQSFfL9fx12vP5g5NfYDcw4B2mhYf0hrwpPgNud7lugMuZCx2y6mek7Wx7M8UIwu8hjcNf
0IEvI6+dPhU4pToJvuiksD9lhrpyQ2e70/LtX3Z/nI7u5P8GdTuOrvA7zc9fwIFupOyzBL4aJrv3
Rg8MTa4mo83dZTaMxx5wJ8ls8VIcWp4ZRu/ltuCT8CH6ANZkg8LjAN1rnRO3xIFa1xhyHAXt/CB0
i5EzMJIyA9J1htkpqDWwv6yyMB/Jsjxbc8K01ssFxtaFyRimlPkF/gm8aoq3ZXcWIxzkWLripUNY
AIYLsGHBXHZ6Q549+/dzh2TNGdkJSx+/odhKuMAirIyt1kx04NK/d1lPq7PrIUXjkd/Kldk/yk9G
tyrZZBpEb5llZAdXeupiPvYBjH0PYdhgMF9jW4OdybzPoXIHshLbitkpu1hhpgAjYzRgFS/nb6tl
PeeYq51Bn4eha3bkg1YLmH+pwbOhmPX9HWLUWUv1rWbHD2xspYLlnBDxJcbVOwi74bsbxKFmgopc
05FgDUtmkY/cRoGIqaDpNrfVm0d94GW/WPBMmtEa4Y/MgWaVljqfj7yJDJ7f5L0DkUKte7itWAdv
LWLurPLmWD6BuCzHz1YXj02fq7KJ9Cj+Oc4HDfRGM2irV4ERl+ZEWCmaaYIIIyoAhtcCTU4rYKps
4UTuAOBQZloTVj7XZ1SNs8uU4AkIASnqQ7CHEqfiWkzFo+TwLNkr9uD5nhcZIg9XtfkLra+rdyvk
QKc1MCVbhbk/l3CYooim/emMRLoE33AkbM148MU4zr8JQ4AaaFAyQyJsMGXUgbnJRmDnwUtwfp0d
Z6GXFDaENBwNekn0RsVbsv1gtKQLsraw5SjQ6Ce9nl62LhyKG/JJ5M6vXkc2qoDZu0KKS5vhNhdx
/hNLUxdL+P2Wg1Ds0EBynC4BQANrPL5/Irobbz5cL9iDZwuQe0MHzylgzpLiq1eOTb16RZcW/pyk
BU46Z+t49Uo3YT4xJyT6c4dJ0W38dXCW9bPWFgqkd1Lh5TJ+4mzl1m/JnoibYxwdR6B5UIdRy5cc
8oWo8cf0JM2inqP8k16nWyhW9KbD5rcJD4i7zam2i+Z1lj6DECZIHHi9N8+/gCxqmemTuVjMw0Tg
9iAg7rkRVPQJekhwBJ4ZAAk/8CMTKaI+sxb1Dk8ruz9by8DdyN9vKtBjQWYCl6HjiFLiJfiDI6O9
fD2Fv7NmfWb6fpDuWspN38M5kGbgAL2FNsxCz9rPy6sxyzM0PcR3un0n45GSeHiTH5+XGLW7npac
LRrg7k+W5DCXkSVeet4xdLX+5u+OPBtKsBFvxXwTfb7QKTbtCpqtQCPuYiE3jgFwTJhjKEJRHViu
xH0QjGIxB+8eZSK7hIip02vh4y1G6pNqFEjQw1TYeYRhAtAjdL42Y/4MkdbnWvQvpztb8NJOYTyE
UK6JRWN7tg3gAf0T3skONNH5Y11dLrbbDMEmAAcZB+XbLi6EK/UPgzIRgg0coYA+hQx8kdPI1jHC
4ByuNoAZFhl2576LFcIcsHNrYzWX+0XdNNWp4VSbyESSDgTnLYVAe6XzQR9rbLnjd92+W4oLJ+h7
uouIjiFy6wguZxBGOCu7riekqnrpKToe290wl9OqxNtSxeEvgPeKdz6xPVNoctK+leCI+VIlb/2X
Um9cJQKziQZTLIETGrnBhjc5/B2L+ghuQreSp/PkB8OW6qtm2CoN4tzbEhSYiOuKdRJuq6oAaK0x
q3eB0TXifDqpl3BSIOnLANFMMS0B7BZAQE+berZe6aqIIpQU0FSrctcF94CDgHiHopYEU0BAG1fF
UsUrJdIfj8hBY2CWFzSKeLZfVfNPHqStvBpKiqbJOr4XuC3K5OK/1n8u/fHHNHCc08ur76Ex+Vv7
J0WPYGAgHglqmdanubMLcWyzL03PlqepXvgrWnWYwSZJf1yC4yV5GV5/8prmml+5Fw9G7zxnKSDJ
Mzhaiym7WfVSL/6remn95bSIbG6Epxn1B2F80CIqTl/yIHRYMYIFW07py56rvi6qCgqhupTRPNn7
m8HY4wuGtUmccbDvvEkU0eSU6CO5K30MJO0tAJo8F5c4GVxpMjBrgO6NtHo0s7EF2iis2FuAXvHN
AkcqLNZKHCLlINrDEPks+QCBrNNB75tEDV6Y2motSaaqXTitYf2r8ka7qWcaWxOR99G8p5vkc/Cb
4nWJgYWh4v6HZbFQrvI8zXOALKSitFNlQ2a5gEZiBY/g5nmOZnAI/V8srvuU9+E+o2Gdl4Afkc1o
S7r9qJkaNx6QSGvrRPeSOBuX7+RCZ1qM7oMnRubq2AZeYnLz/QirUwpAquHFql4crsBnFELjY+Sk
4w3M9JiysUzJfmODN//Ny39hbm0jCncEM+ab//bof/lXH31UgbQGJ82A/5Kro/xelgNk7RA+wY8C
TBH7fmRoW8oQmbtX61U1k3cktA29JBiuJNi3uaD5c5j8nszpj8ia7srZ/EBcWHkQPLBUSybbzK+D
J4o9ztPTn5bEc12gVzqyD81ZWo9tUTJh4gs/kg9q2aXXGD9DHs2EWAvEia0MsRh6eui/G/nRNBf2
zzNXBv4kQpHYPwqwAPB3jIFhwzQ1/5kFZQV3DH6YFOegt12u5/v6hCF7p2lDioF3U2Y4UN6Yf8Fv
oUlGGInhMkKho0sQ1cTbk52cFSGS60IkEFafnjHf6KhqVntBK8cpcdLt8XzuDE6LGk3NhPzY3KEp
MX/swq/sxyma5n1vsbgJ1W3Dty0radRdSrkiU2kMMdYeyGiBJ+cWtMm3kk9ZsLS3nbwHnXhMgbjP
jC/Hl+9gUwnbFSOXd+F7/I4YhpF8mQBhrxj5GHQxjAQM3TLkCH8CwFA1JzWsVOjhtxjG06W+Snep
P7AjAuWTQrvPpApPegvsYZGxiG823V9NVy0qkQdvzIOcYFYFHHq1kkvF7Dp2LW0DA7XOlxY+E6kP
L6dmpHbMUs+J2WJX0wMZZr8Gz0wgnA3NampuL5RWYUzPhqD/v5I3pFbv1xXxeNz8WjqBDTcRowFh
SAmZjduO3H2jpZpS/J4HSlBWDokK8814bW3XjDcW3eZiDYrQJN19k54YGVfVFu1AjcmnVAd8Tjfk
DW+zzkNiJ/PIv020dqhqFVQPEO6ANZ20SvGBCcdNkiEkxtsKzjVxRNV7O+IA47fTW33akqIJ+Fht
4iy+2kNdVa/xPn0EVaLKfQ2mecFTS/tN9LZBbsOXI7BXYcqsXnoIaYLP+ChVmF/H2D60okHS9GqH
eY6Ti3oFMXLAWcBZjpHtCe3Kmh3goKdLNEsC+3t7KLdCwEOUytaLcpkRU6Jo3byb6ELQTKo/jbsy
QgdC78LwCAlH3n1SsJv41nsSQ/ZPIjyKtmSbp2pRAv26u+WIvmgqqeodObLZDEHmTkMksZ902kxS
Rr+wBbdz236Xd0E7LWrbTTz533WiGNtbrrjOjN0pnMGfeR/wEC6qlpzpk0FPaSgQkyDCRPHEmrrp
wjKvG9CG+WZ+M+KwnabGPNoAx0K/16sC0KvfNt29Bv/Gm/QY/SFv0mHpQhuDR2gcquyhcXg9jnqb
Ho9+vNo9uZuDNMt+gFFp1tXQJh3zDgQ0W8L3/msLv62SD/JYnThkMcfBBUS2J6BonsuPvLWu02qy
yuT7gB8DApEH7Gz9/zT+IQb/QC890sT9p8/xiCxnQ+sXddR73LDR/ba3K9ZsWay3mSAnhTYosRMh
ZNOg+G65B6S7aXgNUJ2Jag28yxc7LY8DccXu2V6qtMXtZnHFdXd0I0S1WDoUuw9XkKgMXvuc2Ehm
Yxk4+dfvsn99OE1DW/exavOkz7cqVvvUHPNLbiKY9qEeTIsGzcdf3m7QAdhiui9q75uh3yJEVYMm
Zlmeg71GTvF88Oa/e/mfoQ6knL99898f/e//7KOPzBXpqK5nlAHqql6+tvgBb6vlag0pot9qH8cG
7lSDllqnuXZ/RjQ3pH1CEYdLfVVMH9WXl+Ds1KPfoeAXgNAwdD+u5mPoeQb/IXSVIdpLxs5eUizt
TpPL33P6moFAKe3gukEw5AtgeGaCFkvIYyieYuTQysPXY/eqBahnMBi1k5QXpw12Mbfv4Je3d/8n
vXGxmigIK7zRBoxyjjfFq5KcwgGL+rTB79HGW/AgF7UZlZYQAZ7BzCIHB4N+Ga6dYPQ02w0cP7Ka
0BcgOWH0dkujwv74pA5DC8N7aWVwCgYyV/1TENSipsCvJeKAigV6VK6i67PUHKpED7FARK26WJpp
zFIAcYkQQ5DeXawe1N9INRg84bYUm5ytfwEG8hbgAuI1MiFjwWkpqYsCvxhKRWF49icKKoUafCRh
U1BnfDMDxce67UqPaCNi3ijckVv4jamvGbtRNcmb19u2B94vGzjQYb5UzrsINR1SHzWQvsCdjrpP
KmfmqfTMgS7nxCmBdLGfj8WK5BhswiQHG+0mX2hsJ9ggvZ00DUc76B8p21eR5oMbdM/W+1a2xZYf
kgOBY+HWqAXSTnSzaDZbSHISrncQYYKCDBD+5k8cwsYmaIEx4W2McxDv1/O5oVkIMBwoRDLQhjJT
Mu88MEluAf453t/lKxKcbnhBlnNu9AxtZfarY+olISnAFR9OJbjbE2NMx+Px7x8/+f34+eMXRw+f
H5mfKYsTUOHoqqjEcmO3N74ggQDiH81R/q9f/pdwZk5oBRqG0nrzPxz9h//0o4/UkUqykz13Nb6+
2YZGhqW/46YUrfoYog1xjRq6rWwy7UO/FR3lCttxiDeU/4nICtbwgqMV8vMLhfYXLEpanzVg46BA
Zib0PDl++uzo8OmTFycCLAb4OoYGGEHs2cNHv3v4LT19AZkSaE3WphX0hiAWWybyQZNknM6vEVd2
4L+X69mqWswUrri28Eh237hNUhsjSSGjBy56GfuJtkmu5XJsNWdKONIO6ebpdqFsKYCgYUwaiEgu
KYbEp6ftYBEOn7bARn5DrdeMgEpElvm0mu7ySna2wpiw8TYkC4PXgqP/aP2YlBBFrVUNdz4NLY+Z
kCdyxhatvB/SLifU0ijHXjbnQGn5bL1a+Yk6KOs14RDjeU1AOohVsWduFeWsXujXXfpNunc4TWZr
8vNA1RnMnL4ea/A+hbsIpDRa1Ivsnr7F2CvyGOJDDvoIGYjWJ9e2psWrzjAEnxUYzpi8/Jdii5aN
8ebjo7sJXnZ+W9BOBRbz+0cvbDh1ItHXEmjbcc+5APY4cHwLiAlcsdAjeyVcS8B1UH89q05//k3o
RuyZ1Q4TmdyieT1kIwsAdY1BuzRGY4CZJJAeIvqM1Pko/n7SvKBJ8b0zxnTBRF7IUYrOJH2csrn5
vEK72sU525vhHzQ/n2zmeWA2oztscml2ka0eL6gyyRSGXyzRrSVYzoTy3TdaXcywR03ZjOflypyQ
ZlFWLikPNZF3fHG2LM4xBrT3GyJxN299jHorJwuZ6ZEtzTV8SQnlVtcOJJLhpjvqA0eaLKhUXMk7
6xQE2446j0OQ6QAFm9gF9+ukqxGVhLd19jmy8pWWXY0EsW70uayZ9La1bDLl9Fz797Aihf0QZpFj
9KJoitVqmZmXEDOCx2HUPAWp/5wXmJA38CKSik0FZpWhAiO1bgishLKtFHF2XeMBf/TuWD6FaEPz
t07I0x4uY+1BeY0wIsNnAOC+1qflLOwBtu4H5Jum2hV1RDpEarRj6tHNxVbBDdlmSJ/XCTE/wEKA
8aFWKA10wExJGh4z7swQBlCDngmrZeipkG1ZuoZ0ypDbpCChS8d+iYq5HJ2PQlwG6lFqAyPN6z0x
OSkBR/eMMN8JSGPbNeDQAu8G5jrGtcWButuuE5GoYLUppTIvqWenfUjgKRxkqapZY4FqbJAbkHOU
OTNFRymEgH94arpJxSGZWD24t6JRMwF/1N3DGGBHfA5Bb2waVyd+7sQAz8vNFwXkstXydEsHXc6W
5lxltnIHJD3zzx2QpM5bDpgWrzDwy5xcTltQB3Tueh1tnb1hc6qH5MCivEEjOeuYgRw++eapGLhc
BTErqSST82yleufBRjKF39kAAQ4LUDi4GqpWf4cqMdDoNvQf9Qk69AOS2Osymez/+ONZPZtq2DDd
AywLho5KYzWquADfbKtMtVqnq0NuMXQOPu067c3qdXpUw8p2x+TorWdKqux1KO6ivsZJvyTr+Pyn
OrOFow7b/fot+fT20rpPd8TUgg82paZy36D6C790PRvK+/CKxTTOb8P5l8fbuIm0zyAKcZGoFeVb
B04k7OtHFvTTa3TLFJOJfw75H0bbow0seznAZ1J+ByR3QRwkSOUUUjZM3qzNndvcaJZoGtawpVSR
hxYacQUFVKwvUwkabZ9f4vDCwfTUuS+xc8EIdNvrOZXMejudpm2bctzHBYnlBhJE4zyVcJHM5f0K
MnwT5gGAH4BRe64S2es6BKwbnF65NcERgHAepxHlUMQlo7OmZk84TbnYPKSKQdfZ4xm4h70WYTsp
Do7Duf/4ILN6QE+kNIHUcSZpcyVfVg1R8Hr+Zl1bGDkkb3aNgpwAK7PlMVQC46hGfSg0dKMfUX0l
kp+euwA9Y4n9b9wA7uN/H3SM4xF9k6yuajLmm8GQbbYCQ0u1MkSFGiVKczfBpwXkTzs7M6s/n+gQ
sEjnmed4cwudysHAEn/3IH9v92zdgSO0OBLLwXKNyL5U0zRZmesZadwxbK58t5hhnoSL+ooSdEFg
njP0XRCJLxYA+Hd21hKhMQVW7WMUof6VVx93kVddi/GZr6tVx3ziEfGkXh1eLmaoMCmnpNLeyp3T
W3PSC6JCUQE4mDVHj1QiUYvwJaOA05pn1DFQyWwBEsMZh1CT0P/eo7iZB66u/AV+SchmQMpmTV+9
ghpevcJIJLBSwx+vXpmJf/Vq+x6+h8uurvIlfoTWcwKP3LWJI7Gf7FBONPPqlarwJp0MprHt39o3
o11errrRZ6azwCoKz5/csOaSlPOy8nuTmTkOFbJWBMPL1mDEIcLTBJaTqaSDTa7y2vlVR1C8Yv21
Ho+BAtxIhpcLeOoHskXyXJctuC5Vp/91vAY/hQ7OvHfARW6fQhpjJcTIkeanQm55NGv2r6sJz7HO
uMrbovCn7g+ROV0ULtQYGFZ2W+xJff9PpE0bbjQyZ8kMfY/a7s3UwXybxKsvhUOZXoKL9XaZVrNY
m37//B3QA7jBLKC9+W8CQax1QjgSb+LpPgWTvc0AfcE+HFVs5Jo4OoZqN0mWZk2OjHWYZFWOxiXz
11VeLeDf0xyNAIReg6r9KuXcjOlpiGPTAfCtJ+NrhX5isXY1OfI1rSACGPVPD6y823WxCYoMXw99
w4A7xgpwbFhJjyYQvHzMwiAHNGzUpXkCnAN9yekjAX/p26sZQg79AHIOpN6sv0jEJwI7enzvZCh/
3j9Rjo8xxJ4m3YhAREex3acUCLGZmFtco5NbbNquZCYhUaJDiGhnm/BGWaWxjBfTGsgPRtYfGB6v
8yrdRP6QW8fhoEQiQdrqSoplip8NXWdgvHunke5BvW07drw1byhfFRQMY/bKNpEwnWxaOhBbZDSO
jjCynxbZFt56JkS1wa+1WWIhQfjRK/LWoXidwXNIoyy0Ry6k7+Hz3cKU6xEUB11tbPIoDwHID1q2
AF/37A+8OgsraGk8g/cZxQFt381BixbZqxEyDi8vIexA7m7m0mLvOXyZoYM443Pmxe+fWNogXG0W
QR2mNj6A25MXtpxrRYWHJYzO2EXjDSYfvNl5+V+07ftvbh39n/85+hG8WK3PzhjfBe/eDWX6pGv4
KnlGvnU8MM9rWvlKtx0LtHG+mCP+HIPVIJ3A78vpp/DPpF5cAyWDHd3aJ3RZU4f+OYiDCJBu/wlc
rl82hhrFPyGzf/n3D/Qwtl4MAuY+h+8RTRkofCpyf9Cddiuu5+jgYITbi1l1Kv4NZqSDLi9dVwSS
Aph/xpf1dM1IQ/D0QD0czcurQdDW4vW59qWAnM7WB7Wz1VuJORDSJjkrBL0Dsn7XcEWHJITgr6ta
MPQz7m3GdQmUUAfw38gyOQv5HKwJb7SDosqMZN60wtrhXadJLcZ46Ro1MHu9qWfguTupp3BsHKAv
4phjjMbjkbwxZE2uusIm4O6SfG7EtU+GuZruqpYpeIGKycOn7t3Zem6ETIhCsKgQ0/WkdAVY54YI
KlLGyOM4P8Pkt0dHz4jE7Qdv1iXcbqnkv4cfw+SxkaauW3XCtocUnc5z5wEcItbM2F/WzNqyMlTt
ipaXhVttzsYHxIHP+bcmYK6aPFlMOXYYapeQxm2ZB7oQQmyeLwjoxBTRwCe63LvL2XIxGU1mFXAp
U5AexJvUncK/dRmAllD1wE+phew7i6wYJqcRC1qRfG6eJ7vw12/MX851JWsiDjBEamWWrldnu79O
XfF1vPi0jBcXml7VY0hK2/dtSP7so3y9Qh8U/JfcnZCU0aCKMBzm95APl+LK7IQFCjj478DdtpBO
JrINurcFL/NWBP/vtyX4GKEPtyDl77cl5faTB8GjFin1E2tQZ0hocaLZQCQ3IQpv7U2h6MqDazc9
EwJAFoYxkpaXmT0B1rzLRYtE7N/hKYVzPeJ9zTPwTVlOebK6zyl27gc4YvOlEVTmyYPRp2HFrqpI
5eR0CPIRnGDNyHB5YjNSFuRAamdsVmSof/NpQGKtiCrgb2vEZavGi50cv0myB8PkUx0fVZ6BB62R
U+TmEzr5T/XtJx7DEFQS3Am87K1ByUFwvWkPJlbiEtiIX8JXt3TXxhmflkZymay0t8JUqPOWfZs+
/OrR1yANvrtODUfd/Tx5+C55eJ189S756jp59C55dJ18/S75+jr4bgmQY9kDTLi4KIvVwSf09b17
98z/7if37pt/799P7pvf983v++b3/fuUN3SBJ/UBeRJeFotstV4AQBg6U+TJnQR6CneELKXKnS3V
XG/XM6D34+OTE5tCCSpE+yxUrPefFH539/j6xEXP8wv4fS3fqdrMEF0pVx2l1MK+QjjF1Mj4/4Zk
fLgvNetTJsM3t4/uphHkqpsgVWEWOCn1XTV//T5IVT1OwBtArAbj5u18bLgnewP4iWpT8LzZyY7/
tHNyN98xBySWBlVMqyShsZpL6q55bb75caq+iNW9TM0ptZ9ko7u5qhitaZGyz/mV/UBcjV7Y1ejH
1jL1t7G17EMPW8t6v7Wgs6D8EOsShCzJl6b/bliSCHC5zOtda2OzyFxxIK055oWS0gGQllWIdENp
pfpzs10NtZsTZjOm1lb+BTd0K/gH9CYwg6VD8mYYHx6uUQQfAfM/SmjiwZ/T7x4++TbdT9JH6V8d
RxegA0XwgnhA3WqdOxFgg5jLqdN9YIKzM480YupGu2o9uM1PaaZQK+EiR1F4mO4nP86p2rDjcagu
3/VlA8iYN01qw3/oudKW+X8UE4abxZsv/SKAsHhPL4p/1Fh2BF53/KfkJP+iC/CjY6n76a4L4C40
qKx+DrKddWRk4+GHh64DKv1ZuHXbAX8F9h7qAPoz+e1b1b+Fuopsk57Y7liKj+3U7iR/uoMd4+cB
eh8dVCH5D4ZjIyQS6BIRBo/dMuwZEalRjsGz6h16w1SMmg0nLib6KaZTCuOGqbjPV6CtDAI3xvay
iHrezJ4MB78keF5fooWfB5/Xeb4ylt626GUfHliPaox1IGz7nzD1/n+AqWdl+5uj6gUhGWTFmpYA
FFXOJ9eUtz04jIF7g7EnKBWArp+f22BBszxwCSRfUf0idlLq91uezODivIsuzv0fu2SfmCkDTTmA
+UO+h4V1kAQfo12BwwFh/YtWVa9LJLrdlJypdLsORSpH0IS8FWTT7XxC1fZNEZZg5Crzdx8abSvT
7IfAlesI5ros3lWX60u3v4FQwEhEkdlrSA5vZpicCltXHF3pLbAPl/vo3jknXR0mxkYDieAkIBWB
DBvDu7s3CGIkG9QxLJuhixM3hzaYorpOa2Ehcq/m+EKoJOKTYJ4e75+0OZQmWBqaRRyCxtEdYc5m
ZXCBNfsd6hqElY9U1oh4NBlnLhifzeNgRPpLOKipeBoVVQN5xlWd9+0oNoRewdCW8y+227hngrOv
GvFHD07MB8kZJpwI0ST9XAVqFPCVJ+T/OqWAnPD533Y8v38vdivgzli9n5Gx1UVkyNXQnvtx/uO7
exPQQ0QcRSB0EwqbLWn+D+gCwBWW1VS8lk2HY1TGmcr5y09OYqCAeFACANL0+G9PctwAKP1h5wEk
08z2NP/8b9F7Dcqc3A1vBe7U70wvNwFfOLqcvMsIHPAm7ob283ttF6DWYvzmi3eXs+h6yIwE6j65
nkFNub040QbEiY6H7MQm8VKl/4apREOz0heOwM0DItKpsX9Uc7nJuXHn5dxcK+rzefWT4T2gNtyz
2WY4B3fzulosyP9rhxDn8vfjfuEOJbijA3sStHsKJdjpmNb5brqX4hJi4iv0qq/rlRcXaWmIEh4v
A1heqTL/mSx8hQnRZa82cD7A3Yp02TBlGlI4OJt4jfHn0K5eJ6Dqh4RABk0un6ek6t0OAtldWT8s
DDJ04sPBIN9KDucJZLBqWGGLS8MuVJQwhY9XukkPyW0XXFcw9RSKGKPFtU6X5LBvZnX9GjwHWSS0
4QLrOVgzrkoKjASMwWLWrqpeVudW0E4O2uLP1YVkSu5z2He+Zak0kYYu+2i162tKh85HLIlxJQAc
G/abA7+RmJbjhzKFeVsWQDiGt5iZY20PblneOCiLITwJOpjDjQq5KZBqOIVR/vWIXK8kLlU+ojNP
O25nwNWmnFYZ3dPktWF2+c4GN1Bv+fI+Fd6gJdd0LGCXOOZq7xJ8zmIZtH4BeeeDyDo3lnPEJbEt
4dxANPCU8R1SwU1UtOiz6lJwgSvxdHbNZ+TK0BXUuM9Bze3aeUy+FnejMNIP3LHx6Ca30Z2Wz6fb
o0+XPDLry/hFJ7BxH4g3TDI88s9V0t/dxGY1w2iN3bepV+VJrz6QTNw9WN70WY+1AA7yxseCzyMq
eBAFsaihynzQBuymd+auP1KBsIGiHnpqIbjNZ3jC5ZGTH0u6ORdHYoL5tmgKbx0us+crYlgzFhxH
QsHxLfW4lS0Ie8Q1dgNKm60ieODJ5zg5sXTtWdANBhkA1hO8+VzayaPqcr5bHjbqO/cNHrdmJ0qS
0y8iNdSY3RQtzE11DpnqL0sCsEOr9OT1dfur1iRye/GCFrI9XEl5+4Fd6LcFT8eCcXiHyIklG4Ew
LYivayJWqrOLa5KBCSM80I/9HJD17fHSqzPZcg9OKJ8EJgKghABp3g2CPkyMSHtmOvfx+8Oh282e
+6dT0CPyuy8p+xglPenqGGcH28+26NRuJ4i67Rdxlmjn7p9gZM1quZ6/TsPeHMHTZH937+eBuXuN
W5xZpudoygO+5xmCI0GJOou6U3NBSPdwcTvzGkRB8+VIan0l0gFndvR4q89VY/f4DhkhYinAKdYG
etNieYn2glNz6BoG8BYwojH+DVgIzSbWn/dlLTHTv8czz2Mc9n11I8oO6mnLILeSr2uwWb6e11eG
ERSYo6tquj0Y4F4MN1ILFn5WrRjaAjxp1pMVALrDSJo92TB7OHW5+MO0M6Zsosj2wKJ5BeAC2pFb
IJpTwEsp4G7F+eDN31BaAXBte5MeZQfWe06nBaAAl3bgy6pYKRc7ybYIeEjoDicPXgNV0oXWfvtT
5eWSNIIx/nw/BOI4kqeOYBl6Xr9D5bTbjbcc5CcACGqM9AV7wFiB0vclQdAxQWSsBp6qTWjIY204
oJ9HKNVoSFCwaF7DP4fzs6Cgg/5A+EK4Z8NRVb5bhTWijD1uTFEMMW4E3hYCtyI9MAVApl2YOYDy
iMgtKCBBUWxxPDOXPpgmrCpJL4rGf+R9Aj50OAtjSXEA37jB0AR5n3D6eoE+JfeiYsYj8oqu54bK
0EEHSq/nhsjsL4a3l1Sh40lh9u+YgLaCauiVlKSqgCyorhPJdMHBowDhysDBGPPii0HsOtxd2PsV
SyplJB94d0BVUMELRMZd5l5XvFcZxOkIwpHZW2iActk2dF51q98AWQDuZ+g7CHf5XXDKwGwHaHTk
QeQJJ0yv2XNTkiVeDskTROLNIDz7YmmmcZQAOPOkIIgRs2FB6zVUX3JF3GIC4JzV6XpVMoxCvSiB
wV077aYoYQzrNGR82dhMwaZ12PHDBEEQwT7JYz/ef3DCjtVm8Bzy92D0qyEcC2hiNUfd0xcU/jZf
X56Wy+T+J+4LUMggri2gdtigwQY+T+ljL4COa/hUQ/VnGfcOZsYrjQknoMMjcMwG/YORfD4F/YdH
EPp76Wr00/uf5GEkK4+EMHugDjaCcWoaM7mntOjoiSPdzkArBGyfEB9HcNmop2XyN3gWjF6MD58/
fvh1nnx84D+IN26WD3CBCPukmO5hVuoBO/VPLkzNGRGs1PXD88Ojxzl/DVCfQKug4wIOQKFoFksF
vNfp/YJzJpObuSlEnefN4rkzKBxLQ0HfIhw8enqxrwZREdqHLTARdNDwMYansqlawC3qaioTJ/c8
S5gB/CawPFABW61bcdq4Htl4X8Lo82D7DBM07WQgbWoQP5UqglpKR6Cxhh/HM9LuyZf5/okOqsWj
m+ZGh6kDqzLHyUFqTtvXqZukb6pzkIPq9cpDRi2UGhPkRoi3BF2qReeBcBBmGVkxRdUp1DzE/z4w
ba0muUwWqHvv8342Jw/bI8zfA6eCDhFnTPV3XXENIQpucfdVKhmvRvMRBBOFccZeZTw7HlQhaoIQ
+5GRMeFvmC+FuQuxx6jIY6p69vDot6TAsLMiYIMK58pQVYO1qfRFBDLZuiHTY6Qqdo6mWA5oyJxb
xydyg+XJAkKRKs19FQSB+aREmmt8iSlvN3KMfwgTJRdAwiQCZCPzP7PV5tdEfnbqCBCtfFdO1hgq
3jAGLrM/PT5YjOgI4UV8jI//cATH8+jR0+8/Gz3+w+PPRl89PPps9Oj7r1kd4D4/RsQy7MtKAAbN
ny5dWzhPMtAp3iLWqH20g2IsJlk98Jpn8M+54ARfDgKfSxHQgHQAS1GaVr2Jjfs4TVtzLlYc3QdD
0DAlLrYG1oGrbrTxCUDg5fx0m+GsWjbOsma6OI4BBhP3gwF4Cjl/TsN72IrQ0iaz9VRwKd3W8oyX
3OqYRm47cddu/Ygra9WgR7b+thuZWJcabK4sjyqj5O2gDcmLkd/N64wjLm1CD8cQHjavyQmJYzQx
204xgSMEZC0kIgWDx8eIBRuxwRzEBO+HoBr+Fjl8Nn7ydHz45NnLo1b6QVSoOwiB9ElNwa/ojVu+
WxheXk6T7N/oSsBulX+WYHAzxgKxJoDG4im7BTzHXryy3lL8p/UFj2A92+Ls4MRTG00E9kdz+XQf
ZLeXOQ4LzZZ2h7pR2tmVEWURAqIiwwQ5DiUw4sXN816/UNGV4/c2bmpsLnU+MLNZ2sOkuExOK8zj
hXGgLovJx7LyhM88uVxYeOaiDcwLnLSIduReqFy4r2GfjRzVnd8A76GDgfkvmKyg//kA7GbwJySr
cPe4dvzY8Z+K3Z9O0PV2dCiymH+LzRT2N7foVQmBf6SL1XDgUpe66GYYAewCVykg+HOIkLx3B/7T
Htnt0f2z709x6bH0HhQb3cN/JCTUq6mrnup3YS3Rz7v68Lt2H2IRqa45LAnfUKQ7zUX7lh9IugGu
4Ct4+wppRolxKi9SoSzWlZJqAze8qgmaaqFVEHiPqWq8uI4fLsphYBDnzvJ9uw2LkuEjZMiciGIj
u1hdzsLJMCKaQllcYPz8wlwqlqIHpRSUECyKL0HmnQcxGzItgp6gw29+gyh+nx//6TcndySYMvlx
enff0DP2x9usJA+6r58BsRtOdXqdZF/s/6Y4/tPnJ3e++Dz/win3uB7eYXZbaAVOJgmABO1jIcZ/
eQGp10555r0AEh7S2YKt/FHohlbQw9nCOT5id1o6I00tLHGAPE43NvUQCWCmIhXl7x9/dIQCblcs
h+AMZvjae4T1gPCEtSS/0Q+gqrzTw8LIOVgbHz9xKneSHtRvg6lx9+u+9H5oyqkvI/ued0rKkxro
3Eie76ZvAcsjwf5CXNTQOYyrofmywCpZNSpHQ51Si/yU4CzVnkrouwT6I8ysZq9zIKLWc87epQ26
PXKqpPrisLEOugmjFOmr6Lnng45SChOvX1H7Y9h1zsDn26vo5ccHfukNvdAsi5axrRzNBPR8Nl7V
Qa7c7yEcubAJ3awiYqicyuxLqgdBIK0RRGsuqAXnnPz5559Td6QQqUvSvXWz3GsuzG1vz1qn94pd
PBMWnup2NBrFNpGuoZgjDO3uYlZMTH3LyZ6FsFTW3HQ02qP/i7b4D9HlC/PdHqS43aKTG1q7SXd1
J72J0w2HdWt9k873JmcoOJZSes6Qw7byd3YVAJmMss51666IoLb5iInbtTQW+z4rv4DLa02Xf1Uf
WZ0GVTR23gHw8wbf041K9YDOYFWpnB9j8a8BXasuYR75V3h63M6z5z7R79CKJ/0/NjSVntwB3Z0u
n7NSj8vdJbWMXekTmy1V1UVKhLb0iBpCNQ+aL6nHJJ25CtvSe0SXOiCIfFL4FkRCANZq+jMxG39e
TQojqoAKdNfWA+nvJK0KCzWMhR0IVp1aVPCy0aSohA7QvPhd/A6iWUPtDEWkQkAPItWPVgWoLWvp
AIWokH7CGj5HfgM657jVL6nEtaZKfRunyqDw8e6v9k8CbQcn28XX+7u/8jS3tjM2+TplfqtnU0hy
cdUeqpTIaZgXnGERH6OcP1nW5m46NfIppCeQUd9KLAgmedae1sulGdiUfLR1xQQofmEO6niOmgz6
JTMExXBXYckwPTtrdqGQmi6N/4OvBoMQ0NSOf7CpM6ZkvDM9WcpriXFSXXBriRhPbJwJ/fcadT9D
d/rYngmuZiQekXYRvAQh0TaLRqgITMEkRZZq8hxXKb1p+IdSjAVXXWTIjsYgCIIKFyC2qineNHaw
h6MdfwOynNVlG88238liXEObOIL3bsC5suCsbPYm9Onpn0JSpX0Neb/AqoiYFKh3ISlWkr1idWz6
QoFX5pxNZGracp7ah7Or4rqxDXVOc5yLeUNw0a04oFyhzNouQkyuHQNF78/GYK47gB5QPMdBlroQ
RMw2Xi100vH4VBXoPY1JNOx8RCascST16pVr/hWqDnAWMtPpYj0D9GE05cpQlS9HM7DBQOJq355h
pO74LL56BeM0bRbL8zVlLEfAHghtQjTaGkIADNfdhaPB8/2zRmuyun+WcG/xir9h4kbJMXoVmh3T
UCUYPuO698VJa6u4Kdr3kgGa45yuNR4pR+57XuFZcXk6LZLpfmtPHU9d8JznfTOCkGHYqJDO13YI
6sumOYXVQWSuzUlpRiT+DeBNBLHSHPfducvkJsnXDrMHpqxYhUnlYBNwutuF2sgAfcaGbYxq1kQ9
SrIjrEYQJ8ARjfUvALtqJOqCogPn1m8gtrHJYisZl03j5HeAeUwaMk6SMII1uN6hMadorneZYEGu
JucI2fIEEYU2IR7b+aw+Bcax67aObtjyBaBmQoHewBc8pVjglIQejNphDL0O7QBSzSJ9juIv3rcl
Y56Y6nelehc5gwry1tp8W87NFhMeiQW4zEiluKQjkMOPcAua2nhG3ByxuC/jIuLxqiL/ERsZBdom
5XUMK3VFBHBVkGsie/rJ5dcnuaoJJlvom2PYA1IfxAFTpGT7mJM3LbhqG9bkGLrnw5S1FTUFQd8l
mTnkr3PQL5oNDSml2Hp2VU1NP9/RBoYHF2V1frEyT6jbALZUgFEJoK7Z1iFtQhrCaX010taEqp6s
ZuNvfzh88uLvsjMtbLUxZMjd7WwyX81iL7CZuom9Ih9KX7uyJHWfeT5i0Pj0Am67WP8I+2U6NJRq
R0eHTx9RP921Pb3/4JNfacUdUeBmP3LQAC3hdpTdGyb38l/gAzEYErfD0XpzfQ9DnLxH99uPHuRa
9Jose5bnbEoirgR0TXDipiDvm19Px8+/fvrkuz/m7UUISSCUeEmLezbtn2WbxyHSV2wmC02U3x0+
efzCLPiDT6mL3stHT797+f0TeP3re7nnoAFGvskS8m0M5W+IdqEt5twAlVpbxSvMIBRqjjKT23sv
4SOrNEsy+IV/4YclZvW12vMIINRARyt2mEe6UZf0pcYPIQC/2e219NDrA/G1Hf1dtfgGZgHrUJhR
rOM9aCmOTUFMiIKhdDnxF54uz94vaXf98tGoPCiCFoM2EAQCQHg5gkVsoi61rfj4RVsVjFXr7JS2
bEdMYwu6IQpVezbfBnOijVAbLmgrqQaoZ+ZKI7BHoY7eM7BFxMKMHvYgbMU7GAXDCPsYjjUekq8o
EUJ7r07TtrmkxZOULQgd/iJRjZ2AWRE7UrQsUnhganL+vxE2oDc+qnjsxodfN9n4v8Smr85skzEF
0vlPTDA9ZVZQyLWFXpsHhm3sm+fOJNVXw+lPD7Zo5rSjGfh6u3YCdZitIo3eh/ogJ8HbwEhKaFlF
H9cz7j17ddhljPT3DjUHq38g0QmjgOlC6QjTRZyFcp9hvH9z8GD0Kd1f3PUFRdPiFF2Wi3eoaioZ
42eFrh7sLtbPno/92OESXIyR/xLeB/6meMglHKD0oMlazEd/+bEZvenSmG4xY+qZM1Oc+I5e3W3s
t5mvaqbN/jC2q93w9ihaNz0kDIcLj4iYh1v3IeEmjtwcIlw1vvUjnmQxFuAbNLuZcA+DJdIdIVzu
ZIWsj7ocgWmha1r2u/KaA2Yeitc9/u7ocMnZRcStPmtNqgKQrC8pafV6gQE/HGUge4LwzUu8R3bU
kDWLclKdVaiYM9f2qWn7kmDeNiYH7LTjp4d0NbWM/nbDHnlI27cbUgu+LWbVdH9TDjnFHBy1m0tw
hx9BlJi7KSdqktuClOLfRROUuSOdnQpudqgraHyYivr077OzxZBrvuEhTx+1QCO2lQGA8n0JoC/m
J6N/eDIjwJwxREWoDzE/1otFDWghUnWCVWO+eDppqPJBD0SrJ31xce62F38EAgkoDWB64SJ8uRg7
HsWOPeiz7Q/FphNcUQJq21PQDvid9SIiVGtMMcESZ0EX9AeBU5F6dTcdcV93oa+U/FHKs1zoDWYQ
WXIzZes5bH+/D07gs1Fa0Yufrh/hj187F0GvcTybioU5QujTPSNepgoix4kzSoyBMvmmQotrQpH2
S8p9rWrG/Kdz1NIYqNvdayl0ta9t5ZPUP+53u+fe0LG3IhyZ3vKfWVuwwpLWLGrfB27vLP2BQy3K
Ez/ZP0F8xL/lGciZ3lz0Cfdd44P7rH7gAbRAT/fAg87jf5Q41bkOdvjT5R6+0w8lhw0w6sMEkz+f
UsyMOu10kgiV1sKmGbJPXKJul23BolIBJY8Qr5R1aOE8aCnCgSOQ8feLyJvL4ryaIAZFATHPzReh
/H1mbnAzK4BTq/YszYTViHn4dmP3Hm5+OFTzzxKVWXCyEp8xduPtOHO9A7eDC+ZBBsdWGHHk4uA3
DipTP78BcJjBm+zlP9dp4t7kR//zE0ww+NL8qlYUE0lnA0RBs24bNa2DW8mTp0dmatF8QInukst1
AxRyWSBYPHjHoKs/hgDYAMVPhpDGCb22EvT+mZq6Tq8VzihjSCkjSFAD0vB6MRpEIrpd0Lf5x70f
ufQmlDZJMkthz4/MIy/UGaccw3OpXIl7Ggxy9oEN4EXKWFJs8Xxqf1IVXso9WwfNl4Qfp4eXi0PV
jPn7O1snqJrGYCpIEeN7BTk1OajYBu5KEQCKKotLm6gH1+ainC1KBGKZlyWjkddLQISVwK1nj58l
n9x7kJSX65nkcrcBjpjs2+x42lwJ+79PSfT1kiEWS7OcM7JJ0IZDVbjpEOnLfmVVAfTWXNTMp4gc
gQ8ipnYHgORXBLI42CpX1aURsYpLz9GHOzJCWYNnRPxmKvD6OAerj2kPIimd2oTdDw3RvYVMno2R
MWblLogSGObLH9kYTWfiAlCCJfh+//mvLvDgagm+1kudQUkrSk4h3yKFt3brvieY6ag+HY3HGGEx
HoduGf4NJ/Y1EHtWn+bdrVwuwavNlDbNmL9v1gjW1r7hYjyIqTIz/xsmXkxI+P/IiaQFFBJ26vj+
vn8r487Bzo30y9VBjQ+RmpE/oeE1eSzxWo+wr3C2w+WuWZul1lgwuEdgy5jaWmhMK0r1RATQGaIl
BY5XJx45bA6xwVDf1ideW0xnZprgDEHmBV/Z3wN9NbaT1XM3hvWgva/4rRivDX+sr+wWkDgCcpWa
OuK3QB2G+oZYXAeRWn+++aQXIom9A872w/q036FM7fXihIc+iEwidc3N1bSarPRc0e+wUD3xysBP
r4h0C30z6U/Nh7gYsx7vIMCrqYTZoi/QQZoOLTKCH3H7R8gdZqQQOjT4fKGQoaGZQHN45xbwmw8W
iOlfG6EKQvDooiEu9rD2ygmLugBfFhOAiMPzmith83KKp41E1wN8TgVOzhZUzHnakNeVIHaKS4R0
qZrbCnHEKUXnUIwufMWplmpSZpijeo61oo++SBhW9QYmfSzPlQJsOblEoBXfRtLjlucTCoZ5R5bg
TpIZAQae6V5+nEv7OjjenpimlzRPWGRMvu1EFBbiwTqJGPE3mEleXx657ePVRYUHDHqSoruFjby1
HkGZOIdIeDp5qRBkPlUA8YCnJbrNkTAxzTE4Yn7tICxo7iZAHOwebHYPz7/MklQ7QreoeS1k6TrM
Aj8IhrMKnFxVCkh2uyvW5xcrumugZ9J0yO3BIqCXvOJMyEvgs8WyNncSMFIMxb5vfQQocoin8vG7
Ak7yhg+fW0SDegdwCsoJRG/gkrFnhd6Feetrt2Ywx4hNEfuQXoyEAIaJfUBc92464jsXyso2I0oJ
Wqlhcnnw57/6zBDRbS+jbNBLlHx5jHyOYizkfBKJ1OMJUKGWNVkbxp6HYWbCaBUD3UN8Ejn8w0Nv
zALyeBwxkHZlDA0joZngOO96+4QIoGS6W7LhwL9UO/0A4Q4rpL8s6+0NCwA2gi6ivGzH6ANvRH+h
tZRSQqEd6/hkEDGo0kVdAICZ1a8A/uwKoYGBCjmvaVdHjhcUyWQDzVibSwScR5rVucDbRx2RFewK
e8h1iINMjqYm8SwOaTg8Nt/voNQbndk88cau83FVL3Zn4POVqO/Qlxf00v8fOSo9jhQMU/EjueGC
G4135c1iChhdHNCK9a2ZUESYz+LallO6KVmiQUYlVey3vDJCdiZFLU1UEaYmBgiVPYQbb1Mdv6CN
lmhglYAmA944uPWld5101NruopuNgGp5X10UDW58V9DXE7Rvxsde3IR8N/J3C03LYBDtFdzOvetw
tJRLi6qUFHbHieZAXpAEBoJvw9eHtEl28PtqsuPksPN6aXbGJfteu4ozya8itjQge2mk8hppHA5N
sZI0rORGMvLrJHa5dYXsCup8uGWTm9WbrRvy2uUpxA8RKWZZ/1TOEWN3Xc1Wu3YHO7lUdckhMjqw
KtGykQS3big0GiMJbSYi5jjgMzwmttMSMczxO69WFkjBMkwNDw3gmyoIOUA7prHx9xAjJcCr7Zok
W9BVyVKecEArtjbVfFKysg/d53Fgb6sisYNw/VqfcmiftMoxLTujnRzQUz1ICC78sSuN0qYb3Nbo
u5JuqPsrnizvq/ZBLmdnK3TL1p73KHvIGuaUweUK9GegIPNWhYYtbHVbuarHldMqGzO31pG+qMxl
Ho9hMgvYWte0wgZpwyu4+Yn1ejAIOLU7P5T2sZqDZX2lkk9x7xlNCg5ev612yVEDuTVzzOlB2k7r
R07iEkaU76KyQ1iAn+3qDBw5XYWBoq2eMn1zZ9HdBKvp8s7jTw4OUtnXKUyh1NR3rFnD/EYHETsl
EQ8RPHoPBHYihtvPXUHApjZoBj6HsEw+hc8igribl7N5VK6kWVWL2ON/4pjIdpPcZil6svc7PSVk
ZrzbWQui3LCA1ze4LrRz3djL+KCDNMIJ5sfdYg4X6JN0OPz/rlTmRB4rCRDLaMkB9PimUoAOYpiQ
/xqHSBwEKRCC062fYwUnHidNR5De1gshQP4zKCAHjDtr/ALEq81/XU/RPSN6lIb9Gi9L9C3oU9HX
Uz4LdLVu9HYYQ39EQ9u9DWlchVfDh23Ssa9anixKAKBbJ0RljfnmNR4btjkpAGgOE9PNKUCHryd8
9/F8DiFO8rMEIxUlFufvQX9XiC4V2QFg/E2L5dQZniYXlRFhr8OjzbTkw/+Dn64SjILVkGAlcR2i
GdxZnu7kkgjGESIvWgArpefRiSNu6qatdWU3BbtQxw9OIjsdSx0cAAU8++P4xdOXzx897lkncVcJ
iCFdvkzzdr4u2wvwX+AmHj39/tnhd4+/HiLVPRo//sPR4ycvDp8+yd+nWXDAclNneAuhY3XuC472
pzMuJpCpHentUB/GxJb6OCgWA45TslO2w3zsjM7aNbkBsHmfRYP+xEWGodkPIRFM5rWvBp23zRU8
FCdvNaJM6Z6xcELcJNvCYUOK4piyfvft+OvD548fHT19/kd/45Bx2Ws8vAvcpAc2RzAw++gCf+id
IceJzVVATyJ986iaTyMBfaOPWkw2BTTQvp1l+yfbqmMTtY+Cze6KqpvKD0A6eEOHxW5O3zUmTTP7
vZNII0Q2XM5KUPJnlM0OexwnT3gVZHChdfvwtOjS1f3y1Phz1tonZVyrSHrWX2Cl+6i3HVfqbRHK
JtLh4x09NfCL2IkVn4uzrjnAFj4kMfvTHyNnptA4QdNLdRh6FcRBKrUuy5uk3FMNuFtLkGVcin/A
TbNhQ8ie6jxdotqJzhm1AwjS6qg2tOxC244klxuIMroXrXuAD4prd6hLBUJ/DawPpH3UKgMeAZ4q
xb7ZQl/Mek40I3CCQPv5CGoaWy0oOYcfW9UwuwSeADxCk+lsh1Qfaz/sYwcRZ6sIsOIWZpKgF2bz
jkXXfBNdTfuOz13Zj2kazubauXUc1fl3Kz4whuZsfgx93j9xiFkIltWOxIF4wPzg4AEFjM6PMbmc
86yVrYagnlF2aL65d7LR9qAv5vhF/FruruZYaKjsv+1ef3xwf9tJ2ayRaqXciyultm1wo94CH8vf
21pu3kuloVRaUcOHyzakdu6wY8caEupSwSqQJG0YwxvmGOycHlrCsirfljEThQ+yb226AuFRMocq
nSkfRBlkAejxKUYMN058Z7HzCSV7Xl6BczdGmaD9gXHOLsxN3SHlEEA+Gs2V44C7pBfLes04DqdF
U00SxRztZR2+lz6PGISEv0dX1bcYldeU4McCEfG1tcvYAVLUB9p3GPhISpt1K6YO4KSUoojOSdmj
q1WOziGn5uWsBDdk07f5Gr3bwWuGzWsNlDHF7fzBVDTURfAqKZbXI0+B5UlvtqsHXQtwbOngRLvq
SRCbbw+1HfBWFHu0hWeG6oz9TpHhYLP2chv3DYuYEFd1qk541oiO6TH8ecWQRq6rQ1tLbi3Zmup9
Ab4D8QO7oEWryExsGm9sMIFFVnsxODO6lcF2dgL3hdbOb4Ktr637siGt6MYA12DkRF8LvV0amxRI
KhTVsvBrhsZ2Wgr007qqKE3Ntf3e75pnkSQnCmcxBXberQK2zGBI6DoQqQB+duh5Zve7trfYdOLc
9yHDILEjXNVY3zcIMoC0o1MaX3kGiScJD+xt/ZqcypSjoDXXzu0KXJaTi2JeNZdNkmGKD+gMp/mw
nqe5NdWKg16netI6BtrcVAXESs8Js9YmbyNvQnCGM5yqQn7EtmHwigOQQ3jJ1upp8vCbo8fPnSOK
GQFoUWfXQ5m/RtXtf/rV42+ePn+s7dtLw3YJlUce0uRJemPMX39RmBlgr7xLnrdvkKQqdGkhlS/g
nJE76bQyc78s0RAM/PaieFuZa9CQzxHw0CQkSviIlxmZHa0pEjauMjjBOC9VMCKfGgr1u0qJQ6yH
5FytJ6ooOee5W9tRgtGzeFg2pZ01FQyBZG+zk0BEjdnxHLMkXo141oHlv5p7MRHSTFAJBrYt5URF
3yzJy16DizXOLoap8BFVIPkKoRFGlTmmF8V84h83+oJjr0C9NyJf7RfgvTlLvJJ+R2mYDsvXnj4X
HGbFq8jdwRLyjkM4HyGEeFs1e8tCeinexERhmInnfIsHD9rjTHUO3VX5EECnSQ3huV2abwQpzvnc
+UxeOTSCB9lgC589U67HZa99MGonTtsXYal+b3x3zbAz3lbQLnl+RVSJJ466g4/XRAy2rXWhj/XJ
qaEdyfQjLnj1cqwU1ZCQrFwp6XaHSu9okDv/Sy1zRnhDmyPQpscUp1rqlH3YIaYuy1n5FgxUIh0i
v/ENS568mgiyGQmfXv/YToAHLTTgFnc60sMJzzW0985AkXVtPxgSvnBrAzCmJ3p35/rY853VITjq
1NyoLzx3caFLDQBofbsN725aXn+MtA4OT+sGDOV/z2iIArNPgdrVZbXig8acj+YMxB4bZgBMc1eg
DxL8wy6QOp0g+oa4eBM9bEche/KJpXMbB8UOdKlj/+VJJJuYX2KoYgbzsA1wgfQJ2AWaoknJMQwq
h2xC7KXCKDSL4u86XZQlWAaLxZR51E4QEOSzn7Cw13mxS7nYRqUQVHnkwncqvx6iSvZtet/oFZVo
p/WqIbOtDePaeDaMFKznjbdqZF+0tQXb7I34viDlwM32xnb7IjxNOu4e+X6bNq2mTyuWozaTm5Fl
R2IxFc6q3MQd5WCQ3tRJ+ymlERS5CWKkQccI4iaJnuguGchRZGSce7Uo/Z0KX6TgdYqFlgVQXXSa
USYvyrdoR+CiQWSv5RphFQTAYjoVymhTq/iliwa3wiAUz7W18t3O+QgpzhhySUXuHLH/hJkbs34D
F/CAjhg4WTbC2onodAA2lP6SImAhQr4SLF85QQwdGvF4VS1QvLd9swItQgqjfwfdEe+MQNKie8+p
IVpC9yZk0zuU+ZwzSlq2Iy6kMpazEkP3MWrc7KcZothDxYJnCzZ2gn81q5oPtTZlxcnSLQYvbTKC
U56ZI75JUL7FTkHoVUqrn9LGeqj6j4S2NAyIcdSA3jCn6n5SoPCARIR5DfEuzZ790zVhUAAyLCyz
vVEh20HqVG0guMIUSJuIxWaSGDL/mEEImlkkM4MYYIDXc/gKrW8uATJedswMZkcC0kCRZzSXo9wT
ryhZnrhrkvcZGhqyohHoWpwx5Njo8Oxh3ubs52s1kYZ1GqnrHOWlo1gDEvRgOF85/YxyLfOeBnCW
QOSiexKMknIqGyEOitKNjWG34SvOmmteOnqEYTfNGnYLOXvLDqLAivLNGu6nI3UZsyWsf3nBUOjZ
el4hUzHT8OvdU1AkUmQG9NZsIwnSwLUQTq93MFTEUXGmxZdcWxVtmW7TRMwUXoeZq1HpYJbFeble
Ftd88/YDICEXDl7RirkfcWfExzkSP0cbes6budzTl3k7agSvBn6y1SFSiucajrt2gpEwF7hWlCcV
uy/eXRzuiHumQqB+VrxSFexV77haiNBtKczqj4BAR7HsUtT7RZA8Bq+Kd4J7IsALPC8hc8CqWgF7
LBqXW8ieBLeS/xFjS+cMCQ0QYqumXCAA3YjwPhrXHL2kBsP2XNkx3WLpu7uq0rvJjnnFgi+cXZht
bkcOsp1W4WteL+Xof7x/AsAJwOlozQvaMDZ5JNGrmi4L5wIA41eG8RCqhwJAtvl1XTgiYI8q/fR+
mNJKkQ7mZPay9MY9xNuQpC3cMTo4w6XFBha+9HIr+cMf/sC4JWbTmDHzyWyZtGXPIFIUGIqrvsYU
L2BsMASLJOFYrdN8w7KIT5znbI3dHMoa5kFMAwxBpWML3KoRHUhqztvRByPiidxIOOLDhlV0cOEF
eO2EMp+hEnd96jDTIbXKT/WihOoUmg7wUXekU6IulkB2pPjIEimrZGJzQKsu5J73pK3lCvItrBvW
j0S+iVounnZhRTTnW+EAQolmZUTaJUN87TwqQKKAlpPbmG31x/lOcnuDcyBc3rmfQ2g73wqr1sw6
8mtQy8Rtyfj6AP8ZLSWd4DztdHkHUsNvuGpPzXcrzbv93ztx/0JahGqBoX6N/sGU8JkyLIN11TCB
jwfdDjsdWd2tL6+1Akh+iFDTJM+Jk7nQdiWYowQgOiGnEpLrKl90pMnk4bPDEUozWhPj4p1s6Kef
St52FBijGAj4EkoYTaTROatrw2vNocs2Xe59pH5ORwAfUxNyQg10TMvQSAqwxyGPJF8eka+YU6JY
FkYekbGg8O/SHmLPIV+XCI2knAKks+mQ8xotaeqWdQ3CMH6R+WJ0kWBiS2eGtVLJ0iYGOK3mxfKa
D4EhQxmItMmoZHxC0TFJN2XKcYyzo0wQft4HEQs0h5YWMEmkaNtOISLaatC8XBDILOv1+UViZMTq
LSS5AAWhuzROIzjbni6YunQyGqMfE1wKA0BxgbZXDFIRNPtPex7ohz78g9NiyLBhOyNe2RBVGivS
6hUBUUuSODMdNiBRwhDtFsuHlO5crOxWIPfFQq3eElUQ9y8ftHQGOhZMYl8lXDuVptMOPCgXEaHm
GfH1pUE7/qUXMBH2Byrp6Ix5hZo5WrK0D5nqFugDq7NrViTzjiWrFcLjhEhoaACi3slQuSILUJfs
wmWIaUJA3HZkU+8Et1XE7ybSE4XBrZaOgUbCIqEkFOQNZNO75u79iNwpIDFESN5mchw1SxYDqmoc
la85yV5LL2THn3nf54M3d17+KxA7If2oYTbNCEDM3tw9+r/+k48+UjJpU3IBEUgf0c9hws8R2GdI
wTiA7Sb1DWwlCoCE62jB6w28Fs1YmlIUbAn9spFRvzXd5D5k/C+TDs9LCgMhprqG3PHw6PZiWZ/z
fWFtvkEssfTFBQBsvy0qzHgtA2pShbK0nrNLJ6dtHyKkWKDX88bt55+H0v4JH2Ipyp3ztAR7ZQL6
mlYyWZj+NpiZZBfhAizT6mWJ4GV1wRs+qW1FluDRKMWohvwy7+ibbvWYf5zEyo5oOUcLcxatxrBU
gdMwR6Q6Oae7OD4E6YvpoHGLua8EMlkZ8eZsDBvT/R2hcgn8NdvfkMfm6/L6gKGy3u0n70b+ZQdE
LbtJ7AI0+x0LNbqoptNyviVoOY8xYckXE71LRSR9yC8mbMwCpzeJ2erDlwPYW5CAr1y+2T36X/+j
jz5qgznW4kQHVM8kzpkkQ/bU9VvyszrWRdPkiF4O/ddgsQD8n+V6Wn5GCa9YlUG+cXD7AbhA03Gp
Qsw5eFhCQAg6rcHZS2CVU/gM0TZF3QtgR+Vydj2UtE+mlitoBARlSmJdy4f+Zdp6ezgeDXORa9cD
6ZpIz6ZlwOVEY211lji8uAM45+CV9eM0ojl4F+NMy1kJz/Y9by14ksF/zBqOXv5XHrsWtcSbvZf/
8a3Bm3sv/6X3+qdq8eb+0f/269hCGw4jKZnmqBiOYHwifvPAOVZjTKfl0lp9r5Ggh5gmbY0u0kbA
ulwtS/XVrD53RhfAhP0ZZ0T8VLKnxN9VGw4JC1HsnxHJ8dNnR4dPn7w4SZ49fPS7h98+Hj95+P3j
FyOR09XxYdowBDit3lbTdTGzgnHaEUurg1LXgFvp+kjhTrld06wdO0P95o77vIODy5FPFtPpmI6q
NpZ9uruLaNAqe7UFLMYrw0HamJtJOYac75FCALV+kHZVAbz5IKWMSJm595DOvMB8P7myDPVhMt5g
JNuM4wz8j288EPaFpPSYHcP8u3CQWFl8mNsMygxoXu8uridBfzauCw1lXo/b31JPv65ZMJjMDKNN
Rqacc46TfFVNkpERCyxf39Y12KQeLhbJ4/k5qBZuPJgZAOnu7sJp+34DinxJw/lObC/2GmqP/KFk
tqsQHB0E+NW6uflCwImP+dGb9+s7fD+OfU8j+AGkK5obs+016DP42dTk0GFOJZBNLwEIhxYIzU9s
ib/5mBjNODYa0iFFR4IIWtFBPC9BmTBBpdyysEClvvc8wIQV15b0rqrZdFIsp83N6WlO9NRU6DBY
3nRdvJ1gSpMHuuk5anag04Ze9nALuwHpEC8cTcjEzb38IYCWXThdtrIsDSHTN/hMQTpbmixyLaTF
EEcCpSSk4LVyBkk6scF47JR2P+NroeFWAIrj5G081MdAPBA8piOOxMU9bj2IwJe0sqWLiBdJD4K+
ZYjkXM03Dcd2c5u2sGAkFwn40d1B30Yssd+lg12Ckzt45GYs8IxWy2LeAC1R3XdNNTkj/nQrZGmq
RevaTr3TGhssAZA1D+CDgZgwrJxWJVML/yj6Hu83ZwGYlqfr82wHoMUXYgiDyH+BsVAuBEQet5sN
MADmQsRZaCDng4Pb0bSnr3Y3oVIEUoeiEkJlNxbGVt1JvRcxeGo96x0z6c/Mw+l0q3kxnSDbkmhY
ldPKxhkLemJRtKG3N9F+tKYPda1YFjvSBCWZgatS8sizUPG7dpyiDiDFu7zfsZCltrUvXfqPtoj5
x3pNXnBwoIHDw6wsAFhy7pTC5sTDI2NJ4l+qHZf5SPfYMtr0loiz2O4ZuwU6rB0PfKNCDyhCqDDf
tgIGZcroEGsjRdl45u1VQlEaSo+cZx+k7ajAJJc7ZxaRKD9TGnPs06KcxvNz3LZgov7Q80gsOVqt
WyNt27L/oUZrjTl6uB9gsDYNCDOOjR+ZO+9qTAJvGPWlHL4D2qqs20eb63mzHGHmXntIopRoSGg0
3uFtjol2zRvrtfxpRBehFmNTlTru5neY+Fy8217K0LHam63kr62BBina2OauqyH8tkg2J6/Q+3Kx
7yPEO/8xXWnrndu1CZZpk24X2Q69Cclb/G8cCJMyT95X4Vx538oRTRJ4MA29mfvSl5hhCE5IyM4H
ikLIKQTBXZStD9fFnZnRUXtZzrxeD/0MaLrT+TC297uL633s52FDbcqCh8ALmLZWo7uTeQSDhc/b
6Eza/G8inVGiIfhLXfREJDG3HXUn69Xid6Szi3qaBLpiZ45fz6c16va94p3Zq8dxtrOqxw0ofr2T
WaMhb8pRrYCPPQHcLQk5ipGEXi7ybiBCmIzebNf6Ph6a/DQNsVd3nw/KhiyR0ELec3/oyxRJ3/Y1
LoMFVgelXYq5zRPVPQZsd79X1vVSakPxeILSzpuW5/YEFXQlu3SONV5ewr6CXTkru3vDtGvFAsuD
Ff0ECwEvYw3xjHKNndc1dNNKn5eX9VvgAvP6are8XKyu3ZERSQfZcSioNeGMjN271F4ZltB0aUuN
wQgwXoSNbH//DAYkmhvkrXsWV0YP7T2Y7fa8SaW0jPEim6YOvWy61krWGSmvWS0z79Not1o0EUWy
imQ4teLTlsKT4Uwkk6nMXMvqfNyHCBqcRqBs3/r8iy1Hz+mj04yrtJfl+XnomQebv7fXbbLr+sSc
D5T6c5tD8BBdSF/TYQjp5SQlVPw4VAMCc5HXhYi/o+ucM9xl3gJ16itkeb6nnXQb3P/BuxwFLVwl
r57Oy05LBRqVUCxToMSywBQ669/syeoxA5uhNxCDQ7bmzeXPGkScL3j1dzCG1lb3xY50D5RgHd+x
4zjJC3x2N0MV30AI0JBapO8G7Q0Xtnb/CaxBwo/PKLDFQ/De7McbolRJNlg/Ue0kzU+6T1vEv2oA
ZnnMzvPHmZsDsPUZkd4Ou2Hwovxkw+AcIPm82d84Egjzi3hqx4HBt5UDA16ItR0jUJVdw5PRjDyV
tbS1XYsYpgtTtr/VOnm0Cb2920uS2wth0UYwbpISh3dvlS4xKy4Q1Oau4Xx0I1zggzAA8ozIOuQX
9IVof+jOBDgGMLywTtD2bP5t+Y1ZPUHHucL3Kowi5NNkHjPHg1XOSl9GBFm0OOH7yxCdwh3DKLaZ
ENqvtF5gdUG21vCW5mIV0DSwDAiQIhIF1RJ/BSQSl8/n5dVYvj1ukxQFECQc9dWgR+6Itl6OeTR4
RC0nPipuStgGtpVgMR4OkV/w4CUR1oZsRtU5fUqOTgWHr6gISP4YIxx09ISbKB5dByf1BnWbou3I
RQuTO8FdYz8BsEo4oVubcbG66L94brEX/SvJImYY67XE3aCZgCi3uUgSadrZfJ/7ZFwXg1ozyWp9
RvgBIS2BcnB+7aipjxnaTR1jGJ37uUNB2OYtcQ0InptaqjaNpT78DMZwM/7MVprElC1weg+R3pBi
b0O9YUxMzPPB9hTSRvbtUGx4iojIgbaRp/XTiWVDYlTBX5AVSAlaEOrUHRiFpeGGBeXiHRTlhSzb
3Xb0VMcFXtiuXnDz7cngRsoab19F9lQQGYXuH/aQCVxAPG+KTuKKK68od0s0FLPDDSHqH4wSqLLo
bE7h0nHlxZ3TlzCwZ+vlQX4rXy8gMSE3nbeYtMyuNTrKpHsbQQV5Z85oGy8GxfoWLMy1E7HVdEx1
/J7H3grL9Aa45FbHF1MBYxiNQsuNHGBdkJedsJKbj74+2Xqrae6qJJBzBa6FE+S6EDQzVgwUbgGl
9dvFINTBZvXVoQ4oPUFwIrB0S2Iu3QzFDvS5Q0QDPm7mcsB2ZHI6mLuoxCvnNph2uHNhaF2m99P2
jGhLbgOe14HtUx5llJCtfc/s3KUSgPweG0vVIiIDVbPxpO/WMS4MrZXn5/uxS1/E2a1zjkzfMjtP
AFQAeCm71jscR+q9B5AY977t08ynMSe8Q/wdcw7/CNdqCP10PHxWne5RTuY033ZFg3k4xBDrbcbf
o1Tljb1AbPLAyibOIO038ZDz4HhjIg9PuY5FLt9pmRFD8MwjJ10C5jjrkrqke6gCgO/pgAQPM0Mj
u3BdsT/g3vDecdzcBHj2A+/ursW7KD2pbdwF6V73/VRmN9FabNXN7rO4W0/c2u7omhM/k0PysCau
IM/WxPDnlZUvsLK8y6ZXncU4yoYO+M33D65fY+Ut2Mv563l9RVrP/ZurlahXN+Fk1rOn44q5Yftu
FEhim7Ovl/He9s9vb3aVDt3S9msUzNuTWniW9b5Ko2shJNqpELCujTYQYAt6j8QZ7r47vn+Sb9P3
l/Of/t9ZdTAyrsk62T8tHR03EleS3a4oxoFMe0HV+T8O0ljPNxBHZ4f69J8+N3NZ8xR/WtVGbDSc
997gfcw4EfHpZ9hjbmh/oa7fpewltvqWcx4WGwwGKmQvH7x58PJf+OHgbz45+j/+9Ucf7ezsfIVY
UASwxaHijalqgUAJkbDWxQpLye/F6/OxxMLr2FdUnIIsBgbGxgtxNELtqj6t69lgA1rTo3p+Vp0/
w075qEs6ClLwJiWAkgLOxhP8eEyINfLQkA8+sZGNLyFa1VT7NRVoINT3G0QvAJxgGezoEOmtnHqv
HXTLo3WzMv2ByBjGPpAMGOhsPueRPMVZpvEQ+BcFy7oM3txPm763fLcwKyjQ5UMCNJGfoFZsILBp
vTC0vwQry4yRfwlEBPszI4wsL+c2VTuWPAX6GhbNh0wAAl3ImTrqiAYkNTeZficP83YCy75pHvX1
1i5ke4bd6umnrUXzSZ/ARXhZEMrYLck1/J8FShR4F6KzNbnzcXgZXDLK+dtqWc/humn27dtiWYF7
QdOX+/wOXHfNP3dgL7QjA6glw2j0vhg9L6707yyIBuP7JdU4WtSLLIVHYdAY9VulG1TbpwnrpHek
QXFfu0JmPcrlyrXvfFViKzLaMAthYkfVr0CBqN4xhB2tAUKtpM8On40fPX3yzeG3428Ov3ucilFa
07uuQbv98+VevY7n4jpWJU5CMj+OsKYT5UEd2TqWczV+qNxLJlAXImjJFFUbhMFA1ivGJQLSsdTp
DkSMt6NZSr6uEXPH3OtWkHiANUUIrCGwaxOzsAWwF8T6Z+wWXlrkwhnwmyYf6ZA8gFY0XTyvcU4B
kc1m9MCEHZztEiP+vO0ULq2fnOtWcn/kjcw71JuSgJvwunk+q08LiKO1NNlG5wOSpjXIAMciC7cC
V5jxv/qmdCt5MOra8INtmuBvx+YrHzmDp67hxAIyy6xpqxvGcvWHbgTfITwn/AxsFbE6Q+2NKQes
obx2ipAxxoamUe9GKKjh1nZ3Y/d0qtS8pGsZ/LxF+KcLNIiZ9UcH6JUvGjEbVhyIg1dNBR3xOd0n
Es0cIZaS1TOcpmBo5m1/gvNI9Qw4g7DCaKelqY62QR0eUWwtqic4UDje6lsUUs1/GejoJjZUbmoO
vDOaX6zVHdwhOtw38SP+kxQl6y6lDHXWCnWZ+X0DVTx9zR0yM/jWMJgxLlYmZNypgA+Ok9/DVz0a
+XIrwETU3SNMzM7DOavJ68lkDfm2puslAXRrHsUJkztu3RZ45JO8CxuhOebhg5HthBY+PDukaPQw
dJwJToyAu1lkQWGIiL3nDSEIn9Zn/EXR2OpjoTNe6lvFaaIJnI9P/O57XI/va5w+Eg7r1B/Fc4vA
d17OS8QCZNA+hKcOmC/zSE6jB9XpUYZcUskKUU5pZiXgfp3ZHMm8l2muSmXRb0sue0zYfrJ3Fg8Y
QKkdIP/0LWA1TwXTAFzxSWpgedQJAQSwTqi1q6rgdfdOfV/u5fB6gF8MxJCcoPYYss1w0qt6+XrU
FXG/WNaQ3SQYS+CNmzydTXGpd2HjvjbH+Kcuy9IoRmB2p//ezY26TwzCHWXD6GLXEfk1AizhDMGT
Tbv+ESonjE7yCrhkfABnLXdmrDI5sD1AmVPt69YZpkGF6ZsAWLhNWqa6sSni+CUfk2P6Ltuewwi/
hcsMc1uux0qc3bc0XgM1/Za7LzCfK/qWeIoBws0T+H5gJunChvhyZipEDWKZFRzUyfZindSdkwu3
MbR/WSXAENkt13e8/8lJLlku/d58rXrypF59A2bKfYYhRLhvwPbCuPFqAXZR2PH1ekUbajYTiHRu
6ICgFXknHcSuog4b6YCRkR49/f77h0++dghJjLIhdUq6LkJrnU4RrY3J7wAvLkPBr6Zr8kG/KoPD
6DCHn4jDuXS6BRaS7l4QSAii/w0H4jN0kKoHnUghhBCCeIBYPO9u5jE1o9i315q52bzlB5BeyvD1
g/Trw+deO+ZLwCAxJfUpgDDya4QRQwGnrPCmgZZg4GVkGUzNOV8uDScCdslXEl0JArZZB9a8bySN
jASk/l3fSqmHBG/GwZsNM3nImC9epTAqNXTOlVFJPAOPjzOjjpJDgu+Hct50ixCN+PxmBvg7mAhV
u6Ai0D141D0PtxJDfuT2ZJki4O0qL2Bgr67qZujjAN/S7ygR2x5hIUASAoC4r6blEsF0Gd8eMetG
vAq77utdWLwWNY3VU0ykbiZ9aUsRVlWqZ9+yvRcvnz17/vjFi/FvH3/3rGcCnhuqw/sOMHlOEaAG
BaBuoAvFQ9N2ewlgveZAdOWInOxztxFoLPxirF50UpGMS7GN9xvaS8jKsF7VhqdAhhXIaTHHZt9i
9LMmR8poS+krPaZ5KyHuM7seJbuPXYpOR59AimpiMM9zZGL4eXti8MUvMjHdu597Zbj2ad2iOv1M
+kJXKr8b9/Sm/5byEULykPVqsV718Z431LqhiNLnn/rJzVqmhC0bW96d1edei+63ZdigbQPYP4+n
fVdz6hjKqVMg6PDM8GGAuXpXXa4vE5q4anWdm80OWF6WWF6bI72PB/2AMROMW3xuxvLW3JdB6z2r
JuCteC3wWjPBsaWR7EqRXfokHNlY3o+997/8xjtywj2OCG0Ytt9mK9I6yDDQuzjsu3rYtTTS39Bc
8nO6TjD6RUM6P8yoIz2f17v42+vqvB7rh/8Ae3fXCGTvrr1O6CdbnRI7LxDlHfOPw7ce2r1haMt9
8Ae8mn6Jb0fmwVt4Vi9Xo4RuUjtGEOW0sfCh+8YQ/pdwSlOnzX0VlKjM/qf2pLmu11IRHJTmOoVO
6HPg1xeQIpcAKqh5BGSHL8zPt3BA8ZfQaLuHhBALxWX/mSKXC6iO8mpIN0c7fZxiVV2WNawr/OBJ
dA99unzx+NHTJ1+/8NbEL0qrcjari5CX3f/Uk0JZc9rUk9fwJ1WSyO0luS1/AAjqnOD0ttqGcmsB
DeGynkmeXswUMa1WqPsdUgahtHk7t2xGRv524rMXe2O2z38Z8eQhZOQq30nYa0OxT81rIyczUYFY
2Eh34cWufrGLn/sirSkz1mXGuszPGoaMw1zzYEbHKK+bA948nI5BxQnW7V+9/GcMiPvm06Oz/xrN
2uZ8OQeRo8OMPePXYj78Dv0KMsrwmHOKPVFycF06/yPbdRk3dxf1v81kWS0g/dtDsNI2yRISPVkj
CZ0YQ5js4m1dTQ1FXgL8/tTcPSEpMohK7MOEQDJ0scNE39IX/OP3j59/9fTF4/HXj796+S2AcFPn
Rvh7l9S90XeUiOHJN0/VC/hJacSfHh1+80fzJtOv7sqPHx4+f5LvkdMH/G0Kwj+HT3Qr8ARLPH7+
/Olz9QJ/U8qLh0cPv1Nv8DeN67vHv3/83Qtwo/BGOEz4H+jPkPs5xNaH1NCQaj3ZAiiY1ZSQVm0Z
oInhO3Yr0f4h+Dw49k0BPHXCj8dws1+C4BSgRoXvx5ADDChKKrJdJ9g6RgBozj1raDgSM4mk0OIZ
apV31aK6+4a10oT3VMoOPTesVlawp2IMHLphtUQQPZWiJv+D13pWmLvNTWtFeu2tVooOiW301h11
kweF9VUXXhskAu7BadtBsL7impQlCxDBSfAgpTmc9NaTvhmi9gDS2u8oBwCyPeE/8NGV0iXO6foe
wVWTnTnmMctvq4W1W7c1Vppf+GzMuTmzoBKqNO78nnXtz+j0gPilu5Zxii0jVgxVuq0uL+qNvKDL
ikRNcKh2d5IsVfRstm4u4qFpdh04nUyn+2WbtHz1vF1OSEh2G4u/h3evX89gQ5E0Se9ofn3Xvuwa
RYyLd/emFdaNaCGncEsVEBZMNQrVJPP15Wm5/GKboaW37z2oBM6D6VNeR1dJkvsIuUEEHix/2o2C
heWYSKTuu3Fq6V4SqSdzvXOHK7icWeqNWKle0JxdlSLOJNP6ag6ZXRL56gttPmLzAsPMIuWO/a2s
zg0KbVE0XsEMgRXH9RCtdH4PKWfdfuihZM1Waj8OE58r7hwtK5aR/XpvL3NwF0OAE3fy3176cLEA
rwO8O2wmAiYXzGywxm0W0NoFMMbBDXhBDysCCI9NqQW2YWIRyUhvcOLbgCd5aV5S6giXqAqPbQiA
bC2luS6ZUqO0vaTbL6eu2Vp4/DXeuekiaVNoZHpiXiov4BogozcbnnK7oQcCXO+hTxXEkPs9G2w+
ISDJhiGK1gognXC0bz644fER5xl4M24g+VKBeXuvZUAIOHg1J1u8N3NJdlqCbrxJRmnTCSfiT3YM
92Trgq0Jes9Z+HCyfpuRDmXe0KTos9VDzOmDmZjnpGWi5ptJvSglwyEqB+3cY0oeICfXOViNYYIT
gLRVzMkDLx2lMS+QjTQMkN5Mu52SRIQ0PxThWdFPsZCucFzym0DpAVO4v4MsROBM36oh34XH8uMm
B6dqJd1iHn5c3m7o/6NIEHD0YYSZcaeG0lJ+44nsYrv8lz5GO45ivpTEvXNAQ4tyo6ULVGga4iBx
CVWI5zWdp9BARBToluaZBaueeSe//6YtmthOguofu0O51fErMBc3prehn9DNbyOuaKNWozN0HhuI
zoFSj+CtsGspeq45ejSff/55At52rOzK/RfBjH8yTH7lSvinekf5B668Jz/EijdGFC8zYBjD5JMc
/neDtsKPb97wffrw/o0/fND60O9uwEWVl4+QMFQTynZwtoP7Tw0IB1hwxM/kR71o3XkxLbrygSXR
GJ9+HhBEJ+21r5xYs+mHrhjcqeHZbw7ev15+sUGw1Fsi+TxsjrbCl6iyvSyNxDYN9obZr6giBvKe
gLY13A5WgWfejkjlqJeL2v1Ncq+7V17SSPvJ5xSXRSXyvq9377cCA/gN/qPUmIjmxRwN0EpRrRBy
NfOCcL98daZ5DAniWwkTfoaOxRY9OOjlarZHNuq4q9HI4UX97vxSq1e8cYHXJqO6UuOR+4odGsPg
ZvBR3qEdHpXvVtAF6pA5Y0jQ1Nzzzb99+c/B7iDX2zf/7uj//sJmCHRpAc/LFYKBRHNE8l+XlaFn
jGyLpgw0HQ1yBsYj5rLL6acwv4trQMQbJu8uZ8vFZFadDpP1cqb+fTDciA1oCqLxxXdLHCa/PTp6
htrD/D0TDupYwExgBCS/YVe/yBwybqqfAoxaP0li0bzuqmGNCPYcEEh+UWNZuvGkMDzesI8ZZnSm
X/JSjfPtxA7Q/NmXi3EwGKPD6Bj2YGrXYYypkqBgyomjMbUngwvDMzPtgLjkJ91Kq2ZsXvB7xAEr
CEABU3Oa3/xW/XwQVHEOzAYLmcogfVPFYIFYK02OGdQYARj8T9XUjV0dUNj90i+5/MVqtcBHhqkN
IlNgpsY+HR3JU0z4KT7RenYyUxUmaC0bxDbUlyTycveSsJOjO6VPqVao0EYVng9xu5+8fP4dJB0n
W6TpMT4ZJYkIi5nzLhUMaDnkJeXWuDH0clmOXWYs01WXtdvPZkRlQeLG3Fnny3q9yO7nLegMVExz
WXOfIMQ4Uj7LDAzaeml55UXqwLBaICbbI+k8V6ZdwXBz6HYNzBeiulHwZEXJ7GfRFBCug8iJAi1Y
MNpoKjXzlaT/3k9BEAuDOLhcB8CLnyVI1g82VWOkiovxdAnbwiwkvouk9movaNCwv6ygYtgHPYwD
TcnSv/R1nBj0aD1/s65XZbwHYb4yGFlnh8xLaZ+Ba1sTYdrkKeuRzZxafILzhgZsyrcdyDvNgsaB
0HF2KwRSD/EjSKQO6LnwD0WUUmEvYMjC0Hl1uZwPZ/pL9vg+fBqEAvWH/3Sm8n6E2mzKlQSe4eFe
EPwRgOgVZYE/dz5GII+eeRmOAA4Ly0EwzT1kHl4Avsoy/VMGu/cv8J/mLxR5msJ0jQ7zQYRuw8/3
7mTHxe5PJ/mPf7GfSez78++eLiCUxrk96BsMOOg3CWjvYH+Dl4ZErRuBqEExAJ2LkPGQ01OzXgAH
96rZaJAH2eiymFvKfzCCqp+xT9H350vI8clO7c/LYnapwQTH4wmesnwbBvqI330Pz1SwLJA7Z/9s
aAxAEHDNQc0FZE4sZrjUgJN5DgdLkfzq3n2dJnJhhB3UuyXgYH+5wKvYZUGQ7WclbBBoUIIOknUD
iqHToqkmOHdOcW3aHw2inUYiAacsOq/Ez0qiW4zYtCwmq7ERZkBfVJk7l79DyJl2SYgyMfVcNCxP
BqdWRPayDc7kucl8Hq62n17Ljpi87eLxANLJ0PMU0b3MGnRAQLewmYORqJ7TM3+mYjfR/m9jCxPL
AYgf+IFeMnmbaPaHZbEgkQZ2IAQ0lJBfg3zezY5bmU1rKPMcyHiV7KBMa+hbKSAkukEiwLCy5988
2idJZ2/vdA2oABhpM6qX53tV06zLX/+7Tx5sodDASegJWaJDRQjhuaOYoeEmZqMsm4M/pw8nQC67
RpCoQZ+Z7hthEol5dZ3+tTWf5uNwKnlF7Fy6dTkgBZEsTkSn7uLG2iFuKD5KdlvTlA2yWzJdsDcY
7Wse5UALY4Ae4bjk6bqamTqaUbQDxP6HybxcGeGJcGuGiZmx5bWRU5fFOc0k+ciZP0iQ8EjYUJVP
q26HOu6CbrTTsSNdfhCyFmbKlCoRStsSrHajjupA9aZExe9VSXYH5xbKbQx8DZPuRpw5RbgXqbg4
0NDw3KhNzX5kZSmSpJbFFbkWZ+nLhvFZAOsNTm8eTj6IwXjyxHiV8aV+xP9mqRxYpr42SKPtEp6V
UjKul5epB9fN6KT3c55fdJWBkinufRmc11/BwfbQvP4tvc50A7mCiQCBQ30L2wJcVFEOUdVryvKd
MUj4xPOYMDrOAFXAcMRZ6VynzW4AJJllos7GdkAjNDrCg83sFu1BAABINiLZSDaGy4G/amKpDvPZ
dzCTF3IRJaFI5otEDy/0tsGgqznfWtGV1Gkqq3lJjtpGxCAMA+K1sBf402qFaBpNwk7jsBDjZ8+f
/uGP7mBToV6CSdHBgrxtBdcB+duFXuKAFJPBB5lMkQ8VDg+DGxw8GrOQqGjgGTwXuvnzDhxNO/tU
epjsnLlff80DzIg+YvJas3qv0SPQ7Xxz9Oy3IZ3xkYWFOGZJqqJ/vBTmhg97chfH3tN+8Zzzdr7c
IQh/eBWckIZSUfI84NejJTF28w3eEO+1Uhvs7GNt8mWnAl4K8JXTfOYZLSIlh74dm98SHwh8MiJy
5yZZ5jF902Bze5YRoQatwEu9O6RcpL/ZhmFKJjjQdtldYxcYRj873CC+BNJJS3PP2eCjRy/uAADA
hGtYlm8QJnsqym8kBui80vq4a98P2lQa8vONFwR9W1Wk4MGC2ma3PaRvcaoS5FkoHKk+OtAikB/N
EFZByhAYgdowouG53XwJB7mbjTRtUfwt5mkWfki6HrIpKHOQ2DOdRYUvRVrIXCvhjN5EZqCGegnn
F5qqfZ6tLLJ17Mw5Y1KxBFK38ryl4PWcaDjbSLd5G6jQVhrbwL6UT8dM6zzuYDXfshxAp9V2xy4d
t1pb8A906kqaXh5V5MBkxIcAzcw1GiSEkqO3VZl5kZpFhzzV0ffCS6wMCO4WEAQ25uNaPhMt5pft
DSbt7KdySI27pV1p0GNc3je+ovcGfjhrQmLwKutA+tlqr3tTkXfi/b/Xpg/2Be9Odgny56enG/G5
kKwBvHKbDe900LO6xwzF6QjFKkOWKA0GFPH+sUkAaKPMqtdwITSVCekzkTDAvJ9OseVCYC0mlCXY
U/7f8zHHxXuGvoC8dGT8MHsENajwh9i8jGCZniR3wZI3AmGPPmp4nMrOljmxRvr/iJCq2Jy0Lwqa
gtTsXlF2TEW7hdLY43dprv1TdzhlAQV14kCodhAWKOm2w5vNYO5uL3PAnrIyhDMhYM46acSlrLOa
/03GBrNAqF2O2UNaCYcwSd69E8/Wcfxg/ySiDG/ZJPSy4e/BoEOZnf6JVdmBAnxD6eP9v5w41feA
xHdrJdUjai8u9hbX1S6zuZMdoeRg3kg8LKp9i9Omnq1Xpb/6QUomUC1Dlg2b08F8FeYEgKm/6bT/
pTXtWvs2ClfXvrRCgcoxoQxlrlxIP8KtcF729nA5UTsXTvCDDz/DKGCCkgnrwrFNqTjIQM3GBcAJ
71kAnPxh+CXyHXylJhI6rQ2OxJx0wg41j2byCOv/OFgWw8w5+chyJejRUMGJJQjY7Nj2/qClXt1q
bfBjb43Y/sY3INSkBqv0EoNcLZAHdxodT89qvgDh1UdMv+C2A9BrZj3t8qCVENXKDMk2XdaLhUUs
q9engDUKNiwPYFSYMV75MIqaR5MjeVTohzFDO1whYJDmmDs1ktZ1YgTm5W5ZnJfoEWIENmLEs6vi
GgzXqA1rDK+CqYFLIdluTCdNt4asu7WDwQHAtNmjqFgV4IiNuAZU51kNIb+GCk+vzSqbWgmAjQTM
GbgOQMazgoZzViyhV4cJJDFAgCJQP0NtmKEH3OdRU183zEuO0AJwCIp7PB5OS8qsYwRQq8hf1fWs
GVXl6gz1+Bery9ne8mxy/99+cm/AlZgxnq3nBAJoDqX1Jeq8i1Vw+9zfO6vrvdOCpIWKVFkNKi+b
VtE9r7AQjqf5R8qypDLQ19wlpaFrmfQHNkqtWflmbs/OrYwC7YPGqg3J2b1Izut6mkAqVSDPCZCI
EEYgfe3tsfClu6nEH+07E5WD0MkR+v8K3r6COSw0dFHRUDIirAbp2uLp8cP/p7ZraW4kKcL3uXHh
3COj6JanJcyyS7AiRMDuzMZwWDgMcBkbh2QJSeGxrFFL8uwS/DD+HZWvqsyq6pbMsBdbatUjqyo7
KysrMz/wUKwI1RpwUdwMjJY/+o+zH7+gz+EZ/t/+MDuAUauUi3CYXI/TksVoCZgs0nMyvd5j0+hl
rCRZtyFAgLuvi+AwA4qM96OiVB08WVAFHLcWG9CYMb8e5CK4Vc+xNU+maSoQqcpDgB/4D0n3ubt4
XZyIV8XNmMSfKRoTk8+XuhOjJkLJkdfF+HIfr8bALUQ8/EbLA4RTwOeKmvF2vgTfhX/XXI3onQvO
XkM0SYQQugzSlkn+bSjG4HqwGFf6gGg7iYbml59Qde1s0sDIvZCpU3OTmXA1m0n52sxRXdB60yp0
cMRYEt8VwkCQCs0p8fy1MeZXXJRwHpDF5/3FDtkype501ObFoqoECcGecZpcLo4ZcjvGpsSMYUDb
ilDkoSMyBwuZR8rx+DD/qvLOjiu32fJsU7PmF0hiqb+PVotP8/US7hL9y2grvJwQRa4TfDCO0VEp
Sr73/euvCqzAF9GCGwfZHvvNwG+D5JaFufg+QS6vxZyq9ZuXvbxzJ8TT4muaGaMnS9ug2rx9vnGq
jidToeOxk0/CodA24iwjg7NNi1xMO+YXVoP1x624Npm65U6ANinCVHk93K0Om3vweWJ3qS+vvv5N
go+KhSxvz1zpe3XhrVeYc5JjLaEr57Rkask0+IfizUVDr9Fp+TYSLmq8+lcNLRpPljc4y8ucsln7
/BoHF8RGuXUH4yUq7JDjGXUTdj4pWRoNqUR5Y/zJqpBeuvbQj4PWxq/MWDD8mx7ZKMiJrff74sur
y19dXV0JlKn+NVQn/YokG39PR7uOIgzbnPnEvwQ0S8eBbq3R5QHTtGMWK9CNc6F1YOdR5LWC9URx
0+VrnhTCWsNXn01tMphau3tXupMYn6wF3Pa8jg8MIwa9MAlCQZdNPsIzsq3mG+EqlN3F1EATPjnQ
KgEj9ZK3X0uA4DJpZUC3HEhlQcSoDDGkhMF5HKW67WaL9rBdmgO99oGuZ8VhtjT/63W/X0gCBvea
XYbmf6lprltpGGSg5nMS6Vw5631EtxwTGv0WyeEEF6t7SXgaTAw95FgOIzo5312SHxRB2viCT32n
bjd+IfqjTjHg2/b6bASNGNrE2bYwdxm877g9c2T0Xn3T5r4q4WgtHu2c3Lao1gNMRloX1dNgDcRX
swHGmBRlW9RMjFqekOAaKddwSnuCP7PSer4HsiaFKxZjk+B82ZA6vHk11Z7KDhT213BqCBLqBLGR
m4UPmEpLdtAzK+Mc6c1esHlCyE5Xm8kovpkS9pJbiT5amfpN64rAK35yTRKoecL2iSaADlaj/BTU
YWCeIWHFlJczVcds8+FF6Rh3B9petPe8mx6dxD5zTc3xVmJzkiO7DYmKjvD8JqHu5hYP1BeOTBs9
3M/hc1UOqQeMRlpvBUvG3CH78wtbfS5K7e2iNGGt/nXphZbocXxzHRChglCJxnkyHM57B7HVVt9f
l9HrnOKd295i8KGOYLS4poxYnxeM12kkDFWxwZnlXpUj0YRBdpvAoex5JakQh2iIDUTOLHBgBYt1
6z53enONueGs01jgC334AC7OTGaMCop+vj6VEa6GgBXA65dMtNVOOCjmVjstE63wEmpmspRGM5g9
r+CkqzhfAZGd2P0yyGprGNQAzjEXu9/HkXv/PmPMwmBZSIdaaXoTZSlpzZDrFE9XIENBwQ4aKDPA
2qBDh3LkJRjVukISZfBcwmLmMWJFRGMOyjnh2TOPzidOvydMPCcMqEqLi47k3Wa6blPfs5uCw6QS
1rDi+vvLiZ0TZQ8xsbr2pY+oSHkz6jVxgXNLe9igZTuzIqAXPYBwlzU3lqEzXvJxemiPYudCFWOL
6Aiu6Q6qsfY4DE4ghCunTtFZUyIdCNSqotAbpjeyn31GtN0F5KAMEALfCR0thL7pprGDPBWH48Pe
aPLeuo1IxaRkvEDF+4wyTcSxa9xy7+2bP77uvfj427/9jAPERw+L3d1ht55++Pj1X//zc58QIAT7
+2j+JFUA5AaQoHIJKMcAt+YwY2ilfBy9jYgn031XeHpLRHtd/J3SWn9LWa1DOb/xCe6vuuF/Do6v
z7j8vcxSZbvkKeYNrFxxgn6GZoZHI3m2W2wfb+WpUx82i1JdL9Id2grT4q+Wr7wbEH1s+HPTrOQT
XCTfDSlWGtuh2zTRHl2ZIXYy2n/aU0fLAyRPh24u6H71Ca72wtCQwt95rC64jcFHfCmzO2zG1xt7
gnG9FBAseb1xH7aHDx+KProfNfiADAjF0L0Mbr84wtMycUvHS4VAOvtNSux67EDgmsklY8WM1oQf
CdVoK4WHbTnv0IGS1bqCG5Dv5n7iIheqnMVZhLs1CdEOofU7N0XXzSXMjftXjS4H7l+JMiDVOHwT
GV9HuaS+jQLxY92UWO2YJeUfSAsti/sw3J0myTeVc9c70oqcQxKNjtEAju2BADvMMno8z7EfL1A5
s1RqPen1em8+CSRA8XaJzNysEdZHcAscSQyuAefi9Ya0JGlMu752nyKJlugU6eNU+O427LytEa2R
+EzTG7ynxC8Pc9cVX4KXYfQ3dU4v3C92nJlnQinJ6FlNxjj+hZApirun+SSlM4twz1euSp/wEUnu
3XEMJ5jAzb72CyuYdGZvcoLRQ+baYy/WJRy8tXjKr5a7O3tqywBNv5v+c9GCNJ1OukKIVsQMcoUa
cGdGiCdYbEYHKOsiuXGN4J0pbMu3jaa1bAdsWFXYzW3NJmdQufv5yzu++DEz8ufHdwSNGd8FnRfd
rE1badKNkHeA1j7aVfitQxvYOGsH6xcU4rLoDDCIXw/9NpBYQ8COj+gpq7iN+JqQDSMIa8OleQbt
6hUEu/Speznrhf4s8h9n4HqmyLebZc3y2Ue/wd7hHlWRKfc4TrYOwW2eFO/d15vkd8mUDNkpKwz0
Pq4RXYTRD7X8zvlumx46my9TpFc8PSphXeXlS62bGnReiX3r1CRQ0Fego5+21AZeVT1kjLOxXbaL
i0gdrAlmyHACjwyhMAc/3bvADNK+mZL+cTY3w85CokryPDWCZFpGm1TLJuRpSNQJn9x43eAZF9ha
RExlY/psthjv4xtFoovm4Xsy07KfAhjnsWmfG1eiec7kQHmQFqdGrjtAEtIMeaL4QpMdWi/8nNUI
q/fXT9fz69Hw5hVoge7r3H0ajy5/0aYQhqYyyfQw5C+U8Cphi+6YlPwiG6DT26+3PTSqTJf5G1SZ
IJ+Kz32R1azhV/mSRnYhtL3Uj5Z+tptu7lYnVp8KLZ7FAVLnWVygqGlnBGm5gxmkyP+JIWxzuet7
+B2u0UzBU7yRLd3CHyxZkEeoXp5N1AzmOYUKnGAW1cogzu9BG2E7szidaAdGPb9lPoNptuDFvWfk
VtC6PzgZP/mXa+rfny1TeXwxefnhob32rDGK7fhzx7h5nC9+ikGyg4AeZLO70+hgol8hhHMwzmIO
ChBf0TkmUrgy/qhsgIDCWYfI1XJcJoHd2DCYfUrcy7ePQb9aLsFbApK7iHEJQabhOXyH1unacljG
QfqKlPbwusyq2sQtzPIZz9uUD5JqxEqdO56v4zfiU6LR19DyO7sSelC0i2LhOKr5T/sS4/unFuwF
o+llogkQfMjuOvGy1L7t96rPm8gjISJHkZ+lCMwWbmuk1JFU+H+nT3XWSmKsyuc6mC+O0EPcQSZ0
4Q/95sIVmzBJwIZ1wjS17cQJXDBa7xZLx+Tu3fHHzMGLj+PD6L9MgSqn
"""
import os
import sys
import base64
import zlib
import tempfile
import shutil
def unpack(sources):
temp_dir = tempfile.mkdtemp('-scratchdir', 'unpacker-')
for package, content in sources.items():
filepath = package.split(".")
dirpath = os.sep.join(filepath[:-1])
packagedir = os.path.join(temp_dir, dirpath)
if not os.path.isdir(packagedir):
os.makedirs(packagedir)
mod = open(os.path.join(packagedir, "%s.py" % filepath[-1]), 'wb')
try:
mod.write(content.encode("ascii"))
finally:
mod.close()
return temp_dir
if __name__ == "__main__":
    # Decode the embedded, zlib-compressed, pickled mapping of module
    # sources.  `sources` is the (truncated, above) base64 string literal
    # defined earlier in the file.
    if sys.version_info >= (3, 0):
        # Python 3: `exec` is a function, so a compatible do_exec shim can be
        # defined directly.
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii") # ensure bytes
        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
    else:
        # Python 2: `exec` is a statement; the shim must use statement syntax.
        import cPickle as pickle
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
    try:
        # Materialise the embedded packages and make them importable.
        temp_dir = unpack(sources)
        sys.path.insert(0, temp_dir)
        # The bootstrap entry point is kept as a source string so it runs
        # under the version-appropriate exec shim defined above.
        entry = """
import sys
try:
    import setuptools
    import pkg_resources
except ImportError:
    raise SystemExit("An error occured while trying to run %s. Make sure "
                     "you have setuptools or distribute installed." % __file__)
import pip
pip.bootstrap()
"""
        do_exec(entry, locals())
    finally:
        # Always clean up the scratch directory, even if bootstrap fails.
        shutil.rmtree(temp_dir)
OCA/l10n-romania | l10n_ro_stock/tests/test_stock_warehouse_creation.py | 1 | 1704 | # Copyright (C) 2019 NextERP Romania
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class TestStockWarehouseCreation(TransactionCase):
    """Checks that the Romanian-localisation consume/usage locations and
    picking types are created correctly for a new warehouse."""

    def setUp(self):
        super(TestStockWarehouseCreation, self).setUp()
        self.warehouse_obj = self.env["stock.warehouse"]

    def test_warehouse_creation(self):
        """A new warehouse gets consume/usage locations and picking types
        wired to the expected source/destination locations."""
        warehouse = self.warehouse_obj.create(
            {"name": "Warehouse Romania", "code": "ROW"}
        )
        self.assertTrue(warehouse.wh_consume_loc_id)
        self.assertTrue(warehouse.wh_usage_loc_id)
        self.assertTrue(warehouse.consume_type_id)
        self.assertTrue(warehouse.usage_type_id)
        wh_stock_loc = warehouse.lot_stock_id
        wh_consume_loc = warehouse.wh_consume_loc_id
        wh_usage_loc = warehouse.wh_usage_loc_id
        consume_type = warehouse.consume_type_id
        usage_type = warehouse.usage_type_id
        # BUG FIX: the original used two-argument assertTrue(value, expected).
        # unittest treats the second argument as the failure *message*, so
        # nothing was ever compared (any truthy value passed).  assertEqual
        # performs the intended comparison.
        self.assertEqual(wh_consume_loc.usage, "consume")
        self.assertEqual(wh_usage_loc.usage, "usage_giving")
        self.assertEqual(consume_type.code, "internal")
        self.assertEqual(consume_type.default_location_src_id, wh_stock_loc)
        self.assertEqual(consume_type.default_location_dest_id, wh_consume_loc)
        self.assertEqual(usage_type.code, "internal")
        self.assertEqual(usage_type.default_location_src_id, wh_stock_loc)
        self.assertEqual(usage_type.default_location_dest_id, wh_usage_loc)

    def test_warehouse_rename(self):
        """Renaming a warehouse (name and code) must not raise."""
        warehouse = self.warehouse_obj.create(
            {"name": "Warehouse Romania", "code": "ROW"}
        )
        warehouse._update_name_and_code(new_name="Warehouse", new_code="WRO")
| agpl-3.0 | -8,953,999,522,050,827,000 | 38.627907 | 78 | 0.670775 | false |
RyanMatlock/cocktail-reader | cocktail-reader.py | 1 | 3444 | """
cocktail-reader.py
Reads cocktail information from completed-cocktails.tex source file and puts them into some file format for loading into a database.
"""
import re
# these are the states of reading the file
IGNORE = "ignore"
COCKTAIL = "cocktail"
BODY = "body"
state = IGNORE

# Marker used by the LaTeX environments for "starred" cocktail variants.
STARRED = "*"

### re testing
"""
test_str = "\hi{there}"
test_re = re.compile(r"\\(?P<func>[a-z]+){(?P<arg>[a-z]+)}")
test_match = test_re.match(test_str)
print test_match
print test_match.group("func")
print test_match.group("arg")
"""
### end re testing

# Patterns for the \begin{...Cocktail}/\end{...Cocktail} and Ingredients
# environments in the LaTeX source.  The optional <source> prefix names the
# recipe's origin; a trailing '*' marks a starred variant.
#begin_cocktail_re = re.compile(r"^\begin{(?P<type>[A-Z]+)?Cocktail(\*)?}{(?P<name>)[A-Z].*}{\s}?{#.*}$") # error somewhere? -- not catching everything
begin_cocktail_re = re.compile(r"\\begin\{(?P<source>[A-Za-z0-9]+)?Cocktail(?P<starred>\*)?\}\{(?P<name>.+)\}(\s)?(?P<comments>\#.*)?") ## this one is looking pretty good
end_cocktail_re = re.compile(r"\\end\{([A-Za-z0-9]+)?Cocktail\}(\s)?(?P<comments>\#.*)?")
begin_ingredient_re = re.compile(r"\\begin\{Ingredients\}(\s)?(?P<comments>\#.*)?")
end_ingredients_re = re.compile(r"\\end\{Ingredients\}(\s)?(?P<comments>\#.*)?")
ingredient_re = re.compile("")

f = open("completed-cocktails-medium.tex")

# cocktail_names: flat list of every recipe name seen.
# cocktails: maps source name -> list of recipe names from that source.
cocktail_names = []
cocktails = {}

starred = False
def append_cocktail(cocktails, source, name):
    """Append *name* to the list stored under *source* in *cocktails*.

    Creates the list on first use.  Uses dict.setdefault instead of the
    manual key-existence check.
    """
    cocktails.setdefault(source, []).append(name)
#all_comments = []
## there's something buggy with the simple finite state machine right now
# Scan the LaTeX source line by line, collecting recipe names and grouping
# them by source; 'starred' variants get their own buckets.  (Python 2
# print statements.)
for line in f:
    #print line
    cocktail_match = begin_cocktail_re.match(line)
    end_cocktail_match = end_cocktail_re.match(line)
    #print repr(line)
    #print cocktail_match
    if cocktail_match:
        try:
            source = cocktail_match.group("source")
            try:
                # group("starred") is None for unstarred environments.
                if cocktail_match.group("starred") == STARRED:
                    starred = True
            except AttributeError:
                pass
            name = cocktail_match.group("name")
            """
            comments = cocktail_match.group("comments")
            if comments:
                all_comments.append(comments)
            """
            #print name
            cocktail_names.append(name)
            #print source, starred, name
            if not starred:
                # No source prefix means a classic recipe.
                if source == None:
                    source = "Classic"
                append_cocktail(cocktails, source, name)
            else:
                # Starred recipes go in a separate bucket per source;
                # an unprefixed star defaults to "HBAR".
                if source == None:
                    starred_source = "HBAR"
                else:
                    starred_source = source + "*"
                append_cocktail(cocktails, starred_source, name)
        except AttributeError:
            print "failed on: " + line
            #pass
        starred = False
        state = COCKTAIL
    elif end_cocktail_match:
        if state == COCKTAIL:
            print "exited recipe"
        state = IGNORE
    if state == COCKTAIL:
        print "entered recipe"
f.close()
# Print the recipes grouped by source, both alphabetically sorted.
for key in sorted(cocktails.keys()):
    print str(key).upper() + ":"
    for entry in sorted(cocktails[key]):
        print entry
    print ""

cocktail_names.sort()

# Dump the raw recipe names, one per line, for later processing.
raw_names = open("cocktail_names_latex.dat","w")
for entry in cocktail_names:
    raw_names.write(entry+"\n")
raw_names.close()

"""
for entry in all_comments:
    print entry
"""
numerals/rollcall | rollcall/main.py | 3 | 3198 | """
Main module
"""
import os
import func_json as fj
import display
import exce
from datetime import date
def pDir():
    """Return the present working directory."""
    return os.getcwd()
def full_path_to(fName, dire=None):
    """Join the filename *fName* onto the directory *dire*.

    *dire* defaults to the current working directory, resolved at call
    time.  The previous default ``dire=pDir()`` was evaluated once at
    import, so later ``os.chdir`` calls were silently ignored.
    """
    if dire is None:
        dire = pDir()
    return os.path.join(dire, fName)
def fileExists(fName):
"""
Check if a file exists
"""
if os.path.isfile(fName):
return True
return False
def add(json_str, sub):
    """Create the record file for a new subject.

    Writes *json_str* to a new file named *sub*; raises
    exce.SubjectExists if records for the subject are already present.
    """
    if fileExists(sub):
        raise exce.SubjectExists("Records for %s are already present" %(sub))
    with open(sub, "w") as record_file:
        record_file.write(json_str)
def get_json_file(sub):
    """Read the record file for *sub* and return its JSON as a dict.

    Raises exce.SubjectError when the subject file does not exist.
    """
    if not fileExists(sub):
        raise exce.SubjectError("Subject: %s does not exist" %(sub))
    with open(sub, "r") as record_file:
        json_string = record_file.read()
    return fj.json_to_dict(json_string)
def update_json_file(tag, sub, date=None):
    """Record attendance *tag* for subject *sub* on *date*.

    *date* defaults to today's date, resolved at call time.  The previous
    default ``date=date.today()`` was evaluated once at import, so a
    long-running process kept stamping its start-up date on every update.
    Raises exce.UnknownTag for an unrecognised tag.  Returns True.
    """
    if date is None:
        # Local import: the module-level name `date` (datetime.date) is
        # shadowed by this parameter.
        import datetime
        date = datetime.date.today()
    if tag not in fj.TAGS:
        raise exce.UnknownTag("Tag: %s UNKNOWN" %(tag))
    json_dic = get_json_file(sub)
    new_json = fj.update_json_dict(json_dic, date, fj.TAGS[tag])
    newdata = fj.dict_to_json(new_json)
    with open(sub, "w") as record_file:
        record_file.write(newdata)
    return True
def display_names(ext='.json', dire=pDir()):
"""
yields all subject names
"""
for filename in os.listdir(dire):
name, extension = os.path.splitext(filename)
if extension == ext:
yield filename
def gen_total_classes(field=None, ext='.json', dire=None):
    """Yield (subject_filename, total_classes_till_field) pairs.

    *dire* defaults to the current working directory, resolved at call
    time (the old ``dire=pDir()`` default was frozen at import time).
    """
    if dire is None:
        dire = pDir()
    for filename in display_names(ext, dire):
        path = full_path_to(filename, dire)
        json_dic = get_json_file(path)
        total = len(display.total_classes(json_dic, field))
        yield filename, total
def gen_classes_with_tag(tag=fj.TAGS['p'], ext='.json', dire=None):
    """Yield (subject_filename, total_classes_with_tag) pairs.

    *dire* defaults to the current working directory, resolved at call
    time (the old ``dire=pDir()`` default was frozen at import time).
    """
    if dire is None:
        dire = pDir()
    for filename in display_names(ext, dire):
        path = full_path_to(filename, dire)
        json_dic = get_json_file(path)
        total = len(display.classes_with_tag(json_dic, tag))
        yield filename, total
def gen_percent(tag=fj.TAGS['p'], ext='.json', dire=None):
    """Yield (subject_filename, attendance_percentage_for_tag) pairs.

    *dire* defaults to the current working directory, resolved at call
    time (the old ``dire=pDir()`` default was frozen at import time).
    """
    if dire is None:
        dire = pDir()
    for filename in display_names(ext, dire):
        path = full_path_to(filename, dire)
        json_dic = get_json_file(path)
        percent = display.percent(json_dic, tag)
        yield filename, percent
def deleteSubject(fName):
    """
    Delete a file and raises SubjectError if not Found
    """
    # Check-then-remove; exce.SubjectError gives a domain-specific error
    # instead of a bare OSError.
    if not fileExists(fName):
        raise exce.SubjectError("Subject: %s is not Found" %(fName))
    os.remove(fName)
def reset(ext='.json', dire=None):
    """Delete every subject record file with extension *ext* in *dire*.

    A clean fresh start.  *dire* defaults to the current working
    directory, resolved at call time (the old ``dire=pDir()`` default was
    frozen at import time).
    """
    if dire is None:
        dire = pDir()
    for filename in display_names(ext, dire):
        path = full_path_to(filename, dire)
        os.remove(path)
| mit | -4,378,065,813,467,410,000 | 24.181102 | 77 | 0.611632 | false |
omise/omise-python | omise/test/test_receipt.py | 1 | 7749 | import mock
import unittest
from .helper import _ResourceMixin
class ReceiptTest(_ResourceMixin, unittest.TestCase):
    """Tests for retrieving and listing omise Receipt resources
    against a mocked HTTP layer."""

    def _getTargetClass(self):
        """Return the Receipt resource class under test."""
        from .. import Receipt
        return Receipt

    def _getCollectionClass(self):
        """Return the eager Collection container class."""
        from .. import Collection
        return Collection

    def _getLazyCollectionClass(self):
        """Return the lazily-paginated LazyCollection class."""
        from .. import LazyCollection
        return LazyCollection

    def _makeOne(self):
        """Build a Receipt instance from a canned data dictionary."""
        return self._getTargetClass().from_data({
            'object': 'receipt',
            'id': 'rcpt_test',
            'number': 'OMTH201710110001',
            'location': '/receipts/rcpt_test',
            'date': '2017-10-11T16:59:59Z',
            'customer_name': 'John Doe',
            'customer_address': 'Crystal Design Center (CDC)',
            'customer_tax_id': 'Tax ID 1234',
            'customer_email': '[email protected]',
            'customer_statement_name': 'John',
            'company_name': 'Omise Company Limited',
            'company_address': 'Crystal Design Center (CDC)',
            'company_tax_id': '0000000000000',
            'charge_fee': 1315,
            'voided_fee': 0,
            'transfer_fee': 0,
            'subtotal': 1315,
            'vat': 92,
            'wht': 0,
            'total': 1407,
            'credit_note': False,
            'currency': 'thb'
        })

    @mock.patch('requests.get')
    def test_retrieve(self, api_call):
        """retrieve('rcpt_test') issues GET /receipts/rcpt_test and
        deserialises the single receipt."""
        class_ = self._getTargetClass()
        self.mockResponse(api_call, """{
            "object": "receipt",
            "id": "rcpt_test",
            "number": "OMTH201710110001",
            "location": "/receipts/rcpt_test",
            "date": "2017-10-11T16:59:59Z",
            "customer_name": "John Doe",
            "customer_address": "Crystal Design Center (CDC)",
            "customer_tax_id": "Tax ID 1234",
            "customer_email": "[email protected]",
            "customer_statement_name": "John",
            "company_name": "Omise Company Limited",
            "company_address": "Crystal Design Center (CDC)",
            "company_tax_id": "0000000000000",
            "charge_fee": 1315,
            "voided_fee": 0,
            "transfer_fee": 0,
            "subtotal": 1315,
            "vat": 92,
            "wht": 0,
            "total": 1407,
            "credit_note": false,
            "currency": "thb"
        }""")

        receipt = class_.retrieve('rcpt_test')
        self.assertTrue(isinstance(receipt, class_))
        self.assertEqual(receipt.id, 'rcpt_test')
        self.assertEqual(receipt.number, 'OMTH201710110001')
        self.assertEqual(receipt.company_tax_id, '0000000000000')
        self.assertEqual(receipt.charge_fee, 1315)
        self.assertEqual(receipt.voided_fee, 0)
        self.assertEqual(receipt.transfer_fee, 0)
        self.assertEqual(receipt.subtotal, 1315)
        self.assertEqual(receipt.total, 1407)
        self.assertEqual(receipt.currency, 'thb')
        self.assertRequest(
            api_call,
            'https://api.omise.co/receipts/rcpt_test'
        )

    @mock.patch('requests.get')
    def test_retrieve_no_args(self, api_call):
        """retrieve() with no id issues GET /receipts and returns an
        eager Collection of receipts."""
        class_ = self._getTargetClass()
        collection_class_ = self._getCollectionClass()
        self.mockResponse(api_call, """{
            "object": "list",
            "from": "1970-01-01T00:00:00Z",
            "to": "2017-10-11T23:59:59Z",
            "offset": 0,
            "limit": 20,
            "total": 1,
            "order": "chronological",
            "location": "/receipts",
            "data": [
                {
                    "object": "receipt",
                    "id": "rcpt_test",
                    "number": "OMTH201710110001",
                    "location": "/receipts/rcpt_test",
                    "date": "2017-10-11T16:59:59Z",
                    "customer_name": "John Doe",
                    "customer_address": "Crystal Design Center (CDC)",
                    "customer_tax_id": "Tax ID 1234",
                    "customer_email": "[email protected]",
                    "customer_statement_name": "John",
                    "company_name": "Omise Company Limited",
                    "company_address": "Crystal Design Center (CDC)",
                    "company_tax_id": "0000000000000",
                    "charge_fee": 1315,
                    "voided_fee": 0,
                    "transfer_fee": 0,
                    "subtotal": 1315,
                    "vat": 92,
                    "wht": 0,
                    "total": 1407,
                    "credit_note": false,
                    "currency": "thb"
                }
            ]
        }""")

        receipts = class_.retrieve()
        self.assertTrue(isinstance(receipts, collection_class_))
        self.assertTrue(isinstance(receipts[0], class_))
        self.assertEqual(receipts[0].id, 'rcpt_test')
        self.assertEqual(receipts[0].number, 'OMTH201710110001')
        self.assertEqual(receipts[0].company_tax_id, '0000000000000')
        self.assertEqual(receipts[0].charge_fee, 1315)
        self.assertEqual(receipts[0].voided_fee, 0)
        self.assertEqual(receipts[0].transfer_fee, 0)
        self.assertEqual(receipts[0].subtotal, 1315)
        self.assertEqual(receipts[0].total, 1407)
        self.assertEqual(receipts[0].currency, 'thb')
        self.assertRequest(
            api_call,
            'https://api.omise.co/receipts'
        )

    @mock.patch('requests.get')
    def test_list(self, api_call):
        """list() returns a LazyCollection that yields receipts when
        iterated."""
        class_ = self._getTargetClass()
        lazy_collection_class_ = self._getLazyCollectionClass()
        self.mockResponse(api_call, """{
            "object": "list",
            "from": "1970-01-01T00:00:00Z",
            "to": "2017-10-11T23:59:59Z",
            "offset": 0,
            "limit": 20,
            "total": 1,
            "order": "chronological",
            "location": "/receipts",
            "data": [
                {
                    "object": "receipt",
                    "id": "rcpt_test",
                    "number": "OMTH201710110001",
                    "location": "/receipts/rcpt_test",
                    "date": "2017-10-11T16:59:59Z",
                    "customer_name": "John Doe",
                    "customer_address": "Crystal Design Center (CDC)",
                    "customer_tax_id": "Tax ID 1234",
                    "customer_email": "[email protected]",
                    "customer_statement_name": "John",
                    "company_name": "Omise Company Limited",
                    "company_address": "Crystal Design Center (CDC)",
                    "company_tax_id": "0000000000000",
                    "charge_fee": 1315,
                    "voided_fee": 0,
                    "transfer_fee": 0,
                    "subtotal": 1315,
                    "vat": 92,
                    "wht": 0,
                    "total": 1407,
                    "credit_note": false,
                    "currency": "thb"
                }
            ]
        }""")

        receipts = class_.list()
        self.assertTrue(isinstance(receipts, lazy_collection_class_))

        # Materialise the lazy collection before indexing into it.
        receipts = list(receipts)
        self.assertTrue(isinstance(receipts[0], class_))
        self.assertEqual(receipts[0].id, 'rcpt_test')
        self.assertEqual(receipts[0].number, 'OMTH201710110001')
        self.assertEqual(receipts[0].company_tax_id, '0000000000000')
        self.assertEqual(receipts[0].charge_fee, 1315)
        self.assertEqual(receipts[0].voided_fee, 0)
        self.assertEqual(receipts[0].transfer_fee, 0)
        self.assertEqual(receipts[0].subtotal, 1315)
        self.assertEqual(receipts[0].total, 1407)
        self.assertEqual(receipts[0].currency, 'thb')
waqasbhatti/astroph-coffee | pysqlite/benchmarks/insert.py | 1 | 1496 | import time
def yesno(question):
    """Prompt with *question*; True when the reply starts with y or Y."""
    answer = raw_input(question + " ")
    return answer.startswith(("y", "Y"))
# Interactively choose which driver/behaviour combination to benchmark.
use_pysqlite2 = yesno("Use pysqlite 2.0?")
use_autocommit = yesno("Use autocommit?")
use_executemany= yesno("Use executemany?")

# Import the chosen driver under the common name `sqlite`.
if use_pysqlite2:
    from pysqlite2 import dbapi2 as sqlite
else:
    import sqlite
def create_db():
    """Open an in-memory database, create the benchmark table, return it."""
    connection = sqlite.connect(":memory:")
    if use_autocommit:
        # pysqlite 2 spells autocommit as isolation_level=None; the old
        # pysqlite exposes an autocommit attribute instead.
        if use_pysqlite2:
            connection.isolation_level = None
        else:
            connection.autocommit = True
    cursor = connection.cursor()
    cursor.execute("""
        create table test(v text, f float, i integer)
    """)
    cursor.close()
    return connection
def test():
    # One representative row: a longish string, a float and an integer,
    # duplicated 1000 times per batch.
    row = ("sdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffasfd", 3.14, 42)
    l = []
    for i in range(1000):
        l.append(row)

    con = create_db()
    cur = con.cursor()

    # pysqlite 2 uses qmark paramstyle; the old pysqlite uses pyformat.
    if sqlite.version_info > (2, 0):
        sql = "insert into test(v, f, i) values (?, ?, ?)"
    else:
        sql = "insert into test(v, f, i) values (%s, %s, %s)"

    # Time 50 batches, either via executemany or one execute per row.
    starttime = time.time()
    for i in range(50):
        if use_executemany:
            cur.executemany(sql, l)
        else:
            for r in l:
                cur.execute(sql, r)
    endtime = time.time()

    print "elapsed", endtime - starttime
    cur.execute("select count(*) from test")
    print "rows:", cur.fetchone()[0]

if __name__ == "__main__":
    test()
| mit | 1,127,763,636,754,973,300 | 22.52459 | 93 | 0.550802 | false |
wroersma/volatility | volatility/plugins/mac/vfsevents.py | 4 | 3049 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.mac.common as common
class mac_vfsevents(common.AbstractMacCommand):
    """ Lists processes filtering file system events """

    def calculate(self):
        common.set_plugin_members(self)

        # proc_name was added to fs_event_watcher in OS X 10.8.2; older
        # profiles cannot be supported by this plugin.
        if not self.addr_space.profile.obj_has_member("fs_event_watcher", "proc_name"):
            debug.error("This plugin only supports OS X >= 10.8.2. Please file a bug if you are running against a version matching this criteria.")

        # 13 event-type flags, in the order they appear in the watcher's
        # event_list bitmap.
        event_types = ["CREATE_FILE", "DELETE", "STAT_CHANGED", "RENAME", "CONTENT_MODIFIED", "EXCHANGE", "FINDER_INFO_CHANGED", "CREATE_DIR", "CHOWN"]
        event_types = event_types + ["XATTR_MODIFIED", "XATTR_REMOVED", "DOCID_CREATED", "DOCID_CHANGED"]

        # _watcher_table is an array of (up to 8) fs_event_watcher pointers.
        table_addr = self.addr_space.profile.get_symbol("_watcher_table")

        arr = obj.Object(theType = "Array", targetType = "Pointer", count = 8, vm = self.addr_space, offset = table_addr)

        for watcher_addr in arr:
            if not watcher_addr.is_valid():
                continue

            watcher = watcher_addr.dereference_as("fs_event_watcher")

            # proc_name is a fixed 33-byte buffer; trim at the first NUL.
            name = self.addr_space.read(watcher.proc_name.obj_offset, 33)
            if name:
                idx = name.find("\x00")
                if idx != -1:
                    name = name[:idx]

            # Collect the names of the events this watcher has enabled
            # (flag byte == 1) into a comma-separated string.
            events = ""
            event_arr = obj.Object(theType = "Array", targetType = "unsigned char", offset = watcher.event_list.v(), count = 13, vm = self.addr_space)
            for (i, event) in enumerate(event_arr):
                if event == 1:
                    events = events + event_types[i] + ", "

            # Drop the trailing ", " separator, if any.
            if len(events) and events[-1] == " " and events[-2] == ",":
                events = events[:-2]

            yield watcher_addr, name, watcher.pid, events

    def render_text(self, outfd, data):
        self.table_header(outfd, [("Offset", "[addrpad]"),
                                  ("Name", "20"),
                                  ("Pid", "8"),
                                  ("Events", "")])

        for (addr, name, pid, events) in data:
            self.table_row(outfd, addr, name, pid, events)
| gpl-2.0 | 1,202,187,429,838,534,400 | 39.118421 | 151 | 0.610692 | false |
cloudify-cosmo/cloudify-cli | cloudify_cli/tests/commands/test_users.py | 1 | 5641 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from mock import MagicMock
from .test_base import CliCommandTest
from cloudify_cli.exceptions import CloudifyValidationError
class BaseUsersTest(CliCommandTest):
def setUp(self):
super(BaseUsersTest, self).setUp()
self.use_manager()
self.client.users = MagicMock()
class UsersTest(BaseUsersTest):
    """CLI-level tests for `cfy users` argument validation and dispatch."""

    # NOTE: the previous setUp() override only called super() and added
    # nothing, so it was removed; BaseUsersTest.setUp() still runs.

    def test_create_users_missing_username(self):
        """`cfy users create` without a username exits with a usage error."""
        outcome = self.invoke(
            'cfy users create',
            err_str_segment='2',  # Exit code
            exception=SystemExit
        )
        self.assertIn('missing argument', outcome.output.lower())
        self.assertIn('username', outcome.output.lower())

    def test_create_users_missing_password(self):
        """`cfy users create <name>` without --password exits with an error."""
        outcome = self.invoke(
            'cfy users create username',
            err_str_segment='2',  # Exit code
            exception=SystemExit
        )
        self.assertIn('missing option', outcome.output.lower())
        self.assertIn('--password', outcome.output)

    def test_create_users_default_role(self):
        """Omitting -r creates the user with the 'default' role."""
        self.invoke('cfy users create username -p password')
        call_list = self.client.users.method_calls[0][1]
        self.assertEqual(call_list, ('username', 'password', 'default'))

    def test_create_users_custom_role(self):
        """-r passes the requested role through to the API client."""
        self.invoke('cfy users create username -p password -r admin')
        call_list = self.client.users.method_calls[0][1]
        self.assertEqual(call_list, ('username', 'password', 'admin'))

    def test_empty_username(self):
        """An empty username is rejected before hitting the API."""
        self.invoke(
            'cfy users create "" -p password',
            err_str_segment='ERROR: The `username` argument is empty',
            exception=CloudifyValidationError
        )

    def test_illegal_characters_in_username(self):
        """Usernames with illegal characters are rejected."""
        self.invoke(
            'cfy users create "#&*" -p password',
            err_str_segment='ERROR: The `username` argument contains '
                            'illegal characters',
            exception=CloudifyValidationError
        )

    def test_empty_password(self):
        """An empty password is rejected before hitting the API."""
        self.invoke(
            'cfy users create user -p ""',
            err_str_segment='ERROR: The password is empty',
            exception=CloudifyValidationError
        )

    def test_unlock_user(self):
        """`cfy users unlock` passes the username to the API client."""
        self.invoke('cfy users unlock user1')
        call_list = self.client.users.method_calls[0][1][0]
        self.assertEqual(call_list, 'user1')
class CreateUsersWithTenantTest(BaseUsersTest):
    """Tests for `cfy users create` with the optional tenant arguments."""

    def setUp(self):
        super(CreateUsersWithTenantTest, self).setUp()
        # Also mock the tenants client so tenant-assignment calls can be
        # inspected.
        self.client.tenants = MagicMock()

    def test_create_users_without_tenant_info(self):
        """No tenant flags: the user is created but no tenant call is made."""
        self.invoke('cfy users create username -p password')
        call_list = self.client.users.method_calls[0][1]
        self.assertEqual(call_list, ('username', 'password', 'default'))
        adding_to_tenant_call_list = self.client.tenants.method_calls
        self.assertEqual(adding_to_tenant_call_list, [])

    def test_create_users_with_full_tenant_info(self):
        """Both -t and -l: the user is created and assigned to the tenant."""
        # NOTE: the backslash continues the string literal; the embedded
        # whitespace is tolerated by the CLI argument parser.
        self.invoke('cfy users create username -p password -t\
                    test_tenant -l test_user')
        user_create_call_list = self.client.users.method_calls[0][1]
        self.assertEqual(user_create_call_list,
                         ('username', 'password', 'default'))
        adding_to_tenant_call_list = self.client.tenants.method_calls[0][1]
        self.assertEqual(adding_to_tenant_call_list,
                         ('username', 'test_tenant', 'test_user'))

    def test_create_users_with_full_tenant_info_long_flags_names(self):
        """Long-form --tenant-name/--user-tenant-role behave like -t/-l."""
        self.invoke('cfy users create username -p password --tenant-name\
                    test_tenant --user-tenant-role test_user')
        user_create_call_list = self.client.users.method_calls[0][1]
        self.assertEqual(user_create_call_list,
                         ('username', 'password', 'default'))
        adding_to_tenant_call_list = self.client.tenants.method_calls[0][1]
        self.assertEqual(adding_to_tenant_call_list,
                         ('username', 'test_tenant', 'test_user'))

    def test_create_fail_users_with_tenant_name_only(self):
        """-t without -l: the user is created, no tenant assignment happens."""
        self.invoke('cfy users create username -p password -t default')
        user_create_call_list = self.client.users.method_calls[0][1]
        self.assertEqual(user_create_call_list,
                         ('username', 'password', 'default'))
        adding_to_tenant_call_list = self.client.tenants.method_calls
        self.assertEqual(adding_to_tenant_call_list, [])

    def test_create_fail_users_with_user_tenant_role_only(self):
        """-l without -t: the user is created, no tenant assignment happens."""
        self.invoke('cfy users create username -p password -l user')
        user_create_call_list = self.client.users.method_calls[0][1]
        self.assertEqual(user_create_call_list,
                         ('username', 'password', 'default'))
        adding_to_tenant_call_list = self.client.tenants.method_calls
        self.assertEqual(adding_to_tenant_call_list, [])
| apache-2.0 | -3,084,391,692,655,285,000 | 40.785185 | 79 | 0.636767 | false |
rangadi/incubator-beam | sdks/python/apache_beam/runners/portability/stager.py | 2 | 25320 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Support for installing custom code and required dependencies.
Workflows, with the exception of very simple ones, are organized in multiple
modules and packages. Typically, these modules and packages have
dependencies on other standard libraries. Beam relies on the Python
setuptools package to handle these scenarios. For further details please read:
https://pythonhosted.org/an_example_pypi_project/setuptools.html
When a runner tries to run a pipeline it will check for a --requirements_file
and a --setup_file option.
If --setup_file is present then it is assumed that the folder containing the
file specified by the option has the typical layout required by setuptools and
it will run 'python setup.py sdist' to produce a source distribution. The
resulting tarball (a .tar or .tar.gz file) will be staged at the staging
location specified as job option. When a worker starts it will check for the
presence of this file and will run 'easy_install tarball' to install the
package in the worker.
If --requirements_file is present then the file specified by the option will be
staged in the staging location. When a worker starts it will check for the
presence of this file and will run 'pip install -r requirements.txt'. A
requirements file can be easily generated by running 'pip freeze -r
requirements.txt'. The reason a runner does not run this automatically is
because quite often only a small fraction of the dependencies present in a
requirements.txt file are actually needed for remote execution and therefore a
one-time manual trimming is desirable.
TODO(silviuc): Should we allow several setup packages?
TODO(silviuc): We should allow customizing the exact command for setup build.
"""
from __future__ import absolute_import
import glob
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import pkg_resources
from apache_beam.internal import pickler
from apache_beam.io.filesystems import FileSystems
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import WorkerOptions
# TODO(angoenka): Remove reference to dataflow internal names
from apache_beam.runners.dataflow.internal import names
from apache_beam.utils import processes
# All constants are for internal use only; no backwards-compatibility
# guarantees.
# Standard file names used for staging files.
WORKFLOW_TARBALL_FILE = 'workflow.tar.gz'
REQUIREMENTS_FILE = 'requirements.txt'
EXTRA_PACKAGES_FILE = 'extra_packages.txt'
# Package names for distributions
BEAM_PACKAGE_NAME = 'apache-beam'
class Stager(object):
    """Abstract Stager identifies and copies the appropriate artifacts to the
    staging location.

    Implementation of this stager has to implement :func:`stage_artifact` and
    :func:`commit_manifest`.
    """

    def stage_artifact(self, local_path_to_artifact, artifact_name):
        """Stages the artifact to Stager._staging_location and adds artifact_name
        to the manifest of artifacts that have been staged."""
        raise NotImplementedError

    def commit_manifest(self):
        """Commits manifest."""
        raise NotImplementedError

    @staticmethod
    def get_sdk_package_name():
        """For internal use only; no backwards-compatibility guarantees.
        Returns the PyPI package name to be staged."""
        return BEAM_PACKAGE_NAME

    def stage_job_resources(self,
                            options,
                            build_setup_args=None,
                            temp_dir=None,
                            populate_requirements_cache=None,
                            staging_location=None):
        """For internal use only; no backwards-compatibility guarantees.

        Creates (if needed) and stages job resources to staging_location.

        Args:
          options: Command line options. More specifically the function will
            expect requirements_file, setup_file, and save_main_session options
            to be present.
          build_setup_args: A list of command line arguments used to build a
            setup package. Used only if options.setup_file is not None. Used
            only for testing.
          temp_dir: Temporary folder where the resource building can happen. If
            None then a unique temp directory will be created. Used only for
            testing.
          populate_requirements_cache: Callable for populating the requirements
            cache. Used only for testing.
          staging_location: Location to stage the file.

        Returns:
          A list of file names (no paths) for the resources staged. All the
          files are assumed to be staged at staging_location.

        Raises:
          RuntimeError: If files specified are not found or error encountered
            while trying to create the resources (e.g., build a setup package).
        """
        temp_dir = temp_dir or tempfile.mkdtemp()
        resources = []
        setup_options = options.view_as(SetupOptions)
        # Make sure that all required options are specified.
        if staging_location is None:
            raise RuntimeError('The staging_location must be specified.')
        # Stage a requirements file if present.
        if setup_options.requirements_file is not None:
            if not os.path.isfile(setup_options.requirements_file):
                raise RuntimeError(
                    'The file %s cannot be found. It was specified in the '
                    '--requirements_file command line option.' %
                    setup_options.requirements_file)
            staged_path = FileSystems.join(staging_location, REQUIREMENTS_FILE)
            self.stage_artifact(setup_options.requirements_file, staged_path)
            resources.append(REQUIREMENTS_FILE)
            requirements_cache_path = (
                os.path.join(tempfile.gettempdir(), 'dataflow-requirements-cache')
                if setup_options.requirements_cache is None else
                setup_options.requirements_cache)
            # Populate cache with packages from requirements and stage the files
            # in the cache.
            if not os.path.exists(requirements_cache_path):
                os.makedirs(requirements_cache_path)
            (populate_requirements_cache if populate_requirements_cache else
             Stager._populate_requirements_cache)(setup_options.requirements_file,
                                                  requirements_cache_path)
            for pkg in glob.glob(os.path.join(requirements_cache_path, '*')):
                self.stage_artifact(
                    pkg, FileSystems.join(staging_location, os.path.basename(pkg)))
                resources.append(os.path.basename(pkg))
        # Handle a setup file if present.
        # We will build the setup package locally and then copy it to the staging
        # location because the staging location is a remote path and the file cannot
        # be created directly there.
        if setup_options.setup_file is not None:
            if not os.path.isfile(setup_options.setup_file):
                raise RuntimeError(
                    'The file %s cannot be found. It was specified in the '
                    '--setup_file command line option.' % setup_options.setup_file)
            if os.path.basename(setup_options.setup_file) != 'setup.py':
                raise RuntimeError(
                    'The --setup_file option expects the full path to a file named '
                    'setup.py instead of %s' % setup_options.setup_file)
            tarball_file = Stager._build_setup_package(setup_options.setup_file,
                                                       temp_dir, build_setup_args)
            staged_path = FileSystems.join(staging_location, WORKFLOW_TARBALL_FILE)
            self.stage_artifact(tarball_file, staged_path)
            resources.append(WORKFLOW_TARBALL_FILE)
        # Handle extra local packages that should be staged.
        if setup_options.extra_packages is not None:
            resources.extend(
                self._stage_extra_packages(
                    setup_options.extra_packages, staging_location,
                    temp_dir=temp_dir))
        # Pickle the main session if requested.
        # We will create the pickled main session locally and then copy it to the
        # staging location because the staging location is a remote path and the
        # file cannot be created directly there.
        if setup_options.save_main_session:
            pickled_session_file = os.path.join(temp_dir,
                                                names.PICKLED_MAIN_SESSION_FILE)
            pickler.dump_session(pickled_session_file)
            staged_path = FileSystems.join(staging_location,
                                           names.PICKLED_MAIN_SESSION_FILE)
            self.stage_artifact(pickled_session_file, staged_path)
            resources.append(names.PICKLED_MAIN_SESSION_FILE)
        if hasattr(setup_options, 'sdk_location'):
            if (setup_options.sdk_location == 'default') or Stager._is_remote_path(
                    setup_options.sdk_location):
                # If --sdk_location is not specified then the appropriate package
                # will be obtained from PyPI (https://pypi.python.org) based on the
                # version of the currently running SDK. If the option is
                # present then no version matching is made and the exact URL or path
                # is expected.
                #
                # Unit tests running in the 'python setup.py test' context will
                # not have the sdk_location attribute present and therefore we
                # will not stage SDK.
                sdk_remote_location = 'pypi' if (setup_options.sdk_location == 'default'
                                                 ) else setup_options.sdk_location
                resources.extend(
                    self._stage_beam_sdk(sdk_remote_location, staging_location,
                                         temp_dir))
            elif setup_options.sdk_location == 'container':
                # Use the SDK that's built into the container, rather than re-staging
                # it.
                pass
            else:
                # This branch is also used by internal tests running with the SDK built
                # at head.
                if os.path.isdir(setup_options.sdk_location):
                    # TODO(angoenka): remove reference to Dataflow
                    sdk_path = os.path.join(setup_options.sdk_location,
                                            names.DATAFLOW_SDK_TARBALL_FILE)
                else:
                    sdk_path = setup_options.sdk_location
                if os.path.isfile(sdk_path):
                    logging.info('Copying Beam SDK "%s" to staging location.', sdk_path)
                    staged_path = FileSystems.join(
                        staging_location,
                        Stager._desired_sdk_filename_in_staging_location(
                            setup_options.sdk_location))
                    self.stage_artifact(sdk_path, staged_path)
                    _, sdk_staged_filename = FileSystems.split(staged_path)
                    resources.append(sdk_staged_filename)
                else:
                    if setup_options.sdk_location == 'default':
                        raise RuntimeError('Cannot find default Beam SDK tar file "%s"'
                                           % sdk_path)
                    elif not setup_options.sdk_location:
                        logging.info('Beam SDK will not be staged since --sdk_location '
                                     'is empty.')
                    else:
                        raise RuntimeError(
                            'The file "%s" cannot be found. Its location was specified by '
                            'the --sdk_location command-line option.' % sdk_path)
        worker_options = options.view_as(WorkerOptions)
        dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None)
        if dataflow_worker_jar is not None:
            jar_staged_filename = 'dataflow-worker.jar'
            staged_path = FileSystems.join(staging_location, jar_staged_filename)
            self.stage_artifact(dataflow_worker_jar, staged_path)
            resources.append(jar_staged_filename)
        # Delete all temp files created while staging job resources.
        # TODO(review): this is not in a try/finally, so a failure above leaks
        # temp_dir -- confirm whether callers rely on inspecting it on error.
        shutil.rmtree(temp_dir)
        retrieval_token = self.commit_manifest()
        return retrieval_token, resources

    @staticmethod
    def _download_file(from_url, to_path):
        """Downloads a file over http/https from a url or copy it from a remote
        path to local path."""
        if from_url.startswith('http://') or from_url.startswith('https://'):
            # TODO(silviuc): We should cache downloads so we do not do it for every
            # job.
            try:
                # We check if the file is actually there because wget returns a file
                # even for a 404 response (file will contain the contents of the 404
                # response).
                # TODO(angoenka): Extract and use the filename when downloading file.
                response, content = __import__('httplib2').Http().request(from_url)
                if int(response['status']) >= 400:
                    raise RuntimeError(
                        'Artifact not found at %s (response: %s)' % (from_url, response))
                # Write in binary mode: the artifact is raw bytes and text mode
                # would corrupt it on platforms that translate newlines.
                with open(to_path, 'wb') as f:
                    f.write(content)
            except Exception:
                logging.info('Failed to download Artifact from %s', from_url)
                raise
        else:
            if not os.path.isdir(os.path.dirname(to_path)):
                logging.info('Creating folder: %s', os.path.dirname(to_path))
                # makedirs (not mkdir) so missing intermediate directories are
                # created as well.
                os.makedirs(os.path.dirname(to_path))
            shutil.copyfile(from_url, to_path)

    @staticmethod
    def _is_remote_path(path):
        # Heuristic: anything with a '<scheme>://' separator is remote.
        return path.find('://') != -1

    def _stage_extra_packages(self, extra_packages, staging_location, temp_dir):
        """Stages a list of local extra packages.

        Args:
          extra_packages: Ordered list of local paths to extra packages to be
            staged. Only packages on localfile system and GCS are supported.
          staging_location: Staging location for the packages.
          temp_dir: Temporary folder where the resource building can happen.
            Caller is responsible for cleaning up this folder after this function
            returns.

        Returns:
          A list of file names (no paths) for the resources staged. All the files
          are assumed to be staged in staging_location.

        Raises:
          RuntimeError: If files specified are not found or do not have expected
            name patterns.
        """
        resources = []
        staging_temp_dir = tempfile.mkdtemp(dir=temp_dir)
        local_packages = []
        for package in extra_packages:
            if not (os.path.basename(package).endswith('.tar') or
                    os.path.basename(package).endswith('.tar.gz') or
                    os.path.basename(package).endswith('.whl') or
                    os.path.basename(package).endswith('.zip')):
                raise RuntimeError(
                    'The --extra_package option expects a full path ending with '
                    '".tar", ".tar.gz", ".whl" or ".zip" instead of %s' % package)
            if os.path.basename(package).endswith('.whl'):
                # Fix: the original call had a '%s' placeholder but passed no
                # argument, so the package name never appeared in the log.
                logging.warning(
                    'The .whl package "%s" is provided in --extra_package. '
                    'This functionality is not officially supported. Since wheel '
                    'packages are binary distributions, this package must be '
                    'binary-compatible with the worker environment (e.g. Python 2.7 '
                    'running on an x64 Linux host).', package)
            if not os.path.isfile(package):
                if Stager._is_remote_path(package):
                    # Download remote package.
                    logging.info('Downloading extra package: %s locally before staging',
                                 package)
                    _, last_component = FileSystems.split(package)
                    local_file_path = FileSystems.join(staging_temp_dir, last_component)
                    Stager._download_file(package, local_file_path)
                else:
                    raise RuntimeError(
                        'The file %s cannot be found. It was specified in the '
                        '--extra_packages command line option.' % package)
            else:
                local_packages.append(package)
        # Downloaded remote packages were placed in staging_temp_dir; pick them up.
        local_packages.extend([
            FileSystems.join(staging_temp_dir, f)
            for f in os.listdir(staging_temp_dir)
        ])
        for package in local_packages:
            basename = os.path.basename(package)
            staged_path = FileSystems.join(staging_location, basename)
            self.stage_artifact(package, staged_path)
            resources.append(basename)
        # Create a file containing the list of extra packages and stage it.
        # The file is important so that in the worker the packages are installed
        # exactly in the order specified. This approach will avoid extra PyPI
        # requests. For example if package A depends on package B and package A
        # is installed first then the installer will try to satisfy the
        # dependency on B by downloading the package from PyPI. If package B is
        # installed first this is avoided.
        with open(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), 'wt') as f:
            for package in local_packages:
                f.write('%s\n' % os.path.basename(package))
        staged_path = FileSystems.join(staging_location, EXTRA_PACKAGES_FILE)
        # Note that the caller of this function is responsible for deleting the
        # temporary folder where all temp files are created, including this one.
        self.stage_artifact(
            os.path.join(temp_dir, EXTRA_PACKAGES_FILE), staged_path)
        resources.append(EXTRA_PACKAGES_FILE)
        return resources

    @staticmethod
    def _get_python_executable():
        # Allow overriding the python executable to use for downloading and
        # installing dependencies, otherwise use the python executable for
        # the current process.
        python_bin = os.environ.get('BEAM_PYTHON') or sys.executable
        if not python_bin:
            raise ValueError('Could not find Python executable.')
        return python_bin

    @staticmethod
    def _populate_requirements_cache(requirements_file, cache_dir):
        # The 'pip download' command will not download again if it finds the
        # tarball with the proper version already present.
        # It will get the packages downloaded in the order they are presented in
        # the requirements file and will not download package dependencies.
        cmd_args = [
            Stager._get_python_executable(),
            '-m',
            'pip',
            'download',
            '--dest',
            cache_dir,
            '-r',
            requirements_file,
            '--exists-action',
            'i',
            # Download from PyPI source distributions.
            '--no-binary',
            ':all:'
        ]
        logging.info('Executing command: %s', cmd_args)
        processes.check_output(cmd_args)

    @staticmethod
    def _build_setup_package(setup_file, temp_dir, build_setup_args=None):
        """Runs 'python setup.py sdist' next to setup_file and returns the tarball."""
        saved_current_directory = os.getcwd()
        try:
            os.chdir(os.path.dirname(setup_file))
            if build_setup_args is None:
                build_setup_args = [
                    Stager._get_python_executable(),
                    os.path.basename(setup_file), 'sdist', '--dist-dir', temp_dir
                ]
            logging.info('Executing command: %s', build_setup_args)
            processes.check_output(build_setup_args)
            output_files = glob.glob(os.path.join(temp_dir, '*.tar.gz'))
            if not output_files:
                raise RuntimeError(
                    'File %s not found.' % os.path.join(temp_dir, '*.tar.gz'))
            return output_files[0]
        finally:
            os.chdir(saved_current_directory)

    @staticmethod
    def _desired_sdk_filename_in_staging_location(sdk_location):
        """Returns the name that SDK file should have in the staging location.

        Args:
          sdk_location: Full path to SDK file.
        """
        if sdk_location.endswith('.whl'):
            _, wheel_filename = FileSystems.split(sdk_location)
            if wheel_filename.startswith('apache_beam'):
                return wheel_filename
            else:
                raise RuntimeError('Unrecognized SDK wheel file: %s' % sdk_location)
        else:
            return names.DATAFLOW_SDK_TARBALL_FILE

    def _stage_beam_sdk(self, sdk_remote_location, staging_location, temp_dir):
        """Stages a Beam SDK file with the appropriate version.

        Args:
          sdk_remote_location: A URL from which the file can be downloaded or a
            remote file location. The SDK file can be a tarball or a wheel. Set
            to 'pypi' to download and stage a wheel and source SDK from PyPi.
          staging_location: Location where the SDK file should be copied.
          temp_dir: path to temporary location where the file should be
            downloaded.

        Returns:
          A list of SDK files that were staged to the staging location.

        Raises:
          RuntimeError: if staging was not successful.
        """
        if sdk_remote_location == 'pypi':
            sdk_local_file = Stager._download_pypi_sdk_package(temp_dir)
            sdk_sources_staged_name = Stager._desired_sdk_filename_in_staging_location(
                sdk_local_file)
            staged_path = FileSystems.join(staging_location, sdk_sources_staged_name)
            logging.info('Staging SDK sources from PyPI to %s', staged_path)
            self.stage_artifact(sdk_local_file, staged_path)
            staged_sdk_files = [sdk_sources_staged_name]
            try:
                # Stage binary distribution of the SDK, for now on a best-effort basis.
                sdk_local_file = Stager._download_pypi_sdk_package(
                    temp_dir, fetch_binary=True)
                sdk_binary_staged_name = Stager._desired_sdk_filename_in_staging_location(
                    sdk_local_file)
                staged_path = FileSystems.join(staging_location, sdk_binary_staged_name)
                logging.info('Staging binary distribution of the SDK from PyPI to %s',
                             staged_path)
                self.stage_artifact(sdk_local_file, staged_path)
                staged_sdk_files.append(sdk_binary_staged_name)
            except RuntimeError as e:
                # logging.warning: logging.warn is deprecated.
                logging.warning(
                    'Failed to download requested binary distribution '
                    'of the SDK: %s', repr(e))
            return staged_sdk_files
        elif Stager._is_remote_path(sdk_remote_location):
            local_download_file = os.path.join(temp_dir, 'beam-sdk.tar.gz')
            Stager._download_file(sdk_remote_location, local_download_file)
            staged_name = Stager._desired_sdk_filename_in_staging_location(
                sdk_remote_location)
            staged_path = FileSystems.join(staging_location, staged_name)
            logging.info('Staging Beam SDK from %s to %s', sdk_remote_location,
                         staged_path)
            self.stage_artifact(local_download_file, staged_path)
            return [staged_name]
        else:
            raise RuntimeError(
                'The --sdk_location option was used with an unsupported '
                'type of location: %s' % sdk_remote_location)

    @staticmethod
    def _download_pypi_sdk_package(temp_dir,
                                   fetch_binary=False,
                                   language_version_tag='27',
                                   language_implementation_tag='cp',
                                   abi_tag='cp27mu',
                                   platform_tag='manylinux1_x86_64'):
        """Downloads SDK package from PyPI and returns path to local path."""
        package_name = Stager.get_sdk_package_name()
        try:
            version = pkg_resources.get_distribution(package_name).version
        except pkg_resources.DistributionNotFound:
            raise RuntimeError('Please set --sdk_location command-line option '
                               'or install a valid {} distribution.'
                               .format(package_name))
        cmd_args = [
            Stager._get_python_executable(), '-m', 'pip', 'download', '--dest',
            temp_dir,
            '%s==%s' % (package_name, version), '--no-deps'
        ]
        if fetch_binary:
            logging.info('Downloading binary distribution of the SDK from PyPi')
            # Get a wheel distribution for the SDK from PyPI.
            cmd_args.extend([
                '--only-binary', ':all:', '--python-version', language_version_tag,
                '--implementation', language_implementation_tag, '--abi', abi_tag,
                '--platform', platform_tag
            ])
            # Example wheel: apache_beam-2.4.0-cp27-cp27mu-manylinux1_x86_64.whl
            expected_files = [
                os.path.join(
                    temp_dir, '%s-%s-%s%s-%s-%s.whl' % (package_name.replace(
                        '-', '_'), version, language_implementation_tag,
                        language_version_tag, abi_tag,
                        platform_tag))
            ]
        else:
            logging.info('Downloading source distribution of the SDK from PyPi')
            cmd_args.extend(['--no-binary', ':all:'])
            expected_files = [
                os.path.join(temp_dir, '%s-%s.zip' % (package_name, version)),
                os.path.join(temp_dir, '%s-%s.tar.gz' % (package_name, version))
            ]
        logging.info('Executing command: %s', cmd_args)
        try:
            processes.check_output(cmd_args)
        except subprocess.CalledProcessError as e:
            raise RuntimeError(repr(e))
        for sdk_file in expected_files:
            if os.path.exists(sdk_file):
                return sdk_file
        raise RuntimeError(
            'Failed to download a distribution for the running SDK. '
            'Expected either one of %s to be found in the download folder.' %
            (expected_files))
| apache-2.0 | -2,992,134,150,777,267,000 | 43.343257 | 80 | 0.654305 | false |
telefonicaid/fiware-health | fiware-region-sanity-tests/commons/dbus_phonehome_service.py | 2 | 11746 | # -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]
from commons.constants import PHONEHOME_DBUS_NAME, PHONEHOME_TIMEOUT, PHONEHOME_SIGNAL, PHONEHOME_METADATA_SIGNAL,\
PHONEHOME_DBUS_OBJECT_METADATA_PATH, PHONEHOME_DBUS_OBJECT_PATH, PHONEHOME_TX_ID_HEADER
from dbus import SystemBus
from dbus.exceptions import DBusException
from dbus.service import BusName
from dbus.mainloop.glib import DBusGMainLoop
import dbus
import gobject
import re
log_message_data_out_of_sequence =\
"Received data are not for this FIWARE Node. Probably they come from another SanityCheck execution running "\
"at the same time. Skipping and waiting for more data from PhoneHome Server..."
class DbusPhoneHomeClient:
    """DBus client that waits for a PhoneHome signal emitted by the server.

    Class-level attributes are shared state between the (static) signal
    handlers and the instance driving the GLib main loop: the handlers are
    invoked by the DBus machinery without a reference to the instance.
    """

    expected_signal_hostname = None  # hostname this run is waiting for
    mainloop = None                  # GLib main loop, quit by handlers/timeout
    data_received = None             # payload captured by the matching handler
    logger = None                    # shared logger for the static handlers

    def __init__(self, logger):
        """Inits the DBus client and creates a new System bus.
        :param logger: Logger
        :return:
        """
        self.logger = logger
        DbusPhoneHomeClient.logger = logger
        self.logger.debug("Attaching to a main loop")
        DBusGMainLoop(set_as_default=True)
        self.logger.debug("Creating session in SystemBus")
        self.bus = SystemBus()

    @staticmethod
    def timeout(mainloop, logger, *args):
        """Timeout function for DBus MainLoop.
        :param mainloop: Loop manager (MainLoop)
        :param logger: Logger
        :param args: Rest of arguments
        :return: False. The function is called repeatedly until it returns FALSE,
        at which point the timeout is automatically destroyed and the function will not be called again.
        """
        logger.debug("Timed out!. Aborting the wait.")
        mainloop.quit()
        return False

    @staticmethod
    def phonehome_signal_handler(phonehome_http_data):
        """Handler for `PHONEHOME_SIGNAL`.
        :param phonehome_http_data: Data the VM emitted in the signal. If matches the expected one, main loop finishes.
        :return: None
        """
        DbusPhoneHomeClient.logger.debug("Data received from PhoneHome Server: '%s'",
                                         phonehome_http_data.encode('base64', 'strict').replace('\n', ' '))
        # Extract the 'hostname=<value>' field from the POST body (if any).
        hostname = re.match(".*hostname=([\w-]*)", phonehome_http_data)
        hostname = hostname.group(1) if hostname is not None else hostname
        if DbusPhoneHomeClient.expected_signal_hostname == hostname:
            DbusPhoneHomeClient.logger.debug("Received hostname: '%s'",
                                             DbusPhoneHomeClient.expected_signal_hostname)
            DbusPhoneHomeClient.data_received = phonehome_http_data
            DbusPhoneHomeClient.mainloop.quit()
        else:
            DbusPhoneHomeClient.logger.debug(log_message_data_out_of_sequence)

    @staticmethod
    def phonehome_signal_handler_metadata(phonehome_http_data, hostname):
        """Handler for `PHONEHOME_METADATA_SIGNAL`.
        :param phonehome_http_data: Data the VM emitted in the signal.
        :param hostname: VM hostname. If matches the expected one, main loop finishes.
        :return: None
        """
        DbusPhoneHomeClient.logger.debug("Data received from PhoneHome Server (Hostname): '%s'",
                                         hostname.encode('base64', 'strict').replace('\n', ' '))
        if DbusPhoneHomeClient.expected_signal_hostname == hostname:
            DbusPhoneHomeClient.logger.debug("Received hostname: '%s'", hostname)
            DbusPhoneHomeClient.data_received = phonehome_http_data
            DbusPhoneHomeClient.mainloop.quit()
        else:
            DbusPhoneHomeClient.logger.debug(log_message_data_out_of_sequence)

    def connect_and_wait_for_phonehome_signal(self, bus_name, object_path, phonehome_signal, data_expected):
        """Connects to Bus and gets the published object (PhoneHome DBus object).
        :param bus_name: str
            A bus name (either the unique name or a well-known name)
            of the application owning the object. The keyword argument
            named_service is a deprecated alias for this. PhoneHome DBus service.
        :param object_path: str
            The object path of the desired PhoneHome Object.
        :param phonehome_signal: Signal name to subscribe to (`PHONEHOME_SIGNAL`
            or `PHONEHOME_METADATA_SIGNAL`).
        :param data_expected: The PhoneHome client will wait for the signal with this data value.
            When received, main loop will be finished and data received from the signal will be returned.
        :return: False if the PhoneHome bus/object could not be reached;
            None if the signal has not been received after the timewait;
            else, the content received in the signal.
        """
        DbusPhoneHomeClient.data_received = None
        self.logger.debug("Connecting to PhoneHome DBus Service in bus '%s' and getting PhoneHome object "
                          "with path '%s'", bus_name, object_path)
        DbusPhoneHomeClient.expected_signal_hostname = data_expected
        try:
            # Renamed from 'object' to avoid shadowing the builtin.
            remote_object = self.bus.get_object(bus_name, object_path)
            phonehome_interface = dbus.Interface(remote_object, bus_name)
        except DBusException as e:
            self.logger.error("PhoneHome bus or object not found. Please check the PhoneHome services. %s", str(e))
            return False
        # Connect to signal
        self.logger.debug("Connecting to signal '%s'", phonehome_signal)
        if phonehome_signal == PHONEHOME_SIGNAL:
            phonehome_interface.connect_to_signal(phonehome_signal, self.phonehome_signal_handler)
        elif phonehome_signal == PHONEHOME_METADATA_SIGNAL:
            phonehome_interface.connect_to_signal(PHONEHOME_METADATA_SIGNAL, self.phonehome_signal_handler_metadata)
        # Attach to a main loop
        self.logger.debug("Creating main loop")
        DbusPhoneHomeClient.mainloop = gobject.MainLoop()
        # Setup timeout and start main loop (gobject expects milliseconds).
        phonehome_timeout = PHONEHOME_TIMEOUT * 1000
        self.logger.debug("Setting time out to: %d", phonehome_timeout)
        gobject.timeout_add(phonehome_timeout, self.timeout, DbusPhoneHomeClient.mainloop, self.logger, priority=100)
        self.logger.debug("Waiting for signal '%s' with value or header 'hostname=%s' ."
                          " Timeout set to %s seconds", phonehome_signal, data_expected, PHONEHOME_TIMEOUT)
        DbusPhoneHomeClient.mainloop.run()
        self.logger.debug("Dbus PhoneHome Service stopped")
        return DbusPhoneHomeClient.data_received
class DbusPhoneHomeObject(dbus.service.Object):
    # DBus object published under PHONEHOME_DBUS_NAME; its decorated methods
    # define the signals the PhoneHome server emits towards waiting clients.

    def __init__(self, logger, bus, object_path):
        """Creates and registers a new PhoneHome service in the bus.
        :param logger: Logger used to trace object lifecycle and emissions.
        :param bus: BusName. The created DBus with well-known name.
        :param object_path: The object path of the desired PhoneHome Object.
        :return:
        """
        self.logger = logger
        self.bus = bus
        self.loop = None
        self.object_path = object_path
        self.logger.debug("Creating PhoneHome Object in the path '%s'", self.object_path)
        # Registers this object on the bus at object_path.
        dbus.service.Object.__init__(self, self.bus, self.object_path)

    @dbus.service.signal(dbus_interface=PHONEHOME_DBUS_NAME, signature='s')
    def phonehome_signal(self, phonehome_http_data):
        """This method is used to emit `PHONEHOME_SIGNAL` with the given http data.
        The dbus.service.signal decorator performs the actual emission when the
        method is called; the body only logs it.
        :param phonehome_http_data: String with all BODY data of the POST request
        :return: None
        """
        self.logger.debug("PhoneHome signal emitted with data: %s", str(phonehome_http_data))

    @dbus.service.signal(dbus_interface=PHONEHOME_DBUS_NAME, signature='ss')
    def phonehome_metadata_signal(self, phonehome_http_data, hostname):
        """This method is used to emit `PHONEHOME_METADATA_SIGNAL` with the given http data.
        The dbus.service.signal decorator performs the actual emission when the
        method is called; the body only logs it.
        :param phonehome_http_data: String with all BODY data of the POST request
        :param hostname: String with the header hostname value.
        :return: None
        """
        self.logger.debug("PhoneHome Metadata signal emitted with data: %s for the hostname %s",
                          str(phonehome_http_data), hostname)

    def remove_object(self):
        """Makes this object inaccessible via the given D-Bus connection and object path:
        The object ceases to be accessible via any connection or path.
        :return: None
        """
        self.logger.debug("Removing object '%s' from connection", self.object_path)
        self.remove_from_connection(path=self.object_path)
class DbusPhoneHomeServer:
    """Owns the published PhoneHome DBus objects and emits signals on them.

    One DbusPhoneHomeObject is kept per registered object path; incoming
    PhoneHome HTTP data is forwarded to clients by emitting the matching
    signal on the corresponding object.
    """

    def __init__(self, logger):
        """Initializes the DbusPhoneHomeServer.
        :param logger: Logger
        :return:
        """
        self.logger = logger
        self.dbus_phonehome_objects = {}
        self.logger.debug("Attaching to a main loop")
        DBusGMainLoop(set_as_default=True)

    def register_phonehome_object(self, phonehome_object_path):
        """Registers the bus name and a new phonehome object in this one.
        :param phonehome_object_path: The object path tho publish the desired PhoneHome Object. Format: /xxx/...
        :return: None
        """
        self.logger.debug("Registering new DBus name '%s'", PHONEHOME_DBUS_NAME)
        well_known_bus = BusName(PHONEHOME_DBUS_NAME, bus=SystemBus())
        self.logger.debug("Registering new PhoneHome Object '%s' in the Bus", phonehome_object_path)
        published_object = DbusPhoneHomeObject(self.logger, well_known_bus, phonehome_object_path)
        self.dbus_phonehome_objects[phonehome_object_path] = published_object

    def emit_phonehome_signal(self, phonehome_data, phonehome_object_path, hostname, transaction_id):
        """Emits the signal matching phonehome_object_path with the given data.
        :param phonehome_data: PhoneHome data (HTTP POST request)
        :param phonehome_object_path: /metadata or /phonehome
        :param hostname: String with the header Hostname value
        :param transaction_id: String with the transaction id value
        :return: None
        """
        self.logger.debug("%s: %s - Emit signal, data:'%s' to '%s'", PHONEHOME_TX_ID_HEADER, transaction_id,
                          phonehome_data, hostname)
        if phonehome_object_path == PHONEHOME_DBUS_OBJECT_METADATA_PATH:
            self.dbus_phonehome_objects[phonehome_object_path].phonehome_metadata_signal(phonehome_data, hostname)
        elif phonehome_object_path == PHONEHOME_DBUS_OBJECT_PATH:
            self.dbus_phonehome_objects[phonehome_object_path].phonehome_signal(phonehome_data)

    def remove_object(self, phonehome_object_path):
        """Makes the PhoneHome object inaccessible via the given D-Bus connection and object path:
        The object ceases to be accessible via any connection or path.
        :return: None
        """
        self.dbus_phonehome_objects[phonehome_object_path].remove_object()

    def logdebug(self, trace):
        """Writes a debug log trace.
        :param trace: Message to trace
        :return: None
        """
        self.logger.debug(trace)
| apache-2.0 | -1,946,093,824,831,349,200 | 43.996169 | 119 | 0.66468 | false |
RaphaelPoncet/2016-macs2-projet-hpc | read_receiver.py | 1 | 2438 | #! /usr/bin/env python
import numpy
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import optparse
import os
import sys
#import seaborn
def ParseVariableBinaryHeader(header):
    """Parse a '#'-prefixed variable header line.

    Expected layout: '# <name> <dtype> <nb_components> <nx> <ny> <nz>'.
    Returns the tuple (name, dtype, nb_components, nx, ny, nz) with the last
    four fields converted to int.
    """
    tokens = header.strip("\n").split()
    assert(tokens[0] == '#')
    name = tokens[1]
    dtype = tokens[2]
    nb_components, nx, ny, nz = int(tokens[3]), int(tokens[4]), int(tokens[5]), int(tokens[6])
    return name, dtype, nb_components, nx, ny, nz
# Script body (Python 2): reads a receiver file consisting of one text header
# line followed by raw float64 samples, strips the header by round-tripping
# the payload through a temporary file, and plots the seismogram plus a few
# selected receiver traces.
parser = optparse.OptionParser(usage="usage: %prog filename")
(options, args) = parser.parse_args()
if len(args) != 1:
    parser.error("wrong number of arguments")
receiver_filename = args[0]
# NOTE(review): splitting binary content on "\n" and re-joining with "\n"
# appends one extra trailing newline byte and assumes the payload survives the
# round trip -- fragile for arbitrary binary data; confirm the file format.
receiver_file = open(receiver_filename, 'rb')
readlines = receiver_file.read().split("\n")
receiver_file.close()
temp_filename = "tmp.binary"
# 'tempfile' here is a file handle, not the stdlib module (which this script
# does not import).
tempfile = open(temp_filename, 'wb')
# Parse header
header = readlines[0]
name, dtype, nb_components, nx, ny, nz = ParseVariableBinaryHeader(header)
# Write data without header
for line in readlines[1:]:
    tempfile.write(line + "\n")
tempfile.close()
# Re-read the header-less payload as raw float64 samples.
tempfile = open(temp_filename, 'rb')
data = numpy.fromfile(tempfile, dtype = 'float_')
tempfile.close()
if os.path.exists(temp_filename):
    print "Removing temporary file " + str(temp_filename)
    os.remove(temp_filename)
print data.shape, nx, nz
# Samples are laid out as nz rows (time/depth) by nx receivers.
data = data.reshape(nz, nx)
# Symmetric amplitude bound used for the color scale and trace offsets.
amplitude_max = max(numpy.amax(data), - numpy.amin(data))
print "amplitude_max=", amplitude_max
# Receiver traces to highlight: name -> (column index, plot color).
rcv_ids = {'rcv1': (nx / 2, 'blue'),
           'rcv2': (2 * nx / 5, 'red'),
           # 'rcv3': (3 * nx / 5, 'green'),
           # 'rcv4': (1200, 'orange'),
           # 'rcv5': (800, 'purple'),
           }
# with seaborn.axes_style("dark"):
fig, (ax1, ax2) = plt.subplots(1, 2)
cmap = 'gray'
# Left panel: full seismogram image, clipped to 10% of the peak amplitude.
ax1.imshow(data, cmap = cmap, interpolation = 'nearest', aspect = 'auto', vmin = - 0.1 * amplitude_max, vmax = 0.1 * amplitude_max)
for key, value in rcv_ids.iteritems():
    rcv_id, color = value
    # Vertical marker at each highlighted receiver position.
    ax1.plot([rcv_id, rcv_id], [0.0, nz], color = color, linewidth = 2)
ax1.set_xlim([0,nx])
ax1.set_ylim([nz,0])
cnt = 1
for key, value in rcv_ids.iteritems():
    rcv_id, color = value
    # Alternate traces left/right of zero so they do not overlap.
    offset = numpy.power(-1.0, cnt) * (2.0 * amplitude_max) * (cnt / 2)
    ax2.plot(offset + data[:, rcv_id], color = color, linewidth = 2, label = key)
    cnt += 1
plt.legend()
# fig.set_size_inches(8, 4)
# fig.savefig('./test.png', dpi=100)
plt.show()
| apache-2.0 | -6,778,286,463,717,795,000 | 21.574074 | 131 | 0.634537 | false |
azumimuo/family-xbmc-addon | script.cu.lrclyrics/resources/lib/culrcscrapers/lyricstime/lyricsScraper.py | 4 | 2074 | #-*- coding: UTF-8 -*-
import sys
import urllib
import re
from utilities import *
__title__ = 'lyricstime'
__priority__ = '220'
__lrc__ = False
def replace_char(string):
    """Return *string* with every space, comma and apostrophe replaced by '-'.

    Used to build URL slugs for lyricstime.com page addresses.
    """
    # Single regex pass is equivalent to replacing each character in turn.
    return re.sub("[ ,']", "-", string)
class LyricsFetcher:
def __init__( self ):
self.clean_lyrics_regex = re.compile( "<.+?>" )
self.normalize_lyrics_regex = re.compile( "&#[x]*(?P<name>[0-9]+);*" )
self.clean_br_regex = re.compile( "<br[ /]*>[\s]*", re.IGNORECASE )
self.clean_info_regex = re.compile( "\[[a-z]+?:.*\]\s" )
def get_lyrics(self, song):
log( "%s: searching lyrics for %s - %s" % (__title__, song.artist, song.title))
lyrics = Lyrics()
lyrics.song = song
lyrics.source = __title__
lyrics.lrc = __lrc__
try: # ***** parser - changing this changes search string
url = "http://www.lyricstime.com/%s-%s-lyrics.html" % (replace_char(song.artist.lower()).replace("&","and").replace("---","-").replace("--","-"),replace_char(song.title.lower()).replace("&","and").replace("---","-").replace("--","-"))
song_search = urllib.urlopen(url).read()
log( "%s: search url: %s" % (__title__, url))
lyr = song_search.split('<div id="songlyrics" style="padding-right:20px;">')[1].split('</div>')[0]
lyr = self.clean_br_regex.sub( "\n", lyr ).strip()
lyr = self.clean_lyrics_regex.sub( "", lyr ).strip()
lyr = self.normalize_lyrics_regex.sub(
lambda m: unichr( int( m.group( 1 ) ) ), lyr.decode("ISO-8859-1") )
lyr = u"\n".join( [ lyric.strip() for lyric in lyr.splitlines() ] )
lyr = self.clean_info_regex.sub( "", lyr )
lyrics.lyrics = lyr
return lyrics
except:
log( "%s: %s::%s (%d) [%s]" % ( __title__, self.__class__.__name__, sys.exc_info()[ 2 ].tb_frame.f_code.co_name, sys.exc_info()[ 2 ].tb_lineno, sys.exc_info()[ 1 ] ))
return None
| gpl-2.0 | 3,878,594,336,368,681,000 | 44.086957 | 246 | 0.526519 | false |
andysim/psi4 | tests/python/mints9/input.py | 3 | 8970 | from __future__ import print_function
import psi4
from psi4.driver import qcdb
#! A test of the basis specification. Various basis sets are specified outright and in blocks, both
#! orbital and auxiliary. Constructs libmints BasisSet objects through the constructor that calls
#! qcdb.BasisSet infrastructure. Checks that the resulting bases are of the right size and checks
#! that symmetry of the Molecule observes the basis assignment to atoms.
# cc-pvdz aug-cc-pvdz
# BASIS H 5/ 5 C 14/15 H +4/ 4 C +9/10
# RIFIT H 14/15 C 56/66 H +9/10 C +16/20
# JKFIT H 23/25 C 70/81 H +9/10 C +16/20
mymol = psi4.geometry("""
C 0.0 0.0 0.0
O 1.4 0.0 0.0
H_r -0.5 -0.7 0.0
H_l -0.5 0.7 0.0
""")
psi4.set_options({'basis': 'cc-pvdz'})
print('[1] <<< uniform cc-pVDZ >>>')
wert = psi4.core.BasisSet.build(mymol, 'BASIS', psi4.core.get_global_option('BASIS'))
psi4.compare_strings('CC-PVDZ', psi4.core.get_global_option('BASIS'), 'name') #TEST
psi4.compare_integers(38, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(40, wert.nao(), 'nao()') #TEST
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('CC-PVDZ', wert.name(), 'callby') #TEST
psi4.compare_strings('CC-PVDZ', wert.blend(), 'blend') #TEST
mymol.print_out()
print('[2] <<< RIFIT (default) >>>')
wert = psi4.core.BasisSet.build(mymol, 'DF_BASIS_MP2', '', 'RIFIT', psi4.core.get_global_option('BASIS'))
psi4.compare_integers(140, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(162, wert.nao(), 'nao()') #TEST
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('(CC-PVDZ AUX)', wert.name(), 'callby') #TEST
psi4.compare_strings('CC-PVDZ-RI', wert.blend(), 'blend') #TEST
mymol.print_out()
print('[3] <<< cc-pVDZ w/ aug-cc-pVDZ on C >>>')
psi4.basis_helper("""
assign cc-pvdz
assign c aug-cc-pvdz
""", name='dz_PLUS')
wert = psi4.core.BasisSet.build(mymol, 'BASIS', psi4.core.get_global_option('BASIS'))
psi4.compare_integers(47, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(50, wert.nao(), 'nao()') #TEST
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('DZ_PLUS', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ + CC-PVDZ', wert.blend(), 'blend') #TEST
mymol.print_out()
print('[4] <<< RIFIT (default) >>>')
wert = psi4.core.BasisSet.build(mymol, 'DF_BASIS_MP2', '', 'RIFIT', psi4.core.get_global_option('BASIS'))
mymol.print_out()
wert.print_out()
psi4.compare_integers(156, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(182, wert.nao(), 'nao()') #TEST
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('(DZ_PLUS AUX)', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ-RI + CC-PVDZ-RI', wert.blend(), 'blend') #TEST
mymol.print_out()
print('[5] <<< cc-pVDZ w/ aug-cc-pVDZ on C, H_R >>>')
psi4.basis_helper("""
assign cc-pvdz
assign c aug-cc-pvdz
assign h_r aug-cc-pvdz
""",
name='dz_PLUSplus',
key='BASis')
wert = psi4.core.BasisSet.build(mymol, 'BASIS', psi4.core.get_global_option('BASIS'))
psi4.compare_strings('DZ_PLUSPLUS', psi4.core.get_global_option('BASIS'), 'name') #TEST
psi4.compare_integers(51, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(54, wert.nao(), 'nao()') #TEST
psi4.compare_strings('cs', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('DZ_PLUSPLUS', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ + CC-PVDZ', wert.blend(), 'blend') #TEST
mymol.print_out()
print('[6] <<< RIFIT (custom: force cc-pVDZ on H, default on C, O) >>>')
psi4.basis_helper("""
assign h cc-pvdz-ri
""",
name='dz_PLUSplusRI',
key='df_basis_mp2')
wert = psi4.core.BasisSet.build(mymol, 'DF_BASIS_MP2', psi4.core.get_global_option('DF_BASIS_MP2'), 'RIFIT', psi4.core.get_global_option('BASIS'))
mymol.print_out()
psi4.compare_integers(156, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(182, wert.nao(), 'nao()') #TEST
psi4.compare_strings('cs', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('DZ_PLUSPLUSRI', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ-RI + CC-PVDZ-RI', wert.blend(), 'blend') #TEST
mymol.print_out()
print('[7] <<< cc-pVDZ w/ aug-cc-pVDZ on C, H >>>')
psi4.basis_helper("""
assign cc-pvdz
assign c aug-cc-pvdz
assign h aug-cc-pvdz
""",
name = 'dz_PLUSplusplus')
wert = psi4.core.BasisSet.build(mymol, 'BASIS', psi4.core.get_global_option('BASIS'))
psi4.compare_integers(55, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(58, wert.nao(), 'nao()') #TEST
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('DZ_PLUSPLUSPLUS', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ + CC-PVDZ', wert.blend(), 'blend') #TEST
mymol.print_out()
print('[8] <<< JKFIT (default) >>>')
wert = psi4.core.BasisSet.build(mymol, 'DF_BASIS_SCF', '', 'JKFIT', psi4.core.get_global_option('BASIS'))
psi4.compare_integers(220, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(252, wert.nao(), 'nao()') #TEST
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('(DZ_PLUSPLUSPLUS AUX)', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ-JKFIT + CC-PVDZ-JKFIT', wert.blend(), 'blend') #TEST
mymol.print_out()
psi4.set_options({'basis': 'aug-cc-pvdz'})
print('[9] <<< aug-cc-pVDZ >>>')
wert = psi4.core.BasisSet.build(mymol, 'BASIS', psi4.core.get_global_option('BASIS'))
psi4.compare_integers(64, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(68, wert.nao(), 'nao()') #TEST
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('AUG-CC-PVDZ', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ', wert.blend(), 'blend') #TEST
mymol.print_out()
print('[10] <<< JKFIT (default) >>>')
wert = psi4.core.BasisSet.build(mymol, 'DF_BASIS_SCF', '', 'JKFIT', psi4.core.get_global_option('BASIS'))
psi4.compare_integers(236, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(272, wert.nao(), 'nao()') #TEST
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('(AUG-CC-PVDZ AUX)', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ-JKFIT', wert.blend(), 'blend') #TEST
mymol.print_out()
mymol2 = psi4.geometry("""
C 0.0 0.0 0.0
O 1.4 0.0 0.0
H_r -0.5 -0.6 0.3
H_l -0.5 0.6 0.3
H_c -0.5 0.0 0.7
""")
psi4.set_options({'basis': 'dz_plusplusplus'})
print('[11] <<< cc-pVDZ w/ aug-cc-pVDZ on C, H >>>')
wert = psi4.core.BasisSet.build(mymol2, 'BASIS', psi4.core.get_global_option('BASIS'))
psi4.compare_integers(64, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(67, wert.nao(), 'nao()') #TEST
psi4.compare_strings('cs', mymol2.schoenflies_symbol(), 'symm') #TEST
psi4.compare_strings('DZ_PLUSPLUSPLUS', wert.name(), 'callby') #TEST
psi4.compare_strings('AUG-CC-PVDZ + CC-PVDZ', wert.blend(), 'blend') #TEST
mymol2.print_out()
hene = psi4.geometry("""
He
Ne 1 2.0
""")
psi4.basis_helper("""
assign cc-pv5z
""", name='disguised5z')
psi4.core.set_global_option('DF_BASIS_MP2', '') # clear df_basis_mp2 {...} to get autoaux below
print('[12] <<< cc-pV5Z on HeNe >>>')
wert = psi4.core.BasisSet.build(hene, 'BASIS', psi4.core.get_global_option('BASIS'))
hene.print_out()
psi4.compare_integers(146, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(196, wert.nao(), 'nao()') #TEST
psi4.compare_strings('DISGUISED5Z', wert.name(), 'callby') #TEST
psi4.compare_strings('CC-PV5Z', wert.blend(), 'blend') #TEST
print('[13] <<< RI for cc-pV5Z on HeNe >>>')
wert = psi4.core.BasisSet.build(hene, 'DF_BASIS_MP2', '', 'RIFIT', psi4.core.get_global_option('BASIS'))
hene.print_out()
psi4.compare_integers(284, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(413, wert.nao(), 'nao()') #TEST
psi4.compare_strings('(DISGUISED5Z AUX)', wert.name(), 'callby') #TEST
psi4.compare_strings('CC-PV5Z-RI', wert.blend(), 'blend') #TEST
print('[14] <<< impossible JK for cc-pV5Z on HeNe >>>')
error_tripped = 0
try:
wert = psi4.core.BasisSet.build(hene, 'DF_BASIS_SCF', '', 'JKFIT', psi4.core.get_global_option('BASIS'))
except qcdb.BasisSetNotFound:
error_tripped = 1
psi4.compare_integers(1, error_tripped, 'squashed 4z aux for 5z orb') #TEST
psi4.basis_helper(key='df_basis_scf', name='uggh', block="""
assign he DEF2-QZVPP-JKFIT
""")
hene.print_out()
print('[15] <<< forced JK for cc-pV5Z on HeNe >>>')
wert = psi4.core.BasisSet.build(hene, 'DF_BASIS_SCF', '', 'JKFIT', psi4.core.get_global_option('BASIS'))
psi4.compare_integers(169, wert.nbf(), 'nbf()') #TEST
psi4.compare_integers(241, wert.nao(), 'nao()') #TEST
psi4.compare_strings('UGGH', wert.name(), 'callby') #TEST
psi4.compare_strings('CC-PV5Z-JKFIT + DEF2-QZVPP-JKFIT', wert.blend(), 'blend') #TEST
| gpl-2.0 | 7,538,565,838,771,796,000 | 40.527778 | 146 | 0.655072 | false |
rdbhost/Rdbhdb | examples/animal.py | 1 | 2875 | # animal.py - create animal table and
# retrieve information from it
import sys
from rdbhdb import rdbhdb as db
from rdbhdb import extensions
DictCursor = extensions.DictCursor
# connect to the RdbHost server
authcode = 'KF7IUQPlwfSth4sBvjdqqanHkojAZzEjMshrkfEV0O53yz6w6v'
try:
conn = db.connect ('s000015', authcode=authcode)
except db.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
sys.exit (1)
# create the animal table and populate it
try:
cursor = conn.cursor ()
cursor.execute ("DROP TABLE IF EXISTS animal")
cursor.execute ("""
CREATE TABLE animal
(
name CHAR(40),
category CHAR(40)
)
""")
cursor.execute ("""
INSERT INTO animal (name, category)
VALUES
('snake', 'reptile'),
('frog', 'amphibian'),
('tuna', 'fish'),
('racoon', 'mammal')
""")
print "Number of rows inserted: %d" % cursor.rowcount
# perform a fetch loop using fetchone()
cursor.execute ("SELECT name, category FROM animal")
while (1):
row = cursor.fetchone ()
if row == None:
break
print "%s, %s" % (row[0], row[1])
print "Number of rows returned: %d" % cursor.rowcount
# perform a fetch loop using fetchall()
cursor.execute ("SELECT name, category FROM animal")
rows = cursor.fetchall ()
for row in rows:
print "%s, %s" % (row[0], row[1])
print "Number of rows returned: %d" % cursor.rowcount
# issue a statement that changes the name by including data values
# literally in the statement string, then change the name back
# by using placeholders
cursor.execute ("""
UPDATE animal SET name = 'turtle'
WHERE name = 'snake'
""")
print "Number of rows updated: %d" % cursor.rowcount
print "Reptile category example changed to turtle from snake"
# perform a fetch loop using fetchall()
cursor.execute ("SELECT name, category FROM animal")
rows = cursor.fetchall ()
for row in rows:
print "%s, %s" % (row[0], row[1])
cursor.execute ("""
UPDATE animal SET name = %s
WHERE name = %s
""", ("snake", "turtle"))
print "Number of rows updated: %d" % cursor.rowcount
print "Reptile category example changed back to snake"
# create a dictionary cursor so that column values
# can be accessed by name rather than by position
cursor.close ()
cursor = conn.cursor (curDef=DictCursor)
cursor.execute ("SELECT name, category FROM animal")
result_set = cursor.fetchall ()
for row in result_set:
print "%s, %s" % (row["name"], row["category"])
print "Number of rows returned: %d" % cursor.rowcount
cursor.close ()
except db.Error, e:
print "Error %s: %s" % (e.args[0], e.args[1])
sys.exit (1)
conn.commit ()
conn.close () | mit | 3,213,750,046,008,830,000 | 28.346939 | 66 | 0.615304 | false |
AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/ebcli/operations/envvarops.py | 4 | 1971 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.compat import six
from six import iteritems
from ..lib import elasticbeanstalk, utils
from ..core import io
from ..resources.strings import strings
from ..objects.exceptions import TimeoutError
from . import commonops
def get_and_print_environment_vars(app_name, env_name):
settings = elasticbeanstalk.describe_configuration_settings(
app_name, env_name
)['OptionSettings']
namespace = 'aws:elasticbeanstalk:application:environment'
vars = {n['OptionName']: n['Value'] for n in settings
if n["Namespace"] == namespace}
print_environment_vars(vars)
def print_environment_vars(vars):
io.echo(' Environment Variables:')
for key, value in iteritems(vars):
key, value = utils.mask_vars(key, value)
io.echo(' ', key, '=', value)
def setenv(app_name, env_name, var_list, timeout=None):
options, options_to_remove = commonops.create_envvars_list(var_list)
request_id = elasticbeanstalk.update_environment(env_name, options,
remove=options_to_remove)
try:
if timeout is None:
timeout = 4
commonops.wait_for_success_events(request_id,
timeout_in_minutes=timeout,
can_abort=True)
except TimeoutError:
io.log_error(strings['timeout.error'])
| mit | -5,468,110,718,848,300,000 | 34.836364 | 78 | 0.664637 | false |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Zoho/Writer/ListAllDocuments.py | 5 | 4923 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListAllDocuments
# Lists all the documents associated with a user's Zoho Writer Account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListAllDocuments(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListAllDocuments Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListAllDocuments, self).__init__(temboo_session, '/Library/Zoho/Writer/ListAllDocuments')
def new_input_set(self):
return ListAllDocumentsInputSet()
def _make_result_set(self, result, path):
return ListAllDocumentsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListAllDocumentsChoreographyExecution(session, exec_id, path)
class ListAllDocumentsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListAllDocuments
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Zoho)
"""
super(ListAllDocumentsInputSet, self)._set_input('APIKey', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Sets the number of documents to be listed.)
"""
super(ListAllDocumentsInputSet, self)._set_input('Limit', value)
def set_LoginID(self, value):
"""
Set the value of the LoginID input for this Choreo. ((required, string) Your Zoho username (or login id))
"""
super(ListAllDocumentsInputSet, self)._set_input('LoginID', value)
def set_OrderBy(self, value):
"""
Set the value of the OrderBy input for this Choreo. ((optional, string) Order documents by createdTime, lastModifiedTime or name.)
"""
super(ListAllDocumentsInputSet, self)._set_input('OrderBy', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zoho password)
"""
super(ListAllDocumentsInputSet, self)._set_input('Password', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to xml.)
"""
super(ListAllDocumentsInputSet, self)._set_input('ResponseFormat', value)
def set_SortOrder(self, value):
"""
Set the value of the SortOrder input for this Choreo. ((optional, string) Sorting order: asc or desc. Default sort order is set to ascending.)
"""
super(ListAllDocumentsInputSet, self)._set_input('SortOrder', value)
def set_StartFrom(self, value):
"""
Set the value of the StartFrom input for this Choreo. ((optional, integer) Sets the initial document number from which the documents will be listed.)
"""
super(ListAllDocumentsInputSet, self)._set_input('StartFrom', value)
class ListAllDocumentsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListAllDocuments Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Zoho. Corresponds to the ResponseFormat input. Defaults to XML.)
"""
return self._output.get('Response', None)
class ListAllDocumentsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListAllDocumentsResultSet(response, path)
| gpl-2.0 | 6,051,263,625,903,569,000 | 41.076923 | 170 | 0.672557 | false |
tahir24434/hydra | src/main/python/hydra/selftest/agents.py | 4 | 4173 | __author__ = 'sushil'
import zmq
import random
import time
import threading
import sys
import os
# REQ -> REP
# PUB -> SUB
class Test(object):
def __init__(self, argv):
p0 = os.environ.get('PORT0')
print("REQ PORT0 = " + p0)
self.port_rep = p0
self.port_pub = argv[1]
self.ip_port_sub = argv[2]
self.shutdown = False
self.pub_enabled = True
self.send_delay = 1
self.thread_rep = self.startthread(target=self.rep_task, args=("rep", self.port_rep))
self.thread_pub = self.startthread(target=self.pub_task, args=("pub", self.port_pub))
self.thread_sub = self.startthread(target=self.sub_task, args=("sub", self.ip_port_sub))
try:
while 1:
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
print("Exiting")
self.shutdown = True
sys.exit(0)
self.thread_rep.join()
self.thread_pub.join()
self.thread_sub.join()
return 0
def startthread(self, target, args):
t = threading.Thread(target=target, args=args)
t.daemon = True
t.start()
return t
def rep_task(self, name, port_rep):
if (port_rep == "0"):
return
ctx = zmq.Context()
# create req-rep socket
socket_rep = ctx.socket(zmq.REP)
socket_rep.bind("tcp://*:%s" % port_rep)
while not self.shutdown:
message = socket_rep.recv()
print("REP: GOT Message %s" % message)
if message == 'ping':
socket_rep.send('pong')
elif message == 'disable_pub':
socket_rep.send('ok')
self.pub_enabled = False
elif message == 'enable_pub':
socket_rep.send('ok')
self.pub_enabled = True
elif message == 'reset_pub':
socket_rep.send('ok')
self.pub_msg_cnt = 0
elif message == 'reset_sub':
socket_rep.send('ok')
self.sub_msg_cnt = 0
elif message == 'cnt_pub':
socket_rep.send(str(self.pub_msg_cnt))
elif message == 'cnt_sub':
socket_rep.send(str(self.sub_msg_cnt))
elif message.find('delay') == 0:
d = message.split(':')[1]
self.send_delay = float(d)
socket_rep.send('ok')
else:
socket_rep.send('Unknown message %s' % message)
def pub_task(self, name, port_pub):
if (port_pub == "0"):
return
ctx = zmq.Context()
# publish socket
socket_pub = ctx.socket(zmq.PUB)
socket_pub.bind("tcp://*:%s" % port_pub)
self.pub_msg_cnt = 0
while not self.shutdown:
if self.pub_enabled:
topic = random.randrange(9999, 10005)
messagedata = self.pub_msg_cnt
self.pub_msg_cnt += 1
print("PUB [%d] %d %d" % (self.pub_msg_cnt, topic, messagedata))
socket_pub.send("%d %d" % (topic, messagedata))
time.sleep(self.send_delay)
def sub_task(self, name, ip_port_sub):
if (ip_port_sub == "0"):
return
ctx = zmq.Context()
# subscribe socket
socket_sub = ctx.socket(zmq.SUB)
socket_sub.connect("tcp://%s" % ip_port_sub)
socket_sub.setsockopt(zmq.SUBSCRIBE, '')
total_value = 0
self.sub_msg_cnt = 0
while not self.shutdown:
string = socket_sub.recv()
topic, messageData = string.split()
total_value += int(messageData)
self.sub_msg_cnt += 1
print("SUB:: [%d] %s %s" % (self.sub_msg_cnt, topic, messageData))
if '__main__' == __name__:
argv = sys.argv
if (len(argv) != 4):
print("Usages %s <port_rep> <port_pub> <ip_port_sub>")
print("Use 0 if you dont want to create a service i.e. set <port_pub> to 0 if pub is not needed")
exit(1)
print("Running command : " + argv[0] + ' ' + '.'.join(argv[1:]))
exit(Test(argv))
| apache-2.0 | -3,955,799,438,844,457,500 | 33.204918 | 105 | 0.510903 | false |
gochist/horizon | openstack_dashboard/dashboards/project/firewalls/tabs.py | 8 | 5386 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang, Big Switch Networks
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls import tables
FirewallsTable = tables.FirewallsTable
PoliciesTable = tables.PoliciesTable
RulesTable = tables.RulesTable
class RulesTab(tabs.TableTab):
table_classes = (RulesTable,)
name = _("Firewall Rules")
slug = "rules"
template_name = "horizon/common/_detail_table.html"
def get_rulestable_data(self):
try:
tenant_id = self.request.user.tenant_id
rules = api.fwaas.rule_list(self.tab_group.request,
tenant_id=tenant_id)
except Exception:
rules = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve rules list.'))
for r in rules:
r.set_id_as_name_if_empty()
return rules
class PoliciesTab(tabs.TableTab):
table_classes = (PoliciesTable,)
name = _("Firewall Policies")
slug = "policies"
template_name = "horizon/common/_detail_table.html"
def get_policiestable_data(self):
try:
tenant_id = self.request.user.tenant_id
policies = api.fwaas.policy_list(self.tab_group.request,
tenant_id=tenant_id)
except Exception:
policies = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve policies list.'))
for p in policies:
p.set_id_as_name_if_empty()
return policies
class FirewallsTab(tabs.TableTab):
table_classes = (FirewallsTable,)
name = _("Firewalls")
slug = "firewalls"
template_name = "horizon/common/_detail_table.html"
def get_firewallstable_data(self):
try:
tenant_id = self.request.user.tenant_id
firewalls = api.fwaas.firewall_list(self.tab_group.request,
tenant_id=tenant_id)
except Exception:
firewalls = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve firewall list.'))
for f in firewalls:
f.set_id_as_name_if_empty()
return firewalls
class RuleDetailsTab(tabs.Tab):
name = _("Firewall Rule Details")
slug = "ruledetails"
template_name = "project/firewalls/_rule_details.html"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, request):
rid = self.tab_group.kwargs['rule_id']
try:
rule = api.fwaas.rule_get(request, rid)
except Exception:
exceptions.handle(request,
_('Unable to retrieve rule details.'),
redirect=self.failure_url)
return {'rule': rule}
class PolicyDetailsTab(tabs.Tab):
name = _("Firewall Policy Details")
slug = "policydetails"
template_name = "project/firewalls/_policy_details.html"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, request):
pid = self.tab_group.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(request, pid)
except Exception:
exceptions.handle(request,
_('Unable to retrieve policy details.'),
redirect=self.failure_url)
return {'policy': policy}
class FirewallDetailsTab(tabs.Tab):
name = _("Firewall Details")
slug = "firewalldetails"
template_name = "project/firewalls/_firewall_details.html"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, request):
fid = self.tab_group.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(request, fid)
except Exception:
exceptions.handle(request,
_('Unable to retrieve firewall details.'),
redirect=self.failure_url)
return {'firewall': firewall}
class FirewallTabs(tabs.TabGroup):
slug = "fwtabs"
tabs = (FirewallsTab, PoliciesTab, RulesTab)
sticky = True
class RuleDetailsTabs(tabs.TabGroup):
slug = "ruletabs"
tabs = (RuleDetailsTab,)
class PolicyDetailsTabs(tabs.TabGroup):
slug = "policytabs"
tabs = (PolicyDetailsTab,)
class FirewallDetailsTabs(tabs.TabGroup):
slug = "firewalltabs"
tabs = (FirewallDetailsTab,)
| apache-2.0 | 771,887,736,549,303,300 | 31.642424 | 78 | 0.615485 | false |
apollo13/ansible | test/units/utils/test_version.py | 4 | 7609 | # -*- coding: utf-8 -*-
# (c) 2020 Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from distutils.version import LooseVersion, StrictVersion
import pytest
from ansible.utils.version import _Alpha, _Numeric, SemanticVersion
# (left, right, expected) triples for `==` between two SemanticVersions.
# Note build metadata is ignored (build1 vs build2 -> True) while
# pre-release identifiers are significant (beta2 vs beta.2 -> False).
EQ = [
    ('1.0.0', '1.0.0', True),
    ('1.0.0', '1.0.0-beta', False),
    ('1.0.0-beta2+build1', '1.0.0-beta.2+build.1', False),
    ('1.0.0-beta+build', '1.0.0-beta+build', True),
    ('1.0.0-beta+build1', '1.0.0-beta+build2', True),
    ('1.0.0-beta+a', '1.0.0-alpha+bar', False),
]

# (left, right, expected) triples for `!=` between two SemanticVersions.
NE = [
    ('1.0.0', '1.0.0', False),
    ('1.0.0', '1.0.0-beta', True),
    ('1.0.0-beta2+build1', '1.0.0-beta.2+build.1', True),
    ('1.0.0-beta+build', '1.0.0-beta+build', False),
    ('1.0.0-beta+a', '1.0.0-alpha+bar', True),
]

# (left, right, expected) triples for strict `<` between two SemanticVersions.
LT = [
    ('1.0.0', '2.0.0', True),
    ('1.0.0-beta', '2.0.0-alpha', True),
    ('1.0.0-alpha', '2.0.0-beta', True),
    ('1.0.0-alpha', '1.0.0', True),
    ('1.0.0-beta', '1.0.0-alpha3', False),
    ('1.0.0+foo', '1.0.0-alpha', False),
    ('1.0.0-beta.1', '1.0.0-beta.a', True),
    ('1.0.0-beta+a', '1.0.0-alpha+bar', False),
]

# (left, right, expected) triples for strict `>` between two SemanticVersions.
GT = [
    ('1.0.0', '2.0.0', False),
    ('1.0.0-beta', '2.0.0-alpha', False),
    ('1.0.0-alpha', '2.0.0-beta', False),
    ('1.0.0-alpha', '1.0.0', False),
    ('1.0.0-beta', '1.0.0-alpha3', True),
    ('1.0.0+foo', '1.0.0-alpha', True),
    ('1.0.0-beta.1', '1.0.0-beta.a', False),
    ('1.0.0-beta+a', '1.0.0-alpha+bar', True),
]

# (left, right, expected) triples for `<=` between two SemanticVersions.
LE = [
    ('1.0.0', '1.0.0', True),
    ('1.0.0', '2.0.0', True),
    ('1.0.0-alpha', '1.0.0-beta', True),
    ('1.0.0-beta', '1.0.0-alpha', False),
]

# (left, right, expected) triples for `>=` between two SemanticVersions.
GE = [
    ('1.0.0', '1.0.0', True),
    ('1.0.0', '2.0.0', False),
    ('1.0.0-alpha', '1.0.0-beta', False),
    ('1.0.0-beta', '1.0.0-alpha', True),
]

# Version strings that must parse successfully (see test_valid).
VALID = [
    "0.0.4",
    "1.2.3",
    "10.20.30",
    "1.1.2-prerelease+meta",
    "1.1.2+meta",
    "1.1.2+meta-valid",
    "1.0.0-alpha",
    "1.0.0-beta",
    "1.0.0-alpha.beta",
    "1.0.0-alpha.beta.1",
    "1.0.0-alpha.1",
    "1.0.0-alpha0.valid",
    "1.0.0-alpha.0valid",
    "1.0.0-alpha-a.b-c-somethinglong+build.1-aef.1-its-okay",
    "1.0.0-rc.1+build.1",
    "2.0.0-rc.1+build.123",
    "1.2.3-beta",
    "10.2.3-DEV-SNAPSHOT",
    "1.2.3-SNAPSHOT-123",
    "1.0.0",
    "2.0.0",
    "1.1.7",
    "2.0.0+build.1848",
    "2.0.1-alpha.1227",
    "1.0.0-alpha+beta",
    "1.2.3----RC-SNAPSHOT.12.9.1--.12+788",
    "1.2.3----R-S.12.9.1--.12+meta",
    "1.2.3----RC-SNAPSHOT.12.9.1--.12",
    "1.0.0+0.build.1-rc.10000aaa-kk-0.1",
    "99999999999999999999999.999999999999999999.99999999999999999",
    "1.0.0-0A.is.legal",
]

# Version strings that must be rejected with ValueError (see test_invalid).
INVALID = [
    "1",
    "1.2",
    "1.2.3-0123",
    "1.2.3-0123.0123",
    "1.1.2+.123",
    "+invalid",
    "-invalid",
    "-invalid+invalid",
    "-invalid.01",
    "alpha",
    "alpha.beta",
    "alpha.beta.1",
    "alpha.1",
    "alpha+beta",
    "alpha_beta",
    "alpha.",
    "alpha..",
    "beta",
    "1.0.0-alpha_beta",
    "-alpha.",
    "1.0.0-alpha..",
    "1.0.0-alpha..1",
    "1.0.0-alpha...1",
    "1.0.0-alpha....1",
    "1.0.0-alpha.....1",
    "1.0.0-alpha......1",
    "1.0.0-alpha.......1",
    "01.1.1",
    "1.01.1",
    "1.1.01",
    "1.2",
    "1.2.3.DEV",
    "1.2-SNAPSHOT",
    "1.2.31.2.3----RC-SNAPSHOT.12.09.1--..12+788",
    "1.2-RC-SNAPSHOT",
    "-1.0.3-gamma+b7718",
    "+justmeta",
    "9.8.7+meta+meta",
    "9.8.7-whatever+meta+meta",
]

# (version, expected) pairs for the SemanticVersion.is_prerelease property.
PRERELEASE = [
    ('1.0.0-alpha', True),
    ('1.0.0-alpha.1', True),
    ('1.0.0-0.3.7', True),
    ('1.0.0-x.7.z.92', True),
    ('0.1.2', False),
    ('0.1.2+bob', False),
    ('1.0.0', False),
]

# (version, expected) pairs for the SemanticVersion.is_stable property.
# Note 0.y.z releases are not considered stable even without a pre-release tag.
STABLE = [
    ('1.0.0-alpha', False),
    ('1.0.0-alpha.1', False),
    ('1.0.0-0.3.7', False),
    ('1.0.0-x.7.z.92', False),
    ('0.1.2', False),
    ('0.1.2+bob', False),
    ('1.0.0', True),
    ('1.0.0+bob', True),
]

# (LooseVersion input, expected SemanticVersion) pairs for
# SemanticVersion.from_loose_version conversion.
LOOSE_VERSION = [
    (LooseVersion('1'), SemanticVersion('1.0.0')),
    (LooseVersion('1-alpha'), SemanticVersion('1.0.0-alpha')),
    (LooseVersion('1.0.0-alpha+build'), SemanticVersion('1.0.0-alpha+build')),
]

# Inputs for which SemanticVersion.from_loose_version must raise
# AttributeError or ValueError (non-numeric parts, empty version, wrong types).
LOOSE_VERSION_INVALID = [
    LooseVersion('1.a.3'),
    LooseVersion(),
    'bar',
    StrictVersion('1.2.3'),
]
def test_semanticversion_none():
    """A SemanticVersion constructed without a version string has no major set."""
    empty = SemanticVersion()
    assert empty.major is None
@pytest.mark.parametrize('left,right,expected', EQ)
def test_eq(left, right, expected):
    """`==` between two parsed versions yields the tabulated truth value."""
    outcome = SemanticVersion(left) == SemanticVersion(right)
    assert outcome is expected
@pytest.mark.parametrize('left,right,expected', NE)
def test_ne(left, right, expected):
    """`!=` between two parsed versions yields the tabulated truth value."""
    outcome = SemanticVersion(left) != SemanticVersion(right)
    assert outcome is expected
@pytest.mark.parametrize('left,right,expected', LT)
def test_lt(left, right, expected):
    """Strict `<` between two parsed versions yields the tabulated truth value."""
    outcome = SemanticVersion(left) < SemanticVersion(right)
    assert outcome is expected
@pytest.mark.parametrize('left,right,expected', LE)
def test_le(left, right, expected):
    """`<=` between two parsed versions yields the tabulated truth value."""
    outcome = SemanticVersion(left) <= SemanticVersion(right)
    assert outcome is expected
@pytest.mark.parametrize('left,right,expected', GT)
def test_gt(left, right, expected):
    """Strict `>` between two parsed versions yields the tabulated truth value."""
    outcome = SemanticVersion(left) > SemanticVersion(right)
    assert outcome is expected
@pytest.mark.parametrize('left,right,expected', GE)
def test_ge(left, right, expected):
    """`>=` between two parsed versions yields the tabulated truth value."""
    outcome = SemanticVersion(left) >= SemanticVersion(right)
    assert outcome is expected
@pytest.mark.parametrize('value', VALID)
def test_valid(value):
SemanticVersion(value)
@pytest.mark.parametrize('value', INVALID)
def test_invalid(value):
pytest.raises(ValueError, SemanticVersion, value)
def test_example_precedence():
# https://semver.org/#spec-item-11
sv = SemanticVersion
assert sv('1.0.0') < sv('2.0.0') < sv('2.1.0') < sv('2.1.1')
assert sv('1.0.0-alpha') < sv('1.0.0')
assert sv('1.0.0-alpha') < sv('1.0.0-alpha.1') < sv('1.0.0-alpha.beta')
assert sv('1.0.0-beta') < sv('1.0.0-beta.2') < sv('1.0.0-beta.11') < sv('1.0.0-rc.1') < sv('1.0.0')
@pytest.mark.parametrize('value,expected', PRERELEASE)
def test_prerelease(value, expected):
assert SemanticVersion(value).is_prerelease is expected
@pytest.mark.parametrize('value,expected', STABLE)
def test_stable(value, expected):
assert SemanticVersion(value).is_stable is expected
@pytest.mark.parametrize('value,expected', LOOSE_VERSION)
def test_from_loose_version(value, expected):
assert SemanticVersion.from_loose_version(value) == expected
@pytest.mark.parametrize('value', LOOSE_VERSION_INVALID)
def test_from_loose_version_invalid(value):
pytest.raises((AttributeError, ValueError), SemanticVersion.from_loose_version, value)
def test_comparison_with_string():
assert SemanticVersion('1.0.0') > '0.1.0'
def test_alpha():
assert _Alpha('a') == _Alpha('a')
assert _Alpha('a') == 'a'
assert _Alpha('a') != _Alpha('b')
assert _Alpha('a') != 1
assert _Alpha('a') < _Alpha('b')
assert _Alpha('a') < 'c'
assert _Alpha('a') > _Numeric(1)
with pytest.raises(ValueError):
_Alpha('a') < None
assert _Alpha('a') <= _Alpha('a')
assert _Alpha('a') <= _Alpha('b')
assert _Alpha('b') >= _Alpha('a')
assert _Alpha('b') >= _Alpha('b')
def test_numeric():
assert _Numeric(1) == _Numeric(1)
assert _Numeric(1) == 1
assert _Numeric(1) != _Numeric(2)
assert _Numeric(1) != 'a'
assert _Numeric(1) < _Numeric(2)
assert _Numeric(1) < 3
assert _Numeric(1) < _Alpha('b')
with pytest.raises(ValueError):
_Numeric(1) < None
assert _Numeric(1) <= _Numeric(1)
assert _Numeric(1) <= _Numeric(2)
assert _Numeric(2) >= _Numeric(1)
assert _Numeric(2) >= _Numeric(2)
| gpl-3.0 | -9,013,840,861,554,829,000 | 25.698246 | 103 | 0.568143 | false |
jmerkow/VTK | Examples/VisualizationAlgorithms/Python/smoothFran.py | 42 | 1792 | #!/usr/bin/env python
# This example shows how to use decimation to reduce a polygonal
# mesh. We also use mesh smoothing and generate surface normals to
# give a pleasing result.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# We start by reading some data that was originally captured from a
# Cyberware laser digitizing system.
fran = vtk.vtkPolyDataReader()
fran.SetFileName(VTK_DATA_ROOT + "/Data/fran_cut.vtk")
# We want to preserve topology (not let any cracks form). This may
# limit the total reduction possible, which we have specified at 90%.
deci = vtk.vtkDecimatePro()
deci.SetInputConnection(fran.GetOutputPort())
deci.SetTargetReduction(0.9)
deci.PreserveTopologyOn()
smoother = vtk.vtkSmoothPolyDataFilter()
smoother.SetInputConnection(deci.GetOutputPort())
smoother.SetNumberOfIterations(50)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(smoother.GetOutputPort())
normals.FlipNormalsOn()
franMapper = vtk.vtkPolyDataMapper()
franMapper.SetInputConnection(normals.GetOutputPort())
franActor = vtk.vtkActor()
franActor.SetMapper(franMapper)
franActor.GetProperty().SetColor(1.0, 0.49, 0.25)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(franActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(250, 250)
cam1 = vtk.vtkCamera()
cam1.SetClippingRange(0.0475572, 2.37786)
cam1.SetFocalPoint(0.052665, -0.129454, -0.0573973)
cam1.SetPosition(0.327637, -0.116299, -0.256418)
cam1.SetViewUp(-0.0225386, 0.999137, 0.034901)
ren.SetActiveCamera(cam1)
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause | 3,316,487,524,060,862,500 | 31.581818 | 69 | 0.788504 | false |
dailijun/ffc | tests/pjsua/mod_call.py | 8 | 5357 | # $Id: mod_call.py 5065 2015-04-13 12:14:02Z nanang $
import time
import imp
import sys
import inc_const as const
from inc_cfg import *
# Load configuration
cfg_file = imp.load_source("cfg_file", ARGS[1])
# Check media flow between ua1 and ua2
def check_media(ua1, ua2):
ua1.send("#")
ua1.expect("#")
ua1.send("1122")
ua2.expect(const.RX_DTMF + "1")
ua2.expect(const.RX_DTMF + "1")
ua2.expect(const.RX_DTMF + "2")
ua2.expect(const.RX_DTMF + "2")
# Test body function
def test_func(t):
callee = t.process[0]
caller = t.process[1]
# if have_reg then wait for couple of seconds for PUBLISH
# to complete (just in case pUBLISH is used)
if callee.inst_param.have_reg:
time.sleep(1)
if caller.inst_param.have_reg:
time.sleep(1)
# Caller making call
caller.send("m")
caller.send(t.inst_params[0].uri)
caller.expect(const.STATE_CALLING)
# Callee waits for call and answers with 180/Ringing
time.sleep(0.2)
callee.expect(const.EVENT_INCOMING_CALL)
callee.send("a")
callee.send("180")
callee.expect("SIP/2.0 180")
caller.expect("SIP/2.0 180")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Callee answers with 200/OK
callee.send("a")
callee.send("200")
# Wait until call is connected in both endpoints
##time.sleep(0.2)
caller.expect(const.STATE_CONFIRMED)
callee.expect(const.STATE_CONFIRMED)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
##time.sleep(0.1)
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
##time.sleep(0.3)
check_media(caller, callee)
check_media(callee, caller)
# Hold call by caller
caller.send("H")
caller.expect("INVITE sip:")
callee.expect("INVITE sip:")
callee.expect(const.MEDIA_HOLD)
caller.expect(const.MEDIA_HOLD)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Release hold
##time.sleep(0.5)
caller.send("v")
caller.expect("INVITE sip:")
callee.expect("INVITE sip:")
callee.expect(const.MEDIA_ACTIVE, title="waiting for media active after call hold")
caller.expect(const.MEDIA_ACTIVE, title="waiting for media active after call hold")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
check_media(caller, callee)
check_media(callee, caller)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Hold call by callee
callee.send("H")
callee.expect("INVITE sip:")
caller.expect("INVITE sip:")
caller.expect(const.MEDIA_HOLD)
callee.expect(const.MEDIA_HOLD)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Release hold
##time.sleep(0.1)
callee.send("v")
callee.expect("INVITE sip:")
caller.expect("INVITE sip:")
caller.expect(const.MEDIA_ACTIVE, title="waiting for media active after call hold")
callee.expect(const.MEDIA_ACTIVE, title="waiting for media active after call hold")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
# Wait for some time for ICE negotiation
##time.sleep(0.6)
check_media(caller, callee)
check_media(callee, caller)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# UPDATE (by caller)
caller.send("U")
#caller.sync_stdout()
callee.expect(const.MEDIA_ACTIVE, title="waiting for media active with UPDATE")
caller.expect(const.MEDIA_ACTIVE, title="waiting for media active with UPDATE")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
##time.sleep(0.1)
check_media(caller, callee)
check_media(callee, caller)
# UPDATE (by callee)
callee.send("U")
callee.expect("UPDATE sip:")
caller.expect("UPDATE sip:")
caller.expect(const.MEDIA_ACTIVE, title="waiting for media active with UPDATE")
callee.expect(const.MEDIA_ACTIVE, title="waiting for media active with UPDATE")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
##time.sleep(0.1)
check_media(caller, callee)
check_media(callee, caller)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Set codecs in both caller and callee so that there is
# no common codec between them.
# In caller we only enable PCMU, in callee we only enable PCMA
caller.send("Cp")
caller.expect("Enter codec")
caller.send("* 0")
caller.send("Cp")
caller.expect("Enter codec")
caller.send("pcmu 120")
callee.send("Cp")
callee.expect("Enter codec")
callee.send("* 0")
callee.send("Cp")
callee.expect("Enter codec")
callee.send("pcma 120")
# Test when UPDATE fails (by callee)
callee.send("U")
caller.expect("SIP/2.0 488")
callee.expect("SIP/2.0 488")
callee.sync_stdout()
caller.sync_stdout()
# Test that media is still okay
##time.sleep(0.1)
check_media(caller, callee)
check_media(callee, caller)
# Test when UPDATE fails (by caller)
caller.send("U")
caller.expect("UPDATE sip:")
callee.expect("UPDATE sip:")
callee.expect("SIP/2.0 488")
caller.expect("SIP/2.0 488")
caller.sync_stdout()
callee.sync_stdout()
# Test that media is still okay
##time.sleep(0.1)
check_media(callee, caller)
check_media(caller, callee)
# Hangup call
##time.sleep(0.1)
caller.send("h")
# Wait until calls are cleared in both endpoints
caller.expect(const.STATE_DISCONNECTED)
callee.expect(const.STATE_DISCONNECTED)
# Here where it all comes together
test = cfg_file.test_param
test.test_func = test_func
| gpl-2.0 | 949,922,448,059,902,800 | 22.70354 | 84 | 0.715699 | false |
fr0uty/oartm | oar/rest_api/views/__init__.py | 1 | 5479 | # -*- coding: utf-8 -*-
from __future__ import with_statement, absolute_import
import os
import time
from collections import OrderedDict
from functools import wraps
from flask import Blueprint as FlaskBlueprint, Response, g, abort
from oar.lib.database import BaseModel
from oar.lib.compat import json
from oar.lib.utils import JSONEncoder
from ..utils import ArgParser
class Blueprint(FlaskBlueprint):
def __init__(self, *args, **kwargs):
self.root_prefix = kwargs.pop('url_prefix', '')
self.trailing_slash = kwargs.pop('trailing_slash', True)
super(Blueprint, self).__init__(*args, **kwargs)
self.before_request(self._prepare_response)
def route(self, partial_rule, args={}, jsonify=True, **options):
"""A decorator that is used to define custom routes, injects parsed
arguments into a view function or method and jsonify the response.
Example usage with: ::
@app.route('/factorial', jsonify=True, args={'x': int})
def factorial(x=0):
import math
return {'result': math.factorial(x)}
"""
rule = partial_rule
if self.root_prefix:
rule = (self.root_prefix + partial_rule)
if self.trailing_slash and len(rule) > 1:
rule = rule.rstrip("/")
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
if jsonify and not hasattr(f, "decorated_with_jsonify") and args:
@self.args(args)
@self.jsonify
@wraps(f)
def wrapper(*proxy_args, **proxy_kwargs):
return f(*proxy_args, **proxy_kwargs)
elif jsonify and not hasattr(f, "decorated_with_jsonify"):
@self.jsonify
@wraps(f)
def wrapper(*proxy_args, **proxy_kwargs):
return f(*proxy_args, **proxy_kwargs)
elif args:
@self.args(args)
@wraps(f)
def wrapper(*proxy_args, **proxy_kwargs):
return f(*proxy_args, **proxy_kwargs)
else:
wrapper = f
self.add_url_rule(rule, endpoint, wrapper, **options)
return wrapper
return decorator
@property
def jsonify(self):
"""A decorator that is used to jsonify the response.
Example usage with: ::
@app.jsonify('/foo')
def foo(name="bar"):
g.data["foo"] = name # or return {"foo": name}
"""
def decorator(func):
@wraps(func)
def decorated(*proxy_args, **proxy_kwargs):
result = func(*proxy_args, **proxy_kwargs)
if result is None:
result = g.data
if not isinstance(result, (dict, list, BaseModel)):
return result
return self._jsonify_response(result)
decorated.decorated_with_jsonify = True
return decorated
return decorator
def args(self, argmap):
"""Decorator that injects parsed arguments into a view function.
Example usage with: ::
@app.route('/factorial', methods=['GET', 'POST'])
@app.args({'x': int})
def factorial(x=0):
import math
return {'result': math.factorial(x)}
"""
def decorator(func):
@wraps(func)
def decorated(*proxy_args, **proxy_kwargs):
parser = ArgParser(argmap)
parsed_kwargs, raw_kwargs = parser.parse()
proxy_kwargs.update(parsed_kwargs)
g.request_args.update(raw_kwargs)
g.request_args.update(proxy_kwargs)
return func(*proxy_args, **proxy_kwargs)
return decorated
return decorator
def need_authentication(self):
"""Decorator that check is user is authenticate."""
def decorator(func):
@wraps(func)
def decorated(*proxy_args, **proxy_kwargs):
if g.current_user is not None:
return func(*proxy_args, **proxy_kwargs)
else:
abort(403)
return decorated
return decorator
def _prepare_response(self):
g.request_args = {}
g.data = OrderedDict()
g.data['api_timezone'] = 'UTC'
g.data['api_timestamp'] = int(time.time())
def _json_dumps(self, obj, **kwargs):
"""Dump object to json string."""
kwargs.setdefault('ensure_ascii', False)
kwargs.setdefault('cls', JSONEncoder)
kwargs.setdefault('indent', 4)
kwargs.setdefault('separators', (',', ': '))
return json.dumps(obj, **kwargs)
def _jsonify_response(self, obj):
"""Get a json response."""
return Response(self._json_dumps(obj), mimetype='application/json')
def load_blueprints():
folder = os.path.abspath(os.path.dirname(__file__))
for filename in os.listdir(folder):
if filename.endswith('.py') and not filename.startswith('__'):
name = filename[:-3]
module_path = 'oar.rest_api.views.%s' % name
module = __import__(module_path, None, None, ['app'])
yield getattr(module, 'app')
def register_blueprints(app):
for blueprint in load_blueprints():
app.register_blueprint(blueprint)
| bsd-3-clause | -7,579,997,127,180,881,000 | 33.459119 | 77 | 0.551013 | false |
gauravbose/digital-menu | digimenu2/tests/urlpatterns_reverse/urls.py | 7 | 5018 | import warnings
from django.conf.urls import include, patterns, url
from django.utils.deprecation import RemovedInDjango20Warning
from .views import (
absolute_kwargs_view, defaults_view, empty_view, empty_view_partial,
empty_view_wrapped, nested_view,
)
other_patterns = [
url(r'non_path_include/$', empty_view, name='non_path_include'),
url(r'nested_path/$', nested_view),
]
# test deprecated patterns() function. convert to list of urls() in Django 2.0
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RemovedInDjango20Warning)
urlpatterns = patterns('',
url(r'^places/([0-9]+)/$', empty_view, name='places'),
url(r'^places?/$', empty_view, name="places?"),
url(r'^places+/$', empty_view, name="places+"),
url(r'^places*/$', empty_view, name="places*"),
url(r'^(?:places/)?$', empty_view, name="places2?"),
url(r'^(?:places/)+$', empty_view, name="places2+"),
url(r'^(?:places/)*$', empty_view, name="places2*"),
url(r'^places/([0-9]+|[a-z_]+)/', empty_view, name="places3"),
url(r'^places/(?P<id>[0-9]+)/$', empty_view, name="places4"),
url(r'^people/(?P<name>\w+)/$', empty_view, name="people"),
url(r'^people/(?:name/)', empty_view, name="people2"),
url(r'^people/(?:name/(\w+)/)?', empty_view, name="people2a"),
url(r'^people/(?P<name>\w+)-(?P=name)/$', empty_view, name="people_backref"),
url(r'^optional/(?P<name>.*)/(?:.+/)?', empty_view, name="optional"),
url(r'^optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?', absolute_kwargs_view, name="named_optional"),
url(r'^optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?$', absolute_kwargs_view, name="named_optional_terminated"),
url(r'^nested/noncapture/(?:(?P<p>\w+))$', empty_view, name='nested-noncapture'),
url(r'^nested/capture/((\w+)/)?$', empty_view, name='nested-capture'),
url(r'^nested/capture/mixed/((?P<p>\w+))$', empty_view, name='nested-mixedcapture'),
url(r'^nested/capture/named/(?P<outer>(?P<inner>\w+)/)?$', empty_view, name='nested-namedcapture'),
url(r'^hardcoded/$', empty_view, name="hardcoded"),
url(r'^hardcoded/doc\.pdf$', empty_view, name="hardcoded2"),
url(r'^people/(?P<state>\w\w)/(?P<name>\w+)/$', empty_view, name="people3"),
url(r'^people/(?P<state>\w\w)/(?P<name>[0-9])/$', empty_view, name="people4"),
url(r'^people/((?P<state>\w\w)/test)?/(\w+)/$', empty_view, name="people6"),
url(r'^character_set/[abcdef0-9]/$', empty_view, name="range"),
url(r'^character_set/[\w]/$', empty_view, name="range2"),
url(r'^price/\$([0-9]+)/$', empty_view, name="price"),
url(r'^price/[$]([0-9]+)/$', empty_view, name="price2"),
url(r'^price/[\$]([0-9]+)/$', empty_view, name="price3"),
url(r'^product/(?P<product>\w+)\+\(\$(?P<price>[0-9]+(\.[0-9]+)?)\)/$', empty_view, name="product"),
url(r'^headlines/(?P<year>[0-9]+)\.(?P<month>[0-9]+)\.(?P<day>[0-9]+)/$', empty_view, name="headlines"),
url(r'^windows_path/(?P<drive_name>[A-Z]):\\(?P<path>.+)/$', empty_view, name="windows"),
url(r'^special_chars/(?P<chars>.+)/$', empty_view, name="special"),
url(r'^(?P<name>.+)/[0-9]+/$', empty_view, name="mixed"),
url(r'^repeats/a{1,2}/$', empty_view, name="repeats"),
url(r'^repeats/a{2,4}/$', empty_view, name="repeats2"),
url(r'^repeats/a{2}/$', empty_view, name="repeats3"),
url(r'^(?i)CaseInsensitive/(\w+)', empty_view, name="insensitive"),
url(r'^test/1/?', empty_view, name="test"),
url(r'^(?i)test/2/?$', empty_view, name="test2"),
url(r'^outer/(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_urls')),
url(r'^outer-no-kwargs/([0-9]+)/', include('urlpatterns_reverse.included_no_kwargs_urls')),
url('', include('urlpatterns_reverse.extra_urls')),
# Partials should be fine.
url(r'^partial/', empty_view_partial, name="partial"),
url(r'^partial_wrapped/', empty_view_wrapped, name="partial_wrapped"),
# This is non-reversible, but we shouldn't blow up when parsing it.
url(r'^(?:foo|bar)(\w+)/$', empty_view, name="disjunction"),
# Regression views for #9038. See tests for more details
url(r'arg_view/$', 'urlpatterns_reverse.views.kwargs_view'),
url(r'arg_view/(?P<arg1>[0-9]+)/$', 'urlpatterns_reverse.views.kwargs_view'),
url(r'absolute_arg_view/(?P<arg1>[0-9]+)/$', absolute_kwargs_view),
url(r'absolute_arg_view/$', absolute_kwargs_view),
# Tests for #13154. Mixed syntax to test both ways of defining URLs.
url(r'defaults_view1/(?P<arg1>[0-9]+)/', defaults_view, {'arg2': 1}, name='defaults'),
(r'defaults_view2/(?P<arg1>[0-9]+)/', defaults_view, {'arg2': 2}, 'defaults'),
url('^includes/', include(other_patterns)),
# Security tests
url('(.+)/security/$', empty_view, name='security'),
)
| bsd-3-clause | 8,094,194,534,985,012,000 | 56.678161 | 117 | 0.57214 | false |
dmlc/xgboost | demo/kaggle-higgs/higgs-pred.py | 1 | 1163 | #!/usr/bin/python
# make prediction
import numpy as np
import xgboost as xgb
# path to where the data lies
dpath = 'data'
modelfile = 'higgs.model'
outfile = 'higgs.pred.csv'
# make top 15% as positive
threshold_ratio = 0.15
# load in training data, directly use numpy
dtest = np.loadtxt( dpath+'/test.csv', delimiter=',', skiprows=1 )
data = dtest[:,1:31]
idx = dtest[:,0]
print ('finish loading from csv ')
xgmat = xgb.DMatrix( data, missing = -999.0 )
bst = xgb.Booster({'nthread':16}, model_file = modelfile)
ypred = bst.predict( xgmat )
res = [ ( int(idx[i]), ypred[i] ) for i in range(len(ypred)) ]
rorder = {}
for k, v in sorted( res, key = lambda x:-x[1] ):
rorder[ k ] = len(rorder) + 1
# write out predictions
ntop = int( threshold_ratio * len(rorder ) )
fo = open(outfile, 'w')
nhit = 0
ntot = 0
fo.write('EventId,RankOrder,Class\n')
for k, v in res:
if rorder[k] <= ntop:
lb = 's'
nhit += 1
else:
lb = 'b'
# change output rank order to follow Kaggle convention
fo.write('%s,%d,%s\n' % ( k, len(rorder)+1-rorder[k], lb ) )
ntot += 1
fo.close()
print ('finished writing into prediction file')
| apache-2.0 | -3,084,890,467,697,225,000 | 23.744681 | 66 | 0.627687 | false |
punchagan/zulip | zerver/management/commands/show_admins.py | 6 | 1187 | from argparse import ArgumentParser
from typing import Any
from django.core.management.base import CommandError
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Show the owners and administrators in an organization."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser, required=True)
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
assert realm is not None # True because of required=True above
admin_users = realm.get_admin_users_and_bots()
owner_user_ids = set(realm.get_human_owner_users().values_list("id", flat=True))
if admin_users:
print("Administrators:\n")
for user in admin_users:
owner_detail = ""
if user.id in owner_user_ids:
owner_detail = " [owner]"
print(f" {user.delivery_email} ({user.full_name}){owner_detail}")
else:
raise CommandError("There are no admins for this realm!")
print('\nYou can use the "change_user_role" management command to adjust roles.')
| apache-2.0 | 3,480,435,765,788,275,700 | 34.969697 | 89 | 0.636057 | false |
Micronaet/micronaet-migration | report_store_value/store.py | 1 | 13783 | # -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import xmlrpclib
import erppeek
import csv
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class StatisticStore(orm.Model):
''' Object that store data from 2 company for mix store values
using protocol to link product from Company 1 to Company 2
'''
_name = 'statistic.store'
_description = 'Store info'
_rec_name = "product_code"
_order = "product_code,product_description"
def schedule_csv_import_store(
self, cr, uid,
# File import:
file_input1='~/etl/esistoerprogr.CM1',
file_input2='~/etl/esistoerprogr.CM2',
exch_file1='~/etl/cm1-cm2.CM1',
exch_file2='~/etl/cm1-cm2.CM2',
delimiter=';', header=0, verbose=100,
# Access XMLRPC database:
hostname='localhost', port=18069, database='DB', user='admin',
password='password',
# Extra context:
context=None):
''' Scheduled importation of existence
'''
# TODO test prepare function in csv module
# Remove all previous data:
stock_ids = self.search(cr, uid, [], context=context)
self.unlink(cr, uid, stock_ids, context=context)
# TODO keep in this form?
records = {'FIA': {}, 'GPB': {}}
q_x_packs = {'FIA': {}, 'GPB': {}}
csv_pool = self.pool.get('csv.base')
product_pool = self.pool.get('product.product')
# -----------------------
# First company Q x pack:
# -----------------------
pack_ids = product_pool.search(cr, uid, [], context=context)
for item in product_pool.browse(cr, uid, pack_ids, context=context):
q_x_packs["FIA"][item.default_code] = item.q_x_pack
# ------------------------
# Second company Q x pack:
# ------------------------
erp = erppeek.Client(
'http://%s:%s' % (hostname, port),
db=database,
user=user,
password=password,
)
erp_pool = erp.ProductProduct
erp_ids = erp_pool.search([])
erp_pool.browse(erp_ids)
for item in erp_pool.browse(item_ids):
q_x_packs["GPB"][item.default_code] = item.q_x_pack
loops = [
("FIA", file_input1, exch_file1),
("GPB", file_input2, exch_file2),
]
for azienda, f, f_exch in loops:
file_csv = os.path.expanduser(f)
try:
lines = csv.reader(open(file_csv, 'rb'), delimiter=delimiter)
except:
_logger.error('Exchange file not found: %s' % f_exch)
continue
counter = -header
for line in lines:
try:
counter += 1
if counter <= 0: # jump n lines of header
continue
# decode_date(self, valore, with_slash=True
ref = csv_pool.decode_string(line[0])
product_description = csv_pool.decode_string(
line[1]).title()
product_um = csv_pool.decode_string(line[2]).upper()
inventary = csv_pool.decode_float(line[3])
value_in = csv_pool.decode_float(line[4])
value_out = csv_pool.decode_float(line[5])
balance = csv_pool.decode_float(line[6])
supplier_order = csv_pool.decode_float(line[7])
customer_order = csv_pool.decode_float(line[8])
customer_order_auto = csv_pool.decode_float(line[9])
customer_order_suspended = csv_pool.decode_float(line[10])
supplier = csv_pool.decode_string(line[11]).title()
product_description += "\n" + (csv_pool.decode_string(
line[12]).title())
mexal_s = csv_pool.decode_string(line[13]) or False
# Calculated fields:
company = azienda.lower()
availability = (
balance + supplier_order -
customer_order - customer_order_suspended)
# E + F - I - S TODO auto??
product_um2 = ''
inventary_last = 0.0
q_x_pack = q_x_packs[azienda][ref] \
if ref in q_x_packs[azienda] else 0
records[azienda][ref] = {
'company': company,
'supplier': supplier,
'mexal_s': mexal_s,
'product_code': ref,
'product_description': product_description,
'product_um': product_um,
'q_x_pack': q_x_pack,
# Value fields
'inventary': inventary,
'q_in': value_in,
'q_out': value_out,
'balance': balance,
'supplier_order': supplier_order,
'customer_order': customer_order,
'customer_order_auto': customer_order_auto,
'customer_order_suspended':
customer_order_suspended,
# Field calculated:
'availability': availability,
'product_um2': product_um2,
'inventary_last': inventary_last,
}
except:
_logger.error('Error import line: %s' % (
sys.exc_info(),))
continue
# Sell from CM1 to CM2 (subtract from q_in sum to q_out:
file_exchange = os.path.expanduser(f_exch)
lines = csv.reader(
open(file_exchange, 'rb'), delimiter=delimiter)
for line in lines:
if len(line): # jump empty lines
ref = csv_pool.decode_string(line[0])
value_sale = csv_pool.decode_float(line[1]) or 0.0
if ref in records[azienda]:
# also for unload number is positive
records[azienda][ref]['q_in'] -= value_sale
records[azienda][ref]['q_out'] -= value_sale
comment = ""
for azienda in ["FIA", "GPB"]:
if azienda == "FIA":
altra_azienda="GPB"
else:
altra_azienda="FIA"
for item in records[azienda].keys():
if ((azienda == "FIA") and (item[:1] == "C") and (
item[1:] in records["GPB"])) or (
(azienda=="GPB") and (item[:1]=="F") and (
item[1:] in records["FIA"])): # jump other CM items
pass # Do nothing (sum in other company) Jump
elif (
(azienda=="FIA") and (
"F" + item in records["GPB"])) or (
(azienda=="GPB") and (
"C" + item in records["FIA"])): # Sum item this CM
# double
if azienda == "FIA":
item_other = "F" + item
else:
item_other = "C" + item
data_store = {
'company': records[azienda][item]['company'],
'supplier': records[azienda][item]['supplier'],
'product_code': records[azienda][item][
'product_code'],
'product_description': records[azienda][item][
'product_description'],
'product_um': records[azienda][item]['product_um'],
'inventary': records[azienda][item][
'inventary'] + records[altra_azienda][
item_other]['inventary'],
'q_x_pack': records[azienda][item]['q_x_pack'],
'q_in': records[azienda][item]['q_in'] + records[
altra_azienda][item_other]['q_in'],
'q_out': records[azienda][item]['q_out'] + \
records[altra_azienda][item_other]['q_out'],
'balance': records[azienda][item]['balance'] + \
records[altra_azienda][item_other]['balance'],
'supplier_order': records[azienda][item][
'supplier_order'] + \
records[altra_azienda][item_other][
'supplier_order'],
'customer_order': records[azienda][item][
'customer_order'] + records[altra_azienda][
item_other]['customer_order'],
'customer_order_auto': records[azienda][item][
'customer_order_auto'] + records[
altra_azienda][item_other][
'customer_order_auto'],
'customer_order_suspended': records[azienda][item][
'customer_order_suspended'] + records[
altra_azienda][item_other][
'customer_order_suspended'],
'availability': records[azienda][item][
'availability'] + records[altra_azienda][
item_other]['availability'],
'product_um2': records[azienda][item][
'product_um2'],
'inventary_last': records[azienda][item][
'inventary_last'],
'both': True, # for elements present in both company
}
self.create(cr, uid, data_store, context=context)
else: # Normal (no intersection)
self.create(cr, uid, records[azienda][item],
context=context)
return True
_columns = {
# Extra info calculated:
'company':fields.selection([
('gpb','G.P.B.'),
('fia','Fiam'),
], 'Company', select=True),
# Product info:
'mexal_s': fields.char('Mexal ID', size=10),
'supplier': fields.char('Supplier', size=68),
'product_code': fields.char('Product code', size=24),
'q_x_pack': fields.integer('Q. x pack'),
'product_description': fields.char('Product description', size=128),
'product_um': fields.char('UOM', size=4),
# Value fields
'inventary': fields.float('Inventary', digits=(16, 2)),
'q_in': fields.float('In', digits=(16, 2)),
'q_out': fields.float('Out', digits=(16, 2)),
'balance': fields.float('Existent', digits=(16, 2)),
'supplier_order': fields.float('OF', digits=(16, 2)),
'customer_order': fields.float('OC imp.', digits=(16, 2)),
'customer_order_auto': fields.float(
'OC automatic in prod.', digits=(16, 2)),
'customer_order_suspended': fields.float(
'OC suspended', digits=(16, 2)),
# Field calculated:
'availability': fields.float('Dispo lord', digits=(16, 2)),
'product_um2': fields.char('UM2', size=4),
'inventary_last': fields.float('Manual inventary', digits=(16, 2)),
'both': fields.boolean('Entrambe',
help="Esiste in entrambe le aziende"),
}
_defaults = {
'both': lambda *a: False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,325,560,370,230,387,000 | 42.755556 | 79 | 0.47399 | false |
v4Lo/OnlyRO | tools/validateinterfaces.py | 39 | 8572 | #! /usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (C) 2014 Andrei Karas (4144)
import os
import re
import sys
from sets import Set
# Matches "struct <name>_interface <name>_s;" -- the instance declaration
# that marks a C file as defining an interface (both names must agree).
interfaceRe = re.compile("struct (?P<name1>[a-z_]+)_interface (?P<name2>[a-z_]+)_s;")
class Tracker:
    """Mutable state shared across the validation passes.

    Attributes (filled in by the check functions below):
      interfaces  -- set of interface names found in C files
      fullmethods -- set of fully qualified implementation names
      methods     -- set of "ifname_method" names declared in headers
      arr         -- maps each "ifname_method" name to a list
      retCode     -- exit status; set to 1 when a check fails
    """

    def __init__(self):
        # Provide safe defaults so an attribute read before external
        # initialisation no longer raises AttributeError; callers that
        # assign their own values simply overwrite these.
        self.interfaces = set()
        self.fullmethods = set()
        self.methods = set()
        self.arr = dict()
        self.retCode = 0
def searchDefault(r, ifname):
    """Scan forward through the line iterator *r* for the definition line
    of the ``<ifname>_defaults`` function.

    Returns True when the line was found (leaving *r* positioned just past
    it), False when the iterator is exhausted first.
    """
    needle = "void {0}_defaults(void)".format(ifname)
    for current in r:
        if current.startswith(needle):
            return True
    return False
def searchStructStart(r, ifname):
    """Scan forward through the line iterator *r* for the line that opens
    the ``struct <ifname>_interface`` definition.

    Returns True when the line was found (leaving *r* positioned just past
    it), False when the iterator is exhausted first.
    """
    prefix = "struct {0}_interface".format(ifname)
    for current in r:
        if current.startswith(prefix):
            return True
    return False
def readCFile(tracker, cFile):
    """Parse C file *cFile* and collect the interface methods it assigns.

    Looks for the "struct X_interface X_s;" instance declaration, then for
    the X_defaults() initializer, and records every
    "alias->method = full_name;" assignment found inside its body.
    Side effects: adds the interface name to ``tracker.interfaces`` and each
    right-hand side to ``tracker.fullmethods``.

    Returns a tuple ``(ifname, shortIfName, methods)``:
      ifname      -- interface name, or None when no interface instance (or
                     no defaults function) was found
      shortIfName -- the short alias used on the left of the assignments
                     ("" when none was seen)
      methods     -- set of method names assigned in the initializer
    """
    # builtin set() replaces the deprecated sets.Set (removed in Python 3)
    methods = set()
    shortIfName = ""
    with open(cFile, "r") as r:
        for line in r:
            m = interfaceRe.search(line)
            if m is not None and m.group("name1") == m.group("name2"):
                # found a C file that declares an interface instance
                ifname = m.group("name1")
                if not searchDefault(r, ifname):
                    # no <ifname>_defaults() function: nothing to collect
                    return (None, shortIfName, methods)
                # matches "alias->method = full_name;"
                lineRe = re.compile(r"(?P<ifname>[a-z_]+)->(?P<method>[\w_]+)[ ][=][ ](?P<fullmethod>[^;]+);")
                for line in r:
                    test = line.strip()
                    if len(test) > 2 and test[0:2] == "//":
                        continue  # skip commented-out assignments
                    if len(line) > 0 and line[0] == "}":
                        break  # end of the defaults function body
                    m = lineRe.search(line)
                    if m is not None:
                        tmp = m.group("ifname")
                        # only accept assignments whose left-hand alias
                        # starts with the interface's first letter
                        if len(tmp) < 2 or tmp[0] != ifname[0]:
                            continue
                        if shortIfName == "":
                            shortIfName = m.group("ifname")
                        methods.add(m.group("method"))
                        tracker.interfaces.add(ifname)
                        tracker.fullmethods.add(m.group("fullmethod"))
                return (ifname, shortIfName, methods)
    return (None, shortIfName, methods)
def readHFile(tracker, hFile, ifname):
methods = Set()
with open(hFile, "r") as r:
if searchStructStart(r, ifname) == False:
return methods
lineRe = re.compile("[(][*](?P<method>[^)]+)[)]".format(ifname))
for line in r:
# print "hline: " + line
test = line.strip()
if len(test) > 2 and test[0:2] == "//":
continue
if len(line) > 0 and line[0] == "}":
break
m = lineRe.search(line)
if m != None:
# print "{2}: add {0}, from: {1}".format(m.group("method"), line, ifname)
methods.add(m.group("method"))
tracker.fullmethods.add(ifname + "_" + m.group("method"))
return methods
def checkIfFile(tracker, cFile, hFile):
data = readCFile(tracker, cFile)
cMethods = data[2]
ifname = data[0]
shortIfName = data[1]
if len(cMethods) > 0:
hMethods = readHFile(tracker, hFile, ifname)
for method in hMethods:
tracker.arr[ifname + "_" + method] = list()
tracker.methods.add(ifname + "_" + method)
if method not in cMethods:
print "Missing initialisation in file {0}: {1}".format(cFile, method)
tracker.retCode = 1
# for method in cMethods:
# if method not in hMethods:
# print "Extra method in file {0}: {1}".format(cFile, method)
def processIfDir(tracker, srcDir):
files = os.listdir(srcDir)
for file1 in files:
if file1[0] == '.' or file1 == "..":
continue
cPath = os.path.abspath(srcDir + os.path.sep + file1)
if not os.path.isfile(cPath):
processIfDir(tracker, cPath)
else:
if file1[-2:] == ".c":
file2 = file1[:-2] + ".h"
hPath = srcDir + os.path.sep + file2;
if os.path.exists(hPath) and os.path.isfile(hPath):
checkIfFile(tracker, cPath, hPath)
def checkChr(ch):
if (ch >= "a" and ch <= "z") or ch == "_" or (ch >= "0" and ch <= "9" or ch == "\"" or ch == ">"):
return True
return False
def checkFile(tracker, cFile):
# print "Checking: " + cFile
with open(cFile, "r") as r:
for line in r:
parts = re.findall(r'[\w_]+', line)
for part in parts:
if part in tracker.methods:
idx = line.find(part)
if idx > 0:
if idx + len(part) >= len(line):
continue
if checkChr(line[idx + len(part)]):
continue
if checkChr(line[idx - 1]):
continue
if line[0:3] == " * ":
continue;
if line[-1] == "\n":
line = line[:-1]
text = line.strip()
if text[0:2] == "/*" or text[0:2] == "//":
continue
idx2 = line.find("//")
if idx2 > 0 and idx2 < idx:
continue
tracker.arr[part].append(line)
def processDir(tracker, srcDir):
files = os.listdir(srcDir)
for file1 in files:
if file1[0] == '.' or file1 == "..":
continue
cPath = os.path.abspath(srcDir + os.path.sep + file1)
if not os.path.isfile(cPath):
processDir(tracker, cPath)
elif file1[-2:] == ".c" or file1[-2:] == ".h":
# elif file1[-2:] == ".c":
checkFile(tracker, cPath)
def reportMethods(tracker):
print "\n"
for method in tracker.methods:
if len(tracker.arr[method]) > 2:
print method
for t in tracker.arr[method]:
print t
print "\n"
def checkLostFile(tracker, cFile):
# print "Checking: " + cFile
methodRe = re.compile("^([\w0-9* _]*)([ ]|[*])(?P<ifname>[a-z_]+)_(?P<method>[\w_]+)(|[ ])[(]")
with open(cFile, "r") as r:
for line in r:
if line.find("(") < 1 or len(line) < 3 or line[0] == "\t" or line[0] == " " or line.find("_defaults") > 0:
continue
m = methodRe.search(line)
if m != None:
name = "{0}_{1}".format(m.group("ifname"), m.group("method"))
if name[:name.find("_")] not in tracker.interfaces and m.group("ifname") not in tracker.interfaces:
continue
if name not in tracker.fullmethods:
# print "src : " + line
print name
def processLostDir(tracker, srcDir):
files = os.listdir(srcDir)
for file1 in files:
if file1[0] == '.' or file1 == "..":
continue
cPath = os.path.abspath(srcDir + os.path.sep + file1)
if not os.path.isfile(cPath):
processLostDir(tracker, cPath)
elif file1[-2:] == ".c":
checkLostFile(tracker, cPath)
def runIf():
processIfDir(tracker, "../src/char");
processIfDir(tracker, "../src/map");
processIfDir(tracker, "../src/login");
processIfDir(tracker, "../src/common");
def runLost():
processLostDir(tracker, "../src/char");
processLostDir(tracker, "../src/map");
processLostDir(tracker, "../src/login");
processLostDir(tracker, "../src/common");
def runLong():
processDir(tracker, "../src/char");
processDir(tracker, "../src/map");
processDir(tracker, "../src/login");
processDir(tracker, "../src/common");
reportMethods(tracker)
tracker = Tracker()
tracker.arr = dict()
tracker.methods = Set()
tracker.fullmethods = Set()
tracker.interfaces = Set()
tracker.retCode = 0
if len(sys.argv) > 1:
cmd = sys.argv[1]
else:
cmd = "default"
if cmd == "silent":
runIf()
elif cmd == "init":
print "Checking interfaces initialisation"
runIf()
elif cmd == "lost":
print "Checking not added functions to interfaces"
runLost();
elif cmd == "long":
print "Checking interfaces usage"
runLong();
else:
print "Checking interfaces initialisation"
runIf()
print "Checking not added functions to interfaces"
runLost();
print "Checking interfaces usage"
runLong();
exit(tracker.retCode)
| gpl-3.0 | -1,066,631,068,934,448,900 | 33.845528 | 118 | 0.494984 | false |
blueskycoco/rt-thread | tools/sconsui.py | 16 | 15534 | #! /usr/bin/env python
#coding=utf-8
#
# File : sconsui.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import sys
py2 = py30 = py31 = False
version = sys.hexversion
if version >= 0x020600F0 and version < 0x03000000 :
py2 = True # Python 2.6 or 2.7
from Tkinter import *
import ttk
elif version >= 0x03000000 and version < 0x03010000 :
py30 = True
from tkinter import *
import ttk
elif version >= 0x03010000:
py31 = True
from tkinter import *
import tkinter.ttk as ttk
else:
print ("""
You do not have a version of python supporting ttk widgets..
You need a version >= 2.6 to execute PAGE modules.
""")
sys.exit()
import ScrolledText
import tkFileDialog
import tkMessageBox
import os
import threading
import platform
builder = None
executor = None
lock = None
class CmdExecutor(threading.Thread):
def __init__(self, cmd, output):
threading.Thread.__init__(self)
self.cmd = cmd
self.child = None
def run(self):
global executor, builder, lock
if platform.system() == 'Windows':
try:
from win32spawn import Win32Spawn
subprocess = Win32Spawn(self.cmd)
subprocess.start_pipe()
builder.progressbar.start()
while not subprocess.is_terminated or subprocess.qsize() > 0:
try:
line = subprocess.get(timeout=1)
line = line.replace('\r', '')
if line:
lock.acquire()
builder.output.see(END)
builder.output.insert(END, line)
lock.release()
except:
pass
builder.progressbar.stop()
except:
pass
executor = None
if builder.is_makeing_project:
builder.output.insert(END, 'Done')
builder.is_makeing_project = False
def ExecCmd(cmd):
global executor
if executor:
print 'last task does not exit'
return
executor = CmdExecutor(cmd, builder)
executor.start()
class DirSelectBox(ttk.Frame):
def __init__(self, master=None, **kw):
ttk.Frame.__init__(self, master, **kw)
self.dir_var = StringVar()
self.entry = ttk.Entry(self, textvariable = self.dir_var)
self.entry.pack(fill=BOTH, expand=1,side=LEFT)
self.entry.configure(width = 50)
self.browser_button = ttk.Button(self, text="Browser", command=self.browser)
self.browser_button.pack(side=RIGHT)
def browser(self):
dir = tkFileDialog.askdirectory(parent=self, title='Open directory', initialdir=self.dir_var.get())
if dir != '':
self.dir_var.set(dir)
def set_path(self, path):
path = path.replace('\\', '/')
self.dir_var.set(path)
def get_path(self):
return self.dir_var.get()
COMPILER = [
("GNU GCC", "GCC"),
("Keil ARMCC", "ARMCC"),
("IAR Compiler", "IAR"),
]
IDE = [
('Keil MDK4', 'mdk4'),
('Keil MDK', 'mdk'),
('IAR Compiler', 'iar')
]
class SconsUI():
def __init__(self, master=None):
style = ttk.Style()
theme = style.theme_use()
default = style.lookup(theme, 'background')
master.configure(background=default)
notebook = ttk.Notebook(master)
notebook.pack(fill=BOTH, padx=5, pady=5)
# building page
page_building = ttk.Frame(notebook)
notebook.add(page_building, padding=3)
notebook.tab(0, text='Build', underline="-1")
self.setup_building_ui(page_building)
self.building_page = page_building
# make project page
page_project = ttk.Frame(notebook)
notebook.add(page_project, padding = 3)
notebook.tab(1, text = 'Project', underline = '-1')
self.setup_project_ui(page_project)
self.project_page = page_project
# setting page
page_setting = ttk.Frame(notebook)
notebook.add(page_setting, padding = 3)
notebook.tab(2, text = 'Setting', underline = '-1')
self.setup_setting_ui(page_setting)
self.setting_page = page_setting
padding = ttk.Frame(master)
padding.pack(fill=X)
quit = ttk.Button(padding, text='Quit', command = self.quit)
quit.pack(side=RIGHT)
# set notebook to self
self.notebook = notebook
# read setting
self.read_setting()
self.is_makeing_project = False
def read_setting(self):
import platform
import os
home = ''
if platform.system() == 'Windows':
driver = os.environ['HOMEDRIVE']
home = os.environ['HOMEPATH']
home = os.path.join(driver, home)
else:
home = os.environ['HOME']
setting_path = os.path.join(home, '.rtt_scons')
if os.path.exists(setting_path):
setting = open(os.path.join(home, '.rtt_scons'))
for line in setting:
line = line.replace('\n', '')
line = line.replace('\r', '')
if line.find('=') != -1:
items = line.split('=')
if items[0] == 'RTTRoot':
self.RTTRoot.set_path(items[1])
elif items[0] == 'BSPRoot':
self.BSPRoot.set_path(items[1])
elif items[0] == 'compiler':
compiler = items[1]
else:
self.CompilersPath[items[0]].set_path(items[1])
setting.close()
# set RT-Thread Root Directory according environ
if 'RTT_ROOT' in os.environ:
self.RTTRoot.set_path(os.environ['RTT_ROOT'])
if self.RTTRoot.get_path() == '':
rtt_root = ''
# detect RT-Thread directory
if os.path.exists(os.path.join('..', 'include', 'rtthread.h')):
rtt_root = os.path.join('..')
elif os.path.exists(os.path.join('..', '..', 'include', 'rtthread.h')):
rtt_root = os.path.join('..', '..')
if rtt_root:
self.RTTRoot.set_path(os.path.abspath(rtt_root))
# detect compiler path
if platform.system() == 'Windows':
# Keil MDK
if not self.CompilersPath['ARMCC'].get_path():
if os.path.exists('C:\\Keil'):
self.CompilersPath['ARMCC'].set_path('C:\\Keil')
elif os.path.exists('D:\\Keil'):
self.CompilersPath['ARMCC'].set_path('D:\\Keil')
elif os.path.exists('E:\\Keil'):
self.CompilersPath['ARMCC'].set_path('E:\\Keil')
elif os.path.exists('F:\\Keil'):
self.CompilersPath['ARMCC'].set_path('F:\\Keil')
elif os.path.exists('G:\\Keil'):
self.CompilersPath['ARMCC'].set_path('G:\\Keil')
# GNU GCC
if not self.CompilersPath['GCC'].get_path():
paths = os.environ['PATH']
paths = paths.split(';')
for path in paths:
if path.find('CodeSourcery') != -1:
self.CompilersPath['GCC'].set_path(path)
break
elif path.find('GNU Tools ARM Embedded') != -1:
self.CompilersPath['GCC'].set_path(path)
break
def save_setting(self):
import platform
import os
home = ''
if platform.system() == 'Windows':
driver = os.environ['HOMEDRIVE']
home = os.environ['HOMEPATH']
home = os.path.join(driver, home)
else:
home = os.environ['HOME']
setting = open(os.path.join(home, '.rtt_scons'), 'w+')
# current comiler
# line = '%s=%s\n' % ('compiler', self.compilers.get()))
line = '%s=%s\n' % ('compiler', 'iar')
setting.write(line)
# RTT Root Folder
if self.RTTRoot.get_path():
line = '%s=%s\n' % ('RTTRoot', self.RTTRoot.get_path())
setting.write(line)
# BSP Root Folder
if self.BSPRoot.get_path():
line = '%s=%s\n' % ('BSPRoot', self.BSPRoot.get_path())
setting.write(line)
for (compiler, path) in self.CompilersPath.iteritems():
if path.get_path():
line = '%s=%s\n' % (compiler, path.get_path())
setting.write(line)
setting.close()
tkMessageBox.showinfo("RT-Thread SCons UI",
"Save setting sucessfully")
def setup_building_ui(self, frame):
padding = ttk.Frame(frame)
padding.pack(fill=X)
button = ttk.Button(padding, text='Clean', command=self.do_clean)
button.pack(side=RIGHT)
button = ttk.Button(padding, text='Build', command=self.do_build)
button.pack(side=RIGHT)
label = ttk.Label(padding, relief = 'flat', text = 'Click Build or Clean to build or clean system -->')
label.pack(side=RIGHT, ipady = 5)
self.progressbar = ttk.Progressbar(frame)
self.progressbar.pack(fill=X)
separator = ttk.Separator(frame)
separator.pack(fill=X)
self.output = ScrolledText.ScrolledText(frame)
self.output.pack(fill=X)
def setup_project_ui(self, frame):
label = ttk.Label(frame, relief = 'flat', text = 'Choose Integrated Development Environment:')
label.pack(fill=X, pady = 5)
separator = ttk.Separator(frame)
separator.pack(fill=X)
self.ide = StringVar()
self.ide.set("mdk4") # initialize
for text,mode in IDE:
radiobutton = ttk.Radiobutton(frame, text=text, variable = self.ide, value = mode)
radiobutton.pack(fill=X, padx=10)
bottom = ttk.Frame(frame)
bottom.pack(side=BOTTOM, fill=X)
button = ttk.Button(bottom, text="Make Project", command = self.do_make_project)
button.pack(side=RIGHT, padx = 10, pady = 10)
def setup_setting_ui(self, frame):
row = 0
label = ttk.Label (frame, relief = 'flat', text='RT-Thread Root Folder:')
label.grid(row=row, column=0,ipadx=5, ipady=5, padx = 5)
self.RTTRoot = DirSelectBox(frame)
self.RTTRoot.grid(row=row, column=1, sticky=E+W)
row = row + 1
label = ttk.Label (frame, relief = 'flat', text='Board Support Folder:')
label.grid(row=row, column=0,ipadx=5, ipady=5, padx = 5)
self.BSPRoot = DirSelectBox(frame)
self.BSPRoot.grid(row=row, column=1, sticky=E+W)
row = row + 1
label = ttk.Label (frame, relief='flat', text='Toolchain:')
label.grid(row=row, column=0,ipadx=5, ipady=5, sticky=E+W)
row = row + 1
separator = ttk.Separator(frame)
separator.grid(row = row, column = 0, columnspan = 2, sticky = E+W)
row = row + 1
self.compilers = StringVar()
self.compilers.set("GCC") # initialize
self.CompilersPath = {}
for text,compiler in COMPILER:
radiobutton = ttk.Radiobutton(frame, text=text, variable = self.compilers, value = compiler)
radiobutton.grid(row=row, column = 0, sticky = W, ipadx = 5, ipady = 5, padx = 20)
self.CompilersPath[compiler] = DirSelectBox(frame)
self.CompilersPath[compiler].grid(row=row, column=1, sticky=E+W)
row = row + 1
button = ttk.Button(frame, text='Save Setting', command = self.save_setting)
button.grid(row = row, column = 1, sticky = E)
row = row + 1
def prepare_build(self):
# get compiler
compiler = self.compilers.get()
if compiler == 'GCC':
compiler = 'gcc'
elif compiler == 'ARMCC':
compiler = 'keil'
elif compiler == 'IAR':
compiler = 'iar'
# get RTT Root
rtt_root = self.RTTRoot.get_path()
# get Compiler path
exec_path = self.CompilersPath[self.compilers.get()].get_path()
command = ''
os.environ['RTT_ROOT'] = rtt_root
os.environ['RTT_CC'] = compiler
os.environ['RTT_EXEC_PATH'] = exec_path
return command
def check_path(self):
result = True
if self.BSPRoot.get_path() == '':
result = False
if self.RTTRoot.get_path() == '':
result = False
if not result:
tkMessageBox.showinfo("RT-Thread SCons UI",
"Folder is empty, please choose correct directory.")
return result
def do_build(self):
self.prepare_build()
command = 'scons'
if not self.check_path():
return
bsp = self.BSPRoot.get_path()
os.chdir(bsp)
self.output.delete(1.0, END)
self.output.insert(END, 'building project...\n')
ExecCmd(command)
def do_clean(self):
self.prepare_build()
command = 'scons -c'
if not self.check_path():
return
bsp = self.BSPRoot.get_path()
os.chdir(bsp)
self.output.delete(1.0, END)
self.output.insert(END, 'clean project...\n')
ExecCmd(command)
def do_make_project(self):
ide = self.ide.get()
self.prepare_build()
command = 'scons --target=%s -s' % ide
if not self.check_path():
return
# select build page
self.notebook.select(self.building_page)
bsp = self.BSPRoot.get_path()
os.chdir(bsp)
self.output.delete(1.0, END)
self.output.insert(END, 'Generate project ...\n')
self.is_makeing_project = True
ExecCmd(command)
def quit(self):
exit(0)
def StartSConsUI(path=None):
global val, root, builder, lock
root = Tk()
root.title('RT-Thread SCons UI')
#root.geometrygeometry('590x510+50+50')
lock = threading.RLock()
builder = SconsUI(root)
if path:
builder.BSPRoot.set_path(path)
root.mainloop()
if __name__ == '__main__':
StartSConsUI()
| gpl-2.0 | -2,522,084,228,583,075,300 | 31.634454 | 111 | 0.540621 | false |
chenbaihu/grpc | src/python/src/grpc/early_adopter/_assembly_utilities.py | 3 | 7378 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import collections
# assembly_interfaces is referenced from specification in this module.
from grpc.framework.assembly import interfaces as assembly_interfaces # pylint: disable=unused-import
from grpc.framework.assembly import utilities as assembly_utilities
from grpc.early_adopter import _reexport
from grpc.early_adopter import interfaces
# TODO(issue 726): Kill the "implementations" attribute of this in favor
# of the same-information-less-bogusly-represented "cardinalities".
class InvocationBreakdown(object):
"""An intermediate representation of invocation-side views of RPC methods.
Attributes:
cardinalities: A dictionary from RPC method name to interfaces.Cardinality
value.
implementations: A dictionary from RPC method name to
assembly_interfaces.MethodImplementation describing the method.
request_serializers: A dictionary from RPC method name to callable
behavior to be used serializing request values for the RPC.
response_deserializers: A dictionary from RPC method name to callable
behavior to be used deserializing response values for the RPC.
"""
__metaclass__ = abc.ABCMeta
class _EasyInvocationBreakdown(
InvocationBreakdown,
collections.namedtuple(
'_EasyInvocationBreakdown',
('cardinalities', 'implementations', 'request_serializers',
'response_deserializers'))):
pass
class ServiceBreakdown(object):
"""An intermediate representation of service-side views of RPC methods.
Attributes:
implementations: A dictionary from RPC method name
assembly_interfaces.MethodImplementation implementing the RPC method.
request_deserializers: A dictionary from RPC method name to callable
behavior to be used deserializing request values for the RPC.
response_serializers: A dictionary from RPC method name to callable
behavior to be used serializing response values for the RPC.
"""
__metaclass__ = abc.ABCMeta
class _EasyServiceBreakdown(
ServiceBreakdown,
collections.namedtuple(
'_EasyServiceBreakdown',
('implementations', 'request_deserializers', 'response_serializers'))):
pass
def break_down_invocation(method_descriptions):
"""Derives an InvocationBreakdown from several RPC method descriptions.
Args:
method_descriptions: A dictionary from RPC method name to
interfaces.RpcMethodInvocationDescription describing the RPCs.
Returns:
An InvocationBreakdown corresponding to the given method descriptions.
"""
cardinalities = {}
implementations = {}
request_serializers = {}
response_deserializers = {}
for name, method_description in method_descriptions.iteritems():
cardinality = method_description.cardinality()
cardinalities[name] = cardinality
if cardinality is interfaces.Cardinality.UNARY_UNARY:
implementations[name] = assembly_utilities.unary_unary_inline(None)
elif cardinality is interfaces.Cardinality.UNARY_STREAM:
implementations[name] = assembly_utilities.unary_stream_inline(None)
elif cardinality is interfaces.Cardinality.STREAM_UNARY:
implementations[name] = assembly_utilities.stream_unary_inline(None)
elif cardinality is interfaces.Cardinality.STREAM_STREAM:
implementations[name] = assembly_utilities.stream_stream_inline(None)
request_serializers[name] = method_description.serialize_request
response_deserializers[name] = method_description.deserialize_response
return _EasyInvocationBreakdown(
cardinalities, implementations, request_serializers,
response_deserializers)
def break_down_service(method_descriptions):
"""Derives a ServiceBreakdown from several RPC method descriptions.
Args:
method_descriptions: A dictionary from RPC method name to
interfaces.RpcMethodServiceDescription describing the RPCs.
Returns:
A ServiceBreakdown corresponding to the given method descriptions.
"""
implementations = {}
request_deserializers = {}
response_serializers = {}
for name, method_description in method_descriptions.iteritems():
cardinality = method_description.cardinality()
if cardinality is interfaces.Cardinality.UNARY_UNARY:
def service(
request, face_rpc_context,
service_behavior=method_description.service_unary_unary):
return service_behavior(
request, _reexport.rpc_context(face_rpc_context))
implementations[name] = assembly_utilities.unary_unary_inline(service)
elif cardinality is interfaces.Cardinality.UNARY_STREAM:
def service(
request, face_rpc_context,
service_behavior=method_description.service_unary_stream):
return service_behavior(
request, _reexport.rpc_context(face_rpc_context))
implementations[name] = assembly_utilities.unary_stream_inline(service)
elif cardinality is interfaces.Cardinality.STREAM_UNARY:
def service(
request_iterator, face_rpc_context,
service_behavior=method_description.service_stream_unary):
return service_behavior(
request_iterator, _reexport.rpc_context(face_rpc_context))
implementations[name] = assembly_utilities.stream_unary_inline(service)
elif cardinality is interfaces.Cardinality.STREAM_STREAM:
def service(
request_iterator, face_rpc_context,
service_behavior=method_description.service_stream_stream):
return service_behavior(
request_iterator, _reexport.rpc_context(face_rpc_context))
implementations[name] = assembly_utilities.stream_stream_inline(service)
request_deserializers[name] = method_description.deserialize_request
response_serializers[name] = method_description.serialize_response
return _EasyServiceBreakdown(
implementations, request_deserializers, response_serializers)
| bsd-3-clause | -7,535,147,532,467,745,000 | 42.916667 | 102 | 0.757116 | false |
0x0all/nupic | py/regions/ImageSensorFilters/Brightness.py | 2 | 1997 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
"""
from PIL import (Image,
ImageChops,
ImageEnhance)
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class Brightness(BaseFilter):
"""
Modify the brightness of the image.
"""
def __init__(self, factor=1.0):
"""
@param factor -- Factor by which to brighten the image, a nonnegative
number. 0.0 returns a black image, 1.0 returns the original image, and
higher values return brighter images.
"""
BaseFilter.__init__(self)
if factor < 0:
raise ValueError("'factor' must be a nonnegative number")
self.factor = factor
def process(self, image):
"""
@param image -- The image to process.
Returns a single image, or a list containing one or more images.
"""
BaseFilter.process(self, image)
brightnessEnhancer = ImageEnhance.Brightness(image.split()[0])
newImage = brightnessEnhancer.enhance(self.factor)
newImage.putalpha(image.split()[1])
return newImage
| gpl-3.0 | -3,712,439,628,782,400,000 | 29.723077 | 76 | 0.651978 | false |
polyaxon/polyaxon | core/polyaxon/schemas/types/auth.py | 1 | 2825 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields
from polyaxon.schemas.base import BaseCamelSchema
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
from polyaxon.schemas.types.base import BaseTypeConfig
class AuthTypeSchema(BaseCamelSchema):
user = RefOrObject(fields.Str(), required=True)
password = RefOrObject(fields.Str(), required=True)
@staticmethod
def schema_config():
return V1AuthType
class V1AuthType(BaseTypeConfig, polyaxon_sdk.V1AuthType):
"""Auth type.
Args:
user: str
password: str
### YAML usage
The inputs definition
```yaml
>>> inputs:
>>> - name: test1
>>> type: auth
>>> - name: test2
>>> type: auth
```
The params usage
```yaml
>>> params:
>>> test1: {value: "username1:password1"}
>>> test1: {value: {"user": "username2", "password": "password2"}}
```
### Python usage
The inputs definition
```python
>>> from polyaxon import types
>>> from polyaxon.schemas import types
>>> from polyaxon.polyflow import V1IO
>>> inputs = [
>>> V1IO(
>>> name="test1",
>>> type=types.AUTH,
>>> ),
>>> V1IO(
>>> name="test2",
>>> type=types.AUTH,
>>> ),
>>> ]
```
The params usage
```python
>>> from polyaxon import types
>>> from polyaxon.schemas import types
>>> from polyaxon.polyflow import V1Param
>>> params = {
>>> "test1": V1Param(value=types.V1AuthType(user="username1", password="password1")),
>>> "test2": V1Param(value=types.V1AuthType(user="username2", password="password2")),
>>> }
```
> Normally you should not be passing auth details in plain values.
This type validate several values:
String values:
* '{"user": "foo", "password": "bar"}'
* 'foo:bar'
Dict values:
* {"user": "foo", "password": "bar"}
"""
IDENTIFIER = "auth"
SCHEMA = AuthTypeSchema
REDUCED_ATTRIBUTES = ["user", "password"]
def __str__(self):
return "{}:{}".format(self.user, self.password)
def __repr__(self):
return str(self)
| apache-2.0 | 9,188,222,945,497,423,000 | 23.780702 | 93 | 0.611327 | false |
marcore/edx-platform | lms/djangoapps/courseware/tests/test_submitting_problems.py | 7 | 51218 | # -*- coding: utf-8 -*-
"""
Integration tests for submitting problem responses and getting grades.
"""
import json
import os
from textwrap import dedent
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from mock import patch
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import (
OptionResponseXMLFactory, CustomResponseXMLFactory, SchematicResponseXMLFactory,
CodeResponseXMLFactory,
)
from courseware import grades
from courseware.models import StudentModule, BaseStudentModuleHistory
from courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from student.tests.factories import UserFactory
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from openedx.core.djangoapps.credit.api import (
set_credit_requirements, get_credit_requirement_status
)
from openedx.core.djangoapps.credit.models import CreditCourse, CreditProvider
from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory
class ProblemSubmissionTestMixin(TestCase):
"""
TestCase mixin that provides functions to submit answers to problems.
"""
def refresh_course(self):
"""
Re-fetch the course from the database so that the object being dealt with has everything added to it.
"""
self.course = self.store.get_course(self.course.id)
def problem_location(self, problem_url_name):
"""
Returns the url of the problem given the problem's name
"""
return self.course.id.make_usage_key('problem', problem_url_name)
def modx_url(self, problem_location, dispatch):
"""
Return the url needed for the desired action.
problem_location: location of the problem on which we want some action
dispatch: the the action string that gets passed to the view as a kwarg
example: 'check_problem' for having responses processed
"""
return reverse(
'xblock_handler',
kwargs={
'course_id': self.course.id.to_deprecated_string(),
'usage_id': quote_slashes(problem_location.to_deprecated_string()),
'handler': 'xmodule_handler',
'suffix': dispatch,
}
)
def submit_question_answer(self, problem_url_name, responses):
"""
Submit answers to a question.
Responses is a dict mapping problem ids to answers:
{'2_1': 'Correct', '2_2': 'Incorrect'}
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_check')
answer_key_prefix = 'input_{}_'.format(problem_location.html_id())
# format the response dictionary to be sent in the post request by adding the above prefix to each key
response_dict = {(answer_key_prefix + k): v for k, v in responses.items()}
resp = self.client.post(modx_url, response_dict)
return resp
def look_at_question(self, problem_url_name):
"""
Create state for a problem, but don't answer it
"""
location = self.problem_location(problem_url_name)
modx_url = self.modx_url(location, "problem_get")
resp = self.client.get(modx_url)
return resp
def reset_question_answer(self, problem_url_name):
"""
Reset specified problem for current user.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_reset')
resp = self.client.post(modx_url)
return resp
def show_question_answer(self, problem_url_name):
"""
Shows the answer to the current student.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_show')
resp = self.client.post(modx_url)
return resp
class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, ProblemSubmissionTestMixin):
    """
    Check that a course gets graded properly.

    Base class for grading tests: creates a course, enrolls a test student,
    and provides helpers for adding graded content and reading back grades.
    """
    # Tell Django to clean out all databases, not just default
    multi_db = True
    # arbitrary constant
    COURSE_SLUG = "100"
    COURSE_NAME = "test_course"
    # Caches that remain enabled while these tests run.
    ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
    def setUp(self):
        """
        Create the test course, register/activate/enroll a test student,
        and patch out the score-changed signal for test isolation.
        """
        super(TestSubmittingProblems, self).setUp()
        # create a test student
        self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
        self.student = '[email protected]'
        self.password = 'foo'
        self.create_account('u1', self.student, self.password)
        self.activate_user(self.student)
        self.enroll(self.course)
        self.student_user = User.objects.get(email=self.student)
        self.factory = RequestFactory()
        # Disable the score change signal to prevent other components from being pulled into tests.
        signal_patch = patch('courseware.module_render.SCORE_CHANGED.send')
        signal_patch.start()
        self.addCleanup(signal_patch.stop)
    def add_dropdown_to_section(self, section_location, name, num_inputs=2):
        """
        Create and return a dropdown problem.
        section_location: location object of section in which to create the problem
        (problems must live in a section to be graded properly)
        name: string name of the problem
        num_inputs: the number of input fields to create in the problem
            (each input is worth one point; the problem weight equals num_inputs)
        """
        prob_xml = OptionResponseXMLFactory().build_xml(
            question_text='The correct answer is Correct',
            num_inputs=num_inputs,
            weight=num_inputs,
            options=['Correct', 'Incorrect', u'ⓤⓝⓘⓒⓞⓓⓔ'],
            correct_option='Correct'
        )
        problem = ItemFactory.create(
            parent_location=section_location,
            category='problem',
            data=prob_xml,
            metadata={'rerandomize': 'always'},
            display_name=name
        )
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
        return problem
    def add_graded_section_to_course(self, name, section_format='Homework', late=False, reset=False, showanswer=False):
        """
        Creates a graded homework section within a chapter and returns the section.
        late: if True, give the section a due date that is already past
        reset: if True, make the section rerandomize on every attempt
        showanswer: if True, set the section's showanswer policy to 'never'
        """
        # if we don't already have a chapter create a new one
        if not hasattr(self, 'chapter'):
            self.chapter = ItemFactory.create(
                parent_location=self.course.location,
                category='chapter'
            )
        if late:
            section = ItemFactory.create(
                parent_location=self.chapter.location,
                display_name=name,
                category='sequential',
                metadata={'graded': True, 'format': section_format, 'due': '2013-05-20T23:30'}
            )
        elif reset:
            section = ItemFactory.create(
                parent_location=self.chapter.location,
                display_name=name,
                category='sequential',
                rerandomize='always',
                metadata={
                    'graded': True,
                    'format': section_format,
                }
            )
        elif showanswer:
            section = ItemFactory.create(
                parent_location=self.chapter.location,
                display_name=name,
                category='sequential',
                showanswer='never',
                metadata={
                    'graded': True,
                    'format': section_format,
                }
            )
        else:
            section = ItemFactory.create(
                parent_location=self.chapter.location,
                display_name=name,
                category='sequential',
                metadata={'graded': True, 'format': section_format}
            )
        # now that we've added the problem and section to the course
        # we fetch the course from the database so the object we are
        # dealing with has these additions
        self.refresh_course()
        return section
    def add_grading_policy(self, grading_policy):
        """
        Add a grading policy to the course and persist it.
        """
        self.course.grading_policy = grading_policy
        self.update_course(self.course, self.student_user.id)
        self.refresh_course()
    def get_grade_summary(self):
        """
        calls grades.grade for current user and course.
        the keywords for the returned object are
        - grade : A final letter grade.
        - percent : The final percent for the class (rounded up).
        - section_breakdown : A breakdown of each section that makes
        up the grade. (For display)
        - grade_breakdown : A breakdown of the major components that
        make up the final grade. (For display)
        """
        return grades.grade(self.student_user, self.course)
    def get_progress_summary(self):
        """
        Return progress summary structure for current user and course.
        Returns
        - courseware_summary is a summary of all sections with problems in the course.
        It is organized as an array of chapters, each containing an array of sections,
        each containing an array of scores. This contains information for graded and
        ungraded problems, and is good for displaying a course summary with due dates,
        etc.
        """
        return grades.progress_summary(self.student_user, self.course)
    def check_grade_percent(self, percent):
        """
        Assert that percent grade is as expected.
        """
        grade_summary = self.get_grade_summary()
        self.assertEqual(grade_summary['percent'], percent)
    def earned_hw_scores(self):
        """
        Global scores, each Score is a Problem Set.
        Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
        """
        return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]
    def score_for_hw(self, hw_url_name):
        """
        Returns list of scores for a given url.
        Returns list of scores for the given homework:
        [<points on problem_1>, <points on problem_2>, ..., <points on problem_n>]
        """
        # list of grade summaries for each section
        sections_list = []
        for chapter in self.get_progress_summary():
            sections_list.extend(chapter['sections'])
        # get the first section that matches the url (there should only be one)
        hw_section = next(section for section in sections_list if section.get('url_name') == hw_url_name)
        return [s.earned for s in hw_section['scores']]
@attr('shard_3')
class TestCourseGrader(TestSubmittingProblems):
    """
    Suite of tests for the course grader.
    """
    # Tell Django to clean out all databases, not just default
    multi_db = True
    def basic_setup(self, late=False, reset=False, showanswer=False):
        """
        Set up a simple course for testing basic grading functionality.
        One homework section containing three one-point dropdown problems,
        with cutoffs A >= 0.9 and B >= 0.33.
        """
        grading_policy = {
            "GRADER": [{
                "type": "Homework",
                "min_count": 1,
                "drop_count": 0,
                "short_label": "HW",
                "weight": 1.0
            }],
            "GRADE_CUTOFFS": {
                'A': .9,
                'B': .33
            }
        }
        self.add_grading_policy(grading_policy)
        # set up a simple course with three problems
        self.homework = self.add_graded_section_to_course('homework', late=late, reset=reset, showanswer=showanswer)
        self.add_dropdown_to_section(self.homework.location, 'p1', 1)
        self.add_dropdown_to_section(self.homework.location, 'p2', 1)
        self.add_dropdown_to_section(self.homework.location, 'p3', 1)
        self.refresh_course()
    def weighted_setup(self):
        """
        Set up a simple course for testing weighted grading functionality.
        Homework counts for 25% of the grade and the final for 75%.
        """
        grading_policy = {
            "GRADER": [
                {
                    "type": "Homework",
                    "min_count": 1,
                    "drop_count": 0,
                    "short_label": "HW",
                    "weight": 0.25
                }, {
                    "type": "Final",
                    "name": "Final Section",
                    "short_label": "Final",
                    "weight": 0.75
                }
            ]
        }
        self.add_grading_policy(grading_policy)
        # set up a structure of 1 homework and 1 final
        self.homework = self.add_graded_section_to_course('homework')
        self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')
        self.final = self.add_graded_section_to_course('Final Section', 'Final')
        self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')
    def dropping_setup(self):
        """
        Set up a simple course for testing the dropping grading functionality.
        """
        grading_policy = {
            "GRADER": [
                {
                    "type": "Homework",
                    "min_count": 3,
                    "drop_count": 1,
                    "short_label": "HW",
                    "weight": 1
                }
            ]
        }
        self.add_grading_policy(grading_policy)
        # Set up a course structure that just consists of 3 homeworks.
        # Since the grading policy drops 1 entire homework, each problem is worth 25%
        # names for the problem in the homeworks
        self.hw1_names = ['h1p1', 'h1p2']
        self.hw2_names = ['h2p1', 'h2p2']
        self.hw3_names = ['h3p1', 'h3p2']
        self.homework1 = self.add_graded_section_to_course('homework1')
        self.add_dropdown_to_section(self.homework1.location, self.hw1_names[0], 1)
        self.add_dropdown_to_section(self.homework1.location, self.hw1_names[1], 1)
        self.homework2 = self.add_graded_section_to_course('homework2')
        self.add_dropdown_to_section(self.homework2.location, self.hw2_names[0], 1)
        self.add_dropdown_to_section(self.homework2.location, self.hw2_names[1], 1)
        self.homework3 = self.add_graded_section_to_course('homework3')
        self.add_dropdown_to_section(self.homework3.location, self.hw3_names[0], 1)
        self.add_dropdown_to_section(self.homework3.location, self.hw3_names[1], 1)
    def test_submission_late(self):
        """Test problem for due date in the past"""
        self.basic_setup(late=True)
        resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.assertEqual(resp.status_code, 200)
        # A past-due submission is rejected with a "state changed" message.
        err_msg = (
            "The state of this problem has changed since you loaded this page. "
            "Please refresh your page."
        )
        self.assertEqual(json.loads(resp.content).get("success"), err_msg)
    def test_submission_reset(self):
        """Test problem ProcessingErrors due to resets"""
        self.basic_setup(reset=True)
        resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
        # submit a second time to draw NotFoundError
        resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.assertEqual(resp.status_code, 200)
        err_msg = (
            "The state of this problem has changed since you loaded this page. "
            "Please refresh your page."
        )
        self.assertEqual(json.loads(resp.content).get("success"), err_msg)
    def test_submission_show_answer(self):
        """Test problem for ProcessingErrors due to showing answer"""
        self.basic_setup(showanswer=True)
        # showanswer='never' makes the show-answer request invalid.
        resp = self.show_question_answer('p1')
        self.assertEqual(resp.status_code, 200)
        err_msg = (
            "The state of this problem has changed since you loaded this page. "
            "Please refresh your page."
        )
        self.assertEqual(json.loads(resp.content).get("success"), err_msg)
    def test_show_answer_doesnt_write_to_csm(self):
        """
        Check that clicking "show answer" does not add new rows to the
        courseware student module history.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        # Now fetch the state entry for that problem.
        student_module = StudentModule.objects.filter(
            course_id=self.course.id,
            student=self.student_user
        )
        # count how many state history entries there are
        baseline = BaseStudentModuleHistory.get_history(student_module)
        self.assertEqual(len(baseline), 3)
        # now click "show answer"
        self.show_question_answer('p1')
        # check that we don't have more state history entries
        csmh = BaseStudentModuleHistory.get_history(student_module)
        self.assertEqual(len(csmh), 3)
    def test_grade_with_collected_max_score(self):
        """
        Tests that the results of grading runs before and after the cache
        warms are the same.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.look_at_question('p2')
        self.assertTrue(
            StudentModule.objects.filter(
                module_state_key=self.problem_location('p2')
            ).exists()
        )
        # problem isn't in the cache, but will be when graded
        self.check_grade_percent(0.33)
        # problem is in the cache, should be the same result
        self.check_grade_percent(0.33)
    def test_none_grade(self):
        """
        Check grade is 0 to begin with.
        """
        self.basic_setup()
        self.check_grade_percent(0)
        self.assertEqual(self.get_grade_summary()['grade'], None)
    def test_b_grade_exact(self):
        """
        Check that at exactly the cutoff, the grade is B.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.check_grade_percent(0.33)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_b_grade_above(self):
        """
        Check grade between cutoffs.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_a_grade(self):
        """
        Check that 100 percent completion gets an A
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Correct'})
        self.check_grade_percent(1.0)
        self.assertEqual(self.get_grade_summary()['grade'], 'A')
    def test_wrong_answers(self):
        """
        Check that answering incorrectly is graded properly.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_submissions_api_overrides_scores(self):
        """
        Check that scores reported by the submissions API override scores
        stored in courseware_studentmodule.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
        # But now we mock out a get_scores call, and watch as it overrides the
        # score read from StudentModule and our student gets an A instead.
        with patch('submissions.api.get_scores') as mock_get_scores:
            mock_get_scores.return_value = {
                self.problem_location('p3').to_deprecated_string(): (1, 1)
            }
            self.check_grade_percent(1.0)
            self.assertEqual(self.get_grade_summary()['grade'], 'A')
    def test_submissions_api_anonymous_student_id(self):
        """
        Check that the submissions API is sent an anonymous student ID.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        with patch('submissions.api.get_scores') as mock_get_scores:
            mock_get_scores.return_value = {
                self.problem_location('p3').to_deprecated_string(): (1, 1)
            }
            self.get_grade_summary()
            # Verify that the submissions API was sent an anonymized student ID
            mock_get_scores.assert_called_with(
                self.course.id.to_deprecated_string(),
                anonymous_id_for_user(self.student_user, self.course.id)
            )
    def test_weighted_homework(self):
        """
        Test that the homework section has proper weight.
        """
        self.weighted_setup()
        # Get both parts correct
        self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(0.25)
        self.assertEqual(self.earned_hw_scores(), [2.0])  # Order matters
        self.assertEqual(self.score_for_hw('homework'), [2.0])
    def test_weighted_exam(self):
        """
        Test that the exam section has the proper weight.
        """
        self.weighted_setup()
        self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(0.75)
    def test_weighted_total(self):
        """
        Test that the weighted total adds to 100.
        """
        self.weighted_setup()
        self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
        self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(1.0)
    def dropping_homework_stage1(self):
        """
        Get half the first homework correct and all of the second
        """
        self.submit_question_answer(self.hw1_names[0], {'2_1': 'Correct'})
        self.submit_question_answer(self.hw1_names[1], {'2_1': 'Incorrect'})
        for name in self.hw2_names:
            self.submit_question_answer(name, {'2_1': 'Correct'})
    def test_dropping_grades_normally(self):
        """
        Test that the dropping policy does not change things before it should.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 0])  # Order matters
        self.check_grade_percent(0.75)
    def test_dropping_nochange(self):
        """
        Tests that grade does not change when making the global homework grade minimum not unique.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        self.submit_question_answer(self.hw3_names[0], {'2_1': 'Correct'})
        self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
        self.assertEqual(self.score_for_hw('homework3'), [1.0, 0.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 1.0])  # Order matters
        self.check_grade_percent(0.75)
    def test_dropping_all_correct(self):
        """
        Test that the lowest is dropped for a perfect score.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        for name in self.hw3_names:
            self.submit_question_answer(name, {'2_1': 'Correct'})
        self.check_grade_percent(1.0)
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 2.0])  # Order matters
        self.assertEqual(self.score_for_hw('homework3'), [1.0, 1.0])
    def test_min_grade_credit_requirements_status(self):
        """
        Test for credit course. If user passes minimum grade requirement then
        status will be updated as satisfied in requirement status table.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        # Enable the course for credit
        credit_course = CreditCourse.objects.create(
            course_key=self.course.id,
            enabled=True,
        )
        # Configure a credit provider for the course
        CreditProvider.objects.create(
            provider_id="ASU",
            enable_integration=True,
            provider_url="https://credit.example.com/request",
        )
        requirements = [{
            "namespace": "grade",
            "name": "grade",
            "display_name": "Grade",
            "criteria": {"min_grade": 0.52},
        }]
        # Add a single credit requirement (final grade)
        set_credit_requirements(self.course.id, requirements)
        # Grading triggers the credit-requirement status update.
        self.get_grade_summary()
        req_status = get_credit_requirement_status(self.course.id, self.student_user.username, 'grade', 'grade')
        self.assertEqual(req_status[0]["status"], 'satisfied')
@attr('shard_1')
class ProblemWithUploadedFilesTest(TestSubmittingProblems):
    """Tests of problems with uploaded files."""
    # Tell Django to clean out all databases, not just default
    multi_db = True
    def setUp(self):
        super(ProblemWithUploadedFilesTest, self).setUp()
        self.section = self.add_graded_section_to_course('section')
    def problem_setup(self, name, files):
        """
        Create a CodeResponse problem with files to upload.

        name: display name for the problem
        files: space-separated string of filenames that are both allowed and required
        """
        xmldata = CodeResponseXMLFactory().build_xml(
            allowed_files=files, required_files=files,
        )
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            display_name=name,
            data=xmldata
        )
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def test_three_files(self):
        """
        Submit three uploaded files and verify they are forwarded to the
        xqueue as multipart file attachments.
        """
        # Open the test files, and arrange to close them later.
        filenames = "prog1.py prog2.py prog3.py"
        fileobjs = [
            open(os.path.join(settings.COMMON_TEST_DATA_ROOT, "capa", filename))
            for filename in filenames.split()
        ]
        for fileobj in fileobjs:
            self.addCleanup(fileobj.close)
        self.problem_setup("the_problem", filenames)
        with patch('courseware.module_render.XQUEUE_INTERFACE.session') as mock_session:
            resp = self.submit_question_answer("the_problem", {'2_1': fileobjs})
        self.assertEqual(resp.status_code, 200)
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp['success'], "incorrect")
        # See how post got called.
        name, args, kwargs = mock_session.mock_calls[0]
        self.assertEqual(name, "post")
        self.assertEqual(len(args), 1)
        self.assertTrue(args[0].endswith("/submit/"))
        self.assertItemsEqual(kwargs.keys(), ["files", "data"])
        self.assertItemsEqual(kwargs['files'].keys(), filenames.split())
@attr('shard_1')
class TestPythonGradedResponse(TestSubmittingProblems):
    """
    Check that we can submit a schematic and custom response, and it answers properly.
    """
    # Tell Django to clean out all databases, not just default
    multi_db = True
    # Grader script embedded in a schematic response problem; runs server-side
    # against the submitted circuit's transient analysis output.
    SCHEMATIC_SCRIPT = dedent("""
        # for a schematic response, submission[i] is the json representation
        # of the diagram and analysis results for the i-th schematic tag
        def get_tran(json,signal):
            for element in json:
                if element[0] == 'transient':
                    return element[1].get(signal,[])
            return []
        def get_value(at,output):
            for (t,v) in output:
                if at == t: return v
            return None
        output = get_tran(submission[0],'Z')
        okay = True
        # output should be 1, 1, 1, 1, 1, 0, 0, 0
        if get_value(0.0000004, output) < 2.7: okay = False;
        if get_value(0.0000009, output) < 2.7: okay = False;
        if get_value(0.0000014, output) < 2.7: okay = False;
        if get_value(0.0000019, output) < 2.7: okay = False;
        if get_value(0.0000024, output) < 2.7: okay = False;
        if get_value(0.0000029, output) > 0.25: okay = False;
        if get_value(0.0000034, output) > 0.25: okay = False;
        if get_value(0.0000039, output) > 0.25: okay = False;
        correct = ['correct' if okay else 'incorrect']""").strip()
    # Sample submission that satisfies the script above.
    SCHEMATIC_CORRECT = json.dumps(
        [['transient', {'Z': [
            [0.0000004, 2.8],
            [0.0000009, 2.8],
            [0.0000014, 2.8],
            [0.0000019, 2.8],
            [0.0000024, 2.8],
            [0.0000029, 0.2],
            [0.0000034, 0.2],
            [0.0000039, 0.2]
        ]}]]
    )
    # Same submission with one sample value wrong.
    SCHEMATIC_INCORRECT = json.dumps(
        [['transient', {'Z': [
            [0.0000004, 2.8],
            [0.0000009, 0.0],  # wrong.
            [0.0000014, 2.8],
            [0.0000019, 2.8],
            [0.0000024, 2.8],
            [0.0000029, 0.2],
            [0.0000034, 0.2],
            [0.0000039, 0.2]
        ]}]]
    )
    # Check function (cfn) used by the custom response problem: compares
    # comma-separated values ignoring spaces and surrounding quotes.
    CUSTOM_RESPONSE_SCRIPT = dedent("""
        def test_csv(expect, ans):
            # Take out all spaces in expected answer
            expect = [i.strip(' ') for i in str(expect).split(',')]
            # Take out all spaces in student solution
            ans = [i.strip(' ') for i in str(ans).split(',')]
            def strip_q(x):
                # Strip quotes around strings if students have entered them
                stripped_ans = []
                for item in x:
                    if item[0] == "'" and item[-1]=="'":
                        item = item.strip("'")
                    elif item[0] == '"' and item[-1] == '"':
                        item = item.strip('"')
                    stripped_ans.append(item)
                return stripped_ans
            return strip_q(expect) == strip_q(ans)""").strip()
    CUSTOM_RESPONSE_CORRECT = "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"
    CUSTOM_RESPONSE_INCORRECT = "Reading my code I see. I hope you like it :)"
    # Answer script used by the computed-answer custom response problem.
    COMPUTED_ANSWER_SCRIPT = dedent("""
        if submission[0] == "a shout in the street":
            correct = ['correct']
        else:
            correct = ['incorrect']""").strip()
    COMPUTED_ANSWER_CORRECT = "a shout in the street"
    COMPUTED_ANSWER_INCORRECT = "because we never let them in"
    def setUp(self):
        super(TestPythonGradedResponse, self).setUp()
        self.section = self.add_graded_section_to_course('section')
        # Maps problem name -> known correct / incorrect submissions.
        self.correct_responses = {}
        self.incorrect_responses = {}
    def schematic_setup(self, name):
        """
        set up an example Circuit_Schematic_Builder problem
        """
        script = self.SCHEMATIC_SCRIPT
        xmldata = SchematicResponseXMLFactory().build_xml(answer=script)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='circuitschematic.yaml',
            display_name=name,
            data=xmldata
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = self.SCHEMATIC_CORRECT
        self.incorrect_responses[name] = self.SCHEMATIC_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def custom_response_setup(self, name):
        """
        set up an example custom response problem using a check function
        """
        test_csv = self.CUSTOM_RESPONSE_SCRIPT
        expect = self.CUSTOM_RESPONSE_CORRECT
        cfn_problem_xml = CustomResponseXMLFactory().build_xml(script=test_csv, cfn='test_csv', expect=expect)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='customgrader.yaml',
            data=cfn_problem_xml,
            display_name=name
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = expect
        self.incorrect_responses[name] = self.CUSTOM_RESPONSE_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def computed_answer_setup(self, name):
        """
        set up an example problem using an answer script
        """
        script = self.COMPUTED_ANSWER_SCRIPT
        computed_xml = CustomResponseXMLFactory().build_xml(answer=script)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='customgrader.yaml',
            data=computed_xml,
            display_name=name
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = self.COMPUTED_ANSWER_CORRECT
        self.incorrect_responses[name] = self.COMPUTED_ANSWER_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def _check_correct(self, name):
        """
        check that problem named "name" gets evaluated as correct
        """
        resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'correct')
    def _check_incorrect(self, name):
        """
        check that problem named "name" gets evaluated as incorrect
        """
        resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'incorrect')
    def _check_ireset(self, name):
        """
        Check that the problem can be reset
        """
        # first, get the question wrong
        resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
        # reset the question
        self.reset_question_answer(name)
        # then get it right
        resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'correct')
    def test_schematic_correct(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_correct(name)
    def test_schematic_incorrect(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_incorrect(name)
    def test_schematic_reset(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_ireset(name)
    def test_check_function_correct(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_correct(name)
    def test_check_function_incorrect(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_incorrect(name)
    def test_check_function_reset(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_ireset(name)
    def test_computed_correct(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_correct(name)
    def test_computed_incorrect(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_incorrect(name)
    def test_computed_reset(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_ireset(name)
@attr('shard_1')
class TestAnswerDistributions(TestSubmittingProblems):
    """Check that we can pull answer distributions for problems."""
    def setUp(self):
        """Set up a simple course with three one-input dropdown problems."""
        super(TestAnswerDistributions, self).setUp()
        self.homework = self.add_graded_section_to_course('homework')
        self.p1_html_id = self.add_dropdown_to_section(self.homework.location, 'p1', 1).location.html_id()
        self.p2_html_id = self.add_dropdown_to_section(self.homework.location, 'p2', 1).location.html_id()
        self.p3_html_id = self.add_dropdown_to_section(self.homework.location, 'p3', 1).location.html_id()
        self.refresh_course()
    def test_empty(self):
        # Just make sure we can process this without errors.
        empty_distribution = grades.answer_distributions(self.course.id)
        self.assertFalse(empty_distribution)  # should be empty
    def test_one_student(self):
        # Basic test to make sure we have simple behavior right for a student
        # Throw in a non-ASCII answer
        self.submit_question_answer('p1', {'2_1': u'ⓤⓝⓘⓒⓞⓓⓔ'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        distributions = grades.answer_distributions(self.course.id)
        self.assertEqual(
            distributions,
            {
                ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
                    u'ⓤⓝⓘⓒⓞⓓⓔ': 1
                },
                ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
                    'Correct': 1
                }
            }
        )
    def test_multiple_students(self):
        # Our test class is based around making requests for a particular user,
        # so we're going to cheat by creating another user and copying and
        # modifying StudentModule entries to make them from other users. It's
        # a little hacky, but it seemed the simpler way to do this.
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        self.submit_question_answer('p2', {'2_1': u'Incorrect'})
        self.submit_question_answer('p3', {'2_1': u'Correct'})
        # Make the above submissions owned by user2
        user2 = UserFactory.create()
        problems = StudentModule.objects.filter(
            course_id=self.course.id,
            student=self.student_user
        )
        for problem in problems:
            problem.student_id = user2.id
            problem.save()
        # Now make more submissions by our original user
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        self.submit_question_answer('p2', {'2_1': u'Correct'})
        self.assertEqual(
            grades.answer_distributions(self.course.id),
            {
                ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
                    'Correct': 2
                },
                ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
                    'Correct': 1,
                    'Incorrect': 1
                },
                ('p3', 'p3', '{}_2_1'.format(self.p3_html_id)): {
                    'Correct': 1
                }
            }
        )
    def test_other_data_types(self):
        # We'll submit one problem, and then muck with the student_answers
        # dict inside its state to try different data types (str, int, float,
        # none)
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        # Now fetch the state entry for that problem.
        student_module = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        for val in ('Correct', True, False, 0, 0.0, 1, 1.0, None):
            state = json.loads(student_module.state)
            state["student_answers"]['{}_2_1'.format(self.p1_html_id)] = val
            student_module.state = json.dumps(state)
            student_module.save()
            # Every value should be reported via its str() form.
            self.assertEqual(
                grades.answer_distributions(self.course.id),
                {
                    ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
                        str(val): 1
                    },
                }
            )
    def test_missing_content(self):
        # If there's a StudentModule entry for content that no longer exists,
        # we just quietly ignore it (because we can't display a meaningful url
        # or name for it).
        self.submit_question_answer('p1', {'2_1': 'Incorrect'})
        # Now fetch the state entry for that problem and alter it so it points
        # to a non-existent problem.
        student_module = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        student_module.module_state_key = student_module.module_state_key.replace(
            name=student_module.module_state_key.name + "_fake"
        )
        student_module.save()
        # It should be empty (ignored)
        empty_distribution = grades.answer_distributions(self.course.id)
        self.assertFalse(empty_distribution)  # should be empty
    def test_broken_state(self):
        # Missing or broken state for a problem should be skipped without
        # causing the whole answer_distribution call to explode.
        # Submit p1
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        # Now fetch the StudentModule entry for p1 so we can corrupt its state
        prb1 = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        # Submit p2
        self.submit_question_answer('p2', {'2_1': u'Incorrect'})
        for new_p1_state in ('{"student_answers": {}}', "invalid json!", None):
            prb1.state = new_p1_state
            prb1.save()
            # p1 won't show up, but p2 should still work
            self.assertEqual(
                grades.answer_distributions(self.course.id),
                {
                    ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
                        'Incorrect': 1
                    },
                }
            )
@attr('shard_1')
class TestConditionalContent(TestSubmittingProblems):
"""
Check that conditional content works correctly with grading.
"""
    def setUp(self):
        """
        Set up a simple course with a grading policy, a UserPartition, and 2 sections, both graded as "homework".
        One section is pre-populated with a problem (with 2 inputs), visible to all students.
        The second section is empty. Test cases should add conditional content to it.
        """
        super(TestConditionalContent, self).setUp()
        # Group ids within the partition; 'alpha' is group 0, 'beta' is group 1.
        self.user_partition_group_0 = 0
        self.user_partition_group_1 = 1
        self.partition = UserPartition(
            0,
            'first_partition',
            'First Partition',
            [
                Group(self.user_partition_group_0, 'alpha'),
                Group(self.user_partition_group_1, 'beta')
            ]
        )
        # Recreate the course so it carries the user partition from the start.
        self.course = CourseFactory.create(
            display_name=self.COURSE_NAME,
            number=self.COURSE_SLUG,
            user_partitions=[self.partition]
        )
        grading_policy = {
            "GRADER": [{
                "type": "Homework",
                "min_count": 2,
                "drop_count": 0,
                "short_label": "HW",
                "weight": 1.0
            }]
        }
        self.add_grading_policy(grading_policy)
        # Section visible to all students, with one two-input problem.
        self.homework_all = self.add_graded_section_to_course('homework1')
        self.p1_all_html_id = self.add_dropdown_to_section(self.homework_all.location, 'H1P1', 2).location.html_id()
        # Empty section; tests add split_test (conditional) content here.
        self.homework_conditional = self.add_graded_section_to_course('homework2')
def split_setup(self, user_partition_group):
"""
Setup for tests using split_test module. Creates a split_test instance as a child of self.homework_conditional
with 2 verticals in it, and assigns self.student_user to the specified user_partition_group.
The verticals are returned.
"""
vertical_0_url = self.course.id.make_usage_key("vertical", "split_test_vertical_0")
vertical_1_url = self.course.id.make_usage_key("vertical", "split_test_vertical_1")
group_id_to_child = {}
for index, url in enumerate([vertical_0_url, vertical_1_url]):
group_id_to_child[str(index)] = url
split_test = ItemFactory.create(
parent_location=self.homework_conditional.location,
category="split_test",
display_name="Split test",
user_partition_id='0',
group_id_to_child=group_id_to_child,
)
vertical_0 = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 0 vertical",
location=vertical_0_url,
)
vertical_1 = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 1 vertical",
location=vertical_1_url,
)
# Now add the student to the specified group.
UserCourseTagFactory(
user=self.student_user,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id),
value=str(user_partition_group)
)
return vertical_0, vertical_1
def split_different_problems_setup(self, user_partition_group):
"""
Setup for the case where the split test instance contains problems for each group
(so both groups do have graded content, though it is different).
Group 0 has 2 problems, worth 1 and 3 points respectively.
Group 1 has 1 problem, worth 1 point.
This method also assigns self.student_user to the specified user_partition_group and
then submits answers for the problems in section 1, which are visible to all students.
The submitted answers give the student 1 point out of a possible 2 points in the section.
"""
vertical_0, vertical_1 = self.split_setup(user_partition_group)
# Group 0 will have 2 problems in the section, worth a total of 4 points.
self.add_dropdown_to_section(vertical_0.location, 'H2P1_GROUP0', 1).location.html_id()
self.add_dropdown_to_section(vertical_0.location, 'H2P2_GROUP0', 3).location.html_id()
# Group 1 will have 1 problem in the section, worth a total of 1 point.
self.add_dropdown_to_section(vertical_1.location, 'H2P1_GROUP1', 1).location.html_id()
# Submit answers for problem in Section 1, which is visible to all students.
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'})
def test_split_different_problems_group_0(self):
"""
Tests that users who see different problems in a split_test module instance are graded correctly.
This is the test case for a user in user partition group 0.
"""
self.split_different_problems_setup(self.user_partition_group_0)
self.submit_question_answer('H2P1_GROUP0', {'2_1': 'Correct'})
self.submit_question_answer('H2P2_GROUP0', {'2_1': 'Correct', '2_2': 'Incorrect', '2_3': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 2.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 3.0])
# Grade percent is .63. Here is the calculation
homework_1_score = 1.0 / 2
homework_2_score = (1.0 + 2.0) / 4
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def test_split_different_problems_group_1(self):
"""
Tests that users who see different problems in a split_test module instance are graded correctly.
This is the test case for a user in user partition group 1.
"""
self.split_different_problems_setup(self.user_partition_group_1)
self.submit_question_answer('H2P1_GROUP1', {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
# Grade percent is .75. Here is the calculation
homework_1_score = 1.0 / 2
homework_2_score = 1.0 / 1
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def split_one_group_no_problems_setup(self, user_partition_group):
"""
Setup for the case where the split test instance contains problems on for one group.
Group 0 has no problems.
Group 1 has 1 problem, worth 1 point.
This method also assigns self.student_user to the specified user_partition_group and
then submits answers for the problems in section 1, which are visible to all students.
The submitted answers give the student 2 points out of a possible 2 points in the section.
"""
[_, vertical_1] = self.split_setup(user_partition_group)
# Group 1 will have 1 problem in the section, worth a total of 1 point.
self.add_dropdown_to_section(vertical_1.location, 'H2P1_GROUP1', 1).location.html_id()
self.submit_question_answer('H1P1', {'2_1': 'Correct'})
def test_split_one_group_no_problems_group_0(self):
"""
Tests what happens when a given group has no problems in it (students receive 0 for that section).
"""
self.split_one_group_no_problems_setup(self.user_partition_group_0)
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [])
self.assertEqual(self.earned_hw_scores(), [1.0, 0.0])
# Grade percent is .25. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 0.0
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def test_split_one_group_no_problems_group_1(self):
"""
Verifies students in the group that DOES have a problem receive a score for their problem.
"""
self.split_one_group_no_problems_setup(self.user_partition_group_1)
self.submit_question_answer('H2P1_GROUP1', {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
# Grade percent is .75. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 1.0 / 1
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
| agpl-3.0 | 2,510,981,406,953,927,000 | 36.908148 | 119 | 0.594537 | false |
CentechMTL/TableauDeBord | app/businessCanvas/tests/tests_views.py | 2 | 10544 | # coding: utf-8
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
import time
from app.founder.factories import FounderFactory
from app.mentor.factories import MentorFactory
from app.home.factories import UserFactory, StaffUserProfileFactory, \
ExecutiveUserProfileFactory
from app.company.factories import CompanyStatusFactory, CompanyFactory
from app.businessCanvas.factories import BusinessCanvasElementFactory, \
ArchiveFactory
from app.businessCanvas.models import BUSINESS_CANVAS_TYPE_CHOICES
class BusinessCanvasTests(TestCase):
def setUp(self):
settings.EMAIL_BACKEND = \
'django.core.mail.backends.locmem.EmailBackend'
self.founder = FounderFactory()
self.mentor = MentorFactory()
self.staff = StaffUserProfileFactory()
self.executive = ExecutiveUserProfileFactory()
self.founderCompany = FounderFactory()
self.mentorCompany = MentorFactory()
self.status = CompanyStatusFactory()
self.company = CompanyFactory(companyStatus=self.status)
self.company.founders.add(self.founderCompany)
self.company.mentors.add(self.mentorCompany)
self.company.save()
self.element = BusinessCanvasElementFactory(
company=self.company,
type=BUSINESS_CANVAS_TYPE_CHOICES[0][0]
)
self.element2 = BusinessCanvasElementFactory(
company=self.company,
type=BUSINESS_CANVAS_TYPE_CHOICES[1][0]
)
self.archive = ArchiveFactory(company=self.company)
self.archive.elements.add(self.element)
def test_businessCanvasElement_list(self):
"""
To test the business canvas list of a company.
"""
"""
Access : Staff
"""
self.client.logout()
self.client.login(
username=self.staff.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElement_list',
args=[self.company.id]
),
follow=False
)
self.assertEqual(result.status_code, 200)
"""
Access : Founders of the company
"""
self.client.logout()
self.client.login(
username=self.founderCompany.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElement_list',
args=[self.company.id]
),
follow=False
)
self.assertEqual(result.status_code, 200)
"""
Access : Mentors of the company
"""
self.client.logout()
self.client.login(
username=self.mentorCompany.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElement_list',
args=[self.company.id]
),
follow=False
)
self.assertEqual(result.status_code, 200)
"""
No Access : Other founders
"""
self.client.logout()
self.client.login(
username=self.founder.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElement_list',
args=[self.company.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
"""
No Access : Other mentors
"""
self.client.logout()
self.client.login(
username=self.mentor.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElement_list',
args=[self.company.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
"""
No Access : Executive
"""
self.client.logout()
self.client.login(
username=self.executive.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElement_list',
args=[self.company.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
def test_businessCanvasElementArchived_list(self):
"""
To test the business canvas archived list of a company.
"""
"""
Access : Staff
"""
self.client.logout()
self.client.login(
username=self.staff.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElementArchived_list',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 200)
"""
Access : Founders of the company
"""
self.client.logout()
self.client.login(
username=self.founderCompany.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElementArchived_list',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 200)
"""
Access : Mentors of the company
"""
self.client.logout()
self.client.login(
username=self.mentorCompany.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElementArchived_list',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 200)
"""
No Access : Other founders
"""
self.client.logout()
self.client.login(
username=self.founder.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElementArchived_list',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
"""
No Access : Other mentors
"""
self.client.logout()
self.client.login(
username=self.mentor.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElementArchived_list',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
"""
No Access : Executive
"""
self.client.logout()
self.client.login(
username=self.executive.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasElementArchived_list',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
def test_businessCanvasDeleteArchive(self):
"""
To test delete a business canvas archive of a company.
"""
"""
Access : Founders of the company
"""
self.client.logout()
self.client.login(
username=self.founderCompany.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasDeleteArchive',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 200)
"""
No Access : Staff
"""
self.client.logout()
self.client.login(
username=self.staff.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasDeleteArchive',
kwargs={'pk': self.archive.id}
),
follow=False
)
self.assertEqual(result.status_code, 302)
"""
No Access : Mentors of the company
"""
self.client.logout()
self.client.login(
username=self.mentorCompany.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasDeleteArchive',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
"""
No Access : Other founders
"""
self.client.logout()
self.client.login(
username=self.founder.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasDeleteArchive',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
"""
No Access : Other mentors
"""
self.client.logout()
self.client.login(
username=self.mentor.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasDeleteArchive',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
"""
No Access : Executive
"""
self.client.logout()
self.client.login(
username=self.executive.user.username,
password="Toto1234!#"
)
result = self.client.get(
reverse(
'businessCanvas:businessCanvasDeleteArchive',
args=[self.archive.id]
),
follow=False
)
self.assertEqual(result.status_code, 302)
| gpl-3.0 | 6,807,858,770,510,924,000 | 26.245478 | 72 | 0.53272 | false |
cyphar/synge | src/gtk/bakeui.py | 1 | 1324 | #!/usr/bin/env python3
# Synge-GTK: A graphical interface for Synge
# Copyright (C) 2013, 2016 Aleksa Sarai
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Takes 3 arguments:
# bakeui.py <template> <xml-ui> <output-file>
from sys import argv, exit
from re import sub
from codecs import open
def main():
if len(argv) != 4:
exit(1)
xmlui = ""
template = ""
with open(argv[2], "r", "utf8") as f:
xmlui = f.read().replace('"', '\\"').replace("\n", "")
xmlui = sub(">\s+?<", "><", xmlui)
with open(argv[1], "r", "utf8") as f:
template = f.read()
final = template.replace("STRING_REPLACED_DURING_COMPILE_TIME", xmlui);
with open(argv[3], "w", "utf8") as f:
f.write(final + "\n\n")
if __name__ == "__main__":
main()
| gpl-3.0 | 7,089,773,359,850,187,000 | 28.422222 | 73 | 0.680514 | false |
b3j0f/aop | b3j0f/aop/advice/test/core.py | 1 | 22259 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014 Jonathan Labéjof <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
from unittest import main
from six import PY2
from b3j0f.utils.ut import UTCase
from ..core import weave, unweave, weave_on
from time import sleep
class WeaveTest(UTCase):
def setUp(self):
self.count = 0
def joinpoint(self, joinpoint):
"""
Default interceptor which increments self count
"""
self.count += 1
return joinpoint.proceed()
def test_builtin(self):
weave(target=min, advices=[self.joinpoint, self.joinpoint])
weave(target=min, advices=self.joinpoint)
min(5, 2)
self.assertEqual(self.count, 3)
unweave(min)
min(5, 2)
self.assertEqual(self.count, 3)
def test_method(self):
class A():
def __init__(self):
pass
def a(self):
pass
weave(target=A.a, advices=[self.joinpoint, self.joinpoint], ctx=A)
weave(target=A, advices=self.joinpoint, pointcut='__init__', ctx=A)
weave(target=A.__init__, advices=self.joinpoint, ctx=A)
a = A()
a.a()
self.assertEqual(self.count, 4)
unweave(A.a, ctx=A)
unweave(A, ctx=A)
A()
a.a()
self.assertEqual(self.count, 4)
def test_function(self):
def f():
pass
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
f()
self.assertEqual(self.count, 3)
unweave(f)
f()
self.assertEqual(self.count, 3)
def test_lambda(self):
f = lambda: None
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
f()
self.assertEqual(self.count, 3)
unweave(f)
f()
self.assertEqual(self.count, 3)
def test_function_args(self):
def f(a):
pass
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
f(1)
self.assertEqual(self.count, 3)
unweave(f)
f(1)
self.assertEqual(self.count, 3)
def test_function_varargs(self):
def f(*args):
pass
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
f()
self.assertEqual(self.count, 3)
unweave(f)
f()
self.assertEqual(self.count, 3)
def test_function_args_varargs(self):
def f(a, **args):
pass
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
f(1)
self.assertEqual(self.count, 3)
unweave(f)
f(1)
self.assertEqual(self.count, 3)
def test_function_kwargs(self):
def f(**kwargs):
pass
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
f()
self.assertEqual(self.count, 3)
unweave(f)
f()
self.assertEqual(self.count, 3)
def test_function_args_kwargs(self):
def f(a, **args):
pass
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
f(1)
self.assertEqual(self.count, 3)
unweave(f)
f(1)
self.assertEqual(self.count, 3)
def test_function_args_varargs_kwargs(self):
def f(a, *args, **kwargs):
pass
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
f(1)
self.assertEqual(self.count, 3)
unweave(f)
f(1)
self.assertEqual(self.count, 3)
def _assert_class(self, cls):
"""
Run assertion tests on input cls
"""
weave(target=cls, advices=[self.joinpoint, self.joinpoint])
weave(target=cls, advices=self.joinpoint, pointcut='__init__')
weave(target=cls.B, advices=[self.joinpoint, self.joinpoint])
weave(target=cls.B, advices=self.joinpoint, pointcut='__init__')
weave(target=cls.C, advices=[self.joinpoint, self.joinpoint])
weave(target=cls.C, advices=self.joinpoint, pointcut='__init__')
cls()
self.assertEqual(self.count, 3)
cls.B()
self.assertEqual(self.count, 6)
cls.C()
self.assertEqual(self.count, 9)
unweave(cls)
cls()
self.assertEqual(self.count, 9)
unweave(cls.B)
cls.B()
self.assertEqual(self.count, 9)
unweave(cls.C)
cls.C()
self.assertEqual(self.count, 9)
def test_class(self):
class A(object):
class B(object):
def __init__(self):
pass
class C(object):
pass
def __init__(self):
pass
self._assert_class(A)
def test_namespace(self):
class A:
class B:
def __init__(self):
pass
class C:
pass
def __init__(self):
pass
self._assert_class(A)
def test_multi(self):
count = 5
f = lambda: None
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
for i in range(count):
f()
self.assertEqual(self.count, 3 * count)
unweave(f)
for i in range(count):
f()
self.assertEqual(self.count, 3 * count)
weave(target=f, advices=[self.joinpoint, self.joinpoint])
weave(target=f, advices=self.joinpoint)
for i in range(count):
f()
self.assertEqual(self.count, 6 * count)
unweave(f)
for i in range(count):
f()
self.assertEqual(self.count, 6 * count)
def test_ttl(self):
def test():
pass
weave(target=test, advices=self.joinpoint, ttl=0.1)
test()
self.assertEqual(self.count, 1)
sleep(0.2)
test()
self.assertEqual(self.count, 1)
def test_cancel_ttl(self):
def test():
pass
_, timer = weave(target=test, advices=self.joinpoint, ttl=0.1)
timer.cancel()
sleep(0.2)
test()
self.assertEqual(self.count, 1)
def test_inheritance(self):
class BaseTest:
def test(self):
pass
class Test(BaseTest):
pass
self.assertEqual(BaseTest.test, Test.test)
weave(ctx=Test, target=Test.test, advices=lambda x: None)
self.assertNotEqual(BaseTest.test, Test.test)
unweave(ctx=Test, target=Test.test)
self.assertEqual(BaseTest.test, Test.test)
def test_inherited_method(self):
self.count = 0
class BaseTest:
def __init__(self, testcase):
self.testcase = testcase
def test(self):
self.testcase.count += 1
class Test(BaseTest):
pass
basetest = BaseTest(self)
test = Test(self)
weave(ctx=Test, target=Test.test, advices=lambda x: None)
basetest.test()
self.assertEqual(self.count, 1)
test.test()
self.assertEqual(self.count, 1)
unweave(ctx=Test, target=Test.test)
basetest.test()
self.assertEqual(self.count, 2)
test.test()
self.assertEqual(self.count, 3)
def test_inherited_instance_method(self):
class BaseTest(object):
def test(self):
pass
self._test_inherited(BaseTest)
def test_inherited_instance_method_with_container(self):
class BaseTest:
def test(self):
pass
self._test_inherited(BaseTest)
def _test_inherited(self, BaseTest):
self.count = 0
class Test(BaseTest):
pass
def advice(jp):
self.count += 1
return jp.proceed()
self.old_count = 0
def assertCount(f, increment=0):
"""
Assert incrementation of count in executing.
"""
f()
self.old_count += increment
self.assertEqual(self.count, self.old_count)
test = Test()
test2 = Test()
basetest = BaseTest()
basetest2 = BaseTest()
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
weave(target=test.test, advices=advice, ctx=test)
assertCount(test.test, 1)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
unweave(target=test.test, ctx=test)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
weave(target=basetest.test, advices=advice, ctx=basetest)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test, 1)
assertCount(basetest2.test)
unweave(target=basetest.test, ctx=basetest)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
weave(target=BaseTest.test, advices=advice, ctx=BaseTest)
assertCount(test.test, 1)
assertCount(test2.test, 1)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
unweave(target=BaseTest.test, ctx=BaseTest)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
weave(target=Test.test, advices=advice, ctx=Test)
assertCount(test.test, 1)
assertCount(test2.test, 1)
assertCount(basetest.test)
assertCount(basetest2.test)
unweave(target=Test.test, ctx=Test)
# weave all
weave(target=BaseTest.test, advices=advice, ctx=BaseTest)
weave(target=Test.test, advices=advice, ctx=Test)
weave(target=test.test, advices=advice, ctx=test)
assertCount(test.test, 3)
assertCount(test2.test, 2)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove middle interceptor
unweave(target=Test.test, ctx=Test)
assertCount(test.test, 2)
assertCount(test2.test, 1)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove the first
unweave(target=BaseTest.test, ctx=BaseTest)
assertCount(test.test, 1)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
# remove the last
unweave(target=test.test, ctx=test)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
# weave all in opposite way
weave(target=test.test, advices=advice, ctx=test)
weave(target=Test.test, advices=advice, ctx=Test)
weave(target=BaseTest.test, advices=advice, ctx=BaseTest)
assertCount(test.test, 3)
assertCount(test2.test, 2)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove middle interceptor
unweave(target=Test.test, ctx=Test)
assertCount(test.test, 2)
assertCount(test2.test, 1)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove last
unweave(target=BaseTest.test, ctx=BaseTest)
assertCount(test.test, 1)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
# remove first
unweave(target=test.test, ctx=test)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
# weave all in random way
weave(target=Test.test, advices=advice, ctx=Test)
weave(target=test.test, advices=advice, ctx=test)
weave(target=BaseTest.test, advices=advice, ctx=BaseTest)
assertCount(test.test, 3)
assertCount(test2.test, 2)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove middle interceptor
unweave(target=Test.test, ctx=Test)
assertCount(test.test, 2)
assertCount(test2.test, 1)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove last
unweave(target=BaseTest.test, ctx=BaseTest)
assertCount(test.test, 1)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
# remove first
unweave(target=test.test, ctx=test)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
# weave all in random way
weave(target=Test.test, advices=advice, ctx=Test)
weave(target=test.test, advices=advice, ctx=test)
weave(target=BaseTest.test, advices=advice, ctx=BaseTest)
assertCount(test.test, 3)
assertCount(test2.test, 2)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove first interceptor
unweave(target=BaseTest.test, ctx=BaseTest)
assertCount(test.test, 2)
assertCount(test2.test, 1)
assertCount(basetest.test)
assertCount(basetest2.test)
# remove second
unweave(target=Test.test, ctx=Test)
assertCount(test.test, 1)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
# remove last
unweave(target=test.test, ctx=test)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
# weave all in random way
weave(target=Test.test, advices=advice, ctx=Test)
weave(target=test.test, advices=advice, ctx=test)
weave(target=BaseTest.test, advices=advice, ctx=BaseTest)
assertCount(test.test, 3)
assertCount(test2.test, 2)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove last interceptor
unweave(target=test.test, ctx=test)
assertCount(test.test, 2)
assertCount(test2.test, 2)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove second
unweave(target=Test.test, ctx=Test)
assertCount(test.test, 1)
assertCount(test2.test, 1)
assertCount(basetest.test, 1)
assertCount(basetest2.test, 1)
# remove first
unweave(target=BaseTest.test, ctx=BaseTest)
assertCount(test.test)
assertCount(test2.test)
assertCount(basetest.test)
assertCount(basetest2.test)
def test_instance_method(self):
class A:
def __call__(self):
return 1
a = A()
self.assertEqual(
a.__call__.__func__,
A.__call__.__func__ if PY2 else A.__call__
)
weave(target=a.__call__, advices=lambda ae: None, ctx=a)
self.assertNotEqual(
a.__call__.__func__,
A.__call__.__func__ if PY2 else A.__call__
)
result = a.__call__()
self.assertEqual(result, None)
unweave(target=a.__call__, ctx=a)
self.assertEqual(
a.__call__.__func__,
A.__call__.__func__ if PY2 else A.__call__
)
result = a()
self.assertEqual(result, 1)
def test_instance_method_with_pointcut(self):
class A:
def __call__(self):
return 1
a = A()
weave(target=a, advices=lambda ae: None)
result = a()
self.assertEqual(result, None)
unweave(target=a)
result = a()
self.assertEqual(result, 1)
class WeaveOnTest(UTCase):
def setUp(self):
self.count = 0
def advice(self, joinpoint):
"""
Default interceptor which increments self count
"""
self.count += 1
return joinpoint.proceed()
def test_builtin(self):
weave_on(advices=[self.advice, self.advice])(min)
weave_on(advices=self.advice)(min)
min(5, 2)
self.assertEqual(self.count, 3)
unweave(min)
min(5, 2)
self.assertEqual(self.count, 3)
def test_method(self):
@weave_on(advices=self.advice, pointcut='__init__')
class A():
@weave_on(advices=[self.advice, self.advice])
def __init__(self):
pass
@weave_on(advices=[self.advice, self.advice, self.advice])
def a(self):
pass
a = A()
a.a()
self.assertEqual(self.count, 6)
def test_function(self):
@weave_on(self.advice)
@weave_on([self.advice, self.advice])
def f():
pass
f()
self.assertEqual(self.count, 3)
def test_lambda(self):
f = lambda: None
weave_on(self.advice)(f)
weave_on([self.advice, self.advice])(f)
f()
self.assertEqual(self.count, 3)
def test_function_args(self):
@weave_on(self.advice)
@weave_on([self.advice, self.advice])
def f(a):
pass
f(1)
self.assertEqual(self.count, 3)
def test_function_varargs(self):
@weave_on(self.advice)
@weave_on([self.advice, self.advice])
def f(*args):
pass
f()
self.assertEqual(self.count, 3)
def test_function_args_varargs(self):
@weave_on(self.advice)
@weave_on([self.advice, self.advice])
def f(a, **args):
pass
f(1)
self.assertEqual(self.count, 3)
def test_function_kwargs(self):
@weave_on([self.advice, self.advice])
@weave_on(self.advice)
def f(**kwargs):
pass
f()
self.assertEqual(self.count, 3)
def test_function_args_kwargs(self):
@weave_on(self.advice)
@weave_on([self.advice, self.advice])
def f(a, **args):
pass
f(1)
self.assertEqual(self.count, 3)
def test_function_args_varargs_kwargs(self):
@weave_on(self.advice)
@weave_on([self.advice, self.advice])
def f(a, *args, **kwargs):
pass
f(1)
self.assertEqual(self.count, 3)
def _assert_class(self, cls):
"""
Run assertion tests on input cls
"""
weave_on(advices=[self.advice, self.advice])(cls)
weave_on(advices=self.advice, pointcut='__init__')(cls)
weave_on(advices=[self.advice, self.advice])(cls.B)
weave_on(advices=self.advice, pointcut='__init__')(cls.B)
weave_on(advices=[self.advice, self.advice])(cls.C)
weave_on(advices=self.advice, pointcut='__init__')(cls.C)
cls()
cls.B()
cls.C()
self.assertEqual(self.count, 9)
def test_class(self):
class A(object):
class B(object):
def __init__(self):
pass
class C(object):
pass
def __init__(self):
pass
self._assert_class(A)
def test_namespace(self):
class A:
class B:
def __init__(self):
pass
class C:
pass
def __init__(self):
pass
self._assert_class(A)
def test_multi(self):
count = 5
f = lambda: None
weave_on(advices=[self.advice, self.advice])(f)
weave_on(advices=self.advice)(f)
for i in range(count):
f()
self.assertEqual(self.count, 3 * count)
unweave(f)
for i in range(count):
f()
self.assertEqual(self.count, 3 * count)
weave_on(advices=[self.advice, self.advice])(f)
weave_on(advices=self.advice)(f)
for i in range(count):
f()
self.assertEqual(self.count, 6 * count)
unweave(f)
for i in range(count):
f()
self.assertEqual(self.count, 6 * count)
def test_ttl(self):
def test():
pass
weave_on(advices=self.advice, ttl=0.1)(test)
test()
sleep(0.2)
test()
self.assertEqual(self.count, 1)
if __name__ == '__main__':
main()
| mit | -8,555,532,749,594,229,000 | 21.92173 | 79 | 0.560633 | false |
jmplonka/InventorLoader | dxfgrabber/dxfentities.py | 1 | 41274 | # encoding: utf-8
# Purpose: entity classes, new implementation without dxf12/dxf13 layer
# Created: 17.04.2016
# Copyright (C) 2016, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <[email protected]>"
import math
from . import const
from .color import TrueColor
from .styles import default_text_style
from .decode import decode
# Mapping of DXF '%%<char>' control sequences to their plain text replacement;
# unknown sequences are replaced by '' by the plain_text() methods.
SPECIAL_CHARS = {
    'd': '°'
}
# DXF group codes common to all entities, mapped to DXFEntity attribute names.
basic_attribs = {
    5: 'handle',
    6: 'linetype',
    8: 'layer',
    39: 'thickness',
    48: 'ltscale',
    62: 'color',
    67: 'paperspace',
    210: 'extrusion',
    284: 'shadow_mode',
    330: 'owner',
    370: 'line_weight',
    410: 'layout_tab_name',
}
class DXFEntity(object):
    """Base class of all DXF entities.

    Holds attributes common to every entity type and implements the
    'chain of generators' attribute setup: each subclass consumes the group
    codes it knows and yields every unknown (code, value) pair to the next
    consumer in the chain.
    """
    def __init__(self):
        self.dxftype = 'ENTITY'
        self.handle = None
        self.owner = None
        self.paperspace = None
        self.layer = '0'
        self.linetype = None
        self.thickness = 0.0
        self.extrusion = None
        self.ltscale = 1.0
        self.line_weight = 0
        self.invisible = 0
        self.color = const.BYLAYER
        self.true_color = None
        self.transparency = None
        self.shadow_mode = None
        self.layout_tab_name = None

    def setup_attributes(self, tags):
        """Consume common group codes from *tags*, yield the rest up the chain."""
        self.dxftype = tags.get_type()
        for code, value in tags.plain_tags():
            if code in basic_attribs:
                self.__setattr__(basic_attribs[code], value)
            elif code == 420:
                self.true_color = TrueColor(value)
            elif code == 440:
                # group code 440: transparency; the low byte holds alpha 0..255
                self.transparency = 1. - float(value & 0xFF) / 255.
            else:
                yield code, value  # chain of generators

    def set_default_extrusion(self):  # call only for 2d entities with extrusion vector
        if self.extrusion is None:
            self.extrusion = (0., 0., 1.)

    def __str__(self):
        return "{} [{}]".format(self.dxftype, self.handle)
class Point(DXFEntity):
    """POINT entity: a single location (group code 10)."""
    def __init__(self):
        super(Point, self).__init__()
        self.point = (0, 0, 0)

    def setup_attributes(self, tags):
        for code, value in super(Point, self).setup_attributes(tags):
            if code == 10:
                self.point = value
            else:
                yield code, value  # chain of generators
        self.set_default_extrusion()
class Line(DXFEntity):
    """LINE entity: straight segment from *start* (code 10) to *end* (code 11)."""
    def __init__(self):
        super(Line, self).__init__()
        self.start = (0, 0, 0)
        self.end = (0, 0, 0)

    def setup_attributes(self, tags):
        for code, value in super(Line, self).setup_attributes(tags):
            if code == 10:
                self.start = value
            elif code == 11:
                self.end = value
            else:
                yield code, value  # chain of generators
class Circle(DXFEntity):
    """CIRCLE entity: *center* (code 10) and *radius* (code 40)."""
    def __init__(self):
        super(Circle, self).__init__()
        self.center = (0, 0, 0)
        self.radius = 1.0

    def setup_attributes(self, tags):
        for code, value in super(Circle, self).setup_attributes(tags):
            if code == 10:
                self.center = value
            elif code == 40:
                self.radius = value
            else:
                yield code, value  # chain of generators
        self.set_default_extrusion()
class Arc(Circle):
    """ARC entity: a circle limited to start/end angles (codes 50/51, degrees)."""
    def __init__(self):
        super(Arc, self).__init__()
        self.start_angle = 0.
        self.end_angle = 360.

    def setup_attributes(self, tags):
        for code, value in super(Arc, self).setup_attributes(tags):
            if code == 50:
                self.start_angle = value
            elif code == 51:
                self.end_angle = value
            else:
                yield code, value  # chain of generators
        self.set_default_extrusion()
# group codes 10-13: the four corner points of TRACE/SOLID (and 3DFACE) entities
TRACE_CODES = frozenset((10, 11, 12, 13))
class Trace(DXFEntity):
    """TRACE entity: up to four corner points (group codes 10-13)."""
    def __init__(self):
        super(Trace, self).__init__()
        self.points = []

    def setup_attributes(self, tags):
        for code, value in super(Trace, self).setup_attributes(tags):
            if code in TRACE_CODES:
                self.points.append(value)
            else:
                yield code, value  # chain of generators
        self.set_default_extrusion()
# SOLID shares the exact tag layout of TRACE
Solid = Trace
class Face(Trace):
    """3DFACE entity: corner points plus an edge visibility bit field (code 70)."""
    def __init__(self):
        super(Face, self).__init__()
        self.points = []
        self.invisible_edge = 0

    def setup_attributes(self, tags):
        for code, value in super(Face, self).setup_attributes(tags):
            if code == 70:
                self.invisible_edge = value
            else:
                yield code, value  # chain of generators
        self.set_default_extrusion()

    def is_edge_invisible(self, edge):
        # edges 0 .. 3
        return bool(self.invisible_edge & (1 << edge))
class Text(DXFEntity):
    """TEXT entity: a single line of text with style, alignment and size."""
    def __init__(self):
        super(Text, self).__init__()
        self.insert = (0., 0.)
        self.height = 1.0
        self.text = ""
        self.rotation = 0.
        self.oblique = 0.
        self.style = "STANDARD"
        self.width = 1.
        self.is_backwards = False
        self.is_upside_down = False
        self.halign = 0
        self.valign = 0
        self.align_point = None
        self.font = None
        self.big_font = None

    def setup_attributes(self, tags):
        for code, value in super(Text, self).setup_attributes(tags):
            if code == 10:
                self.insert = value
            elif code == 11:
                self.align_point = value
            elif code == 1:
                self.text = value
            elif code == 7:
                self.style = value
            elif code == 40:
                self.height = value
            elif code == 41:
                self.width = value
            elif code == 50:
                self.rotation = value
            elif code == 51:
                self.oblique = value
            elif code == 71:
                # code 71: text generation flags; bit 2 = backwards (mirrored
                # in X), bit 4 = upside down (mirrored in Y)
                self.is_backwards = bool(value & 2)
                self.is_upside_down = bool(value & 4)
            elif code == 72:
                self.halign = value
            elif code == 73:
                self.valign = value
            else:
                yield code, value  # chain of generators
        self.set_default_extrusion()

    def resolve_text_style(self, text_styles):
        """Fill unset attributes from the referenced text style (or defaults)."""
        style = text_styles.get(self.style, None)
        if style is None:
            style = default_text_style
        if self.height == 0:
            self.height = style.height
        if self.width == 0:
            self.width = style.width
        if self.oblique is None:
            self.oblique = style.oblique
        if self.is_backwards is None:
            self.is_backwards = style.is_backwards
        if self.is_upside_down is None:
            self.is_upside_down = style.is_upside_down
        if self.font is None:
            self.font = style.font
        if self.big_font is None:
            self.big_font = style.big_font

    def plain_text(self):
        """Return the text content with '%%' control sequences resolved."""
        chars = []
        raw_chars = list(reversed(self.text))  # text split into chars, in reversed order for efficient pop()
        while len(raw_chars):
            char = raw_chars.pop()
            if char == '%':  # formatting codes and special characters
                if len(raw_chars) and raw_chars[-1] == '%':
                    raw_chars.pop()  # '%'
                    if len(raw_chars):
                        special_char = raw_chars.pop()  # command char
                        chars.append(SPECIAL_CHARS.get(special_char, ""))
                else:  # char is just a single '%'
                    chars.append(char)
            else:  # char is what it is, a character
                chars.append(char)
        return "".join(chars)
class Attrib(Text):
    """ATTRIB/ATTDEF entity: text with a *tag* name (code 2)."""
    def __init__(self):
        super(Attrib, self).__init__()
        self.field_length = 0
        self.tag = ""

    def setup_attributes(self, tags):
        for code, value in super(Attrib, self).setup_attributes(tags):
            if code == 2:
                self.tag = value
            elif code == 73:
                # NOTE(review): code 73 is already consumed as 'valign' by
                # Text.setup_attributes upstream in the chain, so this branch
                # appears unreachable - verify against real ATTRIB tag streams.
                self.field_length = value
            else:
                yield code, value
class Insert(DXFEntity):
    """INSERT entity: a block reference with placement, scale and repeat grid."""
    def __init__(self):
        super(Insert, self).__init__()
        self.name = ""
        self.insert = (0., 0., 0.)
        self.rotation = 0.
        self.scale = (1., 1., 1.)
        self.row_count = 1
        self.row_spacing = 0.
        self.col_count = 1
        self.col_spacing = 0.
        self.attribsfollow = False
        self.attribs = []

    def setup_attributes(self, tags):
        xscale = 1.
        yscale = 1.
        zscale = 1.
        for code, value in super(Insert, self).setup_attributes(tags):
            if code == 2:
                self.name = value
            elif code == 10:
                self.insert = value
            elif code == 41:
                xscale = value
            elif code == 42:
                yscale = value
            elif code == 43:
                zscale = value
            elif code == 44:
                self.col_spacing = value
            elif code == 45:
                self.row_spacing = value
            elif code == 50:
                self.rotation = value
            elif code == 66:
                # code 66: 'entities follow' flag - ATTRIB entities trail this INSERT
                self.attribsfollow = bool(value)
            elif code == 70:
                self.col_count = value
            elif code == 71:
                self.row_count = value
            else:
                yield code, value  # chain of generators
        self.scale = (xscale, yscale, zscale)
        self.set_default_extrusion()

    def find_attrib(self, attrib_tag):
        """Return the attached ATTRIB with the given tag, or None."""
        for attrib in self.attribs:
            if attrib.tag == attrib_tag:
                return attrib
        return None

    def append_data(self, attribs):
        self.attribs = attribs
class Polyline(DXFEntity):
    """POLYLINE entity: 2d/3d polylines, 2d splines, polygon meshes and
    polyface meshes.

    The concrete kind is derived from the flags (group code 70) and exposed
    as ``mode``; ``cast()`` converts the mesh variants into their specialized
    wrapper classes (PolyMesh/PolyFace).
    """
    LINE_TYPES = frozenset(('spline2d', 'polyline2d', 'polyline3d'))

    def __init__(self):
        super(Polyline, self).__init__()
        self.vertices = []        # set in append data
        self.points = []          # set in append data
        self.control_points = []  # set in append data
        self.width = []           # set in append data
        self.bulge = []           # set in append data
        self.tangents = []        # set in append data
        self.flags = 0
        self.mode = 'polyline2d'
        self.mcount = 0
        self.ncount = 0
        self.default_start_width = 0.
        self.default_end_width = 0.
        self.is_mclosed = False
        self.is_nclosed = False
        self.is_closed = False
        self.elevation = (0., 0., 0.)
        self.m_smooth_density = 0.
        self.n_smooth_density = 0.
        self.smooth_type = 0
        self.spline_type = None

    def setup_attributes(self, tags):
        """Consume POLYLINE group codes; yield unknown codes up the chain."""
        def get_mode():
            # derive the polyline kind from the flags bit field (code 70)
            flags = self.flags
            if flags & const.POLYLINE_SPLINE_FIT_VERTICES_ADDED:
                return 'spline2d'
            elif flags & const.POLYLINE_3D_POLYLINE:
                return 'polyline3d'
            elif flags & const.POLYLINE_3D_POLYMESH:
                return 'polymesh'
            elif flags & const.POLYLINE_POLYFACE:
                return 'polyface'
            else:
                return 'polyline2d'

        for code, value in super(Polyline, self).setup_attributes(tags):
            if code == 10:
                self.elevation = value
            elif code == 40:
                self.default_start_width = value
            elif code == 41:
                self.default_end_width = value
            elif code == 70:
                self.flags = value
            elif code == 71:
                self.mcount = value
            elif code == 72:
                self.ncount = value
            elif code == 73:
                self.m_smooth_density = value
            elif code == 74:
                # BUGFIX: this branch was a duplicated 'code == 73' test and
                # could never be reached; DXF group code 74 is the smooth
                # surface N density.
                self.n_smooth_density = value
            elif code == 75:
                self.smooth_type = value
            else:
                yield code, value  # chain of generators
        self.mode = get_mode()
        if self.mode == 'spline2d':
            if self.smooth_type == const.POLYMESH_CUBIC_BSPLINE:
                self.spline_type = 'cubic_bspline'
            elif self.smooth_type == const.POLYMESH_QUADRIC_BSPLINE:
                self.spline_type = 'quadratic_bspline'
            elif self.smooth_type == const.POLYMESH_BEZIER_SURFACE:
                self.spline_type = 'bezier_curve'  # is this a valid spline type for DXF12?
        self.is_mclosed = bool(self.flags & const.POLYLINE_MESH_CLOSED_M_DIRECTION)
        self.is_nclosed = bool(self.flags & const.POLYLINE_MESH_CLOSED_N_DIRECTION)
        self.is_closed = self.is_mclosed
        self.set_default_extrusion()

    def __len__(self):
        return len(self.vertices)

    def __getitem__(self, item):
        return self.vertices[item]

    def __iter__(self):
        return iter(self.vertices)

    def append_data(self, vertices):
        """Attach the trailing VERTEX entities and build the convenience lists
        (points, widths, bulges, tangents) for line-type polylines.
        """
        def default_width(start_width, end_width):
            # zero means 'use the polyline default width'
            if start_width == 0.:
                start_width = self.default_start_width
            if end_width == 0.:
                end_width = self.default_end_width
            return start_width, end_width

        self.vertices = vertices
        if self.mode in Polyline.LINE_TYPES:
            for vertex in self.vertices:
                if vertex.flags & const.VTX_SPLINE_FRAME_CONTROL_POINT:
                    self.control_points.append(vertex.location)
                else:
                    self.points.append(vertex.location)
                    self.width.append(default_width(vertex.start_width, vertex.end_width))
                    self.bulge.append(vertex.bulge)
                    self.tangents.append(vertex.tangent if vertex.flags & const.VTX_CURVE_FIT_TANGENT else None)

    def cast(self):
        """Return a PolyFace/PolyMesh view for mesh modes, else self."""
        if self.mode == 'polyface':
            return PolyFace(self)
        elif self.mode == 'polymesh':
            return PolyMesh(self)
        else:
            return self
class SubFace(object):
    """One face of a POLYFACE mesh.

    Wraps a face record (its ``vtx`` tuple holds 1-based vertex indices,
    negative values mark invisible edges, zero means 'unused slot') and the
    shared vertex list; vertex lookup happens lazily on access.
    """
    def __init__(self, face_record, vertices):
        self.face_record = face_record
        self._vertices = vertices

    def __len__(self):
        return len(self.face_record.vtx)

    def __getitem__(self, item):
        return self._vertices[self._vertex_index(item)]

    def __iter__(self):
        # yield the *locations* of the used vertices, in face order
        for index in self.indices():
            yield self._vertices[index].location

    def _vertex_index(self, pos):
        # convert the signed, 1-based DXF index into a 0-based list index
        return abs(self.face_record.vtx[pos]) - 1

    def indices(self):
        """Return the 0-based vertex indices of this face (unused slots skipped)."""
        return tuple(abs(vtx_index) - 1 for vtx_index in self.face_record.vtx if vtx_index != 0)

    def is_edge_visible(self, pos):
        """An edge is visible if its (1-based) vertex index is positive."""
        return self.face_record.vtx[pos] > 0
class PolyShape(object):
    """Base of the POLYFACE/POLYMESH views: takes over every attribute of the
    source POLYLINE entity and replaces its dxftype."""
    def __init__(self, polyline, dxftype):
        # clone all dxf attributes of the wrapped polyline in one step
        self.__dict__.update(polyline.__dict__)
        self.dxftype = dxftype

    def __str__(self):
        return "{} [{}]".format(self.dxftype, self.handle)
class PolyFace(PolyShape):
    """Polyface mesh view of a POLYLINE: separates plain mesh vertices from
    the face records and exposes each face as a SubFace."""
    def __init__(self, polyline):
        VERTEX_FLAGS = const.VTX_3D_POLYFACE_MESH_VERTEX + const.VTX_3D_POLYGON_MESH_VERTEX

        def is_vertex(flags):
            # real vertices carry both mesh-vertex flag bits
            return flags & VERTEX_FLAGS == VERTEX_FLAGS

        super(PolyFace, self).__init__(polyline, 'POLYFACE')
        vertices = []
        face_records = []
        for vertex in polyline.vertices:
            (vertices if is_vertex(vertex.flags) else face_records).append(vertex)
        # NOTE(review): the local 'vertices' list is never stored; SubFace
        # indexes into self.vertices copied from the POLYLINE - verify that
        # face indices are meant to address the full vertex list.
        self._face_records = face_records

    def __getitem__(self, item):
        return SubFace(self._face_records[item], self.vertices)

    def __len__(self):
        return len(self._face_records)

    def __iter__(self):
        return (SubFace(f, self.vertices) for f in self._face_records)
class PolyMesh(PolyShape):
    """M x N polygon mesh view of a POLYLINE entity (row-major vertex grid)."""
    def __init__(self, polyline):
        super(PolyMesh, self).__init__(polyline, 'POLYMESH')

    def __iter__(self):
        return iter(self.vertices)

    def get_location(self, pos):
        """Return the location tuple of the vertex at grid position *pos* = (m, n)."""
        return self.get_vertex(pos).location

    def get_vertex(self, pos):
        """Return the Vertex at grid position *pos* = (m, n).

        Raises IndexError if the position lies outside the mcount x ncount grid.
        """
        m, n = pos
        if 0 <= m < self.mcount and 0 <= n < self.ncount:
            pos = m * self.ncount + n  # row-major layout
            return self.vertices[pos]
        else:
            raise IndexError(repr(pos))
class Vertex(DXFEntity):
    """VERTEX entity: one vertex of a POLYLINE.

    For polyface meshes, face records carry up to four 1-based vertex
    indices in the codes 71-74, collected into the ``vtx`` tuple.
    """
    def __init__(self):
        super(Vertex, self).__init__()
        self.location = (0., 0., 0.)
        self.flags = 0
        self.start_width = 0.
        self.end_width = 0.
        self.bulge = 0.
        self.tangent = None
        self.vtx = None

    def setup_attributes(self, tags):
        vtx0 = 0
        vtx1 = 0
        vtx2 = 0
        vtx3 = 0
        for code, value in super(Vertex, self).setup_attributes(tags):
            if code == 10:
                self.location = value
            elif code == 40:
                self.start_width = value
            elif code == 41:
                self.end_width = value
            elif code == 42:
                self.bulge = value
            elif code == 50:
                self.tangent = value
            elif code == 70:
                self.flags = value
            elif code == 71:
                vtx0 = value
            elif code == 72:
                vtx1 = value
            elif code == 73:
                vtx2 = value
            elif code == 74:
                vtx3 = value
            else:
                yield code, value  # chain of generators
        indices = (vtx0, vtx1, vtx2, vtx3)
        if any(indices):
            # only face records carry vertex indices; plain vertices keep vtx=None
            self.vtx = indices

    def __getitem__(self, item):
        return self.location[item]

    def __iter__(self):
        return iter(self.location)
class Block(DXFEntity):
    """BLOCK definition: a named container of entities with a base point."""
    def __init__(self):
        super(Block, self).__init__()
        self.basepoint = (0, 0, 0)
        self.name = ''
        self.description = ''
        self.flags = 0
        self.xrefpath = ""
        self._entities = []

    def setup_attributes(self, tags):
        for code, value in super(Block, self).setup_attributes(tags):
            if code == 2:
                self.name = value
            elif code == 4:
                self.description = value
            elif code == 1:
                self.xrefpath = value
            elif code == 10:
                self.basepoint = value
            elif code == 70:
                self.flags = value
            else:
                yield code, value  # chain of generators

    @property
    def is_xref(self):
        # block is an external reference
        return bool(self.flags & const.BLK_XREF)

    @property
    def is_xref_overlay(self):
        return bool(self.flags & const.BLK_XREF_OVERLAY)

    @property
    def is_anonymous(self):
        return bool(self.flags & const.BLK_ANONYMOUS)

    def set_entities(self, entities):
        self._entities = entities

    def __iter__(self):
        return iter(self._entities)

    def __getitem__(self, item):
        return self._entities[item]

    def __len__(self):
        return len(self._entities)
class LWPolyline(DXFEntity):
    """LWPOLYLINE entity: lightweight 2d polyline with inline per-point
    width and bulge values."""
    def __init__(self):
        super(LWPolyline, self).__init__()
        self.points = []
        self.width = []
        self.bulge = []
        self.elevation = 0.
        self.const_width = 0.
        self.flags = 0

    def setup_attributes(self, tags):
        bulge, start_width, end_width = 0., 0., 0.
        init = True  # True until the first point (code 10) has been read
        for code, value in super(LWPolyline, self).setup_attributes(tags):
            if code == 10:
                if not init:
                    # code 10 starts a new point: flush the previous point's data
                    self.bulge.append(bulge)
                    self.width.append((start_width, end_width))
                    bulge, start_width, end_width = 0., 0., 0.
                self.points.append(value)
                init = False
            elif code == 40:
                start_width = value
            elif code == 41:
                end_width = value
            elif code == 42:
                bulge = value
            elif code == 38:
                self.elevation = value
            elif code == 39:
                self.thickness = value
            elif code == 43:
                self.const_width = value
            elif code == 70:
                self.flags = value
            elif code == 210:
                self.extrusion = value
            else:
                yield code, value  # chain of generators
        # add values for the last point
        # NOTE(review): this also appends one entry when no point was read at
        # all (degenerate polyline) - confirm against real-world files.
        self.bulge.append(bulge)
        self.width.append((start_width, end_width))
        if self.const_width != 0.:
            # a constant width overrides the per-point width list
            self.width = []
        self.set_default_extrusion()

    @property
    def is_closed(self):
        return bool(self.flags & 1)

    def __len__(self):
        return len(self.points)

    def __getitem__(self, item):
        return self.points[item]

    def __iter__(self):
        return iter(self.points)
class Ellipse(DXFEntity):
    """ELLIPSE entity: center, major axis vector, axis ratio and the
    start/end parameter range (full ellipse = 0 .. 2*pi)."""
    def __init__(self):
        super(Ellipse, self).__init__()
        self.center = (0., 0., 0.)
        self.major_axis = (1., 0., 0.)
        self.ratio = 1.0
        self.start_param = 0.
        self.end_param = 6.283185307179586  # 2*pi

    def setup_attributes(self, tags):
        for code, value in super(Ellipse, self).setup_attributes(tags):
            if code == 10:
                self.center = value
            elif code == 11:
                self.major_axis = value
            elif code == 40:
                self.ratio = value
            elif code == 41:
                self.start_param = value
            elif code == 42:
                self.end_param = value
            else:
                yield code, value  # chain of generators
        self.set_default_extrusion()
class Ray(DXFEntity):
    """RAY/XLINE entity: a start point (code 10) and a unit direction (code 11)."""
    def __init__(self):
        super(Ray, self).__init__()
        self.start = (0, 0, 0)
        self.unit_vector = (1, 0, 0)

    def setup_attributes(self, tags):
        for code, value in super(Ray, self).setup_attributes(tags):
            if code == 10:
                self.start = value
            elif code == 11:
                self.unit_vector = value
            else:
                yield code, value  # chain of generators
def deg2vec(deg):
    """Return the unit direction vector (cos, sin, 0.) for *deg* degrees."""
    angle = float(deg) * math.pi / 180.0
    return (math.cos(angle), math.sin(angle), 0.)
def normalized(vector):
    """Scale *vector* = (x, y, z) to unit length."""
    x, y, z = vector
    length = (x ** 2 + y ** 2 + z ** 2) ** 0.5
    return (x / length, y / length, z / length)
##################################################
# MTEXT inline codes
# \L Start underline
# \l Stop underline
# \O Start overstrike
# \o Stop overstrike
# \K Start strike-through
# \k Stop strike-through
# \P New paragraph (new line)
# \pxi Control codes for bullets, numbered paragraphs and columns
# \X Paragraph wrap on the dimension line (only in dimensions)
# \Q Slanting (obliquing) text by angle - e.g. \Q30;
# \H Text height - e.g. \H3x;
# \W Text width - e.g. \W0.8x;
# \F Font selection
#
# e.g. \Fgdt;o - GDT-tolerance
# e.g. \Fkroeger|b0|i0|c238|p10 - font Kroeger, non-bold, non-italic, codepage 238, pitch 10
#
# \S Stacking, fractions
#
# e.g. \SA^B:
# A
# B
# e.g. \SX/Y:
# X
# -
# Y
# e.g. \S1#4:
# 1/4
#
# \A Alignment
#
# \A0; = bottom
# \A1; = center
# \A2; = top
#
# \C Color change
#
# \C1; = red
# \C2; = yellow
# \C3; = green
# \C4; = cyan
# \C5; = blue
# \C6; = magenta
# \C7; = white
#
# \T Tracking, char.spacing - e.g. \T2;
# \~ Non-wrapping space, hard space
# {} Braces - define the text area influenced by the code
# \ Escape character - e.g. \\ = "\", \{ = "{"
#
# Codes and braces can be nested up to 8 levels deep
# characters that may follow a backslash escape inside MTEXT content
ESCAPED_CHARS = "\\{}"
# group delimiters that only scope formatting; dropped in plain text
GROUP_CHARS = "{}"
# inline format commands consisting of a single character (no ';' terminator)
ONE_CHAR_COMMANDS = "PLlOoKkX"
class MText(DXFEntity):
    """MTEXT entity: multi-line text with inline formatting codes
    (see the inline-code overview comment above)."""
    def __init__(self):
        super(MText, self).__init__()
        self.insert = (0., 0., 0.)
        self.raw_text = ""
        self.height = 0.
        self.rect_width = None
        self.horizontal_width = None
        self.vertical_height = None
        self.line_spacing = 1.
        self.attachment_point = 1
        self.style = 'STANDARD'
        self.xdirection = (1., 0., 0.)
        self.font = None
        self.big_font = None

    def setup_attributes(self, tags):
        text = ""
        lines = []
        rotation = 0.
        xdir = None
        for code, value in super(MText, self).setup_attributes(tags):
            if code == 10:
                self.insert = value
            elif code == 11:
                xdir = value
            elif code == 1:
                # code 1: the final text chunk (follows the code 3 chunks)
                text = value
            elif code == 3:
                # code 3: additional 250-char text chunks, in order
                lines.append(value)
            elif code == 7:
                self.style = value
            elif code == 40:
                self.height = value
            elif code == 41:
                self.rect_width = value
            elif code == 42:
                self.horizontal_width = value
            elif code == 43:
                self.vertical_height = value
            elif code == 44:
                self.line_spacing = value
            elif code == 50:
                rotation = value
            elif code == 71:
                self.attachment_point = value
            else:
                yield code, value  # chain of generators
        lines.append(text)
        self.raw_text = "".join(lines)
        if xdir is None:
            # no explicit x-direction: derive it from the rotation angle
            xdir = deg2vec(rotation)
        self.xdirection = normalized(xdir)
        self.set_default_extrusion()

    def lines(self):
        """Split the raw text into lines at the '\\P' paragraph code."""
        return self.raw_text.split('\P')

    def plain_text(self, split=False):
        """Return the text content with all inline formatting codes removed.

        If *split* is True, return a list of lines instead of one string.
        """
        chars = []
        raw_chars = list(reversed(self.raw_text))  # text split into chars, in reversed order for efficient pop()
        while len(raw_chars):
            char = raw_chars.pop()
            if char == '\\':  # is a formatting command
                try:
                    char = raw_chars.pop()
                except IndexError:
                    break  # premature end of text - just ignore
                if char in ESCAPED_CHARS:  # \ { }
                    chars.append(char)
                elif char in ONE_CHAR_COMMANDS:
                    if char == 'P':  # new line
                        chars.append('\n')
                    # discard other commands
                else:  # more character commands are terminated by ';'
                    stacking = char == 'S'  # stacking command surrounds user data
                    try:
                        while char != ';':  # end of format marker
                            char = raw_chars.pop()
                            if stacking and char != ';':
                                chars.append(char)  # append user data of stacking command
                    except IndexError:
                        break  # premature end of text - just ignore
            elif char in GROUP_CHARS:  # { }
                pass  # discard group markers
            elif char == '%':  # special characters
                if len(raw_chars) and raw_chars[-1] == '%':
                    raw_chars.pop()  # discard next '%'
                    if len(raw_chars):
                        special_char = raw_chars.pop()
                        # replace or discard formatting code
                        chars.append(SPECIAL_CHARS.get(special_char, ""))
                else:  # char is just a single '%'
                    chars.append(char)
            else:  # char is what it is, a character
                chars.append(char)
        plain_text = "".join(chars)
        return plain_text.split('\n') if split else plain_text

    def resolve_text_style(self, text_styles):
        """Fill unset attributes from the referenced text style (or defaults)."""
        style = text_styles.get(self.style, None)
        if style is None:
            style = default_text_style
        if self.height == 0:
            self.height = style.height
        if self.font is None:
            self.font = style.font
        if self.big_font is None:
            # BUGFIX: was 'style.font'; use the style's big font for
            # consistency with Text.resolve_text_style.
            self.big_font = style.big_font
class Light(DXFEntity):
    """LIGHT entity: a distant/point/spot light source with shadow settings."""
    def __init__(self):
        super(Light, self).__init__()
        self.version = 1
        self.name = ""
        self.light_type = 1  # distant = 1; point = 2; spot = 3
        self.status = False  # on/off ?
        self.light_color = 0  # 0 is unset
        self.true_color = None  # None is unset
        self.plot_glyph = 0
        self.intensity = 0
        self.position = (0., 0., 1.)
        self.target = (0., 0., 0.)
        self.attenuation_type = 0  # 0 = None; 1 = Inverse Linear; 2 = Inverse Square
        self.use_attenuation_limits = False
        self.attenuation_start_limit = 0
        self.attenuation_end_limit = 0
        self.hotspot_angle = 0
        self.fall_off_angle = 0.
        self.cast_shadows = False
        self.shadow_type = 0  # 0 = Ray traced shadows; 1 = Shadow maps
        self.shadow_map_size = 0
        self.shadow_softness = 0

    def setup_attributes(self, tags):
        for code, value in super(Light, self).setup_attributes(tags):
            if code == 1:
                self.name = value
            elif code == 10:
                self.position = value
            elif code == 11:
                self.target = value
            elif code == 40:
                self.intensity = value
            elif code == 41:
                self.attenuation_start_limit = value
            elif code == 42:
                self.attenuation_end_limit = value
            elif code == 50:
                self.hotspot_angle = value
            elif code == 51:
                self.fall_off_angle = value
            elif code == 63:
                self.light_color = value
            elif code == 70:
                self.light_type = value
            elif code == 72:
                self.attenuation_type = value
            elif code == 73:
                self.shadow_type = value
            elif code == 90:
                self.version = value
            elif code == 91:
                self.shadow_map_size = value
            elif code == 280:
                self.shadow_softness = value
            elif code == 290:
                self.status = value
            elif code == 291:
                self.plot_glyph = value
            elif code == 292:
                self.use_attenuation_limits = value
            elif code == 293:
                self.cast_shadows = value
            elif code == 421:
                self.true_color = value
            else:
                yield code, value  # chain of generators
class Body(DXFEntity):
    """BODY/REGION/3DSOLID entity: wraps ACIS data, either as decoded SAT
    text lines or as binary SAB data injected later via set_sab_data()."""
    def __init__(self):
        super(Body, self).__init__()
        # need handle to get SAB data in DXF version AC1027 and later
        self.version = 1
        self.acis = []

    def setup_attributes(self, tags):
        sat = []
        for code, value in super(Body, self).setup_attributes(tags):
            if code == 70:
                self.version = value
            elif code in (1, 3):
                # codes 1/3: encoded SAT text lines (3 = continuation line)
                sat.append(value)
            else:
                yield code, value  # chain of generators
        self.acis = decode(sat)

    def set_sab_data(self, sab_data):
        self.acis = sab_data

    @property
    def is_sat(self):
        return isinstance(self.acis, list)  # but could be an empty list

    @property
    def is_sab(self):
        return not self.is_sat  # has binary encoded ACIS data
class Surface(Body):
    """SURFACE entity: an ACIS body with additional isoline display counts."""
    def __init__(self):
        # BUGFIX: was 'super(Body, self).__init__()', which skipped
        # Body.__init__ entirely (it resolved to DXFEntity), leaving
        # self.version and self.acis uninitialized on SURFACE entities.
        super(Surface, self).__init__()
        self.u_isolines = 0
        self.v_isolines = 0

    def setup_attributes(self, tags):
        """Consume surface-specific group codes; yield the rest up the chain."""
        for code, value in super(Surface, self).setup_attributes(tags):
            if code == 71:
                self.u_isolines = value
            elif code == 72:
                self.v_isolines = value
            else:
                yield code, value  # chain of generators
class Mesh(DXFEntity):
    """MESH entity: subdivision mesh with vertex, face and edge lists
    encoded as counted streams of group-90 index values."""
    def __init__(self):
        super(Mesh, self).__init__()
        self.version = 2
        self.blend_crease = False
        self.subdivision_levels = 1
        # rest are mostly positional tags
        self.vertices = []
        self.faces = []
        self.edges = []
        self.edge_crease_list = []

    def setup_attributes(self, tags):
        status = 0  # which positional list is currently being read (93/94) or -1 = done
        count = 0   # remaining group-90 values in the current list
        index_tags = []
        for code, value in super(Mesh, self).setup_attributes(tags):
            if code == 10:
                self.vertices.append(value)
            elif status == -1:  # ignore overridden properties at the end of the mesh entity
                pass  # property override uses also group codes 90, 91, 92 but only at the end of the MESH entity
            elif code == 71:
                self.version = value
            elif code == 72:
                self.blend_crease = bool(value)
            elif code == 91:
                self.subdivision_levels = value
            elif 92 <= code <= 95:  # 92 = vertices, 93 = faces; 94 = edges 95 = edge creases
                if code in (92, 95):
                    continue  # ignore vertices and edge creases count
                status = code
                count = value
                if status == 94:  # edge count
                    count *= 2  # every edge consists of two vertex indices
            elif code == 140:
                self.edge_crease_list.append(value)
            elif code == 90 and count > 0:  # faces or edges
                count -= 1
                index_tags.append(value)
                if count < 1:
                    # current index stream complete: build faces or edges
                    if status == 93:
                        self.setup_faces(index_tags)
                    elif status == 94:
                        self.setup_edges(index_tags)
                    index_tags = []
            elif code == 90:  # count == 0; start of overridden properties (group code 90 after face or edge list)
                status = -1
            else:
                yield code, value  # chain of generators

    def get_face(self, index):
        """Return the face at *index* as a tuple of vertex locations."""
        return tuple(self.vertices[vertex_index] for vertex_index in self.faces[index])

    def get_edge(self, index):
        """Return the edge at *index* as a pair of vertex locations."""
        return tuple(self.vertices[vertex_index] for vertex_index in self.edges[index])

    def setup_faces(self, tags):
        """Split the flat index stream into faces; each face is preceded by
        its vertex count."""
        face = []
        count = 0
        for value in tags:
            if count == 0:
                if len(face):
                    self.faces.append(tuple(face))
                    del face[:]
                count = value
            else:
                count -= 1
                face.append(value)
        if len(face):
            self.faces.append(tuple(face))

    def setup_edges(self, tags):
        """Pair up the flat index stream into (start, end) vertex index tuples."""
        self.edges = list(zip(tags[::2], tags[1::2]))
class Spline(DXFEntity):
    """SPLINE entity: NURBS curve data (knots, weights, control/fit points)."""
    def __init__(self):
        super(Spline, self).__init__()
        self.normal_vector = None
        self.flags = 0
        self.degree = 3
        self.start_tangent = None
        self.end_tangent = None
        self.knots = []
        self.weights = []
        self.tol_knot = .0000001
        self.tol_control_point = .0000001
        self.tol_fit_point = .0000000001
        self.control_points = []
        self.fit_points = []

    def setup_attributes(self, tags):
        subclass = 'AcDbSpline'
        for code, value in super(Spline, self).setup_attributes(tags):
            if subclass == 'AcDbHelix':
                # every code after the AcDbHelix subclass marker (code 100)
                # belongs to the Helix subclass further down the chain
                yield code, value  # chain of generators
            elif code == 10:
                self.control_points.append(value)
            elif code == 11:
                self.fit_points.append(value)
            elif code == 12:
                self.start_tangent = value
            elif code == 13:
                self.end_tangent = value
            elif code == 40:
                self.knots.append(value)
            elif code == 41:
                self.weights.append(value)
            elif code == 42:
                self.tol_knot = value
            elif code == 43:
                self.tol_control_point = value
            elif code == 44:
                self.tol_fit_point = value
            elif code == 70:
                self.flags = value
            elif code == 71:
                self.degree = value
            elif 72 <= code < 75:
                pass  # ignore knot-, control- and fit point count
            elif code == 100:
                subclass = value
            # NOTE: any other unmatched group codes are dropped deliberately
        self.normal_vector = self.extrusion
        if len(self.weights) == 0:
            # unweighted spline: every control point gets weight 1.0
            self.weights = [1.0] * len(self.control_points)

    @property
    def is_closed(self):
        return bool(self.flags & const.SPLINE_CLOSED)

    @property
    def is_periodic(self):
        return bool(self.flags & const.SPLINE_PERIODIC)

    @property
    def is_rational(self):
        return bool(self.flags & const.SPLINE_RATIONAL)

    @property
    def is_planar(self):
        return bool(self.flags & const.SPLINE_PLANAR)

    @property
    def is_linear(self):
        return bool(self.flags & const.SPLINE_LINEAR)
class Helix(Spline):
    """HELIX entity: a SPLINE with additional helix geometry parameters,
    delivered by the Spline base class after its 'AcDbHelix' subclass marker.
    """
    def __init__(self):
        super(Helix, self).__init__()
        self.helix_version = (1, 1)
        self.axis_base_point = None
        self.start_point = None
        self.axis_vector = None
        self.radius = 0
        self.turns = 0
        self.turn_height = 0
        self.handedness = 0  # 0 = left, 1 = right
        self.constrain = 0  # 0 = turn height, 1 = turns, 2 = height

    def setup_attributes(self, tags):
        """Consume helix group codes; yield unknown codes up the chain."""
        major_version = 1
        maintenance_version = 1
        for code, value in super(Helix, self).setup_attributes(tags):
            if code == 10:
                self.axis_base_point = value
            elif code == 11:
                self.start_point = value
            elif code == 12:
                self.axis_vector = value
            elif code == 90:
                major_version = value
            elif code == 91:
                # BUGFIX: the original had this 'code == 91' branch duplicated
                # back to back; the second copy was unreachable dead code.
                maintenance_version = value
            elif code == 40:
                self.radius = value
            elif code == 41:
                self.turns = value
            elif code == 42:
                self.turn_height = value
            elif code == 290:
                self.handedness = value
            elif code == 280:
                self.constrain = value
            else:
                yield code, value  # chain of generators
        self.helix_version = (major_version, maintenance_version)
# Maps DXF type strings to the entity class instantiated by entity_factory().
EntityTable = {
    'LINE' : Line,
    'POINT' : Point,
    'CIRCLE' : Circle,
    'ARC' : Arc,
    'TRACE' : Trace,
    'SOLID' : Solid,
    '3DFACE' : Face,
    'TEXT' : Text,
    'INSERT' : Insert,
    'SEQEND' : DXFEntity,
    'ATTRIB' : Attrib,
    'ATTDEF' : Attrib,
    'POLYLINE' : Polyline,
    'VERTEX' : Vertex,
    'BLOCK' : Block,
    'ENDBLK' : DXFEntity,
    'LWPOLYLINE' : LWPolyline,
    'ELLIPSE' : Ellipse,
    'RAY' : Ray,
    'XLINE' : Ray,
    'SPLINE' : Spline,
    'HELIX' : Helix,
    'MTEXT' : MText,
    'MESH' : Mesh,
    'LIGHT' : Light,
    'BODY' : Body,
    'REGION' : Body,
    '3DSOLID' : Body,
    'SURFACE' : Surface,
    'PLANESURFACE': Surface,
}
def entity_factory(tags):
    """Instantiate the entity class registered for the DXF type of *tags*
    and populate it; raises KeyError for unsupported entity types."""
    cls = EntityTable[tags.get_type()]  # get entity class or raise KeyError
    entity = cls()
    # exhaust the generator chain so all attributes are actually consumed
    list(entity.setup_attributes(tags))
    return entity
| gpl-2.0 | 820,865,479,230,030,800 | 31.528026 | 114 | 0.484869 | false |
tedder/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_host_drs_rule.py | 25 | 15326 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Karsten Kaj Jakobsen <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: maturity ('preview') and support channel
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
author:
- "Karsten Kaj Jakobsen (@karstenjakobsen)"
description:
- "This module can be used to create VM-Host rules in a given cluster."
extends_documentation_fragment: vmware.documentation
module: vmware_vm_host_drs_rule
notes:
- "Tested on vSphere 6.5 and 6.7"
options:
affinity_rule:
default: true
description:
- "If set to C(True), the DRS rule will be an Affinity rule."
- "If set to C(False), the DRS rule will be an Anti-Affinity rule."
- "Effective only if C(state) is set to C(present)."
type: bool
datacenter:
aliases:
- datacenter_name
description:
- "Datacenter to search for given cluster. If not set, we use first cluster we encounter with C(cluster_name)."
required: false
cluster_name:
description:
- "Cluster to create VM-Host rule."
required: true
drs_rule_name:
description:
- "Name of rule to create or remove."
required: true
enabled:
default: false
description:
- "If set to C(True), the DRS rule will be enabled."
- "Effective only if C(state) is set to C(present)."
type: bool
host_group_name:
description:
- "Name of Host group to use with rule."
- "Effective only if C(state) is set to C(present)."
required: true
mandatory:
default: false
description:
- "If set to C(True), the DRS rule will be mandatory."
- "Effective only if C(state) is set to C(present)."
type: bool
state:
choices:
- present
- absent
default: present
description:
- "If set to C(present) and the rule doesn't exists then the rule will be created."
- "If set to C(absent) and the rule exists then the rule will be deleted."
required: true
vm_group_name:
description:
- "Name of VM group to use with rule."
- "Effective only if C(state) is set to C(present)."
required: true
requirements:
- "python >= 2.6"
- PyVmomi
short_description: "Creates VM-Host DRS rule in a given cluster"
version_added: "2.8"
'''
EXAMPLES = r'''
---
- name: "Create mandatory DRS Affinity rule for VM/Host"
vmware_vm_host_drs_rule:
hostname: "{{ vcenter_hostname }}"
password: "{{ vcenter_password }}"
username: "{{ vcenter_username }}"
validate_certs: False
cluster_name: DC0_C0
drs_rule_name: drs_rule_host_aff_0001
host_group_name: DC0_C0_HOST_GR1
vm_group_name: DC0_C0_VM_GR1
mandatory: True
enabled: True
affinity_rule: True
'''
RETURN = r'''
'''
try:
    from pyVmomi import vim
except ImportError:
    # pyVmomi may be absent; presumably availability is reported by the
    # shared vmware module_utils base class - TODO confirm
    pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import (PyVmomi, vmware_argument_spec, wait_for_task, find_cluster_by_name,
find_datacenter_by_name)
class VmwareVmHostRuleDrs(PyVmomi):
"""
Class to manage VM HOST DRS Rules
"""
def __init__(self, module):
"""
Doctring: Init
"""
super(VmwareVmHostRuleDrs, self).__init__(module)
self.__datacenter_name = module.params.get('datacenter', None)
self.__datacenter_obj = None
self.__cluster_name = module.params['cluster_name']
self.__cluster_obj = None
self.__vm_group_name = module.params.get('vm_group_name', None)
self.__host_group_name = module.params.get('host_group_name', None)
self.__rule_name = module.params['drs_rule_name']
self.__enabled = module.params['enabled']
self.__mandatory = module.params['mandatory']
self.__affinity_rule = module.params['affinity_rule']
self.__state = module.params['state']
self.__msg = 'Nothing to see here...'
self.__result = dict()
self.__changed = False
if self.__datacenter_name is not None:
self.__datacenter_obj = find_datacenter_by_name(self.content, self.__datacenter_name)
if self.__datacenter_obj is None and module.check_mode is False:
raise Exception("Datacenter '%s' not found" % self.__datacenter_name)
self.__cluster_obj = find_cluster_by_name(content=self.content,
cluster_name=self.__cluster_name,
datacenter=self.__datacenter_obj)
# Throw error if cluster does not exist
if self.__cluster_obj is None and module.check_mode is False:
raise Exception("Cluster '%s' not found" % self.__cluster_name)
# Dont populate lists if we are deleting group
if self.__state == 'present':
# Get list of vm groups only if state is present
self.__vm_group_obj = self.__get_group_by_name(group_name=self.__vm_group_name)
self.__host_group_obj = self.__get_group_by_name(group_name=self.__host_group_name, host_group=True)
def get_msg(self):
"""
Returns message for Ansible result
Args: none
Returns: string
"""
return self.__msg
def get_result(self):
"""
Returns result for Ansible
Args: none
Returns: dict
"""
return self.__result
def get_changed(self):
"""
Returns if anything changed
Args: none
Returns: boolean
"""
return self.__changed
def __get_group_by_name(self, group_name, cluster_obj=None, host_group=False):
"""
Return group
Args:
group_name: Group name
cluster_obj: Cluster managed object
Returns: cluster_obj.configurationEx.group
"""
if cluster_obj is None:
cluster_obj = self.__cluster_obj
for group in cluster_obj.configurationEx.group:
if not host_group and isinstance(group, vim.cluster.VmGroup):
if group.name == group_name:
return group
elif host_group and isinstance(group, vim.cluster.HostGroup):
if group.name == group_name:
return group
raise Exception("Failed to find the group %s in given cluster %s" % (group_name, cluster_obj.name))
def __get_rule_key_by_name(self, cluster_obj=None, rule_name=None):
"""
Function to get a specific VM-Host DRS rule key by name
Args:
rule_name: Name of rule
cluster_obj: Cluster managed object
Returns: Rule Object if found or None
"""
if cluster_obj is None:
cluster_obj = self.__cluster_obj
if rule_name is None:
rule_name = self.__rule_name
if rule_name:
rules_list = [rule for rule in cluster_obj.configuration.rule if rule.name == rule_name]
if rules_list:
return rules_list[0]
# No rule found
return None
def __normalize_vm_host_rule_spec(self, rule_obj, cluster_obj=None):
"""
Return human readable rule spec
Args:
rule_obj: Rule managed object
cluster_obj: Cluster managed object
Returns: Dictionary with VM-Host DRS Rule info
"""
if cluster_obj is None:
cluster_obj = self.__cluster_obj
if not all([rule_obj, cluster_obj]):
return {}
return dict(rule_key=rule_obj.key,
rule_enabled=rule_obj.enabled,
rule_name=rule_obj.name,
rule_mandatory=rule_obj.mandatory,
rule_uuid=rule_obj.ruleUuid,
rule_vm_group_name=rule_obj.vmGroupName,
rule_affine_host_group_name=rule_obj.affineHostGroupName,
rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName,
rule_vms=self.__get_all_from_group(group_name=rule_obj.vmGroupName,
cluster_obj=cluster_obj),
rule_affine_hosts=self.__get_all_from_group(group_name=rule_obj.affineHostGroupName,
cluster_obj=cluster_obj,
host_group=True),
rule_anti_affine_hosts=self.__get_all_from_group(group_name=rule_obj.antiAffineHostGroupName,
cluster_obj=cluster_obj,
host_group=True),
rule_type="vm_host_rule"
)
def __get_all_from_group(self, group_name=None, cluster_obj=None, host_group=False):
"""
Return all VM / Host names using given group name
Args:
group_name: Rule name
cluster_obj: Cluster managed object
host_group: True if we want only host name from group
Returns: List of VM-Host names belonging to given group object
"""
obj_name_list = []
if not all([group_name, cluster_obj]):
return obj_name_list
for group in cluster_obj.configurationEx.group:
if group.name == group_name:
if not host_group and isinstance(group, vim.cluster.VmGroup):
obj_name_list = [vm.name for vm in group.vm]
break
elif host_group and isinstance(group, vim.cluster.HostGroup):
obj_name_list = [host.name for host in group.host]
break
return obj_name_list
def __check_rule_has_changed(self, rule_obj, cluster_obj=None):
"""
Function to check if the rule being edited has changed
"""
if cluster_obj is None:
cluster_obj = self.__cluster_obj
existing_rule = self.__normalize_vm_host_rule_spec(rule_obj=rule_obj, cluster_obj=cluster_obj)
# Check if anything has changed
if ((existing_rule['rule_enabled'] == self.__enabled) and
(existing_rule['rule_mandatory'] == self.__mandatory) and
(existing_rule['rule_vm_group_name'] == self.__vm_group_name) and
(existing_rule['rule_affine_host_group_name'] == self.__host_group_name or
existing_rule['rule_anti_affine_host_group_name'] == self.__host_group_name)):
return False
else:
return True
def create(self):
"""
Function to create a host VM-Host DRS rule if rule does not exist
"""
rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)
# Check if rule exists
if rule_obj:
operation = 'edit'
rule_changed = self.__check_rule_has_changed(rule_obj)
else:
operation = 'add'
# Check if anything has changed when editing
if operation == 'add' or (operation == 'edit' and rule_changed is True):
rule = vim.cluster.VmHostRuleInfo()
# Check if already rule exists
if rule_obj:
# This need to be set in order to edit a existing rule
rule.key = rule_obj.key
rule.enabled = self.__enabled
rule.mandatory = self.__mandatory
rule.name = self.__rule_name
if self.__affinity_rule:
rule.affineHostGroupName = self.__host_group_name
else:
rule.antiAffineHostGroupName = self.__host_group_name
rule.vmGroupName = self.__vm_group_name
rule_spec = vim.cluster.RuleSpec(info=rule, operation=operation)
config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
if not self.module.check_mode:
task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
wait_for_task(task)
self.__changed = True
rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)
self.__result = self.__normalize_vm_host_rule_spec(rule_obj)
if operation == 'edit':
self.__msg = "Updated DRS rule `%s` successfully" % (self.__rule_name)
else:
self.__msg = "Created DRS rule `%s` successfully" % (self.__rule_name)
# Delete
def delete(self, rule_name=None):
"""
Function to delete VM-Host DRS rule using name
"""
changed = False
if rule_name is None:
rule_name = self.__rule_name
rule_obj = self.__get_rule_key_by_name(rule_name=rule_name)
if rule_obj is not None:
rule_key = int(rule_obj.key)
rule_spec = vim.cluster.RuleSpec(removeKey=rule_key, operation='remove')
config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
if not self.module.check_mode:
task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
wait_for_task(task)
self.__changed = True
if self.__changed:
self.__msg = "Deleted DRS rule `%s` successfully" % (self.__rule_name)
else:
self.__msg = "DRS Rule `%s` does not exists or already deleted" % (self.__rule_name)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
vm_group_name=dict(type='str', required=True),
host_group_name=dict(type='str', required=True),
cluster_name=dict(type='str', required=True),
datacenter=dict(type='str', required=False, aliases=['datacenter_name']),
drs_rule_name=dict(type='str', required=True),
enabled=dict(type='bool', default=False),
mandatory=dict(type='bool', default=False),
affinity_rule=dict(type='bool', default=True))
)
required_if = [
['state', 'present', ['vm_group_name'], ['host_group_name']],
]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
try:
# Create instance of VmwareDrsGroupManager
vm_host_drs = VmwareVmHostRuleDrs(module=module)
if module.params['state'] == 'present':
vm_host_drs.create()
elif module.params['state'] == 'absent':
vm_host_drs.delete()
# Set results
results = dict(msg=vm_host_drs.get_msg(),
failed=False,
changed=vm_host_drs.get_changed(),
result=vm_host_drs.get_result())
except Exception as error:
results = dict(failed=True, msg="Error: `%s`" % error)
if results['failed']:
module.fail_json(**results)
else:
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,348,959,489,354,864,000 | 32.317391 | 117 | 0.571317 | false |
double12gzh/nova | nova/debugger.py | 40 | 3009 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(markmc): this is imported before monkey patching in nova.cmd
# so we avoid extra imports here
import sys
def enabled():
return ('--remote_debug-host' in sys.argv and
'--remote_debug-port' in sys.argv)
def register_cli_opts():
from oslo_config import cfg
cli_opts = [
cfg.StrOpt('host',
help='Debug host (IP or name) to connect. Note '
'that using the remote debug option changes how '
'Nova uses the eventlet library to support async IO. '
'This could result in failures that do not occur '
'under normal operation. Use at your own risk.'),
cfg.IntOpt('port',
help='Debug port to connect. Note '
'that using the remote debug option changes how '
'Nova uses the eventlet library to support async IO. '
'This could result in failures that do not occur '
'under normal operation. Use at your own risk.')
]
cfg.CONF.register_cli_opts(cli_opts, 'remote_debug')
def init():
from oslo_config import cfg
CONF = cfg.CONF
# NOTE(markmc): gracefully handle the CLI options not being registered
if 'remote_debug' not in CONF:
return
if not (CONF.remote_debug.host and CONF.remote_debug.port):
return
import logging
from nova.i18n import _LW
LOG = logging.getLogger(__name__)
LOG.debug('Listening on %(host)s:%(port)s for debug connection',
{'host': CONF.remote_debug.host,
'port': CONF.remote_debug.port})
try:
from pydev import pydevd
except ImportError:
import pydevd
pydevd.settrace(host=CONF.remote_debug.host,
port=CONF.remote_debug.port,
stdoutToServer=False,
stderrToServer=False)
LOG.warning(_LW('WARNING: Using the remote debug option changes how '
'Nova uses the eventlet library to support async IO. This '
'could result in failures that do not occur under normal '
'operation. Use at your own risk.'))
| apache-2.0 | 7,376,880,445,210,520,000 | 35.253012 | 79 | 0.619807 | false |
jesseh/dothis | volunteering/migrations/0007_auto_20150809_2057.py | 1 | 1047 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('volunteering', '0006_auto_20150809_2046'),
]
operations = [
migrations.AlterField(
model_name='triggerbyassignment',
name='bcc',
field=models.CharField(help_text=b'comma-seperated email addresses', max_length=255, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerbydate',
name='bcc',
field=models.CharField(help_text=b'comma-seperated email addresses', max_length=255, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerbyevent',
name='bcc',
field=models.CharField(help_text=b'comma-seperated email addresses', max_length=255, null=True, blank=True),
preserve_default=True,
),
]
| agpl-3.0 | 4,613,068,971,073,253,000 | 31.71875 | 120 | 0.602674 | false |
joshmoore/zeroc-ice | py/test/Ice/objects/Server.py | 1 | 1805 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice
Ice.loadSlice('Test.ice')
Ice.loadSlice('ServerPrivate.ice')
import Test, TestI
class MyObjectFactory(Ice.ObjectFactory):
def create(self, type):
if type == '::Test::I':
return TestI.II()
elif type == '::Test::J':
return TestI.JI()
elif type == '::Test::H':
return TestI.HI()
assert(False) # Should never be reached
def destroy(self):
# Nothing to do
pass
def run(args, communicator):
factory = MyObjectFactory()
communicator.addObjectFactory(factory, '::Test::I')
communicator.addObjectFactory(factory, '::Test::J')
communicator.addObjectFactory(factory, '::Test::H')
communicator.getProperties().setProperty("TestAdapter.Endpoints", "default -p 12010")
adapter = communicator.createObjectAdapter("TestAdapter")
initial = TestI.InitialI(adapter)
adapter.add(initial, communicator.stringToIdentity("initial"))
uoet = TestI.UnexpectedObjectExceptionTestI()
adapter.add(uoet, communicator.stringToIdentity("uoet"))
adapter.activate()
communicator.waitForShutdown()
return True
try:
communicator = Ice.initialize(sys.argv)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
| gpl-2.0 | -5,684,738,105,812,537,000 | 28.112903 | 89 | 0.617175 | false |
mredar/harvester | scripts/queue_delete_solr_collection.py | 3 | 3879 | #! /bin/env python
# -*- coding: utf-8 -*-
import sys
import logbook
from harvester.config import config as config_harvest
from redis import Redis
from rq import Queue
JOB_TIMEOUT = 86400 # 24 hrs
def def_args():
import argparse
parser = argparse.ArgumentParser(description='Harvest a collection')
parser.add_argument('user_email', type=str, help='user email')
parser.add_argument('rq_queue', type=str, help='RQ Queue to put job in')
parser.add_argument(
'collection_key',
type=str,
help='URL for the collection Django tastypie api resource')
return parser
def queue_delete_from_solr(redis_host,
redis_port,
redis_password,
redis_timeout,
rq_queue,
collection_key,
timeout=JOB_TIMEOUT):
rQ = Queue(
rq_queue,
connection=Redis(
host=redis_host,
port=redis_port,
password=redis_password,
socket_connect_timeout=redis_timeout))
job = rQ.enqueue_call(
func='harvester.solr_updater.delete_solr_collection',
kwargs=dict(
collection_key=collection_key),
timeout=timeout)
return job
def main(collection_keys,
log_handler=None,
config_file='akara.ini',
rq_queue=None,
**kwargs):
'''Runs a UCLDC delete from solr for collection key'''
config = config_harvest(config_file=config_file)
if not log_handler:
log_handler = logbook.StderrHandler(level='DEBUG')
log_handler.push_application()
for collection_key in [str(x) for x in collection_keys.split(';')]:
queue_delete_from_solr(
config['redis_host'],
config['redis_port'],
config['redis_password'],
config['redis_connect_timeout'],
rq_queue=rq_queue,
collection_key=collection_key,
**kwargs)
log_handler.pop_application()
if __name__ == '__main__':
parser = def_args()
args = parser.parse_args(sys.argv[1:])
if not args.collection_key:
parser.print_help()
sys.exit(27)
kwargs = {}
main(args.collection_key, rq_queue=args.rq_queue, **kwargs)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause | -6,711,897,638,411,535,000 | 37.39604 | 77 | 0.664002 | false |
ambitioninc/django-dynamic-db-router | settings.py | 2 | 1652 | import os
from django.conf import settings
def configure_settings():
"""
Configures settings for manage.py and for run_tests.py.
"""
if not settings.configured:
# Determine the database settings depending on if a test_db var is set in CI mode or not
test_db = os.environ.get('DB', 'postgres')
if test_db == 'postgres':
db_config_one = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'dynamic_db_router_one',
}
db_config_two = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'dynamic_db_router_two',
}
elif test_db == 'sqlite':
db_config_one = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dynamic_db_router_one',
}
db_config_two = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dynamic_db_router_two',
}
else:
raise RuntimeError('Unsupported test DB {0}'.format(test_db))
settings.configure(
DATABASES={
'default': db_config_one,
'test': db_config_two,
},
DATABASE_ROUTERS=['dynamic_db_router.DynamicDbRouter'],
INSTALLED_APPS=(
'dynamic_db_router',
'dynamic_db_router.tests',
),
ROOT_URLCONF='dynamic_db_router.urls',
SILENCED_SYSTEM_CHECKS=["1_7.W001"],
DEBUG=False,
)
| mit | -4,334,931,302,368,102,000 | 32.714286 | 96 | 0.493947 | false |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/MailChimp/ListSubscribe.py | 5 | 5836 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListSubscribe
# Adds a subscriber to a MailChimp list.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListSubscribe(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListSubscribe Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListSubscribe, self).__init__(temboo_session, '/Library/MailChimp/ListSubscribe')
def new_input_set(self):
return ListSubscribeInputSet()
def _make_result_set(self, result, path):
return ListSubscribeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListSubscribeChoreographyExecution(session, exec_id, path)
class ListSubscribeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListSubscribe
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp.)
"""
super(ListSubscribeInputSet, self)._set_input('APIKey', value)
def set_DoubleOptIn(self, value):
"""
Set the value of the DoubleOptIn input for this Choreo. ((optional, boolean) Flag to control whether a double opt-in confirmation message is sent. Specify '1' (true) or '0' (false). Defaults to 0.)
"""
super(ListSubscribeInputSet, self)._set_input('DoubleOptIn', value)
def set_EmailAddress(self, value):
"""
Set the value of the EmailAddress input for this Choreo. ((conditional, string) The email address for the subscriber you want to create. Required unless the email address is included in the MergeVars input as part of your JSON object.)
"""
super(ListSubscribeInputSet, self)._set_input('EmailAddress', value)
def set_EmailType(self, value):
"""
Set the value of the EmailType input for this Choreo. ((optional, string) Must be one of 'text', 'html', or 'mobile'. Defaults to html.)
"""
super(ListSubscribeInputSet, self)._set_input('EmailType', value)
def set_ListId(self, value):
"""
Set the value of the ListId input for this Choreo. ((required, string) The id of the list that the subsbriber will be added to.)
"""
super(ListSubscribeInputSet, self)._set_input('ListId', value)
def set_MergeVars(self, value):
"""
Set the value of the MergeVars input for this Choreo. ((conditional, json) A JSON object of the merge fields for this subscriber. If the subscriber email address is not provided for the EmailAddress input, it must be specified here.)
"""
super(ListSubscribeInputSet, self)._set_input('MergeVars', value)
def set_ReplaceInterests(self, value):
"""
Set the value of the ReplaceInterests input for this Choreo. ((optional, boolean) A flag to determine whether to replace the interest groups with the groups provided or add the provided groups to the member's interest groups. Specify '1' (true) or '0' (false). Defaults to 1.)
"""
super(ListSubscribeInputSet, self)._set_input('ReplaceInterests', value)
def set_SendWelcome(self, value):
"""
Set the value of the SendWelcome input for this Choreo. ((optional, boolean) If double_optin is false and this flag is true, a welcome email will be sent. Note that this does not apply when updating records. Specify '1' (true) or '0' (false). Defaults to 0.)
"""
super(ListSubscribeInputSet, self)._set_input('SendWelcome', value)
def set_UpdateExisting(self, value):
"""
Set the value of the UpdateExisting input for this Choreo. ((optional, boolean) Indicates that if the email already exists, this request will perform an update instead of an insert. Specify '1' (true) or '0' (false). Defaults to 1.)
"""
super(ListSubscribeInputSet, self)._set_input('UpdateExisting', value)
class ListSubscribeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListSubscribe Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Mailchimp. Returns the string "true" for success and an error description for failures.)
"""
return self._output.get('Response', None)
class ListSubscribeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListSubscribeResultSet(response, path)
| apache-2.0 | -245,207,782,184,932,200 | 46.836066 | 284 | 0.67889 | false |
tuxfux-hlp-notes/python-batches | archieves/batch-64/15-files/myenv/bin/runxlrd.py | 1 | 16321 | #!/home/khyaathi/Documents/git_repos/python-batches/batch-64/15-files/myenv/bin/python
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None  # optparse Values; assigned in main() and read as a global by bk_header() and show()
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys, time, glob, traceback, gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
_junk = sh.row_types(rowx)[nc-1]
_junk = sh.row_values(rowx)[nc-1]
_junk = sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n"
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot, hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
| gpl-3.0 | -8,034,611,722,542,785,000 | 38.422705 | 101 | 0.491269 | false |
testmana2/test | E5Network/E5RFC6266.py | 1 | 12479 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a Content-Disposition parser iaw. RFC 6266.
"""
#
# This code is adapted from the rfc6266.py module of qutebrowser.
# Original copyright 2014-2015 Florian Bruhin (The Compiler)
# <[email protected]>
#
from __future__ import unicode_literals
try: # Py3
import urllib.parse as parse
except (ImportError):
import urlparse as parse # __IGNORE_WARNING__
import collections
import string
import re
try:
import pypeg2 as peg
class UniqueNamespace(peg.Namespace):
"""
A pyPEG2 namespace which prevents setting a value twice.
"""
def __setitem__(self, key, value):
"""
Special method to set an item.
@param key key for the item
@param value value of the item
"""
if key in self:
raise DuplicateParamError(key)
super(UniqueNamespace, self).__setitem__(key, value)
# RFC 2616
separator_chars = "()<>@,;:\\\"/[]?={} \t" # __IGNORE_WARNING__
ctl_chars = ''.join(chr(i) for i in range(32)) + chr(127)
nontoken_chars = separator_chars + ctl_chars
# RFC 5987
attr_chars_nonalnum = '!#$&+-.^_`|~'
attr_chars = string.ascii_letters + string.digits + attr_chars_nonalnum
# RFC 5987 gives this alternative construction of the token character class
token_chars = attr_chars + "*'%" # __IGNORE_WARNING__
# Definitions from https://tools.ietf.org/html/rfc2616#section-2.2
# token was redefined from attr_chars to avoid using AnyBut,
# which might include non-ascii octets.
token_re = '[{0}]+'.format(re.escape(token_chars))
class Token(str):
"""
A token (RFC 2616, Section 2.2).
"""
grammar = re.compile(token_re)
# RFC 2616 says some linear whitespace (LWS) is in fact allowed in text
# and qdtext; however it also mentions folding that whitespace into
# a single SP (which isn't in CTL) before interpretation.
# Assume the caller already that folding when parsing headers.
# Note: qdtext also allows non-ascii, which we choose to parse
# as ISO-8859-1; rejecting it entirely would also be permitted.
# Some broken browsers attempt encoding-sniffing, which is broken
# because the spec only allows iso, and because encoding-sniffing
# can mangle valid values.
# Everything else in this grammar (including RFC 5987 ext values)
# is in an ascii-safe encoding.
qdtext_re = r'[^"{0}]'.format(re.escape(ctl_chars))
quoted_pair_re = r'\\[{0}]'.format(re.escape(
''.join(chr(i) for i in range(128))))
class QuotedString(str):
"""
A quoted string (RFC 2616, Section 2.2).
"""
grammar = re.compile(r'"({0}|{1})+"'.format(quoted_pair_re, qdtext_re))
def __str__(self):
s = super(QuotedString, self).__str__()
s = s[1:-1] # remove quotes
s = re.sub(r'\\(.)', r'\1', s) # drop backslashes
return s
class Value(str):
"""
A value. (RFC 2616, Section 3.6).
"""
grammar = [re.compile(token_re), QuotedString]
class Charset(str):
"""
A charset (RFC5987, Section 3.2.1).
"""
# Other charsets are forbidden, the spec reserves them
# for future evolutions.
grammar = re.compile('UTF-8|ISO-8859-1', re.I)
class Language(str):
"""
A language-tag (RFC 5646, Section 2.1).
Fixme: This grammar is not 100% correct yet.
https://github.com/The-Compiler/qutebrowser/issues/105
"""
grammar = re.compile('[A-Za-z0-9-]+')
attr_char_re = '[{0}]'.format(re.escape(attr_chars))
hex_digit_re = '%[' + string.hexdigits + ']{2}'
class ValueChars(str):
"""
A value of an attribute.
Fixme: Can we merge this with Value?
https://github.com/The-Compiler/qutebrowser/issues/105
"""
grammar = re.compile('({0}|{1})*'.format(attr_char_re, hex_digit_re))
class ExtValue(peg.List):
"""
An ext-value of an attribute (RFC 5987, Section 3.2).
"""
grammar = peg.contiguous(Charset, "'", peg.optional(Language), "'",
ValueChars)
class ExtToken(peg.Symbol):
"""
A token introducing an extended value (RFC 6266, Section 4.1).
"""
regex = re.compile(token_re + r'\*')
def __str__(self):
return super(ExtToken, self).__str__().lower()
class NoExtToken(peg.Symbol):
"""
A token introducing a normal value (RFC 6266, Section 4.1).
"""
regex = re.compile(token_re + r'(?<!\*)')
def __str__(self):
return super(NoExtToken, self).__str__().lower()
class DispositionParm(str):
"""
A parameter for the Disposition-Type header (RFC6266, Section 4.1).
"""
grammar = peg.attr('name', NoExtToken), '=', Value
class ExtDispositionParm:
"""
An extended parameter (RFC6266, Section 4.1).
"""
grammar = peg.attr('name', ExtToken), '=', ExtValue
def __init__(self, value, name=None):
self.name = name
self.value = value
class DispositionType(peg.List):
"""
The disposition type (RFC6266, Section 4.1).
"""
grammar = [re.compile('(inline|attachment)', re.I), Token]
class DispositionParmList(UniqueNamespace):
"""
A list of disposition parameters (RFC6266, Section 4.1).
"""
grammar = peg.maybe_some(';', [ExtDispositionParm, DispositionParm])
class ContentDispositionValue:
"""
A complete Content-Disposition value (RFC 6266, Section 4.1).
"""
# Allows nonconformant final semicolon
# I've seen it in the wild, and browsers accept it
# http://greenbytes.de/tech/tc2231/#attwithasciifilenamenqs
grammar = (peg.attr('dtype', DispositionType),
peg.attr('params', DispositionParmList),
peg.optional(';'))
LangTagged = collections.namedtuple('LangTagged', ['string', 'langtag'])
class DuplicateParamError(Exception):
"""
Exception raised when a parameter has been given twice.
"""
class InvalidISO8859Error(Exception):
"""
Exception raised when a byte is invalid in ISO-8859-1.
"""
class ContentDisposition:
"""
Records various indications and hints about content disposition.
These can be used to know if a file should be downloaded or
displayed directly, and to hint what filename it should have
in the download case.
"""
def __init__(self, disposition='inline', assocs=None):
"""
Used internally after parsing the header.
Instances should generally be created from a factory
function, such as parse_headers and its variants.
"""
if len(disposition) != 1:
self.disposition = 'inline'
else:
self.disposition = disposition[0]
if assocs is None:
self.assocs = {}
else:
self.assocs = dict(assocs) # So we can change values
if 'filename*' in self.assocs:
param = self.assocs['filename*']
assert isinstance(param, ExtDispositionParm)
self.assocs['filename*'] = \
parse_ext_value(param.value).string
def filename(self):
"""
The filename from the Content-Disposition header or None.
On safety:
This property records the intent of the sender.
You shouldn't use this sender-controlled value as a filesystem
path, it can be insecure. Serving files with this filename can be
dangerous as well, due to a certain browser using the part after
the dot for mime-sniffing. Saving it to a database is fine by
itself though.
"""
if 'filename*' in self.assocs:
return self.assocs['filename*']
elif 'filename' in self.assocs:
# XXX Reject non-ascii (parsed via qdtext) here?
return self.assocs['filename']
def is_inline(self):
"""
Return if the file should be handled inline.
If not, and unless your application supports other dispositions
than the standard inline and attachment, it should be handled
as an attachment.
"""
return self.disposition.lower() == 'inline'
def normalize_ws(text):
"""
Do LWS (linear whitespace) folding.
"""
return ' '.join(text.split())
def parse_headers(content_disposition):
"""
Build a ContentDisposition from header values.
@param content_disposition contents of the disposition header
@type bytes
"""
# We allow non-ascii here (it will only be parsed inside of qdtext, and
# rejected by the grammar if it appears in other places), although
# parsing it can be ambiguous. Parsing it ensures that a non-ambiguous
# filename* value won't get dismissed because of an unrelated ambiguity
# in the filename parameter. But it does mean we occasionally give
# less-than-certain values for some legacy senders.
content_disposition = content_disposition.decode('iso-8859-1')
# Our parsing is relaxed in these regards:
# - The grammar allows a final ';' in the header;
# - We do LWS-folding, and possibly normalise other broken
# whitespace, instead of rejecting non-lws-safe text.
# XXX Would prefer to accept only the quoted whitespace
# case, rather than normalising everything.
content_disposition = normalize_ws(content_disposition)
try:
parsed = peg.parse(content_disposition, ContentDispositionValue)
except (SyntaxError, DuplicateParamError, InvalidISO8859Error):
return ContentDisposition()
else:
return ContentDisposition(disposition=parsed.dtype,
assocs=parsed.params)
def parse_ext_value(val):
"""
Parse the value of an extended attribute.
"""
if len(val) == 3:
charset, langtag, coded = val
else:
charset, coded = val
langtag = None
decoded = parse.unquote(coded, charset, errors='strict')
if charset == 'iso-8859-1':
# Fail if the filename contains an invalid ISO-8859-1 char
for c in decoded:
if 0x7F <= ord(c) <= 0x9F:
raise InvalidISO8859Error(c)
return LangTagged(decoded, langtag)
except ImportError:
class ContentDisposition:
"""
Records various indications and hints about content disposition.
These can be used to know if a file should be downloaded or
displayed directly, and to hint what filename it should have
in the download case.
"""
def __init__(self, filename):
"""
Constructor
@param filename file name to be stored in this surrogate class
@type str
"""
self.__filename = filename
def filename(self):
"""
Public method to get the stored file name
@return file name
@rtype str
"""
return self.__filename
def parse_headers(content_disposition):
"""
Build a ContentDisposition from header values.
@param content_disposition contents of the disposition header
@type bytes
"""
header = content_disposition.decode()
if header:
pos = header.find("filename=")
if pos != -1:
path = header[pos + 9:]
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
return ContentDisposition(path)
return ContentDisposition("")
| gpl-3.0 | -2,485,631,554,635,507,700 | 33.663889 | 79 | 0.574165 | false |
defaultnamehere/honours | timeseries/get_sentence.py | 1 | 6289 | import spikes as spike_finder
import api
import sys
import datetime
import collections
import nltk
import nltk.data
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
RUN_ON_TEST = False
SENTENCES_DIR = "test/" if RUN_ON_TEST else "development/"
OUTDIR = "results/"
LOGDIR = "log/"
#SENTENCES_DIR = "../../annotation/schwa_10x_sentences/"
from math import sqrt
def cosine_similarity(v1, v2):
n1 = sqrt(sum(map(lambda x: x**2, v1.values())))
n2 = sqrt(sum(map(lambda x: x**2, v2.values())))
if n1 * n2 == 0:
return 0
return sum((v * v2.get(k, 0) for k, v in v1.iteritems())) / (n1 * n2)
class SentenceExtractor(object):
def __init__(self, article_name):
self.stopwords = set(stopwords.words('english'))
self.stemmer = PorterStemmer()
self.sentences = self._get_sentences(article_name)
self.sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
self.article_name = article_name
self.messages = []
def log(self, message):
self.messages.append(message)
def write_logs(self):
with open(LOGDIR + self.article_name + ".similarity", 'w') as f:
for message in self.messages:
try:
f.write(message.decode("utf8"))
except (UnicodeEncodeError, UnicodeDecodeError):
f.write("Whoops, unicode is hard")
f.write("\n")
def _get_sentences(self, article_name):
sentences = []
with open(SENTENCES_DIR + article_name + ".sentences") as f:
for line in f:
if line.strip():
sentences.append(line.strip())
return sentences
def preprocess(self, sentences):
"""Returns a list of Counters, one for each sentence"""
# Stem and tokenize words
tokenised = {}
for sentence in sentences:
tokens = nltk.word_tokenize(sentence)
# Remove stopwords, case, and non alphanumeric chars, and also stem
tokens = [self.stemmer.stem(t.lower()) for t in tokens if t.lower() not in self.stopwords and t.isalnum()]
tokens = collections.Counter(tokens)
tokenised[sentence] = tokens
return tokenised
def write_similarity_matrix(self):
pass
def log_almost_chosen_sentences(self, similarities):
most_similar_sentence = max(self.sentences, key=lambda sentence : similarities[sentence])
top_score = similarities[most_similar_sentence]
almost_chosen = [(sentence, score) for sentence, score in similarities.iteritems() if 0 < abs(score - top_score) < 0.5 and score != 0]
if almost_chosen:
self.log("\nALTERNATIVES")
for sentence, score in sorted(almost_chosen, key=lambda (sentence, score) : similarities[sentence], reverse=True):
self.log("\t%s -> %s" % (score, sentence))
def get_similarities(self, revision):
"""Returns {sentence : cosine similarity} for each sentence in this SentenceExtractor"""
similarities = {}
# TODO Could cache this if it's slow.
sentence_tokens = self.preprocess(self.sentences)
full_text = "\n".join(revision.additions)
# Should be one Counter of tokens for the whole revision!
revision_tokens = self.preprocess([full_text])
revision_token_counter = revision_tokens.values()[0]
for sentence, tokenised_sentence in sentence_tokens.iteritems():
# If the sentence is entirely junk, never choose it as the best.
if not(tokenised_sentence and sentence):
similarities[sentence] = -1
continue
#Each revision maps to a dict of {score: sentence }
similarities[sentence] = cosine_similarity(revision_token_counter, tokenised_sentence)
return similarities
def get_best_sentence(self, revision):
# Finds the best sentence for the single revision
similarities = self.get_similarities(revision)
most_similar_sentence = max(self.sentences, key=lambda sentence : similarities[sentence])
self.log("\n==================%s=================\n" % self.article_name)
self.log("[CHOSEN]%s -> %s" %(similarities[most_similar_sentence], most_similar_sentence))
self.log("[FROM REVISION] %s" % "\n".join(revision.additions))
self.log_almost_chosen_sentences(similarities)
return most_similar_sentence, similarities[most_similar_sentence]
def main():
with open("spikes/%s.spikes" % sys.argv[1]) as f:
spikes = map(lambda x: spike_finder.SpikeDetector.get_date(int(str.strip(x))), f.readlines())
print spikes
article_name = sys.argv[1]
wikiAPI = api.WikiAPI()
WINDOW = 2
sentences = {}
for spikedate in spikes:
wikiAPI.log("====================%s=================" % spikedate)
print spikedate
# Result is either a revision or an (arbitrary revision, sentence) pair
result = wikiAPI.get_revision_between_times(article_name, spikedate - datetime.timedelta(days=WINDOW), spikedate + datetime.timedelta(days=WINDOW))
if result is not None:
if api.SENTENCE_CHOOSING_SCHEME == api.Selection.CHOOSE_HIGHEST_SUM or api.SENTENCE_CHOOSING_SCHEME == api.Selection.CHOOSE_MOST_EDITS:
print "Extracting best sentence ..."
arbitrary_revision, sentence = result
sentences[arbitrary_revision] = (sentence, -1)
# Now, get the sentence that corresponds most with this revision.
else:
print "Extracting sentences...."
se = SentenceExtractor(article_name)
sentences[result] = se.get_best_sentence(result)
se.write_logs()
print "Done."
else:
continue
with open(OUTDIR + article_name, 'w') as f:
if sentences:
for revision in sentences:
sentence, score = sentences[revision]
f.write(sentence + "\t" + str(revision) + "\t" + str(score)+ "\n")
else:
f.write("")
print "Results written to %s" % OUTDIR + article_name
if __name__ == "__main__":
main()
| gpl-2.0 | 3,478,592,766,570,398,000 | 38.30625 | 155 | 0.610272 | false |
Svolcano/python_exercise | dianhua/worker/crawler/china_mobile/tianjin/response_data.py | 1 | 9802 | # -*- coding: utf-8 -*-
import traceback
import json
import lxml.html
from lxml import etree
import re,traceback,time
# 登录前_XPath
def before_login_data(html):
before_login = {'error': False}
try:
root = etree.HTML(html)
before_login['token'] = root.xpath('//*[@id="token"]/@value')[0]
before_login['redirectUrl'] = root.xpath('//*[@id="redirectUrl"]/@value')[0]
before_login['appKey'] = root.xpath('//*[@id="appKey"]/@value')[0]
except :
error = traceback.format_exc()
msg = 'html_error:{}'.format(error)
return {'error': True, 'msg': msg}
return before_login
# 登录前_图片验证码
def before_login_captcha_data(self,resp):
content_text = resp.text
if not content_text or content_text == '0':
self.log('crawler', 'unknown_error', resp)
return 9, 'unknown_error', ""
return 0, 'success', content_text.replace('data:image/png;base64,','')
# 登录
def login_data(self, resp):
try:
resp.encoding = 'utf-8'
json_string = resp.text
result = json.loads(json_string)
if len(result) == 0:
self.log('crawler', 'json_error', resp)
return 9, 'json_error'
if result['state'] == True:
return 0, 'success'
if u'验证码输入不正确' in resp.text:
self.log('user', 'verify_error', resp)
return 2, 'verify_error'
elif u'服务密码不正确' in resp.text:
self.log('user', 'pin_pwd_error', resp)
return 2, 'pin_pwd_error'
elif result['state'] == False and result['message']:
self.log('user', 'login_param_error', resp)
return 1, 'login_param_error'
else:
self.log('user', 'json_error', resp)
return 9, 'json_error'
except:
error = traceback.format_exc()
if u'<title>天津移动-网上营业厅</title>' in resp.text:
self.log('website', 'website_busy_error', resp)
return 9, 'website_busy_error'
self.log('crawler', 'unknown_error:{}'.format(error), resp)
return 9, 'unknown_error'
# 登录后_获取短信验证码
def before_call_log_get_sms_data(self, resp):
try:
xml_json = resp.text
if '请您登陆后进行' in xml_json:
self.log('crawler', 'website_busy_error', resp)
return 9, 'website_busy_error', ''
root = etree.fromstring(xml_json.encode('utf-8'))
json_string = root.xpath('//*[@id="dataset"]')
if len(json_string) == 0:
self.log('crawler', 'xml_error', resp)
return 9, 'xml_error', ''
result = json.loads(json_string[0].text)[0]
if len(result) == 0:
self.log('crawler', 'xml_error', resp)
return 9, 'xml_error', ''
if result['FLAG'] == 'true':
return 0, 'success', ''
self.log('crawler', 'send_sms_error', resp)
return 9, 'send_sms_error', ''
except:
error = traceback.format_exc()
self.log('crawler', 'xml_error:{}'.format(error), resp)
return 9, 'xml_error', ''
# 登录后_验证短信验证码
def before_call_log_verify_sms_data(self, resp):
try:
xml_json = resp.text
root = etree.fromstring(xml_json.encode('utf-8'))
json_string = root.xpath('//*[@id="dataset"]')
if len(json_string) == 0:
self.log('crawler', 'xml_error', resp)
return 9, 'xml_error'
result = json.loads(json_string[0].text)[0]
if len(result) == 0:
self.log('crawler', 'xml_error', resp)
return 9, 'xml_error'
if result['FLAG'] == 'true':
self.einfo = result['ECHTOKENINFO']
return 0, 'success'
elif result['FLAG'] == 'false' and result['RESULTINFO']:
self.log('user', 'verify_error', resp)
return 2, 'verify_error'
else:
self.log('user', 'xml_error', resp)
return 9, 'xml_error'
except:
error = traceback.format_exc()
self.log('crawler', 'unknown_error:{}'.format(error), resp)
return 9, 'unknown_error'
def time_transform(time_str,bm='utf-8',str_format="%Y-%m-%d %H:%M:%S"):
time_type = time.strptime(time_str.encode(bm), str_format)
return str(int(time.mktime(time_type)))
def time_format(time_str,**kwargs):
exec_type=1
time_str = time_str.encode('utf-8')
if 'exec_type' in kwargs:
exec_type = kwargs['exec_type']
if (exec_type == 1):
xx = re.match(r'(.*时)?(.*分)?(.*秒)?',time_str)
h,m,s = 0,0,0
if xx.group(1):
hh = re.findall('\d+', xx.group(1))[0]
h = int(hh)
if xx.group(2):
mm = re.findall('\d+', xx.group(2))[0]
m = int(mm)
if xx.group(3):
ss = re.findall('\d+', xx.group(3))[0]
s = int(ss)
real_time = h*60*60 + m*60 +s
if (exec_type == 2):
xx = re.findall(r'\d*', time_str)
h, m, s = map(int,xx[::2])
real_time = h*60*60 + m*60 +s
return str(real_time)
# 通话详单
def call_log_data(xml_json, searchMonth, self_obj=None):
# print 'xml_json:', xml_json
try:
if "系统繁忙" in xml_json:
return 9, "website_busy_error", "系统繁忙,请稍后再试", []
if "号码状态异常" in xml_json:
return 9, "websiter_prohibited_error", '手机号码异常', []
root = etree.fromstring(xml_json.encode('utf-8'))
json_string = root.xpath('//*[@id="dataset"]')
if len(json_string) == 0:
return 9, 'xml_error', 'xpath id="dataset"', []
result = json.loads(json_string[0].text)[0]
if len(result) == 0:
return 9, 'xml_error', 'xml_json_string error', []
# 查看没有当月详单情况
data = list()
if result.has_key('ERRORINFO'):
# 当月通话详单为空时情况
if u'由于您的号码状态异常,不能进行此业务办理' in result['ERRORINFO']:
return 0, 'success', '', []
return 9, 'request_error', result['ERRORINFO'], data
if not result.has_key('detailInfos'):
return 9, 'request_error', 'no result[detailInfos]', data
for record in result['detailInfos']:
#print 'type(record):', type(record)
raw_call_from = record['AREA_CODE']
call_from, error = self_obj.formatarea(raw_call_from)
if not call_from:
call_from = raw_call_from
self_obj.log("crawler", "{}-{}".format(error, raw_call_from), "")
_data = {
'call_from': call_from,
'call_time': time_transform(record['START_TIME']),
'call_to': '',
'call_tel': record['OTHER_PARTY'],
'call_method': record['FORWARD_CAUSE'],
'call_type': record['LONG_TYPE'],
'call_cost': record['CFEE_LFEE'],
'call_duration': time_format(record['DURATION']),
'month': searchMonth,
}
data.append(_data)
# print '\n', 'call_log_data:', data
return 0, 'success', 'success', data
except :
error = traceback.format_exc()
message = 'unknown_error:{}'.format(error)
return 9, 'unknown_error', message, []
# 账单
def phone_bill_data(xml_json, searchMonth):
try:
root = etree.fromstring(xml_json.encode('utf-8'))
json_string = root.xpath('//*[@id="dataset"]')
if len(json_string) == 0:
return 9, 'xml_error', 'xpath id="dataset" error', []
result = json.loads(json_string[0].text)[0]
if len(result) == 0:
return 9, 'xml_error', 'xml_json_string error', []
if result.has_key('ERRORINFO'):
return 0, 'success', result['ERRORINFO'], []
if not result.has_key('BILL'):
return 0, 'success', 'no result[BILL]', []
data = {
'bill_month': searchMonth,
'bill_amount': result['BILL']['ALL_FEE'],
'bill_package': result['BILL']['DISCNT_GUDING_FEE'],
'bill_ext_calls': result['BILL']['YUYIN_TONGXIN_FEE'],
'bill_ext_data': result['BILL']['SHANGWANG_FEE'],
'bill_ext_sms': result['BILL']['DUANXIN_FEE'],
'bill_zengzhifei': result['BILL']['ZENGZHI_YEWU_FEE'],
'bill_daishoufei': result['BILL']['DAISHOUFEI_YEWU_FEE'],
'bill_qita': result['BILL']['OTHER_FEE'],
}
return 0, 'success', 'success', data
except :
error = traceback.format_exc()
message = 'unknown_error:{}'.format(error)
return 9, 'unknown_error', message, []
def personal_info_data(self,resp):
try:
xml_json = resp.text
root = etree.fromstring(xml_json.encode('utf-8'))
json_string = root.xpath('//*[@id="dataset"]')
if len(json_string) == 0:
self.log('crawler', 'xml_error', resp)
return 9, 'xml_error', {}
result = json.loads(json_string[0].text)[0]
if len(result) == 0:
self.log('crawler', 'xml_error', resp)
return 9, 'xml_error', {}
data = {
'full_name': result['CUST_NAME'],
'id_card': '',
'is_realname_register': False,
'open_date': time_transform(result['OPEN_DATE']),
'address': '',
}
return 0, 'success', data
except:
error = traceback.format_exc()
message = 'unknown_error:{}'.format(error)
self.log('crawler', message, resp)
return 9, 'unknown_error', {}
| mit | 3,634,192,705,566,494,700 | 34.036765 | 84 | 0.524974 | false |
KEHANG/RMG-Py | rmgpy/data/statmechfit.py | 4 | 21172 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
Contains functions for fitting of molecular degrees of freedom from
macroscopic properties, particularly the heat capacity.
"""
import math
import numpy
import scipy.special
import logging
import rmgpy.constants as constants
from rmgpy.statmech import HarmonicOscillator, HinderedRotor
from pydqed import DQED
################################################################################
# This block contains a number of global constants that control various
# aspects of the optimization (bounds, iterations, etc.)
# All frequency and barrier-height bounds below are expressed in
# spectroscopic wavenumber units (cm^-1); the fitted molecular modes are
# constrained to lie within these ranges so that the optimizer returns
# physically plausible values.
# The lower bound for harmonic oscillator frequencies in cm^-1
hoFreqLowerBound = 180.0
# The upper bound for harmonic oscillator frequencies in cm^-1
hoFreqUpperBound = 4000.0
# The lower bound for hindered rotor frequencies in cm^-1
hrFreqLowerBound = 180.0
# The upper bound for hindered rotor frequencies in cm^-1
hrFreqUpperBound = 4000.0
# The lower bound for hindered rotor barrier heights in cm^-1
hrBarrLowerBound = 10.0
# The upper bound for hindered rotor barrier heights in cm^-1
hrBarrUpperBound = 10000.0
# The maximum number of iterations for the optimization solver to use
# (presumably passed to the DQED solver imported above -- confirm at call site)
maxIter = 200
################################################################################
class StatmechFitError(Exception):
    """
    Raised when fitting molecular degrees of freedom to heat capacity data
    fails or cannot be attempted. The exception message should describe the
    circumstances of the exceptional behavior.
    """
################################################################################
def fitStatmechToHeatCapacity(Tlist, Cvlist, Nvib, Nrot, molecule=None):
"""
For a given set of dimensionless heat capacity data `Cvlist` corresponding
to temperature list `Tlist` in K, fit `Nvib` harmonic oscillator and `Nrot`
hindered internal rotor modes. External and other previously-known modes
should have already been removed from `Cvlist` prior to calling this
function. You must provide at least 7 values for `Cvlist`.
This function returns a list containing the fitted vibrational frequencies
in a :class:`HarmonicOscillator` object and the fitted 1D hindered rotors
in :class:`HinderedRotor` objects.
"""
# You must specify at least 7 heat capacity points to use in the fitting;
# you can specify as many as you like above that minimum
if len(Tlist) < 7:
raise StatmechFitError('You must specify at least 7 heat capacity points to fitStatmechToHeatCapacity().')
if len(Tlist) != len(Cvlist):
raise StatmechFitError('The number of heat capacity points ({0:d}) does not match the number of temperatures provided ({1:d}).'.format(len(Cvlist), len(Tlist)))
# The number of optimization variables available is constrained to be less
# than the number of heat capacity points
# This is also capped to a (somewhat arbitrarily chosen) maximum of 16
maxVariables = len(Tlist) - 1
if maxVariables > 16: maxVariables = 16
# The type of variables fitted depends on the values of Nvib and Nrot and
# the number of heat capacity points provided
# For low values of Nvib and Nrot, we can fit the individual
# parameters directly
# For high values of Nvib and/or Nrot we are limited by the number of
# temperatures we are fitting at, and so we can only fit
# pseudo-oscillators and/or pseudo-rotors
vib = []; hind = []
if Nvib <= 0 and Nrot <= 0:
pass
elif Nvib + 2 * Nrot <= maxVariables:
vib, hind = fitStatmechDirect(Tlist, Cvlist, Nvib, Nrot, molecule)
elif Nvib + 2 <= maxVariables:
vib, hind = fitStatmechPseudoRotors(Tlist, Cvlist, Nvib, Nrot, molecule)
else:
vib, hind = fitStatmechPseudo(Tlist, Cvlist, Nvib, Nrot, molecule)
modes = []
if Nvib > 0:
vib.sort()
ho = HarmonicOscillator(frequencies=(vib[:],"cm^-1"))
modes.append(ho)
for i in range(Nrot):
freq = hind[i][0]
barr = hind[i][1]
inertia = (barr*constants.c*100.0*constants.h) / (8 * math.pi * math.pi * (freq*constants.c*100.0)**2)
barrier = barr*constants.c*100.0*constants.h*constants.Na
hr = HinderedRotor(inertia=(inertia*constants.Na*1e23,"amu*angstrom^2"), barrier=(barrier/1000.,"kJ/mol"), symmetry=1, semiclassical=False, quantum=False)
modes.append(hr)
# Return the fitted modes
return modes
################################################################################
def fitStatmechDirect(Tlist, Cvlist, Nvib, Nrot, molecule=None):
"""
Fit `Nvib` harmonic oscillator and `Nrot` hindered internal rotor modes to
the provided dimensionless heat capacities `Cvlist` at temperatures `Tlist`
in K. This method assumes that there are enough heat capacity points
provided that the vibrational frequencies and hindered rotation frequency-
barrier pairs can be fit directly.
"""
# Construct the lower and upper bounds for each variable
bounds = []
# Bounds for harmonic oscillator frequencies
for i in range(Nvib):
bounds.append((hoFreqLowerBound, hoFreqUpperBound))
# Bounds for hindered rotor frequencies and barrier heights
for i in range(Nrot):
bounds.append((hrFreqLowerBound, hrFreqUpperBound))
bounds.append((hrBarrLowerBound, hrBarrUpperBound))
# Construct the initial guess
# Initial guesses within each mode type must be distinct or else the
# optimization will fail
x0 = numpy.zeros(Nvib + 2*Nrot, numpy.float64)
# Initial guess for harmonic oscillator frequencies
if Nvib > 0:
x0[0] = 200.0
x0[1:Nvib] = numpy.linspace(800.0, 1600.0, Nvib-1)
# Initial guess for hindered rotor frequencies and barrier heights
if Nrot > 0:
x0[Nvib] = 100.0
x0[Nvib+1] = 100.0
for i in range(1, Nrot):
x0[Nvib+2*i] = x0[Nvib+2*i-2] + 20.0
x0[Nvib+2*i+1] = x0[Nvib+2*i-1] + 100.0
# Execute the optimization
fit = DirectFit(Tlist, Cvlist, Nvib, Nrot)
fit.initialize(Neq=len(Tlist), Nvars=len(x0), Ncons=0, bounds=bounds, maxIter=maxIter)
x, igo = fit.solve(x0)
# Check that the results of the optimization are valid
if not numpy.isfinite(x).all():
raise StatmechFitError('Returned solution vector is nonsensical: x = {0}.'.format(x))
if igo == 8:
logging.warning('Maximum number of iterations reached when fitting spectral data for {0}.'.format(molecule))
# Postprocess optimization results
vib = list(x[0:Nvib])
hind = []
for i in range(Nrot):
hind.append((x[Nvib+2*i], x[Nvib+2*i+1]))
return vib, hind
################################################################################
def fitStatmechPseudoRotors(Tlist, Cvlist, Nvib, Nrot, molecule=None):
"""
Fit `Nvib` harmonic oscillator and `Nrot` hindered internal rotor modes to
the provided dimensionless heat capacities `Cvlist` at temperatures `Tlist`
in K. This method assumes that there are enough heat capacity points
provided that the vibrational frequencies can be fit directly, but the
hindered rotors must be combined into a single "pseudo-rotor".
"""
# Construct the lower and upper bounds for each variable
bounds = []
# Bounds for harmonic oscillator frequencies
for i in range(Nvib):
bounds.append((hoFreqLowerBound, hoFreqUpperBound))
# Bounds for pseudo-hindered rotor frequency and barrier height
bounds.append((hrFreqLowerBound, hrFreqUpperBound))
bounds.append((hrBarrLowerBound, hrBarrUpperBound))
# Construct the initial guess
# Initial guesses within each mode type must be distinct or else the
# optimization will fail
x0 = numpy.zeros(Nvib + 2, numpy.float64)
# Initial guess for harmonic oscillator frequencies
if Nvib > 0:
x0[0] = 200.0
x0[1:Nvib] = numpy.linspace(800.0, 1600.0, Nvib-1)
# Initial guess for hindered rotor frequencies and barrier heights
x0[Nvib] = 100.0
x0[Nvib+1] = 300.0
# Execute the optimization
fit = PseudoRotorFit(Tlist, Cvlist, Nvib, Nrot)
fit.initialize(Neq=len(Tlist), Nvars=len(x0), Ncons=0, bounds=bounds, maxIter=maxIter)
x, igo = fit.solve(x0)
# Check that the results of the optimization are valid
if not numpy.isfinite(x).all():
raise StatmechFitError('Returned solution vector is nonsensical: x = {0}.'.format(x))
if igo == 8:
logging.warning('Maximum number of iterations reached when fitting spectral data for {0}.'.format(molecule))
# Postprocess optimization results
vib = list(x[0:Nvib])
hind = []
for i in range(Nrot):
hind.append((x[Nvib], x[Nvib+1]))
return vib, hind
################################################################################
def fitStatmechPseudo(Tlist, Cvlist, Nvib, Nrot, molecule=None):
"""
Fit `Nvib` harmonic oscillator and `Nrot` hindered internal rotor modes to
the provided dimensionless heat capacities `Cvlist` at temperatures `Tlist`
in K. This method assumes that there are relatively few heat capacity points
provided, so the vibrations must be combined into one real vibration and
two "pseudo-vibrations" and the hindered rotors must be combined into a
single "pseudo-rotor".
"""
# Construct the lower and upper bounds for each variable
bounds = []
# x[0] corresponds to the first harmonic oscillator (real) frequency
bounds.append((hoFreqLowerBound, hoFreqUpperBound))
# x[1] corresponds to the degeneracy of the second harmonic oscillator
bounds.append((1.0, float(Nvib - 2)))
# x[2] corresponds to the second harmonic oscillator pseudo-frequency
bounds.append((hoFreqLowerBound, hoFreqUpperBound))
# x[3] corresponds to the third harmonic oscillator pseudo-frequency
bounds.append((hoFreqLowerBound, hoFreqUpperBound))
# x[4] corresponds to the hindered rotor pseudo-frequency
bounds.append((hrFreqLowerBound, hrFreqUpperBound))
# x[5] corresponds to the hindered rotor pseudo-barrier
bounds.append((hrBarrLowerBound, hrBarrUpperBound))
# Construct the initial guess
x0 = numpy.zeros(6, numpy.float64) # Initial guess
x0[0] = 300.0
x0[1] = float(math.floor((Nvib - 1) / 2.0))
x0[2] = 800.0
x0[3] = 1600.0
x0[4] = 100.0
x0[5] = 300.0
# Execute the optimization
fit = PseudoFit(Tlist, Cvlist, Nvib, Nrot)
fit.initialize(Neq=len(Tlist), Nvars=len(x0), Ncons=0, bounds=bounds, maxIter=maxIter)
x, igo = fit.solve(x0)
# Check that the results of the optimization are valid
if not numpy.isfinite(x).all():
raise StatmechFitError('Returned solution vector is nonsensical: x = {0}.'.format(x))
if igo == 8:
logging.warning('Maximum number of iterations reached when fitting spectral data for {0}.'.format(molecule))
# Postprocess optimization results
Nvib2 = int(round(x[1]))
Nvib3 = Nvib - Nvib2 - 1
if Nvib2 < 0 or Nvib2 > Nvib-1 or Nvib3 < 0 or Nvib3 > Nvib-1:
raise StatmechFitError('Invalid degeneracies {0} and {1} fitted for pseudo-frequencies.'.format(Nvib2, Nvib3))
vib = [x[0]]
for i in range(Nvib2): vib.append(x[2])
for i in range(Nvib3): vib.append(x[3])
hind = []
for i in range(Nrot):
hind.append((x[4], x[5]))
return vib, hind
################################################################################
def harmonicOscillator_heatCapacity(T, freq):
"""
Return the heat capacity in J/mol*K at the given set of temperatures `Tlist`
in K for the harmonic oscillator with a frequency `freq` in cm^-1.
"""
x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K
exp_x = math.exp(x)
one_minus_exp_x = 1.0 - exp_x
return x * x * exp_x / one_minus_exp_x / one_minus_exp_x
def harmonicOscillator_d_heatCapacity_d_freq(T, freq):
"""
Return the first derivative of the heat capacity with respect to the
harmonic oscillator frequency in J/mol*K/cm^-1 at the given set of
temperatures `Tlist` in K, evaluated at the frequency `freq` in cm^-1.
"""
x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K
exp_x = math.exp(x)
one_minus_exp_x = 1.0 - exp_x
return x * exp_x / one_minus_exp_x / one_minus_exp_x * (2.0 + x + 2.0 * x * exp_x / one_minus_exp_x) * x / freq
def hinderedRotor_heatCapacity(T, freq, barr):
"""
Return the heat capacity in J/mol*K at the given set of temperatures `Tlist`
in K for the 1D hindered rotor with a frequency `freq` in cm^-1 and a
barrier height `barr` in cm^-1.
"""
x = constants.h * constants.c * 100. * freq / constants.kB / T
exp_x = math.exp(x)
one_minus_exp_x = 1.0 - exp_x
z = 0.5 * constants.h * constants.c * 100. * barr / constants.kB / T
BB = scipy.special.i1(z) / scipy.special.i0(z)
return x * x * exp_x / one_minus_exp_x / one_minus_exp_x - 0.5 + z * (z - BB - z * BB * BB)
def hinderedRotor_d_heatCapacity_d_freq(T, freq, barr):
"""
Return the first derivative of the heat capacity with respect to the
hindered rotor frequency in J/mol*K/cm^-1 at the given set of temperatures
`Tlist` in K, evaluated at the frequency `freq` in cm^-1 and a barrier
height `barr` in cm^-1.
"""
x = constants.h * constants.c * 100. * freq / constants.kB / T
exp_x = math.exp(x)
one_minus_exp_x = 1.0 - exp_x
return x * exp_x / one_minus_exp_x / one_minus_exp_x * (2 + x + 2 * x * exp_x / one_minus_exp_x) * x / freq
def hinderedRotor_d_heatCapacity_d_barr(T, freq, barr):
"""
Return the first derivative of the heat capacity with respect to the
hindered rotor frequency in J/mol*K/cm^-1 at the given set of temperatures
`Tlist` in K, evaluated at the frequency `freq` in cm^-1 and a barrier
height `barr` in cm^-1.
"""
z = 0.5 * constants.h * constants.c * 100. * barr / constants.kB / T
BB = scipy.special.i1(z) / scipy.special.i0(z)
return z * (1 - 2 * z * BB + BB * BB + 2 * z * BB * BB * BB) * z / barr
################################################################################
class DirectFit(DQED):
"""
Class for fitting vibrational frequencies and hindered rotor
frequency-barrier pairs for the case when there are few enough oscillators
and rotors that their values can be fit directly.
"""
def __init__(self, Tdata, Cvdata, Nvib, Nrot):
self.Tdata = Tdata
self.Cvdata = Cvdata
self.Nvib = Nvib
self.Nrot = Nrot
def evaluate(self, x):
Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
f = numpy.zeros((Neq), numpy.float64)
J = numpy.zeros((Neq, Nvars), numpy.float64)
fcons = numpy.zeros((Ncons), numpy.float64)
Jcons = numpy.zeros((Ncons, Nvars), numpy.float64)
Nvib = self.Nvib
Nrot = self.Nrot
for i in range(len(self.Tdata)):
# Residual
for n in range(Nvib):
f[i] += harmonicOscillator_heatCapacity(self.Tdata[i], x[n])
for n in range(Nrot):
f[i] += hinderedRotor_heatCapacity(self.Tdata[i], x[Nvib+2*n], x[Nvib+2*n+1])
f[i] -= self.Cvdata[i]
# Jacobian
for n in range(Nvib):
J[i,n ] = harmonicOscillator_d_heatCapacity_d_freq(self.Tdata[i], x[n])
for n in range(Nrot):
J[i,Nvib+2*n ] = hinderedRotor_d_heatCapacity_d_freq(self.Tdata[i], x[Nvib+2*n], x[Nvib+2*n+1])
J[i,Nvib+2*n+1] = hinderedRotor_d_heatCapacity_d_barr(self.Tdata[i], x[Nvib+2*n], x[Nvib+2*n+1])
return f, J, fcons, Jcons
class PseudoRotorFit(DQED):
"""
Class for fitting vibrational frequencies and hindered rotor
frequency-barrier pairs for the case when there are too many oscillators
and rotors for their values can be fit directly, and where collapsing the
rotors into a single pseudo-rotor allows for fitting the vibrational
frequencies directly.
"""
def __init__(self, Tdata, Cvdata, Nvib, Nrot):
self.Tdata = Tdata
self.Cvdata = Cvdata
self.Nvib = Nvib
self.Nrot = Nrot
def evaluate(self, x):
Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
f = numpy.zeros((Neq), numpy.float64)
J = numpy.zeros((Neq, Nvars), numpy.float64)
fcons = numpy.zeros((Ncons), numpy.float64)
Jcons = numpy.zeros((Ncons, Nvars), numpy.float64)
Nvib = self.Nvib
Nrot = self.Nrot
Cv = numpy.zeros((len(self.Tdata), Nvib+1), numpy.float64)
dCv = numpy.zeros((len(self.Tdata), Nvib+2), numpy.float64)
for i in range(len(self.Tdata)):
for j in range(Nvib):
Cv[i,j] = harmonicOscillator_heatCapacity(self.Tdata[i], x[j])
dCv[i,j] = harmonicOscillator_d_heatCapacity_d_freq(self.Tdata[i], x[j])
Cv[i,Nvib] = hinderedRotor_heatCapacity(self.Tdata[i], x[Nvib], x[Nvib+1])
dCv[i,Nvib] = hinderedRotor_d_heatCapacity_d_freq(self.Tdata[i], x[Nvib], x[Nvib+1])
dCv[i,Nvib+1] = hinderedRotor_d_heatCapacity_d_barr(self.Tdata[i], x[Nvib], x[Nvib+1])
for i in range(len(self.Tdata)):
# Residual
for j in range(Nvib):
f[i] += Cv[i,j]
f[i] += Nrot * Cv[i,Nvib]
f[i] -= self.Cvdata[i]
# Jacobian
for j in range(Nvib):
J[i,j] = 2.0 * f[i] * dCv[i,j]
J[i,Nvib] = 2.0 * f[i] * Nrot * dCv[i,Nvib]
J[i,Nvib+1] = 2.0 * f[i] * Nrot * dCv[i,Nvib+1]
return f, J, fcons, Jcons
class PseudoFit(DQED):
"""
Class for fitting vibrational frequencies and hindered rotor
frequency-barrier pairs for the case when there are too many oscillators
and rotors for their values can be fit directly, and where we must collapse
both the vibrations and hindered rotations into "pseudo-oscillators" and
"pseudo-rotors".
"""
def __init__(self, Tdata, Cvdata, Nvib, Nrot):
self.Tdata = Tdata
self.Cvdata = Cvdata
self.Nvib = Nvib
self.Nrot = Nrot
def evaluate(self, x):
Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
f = numpy.zeros((Neq), numpy.float64)
J = numpy.zeros((Neq, Nvars), numpy.float64)
fcons = numpy.zeros((Ncons), numpy.float64)
Jcons = numpy.zeros((Ncons, Nvars), numpy.float64)
Nvib = self.Nvib
Nrot = self.Nrot
for i in range(len(self.Tdata)):
Cv1 = harmonicOscillator_heatCapacity(self.Tdata[i], x[0])
Cv2 = harmonicOscillator_heatCapacity(self.Tdata[i], x[2])
Cv3 = harmonicOscillator_heatCapacity(self.Tdata[i], x[3])
Cv4 = hinderedRotor_heatCapacity(self.Tdata[i], x[4], x[5])
dCv1 = harmonicOscillator_d_heatCapacity_d_freq(self.Tdata[i], x[0])
dCv2 = harmonicOscillator_d_heatCapacity_d_freq(self.Tdata[i], x[2])
dCv3 = harmonicOscillator_d_heatCapacity_d_freq(self.Tdata[i], x[3])
dCv4 = hinderedRotor_d_heatCapacity_d_freq(self.Tdata[i], x[4], x[5])
dCv5 = hinderedRotor_d_heatCapacity_d_barr(self.Tdata[i], x[4], x[5])
# Residual
f[i] = Cv1 + x[1] * Cv2 + (Nvib - x[1] - 1) * Cv3 + Nrot * Cv4 - self.Cvdata[i]
# Jacobian
J[i,0] = 2.0 * f[i] * dCv1
J[i,1] = 2.0 * f[i] * (Cv2 - Cv3)
J[i,2] = 2.0 * f[i] * x[1] * dCv2
J[i,3] = 2.0 * f[i] * ((Nvib - x[1] - 1) * dCv3)
J[i,4] = 2.0 * f[i] * Nrot * dCv4
J[i,5] = 2.0 * f[i] * Nrot * dCv5
return f, J, fcons, Jcons
| mit | 7,728,801,642,532,686,000 | 40.841897 | 168 | 0.626393 | false |
karlnapf/kernel_exp_family | kernel_exp_family/examples/demo_xvalidation_bayes_opt_manual.py | 1 | 3167 | from kernel_exp_family.estimators.finite.gaussian import KernelExpFiniteGaussian
from kernel_exp_family.estimators.lite.gaussian import KernelExpLiteGaussian
from kernel_exp_family.estimators.parameter_search_bo import BayesOptSearch,\
plot_bayesopt_model_1d
from kernel_exp_family.examples.tools import visualise_fit_2d
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
"""
This simple demo demonstrates how to select the kernel parameter for the lite
estimator, based on a Bayesian optimisation black-box optimiser.
Note that this optimiser can be "hot-started", i.e. it can be reset, but using
the previous model as initialiser for the new optimisation, which is useful
when the objective function changes slightly, e.g. when a new data was added
to the kernel exponential family model.
"""
N = 200
D = 2
# fit model to samples from a standard Gaussian
X = np.random.randn(N, D)
# use any of the below models, might have to change parameter bounds
estimators = [
KernelExpFiniteGaussian(sigma=1., lmbda=1., m=N, D=D),
KernelExpLiteGaussian(sigma=1., lmbda=.001, D=D, N=N),
]
for est in estimators:
print(est.__class__.__name__)
est.fit(X)
# specify bounds of parameters to search for
param_bounds = {
# 'lmbda': [-5,0], # fixed lmbda, uncomment to include in search
'sigma': [-2,3],
}
# oop interface for optimising and using results
# objective is not put through log here, if it is, might want to bound away from zero
bo = BayesOptSearch(est, X, param_bounds, objective_log=False, objective_log_bound=100,
num_initial_evaluations=5)
# optimisation starts here, use results and apply to model
best_params = bo.optimize(num_iter=5)
est.set_parameters_from_dict(best_params)
est.fit(X)
visualise_fit_2d(est, X)
plt.suptitle("Original fit %s\nOptimised over: %s" %
(str(est.get_parameters()), str(param_bounds)))
if len(param_bounds) == 1:
plt.figure()
plot_bayesopt_model_1d(bo)
plt.title("Objective")
# now change data, with different length scale
X = np.random.randn(200, D) * .1
# reset optimiser, which but initialise from old model, sample 3 random point to update
best_params = bo.re_initialise(new_data=X, num_initial_evaluations=3)
# this optimisation now runs on the "new" objective
best_params = bo.optimize(num_iter=3)
est.set_parameters_from_dict(best_params)
est.fit(X)
visualise_fit_2d(est, X)
plt.suptitle("New fit %s\nOptimised over: %s" %
(str(est.get_parameters()), str(param_bounds)))
if len(param_bounds) == 1:
plt.figure()
plot_bayesopt_model_1d(bo)
plt.title("New objective")
plt.show()
| bsd-3-clause | 3,263,166,658,225,626,000 | 38.5875 | 95 | 0.604673 | false |
shhui/horizon | openstack_dashboard/dashboards/admin/auditlog/tests.py | 1 | 4060 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class AuditlogsViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.auditlog: ('auditlog_list',
'resource_list',),
api.keystone: ('tenant_list',
'user_list',)})
def test_auditlogs_list(self):
# In form.__init__
api.auditlog.resource_list(IsA(http.HttpRequest)) \
.AndReturn([])
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_list(IsA(http.HttpRequest)) \
.AndReturn(self.users.list())
# In view method
api.auditlog.auditlog_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
q=IsA(list)) \
.AndReturn([self.auditlogs.list(),
False])
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_list(IsA(http.HttpRequest)) \
.AndReturn(self.users.list())
api.auditlog.resource_list(IsA(http.HttpRequest)) \
.AndReturn([])
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:admin:auditlog:index'))
self.assertTemplateUsed(res, 'admin/auditlog/index.html')
self.assertEqual(len(res.context['auditlog_table'].data),
len(self.auditlogs.list()))
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.auditlog: ('auditlog_list',
'resource_list',),
api.keystone: ('tenant_list',
'user_list',)})
def test_auditlogs_list_get_pagination(self):
auditlogs = self.auditlogs.list()[:5]
# In form init
api.auditlog.resource_list(IsA(http.HttpRequest)) \
.AndReturn([])
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_list(IsA(http.HttpRequest)) \
.AndReturn(self.users.list())
# In view method
api.auditlog.auditlog_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
q=IsA(list)) \
.AndReturn([auditlogs, True])
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_list(IsA(http.HttpRequest)) \
.AndReturn(self.users.list())
api.auditlog.resource_list(IsA(http.HttpRequest)) \
.AndReturn([])
self.mox.ReplayAll()
url = reverse('horizon:admin:auditlog:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['auditlog_table'].data),
len(auditlogs))
self.assertTemplateUsed(res, 'admin/auditlog/index.html')
self.assertTrue(res.context['auditlog_table'].has_more_data())
| apache-2.0 | -3,233,998,072,603,758,600 | 41.736842 | 78 | 0.577094 | false |
tapomayukh/projects_in_python | contact_modeling/find_model_params_online.py | 1 | 11223 | #!/usr/bin/env python
# Online Estimation
import numpy as np
import scipy as scp
import pylab as pyl
import matplotlib.pyplot as pp
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
import os
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import copy
import pickle
import optparse
import math
import unittest
import random
from estimate_model import VE_Model
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/rapid_categorization/taxel_based/NIDRR_demo_2014/model_estimation/')
class VE_Model:
def __init__(self, model_str):
self.index = 0
if model_str == 'HC':
x0 = 0
ke = 5.0
be = 0.3
n = 1.5
self.theta = np.matrix([ke, be, n]).T
elif model_str == 'KV':
k = 10.0
b = 0.1
self.theta = np.matrix([k, b]).T
elif model_str == 'Maxwell':
k = 10.0
b = 0.1
self.theta = np.matrix([k, b]).T
elif model_str == 'SLS':
k1 = 10.0
k2 = 10.0
b = 0.1
self.theta = np.matrix([k1, k2, b]).T
else:
print "Specified model not correct"
self.theta = np.matrix([]).T
def return_init_theta(self):
return self.theta
def RLSestimator(self, y, psi, theta):
betanew = 0.98
beta = (1 + (psi.T)*betanew*psi).tolist()[0][0]
num_param = len(theta)
I = np.matrix(np.identity(num_param))
if self.index == 0:
self.P = I*(10**19)
Ln = (self.P*psi*(1./(beta + ((psi.T)*self.P*psi)))).tolist()[0][0]
theta_new = theta + Ln*(y-(psi.T)*theta)
abs_error = abs((y - (psi.T)*theta).tolist()[0][0])
N0 = 0.001
if abs_error >= 2*N0:
delta = 1.
elif abs_error >= N0 and abs_error < 2*N0:
delta = (abs_error)/N0 - 1
elif abs_error < N0:
delta = 0.
self.P = delta*(1./beta)*(I - Ln*(psi.T))*self.P
return theta_new
def estimate_HC_online(self, num, theta, force, position, velocity):
# Estimate Hunt-Crossley Model using the double-stage identification process
theta = theta.tolist()
# Gamma-1 estimator
theta_1 = np.matrix([theta[0][0], theta[1][0]]).T
y_1 = force
psi_1 = np.matrix([position**theta[2][0], (position**theta[2][0])*velocity]).T
#print y_1
#print psi_1
#print theta_1
theta_1_new = self.RLSestimator(y_1, psi_1, theta_1).tolist()
# Gamma-2 estimator
#print '#######'
#print num
#print theta_1_new[0][0]
#print theta_1_new[1][0]
#print force
#print velocity
y_2 = math.log(abs((force - 0)/(theta_1_new[0][0] + theta_1_new[1][0]*velocity)))
psi_2 = np.matrix(math.log(position))
theta_2 = np.matrix(theta[2][0])
theta_2_new = self.RLSestimator(y_2, psi_2, theta_2).tolist()
#print theta_2_new[0][0]
theta_new = np.matrix([theta_1_new[0][0], theta_1_new[1][0], theta_2_new[0][0]]).T
return theta_new
def estimate_KV_online(self, num, theta, force, position, velocity):
print "Not implemented yet"
def estimate_Maxwell_online(self, num, theta, force, position, velocity):
print "Not implemented yet"
def estimate_SLS_online(self, num, theta, force, position, velocity):
print "Not implemented yet"
class Params(VE_Model):
def __init__(self, Fmat_original, temp_num_human, temp_num_furniture, hs_th, rls_th):
# Initialize Params
self.h_th = hs_th
self.r_th = rls_th
self.stiffness = {}
self.damping = {}
self.n = {}
self.flag = {}
self.deformation_final = {}
self.force_final = {}
self.velocity_final = {}
self.trial_lengths = {}
self.num_human = temp_num_human
self.num_furniture = temp_num_furniture
self.mat = Fmat_original
for i in range(self.num_human + self.num_furniture):
self.stiffness[i] = []
self.damping[i] = []
self.n[i] = []
self.flag[i] = []
self.deformation_final[i] = []
self.force_final[i] = []
self.velocity_final[i] = []
self.trial_lengths[i] = 0.0
# Initialize model
model_str = 'HC'
self.hC_model = VE_Model(model_str)
self.init_theta = self.hC_model.return_init_theta()
def find_params(self):
# Run Estimation
for i in range(self.num_human + self.num_furniture):
self.index = 0
theta_prev = self.init_theta.tolist()
seq_len = len(self.mat[i])
force = sum(self.mat[i][0:seq_len/2],[])
motion = sum(self.mat[i][seq_len/2:],[])
deformation = (np.array(motion) - np.array(motion[0]))
self.deformation_final[i] = deformation[deformation>0.0].tolist()
self.force_final[i] = np.array(force)[deformation>0.0].tolist()
self.velocity_final[i] = ((np.array(self.deformation_final[i][1:]) - np.array(self.deformation_final[i][0:-1]))/0.01).tolist()
self.velocity_final[i].insert(0,0.0)
for j in range(len(force)):
theta_new = self.hC_model.estimate_HC_online(i, np.matrix(theta_prev), self.force_final[i][j], self.deformation_final[i][j], self.velocity_final[i][j]).tolist()
self.stiffness[i].append(theta_new[0][0])
self.damping[i].append(theta_new[1][0])
self.n[i].append(theta_new[2][0])
min_dist = min(abs(np.array(theta_prev)-np.array(theta_new)))
if min_dist > self.r_th:
self.flag[i].append(0)
#print "Estimation has not converged yet. Taxel is unknown"
elif min_dist <= self.r_th:
if theta_new[2][0] < self.h_th: # k (spring stiffness) for soft environments ?
self.flag[i].append(1)
#print "Taxel is in touch with Mannequin"
else:
self.flag[i].append(2)
#print "Taxel is in touch with Wheelchair"
theta_prev = theta_new
self.index = self.index + 1
def debug(self):
self.find_params()
sample_length = 0
sample_index = 0
for i in range(self.num_human + self.num_furniture):
if sample_length < len(self.mat[i]):
sample_index = i
sample_length = max(sample_length,len(self.mat[i]))
data_len = sample_length
data_num = sample_index
force_list = self.force_final[data_num]
deformation_list = self.deformation_final[data_num]
velocity_list = self.velocity_final[data_num]
stiffness_list = self.stiffness[data_num]
damping_list = self.damping[data_num]
n_list = self.n[data_num]
flag_list = self.flag[data_num]
samples = []
for i in range(1, len(stiffness_list)+1):
samples.append(i)
#print len(samples)
#print len(stiffness_list)
#print len(damping_list)
#print len(n_list)
#print len(flag_list)
#print len(force_list)
#print len(deformation_list)
#print len(velocity_list)
estimated_force_list = []
for i in range(len(samples)):
estimated_force_list.append(stiffness_list[-1]*(deformation_list[i]**n_list[-1]) + damping_list[-1]*((deformation_list[i]**n_list[-1])*velocity_list[i]))
if self.theta_test.tolist()[0][0] < self.h_th:
ref_flag_list.append(1)
else:
ref_flag_list.append(2)
theta_est = [stiffness_list[-1], damping_list[-1], n_list[-1]]
plot_data(samples, force_list, estimated_force_list, deformation_list, velocity_list, theta_est)
plot_estimated_values(samples, stiffness_list, damping_list, n_list, flag_list, ref_flag_list)
def return_values(self):
self.find_params()
return self.force_final, self.deformation_final, self.velocity_final, self.stiffness, self.damping, self.n, self.flag
def plot_data(samples, force_list, estimated_force_list, deformation_list, velocity_list, theta_est):
# Plot data
pp.figure(1)
pp.subplot(3,1,1)
pp.title('Measured and Estimated Forces',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(samples, force_list, samples, estimated_force_list, linewidth=3.0)
pp.legend(["Measured Force (N)", "Estimated Force (N)"], loc=3)
pp.grid('on')
pp.subplot(3,1,2)
pp.title('Deformation',fontsize='24')
pp.ylabel('Deformation (m)',fontsize='24')
pp.plot(samples, deformation_list, linewidth=3.0)
pp.legend(["Deformation (m)"], loc=3)
pp.grid('on')
pp.subplot(3,1,3)
pp.title('Velocity',fontsize='24')
pp.xlabel('No. of Samples',fontsize='24')
pp.ylabel('Velocity (m/s)',fontsize='24')
pp.plot(samples, velocity_list, linewidth=3.0)
pp.legend(["Velocity (m/s)"], loc=3)
pp.grid('on')
def plot_estimated_values(samples, stiffness_list, damping_list, n_list, flag_list, ref_flag_list):
# Plot data
pp.figure(2)
pp.subplot(4,1,1)
pp.title('Stiffness',fontsize='24')
pp.ylabel('Stiffness',fontsize='24')
pp.plot(samples, stiffness_list, linewidth=3.0)
pp.legend(["Stiffness (N/m)"], loc=3)
pp.grid('on')
pp.subplot(4,1,2)
pp.title('Damping',fontsize='24')
pp.ylabel('Damping',fontsize='24')
pp.plot(samples, damping_list, linewidth=3.0)
pp.legend(["Damping (Ns/m)"], loc=3)
pp.grid('on')
pp.subplot(4,1,3)
pp.title('Exponent',fontsize='24')
pp.ylabel('Exponent',fontsize='24')
pp.plot(samples, n_list, linewidth=3.0)
pp.legend(["Exponent (n)"], loc=3)
pp.grid('on')
pp.subplot(4,1,4)
pp.title('Flag',fontsize='24')
pp.xlabel('No. of Samples',fontsize='24')
pp.ylabel('Flag',fontsize='24')
pp.plot(samples, flag_list, samples, ref_flag_list, linewidth=3.0)
pp.legend(["Estimated Flag (0-Unknown, 1-Human, 2-Furniture)", "Actual Flag (0-Unknown, 1-Human, 2-Furniture)"], loc=3)
#pp.ylim([70,100])
pp.grid('on')
#####################################################################################################
if __name__ == '__main__':
# Load Data
from data_mannequin_furniture_force_motion import Fmat_original, temp_num_human, temp_num_furniture
#print temp_num_human
#print temp_num_human+temp_num_furniture
# Find Parameters
human_stiffness_threshold = 50.
rls_termination_threshold = 0.005
params = Params(Fmat_original, temp_num_human, temp_num_furniture, human_stiffness_threshold, rls_termination_threshold)
# Debug
params.debug()
pp.show()
| mit | -765,287,255,659,265,200 | 34.181818 | 177 | 0.554219 | false |
adityaarun1/faster_rcnn_pytorch | faster_rcnn/datasets/nissan.py | 2 | 10135 | import os
import PIL
import numpy as np
import scipy.sparse
import subprocess
import pickle
import math
import glob
from .imdb import imdb
from .imdb import ROOT_DIR
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
class nissan(imdb):
    """Image database (imdb) wrapper for the NISSAN driving dataset.

    Provides image paths, region-proposal ROIs, and KITTI-style result
    writers for the classes Car / Pedestrian / Cyclist.
    """

    def __init__(self, image_set, nissan_path=None):
        imdb.__init__(self, 'nissan_' + image_set)
        self._image_set = image_set
        self._nissan_path = self._get_default_path() if nissan_path is None \
                            else nissan_path
        self._data_path = os.path.join(self._nissan_path, 'Images')
        self._classes = ('__background__', 'Car', 'Pedestrian', 'Cyclist')
        self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
        self._image_ext = '.png'
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        if cfg.IS_RPN:
            self._roidb_handler = self.gt_roidb
        else:
            self._roidb_handler = self.region_proposal_roidb

        # num of subclasses (227 car + 36 pedestrian + 36 cyclist + background)
        self._num_subclasses = 227 + 36 + 36 + 1

        # load the mapping from subclass index to class index
        filename = os.path.join(self._nissan_path, 'mapping.txt')
        assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
        mapping = np.zeros(self._num_subclasses, dtype=np.int)
        with open(filename) as f:
            for line in f:
                words = line.split()
                subcls = int(words[0])
                mapping[subcls] = self._class_to_ind[words[1]]
        self._subclass_mapping = mapping

        self.config = {'top_k': 100000}

        # statistics for computing recall
        self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)
        self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)
        self._num_boxes_proposal = 0

        assert os.path.exists(self._nissan_path), \
                'Nissan path does not exist: {}'.format(self._nissan_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)

    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self.image_index[i])

    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        """
        # set the prefix
        prefix = self._image_set

        image_path = os.path.join(self._data_path, prefix, index + self._image_ext)
        assert os.path.exists(image_path), \
                'Path does not exist: {}'.format(image_path)
        return image_path

    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        image_set_file = os.path.join(self._data_path, self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
                'Path does not exist: {}'.format(image_set_file)

        with open(image_set_file) as f:
            image_index = [x.rstrip('\n') for x in f.readlines()]
        return image_index

    def _get_default_path(self):
        """
        Return the default path where NISSAN is expected to be installed.
        """
        return os.path.join(ROOT_DIR, 'data', 'NISSAN')

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        No implementation.
        """
        gt_roidb = []
        return gt_roidb

    def region_proposal_roidb(self):
        """
        Return the database of regions of interest.
        Ground-truth ROIs are also included.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path,
                self.name + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')

        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} roidb loaded from {}'.format(self.name, cache_file))
            return roidb

        print('Loading region proposal network boxes...')
        model = cfg.REGION_PROPOSAL
        roidb = self._load_rpn_roidb(None, model)
        print('Region proposal network boxes loaded')
        print('{} region proposals per image'.format(self._num_boxes_proposal / len(self.image_index)))

        with open(cache_file, 'wb') as fid:
            pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote roidb to {}'.format(cache_file))

        return roidb

    def _load_rpn_roidb(self, gt_roidb, model):
        # set the prefix
        prefix = model

        box_list = []
        for index in self.image_index:
            filename = os.path.join(self._nissan_path, 'region_proposals', prefix, self._image_set, index + '.txt')
            assert os.path.exists(filename), \
                'RPN data not found at: {}'.format(filename)
            raw_data = np.loadtxt(filename, dtype=float)
            if len(raw_data.shape) == 1:
                # 0 or 1 proposal: normalize to a 2-D (N, 5) array
                if raw_data.size == 0:
                    raw_data = raw_data.reshape((0, 5))
                else:
                    raw_data = raw_data.reshape((1, 5))

            x1 = raw_data[:, 0]
            y1 = raw_data[:, 1]
            x2 = raw_data[:, 2]
            y2 = raw_data[:, 3]
            score = raw_data[:, 4]
            # keep only boxes with positive width and height; the score
            # column is intentionally dropped from the kept data
            inds = np.where((x2 > x1) & (y2 > y1))[0]
            raw_data = raw_data[inds,:4]

            self._num_boxes_proposal += raw_data.shape[0]
            box_list.append(raw_data)

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    def evaluate_detections(self, all_boxes, output_dir):
        """Write KITTI-format per-image result files to *output_dir*."""
        # load the mapping from subclass to alpha (viewpoint)
        filename = os.path.join(self._nissan_path, 'mapping.txt')
        assert os.path.exists(filename), \
                'Path does not exist: {}'.format(filename)

        mapping = np.zeros(self._num_subclasses, dtype=np.float)
        with open(filename) as f:
            for line in f:
                words = line.split()
                subcls = int(words[0])
                mapping[subcls] = float(words[3])

        # for each image
        for im_ind, index in enumerate(self.image_index):
            filename = os.path.join(output_dir, index + '.txt')
            print('Writing NISSAN results to file ' + filename)
            with open(filename, 'wt') as f:
                # for each class
                for cls_ind, cls in enumerate(self.classes):
                    if cls == '__background__':
                        continue
                    dets = all_boxes[cls_ind][im_ind]
                    # Bug fix: `dets == []` is an elementwise comparison when
                    # dets is an ndarray; use the length check instead.
                    if len(dets) == 0:
                        continue
                    for k in range(dets.shape[0]):
                        subcls = int(dets[k, 5])
                        cls_name = self.classes[self.subclass_mapping[subcls]]
                        assert (cls_name == cls), 'subclass not in class'
                        alpha = mapping[subcls]
                        f.write('{:s} -1 -1 {:f} {:f} {:f} {:f} {:f} -1 -1 -1 -1 -1 -1 -1 {:.32f}\n'.format(\
                            cls, alpha, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))

    # write detection results into one file
    def evaluate_detections_one_file(self, all_boxes, output_dir):
        # open results file
        filename = os.path.join(output_dir, 'detections.txt')
        print('Writing all NISSAN results to file ' + filename)
        with open(filename, 'wt') as f:
            # for each image
            for im_ind, index in enumerate(self.image_index):
                # for each class
                for cls_ind, cls in enumerate(self.classes):
                    if cls == '__background__':
                        continue
                    dets = all_boxes[cls_ind][im_ind]
                    # Bug fix: avoid ndarray == [] elementwise comparison.
                    if len(dets) == 0:
                        continue
                    for k in range(dets.shape[0]):
                        subcls = int(dets[k, 5])
                        cls_name = self.classes[self.subclass_mapping[subcls]]
                        assert (cls_name == cls), 'subclass not in class'
                        f.write('{:s} {:s} {:f} {:f} {:f} {:f} {:d} {:f}\n'.format(\
                            index, cls, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], subcls, dets[k, 4]))

    def evaluate_proposals(self, all_boxes, output_dir):
        """Write per-image proposal boxes (x1 y1 x2 y2 score) to *output_dir*."""
        # for each image
        for im_ind, index in enumerate(self.image_index):
            filename = os.path.join(output_dir, index + '.txt')
            print('Writing NISSAN results to file ' + filename)
            with open(filename, 'wt') as f:
                # for each class
                for cls_ind, cls in enumerate(self.classes):
                    if cls == '__background__':
                        continue
                    dets = all_boxes[cls_ind][im_ind]
                    # Bug fix: avoid ndarray == [] elementwise comparison.
                    if len(dets) == 0:
                        continue
                    for k in range(dets.shape[0]):
                        f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(\
                            dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))

    def evaluate_proposals_msr(self, all_boxes, output_dir):
        # for each image
        for im_ind, index in enumerate(self.image_index):
            filename = os.path.join(output_dir, index + '.txt')
            print('Writing NISSAN results to file ' + filename)
            with open(filename, 'wt') as f:
                dets = all_boxes[im_ind]
                # Bug fix: avoid ndarray == [] elementwise comparison.
                if len(dets) == 0:
                    continue
                for k in range(dets.shape[0]):
                    f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
if __name__ == '__main__':
    # Smoke test: build the imdb for one capture session, force the ROI
    # database to load, then drop into an interactive shell.
    d = nissan('2015-10-21-16-25-12')
    res = d.roidb
    from IPython import embed; embed()
| mit | 2,613,996,979,361,824,000 | 39.218254 | 127 | 0.524815 | false |
erdincay/pyload | module/plugins/hooks/SimplyPremiumComHook.py | 12 | 1137 | # -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.internal.MultiHook import MultiHook
class SimplyPremiumComHook(MultiHook):
    __name__    = "SimplyPremiumComHook"
    __type__    = "hook"
    __version__ = "0.06"
    __status__  = "testing"

    __config__ = [("pluginmode"    , "all;listed;unlisted", "Use for plugins"              , "all"),
                  ("pluginlist"    , "str"                , "Plugin list (comma separated)", ""   ),
                  ("reload"        , "bool"               , "Reload plugin list"           , True ),
                  ("reloadinterval", "int"                , "Reload interval in hours"     , 12   )]

    __description__ = """Simply-Premium.com hook plugin"""
    __license__     = "GPLv3"
    __authors__     = [("EvolutionClip", "[email protected]")]


    def get_hosters(self):
        """Fetch the list of supported hoster regexes from the Simply-Premium API."""
        response = self.load("http://www.simply-premium.com/api/hosts.php",
                             get={'format': "json", 'online': 1})
        return [entry['regex'] for entry in json_loads(response)['result']]
return host_list
| gpl-3.0 | -5,857,881,405,702,177,000 | 38.206897 | 113 | 0.531223 | false |
Pulgama/supriya | supriya/commands/NodeRunRequest.py | 1 | 3773 | import supriya.osc
from supriya.commands.Request import Request
from supriya.enums import RequestId
class NodeRunRequest(Request):
    """
    A ``/n_run`` request.

    Pauses or unpauses server nodes in bulk.  Each ``(node_id, run_flag)``
    pair maps a node to its desired run state: a truthy flag unpauses the
    node, a falsy flag pauses it.  Unpausing node 1000 while pausing node
    1001, for example, serializes to::

        OscMessage('/n_run', 1000, 1, 1001, 0)

    The server answers with ``/n_on`` / ``/n_off`` notifications.
    """

    ### CLASS VARIABLES ###

    __slots__ = ("_node_id_run_flag_pairs",)

    request_id = RequestId.NODE_RUN

    ### INITIALIZER ###

    def __init__(self, node_id_run_flag_pairs=None):
        Request.__init__(self)
        if node_id_run_flag_pairs:
            # Normalize every run flag to a plain bool, preserving order.
            node_id_run_flag_pairs = tuple(
                (node_id, bool(run_flag))
                for node_id, run_flag in node_id_run_flag_pairs
            )
        self._node_id_run_flag_pairs = node_id_run_flag_pairs

    ### PRIVATE METHODS ###

    def _apply_local(self, server):
        # Mirror the run state onto the client-side node objects.
        for node_id, run_flag in self.node_id_run_flag_pairs:
            node = server._nodes.get(node_id)
            if node:
                node._run(run_flag)

    ### PUBLIC METHODS ###

    def to_osc(self, *, with_placeholders=False, with_request_name=False):
        request_id = (
            self.request_name if with_request_name else int(self.request_id)
        )
        sanitized_pairs = [
            (self._sanitize_node_id(node_id, with_placeholders), int(run_flag))
            for node_id, run_flag in self.node_id_run_flag_pairs or []
        ]
        contents = [request_id]
        # Pairs are flattened in sorted (node id) order.
        for pair in sorted(sanitized_pairs):
            contents.extend(pair)
        return supriya.osc.OscMessage(*contents)

    ### PUBLIC PROPERTIES ###

    @property
    def node_id_run_flag_pairs(self):
        return self._node_id_run_flag_pairs
| mit | 3,331,147,071,377,809,000 | 27.583333 | 74 | 0.518685 | false |
ptitjes/quodlibet | quodlibet/packages/raven/transport/gevent.py | 12 | 1658 | """
raven.transport.gevent
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from raven.transport.base import AsyncTransport
from raven.transport.http import HTTPTransport
# Probe for gevent at import time; GeventedHTTPTransport.__init__ refuses
# to construct when it is unavailable.
try:
    import gevent
    # gevent 1.0bN renamed coros to lock
    try:
        from gevent.lock import Semaphore
    except ImportError:
        from gevent.coros import Semaphore  # NOQA
    has_gevent = True
except ImportError:
    has_gevent = None
class GeventedHTTPTransport(AsyncTransport, HTTPTransport):
    """HTTP transport that sends events on gevent greenlets."""

    scheme = ['gevent+http', 'gevent+https']

    def __init__(self, maximum_outstanding_requests=100, *args, **kwargs):
        if not has_gevent:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        # Semaphore bounds the number of in-flight requests.
        self._lock = Semaphore(maximum_outstanding_requests)

        super(GeventedHTTPTransport, self).__init__(*args, **kwargs)

    def async_send(self, url, data, headers, success_cb, failure_cb):
        """
        Spawn an async request to a remote webserver.
        """
        # this can be optimized by making a custom self.send that does not
        # read the response since we don't use it.
        self._lock.acquire()
        blocking_send = super(GeventedHTTPTransport, self).send
        greenlet = gevent.spawn(blocking_send, url, data, headers)
        return greenlet.link(lambda g: self._done(g, success_cb, failure_cb))

    def _done(self, greenlet, success_cb, failure_cb, *args):
        # Release the slot first, then report the outcome via callbacks.
        self._lock.release()
        if greenlet.successful():
            success_cb()
        else:
            failure_cb(greenlet.exception)
| gpl-2.0 | 1,029,439,619,663,752,100 | 30.283019 | 75 | 0.648975 | false |
knuevena/americorps-backend | views.py | 1 | 10945 | import volunteer
from volunteer import Volunteer
from admin import Admin
import admin
from organization import *
import orgmember
import organization
import event
from volunteerSkills import VolunteerSkills
from volunteerNeighborhoods import VolunteerNeighborhoods
from volunteerInterests import VolunteerInterests
from flask import render_template,redirect, url_for, json, g
from flask.ext.api import status
from flask import Flask, request, jsonify, session
from db import Base, Session
from app import app
from user import User
import jwt
from datetime import datetime, timedelta
from event import Event
from flask.ext.cors import CORS, cross_origin
@app.route('/')
def index():
    # Placeholder root endpoint: always answers 404 with a throwaway body.
    content = {'test content':'disregard'}
    return content, status.HTTP_404_NOT_FOUND
@app.route('/organization/', methods=['POST'])
def create_org():
    # Create an organization from the posted JSON body.
    # 200 on success, 500 on creation failure, 400 when the body is empty.
    success = {'status':'organization created, yay!'}
    error = {'error': 'Error in JSON/SQL syntax'}
    error3 = {'error': 'No organization data provided'}
    data = request.json
    if data:
        # createOrganization is brought in via `from organization import *`.
        if createOrganization(data):
            return success, status.HTTP_200_OK
        else:
            return error, status.HTTP_500_INTERNAL_SERVER_ERROR
    else:
        return error3, status.HTTP_400_BAD_REQUEST
@app.route('/user/', methods=['POST'])
def create_user():
    """Create a volunteer/admin/organization account from the posted JSON.

    Returns 200 on success, 500 on creation failure or unknown role,
    400 when no JSON body was supplied.
    """
    success = {'status':'account created, yay!'}
    error = {'error': 'Error in JSON/SQL syntax'}
    error3 = {'error': 'No user data provided'}
    data = request.json
    if not data:
        # Bug fix: this path previously referenced the misspelled attribute
        # `status.HTTP_400_BAD_REQUESTup`, raising AttributeError instead
        # of answering 400.
        return error3, status.HTTP_400_BAD_REQUEST
    # Dispatch on the requested role; unknown roles fall through to 500,
    # matching the original behavior.
    creators = {
        'volunteer': volunteer.createVolunteer,
        'admin': admin.Admin.createAdmin,
        'organization': organization.createOrganization,
    }
    creator = creators.get(data['permissions'])
    if creator and creator(data):
        return success, status.HTTP_200_OK
    return error, status.HTTP_500_INTERNAL_SERVER_ERROR
@app.route('/organizations/<int:org_id>', methods=['GET', 'POST', 'DELETE'])
def orgs(org_id):
    """Fetch (GET), update (POST) or delete (DELETE) one organization.

    NOTE(review): a POST with an empty JSON body falls through every branch
    and returns None (a 500 in Flask) -- confirm whether that is intended.
    """
    error = {'error': 'Error in JSON/SQL syntax'}
    updateSuccess = {'status':'Organization updated'}
    updateError = {'error': 'Organization not found/input validation failed.'}
    noOrg = {'error': 'Organization not found.'}
    deleteSuccess = {'status' : 'Organization deleted'}
    # update user
    if request.method == 'POST':
        data = request.json
        if data:
            s = Session()
            # .update() returns the matched-row count (0 means not found).
            o = s.query(Organization).filter_by(id=org_id).update(data)
            if o:
                s.commit()
                s.close()
                return updateSuccess, status.HTTP_200_OK
            else:
                # NOTE(review): the session is left open on this path.
                return updateError, status.HTTP_400_BAD_REQUEST
    if request.method == 'GET':
        s = Session()
        u = s.query(Organization).filter_by(id=org_id).first()
        if u:
            s.close()
            return u.asdict(), status.HTTP_200_OK
        else:
            return noOrg, status.HTTP_404_NOT_FOUND
    if request.method == 'DELETE':
        s = Session()
        org = s.query(Organization).filter_by(id=org_id).first()
        if not(org):
            return noOrg, status.HTTP_404_NOT_FOUND
        try:
            org.deleteSelf(s)
        # NOTE(review): `exc` is not imported here directly -- presumably
        # re-exported by `from organization import *`; verify.
        except exc.SQLAlchemyError as e:
            deleteError = {'error': str(e)}
            return deleteError, status.HTTP_400_BAD_REQUEST
        s.close()
        return deleteSuccess, status.HTTP_200_OK
@app.route('/user/<int:user_id>', methods=['GET', 'POST', 'DELETE'])
def users(user_id):
    """Fetch (GET), update (POST) or delete (DELETE) one user account.

    GET also attaches the volunteer association lists (skills,
    neighborhoods, interests) to the returned dict.
    """
    error = {'error': 'Error in JSON/SQL syntax'}
    updateSuccess = {'status':'account updated'}
    updateError = {'error': 'User not found/input validation failed.'}
    noUser = {'error': 'User not found.'}
    deleteSuccess = {'status' : 'account deleted'}
    # update user
    if request.method == 'POST':
        data = request.json
        if data:
            s = Session()
            u = s.query(User).filter_by(id=user_id).update(data)
            if u:
                s.commit()
                # Re-read the row so the fresh values are returned.
                u = s.query(User).filter_by(id=user_id).first()
                #print(u)
                # NOTE(review): the session is never closed on this path.
                return u.asdict(), status.HTTP_200_OK
            else:
                return updateError, status.HTTP_400_BAD_REQUEST
    if request.method == 'GET':
        s = Session()
        u = s.query(User).filter_by(id=user_id).first()
        if u:
            user_id = u.id
            u = u.asdict()
            u['skills'] = VolunteerSkills.get_skills(user_id)
            u['neighborhoods'] = VolunteerNeighborhoods.get_neighborhoods(user_id)
            u['interests'] = VolunteerInterests.get_interests(user_id)
            return u, status.HTTP_200_OK
        else:
            return noUser, status.HTTP_404_NOT_FOUND
    if request.method == 'DELETE':
        s = Session()
        user = s.query(User).filter_by(id=user_id).first()
        if not(user):
            return noUser, status.HTTP_404_NOT_FOUND
        else:
            try:
                user.deleteSelf(s)
            # NOTE(review): `exc` presumably comes from
            # `from organization import *`; verify.
            except exc.SQLAlchemyError as e:
                deleteError = {'error' : e.args}
                return deleteError, status.HTTP_400_BAD_REQUEST
            finally:
                s.close()
            return deleteSuccess, status.HTTP_200_OK
@app.route('/user/loghours', methods=['POST'])
def hours():
    # Log volunteer hours for an event.  Expects JSON with
    # volunteerid, eventid and hours.
    noVolunteer = {'error': 'Volunteer not found.'}
    wrong = {'error': 'JSON incorrect - need volunteer, event, and hours'}
    correct = {'status': 'hours logged!'}
    wrong2 = {'error': 'error logging hours'}
    data = request.json
    s = Session()
    vo = s.query(Volunteer).filter_by(id=data['volunteerid']).first()
    if not(vo):
        return noVolunteer, status.HTTP_404_NOT_FOUND
    else:
        eventid = data["eventid"]
        hours = data["hours"]
        if eventid and hours:
            if vo.log_hours(eventid, hours):
                return correct, status.HTTP_200_OK
            else:
                return wrong2, status.HTTP_500_INTERNAL_SERVER_ERROR
        else:
            # NOTE(review): hours == 0 is treated as missing input here.
            return wrong, status.HTTP_500_INTERNAL_SERVER_ERROR
@app.route('/event/<int:event_id>', methods=['GET', 'POST', 'PUT', 'DELETE'])
def event(event_id):
    """Create (POST) or fetch (GET) a single event.

    Bug fix: this view function shadows the module-level ``event`` import,
    so the original ``event.createEvent(data)`` resolved to this function
    and raised AttributeError.  We now call through the ``event_module``
    alias added to the imports.  The original also had a second, unreachable
    POST branch (the first one always returns); it has been removed.
    PUT/DELETE are accepted by the route but remain unimplemented, as before.
    """
    success = {'status': 'event created'}
    noEvent = {'error': 'User not found.'}
    error = {'error': "Error in JSON/SQL syntax"}
    if request.method == 'POST':
        data = request.json
        if event_module.createEvent(data):
            return success, status.HTTP_200_OK
        else:
            return error, status.HTTP_500_INTERNAL_SERVER_ERROR
    if request.method == 'GET':
        s = Session()
        e = s.query(Event).filter_by(id=event_id).first()
        if e:
            return Event.asdict(e), status.HTTP_200_OK
        else:
            return noEvent, status.HTTP_404_NOT_FOUND
@app.route('/event/get_all', methods=['GET'])
def get_all():
    """Return every event as a JSON-serializable dict under 'results'."""
    if request.method == 'GET':
        db_session = Session()
        all_events = db_session.query(Event).all()
        return ({'results': [Event.asdict(ev) for ev in all_events]},
                status.HTTP_200_OK)
@app.route('/event/signup', methods=['POST'])
def signup():
    # Register the posted user for the posted event.
    error = {'error': "Error in JSON/SQL syntax"}
    success = {'success': 'signup successful!'}
    if request.method == 'POST':
        data = request.json
        if (volunteer.addEvent(data['eventid'], data['userid'])):
            return success, status.HTTP_200_OK
        else:
            return error, status.HTTP_500_INTERNAL_SERVER_ERROR
@app.route('/event/featured', methods=['GET'])
def featured():
    """Return the events flagged as featured under 'results'."""
    if request.method == 'GET':
        db_session = Session()
        featured_events = db_session.query(Event).filter_by(featured=True)
        return ({'results': [Event.asdict(ev) for ev in featured_events]},
                status.HTTP_200_OK)
@app.route('/login', methods=['POST'])
@cross_origin(headers=['Content-Type','Authorization'])
def login():
    """Authenticate a user and return a signed JWT plus their profile dict.

    200 with {'token', 'user'} on success, 500 if token creation fails,
    401 on bad credentials.
    """
    s = Session()
    json_data = request.json
    user = s.query(User).filter_by(email=json_data['email']).first()
    error = "Login Failed"
    s.close()
    if user and user.check_password(json_data['passwordhash']):
        # Bug fix: the original called create_token(user) twice, doing the
        # DB round-trip and JWT signing twice per login.  Build it once.
        token = create_token(user)
        if token is not None:
            return token, status.HTTP_200_OK
        return jsonify({'result': "Token Failed" }), status.HTTP_500_INTERNAL_SERVER_ERROR
    return jsonify({'result': error}), status.HTTP_401_UNAUTHORIZED
def create_token(user):
    """Build a 1-day JWT for *user* and bundle it with their profile dict.

    Returns {'token', 'user'} on success, or None when the profile lookup
    fails.  Bug fix: an unrecognized ``user.permissions`` value previously
    left the local ``d`` unbound and raised NameError outside the try
    block; it now returns None like the other failure paths.
    """
    payload = {
        # subject
        'sub': user.id,
        # issued at
        'iat': datetime.utcnow(),
        # expiry
        'exp': datetime.utcnow() + timedelta(days=1)
    }

    s = Session()
    token = jwt.encode(payload, app.secret_key, algorithm='HS256')
    profile = None
    try:
        if (user.permissions == 'volunteer'):
            us = s.query(Volunteer).filter_by(id=user.id).first()
            profile = volunteer.Volunteer.asdict(us)
            profile['skills'] = VolunteerSkills.get_skills(us.id)
            profile['neighborhoods'] = VolunteerNeighborhoods.get_neighborhoods(us.id)
            profile['interests'] = VolunteerInterests.get_interests(us.id)
        elif (user.permissions == 'admin'):
            us = s.query(Admin).filter_by(id=user.id).first()
            profile = admin.Admin.asdict(us)
        elif (user.permissions == 'organization'):
            us = s.query(Organization).filter_by(id=user.id).first()
            profile = organization.Organization.asdict(us)
    except Exception:
        # Preserve the original contract: any lookup failure yields None.
        return None
    finally:
        s.close()
    if profile is None:
        return None
    return {'token': str(token), 'user': profile}
def parse_token(req):
    # Decode the JWT from the "Authorization: Bearer <token>" header.
    # NOTE(review): raises if the header is missing or has no second part.
    token = req.headers.get('Authorization').split()[1]
    return jwt.decode(token, app.secret_key, algorithms='HS256')
@app.route('/logout', methods=['GET'])
def logout():
    """Stateless logout endpoint; the client simply discards its token.

    Bug fix: the original returned a bare dict, which is not a valid view
    return value in the flask.ext-era Flask this app targets (dict returns
    were only added in Flask 1.1).  Wrap it with jsonify().
    """
    return jsonify({'result': 'success'})
| mit | -8,632,896,102,610,305,000 | 33.856688 | 94 | 0.586661 | false |
gonboy/sl4a | python/src/Mac/Tools/Doc/HelpIndexingTool/Help_Indexing_Tool_Suite.py | 29 | 3570 | """Suite Help Indexing Tool Suite: Special events that just the Help Indexing Tool supports.
Level 0, version 0
Generated from /Developer/Applications/Apple Help Indexing Tool.app
AETE/AEUT resource version 1/1, language 0, script 0
"""
import aetools
import MacOS
_code = 'HIT '  # four-character AppleEvent suite code for the Help Indexing Tool
class Help_Indexing_Tool_Suite_Events:
    # Generated aetools event suite: each method packs its arguments into an
    # AppleEvent, sends it to the Help Indexing Tool, and decodes the reply,
    # raising aetools.Error when the tool reports an error number.

    def turn_anchor_indexing(self, _object, _attributes={}, **_arguments):
        """turn anchor indexing: Turns anchor indexing on or off.
        Required argument: \xd2on\xd3 or \xd2off\xd3, to turn anchor indexing on or off
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'HIT '
        _subcode = 'tAnc'
        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    # Maps the Python keyword argument to its four-character AE key.
    _argmap_turn_remote_root = {
        'with_root_url' : 'rURL',
    }

    def turn_remote_root(self, _object, _attributes={}, **_arguments):
        """turn remote root: Turn usage of remote root for content on the web on or off. If turning \xd2on\xd3, supply a string as second parameter.
        Required argument: \xd2on\xd3 or \xd2off\xd3, to turn remote root on or off
        Keyword argument with_root_url: The remote root to use, in the form of \xd2http://www.apple.com/help/\xd3.
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'HIT '
        _subcode = 'tRem'
        aetools.keysubst(_arguments, self._argmap_turn_remote_root)
        _arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    def use_tokenizer(self, _object, _attributes={}, **_arguments):
        """use tokenizer: Tells the indexing tool which tokenizer to use.
        Required argument: Specify \xd2English\xd3, \xd2European\xd3, \xd2Japanese\xd3, \xd2Korean\xd3, or \xd2Simple\xd3.
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'HIT '
        _subcode = 'uTok'
        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']
class application(aetools.ComponentItem):
    """application - Application class """
    want = 'capp'  # four-character AE class code
class _Prop_idleStatus(aetools.NProperty):
    """idleStatus - """
    which = 'sIdl'  # four-character AE property code
    want = 'bool'
# Wire up the generated class hierarchy and property/element tables that
# aetools uses to encode and decode AppleEvents.
application._superclassnames = []
application._privpropdict = {
    'idleStatus' : _Prop_idleStatus,
}
application._privelemdict = {
}
#
# Indices of types declared in this module
#
_classdeclarations = {
    'capp' : application,
}
_propdeclarations = {
    'sIdl' : _Prop_idleStatus,
}
_compdeclarations = {
}
_enumdeclarations = {
}
| apache-2.0 | -3,069,617,058,972,281,000 | 31.454545 | 148 | 0.62465 | false |
ezio-melotti/devguide | tools/rstlint.py | 3 | 7608 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Check for stylistic and formal issues in .rst and .py
# files included in the documentation.
#
# 01/2009, Georg Brandl
# TODO: - wrong versions in versionadded/changed
# - wrong markup after versionchanged directive
from __future__ import with_statement
import os
import re
import sys
import getopt
from os.path import join, splitext, abspath, exists
from collections import defaultdict
# Known reST directive names; used to spot comments that were probably
# meant to be directives (e.g. ``.. note`` missing its second colon).
directives = [
    # standard docutils ones
    'admonition', 'attention', 'caution', 'class', 'compound', 'container',
    'contents', 'csv-table', 'danger', 'date', 'default-role', 'epigraph',
    'error', 'figure', 'footer', 'header', 'highlights', 'hint', 'image',
    'important', 'include', 'line-block', 'list-table', 'meta', 'note',
    'parsed-literal', 'pull-quote', 'raw', 'replace',
    'restructuredtext-test-directive', 'role', 'rubric', 'sectnum', 'sidebar',
    'table', 'target-notes', 'tip', 'title', 'topic', 'unicode', 'warning',
    # Sphinx and Python docs custom ones
    'acks', 'attribute', 'autoattribute', 'autoclass', 'autodata',
    'autoexception', 'autofunction', 'automethod', 'automodule', 'centered',
    'cfunction', 'class', 'classmethod', 'cmacro', 'cmdoption', 'cmember',
    'code-block', 'confval', 'cssclass', 'ctype', 'currentmodule', 'cvar',
    'data', 'decorator', 'decoratormethod', 'deprecated-removed',
    'deprecated(?!-removed)', 'describe', 'directive', 'doctest', 'envvar',
    'event', 'exception', 'function', 'glossary', 'highlight', 'highlightlang',
    'impl-detail', 'index', 'literalinclude', 'method', 'miscnews', 'module',
    'moduleauthor', 'opcode', 'pdbcommand', 'productionlist',
    'program', 'role', 'sectionauthor', 'seealso', 'sourcecode', 'staticmethod',
    'tabularcolumns', 'testcode', 'testoutput', 'testsetup', 'toctree', 'todo',
    'todolist', 'versionadded', 'versionchanged'
]
all_directives = '(' + '|'.join(directives) + ')'
# A line that looks like a directive but is parsed as a comment.
seems_directive_re = re.compile(r'(?<!\.)\.\. %s([^a-z:]|:(?!:))' % all_directives)
# `inline default role`, which the docs style forbids.
default_role_re = re.compile(r'(^| )`\w([^`]*?\w)?`($| )')
# reST markup fragments that leaked into generated HTML.
leaked_markup_re = re.compile(r'[a-z]::\s|`|\.\.\s*\w+:')
checkers = {}  # file suffix -> list of registered checker functions
checker_props = {'severity': 1, 'falsepositives': False}  # per-checker defaults
def checker(*suffixes, **kwds):
    """Decorator registering a function as a checker for the given suffixes.

    Keyword arguments override the defaults in ``checker_props`` and are
    stored as attributes on the checker function itself.
    """
    def deco(func):
        for suffix in suffixes:
            checkers.setdefault(suffix, []).append(func)
        for prop, default in checker_props.items():
            setattr(func, prop, kwds.get(prop, default))
        return func
    return deco
@checker('.py', severity=4)
def check_syntax(fn, lines):
    """Yield (lineno, message) when the Python source in *lines* fails to compile."""
    source = ''.join(lines)
    if '\r' in source:
        # Carriage returns are only expected on Windows checkouts.
        if os.name != 'nt':
            yield 0, '\\r in code file'
        source = source.replace('\r', '')
    try:
        compile(source, fn, 'exec')
    except SyntaxError as err:
        yield err.lineno, 'not compilable: %s' % err
@checker('.rst', severity=2)
def check_suspicious_constructs(fn, lines):
    """Flag reST lines that look like mistyped directives or use the default role."""
    in_production_list = False
    for lineno, line in enumerate(lines, start=1):
        if seems_directive_re.search(line):
            yield lineno, 'comment seems to be intended as a directive'
        if '.. productionlist::' in line:
            in_production_list = True
        elif not in_production_list and default_role_re.search(line):
            # Backtick spans inside a productionlist block are legitimate.
            yield lineno, 'default role used'
        elif in_production_list and not line.strip():
            in_production_list = False
@checker('.py', '.rst')
def check_whitespace(fn, lines):
    """Flag carriage returns, tabs and trailing whitespace."""
    for lineno, line in enumerate(lines, start=1):
        if '\r' in line:
            yield lineno, '\\r in line'
        if '\t' in line:
            yield lineno, 'OMG TABS!!!1'
        # The final character (normally the newline) is excluded from the check.
        if line[:-1].rstrip(' \t') != line[:-1]:
            yield lineno, 'trailing whitespace'
@checker('.rst', severity=0)
def check_line_length(fn, lines):
    """Flag lines over 81 characters; not run by default (severity 0)."""
    for lineno, line in enumerate(lines, start=1):
        if len(line) <= 81:
            continue
        stripped = line.lstrip()
        # Tables, links and long signature directives are exempt.
        if (stripped[0] not in '+|'
                and 'http://' not in line
                and not stripped.startswith(('.. function',
                                             '.. method',
                                             '.. cfunction'))):
            yield lineno, "line too long"
@checker('.html', severity=2, falsepositives=True)
def check_leaked_markup(fn, lines):
    """Check HTML files for leaked reST markup.

    Only meaningful once the HTML output has actually been built.
    """
    for lineno, line in enumerate(lines, start=1):
        if leaked_markup_re.search(line):
            yield lineno, 'possibly leaked markup: %r' % line
def main(argv):
    """Walk the given path, run the registered checkers, report problems.

    Returns 0 when clean, 1 when problems were found, 2 on usage errors.
    """
    usage = '''\
Usage: %s [-v] [-f] [-s sev] [-i path]* [path]
Options: -v verbose (print all checked file names)
         -f enable checkers that yield many false positives
         -s sev only show problems with severity >= sev
         -i path ignore subdir or file path
'''% argv[0]
    try:
        gopts, args = getopt.getopt(argv[1:], 'vfs:i:')
    except getopt.GetoptError:
        print(usage)
        return 2
    verbose = False
    severity = 1
    ignore = []
    falsepos = False
    for opt, val in gopts:
        if opt == '-v':
            verbose = True
        elif opt == '-f':
            falsepos = True
        elif opt == '-s':
            severity = int(val)
        elif opt == '-i':
            ignore.append(abspath(val))
    if len(args) == 0:
        path = '.'
    elif len(args) == 1:
        path = args[0]
    else:
        print(usage)
        return 2
    if not exists(path):
        print('Error: path %s does not exist' % path)
        return 2
    # severity -> number of problems found at that severity
    count = defaultdict(int)
    for root, dirs, files in os.walk(path):
        # ignore subdirs in ignore list (prune the walk in place)
        if abspath(root) in ignore:
            del dirs[:]
            continue
        for fn in files:
            fn = join(root, fn)
            if fn[:2] == './':
                fn = fn[2:]
            # ignore files in ignore list
            if abspath(fn) in ignore:
                continue
            ext = splitext(fn)[1]
            checkerlist = checkers.get(ext, None)
            if not checkerlist:
                continue
            if verbose:
                print('Checking %s...' % fn)
            try:
                with open(fn, 'r', encoding='utf-8') as f:
                    lines = list(f)
            except (IOError, OSError) as err:
                # Unreadable file counts as a severity-4 problem.
                print('%s: cannot open: %s' % (fn, err))
                count[4] += 1
                continue
            for checker in checkerlist:
                if checker.falsepositives and not falsepos:
                    continue
                csev = checker.severity
                if csev >= severity:
                    for lno, msg in checker(fn, lines):
                        print('[%d] %s:%d: %s' % (csev, fn, lno, msg))
                        count[csev] += 1
    if verbose:
        print()
    if not count:
        if severity > 1:
            print('No problems with severity >= %d found.' % severity)
        else:
            print('No problems found.')
    else:
        for severity in sorted(count):
            number = count[severity]
            print('%d problem%s with severity %d found.' %
                  (number, number > 1 and 's' or '', severity))
    return int(bool(count))
if __name__ == '__main__':
    # Exit status: 0 clean, 1 problems found, 2 usage error.
    sys.exit(main(sys.argv))
| cc0-1.0 | 5,274,824,342,021,721,000 | 32.078261 | 83 | 0.549422 | false |
GluuFederation/oxd-python | examples/flask_app/demosite.py | 1 | 1649 | import os
import oxdpython
from flask import Flask, render_template, redirect, request, make_response
app = Flask(__name__)
# The oxd client reads its settings from demosite.cfg next to this file.
this_dir = os.path.dirname(os.path.realpath(__file__))
config = os.path.join(this_dir, 'demosite.cfg')
oxc = oxdpython.Client(config)
# If site is not registered, first register it (the guard tests for a
# stored oxd id in the config).
if not oxc.config.get('oxd','id'):
    oxc.register_site()
@app.route('/')
def home():
    """Render the landing page."""
    page = render_template("home.html")
    return page
@app.route('/authorize/')
def authorize():
    """Redirect the user to the OpenID provider's authorization URL."""
    return redirect(oxc.get_authorization_url())
@app.route('/login_callback/')
def login_callback():
    """Handle the OpenID provider's redirect after authentication.

    Exchanges the authorization code for tokens, fetches the user's
    claims, stores identifiers in cookies, and renders the home page.
    """
    # using request from Flask to parse the query string of the callback
    code = request.args.get('code')
    state = request.args.get('state')
    tokens = oxc.get_tokens_by_code(code, state)
    claims = oxc.get_user_info(tokens['access_token'])
    resp = make_response(render_template("home.html"))
    # NOTE(review): cookies are set without secure/httponly flags and the
    # claim values are stored as-is -- confirm this is acceptable outside
    # a demo deployment.
    resp.set_cookie('sub', claims['sub'][0])
    resp.set_cookie('session_id', request.args.get('session_id'))
    return resp
@app.route('/logout/')
def logout():
    """Send the user to the OpenID provider's logout URL."""
    return redirect(oxc.get_logout_uri())
@app.route('/logout_callback/')
def logout_callback():
    """Route called by the OpenID provider when the user logs out.

    Expires the cookies that were set at login.
    """
    resp = make_response('Logging Out')
    for cookie_name in ('sub', 'session_id'):
        resp.set_cookie(cookie_name, 'null', expires=0)
    return resp
@app.route('/post_logout/')
def post_logout():
    """Render the landing page again once logout has completed."""
    return render_template("home.html")
if __name__ == "__main__":
app.run(debug=True, port=8080, ssl_context='adhoc')
| mit | 5,455,942,204,776,373,000 | 23.61194 | 74 | 0.66222 | false |
jlaniau/conquests | xlwt/Row.py | 8 | 11870 | # -*- coding: windows-1252 -*-
from . import BIFFRecords
from . import Style
from .Cell import StrCell, BlankCell, NumberCell, FormulaCell, MulBlankCell, BooleanCell, ErrorCell, \
_get_cells_biff_data_mul
from . import ExcelFormula
import datetime as dt
from .Formatting import Font
from .compat import basestring, xrange, int_types, iteritems
# decimal is in the standard library since Python 2.4; provide a stub so
# isinstance() checks against Decimal still work on older interpreters.
try:
    from decimal import Decimal
except ImportError:
    # Python 2.3: decimal not supported; create dummy Decimal class
    class Decimal(object):
        pass
class Row(object):
    """A single worksheet row.

    Holds row-level formatting (height, outline level, visibility flags)
    plus the row's cells, and serialises both to BIFF records when the
    workbook is saved.  Instances are created by the parent Worksheet.

    Fix vs. previous revision: ``set_cell_formula`` now honours its
    ``calc_flags`` argument instead of always passing 0 to FormulaCell.
    """
    __slots__ = [# private variables
                 "__idx",
                 "__parent",
                 "__parent_wb",
                 "__cells",
                 "__min_col_idx",
                 "__max_col_idx",
                 "__xf_index",
                 "__has_default_xf_index",
                 "__height_in_pixels",
                 # public variables
                 "height",
                 "has_default_height",
                 "height_mismatch",
                 "level",
                 "collapse",
                 "hidden",
                 "space_above",
                 "space_below"]

    def __init__(self, rowx, parent_sheet):
        # .xls rows are limited to 0..65535.
        if not (isinstance(rowx, int_types) and 0 <= rowx <= 65535):
            raise ValueError("row index was %r, not allowed by .xls format" % rowx)
        self.__idx = rowx
        self.__parent = parent_sheet
        self.__parent_wb = parent_sheet.get_parent()
        self.__cells = {}
        self.__min_col_idx = 0
        self.__max_col_idx = 0
        self.__xf_index = 0x0F
        self.__has_default_xf_index = 0
        self.__height_in_pixels = 0x11
        self.height = 0x00FF
        self.has_default_height = 0x00
        self.height_mismatch = 0
        self.level = 0
        self.collapse = 0
        self.hidden = 0
        self.space_above = 0
        self.space_below = 0

    def __adjust_height(self, style):
        """Grow the cached pixel height to fit the font of *style*."""
        twips = style.font.height
        points = float(twips)/20.0
        # Cell height in pixels can be calcuted by following approx. formula:
        # cell height in pixels = font height in points * 83/50 + 2/5
        # It works when screen resolution is 96 dpi
        pix = int(round(points*83.0/50.0 + 2.0/5.0))
        if pix > self.__height_in_pixels:
            self.__height_in_pixels = pix

    def __adjust_bound_col_idx(self, *args):
        """Validate column indexes and widen row/sheet used-column bounds."""
        for arg in args:
            iarg = int(arg)
            if not ((0 <= iarg <= 255) and arg == iarg):
                raise ValueError("column index (%r) not an int in range(256)" % arg)
            sheet = self.__parent
            if iarg < self.__min_col_idx:
                self.__min_col_idx = iarg
            if iarg > self.__max_col_idx:
                self.__max_col_idx = iarg
            if iarg < sheet.first_used_col:
                sheet.first_used_col = iarg
            if iarg > sheet.last_used_col:
                sheet.last_used_col = iarg

    def __excel_date_dt(self, date):
        """Convert a datetime/date/time object to an Excel serial number."""
        adj = False
        if isinstance(date, dt.date):
            if self.__parent_wb.dates_1904:
                epoch_tuple = (1904, 1, 1)
            else:
                epoch_tuple = (1899, 12, 31)
                adj = True
            if isinstance(date, dt.datetime):
                epoch = dt.datetime(*epoch_tuple)
            else:
                epoch = dt.date(*epoch_tuple)
        else: # it's a datetime.time instance
            date = dt.datetime.combine(dt.datetime(1900, 1, 1), date)
            epoch = dt.datetime(1900, 1, 1)
        delta = date - epoch
        xldate = delta.days + delta.seconds / 86400.0
        # Add a day for Excel's missing leap day in 1900
        if adj and xldate > 59:
            xldate += 1
        return xldate

    def get_height_in_pixels(self):
        return self.__height_in_pixels

    def set_style(self, style):
        """Apply *style* as the row's default cell format."""
        self.__adjust_height(style)
        self.__xf_index = self.__parent_wb.add_style(style)
        self.__has_default_xf_index = 1

    def get_xf_index(self):
        return self.__xf_index

    def get_cells_count(self):
        return len(self.__cells)

    def get_min_col(self):
        return self.__min_col_idx

    def get_max_col(self):
        return self.__max_col_idx

    def get_row_biff_data(self):
        """Serialise the row-level (not cell) properties to a ROW record."""
        height_options = (self.height & 0x07FFF)
        height_options |= (self.has_default_height & 0x01) << 15
        options = (self.level & 0x07) << 0
        options |= (self.collapse & 0x01) << 4
        options |= (self.hidden & 0x01) << 5
        options |= (self.height_mismatch & 0x01) << 6
        options |= (self.__has_default_xf_index & 0x01) << 7
        options |= (0x01 & 0x01) << 8
        options |= (self.__xf_index & 0x0FFF) << 16
        options |= (self.space_above & 1) << 28
        options |= (self.space_below & 1) << 29
        return BIFFRecords.RowRecord(self.__idx, self.__min_col_idx,
            self.__max_col_idx, height_options, options).get()

    def insert_cell(self, col_index, cell_obj):
        """Store *cell_obj* at *col_index*, enforcing the overwrite policy."""
        if col_index in self.__cells:
            if not self.__parent._cell_overwrite_ok:
                msg = "Attempt to overwrite cell: sheetname=%r rowx=%d colx=%d" \
                    % (self.__parent.name, self.__idx, col_index)
                raise Exception(msg)
            prev_cell_obj = self.__cells[col_index]
            sst_idx = getattr(prev_cell_obj, 'sst_idx', None)
            if sst_idx is not None:
                # Release the shared-string-table entry of the old cell.
                self.__parent_wb.del_str(sst_idx)
        self.__cells[col_index] = cell_obj

    def insert_mulcells(self, colx1, colx2, cell_obj):
        """Store a multi-column cell; trailing columns hold placeholders."""
        self.insert_cell(colx1, cell_obj)
        for col_index in xrange(colx1+1, colx2+1):
            self.insert_cell(col_index, None)

    def get_cells_biff_data(self):
        # Placeholder (None) entries are skipped; cells are emitted in
        # column order as required by BIFF.
        cell_items = [item for item in iteritems(self.__cells) if item[1] is not None]
        cell_items.sort() # in column order
        return _get_cells_biff_data_mul(self.__idx, cell_items)
        # previously:
        # return ''.join([cell.get_biff_data() for colx, cell in cell_items])

    def get_index(self):
        return self.__idx

    def set_cell_text(self, colx, value, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, StrCell(self.__idx, colx, xf_index, self.__parent_wb.add_str(value)))

    def set_cell_blank(self, colx, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, BlankCell(self.__idx, colx, xf_index))

    def set_cell_mulblanks(self, first_colx, last_colx, style=Style.default_style):
        assert 0 <= first_colx <= last_colx <= 255
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(first_colx, last_colx)
        xf_index = self.__parent_wb.add_style(style)
        # ncols = last_colx - first_colx + 1
        self.insert_mulcells(first_colx, last_colx,
            MulBlankCell(self.__idx, first_colx, last_colx, xf_index))

    def set_cell_number(self, colx, number, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, NumberCell(self.__idx, colx, xf_index, number))

    def set_cell_date(self, colx, datetime_obj, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx,
            NumberCell(self.__idx, colx, xf_index, self.__excel_date_dt(datetime_obj)))

    def set_cell_formula(self, colx, formula, style=Style.default_style, calc_flags=0):
        """Store a formula cell.

        Bug fix: the caller's ``calc_flags`` is now forwarded to
        FormulaCell; previously it was silently replaced with 0.
        """
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.__parent_wb.add_sheet_reference(formula)
        self.insert_cell(colx, FormulaCell(self.__idx, colx, xf_index, formula,
                                           calc_flags=calc_flags))

    def set_cell_boolean(self, colx, value, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, BooleanCell(self.__idx, colx, xf_index, bool(value)))

    def set_cell_error(self, colx, error_string_or_code, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, ErrorCell(self.__idx, colx, xf_index, error_string_or_code))

    def write(self, col, label, style=Style.default_style):
        """Type-dispatching write: pick a cell class based on type(label)."""
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(col)
        style_index = self.__parent_wb.add_style(style)
        if isinstance(label, basestring):
            if len(label) > 0:
                self.insert_cell(col,
                    StrCell(self.__idx, col, style_index, self.__parent_wb.add_str(label))
                    )
            else:
                self.insert_cell(col, BlankCell(self.__idx, col, style_index))
        elif isinstance(label, bool): # bool is subclass of int; test bool first
            self.insert_cell(col, BooleanCell(self.__idx, col, style_index, label))
        elif isinstance(label, int_types+(float, Decimal)):
            self.insert_cell(col, NumberCell(self.__idx, col, style_index, label))
        elif isinstance(label, (dt.datetime, dt.date, dt.time)):
            date_number = self.__excel_date_dt(label)
            self.insert_cell(col, NumberCell(self.__idx, col, style_index, date_number))
        elif label is None:
            self.insert_cell(col, BlankCell(self.__idx, col, style_index))
        elif isinstance(label, ExcelFormula.Formula):
            self.__parent_wb.add_sheet_reference(label)
            self.insert_cell(col, FormulaCell(self.__idx, col, style_index, label))
        elif isinstance(label, (list, tuple)):
            self.__rich_text_helper(col, label, style, style_index)
        else:
            raise Exception("Unexpected data type %r" % type(label))

    def set_cell_rich_text(self, col, rich_text_list, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(col)
        if not isinstance(rich_text_list, (list, tuple)):
            raise Exception("Unexpected data type %r" % type(rich_text_list))
        self.__rich_text_helper(col, rich_text_list, style)

    def __rich_text_helper(self, col, rich_text_list, style, style_index=None):
        """Build a rich-text cell from (text, Font) pairs and bare strings."""
        if style_index is None:
            style_index = self.__parent_wb.add_style(style)
        default_font = None
        rt = []
        for data in rich_text_list:
            if isinstance(data, basestring):
                s = data
                font = default_font
            elif isinstance(data, (list, tuple)):
                if not isinstance(data[0], basestring) or not isinstance(data[1], Font):
                    raise Exception ("Unexpected data type %r, %r" % (type(data[0]), type(data[1])))
                s = data[0]
                font = self.__parent_wb.add_font(data[1])
            else:
                raise Exception ("Unexpected data type %r" % type(data))
            if s:
                rt.append((s, font))
                if default_font is None:
                    # Lazily register the style's font for untagged runs.
                    default_font = self.__parent_wb.add_font(style.font)
        if rt:
            self.insert_cell(col, StrCell(self.__idx, col, style_index, self.__parent_wb.add_rt(rt)))
        else:
            self.insert_cell(col, BlankCell(self.__idx, col, style_index))

    write_blanks = set_cell_mulblanks
    write_rich_text = set_cell_rich_text
| gpl-3.0 | -2,857,220,994,210,055,000 | 38.698997 | 110 | 0.566639 | false |
tianyi33/simple_blog | django/utils/encoding.py | 19 | 9186 | from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
try:
from urllib.parse import quote
except ImportError: # Python 2
from urllib import quote
import warnings
from django.utils.functional import Promise
from django.utils import six
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also remembers the offending object."""

    def __init__(self, obj, *args):
        self.obj = obj
        super(DjangoUnicodeDecodeError, self).__init__(*args)

    def __str__(self):
        base = super(DjangoUnicodeDecodeError, self).__str__()
        return '%s. You passed in %r (%s)' % (base, self.obj, type(self.obj))
class StrAndUnicode(object):
    """
    A class that derives __str__ from __unicode__.

    On Python 2, __str__ returns the output of __unicode__ encoded as a UTF-8
    bytestring. On Python 3, __str__ returns the output of __unicode__.

    Useful as a mix-in. If you support Python 2 and 3 with a single code base,
    you can inherit this mix-in and just define __unicode__.

    Deprecated: prefer defining __str__ and applying the
    @python_2_unicode_compatible decorator instead.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
                      "and apply the @python_2_unicode_compatible decorator "
                      "instead.", PendingDeprecationWarning, stacklevel=2)
        super(StrAndUnicode, self).__init__(*args, **kwargs)
    # The interpreter branch below runs once, at class-definition time, so
    # only one __str__ implementation ever exists on the class.
    if six.PY3:
        def __str__(self):
            return self.__unicode__()
    else:
        def __str__(self):
            return self.__unicode__().encode('utf-8')
def python_2_unicode_compatible(klass):
    """Class decorator defining __unicode__ and __str__ under Python 2.

    On Python 3 the class is returned unchanged.  To support both
    interpreters from one code base, define a __str__ method returning
    text and decorate the class with this.
    """
    if six.PY3:
        return klass
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Return a text object for ``s`` (unicode on Py2, str on Py3).

    Lazy translation objects (Promise) are returned untouched so they
    stay lazy; everything else is delegated to force_text.  When
    ``strings_only`` is True, some non-string-like objects are left
    unconverted.
    """
    if isinstance(s, Promise):
        # Result of a gettext_lazy() call -- keep it lazy.
        return s
    return force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
    """Return True if ``obj`` is of a "protected" type.

    Protected-type instances are preserved as-is when passed to
    force_text(strings_only=True).
    """
    protected = six.integer_types + (
        type(None), float, Decimal,
        datetime.datetime, datetime.date, datetime.time,
        tuple, list, dict,
    )
    return isinstance(obj, protected)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.

    Raises DjangoUnicodeDecodeError (a UnicodeDecodeError subclass that
    records the offending object) when decoding fails.
    """
    # Handle the common case first, saves 30-40% when s is an instance of
    # six.text_type. This function gets called often in that setting.
    if isinstance(s, six.text_type):
        return s
    if strings_only and is_protected_type(s):
        # Caller asked to leave primitives (None, numbers, dates, ...) alone.
        return s
    try:
        if not isinstance(s, six.string_types):
            if hasattr(s, '__unicode__'):
                # Object advertises its own text conversion (Python 2 style).
                s = s.__unicode__()
            else:
                if six.PY3:
                    if isinstance(s, bytes):
                        s = six.text_type(s, encoding, errors)
                    else:
                        s = six.text_type(s)
                else:
                    s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_text(arg, encoding, strings_only,
                    errors) for arg in s])
    return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Return a bytestring version of ``s`` encoded as ``encoding``.

    Lazy translation objects (Promise) are returned untouched so they
    stay lazy; everything else is delegated to force_bytes.  When
    ``strings_only`` is True, some non-string-like objects are left
    unconverted.
    """
    if isinstance(s, Promise):
        # Result of a gettext_lazy() call -- keep it lazy.
        return s
    return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            # Transcoding path: incoming bytes are assumed to be UTF-8.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        # Protected values pass through unchanged.
        return s
    if isinstance(s, Promise):
        # Resolve lazy translation objects to text, then encode.
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                    errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
# Interpreter-dependent aliases, bound once at import time: smart_str /
# force_str always produce the native ``str`` type of the running Python.
if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
    # backwards compatibility for Python 2
    smart_unicode = smart_text
    force_unicode = force_text

smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.

This is suitable for writing to sys.stdout (for instance).
"""

force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
    """Convert an IRI portion to a URI portion suitable for use in a URL.

    Implements the algorithm from section 3.1 of RFC 3987, simplified by
    assuming the input is already UTF-8 or unicode.  Returns an ASCII
    string, or None if ``iri`` is None.
    """
    # The safe set is the RFC 3986 "reserved" set (gen-delims plus
    # sub-delims) together with the unreserved characters urllib.quote does
    # not already treat as safe (only '~'), plus '%': the end of RFC 3987
    # section 3.1 explicitly says existing percent-escapes must not be
    # re-encoded.
    if iri is None:
        return iri
    return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
    """Convert a file system path to a URI portion for inclusion in a URL.

    The input is assumed to be UTF-8 bytes or unicode already.  Characters
    that are special in URIs get percent-encoded, except "'", which is
    valid inside URIs (mirroring JavaScript's encodeURIComponent()).
    Returns an ASCII string, or None if ``path`` is None.
    """
    if path is None:
        return path
    # Backslashes are normalised to forward slashes before quoting; the
    # separators are hardcoded (rather than using os.sep / os.altsep) to
    # keep that flexibility explicit.
    return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")
# The encoding of the default system locale, falling back to the given
# fallback encoding if the encoding is unsupported by Python or could
# not be determined. See tickets #10335 and #5846.
try:
    DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
    codecs.lookup(DEFAULT_LOCALE_ENCODING)
except Exception:
    # Was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the best-effort fallback without
    # masking interpreter-exit signals.
    DEFAULT_LOCALE_ENCODING = 'ascii'
| mit | -4,645,230,983,217,476,000 | 36.647541 | 80 | 0.61953 | false |
doctaphred/projecteuler | projecteuler/__main__.py | 1 | 1036 | # -*- coding: utf-8 -*-
import sys
from .problem import Problem
def main():
    """Discover every Problem solution, run them all, and report results.

    Exits with status 1 when no problems are found or any answer is wrong.
    """
    problems = list(Problem.discover())
    if not problems:
        print('Did not find any problems!')
        sys.exit(1)
    count = len(problems)
    if count == 1:
        print('1 problem attempted')
    else:
        print(count, 'problems attempted')
    for index, problem in enumerate(problems, start=1):
        print()
        print('{}/{}: Solving problem {}...'.format(index, count, problem.number))
        problem.solve()
        print('Answer:', problem.answer)
        print(problem)
    print()
    elapsed = sum(problem.time.total_seconds() for problem in problems)
    print(elapsed, 'seconds total')
    solved = sum(problem.correct for problem in problems)
    print('{}/{} correct'.format(solved, count))
    if solved == count:
        print('You win!')
    else:
        print('FAILURE')
        sys.exit(1)
if __name__ == '__main__':
    # Entry point when executed as ``python -m projecteuler``.
    main()
| gpl-3.0 | -5,461,986,150,749,626,000 | 23.093023 | 77 | 0.586873 | false |
h1ds/h1ds | h1ds/h1ds_summary/forms.py | 1 | 1655 | from django import forms
from h1ds.models import Device
from h1ds_summary.models import SummaryAttribute
from h1ds_summary.validators import shotslug_validator, attributeslug_validator
from h1ds_summary.parsers import parse_shot_slug, parse_attr_str
class SummaryAttributeForm(forms.ModelForm):
    """ModelForm for SummaryAttribute with the device field hidden."""
    class Meta:
        model = SummaryAttribute
        # The device is determined by the data URL, so don't give user the
        # option to modify it.
        exclude = ('device', )
class ControlPanelForm(forms.Form):
    """Select a device plus shot/attribute slugs for recomputation."""
    # NOTE(review): this queryset is evaluated once at import time, so
    # devices created after process start will not appear until a restart
    # -- confirm this is intended.
    DEVICE_CHOICES = tuple((d.slug, d.name) for d in Device.objects.all())
    device = forms.ChoiceField(widget=forms.Select, choices=DEVICE_CHOICES)
    shots = forms.CharField(max_length=256,
                            validators=[shotslug_validator],
                            help_text='list (url syntax) of shots to recompute, or "all"')
    attributes = forms.CharField(max_length=264,
                                 validators=[attributeslug_validator],
                                 help_text='list (url syntax) of attributes to recompute, or "all"')

    def get_cleaned_data_for_device(self, device):
        # Parse the validated slug strings into concrete shot/attribute
        # values for the given device.
        cleaned_data = self.clean()
        cleaned_data['shots'] = parse_shot_slug(device, cleaned_data['shots'])
        cleaned_data['attributes'] = parse_attr_str(device, cleaned_data['attributes'])
        return cleaned_data
class RawSqlForm(forms.Form):
    """Form for entering raw SELECT/WHERE SQL fragments for a device."""
    # NOTE(review): choices are evaluated once at import time; devices
    # created later will not appear until the process restarts.
    DEVICE_CHOICES = tuple((d.slug, d.name) for d in Device.objects.all())
    device = forms.ChoiceField(widget=forms.Select, choices=DEVICE_CHOICES)
    select = forms.CharField(widget=forms.Textarea)
    where = forms.CharField(widget=forms.Textarea)
| mit | -214,507,504,021,422,080 | 47.676471 | 100 | 0.672508 | false |
Learning-from-our-past/Kaira | qtgui/layouts/ui_mainwindow.py | 2 | 11342 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.4.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI scaffolding generated by pyuic5 from mainwindow.ui.

    Do not edit by hand -- regenerate from the .ui file instead; any manual
    change is lost on the next pyuic5 run.
    """
    def setupUi(self, MainWindow):
        """Build the widget tree, layouts, menus and actions on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1123, 694)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/icon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        # Central widget with a horizontal three-pane layout.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Left pane: entries filter combo box plus the entries list.
        self.entriesFrame = QtWidgets.QFrame(self.centralwidget)
        self.entriesFrame.setFrameShape(QtWidgets.QFrame.Panel)
        self.entriesFrame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.entriesFrame.setObjectName("entriesFrame")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.entriesFrame)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.entriesControlLayout = QtWidgets.QFormLayout()
        self.entriesControlLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.entriesControlLayout.setObjectName("entriesControlLayout")
        self.entriesLabel = QtWidgets.QLabel(self.entriesFrame)
        self.entriesLabel.setObjectName("entriesLabel")
        self.entriesControlLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.entriesLabel)
        self.entriesComboBox = QtWidgets.QComboBox(self.entriesFrame)
        self.entriesComboBox.setMaxVisibleItems(20)
        self.entriesComboBox.setObjectName("entriesComboBox")
        self.entriesComboBox.addItem("")
        self.entriesControlLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.entriesComboBox)
        self.verticalLayout_4.addLayout(self.entriesControlLayout)
        self.entriestListView = EntriesListView(self.entriesFrame)
        self.entriestListView.setStyleSheet("QListWidget {\n"
"alternate-background-color: #EDEDED;\n"
"background-color: white;\n"
"}")
        self.entriestListView.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.entriestListView.setAlternatingRowColors(True)
        self.entriestListView.setObjectName("entriestListView")
        self.verticalLayout_4.addWidget(self.entriestListView)
        self.horizontalLayout_2.addWidget(self.entriesFrame)
        # Middle pane: previous / current / next entry text editors.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.previousFrame = QtWidgets.QFrame(self.centralwidget)
        self.previousFrame.setFrameShape(QtWidgets.QFrame.Panel)
        self.previousFrame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.previousFrame.setObjectName("previousFrame")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.previousFrame)
        self.verticalLayout_3.setContentsMargins(-1, -1, -1, 12)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.previousControlsLayout = QtWidgets.QHBoxLayout()
        self.previousControlsLayout.setSpacing(2)
        self.previousControlsLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.previousControlsLayout.setContentsMargins(-1, -1, -1, 0)
        self.previousControlsLayout.setObjectName("previousControlsLayout")
        self.previousEntryLabel = QtWidgets.QLabel(self.previousFrame)
        self.previousEntryLabel.setObjectName("previousEntryLabel")
        self.previousControlsLayout.addWidget(self.previousEntryLabel)
        self.verticalLayout_3.addLayout(self.previousControlsLayout)
        self.previousEntryTextEdit = QtWidgets.QPlainTextEdit(self.previousFrame)
        self.previousEntryTextEdit.setObjectName("previousEntryTextEdit")
        self.verticalLayout_3.addWidget(self.previousEntryTextEdit)
        self.rawTextLabel = QtWidgets.QLabel(self.previousFrame)
        self.rawTextLabel.setObjectName("rawTextLabel")
        self.verticalLayout_3.addWidget(self.rawTextLabel)
        self.rawTextTextEdit = QtWidgets.QPlainTextEdit(self.previousFrame)
        self.rawTextTextEdit.setObjectName("rawTextTextEdit")
        self.verticalLayout_3.addWidget(self.rawTextTextEdit)
        self.label = QtWidgets.QLabel(self.previousFrame)
        self.label.setObjectName("label")
        self.verticalLayout_3.addWidget(self.label)
        self.nextEntryTextEdit = QtWidgets.QPlainTextEdit(self.previousFrame)
        self.nextEntryTextEdit.setObjectName("nextEntryTextEdit")
        self.verticalLayout_3.addWidget(self.nextEntryTextEdit)
        self.gridLayout.addWidget(self.previousFrame, 0, 1, 1, 1)
        self.horizontalLayout_2.addLayout(self.gridLayout)
        # Right pane: sortable tree view of the parsed entry attributes.
        self.treeView = EntryTreeView(self.centralwidget)
        self.treeView.setMinimumSize(QtCore.QSize(631, 0))
        self.treeView.setMaximumSize(QtCore.QSize(631, 16777215))
        self.treeView.setUniformRowHeights(True)
        self.treeView.setSortingEnabled(True)
        self.treeView.setAnimated(True)
        self.treeView.setWordWrap(True)
        self.treeView.setExpandsOnDoubleClick(False)
        self.treeView.setObjectName("treeView")
        self.treeView.header().setCascadingSectionResizes(True)
        self.treeView.header().setDefaultSectionSize(100)
        self.treeView.header().setHighlightSections(True)
        self.treeView.header().setMinimumSectionSize(399)
        self.treeView.header().setStretchLastSection(True)
        self.horizontalLayout_2.addWidget(self.treeView)
        self.verticalLayout_5.addLayout(self.horizontalLayout_2)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar, status bar and tool bar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1123, 21))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuExport = QtWidgets.QMenu(self.menuFile)
        self.menuExport.setObjectName("menuExport")
        self.menuImport = QtWidgets.QMenu(self.menuFile)
        self.menuImport.setObjectName("menuImport")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.toolBar = QtWidgets.QToolBar(MainWindow)
        self.toolBar.setObjectName("toolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # Actions (menu/toolbar commands).
        self.actionOpen_XML_for_analyze = QtWidgets.QAction(MainWindow)
        self.actionOpen_XML_for_analyze.setObjectName("actionOpen_XML_for_analyze")
        self.actionCsv = QtWidgets.QAction(MainWindow)
        self.actionCsv.setObjectName("actionCsv")
        self.actionJSON = QtWidgets.QAction(MainWindow)
        self.actionJSON.setObjectName("actionJSON")
        self.actionOpen_txt = QtWidgets.QAction(MainWindow)
        self.actionOpen_txt.setObjectName("actionOpen_txt")
        self.actionSave_changes_to_xml = QtWidgets.QAction(MainWindow)
        self.actionSave_changes_to_xml.setObjectName("actionSave_changes_to_xml")
        self.actionFrom_txt_OCR = QtWidgets.QAction(MainWindow)
        self.actionFrom_txt_OCR.setObjectName("actionFrom_txt_OCR")
        self.actionRun_analysis_for_all = QtWidgets.QAction(MainWindow)
        self.actionRun_analysis_for_all.setObjectName("actionRun_analysis_for_all")
        self.actionRun_analysis_for_current = QtWidgets.QAction(MainWindow)
        self.actionRun_analysis_for_current.setObjectName("actionRun_analysis_for_current")
        self.actionCreate_a_new_Person = QtWidgets.QAction(MainWindow)
        self.actionCreate_a_new_Person.setObjectName("actionCreate_a_new_Person")
        self.actionSave = QtWidgets.QAction(MainWindow)
        self.actionSave.setObjectName("actionSave")
        self.actionAbout = QtWidgets.QAction(MainWindow)
        self.actionAbout.setObjectName("actionAbout")
        self.menuExport.addAction(self.actionCsv)
        self.menuExport.addAction(self.actionJSON)
        self.menuImport.addAction(self.actionFrom_txt_OCR)
        self.menuFile.addAction(self.actionOpen_XML_for_analyze)
        self.menuFile.addAction(self.actionSave)
        self.menuFile.addAction(self.actionSave_changes_to_xml)
        self.menuFile.addAction(self.menuExport.menuAction())
        self.menuFile.addAction(self.menuImport.menuAction())
        self.menuFile.addAction(self.actionAbout)
        self.menubar.addAction(self.menuFile.menuAction())
        self.toolBar.addAction(self.actionCreate_a_new_Person)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (wrapped for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Kaira"))
        self.entriesLabel.setText(_translate("MainWindow", "Entries "))
        self.entriesComboBox.setItemText(0, _translate("MainWindow", "All"))
        self.previousEntryLabel.setText(_translate("MainWindow", "Previous entry"))
        self.rawTextLabel.setText(_translate("MainWindow", "Current entry"))
        self.label.setText(_translate("MainWindow", "Next entry"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuExport.setTitle(_translate("MainWindow", "Export"))
        self.menuImport.setTitle(_translate("MainWindow", "Import"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.actionOpen_XML_for_analyze.setText(_translate("MainWindow", "Open xml"))
        self.actionCsv.setText(_translate("MainWindow", "CSV"))
        self.actionJSON.setText(_translate("MainWindow", "JSON"))
        self.actionOpen_txt.setText(_translate("MainWindow", "Open txt"))
        self.actionSave_changes_to_xml.setText(_translate("MainWindow", "Save As..."))
        self.actionFrom_txt_OCR.setText(_translate("MainWindow", "From OCR"))
        self.actionRun_analysis_for_all.setText(_translate("MainWindow", "Run analysis for all"))
        self.actionRun_analysis_for_all.setToolTip(_translate("MainWindow", "Run analysis for all entries in current file"))
        self.actionRun_analysis_for_all.setShortcut(_translate("MainWindow", "Ctrl+Shift+R"))
        self.actionRun_analysis_for_current.setText(_translate("MainWindow", "Run analysis for current"))
        self.actionRun_analysis_for_current.setToolTip(_translate("MainWindow", "Run extraction for current person and update attributes"))
        self.actionRun_analysis_for_current.setShortcut(_translate("MainWindow", "Ctrl+R"))
        self.actionCreate_a_new_Person.setText(_translate("MainWindow", "Create a new Person"))
        self.actionCreate_a_new_Person.setToolTip(_translate("MainWindow", "Create a new person from rawtext"))
        self.actionSave.setText(_translate("MainWindow", "&Save"))
        self.actionAbout.setText(_translate("MainWindow", "About"))
from qtgui.entriesModels import EntriesListView
from qtgui.entrytree import EntryTreeView
| gpl-2.0 | -7,739,739,108,013,947,000 | 59.652406 | 139 | 0.731441 | false |
thomasmf/nomenine | src/core/runtime/parser/integer.py | 1 | 1585 |
OBJECT( 'PARSE_TOKEN_INTEGER',
methods = [
MTID_IS( 'TYPE' ),
MS( ARG( CW( 'consume' ), CG( 'LIST', 'phrase' ) ), """
JUMP__consume_LIST( $CA(FRAME__PARSE_TOKEN_INTEGER_0_new( CONTEXT )), $CA(PLUS_new( $CA(RANGE_TYPE_new( $CA(CHARACTER_new( '0' )), $CA(CHARACTER_new( '9' )) )) )), PARAM_phrase ) ;
""" ),
]
)
FRAME( 'PARSE_TOKEN_INTEGER_0',
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
REFERENCE reference = nom_reference_new( $NONE ) ;
JUMP__value( $CA(FRAME__SPLIT_new( $CA(FRAME__PARSE_TOKEN_INTEGER_1_new( ACTION->parent, reference )), PARAM_value, reference )), PARAM_value ) ;
""" ),
]
)
FRAME( 'PARSE_TOKEN_INTEGER_1',
attributes = [
A( 'REFERENCE', 'value' ),
],
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
JUMP__join_STRING( $CA(FRAME__PARSE_TOKEN_INTEGER_2_new( ACTION->parent, PARAM_value )), ACTION->value->value, STRING_new( "" ) ) ;
""" ),
]
)
FRAME( 'PARSE_TOKEN_INTEGER_2',
attributes = [
A( 'ANY', 'next' ),
],
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
JUMP__produce_TID__INTEGER_FACTORY_single( $CA(FRAME__PARSE_TOKEN_INTEGER_3_new( ACTION->parent, ACTION->next )), PARAM_value, $CA(INTEGER_FACTORY_single()) ) ;
""" ),
]
)
FRAME( 'PARSE_TOKEN_INTEGER_3',
attributes = [
A( 'ANY', 'next' ),
],
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
JUMP__return_ANY( ACTION->parent, ACTION->parent, $CA(ELEMENT_new( PARAM_value, ACTION->next )) ) ;
""" ),
]
)
| mit | -6,288,916,120,556,903,000 | 28.351852 | 186 | 0.546372 | false |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py | 1 | 4629 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
from paddle.fluid.tests.unittests.op_test import (skip_check_grad_ci,
convert_uint16_to_float)
from paddle.fluid.tests.unittests.test_lookup_table_bf16_op import (
_lookup, TestLookupTableBF16Op, TestLookupTableBF16OpIds4D,
TestLookupTableBF16OpWIsSelectedRows,
TestLookupTableBF16OpWIsSelectedRows4DIds)
import paddle.fluid as fluid
import paddle.fluid.core as core
class TestLookupTableV2BF16Op(TestLookupTableBF16Op):
    """bfloat16 lookup_table_v2 with a rank-1 ids input."""

    def init_test(self):
        # Same fixture as the v1 base test, retargeted at the v2 operator.
        self.mkldnn_data_type = "bfloat16"
        self.op_type = "lookup_table_v2"
        self.ids_shape = (4)  # NOTE(review): scalar 4, not a tuple -- presumably intentional for rank-1 ids, confirm
class TestLookupTableV2BF16OpIds4D(TestLookupTableBF16OpIds4D):
    """bfloat16 lookup_table_v2 with a multi-dimensional ids input."""

    def init_test(self):
        self.mkldnn_data_type = "bfloat16"
        self.op_type = "lookup_table_v2"
        self.ids_shape = (2, 4, 5)
class TestLookupTableV2BF16OpWIsSelectedRows(
        TestLookupTableBF16OpWIsSelectedRows):
    """bfloat16 lookup_table_v2 where W is a SelectedRows tensor."""

    def init_test(self):
        self.ids_shape = (10)
        self.op_type = "lookup_table_v2"
class TestLookupTableV2BF16OpWIsSelectedRows4DIds(
        TestLookupTableBF16OpWIsSelectedRows4DIds):
    """bfloat16 lookup_table_v2, SelectedRows W, multi-dimensional ids."""

    def init_test(self):
        self.ids_shape = (3, 4, 5)
        self.op_type = "lookup_table_v2"
class TestLookupTableBF16OpWithPadding(TestLookupTableV2BF16Op):
    """Picks a random id as padding_idx and checks its rows come out zero."""

    def test_check_output(self):
        flat_ids = np.squeeze(self.inputs['Ids'])
        chosen = np.random.choice(flat_ids, 1)[0]
        # Rows whose id equals the padding index must be all-zero embeddings.
        self.outputs['Out'][flat_ids == chosen] = np.zeros(31)
        self.attrs = {'padding_idx': int(chosen)}
        self.check_output_with_place(core.CPUPlace())
class TestLookupTableBF16OpIds4DPadding(TestLookupTableV2BF16OpIds4D):
    """Padding variant of the multi-dimensional ids test."""

    def test_check_output(self):
        ids_tensor = self.inputs['Ids']
        chosen = np.random.choice(ids_tensor.flatten(), 1)[0]
        # Zero out every output row addressed by the chosen padding id.
        self.outputs['Out'][np.squeeze(ids_tensor == chosen)] = np.zeros(31)
        self.attrs = {'padding_idx': int(chosen)}
        self.check_output_with_place(core.CPUPlace())
class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase):
    """
    Test embedding layer from input api and results for bfloat16
    """

    def set_initializer(self):
        # Constant initializer so both weights and lookups have a known value.
        self.initializer = fluid.initializer.Constant(value=self.value)

    def setUp(self):
        self.op_type = "lookup_table_v2"
        self.ids_shape = [4]
        self.w_shape = [10, 64]
        # Random ids within the table; flattened copy used by the reference
        # lookup in test_lookup_results.
        self.ids = np.random.randint(
            low=0, high=9, size=self.ids_shape).astype("int64")
        self.flat_ids = self.ids.flatten()
        self.value = 3.0
        # fp32 reference weight matrix filled with the constant value.
        self.w_fp32 = np.full(self.w_shape, self.value)
        self.place = fluid.CPUPlace()
        self.prog = fluid.Program()
        self.startup_prog = fluid.Program()
        self.set_initializer()
        # Build the graph inside program_guard so the layers land in
        # self.prog / self.startup_prog rather than the global programs.
        with fluid.program_guard(self.prog, self.startup_prog):
            x = fluid.layers.data(name='x', shape=self.ids_shape, dtype='int64')
            self.emb = fluid.input.embedding(
                input=x,
                size=self.w_shape,
                param_attr=fluid.ParamAttr(
                    name="emb_weight", initializer=self.initializer),
                is_sparse=False,
                dtype="uint16")  # bfloat16
        exe = fluid.Executor(self.place)
        exe.run(self.startup_prog)
        # Fetch both the raw (uint16-encoded bf16) weights and the lookup.
        self.result = exe.run(self.prog,
                              feed={'x': self.ids},
                              fetch_list=['emb_weight', self.emb])

    def test_embedding_weights(self):
        # The bf16 weights, decoded to float, must equal the fp32 reference.
        result = convert_uint16_to_float(self.result[0])
        self.assertTrue(np.array_equal(self.w_fp32, result))

    def test_lookup_results(self):
        # The lookup output must match the pure-numpy reference lookup.
        lookup_result = convert_uint16_to_float(self.result[1])
        lookup_ref = _lookup(self.w_fp32, self.ids, self.flat_ids, self.op_type)
        self.assertTrue(np.array_equal(lookup_result, lookup_ref))
# Running this file directly executes the suite in static-graph mode.
if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
| apache-2.0 | 5,117,035,170,479,563,000 | 35.738095 | 80 | 0.650897 | false |
adereis/avocado | avocado/core/virt.py | 1 | 10262 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Ruda Moura <[email protected]>
"""
Module to provide classes for Virtual Machines.
"""
import logging
import time
from xml.dom import minidom
from . import remoter
LOG = logging.getLogger('avocado.test')

# libvirt bindings are optional; when missing, virt support is disabled
# and VIRT_CAPABLE advertises that to the rest of the test runner.
try:
    import libvirt
except ImportError:
    VIRT_CAPABLE = False
    LOG.info('Virt module is disabled: could not import libvirt')
else:
    VIRT_CAPABLE = True

# Remote (SSH) access is needed to log into guests, so virt support also
# requires the remote module to be usable.
if remoter.REMOTE_CAPABLE is False:
    VIRT_CAPABLE = False
    LOG.info('Virt module is disabled: remote module is disabled')
class VirtError(Exception):
    """Generic exception used to propagate underlying errors to the caller."""
class Hypervisor(object):

    """
    Thin handle around a libvirt hypervisor connection.
    """

    def __init__(self, uri=None):
        """
        Create a (not yet connected) hypervisor handle.

        :param uri: the libvirt connection URI.
        """
        self.uri = uri
        self.connection = None
        self.connected = False

    def __str__(self):
        return "{0}({1})".format(self.__class__.__name__, self.uri)

    @property
    def domains(self):
        """
        List of all domains known to the hypervisor.

        :return: a list of instances of :class:`libvirt.virDomain`.
        """
        return self.connection.listAllDomains()

    def connect(self):
        """
        Open the libvirt connection (only once).

        :return: the connection on first success; ``None`` when already
                 connected or when the connection attempt failed.
        """
        if self.connected:
            return None
        try:
            libvirt.registerErrorHandler(self.handler, 'context')
            self.connection = libvirt.open(self.uri)
        except libvirt.libvirtError:
            self.connected = False
            return None
        self.connected = True
        return self.connection

    def find_domain_by_name(self, name):
        """
        Look up a domain by its name.

        :param name: the domain name to search for.
        :return: an instance of :class:`libvirt.virDomain`, or ``None``.
        """
        return next((domain for domain in self.domains
                     if domain.name() == name), None)

    @staticmethod
    def handler(ctxt, err):
        """
        Overrides the libvirt default error handler so messages from
        expected libvirt exceptions are not printed to stdout.
        """
class VM(object):

    """
    The Virtual Machine handler class.

    Wraps a :class:`libvirt.virDomain` together with the owning
    :class:`Hypervisor`, adding snapshot helpers, remote login and
    guest-agent based IP discovery.
    """

    def __init__(self, hypervisor, domain):
        """
        Creates an instance of VM class.

        :param hypervisor: an instance of :class:`Hypervisor`.
        :param domain: an instance of :class:`libvirt.virDomain`.
        """
        self.hypervisor = hypervisor
        self.domain = domain
        # set by setup_login() once an SSH session has been verified
        self.logged = False
        # last snapshot created via create_snapshot(), if any
        self.snapshot = None

    def __str__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__,
                                   self.hypervisor.uri,
                                   self.domain.name())

    @property
    def is_active(self):
        """
        Property to check if VM is active.

        :return: if VM is active.
        :rtype: Boolean
        """
        return bool(self.domain.isActive())

    @property
    def name(self):
        """
        Property with the name of VM.

        :return: the name of VM.
        """
        return self.domain.name()

    @property
    def state(self):
        """
        Property with the state of VM.

        :return: current state name.
        """
        # index into this list comes from libvirt's domain info state code
        states = ['No state',
                  'Running',
                  'Blocked',
                  'Paused',
                  'Shutting down',
                  'Shutoff',
                  'Crashed']
        return states[self.domain.info()[0]]

    def start(self):
        """
        Start VM.
        """
        if self.is_active is False:
            self.domain.create()

    def suspend(self):
        """
        Suspend VM.
        """
        if self.is_active:
            self.domain.suspend()

    def resume(self):
        """
        Resume VM.
        """
        if self.is_active:
            self.domain.resume()

    def reboot(self):
        """
        Reboot VM.
        """
        if self.is_active:
            self.domain.reboot()

    def shutdown(self):
        """
        Shutdown VM.
        """
        if self.is_active:
            self.domain.shutdown()

    def reset(self):
        """
        Reset VM.
        """
        if self.is_active:
            self.domain.reset()

    def stop(self):
        """
        Stop VM.
        """
        if self.is_active:
            self.domain.destroy()

    def _system_checkpoint_xml(self, name=None, description=None):
        # Build the <domainsnapshot> XML document libvirt expects for a
        # "system checkpoint" snapshot; name is optional, description
        # defaults to a marker identifying the test runner.
        def create_element(doc, tag, text):
            el = doc.createElement(tag)
            txt = doc.createTextNode(text)
            el.appendChild(txt)
            return el
        doc = minidom.Document()
        root = doc.createElement('domainsnapshot')
        doc.appendChild(root)
        if name is not None:
            root.appendChild(create_element(doc, 'name', name))
        if description is None:
            description = 'Avocado Test Runner'
        root.appendChild(create_element(doc, 'description', description))
        return doc.toxml()

    @property
    def snapshots(self):
        # Names of all snapshots currently known to libvirt for this domain.
        return self.domain.snapshotListNames()

    def create_snapshot(self, name=None):
        """
        Creates a snapshot of kind 'system checkpoint'.
        """
        xml = self._system_checkpoint_xml(name)
        self.snapshot = self.domain.snapshotCreateXML(xml)
        return self.snapshot

    def revert_snapshot(self):
        """
        Revert to previous snapshot.
        """
        if self.snapshot is not None:
            self.domain.revertToSnapshot(self.snapshot)

    def delete_snapshot(self):
        """
        Delete the current snapshot.
        """
        if self.snapshot is not None:
            self.snapshot.delete()
            # forget the handle so revert/delete become no-ops afterwards
            self.snapshot = None

    def restore_snapshot(self):
        """
        Revert to previous snapshot and delete the snapshot point.
        """
        self.revert_snapshot()
        self.delete_snapshot()

    def setup_login(self, hostname, username, password=None):
        """
        Setup login on VM.

        On success this stores the remote session as ``self.remote`` and
        sets ``self.logged`` accordingly.

        :param hostname: the hostname.
        :param username: the username.
        :param password: the password.
        """
        if not self.logged:
            self.remote = remoter.Remote(hostname, username, password)
            # a successful `uptime` proves the SSH session actually works
            res = self.remote.uptime()
            if res.succeeded:
                self.logged = True
            else:
                self.logged = False

    def ip_address(self, timeout=30):
        """
        Returns the domain IP address consulting qemu-guest-agent
        through libvirt.

        Polls once per second until the agent answers or ``timeout``
        seconds have passed.

        :returns: either the IP address or None if not found
        :rtype: str or None
        """
        timelimit = time.time() + timeout
        while True:
            try:
                ip = self._get_ip_from_libvirt_agent()
                if ip is not None:
                    return ip
            except libvirt.libvirtError as exception:
                # Qemu guest agent takes time to be ready, but
                # libvirt raises an exception here if it's not.
                # Let's be nice and wait for the guest agent, if
                # that's the problem.
                errno = libvirt.VIR_ERR_AGENT_UNRESPONSIVE
                if exception.get_error_code() == errno:
                    pass
                else:
                    return None
            if time.time() > timelimit:
                return None
            time.sleep(1)

    def _get_ip_from_libvirt_agent(self):
        """
        Retrieves from libvirt/qemu-guest-agent the first IPv4
        non-loopback IP from the first non-loopback device.

        Libvirt response example:

        {'ens3': {'addrs': [{'addr': '192.168.122.4',
                             'prefix': 24,
                             'type': 0},
                            {'addr': 'fe80::5054:ff:fe0c:9c9b',
                             'prefix': 64,
                             'type': 1}],
                  'hwaddr': '52:54:00:0c:9c:9b'},
         'lo': {'addrs': [{'addr': '127.0.0.1',
                           'prefix': 8,
                           'type': 0},
                          {'addr': '::1',
                           'prefix': 128,
                           'type': 1}],
                'hwaddr': '00:00:00:00:00:00'}}

        :return: either the IP address or None if not found.
        """
        querytype = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT
        ipversion = libvirt.VIR_IP_ADDR_TYPE_IPV4
        ifaces = self.domain.interfaceAddresses(querytype)
        # NOTE: iteritems() is Python-2 only, consistent with this module.
        for iface, data in ifaces.iteritems():
            # skip interfaces with no addresses and the loopback device
            # (identified by its all-zero hardware address)
            if data['addrs'] and data['hwaddr'] != '00:00:00:00:00:00':
                ip_addr = data['addrs'][0]['addr']
                ip_type = data['addrs'][0]['type']
                if ip_type == ipversion and not ip_addr.startswith('127.'):
                    return ip_addr
        return None
def vm_connect(domain_name, hypervisor_uri='qemu:///system'):
    """
    Connect to a Virtual Machine.

    :param domain_name: the libvirt domain name.
    :param hypervisor_uri: the hypervisor connection URI.
    :raises VirtError: when the hypervisor is unreachable or the domain
                       does not exist.
    :return: an instance of :class:`VM`
    """
    hypervisor = Hypervisor(hypervisor_uri)
    connection = hypervisor.connect()
    if connection is None:
        raise VirtError('Cannot connect to hypervisor at "%s"' %
                        hypervisor_uri)
    domain = hypervisor.find_domain_by_name(domain_name)
    if domain is None:
        raise VirtError('Domain "%s" could not be found' %
                        domain_name)
    return VM(hypervisor, domain)
| gpl-2.0 | -6,645,299,367,930,012,000 | 26.586022 | 75 | 0.528747 | false |
xleng/YCM_WIN_X86 | third_party/ycmd/ycmd/identifier_utils.py | 6 | 4186 | #!/usr/bin/env python
#
# Copyright (C) 2014 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import re
COMMENT_AND_STRING_REGEX = re.compile(
  "//.*?$" # Anything following '//'
  "|"
  "#.*?$" # Anything following '#'
  "|"
  "/\*(?:\n|.)*?\*/" # C-style comments, '/* ... */'
  "|"
  # Python-style multi-line single-quote string
  "'''(?:\n|.)*?'''"
  "|"
  # Python-style multi-line double-quote string
  '"""(?:\n|.)*?"""'
  "|"
  # Anything inside single quotes, '...', but mind:
  #  1. that the starting single quote is not escaped
  #  2. the escaped slash (\\)
  #  3. the escaped single quote inside the string
  r"(?<!\\)'(?:\\\\|\\'|.)*?'"
  "|"
  # Anything inside double quotes, "...", but mind:
  #  1. that the starting double quote is not escaped
  #  2. the escaped slash (\\)
  #  3. the escaped double quote inside the string
  r'(?<!\\)"(?:\\\\|\\"|.)*?"', re.MULTILINE )

DEFAULT_IDENTIFIER_REGEX = re.compile( r"[_a-zA-Z]\w*", re.UNICODE )

FILETYPE_TO_IDENTIFIER_REGEX = {
    # Spec: http://www.w3.org/TR/CSS2/syndata.html#characters
    # Good summary: http://stackoverflow.com/a/449000/1672783
    'css': re.compile( r"-?[_a-zA-Z]+[\w-]+", re.UNICODE ),

    # Spec: http://www.w3.org/TR/html5/syntax.html#tag-name-state
    # But not quite since not everything we want to pull out is a tag name. We
    # also want attribute names (and probably unquoted attribute values).
    'html': re.compile( r"[a-zA-Z][^\s/>='\"]*", re.UNICODE ),

    # Spec: http://cran.r-project.org/doc/manuals/r-release/R-lang.pdf
    # Section 10.3.2.
    # Can be any sequence of '.', '_' and alphanum BUT can't start with:
    #   - '.' followed by digit
    #   - digit
    #   - '_'
    'r': re.compile( r"(?!(?:\.\d|\d|_))[\.\w]+", re.UNICODE ),

    # Spec: http://clojure.org/reader
    # Section: Symbols
    'clojure': re.compile(
         r"[-\*\+!_\?:\.a-zA-Z][-\*\+!_\?:\.\w]*/?[-\*\+!_\?:\.\w]*",
         re.UNICODE ),

    # Spec: http://www.haskell.org/onlinereport/lexemes.html
    # Section 2.4
    'haskell': re.compile( r"[_a-zA-Z][\w']*", re.UNICODE ),
}

FILETYPE_TO_IDENTIFIER_REGEX[ 'scss' ] = FILETYPE_TO_IDENTIFIER_REGEX[ 'css' ]
FILETYPE_TO_IDENTIFIER_REGEX[ 'less' ] = FILETYPE_TO_IDENTIFIER_REGEX[ 'css' ]


def IdentifierRegexForFiletype( filetype ):
  """Return the identifier regex for |filetype|, or the default one."""
  return FILETYPE_TO_IDENTIFIER_REGEX.get( filetype, DEFAULT_IDENTIFIER_REGEX )


def RemoveIdentifierFreeText( text ):
  """Strip comments and string literals out of |text|."""
  return COMMENT_AND_STRING_REGEX.sub( '', text )


def ExtractIdentifiersFromText( text, filetype = None ):
  """Return all identifiers found in |text| for the given |filetype|."""
  return re.findall( IdentifierRegexForFiletype( filetype ), text )


def IsIdentifier( text, filetype = None ):
  """Return True when the whole of |text| is a single identifier."""
  if not text:
    return False
  regex = IdentifierRegexForFiletype( filetype )
  match = regex.match( text )
  # bool() so this is a strict predicate; previously a failed match
  # returned None instead of False.
  return bool( match ) and match.end() == len( text )


# index is 0-based and EXCLUSIVE, so ("foo.", 3) -> 0
# Works with both unicode and str objects.
# Returns the index on bad input.
def StartOfLongestIdentifierEndingAtIndex( text, index, filetype = None ):
  if not text or index < 1 or index > len( text ):
    return index
  # range() instead of the Python-2-only xrange(); semantics are identical.
  for i in range( index ):
    if IsIdentifier( text[ i : index ], filetype ):
      return i
  return index


# If the index is not on a valid identifier, it searches forward until a valid
# identifier is found. Returns the identifier.
def IdentifierAtIndex( text, index, filetype = None ):
  if index > len( text ):
    return ''
  for match in IdentifierRegexForFiletype( filetype ).finditer( text ):
    if match.end() > index:
      return match.group()
  return ''
| gpl-3.0 | 8,896,798,435,556,643,000 | 32.488 | 79 | 0.641424 | false |
open-synergy/opensynid-fleet | fleet_work_order_passanger/wizards/passanger_boarding_disembark.py | 1 | 4409 | # -*- coding: utf-8 -*-
# © 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models, fields
class PassangerBoardingDisembark(models.TransientModel):
    """Wizard that scans passanger codes for a work order, toggles the
    passanger between boarding and disembarking and pulses the vehicle's
    door relay on every successful scan."""
    _name = "fleet.passanger_boarding_disembark"
    _description = "Passanger Boarding/Disembark"

    work_order_id = fields.Many2one(
        string="# Work Order",
        comodel_name="fleet.work.order"
    )

    @api.model
    def _default_door_relay(self):
        """Return the raspberry relay wired to the first door of the
        vehicle of the work order in context, or ``False``.

        Shared by the device and channel defaults below; the original
        code duplicated this whole lookup in both of them.
        """
        work_order_id = self._context.get('default_work_order_id', False)
        if not work_order_id:
            return False
        work_order = self.env["fleet.work.order"].search(
            [("id", "=", work_order_id)])
        if not work_order:
            return False
        vehicle = work_order.vehicle_id
        # Slice instead of indexing so an empty door_ids recordset yields
        # an empty recordset rather than raising IndexError.
        door = vehicle.door_ids[:1]
        if door:
            return door.raspberry_relay_id
        return False

    @api.model
    def _default_device_id(self):
        """Default scanning device: the backend device of the door relay."""
        relay = self._default_door_relay()
        if relay:
            return relay.device_id
        return False

    @api.model
    def _default_channel(self):
        """Default relay channel: the GPIO pin of the door relay."""
        relay = self._default_door_relay()
        if relay:
            return relay.pin
        return False

    passanger_code = fields.Char(
        string="# Passanger",
        default=False,
    )
    device_raspberry_id = fields.Many2one(
        string="# Device",
        comodel_name="proxy.backend_device",
        default=_default_device_id
    )
    channel = fields.Integer(
        string="Channel",
        default=_default_channel
    )

    @api.onchange("passanger_code")
    def onchange_passanger(self):
        """Look the scanned code up; on a unique match, toggle the
        passanger state and return the door-relay client action."""
        obj_passanger = self.env["fleet.work_order_passanger"]
        warning = {}
        if self.passanger_code:
            passangers = obj_passanger.search(self._prepare_domain())
            if len(passangers) == 1:
                # TODO: placeholder title/message kept from the original.
                warning = {
                    "title": "A",
                    "message": "B"
                }
                passanger = passangers[0]
                self.passanger_code = ""
                warning.update({
                    "action": self._open_door_gpio(passanger),
                })
            else:
                # Unknown or ambiguous code: just clear the input field.
                self.passanger_code = ""
        return {"warning": warning}

    @api.multi
    def _open_door_gpio(self, passanger):
        """Toggle the passanger boarding state and return the client
        action that pulses the door relay for one second."""
        self.ensure_one()
        action = self.env.ref(
            "proxy_backend_gpio."
            "proxy_backend_raspberry_relay_on_off_timer_action")
        context = {
            "device_id": self.device_raspberry_id.id,
            "pin": self.channel,
            "interval": 1
        }
        # valid/disembarking -> boarding; boarding -> disembarking
        if passanger.state in ["valid", "disembarking"]:
            passanger.write({"state": "boarding"})
        elif passanger.state == "boarding":
            passanger.write({"state": "disembarking"})
        result = action.read()[0]
        result.update({"context": context})
        return result

    @api.multi
    def _reload_action(self):
        """Return the window action that re-opens this wizard."""
        self.ensure_one()
        wiz = self.env.ref(
            "fleet_work_order_passanger."
            "fleet_passanger_boarding_disembark_action")
        return wiz.read()[0]

    @api.multi
    def _prepare_domain(self):
        """Domain matching the scanned code within this work order."""
        self.ensure_one()
        return [
            ("name", "=", self.passanger_code),
            ("work_order_id", "=", self.work_order_id.id)
        ]

    @api.multi
    def action_clear_barcode(self):
        """Clear the scanned code and reload the wizard."""
        self.ensure_one()
        self.passanger_code = False
        return self._reload_action()
| agpl-3.0 | 4,147,877,347,711,753,700 | 28.386667 | 73 | 0.504991 | false |
jdddog/nao_hri | src/nao_hri/joint.py | 1 | 2417 | # Copyright (c) 2014, James Diprose
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
from ros_blender_bridge import Joint
from naoqi_bridge_msgs.msg import JointAnglesWithSpeed
class NaoJoint(Joint):
    """ROS joint adapter that forwards joint targets to the NAO
    ``joint_angles`` topic as JointAnglesWithSpeed messages."""

    def __init__(self, joint_name):
        Joint.__init__(self, joint_name)
        # One reusable message per joint; joint_names is set once and
        # never changes afterwards.
        self.msg = JointAnglesWithSpeed()
        self.msg.joint_names.append(joint_name)
        self.joint_pub = rospy.Publisher('joint_angles', JointAnglesWithSpeed, queue_size=10)

    def reset_msg(self):
        # Drop any stale targets and refresh the timestamp before publishing.
        self.msg.joint_angles = []
        self.msg.header.stamp = rospy.Time().now()

    def set_position(self, position):
        # Only publish when the target changed: reset_msg() empties
        # joint_angles, so this check suppresses an immediately repeated
        # command for the same position.
        # NOTE(review): presumably intentional de-duplication -- confirm
        # against ros_blender_bridge.Joint.
        if position not in self.msg.joint_angles:
            self.reset_msg()
            self.msg.joint_angles.append(position)
            # assumes set_speed() was called before the first publish --
            # TODO confirm the base class guarantees this
            self.msg.speed = self.speed
            self.joint_pub.publish(self.msg)

    def set_speed(self, speed):
        self.speed = speed

    def set_acceleration(self, acceleration):
        # JointAnglesWithSpeed has no acceleration field; nothing to do.
        pass
akrherz/iem | scripts/coop/compute_climate.py | 1 | 5477 | """Computes the Climatology and fills out the table!"""
import datetime
import psycopg2.extras
from tqdm import tqdm
from pyiem.reference import state_names
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, logger
LOG = logger()
COOP = get_dbconn("coop")
THISYEAR = datetime.date.today().year
# Climatology periods: each key names a database table this script fills.
# "sts" is inclusive, "ets" exclusive.  climate = full period of record,
# climate51 = 1951-present, climate71/climate81 = fixed 30-year normals.
META = {
    "climate51": {
        "sts": datetime.datetime(1951, 1, 1),
        "ets": datetime.datetime(THISYEAR, 1, 1),
    },
    "climate71": {
        "sts": datetime.datetime(1971, 1, 1),
        "ets": datetime.datetime(2001, 1, 1),
    },
    "climate": {
        "sts": datetime.datetime(1893, 1, 1),
        "ets": datetime.datetime(THISYEAR, 1, 1),
    },
    "climate81": {
        "sts": datetime.datetime(1981, 1, 1),
        "ets": datetime.datetime(2011, 1, 1),
    },
}
def daily_averages(table):
    """
    Compute and Save the simple daily averages

    For every state network, delete that state's rows from ``table`` and
    re-insert one row per station per calendar day with averages and
    extremes computed over the period configured in ``META[table]``.
    """
    for st in state_names:
        nt = NetworkTable("%sCLIMATE" % (st,))
        if not nt.sts:
            LOG.info("Skipping %s as it has no stations", st)
            continue
        LOG.info("Computing Daily Averages for state: %s", st)
        ccursor = COOP.cursor()
        # wipe the state's rows first so the insert below is a full rebuild
        ccursor.execute(
            f"DELETE from {table} WHERE substr(station, 1, 2) = %s", (st,)
        )
        LOG.info(" removed %s rows from %s", ccursor.rowcount, table)
        # one aggregate row per (station, calendar day), all days mapped
        # onto year 2000 so the 'valid' column is a plain date key
        ccursor.execute(
            f"""
        INSERT into {table} (station, valid, high, low,
        max_high, min_high,
        max_low, min_low,
        max_precip, precip,
        snow, years,
        gdd32, gdd41, gdd46, gdd48, gdd50, gdd51, gdd52,
        sdd86, hdd65, cdd65, max_range,
        min_range, srad)
        (SELECT station, ('2000-'|| to_char(day, 'MM-DD'))::date as d,
        avg(high) as avg_high, avg(low) as avg_low,
        max(high) as max_high, min(high) as min_high,
        max(low) as max_low, min(low) as min_low,
        max(precip) as max_precip, avg(precip) as precip,
        avg(snow) as snow, count(*) as years,
        avg(gddxx(32, 86, high, low)) as gdd32,
        avg(gddxx(41, 86, high, low)) as gdd41,
        avg(gddxx(46, 86, high, low)) as gdd46,
        avg(gddxx(48, 86, high, low)) as gdd48,
        avg(gddxx(50, 86, high, low)) as gdd50,
        avg(gddxx(51, 86, high, low)) as gdd51,
        avg(gddxx(52, 86, high, low)) as gdd52,
        avg( sdd86(high,low) ) as sdd86,
        avg( hdd65(high,low) ) as hdd65,
        avg(cdd65(high,low)) as cdd65,
        max( high - low) as max_range, min(high - low) as min_range,
        avg(merra_srad) as srad
        from alldata_{st} WHERE day >= %s and day < %s and
        precip is not null and high is not null and low is not null
        and station in %s
        GROUP by d, station)""",
            (
                META[table]["sts"].strftime("%Y-%m-%d"),
                META[table]["ets"].strftime("%Y-%m-%d"),
                tuple(nt.sts.keys()),
            ),
        )
        LOG.info(" added %s rows to %s", ccursor.rowcount, table)
        ccursor.close()
        COOP.commit()
def do_date(ccursor2, table, row, col, agg_col):
    """Return the years in which this station recorded the extreme value.

    :param ccursor2: database cursor to run the query with.
    :param table: climatology table name; keys into ``META`` for the period.
    :param row: mapping with at least ``station``, ``valid`` (a date) and
        the aggregate value stored under ``agg_col``.
    :param col: observation column to match (``high``, ``low``, ``precip``).
    :param agg_col: key in ``row`` holding the extreme value to search for.
    :return: list of years, oldest first; empty when nothing matched.
    """
    # NOTE: row[agg_col] is interpolated into the SQL; it comes straight
    # from our own database (numeric aggregate), not from user input.
    ccursor2.execute(
        f"""
    SELECT year from alldata_{row['station'][:2]} where station = %s and
    {col} = {row[agg_col]} and sday = %s and day >= %s and day < %s
    ORDER by year ASC
    """,
        (
            row["station"],
            row["valid"].strftime("%m%d"),
            META[table]["sts"],
            META[table]["ets"],
        ),
    )
    # Dedicated loop variable: the original reused ``row`` here, shadowing
    # the parameter and making the log message below ambiguous.
    years = [dbrow[0] for dbrow in ccursor2]
    if not years:
        LOG.info("None %s %s %s", row, col, agg_col)
    return years
def set_daily_extremes(table):
    """Set the extremes on a given table

    For each row whose *_yr columns are still NULL, look up the years in
    which the stored extreme values occurred and write them back.
    Commits in batches of 1000 rows.
    """
    ccursor = COOP.cursor(cursor_factory=psycopg2.extras.DictCursor)
    # only rows that have extremes computed but no attribution years yet
    ccursor.execute(
        f"""
        SELECT * from {table} WHERE max_high_yr is null and
        max_high is not null
        and min_high_yr is null and min_high is not null
        and max_low_yr is null and max_low is not null
        and min_low_yr is null and min_low is not null
        """
    )
    ccursor2 = COOP.cursor()
    cnt = 0
    total = ccursor.rowcount
    for row in tqdm(ccursor, total=total):
        data = {}
        data["max_high_yr"] = do_date(ccursor2, table, row, "high", "max_high")
        data["min_high_yr"] = do_date(ccursor2, table, row, "high", "min_high")
        data["max_low_yr"] = do_date(ccursor2, table, row, "low", "max_low")
        data["min_low_yr"] = do_date(ccursor2, table, row, "low", "min_low")
        data["max_precip_yr"] = do_date(
            ccursor2, table, row, "precip", "max_precip"
        )
        ccursor2.execute(
            f"""
            UPDATE {table} SET max_high_yr = %s, min_high_yr = %s,
            max_low_yr = %s, min_low_yr = %s, max_precip_yr = %s
            WHERE station = %s and valid = %s
            """,
            (
                data["max_high_yr"],
                data["min_high_yr"],
                data["max_low_yr"],
                data["min_low_yr"],
                data["max_precip_yr"],
                row["station"],
                row["valid"],
            ),
        )
        cnt += 1
        # periodic commit so a long run does not hold one huge transaction
        if cnt % 1000 == 0:
            ccursor2.close()
            COOP.commit()
            ccursor2 = COOP.cursor()
    ccursor2.close()
    ccursor.close()
    COOP.commit()
def main():
    """Rebuild averages and extreme-year attribution for every table."""
    for table_name in META:
        daily_averages(table_name)
        set_daily_extremes(table_name)
# script entry point
if __name__ == "__main__":
    main()
| mit | -5,248,187,992,509,591,000 | 30.65896 | 79 | 0.535877 | false |
probablytom/msci-model | resp_base/Responsibilities.py | 1 | 3605 | from .Constraints import Deadline, ResourceDelta
from random import random
class UnbalancedImportancesError(Exception):
    """Raised when an importance list does not match the constraint list."""


class Obligation:
    """A named set of constraints that can be weighted by importance."""

    def __init__(self,
                 constraint_set: list,
                 name="unnamed_responsibility"):
        self.constraint_set = constraint_set
        self.name = name

    def set_importances(self, importances):
        """Assign one importance value to each constraint, positionally."""
        if len(importances) != len(self.constraint_set):
            raise UnbalancedImportancesError()
        for constraint, importance in zip(self.constraint_set, importances):
            constraint.assign_importance(importance)
class Responsibility:
    """An obligation delegated from an authority to a delegee.

    Identity is carried by ``id``: when none is supplied a random one is
    generated (collisions are improbable but not impossible).
    """

    def __init__(self,
                 obligation: Obligation,
                 authority,
                 delegee,
                 id=None):
        if id is None:
            self.id = random() * 100000000
        else:
            self.id = id
        self.obligation = obligation
        self.authority = authority
        self.delegee = delegee

    @property
    def constraints(self):
        # convenience pass-through to the obligation's constraints
        return self.obligation.constraint_set

    @property
    def importances(self):
        # current importance of each constraint, in constraint order
        return [constraint.importance
                for constraint in self.constraints]

    # NOTE(review): unlike ``importances`` this is a plain method, not a
    # property -- presumably an oversight, but callers may rely on calling
    # it; confirm before changing.
    def original_importances(self):
        return [constraint.original_importance
                for constraint in self.constraints]

    def calculate_effect(self):
        '''
        Calculates the desired effect of a responsibility.
        :return: A ResponsibilityEffect, which accounts for all effects of this responsibility's constraints.
        '''
        total_effect = {'duration': 0}
        for constraint in self.constraints:
            if isinstance(constraint, ResourceDelta):
                # sum each factor's delta across all resource constraints
                for factor, effect in constraint.factors.items():
                    if factor not in total_effect.keys():
                        total_effect[factor] = 0
                    total_effect[factor] += effect
            elif isinstance(constraint, Deadline):
                total_effect['duration'] += constraint.duration
        return ResponsibilityEffect(total_effect)

    # NOTE(review): __eq__ without __hash__ makes instances unhashable in
    # Python 3; equality is by id only, ignoring content.
    def __eq__(self, other):
        return self.id == other.id

    @property
    def name(self):
        return self.obligation.name
class ResponsibilityEffect:
    """Mapping of factor name -> delta describing what discharging a
    responsibility does (including its 'duration' cost)."""

    def __init__(self, effect_dict):
        # stored as (key, value) pairs; effect_dict rebuilds the mapping
        self.effects = [(k, v) for k, v in effect_dict.items()]

    def __hash__(self):
        # Hash by content, order-independently.  The original hashed
        # str(effect_dict), so two equal effects built with different
        # insertion orders hashed differently while comparing equal.
        return hash(frozenset(self.effects))

    @property
    def effect_dict(self):
        return dict(self.effects)

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.effect_dict == other.effect_dict

    def get(self, effect_key):
        """Return the delta for *effect_key*, or 0 when unaffected."""
        return self.effect_dict.get(effect_key, 0)

    def disregard(self, effect_key):
        '''
        Alters the ResponsibilityEffect so as to ignore certain parameters.
        This method mutates the ResponsibilityEffect, so it's best to make a backup.
        :param effect_key: The key that should be ignored in this ResponsibilityEffect
        '''
        # Compare against the KEY only.  The original used tuple membership
        # ("effect_key not in item"), which also dropped entries whose
        # VALUE happened to equal effect_key.
        self.effects = [(key, value) for key, value in self.effects
                        if key != effect_key]

    def __str__(self):
        return "ResponsibilityEffect: " + str(self.effect_dict)
class Act:
    """A concrete action: an entry-point callable plus the effect it is
    expected to have, optionally tied to a workflow and call arguments."""

    def __init__(self, effect, entry_point_function, workflow=None, args=()):
        self.effect = effect
        self.entry_point_function = entry_point_function
        self.workflow = workflow
        self.args = args
kbkailashbagaria/subliminal | subliminal/providers/thesubdb.py | 7 | 2602 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from babelfish import Language
from requests import Session
from . import Provider, get_version
from .. import __version__
from ..subtitle import Subtitle, fix_line_ending
logger = logging.getLogger(__name__)
class TheSubDBSubtitle(Subtitle):
    """Subtitle found on TheSubDB, identified purely by the video hash."""
    provider_name = 'thesubdb'

    def __init__(self, language, hash):
        # NOTE: parameter name 'hash' shadows the builtin, kept for
        # interface compatibility with callers using keyword arguments.
        super(TheSubDBSubtitle, self).__init__(language)
        self.hash = hash

    @property
    def id(self):
        # the video hash is the only identifier TheSubDB exposes
        return self.hash

    def get_matches(self, video, hearing_impaired=False):
        matches = super(TheSubDBSubtitle, self).get_matches(video, hearing_impaired=hearing_impaired)

        # hash
        if 'thesubdb' in video.hashes and video.hashes['thesubdb'] == self.hash:
            matches.add('hash')

        return matches
class TheSubDBProvider(Provider):
    """Provider for the hash-based TheSubDB service."""
    languages = {Language.fromalpha2(l) for l in ['en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ro', 'sv', 'tr']}
    required_hash = 'thesubdb'
    server_url = 'http://api.thesubdb.com/'

    def initialize(self):
        # TheSubDB requires this exact User-Agent format to accept requests
        self.session = Session()
        self.session.headers = {'User-Agent': 'SubDB/1.0 (subliminal/%s; https://github.com/Diaoul/subliminal)' %
                                get_version(__version__)}

    def terminate(self):
        self.session.close()

    def query(self, hash):
        """Return the subtitles available for *hash*, one per language."""
        # make the query
        params = {'action': 'search', 'hash': hash}
        logger.info('Searching subtitles %r', params)
        r = self.session.get(self.server_url, params=params, timeout=10)

        # handle subtitles not found and errors
        if r.status_code == 404:
            logger.debug('No subtitles found')
            return []
        r.raise_for_status()

        # an empty body would otherwise split to [''] and crash on
        # Language.fromalpha2('')
        if not r.text:
            return []

        # loop over languages
        subtitles = []
        for language_code in r.text.split(','):
            language = Language.fromalpha2(language_code)
            subtitle = TheSubDBSubtitle(language, hash)
            logger.info('Found subtitle %r', subtitle)
            subtitles.append(subtitle)

        return subtitles

    def list_subtitles(self, video, languages):
        return [s for s in self.query(video.hashes['thesubdb']) if s.language in languages]

    def download_subtitle(self, subtitle):
        # the original logged 'Downloading subtitle %r' without any
        # argument, so the placeholder was never filled in
        logger.info('Downloading subtitle %r', subtitle)
        params = {'action': 'download', 'hash': subtitle.hash, 'language': subtitle.language.alpha2}
        r = self.session.get(self.server_url, params=params, timeout=10)
        r.raise_for_status()

        subtitle.content = fix_line_ending(r.content)
| mit | 3,608,087,149,503,811,600 | 30.731707 | 113 | 0.620676 | false |
numerigraphe/purchase-workflow | purchase_requisition_delivery_address/__openerp__.py | 4 | 1242 | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{'name': "Purchase Requisition - Delivery Address",
 'summary': "Adds delivery address on Purchase requisition",
 'version': "0.1",
 'author': "Camptocamp",
 'category': "Purchase Management",
 'license': "AGPL-3",
 'complexity': "easy",
 'images': [],
 # Module dependencies loaded before this addon.
 'depends': ['purchase_requisition',
             'stock_dropshipping',
             ],
 'demo': [],
 # View extension for the purchase requisition form.
 'data': ['view/purchase_requisition.xml'],
 'test': [],
 'installable': True,
 # Not installed automatically when dependencies are present.
 'auto_install': False,
 }
| agpl-3.0 | 3,717,544,609,304,154,000 | 32.567568 | 77 | 0.675523 | false |
ZefQ/Flexget | flexget/plugins/filter/if_condition.py | 23 | 4613 | from __future__ import unicode_literals, division, absolute_import
import __builtin__
import logging
import re
import datetime
from copy import copy
from flexget import plugin
from flexget.event import event
from flexget.task import Task
from flexget.entry import Entry
log = logging.getLogger('if')
def safer_eval(statement, locals):
    """A safer eval function. Does not allow __ or try statements, only includes certain 'safe' builtins."""
    # Expose a small whitelist of harmless builtins to the expression.
    # Note: this mutates the caller-supplied locals dict, as before.
    locals.update((name, getattr(__builtin__, name))
                  for name in ('True', 'False', 'str', 'unicode', 'int',
                               'float', 'len', 'any', 'all', 'sorted'))
    # Reject obviously dangerous constructs before evaluating.
    if re.search(r'__|try\s*:|lambda', statement):
        raise ValueError('`__`, lambda or try blocks not allowed in if statements.')
    # Evaluate with builtins disabled; only the whitelisted names are visible.
    return eval(statement, {'__builtins__': None}, locals)
class FilterIf(object):
    """Can run actions on entries that satisfy a given condition.

    Actions include accept, reject, and fail, as well as the ability to run
    other filter plugins on the entries.
    """

    # Config schema: a list of {condition_string: action} mappings, where the
    # action is either one of the simple strings 'accept'/'reject'/'fail' or
    # a nested plugin configuration to run against the matching entries.
    schema = {
        'type': 'array',
        'items': {
            'type': 'object',
            'additionalProperties': {
                'anyOf': [
                    {'$ref': '/schema/plugins'},
                    {'enum': ['accept', 'reject', 'fail']}
                ]
            }
        }
    }

    def check_condition(self, condition, entry):
        """Checks if a given `entry` passes `condition`.

        Returns the evaluation result, or None when the condition references
        a missing field or raises while being evaluated.
        """
        # Make entry fields and other utilities available in the eval namespace
        # We need our namespace to be an Entry instance for lazy loading to work
        eval_locals = copy(entry)
        eval_locals.update({'has_field': lambda f: f in entry,
                            'timedelta': datetime.timedelta,
                            'now': datetime.datetime.now()})
        try:
            # Restrict eval namespace to have no globals and locals only from eval_locals
            passed = safer_eval(condition, eval_locals)
            if passed:
                log.debug('%s matched requirement %s' % (entry['title'], condition))
            return passed
        except NameError as e:
            # Extract the name that did not exist from the exception message.
            missing_field = e.args[0].split('\'')[1]
            log.debug('%s does not contain the field %s' % (entry['title'], missing_field))
        except Exception as e:
            log.error('Error occured while evaluating statement `%s`. (%s)' % (condition, e))

    def __getattr__(self, item):
        """Provides handlers for all phases.

        Synthesizes a handler function for any requested phase-method name,
        except for the accept, reject, fail and input phases.
        """
        for phase, method in plugin.phase_methods.iteritems():
            if item == method and phase not in ['accept', 'reject', 'fail', 'input']:
                break
        else:
            # Not a phase method we handle; behave like a normal missing attribute.
            raise AttributeError(item)

        def handle_phase(task, config):
            # Map simple string actions to the corresponding Entry methods.
            entry_actions = {
                'accept': Entry.accept,
                'reject': Entry.reject,
                'fail': Entry.fail}
            for item in config:
                requirement, action = item.items()[0]
                # Entries satisfying this condition get the action applied.
                passed_entries = [e for e in task.entries if self.check_condition(requirement, e)]
                if isinstance(action, basestring):
                    # String actions only make sense during the filter phase.
                    if not phase == 'filter':
                        continue
                    # Simple entry action (accept, reject or fail) was specified as a string
                    for entry in passed_entries:
                        entry_actions[action](entry, 'Matched requirement: %s' % requirement)
                else:
                    # Other plugins were specified to run on this entry
                    fake_task = Task(task.manager, task.name, config=action, options=task.options)
                    fake_task.session = task.session
                    # This entry still belongs to our feed, accept/reject etc. will carry through.
                    fake_task.all_entries[:] = passed_entries

                    # Collect this phase's handler from each configured plugin.
                    methods = {}
                    for plugin_name, plugin_config in action.iteritems():
                        p = plugin.get_plugin_by_name(plugin_name)
                        method = p.phase_handlers.get(phase)
                        if method:
                            methods[method] = (fake_task, plugin_config)
                    # Run the methods in priority order
                    for method in sorted(methods, reverse=True):
                        method(*methods[method])

        handle_phase.priority = 80
        return handle_phase
@event('plugin.register')
def register_plugin():
    # Register under the config key 'if'; api_ver=2 selects the newer
    # FlexGet plugin API where config is passed to phase handlers.
    plugin.register(FilterIf, 'if', api_ver=2)
| mit | -6,215,942,801,543,494,000 | 40.936364 | 115 | 0.558855 | false |
bjornaa/ladim | postladim/test/test_particlefile.py | 1 | 4283 | import os
import pytest
import datetime
import numpy as np
# import xarray as xr
# from pathlib import Path
from netCDF4 import Dataset
from postladim import ParticleFile
@pytest.fixture(scope="module")
def particle_file():
    """Create a small LADiM-style particle file, yield its name, remove it.

    Positions per time frame (rows = times, columns = particles 0..2,
    '-' = particle not present):

        0  -  -
        1 11  -
        2  - 22
        -  -  23
    """
    pfile = "test.nc"
    nparticles = 3
    # X/Y positions; NaN marks a particle absent from that time frame.
    X = np.array(
        [[0, np.nan, np.nan], [1, 11, np.nan], [2, np.nan, 22], [np.nan, np.nan, 23]]
    )
    Y = np.array(
        [[2, np.nan, np.nan], [3, 8, np.nan], [4, np.nan, 9], [np.nan, np.nan, 10]]
    )
    ntimes = X.shape[0]
    # Particle identifiers per frame; -99 is a sentinel for "absent".
    pid = np.multiply.outer(ntimes * [1], list(range(nparticles)))
    pid[np.isnan(X)] = -99  # Undefined integer
    time = 3600 * np.arange(ntimes)  # hourly timesteps
    # Number of active particles in each frame (1, 2, 2, 1).
    count = (np.ones(np.shape(X)) - np.isnan(X)).sum(axis=1)

    with Dataset(pfile, mode="w") as nc:
        # Dimensions: particle_instance is the ragged (unlimited) dimension.
        nc.createDimension("particle", nparticles)
        nc.createDimension("particle_instance", None)
        nc.createDimension("time", ntimes)
        # Variables
        v = nc.createVariable("time", "f8", ("time",))
        v.units = "seconds since 1970-01-01 00:00:00"
        v = nc.createVariable("particle_count", "i", ("time",))
        v = nc.createVariable("start_time", "f8", ("particle",))
        v.units = "seconds since 1970-01-01 00:00:00"
        v = nc.createVariable("location_id", "i", ("particle",))
        v = nc.createVariable("pid", "i", ("particle_instance",))
        v = nc.createVariable("X", "f4", ("particle_instance",))
        v = nc.createVariable("Y", "f4", ("particle_instance",))
        # Data: only present particles are written (NaN / -99 filtered out),
        # giving the flat ragged-array layout postladim expects.
        nc.variables["time"][:] = time
        nc.variables["particle_count"][:] = count
        nc.variables["start_time"][:] = time[:nparticles]
        nc.variables["location_id"][:] = [10000, 10001, 10002]
        nc.variables["pid"][:] = [v for v in pid.flat if v >= 0]
        nc.variables["X"][:] = [v for v in X.flat if not np.isnan(v)]
        nc.variables["Y"][:] = [v for v in Y.flat if not np.isnan(v)]

    yield pfile

    # tear down: remove the temporary file after all module tests ran
    os.remove(pfile)
def test_open():
    """Opening a non-existing file should raise FileNotFoundError."""
    with pytest.raises(FileNotFoundError):
        ParticleFile("no_such_file.nc")
def test_count(particle_file):
    """Alignment of time frames in the particle file."""
    with ParticleFile(particle_file) as pf:
        # The fixture has 4 frames holding 1, 2, 2, 1 particles,
        # giving cumulative start offsets [0, 1, 3, 5] and ends [1, 3, 5, 6].
        assert pf.num_times == 4
        assert all(pf.start == [0, 1, 3, 5])
        assert list(pf.count) == [1, 2, 2, 1]
        assert list(pf.end) == [1, 3, 5, 6]
        assert len(pf) == 4
        assert pf.num_particles == 3
def test_time(particle_file):
    """Time handled correctly."""
    with ParticleFile(particle_file) as pf:
        # Fixture time steps are hourly starting at 1970-01-01 00:00.
        assert pf.time[3] == np.datetime64("1970-01-01 03")
        times2 = [np.datetime64(t) for t in ["1970-01-01", "1970-01-01 01"]]
        assert all(pf.time[:2] == times2)
        # Old callable notation still works
        assert pf.time(3) == pf.time[3]
        assert str(pf.time(3)) == "1970-01-01T03:00:00"
def test_variables(particle_file):
    """Identifies the variables into the correct category."""
    with ParticleFile(particle_file) as pf:
        # pid/X/Y vary per particle instance; start_time/location_id are
        # one value per particle (see the fixture's netCDF dimensions).
        assert pf.instance_variables == ["pid", "X", "Y"]
        assert pf.particle_variables == ["start_time", "location_id"]
def test_pid(particle_file):
    """The pid is correct."""
    with ParticleFile(particle_file) as pf:
        # Per-frame particle ids follow the fixture layout:
        # frame 0: [0], frame 1: [0, 1], frame 2: [0, 2], frame 3: [2].
        assert pf.pid.isel(time=0) == 0
        assert pf["pid"][0] == 0
        assert pf.pid[0] == 0
        assert all(pf.pid[1] == [0, 1])
        assert list(pf.pid[2]) == [0, 2]
        assert pf.pid[3] == 2
def test_position(particle_file):
    """position() returns the X and Y arrays of a given time frame."""
    with ParticleFile(particle_file) as pf:
        # Keyword and positional time arguments behave the same.
        X, Y = pf.position(time=1)
        assert all(X == pf.X[1])
        assert all(Y == pf.Y[1])
        X, Y = pf.position(2)
        assert all(X == pf.X[2])
        assert all(Y == pf.Y[2])
def test_trajectory(particle_file):
    """trajectory() follows a single particle through time."""
    with ParticleFile(particle_file) as pf:
        # Particle 2 only exists in the last two frames (see fixture).
        X, Y = pf.trajectory(2)
        assert all(X == [22, 23])
        assert all(Y == [9, 10])
        # Particle 0 lives through the first three frames.
        traj = pf.trajectory(0)
        assert len(traj) == 3
        assert all(traj.time == pf.time[:-1])
        assert all(traj.X == pf.X.sel(pid=0))
        assert all(traj.Y == pf.Y.sel(pid=0))
| mit | -3,902,281,130,254,245,400 | 31.694656 | 85 | 0.564324 | false |
groovecoder/kuma | kuma/users/tests/test_models.py | 16 | 4070 | from nose.tools import eq_, ok_
from nose.plugins.attrib import attr
from kuma.wiki.tests import revision
from ..helpers import gravatar_url
from ..models import UserBan
from . import UserTestCase
class TestUser(UserTestCase):
    """Model-level tests for the custom user model's profile fields."""

    def test_websites(self):
        """A list of websites can be maintained on a user"""
        user = self.user_model.objects.get(username='testuser')

        # Assemble a set of test sites.
        test_sites = {
            'website_url': 'http://example.com',
            'twitter_url': 'http://twitter.com/lmorchard',
            'github_url': 'http://github.com/lmorchard',
            'stackoverflow_url': 'http://stackoverflow.com/users/lmorchard',
            'mozillians_url': 'https://mozillians.org/u/testuser',
            'facebook_url': 'https://www.facebook.com/test.user'
        }

        # Try a mix of assignment cases for the websites property
        for name, url in test_sites.items():
            setattr(user, name, url)

        # Save and make sure a fresh fetch works as expected
        user.save()
        user2 = self.user_model.objects.get(pk=user.pk)
        for name, url in test_sites.items():
            eq_(getattr(user2, name), url)

    def test_linkedin_urls(self):
        """Several LinkedIn URL shapes round-trip through save/reload."""
        user = self.user_model.objects.get(username='testuser')
        linkedin_urls = [
            'https://in.linkedin.com/in/testuser',
            'https://www.linkedin.com/in/testuser',
            'https://www.linkedin.com/pub/testuser',
        ]

        for url in linkedin_urls:
            user.linkedin_url = url
            user.save()

            new_user = self.user_model.objects.get(pk=user.pk)
            eq_(url, new_user.linkedin_url)

    def test_irc_nickname(self):
        """We've added IRC nickname as a profile field.
        Make sure it shows up."""
        user = self.user_model.objects.get(username='testuser')
        ok_(hasattr(user, 'irc_nickname'))
        eq_('testuser', user.irc_nickname)

    def test_unicode_email_gravatar(self):
        """Bug 689056: Unicode characters in email addresses shouldn't break
        gravatar URLs"""
        user = self.user_model.objects.get(username='testuser')
        # Deliberately malformed, non-ASCII "email" to exercise encoding.
        user.email = u"Someguy Dude\xc3\xaas Lastname"
        try:
            gravatar_url(user.email)
        except UnicodeEncodeError:
            self.fail("There should be no UnicodeEncodeError")

    def test_locale_timezone_fields(self):
        """We've added locale and timezone fields. Verify defaults."""
        user = self.user_model.objects.get(username='testuser')
        ok_(hasattr(user, 'locale'))
        ok_(user.locale == 'en-US')
        ok_(hasattr(user, 'timezone'))
        eq_(user.timezone, 'US/Pacific')

    def test_wiki_revisions(self):
        """A user's wiki revisions include revisions they authored."""
        user = self.user_model.objects.get(username='testuser')
        rev = revision(save=True, is_approved=True)
        ok_(rev.pk in user.wiki_revisions().values_list('pk', flat=True))
class BanTestCase(UserTestCase):
    """Tests for the UserBan model's effect on the user's active state."""

    @attr('bans')
    def test_ban_user(self):
        """Saving, toggling and deleting a ban flips user.is_active."""
        testuser = self.user_model.objects.get(username='testuser')
        admin = self.user_model.objects.get(username='admin')
        ok_(testuser.is_active)
        ban = UserBan(user=testuser,
                      by=admin,
                      reason='Banned by unit test')
        ban.save()
        # Saving an active ban deactivates the user.
        testuser_banned = self.user_model.objects.get(username='testuser')
        ok_(not testuser_banned.is_active)
        ok_(testuser_banned.active_ban.by == admin)

        # Deactivating the ban reactivates the user.
        ban.is_active = False
        ban.save()
        testuser_unbanned = self.user_model.objects.get(username='testuser')
        ok_(testuser_unbanned.is_active)

        # Re-activating the ban deactivates the user again.
        ban.is_active = True
        ban.save()
        testuser_banned = self.user_model.objects.get(username='testuser')
        ok_(not testuser_banned.is_active)
        ok_(testuser_unbanned.active_ban)

        # Deleting the ban record restores the user.
        ban.delete()
        testuser_unbanned = self.user_model.objects.get(username='testuser')
        ok_(testuser_unbanned.is_active)
        ok_(testuser_unbanned.active_ban is None)
| mpl-2.0 | 9,195,369,261,417,133,000 | 35.339286 | 76 | 0.610811 | false |
egor-tensin/sorting_algorithms | algorithms/params.py | 2 | 4480 | # Copyright (c) 2016 Egor Tensin <[email protected]>
# This file is part of the "Sorting algorithms" project.
# For details, see https://github.com/egor-tensin/sorting-algorithms.
# Distributed under the MIT License.
from enum import Enum
from numbers import Integral
from .input_kind import InputKind
from .plotter import PlotBuilder
from . import registry
from .timer import Timer
class TimeUnits(Enum):
    """Units for reporting measured running times."""
    SECONDS = 'seconds'
    MILLISECONDS = 'milliseconds'
    MICROSECONDS = 'microseconds'

    def get_factor(self):
        """Return the multiplier converting seconds into this unit."""
        factors = {
            TimeUnits.SECONDS: 1.,
            TimeUnits.MILLISECONDS: 1000.,
            TimeUnits.MICROSECONDS: 1000000.,
        }
        try:
            return factors[self]
        except KeyError:
            raise NotImplementedError('invalid time units: ' + str(self))

    def __str__(self):
        # Human-readable unit name, used in plot axis labels.
        return self.value
class AlgorithmParameters:
    """Benchmark settings for an algorithm: input length range, input kind
    and iteration count, plus helpers to measure and plot running time."""

    def __init__(self, algorithm, min_len, max_len,
                 input_kind=InputKind.AVERAGE, iterations=1):
        # Accept either a registered algorithm name or an algorithm object.
        if isinstance(algorithm, str):
            algorithm = registry.get(algorithm)
        self.algorithm = algorithm
        self.input_kind = input_kind

        # Backing fields are pre-set to None so the property setters can
        # cross-validate min_len against max_len during construction.
        self._min_len = None
        self._max_len = None
        self.min_len = min_len
        self.max_len = max_len

        self._iterations = None
        self.iterations = iterations

    @property
    def min_len(self):
        # Smallest input length to benchmark (inclusive).
        return self._min_len

    @min_len.setter
    def min_len(self, val):
        if not isinstance(val, Integral):
            raise TypeError('must be an integral value')
        val = int(val)
        if val < 0:
            raise ValueError('must be non-negative')
        if self.max_len is not None and self.max_len < val:
            raise ValueError('must not be greater than the maximum length')
        self._min_len = val

    @property
    def max_len(self):
        # Largest input length to benchmark (inclusive).
        return self._max_len

    @max_len.setter
    def max_len(self, val):
        if not isinstance(val, Integral):
            raise TypeError('must be an integral value')
        val = int(val)
        if val < 0:
            raise ValueError('must be non-negative')
        if self.min_len is not None and self.min_len > val:
            raise ValueError('must not be lesser than the minimum length')
        self._max_len = val

    @property
    def iterations(self):
        # How many times each input length is run; results are averaged.
        return self._iterations

    @iterations.setter
    def iterations(self, val):
        if not isinstance(val, Integral):
            raise TypeError('must be an integral value')
        val = int(val)
        if val < 1:
            raise ValueError('must be positive')
        self._iterations = val

    def measure_running_time(self):
        """Return (input_lengths, running_times) over the configured range."""
        input_len_range = list(range(self.min_len, self.max_len + 1))
        running_time = []
        for input_len in input_len_range:
            input_sample = self.algorithm.gen_input(input_len, self.input_kind)
            # Fresh copy per iteration so in-place algorithms (e.g. sorts)
            # don't operate on already-processed data in later iterations.
            input_copies = [list(input_sample) for _ in range(self.iterations)]
            # NOTE(review): Timer presumably appends the per-iteration
            # elapsed time to running_time -- confirm in the timer module.
            with Timer(running_time, self.iterations):
                for i in range(self.iterations):
                    self.algorithm.function(input_copies[i])
        return input_len_range, running_time

    @staticmethod
    def _format_plot_xlabel():
        return 'Input length'

    @staticmethod
    def _format_plot_ylabel(units):
        return 'Running time ({})'.format(units)

    def _format_plot_title(self):
        return '{}, {} case'.format(
            self.algorithm.display_name, self.input_kind)

    def _format_plot_suptitle(self):
        return self.algorithm.display_name

    @staticmethod
    def _derive_time_units(ys):
        """Pick plot units so the largest value is in a readable range."""
        max_y = max(ys)
        if max_y > 0.1:
            return TimeUnits.SECONDS
        if max_y > 0.0001:
            return TimeUnits.MILLISECONDS
        return TimeUnits.MICROSECONDS

    def plot_running_time(self, output_path=None):
        """Measure and plot running times; show interactively when
        output_path is None, otherwise save to that path."""
        xs, ys = self.measure_running_time()
        units = self._derive_time_units(ys)
        # Convert seconds into the chosen display units.
        ys = [y * units.get_factor() for y in ys]

        plot_builder = PlotBuilder()
        plot_builder.show_grid()
        plot_builder.set_xlabel(self._format_plot_xlabel())
        plot_builder.set_ylabel(self._format_plot_ylabel(units))
        #plot_builder.set_yticklabels_scientific()
        plot_builder.set_title(self._format_plot_title())

        plot_builder.plot(xs, ys)

        if output_path is None:
            plot_builder.show()
        else:
            plot_builder.save(output_path)
| mit | -3,324,626,552,379,483,600 | 30.111111 | 79 | 0.608705 | false |
grzes/djangae | djangae/db/unique_utils.py | 9 | 3947 | from hashlib import md5
from google.appengine.api import datastore
def _unique_combinations(model, ignore_pk=False):
    """Return sorted field-name lists, one per uniqueness constraint on *model*
    (unique_together groups plus individually-unique fields)."""
    combinations = [
        [model._meta.get_field(name).name for name in together]
        for together in model._meta.unique_together
    ]

    for field in model._meta.fields:
        # Optionally skip the primary key, which is always unique.
        if field.primary_key and ignore_pk:
            continue
        if field.unique:
            combinations.append([field.name])

    return [sorted(combination) for combination in combinations]
def _format_value_for_identifier(value):
    # Datastore keys max out at 500 chars, so string values are reduced to a
    # fixed-length md5 hex digest; other types (int, bool, ...) are short
    # enough to stringify directly.
    if isinstance(value, basestring):
        return md5(value.encode("utf-8")).hexdigest()
    return str(value)
def unique_identifiers_from_entity(model, entity, ignore_pk=False, ignore_null_values=True):
    """
    Given an instance, this function returns a list of identifier strings that represent
    unique field/value combinations.

    Each identifier has the form "<table>|col:val|col:val|...". For list/set
    fields, one identifier is produced per element (cartesian product across
    the combination's fields).
    """
    from djangae.db.utils import get_top_concrete_parent

    unique_combinations = _unique_combinations(model, ignore_pk)

    meta = model._meta

    identifiers = []
    for combination in unique_combinations:
        # Start with one empty partial identifier; multivalue fields fan
        # this list out (cartesian product).
        combo_identifiers = [[]]

        include_combination = True

        for field_name in combination:
            field = meta.get_field(field_name)

            if field.primary_key:
                value = entity.key().id_or_name()
            else:
                value = entity.get(field.column)  # Get the value from the entity

            # If ignore_null_values is True, then we don't include combinations where the value is None
            # or if the field is a multivalue field where None means no value (you can't store None in a list)
            if (value is None and ignore_null_values) or (not value and isinstance(value, (list, set))):
                include_combination = False
                break

            # Normalize scalars to a one-element list so the fan-out loop
            # below handles both cases uniformly.
            if not isinstance(value, (list, set)):
                value = [value]

            new_combo_identifers = []

            for existing in combo_identifiers:
                for v in value:
                    identifier = "{}:{}".format(field.column, _format_value_for_identifier(v))
                    new_combo_identifers.append(existing + [identifier])

            combo_identifiers = new_combo_identifers

        if include_combination:
            for ident in combo_identifiers:
                # Prefix with the top concrete parent's table so identifiers
                # are unique across an inheritance hierarchy.
                identifiers.append(get_top_concrete_parent(model)._meta.db_table + "|" + "|".join(ident))

    return identifiers
def query_is_unique(model, query):
    """
    If the query is entirely on unique constraints then return the unique identifier for
    that unique combination. Otherwise return False
    """
    if isinstance(query, datastore.MultiQuery):
        # By definition, a multiquery is not unique
        return False

    combinations = _unique_combinations(model)

    # Datastore query keys look like "<column> =" (with operator suffix).
    queried_fields = [x.strip() for x in query.keys()]

    for combination in combinations:
        unique_match = True
        field_names = []
        for field in combination:
            # Map the model's pk to the datastore's special key property.
            if field == model._meta.pk.column:
                field = "__key__"
            else:
                field = model._meta.get_field(field).column
            field_names.append(field)

            # We don't match this combination if the field didn't exist in the queried fields
            # or if it was, but the value was None (you can have multiple NULL values, they aren't unique)
            key = "{} =".format(field)
            if key not in queried_fields or query[key] is None:
                unique_match = False
                break

        if unique_match:
            # Build the same "<table>|col:val|..." identifier format as
            # unique_identifiers_from_entity.
            return "|".join([model._meta.db_table] + [
                "{}:{}".format(x, _format_value_for_identifier(query["{} =".format(x)]))
                for x in field_names
            ])

    return False
| bsd-3-clause | -4,482,610,168,908,953,600 | 34.558559 | 110 | 0.60679 | false |
deepakgupta1313/models | im2txt/im2txt/train.py | 30 | 4250 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train the model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from im2txt import configuration
from im2txt import show_and_tell_model
FLAGS = tf.app.flags.FLAGS
tf.flags.DEFINE_string("input_file_pattern", "",
"File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("inception_checkpoint_file", "",
"Path to a pretrained inception_v3 model.")
tf.flags.DEFINE_string("train_dir", "",
"Directory for saving and loading model checkpoints.")
tf.flags.DEFINE_boolean("train_inception", False,
"Whether to train inception submodel variables.")
tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.")
tf.flags.DEFINE_integer("log_every_n_steps", 1,
"Frequency at which loss and global step are logged.")
tf.logging.set_verbosity(tf.logging.INFO)
def main(unused_argv):
  """Build the Show-and-Tell training graph and run training to completion."""
  assert FLAGS.input_file_pattern, "--input_file_pattern is required"
  assert FLAGS.train_dir, "--train_dir is required"

  model_config = configuration.ModelConfig()
  model_config.input_file_pattern = FLAGS.input_file_pattern
  model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
  training_config = configuration.TrainingConfig()

  # Create training directory.
  train_dir = FLAGS.train_dir
  if not tf.gfile.IsDirectory(train_dir):
    tf.logging.info("Creating training directory: %s", train_dir)
    tf.gfile.MakeDirs(train_dir)

  # Build the TensorFlow graph.
  g = tf.Graph()
  with g.as_default():
    # Build the model.
    model = show_and_tell_model.ShowAndTellModel(
        model_config, mode="train", train_inception=FLAGS.train_inception)
    model.build()

    # Set up the learning rate. A lower rate is used when fine-tuning the
    # Inception submodel than when training only the caption model.
    learning_rate_decay_fn = None
    if FLAGS.train_inception:
      learning_rate = tf.constant(training_config.train_inception_learning_rate)
    else:
      learning_rate = tf.constant(training_config.initial_learning_rate)
      if training_config.learning_rate_decay_factor > 0:
        num_batches_per_epoch = (training_config.num_examples_per_epoch /
                                 model_config.batch_size)
        decay_steps = int(num_batches_per_epoch *
                          training_config.num_epochs_per_decay)

        def _learning_rate_decay_fn(learning_rate, global_step):
          # Staircase exponential decay every num_epochs_per_decay epochs.
          return tf.train.exponential_decay(
              learning_rate,
              global_step,
              decay_steps=decay_steps,
              decay_rate=training_config.learning_rate_decay_factor,
              staircase=True)

        learning_rate_decay_fn = _learning_rate_decay_fn

    # Set up the training ops.
    train_op = tf.contrib.layers.optimize_loss(
        loss=model.total_loss,
        global_step=model.global_step,
        learning_rate=learning_rate,
        optimizer=training_config.optimizer,
        clip_gradients=training_config.clip_gradients,
        learning_rate_decay_fn=learning_rate_decay_fn)

    # Set up the Saver for saving and restoring model checkpoints.
    saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)

  # Run training. init_fn restores the pretrained Inception weights on start.
  tf.contrib.slim.learning.train(
      train_op,
      train_dir,
      log_every_n_steps=FLAGS.log_every_n_steps,
      graph=g,
      global_step=model.global_step,
      number_of_steps=FLAGS.number_of_steps,
      init_fn=model.init_fn,
      saver=saver)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 | -385,671,210,219,667,100 | 36.280702 | 80 | 0.666118 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.