max_stars_repo_path (stringlengths 3–269) | max_stars_repo_name (stringlengths 4–119) | max_stars_count (int64 0–191k) | id (stringlengths 1–7) | content (stringlengths 6–1.05M) | score (float64 0.23–5.13) | int_score (int64 0–5) |
---|---|---|---|---|---|---|
python_cuda/NewVisualization.py | suvam0451/vehicle-counting-toolkit | 0 | 12785751 |
import cv2
import json
import math
from os import listdir
from os.path import isfile, join
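# Converts pixel-space position to screen-space position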
def pixel_to_screen(coords, size):
return (coords[0]/size[0], coords[1]/size[1])
# Converts screen-space position to pixel-space position
def screen_to_pixel(coords, size):
return (math.floor(coords[0]*size[0]), math.floor(coords[1]*size[1]))
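# Illustrative round-trip check (not part of the original script; the frame
# size is a hypothetical 1920x1080):
# >>> pixel_to_screen((960, 540), (1920, 1080))
# (0.5, 0.5)
# >>> screen_to_pixel((0.5, 0.5), (1920, 1080))
# (960, 540)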
img = cv2.imread('images/image_02_02.jpg', 0)
| 2.6875 | 3 |
white_matter/wm_recipe/synapse_types/__init__.py | alex4200/Long-range-micro-connectome | 9 | 12785752 |
from ...utils.data_from_config import read_config
class SynapseTypes(object):
def __init__(self, cfg_file=None):
import os
from white_matter.utils.paths_in_config import path_local_to_cfg_root
if cfg_file is None:
cfg_file = os.path.join(os.path.split(__file__)[0], 'default.json')
cfg = read_config(cfg_file)
self.cfg = cfg["SynapseTypes"]
self.cfg["cfg_root"] = cfg["cfg_root"]
path_local_to_cfg_root(self.cfg, ["synapse_type_yaml"])
def __getitem__(self, item):
return self.cfg["synapse_type_mapping"][item]
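# Minimal usage sketch (illustrative; assumes the bundled default.json defines
# a "SynapseTypes" section with a "synapse_type_mapping" dict, and the key
# below is hypothetical):
# syn_types = SynapseTypes()             # falls back to default.json
# mapping = syn_types['some_pathway']    # looks up synapse_type_mapping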
| 2.046875 | 2 |
RDS/code/est_py/interface1.py | chrisjcameron/chrisjcameron.github.io | 0 | 12785753 |
from jpype import *
import numpy as np
#Interface1.py puts rdsat_analyze and rdsat_aggregate into a class so that the jpype portion is defined only once.
#This is an effort to reduce the potential for memory leaks. -pp286 1/25/2013
#############################################################################################################
#startJVM("C:/Program Files/Java/jre6/bin/server/jvm.dll", "-Djava.class.path=C:/Program Files/RDSAT 7.1.38/lib/cloning-1.8.1.jar;C:/Program Files/RDSAT 7.1.38/lib/RDSAT-api.jar;C:/Program Files/RDSAT 7.1.38/lib/RDSAT-core.jar;C:/Program Files/RDSAT 7.1.38/lib/opencsv-2.1.jar;C:/Program Files/RDSAT 7.1.38/lib/commons-betwixt-0.8.jar;C:/Program Files/RDSAT 7.1.38/lib/objenesis-1.2.jar")
#subgroupType = JPackage("edu").cornell.rdsat.api.beans.Subgroup.Type
###########
class rdsat_jpype:
def __init__(self, confidence_alpha= 0.025):
self.SubgroupType = JClass("edu/cornell/rdsat/api/beans/Subgroup$Type")
self.AlgorithmType = JPackage("edu/cornell/rdsat/api").AlgorithmType
self.AverageNetworkSizeEstimation = JPackage("edu/cornell/rdsat/api").AverageNetworkSizeEstimation
self.DualComponentType = JPackage("edu/cornell/rdsat/api/DualComponent").Type
self.Library = JPackage("edu").cornell.rdsat.api.Toolkit.getInstance().getLibrary()
self.options = JPackage("edu").cornell.rdsat.api.beans.PartitionAnalysisOptions()
        self.StrategyType = JClass("edu/cornell/rdsat/api/beans/AnalysisStrategy$Strategy").INCLUDE_MISSING ##What happens if I use "EXCLUDE_MISSING"?
self.EST_Table = JClass("edu.cornell.rdsat.api.util.AggregateEstimatesUtil$WeightedEstimationTable")
self.confidence_alpha = confidence_alpha
self.aggregator = JClass("edu.cornell.rdsat.api.util.AggregateEstimatesUtil")(None, None, self.confidence_alpha )
    def rdsat_analyze(self, var, ID, rID, degree, levels, method="EnhancedDataSmoothing", estimator='DualComponent', bootstraps=2500):
if estimator== "DualComponent":
#print AverageNetworkSizeEstimation.DUAL_COMPONENT
self.options.setAveNetSizeEstimation(self.AverageNetworkSizeEstimation.DUAL_COMPONENT)
        elif estimator == 'Multiplicity':
self.options.setAveNetSizeEstimation(self.AverageNetworkSizeEstimation.MULTIPLICITY_ESTIMATE)
self.options.setConfidenceInterval(float(self.confidence_alpha))
# Prepare partition analysis options - method
if method == 'DataSmoothing':
self.options.setAlgoType(self.AlgorithmType.DATA_SMOOTHING)
elif method == 'EnhancedDataSmoothing':
self.options.setAlgoType(self.AlgorithmType.ENHANCED_DATA_SMOOTHING)
elif method == 'LLS':
self.options.setAlgoType(self.AlgorithmType.LLS)
self.options.setNumberOfReSamplesForBootstrap(bootstraps)
# prepare the variable name to vector map
varsAsMap = JPackage("java").util.HashMap()
#print var
'''
util = JPackage("java.util")
al = util.ArrayList()
for item in var:
al.add(str(item))
varsAsMap.put('var',str(item))
'''
var_=[]
id_=[]
rid_=[]
degree_=[]
for i in range(len(var)):
var_.append(str(var[i]))
id_.append(str(ID[i]))
rid_.append(str(rID[i]))
degree_.append(str(degree[i]))
#print var1
varsAsMap.put('var',JArray(JString, 1)(var_))
adHocRDSDataset=self.Library.newRDSDataFile(JArray(JString,1)(id_), JArray(JString,1)(rid_), JArray(JString, 1)(degree_), varsAsMap)
#print adHocRDSDataset.getAttributeNames()
#print adHocRDSDataset.getCouponsSentOut()
#print adHocRDSDataset.getMissingDataString()
#print adHocRDSDataset.getRdsFile().getFileName()
# Prepare "var" subgroup
varSubgroup = JPackage("edu/cornell/rdsat/api/beans").Subgroup()
Jtype= self.SubgroupType.COMPLETE
varSubgroup.setType(Jtype)
varSubgroup.setVariable(JString("var"))
# gather subgroups into a list
variables = JPackage("java.util").ArrayList()
variables.add(varSubgroup)
partAnalysis = self.Library.partitionAnalysis(adHocRDSDataset, self.options)
# Explicitly set variable levels if specified
        #java.lang.System.out.println(JChar("H"))  # leftover debug output
if len(levels)>0:
varLevels = JPackage("java").util.HashSet()
for level in levels:
Jlevel = JPackage("java").lang.String(str(level))
#print Jlevel
varLevels.add(Jlevel)
partAnalysis.setAttributeLevels("var", varLevels)
#print "options.toString()", options.toString()
#print "variables", variables
#print partAnalysis.getAttributeLevels("var")
# perform partition analysis on subgroups
resultSet = partAnalysis.performAnalysis(variables)
#print 'HERE'
netSizes = resultSet.getNetworkSizes()
recResultset = resultSet.getRecruitmentResultset()
estResultset = resultSet.getEstimationResultset()
#print "AAAAAAAAAAAAAa"
est_table = resultSet.getEstimationResultset().getConfidenceInterval().getDataTable()
est_results=list()
for level in levels:
#print level
temp_list = list()
for i in range(4):
temp_list.append(est_table[level][i]) # Yongren originally had "temp_list.append(est_table[level][i-1])"
est_results.append(temp_list)
#Erase all variables.
varsAsMap = None
adHocRDSDataset = None
varSubgroup = None
Jtype = None
variables = None
varLevels = None
Jlevel = None
partAnalysis = None
netSizes = None
recResultset = None
estResultset = None
est_table = None
return resultSet, est_results
    def rdsat_aggregate(self, analyses, popsizes):
tables = JPackage("java").util.ArrayList()
for i in range(len(analyses)):
#tables = JPackage("edu.cornell.rdsat.api.util.AggregateEstimatesUtil$WeightedEstimationTable")
CI = analyses[i].getEstimationResultset().getConfidenceInterval()
tables.add(self.EST_Table(CI, JDouble(popsizes[i])))
resultSet = self.aggregator.aggregateEstimates("Arbitrary Title", tables, self.StrategyType)
est_table = resultSet.getAggregated().getDataTable()
est_results=list()
levels=2
for level in range(levels):
#print level
temp_list = list()
for i in range(4):
temp_list.append(est_table[level][i])
est_results.append(temp_list)
tables = None
CI = None
resultSet = None
est_table = None
temp_list = None
return est_results
##########################
if __name__ == "__main__":
startJVM("C:/Program Files/Java/jre6/bin/server/jvm.dll", "-Djava.class.path=C:/Program Files/RDSAT 7.1.38/lib/cloning-1.8.1.jar;C:/Program Files/RDSAT 7.1.38/lib/RDSAT-api.jar;C:/Program Files/RDSAT 7.1.38/lib/RDSAT-core.jar;C:/Program Files/RDSAT 7.1.38/lib/opencsv-2.1.jar;C:/Program Files/RDSAT 7.1.38/lib/commons-betwixt-0.8.jar;C:/Program Files/RDSAT 7.1.38/lib/objenesis-1.2.jar")
#prepare the data
ID_ = list()
rID_=list()
degree_=list()
var_=list()
f=open('C:/Users/pp286/Documents/jpype_test_data.txt','r')
for count, line in enumerate(f):
if count>0:
#print line.split()[1]
ID_.append(line.split()[0])
rID_.append(line.split()[1])
degree_.append(line.split()[2])
var_.append(line.split()[3])
## separate estimations
    rds = rdsat_jpype(confidence_alpha=.025)
    resultSet1, est1 = rds.rdsat_analyze(var=var_, ID=ID_, rID=rID_, degree=degree_,\
        levels=[1,2], method="EnhancedDataSmoothing", estimator='DualComponent', bootstraps=2500)
    resultSet2, est2 = rds.rdsat_analyze(var=var_, ID=ID_, rID=rID_, degree=degree_,\
        levels=[1,2], method="EnhancedDataSmoothing", estimator='DualComponent', bootstraps=2500)
## aggregating
analyses=[resultSet1, resultSet2]
popsizes=[10000, 15000]
    # The INCLUDE_MISSING strategy is already held by the class instance
    aggr_est_results = rds.rdsat_aggregate(analyses, popsizes)
##print out the separate estimates and aggregate estimates
print est1
print est2
print aggr_est_results
shutdownJVM()
| 2.28125 | 2 |
backend/run.py | kendricktan/quantum-game-theory | 0 | 12785754 |
from quantum_game_theory.logic import Game
GAME = '4-minority'
PROTOCOL = 'EWL'
game = Game(GAME, PROTOCOL)
print(f'Running game: {GAME}')
print('-'*25)
print('Options: X, Y, S, Z, H, T, W, Rz1, Ry1, Rz2')
print('Please type as a list, eg. W, Rz1, Z')
print('-'*25)
# Collect each player's gates (same behavior as four repeated input blocks)
all_states = []
for player in range(1, 5):
    print(f'Input player {player} gates:')
    state_input = input()
    all_states.append([x.strip() for x in state_input.split(',')])
results = game.play_game(all_states)
print('Game: ' + GAME)
print('Results:')
print(results)
| 3.359375 | 3 |
text_messaging/mms_example/mms.py | cerna/example_robot_programs | 1 | 12785755 | # imports
# re and datetime live on the base OS image, but twilio, dropbox, and cv2 must be installed every time a new docker container is started
# this may take a minute or two the first time you run this program after restarting your computer
import sh
import re
import datetime
from robot_command.rpl import *
# Setup required: Enter the account SID, auth token, and 'from' phone number from your Twilio account here:
account_sid = ""
auth_token = ""
from_phone_number = "+"
# Enter your 'to' (destination) phone number here:
to_phone_number = '+16088498381'
# Enter your Dropbox API Key here:
dropbox_api_key = ""
try:
from twilio.rest import Client
except:
notify("The first time this program is run it must import three packages (Twilio, Dropbox, OpenCV). "
"This may take a minute or two depending on your internet connection. Subsequent program runs will not experience this delay")
sh.sudo.pip3.install("twilio")
from twilio.rest import Client
try:
import dropbox
except:
sh.sudo.pip3.install("dropbox")
import dropbox
try:
import cv2
except:
sh.sudo.pip3.install("opencv-python")
import cv2
def take_and_send_image(filename=None):
    if filename is None:
filename = "robot_photo.jpg"
take_snapshot(filename)
url = upload_file_to_dropbox(filename)
send_mms(url)
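# Example invocation (illustrative; assumes the Twilio and Dropbox credentials
# above have been filled in; the filename is hypothetical):
# take_and_send_image("inspection.jpg")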
def take_snapshot(filename):
videoCaptureObject = cv2.VideoCapture(0)
    # Read a single frame and write it to disk
    ret, frame = videoCaptureObject.read()
    cv2.imwrite(filename, frame)
    videoCaptureObject.release()
    cv2.destroyAllWindows()
def upload_file_to_dropbox(filename):
# FIXME - look for an intelligent path, not CWD
# add timestamp
newfilename = "/robot_images/" + str(datetime.datetime.today().strftime('%d-%m-%Y-%H-%M-%S')) + "_" + filename
file_to_upload = newfilename
print("Begin uploading " + file_to_upload + " to DropBox")
# Create a dropbox object using an API v2 key
d = dropbox.Dropbox(dropbox_api_key)
# open the file and upload it
with open(filename, "rb") as f:
d.files_upload(f.read(), file_to_upload)
# create a shared link
link = d.sharing_create_shared_link(file_to_upload)
# url which can be shared
url = link.url
    # direct download link, obtained by replacing ?dl=0 with ?raw=1
dl_url = re.sub(r"\?dl\=0", "?raw=1", url)
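    # e.g. (hypothetical link):
    #   https://www.dropbox.com/s/abc123/robot_photo.jpg?dl=0
    #   -> https://www.dropbox.com/s/abc123/robot_photo.jpg?raw=1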
    print(dl_url)
return dl_url
def send_mms(url):
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body='Here is a message from your ZA6 robot',
from_= from_phone_number,
media_url=[url],
to= to_phone_number
)
    print(message.sid)
| 2.828125 | 3 |
pythonlearn/riverpoll.py | kuljotbiring/Python | 0 | 12785756 | # Make a dictionary containing three major rivers and the country
# each river runs through. One key-value pair might be 'nile': 'egypt'.
major_rivers = {'nile': 'egypt', 'amazon': 'brazil', 'yangtze': 'china'}
# • Use a loop to print a sentence about each river, such as The Nile runs
# through Egypt.
for river, country in major_rivers.items():
    print(f"\nThe {river.title()} runs through {country.title()}.")
# • Use a loop to print the name of each river included in the dictionary.
print("\nThe major rivers are:")
for river in major_rivers.keys():
    print(river.title())
# • Use a loop to print the name of each country included in the dictionary.
print("\nThe countries the major world rivers are located in are:")
for country in major_rivers.values():
    print(country.title())
# Use the code in favorite_languages.py (page 97).
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python',
}
# • Make a list of people who should take the favorite languages poll. Include
# some names that are already in the dictionary and some that are not.
voters = ['judy', 'jen', 'stacy', 'rick', 'frank', 'sarah', 'joe']
# • Loop through the list of people who should take the poll. If they have
# already taken the poll, print a message thanking them for responding.
# If they have not yet taken the poll, print a message inviting them to take
# the poll.
for name in voters:
if name not in favorite_languages.keys():
print(f"{name.title()}, please take the poll ASAP")
else:
print(f"{name.title()}, thank you for taking the poll")
| 4.6875 | 5 |
kios/tests/test_operations.py | apleshakov/kios | 0 | 12785757 | #
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from unittest import TestCase
from unittest.mock import patch, call
from kios import config
from kios.data import EXECUTABLE_NAME, NETWORK_PROTOCOL, TRANSPORT_PROTOCOL, PORT_NUMBER, NetworkProtocol, \
TransportProtocol
from kios.exception import DoRollback, UnexpectedLineError
from kios.operation import find_and_save_ports, save_ports_from_data_file
from .helper import get_test_data, TestOperationControl, get_test_file_path, patch_config_app_platform
_data_se = [get_test_data('win_netstat.txt'), get_test_data('win_netstat2.txt')]
path_config_data_file = patch('kios.config.data_file', new=get_test_file_path('win_netstat.txt'))
part1 = [{EXECUTABLE_NAME: 'app1.exe', NETWORK_PROTOCOL: NetworkProtocol.IPv4,
TRANSPORT_PROTOCOL: TransportProtocol.TCP, PORT_NUMBER: 1},
{EXECUTABLE_NAME: None, NETWORK_PROTOCOL: NetworkProtocol.IPv4,
TRANSPORT_PROTOCOL: TransportProtocol.TCP, PORT_NUMBER: 2},
{EXECUTABLE_NAME: 'app1.exe', NETWORK_PROTOCOL: NetworkProtocol.IPv6,
TRANSPORT_PROTOCOL: TransportProtocol.TCP, PORT_NUMBER: 1}]
part2 = [{EXECUTABLE_NAME: None, NETWORK_PROTOCOL: NetworkProtocol.IPv6,
TRANSPORT_PROTOCOL: TransportProtocol.TCP, PORT_NUMBER: 2},
{EXECUTABLE_NAME: 'app2.exe', NETWORK_PROTOCOL: NetworkProtocol.IPv4,
TRANSPORT_PROTOCOL: TransportProtocol.UDP, PORT_NUMBER: 1},
{EXECUTABLE_NAME: None, NETWORK_PROTOCOL: NetworkProtocol.IPv4,
TRANSPORT_PROTOCOL: TransportProtocol.UDP, PORT_NUMBER: 2}]
part3 = [{EXECUTABLE_NAME: 'app1.exe', NETWORK_PROTOCOL: NetworkProtocol.IPv6,
TRANSPORT_PROTOCOL: TransportProtocol.UDP, PORT_NUMBER: 1},
{EXECUTABLE_NAME: None, NETWORK_PROTOCOL: NetworkProtocol.IPv6,
TRANSPORT_PROTOCOL: TransportProtocol.UDP, PORT_NUMBER: 2}]
port_data1 = [*part1, *part2, *part3]
port_data2 = [{EXECUTABLE_NAME: 'app3.exe', NETWORK_PROTOCOL: NetworkProtocol.IPv6,
TRANSPORT_PROTOCOL: TransportProtocol.UDP, PORT_NUMBER: 3}]
@patch_config_app_platform
@patch('kios.factory.persistence_manager', autospec=True)
class OperationsTestCase(TestCase):
@patch('kios.operation.time', autospec=True, side_effect=[0, 1, 6, 10, 0, 1, 6, 10])
@patch('kios.operation.sleep', autospec=True)
@patch('kios.factory.data_source', autospec=True, **{'return_value.get_port_data.side_effect': _data_se})
def test_find_and_save_ports_behavior(self, data_source, sleep, time, persistence_manager):
find_and_save_ports(TestOperationControl())
sleep.assert_called_once_with(config.port_scan_wake_up_interval)
self.assertEqual(persistence_manager.return_value.save_port_data.call_count, 2)
persistence_manager.return_value.save_port_data.assert_has_calls([call(port_data1, commit=True),
call(port_data2, commit=True)])
self.assertEqual(data_source.return_value.get_port_data.call_count, 2)
@path_config_data_file
def test_save_ports_from_data_file_behavior(self, persistence_manager):
save_ports_from_data_file(TestOperationControl())
self.assertEqual(persistence_manager.return_value.save_port_data.call_count, 1)
persistence_manager.return_value.save_port_data.assert_has_calls([call(port_data1)])
@path_config_data_file
@patch('kios.config.import_batch_size', new=3)
def test_save_ports_from_data_file_behavior2(self, persistence_manager):
save_ports_from_data_file(TestOperationControl(3))
self.assertEqual(persistence_manager.return_value.save_port_data.call_count, 3)
persistence_manager.return_value.save_port_data.assert_has_calls([call(part1), call(part2), call(part3)])
@path_config_data_file
@patch('kios.config.import_batch_size', new=3)
def test_save_ports_from_data_file_behavior3(self, persistence_manager):
with self.assertRaises(DoRollback):
save_ports_from_data_file(TestOperationControl(2))
self.assertEqual(persistence_manager.return_value.save_port_data.call_count, 2)
persistence_manager.return_value.save_port_data.assert_has_calls([call(part1), call(part2)])
@patch('kios.config.data_file', new=None)
    def test_save_ports_from_data_file_behavior4(self, persistence_manager):
with self.assertRaises(RuntimeError):
save_ports_from_data_file(TestOperationControl())
persistence_manager.return_value.save_port_data.assert_not_called()
@patch('kios.config.data_file', new=get_test_file_path('win_netstat3.txt'))
    def test_save_ports_from_data_file_behavior5(self, persistence_manager):
with self.assertRaises(UnexpectedLineError) as c:
save_ports_from_data_file(TestOperationControl())
persistence_manager.return_value.save_port_data.assert_not_called()
self.assertEqual(c.exception.line_no, 6)
self.assertEqual(c.exception.line, ' [app3.exe]')
| 1.804688 | 2 |
tonclient/test/test_crypto.py | move-ton/ton-client-py | 28 | 12785758 |
import base64
import os
import unittest
from tonclient.errors import TonException
from tonclient.objects import AppSigningBox, AppEncryptionBox
from tonclient.test.helpers import async_core_client, sync_core_client, \
SAMPLES_DIR
from tonclient.types import KeyPair, MnemonicDictionary, ParamsOfHash, \
ParamsOfHDKeyXPrvFromMnemonic, ParamsOfHDKeySecretFromXPrv, \
ParamsOfHDKeyPublicFromXPrv, ParamsOfHDKeyDeriveFromXPrv, \
ParamsOfHDKeyDeriveFromXPrvPath, ParamsOfConvertPublicKeyToTonSafeFormat, \
ParamsOfSign, ParamsOfVerifySignature, ParamsOfModularPower, \
ParamsOfFactorize, ParamsOfTonCrc16, ParamsOfGenerateRandomBytes, \
ParamsOfMnemonicWords, ParamsOfMnemonicFromRandom, \
ParamsOfMnemonicFromEntropy, ParamsOfMnemonicVerify, \
ParamsOfMnemonicDeriveSignKeys, ParamsOfNaclSignKeyPairFromSecret, \
ParamsOfNaclSign, ParamsOfNaclSignOpen, ParamsOfNaclBoxKeyPairFromSecret, \
ParamsOfNaclBox, ParamsOfNaclBoxOpen, ParamsOfNaclSecretBox, \
ParamsOfNaclSecretBoxOpen, ParamsOfScrypt, ParamsOfChaCha20, \
ParamsOfSigningBoxSign, ParamsOfAppRequest, ParamsOfAppSigningBox, \
ParamsOfResolveAppRequest, ResultOfAppSigningBox, AppRequestResult, \
ParamsOfNaclSignDetachedVerify, ParamsOfEncryptionBoxGetInfo, \
ParamsOfAppEncryptionBox, ResultOfAppEncryptionBox, EncryptionBoxInfo, \
ParamsOfEncryptionBoxEncrypt, ParamsOfEncryptionBoxDecrypt, \
ParamsOfCreateEncryptionBox, EncryptionAlgorithm, CipherMode
class TestTonCryptoAsyncCore(unittest.TestCase):
def setUp(self) -> None:
self.mnemonic = 'abuse boss fly battle rubber wasp afraid hamster guide essence vibrant tattoo'
self.master_xprv = '<KEY>jWJ8NY9e3GkRoistUjjcpHNsGBUv94istDPXvqGNuWpC'
def test_sha256(self):
params = ParamsOfHash(
data=base64.b64encode('TON is our future'.encode()).decode())
result = async_core_client.crypto.sha256(params=params)
self.assertEqual(
'1e7fd5ec201652b5375e5edf3e86d0513394d2c2004dd506415abf0578261951',
result.hash)
params.data = base64.b64encode(
bytes.fromhex('544f4e206973206f757220667574757265')).decode()
result = async_core_client.crypto.sha256(params=params)
self.assertEqual(
'1e7fd5ec201652b5375e5edf3e86d0513394d2c2004dd506415abf0578261951',
result.hash)
params.data = 'VE9OIGlzIG91ciBmdXR1cmU='
result = async_core_client.crypto.sha256(params=params)
self.assertEqual(
'1e7fd5ec201652b5375e5edf3e86d0513394d2c2004dd506415abf0578261951',
result.hash)
def test_sha512(self):
data = base64.b64encode('TON is our future'.encode()).decode()
params = ParamsOfHash(data=data)
result = async_core_client.crypto.sha512(params=params)
self.assertEqual(
'4c52dd4cefc68319bac5e97c1f0d18ae8194fb0dd8d9e090ba8376834a0756175a9a736d1e69cb1a58d25c3d554b02a2b8ed9c3ae5cbeeccc3277746a363a434',
result.hash)
def test_hdkey_xprv_from_mnemonic(self):
params = ParamsOfHDKeyXPrvFromMnemonic(phrase=self.mnemonic)
result = async_core_client.crypto.hdkey_xprv_from_mnemonic(
params=params)
self.assertEqual(self.master_xprv, result.xprv)
with self.assertRaises(TonException):
params.phrase = 0
async_core_client.crypto.hdkey_xprv_from_mnemonic(params=params)
def test_hdkey_secret_from_xprv(self):
params = ParamsOfHDKeySecretFromXPrv(xprv=self.master_xprv)
result = async_core_client.crypto.hdkey_secret_from_xprv(params=params)
self.assertEqual(
'0c91e53128fa4d67589d63a6c44049c1068ec28a63069a55ca3de30c57f8b365',
result.secret)
with self.assertRaises(TonException):
params.xprv = ''
async_core_client.crypto.hdkey_secret_from_xprv(params=params)
def test_hdkey_public_from_xprv(self):
params = ParamsOfHDKeyPublicFromXPrv(xprv=self.master_xprv)
result = async_core_client.crypto.hdkey_public_from_xprv(params=params)
self.assertEqual(
'7b70008d0c40992283d488b1046739cf827afeabf647a5f07c4ad1e7e45a6f89',
result.public)
def test_hdkey_derive_from_xprv(self):
params = ParamsOfHDKeyDeriveFromXPrv(
xprv=self.master_xprv, child_index=0, hardened=False)
result = async_core_client.crypto.hdkey_derive_from_xprv(params=params)
self.assertEqual(
'<KEY>',
result.xprv)
with self.assertRaises(TonException):
params.child_index = -1
async_core_client.crypto.hdkey_derive_from_xprv(params=params)
def test_hdkey_derive_from_xprv_path(self):
params = ParamsOfHDKeyDeriveFromXPrvPath(
xprv=self.master_xprv, path="m/44'/60'/0'/0'")
result = async_core_client.crypto.hdkey_derive_from_xprv_path(
params=params)
self.assertEqual(
'<KEY>',
result.xprv)
with self.assertRaises(TonException):
params.path = 'm/'
async_core_client.crypto.hdkey_derive_from_xprv_path(params=params)
def test_convert_public_key_to_ton_safe_format(self):
params = ParamsOfConvertPublicKeyToTonSafeFormat(
public_key='06117f59ade83e097e0fb33e5d29e8735bda82b3bf78a015542aaa853bb69600')
safe = async_core_client.crypto.convert_public_key_to_ton_safe_format(
params=params)
self.assertEqual(
'PuYGEX9Zreg-CX4Psz5dKehzW9qCs794oBVUKqqFO7aWAOTD',
safe.ton_public_key)
with self.assertRaises(TonException):
params.public_key = None
async_core_client.crypto.convert_public_key_to_ton_safe_format(
params=params)
def test_generate_random_sign_keys(self):
keypair = async_core_client.crypto.generate_random_sign_keys()
self.assertEqual(64, len(keypair.public))
self.assertEqual(64, len(keypair.secret))
self.assertNotEqual(keypair.secret, keypair.public)
def test_sign_and_verify(self):
unsigned = base64.b64encode('Test Message'.encode()).decode()
keypair = KeyPair(
public='<KEY>',
secret='56b6a77093d6fdf14e593f36275d872d75de5b341942376b2a08759f3cbae78f')
# Sign message
sign_params = ParamsOfSign(unsigned=unsigned, keys=keypair)
signed = async_core_client.crypto.sign(params=sign_params)
self.assertEqual(
'+wz+QO6l1slgZS5s65BNqKcu4vz24FCJz4NSAxef9lu0jFfs8x3PzSZRC+pn5k8+aJi3xYMA3BQzglQmjK3hA1Rlc3QgTWVzc2FnZQ==',
signed.signed)
self.assertEqual(
'fb0cfe40eea5d6c960652e6ceb904da8a72ee2fcf6e05089cf835203179ff65bb48c57ecf31dcfcd26510bea67e64f3e6898b7c58300dc14338254268cade103',
signed.signature)
# Verify signature
verify_params = ParamsOfVerifySignature(
signed=signed.signed, public=keypair.public)
verified = async_core_client.crypto.verify_signature(
params=verify_params)
self.assertEqual(unsigned, verified.unsigned)
self.assertEqual(
base64.b64decode(unsigned.encode()),
base64.b64decode(verified.unsigned.encode()))
with self.assertRaises(TonException):
sign_params.keys = KeyPair(public='1', secret='2')
async_core_client.crypto.sign(params=sign_params)
with self.assertRaises(TonException):
verify_params.signed = 'simple'
async_core_client.crypto.verify_signature(params=verify_params)
def test_modular_power(self):
params = ParamsOfModularPower(
base='0123456789ABCDEF', exponent='0123', modulus='01234567')
result = async_core_client.crypto.modular_power(params=params)
self.assertEqual('63bfdf', result.modular_power)
with self.assertRaises(TonException):
params.base = '1'
params.modulus = '0.2'
async_core_client.crypto.modular_power(params=params)
def test_factorize(self):
params = ParamsOfFactorize(composite='17ED48941A08F981')
result = async_core_client.crypto.factorize(params=params)
self.assertIsInstance(result.factors, list)
self.assertEqual('494C553B', result.factors[0])
self.assertEqual('53911073', result.factors[1])
with self.assertRaises(TonException):
params.composite = 'a3'
async_core_client.crypto.factorize(params=params)
def test_ton_crc16(self):
params = ParamsOfTonCrc16(
data=base64.b64encode(bytes.fromhex('0123456789abcdef')).decode())
result = async_core_client.crypto.ton_crc16(params=params)
self.assertEqual(43349, result.crc)
with self.assertRaises(TonException):
params.data = '0=='
async_core_client.crypto.ton_crc16(params=params)
def test_generate_random_bytes(self):
params = ParamsOfGenerateRandomBytes(length=32)
result = async_core_client.crypto.generate_random_bytes(params=params)
self.assertEqual(44, len(result.bytes))
bts = base64.b64decode(result.bytes.encode())
self.assertEqual(32, len(bts))
with self.assertRaises(TonException):
params.length = '1'
async_core_client.crypto.generate_random_bytes(params=params)
def test_mnemonic_words(self):
params = ParamsOfMnemonicWords()
result = async_core_client.crypto.mnemonic_words(params=params)
self.assertEqual(2048, len(result.words.split(' ')))
with self.assertRaises(TonException):
params.dictionary = 100
async_core_client.crypto.mnemonic_words(params=params)
def test_mnemonic_from_random(self):
params = ParamsOfMnemonicFromRandom()
result = async_core_client.crypto.mnemonic_from_random(params=params)
self.assertEqual(12, len(result.phrase.split(' ')))
for d in range(1, 8):
for count in [12, 15, 18, 21, 24]:
params.dictionary = list(MnemonicDictionary)[d]
params.word_count = count
result = async_core_client.crypto.mnemonic_from_random(
params=params)
self.assertEqual(count, len(result.phrase.split(' ')))
with self.assertRaises(TonException):
params.word_count = 0
async_core_client.crypto.mnemonic_from_random(params=params)
def test_mnemonic_from_entropy(self):
params = ParamsOfMnemonicFromEntropy(
entropy='00112233445566778899AABBCCDDEEFF')
result = async_core_client.crypto.mnemonic_from_entropy(params=params)
self.assertEqual(
'abandon math mimic master filter design carbon crystal rookie group knife young',
result.phrase)
with self.assertRaises(TonException):
params.entropy = '01'
async_core_client.crypto.mnemonic_from_entropy(params=params)
def test_mnemonic_verify(self):
m_params = ParamsOfMnemonicFromRandom()
result = async_core_client.crypto.mnemonic_from_random(params=m_params)
v_params = ParamsOfMnemonicVerify(phrase=result.phrase)
result = async_core_client.crypto.mnemonic_verify(params=v_params)
self.assertEqual(True, result.valid)
for d in range(1, 8):
for count in [12, 15, 18, 21, 24]:
m_params.dictionary = list(MnemonicDictionary)[d]
m_params.word_count = count
mnemonic = async_core_client.crypto.mnemonic_from_random(
params=m_params)
v_params.phrase = mnemonic.phrase
v_params.dictionary = m_params.dictionary
v_params.word_count = m_params.word_count
result = async_core_client.crypto.mnemonic_verify(
params=v_params)
self.assertEqual(True, result.valid)
v_params = ParamsOfMnemonicVerify(phrase='one')
result = async_core_client.crypto.mnemonic_verify(params=v_params)
self.assertEqual(False, result.valid)
def test_mnemonic_derive_sign_keys(self):
# Derive from random phrase
params = ParamsOfMnemonicFromRandom()
mnemonic = async_core_client.crypto.mnemonic_from_random(params=params)
params = ParamsOfMnemonicDeriveSignKeys(phrase=mnemonic.phrase)
keypair = async_core_client.crypto.mnemonic_derive_sign_keys(
params=params)
self.assertIsInstance(keypair, KeyPair)
# Derive from provided phrase and convert public to ton_safe
phrase = 'unit follow zone decline glare flower crisp vocal adapt magic much mesh cherry teach mechanic rain float vicious solution assume hedgehog rail sort chuckle'
derive_params = ParamsOfMnemonicDeriveSignKeys(
phrase=phrase, dictionary=MnemonicDictionary.TON, word_count=24)
keypair = async_core_client.crypto.mnemonic_derive_sign_keys(
params=derive_params)
convert_params = ParamsOfConvertPublicKeyToTonSafeFormat(
public_key=keypair.public)
result = async_core_client.crypto.convert_public_key_to_ton_safe_format(
params=convert_params)
self.assertEqual(
'<KEY>',
result.ton_public_key)
# Derive with path
derive_params = ParamsOfMnemonicDeriveSignKeys(
phrase=phrase, path='m', dictionary=MnemonicDictionary.TON,
word_count=24)
keypair = async_core_client.crypto.mnemonic_derive_sign_keys(
params=derive_params)
convert_params = ParamsOfConvertPublicKeyToTonSafeFormat(
public_key=keypair.public)
result = async_core_client.crypto.convert_public_key_to_ton_safe_format(
params=convert_params)
self.assertEqual(
'PubDdJkMyss2qHywFuVP1vzww0TpsLxnRNnbifTCcu-XEgW0',
result.ton_public_key)
# Derive from 12-word phrase
phrase = 'abandon math mimic master filter design carbon crystal rookie group knife young'
derive_params = ParamsOfMnemonicDeriveSignKeys(phrase=phrase)
keypair = async_core_client.crypto.mnemonic_derive_sign_keys(
params=derive_params)
convert_params = ParamsOfConvertPublicKeyToTonSafeFormat(
public_key=keypair.public)
result = async_core_client.crypto.convert_public_key_to_ton_safe_format(
params=convert_params)
self.assertEqual(
'<KEY>',
result.ton_public_key)
# Derive from mnemonic from entropy
params = ParamsOfMnemonicFromEntropy(
entropy='2199ebe996f14d9e4e2595113ad1e627')
mnemonic = async_core_client.crypto.mnemonic_from_entropy(
params=params)
derive_params = ParamsOfMnemonicDeriveSignKeys(phrase=mnemonic.phrase)
keypair = async_core_client.crypto.mnemonic_derive_sign_keys(
params=derive_params)
convert_params = ParamsOfConvertPublicKeyToTonSafeFormat(
public_key=keypair.public)
result = async_core_client.crypto.convert_public_key_to_ton_safe_format(
params=convert_params)
self.assertEqual(
'PuZdw_KyXIzo8IksTrERN3_WoAoYTyK7OvM-yaLk711sUIB3',
result.ton_public_key)
def test_nacl_sign_keypair_from_secret_key(self):
params = ParamsOfNaclSignKeyPairFromSecret(
secret='<KEY>')
keypair = async_core_client.crypto.nacl_sign_keypair_from_secret_key(
params=params)
self.assertEqual(
'<KEY>',
keypair.public)
with self.assertRaises(TonException):
params.secret = '0a'
async_core_client.crypto.nacl_sign_keypair_from_secret_key(
params=params)
def test_nacl_sign(self):
# Nacl sign data
unsigned = base64.b64encode('Test Message'.encode()).decode()
secret = '<KEY>'
params = ParamsOfNaclSign(unsigned=unsigned, secret=secret)
signed = async_core_client.crypto.nacl_sign(params=params)
self.assertEqual(
'+wz+QO6l1slgZS5s65BNqKcu4vz24FCJz4NSAxef9lu0jFfs8x3PzSZRC+pn5k8+aJi3xYMA3BQzglQmjK3hA1Rlc3QgTWVzc2FnZQ==',
signed.signed)
# Nacl sign open
params = ParamsOfNaclSignOpen(
signed=signed.signed,
public='<KEY>')
result = async_core_client.crypto.nacl_sign_open(params=params)
self.assertEqual(unsigned, result.unsigned)
# Nacl sign detached
params = ParamsOfNaclSign(unsigned=unsigned, secret=secret)
result = async_core_client.crypto.nacl_sign_detached(params=params)
self.assertEqual(
'fb0cfe40eea5d6c960652e6ceb904da8a72ee2fcf6e05089cf835203179ff65bb48c57ecf31dcfcd26510bea67e64f3e6898b7c58300dc14338254268cade103',
result.signature)
# Nacl sign detached verify signature
params = ParamsOfNaclSignDetachedVerify(
unsigned=unsigned, signature=result.signature,
public='<KEY>')
result = async_core_client.crypto.nacl_sign_detached_verify(
params=params)
self.assertEqual(True, result.succeeded)
with self.assertRaises(TonException):
params.secret = '0=='
async_core_client.crypto.nacl_sign(params=params)
with self.assertRaises(TonException):
params = ParamsOfNaclSignOpen(signed=signed.signed, public='0x00')
async_core_client.crypto.nacl_sign_open(params=params)
with self.assertRaises(TonException):
params = ParamsOfNaclSign(unsigned='100', secret=secret)
params.unsigned = 100
async_core_client.crypto.nacl_sign_detached(params=params)
def test_nacl_box_keypair(self):
keypair = async_core_client.crypto.nacl_box_keypair()
self.assertEqual(64, len(keypair.public))
self.assertEqual(64, len(keypair.secret))
self.assertNotEqual(keypair.public, keypair.secret)
def test_nacl_box_keypair_from_secret_key(self):
params = ParamsOfNaclBoxKeyPairFromSecret(
secret='<KEY>')
keypair = async_core_client.crypto.nacl_box_keypair_from_secret_key(
params=params)
self.assertEqual(
'a53b003d3ffc1e159355cb37332d67fc235a7feb6381e36c803274074dc3933a',
keypair.public)
with self.assertRaises(TonException):
params.secret = '0x00'
async_core_client.crypto.nacl_box_keypair_from_secret_key(
params=params)
def test_nacl_box_and_open(self):
decrypted = base64.b64encode('Test Message'.encode()).decode()
nonce = 'cd7f99924bf422544046e83595dd5803f17536f5c9a11746'
their_public = 'c4e2d9fe6a6baf8d1812b799856ef2a306291be7a7024837ad33a8530db79c6b'
secret = '<KEY>'
# Create nacl box
box_params = ParamsOfNaclBox(
decrypted=decrypted, nonce=nonce, their_public=their_public,
secret=secret)
box = async_core_client.crypto.nacl_box(params=box_params)
self.assertEqual(
'li4XED4kx/pjQ2qdP0eR2d/K30uN94voNADxwA==', box.encrypted)
# Open nacl box
box_open_params = ParamsOfNaclBoxOpen(
encrypted=box.encrypted, nonce=nonce, their_public=their_public,
secret=secret)
opened = async_core_client.crypto.nacl_box_open(params=box_open_params)
self.assertEqual(decrypted, opened.decrypted)
with self.assertRaises(TonException):
box_params.decrypted = '0x00'
box_params.their_public = ''
async_core_client.crypto.nacl_box(params=box_params)
with self.assertRaises(TonException):
box_open_params.secret = ''
async_core_client.crypto.nacl_box_open(params=box_open_params)
def test_nacl_secret_box_and_open(self):
decrypted = base64.b64encode(
'Test Message \' \" {} $=,?'.encode()).decode()
nonce = '2a33564717595ebe53d91a785b9e068aba625c8453a76e45'
key = '<KEY>'
# Create nacl secret box
box_params = ParamsOfNaclSecretBox(
decrypted=decrypted, nonce=nonce, key=key)
box = async_core_client.crypto.nacl_secret_box(params=box_params)
self.assertEqual(
'I6QZteixTdul0K0ldT+/U4QF0t/C1Q8RGyzQ2Hl7886DpW3/DK5ijg==',
box.encrypted)
# Open nacl secret box
box_open_params = ParamsOfNaclSecretBoxOpen(
encrypted=box.encrypted, nonce=nonce, key=key)
opened = async_core_client.crypto.nacl_secret_box_open(
params=box_open_params)
self.assertEqual(decrypted, opened.decrypted)
with self.assertRaises(TonException):
box_params.decrypted = '0x00'
box_params.key = None
async_core_client.crypto.nacl_secret_box(params=box_params)
with self.assertRaises(TonException):
box_open_params.key = ''
async_core_client.crypto.nacl_secret_box_open(
params=box_open_params)
def test_scrypt(self):
password = base64.b64encode('<PASSWORD>'.encode()).decode()
salt = base64.b64encode('Test Salt'.encode()).decode()
params = ParamsOfScrypt(
password=password, salt=salt, log_n=10, r=8, p=16, dk_len=64)
result = async_core_client.crypto.scrypt(params=params)
self.assertEqual(
'52e7fcf91356eca55fc5d52f16f5d777e3521f54e3c570c9bbb7df58fc15add73994e5db42be368de7ebed93c9d4f21f9be7cc453358d734b04a057d0ed3626d',
result.key)
with self.assertRaises(TonException):
params.dk_len = 0
async_core_client.crypto.scrypt(params=params)
def test_chacha20(self):
key = '01' * 32
nonce = 'ff' * 12
data = base64.b64encode(b'Message').decode()
params = ParamsOfChaCha20(data=data, key=key, nonce=nonce)
encrypted = async_core_client.crypto.chacha20(params=params)
self.assertEqual('w5QOGsJodQ==', encrypted.data)
params.data = encrypted.data
decrypted = async_core_client.crypto.chacha20(params=params)
self.assertEqual(data, decrypted.data)
def test_signing_box(self):
keypair = async_core_client.crypto.generate_random_sign_keys()
# Create handle
signing_box = async_core_client.crypto.get_signing_box(params=keypair)
self.assertIsInstance(signing_box.handle, int)
# Get public key from box
result = async_core_client.crypto.signing_box_get_public_key(
params=signing_box)
self.assertEqual(keypair.public, result.pubkey)
# Sign with box
message = base64.b64encode(b'Sign with box').decode()
params = ParamsOfSigningBoxSign(
signing_box=signing_box.handle, unsigned=message)
box_result = async_core_client.crypto.signing_box_sign(params=params)
sign_params = ParamsOfSign(unsigned=message, keys=keypair)
sign_result = async_core_client.crypto.sign(params=sign_params)
self.assertEqual(sign_result.signature, box_result.signature)
# Remove signing box
async_core_client.crypto.remove_signing_box(params=signing_box)
def test_register_signing_box(self):
from concurrent.futures import ThreadPoolExecutor
keys = async_core_client.crypto.generate_random_sign_keys()
keys_box_handle = async_core_client.crypto.get_signing_box(params=keys)
def __callback(response_data, *args):
request = ParamsOfAppRequest(**response_data)
box_params = ParamsOfAppSigningBox.from_dict(
data=request.request_data)
box_result = None
if isinstance(box_params, ParamsOfAppSigningBox.GetPublicKey):
# Run method and wait for result
with ThreadPoolExecutor() as executor:
future = executor.submit(
async_core_client.crypto.signing_box_get_public_key,
params=keys_box_handle)
_result = future.result()
# Resolve params
box_result = ResultOfAppSigningBox.GetPublicKey(
public_key=_result.pubkey)
if isinstance(box_params, ParamsOfAppSigningBox.Sign):
# Run method and wait for result
params = ParamsOfSigningBoxSign(
signing_box=keys_box_handle.handle,
unsigned=box_params.unsigned)
with ThreadPoolExecutor() as executor:
future = executor.submit(
async_core_client.crypto.signing_box_sign,
params=params)
_result = future.result()
# Resolve params
box_result = ResultOfAppSigningBox.Sign(
signature=_result.signature)
# Create resolve app request params
request_result = AppRequestResult.Ok(
result=box_result.dict)
resolve_params = ParamsOfResolveAppRequest(
app_request_id=request.app_request_id,
result=request_result)
with ThreadPoolExecutor() as executor:
future = executor.submit(
async_core_client.resolve_app_request,
params=resolve_params)
future.result()
# Get external signing box
external_box = async_core_client.crypto.register_signing_box(
callback=__callback)
# Request box public key
box_pubkey = async_core_client.crypto.signing_box_get_public_key(
params=external_box)
self.assertEqual(keys.public, box_pubkey.pubkey)
# Get signature from signing box
unsigned = base64.b64encode(b'Test Message').decode()
sign_params = ParamsOfSigningBoxSign(
signing_box=external_box.handle, unsigned=unsigned)
box_sign = async_core_client.crypto.signing_box_sign(
params=sign_params)
# Get signature by keys
sign_params = ParamsOfSign(unsigned=unsigned, keys=keys)
keys_sign = async_core_client.crypto.sign(params=sign_params)
self.assertEqual(keys_sign.signature, box_sign.signature)
async_core_client.crypto.remove_signing_box(params=external_box)
def test_register_signing_box_app_object(self):
class TestAppSigningBox(AppSigningBox):
"""
AppSigningBox implementation class.
            Here we pass `box_handle` as an init argument only for testing
            purposes; in the real world it would use its own keys.
"""
def __init__(self, client, box_handle):
super(TestAppSigningBox, self).__init__(client=client)
self.box_handle = box_handle
def perform_get_public_key(self) -> str:
result = self.client.crypto.signing_box_get_public_key(
params=self.box_handle)
return result.pubkey
def perform_sign(self, params: ParamsOfAppSigningBox.Sign) -> str:
params = ParamsOfSigningBoxSign(
signing_box=self.box_handle.handle,
unsigned=params.unsigned)
result = self.client.crypto.signing_box_sign(params=params)
return result.signature
keys = async_core_client.crypto.generate_random_sign_keys()
keys_box_handle = async_core_client.crypto.get_signing_box(params=keys)
app_signin_box = TestAppSigningBox(
client=async_core_client, box_handle=keys_box_handle)
# Get external signing box
external_box = async_core_client.crypto.register_signing_box(
callback=app_signin_box.dispatcher)
# Request box public key
box_pubkey = async_core_client.crypto.signing_box_get_public_key(
params=external_box)
self.assertEqual(keys.public, box_pubkey.pubkey)
# Get signature from signing box
unsigned = base64.b64encode(b'Test Message').decode()
sign_params = ParamsOfSigningBoxSign(
signing_box=external_box.handle, unsigned=unsigned)
box_sign = async_core_client.crypto.signing_box_sign(
params=sign_params)
# Get signature by keys
sign_params = ParamsOfSign(unsigned=unsigned, keys=keys)
keys_sign = async_core_client.crypto.sign(params=sign_params)
self.assertEqual(keys_sign.signature, box_sign.signature)
async_core_client.crypto.remove_signing_box(params=external_box)
def test_encryption_box(self):
from concurrent.futures import ThreadPoolExecutor
def __callback(response_data, *args):
request = ParamsOfAppRequest(**response_data)
box_params = ParamsOfAppEncryptionBox.from_dict(
data=request.request_data)
box_result = None
if isinstance(box_params, ParamsOfAppEncryptionBox.GetInfo):
_info = EncryptionBoxInfo(algorithm='duplicator')
box_result = ResultOfAppEncryptionBox.GetInfo(info=_info)
if isinstance(box_params, ParamsOfAppEncryptionBox.Encrypt):
data = box_params.data * 2
box_result = ResultOfAppEncryptionBox.Encrypt(data=data)
if isinstance(box_params, ParamsOfAppEncryptionBox.Decrypt):
end = int(len(box_params.data) / 2)
data = box_params.data[:end]
box_result = ResultOfAppEncryptionBox.Decrypt(data=data)
# Create resolve app request params
request_result = AppRequestResult.Ok(
result=box_result.dict)
resolve_params = ParamsOfResolveAppRequest(
app_request_id=request.app_request_id,
result=request_result)
with ThreadPoolExecutor() as executor:
future = executor.submit(
async_core_client.resolve_app_request,
params=resolve_params)
future.result()
# Register box
box = async_core_client.crypto.register_encryption_box(
callback=__callback)
# Get info
info_result = async_core_client.crypto.encryption_box_get_info(
params=ParamsOfEncryptionBoxGetInfo(encryption_box=box.handle))
self.assertEqual(info_result.info.algorithm, 'duplicator')
# Encrypt
enc_data = '12345'
params = ParamsOfEncryptionBoxEncrypt(
encryption_box=box.handle, data=enc_data)
enc_result = async_core_client.crypto.encryption_box_encrypt(
params=params)
self.assertEqual(enc_data * 2, enc_result.data)
# Decrypt
params = ParamsOfEncryptionBoxDecrypt(
encryption_box=box.handle, data=enc_result.data)
dec_result = async_core_client.crypto.encryption_box_decrypt(
params=params)
self.assertEqual(enc_data, dec_result.data)
# Remove box
async_core_client.crypto.remove_encryption_box(params=box)
def test_encryption_box_app_object(self):
class TestAppEncryptionBox(AppEncryptionBox):
def perform_get_info(self) -> EncryptionBoxInfo:
return EncryptionBoxInfo(algorithm='duplicator')
def perform_encrypt(
self, params: ParamsOfAppEncryptionBox.Encrypt) -> str:
return params.data * 2
def perform_decrypt(
self, params: ParamsOfAppEncryptionBox.Decrypt) -> str:
end = int(len(params.data) / 2)
return params.data[:end]
# Register box
app_encryption_box = TestAppEncryptionBox(client=async_core_client)
box = async_core_client.crypto.register_encryption_box(
callback=app_encryption_box.dispatcher)
# Get info
info_result = async_core_client.crypto.encryption_box_get_info(
params=ParamsOfEncryptionBoxGetInfo(encryption_box=box.handle))
self.assertEqual(info_result.info.algorithm, 'duplicator')
# Encrypt
enc_data = '12345'
params = ParamsOfEncryptionBoxEncrypt(
encryption_box=box.handle, data=enc_data)
enc_result = async_core_client.crypto.encryption_box_encrypt(
params=params)
self.assertEqual(enc_data * 2, enc_result.data)
# Decrypt
params = ParamsOfEncryptionBoxDecrypt(
encryption_box=box.handle, data=enc_result.data)
dec_result = async_core_client.crypto.encryption_box_decrypt(
params=params)
self.assertEqual(enc_data, dec_result.data)
# Remove box
async_core_client.crypto.remove_encryption_box(params=box)
def test_encryption_box_aes(self):
# AES128
self._encryption_box_aes(
key='aes128.key.bin', data='aes.plaintext.bin',
encrypted='cbc-aes128.ciphertext.bin')
# AES256
self._encryption_box_aes(
key='aes256.key.bin', data='aes.plaintext.for.padding.bin',
encrypted='cbc-aes256.ciphertext.padded.bin')
def _encryption_box_aes(self, key: str, data: str, encrypted: str):
with open(os.path.join(SAMPLES_DIR, 'aes.iv.bin'), 'rb') as fp:
iv = fp.read().hex()
with open(os.path.join(SAMPLES_DIR, key), 'rb') as fp:
key = fp.read().hex()
with open(os.path.join(SAMPLES_DIR, data), 'rb') as fp:
data = fp.read()
fn = os.path.join(SAMPLES_DIR, encrypted)
with open(fn, 'rb') as fp:
encrypted = base64.b64encode(fp.read()).decode()
# Create encryption box
algorithm = EncryptionAlgorithm.Aes(
mode=CipherMode.CBC, key=key, iv=iv)
params = ParamsOfCreateEncryptionBox(algorithm=algorithm)
box = async_core_client.crypto.create_encryption_box(params)
# Encrypt data
params = ParamsOfEncryptionBoxEncrypt(
encryption_box=box.handle, data=base64.b64encode(data).decode())
enc_result = async_core_client.crypto.encryption_box_encrypt(params)
self.assertEqual(encrypted, enc_result.data)
# Decrypt data
params = ParamsOfEncryptionBoxDecrypt(
encryption_box=box.handle, data=enc_result.data)
dec_result = async_core_client.crypto.encryption_box_decrypt(params)
self.assertEqual(data, base64.b64decode(dec_result.data)[:len(data)])
# Remove encryption box
async_core_client.crypto.remove_encryption_box(params=box)
class TestTonCryptoSyncCore(unittest.TestCase):
""" Sync core is not recommended to use, so make just a couple of tests """
def test_sha256(self):
data = base64.b64encode('TON is our future'.encode()).decode()
params = ParamsOfHash(data=data)
result = sync_core_client.crypto.sha256(params=params)
self.assertEqual(
'1e7fd5ec201652b5375e5edf3e86d0513394d2c2004dd506415abf0578261951',
result.hash)
data = base64.b64encode(
bytes.fromhex('544f4e206973206f757220667574757265')).decode()
params.data = data
result = sync_core_client.crypto.sha256(params=params)
self.assertEqual(
'1e7fd5ec201652b5375e5edf3e86d0513394d2c2004dd506415abf0578261951',
result.hash)
data = 'VE9OIGlzIG91ciBmdXR1cmU='
params.data = data
result = sync_core_client.crypto.sha256(params=params)
self.assertEqual(
'1e7fd5ec201652b5375e5edf3e86d0513394d2c2004dd506415abf0578261951',
result.hash)
def test_sha512(self):
data = base64.b64encode('TON is our future'.encode()).decode()
params = ParamsOfHash(data=data)
result = sync_core_client.crypto.sha512(params=params)
self.assertEqual(
'4c52dd4cefc68319bac5e97c1f0d18ae8194fb0dd8d9e090ba8376834a0756175a9a736d1e69cb1a58d25c3d554b02a2b8ed9c3ae5cbeeccc3277746a363a434',
result.hash)
def test_scrypt(self):
        password = base64.b64encode('<PASSWORD> Password'.encode()).decode()
salt = base64.b64encode('Test Salt'.encode()).decode()
params = ParamsOfScrypt(
password=password, salt=salt, log_n=10, r=8, p=16, dk_len=64)
result = sync_core_client.crypto.scrypt(params=params)
self.assertEqual(
'52e7fcf91356eca55fc5d52f16f5d777e3521f54e3c570c9bbb7df58fc15add73994e5db42be368de7ebed93c9d4f21f9be7cc453358d734b04a057d0ed3626d',
result.key)
with self.assertRaises(TonException):
params.dk_len = 0
sync_core_client.crypto.scrypt(params=params)
| 1.859375 | 2 |
utz/tests/test_collections.py | ryan-williams/jupyter-rc | 0 | 12785759 |
from utz.collections import *
from utz.test import raises
def test_singleton():
assert singleton([123]) == 123
with raises(ValueError, '2 elems found: 456,123'):
singleton([123,456])
assert singleton([123,123]) == 123
with raises(ValueError, '2 elems found: 123,123'):
singleton([123,123], dedupe=False)
with raises(ValueError, 'No elems found'):
singleton([])
assert singleton({'a':1}) == ('a', 1)
with raises(ValueError, ["2 elems found: ('a', 1),('b', 2)","2 elems found: ('b', 2),('a', 1)"]):
singleton({'a':1,'b':2})
with raises(ValueError, 'No elems found'):
singleton({})
| 2.796875 | 3 |
workflow/scripts/download_genomes.py | MGXlab/pvogs_function | 2 | 12785760 |
#!/usr/bin/env python
import argparse
# Only Entrez is used below; Bio.Alphabet has been removed from modern
# Biopython, so importing it would fail.
from Bio import Entrez
import time
from math import ceil
parser = argparse.ArgumentParser(description='Download a list of ncbi accessions to the output file')
optionalArgs = parser._action_groups.pop()
requiredArgs = parser.add_argument_group("required arguments")
requiredArgs.add_argument('-i', '--input-list',
dest='input_list',
required=True,
help="A txt file containing accessions to get from genbank")
requiredArgs.add_argument('-o', '--output-file',
dest='output_file',
type=str,
required=True,
help="The output file to write sequences to")
optionalArgs.add_argument('-e', '--e-mail',
help='E-mail address to be used with Bio.Entrez.email. Required by NCBI to notify if something is '
'off or if you overload their servers',
dest='email',
type=str,
)
optionalArgs.add_argument('--output-fmt',
required=False,
dest='output_fmt',
default='gb',
type=str,
help="Store the results in this file format [default='gb' (genbank)]"
)
parser._action_groups.append(optionalArgs)
def sequence_info_to_dic(sequence_info_file):
"""
Collapse protein ids per genome
Args:
sequence_info_file (str): A tsv file containing 3 columns uniprot_id, genome_id, protein_id,
with a header.
Return:
sequence_info (dict): A dictionary of the form {genome_id: [(protein_id_1, uniprot_id_1),
... ],
... }
"""
sequence_info = {}
with open(sequence_info_file, 'r') as f:
# Skip header
next(f)
for line in f:
fields = [field.strip() for field in line.split('\t')]
uniprot_id = fields[0]
genome_id = fields[1]
protein_id = fields[2]
if genome_id not in sequence_info:
sequence_info[genome_id] = [(uniprot_id, protein_id,)]
else:
sequence_info[genome_id].append((uniprot_id, protein_id))
return sequence_info
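# Illustrative result of sequence_info_to_dic (hypothetical IDs): a TSV row
#   "P12345\tNC_001416\tNP_040580"
# yields {'NC_001416': [('P12345', 'NP_040580')]}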
def txt_file_to_list(genomes_txt):
"""
Read ids from a one column file to a list
Args:
genomes_txt:str: Path to file with the ids.
"""
with open(genomes_txt, 'r') as fin:
genomes_list = [line.strip() for line in fin]
return genomes_list
def download_sequences(genomes_list,
genomes_file,
email_address='',
output_fmt="gb",
batch_size = 100
):
# Required by Bio.Entrez
if email_address:
Entrez.email = email_address
# Some progress tracking
total_batches = ceil(len(genomes_list) / batch_size)
batch_no = 0
with open(genomes_file, 'w') as fout:
        for i in range(0, len(genomes_list), batch_size):
            batch_no += 1
            batch = genomes_list[i:i + batch_size]
print('Downloading batch {}/{}'.format(batch_no, total_batches))
handle = Entrez.efetch(db="nuccore", id=batch, rettype=output_fmt, retmode="text")
batch_data = handle.read()
fout.write(batch_data)
handle.close()
# Wait 2 seconds before next batch
# This is ok for small sets of batches
time.sleep(2)
def main():
args = parser.parse_args()
genomes_list = txt_file_to_list(args.input_list)
download_sequences(genomes_list,
args.output_file,
args.email,
output_fmt=args.output_fmt)
if __name__ == '__main__':
main()
| 2.875 | 3 |
src/bitmessageqt/bitmessage_icons_rc.py | coffeedogs/PyBitmessage | 1,583 | 12785761 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sa 21. Sep 13:45:58 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x03\x66\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x08\x49\x44\x41\x54\x78\xda\x84\
\x53\x6d\x48\x53\x51\x18\x7e\xce\xfd\xd8\x75\x9b\x8e\xdc\x2c\xdd\
\x4c\x5d\x4e\xa7\xc9\xe6\xb7\xf6\x61\x61\x11\x14\x52\x16\xf5\xc7\
\x0a\x0b\xa2\x3f\x41\x51\x41\x61\x7f\x0a\x84\xa2\x9f\xfd\xeb\x67\
\x7f\xfa\x51\x44\x50\x91\x14\x15\x5a\x14\x41\x25\x6d\x44\x59\x68\
\x69\xd9\x74\xa6\x6d\xd7\x7d\x38\xb6\xdd\x6d\x77\xa7\x73\x2d\x4d\
\x84\xe8\x81\x87\xf7\x7d\xef\x7b\xde\xe7\xbe\xe7\x9c\xf7\x10\x30\
\x48\x84\x20\x4f\xb3\xf8\x8b\xb3\x1b\xe9\xbc\xe5\x38\x14\xb3\x74\
\x2f\x73\xab\x18\x47\x28\x45\x6f\x36\x0b\xff\xc2\x3a\xde\xc6\xb2\
\x06\xcd\x61\x24\x4b\x04\xbe\x87\x09\x48\x82\x89\x8a\xb8\x62\xaf\
\x76\x75\x5a\x4a\xcb\x9d\x31\x85\xae\x9d\x0d\xce\x15\x7c\xf1\xa3\
\xef\x67\x18\xd0\xc8\xe1\x1f\xf0\xcf\x01\x43\x53\xc4\xf1\x33\x04\
\x57\x20\x12\x29\xcc\x31\x5b\x84\x4d\x7b\xf6\x18\xb5\x78\xcc\x0f\
\x07\x23\x34\x0a\xcb\xea\x0a\x19\x4f\x32\xda\x19\xc7\x53\x04\x91\
\x99\x10\xc4\xde\xd3\xa7\x61\x30\x1a\xa1\xb2\xde\xb5\x98\xe7\xb0\
\x85\xe5\xc7\xb4\x02\x81\x2e\xa9\x66\xfe\xb9\x86\xd6\xd6\xfd\xee\
\xba\x3a\xcb\x3b\x8f\x47\x9e\x78\xe7\x8d\xc5\x13\x88\x4a\x3a\x1d\
\x94\x78\x1c\x82\x28\x22\xae\x6d\x8b\x47\x23\x5b\x7e\x6d\x5e\xa0\
\xdd\xf9\x77\xe7\xcf\x3e\xd3\x0d\xbd\xa7\x3a\xac\x2e\xa7\x15\x43\
\x9f\x6d\xd6\xae\x43\xde\xb0\x51\x44\x74\x6c\x78\x18\xf6\x8a\x0a\
\x68\x96\xc5\x1a\x4a\x16\x6a\x84\xad\xce\xc5\xfa\xae\xc1\x69\x53\
\x65\xbd\xdb\x8e\x74\x32\x09\xcd\xea\xf2\x4c\xb9\x0e\x5b\x94\x0c\
\xdc\xba\xe9\x6d\xda\xbe\xa3\xd1\xf3\xe4\xb1\x37\xf7\xb7\x40\xc1\
\xa2\x40\x26\xbb\x28\xc0\x75\xd5\x29\x23\xc9\xb9\xb9\x8d\x99\x74\
\x1a\x2a\xe3\xae\xfa\xf4\xc7\xf1\x92\xa2\x60\xce\xc4\x0f\x4b\x85\
\xb3\x0a\xcf\xfb\x6e\xd2\x57\xdd\x35\x1f\x73\x43\xc9\x47\x33\x25\
\x26\x4c\x15\xe7\x82\x27\xb5\x07\x41\x09\x87\x7c\x75\x66\xc8\x28\
\x66\xaa\x4b\x2a\xdd\x4d\xec\x42\x85\xf0\x6c\x20\xf5\x32\x3c\xfa\
\x4d\x3a\xd1\xe3\xd4\xd7\xb4\x54\xa5\x14\x17\xa6\xdb\xaa\x6d\x85\
\x5b\xda\x0b\x9e\xe6\x04\x12\xe1\x3c\xc1\x8e\x2c\xfd\xc2\x7f\x6d\
\xba\x8c\x41\x7d\x07\x1e\x99\x8e\x40\xa5\x24\xc0\x7d\xb8\xb1\x3e\
\x96\x26\xb6\x57\xaf\x07\xfc\x74\x77\x77\x45\xc1\x6a\x87\x79\x2a\
\x91\xc0\xd9\x8e\xa3\xb8\x3d\xe5\x41\xe9\xaa\x62\x93\xcb\x5c\x5e\
\x6b\xa0\xba\x35\xdf\x02\x93\xe2\x92\x39\xa0\xcd\xfd\xa6\xc3\x3b\
\x83\xf2\x2c\x69\x6c\x6e\x41\x24\x1a\x13\xef\x8f\xb4\xbe\x1f\xf7\
\x49\x93\x49\x76\x26\xb2\x2c\x43\xb3\x1a\xd4\x54\x46\xaa\x36\x97\
\xb9\x69\x54\x69\x23\x7c\x77\xdf\x0a\x70\xe2\x7e\x83\x24\xd4\x1c\
\xeb\x74\xef\x5b\x19\x19\x2a\xb6\x4b\x32\xc6\x15\x0b\x82\xf9\x95\
\xa1\xab\x0f\xfb\x3d\x49\xce\x17\x6b\x19\xf6\x0e\x0c\x6e\xf0\x6f\
\xa3\x69\x55\x0f\x45\x35\xd0\x74\x36\x07\xa3\xd1\x27\x84\x3f\x70\
\xe7\x4c\xe7\xfa\xf2\xee\xa6\x2a\xeb\x5a\x4b\x7e\x9e\xe4\xf3\x4d\
\xe3\xd2\xde\x52\x9c\xbf\xeb\x43\x59\x99\x15\x72\x28\x9a\x7a\xfb\
\xe9\xfb\x68\x5f\xff\xeb\x7b\xea\x83\x93\xd7\x97\x0d\x9e\xcc\x41\
\x89\x36\xd7\xda\xcd\xf5\xd9\x4c\x76\xfe\x2d\x2d\x6f\x97\xaa\xd0\
\xd5\x39\xac\x35\x90\x4c\xe5\xfc\xe6\x9e\x11\xed\x41\x2d\x61\x90\
\xf0\xf5\x87\x2e\xc0\xda\xd0\x4e\x79\x29\x41\x05\x7d\x0c\x82\x3e\
\xde\x36\x7d\xf5\xcd\xcb\xa2\xe3\xeb\x48\x26\x69\x20\x99\x84\x91\
\xa8\x8a\x1e\x3f\xbc\x2f\xe8\xec\xe8\x45\x1a\x99\x04\x8d\x4c\x2c\
\xb6\x40\xfe\x0c\x85\x05\xff\x87\xac\xfd\x71\xf9\xc7\x5f\x02\x0c\
\x00\x00\x31\x44\x70\x94\xe4\x6d\xa8\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x02\xaf\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x51\x49\x44\x41\x54\x78\xda\x9c\
\x53\xcf\x6b\x13\x51\x10\xfe\x36\xfb\x62\x8d\x69\x48\x62\x9b\x18\
\x8d\xab\x5d\xd3\x84\xa2\x10\xbd\x48\x0f\x62\xa9\xd8\x93\xe0\x49\
\x0f\xa5\x20\x52\xb0\xe0\x7f\xe0\x45\x3c\xf5\xa6\x77\x7b\xe8\x55\
\x28\x41\x3d\x78\x28\x7a\xf0\x47\x51\xa4\xbd\x58\x0d\xa8\x60\x5a\
\x13\x51\xd0\x43\x89\x69\xf3\x63\xb3\xc9\xee\x3e\x67\x9e\xd9\xa2\
\x08\x4a\x1d\x18\xde\xdb\x99\xf9\xbe\xf7\xcd\x7b\xb3\xda\x8d\x85\
\x05\xb0\x69\x9a\x76\x9e\x96\xfd\xf8\xbb\x3d\x71\x1c\x67\xad\xdb\
\xe9\xe0\xdc\xf3\x19\x48\x0a\x08\xd7\x75\xfd\xe4\x81\xeb\x93\x93\
\x73\x0e\x7d\x73\xc2\x95\x12\x5d\xda\x77\x3d\x4f\xed\x2b\x95\x0a\
\x1e\x15\x8b\x57\xa5\x94\x1a\xa5\x4b\x3e\x28\x30\xf1\xf8\x32\xcc\
\xb5\x7b\x20\x56\x4d\x72\xb1\xe3\xc0\xe9\x76\xe1\xf6\xbc\xd3\x6e\
\xc3\x6a\x36\xd1\x68\x34\x30\x3b\x35\x35\x47\xb9\xb3\x44\x92\xf5\
\x09\x04\xfb\xf0\xa7\x07\x57\x5a\x32\x78\x41\xd3\x2e\xe1\xc5\xea\
\x2a\x3c\x22\x8a\xc5\x62\x68\xb5\x5a\x38\x3e\x32\xa2\x0a\xab\xd5\
\x2a\xee\x2c\x2e\x22\x9f\x4c\xde\x5e\x29\xcc\x3e\x85\x8e\x02\x85\
\xe7\x05\xa9\x1b\x44\x40\xcf\x65\x8f\x9e\x9c\x60\x6d\x99\x4c\x06\
\x74\x82\x22\x89\xc7\xe3\x08\xea\xba\x22\x38\x35\x3a\x8a\x0e\xa9\
\x0b\x85\xc3\x18\x68\x5d\x3c\x23\x1f\xbe\x7a\x2d\x3d\x77\x50\xb8\
\x12\xc6\x5e\xe3\xd8\xf0\x26\x5d\x4c\x40\xd3\x54\xaf\xd1\x68\x54\
\x9d\xc8\x24\x1f\x89\x8c\x09\x39\xc6\x8a\x4e\xe4\xf3\xb0\x6d\x1b\
\x49\xc2\x54\x2b\x45\x43\xb8\x1e\x0e\xed\x8e\x26\xf7\x59\x56\x1b\
\xbf\x2a\xe0\xd3\x7d\x25\xb2\x47\xe2\x2b\xe2\x5a\xc6\x30\x96\x14\
\xc8\xa1\x60\x38\x16\x6a\x12\x3b\x3d\x25\xca\xe5\xf2\x36\xc0\x57\
\xc2\x2b\x7f\xb3\x82\xc3\xa9\x14\xb8\x96\x31\x8c\x15\x8e\x87\x5c\
\x24\x65\x26\xac\xf7\x75\x94\x0b\xd7\x30\x40\xb7\xde\x97\x1b\x47\
\x5f\x76\xec\x37\x25\xf6\x87\x25\x04\x4b\x4b\xf8\xba\xbe\x07\x56\
\xdb\x46\xc4\x34\x13\x8c\xe5\x16\x44\x24\x91\x4e\x4d\x27\x7e\x3e\
\x0b\x4f\xd2\xca\xf2\x7d\x38\xc2\x50\x40\x7e\x0d\x6e\x63\x73\xf9\
\x2e\x4e\x8f\x8d\xab\x9a\x69\x53\x2d\x29\xc6\xb2\x02\xb1\xb5\xb1\
\x41\x7d\x59\x2a\xda\x4f\x00\x23\x9d\xc6\x97\x67\x37\x15\x41\x93\
\x62\x3c\x58\xe6\x90\x89\x66\xbd\x8e\x46\xad\xa6\xea\x42\xa1\x10\
\x1c\x45\xe0\x4a\xe1\xf0\xf0\x90\xb3\xd5\x88\xcc\xc8\x66\x71\xd0\
\x3c\xf2\xc7\x1c\x7f\x2e\x6d\x0f\xa0\xaa\x67\xac\xe8\x7a\x08\x76\
\x3a\x34\x71\xe4\xbe\xad\xbf\x7d\x87\x7f\x99\xae\x0b\x30\x56\x34\
\x6c\xf4\x4b\xc9\x5a\x74\xec\xc4\x18\xc3\x58\xf1\xe6\x9b\xac\x6c\
\xcd\xdf\x7a\x89\xff\xb0\xf2\x77\x54\x78\x76\x76\x91\xc7\x7a\xff\
\xc5\x4e\x8c\x2f\xad\xf6\x43\x80\x01\x00\xc1\x52\x4e\xcc\x97\x5f\
\x6c\x5a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xcc\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x01\x68\xf4\xcf\xf7\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x02\x6c\x49\x44\x41\x54\x38\
\xcb\x4d\xd2\xbd\x6b\xdd\x65\x14\x07\xf0\xcf\xaf\xb9\x49\x9a\x10\
\x69\x08\x06\x6a\x2c\x82\xf4\x25\x2e\xed\x50\x87\x4e\x85\x2e\x42\
\xb2\x34\xe0\xe4\x58\x10\xea\x58\x69\x57\x47\x57\x83\xd2\x49\xc1\
\xad\xfe\x01\xba\x28\xb8\xa8\x93\x83\xa0\x4b\x41\x89\x10\x1b\x43\
\x20\x37\x37\xf7\xe6\xbe\xfe\xee\xef\x2d\xc7\xe1\xb9\x6a\x1f\x38\
\x1c\x9e\xf3\x7c\xbf\xe7\x7c\x9f\x73\x4e\x16\xb7\x6e\x71\xf5\x6a\
\x2d\xae\x5f\x2f\x82\x10\x0f\x1e\x6c\xc6\x8d\x1b\xc4\xc5\x8b\xf1\
\x17\x21\xee\xdf\xbf\x19\x77\xee\x44\xac\xad\x45\xcc\xcf\x47\x36\
\xc4\x11\x91\xe3\x67\x64\xb1\xb3\xc3\xa5\x4b\xbf\xd8\xdf\xff\xd1\
\xf3\xe7\x4f\xc4\xce\x4e\xc4\x95\x2b\x11\xab\xab\x31\xa0\x16\x1b\
\x1b\x11\x44\x45\xfc\x40\x64\x07\xc4\x18\x2f\xb0\xc7\x6e\x16\xdb\
\xdb\xac\xaf\x7f\xab\x69\xb6\x74\x3a\x9c\x9d\x31\x1e\x27\xdf\xed\
\x2e\xb6\x8c\xc7\x7b\x8e\x8f\xaf\x39\x3c\xe4\xf4\x94\xf3\x73\xd0\
\x8f\xd0\xa6\x10\xcb\xcb\x4f\x83\x28\x67\x56\x10\x6d\xe2\x27\xe2\
\x19\x91\x75\x92\x6e\x6d\x86\x7d\x56\x06\xe8\xa2\xe6\x83\xd7\xf9\
\x22\x8b\x7b\xf7\xd8\xd8\x60\x71\xf1\x13\x79\xfe\xc8\xd9\xd9\x6f\
\xda\xed\xf7\xb4\xdb\x7f\xea\xf5\xb4\x5c\xbe\xbc\x60\x7e\xbe\xd0\
\xef\xd3\xe9\x30\x1a\xbd\x6d\x30\xd8\x33\x99\x7c\xa7\x28\xb6\x5b\
\xca\x72\xa2\xdb\xe5\xe0\x20\x89\xac\x6b\xea\x5a\x33\x1c\x6e\x9d\
\xb1\xd9\x72\x7c\x3c\xa7\xdd\xe6\xf0\x90\xe9\x14\x54\x11\x4e\xd0\
\xe1\xab\x96\xa3\x23\xfa\xfd\xf4\x18\x21\x90\xe3\x24\x89\x7f\x23\
\x8b\x56\x2b\x9a\xba\x56\x63\x0e\x25\xfe\xc6\xef\x18\xf0\x59\xd6\
\xe6\xd3\x21\x8f\x4a\x34\x29\xe8\x45\xfa\xb6\x55\xb2\xd6\x84\x0f\
\x8f\xd9\xef\x26\xa0\x5e\x02\x8d\x96\x79\xe5\x35\x64\x71\xf7\x2e\
\x6b\x6b\xac\xac\xb0\xb0\xf0\x58\x96\x7d\xac\xae\x97\x14\x45\xd2\
\x35\x9d\x52\x14\xe4\x39\x93\x49\x6e\x32\xf9\xc8\x64\xb2\x2b\xcf\
\x29\xcb\xd9\x42\x2c\x2d\x7d\xee\xc2\x85\x87\xaa\x2a\x01\x87\x43\
\x46\xa3\x44\x2e\x4b\x9a\x26\x59\x59\xa6\x58\x9e\x8b\xe9\x74\xb7\
\xe2\x49\x4b\x51\x3c\x55\x96\x0f\x4d\xa7\x89\xd8\xeb\xa5\x4d\xc8\
\x73\xaa\x8a\x08\x20\xcb\xa8\x6b\x65\x84\x1c\x13\x1e\x17\xcc\x65\
\x71\xfb\x76\xa1\xae\x17\xe4\x79\x5a\xa3\xe1\x30\x91\x9b\xe6\x7f\
\x32\xff\xb5\x77\x34\x6b\xd4\x20\x25\x39\x69\x39\x3a\x3a\x50\x55\
\xd7\x54\x55\xaa\x58\x96\xa2\x69\xbc\x7c\xce\x67\xed\x1f\xa6\xe1\
\xe9\xe2\x0c\x05\x07\x59\xc3\xcd\x29\xbf\x56\xcc\xd5\xb3\x4a\x90\
\xcd\x7c\x83\xe9\x4b\xe4\x53\xf4\x53\xc2\x66\x81\xb7\xb2\x6e\x92\
\xb5\x30\xe6\xeb\x9c\xad\x7f\xe7\xd9\xa0\x4a\x55\xe4\x33\xc9\xa3\
\xd9\x1d\xdf\x2c\xf3\xee\xab\x34\x59\x0f\xe3\x19\xa0\x9f\xfc\x9b\
\x23\xde\x1f\xf1\x4e\xce\x66\x91\x12\xfd\xd1\xf0\xfd\x1c\x5f\x2e\
\xb1\x7f\x09\xeb\x33\xfb\x07\x6a\x4f\x76\xe7\x35\x05\x41\x4b\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x24\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x01\xc6\x49\x44\x41\x54\x78\xda\x8c\
\x53\x3d\x4b\x03\x41\x10\x7d\xd9\x3b\xf5\x44\x45\x8b\x44\x34\xa2\
\xc4\x42\x49\x2b\xe8\xcf\xb0\xf3\x07\x28\xd6\x82\x9d\x20\x58\x88\
\x0a\x36\x82\x8d\x95\xa0\x58\x0b\x16\xda\x8b\xd8\x08\x09\x44\x0b\
\x8b\x54\x22\x7e\x44\x10\x41\x42\xce\xe4\x2e\xb7\x1f\xce\x9c\xe6\
\xcb\x5c\xc0\xe5\x06\x6e\xdf\xbe\xf7\x66\x76\x76\x37\x96\xdd\x05\
\x62\xc0\x12\x80\x14\xfe\x3f\x1e\x0d\x70\x3c\xbb\x66\x60\x1b\xfa\
\xa3\x6f\x72\x6e\xf5\x62\x03\x5a\x03\x4a\x75\x96\x59\x16\x20\x04\
\xb2\xfb\xf3\x5b\x35\x48\x84\x06\x06\xe2\x25\x93\x01\x1b\x18\x29\
\x61\xaa\x7e\x7b\x10\xce\xeb\xcc\x63\x3e\xeb\x42\x03\xc5\x49\x35\
\x44\x6f\x3c\x8e\xfb\xcb\x4b\xca\x22\x60\x44\x7b\x30\xce\xeb\xcc\
\x63\x3e\xeb\x78\xd8\xfa\xc7\xc9\x1a\x1a\x4e\xa0\xe2\x96\x70\x73\
\x7e\x51\xaf\xd8\xf3\x3c\x38\x8e\x53\x9f\x4f\x4c\x4f\x81\x79\xa4\
\xb1\x6a\x98\xfd\xeb\x24\x0c\xed\x7d\x38\x39\x1a\x46\x08\x74\x75\
\xe3\x29\x9f\xc7\x44\x3a\x0d\x1d\x54\xeb\x26\xcc\xe3\x0a\xfe\x1a\
\x58\x5a\x05\x50\x32\x68\x34\x4c\xc4\x30\xd0\xd7\x87\x28\x9c\x34\
\x56\xbb\x81\x54\xd0\xdc\xa8\xdf\x11\x13\x16\x1d\x08\x63\x11\x78\
\x94\x81\x51\x92\xb2\x35\x88\x42\x59\x90\x94\x39\x0a\xef\x50\x41\
\x00\xdd\x54\xaa\x1f\x28\x2c\xf6\x6c\xa2\xfa\xa6\xa8\x99\x92\x22\
\x80\xef\x2b\x64\xa6\x8f\x5a\x0d\xa4\xaa\x19\x48\xda\x6b\x23\x53\
\xd9\xf5\x70\x32\x53\x6e\xba\x45\x22\x0c\xf7\xae\x04\xd2\x44\x54\
\x10\x96\xda\xa8\xc0\xfd\x2c\xc2\xae\x54\x90\xcb\xe5\x90\x48\x24\
\xc2\x7e\xa4\x52\x29\xe8\x62\xa9\x53\x0f\xa8\x59\x4d\xd7\xd8\x25\
\x62\x77\xb9\x8c\x34\x1d\x63\xbd\x2a\x9a\xeb\xd2\x57\xab\xc1\xdd\
\x23\x90\x4e\xc2\x79\x79\x7a\xa5\x9b\xaa\x9a\x7a\xe0\xe3\xe3\x74\
\xa5\xed\x39\x0c\xc6\x87\xe0\x55\xe1\xe4\x0b\xc0\x02\x1b\xec\x9c\
\x61\xf0\x60\x19\xfd\xe3\xe3\xc9\xd6\xf3\x1e\x1b\x89\x7e\x4f\x76\
\x17\x6e\xaf\xd1\xcf\xba\x6d\xa0\x68\xb3\xe9\xfd\x33\x0a\x87\x7b\
\xeb\x57\xff\x7d\xcb\x0f\xef\x28\xb0\x8e\xa2\xf8\x2d\xc0\x00\x14\
\x2c\x1a\x71\xde\xeb\xd2\x54\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x06\xe3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x06\x00\x00\x00\xe7\x46\xe2\xb8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x06\x85\x49\x44\x41\x54\x78\x9c\xed\
\x9d\x4b\x72\xe3\x38\x10\x44\xc1\x8e\x5e\xf8\x3c\x3a\x8e\x0f\xa8\
\xe3\xf8\x3c\xda\x79\x16\x0e\x4c\xd3\x10\x89\x6f\x7d\xb2\x0a\xf5\
\x96\x0e\x59\x2c\x24\x12\x29\x90\x20\xc1\xe3\xf3\xe3\xeb\x3b\xa5\
\x94\x9e\xaf\xc7\x91\x0c\x92\xeb\xb7\x8c\x65\xed\x8f\xb2\x03\x2c\
\x37\x46\xbb\x86\x59\xac\x69\x7e\xd6\xfa\x28\xff\x90\xb1\xd6\xa8\
\x8c\x45\x23\x59\xd1\xfa\x4a\xdb\x5b\x03\x65\xac\x34\xae\xc4\x92\
\x91\xd0\x35\xbe\xd3\xf2\xf9\x7a\x1c\x47\xeb\x43\xe7\x0f\x53\x17\
\x26\x81\x05\x23\xa1\x6a\xdb\xe3\x89\x6e\x03\x9d\xff\x69\xb5\x30\
\x0d\x90\x8d\x84\xa6\x69\x8f\x56\xb9\xe6\x5f\x85\x8f\x88\x8c\xd6\
\xe8\x5e\x10\x8d\x84\xa2\xe5\x4c\xff\x4f\x1b\xa8\xfc\x22\x6b\x20\
\x19\x49\x5b\xc3\x51\x2d\xce\xf5\xbe\x15\x3e\x2b\xac\xb6\x08\xb3\
\x20\x18\x49\x4b\x3b\x8a\xbe\x26\x33\xd0\xd5\x97\x5b\x42\xd3\x48\
\xd2\x9a\xad\xb4\xb5\xac\xf5\xb2\x70\x0a\x31\xc3\x48\xfd\x48\x69\
\xc5\xd1\xaf\x6c\x06\xba\x3b\xa0\x15\x24\x8d\xc4\xad\x11\x55\x5b\
\xae\xea\xbc\x2d\x9c\x5a\x40\x8b\x46\x92\x32\x11\x97\x36\x12\x7d\
\xf8\x97\xf2\x00\x35\x2c\x2d\xda\x5e\xad\x0f\x22\x4c\xb6\x7b\xe1\
\xa8\xf5\xae\xdf\xaa\x9d\xc9\x29\x1a\xa2\x91\x6a\x97\xec\x5b\x9f\
\x59\x81\x4a\x0b\x8d\xfe\x12\x4b\xa0\x12\xa4\x44\x9a\xb9\x80\x86\
\x94\x48\xdc\xb5\xd4\xfa\xa8\xd9\x79\xd6\xe7\x01\x35\x28\x96\x6f\
\x34\xcf\x58\x11\xfa\x46\x2d\x81\x4a\x24\x13\x89\xe3\x2c\x53\x32\
\x91\x90\xce\x10\xbb\x3a\xcb\xcb\xb5\x11\x89\xab\xec\x9c\xcb\x41\
\x88\xfd\x00\x93\x40\x25\x94\x89\xa4\x31\x62\x29\x8f\xa9\x35\xdf\
\xea\xd1\x9e\x75\x64\x51\x32\x63\x24\xce\x0b\x68\x94\x35\xdc\x7d\
\xbf\x05\xcd\x61\x13\xa8\x64\x24\x91\xb4\x85\x3f\x33\x93\x48\x08\
\xf5\xf7\x0e\x9a\xa1\x91\x85\xd0\xb0\xcc\x55\x03\xb9\xea\xa3\x9c\
\x8f\xd5\xee\x3f\x47\xd7\xf7\x0a\xb3\x06\xca\x48\x5c\x25\x46\x9a\
\xd0\x4b\x30\xd2\xde\x3f\x5c\x5f\x2c\x05\x72\x47\xd4\x40\xd4\x72\
\x86\x21\x03\xa1\x62\xad\x33\x3e\x3f\xbe\xbe\x51\x8d\x3f\xaa\xe5\
\xb0\x81\x50\x3b\xeb\xf9\x7a\x1c\xa8\xb5\x65\x90\x8d\x33\x8b\x99\
\xb3\xb0\x5e\x10\x27\xa4\x48\xb5\xd4\x98\x19\x80\x53\x3f\x61\xe8\
\x23\x3d\x25\x8c\x44\xf2\x98\x38\x25\xee\x12\xa8\xc4\xfb\x5a\x15\
\x15\xb3\x83\x6d\x7a\x12\xad\x3d\xba\x47\x91\x48\xa4\x1d\x12\xa7\
\xc4\x7d\x02\x95\x78\x5a\xab\xa2\x62\x65\x60\x2d\x9d\xc6\x5b\x4b\
\xa1\x33\x14\x89\xb4\x63\xe2\x94\x6c\x97\x40\x25\x56\xd7\xaa\xa8\
\x58\x1d\x44\xcb\x17\x12\x2d\xa7\xd0\x99\x9e\x44\x8a\xc4\x79\x07\
\xfe\x66\xee\x1e\x76\x5b\xab\xa2\x82\x42\x37\x92\xa5\x0c\x2f\x29\
\x74\xc6\x63\x9b\x38\xd8\x7e\x0e\x74\x45\xa4\x4f\x3f\x64\x8b\xa9\
\x1e\x46\x6c\xcc\x71\xc6\x89\x04\x4a\x7b\x24\xce\x19\xca\xc1\x4e\
\x7a\x3b\x87\xb5\x14\x8a\xc4\x59\x67\xcb\x04\xda\xd9\x34\xd4\x83\
\x9c\xfc\x86\x32\xe4\x14\x8a\xc4\xa1\x67\x8b\x04\x0a\xd3\xfc\xc0\
\x31\xb8\x59\x6e\x69\x45\x49\xa1\x48\x1c\x7e\x5c\x26\x50\x98\xe6\
\x1d\xae\x41\xcd\x76\x53\xbd\xd6\x6e\x1b\x61\x1e\x59\x4c\xec\xcd\
\x17\xac\xc1\x39\x98\xff\x46\x27\x07\x2b\xb8\x9c\x03\x59\xc3\xca\
\x26\x9b\x57\xdf\xcf\xfe\x60\x21\xca\x19\x19\x32\x12\x1d\xcd\xf5\
\xdd\xac\x06\x0a\xf3\xe8\x21\x65\x4a\x91\x47\x9b\xc3\x48\x6d\xac\
\xa6\x90\xab\xd3\xf8\xe0\x07\x49\x33\x8a\x6d\xae\x10\x86\x6a\x63\
\x31\x85\x5c\x2f\x65\xec\x88\xb4\x09\x45\xb7\x77\x09\x63\xb5\xb1\
\x96\x42\x5b\xdd\xce\xe1\x1d\x0d\xf3\x89\x6f\x30\x15\x06\x6b\x63\
\x29\x85\xb6\xbe\xa5\xd5\x13\x5a\xa6\x53\xd9\xe2\x2e\x8c\xd6\xc6\
\x4a\x0a\xc5\x63\x3d\x0e\xd0\x34\x9b\xda\x26\x9b\x61\xb8\x36\x16\
\x52\x28\x1e\x6d\x36\x8e\xb6\xc9\x54\xb7\xf9\x0d\xe3\xb5\xd1\x36\
\x48\x8b\xd8\xde\xc5\x30\x08\xe6\x52\xdf\x68\x3c\x0c\xd8\x06\xc1\
\x28\x77\x6c\xbb\xc5\x9d\x75\x50\x4c\xa5\x9e\x40\x29\x85\x11\x7b\
\x40\x31\x4c\xc9\x36\xdb\xfc\x7a\x02\xc9\x4c\x10\x09\x94\x52\x18\
\xb2\x07\x24\xe3\x64\xa6\xde\xb5\x65\xf5\x29\x82\x80\x1e\xf6\x97\
\xb5\x05\xbe\x89\xe7\xc2\x0c\x82\xf4\x0b\x00\xf7\xbe\xb0\x98\x0b\
\xb5\x41\xfa\xd5\x80\x7a\xe5\x65\x98\x47\x0f\xd1\xd3\xf8\x30\x92\
\x3e\x28\x29\xd4\x6d\xa0\x30\x8d\x5f\x54\x96\x32\xc2\x50\xfa\x20\
\xa4\x50\x97\x81\xc2\x2c\x7e\x51\xbd\x9d\x23\x8c\xa5\x8f\x76\x0a\
\x35\x0d\x14\x26\xf1\x0b\xc4\x2d\xad\x61\x30\x7d\x34\x53\xa8\x6a\
\xa0\x30\x87\x5f\xa0\x1e\xeb\x09\xa3\xe9\xa3\x95\x42\xb7\x06\x0a\
\x53\xf8\x05\xf2\xd1\xe6\x30\x9c\x3e\x1a\x29\x74\x69\xa0\x30\x83\
\x5f\xa0\xb7\x77\x09\xe3\xe9\x23\x9d\x42\x6f\x06\x0a\x13\xf8\xc5\
\xc4\x16\x77\x61\x40\x7d\x24\x53\xe8\x97\x81\xa2\xf3\xfd\x62\x6a\
\x9b\xdf\x30\xa2\x3e\x52\x29\xf4\xbf\x81\xa2\xd3\xfd\x62\xf2\x55\
\x07\x61\x48\x7d\x24\x52\xe8\x4f\x4a\xd1\xd9\x9e\xe1\x36\xd1\xf1\
\xf9\xf1\xf5\xcd\xd9\xc1\xda\xf7\xab\x04\xbc\xc4\x1b\x0b\x15\x79\
\xbe\x1e\x22\x0f\x76\x72\x06\x04\xcc\xb3\xf1\x3b\xf1\x7c\x3d\x0e\
\xc9\x9f\x75\x4e\x93\xb2\x3d\x99\x1a\xe9\xf3\x8e\xc7\xb9\x60\x24\
\x90\x00\xd2\x89\x73\x05\xd7\x80\x66\x49\xa0\x48\x9f\x1f\xb4\x4d\
\x23\x41\x24\x10\x03\x08\x89\x73\x05\xc7\xc0\x26\x4f\xa0\x9d\xd3\
\x07\xd1\x34\xdc\x44\x02\x11\x80\x9a\x38\x57\x50\x0f\x70\xd2\x04\
\xda\x2d\x7d\xac\x98\x86\x93\x48\xa0\x09\x2c\x25\xce\x15\x94\x03\
\x9d\x2c\x81\x76\x48\x1f\xcb\xa6\xe1\x22\x12\xa8\x13\x6f\xe6\x81\
\x7a\xb0\xd0\x6b\xfa\x9c\x4d\xf3\xf9\xf1\xf5\xed\xb5\x9d\x2b\x44\
\x02\x5d\x50\x9b\xe3\x78\x32\x12\x45\x3b\x96\xe7\x40\x5e\xc4\x4c\
\x69\xec\x67\x2a\xb7\xdb\xdb\x4f\xdb\x28\x91\x40\x69\xed\xac\xca\
\x7a\x22\xad\xd6\xbe\x94\x40\x96\x85\x4b\x89\x36\x3d\x76\x4d\xa4\
\x2d\x13\x88\xf3\x3a\x8e\xc5\x44\x5a\xa9\x77\x3a\x81\xac\x89\x94\
\x92\x6c\x3a\xec\x92\x48\x5b\x24\x90\xe6\x95\x63\x2b\x89\x34\x5b\
\xe3\x54\x02\x59\x10\x24\x25\xac\xd1\xef\x35\x91\x5c\x26\x10\xf2\
\x5a\x15\x72\x22\xcd\xd4\x35\x9c\x40\xa8\x8d\x4f\xc9\xd6\xe8\x46\
\xd6\x71\x04\x37\x09\x64\xc9\x3c\xc8\x8c\x1a\x7b\xc8\x40\x68\xa3\
\xc6\xfa\x5a\x55\xfe\xa9\xb5\x6c\xfe\xa1\xc2\x51\x3a\xa8\x34\x4e\
\xeb\x33\x2b\x70\xb4\xb9\x56\x1b\xa2\xc6\x35\xba\xe7\x40\x08\x0d\
\xb3\xbe\x56\xd5\x53\x4b\xfe\x0c\x82\xde\x3d\x0c\x77\x88\x06\x14\
\x23\x76\x65\xad\x6b\xe6\xff\x28\x8e\x4d\x75\xfc\x59\x7a\xea\xee\
\x4a\x20\xad\x46\x58\x5f\xab\xa2\x38\x16\x7a\x22\x75\x35\x50\xba\
\xf8\x99\x9f\x2a\xae\x63\x20\xbd\x16\x3d\x25\xbc\xbe\x68\x26\x90\
\x64\xc1\xd6\xd7\xaa\x24\xea\x47\x4b\xa4\x66\x83\xd1\xb7\x1f\xa1\
\xaa\xaf\x76\x07\xe2\xec\xff\x4a\xa0\xdd\x3f\xd5\x04\xe2\x2e\x0e\
\xe9\x0c\x69\x26\x91\x10\xea\xd7\x4e\xa4\xaa\x00\x5c\x45\x71\x4c\
\x8e\xa9\xa9\x75\x0c\x82\x71\xee\x90\xee\x33\xd1\x0b\x5a\x1c\xc2\
\x7b\x9d\xa3\xad\x42\xad\x8b\xaa\x81\x3c\x9c\x95\x58\x32\xcf\x19\
\xee\x7e\x9c\x9e\x38\xce\x1e\x90\x1a\xb4\xd3\x5a\x54\xb8\x2e\x88\
\xb2\x18\xc8\xcb\xfe\x7f\x35\x76\x35\x52\xd9\xee\x37\x11\x56\x0e\
\xa0\x21\xaa\xf6\xf5\x90\xdd\x8c\xc4\x62\x20\xef\xd7\x41\x7a\xd8\
\xc9\x48\xe7\xb6\xfe\x6a\xf4\xe8\x97\x21\x88\x86\x62\xa0\x0c\x82\
\x26\x33\x8c\xe8\xb8\x6c\x20\x24\x91\xd0\x0c\x94\x41\xd2\x68\x84\
\x51\x0f\x34\x6f\xcc\xba\xfa\x27\x24\x50\x0d\x94\x41\xd4\xac\x87\
\x96\xae\x43\x06\x42\x16\x01\xdd\x40\x19\x64\x0d\x6b\xb4\x7c\x51\
\x5d\x47\xb1\xd0\x68\x2b\x06\xca\x58\xd0\xf4\x8a\xbb\x25\x9d\x4b\
\x03\x59\x6a\xa4\x35\x03\x65\x2c\x69\x7c\xa6\xd4\xfb\xd7\xdb\x62\
\x2c\x36\xca\xaa\x81\x32\x16\x35\x4f\xe9\x9f\xee\xff\x01\x8b\x65\
\xc9\x17\x1c\x9e\xef\x70\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x02\xf0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x92\x49\x44\x41\x54\x78\xda\x84\
\x53\x5f\x48\x53\x61\x14\x3f\xf7\xee\x6e\x22\x0b\x4c\x6b\xac\x60\
\x6a\xb5\x39\xdd\x06\x36\xd9\x70\x6b\xb8\x31\xd4\xb2\x35\x82\x3d\
\x4c\x50\x50\x08\x1f\x24\x84\x24\x56\xea\xc3\xdc\x2a\x63\x36\x1d\
\x4d\xa1\xb0\xf0\xc1\x17\x1f\x7c\xb0\x3f\x0f\x51\x96\x12\xe8\xa6\
\x84\xa6\xeb\x45\xa9\x84\x22\x25\x08\xb7\x97\xec\x45\xe6\xee\xdd\
\xed\x7c\x97\xb5\x36\x1a\x74\xe0\xc7\xe1\xfb\x9d\xf3\xfb\x9d\xf3\
\xdd\x8f\x4b\x41\x4e\xd0\xc5\xc5\x20\xa9\xa8\x80\xc3\xdd\x5d\x48\
\x1f\x1c\x40\x75\x43\x03\x68\x6d\x36\xa0\x45\x22\xa0\x69\x5a\x85\
\x2d\x8d\x90\x1f\x3f\x18\x28\x10\x4a\xa3\x11\xaa\x4d\x26\x41\x98\
\x89\xaa\x74\x3a\xdd\x38\x3b\x34\xf4\xf8\x0f\x41\x21\xdc\x7e\xff\
\xd5\x3c\x83\x53\x7a\xbd\x20\x16\x31\x79\x74\x55\x9a\xe3\x9a\x66\
\x03\x81\x47\xd1\xf5\x75\xf8\xb0\xb5\x05\x75\x3a\x1d\x58\xb1\x0f\
\x79\x4a\xe8\x2c\xaf\xab\x83\xd3\x48\x30\xcc\x3f\x0b\x55\x71\x1c\
\xd7\xfc\x34\x18\x9c\xc0\x0c\x89\xfd\x7d\x28\x2d\x2b\xa3\x30\xf3\
\xa4\xc8\x11\x03\x53\x47\x07\x88\xc4\xe2\x42\x37\x51\xe3\x84\xf3\
\xcf\x42\xa1\x87\xc9\x64\x12\x44\x78\x1d\x8d\x52\x09\xdf\xe3\x71\
\xbe\x5c\x2e\x17\x1a\xb0\x4e\xd3\x50\x38\xd4\x2c\xc7\x5d\x78\x82\
\xe2\x58\x2c\x06\x57\x70\xc8\xd6\xe6\x26\x9c\x51\x28\xc0\x6e\x30\
\x80\xba\xb2\x12\x2e\x79\x3c\xd7\x70\x83\x85\x42\x06\xd5\x1c\xcb\
\xb6\x3c\x0f\x87\x1f\xbc\x5f\x5b\x83\xbb\x7e\x3f\x1c\xe0\x8b\xdc\
\x1a\x1c\x24\x2b\x0b\x1f\xd6\xd1\xdb\xdb\x8b\xd3\x17\xf0\x1e\xdb\
\x4c\x01\xf1\xc5\x17\x13\x13\xe3\xef\x56\x56\xe0\x8e\xd7\x9b\x2d\
\x04\x46\x47\x41\x52\x54\x04\x2d\x3d\x3d\xd7\x29\x8a\x9a\x47\xa3\
\xcf\x84\xcf\x35\xa8\x61\x59\xd6\xf1\x6a\x72\x32\xbc\xbc\xb4\x04\
\xbe\xfe\xfe\x6c\x61\x64\x6c\x0c\x8c\xf5\xf5\xd0\xdc\xdd\xed\x41\
\xf1\x1b\x51\x46\x9c\x6b\xa0\x21\xe2\xd7\x53\x53\xf7\x23\x8b\x8b\
\xe0\xef\xeb\xcb\x8a\xef\x85\xc3\x60\xb6\x58\xa0\xb1\xab\xeb\x06\
\x8a\xe7\x50\xfc\x29\x77\x65\x62\xa0\xe1\x52\x29\xe7\xfc\xf4\x74\
\x28\x8a\xe2\x00\xae\x2d\x91\x48\x84\xe2\xed\x60\x10\x2c\x56\x2b\
\xd8\x3b\x3b\xfb\x80\x88\x19\x26\x2b\xfe\x8a\xdf\xe7\xcb\xea\x2a\
\x30\x38\xf9\xf2\xdb\x99\x99\x91\xbd\x78\x1c\xc6\x87\x87\x41\x2a\
\x95\x0a\x0d\x37\x7d\x3e\x41\x6c\x6b\x6f\x1f\xc0\xc9\x2f\x71\xf2\
\x47\xc2\xef\xe0\xab\xec\x6c\x6c\xfc\x5d\x41\xdf\xda\xaa\x3d\xeb\
\x76\x0f\xfc\xe4\x79\x7e\x2e\x12\xe1\x5d\x2e\x17\x3f\x1f\x8d\xf2\
\xbf\xf0\x4c\x78\x52\x37\xb4\xb5\x81\xa2\xb6\xb6\xf0\x83\x9f\xd0\
\x6a\xa1\xc6\xe9\xd4\xa9\x1d\x0e\x2f\x31\xd9\xde\xdb\xe3\xf7\x31\
\x93\x33\xe1\x49\xfd\x7f\x41\xfe\x98\x92\x12\x95\xaa\x49\x6e\x36\
\x0f\x11\x13\x92\x8f\xaa\x54\x76\xe4\x8f\x21\x4a\x49\x1d\x71\x04\
\x51\x8c\x28\x42\x88\x33\x3a\x8a\xca\x1c\x4e\x22\x8e\x4b\x64\x32\
\x85\x58\x26\x3b\x97\x4a\x24\x96\x0f\x13\x89\x6f\xc8\xa5\x10\x6c\
\x26\x13\x1c\xe6\x70\x04\xdc\x6f\x01\x06\x00\x2d\x06\x04\x62\x7f\
\xe8\x51\x71\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\x37\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\xd9\x49\x44\x41\x54\x78\xda\x6c\
\x93\x5d\x48\x14\x51\x14\xc7\xff\xbb\xb3\xbb\xb6\xab\x95\x5b\x46\
\xae\x99\xba\x7e\x40\x42\x46\x12\xa5\x4b\x49\xe8\x83\x60\x22\x11\
\x3d\x94\xf6\xd6\x43\xe9\x6b\x81\x6f\x46\xd1\x43\x54\xd0\xf6\x49\
\xf6\x24\x44\x81\x6f\x3d\x04\x8b\x08\x9a\x60\x59\xb6\x18\x65\xe9\
\x22\xbb\xce\xb8\xea\xfa\xb1\x8b\xdf\xe3\x3a\xbb\xb3\xce\xed\xcc\
\x75\x35\xad\x2e\x73\xb9\x77\xe6\x9c\xf3\xbf\xbf\x73\xee\x19\xc3\
\xad\xf6\x76\x18\x0c\x06\x3e\x35\x4d\x83\x3e\x68\x5f\x47\x8b\x03\
\xff\x1f\xdd\xeb\x89\x44\x40\x4d\x24\xc0\x18\x83\x69\xbb\xc5\x48\
\x22\x09\x32\xd0\xc8\x6a\xa9\xaf\x6f\x9d\x8e\x44\x61\xb7\x5b\xb8\
\xb0\x46\xce\x43\x81\x00\x3a\x07\x07\x1b\xf5\x33\x68\xfa\x79\xcc\
\x0e\x6d\x12\x30\x0a\x02\x74\xf5\xc8\x9c\x02\x8a\x44\x24\xb2\x86\
\x99\xd9\x28\xa6\x66\x56\x21\xcb\x32\xee\x36\x34\xb4\x92\xbd\x8a\
\xbc\x8b\xf4\x90\x4d\x82\x3a\xc2\x71\x24\xf1\x21\x08\x42\xc5\xe4\
\x62\x08\xcb\xb2\x8a\xe2\x9c\x83\xc8\xb0\x5a\xa1\xae\xaf\xe3\xc7\
\xf0\x3c\xde\x7a\x3c\x28\xc9\xc8\x68\x7d\xd2\xde\x7e\x83\xdc\xdd\
\x26\x8d\x0c\x9b\xc8\x23\x81\x15\xe4\xe6\x59\x77\x20\x0f\x07\xa7\
\x91\x99\xbe\x1f\xa9\x29\x36\x9c\x38\xea\x42\x82\x6c\x66\xb3\x19\
\xe5\xc1\xa0\xc2\x09\xd4\x8d\x9c\xe1\x17\x65\x3d\x03\x04\xc7\xd6\
\x78\x71\xf4\x7a\xea\xc8\x35\xe5\xe5\xf8\xe8\xf3\xc1\xbe\xc7\x8c\
\x0c\xbb\x8d\x93\x08\x24\x10\x8b\xc5\x0c\x1b\x02\xaa\xca\xc9\x8b\
\x9c\xa9\xf0\x4b\xab\x70\x1e\xb6\xf0\x53\x74\xc7\x21\x71\x03\x59\
\x1f\x83\xbf\xfc\xa8\xad\xa8\x24\x1b\xa3\xca\xa9\x88\x93\xc0\xc9\
\xee\x6e\x12\x88\xc7\xb9\x80\x38\x1e\x85\xd1\x68\xc0\xd8\x64\x9c\
\x13\xd0\x83\x92\xc2\xd3\x9c\x44\x7f\x5f\x54\xc7\x71\x60\x5f\x0a\
\xdf\xc7\x07\x06\xd0\xe8\x76\x5f\xd3\xc2\xe1\x21\x23\xa1\x70\x9c\
\xc2\x1c\x1b\x4f\xa1\x20\x67\x17\xf2\xf9\x4c\x41\x2e\xd1\x64\x67\
\x0b\xc8\xcb\xb7\x52\x41\xe3\x98\x5f\x4a\x60\xc4\x1f\x42\xaf\xf7\
\x3b\xca\x9a\x9a\x8e\x45\x80\x3b\x26\x42\xe1\x04\x52\x68\x8d\xdf\
\xc0\x58\x28\xc6\x4f\xd7\x34\xb6\x45\xc2\x98\x02\x9b\x05\xb0\xa8\
\xfd\x08\x8e\x2e\xa3\xe6\xfa\x55\xd4\xb9\x5c\x3d\x17\x19\xbb\x67\
\x8a\x25\x05\x0a\xb2\x6d\x18\x9d\x8c\x22\x2f\xcb\xca\x6f\x80\x17\
\x32\xb9\x1a\xa8\x37\xc4\x2e\x2f\x7c\xa1\xf7\xa8\x39\x75\x1c\xee\
\xa7\x12\x66\x9d\xce\xaf\xdf\x04\xa1\xd3\xa4\x28\xca\x06\xc1\x54\
\x92\x60\x4a\xd9\xca\x7b\x93\x24\xb6\xf8\x09\xc6\xb9\x37\x28\xab\
\x3c\x8b\x8e\x8e\x7e\x5c\xba\xd0\x82\xd7\x7d\x3d\xe1\xb6\x89\x09\
\xfc\x21\x38\x44\x04\xa1\x28\xf2\x75\x02\x60\x8b\x60\x61\xb6\x17\
\xe2\xec\x73\x54\x53\xf0\xc3\x47\xee\xd1\x8e\x61\xc7\x87\xa1\x97\
\xcd\x7e\x4d\x96\x97\xe5\x70\x38\xdf\x34\x23\x8a\xd8\xeb\x70\x18\
\x44\x22\xd0\x5b\x5c\x9a\x56\x92\x79\x33\x44\x17\x46\x11\x09\x3c\
\xc0\x19\x57\x29\x1e\x3f\x7b\x21\x05\x25\xa5\xb9\xcf\x23\x7d\x01\
\xa4\xcd\xe6\xd7\x83\x90\x7e\xe4\xfc\xf9\x9b\x14\xc0\x88\x40\x5f\
\x18\x9d\xcc\xd6\x89\xfd\xb3\xe7\x36\x63\xf2\x08\x7b\xd7\x56\xc5\
\x6a\xaf\x94\xbe\x22\x5f\xfb\xdf\xbf\xa6\xde\x4d\xb9\xbb\x8b\x8b\
\xab\x8d\x69\x69\xff\x18\x97\xbc\xde\xfb\x97\xcf\xa5\xfe\x1c\x5e\
\xcd\xec\x93\xc2\x96\x81\x15\x9f\xaf\x8b\x3e\x8b\xdb\x7d\x7e\x0b\
\x30\x00\x66\x8d\xa1\xfd\x87\x65\xe6\x27\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xa1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x43\x49\x44\x41\x54\x78\xda\xa4\
\x93\xcf\x6b\x13\x51\x10\xc7\xbf\xfb\x23\x6b\xd3\x4d\x63\x53\x51\
\x69\x93\x2d\x52\x62\xc5\xb6\x54\x08\x52\x28\x28\xb4\xa8\x08\xc5\
\x5e\x84\xfe\x01\x3d\x49\x2f\x9e\x7a\x29\x96\x9e\xf2\x07\xf4\xa4\
\x88\x17\x7f\x1c\x82\x22\x2a\xb9\x05\x5a\xe8\x49\x94\x0a\xb1\x58\
\x24\x97\x68\x49\xa5\x31\x35\x90\xd2\xcd\x26\xd9\xcd\x6e\xd6\x99\
\xad\x5b\x82\xb4\x50\x71\x60\xf6\xbd\x37\xcc\x7c\xdf\x7c\xde\xdb\
\x27\xb8\xae\x8b\xff\x31\x79\x66\x69\xe9\x70\x21\x08\xc2\x34\x0d\
\x7d\xff\x50\xbf\x23\xf3\xd7\x69\xb5\xfc\x40\xf4\x4d\x32\xf9\xe8\
\x24\x3d\x09\xe4\x77\x17\x17\xe7\x64\xda\x15\x92\x28\xc2\x34\x4d\
\x8e\x8b\x0e\x21\x7d\x2e\xba\xa8\x14\xbe\x42\x8b\x0f\x63\x20\xd2\
\x3a\x52\x40\xa4\x1a\xbb\xd9\x14\xbd\x0e\x04\x5a\x28\x8a\x82\x9a\
\x61\x88\x36\x09\xec\x15\xbe\xa0\x98\xdf\x84\x08\x07\x5a\xe2\x32\
\x0a\xa5\x12\x82\xc1\x20\x6c\xdb\x46\x6f\x4f\x8f\x27\x20\xd1\xc6\
\xbe\xc0\x34\x5c\xb7\x8f\x15\x03\x8a\x72\x6d\x65\x7d\x1d\xdb\xbb\
\x3a\x8a\xe5\x32\x6a\xe1\x5f\xa8\x7e\x32\xd0\xa0\x42\xdf\xce\x77\
\x77\xe3\x4a\x3c\x8e\x00\xe5\x37\x2d\x4b\x94\x6d\xc7\x39\x86\xfb\
\xe6\x91\xdc\x4f\x33\x19\x9c\x56\x55\x5c\xd0\x34\x58\x96\x25\xc9\
\xdc\x06\x73\x3f\xcb\xba\xf8\xfe\xfe\x35\xc6\x6f\xcf\xe0\xd6\xc0\
\xf1\xdc\x6a\x67\x27\x62\xd1\x28\x6c\x3a\x78\xcb\x34\x45\x91\x05\
\x98\xfb\xe7\x87\x57\xd8\x5c\x4d\x61\x63\xe5\x25\x9a\x8e\x83\xb5\
\x6c\x16\x1b\x5b\x5b\xf8\x98\xcb\x79\x6b\x76\xce\x4b\x2e\x2f\xa7\
\x9f\xa4\x52\xab\xcd\x03\x01\x49\x66\x0e\x56\x3b\xa3\x0d\xa1\x5a\
\xad\xe2\x5c\xff\x10\x2c\x62\x8e\xc5\x62\xde\xae\x2a\xb5\x6b\xfd\
\x39\x03\xe6\x56\x43\x21\x69\x6e\x76\xf6\x06\xd5\xc1\xd0\xf5\x80\
\xcc\x1c\xac\xf6\xee\x6d\x1a\x86\x61\x60\x2d\x93\xc6\x9d\xeb\xf7\
\x91\xa3\x9d\x7d\x2b\x45\x22\xa8\xd7\xeb\x18\x4f\x24\x50\xd3\xf5\
\xca\xd9\x78\x7c\x21\x14\x0e\x77\x39\x86\x51\x96\x99\x83\x3b\x78\
\xf1\x70\x9e\x52\xe7\xbd\x82\x7a\xad\x86\xab\xa3\xa3\xde\x3c\x48\
\xcc\xbe\x71\x9e\x24\x49\xdf\xec\x7c\xfe\xf9\x1e\xc0\xe7\x5e\x11\
\x99\x83\x3b\x60\xae\xde\x91\x91\x05\x1e\x2d\xe2\xf5\xbd\x3d\xce\
\x79\xa4\x60\x5c\x9c\x9c\xdc\xa1\xe2\x22\x79\x03\x97\xa6\xa6\x1e\
\xec\x9a\xa6\x5b\xa1\x57\xc5\x73\x1e\x7f\xe8\xfa\xa1\xb7\xc7\x39\
\x8f\xe7\xe4\x88\x8d\x8d\x1d\x5c\x6d\xd7\xe0\xe0\x3d\x49\x55\xfb\
\xab\xfb\xfb\xba\xd2\x68\x6c\x5b\x1d\x1d\x1a\xf3\xf9\x6d\xff\x1d\
\x27\xee\x02\xf9\xe3\xf6\x7f\xe3\x14\x79\x84\xaf\xf9\x04\x6f\xc8\
\xe3\xf6\x5a\x27\x1b\x9e\x98\xc0\x6f\x01\x06\x00\x48\xae\x45\x78\
\x60\x4e\x1f\xe2\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x02\x75\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x17\x49\x44\x41\x54\x78\xda\xa4\
\x93\x4f\x6b\x13\x51\x14\xc5\xcf\x9b\x37\x93\x49\xc6\xa9\x25\xa3\
\x15\x63\x16\x2e\x02\x06\xad\x0b\x45\xc1\x95\x5f\xc0\x3f\x0b\x21\
\x1b\xa5\x20\xb8\x31\xea\x7c\x82\xac\xf4\x2b\x54\x5b\x23\x88\x1b\
\x4b\xb3\x70\xe1\x42\xb7\x82\x20\xe8\x42\xbb\x70\xa1\x62\x36\xa2\
\xa0\x51\xda\x1a\xc2\x4c\xa6\x26\x93\xcc\x78\xef\x75\x5a\xb4\x2a\
\x54\x7c\x70\xb8\x2f\x93\x7b\x7e\xf7\xbc\x37\x89\xaa\xd5\x6a\x50\
\x4a\x9d\x06\xb0\x07\xff\xb6\x3e\xa5\x69\xfa\xc0\x4c\x92\x84\x3f\
\x94\x9b\xcd\xe6\xcd\x38\x8e\xb7\xe4\xb4\x2c\x0b\xf5\x7a\xfd\x12\
\xef\xcd\xd1\x68\xc4\xd5\x18\x0c\x06\xf0\x7d\x1f\x0c\x64\x11\x5d\
\xea\x78\x3c\x46\x18\xf6\xa9\xa6\x62\xf4\x3c\x0f\xf3\xf3\xd7\x41\
\x3e\xe3\x67\x80\xe2\xca\x86\x6a\xb5\x0a\x4e\xf2\xed\x68\x05\xa3\
\xc7\x2f\xb1\xb2\xf2\x95\x9e\x6b\x32\xdb\xb0\xed\x3c\xa2\x28\x60\
\x33\x4b\x09\x20\x8b\x6d\xf0\x43\x9e\xc6\x49\x58\x69\x79\x07\x56\
\x57\xbb\x64\x88\x91\xcf\x6f\x13\xb3\x65\xe5\xa9\x27\x16\x00\xf9\
\x8c\x0d\x23\x49\x33\x48\x00\x34\x39\x8a\x22\xa8\xbd\xbb\x08\x94\
\x60\xf2\x60\x05\xe5\xd9\x3a\x26\x4f\x1c\x13\x90\x69\xda\x92\x90\
\x3d\xec\x35\x86\xc3\x21\x48\x3f\x40\xa5\x22\x9d\x37\x84\x73\xed\
\xbc\x5c\xd6\xf6\xe9\x0a\x3c\xff\x14\x7a\x8d\x16\x01\x8b\xa8\x35\
\xaf\x12\xc0\x94\x04\xec\x61\xef\xc6\x11\xb8\x26\xef\xbf\xa0\xdf\
\x5d\x43\xf7\xe1\x53\xb8\x07\xf6\xa1\x78\xf9\x24\xfa\xb7\x1f\xc1\
\x75\x8b\x48\x5b\x4b\xb8\x77\xf7\x19\xbf\x72\x49\xb0\x7e\x04\x93\
\x29\xb4\x24\x8e\xe3\x38\xe8\xf5\x7a\x30\x0c\x0b\xfa\xed\x07\x84\
\xfe\x2c\x0a\x85\x09\x0c\x0c\x2d\x46\x5e\xb6\xad\xd7\x13\x68\x01\
\xb4\xdb\x6d\x94\x4a\xa5\x1c\x37\x34\x1a\x8d\x2d\xfd\x0e\xb8\x37\
\x08\x82\x5c\xa7\xd3\x01\x63\x3d\xd7\x75\x67\xb4\xd6\xbb\x37\x37\
\xd2\xa4\xb2\x4c\x31\xcd\x8f\x9b\xbf\xa3\x0b\xff\x4c\xf7\xb5\xc0\
\x80\x02\x69\x82\xfb\x7e\xe9\x98\xce\x01\xaf\x86\x7e\xb6\xbf\x41\
\xfb\xdf\xf8\xa4\x40\xfd\x35\xe7\xe2\xd4\x2d\xbc\x89\x8f\xc8\x7e\
\xbf\xb5\x84\x73\xcb\x17\xff\xd4\xc6\x53\x77\x92\x2a\xa4\x43\xa4\
\xc3\xa4\xaa\x24\x5a\x0c\x71\xe6\xce\x59\x01\xdc\xbf\xd0\xe2\xf2\
\x82\x93\x93\xde\x65\xfb\xe7\xa4\xd7\x9c\xc0\xca\x8e\xe1\x66\x72\
\xf8\xad\xe0\x8a\x33\x83\x29\x75\x5c\xc6\x2c\xa7\x4f\x30\x17\x2d\
\x64\x43\xd7\x38\x7a\xa6\x48\xf1\x9f\xe6\x7f\xd6\x77\x01\x06\x00\
\xf9\x1f\x11\xa0\x42\x25\x9c\x34\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x07\x62\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x01\x97\x70\x0d\x6e\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x07\x02\x49\x44\x41\x54\x48\
\xc7\x55\x95\x6b\x70\x5d\x55\x15\xc7\x7f\xfb\x3c\xee\x3d\xe7\x9e\
\x7b\x93\xfb\x68\x4a\x93\x86\xb6\x69\x4b\xaa\x38\x16\x06\x3a\x16\
\x4b\x81\x22\x4c\xad\x55\xa9\x83\xce\xe0\xa3\x55\xa1\x0a\xc3\x07\
\xf0\x05\x0c\x33\x38\xea\x28\xc2\xe0\x03\x44\x1c\x18\x64\x8a\x03\
\xa3\x32\x08\x88\xa0\x0c\x8f\x3e\xd2\x34\x84\x16\xab\x96\x3e\xd2\
\x34\x49\x9b\x26\x6d\x92\x9b\xc7\xcd\x4d\x9a\xe6\xde\xdc\x7b\xf6\
\x39\x67\xf9\x21\x24\xc0\xfa\xb8\x67\xd6\xfa\xed\xb5\xd6\xff\xbf\
\xb7\x12\x11\x82\x20\xa0\xe2\x57\x2f\x41\x44\x78\xef\xf0\x51\x29\
\x57\x66\x04\x11\xa1\xa9\xa9\x59\x0e\x1f\x3d\x22\x68\xad\x69\x6b\
\x6b\x7f\x48\x6b\x8d\xaa\x56\xab\x4c\x97\x4b\x72\xf8\xbd\xa3\x18\
\x57\xac\xbd\x4a\x9c\xb8\xc3\xe2\xc6\x06\xc5\xfb\xd5\x6a\x47\xc7\
\x0a\xd7\x74\xf5\x74\x8b\xef\xfb\xf2\xce\x81\x03\x62\x00\xb4\xee\
\x6b\x9b\x0c\x42\xbd\x77\xf9\xb2\x26\x76\xed\xde\x8d\x6d\x59\xeb\
\x0d\x80\x96\xdd\xfb\x9e\xfc\xfa\x4d\xdf\xe4\xca\x75\x1b\x48\x67\
\xd2\x6a\xcd\xe5\x6b\xde\x26\x08\x02\x82\x20\x20\x0c\x43\x0e\x1d\
\x3a\xfc\x42\x10\x04\x68\xad\x21\x08\x02\xa5\xb5\x66\x4f\x4b\xab\
\xe4\x47\x86\x45\x6b\x2d\x22\x82\xa1\x94\x92\xb6\xb6\xf6\xb1\x65\
\x4d\x4b\x58\x90\xcd\xd1\xd6\xde\xce\xc0\xc0\xd0\x76\xd5\xdd\xd3\
\x43\x32\x99\x94\x64\x32\x89\x48\xc4\xa1\x43\x47\x58\x50\x97\x55\
\x6a\xed\xa7\xd6\xcb\xe4\xe4\x24\x7e\xe0\xf3\xf8\xe3\x7f\xc0\x30\
\x0d\xe3\xd2\x4b\x57\x8b\x71\xff\x03\x3f\x57\xb9\x5c\x8e\x86\x0b\
\xea\x19\x2f\x14\xd8\x78\xfd\x17\xa2\xa1\x81\x85\xa2\x44\x84\x30\
\x0c\x11\x11\xa5\x94\x32\x80\xb0\xe3\x78\x67\x7b\xa9\x34\xb3\xae\
\x7e\xf1\x42\x1a\x16\x35\x30\x75\xee\x5c\x21\x9b\xcd\xd6\x45\x51\
\x88\xc1\xfb\xa1\x94\x12\xd3\x34\xc3\x7f\xbd\xf6\xba\x80\x5a\x77\
\xe1\x92\x06\xea\x72\x75\xd8\x96\x05\x8a\x05\xbb\x5b\xf6\x4a\x7f\
\xff\xd9\x9b\x95\x88\x70\xac\xe3\x04\x41\xa8\x1f\x9d\x3a\x37\x75\
\x67\x43\x43\x3d\xa9\x9a\x24\xe9\x54\x2d\x96\x6d\xd1\xd5\xdd\xcd\
\xe0\x60\x9e\xfa\x86\x45\xac\x6a\x6e\x56\x4a\x44\xb8\x62\xed\xd5\
\x92\x1f\x1e\x22\x0c\x23\x1e\x7c\xf0\x97\x6c\xdb\xf6\x35\xb4\xd6\
\xb4\xef\xdf\x8f\x5f\xd5\xd4\xd6\xa6\x4e\x15\x27\x8a\x2b\x2f\xbb\
\xec\x32\x94\x88\xf0\xd8\x63\x4f\x7c\xef\xaf\x7f\x79\xee\x77\x5a\
\x6b\xb4\xd6\x84\x51\x44\xa9\x34\xcd\x8b\x7f\x7f\x9e\x6a\xb5\xf2\
\xd6\xba\x2b\xee\xdb\x08\x1d\x84\xe1\xc4\x6c\xc2\xce\xb7\x5a\xf0\
\xb5\xaf\x06\x06\x07\x6b\x0e\x1e\x3c\xf8\x8f\xb3\x67\x07\x36\x44\
\x41\xc8\x8f\xee\xfe\xfe\x77\x1a\x16\xd7\xef\x08\xfc\xd5\xfe\xd8\
\x18\x76\x47\x07\xcf\xcf\xef\x6e\x6e\x67\x5a\x6b\x46\x47\x47\xd7\
\xb6\xb4\xec\xdb\x7b\xba\xaf\xef\xae\x48\xa2\xb9\x73\x25\x22\x1f\
\x4c\x09\xb0\x94\x52\x18\x86\x61\xff\xef\xd0\x91\x03\x0b\xea\x72\
\xd7\x78\xa9\xe4\xaf\xc3\x20\x94\x62\xb1\xf8\x25\x40\x01\xf3\x09\
\x1e\x10\x98\xa6\xc9\x1b\x6f\xee\x2a\xcd\x4d\x2a\x1e\x8b\x61\x18\
\x06\x75\x75\x75\x2f\x9f\x3c\xd5\x7b\xe7\x87\x13\x4a\xb3\xe3\x3d\
\xfe\x70\x26\x93\xb6\x1d\x37\x46\x22\x91\xc0\x75\x5c\x82\x20\xe0\
\xc0\xbf\xdf\xa5\xb7\xb7\xef\x11\x00\x43\x29\x85\x52\x8a\x99\x4a\
\xf5\xaa\xfe\xfe\x33\x3f\x48\x67\xd2\x78\x9e\x47\xca\xf3\x30\x0d\
\x83\xa1\xfc\x10\xe5\x52\x85\x85\x0b\xeb\x66\xef\x7d\xbc\xb3\x1b\
\xa5\x14\x3d\x3d\xdd\xfb\x56\x35\x37\xe3\x79\x2e\x5e\xc2\xc3\xb2\
\x6c\x2a\x95\x0a\xdd\x3d\x27\xb1\x4c\x9b\x8b\x9a\x57\xdc\x00\x60\
\x84\x91\x06\x09\x5f\x6a\x6e\xbe\x88\x4c\x2e\x4d\x22\x91\x20\xee\
\xc4\x51\x4a\xd1\xd9\x75\x82\x28\x8c\x48\x26\x93\xd5\x7c\x3e\xff\
\x4f\x00\xa3\xab\xb3\xfb\x8e\x2f\xdf\x78\xd3\x8d\xd7\x6e\xb8\x9e\
\xad\xdf\xb8\x99\x94\x97\xc2\x89\xc5\x19\x2f\x8e\x33\x38\x90\x27\
\x9b\xcd\x31\x31\x39\xbe\xb4\x50\x28\x00\x60\x8c\x0c\x8f\xd6\x64\
\xb3\x59\xea\xea\xea\xe8\xeb\xeb\x63\x71\x63\x13\xcf\x3d\xf7\x3c\
\xa7\x7a\x7b\xc9\xe6\x72\x04\x81\xde\xa7\x94\x39\x72\xf1\xc5\x1f\
\x9f\x15\xe9\xce\xdd\x2d\xfc\xf4\xc7\x3f\x93\x30\x0c\xf1\x7d\x1f\
\xad\x03\x7c\x5d\xe5\xf6\xdb\x6f\x63\xd3\xe6\x8d\x7c\xac\xf9\xbe\
\x5e\x38\xb4\x1c\x26\x98\x98\x98\xd8\x6c\x48\x14\x71\xf7\x3d\x77\
\x27\x52\xa9\x14\xa9\x54\x8a\x4c\x26\x4d\x36\x93\xe5\xe9\x1d\xcf\
\x50\xa9\x54\xfe\x08\xef\x2c\x87\x5e\xca\xe5\x09\xda\xdb\xb9\xd5\
\x22\x52\xa4\x92\x5e\x75\xfb\x77\xb7\xab\xe3\xc7\x8f\xdf\x72\xf4\
\xe8\xb1\x27\xa7\xcf\x4f\x5b\xcb\x96\x37\x95\x6d\xdb\xbe\xad\xbf\
\x3f\x7f\x6c\x70\x90\xdf\x77\x76\x82\xd6\x6c\x9d\x77\xdc\x5c\x88\
\x08\x4a\x29\x00\x25\x22\x02\x60\x9a\x26\x85\xc2\xf8\xe7\x8e\x1c\
\x39\xfa\xd2\x74\xa9\xe4\xe6\x72\x59\x72\x0b\x72\x24\x93\x1e\xa6\
\x61\xcc\x58\xa6\xf5\x94\xeb\xba\x8f\x24\x12\x89\x3e\x80\x28\x12\
\x40\xb0\x6d\x9b\x8f\x00\x44\xc4\x02\x16\x29\xa5\xf2\x40\x38\x57\
\xbc\xf7\x74\xdf\xfd\x9d\x27\xba\xef\x4b\xd7\xd6\x90\x4c\x26\xf1\
\xbc\x04\xb1\xb8\x8d\xeb\xba\x24\x13\x1e\xf1\x78\x1c\xc3\x30\x98\
\x9a\x9a\x92\xf3\xe7\xcf\x3f\x5c\x5f\x5f\x7f\x97\x42\x50\x86\xf9\
\x11\x80\xfa\xa0\x89\xd9\x2e\xb4\xd6\xa9\xfd\xfb\xdf\x7d\xbb\xea\
\xfb\xab\x73\xb9\x1c\x6e\xc2\xc1\xf3\x12\xc4\xe3\x71\x1c\xc7\xc1\
\x4b\x24\xb0\x2d\x1b\x11\xc1\xf7\x7d\x86\x47\x46\xe8\xeb\xef\xa7\
\x5c\x9a\x29\x7c\xe6\xda\x0d\x97\xbb\xae\x73\xe6\xc3\xee\x11\x40\
\x94\x52\x58\x96\x45\xb1\x38\xb1\xe9\x8d\x37\x77\x4e\x99\x96\xb5\
\xba\xb1\xb1\x91\x74\x3a\x4d\x2a\x95\xc4\x71\x1d\x3c\xcf\x23\x99\
\x48\x60\x9b\x16\x22\x42\xb9\x5c\xa6\xb7\xef\x34\xbd\xa7\xfb\x08\
\x83\x88\x54\x2a\xb5\xa0\xab\xab\xfb\x16\x00\x0b\x60\x64\x74\xf4\
\x43\x14\x89\x9d\xe9\x1f\x78\x75\x64\x64\xec\xb3\x8d\x8d\x8d\x78\
\x09\x8f\xb8\x13\x23\xe6\xd8\x38\xb1\x38\xae\xe3\x12\x8b\xc5\x30\
\x0d\x83\x50\x84\xa9\xa9\x29\x7a\x7a\x4e\x52\x2a\x95\x09\xc3\x90\
\x78\x3c\xc6\xd2\x65\x4b\x06\x16\xd7\xd7\xef\x98\x07\x14\xc6\xc6\
\x67\x8d\x64\x59\x6b\xf2\xc3\xc3\xed\x5a\xeb\x58\xd3\xb2\xa5\xc4\
\xdd\x38\x8e\x13\x23\x16\x8b\x11\x77\x1c\xdc\x78\x1c\xdb\xb2\x31\
\x0c\x83\x20\x0c\x29\x8c\x17\xe8\xea\xea\x41\xeb\x00\x11\xa1\xb6\
\xb6\x96\x4c\x26\xbd\x2f\x1e\x8f\x5d\x33\x5e\x2c\x72\xc1\xc2\x85\
\xb3\x00\xc7\x71\x38\x71\xa2\xfb\xde\xd6\xd6\xd6\x07\xc3\x28\x62\
\xe9\x92\x0b\xb1\x6c\x93\x35\x6b\x2e\xa7\x26\x55\x83\x44\x11\x22\
\xb3\x1f\x9f\xa1\x0c\xb4\xd6\x0c\x0e\xe7\xe9\xe9\xea\x41\x04\x1c\
\xc7\x25\x93\x49\x33\x75\xfe\xdc\x43\xf9\xe1\xa1\x7b\x87\xf2\x83\
\xb8\xae\xfb\x01\xe0\x85\xbf\xbd\xf8\xea\xcb\x2f\xbf\xf2\x45\xa5\
\x14\x95\x4a\x05\x5f\xfb\xf8\x5a\x53\x2a\x95\xa8\x56\xab\xac\x5f\
\x7f\x25\x0f\x3c\xf0\x0b\x2e\x59\xfd\x49\x44\x84\x9e\x13\x27\x39\
\x75\xaa\x77\xbe\xb0\x9b\x70\x28\x14\x0a\xb7\x06\x81\x7e\x2a\x16\
\x8b\xb1\x72\xe5\x4a\x5c\xd7\x65\x4e\xeb\xbc\xf2\xca\x6b\x8d\xcf\
\x3e\xf3\xec\x7f\x06\x07\x87\x2e\x98\x83\x84\x61\x48\x18\x86\x44\
\x51\x84\x20\x54\x7c\x9f\xca\xcc\x0c\xdf\xfa\xf6\x56\xb6\x6c\xb9\
\x61\xf6\xf9\x4f\xd7\x00\x42\x6d\x4d\xcb\x1d\xcb\x9b\x8e\x6c\x87\
\x73\x97\x42\x11\x98\x28\xc0\xd0\x4f\x44\xa6\x9f\x50\x22\xc2\xae\
\xdd\x2d\xa0\x14\xad\x7b\x5a\x7f\xdb\xb2\x67\xef\x0f\xc3\x28\xc4\
\x34\x4d\xa2\x28\x9a\x87\x44\x51\x84\x44\xc2\x4c\x65\x86\xfa\x86\
\x7a\x1e\x79\xf4\x37\xd8\xb6\xe5\x17\x0a\x85\xeb\xae\x5e\x7f\x4f\
\x1b\x9c\x01\x86\x81\x88\x28\x82\xb1\x31\x18\x1b\xe3\xcf\xb3\xdf\
\xc8\xce\x96\x59\x05\x45\xc2\xe4\xe4\x24\xbd\xa7\x4f\x7f\xf5\xd8\
\xb1\x63\xbf\x1a\x19\x1e\xb9\x30\x08\x02\x4c\xd3\x44\x29\x35\x6f\
\xc8\x72\x79\x86\x1b\xbf\xb2\xe5\xfc\xe6\xcf\x6f\xfa\x74\x14\x45\
\x1d\xab\x2e\xba\xa4\xec\xfb\xe2\x2a\x05\x61\x08\x03\x03\xd0\xd1\
\x01\xa5\x12\x77\xcc\x76\xb0\x6b\xcf\x9c\x4a\x4d\x11\xe1\xdc\xd4\
\x74\x18\x86\x21\x95\x4a\x85\x62\xb1\x48\xb5\xea\xaf\x31\x4d\x63\
\xb3\x32\x68\x4e\xd7\xa6\x27\x57\xac\x58\xfe\xa7\x74\xa6\xf6\xbf\
\x86\xa9\xb0\x4c\x8b\xb8\xfd\x09\xfa\xfb\xb9\x6e\x68\x88\x6d\xc5\
\x22\xb5\x33\x33\xbc\x6e\xdb\x3c\x9d\x48\x10\xfc\x1f\x86\x93\xb9\
\x1a\xfd\x43\x9a\xa3\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x02\x77\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x19\x49\x44\x41\x54\x78\xda\x8c\
\x93\xbd\xaf\xa1\x41\x14\xc6\xcf\xcb\x2b\xe2\x26\x16\x05\xb1\x42\
\x43\xa2\x11\xbd\x66\x23\xa2\x22\xb9\xff\xc1\xaa\xf7\xd6\x1a\x0a\
\x09\x89\x46\xb7\xd1\xdd\x68\x97\xbf\x60\x2b\x8a\x0d\xb1\x85\x8f\
\x42\x50\xae\x50\x21\x44\x04\xb1\xbe\x3f\x76\x9e\xb3\x5e\xb9\x37\
\x5b\xac\x93\x8c\x31\x73\xcc\xef\x79\xce\x99\x21\x25\x93\x49\xba\
\x5e\xaf\x24\x49\xd2\x33\x11\x7d\xa4\xff\xc7\x8f\xf3\xf9\xdc\x3b\
\x1e\x8f\x94\xc9\x64\x48\xc6\x8e\x4a\xa5\xa2\xd3\xe9\x64\x4b\x24\
\x12\xaf\x22\xc9\x40\x0c\xb1\x77\x1f\x58\xf7\x7a\x3d\x2a\x95\x4a\
\x2f\x37\x50\x0f\x1f\x00\x3c\x8b\x24\x94\x3f\xb5\x5a\x2d\x9a\x4e\
\xa7\xa4\x56\xab\xc9\x60\x30\xd0\x78\x3c\x26\x9d\x4e\x47\xbb\xdd\
\x8e\xdc\x6e\x37\xad\xd7\x6b\x4a\xa7\xd3\xaf\xf1\x78\x1c\x10\x49\
\x8c\x5f\x92\x50\xfd\xf2\x88\x32\x20\xc3\xe1\x90\x34\x1a\x0d\xcb\
\x67\xb3\xd9\x68\xa3\xd1\xf8\x2a\xa3\x16\xfc\xe8\x72\xb9\xfc\x33\
\x2b\x10\x28\x87\x42\x21\x2a\x16\x8b\x64\xb5\x5a\xc9\x62\xb1\x50\
\xbd\x5e\xdf\x71\xf9\x02\x20\xfa\x27\x51\xb3\xd9\xa4\x6e\xb7\xcb\
\xfd\xa8\x56\xab\xd4\xef\xf7\xa9\xdd\x6e\x93\x2c\xcb\x34\x9f\xcf\
\xa9\x50\x28\xd0\x6c\x36\xa3\x72\xb9\x8c\x86\xd3\x7e\xbf\x97\xb8\
\x07\x0a\xc0\xe7\xf3\xdd\x95\x03\x81\x00\xcf\x18\x70\xe8\xf7\xfb\
\xd9\x89\x56\xab\xa5\x5a\xad\xc6\xd0\xc3\xe1\xf0\x17\x00\x12\x00\
\xb9\x5c\x8e\xed\x79\xbd\x5e\x4a\xa5\x52\x64\xb3\xd9\xc8\xe9\x74\
\x52\x24\x12\xa1\x7c\x3e\x4f\x66\xb3\x99\x3c\x1e\x0f\x94\x19\x70\
\x77\x00\x12\x00\xe1\x70\x98\x1d\x38\x1c\x0e\xee\xbc\xc9\x64\xe2\
\x32\x50\x52\x30\x18\x64\x37\xc8\x0d\x06\x03\x06\x88\xa6\x32\x40\
\xa5\x38\xc0\x95\x2d\x16\x0b\xae\x0f\xca\x88\xd1\x68\xc4\x80\xc9\
\x64\x42\xcb\xe5\x92\x0f\xe2\xb6\xde\xf5\x00\x24\x6c\xc0\x32\x1c\
\xe0\x40\x2c\x16\xbb\x5f\x29\x94\xed\x76\x3b\xcf\x6f\x01\xdb\xed\
\xf6\xbd\x03\x58\x53\x1c\x54\x2a\x15\xea\x74\x3a\x3c\x03\x88\x1c\
\xe6\xdb\x41\x5a\xad\x56\x70\xcc\xaf\x58\x56\x48\x8a\xed\xb7\x25\
\xa0\x0f\x58\xbb\x5c\x2e\xe5\xff\x82\x07\xf4\x3d\x1a\x8d\xfe\x14\
\x8e\x26\x0c\x00\x49\x51\xc7\x7d\x23\xb0\x36\x1a\x8d\xac\x86\x40\
\x0e\x00\xc4\x66\xb3\x69\x89\x7e\x7c\x13\x5f\x57\x2c\xa8\xd7\xeb\
\x3f\x0b\x7b\x36\x7a\x30\x84\xf2\x48\xf4\x2d\xaf\xbc\x60\xd8\x7f\
\x12\xe3\x03\xfa\xf1\xc0\xf9\xeb\x4d\xf9\x37\x2f\x04\xe0\x8f\x00\
\x03\x00\xe7\xe3\x7a\x6e\x30\xbb\xf3\xb7\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\x71\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x02\x00\x00\x00\x68\x24\x75\xef\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\
\x95\x2b\x0e\x1b\x00\x00\x06\x14\x49\x44\x41\x54\x78\x9c\xed\x9d\
\x4b\x76\xa3\x30\x10\x45\x71\x8e\x07\xc9\x72\xb2\x9d\x64\x7d\xde\
\x4e\xb2\x1c\x0f\xe9\x01\x69\x2c\x0b\xa1\x6f\x7d\x5e\x95\xf5\x66\
\xdd\xb1\x41\xd4\xd5\xe5\x23\x0b\xb8\x7c\xbd\xff\x2c\xcb\x72\xbb\
\x7f\x2e\x18\xf9\xfe\xf8\xd5\x6e\x42\x1c\xa8\xe2\x5c\x36\x60\x5b\
\xa0\x5a\xa6\xdd\x84\x47\x10\xca\xb2\x17\xe4\xb2\xae\x6b\x54\x1d\
\x84\xf6\x6d\x01\xc1\xa6\x5b\x90\xa8\x08\xd7\xb3\x4f\x20\x60\xdb\
\xda\x00\x82\x4d\x3e\xc7\x0d\xbf\xdd\x3f\x2f\xeb\xba\x26\xff\xb6\
\x7f\x82\xbd\x5d\x75\x51\xc4\x26\x5f\x84\x0c\x8e\x84\x61\xc7\x6f\
\x22\x60\x7b\x11\xdb\x32\x1b\xb8\x55\xe0\xcf\xb0\xfc\x47\xc3\x2f\
\x20\x44\x18\x9b\xcc\x86\x57\xd6\xbf\x60\xd8\x71\x89\x08\xd8\x9c\
\xd9\x56\xb3\x21\x7b\xd9\x1f\x86\x55\x7e\x33\xfa\xbe\x7a\x04\xb0\
\xf1\x6d\x6c\x47\xc1\x1b\x0c\x3b\xae\x09\x01\x9b\x51\xdb\x9a\x1a\
\x1c\xd6\xf9\xc9\xb0\xd6\x05\x1d\x17\xa7\x1b\x26\x6c\xb4\x1b\x38\
\x58\xe1\x4e\xc3\x8e\x2d\x40\xc0\x06\x6e\x5b\x5f\xc3\xa2\xc2\xbe\
\xe5\xff\xdc\xd4\x1a\x90\x4a\x21\x74\x9d\x28\x84\xc5\x21\x30\x2c\
\x8c\xba\x6d\x61\x5d\x6e\xf7\x4f\xf5\x3e\x34\xd8\x80\x63\x25\x13\
\xc0\xc6\xb7\x53\x05\x5b\xb2\xcd\x8a\x3b\x49\xa6\x95\x12\x1b\x16\
\x46\x0c\x5b\xe5\x25\xa7\x18\x36\xaa\x15\x25\x4b\x97\x06\x46\xb8\
\x33\x61\xc5\xd6\x71\x72\xcc\x8a\x4d\xa0\x4f\x30\x1a\x16\x86\x1c\
\x5b\x77\x69\x98\xb0\x91\x2f\xf0\xac\x56\xa7\xc0\x38\x8e\xd8\x24\
\xd8\x48\x5a\x45\x88\x4d\xf8\x00\x29\x64\x58\x98\x6e\x6c\x4c\xbd\
\xb8\x7b\xb1\x7c\xa8\x32\xc5\xc9\x01\x63\x3d\x2d\x6e\xc2\xc6\xda\
\x8b\x3b\xb0\x29\x5e\x2d\x28\x18\x16\xa6\x88\x4d\xac\x34\x95\xd8\
\xd4\xc7\x9a\x0b\xc0\x64\xae\x3d\x93\xd8\x54\x7a\x71\x06\x9b\xfa\
\x35\xf8\x96\x78\xf0\xf7\x18\xf9\x5f\x0b\x59\xaf\x63\xea\xa3\xd8\
\x63\x32\x89\xc7\x12\x3b\x16\x41\x1b\x90\x8e\xbc\x40\x8e\x49\x2e\
\x35\xc0\xe4\x83\x50\x29\x95\xb1\xec\x9a\x0d\xaf\x02\x26\x5f\xc1\
\xdb\xfd\x53\x0b\x1b\xce\xcf\x0e\xc9\x28\x9f\x25\xe6\x63\x74\x0c\
\xb0\x2f\x95\x1d\xb4\x76\x97\xa8\xb8\x9b\x12\xb0\x0d\xdc\xaa\x30\
\xd0\x86\x85\xb1\x32\x06\xd8\x97\xfa\x1e\xd9\x70\xd2\x81\x70\x2e\
\x40\x68\x9b\x21\xab\xc2\x98\x31\x2c\x0c\xec\x18\x60\x5f\x9a\xba\
\x60\xdb\x69\x3d\x82\x64\x7b\x3a\x6c\x33\x6a\x55\x18\x93\x86\x85\
\xc1\x19\x03\xec\x4b\x6b\x9f\x6b\xbe\x70\x86\x92\x6c\x4f\xc6\x36\
\x07\x56\x85\x31\x6f\x58\x98\xc8\x36\x7c\x4e\x1d\xbd\xbf\x67\x68\
\x0a\x53\xb2\x3d\xe0\xcd\x1b\x8c\x2b\xc3\x16\x0b\x56\xed\xe9\xeb\
\x58\x9d\x83\xbf\x80\xbd\xd8\xd9\xb1\xea\x2c\x1e\x0c\xb3\xc8\xa9\
\xbb\xc7\xf7\xff\xbc\x82\x20\xd9\x8b\x58\x15\xc6\xaa\x61\xa6\x39\
\x8d\xf4\xf5\xa1\x1f\x30\x55\x24\x7b\x41\xab\xc2\x58\x32\xcc\x07\
\xa7\xc1\x5e\x3e\x3a\x45\x40\xec\x0e\x7b\x1f\xb4\xc6\x83\x6e\x98\
\x33\x4e\xe3\xfd\x9b\x60\x12\x0e\xdf\x9d\x29\xce\x68\x91\x04\xd1\
\x30\xaf\x9c\x48\x7a\xf6\xd5\x6b\x75\xbc\x06\xd1\x30\xb4\x8c\x9b\
\x41\x78\x77\x24\xd9\x44\x52\x84\x81\x0f\xa6\xd0\xde\x8c\x3a\x18\
\x1a\x60\x8e\x69\x8d\x87\x96\x37\xe5\x54\x6d\xc7\xd8\x70\x24\xc3\
\x3d\xad\xf7\x11\x72\xd2\xc4\x37\x43\x38\x86\x07\x22\x99\x8d\xa1\
\x29\xa3\xe1\x60\x4c\x7f\xbb\x91\x63\x84\x08\x92\xd9\xfb\x79\xc5\
\x4a\x98\xe8\xb2\xdc\xd0\xe7\x18\xa4\xba\x64\xb6\xa7\x08\xc0\x86\
\x8f\x2b\xd7\x2d\xb3\x8e\x71\xea\x4a\xe6\x67\x9a\x1b\x4e\x58\x89\
\x32\xde\x94\xee\x18\xaa\xa2\x64\x0e\xa7\x6a\xeb\x86\x9b\x25\xef\
\x63\x1f\x1c\xa3\xd5\x92\xcc\xc9\xed\x46\x20\x11\xa0\xc8\xfe\x60\
\x15\xc7\x80\x55\x24\x33\x7c\xcb\x2c\x5a\x64\xf8\x49\x3c\xba\xc8\
\x31\x66\x79\xc9\x8c\x3d\xf6\x01\x36\x62\xe4\x84\x1e\x0e\xe6\x18\
\xb6\xb0\x64\x4f\x6f\x99\xcd\x04\x67\xe6\xd0\x8b\xa7\x16\xd8\x0c\
\x48\xe6\xbc\x44\x9a\x88\xed\x81\x44\x9f\x97\x38\x8f\x64\xe3\x91\
\x7b\x84\xac\x63\x5a\xe3\xa1\x3f\xad\x9f\xd8\x8a\x91\x91\xac\x00\
\x6c\x72\x12\x08\xd7\xd0\xd4\x84\x57\x8c\x80\x64\x39\x60\x93\x90\
\x40\x78\x7f\x5e\x99\x08\x8b\xe1\x96\xec\x14\xd8\x64\x23\x10\x89\
\x29\x02\x13\x64\x31\xac\x92\xa5\x81\x4d\x2a\x02\x91\x9b\xe6\x36\
\x71\x16\xc3\x27\x59\x02\xd8\xe4\x21\x10\xe9\xa9\xda\x13\x6a\x31\
\x4c\x92\x91\xbd\xda\x9e\x69\x39\x2e\xa3\x73\xbb\xd1\x44\x5b\x0c\
\x87\x64\x4f\xc0\x26\x03\x81\x68\xde\x32\x3b\x01\x17\x43\x2e\xd9\
\x03\xd8\xac\xbe\x40\xf4\x1f\xfb\x30\x31\x17\x43\x2b\xd9\x1f\xb0\
\x59\x77\x81\xa0\x3c\xba\x68\xc2\x2e\x86\x50\xb2\xb7\x65\x56\x5c\
\x24\x54\xcc\x2e\x5f\xef\x3f\x24\x85\x9e\xf3\x44\x65\x52\x7e\x53\
\x7a\x4d\xbc\xd2\x22\x7c\x6d\xfb\x42\xb4\x07\x42\x7c\xf1\x36\x42\
\x38\x5e\x6d\x4b\xc2\x9e\x60\xe6\xaf\x33\xbd\xc0\x8f\xc4\xd3\xb0\
\x47\x64\x5e\x18\x3d\xb8\x84\x51\xc3\x7c\xe8\x05\x6e\x55\x98\x57\
\x37\x4c\xc0\xaa\x28\x83\x5d\x7c\xc8\x30\xd3\x7a\x19\xb2\x2a\xcc\
\x2b\x1a\x26\x6f\x55\x94\x91\x8e\xde\x6f\x98\x45\xbd\x8c\x5a\x15\
\xe6\x55\x0c\x53\xb7\x2a\x4a\x77\x77\xef\x34\xcc\x90\x5e\x50\x9c\
\xc6\xe3\xdc\x30\x64\x5a\x72\x13\x49\xf1\xf5\xda\x39\xf9\x7b\xa7\
\x95\x37\xc3\x92\xc7\x2a\x58\x6c\x1d\xad\x6a\x3e\x86\x61\x6e\xf9\
\x52\xb1\xf7\xdb\x5a\x8e\xbc\x93\xac\x89\x07\xc3\x9a\xce\x00\xd1\
\x6c\x6b\x6d\x4c\x9b\x61\x50\x9b\xba\x0c\xe8\x62\xd7\x36\xab\x86\
\x91\x5c\x57\x81\xd8\xd6\xd4\x86\x06\xc3\x10\xb6\x6d\x61\xd0\xc2\
\x96\x6d\x96\x0c\x63\x1d\xad\xd0\xb5\xad\x7e\xd5\xb5\x86\xe9\xea\
\x25\xd6\xfd\xf1\x6d\x43\x37\x4c\x65\x0c\x50\xc5\xb6\xca\x35\x56\
\x19\xa6\xa2\x97\x7a\x37\x07\x39\x66\x47\x01\x35\x4c\x9d\x96\x4a\
\x6a\xba\x48\x19\x98\x64\x47\x43\x1b\x03\xdc\x76\xc8\x50\xbd\x07\
\xe5\x01\x97\xc9\xa2\x28\x9e\x02\x44\x2b\xdd\xfe\x29\xd0\x87\xbe\
\x3f\x7e\xf3\xdb\x5b\x00\x26\xd0\x44\xb4\x31\xc0\xcc\x8a\xc4\xb0\
\x65\xa2\x69\x58\x13\x03\x01\x6c\x95\x0b\xe7\xc6\x96\x97\x2c\x07\
\x8c\xaf\x4d\x68\x63\x80\x1d\x0b\xd4\xb2\x4d\xda\x30\xc2\x3b\x65\
\x48\x16\x35\xb8\x10\x26\x6c\x19\xc9\x4e\x81\x91\x37\x02\x6d\x0c\
\x90\xb0\x3d\x92\xb6\x49\x18\xc6\x7a\xe0\xe9\xc0\xc6\xd4\x1e\x5a\
\x6c\x67\x92\xa5\x81\x51\xad\x15\x6d\x0c\x50\xa0\x3d\xdc\xb6\x71\
\x19\xa6\x72\xf1\x94\xc1\x26\xdc\x1e\x12\x6c\x49\xc9\x12\xc0\x06\
\x57\xa3\x3e\x2e\x10\xb5\x5f\xb1\x3d\x1c\xb6\x51\x8e\x25\xa2\x8d\
\xe2\x2c\x00\xbd\x67\x19\x2b\xcb\x11\x76\x6c\x58\x5f\x77\x40\xa8\
\x4b\x32\x38\xbf\x6f\x51\xd9\x16\xdf\x94\xde\xba\x44\xcc\x1b\x81\
\x93\x41\xc0\xb6\x65\xa4\xc8\x4f\x86\x35\x2d\x08\x67\xfb\x2b\xe3\
\xc3\xb6\x27\xc3\x2a\x17\x21\x70\x5d\xc5\x1d\x04\x6c\x5b\x5a\x6b\
\xfe\x30\xac\xe6\x9b\x38\xdb\x39\x18\xbb\xb6\x3d\x0c\xcb\x7f\x47\
\xf8\x12\x58\x32\x08\xd8\xb6\xd4\x20\xb8\x16\x3f\x8a\xb3\x3d\x4c\
\xb1\x65\xdb\x9f\x61\xc9\x0f\x29\x8e\x56\x68\x05\x01\xdb\x96\x33\
\x22\xd7\xe4\xdf\x70\xda\x2d\x1c\x7c\xdb\x2e\xeb\xba\x86\xff\xab\
\xde\x56\x84\xb9\x37\x5b\xd4\x4b\xb1\x27\xac\xc9\xe3\xb5\xc0\x20\
\xed\xc3\x01\xb6\x05\xa4\x2c\xcb\xff\xca\xfc\x03\x0c\x3a\xb7\xd7\
\x9d\x1e\xca\x90\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x07\x3c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x01\x97\x70\x0d\x6e\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x06\xdc\x49\x44\x41\x54\x48\
\xc7\x55\x95\x59\x6c\x54\xd7\x19\xc7\xff\xe7\x2e\x73\x77\xcf\x6e\
\x7b\xc6\x0e\x60\x30\x13\xb1\x04\x12\x07\x95\x94\x90\x1a\xda\x4a\
\x49\x53\x35\x54\xb4\x52\x9a\x86\xb4\x4d\xe9\xf2\xd4\x28\x6d\x43\
\x15\xa9\x2f\x51\x93\x80\xd2\x2c\x28\x52\x15\xa9\x8d\x68\x95\xbc\
\x44\x6d\x1a\x51\xd2\x46\x4d\xd8\x8c\xed\xb8\x40\x69\x4b\xd8\xed\
\x19\xb0\xc7\x60\x7b\x6c\x33\x1e\xcc\xc0\x8c\x67\xee\xb9\xe7\x7c\
\x7d\x18\x6c\xc8\x79\x3c\xba\xff\xef\xf7\x2d\xff\xef\x1e\x46\x44\
\x08\x82\x00\x35\xbf\xbe\x16\x44\x84\xcf\x4e\x9d\xa1\x6a\x6d\x8e\
\x40\x44\xe8\xe8\xc8\xd0\xa9\x33\xa7\x09\x9c\x73\xf4\xf7\x0f\xbc\
\xc2\x39\x07\xab\xd7\xeb\xb8\x59\xad\xd0\xa9\xcf\xce\x40\x79\x60\
\xfd\x43\x64\x1a\x26\xda\xda\xd3\x0c\xb7\xa2\x85\xa7\xaf\x16\xbb\
\x87\x72\x59\xf2\x7d\x9f\xfe\x75\xec\x18\x29\x00\xd0\xdb\xd7\x3f\
\x1b\x08\x7e\x64\xe9\x92\x0e\x1c\x3c\x74\x08\xba\xa6\x6d\x54\x00\
\xa0\xe7\x50\xdf\xef\xbf\xfb\xf8\xf7\xf0\xe0\x86\x4d\x88\x44\x23\
\x6c\x5d\x57\xd7\x00\x82\x20\x40\x10\x04\x10\x42\xe0\xe4\xc9\x53\
\xef\x07\x41\x00\xce\x39\x10\x04\x01\xe3\x9c\xe3\x70\x4f\x2f\x15\
\xa6\x26\x89\x73\x4e\x44\x04\x85\x31\x46\xfd\xfd\x03\x57\x97\x74\
\x2c\x42\x22\x16\x47\xff\xc0\x00\xc6\xc6\x26\xb6\xb3\x6c\x2e\x07\
\xd7\x75\xc9\x75\x5d\x10\x49\x9c\x3c\x79\x1a\x89\x64\x8c\xb1\xf5\
\x5f\xd8\x48\xb3\xb3\xb3\xf0\x03\x1f\x6f\xbd\xf5\x3b\x28\xaa\xa2\
\xdc\x7b\xef\x1a\x52\x5e\xda\xf9\x1b\x16\x8f\xc7\x91\x6e\x49\x61\
\xa6\x58\x44\x24\xd2\x44\xc9\x78\x02\x8c\x88\x20\x84\x00\x11\x31\
\xc6\x98\x02\x40\x9c\x3b\x7f\x61\xa0\x52\x99\xdb\x90\x6a\x6b\x46\
\xba\x35\x8d\xf2\xf5\xeb\xc5\x58\x2c\x96\x94\x52\x40\xc1\xad\xc3\
\x18\x23\x55\x55\xc5\x3f\x3e\xfa\x27\x01\x6c\xc3\x5d\x8b\xd2\x48\
\xc6\x93\xd0\x35\x0d\x60\x48\x1c\xea\x39\x42\xa3\xa3\x57\x9e\x66\
\x44\x84\xb3\xe7\x06\x11\x08\xfe\x66\xf9\x7a\xf9\x99\x74\x3a\x05\
\xaf\xc9\x45\xc4\x0b\x43\xd3\x35\x0c\x65\xb3\x18\x1f\x2f\x20\x95\
\x6e\xc5\xdd\x99\x0c\x63\x44\x84\x07\xd6\x7f\x89\x0a\x93\x13\x10\
\x42\x62\xd7\xae\x97\xf1\xd4\x53\x4f\x80\x73\x8e\x81\xa3\x47\xe1\
\xd7\x39\xc2\x61\xef\x52\xe9\x5a\xa9\xb3\xab\xab\xab\x91\xd2\x93\
\xdb\x9e\x78\x36\x9d\x4a\xa3\xb5\xa5\x05\xaf\xbd\xfa\x3a\x56\xaf\
\xee\x42\x26\xb3\x0a\x9e\xe7\xc1\xf5\xec\xfd\x3e\xf7\x3b\xd3\xe9\
\x74\x23\x75\x22\xc2\x81\xfd\x3d\xf0\xb9\xcf\xc6\xc6\xc7\x9b\x4e\
\x9c\x38\xf1\xb7\x2b\x57\xc6\x36\xc9\x40\xe0\x97\x3b\x9e\xfd\x51\
\xba\x2d\xb5\x67\xd5\x8a\x95\x60\x8c\x35\x8a\x9d\x9f\xdd\xfc\xcc\
\x38\xe7\x98\x9e\x9e\x5e\xdf\xd3\xd3\x77\x64\x24\x9f\x7f\x4e\x92\
\x9c\xbf\x67\x44\x74\xbb\x4b\x00\x34\xc6\x18\x14\x45\xd1\xff\x77\
\xf2\xf4\xb1\x44\x32\xde\xed\x78\xee\xab\x22\x10\x54\x2a\x95\xbe\
\x09\x80\x01\x58\x10\x38\x00\x02\x55\x55\xf1\xf1\x27\x07\x2b\xf3\
\x9d\x32\x42\x21\x28\x8a\x82\x64\x32\xb9\xf7\xe2\xa5\xe1\x67\xee\
\x14\x54\x1a\xed\x3d\xff\x46\x34\x1a\xd1\x4d\x2b\x04\xdb\xb6\x61\
\x99\x16\x82\x20\xc0\xb1\x7f\x1f\xc7\xf0\x70\x7e\x37\x00\x28\x8c\
\x31\x30\xc6\x30\x57\xab\x3f\x34\x3a\x7a\xf9\xe7\x91\x68\x04\x8e\
\xe3\xc0\x73\x1c\xa8\x8a\x82\x89\xc2\x04\xaa\x95\x1a\x9a\x9b\x93\
\x8d\xbc\xcf\x5f\xc8\x82\x31\x86\x5c\x2e\xdb\x77\x77\x26\x03\xc7\
\xb1\xe0\xd8\x0e\x34\x4d\x47\xad\x56\x43\x36\x77\x11\x9a\xaa\x63\
\x79\x66\xd9\x63\x00\xa0\x08\xc9\x01\x12\x1f\x64\x32\xcb\x11\x8d\
\x47\x60\xdb\x36\x0c\xd3\x00\x63\x0c\x17\x86\x06\x21\x85\x84\xeb\
\xba\xf5\x42\xa1\xf0\x77\x00\x50\x86\x2e\x64\x7f\xf6\xad\xad\x8f\
\x6f\xdd\xbc\xe9\xab\xd8\xf6\xe4\xd3\xf0\x1c\x0f\x66\xc8\xc0\x4c\
\x69\x06\xe3\x63\x05\xc4\x62\x71\x5c\x9b\x9d\x59\x5c\x2c\x16\x01\
\x00\xca\xd4\xe4\x74\x53\x2c\x16\x43\x32\x99\x44\x3e\x9f\x47\x5b\
\x7b\x07\xde\x7b\xef\xcf\xb8\x34\x3c\x8c\x58\x3c\x8e\x20\xe0\x7d\
\x8c\xa9\x53\x2b\x57\xae\x68\x08\x56\xac\x5e\xf1\x32\x63\x0c\xae\
\xe3\xa2\xc9\xf5\x90\x6a\x69\xc5\x0b\x2f\xbc\x88\xe3\x47\x4f\x20\
\x91\x88\x22\x10\xbc\xdb\x30\x34\x08\x21\x1b\x02\x92\x12\x3b\x7e\
\xb5\xc3\xf6\x3c\x0f\x9e\xe7\x21\x1a\x8d\x20\x16\x8d\xe1\x8f\x7b\
\xde\x41\xad\x56\xfb\x83\xae\xeb\xb8\x6f\xed\x7d\x48\xc4\xe2\x0d\
\x01\x24\x83\xe7\x3a\xf5\xed\x3f\xde\xce\xba\x37\x77\x6f\x8f\x27\
\x13\x81\xeb\x79\x58\x75\xcf\xaa\xaa\xae\xeb\x3f\x4d\xb5\xa6\xe0\
\x79\x1e\x88\xe8\xb6\xf9\x84\x10\x0b\xfe\x20\xa2\x79\xa3\x31\xba\
\xf5\x95\xaa\xaa\x28\x16\x67\xbe\x76\xfa\xf4\x99\x0f\x6e\x56\x2a\
\x56\x3c\x1e\x43\x3c\x11\x87\xeb\x3a\x50\x15\x65\x4e\x53\xb5\xb7\
\x2d\xcb\xda\x6d\xdb\x76\x1e\x00\xa4\x24\x00\x04\x5d\xd7\x3f\x0f\
\x20\x22\x0d\x40\x2b\x63\xac\x00\x40\xcc\x07\x1f\x1e\xc9\xbf\x74\
\x61\x30\xfb\xeb\x48\xb8\x09\xae\xeb\xc2\x71\x6c\x84\x0c\x1d\x96\
\x65\xc1\xb5\x1d\x18\x86\x01\x45\x51\x50\x2e\x97\xe9\xc6\x8d\x1b\
\x6f\xa4\x52\xa9\xe7\x18\x08\x4c\x51\x3f\x07\x60\xb7\x8b\x68\x54\
\xc1\x39\xf7\x8e\x1e\x3d\xfe\x69\xdd\xf7\xd7\xc4\xe3\x71\x58\xb6\
\x09\xc7\xb1\x61\x18\x06\x4c\xd3\x84\x63\xdb\xd0\x35\x1d\x44\x04\
\xdf\xf7\x31\x39\x35\x85\xfc\xe8\x28\xaa\x95\xb9\xe2\x97\x37\x6f\
\xba\xdf\xb2\xcc\xcb\x77\x6e\x0f\x01\x20\xc6\x18\x34\x4d\x43\xa9\
\x74\xed\x91\x8f\x3f\x39\x50\x56\x35\x6d\x4d\x7b\x7b\x3b\x22\x91\
\x08\x3c\xcf\x85\x69\x99\x70\x1c\x07\xae\x6d\x43\x57\x35\x10\x11\
\xaa\xd5\x2a\x86\xf3\x23\x18\x1e\xc9\x43\x04\x12\x9e\xe7\x25\x86\
\x86\xb2\x3f\x04\x00\x0d\x00\xa6\xa6\xa7\xef\xa0\x50\xe8\xf2\xe8\
\xd8\x87\x53\x53\x57\x1f\x6e\x6f\x6f\x87\x63\x3b\x30\xcc\x10\x42\
\xa6\x0e\x33\x64\xc0\x32\x2d\x84\x42\x21\xa8\x8a\x02\x41\x84\x72\
\xb9\x8c\x5c\xee\x22\x2a\x95\x2a\x84\x10\x30\x8c\x10\x16\x2f\x59\
\x34\xd6\x96\x4a\xed\x59\x00\x14\xaf\xce\x34\x16\x49\xd3\xd6\x15\
\x26\x27\x07\x38\xe7\xa1\x8e\x25\x8b\x61\x58\x06\x4c\x33\x84\x50\
\x28\x04\xc3\x34\x61\x19\x06\x74\x4d\x87\xa2\x28\x08\x84\x40\x71\
\xa6\x88\xa1\xa1\x1c\x38\x0f\x40\x44\x08\x87\xc3\x88\x46\x23\x7d\
\x86\x11\xea\x9e\x29\x95\xd0\xd2\xdc\xdc\x00\x98\xa6\x89\xc1\xc1\
\xec\xf3\xbd\xbd\xbd\xbb\x84\x94\x58\xbc\xe8\x2e\x68\xba\x8a\x75\
\xeb\xee\x47\x93\xd7\x04\x92\x12\x44\x8d\x87\x4f\x61\x0a\x38\xe7\
\x18\x9f\x2c\x20\x37\x94\x03\x11\x60\x9a\x16\xa2\xd1\x08\xca\x37\
\xae\xbf\x52\x98\x9c\x78\x7e\xa2\x30\x0e\xcb\xb2\x6e\x03\xde\xff\
\xcb\x5f\x3f\xdc\xbb\x77\xdf\x37\x18\x63\xa8\xd5\x6a\xf0\xb9\x0f\
\x9f\x73\x54\x2a\x15\xd4\xeb\x75\x6c\xdc\xf8\x20\x76\xee\x7c\x11\
\x6b\xd7\xdc\x03\x22\x42\x6e\xf0\x22\x2e\x5d\x1a\x5e\x08\x6c\xd9\
\x26\x8a\xc5\xe2\x4f\x82\x80\xbf\x1d\x0a\x85\xd0\xd9\xd9\x09\xcb\
\xb2\x6e\xef\xc1\xbe\x7d\x1f\xb5\xbf\xfb\xce\xbb\xff\x19\x1f\x9f\
\x68\x99\x87\x08\x21\x20\x84\x80\x94\x12\x04\x42\xcd\xf7\x51\x9b\
\x9b\xc3\xf7\x7f\xb0\x0d\x5b\xb6\x3c\xd6\xf8\xfd\x47\x9a\x00\x10\
\x4a\xa5\x6b\x0f\x83\xb1\xfd\x52\x4a\x2c\x5d\xda\x81\x70\x53\x13\
\xa4\x94\x68\x4e\x24\x1b\x80\x83\x87\x7a\x00\xc6\xd0\x7b\xb8\xf7\
\xf5\x9e\xc3\x47\x7e\x21\xa4\x80\xaa\xaa\x90\x52\x2e\x40\xa4\x94\
\x20\x49\x98\xab\xcd\x21\x95\x4e\x61\xf7\x9b\xaf\x41\xd7\x35\xbf\
\x58\x2c\x7e\xc5\x34\xcd\x4f\x89\x08\xaa\xaa\xc2\x73\x3d\xa4\xd3\
\x69\xa8\xaa\x0a\xdb\xb2\x6e\x3d\x23\x07\x7a\x1a\x0e\x92\x84\xd9\
\xd9\x59\x0c\x8f\x8c\x7c\xe7\xec\xd9\xb3\xbf\x9d\x9a\x9c\xba\x2b\
\x08\x02\xa8\xaa\x0a\xc6\xd8\xc2\x42\x56\xab\x73\xd8\xfa\xed\x2d\
\x37\x1e\xfd\xfa\x23\x5f\x94\x52\x9e\xe3\x9c\xa3\xb5\xa5\x15\x6d\
\xe9\xf4\xc2\xac\x00\x40\x51\x94\x5b\x15\x1c\x3c\x3c\xef\x52\x95\
\x88\x70\xbd\x7c\x53\x08\x21\x50\xab\xd5\x50\x2a\x95\x50\xaf\xfb\
\xeb\x54\x55\x79\x94\x29\xc8\x44\xc2\x91\xd9\x65\xcb\x96\xfe\x29\
\x12\x0d\xff\x57\x51\x19\x34\x55\x43\xa6\x73\x39\x74\x5d\x87\x94\
\x12\x77\x1e\x45\x51\xf0\x7f\x60\x84\x69\x65\x48\xcf\xfa\x14\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\x65\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x01\x97\x70\x0d\x6e\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x07\x05\x49\x44\x41\x54\x48\
\xc7\x55\x95\x6b\x70\x54\xe5\x19\xc7\x7f\xef\x39\x67\x77\xcf\xd9\
\x73\x36\xd9\x4b\x16\xd8\x10\xb9\x08\xc6\x82\x8a\x8a\x99\x62\x11\
\x05\x6a\x47\xad\x9d\x8a\x83\x9d\xb1\x17\x6c\x6b\x69\xeb\x27\xed\
\x4d\x3a\x76\xec\x4c\x6d\x55\x1c\xab\x15\x1d\x3b\xf6\xc2\xd0\x8e\
\x8e\x2d\xb5\x6a\x29\xb6\x4e\xbd\x00\x21\x89\x29\x58\x6c\xb9\x5f\
\xb2\x81\x64\x03\x49\x36\x09\x9b\x4d\x08\x6c\xb2\xbb\xe7\xf2\xf4\
\x43\x48\xd0\xe7\xe3\x3b\xf3\x3c\xbf\xf7\x79\x9e\xff\xff\x7d\x95\
\x88\xe0\x79\x1e\xe5\x6a\xe5\x5a\x44\x84\x03\x07\x0f\xcb\x78\x79\
\x42\x10\x11\xe6\xcf\x6f\x94\x83\x87\x0f\x09\xae\xeb\xd2\xd6\xd6\
\xfe\xb4\xeb\xba\xa8\x4a\xa5\xc2\x85\xf1\x92\x1c\x3c\x70\x18\xed\
\xc6\x65\x37\x8b\x19\x31\x99\xdd\x50\xaf\xb8\x58\xad\x76\xe8\x6c\
\x61\x65\x47\x67\x56\xaa\xd5\xaa\xfc\x7b\xef\x5e\xd1\x00\x5a\x5a\
\xdb\x46\x3d\xdf\xdd\x7d\xf9\xbc\xf9\xec\xd8\xb9\x93\x90\x61\xac\
\xd0\x00\x9a\x77\xb6\xfe\xee\xab\xf7\x7e\x9d\x9b\x96\xaf\x22\x9e\
\x88\xab\xa6\xa6\xa6\x0f\xf0\x3c\x0f\xcf\xf3\xf0\x7d\x9f\xfd\xfb\
\x0f\xbe\xee\x79\x1e\xae\xeb\x82\xe7\x79\xca\x75\x5d\x76\x35\xb7\
\x48\x7e\x70\x40\x5c\xd7\x15\x11\x41\x53\x4a\x49\x5b\x5b\xfb\xd9\
\x79\xf3\xe7\x50\x97\x4c\xd1\xd6\xde\x4e\x6f\x6f\xff\x7a\x95\xed\
\xec\xc4\x71\x1c\x71\x1c\x07\x91\x80\xfd\xfb\x0f\x51\x97\x4e\x2a\
\xb5\xec\xd3\x2b\x64\x74\x74\x94\xaa\x57\xe5\xa5\x97\x7e\x8d\xa6\
\x6b\xda\x75\xd7\x2d\x11\xed\x89\x8d\xbf\x50\xa9\x54\x8a\xfa\x99\
\x19\x86\x0b\x05\x6e\xbb\x67\x6d\xd0\x9b\x9e\x21\x4a\x44\xf0\x7d\
\x1f\x11\x51\x4a\x29\x0d\xf0\x8f\x1e\x3b\xde\x5e\x2a\x4d\x2c\xcf\
\xcc\x9e\x41\xfd\xac\x7a\xc6\xce\x9d\x2b\x24\x93\xc9\x74\x10\xf8\
\x68\x5c\x0c\xa5\x94\xe8\xba\xee\xff\xf3\xed\x7f\x09\xa8\xe5\x97\
\xcd\xa9\x27\x9d\x4a\x13\x32\x0c\x50\xd4\xed\x6c\xde\x2d\x3d\x3d\
\x67\xee\x57\x22\xc2\x91\xa3\x27\xf0\x7c\xf7\x85\xb1\x73\x63\x0f\
\xd5\xd7\x67\x88\xd5\x38\xc4\x63\xb5\x18\x21\x83\x8e\x6c\x96\xbe\
\xbe\x3c\x99\xfa\x59\x5c\xd9\xd8\xa8\x94\x88\x70\xe3\xb2\x5b\x24\
\x3f\xd0\x8f\xef\x07\x3c\xf5\xd4\x93\xdc\x77\xdf\x57\x70\x5d\x97\
\xf6\x3d\x7b\xa8\x56\x5c\x6a\x6b\x63\xa7\x8a\x23\xc5\x85\x4b\x97\
\x2e\x45\x89\x08\x2f\xbe\xf8\x9b\xef\xfd\xf9\x4f\x5b\x9f\x77\x5d\
\x17\xd7\x75\xf1\x83\x80\x52\xe9\x02\x6f\xfc\xed\x35\x2a\x95\xf2\
\x7b\xcb\x9f\x79\xf6\x36\x0e\x1c\x40\xfa\xfb\xb3\x4a\x44\x78\xff\
\xbd\x66\xaa\x6e\x55\xf5\xf6\xf5\xd5\xec\xdb\xb7\xef\xef\x67\xce\
\xf4\xae\x0a\x3c\x9f\x1f\x6d\xf8\xfe\xb7\xeb\x67\x67\xb6\x04\xd7\
\x2c\x39\xd8\x07\x4b\xb2\xf0\xc4\xf4\xee\xa6\x76\xe6\xba\x2e\x43\
\x43\x43\xcb\x9a\x9b\x5b\x77\x77\xe7\x72\x0f\x07\x12\x4c\x9d\x2b\
\x11\xb9\x34\x25\xc0\x50\x4a\xa1\x69\x5a\xe8\x7f\xfb\x0f\xed\xad\
\x4b\xa7\x56\xda\x31\xe7\x19\xdf\xf3\xa5\x58\x2c\xde\x0d\x28\x60\
\x3a\xc1\x06\x3c\x5d\xd7\x79\xe7\xdd\x1d\xa5\xa9\x49\x45\xc2\x61\
\x34\x4d\x23\x9d\x4e\x6f\x3b\x79\xaa\xeb\xa1\x8f\x27\x94\x26\xc7\
\x7b\xec\xb9\x44\x22\x1e\x32\xad\x30\xd1\x68\x14\xcb\xb4\xf0\x3c\
\x8f\xbd\xff\xf9\x90\xae\xae\xdc\x26\x00\x4d\x29\x85\x52\x8a\x89\
\x72\xe5\xe6\x9e\x9e\xd3\x3f\x88\x27\xe2\xd8\xb6\x4d\xcc\xb6\xd1\
\x35\x8d\xfe\x7c\x3f\xe3\xa5\x32\x33\x66\xa4\x27\xef\x7d\xec\x78\
\x16\xa5\x14\x9d\x9d\xd9\xd6\x2b\x1b\x1b\xb1\x6d\x0b\x3b\x6a\x63\
\x18\x21\xca\xe5\x32\xd9\xce\x93\x18\x7a\x88\x2b\x1a\x17\xdc\x05\
\xa0\xf9\x81\x0b\xe2\xbf\xd9\xd8\x78\x05\x89\x54\x9c\x68\x34\x4a\
\xc4\x8c\xa0\x94\xe2\x78\xc7\x09\x02\x3f\xc0\x71\x9c\x4a\x3e\x9f\
\xff\x07\x80\xd6\x71\x3c\xfb\xe0\x3d\x6b\xef\x5d\xbb\x7a\xd5\xe7\
\x58\xf7\xb5\xfb\x89\xd9\x31\xcc\x70\x84\xe1\xe2\x30\x7d\xbd\x79\
\x92\xc9\x14\x23\xa3\xc3\x73\x0b\x85\x02\x00\xda\xe0\xc0\x50\x4d\
\x32\x99\x24\x9d\x4e\x93\xcb\xe5\x98\xdd\x30\x9f\xad\x5b\x5f\xe3\
\x54\x57\x17\xc9\x54\x0a\xcf\x73\x5b\x95\xd2\x07\x17\x2f\x5e\x34\
\x99\xb0\xe8\xea\x45\x4f\x2a\xa5\x70\x6c\x87\x1a\x27\x46\x66\xe6\
\x2c\x1e\x7b\xec\x71\x3e\xdc\xb3\x8f\xba\xba\x04\xcb\x9f\xdf\xf4\
\x97\xdb\x7e\xf2\x88\x24\x17\x5f\x2d\x25\x5d\xbf\x5e\x93\x20\x60\
\xc3\x8f\x37\x44\x63\xb1\x18\xb1\x58\x8c\x44\x22\x4e\x32\x91\xe4\
\x0f\x5b\x5e\xa6\x5c\x2e\xff\x9e\x7d\xfb\x7e\xce\x47\x1f\x31\x91\
\xcf\xd3\x1a\x04\x3f\xd5\x08\x14\x31\xc7\xae\xac\xff\xce\x7a\xb5\
\x72\xf5\xca\xf5\xa9\x74\x9d\xe7\xc4\x62\x5c\x75\xcd\x55\xe3\xa1\
\x50\xe8\x81\xfe\xee\xee\x9b\xdb\x80\x57\xc0\xcf\xc1\xba\x69\xc7\
\x4d\x85\x88\xa0\x94\x02\x50\x22\x22\x00\xba\xae\x53\x28\x0c\x7f\
\xfe\xd0\xa1\xc3\x6f\x5e\x28\x95\xac\x54\x2a\x49\xaa\x2e\x85\xe3\
\xd8\xe8\x9a\x36\x61\xe8\xc6\x66\xcb\xb2\x36\x45\xa3\xd1\x1c\x40\
\x10\x08\x20\x84\x42\x21\x3e\x01\x10\x11\x03\x98\xa5\x94\xca\x03\
\xfe\x54\xf1\xae\xee\xdc\x13\xc7\x4f\x64\x1f\x8d\xd7\xd6\xe0\x38\
\x0e\xb6\x1d\x25\x1c\x09\x61\x59\x16\x4e\xd4\x26\x12\x89\xa0\x69\
\x1a\x63\x63\x63\x72\xfe\xfc\xf9\xe7\x32\x99\xcc\xc3\x0a\x41\x69\
\xfa\x27\x00\xea\x52\x13\x93\x5d\xb8\xae\x1b\xdb\xb3\xe7\xc3\x0f\
\x2a\xd5\xea\x92\x54\x2a\x85\x15\x35\xb1\xed\x28\x91\x48\x04\xd3\
\x34\xb1\xa3\x51\x42\x46\x08\x11\xa1\x5a\xad\x32\x30\x38\x48\xae\
\xa7\x87\xf1\xd2\x44\xe1\xb3\xab\x57\xdd\x60\x59\xe6\xe9\x8f\xbb\
\x47\x00\x51\x4a\x61\x18\x06\xc5\xe2\xc8\x1d\xef\xbc\xfb\xfe\x98\
\x6e\x18\x4b\x1a\x1a\x1a\x88\xc7\xe3\xc4\x62\x0e\xa6\x65\x62\xdb\
\x36\x4e\x34\x4a\x48\x37\x10\x11\xc6\xc7\xc7\xe9\xca\x75\xd3\xd5\
\x9d\xc3\xf7\x02\x62\xb1\x58\x5d\x47\x47\xf6\x5b\x00\x06\xc0\xe0\
\xd0\xd0\xc7\x28\x12\x3e\xdd\xd3\xfb\xd6\xe0\xe0\xd9\xdb\x1b\x1a\
\x1a\xb0\xa3\x36\x11\x33\x4c\xd8\x0c\x61\x86\x23\x58\xa6\x45\x38\
\x1c\x46\xd7\x34\x7c\x11\xc6\xc6\xc6\xe8\xec\x3c\x49\xa9\x34\x8e\
\xef\xfb\x44\x22\x61\xe6\xce\x9b\xd3\x3b\x3b\x93\xd9\x32\x0d\x28\
\x9c\x1d\x9e\x34\x92\x61\x34\xe5\x07\x06\xda\x5d\xd7\x0d\xcf\x9f\
\x37\x97\x88\x15\xc1\x34\xc3\x84\xc3\x61\x22\xa6\x89\x15\x89\x10\
\x32\x42\x68\x9a\x86\xe7\xfb\x14\x86\x0b\x74\x74\x74\xe2\xba\x1e\
\x22\x42\x6d\x6d\x2d\x89\x44\xbc\x35\x12\x09\xaf\x1c\x2e\x16\x99\
\x39\x63\xc6\x24\xc0\x34\x4d\x4e\x9c\xc8\x3e\xd2\xd2\xd2\xf2\x94\
\x1f\x04\xcc\x9d\x73\x19\x46\x48\xa7\xa9\xe9\x06\x6a\x62\x35\x48\
\x10\x20\x32\xf9\xf1\x69\x4a\xc3\x75\x5d\xfa\x06\xf2\x74\x76\x74\
\x22\x02\xa6\x69\x91\x48\xc4\x19\x3b\x7f\xee\xe9\xfc\x40\xff\x23\
\xfd\xf9\x3e\x2c\xcb\xba\x04\x78\xfd\xaf\x6f\xbc\xb5\x6d\xdb\xf6\
\x2f\x2a\xa5\x28\x97\xcb\x54\xdd\x2a\x55\xd7\xa5\x54\x2a\x51\xa9\
\x54\x58\xb1\xe2\x26\x36\x6e\x7c\x9c\x6b\x97\x5c\x83\x88\xd0\x79\
\xe2\x24\xa7\x4e\x75\x4d\x17\xb6\xa2\x26\x85\x42\xe1\xbb\x9e\xe7\
\x6e\x0e\x87\xc3\x2c\x5c\xb8\x10\xcb\xb2\x98\xd2\x3a\xdb\xb7\xbf\
\xdd\xf0\xca\xcb\xaf\x7c\xd4\xd7\xd7\x3f\x73\x0a\xe2\xfb\x3e\xbe\
\xef\x13\x04\x01\x82\x50\xae\x56\x29\x4f\x4c\xf0\x8d\x6f\xae\x63\
\xcd\x9a\xbb\x26\x9f\xff\x78\x0d\x20\x14\x8b\x23\xb7\xdf\xfa\xea\
\xab\xb3\xa9\x56\x1f\x62\x64\x64\x16\x83\x83\xbb\xc9\xe5\x1e\xa5\
\x54\xea\x52\x22\xc2\x8e\x9d\xcd\xa0\x14\x2d\xbb\x5a\x7e\xd5\xbc\
\x6b\xf7\x0f\xfd\xc0\x47\xd7\x75\x82\x20\x98\x86\x04\x41\x80\x04\
\xc2\x44\x79\x82\x4c\x7d\x86\x4d\x2f\x3c\x4b\x28\x64\x54\x0b\x85\
\xc2\xad\xb7\xfc\xec\xb1\xcd\xe4\xf3\x9f\xa2\xbb\x1b\x2a\x15\x04\
\x28\x88\x30\x02\xbf\x9d\xfc\x46\xde\x6f\x9e\x54\x50\x20\x8c\x8e\
\x8e\xd2\xd5\xdd\xfd\xe5\x23\x47\x8e\xfc\x72\x70\x60\xf0\x32\xcf\
\xf3\xd0\x75\x1d\xa5\xd4\xb4\x21\xc7\xc7\x27\x58\xfb\xa5\x35\xe7\
\xef\xfc\xc2\x1d\x9f\x09\x82\xe0\xe8\xe2\xeb\xae\xdf\x56\xf6\xfd\
\xbb\xe5\xa2\xd6\x4f\x03\x87\x81\x32\x3c\x38\xd9\xc1\x8e\x5d\x53\
\x2a\xd5\x45\x84\x73\x63\x17\x7c\xdf\xf7\x29\x97\xcb\x14\x8b\x45\
\x2a\x95\x6a\x93\xae\x6b\x77\x2a\x8d\xc6\x78\x6d\x7c\x74\xc1\x82\
\xcb\xff\x18\x4f\xd4\xfe\x57\xd3\x15\x86\x6e\xe0\x2c\xbe\x8a\x3e\
\x58\x7c\x1a\x1e\x38\x0b\x0d\x25\x78\x2f\x0c\x5b\x2c\xf0\xfe\x0f\
\xa4\xa4\xa5\x79\xe8\x4b\xcf\x5e\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x03\x34\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x01\x68\xf4\xcf\xf7\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x02\xd4\x49\x44\x41\x54\x38\
\xcb\x55\x92\x4f\x68\x5d\x65\x10\xc5\x7f\xdf\xbb\x37\xaf\x49\x48\
\xd5\x04\x23\x22\x22\x88\x85\x08\x12\x05\x0b\xa2\x0b\xc1\x8d\x90\
\x6c\x14\x04\xa1\xee\x94\x42\x15\x14\x22\xed\x56\x77\x6e\x0d\x82\
\x2b\x05\x97\xba\x15\xdd\x58\x10\x0a\x82\xab\x5a\x50\x2b\x41\x24\
\xb1\x31\x31\x79\x6d\xfe\xbc\x97\xfb\xde\xfd\xf7\xdd\xef\x7e\x33\
\xe3\xe2\x3e\x0a\xce\x76\xe6\x9c\x39\x67\xe6\xb8\x67\x77\xde\xe4\
\xa9\xfe\xe3\xb1\x57\xab\x6f\xbe\x9d\xdc\x48\x92\xb7\x0f\x3e\x5e\
\xa9\xde\x7b\x68\xe8\x66\xb7\x5e\x30\x7f\x98\xe3\x5e\xdb\xdb\x58\
\x3d\x8a\xc3\xdb\xdb\x61\x9f\x5c\x4b\x1c\x37\x57\xe1\xb8\x35\x1a\
\x85\xdf\x2b\xdc\xeb\x7b\x1b\x3c\x98\x9c\xbf\xb5\x1b\x0e\x7f\xda\
\x6a\xfe\xbe\x96\x02\x76\xa3\xbc\x49\xa1\xd5\xc5\x6c\x32\xde\x48\
\x7f\xa9\xb7\x18\xc4\x13\x10\x83\x3f\xab\x24\x1d\x1c\x0c\xa0\x56\
\x18\x04\xd8\x6b\x36\xdd\xfa\x3f\xef\xb3\x9c\x2e\xfe\x20\x26\x6b\
\xa7\x92\x91\x49\x4e\xa9\x35\x99\xe6\x8c\x64\x7c\x2e\x2d\xb5\xde\
\x3e\xf2\xc3\x0b\x07\xf1\x88\xa1\x64\xa8\x19\x00\x56\x44\x18\xc6\
\x26\xbd\xe5\xb7\xae\x57\xea\x3f\x20\x76\x0d\x0c\x28\x04\xee\x34\
\x70\x37\xe0\xf8\xf9\x19\x38\x89\x30\x8c\x39\x85\x2c\x50\x08\x8c\
\x05\xc4\xde\xe5\x91\x99\x2f\xdd\x2b\xbb\x97\x79\x2c\x5d\xe6\x9c\
\xeb\x7f\x5a\x5b\xb3\x91\x49\xfe\xdb\x71\x1c\x5d\x3a\x96\xd1\xce\
\x99\x4c\x48\x1f\x4d\x1f\xee\xcf\xb8\xb4\x19\x6b\xc1\x69\xcc\x28\
\xb4\xba\x38\xd1\x62\xbb\x52\x7f\xbd\xb1\xb0\x9e\x06\x6b\xab\x91\
\x8c\xd9\x6f\xef\x31\x94\x8c\x68\x42\x34\x21\x8f\xc5\x1a\x13\x59\
\x49\x8f\xe2\x30\x39\x8e\x23\x0e\xe2\x11\x5e\x43\xa7\x33\x2a\x8c\
\x22\x64\xf2\x75\x3a\x88\x27\x8c\xa5\xc0\x6b\xc0\xb0\xce\x85\x57\
\x38\x8b\x70\x1c\x9f\x48\xff\x6d\xef\x11\x25\x42\x04\x12\xa0\x35\
\x38\x8d\x70\x18\xa0\xd4\x6f\x12\x7d\x6b\x69\x91\x91\xbc\x48\x26\
\x1d\xed\xdd\x00\xbb\x4d\x67\xbd\xdf\x7b\x29\xa5\xd6\x0f\x19\xb6\
\xbb\x8c\xe5\x33\x4a\x85\x89\x40\x21\x05\xb3\xbd\xf3\x2c\xa7\xb8\
\x97\xef\xbc\xc3\x52\xf2\x00\x0b\xbd\x79\xfa\x6e\xe6\xaa\x73\xee\
\x93\x68\x32\xd7\x58\xc0\x6b\x83\xb7\x40\x63\x81\x5a\x1b\x2a\xf3\
\x75\xa5\xfe\xa3\x4a\xeb\xcd\xda\x1a\x82\xb5\x5d\x20\xe6\x7a\xb3\
\x5f\xf4\x70\x57\x5a\x8b\xd4\xd6\x90\x6b\x49\xa1\x35\x5e\xbb\x21\
\x41\x11\x13\x82\xb5\xf8\x29\x99\xd7\x66\x93\x68\xd7\xd2\xc6\xda\
\xcf\x83\xc4\x2b\x7e\x0a\x3c\x93\x9c\x4c\x26\xd4\xd6\xd0\x5a\xec\
\x2e\x03\x38\x1c\xd1\x04\x6b\x15\x1a\x85\x5a\xaf\x12\x2c\x71\xcf\
\xef\x5c\x6a\x22\xd2\xaf\xd5\x53\x6a\x4d\xae\x15\xb5\x79\xc4\xf4\
\x3e\xf8\x7e\x48\x1a\x85\x4a\xbb\xb0\x14\x0a\x5e\x4f\xd2\x41\x3c\
\xd9\x6f\xad\xbd\xd0\x5a\xa4\x25\x76\x92\x55\xf9\x5f\x99\x75\xe7\
\x2f\xa7\xff\x19\x0b\xe4\x02\xc1\xf6\x1d\xb7\x9f\x5b\x25\xe8\xaf\
\x44\x4b\x88\xd3\x47\x76\x9a\xbb\xd2\xe9\xe6\x52\x21\x8b\x90\x4d\
\xc1\xad\x09\x33\xee\xe9\x94\x42\xfe\xa0\xd2\x79\x6a\xfd\x0e\xaf\
\x6b\xb4\x06\x6a\x20\x40\x34\x08\xd6\x11\x14\xd2\xc9\x0f\x06\xf0\
\x3d\x73\xbd\x37\x98\xef\x49\x8a\x03\x7a\x04\xcc\xd6\x09\x06\xa5\
\x3c\x49\xa5\x97\xa9\xf4\x55\xbc\xae\xd0\x1a\xb4\xf6\x17\x6a\x3f\
\xd2\x73\x5f\x31\xeb\x76\x59\x48\x60\x29\x85\xc5\x94\xff\x00\xe1\
\x78\x1f\x4c\x73\x1c\xbc\x8b\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x07\x6a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x01\x97\x70\x0d\x6e\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x07\x0a\x49\x44\x41\x54\x48\
\xc7\x55\x95\x6b\x70\x5c\x65\x19\xc7\x7f\xef\x39\x67\xf7\x9c\xb3\
\x97\x64\x2f\xd9\x96\x4d\x43\x2f\x50\xc2\x08\xd2\x32\x50\x2d\x96\
\x62\x5b\x75\x10\x61\x04\x06\x54\x04\x01\xc5\xaa\x0c\x1f\x44\x40\
\x40\x1c\x9d\xd1\x51\x04\xf1\x02\x32\x38\xf5\xc2\x54\x07\x66\xb4\
\x53\x2e\x56\x40\x46\xa0\x97\x34\x09\xb1\x05\x94\xd2\x0b\xb4\x49\
\xda\x5c\x20\xc9\xe6\xb2\xd9\x26\xdb\xec\x66\xf7\xbc\xe7\x9c\xc7\
\x0f\x21\x01\x9e\x8f\xef\xcc\xfb\xfc\xde\xe7\x79\xfe\xff\xf7\x51\
\x22\x82\xef\xfb\xd4\xbc\xfa\x6a\x44\x84\xb7\x0e\x1e\x96\x6a\x6d\
\x56\x10\x11\x56\xac\x68\x95\x83\x87\x0f\x09\x5a\x6b\x3a\x3b\xbb\
\x1e\xd2\x5a\xa3\xea\xf5\x3a\x33\xd5\x8a\x1c\x7c\xeb\x30\xc6\x45\
\x6b\x2f\x11\xc7\x76\x58\xd2\xd2\xac\x78\x3f\x5b\xe3\xf8\x44\x71\
\x43\x77\x6f\x8f\x78\x9e\x27\xff\xd9\xbf\x5f\x0c\x80\xf6\x8e\xce\
\x29\x3f\xd0\x7b\xcf\x58\xbe\x82\x5d\xbb\x77\x13\xb1\xac\xf5\x06\
\x40\xdb\xee\x8e\x3f\xdd\x70\xdd\xcd\x5c\xbc\x6e\x23\xa9\x74\x4a\
\x7d\xc2\xdd\xfc\x2a\xbe\xef\xe3\xfb\x3e\x41\x10\x70\xe0\xc0\xc1\
\xa7\x7d\xdf\x47\x6b\x0d\xbe\xef\x2b\xad\x35\x7b\xda\xda\xa5\x30\
\x36\x2a\x5a\x6b\x11\x11\x0c\xa5\x94\x74\x76\x76\x4d\x2c\x5f\xb1\
\x94\xa6\x4c\x96\xce\xae\x2e\x86\x86\x46\x36\xab\x9e\xde\x5e\x12\
\x89\x84\x24\x12\x09\x44\x42\x0e\x1c\x38\x44\x53\x2e\xa3\xd4\xda\
\x4f\xae\x97\xa9\xa9\x29\x3c\xdf\x63\xcb\x96\xdf\x63\x98\x86\x71\
\xfe\xf9\xab\xc4\xb8\xff\x81\x9f\xa9\x6c\x36\x4b\xf3\xe2\x3c\x93\
\xc5\x22\x57\x2c\xf9\x41\xb8\xf8\xa5\x75\xa2\x44\x84\x20\x08\x10\
\x11\xa5\x94\x32\x80\xe0\xed\x77\x8e\x76\x55\x2a\xb3\xeb\xf2\x4b\
\x16\xd1\x7c\x5a\x33\xe5\xe9\xe9\x62\x26\x93\xc9\x85\x61\x80\xc1\
\xfb\xa1\x94\x12\xd3\x34\x83\x7f\xbd\xf8\x6f\x01\xb5\xee\xf4\xa5\
\xcd\xe4\xb2\x39\x22\x96\x05\x8a\xa6\xdd\x6d\x7b\x65\x70\xf0\xbd\
\x5b\x94\x88\x70\xe4\xed\x63\xf8\x81\x7e\xb4\x3c\x5d\xbe\xbd\xb9\
\x39\x4f\xb2\x21\x41\x2a\xd9\x88\x15\xb1\xe8\xee\xe9\x61\x78\xb8\
\x40\xbe\xf9\x34\xce\x6e\x6d\x55\x4a\x44\xb8\x68\xed\xa7\xa5\x30\
\x3a\x42\x10\x84\x3c\xf8\xe0\x2f\xb8\xe9\xa6\xeb\xd1\x5a\xd3\xb5\
\x6f\x1f\x5e\x5d\xd3\xd8\x98\x3c\x51\x3a\x59\x5a\x79\xc1\x05\x17\
\xa0\x44\x84\xc7\x1e\xfb\xc3\xf7\xfe\xfe\xb7\x6d\xbf\xd3\x5a\xa3\
\xb5\x26\x08\x43\x2a\x95\x19\x9e\xf9\xc7\x76\xea\xf5\xda\x2b\x77\
\x2d\xda\x72\x69\x8f\x37\xc8\x74\xad\x1c\x28\x11\x61\xe7\x2b\x6d\
\x78\xda\x53\x43\xc3\xc3\x0d\x6f\xbc\xf1\xc6\x3f\xdf\x7b\x6f\x68\
\x63\xe8\x07\x7c\xff\x9e\x3b\xbe\xd5\xbc\x24\xbf\x75\xf5\xa1\x6b\
\xa7\x99\xd0\x0d\x0c\x7b\x77\x2c\xcc\x6e\x7e\x66\x5a\x6b\xc6\xc7\
\xc7\xd7\xb6\xb5\x75\xec\xed\x1f\x18\xb8\x3b\x94\x70\xfe\x5c\x89\
\xc8\x07\x5d\x02\x2c\xa5\x14\x86\x61\x44\xde\x3c\x70\x68\x7f\x53\
\x2e\xbb\x21\x9e\x4c\xfc\x3a\xf0\x03\x29\x95\x4a\x57\x03\x0a\x58\
\xb8\x10\x07\x7c\xd3\x34\x79\xe9\xe5\x5d\x95\xf9\x4e\xd9\xd1\x28\
\x86\x61\x90\xcb\xe5\x76\x1c\x3f\xd1\x77\xfb\x87\x2f\x54\xe6\xda\
\xfb\xce\xc3\xe9\x74\x2a\xe2\xb8\x51\x62\xb1\x18\xae\xe3\xe2\xfb\
\x3e\xfb\x5f\x7f\x8d\xbe\xbe\x81\x47\x00\x0c\xa5\x14\x4a\x29\x66\
\x6b\xf5\x4b\x06\x07\xdf\xbd\x33\x95\x4e\x11\x8f\xc7\x49\xc6\xe3\
\x98\x86\xc1\x48\x61\x84\x6a\xa5\xc6\xa2\x45\xb9\xb9\x77\xbf\x73\
\xb4\x07\xa5\x14\xbd\xbd\x3d\x1d\x67\xb7\xb6\x12\x8f\xbb\xc4\x63\
\x71\x2c\x2b\x42\xad\x56\xa3\xa7\xf7\x38\x96\x19\xe1\xac\xd6\x33\
\xaf\x04\x30\x82\x50\x83\x04\xcf\xb6\xb6\x9e\x45\x3a\x9b\x22\x16\
\x8b\x61\x3b\x36\x4a\x29\x8e\x76\x1f\x23\x0c\x42\x12\x89\x44\xbd\
\x50\x28\xbc\x00\x60\x74\x1f\xed\xf9\xee\xb5\xd7\x5c\x77\xcd\xa6\
\x8d\x9f\xe3\xc6\xaf\xdd\x42\x32\x9e\xc4\x89\xda\x4c\x96\x26\x19\
\x1e\x2a\x90\xc9\x64\x39\x39\x35\xb9\xac\x58\x2c\x02\x60\x8c\x8d\
\x8e\x37\x64\x32\x19\x72\xb9\x1c\x03\x03\x03\x2c\x69\x59\xc1\xb6\
\x6d\xdb\x39\xd1\xd7\x47\x26\x9b\xc5\xf7\x75\x87\x52\xe6\xd8\x39\
\xe7\x7c\x6c\x4e\xa4\x3b\x77\xb7\xf1\x93\x1f\xff\x54\x82\x20\xc0\
\xf3\x3c\xb4\xf6\xf1\x74\x9d\xdb\x6e\xbb\x95\xcb\x2e\xbf\x94\x1f\
\x3a\x7f\x9c\x7d\xbd\x76\xc4\x2d\x87\x15\x66\x26\xcb\xd7\x19\x12\
\x86\xdc\x73\xef\x3d\xb1\x64\x32\x49\x32\x99\x24\x9d\x4e\x91\x49\
\x67\xf8\xcb\xd6\x27\xa8\xd5\x6a\x7f\x7e\xb5\x76\xc0\x1d\xf1\x27\
\x98\x39\x59\x86\x37\xab\x5f\x36\x08\x15\xc9\x44\xbc\xbe\xf9\xdb\
\x9b\xd5\x86\x4d\x1b\x36\x67\x73\x4d\x7e\x22\x99\xe4\xdc\xf3\xce\
\xad\x46\x22\x91\x5b\x8b\x7d\x63\xb7\xb2\x7f\x06\x5e\x98\xaa\x32\
\xa1\xaf\x5f\x70\xdc\x7c\x88\x08\x4a\x29\x00\x25\x22\x02\x60\x9a\
\x26\xc5\xe2\xe4\x17\x0e\x1d\x3a\xfc\xec\x4c\xa5\xe2\x66\xb3\x19\
\xb2\x4d\x59\x12\x89\x38\xa6\x61\xcc\x5a\xa6\xf5\xb8\xeb\xba\x8f\
\xc4\x62\xb1\x01\x80\x30\x14\x40\x88\x44\x22\x7c\x04\x20\x22\x16\
\x70\x9a\x52\xaa\x00\x04\xf3\xc9\xfb\xfa\x07\xee\x3f\x7a\xac\xe7\
\x47\xa9\xc6\x06\x12\x89\x04\xf1\x78\x8c\xa8\x1d\xc1\x75\x5d\x12\
\xb1\x38\xb6\x6d\x63\x18\x06\xe5\x72\x59\x4e\x9d\x3a\xf5\x70\x3e\
\x9f\xbf\x5b\x21\x28\xc3\xfc\x08\x40\x7d\x50\xc4\x5c\x15\x5a\xeb\
\xe4\xbe\x7d\xaf\xbd\x5a\xf7\xbc\x55\xd9\x6c\x16\x37\xe6\x10\x8f\
\xc7\xb0\x6d\x1b\xc7\x71\x88\xc7\x62\x44\xac\x08\x22\x82\xe7\x79\
\x8c\x8e\x8d\x31\x30\x38\x48\xb5\x32\x5b\xfc\xcc\xa6\x8d\x17\xba\
\xae\xf3\xee\x87\xdd\x23\x80\x28\xa5\xb0\x2c\x8b\x52\xe9\xe4\x65\
\x2f\xbd\xbc\xb3\x6c\x5a\xd6\xaa\x96\x96\x16\x52\xa9\x14\xc9\x64\
\x02\xc7\x75\x88\xc7\xe3\x24\x62\x31\x22\xa6\x85\x88\x50\xad\x56\
\xe9\x1b\xe8\xa7\xaf\x7f\x80\xc0\x0f\x49\x26\x93\x4d\xdd\xdd\x3d\
\xdf\x04\xb0\x00\xc6\xc6\xc7\x3f\x44\x91\xe8\xbb\x83\x43\xcf\x8f\
\x8d\x4d\x7c\xbe\xa5\xa5\x85\x78\x2c\x8e\xed\x44\x89\x3a\x11\x9c\
\xa8\x8d\xeb\xb8\x44\xa3\x51\x4c\xc3\x20\x10\xa1\x5c\x2e\xd3\xdb\
\x7b\x9c\x4a\xa5\x4a\x10\x04\xd8\x76\x94\x65\xcb\x97\x0e\x2d\xc9\
\xe7\xb7\x2e\x00\x8a\x13\x93\x73\x46\xb2\xac\x35\x85\xd1\xd1\x2e\
\xad\x75\x74\xc5\xf2\x65\xd8\xae\x8d\xe3\x44\x89\x46\xa3\xd8\x8e\
\x83\x6b\xdb\x44\xac\x08\x86\x61\xe0\x07\x01\xc5\xc9\x22\xdd\xdd\
\xbd\x68\xed\x23\x22\x34\x36\x36\x92\x4e\xa7\x3a\x6c\x3b\xba\x61\
\xb2\x54\x62\xf1\xa2\x45\x73\x00\xc7\x71\x38\x76\xac\xe7\xbe\xf6\
\xf6\xf6\x07\x83\x30\x64\xd9\xd2\xd3\xb1\x22\x26\x6b\xd6\x5c\x48\
\x43\xb2\x01\x09\x43\x44\xe6\x16\x9f\xa1\x0c\xb4\xd6\x0c\x8f\x16\
\xe8\xed\xee\x45\x04\x1c\xc7\x25\x9d\x4e\x51\x3e\x35\xfd\x50\x61\
\x74\xe4\xbe\x91\xc2\x30\xae\xeb\x7e\x00\x78\xfa\xa9\x67\x9e\xdf\
\xb1\xe3\xb9\x2f\x2a\xa5\xa8\xd5\x6a\x78\xda\xc3\xd3\x9a\x4a\xa5\
\x42\xbd\x5e\x67\xfd\xfa\x8b\x79\xe0\x81\x9f\xb3\x7a\xd5\x79\x88\
\x08\xbd\xc7\x8e\x73\xe2\x44\xdf\x42\x62\x37\xe6\x50\x2c\x16\xbf\
\xe3\xfb\xfa\xf1\x68\x34\xca\xca\x95\x2b\x71\x5d\x97\x79\xad\xf3\
\xdc\x73\x2f\xb6\x3c\xf9\xc4\x93\xff\x1d\x1e\x1e\x59\x3c\x0f\x09\
\x82\x80\x20\x08\x08\xc3\x10\x41\xa8\x79\x1e\xb5\xd9\x59\xbe\xfe\
\x8d\x1b\xb9\xea\xaa\x2b\xe7\xbe\xff\x54\x03\x20\xec\xb0\x3b\x7e\
\x39\x90\x1c\xdf\x3c\x19\x4c\xe7\x4e\xfa\x65\xa6\xc2\xf2\x54\xc1\
\x2f\xde\x3b\x1b\xd4\xb6\x2a\x11\x61\xd7\xee\x36\x50\x8a\xf6\x3d\
\xed\xbf\x6d\xdb\xb3\xf7\xae\x20\x0c\x30\x4d\x93\x30\x0c\x17\x20\
\x61\x18\x22\xa1\x30\x5b\x9b\x25\xdf\x9c\xe7\x91\x47\x7f\x43\x24\
\x62\x79\xc5\x62\xf1\xb3\x77\xe6\xb7\x74\x0e\xea\x11\x26\x83\xe9\
\x39\xb1\xeb\x10\x26\x7c\x98\x0e\xb6\xcf\xad\x91\x9d\x6d\x73\x0a\
\x0a\x85\xa9\xa9\x29\xfa\xfa\xfb\xbf\x7a\xe4\xc8\x91\x5f\x8d\x8d\
\x8e\x9d\xee\xfb\x3e\xa6\x69\xa2\x94\x5a\x30\x64\xb5\x3a\xcb\x35\
\x5f\xba\xea\xd4\xe5\x57\x5c\xf6\xa9\x30\x0c\xdf\x5e\x5d\xbb\xa1\
\x8e\x27\x51\x94\x02\x2d\x30\x54\x87\xfe\x3a\x04\xdc\x3c\x57\xc1\
\xae\x3d\xf3\x2a\x35\x45\x84\xe9\xf2\x4c\x10\x04\x01\xb5\x5a\x8d\
\x52\xa9\x44\xbd\xee\xad\x31\x4d\xe3\x72\x65\xd0\x9a\x6a\x4c\x4d\
\x9d\x79\xe6\x19\x7f\x4d\xa5\x1b\xff\x67\x98\x0a\xcb\xb4\xf8\x78\
\xdf\xb5\x30\xe2\x5d\xcd\x84\xfe\x0a\xe5\xc0\xa4\x2e\xcf\x12\x55\
\x4f\x61\x1b\xfc\x1f\x0b\x03\xc8\x05\x59\x65\x3b\x42\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xc5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x06\x00\x00\x00\xe7\x46\xe2\xb8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x00\x67\x49\x44\x41\x54\x78\x9c\xed\
\xc1\x31\x01\x00\x00\x00\xc2\xa0\xf5\x4f\xed\x69\x09\xa0\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x06\
\x44\x9f\x00\x01\xc3\xcd\x96\xea\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x02\x7a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdd\x06\x0d\x08\x1d\x33\x51\xf1\xd4\x9e\x00\x00\x02\x07\x49\x44\
\x41\x54\x38\xcb\x65\x91\x3d\x6b\x14\x41\x18\xc7\x7f\x73\xb7\x2c\
\x6c\x0c\xb9\x20\xa6\x0d\x58\x45\x90\x88\x1f\x40\xb1\x12\x2e\x8d\
\x82\x5f\x41\x08\x58\x05\xb4\xb5\x0c\xd8\x09\xa2\x20\x08\x7e\x88\
\x58\x05\xac\x04\xfb\x08\x56\x42\x40\x48\x9d\xdc\xe5\xf6\x66\xe7\
\x75\x67\xc7\xe2\x19\x72\x07\x0e\xfc\x18\x06\xe6\xff\x32\xcf\xa8\
\x9c\x1f\x00\xb7\x81\x09\x70\x0b\xa8\xf7\x40\x1d\x42\x7a\x02\xe1\
\x21\x78\xc0\xfe\x82\xee\x07\x74\x9f\x41\x9f\x83\x41\xf0\xa8\x9c\
\x1f\x17\x83\x4d\xa0\x7e\x0d\xea\x18\xfa\x46\x84\xae\xe0\x01\x0b\
\x18\x0b\xe6\x2d\x98\xf7\x72\x0e\xa8\x9c\x0f\x80\x49\x0d\xf5\x09\
\xa8\x29\xf4\xe5\x72\x57\x76\x0f\x44\x20\xac\x19\x9a\x53\x70\xcf\
\x21\x84\x11\xd4\x00\x1f\xa1\x9f\x4a\xad\x05\x70\x05\x5c\x96\x7d\
\x06\x5c\x03\xcb\x62\xda\x01\x66\x9a\xb3\x79\x17\x42\x8f\xca\xf9\
\xd9\x3e\x54\x67\x90\xc6\x92\xb8\x28\xe8\x92\x9e\x80\x5c\x48\x80\
\x23\xa5\x88\x31\xa4\x10\xb8\x5f\x41\x38\x84\x38\x96\x6a\x4b\x60\
\x5e\x12\x6d\xa9\x9e\x91\xa5\x80\x9e\x10\x32\xd6\x82\x31\x8c\xbd\
\xe7\x55\x05\x66\x2a\xce\xb6\x18\x2c\xcb\x84\x03\x30\xb0\xbe\x62\
\x14\x71\xd7\x09\xd6\xf2\xa8\x02\xbd\xfb\xff\xe0\x62\x11\xe7\x1b\
\x71\xce\x10\x23\x78\x0f\xc6\xc0\x72\x09\xc6\xb0\x5b\x49\x62\xcf\
\xea\xdb\xe2\xda\xbb\x57\xe2\x94\xa0\xef\xb9\x69\x50\x0c\x18\xc1\
\xf2\x02\xda\x32\x34\x49\xcf\x39\x93\x33\x37\x0c\x83\xa4\x5b\x0b\
\x5a\x43\xdb\x0a\x5d\xc7\xc5\x08\xda\x53\x99\x7a\x4b\x4a\x96\x18\
\x13\x21\x48\x5a\x4a\xab\xda\x5a\xc3\xf5\x35\xcc\x66\x42\xdb\x82\
\xb5\xfc\x54\x29\xb1\xef\x1c\x67\x31\x32\xee\x7b\x49\x04\x50\x4a\
\xf6\x94\xc0\x39\xa9\x7c\x79\x09\x57\x57\xb0\x58\x40\x08\xa4\xba\
\xe6\x5e\x65\x0c\xbf\xad\xe5\x93\x73\x1c\xc5\x28\xc9\xc3\xb0\x12\
\xf7\xbd\xbc\xb5\x6d\x61\x3e\x17\xb1\xf7\x30\x1a\xf1\xa1\x69\x38\
\x57\xb3\x19\x68\x4d\xdd\x75\x9c\x58\xcb\x34\x04\x11\xae\xd7\xb7\
\x56\x0c\xb4\x96\x33\xf0\x6d\x63\x83\x17\x77\xee\x90\xaa\x61\x80\
\x61\x20\xc4\xc8\x81\x73\x1c\x19\xc3\xb1\x73\x6c\x7a\x0f\x21\x48\
\x7d\x6b\x85\x18\xd1\x4a\xf1\xa6\x69\xf8\xb2\xb5\x05\xdb\xdb\xa0\
\xe6\x73\xf9\x96\xb6\x95\x7a\x6d\xcb\x5d\xad\x79\xa9\x35\x4f\xad\
\x65\xcf\x7b\x88\x91\x3f\x29\xf1\x7d\x3c\xe6\x6b\xd3\xf0\x77\x32\
\x81\x9d\x1d\xe1\x1f\x3c\x20\x6c\x94\x65\x65\x77\x27\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\xc9\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x02\x00\x00\x00\x68\x24\x75\xef\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\
\x95\x2b\x0e\x1b\x00\x00\x06\x6c\x49\x44\x41\x54\x78\x9c\xed\x9d\
\x3b\x7a\xd3\x40\x14\x46\xc7\x7c\x2e\x48\x9f\x02\x0a\x16\x91\x2d\
\x50\xb2\x04\x52\x7a\x5b\xa4\xcc\x16\x52\x66\x09\x64\x11\x14\x2e\
\x48\xef\x52\x14\x0a\xf2\x58\x96\x34\x8f\xfb\xfa\xe7\x7a\x4e\x05\
\xc4\x1e\x8b\x39\x3e\x1e\x7b\x90\xc5\xee\xd7\xdf\x2f\x21\x84\xc3\
\xfd\x31\x60\xf0\x78\xf7\x66\x7d\x08\x73\x9e\x4f\x0f\xd6\x87\xf0\
\xc1\xd3\xfb\xd7\xfd\xf4\xab\x80\xa1\x6d\x9c\x1d\x40\x6d\xb6\x8c\
\x82\x42\x08\xbb\x61\x18\xa6\xdf\x8c\x20\x68\x1b\x01\xd1\x66\x5b\
\xd8\xcc\xce\x7e\xed\x16\x08\xda\x6e\xbc\xb6\x99\xaa\x10\xc2\xe1\
\xfe\xb8\x1b\x86\x61\xf1\x67\xd3\x2d\xc4\x8f\x2b\x0f\x43\x6d\xfa\
\x85\x6d\xe8\x58\x28\xec\xfa\x9e\x08\xda\x6e\xa4\xb6\x35\x55\xe1\
\xbf\x85\x8f\xc2\xb6\x6f\x1a\xdf\x01\x01\x65\x6d\x3a\x85\x65\xce\
\x7f\xa2\xb0\xeb\x11\x11\xb4\x39\xab\x2d\xa9\x2a\x44\xd3\x7e\x2e\
\x2c\xf3\x9e\xb3\xfb\x9b\xa3\xa0\x4d\xae\xb0\x8a\x09\x2f\x28\xec\
\xfa\x91\x10\xb4\x35\x5a\x5b\xbe\xaa\x70\x39\xcf\x17\x85\x95\x0e\
\x74\x3d\x9c\x2d\x42\xda\x78\x0b\x23\xce\x70\x65\x61\xd7\x47\x80\
\xa0\x0d\xbc\xb6\x0a\x55\xe1\x6a\x62\x3f\x6d\xff\xb8\xe8\x68\xea\
\x0e\x88\x1d\x9c\xad\xbf\x09\xc6\xc9\x61\x28\x2c\xc6\xbc\xb6\x38\
\xaf\xe7\xd3\x83\x79\x6d\x44\x4f\xd7\x33\xb9\x20\xec\x70\x7f\x24\
\x3e\x8c\x89\xb6\x45\x37\x86\x2f\x92\x42\xaf\x37\xcc\x85\xc5\xa8\
\x69\x4b\xfa\x50\xd6\xc6\xa5\x6a\x71\xea\x96\x85\xd1\x23\x9b\x10\
\xd5\x56\xe4\x40\x41\x9b\xc2\x2a\x2e\x58\x58\x0c\xbb\xb6\xea\x79\
\x17\xd2\xc6\xae\x6a\x6d\xae\x56\x85\x31\x46\x36\xc1\xa2\x8d\x65\
\xae\x19\xb5\x29\xbf\x37\x56\x2a\x2c\xa6\x5a\x1b\x7b\x16\x44\x6d\
\x72\xaa\x36\x26\x67\x4b\x98\x44\x64\x13\x45\xda\x44\x17\x9e\x0a\
\x6d\x86\x9f\x38\x0d\x0a\x8b\x49\x6a\x53\x7b\x6b\x97\xa9\x4d\x41\
\xd5\xf6\x93\x38\x21\x4c\x34\xb2\x89\x45\x6d\x26\x1f\x9e\x36\xb4\
\x81\xec\xe3\x18\x17\x16\x33\x69\x33\xdf\x9e\x98\x69\xd3\x54\x95\
\x5c\x23\xe6\x7b\x89\x15\x43\xf0\x02\xf2\x44\x0e\xff\xb5\x7d\xff\
\xf3\xc3\xfa\x40\x2e\x48\x0b\xd3\x07\x61\xf7\xf6\xf1\xee\x4d\x3f\
\xf4\x9c\x36\xb2\x5e\x12\x75\x56\xb2\x18\xc3\x3d\x40\xf3\x17\xe4\
\x6d\x80\xd6\xb0\x6b\x94\xb5\xd9\xaa\xca\x5c\x7a\x72\x85\xe9\x47\
\x36\xa1\xa0\x0d\xbc\xaa\x18\xe8\xc2\x62\x84\xb4\x81\xa8\xca\x7f\
\x67\x57\x20\xcc\x30\xb2\x09\x46\x6d\x20\xaa\x4a\x69\xa6\xb0\x18\
\xa2\x36\x34\x55\x45\x1f\x9c\xca\x84\x21\x44\x36\x51\xa1\x0d\x4d\
\x55\x05\x4d\x16\x16\x93\xa9\x0d\x56\x55\xe9\xbe\x44\xb1\x30\xa8\
\xc8\x26\x36\xb4\xc1\xaa\xaa\xa3\xf9\xc2\x62\x66\xda\xf0\x55\x55\
\x6c\xfb\xd5\x6c\x4d\x21\x9c\x33\xba\x01\xc2\xce\x96\x1c\xae\x0a\
\x0b\x2d\x54\x35\x51\xf7\xbc\xaf\x14\x06\xb8\x92\x35\xa4\x8a\x82\
\x87\xc2\x5a\x54\x55\xbd\xac\xd4\x0b\x43\x88\xac\x45\x55\x44\x5a\
\x2d\xac\x69\x55\x94\x77\x6d\x24\x61\x26\x91\x35\xad\x8a\x4e\x4b\
\x85\xf9\x50\x45\xfc\x50\x44\x15\xa6\x13\x99\x0f\x55\x2c\xa0\x17\
\xe6\x4c\x15\x7d\xcf\x81\x41\x98\x50\x64\xce\x54\x71\x81\x58\x98\
\x57\x55\x2c\x5b\x7a\x7b\xa6\xd9\x79\x41\x3b\x7f\x0f\x8d\xd7\x6f\
\x2f\x87\x13\xc3\x38\xbb\x9f\x9f\x7f\x33\x0c\xe3\x1a\xfa\x6e\xf2\
\x58\x05\xcb\x38\x6c\x27\x92\x3a\xde\x23\xe7\x7a\x89\x66\x19\x87\
\x47\x98\x63\x5b\x74\x78\x7d\x73\x9e\xaa\xed\x58\x1b\x4e\x64\x0c\
\xc2\x1c\x7b\xa2\xc3\x6e\x9a\xf9\xcb\x10\x8e\xe5\x81\x44\x46\x15\
\xe6\xd8\x10\x1d\x09\xc7\xfc\x5f\x37\x72\xac\x10\x21\x32\x92\x30\
\xc7\x6e\xe8\x08\xd9\x15\xf9\x42\x9f\x63\x91\xe6\x91\xd5\x0b\x73\
\x6c\x85\x8e\x9c\x57\xa9\xaf\xcc\x3a\xd6\x69\x1b\x59\xa5\x30\xc7\
\x3e\xe8\x88\x1a\x15\xfc\x52\xba\x63\xa9\x86\x91\xd5\x08\x73\x6c\
\x82\x8e\xb4\x4b\xd9\xcb\x3e\x38\x56\x6b\x15\x59\xb1\x30\xc7\x0e\
\xe8\x28\x58\x14\xbf\xb0\x8a\x63\xc1\x26\x91\x95\x09\x73\x3c\xfb\
\x74\x74\xfc\x69\x5c\xba\xc8\xb1\x66\xfd\xc8\x0a\x84\x39\x9e\x77\
\x3a\x6a\xe6\x94\x2e\x0e\xe6\x58\xb6\x72\x64\xb9\x67\x4d\x71\x9d\
\x39\xd4\x21\xd2\x4f\x73\x6b\x0c\xc4\x33\x7f\x5b\x44\xed\x15\x28\
\x6b\x0d\xe3\x5a\x81\xfa\x4a\x46\x27\x2d\xac\xdb\x52\x80\xff\x6d\
\x7d\xd7\x96\x44\x27\xb2\x84\xb0\xee\x49\x01\xa9\xad\xa9\x2e\x2f\
\x89\x42\x64\x5b\xc2\xba\x21\x05\x64\xff\x79\xa5\x2b\x4c\x22\x1d\
\xd9\xaa\xb0\xee\x46\x01\x8d\x53\x04\xba\xc8\x24\xa2\x91\x2d\x0b\
\xeb\x56\x14\xd0\x3b\xcd\xad\xeb\x4c\x22\x17\xd9\x82\xb0\xee\x43\
\x01\xed\x53\xb5\xbb\xd4\x24\x42\x91\xcd\x85\x75\x13\x0a\xd8\x7c\
\xdd\xa8\xab\x4d\x22\x11\xd9\x85\xb0\xee\x40\x01\xcb\xaf\xcc\x76\
\xc1\x49\xd8\x23\x3b\x0b\xeb\xb3\xaf\x80\xfd\x65\x1f\xba\xe6\x24\
\xbc\x91\x7d\x08\xeb\xf3\xae\x00\xca\xa5\x8b\xba\xec\x24\x8c\x91\
\x7d\x0a\x7d\xc6\x55\xe0\x72\xb6\xfb\xf9\xf9\x37\xcb\x44\x33\x5e\
\x94\xf4\xf5\xdb\x0b\xd7\x50\x74\x18\xaf\x03\xc9\xf2\xf7\xda\xa3\
\xd9\x82\xe2\xf9\xf4\xf0\xf4\xce\x39\x1a\x7d\x90\x7e\x22\xe9\x32\
\x12\x2f\xef\x4f\xef\x5f\x21\x2e\xd2\xec\x2c\x2f\xf0\x95\xb8\x17\
\x76\x46\x41\x15\x3d\x32\xaa\x30\x1f\x79\x81\x57\x15\x73\xeb\x85\
\xe9\xab\x22\x46\x46\x12\xd6\x74\x5e\x0d\x55\x15\x73\x8b\x85\x99\
\xab\xa2\x44\x56\x2f\xac\xc5\xbc\xcc\x55\xd1\xb9\x95\xc2\xd0\x54\
\x55\x47\x56\x29\xac\xa1\xbc\xd0\x54\x11\x51\xba\x8a\x80\x15\xc8\
\xb6\xea\x9e\xf4\x35\xc2\xf0\xf3\x9a\x3c\x3d\xde\xbd\x39\xbb\x7a\
\x81\xb7\x35\x6c\x31\x29\xae\xff\xab\x86\x9d\x8a\x95\xac\x58\x18\
\x6c\x5e\x49\x1f\xb0\xda\x8a\xf0\x50\x58\x91\x03\x34\x6d\xa5\x91\
\x95\x09\x43\xcb\xab\x7a\xde\xd1\xb4\xe5\xd3\x6a\x61\x2c\x73\x0d\
\xa2\xad\x28\xb2\x02\x61\x20\x79\xb1\xcf\x2f\x88\xb6\x4c\x5a\x2a\
\x4c\x74\x4e\x6d\xb5\xe5\x47\x96\x2b\xcc\x36\x2f\xb5\x79\xc4\xaf\
\x0d\xbd\x30\x93\xb9\x33\xd1\x96\x19\x59\x96\x30\x93\xbc\xcc\x9f\
\xe6\xa3\xb6\xef\x7f\x6c\x8f\x62\x0e\xdc\x5e\xe2\x78\xf2\x9e\xb9\
\x2d\x13\x72\xc2\x48\x17\xa6\x99\xd7\xe1\xfe\xf8\x1a\xde\x02\xcc\
\x5a\xf2\x7c\x7a\x08\xe1\x18\xac\x97\xf0\x18\x94\x35\x6c\xf1\xe5\
\xdb\x50\xdb\xec\x41\xc7\xc3\x53\xd0\x96\x5c\xc9\x12\xc2\x14\x0e\
\x31\xb9\xd2\x2a\x6b\xdb\x78\x20\x35\x6d\x1b\x58\x16\x56\xb4\x87\
\xa6\xa0\x2d\x73\x70\x69\x6d\xdb\x91\x6d\x09\x93\x3b\xa6\xea\x53\
\x50\x84\xb4\x55\x0c\x68\x55\x9b\x76\x61\xf4\x93\xcb\x03\xab\x36\
\xe2\x20\x42\xda\x36\x22\x5b\x15\xc6\x7e\x10\x2c\xaa\x62\x88\xda\
\x18\x33\xd5\xac\x4d\xa3\x30\x76\x55\x31\x15\xda\x84\x16\x42\x5e\
\x6d\x6b\x91\x2d\x0b\xe3\x7a\x54\x51\x55\x31\x99\xda\x14\xde\x6a\
\x4a\xd7\x26\x55\x98\x9a\xaa\x98\x0d\x6d\xca\x1f\xe6\x58\xb4\x2d\
\x46\xb6\x20\x8c\xf8\x30\x26\xaa\x62\x66\xa7\x49\x19\x6e\x97\x48\
\xd4\xc6\xb9\x97\x78\xb8\x3f\x9a\xdb\x9a\x61\xbe\xb9\x15\x68\xd3\
\x72\x2d\x7b\x5e\x58\xdd\xd3\x01\xcd\xd3\x04\xc8\x9e\x64\xe0\xab\
\x6d\x37\x0c\x43\xfc\xfb\xd2\x11\x85\xde\xac\x4b\x80\xa0\x6d\x84\
\x32\xc9\x17\x85\x15\x0d\x04\x5b\xd5\x1a\x3e\x6a\xbb\x28\x2c\x73\
\x08\x85\xcf\x55\xd2\x20\x68\x1b\x29\x9d\xf3\x73\x61\x39\xf7\x6c\
\xae\xaa\x35\xda\xad\xed\x5c\xd8\xf6\x7d\x94\x3f\x02\x6b\x82\xa0\
\x6d\x24\x47\xc1\x3e\x79\x53\x37\x55\xad\xd1\x56\x6d\x1f\x85\x2d\
\xde\xc8\x70\xb7\xc2\x0a\x04\x6d\x23\x6b\x46\xf6\x8b\x3f\x73\x5f\
\xd5\x1a\xf8\xb5\xed\x86\x61\x88\xff\xd4\x5c\x15\xce\xf7\xef\x10\
\xb4\x8d\xc4\x82\x76\xbf\xfe\x7e\x19\x7f\x65\xae\x6a\x04\x47\xd8\
\x08\x9a\xb6\x7f\x3b\xcf\xca\x48\x61\xee\x5b\x97\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xef\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x01\x68\xf4\xcf\xf7\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x03\x8f\x49\x44\x41\x54\x38\
\xcb\x3d\x91\xdf\x4f\x5b\x75\x00\xc5\xcf\xf7\x7e\x2f\xb4\xbd\xb7\
\xdc\x76\x8c\xfe\x80\x5b\xe4\x87\x73\xcc\x84\x8c\x9f\x0d\x83\x25\
\x2e\x6e\x3e\x8c\x44\xb3\x64\x66\x21\x6a\x62\x8c\xba\x68\xa6\x0f\
\xea\x93\x0f\x26\x8b\x0f\xc6\x7f\xc0\x44\x97\xc5\x2c\x53\x5f\xdc\
\xe2\xa3\xc8\xa0\xb0\x1f\x22\x14\xc1\x6d\x08\x6d\x2d\x90\x51\xda\
\x4b\x27\x3f\xd6\x52\xee\xbd\x50\x7a\xbf\xbd\x5f\x1f\x10\x5e\x4f\
\xce\x39\xf9\xe4\x1c\xca\x18\x43\x4f\x4f\xdf\x23\xc4\xe2\x09\x30\
\xc6\x38\xb1\x2c\x0b\x00\x20\xe6\xb6\xf2\x7c\xf9\xc9\x0a\x08\x63\
\x0c\xdb\xdb\x7a\x6f\xb1\x54\x9c\x30\x0c\x03\x88\x8c\xde\x43\xb8\
\xbb\x8f\xf7\xf6\xbe\xc4\x1f\x8c\xff\x0e\x58\x96\x85\x72\xb9\x8c\
\x8c\xa6\xfd\x64\x59\x16\xc0\x18\xc3\xf4\xcc\x43\x5e\x2e\x97\xf9\
\x48\x64\xcc\xa4\xe1\x70\x8f\xd6\x15\xee\x54\xa2\xd1\x28\x8e\x54\
\x7b\x2b\x71\xee\xec\x79\xb3\xed\x64\x37\x9f\x8b\xcd\x9b\x1b\xcf\
\x36\xf7\x23\x96\x65\x29\x8c\x31\xcc\xfc\xf5\x68\x28\x91\x4c\xf2\
\x7c\x61\x8b\xdb\xb6\xcd\x4d\xd3\xac\x21\xa9\x95\x34\x04\x41\x40\
\x3c\x9e\xe4\x27\x4e\x1c\x43\xd0\x1f\x00\x07\xc7\xfd\x07\xe3\xe8\
\xec\x6c\xbf\x86\xef\xbe\xbd\xfe\x59\x7b\x5b\x98\x07\x82\xf5\xfc\
\xe2\xeb\x03\x5c\xd7\x75\x3e\x7a\xf7\x1e\x1f\x1e\x89\xb4\x65\xff\
\x7d\x0a\x32\x3c\x3c\x06\x70\xe0\xe7\x5b\xb7\x22\xa9\xe5\xd4\x39\
\xc3\x30\x71\xe3\x87\x6b\xde\x50\x5d\xa8\x20\xcb\xf2\x3e\xc3\xff\
\x1c\xd0\xb4\xec\xab\xd3\x33\x0f\xe7\x0d\xd3\xec\x38\xd0\x0e\x0c\
\x4e\x9b\x73\x8c\xdd\xbd\xcf\x57\x32\x69\x5e\x2e\x97\xb9\xb6\xaa\
\x4d\x70\xce\x21\x02\x00\xa5\xb4\xf8\xeb\xe0\x90\xd5\xd2\xf2\x02\
\x8e\x56\x1f\x85\x6d\x73\xc4\x13\xc9\xde\xbd\xa2\x75\x41\xd4\x56\
\xb3\x58\x5b\x5f\xbf\xd1\xdc\xd4\x24\x2a\x1e\x05\x2e\xa7\x13\x8f\
\x67\x67\xe1\x74\x38\xd0\xd4\xd4\x10\x11\x76\x4c\xb3\xc6\xd0\xf5\
\x77\x1a\x9b\x1b\xa0\x54\x29\xd0\x75\x1d\x6b\x6b\x1b\xa8\xac\xac\
\x7c\xf3\xf1\xdf\xb3\x26\xe9\xeb\x3d\xc3\xd7\x37\x36\x90\xcb\xe7\
\x30\x15\x1d\xc7\xc2\xe2\x22\x82\xc1\x20\x37\x4c\x5d\x68\x6d\x6d\
\x85\xf0\xd1\xc7\x57\xfa\x8e\x78\xbd\x68\xa8\x7f\x0e\xfd\xe7\x5f\
\x83\xa2\x28\x30\x77\x8c\xe7\x5d\x2e\x17\x00\x40\xf0\xf9\x7c\x93\
\xef\xbd\xff\xee\x8b\xb5\x75\xb5\x7b\xaa\xaa\x22\x12\x19\x4b\x29\
\x8a\xb2\x1c\xee\xea\x86\x57\xf1\xec\x3f\x0e\x00\x9c\x73\x00\xa0\
\x84\x10\x4e\x08\xb1\x4b\xa5\x52\xf3\x64\xf4\xcf\x39\x87\xc3\x21\
\xf9\x03\x35\xf0\x7a\xbd\x90\x5d\x92\xb5\xbb\xbb\x7b\xdd\xed\x96\
\x3f\xa1\x54\xb4\x38\xb7\x0f\x0b\x08\x00\x0a\x80\x51\x4a\xb1\xb0\
\xb0\xf8\xf9\x93\xe5\xd4\xd7\x75\x75\xb5\x50\x3c\x55\x90\xdd\x32\
\x14\xb7\x82\x4a\xb1\x02\x7b\xa5\x22\xe6\x62\x71\x80\xe3\xc7\x8e\
\xf6\x93\x57\x0e\x09\x00\x80\x31\xe6\x9f\x98\x8c\xfe\x51\x51\xe1\
\x38\xe6\xf3\xd7\x40\x92\x9c\xa8\xaa\xaa\x82\xec\x92\x40\x29\x45\
\x2e\x9f\x43\x2c\xfe\x0f\xac\x92\x05\x8f\xc7\xb3\x13\x0c\x04\xde\
\x12\xe7\x63\x09\x08\x44\x00\x21\xe4\xed\x8c\x96\xb9\x19\x0a\x85\
\x20\x49\x4e\xb8\x24\x17\xdc\x6e\x37\x24\xa7\x0b\x84\x10\xa4\xd3\
\x69\x24\x17\x96\x40\x29\x85\x3f\xe0\x5b\x57\xd5\xba\x16\x4a\xe9\
\x96\x98\xcf\xe5\x4e\xdd\xbe\xfd\xcb\x37\x53\x53\xd3\x5d\x82\x40\
\x50\xab\xd6\xe2\x8d\x81\x01\x5c\xba\x74\x11\x94\x52\x94\x4a\x25\
\xc4\x12\x09\x68\x99\x55\x54\x57\x57\x83\x08\x18\x5a\x5d\xd5\xfa\
\x4d\xd3\x40\x30\x18\x84\x90\xcf\x17\xb6\x0c\xdd\x14\x08\x00\x66\
\x31\x2c\x25\x97\x70\xf5\xea\x97\x50\xd5\x46\x74\x74\xf6\xe0\xb7\
\x3b\x77\xa0\x6f\x1b\x08\xd5\xab\x00\xe1\xdf\xeb\xba\xde\xaf\xaa\
\x21\xb4\x1c\x3f\x0e\x49\x92\x40\x46\x22\x63\x10\x45\x11\xb1\xb9\
\xc4\x85\xc1\xc1\xc1\x9b\xf9\x7c\xde\x23\x08\xc2\xc1\x26\x28\x14\
\x0a\xf8\xe0\xc3\xcb\x38\xfb\xca\x99\x2f\x4c\x73\xe7\x2b\x59\x96\
\xd1\xd4\xd0\x08\x49\x92\x20\x08\x02\xc8\xc8\xf0\x28\x00\x80\x08\
\x84\x1a\xe6\x4e\x59\xd7\x0d\x64\xb3\xd9\xe6\xcd\xcd\x67\x97\x19\
\xb3\x5e\xf6\xfb\xfd\x4f\x4f\x9f\x3e\xf5\xa9\xc7\xab\xa4\x82\x81\
\x00\xfc\x3e\x3f\x6c\xdb\x3e\x1c\xfe\x3f\x11\x5f\xc4\xbb\xcd\x16\
\x27\xa0\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\x22\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x06\x00\x00\x00\xe7\x46\xe2\xb8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x06\xc4\x49\x44\x41\x54\x78\x9c\xed\
\x9d\xbb\x71\x1b\x31\x14\x45\xb1\x1e\x05\x6a\x80\x33\x4a\xdc\x04\
\x4b\x70\xe8\x12\x14\xb3\x2c\xc5\x2a\xc1\xa1\x4b\x50\x13\x4e\x38\
\xa3\x06\x94\xd1\x81\x06\x36\x04\xee\x2e\x7e\xef\x73\x1f\xf6\x9d\
\x50\x23\xed\x82\x78\x87\x97\x58\x40\x00\x97\x97\xf7\xa7\x5b\x08\
\x21\x5c\x4e\xd7\x25\x18\xe4\xf9\xf1\xed\xa6\xdd\x86\x51\x5e\x3f\
\xce\x26\xfb\xfe\xe5\xfd\xe9\xb6\x44\x81\x22\x2e\x92\x3c\xd6\x04\
\x4a\x9d\x59\xf2\x1f\x44\x5c\x24\x39\xac\x08\xb4\xe6\xc9\xa6\x40\
\x11\x17\x89\x1f\x74\x81\xb6\xfc\xb8\x9c\xae\xcb\x52\xfa\xa5\xf4\
\x97\xa9\x1b\x26\x81\x05\x91\x50\x05\xaa\x71\xa2\x5a\xa0\xf4\x8f\
\x46\x1b\xa6\x01\xb2\x48\x68\x02\xd5\xb8\x10\x3d\xf8\xd2\xf0\x5a\
\x89\xd2\x0b\x58\x03\x51\x24\x14\x81\x7a\xea\xdf\x2d\x50\x7e\x21\
\x6b\x20\x89\xa4\x2d\x50\x6b\xdd\xd3\x9a\xdf\x35\xbc\x47\xa2\xfc\
\xa2\x96\x40\x10\x49\x4b\x20\x8a\x5a\x93\x09\xb4\x76\x71\x4b\x68\
\x8a\x24\x2d\xd0\x48\x8d\xf3\xfa\xae\x36\x7c\x54\xa2\xb5\x1b\x59\
\x41\x43\x24\x29\x81\x38\xea\xca\x26\xd0\xd6\x0d\xad\x20\x29\x12\
\xb7\x40\x54\xf5\x5c\xab\xe5\xb7\xda\x5f\xec\xe5\xe5\xfd\xe9\x46\
\x29\xa4\x14\xda\x03\x5b\x0a\x24\xfa\xfe\x81\xf3\xe2\x29\x96\x16\
\x6d\xf3\xf4\x79\xfd\x38\x2f\x08\x83\xed\x5a\x38\xa4\xd9\xaa\xdb\
\xa6\x40\x97\xd3\xf5\x6e\xa1\x95\x02\x64\x91\xf6\x24\x89\x89\x84\
\x2c\x92\x46\xd2\x8b\x25\x50\x0e\x92\x48\x2d\x52\x20\x8a\xc4\x2d\
\xce\x5e\x8d\x76\x05\xe2\x4a\xa1\x14\x4d\x91\x46\x24\x40\x10\x09\
\x61\x6c\xa9\x96\x40\x39\x92\x22\x51\x16\x5d\x43\x24\x49\x71\x4a\
\xf5\x28\x0a\x24\x91\x42\x29\x9c\x22\x71\x16\x59\x42\x24\x84\xc4\
\xc9\x81\x49\xa0\x1c\x4a\x91\x34\xe6\x74\x28\xef\xa9\x25\x4e\x4d\
\xdf\x57\x09\x24\x9d\x42\x29\x23\x22\x21\x2c\x4f\x8c\xb4\x01\x31\
\x71\x72\x60\x13\x28\xa7\x45\x24\xa4\x27\xa4\x1e\x91\x10\xc4\xa9\
\x7d\xc3\x56\x0b\xa4\x99\x42\x29\x7b\x22\x21\x89\x93\x53\x23\x12\
\x42\xff\xb6\x62\x26\x81\x72\x52\x91\x90\xc5\xc9\x59\x13\x09\x4d\
\x9c\x96\xe1\xc2\xea\x5a\x18\xc5\x85\xa5\x40\xeb\xfc\x5a\xa2\x48\
\x3f\xfe\xfc\xd4\x6e\xca\x10\x4d\x02\xa1\x62\x6d\xe1\xf3\xf9\xf1\
\xed\x86\x9a\x9a\xad\x21\xd1\xfc\x11\x86\x32\x16\xca\x41\x98\x19\
\x2e\x81\xdc\xb6\x5e\xcc\x8e\x81\xb6\x40\x14\x09\xa9\x2d\x7b\xf4\
\x0c\x51\xba\x04\x42\x4d\xa1\x14\x04\x91\xac\x88\x33\xc2\x74\x09\
\x94\xa3\x21\x92\x45\x71\x7a\x1f\x90\xba\x05\xb2\x90\x42\x29\x12\
\x22\x59\x14\x67\x94\xe9\x13\x28\x87\x43\x24\xeb\xe2\x8c\x4c\xcf\
\x0c\x09\x64\x2d\x85\x52\x28\x44\xb2\x2e\x0e\x05\x87\x4b\xa0\x9c\
\x1e\x91\x66\x12\x67\x74\x72\x78\x58\x20\xcb\x29\x94\x52\x23\xd2\
\x4c\xe2\x50\x71\xf8\x04\xca\x59\x13\x69\x56\x71\x28\x96\xa6\x48\
\x96\x32\x10\xd7\xc8\x46\xb1\xb6\x3c\xa2\x85\x27\xd0\x0a\xb3\x26\
\x4e\x0a\xd5\x9b\x9e\x4c\xa0\x19\xc6\x42\x47\x10\x87\x1a\x4f\xa0\
\x70\x3c\x71\x28\x87\x1c\xa4\x02\x59\x4b\xa1\xa3\x89\xc3\xc1\x21\
\x13\xe8\xc8\xe2\x50\x3f\xf0\x90\x0b\x84\x9c\x42\x47\x16\x87\x8b\
\x43\x24\x90\x8b\xf3\x09\xc7\x74\x0b\x8b\x40\x28\x29\xe4\xe2\xf0\
\x33\x65\x02\xb9\x38\xf7\x70\x4d\xf6\xb2\x09\xa4\x91\x42\x2e\x8e\
\x3c\x53\x24\x90\x8b\xb3\x0f\xe7\x52\xd3\x03\x67\xe7\xff\xfe\x6e\
\x7f\xdf\x93\x75\x7e\x7f\xff\x15\x42\x38\xb3\x5d\xdf\xd4\xae\xce\
\x59\xe1\x5a\xb8\x8d\xb5\xe5\xbc\x3e\xfb\xc6\x42\x5f\xd5\x2e\xc3\
\xfd\x26\xe6\xbc\x3e\xab\x40\x2e\x8f\x1e\x52\x52\x8a\x6c\x6d\x76\
\x91\xca\x58\x4d\x21\x36\x81\x5c\x1a\x3d\x24\x65\x14\x3b\x5c\xc1\
\x85\x2a\x63\x31\x85\x58\x04\x72\x59\xf4\x90\x96\x50\xf4\x78\x17\
\x17\xab\x8c\xb5\x14\x22\x17\xc8\x25\xd1\x43\x43\x3e\xf1\x03\xa6\
\x5c\xb0\x32\x96\x52\x88\x54\x20\x97\x43\x0f\x2d\xe9\x54\x8e\xb8\
\x73\xd1\xca\x58\x49\x21\x32\x81\x5c\x0a\x3d\x34\x65\x53\x3b\x64\
\xd3\x85\x2b\x63\x21\x85\x48\x04\x72\x19\xf4\xd0\x96\x4c\xf5\x98\
\x5f\x17\xaf\x8c\xb6\x20\x25\x86\x05\x72\x09\xf4\x40\x90\x4b\xfd\
\xa0\x71\x17\xb0\x0c\x82\x28\x5b\x0c\x09\xe4\xc5\xd7\x03\x45\x2a\
\xf5\x04\x0a\xc1\x45\xac\x01\x45\x98\x9c\x6e\x81\xbc\xe8\x7a\x20\
\xc9\x04\x91\x40\x21\xb8\x90\x35\x20\x89\x13\xe9\xda\x95\xc1\xbd\
\x8b\xc0\xb1\x83\x6f\xeb\x71\x86\x98\x62\x67\xea\xd1\x40\xfa\x04\
\x68\x1e\x03\x71\x8f\x55\x7c\x2c\x54\x06\xe9\x53\xa3\x49\x20\x97\
\x67\x5e\x44\x1f\xe3\x5d\x24\x7d\x50\x52\xa8\x5a\x20\x97\x66\x5e\
\x54\x96\x32\x5c\x28\x7d\x10\x52\xa8\x4a\x20\x97\x65\x5e\x54\xff\
\x9d\xc3\xc5\xd2\x47\x3b\x85\x8a\x02\xb9\x24\xf3\x02\xf1\x2f\xad\
\x2e\x98\x3e\x9a\x29\xb4\x2b\x90\xcb\x31\x2f\x50\xdb\x7a\x5c\x34\
\x7d\xb4\x52\x68\x53\x20\x97\x62\x5e\x20\xb7\x36\xbb\x70\xfa\x68\
\xa4\xd0\xaa\x40\x2e\xc3\xbc\x40\x1f\xef\xe2\xe2\xe9\x23\x9d\x42\
\x77\x02\xb9\x04\xf3\x62\xe2\x88\x3b\x17\x50\x1f\xc9\x14\xfa\x22\
\x90\x17\x7f\x5e\x4c\x1d\xf3\xeb\x22\xea\x23\x95\x42\xff\x04\xf2\
\xa2\xcf\x8b\xc9\xaf\x3a\x70\x21\xf5\x91\x48\xa1\x6f\x21\x78\xb1\
\x67\x86\x5b\xa2\xe5\xf9\xf1\xed\xc6\x59\x60\x89\x6f\x2d\xfc\xfc\
\x4e\x2c\x9b\x48\x7c\x9f\x1a\x67\xff\x3c\x58\x97\xc7\x32\xaf\x1f\
\xe7\xe5\x47\xe0\xef\x23\xce\x1a\xfb\xc6\x42\x05\xa4\x3f\xd2\x5f\
\xde\x9f\x6e\xe6\xbe\x74\xd7\xd3\xe7\x9e\x19\xc7\x82\x9e\x40\x02\
\x20\x88\xc3\x95\x42\x2c\x02\x79\xfa\x7c\x82\x20\x0e\x37\x9e\x40\
\x0c\xa0\x8a\xc3\x91\x42\xe4\x02\x1d\x39\x7d\x50\xc5\xe1\xc4\x13\
\x88\x00\x4b\xe2\x50\xa7\x10\xa9\x40\x47\x4b\x1f\x4b\xe2\x70\xe1\
\x09\xd4\x81\x75\x71\x28\x53\x88\x4c\xa0\x23\xa4\x8f\x75\x71\x38\
\x80\x39\xa5\x15\x9d\xd9\xe4\xa1\x7a\xc3\x93\x08\x34\x6b\xfa\xa4\
\xd2\x3c\x3f\xbe\xdd\xb4\x0f\x32\x40\xc4\xc7\x40\x2b\xec\xa5\x4d\
\x94\x68\x86\x44\xa2\x18\x0b\x0d\x0b\x34\x53\xfa\xb4\x48\x31\x93\
\x48\x23\x78\x02\x85\x31\x09\xac\x8b\x34\x9a\x42\x43\x02\x59\x4f\
\x1f\xca\xa2\x5b\x17\xa9\x97\x43\x26\x10\x67\x91\x2d\x8a\x34\x92\
\x42\xdd\x02\x59\x4c\x1f\xc9\xa2\x5a\x14\xa9\x87\x43\x24\x90\x66\
\x11\xad\x88\xd4\x9b\x42\x5d\x02\x59\x49\x1f\xa4\xa2\x59\x11\xa9\
\x95\x29\x13\x08\xb9\x48\xc8\x22\xf5\xa4\x50\xb3\x40\xc8\xe9\x83\
\x58\x94\x2d\xfe\xcf\x6a\xf3\x6f\xeb\xe1\xc4\xfc\x5a\x58\xdc\xf3\
\x64\x49\x1e\x64\x5a\x03\xa2\x49\x20\xb4\xf4\x49\xe3\xd6\xe2\x5a\
\xd5\xeb\xc7\x79\xb9\x9c\xae\x0b\xd7\x96\x1b\x09\x4c\x8e\x81\xf6\
\x3a\x1c\x79\x8c\x11\x59\x6b\x5b\x7c\x4d\x08\x6f\xd2\x96\xb1\x50\
\xb5\x40\x08\x2f\xac\xe5\x9d\x8a\x28\x52\x4d\x5b\x90\x44\xaa\xc1\
\x44\x02\x8d\x44\x3c\x82\x48\x3d\xf7\xd6\x16\xa9\x36\x85\xaa\x04\
\xd2\x7a\x11\x94\x63\x03\x0d\x91\x28\xee\xa5\x2d\x52\x09\xc8\x04\
\xe2\x1c\x54\x4a\x88\xc4\x71\x6d\x0d\x91\x6a\x52\xa8\x28\x90\x64\
\x83\x25\x9f\x46\x38\x44\x92\x48\x37\xb4\x44\x82\x48\x20\xcd\xc7\
\x58\x0a\x91\x34\xc6\x57\x52\x22\x95\x52\x68\x57\x20\xee\xc6\x21\
\xcd\x7f\xf4\x88\x84\xf0\x84\xa7\x9d\x48\x2a\x09\x84\x24\x4e\x4e\
\x8d\x48\x08\xe2\xe4\x70\x8a\xb4\x97\x42\x9b\x1d\xc1\xd1\x10\x0e\
\x71\x24\x67\x9f\x11\xc5\xd9\x82\xba\x7e\x5b\xb5\x13\x59\x0b\xb3\
\x3e\x5d\x1f\x82\x2d\x79\x42\xa0\xef\xf3\x2d\x21\x57\x6f\x40\x65\
\xaf\x84\x34\xd2\xeb\x5f\xd6\x44\x8a\x50\xd4\x74\xad\x9e\x2c\x02\
\x69\x3c\x8e\x4b\x73\x54\x91\xf2\xda\xde\x75\xc2\xc8\x0d\x34\x3e\
\xa6\xb4\x57\xe0\x8f\x26\x12\x8b\x40\x08\xf3\x38\xda\x1c\x49\xa4\
\xb4\xde\x5f\x5e\x74\xeb\xc5\x10\x06\xc6\x28\x02\x45\x8e\x20\xd2\
\xb0\x40\x08\xe2\x44\xd0\x04\x8a\xcc\x2e\x52\x74\xe0\xdf\x8b\xac\
\xf9\x43\x24\x71\x22\xa8\x02\x45\x66\x15\xa9\x49\x20\x44\x71\x22\
\xe8\x02\x45\x66\x14\xe9\x72\xba\x2e\xbb\xd3\xdf\xc8\xe2\x44\xac\
\x08\x14\x99\x49\xa4\x4d\x81\x2c\x88\x13\xb1\x26\x50\x64\x16\x91\
\x96\xf4\x07\x96\xc4\x89\x58\x15\x28\x62\x5d\xa4\xbf\xa8\xcc\xde\
\x47\x76\xb8\xb3\xea\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
"
qt_resource_name = "\
\x00\x09\
\x0c\x78\x54\x88\
\x00\x6e\
\x00\x65\x00\x77\x00\x50\x00\x72\x00\x65\x00\x66\x00\x69\x00\x78\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0e\
\x0a\x51\x2d\xe7\
\x00\x69\
\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x74\x00\x69\x00\x65\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x09\x6b\xb7\xc7\
\x00\x69\
\x00\x6e\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x0a\xd0\x22\xa7\
\x00\x72\
\x00\x65\x00\x64\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0c\x57\x58\x67\
\x00\x73\
\x00\x65\x00\x6e\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x0c\xc3\x45\x27\
\x00\x71\
\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x5f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x02\xe8\x12\x87\
\x00\x62\
\x00\x6c\x00\x61\x00\x63\x00\x6b\x00\x6c\x00\x69\x00\x73\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0c\x47\x58\x67\
\x00\x73\
\x00\x65\x00\x6e\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x05\x46\x9a\xc7\
\x00\x61\
\x00\x64\x00\x64\x00\x72\x00\x65\x00\x73\x00\x73\x00\x62\x00\x6f\x00\x6f\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x07\x34\x2d\xc7\
\x00\x6e\
\x00\x65\x00\x74\x00\x77\x00\x6f\x00\x72\x00\x6b\x00\x73\x00\x74\x00\x61\x00\x74\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x18\
\x02\x47\xd6\x47\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x32\x00\x34\x00\x70\x00\x78\x00\x2d\x00\x79\x00\x65\x00\x6c\
\x00\x6c\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x02\xa0\x44\xa7\
\x00\x73\
\x00\x75\x00\x62\x00\x73\x00\x63\x00\x72\x00\x69\x00\x70\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x0e\
\x09\x39\xff\x47\
\x00\x71\
\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x05\x89\x73\x07\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x32\x00\x34\x00\x70\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x15\
\x0c\xfc\x45\x87\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x32\x00\x34\x00\x70\x00\x78\x00\x2d\x00\x72\x00\x65\x00\x64\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x07\x76\xdf\x07\
\x00\x67\
\x00\x72\x00\x65\x00\x65\x00\x6e\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x00\xd3\x62\xc7\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x32\x00\x34\x00\x70\x00\x78\x00\x2d\x00\x67\x00\x72\x00\x65\
\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x02\x8c\x5e\x67\
\x00\x6e\
\x00\x6f\x00\x5f\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x0e\
\x02\x47\x93\x47\
\x00\x79\
\x00\x65\x00\x6c\x00\x6c\x00\x6f\x00\x77\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x03\xf4\x2e\xc7\
\x00\x71\
\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x5f\x00\x74\x00\x77\x00\x6f\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x11\
\x03\x89\x73\x27\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x31\x00\x36\x00\x70\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x14\
\x07\x12\xd0\xa7\
\x00\x71\
\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x5f\x00\x74\x00\x77\x00\x6f\x00\x5f\x00\x78\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x18\x00\x02\x00\x00\x00\x15\x00\x00\x00\x03\
\x00\x00\x02\x36\x00\x00\x00\x00\x00\x01\x00\x00\x3f\x80\
\x00\x00\x02\x92\x00\x00\x00\x00\x00\x01\x00\x00\x47\xb7\
\x00\x00\x01\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x1d\x49\
\x00\x00\x02\x6a\x00\x00\x00\x00\x00\x01\x00\x00\x46\xee\
\x00\x00\x01\x74\x00\x00\x00\x00\x00\x01\x00\x00\x24\xaf\
\x00\x00\x00\xbc\x00\x00\x00\x00\x00\x01\x00\x00\x11\xfc\
\x00\x00\x02\xde\x00\x00\x00\x00\x00\x01\x00\x00\x51\x02\
\x00\x00\x02\xb4\x00\x00\x00\x00\x00\x01\x00\x00\x4a\x35\
\x00\x00\x00\xf2\x00\x00\x00\x00\x00\x01\x00\x00\x18\x2b\
\x00\x00\x01\xbe\x00\x00\x00\x00\x00\x01\x00\x00\x2d\x9f\
\x00\x00\x03\x06\x00\x00\x00\x00\x00\x01\x00\x00\x54\xf5\
\x00\x00\x01\x16\x00\x00\x00\x00\x00\x01\x00\x00\x1a\xd0\
\x00\x00\x02\x16\x00\x00\x00\x00\x00\x01\x00\x00\x3c\x48\
\x00\x00\x01\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x27\x2a\
\x00\x00\x00\x4c\x00\x00\x00\x00\x00\x01\x00\x00\x03\x6a\
\x00\x00\x00\x2a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x64\x00\x00\x00\x00\x00\x01\x00\x00\x06\x1d\
\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x14\xf0\
\x00\x00\x00\x80\x00\x00\x00\x00\x00\x01\x00\x00\x08\xed\
\x00\x00\x00\x96\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x15\
\x00\x00\x01\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x34\xdf\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 1.078125 | 1 |
update/parser_clickagain.py | kuna/iidxranktable | 6 | 12785762 | #-*- coding: utf-8 -*-
# site: http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level10=1&mix=1
from bs4 import BeautifulSoup
import urllib
import re
def getGroup(arr, g):
for ele in arr:
if (ele[0] == g):
return ele
# if not, add group
new_group = (g, [])
arr.append( new_group )
return new_group
def processTitle(input):
	# clickagain's titles are inconsistent, so normalize them here:
	# strip bracketed segments and trim surrounding whitespace.
	# cf. "timepiece phase (CN ver)" ... -> SOLVED
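	# (the paren-doubling below rewrites each "(...)" as "((...))" so the
	# non-greedy pattern can strip the whole bracketed segment in one pass)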
return re.sub(r"\(\(.*?\)\)", '', input.replace(u"(", '((').replace(u")", '))')).strip()
#
# ==================================================================
#
def parse8():
return parse("8AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level8=1&mix=1")
def parse9():
return parse("9AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level9=1&mix=1")
def parse10():
return parse("10AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level10=1&mix=1")
def parse11():
return parse("11AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level11=1&mix=1")
def parse12():
return parse("12AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level12=1&mix=1")
def parse8N():
return parseN("8AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level8=1&mix=1")
def parse9N():
return parseN("9AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level9=1&mix=1")
def parse10N():
return parseN("10AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level10=1&mix=1")
def parse11N():
return parseN("11AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level11=1&mix=1")
def parse12N():
return parseN("12AC", "http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level12=1&mix=1")
def parse12_7():
# common
data = urllib.urlopen("http://clickagain.sakura.ne.jp/cgi-bin/sort11/data.cgi?level12=1")
soup = BeautifulSoup(data)
res = [] # [(group, [song name, ..]), ..]
table = soup.find_all('table')[5]
trs = table.find_all('tr')
group_name = 1
group_idx = 1
idx = 0
for tr in trs:
idx += 1
if (idx <= 4):
continue
# 0:ver, 1:title, 5:normal, 6:hard, 7:op1P, 8:op2P, 9:desc
tds = tr.find_all('td')
if (len(tds) < 9):
			# group header row
			# stop once we reach group 8 (8기)
if (group_idx >= 8):
break
group_name = str(group_idx) + u'기'
group_idx += 1
continue
title = processTitle(tds[1].get_text())
diff = tds[1]['style']
if (diff.find("red") >= 0):
diff = "SPA"
elif (diff.find("orange") >= 0):
diff = "SPH"
elif (diff.find("#0066FF") >= 0):
diff = "SPN"
else:
diff = "SPA"
group = getGroup(res, group_name)
group[1].append( (title, diff) )
return res
def parse(tableID, uri):
# common
data = urllib.urlopen(uri)
soup = BeautifulSoup(data)
res = [] # [(group, [song name, ..]), ..]
table = soup.find('table', id=tableID)
trs = table.find_all('tr')
for tr in trs:
		if tr.has_attr('class') and tr['class'][0] == u'top':
continue
# 0:ver, 1:title, 5:normal, 6:hard, 7:op1P, 8:op2P, 9:desc
tds = tr.find_all('td')
if (len(tds) < 9):
break
title = processTitle(tds[1].get_text())
if (title == "title"):
continue
diff = tds[1]['style']
if (diff.find("red") >= 0):
diff = "SPA"
elif (diff.find("orange") >= 0):
diff = "SPH"
elif (diff.find("#0066FF") >= 0):
diff = "SPN"
else:
diff = "SPA"
lv = tds[6].get_text()
group = getGroup(res, lv)
group[1].append( (title, diff) )
return res
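# Same as parse() above, but groups by the "normal" level column (tds[5])
# instead of the "hard" column (tds[6]).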
def parseN(tableID, uri):
# common
data = urllib.urlopen(uri)
soup = BeautifulSoup(data)
res = [] # [(group, [song name, ..]), ..]
table = soup.find('table', id=tableID)
trs = table.find_all('tr')
for tr in trs:
		if tr.has_attr('class') and tr['class'][0] == u'top':
continue
# 0:ver, 1:title, 5:normal, 6:hard, 7:op1P, 8:op2P, 9:desc
tds = tr.find_all('td')
if (len(tds) < 9):
break
title = processTitle(tds[1].get_text())
if (title == "title"):
continue
diff = tds[1]['style']
if (diff.find("red") >= 0):
diff = "SPA"
elif (diff.find("orange") >= 0):
diff = "SPH"
elif (diff.find("#0066FF") >= 0):
diff = "SPN"
else:
diff = "SPA"
lv = tds[5].get_text()
group = getGroup(res, lv)
group[1].append( (title, diff) )
return res
#print parse12_7()
| 2.578125 | 3 |
scripts/buildit.py | rojkov/yottaci-azure-backend | 0 | 12785763 | <filename>scripts/buildit.py<gh_stars>0
#!/usr/bin/env python3
import asyncio
import logging
import os
import shutil
import tempfile
import tarfile
import json
import base64
import time
import jwt
import yaml
from urllib.parse import urlparse
from http.client import HTTPSConnection
from tempfile import NamedTemporaryFile
from azure.storage.blob.models import ContentSettings
from azure.storage.blob import AppendBlobService
from azure.storage.queue import QueueService
from azure.storage.blob.models import PublicAccess
from github import Github
LOG = logging.getLogger(__name__)
TASKDATA = "/etc/taskdata.json"
WORKDIR = "/data/cibot"
FILESHARE = "/fileshare"
REPOSDIR = os.path.join(FILESHARE, "gitrepos")
LOGDIR = os.path.join(FILESHARE, "logs")
LOCKFILENAME = os.path.join(REPOSDIR, "cibot-git.lock")
BBCACHE = os.path.join(FILESHARE, "bb-cache")
SRCDIR = os.path.join(WORKDIR, "deleteme")
AUTOCONFIG = """
DL_DIR = "%s"
SSTATE_DIR = "%s"
""" % (os.path.join(BBCACHE, "downloads"), os.path.join(SRCDIR, "sstate"))
BUILDSCRIPT = """
source git.openembedded.org.openembedded-core/oe-init-build-env build git.openembedded.org.bitbake
%s
#bitbake core-image-ros-world
bitbake %s
"""
def get_sstate_archive_path(ctx):
"""Returns path to sstate archive for given configuration."""
confname = ctx.config.get("configuration_name",
"configuration%s" % ctx.taskdata["config_num"])
return os.path.join(BBCACHE, "sstate-%s.tar.gz" % confname)
class Context(object):
"""Program context."""
def __init__(self, loop, config, blob_service,
queue_service, taskdata):
self.pid = "%s-%s" % (taskdata["pid"], taskdata["config_num"])
self.loop = loop
self.config = config
self.blob_service = blob_service
self.queue_service = queue_service
self.taskdata = taskdata
class CommandLineProtocol(asyncio.SubprocessProtocol):
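    """Stream a subprocess's output both to local stdout and to an Azure
    append blob, so the build log is readable while the build is running."""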
def __init__(self, exit_future, pid, blob_service):
self.exit_future = exit_future
self.blob_service = blob_service
self.transport = None
self.pid = pid
def connection_made(self, transport):
self.transport = transport
def pipe_data_received(self, fd, data):
print(data.decode('ascii').rstrip())
self.blob_service.append_blob_from_bytes("logs", self.pid, data)
def process_exited(self):
self.exit_future.set_result(self.transport.get_returncode())
self.transport.close()
@asyncio.coroutine
def run_command(ctx, cmd, cwd):
exit_future = asyncio.Future(loop=ctx.loop)
proto_factory = lambda: CommandLineProtocol(exit_future, ctx.pid, ctx.blob_service)
proc_coro = ctx.loop.subprocess_exec(proto_factory, *cmd, stdin=None, cwd=cwd)
transport, protocol = yield from proc_coro
result = yield from exit_future
return result
def run(ctx, cmd, cwd, save_sstate_flag=False):
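    # Run `cmd`, streaming its output to the log blob. On a non-zero exit
    # code: set a "failure" commit status on GitHub, optionally archive the
    # sstate cache, push the result to the "buildresults" queue, copy the
    # cloud-init logs to the share, and raise.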
result = ctx.loop.run_until_complete(run_command(ctx, cmd, cwd))
if result != 0:
github = GithubAdapter(ctx.taskdata)
gh_commit = github.get_commit()
gh_commit.create_status("failure",
target_url=ctx.blob_service.make_blob_url("logs",
ctx.pid),
description="Build failed",
context=ctx.config.get("configuration_name", "configuration%s" % ctx.taskdata["config_num"]))
if save_sstate_flag:
save_sstate(ctx)
ctx.taskdata["build_result"] = "failure"
ctx.queue_service.put_message("buildresults", base64.b64encode(json.dumps(ctx.taskdata).encode("utf")).decode("utf"))
cloudlog_dir = os.path.join(LOGDIR, ctx.pid)
os.makedirs(cloudlog_dir, exist_ok=True)
shutil.copyfile("/var/log/cloud-init-output.log", os.path.join(cloudlog_dir,
"cloud-init-output.log"))
shutil.copyfile("/var/log/cloud-init.log", os.path.join(cloudlog_dir,
"cloud-init.log"))
raise RuntimeError("Failed to run '%s'" % " ".join(cmd))
def run_script(ctx, script, cwd):
with NamedTemporaryFile(mode="w") as scriptfile:
scriptfile.write(script)
scriptfile.flush()
return run(ctx, ["/bin/bash", "-xe", scriptfile.name], cwd, save_sstate_flag=True)
def save_sstate(ctx):
# Save sstate for future use
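    # Write the archive to a temp file in the same directory first and only
    # then rename() it into place, so concurrent readers never see a
    # half-written tarball.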
fd, tmpfile = tempfile.mkstemp(dir=BBCACHE, prefix="sstatearch")
with os.fdopen(fd, "wb") as stream:
with tarfile.open(fileobj=stream, mode="w:gz") as sstatetmp:
sstatetmp.add(os.path.join(SRCDIR, "sstate"), arcname="sstate")
stream.flush()
os.rename(tmpfile, get_sstate_archive_path(ctx))
def repodirname(url):
repourl = urlparse(url)
return ".".join([seg for seg in [repourl.hostname] + repourl.path.split("/") if seg])
def get_repos(config):
oecore_url = "git://git.openembedded.org/openembedded-core"
bitbake_url = "git://git.openembedded.org/bitbake"
repos = [
(repodirname(oecore_url), oecore_url, config.get("oecore_ref", "master"), None),
(repodirname(bitbake_url), bitbake_url, config.get("bitbake_ref", "master"), None)
]
for dep in config["dependencies"]:
repos.append(
(repodirname(dep["url"]), dep["url"], dep.get("ref", None), dep.get("layers", None))
)
return repos
def update_git_cache(ctx):
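    # Keep bare mirror clones on the shared volume; build VMs later clone
    # with --reference against these to avoid refetching full histories.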
repos = [(repo, repourl) for (repo, repourl, _, _) in get_repos(ctx.config)]
repos.append((repodirname(ctx.taskdata["gh"]["repository"]["clone_url"]), ctx.taskdata["gh"]["repository"]["clone_url"]))
for repo, repourl in repos:
repodir = os.path.join(REPOSDIR, repo)
if not os.path.isdir(repodir):
run(ctx, ["git", "clone", "--bare", repourl, repo],
cwd=REPOSDIR)
else:
LOG.info("Fetching %s" % repourl)
run(ctx, ["git", "fetch"], cwd=repodir)
class GithubAdapter(object):
def __init__(self, taskdata):
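        # GitHub App auth flow: sign a short-lived (10-minute) JWT with the
        # app's private key, then exchange it for an installation access
        # token via the installations API.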
timestamp = int(time.time())
payload = {
"iat": timestamp,
"exp": timestamp + (10 * 60),
"iss": taskdata["github_issuer_id"]
}
bearer = jwt.encode(payload,
key=taskdata["githubapp_pkey"],
algorithm="RS256").decode("ascii")
conn = HTTPSConnection("api.github.com")
conn.request(
method="POST",
url="/installations/{}/access_tokens".format(taskdata["gh"]["installation"]["id"]),
headers={
"Authorization": "Bearer {}".format(bearer),
"Accept": "application/vnd.github.machine-man-preview+json",
"User-Agent": "nodejs"
}
)
response = conn.getresponse()
token = json.loads(response.read().decode("ascii"))["token"]
self.github = Github(login_or_token=token)
self.repo = self.github.get_repo("%s/%s" % (taskdata["gh"]["repository"]["owner"]["login"],
taskdata["gh"]["repository"]["name"]))
self.taskdata = taskdata
def get_commit(self):
return self.repo.get_commit(self.taskdata["gh"]["sha"])
# TODO: !!! VALIDATE INPUT
def get_config(self):
if self.taskdata["gh"]["type"] == "pull_request":
repo = self.github.get_repo("%s/%s" % (self.taskdata["gh"]["pull_request"]["head"]["repo"]["owner"]["login"],
self.taskdata["gh"]["repository"]["name"]))
else:
repo = self.repo
contentobj = repo.get_file_contents(path=".yottaci.yml",
ref=self.taskdata["gh"]["ref"])
configs = list(yaml.load_all(contentobj.decoded_content))
return configs[self.taskdata["config_num"]-1]
def main():
logging.basicConfig(level=logging.DEBUG)
with open(TASKDATA) as taskdata_file:
taskdata = json.loads(taskdata_file.read())
github = GithubAdapter(taskdata)
gh_commit = github.get_commit()
config = github.get_config()
blob_service = AppendBlobService(account_name=taskdata["storage_account_name"],
account_key=taskdata["storage_account_key"])
queue_service = QueueService(connection_string=taskdata["queue_connection_string"])
loop = asyncio.get_event_loop()
ctx = Context(loop=loop,
config=config,
blob_service=blob_service,
queue_service=queue_service, taskdata=taskdata)
blob_service.create_container("logs",
fail_on_exist=False,
public_access=PublicAccess.Blob)
blob_service.create_blob("logs", ctx.pid, content_settings=ContentSettings(content_type="text/plain; charset=utf-8"))
gh_commit.create_status("pending",
target_url=blob_service.make_blob_url("logs", ctx.pid),
description="Build started",
context=config.get("configuration_name", "configuration%s" % taskdata["config_num"]))
os.makedirs(REPOSDIR, exist_ok=True)
    # Check whether we are the only process updating the git cache on the
    # SMB share; otherwise skip the update.
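    # (NB: the exists-then-create check below is not atomic; two workers
    # starting at the same instant could both take the "lock".)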
if not os.path.exists(LOCKFILENAME):
lock = open(LOCKFILENAME, "w")
lock.close()
update_git_cache(ctx)
os.unlink(LOCKFILENAME)
if os.path.exists(SRCDIR):
shutil.rmtree(SRCDIR)
os.makedirs(os.path.join(SRCDIR, "build/conf"))
with open(os.path.join(SRCDIR, "build/conf/auto.conf"), "a") as localconf:
localconf.write("\n%s\n" % config.get("localconf", ""))
localconf.write(AUTOCONFIG)
repos = get_repos(config)
repos.append((repodirname(taskdata["gh"]["repository"]["clone_url"]), taskdata["gh"]["repository"]["clone_url"], None, None))
for reponame, repourl, reporef, _ in repos:
refrepopath = os.path.join(REPOSDIR, reponame)
run(ctx, ["git", "clone",
"--reference", refrepopath, repourl, reponame],
cwd=SRCDIR)
if reporef:
LOG.info("Checkout %s to %s" % (reponame, reporef))
run(ctx, ["git", "checkout", "%s" % reporef],
cwd=os.path.join(SRCDIR, reponame))
# Do checkout
if taskdata["gh"]["type"] == "pull_request":
LOG.info("Add remote repo %s" % taskdata["gh"]["clone_url"])
run(ctx, ["git", "remote", "add", "contributor",
taskdata["gh"]["clone_url"]],
cwd=os.path.join(SRCDIR, repodirname(taskdata["gh"]["repository"]["clone_url"])))
LOG.info("Fetch contributor's repo")
run(ctx, ["git", "fetch", "contributor"], cwd=os.path.join(SRCDIR, repodirname(taskdata["gh"]["repository"]["clone_url"])))
LOG.info("Checkout %s to %s" % (repodirname(taskdata["gh"]["repository"]["clone_url"]), taskdata["gh"]["sha"]))
run(ctx, ["git", "checkout", taskdata["gh"]["sha"]],
cwd=os.path.join(SRCDIR, repodirname(taskdata["gh"]["repository"]["clone_url"])))
# Fetch sstate if any
if os.path.exists(get_sstate_archive_path(ctx)):
with tarfile.open(name=get_sstate_archive_path(ctx), mode="r:gz") as sstate_tar:
sstate_tar.extractall(path=SRCDIR)
addlayers = []
for dep in config["dependencies"]:
repodir = repodirname(dep["url"])
layers = dep.get("layers", None)
if layers:
addlayers.extend(["bitbake-layers add-layer ../%s/%s" % (repodir, layer)
for layer in layers])
else:
addlayers.append("bitbake-layers add-layer ../%s" % repodir)
addlayers.append("bitbake-layers add-layer ../%s" % repodirname(taskdata["gh"]["repository"]["clone_url"]))
run_script(ctx, BUILDSCRIPT % ("\n".join(addlayers), config["bitbake_target"]), cwd=SRCDIR)
save_sstate(ctx)
    # The GitHub auth token has most likely expired by now => renew it
github = GithubAdapter(taskdata)
gh_commit = github.get_commit()
gh_commit.create_status("success",
target_url=blob_service.make_blob_url("logs",
ctx.pid),
description="Target has been built successfully",
context=config.get("configuration_name", "configuration%s" % taskdata["config_num"]))
loop.close()
# TODO: copy cloud-init log files to share
taskdata["build_result"] = "success"
queue_service.put_message("buildresults", base64.b64encode(json.dumps(taskdata).encode("utf")).decode("utf"))
if __name__ == "__main__":
main()
| 1.671875 | 2 |
data/upload_waterpoints.py | taarifa/taarifa_backend | 0 | 12785764 | #!/usr/bin/env python
import csv
import requests
import json
CSV_FN = 'waterpoints.csv'
REPORTS_URL = 'http://localhost:5000/reports'
SERVICES_URL = 'http://localhost:5000/services'
class Status(object):
FUNCTIONAL, NOT_FUNCTIONAL, IN_PROGRESS, UNKNOWN = range(4)
def resolve_status(status):
if status.lower().startswith("functional"):
return Status.FUNCTIONAL
elif status.lower().startswith("not functional") or \
status.lower().startswith("non functional"):
return Status.NOT_FUNCTIONAL
elif status.lower() == "in progress":
return Status.IN_PROGRESS
else:
return Status.UNKNOWN
def parse_csv(csv_fn):
waterpoints = []
with open(csv_fn, 'r') as f:
reader = csv.reader(f)
header = [field.lower().replace(' ', '_')
for field in reader.next()]
for _id, row in enumerate(reader):
wp = dict(zip(header, row))
wp['waterpoint_id'] = _id
wp['status'] = resolve_status(wp['status'])
try:
wp['latitude'] = float(wp['latitude'])
wp['longitude'] = float(wp['longitude'])
except ValueError:
# TODO log error
continue
waterpoints.append(wp)
return waterpoints
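# Expected CSV header (inferred from the service schema registered below; names
# are lower-cased with spaces replaced by underscores when read): Region,
# LGA Name, Ward, Village, Technology In Use, Waterpoint, Status, Latitude, Longitude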
def create_wp_service():
headers = {'content-type': 'application/json'}
data = {'name': 'WaterpointService',
'fields': {'waterpoint_id': {'type': 'StringField', 'required': True},
'region': {'type': 'StringField', 'required': True},
'lga_name': {'type': 'StringField', 'required': True},
'ward': {'type': 'StringField', 'required': True},
'village': {'type': 'StringField', 'required': True},
'technology_in_use': {'type': 'StringField', 'required': True},
'waterpoint': {'type': 'StringField', 'required': True},
'status': {'type': 'StringField', 'required': True},
'latitude': {'type': 'FloatField', 'required': True},
'longitude': {'type': 'FloatField', 'required': True},
},
'group': 'location based reports',
'keywords': ['waterpoints'],
'protocol_type': '',
'service_name': '',
'service_code': 'wp1'
}
requests.post(SERVICES_URL, data=json.dumps(data), headers=headers)
def send_report(wp):
headers = {'content-type': 'application/json'}
data = {'service_code': 'wp1', 'data': wp}
requests.post(REPORTS_URL, data=json.dumps(data), headers=headers)
if __name__ == '__main__':
create_wp_service()
waterpoints = parse_csv(CSV_FN)
for w in waterpoints:
print w
send_report(w)
| 3.03125 | 3 |
lifeline.py | goggledefogger/BlinkyTapeQueue | 0 | 12785765 | #!/usr/bin/env python
from BlinkyTapeV2 import BlinkyTape
import sys
import logging
import time
import glob
import os
import pickle
import datetime
import calendar
import re
logging.basicConfig()
if sys.platform.startswith('win'):
serialPorts = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
serialPorts = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
serialPorts = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
for thisPort in serialPorts:
if 'usb' in thisPort:
port = thisPort
bt = BlinkyTape(port)
storageFileName = 'lifeline.p'
tickTime = 0.01
# the format of the data will be:
# {
# 'eventsById': {
# <id>: {
# 'id': <id>,
# 'date': <timestamp>
# }
# },
# 'eventsByWeek': {
# <weekIndex>: {
# 0: {
# <timestamp>: <id>,
# <timestamp>: <id>,
# ...
# },
# 1: {
# },
# ...,
# 6: {
# }
# }
# }
# }
lifelineMap = {
'eventsById': {},
'eventsByWeek': {}
}
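# Illustrative populated map (ids and timestamps hypothetical; 1458000000 falls
# roughly on Tuesday 2016-03-15 UTC, i.e. dayIndex 1):
#   lifelineMap = {
#       'eventsById': {'evt1': {'id': 'evt1', 'date': 1458000000}},
#       'eventsByWeek': {105148: {0: {}, 1: {1458000000: ['evt1']}, 2: {},
#                                 3: {}, 4: {}, 5: {}, 6: {}}}
#   }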
loaded = False
def start():
    global lifelineMap, loaded
    diskData = loadFromFile()
    if (diskData is not None):
        lifelineMap = diskData
    # mark the map as loaded so callers don't reload it on every event
    loaded = True
def addCalendarEvent(id, dateString):
global lifelineMap
if (not loaded):
start()
# formatted by IFTTT like this: March 13, 2016 at 01:00pm
matchObj = re.match(r'(.*?) ([0-9]+), ([0-9]+) at (.*)', dateString)
if matchObj:
monthString = matchObj.group(1)
dayInt = matchObj.group(2)
yearInt = matchObj.group(3)
timeString = matchObj.group(4)
#formattedDateString = monthString + ' ' + dayInt + ' ' + yearInt + ' ' + 'timeString'
format = '%B %d, %Y at %I:%M%p'
print 'ifttt date: ' + dateString
dateObj = datetime.datetime.strptime(dateString, format)
unixDateString = str(calendar.timegm(dateObj.timetuple()))
else:
# formatted as a unix timestamp with 10 digits
unixMatchObj = re.match(r'[0-9]{10}', dateString)
if unixMatchObj:
print 'unix: ' + dateString
unixDateString = dateString
else:
print 'unknown date format, ignoring: ' + dateString
return
unixDate = int(unixDateString)
lifelineMap['eventsById'][id] = {'id': id, 'date': unixDate}
weekIndex = calculateWeekIndex(unixDate)
dayIndex = calculateDayIndex(unixDate)
if weekIndex not in lifelineMap['eventsByWeek']:
lifelineMap['eventsByWeek'][weekIndex] = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}}
if unixDate not in lifelineMap['eventsByWeek'][weekIndex][dayIndex]:
lifelineMap['eventsByWeek'][weekIndex][dayIndex][unixDate] = []
lifelineMap['eventsByWeek'][weekIndex][dayIndex][unixDate].append(id)
saveToFile(lifelineMap)
startCalendarLights()
def calculateWeekIndex(unixDate):
dateObj = datetime.date.fromtimestamp(unixDate)
return int((dateObj - datetime.date.min).days / 7)
def calculateDayIndex(unixDate):
dateObj = datetime.date.fromtimestamp(unixDate)
return dateObj.weekday()
def startCalendarLights():
stop = time.time() + 10
while time.time() < stop:
# Run a tick on each block
for i in range(0,bt.getSize()):
bt.setPixel(i,255,123,10)
bt.sendUpdate()
time.sleep(tickTime)
for i in range(0,bt.getSize()):
bt.setPixel(i,0,0,0)
bt.sendUpdate()
def loadFromFile():
if os.path.exists(storageFileName):
return pickle.load(open(storageFileName, "rb"))
else:
return None
def saveToFile(data):
pickle.dump(data, open(storageFileName, "wb"))
start()
| 2.328125 | 2 |
predict.py | koderjoker/Image-Classifier | 0 | 12785766 | import argparse
import torch
from torch import nn
from torch import optim
from torchvision import transforms, datasets, models
from collections import OrderedDict
from PIL import Image
import numpy as np
import json
#Take inputs from user
parser = argparse.ArgumentParser()
parser.add_argument('path_to_image', type=str, help='Set path to image', default='./flowers/test/1/image_06743.jpg')
parser.add_argument('checkpoint', type=str, help='Load checkpoint', default='./checkpoint.pth')
parser.add_argument('--top_k', type=int, help='Return top k most likely classes', default=5)
parser.add_argument('--category_names', type=str, help='Use a mapping of categories to real names', default='cat_to_name.json')
parser.add_argument('--gpu', type=str, help='Use GPU for inference', default='cpu')
args = parser.parse_args()
def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
if checkpoint['model'] == "vgg16":
model = models.vgg16(pretrained=True)
elif checkpoint['model'] == "densenet121":
model = models.densenet121(pretrained=True)
model.eval()
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
epoch = checkpoint['epoch']
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
#Perform transformations, convert to tensor and normalize
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
#Open image and apply transformation
pil_image = Image.open(image)
pil_image = transform(pil_image)
#Convert to numpy array
np_image = np.array(pil_image)
return np_image
def predict(image_path, model, topk, device):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
model = load_checkpoint(model)
model.eval()
model.to(device)
np_image = process_image(image_path) #numpy array returned
torch_image = torch.from_numpy(np_image).to(device) #convert to tensor
torch_image = torch_image.unsqueeze_(0)
    torch_image = torch_image.float()  # cast to float; shape is now (1, C, H, W) after unsqueeze
with torch.no_grad():
output = model.forward(torch_image)
ps = torch.exp(output)
    # take the top-k probabilities and their indices
if topk is None:
probs, indices = torch.topk(ps, 1)
else:
probs, indices = torch.topk(ps, topk)
#invert class_to_idx
inv_class_to_idx = {index: cls for cls, index in model.class_to_idx.items()}
classes = []
for index in indices.cpu().numpy()[0]: #iterating through indices
classes.append(inv_class_to_idx[index])
return probs.cpu().numpy()[0], classes
# Print the most likely image class and it's associated probability
# map with json
if args.gpu == "gpu":
device = "cuda:0"
elif args.gpu == "cpu":
device = "cpu"
probs, classes = predict(args.path_to_image, args.checkpoint, args.top_k, device)
if args.category_names is not None:
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
classes = [cat_to_name[c] for c in classes]
print("Most probable class:", classes[0])
print("Probability :", probs[0])
if args.top_k is not None:
print("\nTop",args.top_k,"probable classes and their probabilities are")
for index in range(len(classes)):
print(classes[index],":",probs[index])
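# Example invocation (paths and values illustrative; flags match the argparse
# definitions at the top of this script):
#   python predict.py flowers/test/1/image_06743.jpg checkpoint.pth \
#       --top_k 3 --category_names cat_to_name.json --gpu gpu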
| 2.4375 | 2 |
python_integration/Lecture_3_5_company_nemid_fat_sms/company_app.py | Jepp0103/System_Integration_Tasks | 1 | 12785767 | from bottle import run, get, view, post, request
import json
import jwt
import requests
##############################
@get("/company")
@view("index_company.html")
def do():
return dict(company_name="SUPER")
@get("/company-token")
@view("index_company_token.html")
def do():
return dict(company_name="Token stuff")
@post("/get-name-by-cpr")
def do():
# Connect to db
# Execute a SQL/Document query
data_from_client = json.load(request.body)
print("cpr", data_from_client)
cpr = data_from_client['cpr']
file_name = "./data/" + cpr + ".txt" # In python you go from the root
opened_file = open(file_name, "r")
return opened_file.read()
@post("/process-jwt-token")
def do():
result = ""
try:
token = json.load(request.body)["jwt"]
try:
result = jwt.decode(
token, "jwt-secret-key", algorithms=["HS256"])
except Exception as jwt_error:
send_sms(jwt_error)
try:
email = result["email"]
except Exception as emailException:
send_sms("Email missing")
except Exception as json_error:
send_sms(json_error)
return str(result)
def send_sms(message):
endpoint = "https://fatsms.com/api-send-sms"
phone = "42659183"
my_api_key = "7893f0d6872d606467a9e0e3a998d8db"
data_dict = {"to_phone": phone, "api_key": my_api_key, "message": message}
requests.post(endpoint, data = data_dict)
print(str(data_dict))
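# Minimal client-side sketch for exercising /process-jwt-token (secret and URL
# taken from this file; the email claim is illustrative):
#   token = jwt.encode({"email": "user@example.com"}, "jwt-secret-key", algorithm="HS256")
#   requests.post("http://127.0.0.1:4444/process-jwt-token", json={"jwt": token})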
##############################
run(host="127.0.0.1", port=4444, debug=True, reloader=True, server="paste")
| 2.859375 | 3 |
busy_beaver/common/models.py | alysivji/github-adapter | 55 | 12785768 | from redis.exceptions import RedisError
from rq.exceptions import NoSuchJobError
from rq.job import Job
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy_utils import ChoiceType
from busy_beaver.extensions import db, rq
class BaseModel(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True)
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime,
onupdate=db.func.current_timestamp(),
default=db.func.current_timestamp(),
)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
def patch(self, data: dict):
for key, value in data.items():
setattr(self, key, value)
class Task(BaseModel):
"""Task Base Table"""
__tablename__ = "task"
class TaskState:
COMPLETED = "completed"
SCHEDULED = "scheduled"
FAILED = "failed"
CANCELLED = "cancelled"
STATES = [(COMPLETED,) * 2, (SCHEDULED,) * 2, (FAILED,) * 2, (CANCELLED,) * 2]
INITIAL_STATE = SCHEDULED
# Attributes
job_id = db.Column(db.String(36), index=True)
name = db.Column(db.String(128), index=True)
task_state = db.Column(
ChoiceType(TaskState.STATES), default=TaskState.INITIAL_STATE, index=True
)
data = db.Column(db.JSON)
time_to_post = db.Column(db.String(20), nullable=True)
def get_rq_job(self):
try:
rq_job = Job.fetch(self.job_id, rq.connection)
except (RedisError, NoSuchJobError):
return None
return rq_job
def get_progress(self):
job = self.get_rq_job()
return job.meta.get("progress", 0) if job is not None else 100
def __repr__(self): # pragma: no cover
return f"<Task: {self.job_id}-{self.name}>"
| 2.046875 | 2 |
timidy/testing.py | meawoppl/tiMIDI | 0 | 12785769 | import unittest, sys
import timidi.tests
def test():
    # exit=False keeps unittest.main from calling sys.exit itself, so the
    # caller below can turn the test result into its own exit code
    return unittest.main(timidi.tests, exit=False).result.wasSuccessful()
if __name__ == "__main__":
sys.exit(0 if test() else 1)
| 1.898438 | 2 |
stanCode_Projects/weather_master/weather_master.py | Cherry-RB/sc-projects | 0 | 12785770 | """
File: weather_master.py
Name:Cherry
-----------------------
This program should implement a console program
that asks weather data from user to compute the
average, highest, lowest, cold days among the inputs.
Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
LEAVING = -100  # sentinel matching the "-100 to quit" prompt below
def main():
"""
TODO(step):
1.Let user input one data.If the data is LEAVING,there are no temperatures.
2.Define variables:highest,lowest,average,amount,and cold_day.
3.Let user continuously input data until the data is LEAVING.(while)
4.Redefine the variables.
5.Print the results:Highest temperature,Lowest temperature,Average,and cold day(s).
"""
print('stanCode "Weather Master 4.0"!')
# step1.Let user input one data.
data = int(input('Next Temperature: (or -100 to quit)? '))
# If the data is LEAVING,there are no temperatures.
if data == LEAVING:
print('No temperatures were entered.')
else:
# step2.Define variables:highest,lowest,average,amount,and cold_day.
highest = data
lowest = data
average = float(data)
amount = 1
if data < 16:
cold_day = 1
else:
cold_day = 0
while True:
# step3.Let user continuously input data until the data is LEAVING.
data = int(input('Next Temperature: (or -100 to quit)? '))
if data == LEAVING:
break
else:
# step4.Redefine the variables.
average = average*amount + data
amount += 1
average = average/amount
if data > highest:
highest = data
if data < lowest:
lowest = data
if data < 16:
cold_day += 1
# step5.Print the results:Highest temperature,Lowest temperature,Average,and cold day(s).
print('Highest temperature = ' + str(highest))
print('Lowest temperature = ' + str(lowest))
print('Average = ' + str(average))
print(str(cold_day) + ' cold day(s)')
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
| 4.3125 | 4 |
atlas/prodtask/management/commands/synccvmfsevgen.py | PanDAWMS/panda-bigmon-atlas | 0 | 12785771 | from django.core.management.base import BaseCommand, CommandError
import time
from atlas.prodtask.mcevgen import sync_cvmfs_db
class Command(BaseCommand):
args = ''
help = 'Sync cvmfs JOs'
def handle(self, *args, **options):
self.stdout.write('Start sync cvmfs for JOs at %s'%time.ctime())
try:
sync_cvmfs_db()
except Exception as e:
raise CommandError('Some problem during syncing: %s'%str(e))
self.stdout.write('Successfully finished cvmfs sync: %s'%time.ctime()) | 2.265625 | 2 |
test/misc/test_query_performance/old_stuff/graph_drawer.py | madineniguna/EHR | 23 | 12785772 | <filename>test/misc/test_query_performance/old_stuff/graph_drawer.py
import json
import networkx as nx
import matplotlib.pyplot as plt
def get_structures(fname):
with open(fname) as struct_files:
structures = json.loads(struct_files.read())
return structures
def get_colors():
colors = {
'blood_pressure' : '#57B4E6',
'blood_glucose' : '#57B4E6',
'full_blood_count' : '#57B4E6',
'lipids' : '#57B4E6',
'liver_function': '#57B4E6',
'thyroid' : '#57B4E6',
'urea_and_electrolytes' : '#57B4E6',
'urin_analysis' : '#57B4E6',
'root' : '#464F4F'
}
for i in xrange(20):
colors['composition.lbl-%05d' % i] = '#943B3B'
return colors
def create_graph(graph, parent, structure, colors, child=0):
try:
parent_index = parent.split('@')[1]
except (IndexError, AttributeError):
parent_index = 0
if isinstance(structure, dict):
for node_name, node_children in structure.iteritems():
node_type = node_name
if parent is None:
node_name = 'composition_root'
node_type = 'root'
else:
node_name = "%s@%s.%s" % (node_name, parent_index, child)
if not node_name.startswith('composition'):
raise ValueError('Container type %s unknown' % node_name)
graph.add_node(node_name, color=colors[node_type])
if parent:
graph.add_edge(parent, node_name)
for i, child in enumerate(node_children):
create_graph(graph, node_name, child, colors, i)
return graph
else:
node_type = structure
node_name = "%s@%s.%s" % (structure, parent_index, child)
graph.add_node(node_name, color=colors[node_type])
if parent:
graph.add_edge(parent, node_name)
return graph
def draw_graph(data, label):
#p=nx.single_source_shortest_path_length(G,ncenter)
#for i in xrange(0, graphs_num):
G = nx.DiGraph()
create_graph(G, None, data, get_colors(), 0)
node_colors = [G.node[k]['color'] for k in G.nodes()]
# same layout using matplotlib with no labels
plt.title("draw_networkx")
prog = 'dot'
pos = nx.graphviz_layout(G, prog=prog)
nx.draw(G, pos=pos, node_size=1000, with_labels=False,
arrows=False, node_color=node_colors, K=10)
    nx.write_dot(G, 'test%s.dot' % label)
# plt.show()
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
plt.savefig('nx_test%s%s.png' % (label, prog))
plt.clf()
if __name__ == '__main__':
strs = get_structures('structures_file.json')
graphs_num = 10
for i, g in enumerate(strs[:graphs_num]):
draw_graph(g, i)
# draw_graph(strs[1], 1)
# draw_graph(strs[50], 50) | 2.609375 | 3 |
management/commands/update_transaction_status.py | utkarshohm/mutual-fund-platform | 62 | 12785773 | <reponame>utkarshohm/mutual-fund-platform
'''
Author: utkarshohm
Description: update status of all transactions that need a status update (i.e. not completed or failed), using
(1) BSEStar api - only payment status has an API endpoint currently
(2) by crawling BSEStar web portal (bsestarmf.in) for all other transaction statuses after payment
'''
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from models.transactions import Transaction
from web import crawl_to_update_transaction_status
from api import get_payment_status_bse
class Command(BaseCommand):
help = 'Update status of transactions using BSEStar api and by crawling its web portal'
def update_payment_status(self):
'''
Updates payment status of all transactions whose payment status is not updated internally
Uses api.get_payment_status_bse() as BSEStar offers API endpoint for this
'''
tr_list = Transaction.objects.filter(
order_type='1',
status__in=('2','4'),
)
status_list = []
for tr in tr_list:
try:
status_list.append(get_payment_status_bse(tr.user_id, tr.id))
except:
status_list.append('-1')
## this is a good place to put in a slack alert
def handle(self, *args, **options):
# Update payment stautus of transactions with pending payment using api
self.update_payment_status()
# Update transaction status of transactions by crawling web
crawl_to_update_transaction_status()
| 2.390625 | 2 |
old_scripts/atddm.py | clancia/air-traffic-data-driven-modelling | 0 | 12785774 | <filename>old_scripts/atddm.py
import glob
import pandas as pd
from pytz import timezone, utc
from scipy.stats import sem
from constants import PREFIX, POSTFIX, CLMNS, TIMERESAMPLE
def load(subset=None, pathtocsv=PREFIX, **kwargs):
"""
Read data from csv files in pathtocsv
Datasets must be named as XXXX.csv, where XXXX is the ICAO code of the
inbound airport
A list of ICAO codes can be passed to subset to filter the datasets loaded
Remaining arguments are passed to pd.read_csv
"""
dataframes = []
_ = kwargs.setdefault('parse_dates', [3, 4])
# in the default dataset columns 4 and 5 are dates
_ = kwargs.setdefault('infer_datetime_format', True)
if subset is None:
files = glob.glob(pathtocsv + '[A-Z]'*4 + '.csv')
# filters all csv files named XXXX.csv
else:
files = [pathtocsv+code+POSTFIX for code in subset]
failed = []
for f in files:
try:
df = pd.read_csv(f, **kwargs)
except OSError as e:
print('ERROR :: {}'.format(e))
print('This file will be skipped\n')
failed.append(f)
pass
else:
df.columns = CLMNS
df['delay'] = df.M3_FL240 - df.M1_FL240
dataframes.append(df)
notfailed = [code[-8:-4] for code in files if code not in failed]
if not len(notfailed):
print('WARNING :: No dataset loaded')
return dict(zip(notfailed, dataframes))
def binarrivals(ss, interval=TIMERESAMPLE, tz=None):
ts = pd.Series(index=ss, data=1, name='arrivals')
# ts = ts.resample(str(interval)+'Min', how='sum')
ts = ts.resample(str(interval)+'Min').sum()
ts = ts.sort_index()
if tz is not None:
tz = timezone(tz)
ts.index = ts.index.tz_localize(utc).tz_convert(tz)
return ts
def daily_avg(ts, tz=None):
freq = ts.index.freq.delta.components.hours*60 +\
ts.index.freq.delta.components.minutes
slices = list(map(pd.Timestamp,
['{:02d}:{:02d}'.format(i, j) for i in range(24)
for j in range(0, 60, freq)]))
if tz is not None:
slices = [timezone(tz).localize(sl) for sl in slices]
means = []
stdvs = []
# upper = []
# lower = []
sems = []
for i, j in zip(slices, slices[1:]+[slices[0]]):
ss = ts.between_time(i.time(), j.time()).fillna(value=0)
means.append(ss.mean())
stdvs.append(ss.std())
sems.append(sem(ss))
daily = list(map(lambda x: x.isoformat(), slices))
daily = pd.DataFrame(data={'mu': means, 'stermn': sems, 'sigma': stdvs},
index=pd.DatetimeIndex(daily))
daily = daily.asfreq(str(freq)+'Min')
if tz is not None:
tz = timezone(tz)
daily.index = daily.index.tz_localize(utc).tz_convert(tz)
return daily
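# Minimal usage sketch (ICAO code and timezone illustrative; assumes the
# matching csv exists under PREFIX):
#   data = load(subset=['EGLL'])
#   ts = binarrivals(data['EGLL']['M3_FL240'], interval=10, tz='Europe/London')
#   daily = daily_avg(ts, tz='Europe/London')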
| 2.53125 | 3 |
ditto/pinboard/views.py | philgyford/django-ditto | 54 | 12785775 | from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic import DetailView, ListView
from django.views.generic.detail import SingleObjectMixin
from ..core.views import PaginatedListView
from .models import Account, Bookmark, BookmarkTag
class SingleAccountMixin(SingleObjectMixin):
"For views which list bookmarks and also need an Account object."
slug_field = "username"
slug_url_kwarg = "username"
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=Account.objects.all())
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["account"] = self.object
return context
class HomeView(PaginatedListView):
"List all recent Bookmarks and all Accounts"
template_name = "pinboard/home.html"
queryset = Bookmark.public_objects.all().prefetch_related("account")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["account_list"] = Account.objects.all()
return context
class ToreadListView(PaginatedListView):
template_name = "pinboard/toread_list.html"
queryset = Bookmark.public_toread_objects.all().prefetch_related("account")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["account_list"] = Account.objects.all()
return context
class AccountDetailView(SingleAccountMixin, PaginatedListView):
"A single Pinboard Account and its Bookmarks."
template_name = "pinboard/account_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["bookmark_list"] = context["object_list"]
return context
def get_queryset(self):
"Show all the public Bookmarks associated with this account."
return Bookmark.public_objects.filter(account=self.object).prefetch_related(
"account"
)
class AccountToreadView(SingleAccountMixin, PaginatedListView):
"A single Pinboard Account and its 'to read' Bookmarks."
template_name = "pinboard/account_toread.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["bookmark_list"] = context["object_list"]
return context
def get_queryset(self):
"Show all the public Bookmarks associated with this account."
return Bookmark.public_toread_objects.filter(
account=self.object
).prefetch_related("account")
class BookmarkDetailView(DetailView):
"A single Bookmark, from one Account"
model = Bookmark
# Only display public bookmarks; private ones will 404.
queryset = Bookmark.public_objects.all()
slug_field = "url_hash"
slug_url_kwarg = "hash"
class TagListView(ListView):
template_name = "pinboard/tag_list.html"
context_object_name = "tag_list"
def get_queryset(self):
return Bookmark.tags.most_common()[:100]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["account_list"] = Account.objects.all()
return context
class TagDetailView(SingleObjectMixin, PaginatedListView):
"All Bookmarks with a certain tag from all Accounts"
template_name = "pinboard/tag_detail.html"
allow_empty = False
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=BookmarkTag.objects.all())
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tag"] = self.object
context["account_list"] = Account.objects.all()
context["bookmark_list"] = context["object_list"]
return context
def get_queryset(self):
"Show all the public Bookmarks associated with this tag."
return Bookmark.public_objects.filter(
tags__slug__in=[self.object.slug]
).prefetch_related("account")
class AccountTagDetailView(SingleAccountMixin, PaginatedListView):
"All Bookmarks with a certain Tag from one Account"
template_name = "pinboard/account_tag_detail.html"
allow_empty = False
def get(self, request, *args, **kwargs):
self.tag_object = self.get_tag_object()
return super().get(request, *args, **kwargs)
def get_tag_object(self):
"""Custom method for fetching the Tag."""
try:
obj = BookmarkTag.objects.get(slug=self.kwargs["tag_slug"])
except BookmarkTag.DoesNotExist:
raise Http404(_("No Tags found matching the query"))
return obj
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tag"] = self.tag_object
context["bookmark_list"] = context["object_list"]
return context
def get_queryset(self):
"""Show all the public Bookmarks associated with this account."""
return Bookmark.public_objects.filter(
account=self.object, tags__slug__in=[self.kwargs["tag_slug"]]
)
| 2.125 | 2 |
cpp/conan/sqlite3/conanfile.py | dmerejkowsky/cpp-mobile-example | 23 | 12785776 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
class ConanSqlite3(ConanFile):
name = "sqlite3"
version = "3.21.0"
year = "2017"
sha1 = "ebe33c20d37a715db95288010c1009cd560f2452"
description = "Self-contained, serverless, in-process SQL database engine."
url = "http://github.com/bincrafters/conan-sqlite3"
homepage = "https://www.sqlite.org"
license = "Public Domain"
generators = "cmake"
settings = "os", "compiler", "arch", "build_type"
exports = ["LICENSE.md"]
exports_sources = ["CMakeLists.txt", "FindSQLite3.cmake"]
options = {
"shared": [True, False],
"enable_json1": [True, False],
}
default_options = "shared=False", "enable_json1=False"
def configure(self):
del self.settings.compiler.libcxx
def source(self):
base_url = "https://www.sqlite.org/" + self.year
archive_name = "sqlite-amalgamation-" + self.version.replace(".","") + "000"
archive_ext = "zip"
download_url = "{0}/{1}.{2}".format(base_url, archive_name, archive_ext)
self.output.info("Attempting download of sources from: " + download_url)
tools.get(download_url, sha1=self.sha1)
os.rename(archive_name, "sources")
def build(self):
cmake = CMake(self)
cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = "ON"
if self.options.shared:
cmake.definitions["BUILD_SHARED_LIBS"] = "ON"
cmake.definitions["ENABLE_JSON1"] = self.options.enable_json1
cmake.verbose = True
cmake.configure()
cmake.build()
def package(self):
self.copy("FindSQLite3.cmake", ".", ".")
self.copy("*.h", dst="include", src="sources")
self.copy(pattern="*.lib", dst="lib", keep_path=False)
self.copy(pattern="*.dll", dst="bin", keep_path=False)
self.copy(pattern="*.a", dst="lib", keep_path=False)
self.copy(pattern="*.pdb", dst="lib", keep_path=False)
self.copy(pattern="*.so", dst="lib", keep_path=False)
self.copy(pattern="*.dylib", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ['sqlite3']
if self.settings.os == "Linux":
self.cpp_info.libs.append("pthread")
self.cpp_info.libs.append("dl")
| 2.03125 | 2 |
lib/operators_expr/logic.py | goosemayor/AutoMarketFeature | 0 | 12785777 | <reponame>goosemayor/AutoMarketFeature
# coding=utf-8
#==================================== Logic ====================================
def iff(cond, expr1, expr2):
    '''
    [Definition] If cond holds, evaluates to expr1, otherwise expr2 (cond ? expr1 : expr2);
    cond is a matrix with True or False in each position
    [Category] Logic
    '''
    return 'iff(%s,%s,%s)' %(cond, expr1, expr2)
def or_expr(expr1, expr2):
    """
    [Definition] Logical OR of expr1 and expr2
    [Category] Logic
    """
    return 'or_expr(%s,%s)' %(expr1, expr2)
def max(x, y):
    """
    [Definition] Element-wise comparison of x and y, taking the larger value at each position
    [Category] Logic
    """
    return 'max(%s,%s)' %(x,y)
def min(x, y):
    """
    [Definition] Element-wise comparison of x and y, taking the smaller value at each position
    [Category] Logic
    """
    return 'min(%s,%s)' %(x,y)
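if __name__ == '__main__':
    # Illustrative use: these helpers only build expression strings, which are
    # assumed to be parsed and evaluated elsewhere in the pipeline.
    print(iff('gt(close,open)', 'close', 'open'))  # iff(gt(close,open),close,open)
    print(max('high', 'low'))                      # max(high,low)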
| 3.078125 | 3 |
tests/test_xarray_resample.py | uofuseismo/anxcor | 9 | 12785778 | <gh_stars>1-10
import unittest
# travis execution
try:
from tests.synthetic_trace_factory import create_sinsoidal_trace
except:
from synthetic_trace_factory import create_sinsoidal_trace
#ide testing
#from synthetic_trace_factory import create_sinsoidal_trace
import numpy as np
from anxcor.xarray_routines import XArrayResample, XArrayConverter
import anxcor.anxcor_utils as anxcor_utils
from obspy.clients.fdsn import Client
from obspy.core import UTCDateTime
converter =XArrayConverter()
class TestDownsample(unittest.TestCase):
def test_phase_shift_not_introduced(self):
target_rate = 20
process = XArrayResample(target_rate=target_rate)
trace_initial = converter(create_sinsoidal_trace(sampling_rate=100, period=0.5, duration=0.5))
trace_processed = converter(create_sinsoidal_trace(sampling_rate=100, period=0.5, duration=0.5))
trace_processed = process(trace_processed)
target = np.argmax(trace_initial.data.ravel()) * trace_initial.attrs['delta']
source = np.argmax(trace_processed.data.ravel()) * trace_processed.attrs['delta']
assert round(abs(target-source), int(np.log10(1/target_rate))) == 0,"filter introduced phase shift"
def test_nonetype_in_out(self):
result = converter(None)
assert result == None
def test_client_returns_not_null(self):
client = Client("IRIS")
t = UTCDateTime("2018-12-25 12:00:00").timestamp
st = client.get_waveforms("UU", "SPU", "*", "H*", t, t + 6 * 60 * 60, attach_response=True)
        assert len(st) > 0
| 1.867188 | 2 |
simulation/main/SimulationResult.py | BillMakwae/Simulation | 8 | 12785779 | class SimulationResult:
def __init__(self, arrays=None, distance_travelled=None, time_taken=None, final_soc=None):
"""
        Instantiates a SimulationResult object. The MainSimulation class uses this while
        running a simulation to store the desired results of its calculations in a single,
        encapsulated object.
"""
self.arrays = arrays
self.distance_travelled = distance_travelled
self.time_taken = time_taken
self.final_soc = final_soc | 3.296875 | 3 |
dictest/main.py | moreal/EngExam-Helper | 1 | 12785780 | <reponame>moreal/EngExam-Helper
# -*- coding: utf-8 -*-
import random
import json
import config
__author__ = "dsm_helper"
def get_quiz_data_from_input():
return input("입학년도를 입력해주세요 ex) 2017 >> "), input("학기를 입력해주세요 ex) 1st >> "), input("파일명을 입력해주세요 ex) 01 >> ")
def get_words_by_info(enter_year, semester, file_name):
try:
with open(f'../data/dsm{enter_year}/{semester}/{file_name}.json', 'r', encoding="UTF8") as f:
words = json.load(f)
return words
except Exception as e:
print("잘못된 입력입니다. 다시 확인해주세요.")
print(e)
exit(1)
if __name__ == "__main__":
enter_year, semester, file_name = get_quiz_data_from_input()
check = True
count = 0
words = get_words_by_info(enter_year, semester, file_name)
def finish_quiz():
print(config.EXIT_MESSAGE)
exit(1)
while True:
if len(words) == 0:
break
if check:
answer, meaning = random.choice(list(words.items()))
check = False
print(meaning)
question = input("I think.. >> ")
if question == answer:
check = True
count = 0
words.pop(answer)
print(config.CORRECT_MESSAGE.format(count=len(words)))
elif question != answer:
print(config.TRY_AGAIN_MESSAGE)
count += 1
if count == 5:
print(config.FIRST_CHARACTER_HINT.format(answer=answer))
if count == 7:
print(config.LENGTH_HINT.format(answer=answer))
if count == 10:
print(config.FAILED_MESSAGE.format(answer=answer))
words.pop(answer)
check = True
if question == "exit":
finish_quiz()
print(config.END_MESSAGE)
| 3 | 3 |
app/tests/routers/test_sections.py | NewShadesDAO/api | 1 | 12785781 | <reponame>NewShadesDAO/api<gh_stars>1-10
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from pymongo.database import Database
from app.models.server import Server
from app.models.user import User
class TestSectionRoutes:
@pytest.mark.asyncio
async def test_update_section_name(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, server: Server
):
new_section_data = {"name": "community"}
response = await authorized_client.post(f"/servers/{str(server.pk)}/sections", json=new_section_data)
assert response.status_code == 201
json_resp = response.json()
section_id = json_resp["id"]
assert json_resp["name"] == new_section_data["name"]
data = {"name": "new-community"}
response = await authorized_client.patch(f"/sections/{section_id}", json=data)
assert response.status_code == 200
json_resp = response.json()
assert json_resp["name"] == data["name"]
@pytest.mark.asyncio
async def test_delete_section(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, server: Server
):
new_section_data = {"name": "community"}
response = await authorized_client.post(f"/servers/{str(server.pk)}/sections", json=new_section_data)
assert response.status_code == 201
section = response.json()
response = await authorized_client.get(f"/servers/{str(server.pk)}/sections")
assert response.status_code == 200
resp_sections = response.json()
assert len(resp_sections) == 1
response = await authorized_client.delete(f"/sections/{section['id']}")
assert response.status_code == 204
response = await authorized_client.get(f"/servers/{str(server.pk)}/sections")
assert response.status_code == 200
resp_sections = response.json()
assert len(resp_sections) == 0
| 2.21875 | 2 |
s_vae/data/__init__.py | tillaczel/s_vae | 1 | 12785782 | from s_vae.data.mnist import create_MNIST, vis_mnist
from s_vae.data.synthetic_hypersphere import create_synthetic_hypersphere
def create_dataset(config: dict, seed=0):
data_config = config['data']
name = data_config['name']
path = data_config['path']
train_ratio = data_config['train_ratio']
if name == 'MNIST':
return create_MNIST(config)
elif name == 'synth':
latent_dim = data_config['latent_dim']
observed_dim = data_config['observed_dim']
n_dev_samples = data_config['n_dev_samples']
n_test_samples = data_config['n_test_samples']
return create_synthetic_hypersphere(path, latent_dim, observed_dim, n_dev_samples, n_test_samples, train_ratio,
seed=seed)
else:
raise ValueError(f'{name} is not in datasets')
def dataset_vis_factory(name):
if name == 'MNIST':
return vis_mnist
elif name == 'synth':
return None
else:
raise ValueError(f'{name} is not in datasets')
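# Hypothetical config sketch (keys mirror those read above; values illustrative):
#   config = {'data': {'name': 'MNIST', 'path': './data', 'train_ratio': 0.9}}
#   dataset = create_dataset(config)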
| 2.25 | 2 |
mvdnet/modeling/roi_heads/mvdnet_box_head.py | qiank10/MVDNet | 51 | 12785783 | import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from typing import Dict
from detectron2.layers import Conv2d, Linear, ShapeSpec, get_norm
from detectron2.modeling.roi_heads import ROI_BOX_HEAD_REGISTRY
from ..attention import SelfAttentionBlock, CrossAttentionBlock
from mvdnet.layers import Conv3d
@ROI_BOX_HEAD_REGISTRY.register()
class MVDNetBoxHead(nn.Module):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
norm = cfg.MODEL.ROI_BOX_HEAD.NORM
self.history_on = cfg.INPUT.HISTORY_ON
self.num_history = cfg.INPUT.NUM_HISTORY+1
self.pooler_size = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
assert num_fc > 0
for f in input_shape.keys():
if f.startswith("radar"):
self.radar_key = f
self.radar_output_size = input_shape[f].channels * input_shape[f].height * input_shape[f].width
self.radar_input_channels = input_shape[f].channels
elif f.startswith("lidar"):
self.lidar_key = f
self.lidar_output_size = input_shape[f].channels * input_shape[f].height * input_shape[f].width
self.lidar_input_channels = input_shape[f].channels
assert(self.lidar_output_size >= self.radar_output_size)
if self.lidar_output_size != self.radar_output_size:
self.match_conv = Conv2d(
in_channels = self.lidar_input_channels,
out_channels = self.radar_input_channels,
kernel_size = 3,
padding = 1,
bias = False,
norm = nn.BatchNorm2d(self.radar_input_channels),
activation = F.leaky_relu_
)
else:
self.match_conv = None
self.radar_self_attention = SelfAttentionBlock(self.radar_output_size)
self.lidar_self_attention = SelfAttentionBlock(self.radar_output_size)
self.radar_cross_attention = CrossAttentionBlock(self.radar_output_size)
self.lidar_cross_attention = CrossAttentionBlock(self.radar_output_size)
if self.history_on:
self.tnn1 = Conv3d(
in_channels = self.radar_input_channels*2,
out_channels = self.radar_input_channels,
kernel_size = [3, 3, 3],
padding = [1, 1, 1],
bias=False,
norm=nn.BatchNorm3d(self.radar_input_channels),
activation=F.leaky_relu_
)
self.tnn2 = Conv3d(
in_channels = self.radar_input_channels,
out_channels = self.radar_input_channels,
kernel_size = [3, 3, 3],
padding = [1, 1, 1],
bias=False,
norm=nn.BatchNorm3d(self.radar_input_channels),
activation=F.leaky_relu_
)
self.tnn3 = Conv3d(
in_channels = self.radar_input_channels,
out_channels = self.radar_input_channels,
kernel_size = [self.num_history, 3, 3],
padding = [0, 1, 1],
bias=False,
norm=nn.BatchNorm3d(self.radar_input_channels),
activation=F.leaky_relu_
)
self.tnns = [self.tnn1, self.tnn2, self.tnn3]
else:
self.tnn = Conv2d(
in_channels = self.radar_input_channels*2,
out_channels = self.radar_input_channels,
kernel_size = 3,
padding = 1,
bias=False,
norm=nn.BatchNorm2d(self.radar_input_channels),
activation=F.leaky_relu_
)
self._output_size = self.radar_output_size
self.fcs = []
for k in range(num_fc):
fc = Linear(self._output_size, fc_dim)
self.add_module("fc{}".format(k + 1), fc)
self.fcs.append(fc)
self._output_size = fc_dim
for layer in self.fcs:
weight_init.c2_xavier_fill(layer)
if self.match_conv is not None:
weight_init.c2_msra_fill(self.match_conv)
if self.history_on:
for layer in self.tnns:
weight_init.c2_msra_fill(layer)
else:
weight_init.c2_msra_fill(self.tnn)
def forward(self, x):
radar_features = x[self.radar_key]
lidar_features = x[self.lidar_key]
if self.history_on:
fusion_feature = []
for radar_x, lidar_x in zip(radar_features, lidar_features):
if self.match_conv is not None:
lidar_x = self.match_conv(lidar_x)
radar_x = torch.flatten(radar_x, start_dim=1)
lidar_x = torch.flatten(lidar_x, start_dim=1)
radar_x = self.radar_self_attention(radar_x)
lidar_x = self.lidar_self_attention(lidar_x)
radar_y = self.radar_cross_attention([radar_x, lidar_x])
lidar_y = self.lidar_cross_attention([lidar_x, radar_x])
radar_y = radar_y.reshape(-1, self.radar_input_channels,
self.pooler_size, self.pooler_size)
lidar_y = lidar_y.reshape(-1, self.radar_input_channels,
self.pooler_size, self.pooler_size)
feature_x = torch.cat([radar_y, lidar_y], dim=1)
fusion_feature.append(feature_x)
fusion_feature = torch.stack(fusion_feature).permute(1,2,0,3,4).contiguous()
for layer in self.tnns:
fusion_feature = layer(fusion_feature)
fusion_feature = torch.flatten(fusion_feature, start_dim=1)
else:
if self.match_conv is not None:
lidar_features = self.match_conv(lidar_features)
radar_x = torch.flatten(radar_features, start_dim=1)
lidar_x = torch.flatten(lidar_features, start_dim=1)
radar_x = self.radar_self_attention(radar_x)
lidar_x = self.lidar_self_attention(lidar_x)
radar_y = self.radar_cross_attention([radar_x, lidar_x])
lidar_y = self.lidar_cross_attention([lidar_x, radar_x])
radar_y = radar_y.reshape(-1, self.radar_input_channels,
self.pooler_size, self.pooler_size)
lidar_y = lidar_y.reshape(-1, self.radar_input_channels,
self.pooler_size, self.pooler_size)
feature_x = torch.cat([radar_y, lidar_y], dim=1)
feature_x = self.tnn(feature_x)
fusion_feature = torch.flatten(feature_x, start_dim=1)
for layer in self.fcs:
fusion_feature = F.leaky_relu_(layer(fusion_feature))
return fusion_feature
@property
def output_size(self):
return self._output_size | 2.046875 | 2 |
src/reactive/storage.py | erik78se/layer-nextcloud | 2 | 12785784 | <reponame>erik78se/layer-nextcloud
import os
import shutil
import subprocess
import time
from charms.reactive import ( when_all, when, when_not, set_flag, set_state,
when_none, when_any, hook, clear_flag )
from charms import reactive, apt
from charmhelpers.core import ( hookenv, host, unitdata )
from charmhelpers.core.hookenv import ( storage_get, storage_list, status_set, config, log, DEBUG, WARNING )
from charmhelpers.core.host import chdir
data_mount_key = "nextcloud.storage.data.mount"
@hook("data-storage-attached")
def attach():
# This happens either with a non existing nextcloud installation
# -OR-
# After a nextcloud installation has been performed
# and the operator has decided to attach storage post installation.
# in which case the /var/www/nextcloud directory is present.
storageids = storage_list("data")
if not storageids:
status_set("blocked", "Cannot locate attached storage")
return
storageid = storageids[0]
mount = storage_get("location", storageid)
if not mount:
hookenv.status_set("blocked", "Cannot locate attached storage mount directory for data")
return
unitdata.kv().set(data_mount_key, mount)
log("data storage attached at {}".format(mount))
# In case storage is attached post deploy, we might have accumulated
# some data so we need to make sure the attached storage meets our requirements on available disk.
if os.path.exists('/var/www/nextcloud'):
required_space = shutil.disk_usage('/var/www/nextcloud/data').used
free_space = shutil.disk_usage(mount).free
if required_space > free_space:
hookenv.status_set("blocked", "attached storage to small.")
return
apt.queue_install(["rsync"])
reactive.set_state("nextcloud.storage.data.attached")
@hook("data-storage-detaching")
def detaching():
if reactive.is_state("nextcloud.storage.data.migrated"):
# We don't attempt to migrate data back to local storage as there
# is probably not enough of it. And we are most likely destroying
# the unit, so it would be a waste of time even if there is enough
# space.
hookenv.status_set("blocked", "Storage detached. No way to store files.")
host.service_stop('apache2')
else:
unitdata.kv().unset(data_mount_key)
reactive.remove_state("nextcloud.storage.data.attached")
@when("nextcloud.storage.data.attached")
@when_not("nextcloud.storage.data.migrated")
@when("apt.installed.rsync")
@when('nextcloud.initdone')
def migrate_data():
"""
We have got some attached storage and nextcloud initialized. This means that we migrate data
following the following strategy:
0. Stop apache2 to avoid getting out of sync AND place nextcloud in maintenance mode.
1. rsync from the original /var/www/nextcloud/data to the new storage path.
2. replace the original /var/www/nextcloud/data with a symlink.
3. Fix permissions.
4. Start apache2 and get out of maintenance mode.
Note that the original may already be a symlink, either from
the block storage broker or manual changes by admins.
"""
log("Initializing migration of data to {}".format(unitdata.kv().get(data_mount_key)), DEBUG)
# Attempting this while nextcloud is live would be bad. So, place in maintenance mode
maintenance_mode(True)
# clear_flag('apache.start') # layer:apache-php
host.service_stop('apache2') # don't wait for the layer to catch the flag
old_data_dir = '/var/www/nextcloud/data'
new_data_dir = unitdata.kv().get(data_mount_key)
backup_data_dir = "{}-{}".format(old_data_dir, int(time.time()))
status_set("maintenance","Migrating data from {} to {}".format(old_data_dir, new_data_dir),)
try:
rsync_cmd = ["rsync", "-av", old_data_dir + "/", new_data_dir + "/"]
log("Running {}".format(" ".join(rsync_cmd)), DEBUG)
subprocess.check_call(rsync_cmd, universal_newlines=True)
except subprocess.CalledProcessError:
status_set(
"blocked",
"Failed to sync data from {} to {}"
"".format(old_data_dir, new_data_dir),
)
return
os.replace(old_data_dir, backup_data_dir)
status_set("maintenance", "Relocated data-directory to {}".format(backup_data_dir))
os.symlink(new_data_dir, old_data_dir) # /mnt/ncdata0 <- /var/www/nextcloud/data
status_set("maintenance", "Created symlink to new data directory")
host.chownr(new_data_dir, "www-data", "www-data", follow_links=False, chowntopdir=True)
status_set("maintenance", "Ensured proper permissions on new data directory")
os.chmod(new_data_dir, 0o700)
status_set("maintenance", "Migration completed.")
# Bring back from maintenance mode.
maintenance_mode(False)
# set_flag('apache.start') # layer:apache-php
host.service_start('apache2') # don't wait for the layer to catch the flag
status_set("active", "Nextcloud is OK.")
reactive.set_state("nextcloud.storage.data.migrated")
def maintenance_mode(on_off):
on = "sudo -u www-data /usr/bin/php occ maintenance:mode --on"
off = "sudo -u www-data /usr/bin/php occ maintenance:mode --off"
with chdir('/var/www/nextcloud'):
if on_off:
subprocess.call(on.split())
else:
subprocess.call(off.split()) | 1.96875 | 2 |
dog-agent-game/ml-agents-envs/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py | antopraju/FiRescue | 58 | 12785785 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlagents/envs/communicator_objects/brain_parameters_proto.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mlagents.envs.communicator_objects import (
resolution_proto_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_resolution__proto__pb2,
)
from mlagents.envs.communicator_objects import (
space_type_proto_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_space__type__proto__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="mlagents/envs/communicator_objects/brain_parameters_proto.proto",
package="communicator_objects",
syntax="proto3",
serialized_options=_b("\252\002\034MLAgents.CommunicatorObjects"),
serialized_pb=_b(
'\n?mlagents/envs/communicator_objects/brain_parameters_proto.proto\x12\x14\x63ommunicator_objects\x1a\x39mlagents/envs/communicator_objects/resolution_proto.proto\x1a\x39mlagents/envs/communicator_objects/space_type_proto.proto"\xd4\x02\n\x14\x42rainParametersProto\x12\x1f\n\x17vector_observation_size\x18\x01 \x01(\x05\x12\'\n\x1fnum_stacked_vector_observations\x18\x02 \x01(\x05\x12\x1a\n\x12vector_action_size\x18\x03 \x03(\x05\x12\x41\n\x12\x63\x61mera_resolutions\x18\x04 \x03(\x0b\x32%.communicator_objects.ResolutionProto\x12"\n\x1avector_action_descriptions\x18\x05 \x03(\t\x12\x46\n\x18vector_action_space_type\x18\x06 \x01(\x0e\x32$.communicator_objects.SpaceTypeProto\x12\x12\n\nbrain_name\x18\x07 \x01(\t\x12\x13\n\x0bis_training\x18\x08 \x01(\x08\x42\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3'
),
dependencies=[
mlagents_dot_envs_dot_communicator__objects_dot_resolution__proto__pb2.DESCRIPTOR,
mlagents_dot_envs_dot_communicator__objects_dot_space__type__proto__pb2.DESCRIPTOR,
],
)
_BRAINPARAMETERSPROTO = _descriptor.Descriptor(
name="BrainParametersProto",
full_name="communicator_objects.BrainParametersProto",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="vector_observation_size",
full_name="communicator_objects.BrainParametersProto.vector_observation_size",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_stacked_vector_observations",
full_name="communicator_objects.BrainParametersProto.num_stacked_vector_observations",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="vector_action_size",
full_name="communicator_objects.BrainParametersProto.vector_action_size",
index=2,
number=3,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="camera_resolutions",
full_name="communicator_objects.BrainParametersProto.camera_resolutions",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="vector_action_descriptions",
full_name="communicator_objects.BrainParametersProto.vector_action_descriptions",
index=4,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="vector_action_space_type",
full_name="communicator_objects.BrainParametersProto.vector_action_space_type",
index=5,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="brain_name",
full_name="communicator_objects.BrainParametersProto.brain_name",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="is_training",
full_name="communicator_objects.BrainParametersProto.is_training",
index=7,
number=8,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=208,
serialized_end=548,
)
_BRAINPARAMETERSPROTO.fields_by_name[
"camera_resolutions"
].message_type = (
mlagents_dot_envs_dot_communicator__objects_dot_resolution__proto__pb2._RESOLUTIONPROTO
)
_BRAINPARAMETERSPROTO.fields_by_name[
"vector_action_space_type"
].enum_type = (
mlagents_dot_envs_dot_communicator__objects_dot_space__type__proto__pb2._SPACETYPEPROTO
)
DESCRIPTOR.message_types_by_name["BrainParametersProto"] = _BRAINPARAMETERSPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BrainParametersProto = _reflection.GeneratedProtocolMessageType(
"BrainParametersProto",
(_message.Message,),
dict(
DESCRIPTOR=_BRAINPARAMETERSPROTO,
__module__="mlagents.envs.communicator_objects.brain_parameters_proto_pb2"
# @@protoc_insertion_point(class_scope:communicator_objects.BrainParametersProto)
),
)
_sym_db.RegisterMessage(BrainParametersProto)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 1.429688 | 1 |
GettingStartedWithMobSF/static-analysis/Mobile-Security-Framework-MobSF/mobsf/StaticAnalyzer/views/android/cert_analysis.py | Aviyel-oss/live-coding | 7 | 12785786 | <filename>GettingStartedWithMobSF/static-analysis/Mobile-Security-Framework-MobSF/mobsf/StaticAnalyzer/views/android/cert_analysis.py<gh_stars>1-10
# -*- coding: utf_8 -*-
"""Module holding the functions for code analysis."""
import binascii
import hashlib
import logging
import os
import re
from androguard.core.bytecodes.apk import APK
from androguard.util import get_certificate_name_string
from asn1crypto import x509
from oscrypto import asymmetric
from django.utils.html import escape
logger = logging.getLogger(__name__)
def get_hardcoded_cert_keystore(files):
"""Returns the hardcoded certificate keystore."""
try:
logger.info('Getting Hardcoded Certificates/Keystores')
findings = []
certz = []
key_store = []
for file_name in files:
if '.' not in file_name:
continue
ext = file_name.split('.')[-1]
if re.search('cer|pem|cert|crt|pub|key|pfx|p12|der', ext):
certz.append(escape(file_name))
if re.search('jks|bks', ext):
key_store.append(escape(file_name))
if certz:
desc = 'Certificate/Key files hardcoded inside the app.'
findings.append({'finding': desc, 'files': certz})
if key_store:
desc = 'Hardcoded Keystore found.'
findings.append({'finding': desc, 'files': key_store})
return findings
except Exception:
logger.exception('Getting Hardcoded Certificates/Keystores')
def cert_info(app_dir, app_file):
"""Return certificate information."""
try:
logger.info('Reading Code Signing Certificate')
manifestfile = None
manidat = ''
cert_info = ''
certlist = []
cert_path = os.path.join(app_dir, 'META-INF/')
apk_file = os.path.join(app_dir, app_file)
hashfunctions = {
'md5': hashlib.md5,
'sha1': hashlib.sha1,
'sha256': hashlib.sha256,
'sha512': hashlib.sha512,
}
files = [f for f in os.listdir(
cert_path) if os.path.isfile(os.path.join(cert_path, f))]
a = APK(apk_file)
if a.is_signed():
certlist.append('APK is signed')
else:
certlist.append('Missing certificate')
certlist.append('v1 signature: {}'.format(a.is_signed_v1()))
certlist.append('v2 signature: {}'.format(a.is_signed_v2()))
certlist.append('v3 signature: {}'.format(a.is_signed_v3()))
certs = set(a.get_certificates_der_v3() + a.get_certificates_der_v2()
+ [a.get_certificate_der(x)
for x in a.get_signature_names()])
pkeys = set(a.get_public_keys_der_v3() + a.get_public_keys_der_v2())
if len(certs) > 0:
certlist.append('Found {} unique certificates'.format(len(certs)))
for cert in certs:
x509_cert = x509.Certificate.load(cert)
certlist.append('Subject: {}'.format(
get_certificate_name_string(x509_cert.subject, short=True)))
certlist.append('Signature Algorithm: {}'.format(
x509_cert.signature_algo))
certlist.append('Valid From: {}'.format(
x509_cert['tbs_certificate']['validity']['not_before'].native))
certlist.append('Valid To: {}'.format(
x509_cert['tbs_certificate']['validity']['not_after'].native))
certlist.append('Issuer: {}'.format(
get_certificate_name_string(x509_cert.issuer, short=True)))
certlist.append('Serial Number: {}'.format(
hex(x509_cert.serial_number)))
certlist.append('Hash Algorithm: {}'.format(x509_cert.hash_algo))
for k, v in hashfunctions.items():
certlist.append('{}: {}'.format(k, v(cert).hexdigest()))
for public_key in pkeys:
x509_public_key = asymmetric.load_public_key(public_key)
certlist.append('PublicKey Algorithm: {}'.format(
x509_public_key.algorithm))
certlist.append('Bit Size: {}'.format(x509_public_key.bit_size))
certlist.append('Fingerprint: {}'.format(
binascii.hexlify(x509_public_key.fingerprint).decode('utf-8')))
cert_info = '\n'.join(certlist)
if 'MANIFEST.MF' in files:
manifestfile = os.path.join(cert_path, 'MANIFEST.MF')
if manifestfile:
with open(manifestfile, 'r', encoding='utf-8') as manifile:
manidat = manifile.read()
sha256_digest = bool(re.findall(r'SHA-256-Digest', manidat))
findings = []
if a.is_signed():
findings.append((
'good',
'Application is signed with a code '
'signing certificate'))
else:
findings.append((
'bad',
'Code signing certificate not found'))
if a.is_signed_v1():
status = 'bad'
if a.is_signed_v2() or a.is_signed_v3():
status = 'warning'
findings.append((
status,
'Application is signed with v1 signature scheme, '
'making it vulnerable to Janus vulnerability on '
'Android <7.0'))
if re.findall(r'CN=Android Debug', cert_info):
findings.append((
'bad',
'Application signed with a debug certificate. '
'Production application must not be shipped '
'with a debug certificate.'))
if re.findall(r'Hash Algorithm: sha1', cert_info):
status = 'bad'
desc = (
'Application is signed with SHA1withRSA. '
'SHA1 hash algorithm is known to have '
'collision issues.')
if sha256_digest:
status = 'warning'
desc += (
' The manifest file indicates SHA256withRSA'
' is in use.')
findings.append((status, desc))
cert_dic = {
'certificate_info': cert_info,
'certificate_findings': findings,
}
return cert_dic
except Exception:
logger.exception('Reading Code Signing Certificate')
return {}
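
# Minimal usage sketch (hypothetical paths -- assumes the APK has already been
# extracted into `app_dir` and that `app.apk` sits inside it):
#
#     info = cert_info('/tmp/extracted_app', 'app.apk')
#     print(info.get('certificate_info', ''))
#     for status, description in info.get('certificate_findings', []):
#         print(status, '-', description)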
| 2.40625 | 2 |
helperfunctions.py | WhiteGobo/plyhandler | 0 | 12785787 | def get_dataarray_as_strings( plyobject, elementname, propertyname, \
encoding="utf8" ):
stringdata = plyobject.get_dataarray( elementname, propertyname )
    return [ str( single, encoding=encoding ) for single in stringdata ]
def strings_to_uchararrays( stringlist, encoding="utf8" ):
    return [ bytes( single, encoding=encoding ) for single in stringlist ]
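
# Usage sketch (`plyobject` is a stand-in for whatever object exposes
# get_dataarray in this project):
#
#     names = get_dataarray_as_strings(plyobject, "vertex", "label")
#     raw = strings_to_uchararrays(names)   # back to bytes
#     assert names == [str(b, encoding="utf8") for b in raw]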
| 2.96875 | 3 |
ui/home.py | FellowHashbrown/MediaQueue | 0 | 12785788 | <reponame>FellowHashbrown/MediaQueue<filename>ui/home.py
import os
import sys
from functools import partial
from PyQt5 import QtWidgets, QtCore
from media import Movie, TVShow, Podcast, LimitedSeries
from media.util import get_type
from ui import MovieDialog, MediaListWidget, add_grid_to_layout, media_objects
from ui import MessageBox
from options import options
class Home(QtWidgets.QFrame):
"""The Home screen is what the user sees when first opening up the
Media Queue application
There are 4 filter combo boxes so the user can easily filter by:
1.) Whether or not the user has started or finished some Media
* Note that a piece of Media cannot be started and finished at the same time \n
2.) The type of Media (LimitedSeries, Movie, Podcast, TVShow) \n
3.) The Streaming Provider
* As of now, there are explicit options inside an Enumerated type. This may change \n
4.) The Person watching it
All the Media will follow an explicit sorting algorithm which will
sort the Media in the precedence of Type -> Streaming Provider -> Person -> Name
"""
def __init__(self, views: dict, parent: QtWidgets.QWidget = None, flags=QtCore.Qt.WindowFlags()):
super().__init__(parent, flags)
self.views = views
# Load all the media inside the movies, tv shows, podcasts, and limited series folders
media = []
paths = [Movie.FOLDER, TVShow.FOLDER, Podcast.FOLDER, LimitedSeries.FOLDER]
for path in paths:
if os.path.exists(f"{options.get_base_dir()}/data/{path}"):
for file in os.listdir(f"{options.get_base_dir()}/data/{path}"):
try:
if path == Movie.FOLDER and file.endswith(".json"):
media.append(Movie(filename=f"{options.get_base_dir()}/data/{path}/{file}"))
elif path == TVShow.FOLDER and file.endswith(".json"):
media.append(TVShow(filename=f"{options.get_base_dir()}/data/{path}/{file}"))
elif path == Podcast.FOLDER and file.endswith(".json"):
media.append(Podcast(filename=f"{options.get_base_dir()}/data/{path}/{file}"))
elif path == LimitedSeries.FOLDER and file.endswith(".json"):
media.append(LimitedSeries(filename=f"{options.get_base_dir()}/data/{path}/{file}"))
except Exception as e:
MessageBox(f"Error loading {file}",
str(e),
self)
media_objects.set_media(media)
# Setup the MediaListWidget and the attributes for the filter comboboxes
self.media_list_widget = MediaListWidget(
self,
edit_media_func=self.add_edit_media,
remove_media_func=self.remove_media
)
self.filter_labels = None
self.filter_start_finish_combobox = None
self.filter_type_combobox = None
self.filter_provider_combobox = None
self.filter_person_combobox = None
self.clear_filter_button = None
self.search_line_edit = None
self.sort_type_button = None
self.sort_provider_button = None
self.sort_person_button = None
self.sort_runtime_button = None
self.sort_name_button = None
self.clear_sort_button = None
self.add_limited_series_button = None
self.add_movie_button = None
self.add_tv_show_button = None
self.add_podcast_button = None
self.setup_ui()
# # # # # # # # # # # # # # # # # # # # # # # # #
def setup_ui(self):
"""Combines all the necessary UI for the Home screen"""
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.setup_filters_ui(self))
layout.addWidget(self.setup_sort_ui(self))
layout.addWidget(self.media_list_widget, 1)
layout.addWidget(self.setup_new_buttons_ui())
self.update_persons_filters()
self.update_providers_filters()
self.setLayout(layout)
self.show()
def setup_filters_ui(self, parent: QtWidgets.QWidget) -> QtWidgets.QWidget:
"""Creates and returns a grid of widgets necessary for
the filter widgets
:param parent: The parent widget for the filter widgets
"""
filters_widget = QtWidgets.QWidget(parent)
layout = QtWidgets.QGridLayout()
self.filter_start_finish_combobox = QtWidgets.QComboBox(parent)
self.filter_start_finish_combobox.addItems(["All", "Started", "Finished",
"Not Started", "Not Finished", "Neither"])
self.filter_start_finish_combobox.currentIndexChanged.connect(partial(self.filter_media, False))
self.filter_start_finish_combobox.setToolTip("Filter the media by their started and finished status")
self.filter_type_combobox = QtWidgets.QComboBox(parent)
self.filter_type_combobox.addItems(["All"] + get_type())
self.filter_type_combobox.currentIndexChanged.connect(partial(self.filter_media, False))
self.filter_type_combobox.setToolTip("Filter the media by their type")
self.filter_provider_combobox = QtWidgets.QComboBox(parent)
self.filter_provider_combobox.addItems(["All"] + [provider for provider in options.get_providers()])
self.filter_provider_combobox.currentIndexChanged.connect(partial(self.filter_media, False))
self.filter_provider_combobox.setToolTip("Filter the media by the streaming provider")
self.filter_person_combobox = QtWidgets.QComboBox(parent)
self.filter_person_combobox.addItems(["All"] + [person for person in options.get_persons()])
self.filter_person_combobox.currentIndexChanged.connect(partial(self.filter_media, False))
self.filter_person_combobox.setToolTip("Filter the media by who is watching")
self.clear_filter_button = QtWidgets.QPushButton("Clear Filter", parent)
self.clear_filter_button.clicked.connect(partial(self.filter_media, True))
self.clear_filter_button.setToolTip("Clear all media filters")
self.search_line_edit = QtWidgets.QLineEdit(parent)
self.search_line_edit.setPlaceholderText("Search")
self.search_line_edit.textChanged.connect(partial(self.filter_media, False))
self.search_line_edit.setToolTip("Search for specific media in the media queue")
# Create the filter labels
self.filter_labels = [
QtWidgets.QLabel("Filter By Started/Finished"), None,
QtWidgets.QLabel("Filter By Type"),
QtWidgets.QLabel("Filter By Streaming Provider"),
QtWidgets.QLabel("Filter By Person"),
self.search_line_edit
]
for label in self.filter_labels:
if label is not None:
label.setAlignment(QtCore.Qt.AlignHCenter)
widgets = [self.filter_labels,
[self.filter_start_finish_combobox, None,
self.filter_type_combobox, self.filter_provider_combobox,
self.filter_person_combobox, self.clear_filter_button, None, None]]
add_grid_to_layout(widgets, layout)
filters_widget.setLayout(layout)
return filters_widget
def setup_sort_ui(self, parent: QtWidgets.QWidget) -> QtWidgets.QWidget:
"""Creates and returns a grid of widgets necessary for
the sorting widgets
:param parent: The parent widget for the filter widgets
"""
sort_widgets = QtWidgets.QWidget(parent)
layout = QtWidgets.QGridLayout()
self.sort_type_button = QtWidgets.QPushButton("Sort By Type", parent)
self.sort_type_button.clicked.connect(partial(self.cycle_sort, "type"))
self.sort_type_button.setToolTip("Sort the Media by the Type")
self.sort_provider_button = QtWidgets.QPushButton("Sort By Streaming Provider", parent)
self.sort_provider_button.clicked.connect(partial(self.cycle_sort, "provider"))
self.sort_provider_button.setToolTip("Sort the Media by the Streaming Provider")
self.sort_person_button = QtWidgets.QPushButton("Sort By Person", parent)
self.sort_person_button.clicked.connect(partial(self.cycle_sort, "person"))
self.sort_person_button.setToolTip("Sort the Media by the Person")
self.sort_runtime_button = QtWidgets.QPushButton("Sort By Runtime", parent)
self.sort_runtime_button.clicked.connect(partial(self.cycle_sort, "runtime"))
self.sort_runtime_button.setToolTip("Sort the Media by the Runtime")
self.sort_name_button = QtWidgets.QPushButton("Sort By Name", parent)
self.sort_name_button.clicked.connect(partial(self.cycle_sort, "name"))
self.sort_name_button.setToolTip("Sort the Media by the Name")
self.clear_sort_button = QtWidgets.QPushButton("Clear Sorting", parent)
self.clear_sort_button.clicked.connect(partial(self.sort_media, True))
self.clear_sort_button.setToolTip("Clear the sorting on the Media to the default")
widgets = [[None, self.sort_type_button, self.sort_provider_button, self.sort_person_button,
self.sort_runtime_button, self.sort_name_button, self.clear_sort_button]]
add_grid_to_layout(widgets, layout)
sort_widgets.setLayout(layout)
return sort_widgets
def setup_new_buttons_ui(self):
"""Creates the buttons meant to be used when adding a new piece of Media"""
widget = QtWidgets.QWidget(self)
layout = QtWidgets.QGridLayout()
self.add_movie_button = QtWidgets.QPushButton("Add Movie", self)
self.add_movie_button.clicked.connect(partial(self.add_edit_media, 'Movie', None))
self.add_movie_button.setToolTip("Add a new Movie to the media queue")
self.add_tv_show_button = QtWidgets.QPushButton("Add TV Show", self)
self.add_tv_show_button.clicked.connect(partial(self.add_edit_media, 'TV Show', None))
self.add_tv_show_button.setToolTip("Add a new TV Show to the media queue")
self.add_podcast_button = QtWidgets.QPushButton("Add Podcast", self)
self.add_podcast_button.clicked.connect(partial(self.add_edit_media, 'Podcast', None))
self.add_podcast_button.setToolTip("Add a new Podcast to the media queue")
self.add_limited_series_button = QtWidgets.QPushButton("Add Limited Series", self)
self.add_limited_series_button.clicked.connect(partial(self.add_edit_media, 'Limited Series', None))
self.add_limited_series_button.setToolTip("Add a new Limited Series to the media queue")
grid = [[self.add_movie_button, self.add_tv_show_button,
self.add_podcast_button, self.add_limited_series_button]]
add_grid_to_layout(grid, layout)
widget.setLayout(layout)
return widget
# # # # # # # # # # # # # # # # # # # # # # # # #
def filter_media(self, clear: bool = False):
"""Filters the Media in the app based off the filter combo boxes
Because the start and finish attributes of a Media object
cannot be True at the same time, the filter comes from a combination laid out below:
1.) All: This will show all pieces of Media no matter their start and finish attributes \n
2.) Started: This will show only the Media that has been started \n
3.) Finished: This will show only the Media that has been finished \n
4.) Not Started: This will show the Media that has not been started
+ This also means it will show finished and unfinished Media \n
5.) Not Finished: This will show the Media that has not been finished
            + This also means it will show started and not-yet-started Media \n
6.) Neither: This will show Media that has not been started nor finished
:param clear: Whether or not to clear the filters
"""
filter_start_finish = [None, None]
filter_type = None
filter_provider = None
filter_person = None
filter_search = None
if clear:
self.filter_start_finish_combobox.setCurrentIndex(0)
self.filter_type_combobox.setCurrentIndex(0)
self.filter_provider_combobox.setCurrentIndex(0)
self.filter_person_combobox.setCurrentIndex(0)
self.search_line_edit.setText("")
else:
# Get the filtering of the Started and Finished attributes
filter_start_finish = (self.filter_start_finish_combobox.currentIndex()
if self.filter_start_finish_combobox is not None else 0)
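            # Combobox index -> [started, finished] pair; None means "don't care"
            # (e.g. index 3, "Not Started", is [False, None]).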
filter_start_finish = {0: [None, None], 1: [True, False], 2: [False, True],
3: [False, None], 4: [None, False], 5: [False, False]}[filter_start_finish]
# Get the filtering of the Type of Media
filter_type = (self.filter_type_combobox.currentText()
if self.filter_type_combobox is not None else "All")
filter_type = filter_type if filter_type != "All" else None
# Get the filtering of the Streaming Provider attribute
filter_provider = (self.filter_provider_combobox.currentText()
if self.filter_provider_combobox is not None else "All")
filter_provider = None if filter_provider == "All" else filter_provider
# Get the filtering of the Person attribute
filter_person = (self.filter_person_combobox.currentText()
if self.filter_person_combobox is not None else "All")
filter_person = None if filter_person == "All" else filter_person
# Get the filtering from the search bar
filter_search = self.search_line_edit.text().lower()
if len(filter_search) == 0:
filter_search = None
media_objects.set_media_filters(
started=filter_start_finish[0], finished=filter_start_finish[1],
media_type=filter_type, provider=filter_provider,
person=filter_person, search=filter_search)
self.media_list_widget.update_stats()
self.media_list_widget.scroll_area.filter()
def cycle_sort(self, sort: str):
"""Cycles the specified sorting variable to the next sort value
:param sort: The sorting variable to cycle
"""
# Cycle the correct sort
if sort == "type":
media_objects.cycle_type_sort()
self.sort_type_button.setText("Sort By Type {}".format(
"▲" if media_objects.get_type_sort() else
("▼" if media_objects.get_type_sort() is False else "")))
elif sort == "provider":
media_objects.cycle_provider_sort()
self.sort_provider_button.setText("Sort By Streaming Provider {}".format(
"▲" if media_objects.get_provider_sort() else
("▼" if media_objects.get_provider_sort() is False else "")))
elif sort == "person":
media_objects.cycle_person_sort()
self.sort_person_button.setText("Sort By Person {}".format(
"▲" if media_objects.get_person_sort() else
("▼" if media_objects.get_person_sort() is False else "")))
elif sort == "runtime":
media_objects.cycle_runtime_sort()
self.sort_runtime_button.setText("Sort By Runtime {}".format(
"▲" if media_objects.get_runtime_sort() else
("▼" if media_objects.get_runtime_sort() is False else "")))
elif sort == "name":
media_objects.cycle_name_sort()
self.sort_name_button.setText("Sort By Name {}".format(
"▲" if media_objects.get_name_sort() else
("▼" if media_objects.get_name_sort() is False else "")))
self.sort_media()
def sort_media(self, clear: bool = False):
"""Sorts the Media in the app based off the sorting values
:param clear: Whether or not to set the sorting values to their defaults
"""
if clear:
media_objects.set_media_sort()
self.sort_type_button.setText("Sort By Type")
self.sort_provider_button.setText("Sort By Streaming Provider")
self.sort_person_button.setText("Sort By Person")
self.sort_runtime_button.setText("Sort By Runtime")
self.sort_name_button.setText("Sort By Name")
self.media_list_widget.scroll_area.update_ui()
self.media_list_widget.scroll_area.filter()
# # # # # # # # # # # # # # # # # # # # # # # # #
def remove_media(self, index: int = None):
"""Removes the Media at the specified index from the list
:param index: The index of the Media to remove
"""
if index is not None:
media_objects.get_removed_media().append(index)
self.filter_media()
media = media_objects.get_media()[index]
os.remove(f"{options.get_base_dir()}/data/{media.FOLDER}/{media.get_id()}.json")
def callback_tv_show(self, index: int = None, canceled: bool = False):
"""The callback function when a user is finished editing a TV Show
:param index: The index of the TV Show to edit, if any.
:param canceled: Whether or not the editing of a TV Show was canceled
"""
self.parent().setCurrentWidget(self)
self.window().setWindowTitle("Media Queue")
# Get the TV Show object from the media objects
tv_show = media_objects.get_tv_show()
media_objects.set_tv_show()
media_objects.set_episodes()
if tv_show is not None:
# Check if an index was given,
# modify the existing TV Show at the index given
# and update the widgets in the scroll area for the media list
if index is not None:
media_objects.get_media()[index] = tv_show
hours, minutes = divmod(tv_show.get_runtime(), 60)
self.media_list_widget.scroll_area.widgets[index + 1][0].setChecked(tv_show.is_started())
self.media_list_widget.scroll_area.widgets[index + 1][1].setChecked(tv_show.is_finished())
self.media_list_widget.scroll_area.widgets[index + 1][3].setText(tv_show.get_provider())
self.media_list_widget.scroll_area.widgets[index + 1][4].setText(tv_show.get_person())
self.media_list_widget.scroll_area.widgets[index + 1][5].setText("{}hr{} {}min{}".format(
hours, "s" if hours != 1 else "",
minutes, "s" if minutes != 1 else ""
))
self.media_list_widget.scroll_area.widgets[index + 1][6].setText(tv_show.get_name())
# No index was given, add the TV Show if the addition was not canceled
# then sort the media
else:
if not canceled:
media_objects.get_media().append(tv_show)
media_objects.sort_media()
# Update the UI for the scroll area and re-filter it
self.media_list_widget.scroll_area.update_ui()
self.filter_media()
def callback_podcast(self, index: int = None, canceled: bool = False):
"""The callback function when a user is finished editing a Podcast
:param index: The index of the Podcast to edit, if any.
:param canceled: Whether or not the editing of a Podcast was canceled
"""
self.parent().setCurrentWidget(self)
self.window().setWindowTitle("Media Queue")
# Get the Podcast object from the media objects
podcast = media_objects.get_podcast()
media_objects.set_podcast()
media_objects.set_episodes()
if podcast is not None:
# Check if an index was given,
# modify the existing Podcast at the index given
# and update the widgets in the scroll area for the media list
if index is not None:
media_objects.get_media()[index] = podcast
hours, minutes = divmod(podcast.get_runtime(), 60)
self.media_list_widget.scroll_area.widgets[index + 1][0].setChecked(podcast.is_started())
self.media_list_widget.scroll_area.widgets[index + 1][1].setChecked(podcast.is_finished())
self.media_list_widget.scroll_area.widgets[index + 1][3].setText(podcast.get_provider())
self.media_list_widget.scroll_area.widgets[index + 1][4].setText(podcast.get_person())
self.media_list_widget.scroll_area.widgets[index + 1][5].setText("{}hr{} {}min{}".format(
hours, "s" if hours != 1 else "",
minutes, "s" if minutes != 1 else ""
))
self.media_list_widget.scroll_area.widgets[index + 1][6].setText(podcast.get_name())
else:
# No index was given, add the Podcast if the addition was not canceled
# then sort the media
if not canceled:
media_objects.get_media().append(podcast)
media_objects.sort_media()
# Update the UI for the scroll area and re-filter it
self.media_list_widget.scroll_area.update_ui()
self.filter_media()
def callback_limited_series(self, index: int = None, canceled: bool = False):
"""The callback function when a user is finished editing a LimitedSeries
:param index: The index of the LimitedSeries to edit, if any.
:param canceled: Whether or not the editing of a LimitedSeries was canceled
"""
self.parent().setCurrentWidget(self)
self.window().setWindowTitle("Media Queue")
# Get the Limited Series object from the media objects
limited_series = media_objects.get_limited_series()
media_objects.set_limited_series()
media_objects.set_episodes()
if limited_series is not None:
# Check if an index was given,
# modify the existing Limited Series at the index given
# and update the widgets in the scroll area for the media list
if index is not None:
media_objects.get_media()[index] = limited_series
hours, minutes = divmod(limited_series.get_runtime(), 60)
self.media_list_widget.scroll_area.widgets[index + 1][0].setChecked(limited_series.is_started())
self.media_list_widget.scroll_area.widgets[index + 1][1].setChecked(limited_series.is_finished())
self.media_list_widget.scroll_area.widgets[index + 1][3].setText(limited_series.get_provider())
self.media_list_widget.scroll_area.widgets[index + 1][4].setText(limited_series.get_person())
self.media_list_widget.scroll_area.widgets[index + 1][5].setText("{}hr{} {}min{}".format(
hours, "s" if hours != 1 else "",
minutes, "s" if minutes != 1 else ""
))
self.media_list_widget.scroll_area.widgets[index + 1][6].setText(limited_series.get_name())
else:
# No index was given, add the Limited Series if the addition was not canceled
# then sort the media
if not canceled:
media_objects.get_media().append(limited_series)
media_objects.sort_media()
# Update the UI for the scroll area and re-filter it
self.media_list_widget.scroll_area.update_ui()
self.filter_media()
# # # # # # # # # # # # # # # # # # # # # # # # #
def add_edit_media(self, media_type: str, index: int = None):
"""Manages adding or editing a piece of Media with an optional index
which controls whether or not a new piece of Media is being added
:param media_type: The type of Media that is being added or edited
:param index: The index of the Media to edit, if any.
"""
# Set the proper media object in the media_objects class
if index is not None:
if media_type == "Movie":
media_objects.set_movie(media_objects.get_media()[index])
elif media_type == "Limited Series":
media_objects.set_limited_series(media_objects.get_media()[index])
elif media_type == "Podcast":
media_objects.set_podcast(media_objects.get_media()[index])
elif media_type == "TV Show":
media_objects.set_tv_show(media_objects.get_media()[index])
# Retrieve the next view and the callback function that will be used
# after adding/editing a piece of Media
view_id = callback_func = None
if media_type == "Movie": # The Movie type is a dialog and therefore does not have a view
movie_dialog = MovieDialog(self)
if movie_dialog.result == QtWidgets.QDialog.Accepted:
movie = media_objects.get_movie()
media_objects.set_movie()
if index is not None:
media_objects.get_media()[index] = movie
else:
media_objects.get_media().append(movie)
self.media_list_widget.scroll_area.update_ui()
self.filter_media()
        # Limited Series, Podcasts, and TV Shows are edited in their own
        # views, so switch to the matching view instead of opening a dialog
elif media_type == "Limited Series":
view_id = "limited_series"
callback_func = self.callback_limited_series
elif media_type == "Podcast":
view_id = "podcast"
callback_func = self.callback_podcast
elif media_type == "TV Show":
view_id = "tv_show"
callback_func = self.callback_tv_show
if media_type != "Movie":
self.views[view_id].edit(callback_func, index)
self.parent().setCurrentWidget(self.views[view_id])
# # # # # # # # # # # # # # # # # # # # # # # # #
def update_providers_filters(self):
"""Updates the list of providers in the combobox for providers filters"""
self.sort_provider_button.setVisible(len(options.get_providers()) > 2)
self.filter_provider_combobox.setVisible(len(options.get_providers()) > 2)
self.filter_labels[3].setVisible(len(options.get_providers()) > 2)
self.filter_provider_combobox.clear()
self.filter_provider_combobox.addItems(["All"] + [provider for provider in options.get_providers()])
self.media_list_widget.scroll_area.update_ui()
def update_persons_filters(self):
"""Updates the list of persons in the combobox for persons filters"""
self.sort_person_button.setVisible(len(options.get_persons()) > 1)
self.filter_person_combobox.setVisible(len(options.get_persons()) > 1)
self.filter_labels[4].setVisible(len(options.get_persons()) > 1)
self.filter_person_combobox.clear()
self.filter_person_combobox.addItems(["All"] + [person for person in options.get_persons()])
self.media_list_widget.scroll_area.update_ui()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
home = Home({})
sys.exit(app.exec_())
| 2.765625 | 3 |
mmaction/models/localizers/utils/__init__.py | HypnosXC/mmaction2 | 648 | 12785789 | from .post_processing import post_processing
__all__ = ['post_processing']
| 1 | 1 |
deplugins/s3.py | arndtroth/AWSomeOverview | 1 | 12785790 | <gh_stars>1-10
"""
Class for Data Extraction of S3
"""
import boto3
from deplugins.base import AWSFact
class S3Buckets (AWSFact):
NAME = 'S3'
OPTION = 's3'
    def get_all_regions(self):
        # S3 bucket listing is global, so a single region-less pass suffices.
        return [None]
    def retrieve(self, conn):
        # `conn` is unused here; a fresh boto3 resource lists all buckets.
        for bucket in boto3.resource('s3').buckets.all():
            item = {'Name': bucket.name, 'Created': bucket.creation_date}
            self.data.setdefault('N/A', []).append(item)
    def connect(self, region):
        # boto3 resolves AWS credentials itself (environment, config file or
        # instance profile), so the old explicit S3Connection setup is no
        # longer needed; retrieve() builds its own S3 resource.
        return None
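
# Standalone listing sketch equivalent to what retrieve() records
# (assumes AWS credentials are available to boto3):
#
#     import boto3
#     for bucket in boto3.resource('s3').buckets.all():
#         print(bucket.name, bucket.creation_date)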
| 2.5625 | 3 |
var/spack/repos/builtin/packages/r-rviennacl/package.py | jeanbez/spack | 0 | 12785791 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RRviennacl(RPackage):
"""'ViennaCL' C++ Header Files.
'ViennaCL' is a free open-source linear algebra library for computations on
many-core architectures (GPUs, MIC) and multi-core CPUs. The library is
written in C++ and supports 'CUDA', 'OpenCL', and 'OpenMP' (including
switches at runtime). I have placed these libraries in this package as a
more efficient distribution system for CRAN. The idea is that you can write
a package that depends on the 'ViennaCL' library and yet you do not need to
distribute a copy of this code with your package."""
cran = "RViennaCL"
version('1.7.1.8', sha256='adcc74537337582153d5b11d281e391e91a7f3afae116aa1b9a034ffd11b0252')
| 1.242188 | 1 |
setup.py | jannessm/quadric-mesh-simplification | 23 | 12785792 | from setuptools import Extension, setup
from Cython.Build import cythonize
import numpy as np
import os.path as osp
__version__ = '1.1.4'
url = 'https://github.com/jannessm/quadric-mesh-simplification'
files = [
'simplify.c',
'array.c',
'clean_mesh.c',
'contract_pair.c',
'edges.c',
'maths.c',
'mesh_inversion.c',
'pair_heap.c',
'pair.c',
'preserve_bounds.c',
'q.c',
'targets.c',
'upper_tri.c',
'valid_pairs.c',
'test_utils.c'
]
src_path = osp.join(osp.dirname(osp.abspath(__file__)), 'quad_mesh_simplify')
ext_modules = [
Extension(
'simplify',
[osp.join(src_path, 'c', f) for f in files] + [osp.join(src_path,'simplify.pyx')],
# extra_compile_args=['-fopenmp'],
# extra_link_args=['-fopenmp'],
include_dirs=[np.get_include()],
define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_17_API_VERSION")],
),
]
ext_modules = cythonize(ext_modules)
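
# Build sketch (standard setuptools/Cython workflow, nothing project-specific):
#
#     python setup.py build_ext --inplace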
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(filename):
"""Load requirements from a pip requirements file."""
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
setup(
name='quad_mesh_simplify',
version=__version__,
author='<NAME>',
url=url,
description="Simplify meshes including vertex features.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements("requirements.txt"),
python_requires=">=3.6.3",
ext_modules=ext_modules,
zip_safe=False,
)
| 1.710938 | 2 |
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/sample_test/TestSampleTest.py | Polidea/SiriusObfuscator | 427 | 12785793 | <reponame>Polidea/SiriusObfuscator
"""
Describe the purpose of the test class here.
"""
from __future__ import print_function
import os
import time
import re
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class RenameThisSampleTestTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
# If your test case doesn't stress debug info, the
# set this to true. That way it won't be run once for
# each debug info format.
NO_DEBUG_INFO_TESTCASE = True
def test_sample_rename_this(self):
"""There can be many tests in a test case - describe this test here."""
self.build()
self.sample_test()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def sample_test(self):
"""You might use the test implementation in several ways, say so here."""
exe = os.path.join(os.getcwd(), "a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint in main.c at the source matching
# "Set a breakpoint here"
breakpoint = target.BreakpointCreateBySourceRegex(
"Set a breakpoint here", lldb.SBFileSpec("main.c"))
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() >= 1,
VALID_BREAKPOINT)
error = lldb.SBError()
# This is the launch info. If you want to launch with arguments or
# environment variables, add them using SetArguments or
# SetEnvironmentEntries
launch_info = lldb.SBLaunchInfo(None)
process = target.Launch(launch_info, error)
self.assertTrue(process, PROCESS_IS_VALID)
# Did we hit our breakpoint?
from lldbsuite.test.lldbutil import get_threads_stopped_at_breakpoint
threads = get_threads_stopped_at_breakpoint(process, breakpoint)
self.assertTrue(
len(threads) == 1,
"There should be a thread stopped at our breakpoint")
# The hit count for the breakpoint should be 1.
self.assertTrue(breakpoint.GetHitCount() == 1)
frame = threads[0].GetFrameAtIndex(0)
test_var = frame.FindVariable("test_var")
self.assertTrue(test_var.GetError().Success(), "Failed to fetch test_var")
test_value = test_var.GetValueAsUnsigned()
self.assertEqual(test_value, 10, "Got the right value for test_var")
| 2.140625 | 2 |
openprocurement/auctions/insider/tests/award.py | oleksiyVeretiuk/openprocurement.auctions.insider | 0 | 12785794 | <gh_stars>0
# -*- coding: utf-8 -*-
import unittest
from datetime import timedelta
from openprocurement.api.models import get_now
from openprocurement.auctions.insider.tests.base import (
BaseInsiderAuctionWebTest, test_financial_bids,
test_insider_auction_data, test_financial_organization,
)
class InsiderAuctionCreateAwardTest(BaseInsiderAuctionWebTest):
initial_status = 'active.qualification'
initial_bids = test_financial_bids
def test_create_auction_award_invalid(self):
request_path = '/auctions/{}/awards'.format(self.auction_id)
response = self.app.post(request_path, 'data', status=415)
self.assertEqual(response.status, '415 Unsupported Media Type')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description':
u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'}
])
response = self.app.post(
request_path, 'data', content_type='application/json', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'No JSON object could be decoded',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(request_path, 'data', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(
request_path, {'not_data': {}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(request_path, {'data': {
'invalid_field': 'invalid_value'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Rogue field', u'location':
u'body', u'name': u'invalid_field'}
])
response = self.app.post_json(request_path, {
'data': {'suppliers': [{'identifier': 'invalid_value'}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': {u'identifier': [
u'Please use a mapping for this field or Identifier instance instead of unicode.']}, u'location': u'body', u'name': u'suppliers'}
])
response = self.app.post_json(request_path, {
'data': {'suppliers': [{'identifier': {'id': 0}}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.']}, u'name': [u'This field is required.'], u'address': [u'This field is required.']}], u'location': u'body', u'name': u'suppliers'},
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'bid_id'}
])
response = self.app.post_json(request_path, {'data': {'suppliers': [
{'name': 'name', 'identifier': {'uri': 'invalid_value'}}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.'], u'uri': [u'Not a well formed URL.']}, u'address': [u'This field is required.']}], u'location': u'body', u'name': u'suppliers'},
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'bid_id'}
])
response = self.app.post_json(request_path, {'data': {
'suppliers': [self.initial_organization],
'status': 'pending.verification',
'bid_id': self.initial_bids[0]['id'],
'lotID': '0' * 32
}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'lotID should be one of lots'], u'location': u'body', u'name': u'lotID'}
])
response = self.app.post_json('/auctions/some_id/awards', {'data': {
'suppliers': [self.initial_organization], 'bid_id': self.initial_bids[0]['id']}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
response = self.app.get('/auctions/some_id/awards', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
self.set_status('complete')
response = self.app.post_json('/auctions/{}/awards'.format(
self.auction_id), {'data': {'suppliers': [self.initial_organization], 'status': 'pending.verification', 'bid_id': self.initial_bids[0]['id']}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't create award in current (complete) auction status")
def test_create_auction_award(self):
request_path = '/auctions/{}/awards'.format(self.auction_id)
now = get_now()
response = self.app.post_json(request_path, {'data': {'suppliers': [self.initial_organization], 'bid_id': self.initial_bids[0]['id']}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
award = response.json['data']
self.assertEqual(award['suppliers'][0]['name'], self.initial_organization['name'])
self.assertIn('id', award)
self.assertIn(award['id'], response.headers['Location'])
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'][-1], award)
bid_token = self.initial_bids_tokens[self.initial_bids[0]['id']]
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, award['id'], bid_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award['id'], doc_id, bid_token), {"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertIn("documentType", response.json["data"])
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(self.auction_id, award['id'], doc_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('auctionProtocol', response.json["data"]["documentType"])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
self.assertEqual('bid_owner', response.json["data"]["author"])
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, award['id'], self.auction_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award['id'], doc_id, self.auction_token), {"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertIn("documentType", response.json["data"])
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(self.auction_id, award['id'], doc_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('auctionProtocol', response.json["data"]["documentType"])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
self.assertEqual('auction_owner', response.json["data"]["author"])
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, award['id']), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], u'pending.payment')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, award['id']), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], u'active')
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], u'active.awarded')
class InsiderAuctionAwardProcessTest(BaseInsiderAuctionWebTest):
    # initial_data = test_insider_auction_data
initial_status = 'active.auction'
initial_bids = test_financial_bids
def upload_auction_protocol(self, award):
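        """Upload an auction protocol document for the given award, first as
        the bid owner and then as the auction owner, marking each upload with
        documentType 'auctionProtocol' and verifying the stored metadata."""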
award_id = award['id']
bid_token = self.initial_bids_tokens[award['bid_id']]
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, award_id, bid_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award_id, doc_id, bid_token), {"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertIn("documentType", response.json["data"])
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, award_id, self.auction_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.patch_json(
'/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award_id, doc_id, self.auction_token),
{"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertIn("documentType", response.json["data"])
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.get('/auctions/{}/awards/{}/documents'.format(self.auction_id,award_id, doc_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual('auctionProtocol', response.json["data"][0]["documentType"])
self.assertEqual('auction_protocol.pdf', response.json["data"][0]["title"])
self.assertEqual('bid_owner', response.json["data"][0]["author"])
self.assertEqual('auctionProtocol', response.json["data"][1]["documentType"])
self.assertEqual('auction_owner', response.json["data"][1]["author"])
def setUp(self):
super(InsiderAuctionAwardProcessTest, self).setUp()
authorization = self.app.authorization
self.app.authorization = ('Basic', ('auction', ''))
now = get_now()
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
auction = response.json['data']
value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
now = get_now()
auction_result = {
'bids': [
{
"id": b['id'],
"date": (now - timedelta(seconds=i)).isoformat(),
"value": {"amount": value_threshold * 2},
}
for i, b in enumerate(self.initial_bids)
]
}
response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
auction = response.json['data']
self.assertEqual('active.qualification', auction["status"])
self.first_award = auction['awards'][0]
self.second_award = auction['awards'][1]
self.first_award_id = self.first_award['id']
self.second_award_id = self.second_award['id']
self.app.authorization = authorization
def test_invalid_patch_auction_award(self):
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award status to (pending.payment) before auction owner load auction protocol")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.waiting"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.verification) status to (pending.waiting) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.verification) status to (active) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "cancelled"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.verification) status to (cancelled) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "unsuccessful"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (unsuccessful) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.verification"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (pending.verification) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (pending.payment) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (active) status")
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.waiting"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.verification) status to (pending.waiting) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.verification) status to (active) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "cancelled"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.verification) status to (cancelled) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "unsuccessful"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (unsuccessful) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.verification"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (pending.verification) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (pending.payment) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (active) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], "pending.payment")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "cancelled"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.payment) status to (cancelled) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.waiting"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.payment) status to (pending.waiting) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.verification"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.payment) status to (pending.verification) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "unsuccessful"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (unsuccessful) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.verification"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (pending.verification) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (pending.payment) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (active) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], "active")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "cancelled"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (active) status to (cancelled) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.waiting"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (active) status to (pending.waiting) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.verification"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (active) status to (pending.verification) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (active) status to (pending.payment) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "unsuccessful"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (unsuccessful) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.verification"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (pending.verification) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (pending.payment) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award (pending.waiting) status to (active) status")
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], "unsuccessful")
self.assertIn('Location', response.headers)
self.assertIn(self.second_award_id, response.headers['Location'])
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update award in current (unsuccessful) status")
self.upload_auction_protocol(self.second_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.set_status('complete')
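    # The assertions below pin down the award state machine as these tests
    # drive it: pending.verification -> (auction protocol uploaded)
    # pending.payment -> active -> unsuccessful, after which the second
    # award leaves pending.waiting; a pending.waiting award itself can only
    # be cancelled by its bid owner.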
def test_patch_auction_award(self):
request_path = '/auctions/{}/awards'.format(self.auction_id)
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 2)
response = self.app.patch_json('/auctions/{}/awards/some_id'.format(self.auction_id), {"data": {"status": "unsuccessful"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'award_id'}
])
response = self.app.patch_json('/auctions/some_id/awards/some_id', {"data": {"status": "unsuccessful"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"awardStatus": "unsuccessful"}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "awardStatus", "description": "Rogue field"}
])
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('Location', response.headers)
new_award_location = response.headers['Location']
self.assertIn(self.second_award_id, new_award_location)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update award in current (unsuccessful) status")
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 2)
self.assertIn(response.json['data'][1]['id'], new_award_location)
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(self.auction_id, self.second_award_id, self.auction_token),
{"data": {"title": "title", "description": "description"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['title'], 'title')
self.assertEqual(response.json['data']['description'], 'description')
self.upload_auction_protocol(self.second_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.set_status('complete')
response = self.app.get('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 200)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "unsuccessful"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update award in current (complete) auction status")
def test_patch_auction_award_admin(self):
request_path = '/auctions/{}/awards'.format(self.auction_id)
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 2)
authorization = self.app.authorization
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id),
{"data":{
"paymentPeriod": {'endDate': self.first_award['paymentPeriod']['startDate']},
"verificationPeriod": {'endDate': self.first_award['verificationPeriod']['startDate']},
"signingPeriod": {'endDate': self.first_award['signingPeriod']['startDate']}}
})
self.assertEqual(response.status, '200 OK')
first_award = response.json['data']
self.assertEqual(first_award['verificationPeriod']['startDate'], first_award['verificationPeriod']['endDate'])
self.assertEqual(first_award['paymentPeriod']['startDate'], first_award['paymentPeriod']['endDate'])
self.assertEqual(first_award['signingPeriod']['startDate'], first_award['signingPeriod']['endDate'])
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id),
{"data":{
"status": 'active',
"paymentPeriod": {'endDate': None},
"verificationPeriod": {'endDate': None},
"signingPeriod": {'endDate': None}}
})
self.assertEqual(response.status, '200 OK')
first_award = response.json['data']
self.assertNotEqual(first_award['status'], 'active')
self.assertNotEqual(first_award['paymentPeriod']['startDate'], first_award['paymentPeriod']['endDate'])
self.assertEqual(first_award['paymentPeriod']['endDate'], self.first_award['paymentPeriod']['endDate'])
self.assertNotEqual(first_award['verificationPeriod']['startDate'], first_award['verificationPeriod']['endDate'])
self.assertEqual(first_award['verificationPeriod']['endDate'], self.first_award['verificationPeriod']['endDate'])
self.assertNotEqual(first_award['signingPeriod']['startDate'], first_award['signingPeriod']['endDate'])
self.assertEqual(first_award['signingPeriod']['endDate'], self.first_award['signingPeriod']['endDate'])
self.app.authorization = authorization
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.set_status('complete')
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'complete')
    def test_complete_auction_with_second_award1(self):
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('Location', response.headers)
self.assertIn(self.second_award_id, response.headers['Location'])
self.upload_auction_protocol(self.second_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.set_status('complete')
response = self.app.get('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 200)
    def test_complete_auction_with_second_award2(self):
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('Location', response.headers)
self.assertIn(self.second_award_id, response.headers['Location'])
self.upload_auction_protocol(self.second_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.set_status('complete')
response = self.app.get('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 200)
    def test_complete_auction_with_second_award3(self):
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('Location', response.headers)
self.assertIn(self.second_award_id, response.headers['Location'])
self.upload_auction_protocol(self.second_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.set_status('complete')
response = self.app.get('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 200)
def test_successful_second_auction_award(self):
request_path = '/auctions/{}/awards'.format(self.auction_id)
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('Location', response.headers)
new_award_location = response.headers['Location']
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 2)
self.assertIn(response.json['data'][1]['id'], new_award_location)
new_award = response.json['data'][-1]
self.upload_auction_protocol(self.second_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.set_status('complete')
response = self.app.get('/auctions/{}/awards/{}'.format(self.auction_id, self.second_award_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 200)
def test_unsuccessful_auction1(self):
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token=1'.format(self.auction_id, self.second_award_id), {"data": {"status": "cancelled"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Only bid owner may cancel award in current (pending.waiting) status', u'location':
u'body', u'name': u'data'}
])
bid_token = self.initial_bids_tokens[self.first_award['bid_id']]
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(self.auction_id, self.second_award_id, bid_token), {"data": {"status": "cancelled"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'unsuccessful')
def test_unsuccessful_auction2(self):
bid_token = self.initial_bids_tokens[self.first_award['bid_id']]
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(self.auction_id, self.second_award_id, bid_token), {"data": {"status": "cancelled"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'unsuccessful')
def test_unsuccessful_auction3(self):
bid_token = self.initial_bids_tokens[self.first_award['bid_id']]
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(self.auction_id, self.second_award_id, bid_token), {"data": {"status": "cancelled"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'unsuccessful')
def test_unsuccessful_auction4(self):
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bid_token = self.initial_bids_tokens[self.first_award['bid_id']]
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(self.auction_id, self.second_award_id, bid_token), {"data": {"status": "cancelled"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'unsuccessful')
def test_unsuccessful_auction5(self):
self.upload_auction_protocol(self.first_award)
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "pending.payment"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bid_token = self.initial_bids_tokens[self.first_award['bid_id']]
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(self.auction_id, self.second_award_id, bid_token), {"data": {"status": "cancelled"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.first_award_id), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'unsuccessful')
def test_get_auction_awards(self):
response = self.app.get('/auctions/{}/awards'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 2)
        first_award = response.json['data'][0]
        second_award = response.json['data'][1]
        response = self.app.get('/auctions/{}/awards/{}'.format(self.auction_id, first_award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'pending.verification')
self.assertIn('verificationPeriod', response.json['data'])
response = self.app.get('/auctions/{}/awards/{}'.format(self.auction_id, second_award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'pending.waiting')
response = self.app.get('/auctions/{}/awards/some_id'.format(self.auction_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'award_id'}
])
response = self.app.get('/auctions/some_id/awards/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
class InsiderAuctionAwardDocumentResourceTest(BaseInsiderAuctionWebTest):
initial_status = 'active.auction'
initial_bids = test_financial_bids
def setUp(self):
super(InsiderAuctionAwardDocumentResourceTest, self).setUp()
authorization = self.app.authorization
self.app.authorization = ('Basic', ('auction', ''))
now = get_now()
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
auction = response.json['data']
value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
now = get_now()
auction_result = {
'bids': [
{
"id": b['id'],
"date": (now - timedelta(seconds=i)).isoformat(),
"value": {"amount": value_threshold * 2},
}
for i, b in enumerate(self.initial_bids)
]
}
response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
auction = response.json['data']
self.assertEqual('active.qualification', auction["status"])
self.first_award = auction['awards'][0]
self.second_award = auction['awards'][1]
self.first_award_id = self.first_award['id']
self.second_award_id = self.second_award['id']
self.app.authorization = authorization
def test_not_found(self):
response = self.app.post('/auctions/some_id/awards/some_id/documents', status=404, upload_files=[
('file', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
response = self.app.post('/auctions/{}/awards/some_id/documents'.format(self.auction_id), status=404, upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'award_id'}
])
response = self.app.post('/auctions/{}/awards/{}/documents'.format(self.auction_id, self.first_award_id), status=404, upload_files=[
('invalid_value', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'body', u'name': u'file'}
])
response = self.app.get('/auctions/some_id/awards/some_id/documents', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
response = self.app.get('/auctions/{}/awards/some_id/documents'.format(self.auction_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'award_id'}
])
response = self.app.get('/auctions/some_id/awards/some_id/documents/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
response = self.app.get('/auctions/{}/awards/some_id/documents/some_id'.format(self.auction_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'award_id'}
])
response = self.app.get('/auctions/{}/awards/{}/documents/some_id'.format(self.auction_id, self.first_award_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'document_id'}
])
response = self.app.put('/auctions/some_id/awards/some_id/documents/some_id', status=404,
upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
response = self.app.put('/auctions/{}/awards/some_id/documents/some_id'.format(self.auction_id), status=404,
upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'award_id'}
])
response = self.app.put('/auctions/{}/awards/{}/documents/some_id'.format(
self.auction_id, self.first_award_id), status=404, upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
def test_create_auction_award_document(self):
response = self.app.post('/auctions/{}/awards/{}/documents'.format(
self.auction_id, self.first_award_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('name.doc', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/auctions/{}/awards/{}/documents'.format(self.auction_id, self.first_award_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual('name.doc', response.json["data"][0]["title"])
response = self.app.get('/auctions/{}/awards/{}/documents?all=true'.format(self.auction_id, self.first_award_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual('name.doc', response.json["data"][0]["title"])
response = self.app.get('/auctions/{}/awards/{}/documents/{}?download=some_id'.format(
self.auction_id, self.first_award_id, doc_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'download'}
])
response = self.app.get('/auctions/{}/awards/{}/documents/{}?{}'.format(
self.auction_id, self.first_award_id, doc_id, key))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 7)
self.assertEqual(response.body, 'content')
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(
self.auction_id, self.first_award_id, doc_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('name.doc', response.json["data"]["title"])
self.set_status('complete')
response = self.app.post('/auctions/{}/awards/{}/documents'.format(
self.auction_id, self.first_award_id), upload_files=[('file', 'name.doc', 'content')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add document in current (complete) auction status")
def test_put_auction_award_document(self):
response = self.app.post('/auctions/{}/awards/{}/documents'.format(
self.auction_id, self.first_award_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
response = self.app.put('/auctions/{}/awards/{}/documents/{}'.format(self.auction_id, self.first_award_id, doc_id),
status=404,
upload_files=[('invalid_name', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'body', u'name': u'file'}
])
response = self.app.put('/auctions/{}/awards/{}/documents/{}'.format(
self.auction_id, self.first_award_id, doc_id), upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/auctions/{}/awards/{}/documents/{}?{}'.format(
self.auction_id, self.first_award_id, doc_id, key))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, 'content2')
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(
self.auction_id, self.first_award_id, doc_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('name.doc', response.json["data"]["title"])
response = self.app.put('/auctions/{}/awards/{}/documents/{}'.format(
self.auction_id, self.first_award_id, doc_id), 'content3', content_type='application/msword')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/auctions/{}/awards/{}/documents/{}?{}'.format(
self.auction_id, self.first_award_id, doc_id, key))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, 'content3')
self.set_status('complete')
response = self.app.put('/auctions/{}/awards/{}/documents/{}'.format(
self.auction_id, self.first_award_id, doc_id), upload_files=[('file', 'name.doc', 'content3')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document in current (complete) auction status")
def test_patch_auction_award_document(self):
response = self.app.post('/auctions/{}/awards/{}/documents'.format(
self.auction_id, self.first_award_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}'.format(self.auction_id, self.first_award_id, doc_id), {"data": {"description": "document description"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(
self.auction_id, self.first_award_id, doc_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('document description', response.json["data"]["description"])
self.set_status('complete')
response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}'.format(self.auction_id, self.first_award_id, doc_id), {"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document in current (complete) auction status")
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(InsiderAuctionCreateAwardTest))
suite.addTest(unittest.makeSuite(InsiderAuctionAwardProcessTest))
suite.addTest(unittest.makeSuite(InsiderAuctionAwardDocumentResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 2.234375 | 2 |
hostray/web/component/__init__.py | hsky77/hostray | 9 | 12785795 | <reponame>hsky77/hostray<gh_stars>1-10
# Copyright (C) 2019-Present the hostray authors and contributors
#
# This module is part of hostray and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php:
'''
This module contains the "yaml" configurable component classes for a hostray application.
- the components of a hostray application are loaded in the following order:
    1. load the components of DefaultComponentTypes
    2. initialize the components of DefaultComponentTypes with the settings in "server_config.yaml" if specified, or with defaults
    3. load the components of OptionalComponentTypes that "server_config.yaml" specifies, if any
    4. load the components of extension modules that "server_config.yaml" specifies, if any
- to create a component extension module:
0. the hierarchy of folders and files looks like:
server_directory/
server_config.yaml
component/
__init__.py
foo.py
    1. subclass the ComponentTypes enum to define the extended components, e.g.:
class ComponentExtension(ComponentTypes):
# tuple(<component_key>, <package_route>, <class_name_in_the_py_file>)
Foo = ('foo', 'foo', 'Foo')
2. in "foo.py" contains the class code:
from hostray.web.component import Component, ComponentManager
from . import ComponentExtension
class Foo(Component):
def __init__(self):
super().__init__(ComponentExtension.Foo)
def init(self, component_manager: ComponentManager, p1, *arugs, **kwargs) -> None:
self.p1 = p1
    3. set up the component block of "server_config.yaml" to tell the hostray server to load the extended component "Foo":
component: # block to setup component
foo: # component_key to load
p1: xxxx # parameter p1 of Foo.init()
Last Updated: Monday, 4th November 2019 by hsky77 (<EMAIL>)
'''
from typing import Dict, List, Union
from .base import ComponentTypes, Component, ComponentManager
from .. import HostrayWebException, LocalCode_Component_Duplicated_Key, LocalCode_Failed_To_Load_Component
class DefaultComponentTypes(ComponentTypes):
"""server loads all components of this enum type when start"""
Localization = ('localization', 'default_component',
'LocalizationComponent')
Logger = ('logger', 'default_component', 'LoggerComponent')
Callback = ('callback', 'default_component', 'CallbackComponent')
WorkerPool = ('worker_pool', 'default_component', 'WorkerPoolComponent')
TaskQueue = ('task_queue', 'default_component', 'TaskQueueComponent')
class OptionalComponentTypes(ComponentTypes):
"""server loads configured components of this enum type when start"""
Service = ('services', 'optional_component', 'ServicesComponent')
MemoryCache = ('memory_cache', 'optional_component',
'MemoryCacheComponent')
OrmDB = ('orm_db', 'optional_component', 'OrmDBComponent')
def __create_optional_components(component_manager: ComponentManager, component_settings: Dict, component_types: List[ComponentTypes], root_dir: str) -> None:
for key in component_settings:
for component_type in component_types:
comp_type = None
try:
comp_type = component_type(key)
except:
continue
if comp_type is not None:
comp = comp_type.import_class()(comp_type)
component_manager.set_component(comp)
break
for key in component_settings:
comp_type = None
for component_type in component_types:
try:
comp_type = component_type(key)
break
except:
continue
if comp_type:
component_manager.invoke(comp_type, 'init',
component_manager,
**(component_settings[comp_type.enum_key] or {}),
root_dir=root_dir)
def create_server_component_manager(component_settings: Union[Dict, None], root_dir: str,
option_component_types: List[ComponentTypes] = [OptionalComponentTypes]) -> ComponentManager:
component_manager = ComponentManager()
# default components
for default_type in DefaultComponentTypes:
comp = default_type.import_class()(default_type)
component_manager.set_component(comp)
# init
for default_type in DefaultComponentTypes:
if component_settings and default_type.enum_key in component_settings:
component_manager.invoke(default_type, 'init',
component_manager,
**(component_settings[default_type.enum_key] or {}),
root_dir=root_dir)
else:
component_manager.invoke(
default_type, 'init', component_manager, root_dir=root_dir)
# optional components
if component_settings:
__create_optional_components(
component_manager, component_settings, [OptionalComponentTypes], root_dir)
sort_types = [OptionalComponentTypes, DefaultComponentTypes]
# extensions
ext_comp_types = ComponentTypes.get_component_enum_class()
if ext_comp_types is not None:
sort_types = ext_comp_types + \
[OptionalComponentTypes, DefaultComponentTypes]
# check duplicated key:
for r_type in sort_types:
for key in r_type:
for l_type in sort_types:
d_key = None
if r_type is not l_type:
try:
d_key = l_type(key.enum_key)
except:
continue
if d_key is not None:
raise HostrayWebException(
LocalCode_Component_Duplicated_Key, l_type, r_type, key.enum_key)
if component_settings:
__create_optional_components(
component_manager, component_settings, ext_comp_types, root_dir)
    # check whether any configured component failed to load
if component_settings:
for key in component_settings:
checked = False
for component in component_manager.components:
if key == component.component_type.enum_key:
checked = True
if not checked:
raise HostrayWebException(
LocalCode_Failed_To_Load_Component, root_dir, key)
# sort with enums order
component_manager.sort_components(sort_types)
return component_manager
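# Minimal usage sketch (illustrative, not part of the original module; the
# settings dict mirrors the "component" block of "server_config.yaml"):
#
#   manager = create_server_component_manager(
#       {'services': {}, 'memory_cache': {}}, root_dir='.')
#
# Keys must match the enum_key of a default, optional, or extension component
# type; unknown keys raise HostrayWebException(LocalCode_Failed_To_Load_Component).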
| 2.28125 | 2 |
restful_client_lite/contrib/__init__.py | huandzh/restful-client-lite | 0 | 12785796 | import restful_client_lite.contrib.aliyun # noqa
| 0.9375 | 1 |
coral_ordinal/layer.py | stephengmatthews/coral-ordinal | 39 | 12785797 | import tensorflow as tf
from tensorflow.python.keras import activations
class CoralOrdinal(tf.keras.layers.Layer):
# We skip input_dim/input_shape here and put in the build() method as recommended in the tutorial,
# in case the user doesn't know the input dimensions when defining the model.
def __init__(self, num_classes, activation = None, **kwargs):
""" Ordinal output layer, which produces ordinal logits by default.
Args:
num_classes: how many ranks (aka labels or values) are in the ordinal variable.
activation: (Optional) Activation function to use. The default of None produces
ordinal logits, but passing "ordinal_softmax" will cause the layer to output
a probability prediction for each label.
"""
# Via Dense Layer code:
# https://github.com/tensorflow/tensorflow/blob/v2.2.0/tensorflow/python/keras/layers/core.py#L1128
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
# Pass any additional keyword arguments to Layer() (i.e. name, dtype)
super(CoralOrdinal, self).__init__(**kwargs)
self.num_classes = num_classes
self.activation = activations.get(activation)
# Following https://www.tensorflow.org/guide/keras/custom_layers_and_models#best_practice_deferring_weight_creation_until_the_shape_of_the_inputs_is_known
def build(self, input_shape):
# Single fully-connected neuron - this is the latent variable.
num_units = 1
# I believe glorot_uniform (aka Xavier uniform) is pytorch's default initializer, per
# https://pytorch.org/docs/master/generated/torch.nn.Linear.html
# and https://www.tensorflow.org/api_docs/python/tf/keras/initializers/GlorotUniform
self.fc = self.add_weight(shape = (input_shape[-1], num_units),
# Need a unique name if there are multiple coral_ordinal layers.
name = self.name + "_latent",
initializer = 'glorot_uniform',
# Not sure if this is necessary:
dtype = tf.float32,
trainable = True)
# num_classes - 1 bias terms, defaulting to 0.
self.linear_1_bias = self.add_weight(shape = (self.num_classes - 1, ),
# Need a unique name if there are multiple coral_ordinal layers.
name = self.name + "_bias",
initializer = 'zeros',
# Not sure if this is necessary:
dtype = tf.float32,
trainable = True)
# This defines the forward pass.
def call(self, inputs):
fc_inputs = tf.matmul(inputs, self.fc)
logits = fc_inputs + self.linear_1_bias
if self.activation is None:
outputs = logits
else:
# Not yet tested:
outputs = self.activation(logits)
return outputs
# This allows for serialization supposedly.
# https://www.tensorflow.org/guide/keras/custom_layers_and_models#you_can_optionally_enable_serialization_on_your_layers
def get_config(self):
config = super(CoralOrdinal, self).get_config()
config.update({'num_classes': self.num_classes})
return config
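
# Illustrative usage (a sketch added here, not part of the original file):
# stack the layer on top of any feature extractor. With activation=None it
# emits num_classes - 1 ordinal logits per example, one per rank threshold.
if __name__ == '__main__':
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
        CoralOrdinal(num_classes=5),
    ])
    print(model(tf.random.normal((2, 4))).shape)  # -> (2, 4)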
| 3.5625 | 4 |
homu/tests/test_pr_body.py | jtgeibel/homu | 89 | 12785798 | <reponame>jtgeibel/homu<filename>homu/tests/test_pr_body.py<gh_stars>10-100
from homu.main import (
suppress_ignore_block,
suppress_pings,
IGNORE_BLOCK_START,
IGNORE_BLOCK_END,
)
def test_suppress_pings_in_PR_body():
body = (
"r? @matklad\n" # should escape
"@bors r+\n" # shouldn't
"<EMAIL>" # shouldn't
)
expect = (
"r? `@matklad`\n"
"`@bors` r+\n"
"<EMAIL>"
)
assert suppress_pings(body) == expect
def test_suppress_ignore_block_in_PR_body():
body = (
"Rollup merge\n"
"{}\n"
"[Create a similar rollup](https://fake.xyz/?prs=1,2,3)\n"
"{}"
)
body = body.format(IGNORE_BLOCK_START, IGNORE_BLOCK_END)
expect = "Rollup merge\n"
assert suppress_ignore_block(body) == expect
| 2.21875 | 2 |
hw3/09/9.py | dutta-alankar/PH-354-2018-IISc-Assignment-Problems | 9 | 12785799 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 17:53:39 2019
@author: alankar
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.special.orthogonal import p_roots #Legendre Polynomial roots
from scipy import constants
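# Gauss-Legendre quadrature of func over [a, b] with n+1 nodes: p_roots gives
# the nodes x and weights w on [-1, 1], which are affinely mapped onto [a, b].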
def gauss_quad(func,a,b,n,*args):#Legendre
[x,w] = p_roots(n+1)
I_G = 0.5*(b-a)*np.sum(w*func(0.5*(b-a)*x+0.5*(b+a),*args))
return I_G
V = 1000*1e-6 #m^3
rho = 6.022e28 #m^-3
thetaD = 428 #K
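# Debye model of the lattice heat capacity:
#   C_V(T) = 9 V rho k_B (T/theta_D)^3 * int_0^{theta_D/T} x^4 e^x / (e^x - 1)^2 dx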
def CV(T):
N = 50
f = lambda x:(x**4*np.exp(x))/(np.exp(x)-1)**2
return 9*V*rho*constants.k*(T/thetaD)**3*gauss_quad(f,0,thetaD/T,N)
Temperature = np.linspace(5,500,1000)
Heat_cap = np.array([CV(T) for T in Temperature])
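# Sanity check (added illustration): for T well above theta_D the Debye
# result approaches the classical Dulong-Petit limit of 3 N k_B.
dulong_petit = 3 * V * rho * constants.k
print(f'C_V({Temperature[-1]:.0f} K) = {Heat_cap[-1]:.1f} J/K '
      f'vs Dulong-Petit limit {dulong_petit:.1f} J/K')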
plt.figure(figsize=(13,10))
plt.plot(Temperature,Heat_cap)
plt.grid()
plt.title(r'Debye Heat capacity $C_V(T)$ in a solid',size=25,y=1.02)
plt.xlabel(r'$T$',size=22)
plt.ylabel(r'$C_V(T)$',size=22)
plt.tick_params(axis='both', which='major', labelsize=15)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.savefig('9.png')
plt.show() | 2.640625 | 3 |
src/product/admin.py | ivanjo39191/ivankao-erp | 0 | 12785800 | from django.contrib import admin
from django import forms
from django.db.models import Sum
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
from .models import *
from .forms import *
# Register your models here.
class RelationalProductInline(admin.TabularInline):
search_fields = ['product']
model = SalesOrder.product.through
verbose_name = '商品名稱'
form = RelationalProductForm
extra = 2
fields = ('product', 'retail_price', 'number', 'discount','total')
readonly_fields = ('retail_price', 'total',)
suit_classes = 'suit-tab suit-tab-general'
autocomplete_fields = ['product']
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
def get_sales_volume(self, obj):
return sum(obj.salesorder_set.through.objects.filter(product=obj).values_list('number', flat=True))
def get_inventory_volume(self, obj):
return obj.purchase_volume - sum(obj.salesorder_set.through.objects.filter(product=obj).values_list('number', flat=True))
# list_display = ('name', 'retail_price', 'special_price', 'purchase_volume', 'sales_volume', 'inventory_volume')
search_fields = ['name']
list_display = ('name', 'retail_price', 'purchase_volume', 'get_sales_volume', 'get_inventory_volume')
get_sales_volume.short_description = '銷售量'
get_inventory_volume.short_description = '庫存量'
@admin.register(SalesOrder)
class SalesOrderAdmin(admin.ModelAdmin):
def get_product(self, obj):
return "、".join([p.name for p in obj.product.all()])
list_display = ('order_id', 'customer', 'get_product', 'date')
form = SalesOrderForm
inlines = [RelationalProductInline,]
change_form_template = "admin/product/export_changeform.html"
autocomplete_fields = ['customer']
    # def response_add(self, request, obj, post_url_continue=None):
    #     return redirect(f'/product/export/{obj.order_id}')
def response_change(self, request, obj):
if "_export" in request.POST:
obj.save()
return redirect(f'/product/export/?order_id={obj.order_id}')
return super().response_change(request, obj)
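    # The "_export" branch above relies on the overridden change-form
    # template (admin/product/export_changeform.html) rendering a submit
    # button named "_export"; /product/export/ is assumed to be a view
    # defined elsewhere in this app.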
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
search_fields = ['name']
list_display = ('name', 'tax_id', 'phone', 'address') | 1.945313 | 2 |
sympymod/__init__.py | mathcube7/sympymod | 0 | 12785801 | import types
import sys
import sympy
from .equation import *
from .pick import *
from .calculus import *
from functools import wraps
this_module = sys.modules[__name__]
def _get_imported_names(module):
names = module.__all__ if hasattr(module, '__all__') else dir(module)
return [name for name in names if not name.startswith('_')]
def _wrap_function(func):
@wraps(func)
def f(*args, **kwargs):
        if args and isinstance(args[0], Eq):
if len(args) > 1:
other_args = tuple(args[1:])
else:
other_args = ()
return args[0].apply('both', func, *other_args, **kwargs)
else:
return func(*args, **kwargs)
return f
_names_from_sympy = _get_imported_names(sympy)
for name in _names_from_sympy:
obj = getattr(sympy, name)
if isinstance(obj, types.FunctionType) or isinstance(obj, sympy.FunctionClass):
setattr(this_module, name, _wrap_function(obj))
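# Illustrative sketch (added here, not part of the original module): after
# the loop above, every wrapped sympy function transparently applies itself
# to both sides of an Eq from the local .equation module, e.g.:
#
#   eq = Eq(x + 1, y)
#   expand(eq)          # routed through eq.apply('both', sympy.expand)
#   expand(x*(x + 1))   # plain expressions still call sympy directly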
| 2.453125 | 2 |
phantasy/library/exception/__init__.py | phantasy-project/phantasy | 0 | 12785802 | from .err import CSVFormatError, DataError
from .exceptions import TimeoutError
from .exceptions import PutFinishedException
| 1.226563 | 1 |
ikats/objects/tests/test_operator.py | IKATS/ikats_api | 0 | 12785803 | <filename>ikats/objects/tests/test_operator.py
# -*- coding: utf-8 -*-
"""
Copyright 2019 CS Systèmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest import TestCase
from ikats import IkatsAPI
from ikats.exceptions import IkatsNotFoundError
from ikats.objects import InOutParam
class TestOperator(TestCase):
"""
Test Operator object
"""
def test_nominal(self):
"""
Get an Operator instance
"""
api = IkatsAPI()
op = api.op.get(name="slope_spark")
self.assertIsNotNone(op.op_id)
self.assertIsNotNone(op.desc)
self.assertIsNotNone(op.label)
self.assertEqual(1, len(op.inputs))
self.assertEqual(InOutParam, type(op.inputs[0]))
self.assertEqual(0, len(op.parameters))
self.assertEqual(1, len(op.outputs))
self.assertEqual(InOutParam, type(op.outputs[0]))
with self.assertRaises(IkatsNotFoundError):
api.op.results(pid=0)
| 2.53125 | 3 |
fault_injector/io/task.py | AlessioNetti/fault_injector | 8 | 12785804 | """
MIT License
Copyright (c) 2018 AlessioNetti
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from fault_injector.network.msg_builder import MessageBuilder
class Task:
"""
Struct-like class for easy access to task-related parameters
"""
# Hardcoded value to represent Tasks that have no bounded duration
VALUE_DUR_NO_LIM = 0
def __init__(self, args='', timestamp=0, duration=0, seqNum=0, isFault=False, cores='0'):
self.args = args
self.timestamp = timestamp
self.duration = duration
self.seqNum = seqNum
self.isFault = isFault
self.cores = cores
@staticmethod
def dict_to_task(entry):
"""
Converts a dictionary to a Task object. Mind that the dictionary MUST contain all of the attributes in the Task
class, with the same naming
:param entry: a dictionary
:return: a Task object
"""
if not isinstance(entry, dict):
return None
t = Task()
try:
for a in vars(t):
v_type = type(getattr(t, a))
if entry[a] is not None:
v = v_type(entry[a]) if v_type != bool else entry[a] == 'True'
else:
v = None
setattr(t, a, v)
return t
except KeyError:
return None
@staticmethod
def task_to_dict(task):
"""
Performs reverse conversion, from Task to dictionary
:param task: the task object
:return: the output dictionary
"""
if not isinstance(task, Task):
return None
d = {}
for a in vars(task):
d[a] = getattr(task, a)
return d
@staticmethod
def msg_to_task(msg):
"""
Converts a dictionary created by MessageBuilder to a Task object
:param msg: the input dictionary
:return: the Task object
"""
if not isinstance(msg, dict):
return None
t = Task()
t.args = msg[MessageBuilder.FIELD_DATA]
t.isFault = msg[MessageBuilder.FIELD_ISF]
t.seqNum = msg[MessageBuilder.FIELD_SEQNUM]
t.timestamp = msg[MessageBuilder.FIELD_TIME]
t.duration = msg[MessageBuilder.FIELD_DUR]
t.cores = msg[MessageBuilder.FIELD_CORES] if MessageBuilder.FIELD_CORES in msg else None
return t
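
# Illustrative round trip (a sketch, not part of the original module): values
# typically arrive as strings (e.g. from a CSV reader), which is why
# dict_to_task casts every field and compares booleans to the literal 'True'.
if __name__ == '__main__':
    entry = {'args': 'stress --cpu 4', 'timestamp': '30', 'duration': '10',
             'seqNum': '1', 'isFault': 'True', 'cores': '0'}
    task = Task.dict_to_task(entry)
    assert task is not None and task.isFault and task.duration == 10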
| 2.03125 | 2 |
xos/synchronizer/steps/sync_exampletenant.py | pan2za/exampleservice | 0 | 12785805 | import os
import sys
from synchronizers.new_base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
from synchronizers.new_base.modelaccessor import *
from xos.logger import Logger, logging
parentdir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, parentdir)
logger = Logger(level=logging.INFO)
class SyncExampleTenant(SyncInstanceUsingAnsible):
provides = [ExampleTenant]
observes = ExampleTenant
requested_interval = 0
template_name = "exampletenant_playbook.yaml"
service_key_name = "/opt/xos/synchronizers/exampleservice/exampleservice_private_key"
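    # Re-run this sync step when CoarseTenant or ServiceMonitoringAgentInfo
    # objects change (the latter is handled by the watch callback below).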
watches = [ModelLink(CoarseTenant,via='coarsetenant'), ModelLink(ServiceMonitoringAgentInfo,via='monitoringagentinfo')]
def __init__(self, *args, **kwargs):
super(SyncExampleTenant, self).__init__(*args, **kwargs)
def get_exampleservice(self, o):
if not o.provider_service:
return None
exampleservice = ExampleService.objects.filter(id=o.provider_service.id)
if not exampleservice:
return None
return exampleservice[0]
# Gets the attributes that are used by the Ansible template but are not
# part of the set of default attributes.
def get_extra_attributes(self, o):
fields = {}
fields['tenant_message'] = o.tenant_message
exampleservice = self.get_exampleservice(o)
fields['service_message'] = exampleservice.service_message
return fields
def delete_record(self, port):
# Nothing needs to be done to delete an exampleservice; it goes away
# when the instance holding the exampleservice is deleted.
pass
def handle_service_monitoringagentinfo_watch_notification(self, monitoring_agent_info):
if not monitoring_agent_info.service:
logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
return
if not monitoring_agent_info.target_uri:
logger.info("handle watch notifications for service monitoring agent info...ignoring because target_uri attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
return
objs = ExampleTenant.objects.all()
for obj in objs:
if obj.provider_service.id != monitoring_agent_info.service.id:
logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is not matching" % (monitoring_agent_info))
return
instance = self.get_instance(obj)
if not instance:
logger.warn("handle watch notifications for service monitoring agent info...: No valid instance found for object %s" % (str(obj)))
return
logger.info("handling watch notification for monitoring agent info:%s for ExampleTenant object:%s" % (monitoring_agent_info, obj))
#Run ansible playbook to update the routing table entries in the instance
fields = self.get_ansible_fields(instance)
fields["ansible_tag"] = obj.__class__.__name__ + "_" + str(obj.id) + "_monitoring"
fields["target_uri"] = monitoring_agent_info.target_uri
template_name = "monitoring_agent.yaml"
super(SyncExampleTenant, self).run_playbook(obj, fields, template_name)
| 1.976563 | 2 |
models/game_states.py | bumasi/matchTaker | 0 | 12785806 | <filename>models/game_states.py<gh_stars>0
"""Module defining game-state and its operations.
A game-state is a sequence of rows containing matches.
If rows are numbered from 1 .. 5, then row n must contain 0..n matches.
Here, rows are indexed from 0 .. 4, thus row with index k must contain 0 .. k+1 matches.
The rows are represented by a list.
A game-state is called "normalized", if the rows are sorted in ascending order,
i.e. match-count in row k <= match-count in row k+1.
The lexicographic order ist defined for game-states, e.g. [1,0,0,4,5] < [1,1,0,0,0].
Therefore, a list containing game-states, can be sorted.
"""
from __future__ import annotations # for type annotations with forward references
from typing import List # for type annotations
import copy
import functools
from utils import permutations
class Error(Exception):
"""Class for exceptions of this module."""
# todo: import from a module basic_defs
@classmethod
def check(cls, condition, *args):
"""Check condition and raise exception if it does not hold."""
if not condition:
raise cls(*args)
class GameMove:
"""A move in the game consists in selecting a row and taking off some matches.
At least 1 match and at most 3 matches must be taken.
Attributes:
row_index: int
Index of selected row.
match_count: int
Number of matches to take off the selected row
"""
def __init__(self, row_index: int, match_count: int):
assert row_index in range(5)
assert match_count in range(1, 3+1)
self.row_index: int = row_index
self.match_count: int = match_count
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.row_index == other.row_index and self.match_count == other.match_count
else:
return NotImplemented
Rows = List[int]
# For the rows of a game-state
@functools.total_ordering # uses __eq__ and __lt__ to generate the comparison operators
class GameState:
"""Models a game-state and its operations.
Attributes:
rows: Rows
A list of integers, representing a valid game state.
Trying to create an instance with an invalid game-state raises Error.
"""
def __init__(self, rows: Rows):
# Check and convert input
        Error.check(isinstance(rows, list), "rows must be a list")
Error.check(len(rows) == 5, "rows must have length 5")
self.rows: Rows = []
for k in range(5):
x = rows[k]
Error.check(isinstance(x, int), "rows must contain integers only")
Error.check(0 <= x <= 5, 'rows must consist of digits in 0..5')
Error.check(x <= k+1, f"row at index {k} must contain <= {k + 1} matches")
self.rows.append(x)
Error.check(sum(self.rows) > 0, 'rows must contain at least 1 match')
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.rows == other.rows
else:
return NotImplemented
def __lt__(self, other):
# lexicographic ordering is used
if isinstance(other, self.__class__):
return self.rows < other.rows
else:
return NotImplemented
def __str__(self):
s = f"[{self.rows[0]}"
for k in range(1, len(self.rows)):
s += f"{self.rows[k]}"
s += "]"
return s
def get_rows(self) -> Rows:
"""Return a copy of the internal row list."""
return copy.deepcopy(self.rows)
def get_total_count(self) -> int:
"""Return total count of all matches."""
return sum(self.rows)
def normalize(self) -> permutations.Permutation:
"""Sort the internal row list in ascending order and return the permutation that undoes this sorting."""
self.rows, p = permutations.Permutation.sorted(self.rows)
return p.inv()
def is_normalized(self) -> bool:
"""Return True iff the internal row list is sorted in ascending order."""
test_list = [self.rows[k] <= self.rows[k+1] for k in range(4)]
return all(test_list)
def denormalize(self, p: permutations.Permutation) -> None:
"""Reset the internal rows list to the state before normalization.
:param p: The permutation that was returned by the call to normalize()
"""
self.rows = p.apply(self.rows)
def is_possible_move(self, move: GameMove) -> bool:
"""Return true iff move can be applied to self.
:param move: the move, that should be applied to self.
:return: result of the test
"""
return (move.match_count <= self.rows[move.row_index]) and (move.match_count < sum(self.rows))
# Note: the 2nd condition handles the case, where all matches are in 1 row.
def make_move(self, move: GameMove) -> GameState:
"""Apply the move to self and return the new game-state.
Assumption: the move is possible.
More precisely: take move.match_count matches off the row index move.row_index.
:param move: move to apply.
:return: resulting game-state.
"""
assert self.is_possible_move(move)
new_rows = self.get_rows()
new_rows[move.row_index] -= move.match_count
return GameState(new_rows)
def normalized_successors(self) -> List[GameState]:
"""Return the list of all possible normalized successors.
Assumption: self is a normalized game state.
:return: list of all normalized game states that can be generated from self with 1 move.
Example: [0, 0, 1, 2, 2] will return [0, 0, 0, 2, 2], [0, 0, 1, 1, 2], [0, 0, 0, 1, 2]
"""
assert self.is_normalized()
result = []
max_count = min(3, sum(self.rows)-1)
# later a list of lists may be used, therefore this double loop
for count in range(1, max_count+1):
temp = []
for k in range(5):
if count <= self.rows[k]:
game_state = self.make_move(GameMove(k, count))
game_state.normalize()
if game_state not in temp:
temp.append(game_state)
result = result + temp
return result
def get_move(self, game_state: GameState) -> GameMove:
"""Return a move which turns self into an intermediate game state, whose normalization is equal to game_state.
Example: get_move(12345,12235) == (3,2) because 12345 --> 12325 --> 12235
Assumption: (1) self and game_state are normalized
(2) game_state is a successor of self
:param game_state: the game_state to generate with the move and following normalization.
:return: the move.
"""
assert self.is_normalized()
assert game_state.is_normalized()
assert game_state in self.normalized_successors()
# match_count: the difference of the total counts of matches
match_count = self.get_total_count() - game_state.get_total_count()
# row_index: the first from right that has changed
candidates = [k for k in range(5) if self.rows[k] != game_state.rows[k]]
assert len(candidates) > 0
row_index = candidates[-1]
# check todo: unit-test
move = GameMove(row_index, match_count)
temp_state = self.make_move(move)
temp_state.normalize()
assert temp_state == game_state
# return result
return move
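if __name__ == "__main__":
    # Minimal demo (illustrative, not part of the original module API;
    # assumes utils.permutations behaves as documented above).
    gs = GameState([1, 0, 0, 4, 5])
    undo = gs.normalize()                  # rows become [0, 0, 1, 4, 5]
    print(gs, gs.get_total_count())        # -> [00145] 10
    for succ in gs.normalized_successors():
        print(succ)                        # e.g. [00045], [00135], ...
    gs.denormalize(undo)                   # back to [1, 0, 0, 4, 5]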
| 3.484375 | 3 |
app/models/component_group.py | arxcdr/silverback | 0 | 12785807 | """
Component Group Model
"""
# Third Party Library
from django.db import models
class ComponentGroup(models.Model):
UPTIME_CHOICES = (
('on', 'ON'),
('off', 'OFF')
)
name = models.CharField(max_length=100, verbose_name="Name")
description = models.CharField(max_length=200, verbose_name="Description")
uptime = models.CharField(max_length=50, choices=UPTIME_CHOICES, default="off", verbose_name="Uptime")
created_at = models.DateTimeField(auto_now_add=True, verbose_name="Created at")
updated_at = models.DateTimeField(auto_now=True, verbose_name="Updated at")
class Meta:
db_table = "app_component_group"
| 2.234375 | 2 |
get_diff_coverage2.py | poweredbygrow/coverage_tools | 0 | 12785808 | <filename>get_diff_coverage2.py<gh_stars>0
#!/usr/bin/env python3
import argparse
import math
import re
import subprocess # nosec
import xml.etree.ElementTree as element_tree # nosec
from collections import Counter, OrderedDict
from itertools import chain
IGNORED_PACKAGES = [".venv/", "target/"]
def _parse_coverage(file_name):
return element_tree.parse(file_name).getroot() # nosec
def _get_coverage_map(tree, file):
"""
Get a map of which lines are covered for a specified file
:param tree: an element tree for the coverage report xml
:param file: the file to find coverage for
:return: a map of int -> boolean with a key of lines we have info for and value of whether or not it's covered
"""
# skip files that are in an ignored package or in the root directory
if any(file.startswith(package) for package in IGNORED_PACKAGES) or "/" not in file:
return None
# split file into package/filename as defined in jacoco's report
file_info = re.search(r"(.*)\/(.*\.py)", file)
# skip files that don't match this, for example templates/info.html
if not file_info or len(file_info.groups()) != 2:
return None
package, file_name = file_info.groups()
# look up the source file info in the report
lookup = 'package[@name="{}"]/classes/class[@name="{}"]'.format(
package.replace("/", "."), file_name
)
source_tree = tree.find(lookup)
if not source_tree:
print("Couldn't find a test coverage file for " + lookup)
return []
# search for lines with coverage information
coverage_map = {}
for line in source_tree.find("lines").findall("line"):
line_number = int(line.attrib["number"])
coverage_map[line_number] = line.attrib["hits"] == "1"
return coverage_map
def _get_git_diff(commit):
"""Get a diff between a specified commit(or branch) and HEAD"""
return (
subprocess.check_output(["git", "diff", commit, "HEAD", "-U0"]) # nosec
.decode(errors="ignore")
.strip()
)
def _get_lines_changed(line_summary):
"""
Parse the line diff summary into a list of numbers representing line numbers added or changed
:param line_summary: the summary from a git diff of lines that have changed (ex: @@ -1,40 +1,23 @@)
:return: a list of integers indicating which lines changed for that summary
"""
lines = re.search(r"\@\@.*?\+(.+?) \@\@", line_summary).group(1)
if "," in lines:
start, count = [int(x) for x in lines.split(",")]
return list(range(start, start + count))
return [int(lines)]
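# Illustrative: _get_lines_changed("@@ -1,40 +1,23 @@") -> [1, 2, ..., 23]
#               _get_lines_changed("@@ -5 +7 @@")      -> [7]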
def _parse_file_diff(diff):
"""Parse a single file's diff, return an object of that files name and the lines changed"""
file_name_info = re.search(r".*\+\+\+ b/(.+?)\s+", diff)
if not file_name_info or not file_name_info.group(1):
return None
file_name = file_name_info.group(1)
# find mapping of which lines where changed
diff_line_summaries = re.findall(r"\@\@.*?\@\@", diff)
# add line
added_lines = list(
chain.from_iterable([_get_lines_changed(s) for s in diff_line_summaries])
)
return {"file": file_name, "lines_changed": added_lines}
def _parse_diff(diff):
"""Parse the raw diff string into a set of objects containing the file name and changed lines"""
file_diffs = re.split(r"\ndiff --git ", diff)
return [
file_info
for file_info in [_parse_file_diff(file_diff) for file_diff in file_diffs]
if file_info is not None
]
def _reconcile_coverage(change, coverage_map):
"""
Given an object with change and the coverage map for that file, produce information about coverage on lines
changed.
:param change: an object containing the file name and list of changed/added lines
:param coverage_map: a map int->boolean of line numbers to coverage status
:return: a counter of covered/uncovered/ignored lines
"""
line_stats = Counter()
for line in change["lines_changed"]:
if line not in coverage_map:
line_stats["ignored"] += 1
else:
if coverage_map[line]:
line_stats["covered"] += 1
else:
line_stats["uncovered"] += 1
return line_stats
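# Illustrative: lines_changed=[3, 4, 9] with coverage_map={3: True, 4: False}
# yields Counter({'covered': 1, 'uncovered': 1, 'ignored': 1}).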
def get_coverage(line_stats):
denominator = line_stats["covered"] + line_stats["uncovered"]
if denominator == 0:
return None
return float(line_stats["covered"]) / denominator
def get_lines_to_display(file, buffer, content):
lines_to_display = []
for line in file["uncovered_lines"]:
        for i in range(max(1, line - buffer), min(len(content), line + buffer) + 1):
if i not in lines_to_display:
lines_to_display.append(i)
return lines_to_display
def get_coverage_icons(lines_to_display, covered_lines, file):
coverage = {}
for line in lines_to_display:
if line not in covered_lines:
coverage[line] = " "
elif line in file["lines_changed"]:
coverage[line] = (
"✅" if line in covered_lines and covered_lines[line] else "❌"
)
else:
coverage[line] = (
"✔️ " if line in covered_lines and covered_lines[line] else "✖️ "
)
return OrderedDict(sorted(coverage.items()))
def get_file_message(file, buffer):
# create file -> (list ranges of number)
name = file["file"]
covered_lines = file["coverage"]
with open(name) as source_file:
content = source_file.readlines()
lines_to_display = get_lines_to_display(file, buffer, content)
coverage_icons = get_coverage_icons(lines_to_display, covered_lines, file)
groups = []
for i in coverage_icons.keys():
if not groups or i > groups[-1][-1] + 1:
groups.append([i])
else:
groups[-1].append(i)
if groups:
message = f"🚗 {name}\n"
for group in groups:
for line in group:
message += f"\t{coverage_icons[line]} {str(line)}\t\t{content[line - 1][:-1]}\n"
message += "\n"
return message
return ""
def get_untested_line_info(diff_changes, coverage_report, buffer):
"""Gets a message which contains untested lines in the commit"""
untested_lines = []
for change in diff_changes:
coverage_map = _get_coverage_map(coverage_report, change["file"])
# no coverage = entirely untested
if coverage_map is None:
continue
uncovered_lines = [
line
for line in change["lines_changed"]
if line in coverage_map and not coverage_map[line]
]
untested_lines.append(
{
"file": change["file"],
"lines_changed": change["lines_changed"],
"uncovered_lines": uncovered_lines,
"coverage": coverage_map,
}
)
return "\n".join([get_file_message(file, buffer) for file in untested_lines])
def get_required_lines_for_coverage(target_coverage, total_coverage, line_stats):
missing_coverage = target_coverage - total_coverage
line_count = line_stats["covered"] + line_stats["uncovered"]
return math.ceil(missing_coverage * line_count)
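# Worked example (illustrative): target 0.80, total 0.50, 10 covered+uncovered
# lines -> ceil(0.30 * 10) = 3 more covered lines needed.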
def get_diff_coverage(coverage_xml, commit, target_coverage):
"""
Given the coverage xml and a commit to diff against, find the percent of lines added/changed that were
covered
"""
diff_changes = _parse_diff(_get_git_diff(commit))
coverage_report = _parse_coverage(coverage_xml).find("packages")
file_stats = {}
line_stats = Counter()
# find coverage across git diff
for change in diff_changes:
coverage_map = _get_coverage_map(coverage_report, change["file"])
if coverage_map is not None:
file_stats[change["file"]] = _reconcile_coverage(change, coverage_map)
line_stats += file_stats[change["file"]]
total_coverage = get_coverage(line_stats)
if total_coverage is None:
# if you can't match any, assume adding tests
print("Couldn't get any coverage!")
total_coverage = 1
message = None
if total_coverage < target_coverage:
lines_required = get_required_lines_for_coverage(
target_coverage, total_coverage, line_stats
)
message = (
f"\n❗Coverage of {100*total_coverage}% did not meet target of {100*target_coverage}%.❗\n"
+ f"❗You require at least {lines_required} more lines of coverage❗\n\n"
+ get_untested_line_info(diff_changes, coverage_report, 4)
) # buffer size here is arbitrary
return total_coverage * 100, file_stats, message
def get_total_coverage(coverage_xml):
coverage_report = _parse_coverage(coverage_xml)
return float(coverage_report.attrib["line-rate"]) * 100
def main():
parser = argparse.ArgumentParser()
parser.add_argument("coverage_xml", help="The coverage report xml file")
parser.add_argument("commit", help="The commit hash or branch to diff against")
parser.add_argument("target_coverage", help="The target coverage percent")
args = parser.parse_args()
coverage, _, message = get_diff_coverage(
args.coverage_xml, args.commit, float(args.target_coverage)
)
print(f"Coverage={coverage}%")
if message:
print(message)
if __name__ == "__main__":
main()
| 2.703125 | 3 |
project/migrations/0006_post_like_num.py | tatsuki5820iso/birth-pro | 0 | 12785809 | <gh_stars>0
# Generated by Django 2.2.1 on 2019-07-06 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0005_like'),
]
operations = [
migrations.AddField(
model_name='post',
name='like_num',
field=models.IntegerField(default=0),
),
]
| 1.453125 | 1 |
messaging_components/services/service_artemis.py | fgiorgetti/qpid-dispatch-tests | 0 | 12785810 | <filename>messaging_components/services/service_artemis.py
import posixpath
import re
import logging
import time
from enum import Enum
from iqa_common.executor import Command, Execution, ExecutorAnsible, CommandAnsible, Executor
from iqa_common.utils.tcp_util import TcpUtil
from messaging_abstract.component import ServiceFake, ServiceStatus
class ServiceArtemis(ServiceFake):
"""
Implementation of a Artemis pseudo-service to manage a Server component.
"""
MAX_ATTEMPTS = 10
DELAY = 3
_logger = logging.getLogger(__name__)
def __init__(self, name: str, executor: Executor, **kwargs):
super().__init__(name, executor)
self.name = "artemis-service"
self.ansible_host = kwargs.get("ansible_host", "localhost")
self.service_default_port = kwargs.get("artemis_port", "61616")
self.service_web_port = kwargs.get("broker_web_port", "8161")
self.service_path = posixpath.join(kwargs.get("broker_path"), "bin", "artemis-service")
self.service_username = kwargs.get("broker_service_user", "jamq")
class ServiceSystemState(Enum):
STARTED = ('start', 'started')
STOPPED = ('stop', 'stopped')
RESTARTED = ('restart', 'restarted')
def __init__(self, system_state, ansible_state):
self.system_state = system_state
self.ansible_state = ansible_state
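        # Illustrative: ServiceSystemState.STARTED.system_state == 'start'
        # and ServiceSystemState.STARTED.ansible_state == 'started'.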
def status(self) -> ServiceStatus:
"""
Returns the service status based on linux service.
:return: The status of this specific service
:rtype: ServiceStatus
"""
# service output :
# is running
# is stopped
# systemctl output:
# (running)
# (dead)
# On RHEL7> service is automatically redirected to systemctl
cmd_status = Command(['runuser', '-l', self.service_username, '%s status' % self.service_path], stdout=True, timeout=self.TIMEOUT)
execution = self.executor.execute(cmd_status)
if not execution.read_stdout():
ServiceArtemis._logger.debug("Service: %s - Status: FAILED" % self.name)
return ServiceStatus.FAILED
service_output = execution.read_stdout()
        if re.search(r'(is running|\(running\)|Running)', service_output):
ServiceArtemis._logger.debug("Service: %s - Status: RUNNING" % self.name)
return ServiceStatus.RUNNING
        elif re.search(r'(is stopped|\(dead\)|Stopped)', service_output):
ServiceArtemis._logger.debug("Service: %s - Status: STOPPED" % self.name)
return ServiceStatus.STOPPED
ServiceArtemis._logger.debug("Service: %s - Status: UNKNOWN" % self.name)
return ServiceStatus.UNKNOWN
def start(self, wait_for_messaging=False) -> Execution:
execution = self.executor.execute(self._create_command(self.ServiceSystemState.STARTED))
self._wait_for_messaging(wait_for_messaging)
return execution
def stop(self) -> Execution:
return self.executor.execute(self._create_command(self.ServiceSystemState.STOPPED))
def restart(self, wait_for_messaging=False) -> Execution:
execution = self.executor.execute(self._create_command(self.ServiceSystemState.RESTARTED))
self._wait_for_messaging(wait_for_messaging)
return execution
def _wait_for_messaging(self, messaging_wait=False):
# Wait until broker web port is available
self.__tcp_wait_for_accessible_port(self.service_web_port, self.ansible_host)
# Or also messaging subsystem goes up
if messaging_wait:
self.__tcp_wait_for_accessible_port(self.service_default_port, self.ansible_host)
@staticmethod
def __tcp_wait_for_accessible_port(port, host):
for attempt in range(ServiceArtemis.MAX_ATTEMPTS):
if attempt == ServiceArtemis.MAX_ATTEMPTS - 1:
print(" broker is not reachable after %d attempts" % ServiceArtemis.MAX_ATTEMPTS)
if TcpUtil.is_tcp_port_available(int(port), host):
return True
time.sleep(ServiceArtemis.DELAY)
ServiceArtemis._logger.warning("Unable to connect to hostname:port: %s:%s" % (host, port))
return False
    def _create_command(self, service_state: ServiceSystemState):
        """
        Creates a Command instance based on executor type and state
        that is specific to each type of command.
        :param service_state: target state of the service
        :return: a CommandAnsible for Ansible executors, otherwise a plain Command
        """
        command = 'runuser -l %s %s %s' % (self.service_username, self.service_path, service_state.system_state)
        if isinstance(self.executor, ExecutorAnsible):
            return CommandAnsible(command,
                                  ansible_module='command',
                                  stdout=True,
                                  timeout=self.TIMEOUT)
        else:
            return Command(command.split(), stdout=True, timeout=self.TIMEOUT)
| 2.046875 | 2 |
knoema/view_definitions.py | FromPerm/knoema-python-driver | 8 | 12785811 | class Dimension:
def __init__(self):
self.key = None
self.id = None
self.name = None
self.isGeo = None
self.datasetId = None
self.fields = []
self.members = None
class Field:
    """A single dimension field, populated from a field-info dictionary."""
    def __init__(self, field_info):
self.key = field_info['key']
self.name = field_info['name']
self.displayName = field_info['displayName']
self.type = field_info['type']
self.locale = field_info['locale']
self.baseKey = field_info['baseKey']
self.isSystemField = field_info['isSystemField']
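# Illustrative field_info payload (keys match the constructor; values are made up):
#   Field({'key': 'region', 'name': 'Region', 'displayName': 'Region',
#          'type': 'string', 'locale': 'en', 'baseKey': None,
#          'isSystemField': False})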
| 2.6875 | 3 |
services/core-api/app/api/securities/models/bond_type.py | bcgov/mds | 25 | 12785812 | <reponame>bcgov/mds
from sqlalchemy.schema import FetchedValue
from app.extensions import db
from app.api.utils.models_mixins import Base, AuditMixin
class BondType(Base, AuditMixin):
__tablename__ = "bond_type"
bond_type_code = db.Column(db.String, nullable=False, primary_key=True)
description = db.Column(db.String, nullable=False)
active_ind = db.Column(db.Boolean, nullable=False, server_default=FetchedValue())
def __repr__(self):
return '<BondType %r>' % self.bond_type_code
@classmethod
def get_all(cls):
return cls.query.all()
| 2.15625 | 2 |
keras/downstream_tasks/config.py | joeranbosma/ModelsGenesis | 574 | 12785813 | import os
import shutil
import csv
import random
class bms_config:
arch = 'Vnet'
# data
data = '/mnt/dataset/shared/zongwei/BraTS'
csv = "data/bms"
deltr = 30
input_rows = 64
input_cols = 64
input_deps = 32
crop_rows = 100
crop_cols = 100
crop_deps = 50
# model
optimizer = 'adam'
lr = 1e-3
patience = 30
verbose = 1
batch_size = 16
workers = 1
max_queue_size = workers * 1
nb_epoch = 10000
def __init__(self, args):
self.exp_name = self.arch + '-' + args.suffix
if args.data is not None:
self.data = args.data
if args.suffix == 'random':
self.weights = None
elif args.suffix == 'genesis':
self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
elif args.suffix == 'genesis-autoencoder':
self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
elif args.suffix == 'genesis-nonlinear':
self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
elif args.suffix == 'genesis-localshuffling':
self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
elif args.suffix == 'genesis-outpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
elif args.suffix == 'genesis-inpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
elif args.suffix == 'denoisy':
self.weights = 'pretrained_weights/denoisy.h5'
elif args.suffix == 'patchshuffling':
self.weights = 'pretrained_weights/patchshuffling.h5'
elif args.suffix == 'hg':
self.weights = 'pretrained_weights/hg.h5'
else:
raise
train_ids = self._load_csv(os.path.join(self.csv, "fold_1.csv")) + self._load_csv(os.path.join(self.csv, "fold_2.csv"))
random.Random(4).shuffle(train_ids)
self.validation_ids = train_ids[:len(train_ids) // 8]
self.train_ids = train_ids[len(train_ids) // 8:]
self.test_ids = self._load_csv(os.path.join(self.csv, "fold_3.csv"))
self.num_train = len(self.train_ids)
self.num_validation = len(self.validation_ids)
self.num_test = len(self.test_ids)
# logs
self.model_path = os.path.join("models/bms", "run_"+str(args.run))
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
self.logs_path = os.path.join(self.model_path, "logs")
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
def _load_csv(self, foldfile=None):
assert foldfile is not None
patient_ids = []
with open(foldfile, 'r') as f:
reader = csv.reader(f, lineterminator='\n')
patient_ids.extend(reader)
for i, item in enumerate(patient_ids):
patient_ids[i] = item[0]
return patient_ids
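    # Illustrative: a fold CSV whose rows are "BraTS19_0001" and "BraTS19_0002"
    # yields ['BraTS19_0001', 'BraTS19_0002'] (the IDs here are made up).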
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)) and '_ids' not in a:
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
class ecc_config:
arch = 'Vnet'
# data
data = '/mnt/dfs/zongwei/Academic/MICCAI2020/Genesis_PE/dataset/augdata/VOIR'
csv = "data/ecc"
clip_min = -1000
clip_max = 1000
input_rows = 64
input_cols = 64
input_deps = 64
# model
optimizer = 'adam'
lr = 1e-3
patience = 38
verbose = 1
batch_size = 24
workers = 1
max_queue_size = workers * 1
nb_epoch = 10000
num_classes = 1
def __init__(self, args=None):
self.exp_name = self.arch + '-' + args.suffix + '-cv-' + str(args.cv)
if args.data is not None:
self.data = args.data
if args.suffix == 'random':
self.weights = None
elif args.suffix == 'genesis':
self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
elif args.suffix == 'genesis-autoencoder':
self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
elif args.suffix == 'genesis-nonlinear':
self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
elif args.suffix == 'genesis-localshuffling':
self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
elif args.suffix == 'genesis-outpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
elif args.suffix == 'genesis-inpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
elif args.suffix == 'denoisy':
self.weights = 'pretrained_weights/denoisy.h5'
elif args.suffix == 'patchshuffling':
self.weights = 'pretrained_weights/patchshuffling.h5'
elif args.suffix == 'hg':
self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unknown suffix: ' + args.suffix)
# logs
assert args.subsetting is not None
self.model_path = os.path.join("models/ecc", "run_"+str(args.run), args.subsetting)
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
self.logs_path = os.path.join(self.model_path, "logs")
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
self.patch_csv_path = 'Patch-20mm-cv-'+str(args.cv)+'-features_output_2_iter-100000.csv'
self.candidate_csv_path = 'Candidate-20mm-cv-'+str(args.cv)+'-features_output_2_iter-100000.csv'
self.csv_froc = 'features_output_2_iter-100000.csv'
def display(self):
print("Configurations")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self,a)):
print("{:30} {}".format(a,getattr(self,a)))
#print("\n")
class ncc_config:
arch = 'Vnet'
# data
data = '/mnt/dataset/shared/zongwei/LUNA16/LUNA16_FPR_32x32x32'
train_fold=[0,1,2,3,4]
valid_fold=[5,6]
test_fold=[7,8,9]
hu_min = -1000
hu_max = 1000
input_rows = 64
input_cols = 64
input_deps = 32
# model
optimizer = 'adam'
lr = 1e-3
patience = 10
verbose = 1
batch_size = 24
workers = 1
max_queue_size = workers * 1
nb_epoch = 10000
num_classes = 1
def __init__(self, args=None):
self.exp_name = self.arch + '-' + args.suffix
if args.data is not None:
self.data = args.data
if args.suffix == 'random':
self.weights = None
elif args.suffix == 'genesis':
self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
elif args.suffix == 'genesis-autoencoder':
self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
elif args.suffix == 'genesis-nonlinear':
self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
elif args.suffix == 'genesis-localshuffling':
self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
elif args.suffix == 'genesis-outpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
elif args.suffix == 'genesis-inpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
elif args.suffix == 'denoisy':
self.weights = 'pretrained_weights/denoisy.h5'
elif args.suffix == 'patchshuffling':
self.weights = 'pretrained_weights/patchshuffling.h5'
elif args.suffix == 'hg':
self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unknown suffix: ' + args.suffix)
# logs
self.model_path = os.path.join("models/ncc", "run_"+str(args.run))
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
self.logs_path = os.path.join(self.model_path, "logs")
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
def display(self):
print("Configurations")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self,a)):
print("{:30} {}".format(a,getattr(self,a)))
#print("\n")
class ncs_config:
arch = 'Vnet'
# data
data = '/mnt/dataset/shared/zongwei/LIDC'
input_rows = 64
input_cols = 64
input_deps = 32
# model
optimizer = 'adam'
lr = 1e-3
patience = 50
verbose = 1
batch_size = 16
workers = 1
max_queue_size = workers * 1
nb_epoch = 10000
def __init__(self, args):
self.exp_name = self.arch + '-' + args.suffix
if args.data is not None:
self.data = args.data
if args.suffix == 'random':
self.weights = None
elif args.suffix == 'genesis':
self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
elif args.suffix == 'genesis-autoencoder':
self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
elif args.suffix == 'genesis-nonlinear':
self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
elif args.suffix == 'genesis-localshuffling':
self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
elif args.suffix == 'genesis-outpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
elif args.suffix == 'genesis-inpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
elif args.suffix == 'denoisy':
self.weights = 'pretrained_weights/denoisy.h5'
elif args.suffix == 'patchshuffling':
self.weights = 'pretrained_weights/patchshuffling.h5'
elif args.suffix == 'hg':
self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unknown suffix: ' + args.suffix)
# logs
self.model_path = os.path.join("models/ncs", "run_"+str(args.run))
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
self.logs_path = os.path.join(self.model_path, "logs")
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
class lcs_config:
arch = 'Vnet'
# data
data = '/mnt/dfs/zongwei/Academic/MICCAI2019/Data/LiTS/3D_LiTS_NPY_256x256xZ'
nii = '/mnt/dataset/shared/zongwei/LiTS/Tr'
obj = 'liver'
train_idx = [n for n in range(0, 100)]
valid_idx = [n for n in range(100, 115)]
test_idx = [n for n in range(115, 130)]
num_train = len(train_idx)
num_valid = len(valid_idx)
num_test = len(test_idx)
hu_max = 1000
hu_min = -1000
input_rows = 64
input_cols = 64
input_deps = 32
# model
optimizer = 'adam'
lr = 1e-2
patience = 20
verbose = 1
batch_size = 16
workers = 1
max_queue_size = workers * 1
nb_epoch = 10000
def __init__(self, args):
self.exp_name = self.arch + '-' + args.suffix
if args.data is not None:
self.data = args.data
if args.suffix == 'random':
self.weights = None
elif args.suffix == 'genesis':
self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
elif args.suffix == 'genesis-autoencoder':
self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
elif args.suffix == 'genesis-nonlinear':
self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
elif args.suffix == 'genesis-localshuffling':
self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
elif args.suffix == 'genesis-outpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
elif args.suffix == 'genesis-inpainting':
self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
elif args.suffix == 'denoisy':
self.weights = 'pretrained_weights/denoisy.h5'
elif args.suffix == 'patchshuffling':
self.weights = 'pretrained_weights/patchshuffling.h5'
elif args.suffix == 'hg':
self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unknown suffix: ' + args.suffix)
# logs
self.model_path = os.path.join("models/lcs", "run_"+str(args.run))
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
self.logs_path = os.path.join(self.model_path, "logs")
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)) and '_idx' not in a:
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
| 2.0625 | 2 |
hysynth/pwl/adaptation/__init__.py | HySynth/HySynth | 4 | 12785814 | from .modecreation import wrapper_adapt_ha as adapt_ha
from .relaxation import relax_ha
| 1.0625 | 1 |
code/candy_crush.py | ajhalthor/candy-crush | 1 | 12785815 | <filename>code/candy_crush.py
import copy
import signal
import time
class V:
"""The value of a node
A node consists of 2 values:
- value (int): score of obtained till end of game
- move (tuple): the candy crushed
"""
def __init__(self, player):
if player == 'min':
self.value = 1000000
self.move = ()
else:
self.value = -1000000
self.move = ()
class Node:
"""Nodes of the Tree that represent MAX and MIN players"""
def __init__(self, player, board, alpha, beta):
"""Node Constructor
Args:
- player (str): MAX or MIN
- board (list of list of `str`): NxN board of candies.
- alpha (int): Best end score of MAX
- beta (int): Best end score of MIN
Node:
value of node `v` consists of 2 components shown in class `V`.
"""
self.player = player
self.board = board
self.alpha = alpha
self.beta = beta
self.v = V(player)
self.visited = False #Node has not been traversed
class Graph:
"""Graph spcifically for crushing candies
Graph is used for board manipulation and determining possible moves.
"""
def __init__(self, N, p, g):
"""Graph Constructor
-
Args:
- N (int): Board Dimension
- p (int): Number of unique candies
- g (list of list of `str`): Instance of input board
"""
self.nrows = N
self.ncols = N
self.graph = g
self.p = p
self.p_arr = []
def isSafe(self, i, j, visited, l):
"""A function to check if a given cell (row, col) can be included in DFS
Args:
- i (int): row of candy
- j (int): column of candy
- visited(list of list of `boolean`): True if cell has been checked
Returns:
boolean: whether to include a cell in the next DFS iteration.
"""
        # row and column are in range, the cell holds candy `l` and is not yet visited
return (i >= 0 and i < self.nrows and
j >= 0 and j < self.ncols and
not visited[i][j] and self.graph[i][j]==l)
def DFS(self, i, j, visited, current_score, l, safe_path):
"""A utility function to do DFS for a 2D boolean matrix. It only considers the 4 neighbours as adjacent vertices.
Args:
- i (int): cell row
- j (int): cell column
- visited (list of list of boolean): Each cell determines if cell has been visited
- current_score(int): Number of candies crused from the move
- l (int): Candy number
- safe_path (list of tuples): List of crushed candies from the move.
Returns:
Tuple: (current_score, safe_path) as described above.
"""
# These arrays are used to get row and column numbers of 4 neighbours of a given cell
        rowNbr = [-1, 0, 0, 1]
        colNbr = [0, -1, 1, 0]
# Mark this cell as visited
visited[i][j] = True
current_score+=1
#See what other nodes became points
safe_path.append((i, j))
# Recur for all connected neighbours
for k in range(0, 4):
if self.isSafe(i + rowNbr[k], j + colNbr[k], visited, l):
current_score, safe_path = self.DFS(i + rowNbr[k], j + colNbr[k], visited, current_score, l, safe_path)
return (current_score, safe_path)
def countIslands(self):
"""The main function that returns the number of possible moves on a board"""
# Initialize count as 0 and travese through the all cells of given matrix
count = 0
for l in range(0, 10):
# Make - a bool array to mark visited cells. Initially all cells are unvisited
visited = [[False for j in range(self.ncols)]for i in range(self.nrows)]
for i in range(self.nrows):
for j in range(self.ncols):
# If a cell with value 1 is not visited yet, then new island found
if self.graph[i][j] == str(l) and visited[i][j] == False:
current_score = 0
safe_path = []
# Visit all cells in this island and increment island count
current_score, safe_path = self.DFS(i, j, visited, current_score, str(l), safe_path)
count += 1
self.p_arr.append((str(l), current_score, (i,j), safe_path))
return count
def make_move(self, row, col):
"""Select a candy and crush similar adjacent candies to make a move.
Args:
- row (int): row of selected candy
- col (int): Column of selected candy
Returns:
- current_score (int): Number of candies crushed
- safe_path (list of tuples): The `current_score` candies crushed.
"""
current_score = 0
safe_path = []
visited = [[False for j in range(self.ncols)]for i in range(self.nrows)]
l = self.graph[row][col]
# Visit all cells in this island and increment island count
current_score, safe_path = self.DFS(row, col, visited, current_score, l, safe_path)
return (current_score, safe_path)
def crush_candies(board, path):
"""Once a move is made, the crushed candies are replaced with `*`s. This function flushes the candies down and bubbles up the *s to the top of every column
Args:
- board (list of list of str): board for which the move is determined but hasnt been applied.
- path (list of tuples): List of candies to be crushed
Returns:
Board with crushed candies and *s have bubbled up.
"""
# Let '*' represent crushed candies
for i, j in path:
board[i][j] = '*'
#Transpose because python doesn't work like an array.
#Transform list of rows to list of columns
board_T = list(map(list, zip(*board)))
#Move candies down every column
for col in range(0, N):
for row in range(0, N):
if board_T[col][row] == '*':
# Move star up
board_T[col] = [board_T[col][row]] + [board_T[col][:row]] + [board_T[col][row+1:]]
# Flatten to list from list of lists
board_T[col] = [item for sublist in board_T[col] for item in sublist]
# Transpose back to get the original board
board = list(map(list, zip(*board_T)))
return board
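# Transpose idiom used above (illustrative):
#   list(map(list, zip(*[[1, 2], [3, 4]]))) == [[1, 3], [2, 4]]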
def next_turn(iboard, player, alpha, beta, m=None, depth=None):
"""Performs the ab pruning
This recursively called function is the core that contains the ab pruning logic.
Args:
- iboard (list of list of str): Input board for node.
- player (str): MAX/MIN node
- alpha (int): Best end score for MAX
- beta (int): Best end score for MIN
- m (tuple, optional): Move made for current node
- depth (int, optional): Current depth of node in ab tree
Returns:
Tuple to the parent consisting of:
- :obj:v.move: the optimal move
- :obj:v.value: optimal end score.
Note:
1. Current score of board is the square of number of candies crushed.
Eg. If 5 candies are crushed by MAX, he gets 25 points.
2. Value of node = (Sum of MAX children scores) - (Sum of MIN children scores)
E.g. If We have a board with the following scores for subsequent moves:
MAX : 25
MIN : 36
MAX : 16
MIN : 4
MAX : 25
Then the value of the parent = 25 - 36 + 16 - 4 + 25 = 26.
"""
global max_depth_touched
#Create current node
node = Node(board=iboard, player=player, alpha=alpha, beta=beta)
node.depth = depth
g = Graph(N, p, iboard[:])
moves = g.countIslands()
#Check if any moves can be made
if moves == 0:
return (m ,0)
# Sort paths in descending number of candies crushed. This will help early pruning
new_sorted = sorted(g.p_arr, reverse=True, key=lambda x: x[1])
#For every g, make the move
for candy, score, move ,path in new_sorted:
board = crush_candies(copy.deepcopy(g.graph), path)
if (node.depth >= max_turns): #or moves == 0:
max_depth_touched = True
if player=='min':
return (move, -score*score)
return (move, score*score)
else:
node.visited = True
new_player = 'max'
if player == 'max':
new_player = 'min'
v_move, v_value = next_turn(board, new_player, node.alpha, node.beta, move, node.depth+1)
if player == 'max':
if node.v.value < v_value+score*score:
node.v.value = v_value+score*score
node.v.move = move
if node.v.value >= node.beta:
return (move, node.v.value)
node.alpha = max(node.alpha, node.v.value)
else:
if node.v.value > v_value-score*score:
node.v.value = v_value-score*score
node.v.move = move
if node.v.value <= node.alpha:
return (move, node.v.value)
node.beta = min(node.beta, node.v.value)
return (node.v.move, node.v.value)
def handler(signum, frame):
"""SIGALRM handler
Once the time alloted for the move has passed, the ab pruning stops and this function is called. It takes the best move computed thus far and writes it to an output file.
"""
#print("Optimal End score : ", max_score)
# Columns are alphabets
alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
alpha_dict = {i: alphabets[i-1] for i in range(1, N+1)}
g = Graph(N, p, input_board)
score, path = g.make_move(best_move[0], best_move[1])
board = crush_candies(input_board, path)
#Write to output file
with open('output.txt','w+') as out:
#Take the first node of the path as the selected candy
row = best_move[0]+1
col = alpha_dict[best_move[1]+1]
#Row is int, convert to str to write to file
out.write(col+str(row)+"\n")
for r in range(0, N):
for c in range(0, N):
out.write(str(board[r][c]))
out.write("\n")
exit()
if __name__ == '__main__':
start_time = time.time()
lines = tuple(open("input.txt", 'r'))
lines = [l.strip() for l in lines]
N = int(lines[0])
p = int(lines[1])
time_left = float(lines[2])
input_board = []
for i in range(0,N):
row = list(lines[3+i]) #[int(l) for l in list(lines[3+i])]
input_board.append(row)
# Redundancy to know time
g = Graph(N, p, input_board[:])
moves = g.countIslands()
time_per_move = int(time_left/moves)
max_turns = 2
best_move, max_score = next_turn(player='max', iboard=input_board, alpha=-1000000, beta=1000000, depth=1)
signal.signal(signal.SIGALRM, handler)
signal.alarm(time_per_move+1)
while True:
max_turns += 1
max_depth_touched = False
best_move, max_score = next_turn(player='max', iboard=input_board, alpha=-1000000, beta=1000000, depth=1)
# If the maximum depth hasn't been explored, then we've completed DFS.
if max_depth_touched == False:
handler(0,0)
| 3.765625 | 4 |
genomepy/annotation/__init__.py | vanheeringen-lab/genomepy | 146 | 12785816 | <reponame>vanheeringen-lab/genomepy<gh_stars>100-1000
"""Annotation class, modules & related functions"""
import os
import re
from pathlib import Path
from typing import Iterable, Optional, Union
import numpy as np
import pandas as pd
from loguru import logger
from genomepy.annotation.mygene import map_genes as _map_genes
from genomepy.annotation.mygene import query_mygene
from genomepy.annotation.sanitize import sanitize as _sanitize
from genomepy.annotation.utils import _check_property, _parse_annot, read_annot
from genomepy.providers import map_locations
from genomepy.utils import get_genomes_dir
__all__ = ["Annotation", "query_mygene", "filter_regex"]
class Annotation:
"""
Manipulate genes and whole gene annotations with pandas dataframes.
Parameters
----------
genome : str
Genome name.
name : str, optional
Name of annotation file.
If name is not specified the default annotation for the genome is used.
genomes_dir : str, optional
Genomes installation directory.
Returns
-------
object
attributes & methods to manipulate gene annotations
"""
# import methods
map_genes = _map_genes
sanitize = _sanitize
# lazy attributes (loaded when called)
# listed here for code autocompletion
bed: pd.DataFrame = None
"Dataframe with BED format annotation"
gtf: pd.DataFrame = None
"Dataframe with GTF format annotation"
named_gtf: pd.DataFrame = None
"Dataframe with GTF format annotation, with gene_name as index"
genome_contigs: list = None
"Contigs found in the genome fasta"
annotation_contigs: list = None
"Contigs found in the gene annotation BED"
def __init__(self, genome: str, name: str = None, genomes_dir: str = None):
self.genome = genome
self.genome_dir = os.path.join(get_genomes_dir(genomes_dir), genome)
if not os.path.exists(self.genome_dir):
raise ValueError(f"Genome {self.genome} not found!")
# annotation file provided
if name:
suffixes = Path(name).suffixes[-2:]
if ".bed" in suffixes or ".BED" in suffixes:
self.annotation_bed_file = name
elif ".gtf" in suffixes or ".GTF" in suffixes:
self.annotation_gtf_file = name
else:
raise NotImplementedError(
"Only (gzipped) bed and gtf files are supported at the moment!"
)
else:
# annotation files
self.annotation_gtf_file = _get_file(
self.genome_dir, f"{self.genome}.annotation.gtf"
)
self.annotation_bed_file = _get_file(
self.genome_dir, f"{self.genome}.annotation.bed"
)
# genome files
self.readme_file = _get_file(self.genome_dir, "README.txt", False)
self.genome_file = _get_file(self.genome_dir, f"{self.genome}.fa", False)
self.index_file = _get_file(self.genome_dir, f"{self.genome}.fa.fai", False)
self.sizes_file = _get_file(self.genome_dir, f"{self.genome}.fa.sizes", False)
# lazy attributes
def __getattribute__(self, name):
val = super(Annotation, self).__getattribute__(name)
if val is not None:
return val
# if the attribute is None/empty, check if it is a lazy attribute
if name == "bed":
_check_property(self.annotation_bed_file, f"{self.genome}.annotation.bed")
val = read_annot(self.annotation_bed_file)
setattr(self, name, val)
elif name == "gtf":
_check_property(self.annotation_gtf_file, f"{self.genome}.annotation.gtf")
val = read_annot(self.annotation_gtf_file)
setattr(self, name, val)
elif name == "named_gtf":
df = self.gtf[self.gtf.attribute.str.contains("gene_name")]
names = []
for row in df.attribute:
name = str(row).split("gene_name")[1].split(";")[0]
names.append(name.replace('"', "").replace(" ", ""))
df = df.assign(gene_name=names)
val = df.set_index("gene_name")
setattr(self, name, val)
elif name == "genome_contigs":
_check_property(self.sizes_file, f"{self.genome}.fa.sizes")
val = list(
set(pd.read_csv(self.sizes_file, sep="\t", header=None, dtype=str)[0])
)
setattr(self, name, val)
elif name == "annotation_contigs":
val = list(set(self.bed.chrom))
setattr(self, name, val)
return val
# lazily update attributes if upstream attribute is updated
def __setattr__(self, name, value):
if name == "bed":
self.annotation_contigs = None # noqa
elif name == "gtf":
self.named_gtf = None # noqa
elif name == "sizes_file":
self.genome_contigs = None # noqa
super(Annotation, self).__setattr__(name, value)
def genes(self, annot: str = "bed") -> list:
"""
Retrieve gene names from an annotation.
For BED files, names are taken from the 'name' columns.
For GTF files, names are taken from the 'gene_name' field
in the attribute column, if available.
Parameters
----------
annot : str, optional
Annotation file type: 'bed' or 'gtf' (default: "bed")
Returns
-------
list
gene names
"""
if annot.lower() == "bed":
return list(set(self.bed.name))
return list(set(self.named_gtf.index))
def gene_coords(self, genes: Iterable[str], annot: str = "bed") -> pd.DataFrame:
"""
Retrieve gene locations.
Parameters
----------
genes : Iterable
List of gene names as found in the given annotation file type
annot : str, optional
Annotation file type: 'bed' or 'gtf' (default: "bed")
Returns
-------
pandas.DataFrame
gene annotation
"""
gene_list = list(genes)
if annot.lower() == "bed":
df = self.bed.set_index("name")
gene_info = df[["chrom", "start", "end", "strand"]]
else:
df = self.named_gtf
# 1 row per gene
df = (
df.groupby(["gene_name", "seqname", "strand"])
.agg({"start": np.min, "end": np.max})
.reset_index(level=["seqname", "strand"])
)
gene_info = df[["seqname", "start", "end", "strand"]]
gene_info = gene_info.reindex(gene_list).dropna()
pct = int(100 * len(set(gene_info.index)) / len(gene_list))
if pct < 90:
logger.warning(
(f"Only {pct}% of genes was found. " if pct else "No genes found. ")
+ "A list of all gene names can be found with `Annotation.genes()`"
)
if annot.lower() == "bed":
return gene_info.reset_index()[["chrom", "start", "end", "name", "strand"]]
else:
return gene_info.reset_index()[
["seqname", "start", "end", "gene_name", "strand"]
]
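    # Usage sketch (the genome and gene names below are illustrative):
    #   ann = Annotation("GRCh38.p13")
    #   ann.gene_coords(["TP53", "BRCA1"], annot="bed")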
def map_locations(
self, annot: Union[str, pd.DataFrame], to: str, drop=True
) -> Union[None, pd.DataFrame]:
"""
Map chromosome mapping from one assembly to another.
Uses the NCBI assembly reports to find contigs.
Drops missing contigs.
Parameters
----------
annot : str or pd.Dataframe
annotation to map: "bed", "gtf" or a pandas dataframe.
to: str
target provider (UCSC, Ensembl or NCBI)
drop: bool, optional
if True, replace the chromosome column.
If False, add a 2nd chromosome column.
Returns
-------
pandas.DataFrame
chromosome mapping.
"""
genomes_dir = os.path.dirname(self.genome_dir)
mapping = map_locations(self.genome, to, genomes_dir)
if mapping is None:
return
df = _parse_annot(self, annot)
index_name = df.index.name
if not set([index_name] + df.columns.to_list()) & {"chrom", "seqname"}:
raise ValueError(
"Location mapping requires a column named 'chrom' or 'seqname'."
)
# join mapping on chromosome column and return with original index
is_indexed = df.index.to_list() != list(range(df.shape[0]))
if is_indexed:
df = df.reset_index(level=index_name)
index_col = "chrom" if "chrom" in df.columns else "seqname"
df = df.set_index(index_col)
df = mapping.join(df, how="inner")
df = df.reset_index(drop=drop)
df.columns = [index_col] + df.columns.to_list()[1:]
if is_indexed:
df = df.set_index(index_name if index_name else "index")
return df
def filter_regex(
self,
annot: Union[str, pd.DataFrame],
regex: Optional[str] = ".*",
invert_match: Optional[bool] = False,
column: Union[str, int] = 0,
) -> pd.DataFrame:
"""
Filter a dataframe by any column using regex.
Parameters
----------
annot : str or pd.Dataframe
annotation to filter: "bed", "gtf" or a pandas dataframe
regex : str
regex string to match
invert_match : bool, optional
keep contigs NOT matching the regex string
column: str or int, optional
column name or number to filter (default: 1st, contig name)
Returns
-------
pd.DataFrame
filtered dataframe
"""
df = _parse_annot(self, annot)
return filter_regex(df, regex, invert_match, column)
def _get_file(genome_dir: str, fname: str, warn_missing: Optional[bool] = True):
"""
Returns the filepath to a single (gzipped) file in the genome_dir with matching ext.
"""
fpath = os.path.join(genome_dir, fname)
if os.path.exists(fpath):
return fpath
if os.path.exists(f"{fpath}.gz"):
return f"{fpath}.gz"
if warn_missing:
logger.warning(
f"Could not find '{fname}(.gz)' in directory {genome_dir}. "
"Methods using this file won't work!"
)
return
def filter_regex(
df: pd.DataFrame,
regex: str,
invert_match: Optional[bool] = False,
column: Union[str, int] = 0,
) -> pd.DataFrame:
"""
Filter a pandas dataframe by a column (default: 1st, contig name).
Parameters
----------
df: pd.Dataframe
annotation to filter (a pandas dataframe)
regex : str
regex string to match
invert_match : bool, optional
keep contigs NOT matching the regex string
column: str or int, optional
column name or number to filter (default: 1st, contig name)
Returns
-------
pd.DataFrame
filtered dataframe
"""
if column not in df.columns:
if isinstance(column, int):
column = df.columns[column]
else:
raise ValueError(
f"Column '{column}' not found in annotation columns {list(df.columns)}"
)
pattern = re.compile(regex)
filter_func = df[column].map(lambda x: bool(pattern.match(x)) is not invert_match)
return df[filter_func]
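# Usage sketch (illustrative): keep only contigs named like "chr<number>":
#   main_chroms = filter_regex(some_bed_df, regex=r"chr\d+$", column="chrom")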
| 2.484375 | 2 |
config/generate.py | BYU-PCCL/conversational-ai | 4 | 12785817 | """Generates a gin config from the current task/mixture list.
Usage: `python3 -m config.generate`
"""
from itertools import chain, product
from pathlib import Path
import t5
import conversational_ai.tasks # noqa: F401
WHITELIST = ["chitchat", "dailydialog", "convai2"]
sizes = ["small", "base", "large", "3b", "11b"]
mixtures = filter(
lambda task: any(name in task for name in WHITELIST),
chain(t5.data.TaskRegistry.names(), t5.data.MixtureRegistry.names()),
)
for size, mixture in product(sizes, mixtures):
path = Path(f"./config/mixtures/{mixture}/{size}.gin")
print(path)
path.parent.mkdir(parents=True, exist_ok=True)
body = """include "finetune_{size}.gin"
MIXTURE_NAME = "{mixture}"
utils.run.model_dir = "./checkpoints/conversational-ai/{mixture}/{size}"
""".format(
size=size, mixture=mixture
)
path.write_text(body)
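# e.g. for size "small" and a mixture named "chitchat_v1" (illustrative), this
# writes config/mixtures/chitchat_v1/small.gin with the overrides built above.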
| 2.390625 | 2 |
GraphGeneration.py | arunkumaraqm/Prims-Algorithm-Using-Fibonacci-Heap | 1 | 12785818 | <reponame>arunkumaraqm/Prims-Algorithm-Using-Fibonacci-Heap
# Program to generate a test case (adjacency matrix) for Prim's and Krukal's algorithms
# The graph generated will be connected.
from random import shuffle, sample, randint
import sys
def read():
nfverts = int(input("Number of vertices: "))
density = int(input("Density (Medium 1, High 2 or Complete 3): "))
limit = float(input("Upper limit of edge weight: "))
limit = int(limit) if int(limit) else limit
return nfverts, density, limit
def print_mat(nfverts, mat, fname):
with open(fname, 'w') as fil:
fil.write(f"{nfverts}\n")
for i in mat[1:]:
for j in i[1:]:
fil.write(f"{j} ")
fil.write("\n")
def generate_random_spanning_tree(nfverts, mat, rint):
mylist = list(range(1, nfverts + 1))
shuffle(mylist)
visited = [mylist[0]]
for v in mylist[1:]:
u = sample(visited, 1)[0] # sample function returns a list
mat[u][v] = rint()
mat[v][u] = mat[u][v]
visited += [v]
def add_some_more_edges(nfverts, mat, probability_distro_func):
for i in range(1, nfverts + 1 - 1):
for j in range(i + 1, nfverts + 1):
if not mat[i][j]:
mat[i][j] = probability_distro_func()
mat[j][i] = mat[i][j]
if __name__ == "__main__":
nfverts, density, limit = read()
mat = [[0 for i in range(nfverts + 1)] \
for j in range(nfverts + 1)]
rint = lambda: randint(1, limit)
random_weight_lambdas = {
# density: random_weight_lambda
1: lambda: [rint(), 0, 0][randint(0,2)],
2: lambda: [rint(), rint(), 0][randint(0,2)],
3: lambda: [rint(), rint(), rint()][randint(0,2)]
}
probability_distro_func = random_weight_lambdas[density]
generate_random_spanning_tree(nfverts, mat, rint)
add_some_more_edges(nfverts, mat, probability_distro_func)
fname = f"Tests/Graph Test ({nfverts}, {density}, {limit}).txt"
print_mat(nfverts, mat, fname)
def automated_tests(nfverts, density, limit):
mat = [[0 for i in range(nfverts + 1)] \
for j in range(nfverts + 1)]
rint = lambda: randint(1, limit)
random_weight_lambdas = {
# density: random_weight_lambda
1: lambda: [rint(), 0, 0][randint(0,2)],
2: lambda: [rint(), rint(), 0][randint(0,2)],
3: lambda: [rint(), rint(), rint()][randint(0,2)]
}
probability_distro_func = random_weight_lambdas[density]
generate_random_spanning_tree(nfverts, mat, rint)
add_some_more_edges(nfverts, mat, probability_distro_func)
fname = f"Tests/Mode {density}/Graph Test ({nfverts}, {density}, {limit}).txt"
print_mat(nfverts, mat, fname)
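# Usage sketch: automated_tests(100, 2, 50) writes
# "Tests/Mode 2/Graph Test (100, 2, 50).txt" (the "Tests/Mode 2" directory must exist).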
| 3.8125 | 4 |
setup.py | fgolemo/segar | 19 | 12785819 | <gh_stars>10-100
from setuptools import setup, find_packages
setup(
author='<NAME>, <NAME>, <NAME>, <NAME>, '
'and <NAME>',
name='segar',
install_requires=[
"absl-py",
"numpy",
"scipy",
"aenum",
"matplotlib",
"opencv-python-headless",
"scikit-image",
"sklearn",
"h5py",
"torch",
"torchvision",
"tqdm",
"gym",
"POT",
"wandb"
],
extras_require={
'rl': [
"ray[default]",
"ray[rllib]",
]
},
packages=find_packages(),
version='0.1a',
include_package_data=True,
package_data={'': ['*.np']})
| 1.296875 | 1 |
setup.py | sashaafm/cloudbridge | 0 | 12785820 | <reponame>sashaafm/cloudbridge
import ast
import os
import re
from setuptools import setup, find_packages
# Cannot use "from cloudbridge import get_version" because that would try to
# import the six package which may not be installed yet.
reg = re.compile(r'__version__\s*=\s*(.+)')
with open(os.path.join('cloudbridge', '__init__.py')) as f:
for line in f:
m = reg.match(line)
if m:
version = ast.literal_eval(m.group(1))
break
base_reqs = ['bunch>=1.0.1', 'six>=1.10.0', 'retrying>=1.3.3']
openstack_reqs = ['python-novaclient>=2.33.0',
'python-glanceclient',
'python-cinderclient>=1.4.0',
'python-swiftclient>=2.6.0',
'python-neutronclient>=3.1.0',
'python-keystoneclient>=2.0.0']
aws_reqs = ['boto>=2.38.0']
full_reqs = base_reqs + aws_reqs + openstack_reqs
dev_reqs = (['tox>=2.1.1', 'moto>=0.4.20', 'sphinx>=1.3.1'] + full_reqs)
setup(name='cloudbridge',
version=version,
description='A simple layer of abstraction over multiple cloud'
'providers.',
author='Galaxy and GVL Projects',
author_email='<EMAIL>',
url='http://cloudbridge.readthedocs.org/',
install_requires=full_reqs,
extras_require={
':python_version=="2.7"': ['py2-ipaddress'],
':python_version=="3"': ['py2-ipaddress'],
'full': full_reqs,
'dev': dev_reqs
},
packages=find_packages(),
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'],
test_suite="test"
)
| 1.875 | 2 |
getNetCDFile.py | jmpmcmanus/adcirc2mbtiles | 0 | 12785821 | <filename>getNetCDFile.py
#!/usr/bin/env python
import sys, os, wget
def getDataFile(dirpath, storm, url):
# Create storm netcdf directory path
if not os.path.exists(dirpath+storm):
mode = 0o755
os.makedirs(dirpath+storm, mode)
# Get infilename and download netcdf file
infilename = url.strip().split('/')[-1]
outfilename = wget.download(url, dirpath+storm+'/'+infilename)
# Create storm tiff directory path
tifpath = dirpath.split('/')[0:-2]
tifpath.append('tiff')
tifpath = "/".join(tifpath)
if not os.path.exists(tifpath+'/'+storm):
mode = 0o755
os.makedirs(tifpath+'/'+storm, mode)
# Create storm mbtile directory path
mbtilepath = dirpath.split('/')[0:-2]
mbtilepath.append('mbtile')
mbtilepath = "/".join(mbtilepath)
if not os.path.exists(mbtilepath+'/'+storm):
mode = 0o755
os.makedirs(mbtilepath+'/'+storm, mode)
dirpath = '/home/mbtiles/storage/netCDF/'
storm = sys.argv[1]
url = sys.argv[2]
getDataFile(dirpath, storm, url)
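# Hypothetical command-line usage (the storm name and URL below are made up):
#
#   python getNetCDFile.py florence http://example.org/florence/maxele.63.nc
#
# This downloads the netCDF file into /home/mbtiles/storage/netCDF/florence/
# and pre-creates the matching tiff/ and mbtile/ output directories.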
| 2.953125 | 3 |
pipelines/p1_orca_by_stop.py | CSE482Winter2021/Major-Dudes | 0 | 12785822 | <gh_stars>0
import os
import pandas as pd
import numpy as np
import sklearn.neighbors as knn
from tqdm import tqdm
from scripts.census_reader import TractCensusReader
from utils import constants
NAME = 'p1_orca_by_stop'
KNN_LEAF_SIZE = 50
OUTPUT_FILENAME = f'{NAME}.csv'
WRITE_DIR = constants.PIPELINE_OUTPUTS_DIR
def load_inputs():
"""
Load orca->xy and xy->stops datasets, filter for rows we care about.
"""
# Load data
path1 = os.path.join(constants.DATA_DIR, 'orca_by_xy.csv')
path2 = os.path.join(constants.DATA_DIR, 'stops_to_xy.csv')
orca_df = pd.read_csv(path1)
stop_df = pd.read_csv(path2)
# Clean and filter
orca_df = orca_df[orca_df['boarding_longitude'].notnull()]
orca_df = orca_df[orca_df['boarding_latitude'].notnull()]
orca_df = orca_df[orca_df['boarding_type'] != 'Transfer']
orca_df = orca_df[orca_df['mode_abbrev'] == 'Bus']
orca_df = orca_df[orca_df['agency'] == 'KCM']
stop_df = stop_df[stop_df['source_agency_id'] == 4]
orca_df = orca_df.reset_index(drop=True)
stop_df = stop_df.reset_index(drop=True)
return orca_df, stop_df
def reduce_stop_df(stop_df):
"""
Aggregate stop_df by creating a mapping from unique stop IDs to its set of
associated route IDs (i.e., the routes which include the stop).
"""
col_keys = ['lon', 'lat', 'route_ids']
seen = dict()
for row in stop_df.to_numpy():
_, route_id, stop_id, lon, lat = tuple(row)
stop_id = int(stop_id)
route_id = int(route_id)
if stop_id not in seen:
seen[stop_id] = dict(zip(col_keys, [lon, lat, {route_id}]))
else:
seen[stop_id]['route_ids'].add(route_id)
# I haven't seen any cases where the update is different and taking
# the avg is needed, but let's keep it to be safe.
seen[stop_id]['lon'] = np.average([seen[stop_id]['lon'], lon])
seen[stop_id]['lat'] = np.average([seen[stop_id]['lat'], lat])
result = [
[stop_id] + [seen[stop_id][key] for key in col_keys]
for stop_id in seen
]
return pd.DataFrame(result, columns=['stop_id'] + col_keys)
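# Hedged illustration of reduce_stop_df (synthetic rows, not real GTFS data):
# two rows sharing stop 42 collapse into a single entry whose route_ids set is
# {10, 11} and whose lon/lat are averaged.
#
#   demo = pd.DataFrame(
#       [[4, 10, 42, -122.33, 47.61],
#        [4, 11, 42, -122.33, 47.61]],
#       columns=['source_agency_id', 'route_id', 'stop_id', 'lon', 'lat'])
#   reduce_stop_df(demo)  # -> one row for stop 42 with route_ids {10, 11}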
def add_census_data(stop_df):
"""
Maps each stop to its corresponding tract.
"""
result = []
reader = TractCensusReader()
bad_tracts = 0
bad_pops = 0
for row in tqdm(stop_df.to_numpy(), desc="Loading census data"):
lon, lat = (row[1], row[2])
tract = reader.xy_to_tract_num(lon, lat)
if tract == -1:
bad_tracts += 1
else:
pop = reader.get_tract_pop(tract)
if pop == -1:
bad_pops += 1
else:
# The row is good
result_row = np.concatenate((row, [tract, pop]))
result.append(result_row)
# Bad tracts typically mean that the point falls outside of King
# County. Not a huge deal. Bad pops, on the other hand, means that the
# point was found within a King County tract, but no population data was
# found for this tract. This is bad, but it might not be a huge deal. For
# now, we're just deleting rows corresponding to bad pops/tracts.
print(f'bad tracts: {bad_tracts}, bad pops: {bad_pops}')
cols = list(stop_df.columns) + ['tract_num', 'tract_population']
return pd.DataFrame(result, columns=cols)
def map_nearest_neighbors(orca_df, stop_df):
"""
Find the stop ID and route number for each entry in orca_df by the nearest
xy pair in stop_df.
"""
def get_pair(s_row):
return (float(s_row['lat']), float(s_row['lon']))
x = [get_pair(stop_df.iloc[i]) for i in range(stop_df.shape[0])]
tree = knn.KDTree(x, leaf_size=KNN_LEAF_SIZE)
# Columns:
# 0 'season', 1 'passenger_type', 2 'boarding_type', 3 'boarding_latitude',
# 4 'boarding_longitude', 5 'agency', 6 'mode_abbrev', 7 'product_type',
# 8 'time_period', 9 'day_type', 10 'boarding_count'
orca_arr = orca_df.to_numpy()
stop_arr = stop_df.to_numpy()
ids = []
n = len(orca_arr)
for i in tqdm(range(n), desc='Calculating nearest neighbors'):
o_row = orca_arr[i]
o_lat = o_row[3] # boarding_latitude
o_lon = o_row[4] # boarding_longitude
x_i = np.array([o_lat, o_lon]).reshape((1, -1))
# TODO some of these distances are really high. Look into it?
_, nn_index = tree.query(x_i, k=1)
nn_index = nn_index[0]
s_row = stop_arr[nn_index][0]
# stop_id, route_ids, tract_num, tract_population
ids.append((s_row[0], s_row[3], s_row[4], s_row[5]))
merged_arr = []
for i in range(n):
merged_row = list(orca_arr[i]) + list(ids[i])
merged_arr.append(merged_row)
cols = list(orca_df.columns) + [
'stop_id', 'route_ids', 'tract_num', 'tract_population'
]
return pd.DataFrame(merged_arr, columns=cols)
def run_pipeline():
"""
Creates a mapping from each entry in winter 2019 ORCA dataset to its
associated stop ID and route number and writes this mapping to disk.
"""
# Run pipeline
orca_df, stop_df = load_inputs()
stop_df = reduce_stop_df(stop_df)
stop_df = add_census_data(stop_df)
merged_df = map_nearest_neighbors(orca_df, stop_df)
# Write CSV
if not os.path.exists(WRITE_DIR):
os.mkdir(WRITE_DIR)
fpath = os.path.join(WRITE_DIR, OUTPUT_FILENAME)
merged_df.to_csv(fpath, index=False)
print(f'Wrote {OUTPUT_FILENAME} to {WRITE_DIR}')
if __name__ == '__main__':
run_pipeline()
| 2.640625 | 3 |
apps/contrib/response_codes.py | jimialex/django-wise-template-mysql | 1 | 12785823 | <reponame>jimialex/django-wise-template-mysql
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
# >> Providers
OPS_SERVER_UNAVAILABLE = {
'code': 'ops.ServerUnavailable',
'detail': _('Ops Server is unavailable.'),
}
INVALID_OPS_SERVER_REQUEST = {
'code': 'ops.InvalidRequest',
'detail': _('Ops Server invalid request.'),
}
SYNC_USER_ERROR = {
'code': 'ops.SyncUserError',
'detail': _('Sync User error.'),
}
SYNC_BASE_ERROR = {
'code': 'ops.SyncBaseError',
'detail': _('Sync Base error.'),
}
CORE_SERVER_UNAVAILABLE = {
'code': 'core.ServerUnavailable',
'detail': _('Core Server is unavailable.'),
}
MAP_SERVER_UNAVAILABLE = {
'code': 'maps.ServerUnavailable',
'detail': _('Maps Server is unavailable.'),
}
# >> SNS
NOT_SNS_REQUEST = {
'code': 'ops.NotSNSRequests',
    'detail': _('This resource is forbidden for non-SNS requests.'),
}
METHOD_NOT_ALLOWED = {
'code': 'sns.MethodNotAllowed',
'detail': _('This method is not allowed for SNS requests'),
}
INVALID_SNS_SIGNATURE = {
'code': 'ops.InvalidSNSSignature',
'detail': _('Invalid SNS Signature.'),
}
SNS_ENDPOINT_SUBSCRIBE_FAILED = {
'code': 'ops.SNSEndpointSubscribeFailed',
'detail': _('SNS endpoint subscribe failed.'),
}
SNS_ENDPOINT_SUBSCRIBE_CONFIRMED = {
'code': 'ops.SNSEndpointSubscribeConfirmed',
'detail': _('SNS endpoint subscribe confirmed.'),
}
| 1.734375 | 2 |
super_taxi/model/taxis.py | sanjayatb/taxi-booking-system | 0 | 12785824 | from super_taxi.model.generics import Vehicle, Coordinate
from super_taxi.model.cars import Car,SUVCar
class Taxi(Vehicle):
def __init__(self, id=None):
Vehicle.__init__(self, id=id)
self.position = Coordinate(0, 0)
self.ride = None
self.booked = False
self.pickup_distance = 0
def booked_for(self, ride):
self.ride = ride
self.booked = True
def is_booked(self):
return self.booked
def reset(self):
self.position = Coordinate(0, 0)
self.ride = None
self.booked = False
self.pickup_distance = 0
class TaxiCar(Car, Taxi):
def __init__(self, id=None):
Car.__init__(self, id)
Taxi.__init__(self, id)
class TaxiSuvCar(SUVCar, Taxi):
def __init__(self, id=None):
SUVCar.__init__(self, id)
Taxi.__init__(self, id)
| 2.984375 | 3 |
ms_teams_webhook_hook.py | mendhak/Airflow-MS-Teams-Operator | 47 | 12785825 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from airflow.hooks.http_hook import HttpHook
from airflow.exceptions import AirflowException
class MSTeamsWebhookHook(HttpHook):
"""
This hook allows you to post messages to MS Teams using the Incoming Webhook connector.
Takes both MS Teams webhook token directly and connection that has MS Teams webhook token.
If both supplied, the webhook token will be appended to the host in the connection.
:param http_conn_id: connection that has MS Teams webhook URL
:type http_conn_id: str
:param webhook_token: MS Teams webhook token
:type webhook_token: str
:param message: The message you want to send on MS Teams
:type message: str
:param subtitle: The subtitle of the message to send
:type subtitle: str
:param button_text: The text of the action button
:type button_text: str
:param button_url: The URL for the action button click
:type button_url : str
:param theme_color: Hex code of the card theme, without the #
:type message: str
:param proxy: Proxy to use when making the webhook request
:type proxy: str
"""
def __init__(self,
http_conn_id=None,
webhook_token=None,
message="",
subtitle="",
button_text="",
button_url="",
theme_color="00FF00",
proxy=None,
*args,
**kwargs
):
super(MSTeamsWebhookHook, self).__init__(*args, **kwargs)
self.http_conn_id = http_conn_id
self.webhook_token = self.get_token(webhook_token, http_conn_id)
self.message = message
self.subtitle = subtitle
self.button_text = button_text
self.button_url = button_url
self.theme_color = theme_color
self.proxy = proxy
def get_proxy(self, http_conn_id):
conn = self.get_connection(http_conn_id)
extra = conn.extra_dejson
print(extra)
return extra.get("proxy", '')
def get_token(self, token, http_conn_id):
"""
Given either a manually set token or a conn_id, return the webhook_token to use
:param token: The manually provided token
:param conn_id: The conn_id provided
:return: webhook_token (str) to use
"""
if token:
return token
elif http_conn_id:
conn = self.get_connection(http_conn_id)
extra = conn.extra_dejson
return extra.get('webhook_token', '')
else:
raise AirflowException('Cannot get URL: No valid MS Teams '
'webhook URL nor conn_id supplied')
def build_message(self):
cardjson = """
{{
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "{3}",
"summary": "{0}",
"sections": [{{
"activityTitle": "{1}",
"activitySubtitle": "{2}",
"markdown": true,
"potentialAction": [
{{
"@type": "OpenUri",
"name": "{4}",
"targets": [
{{ "os": "default", "uri": "{5}" }}
]
}}
]
}}]
}}
"""
return cardjson.format(self.message, self.message, self.subtitle, self.theme_color,
self.button_text, self.button_url)
def execute(self):
"""
        Execute the webhook call: resolve any configured proxy and POST the
        message card JSON to the MS Teams incoming-webhook endpoint.
"""
proxies = {}
proxy_url = self.get_proxy(self.http_conn_id)
print("Proxy is : " + proxy_url)
if len(proxy_url) > 5:
proxies = {'https': proxy_url}
self.run(endpoint=self.webhook_token,
data=self.build_message(),
headers={'Content-type': 'application/json'},
extra_options={'proxies': proxies})
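# Minimal usage sketch (hypothetical values; assumes an Airflow connection
# named "msteams_webhook" whose Extra field stores the webhook token):
#
#   hook = MSTeamsWebhookHook(
#       http_conn_id="msteams_webhook",
#       message="DAG my_dag succeeded",
#       subtitle="Run 2021-01-01",
#       button_text="View logs",
#       button_url="https://airflow.example.com/log",
#       theme_color="00FF00",
#   )
#   hook.execute()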
| 1.976563 | 2 |
profiles_api/urls.py | juliatan/django-rest-api | 0 | 12785826 | <filename>profiles_api/urls.py
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
# Specifically for ViewSet example
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset') # base_name is for internal purposes
router.register('profile', views.UserProfileViewSet) # don't need base name because we specified the queryset in the viewset. This defaults to the model name in question.
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
path('hello-view/', views.HelloApiView.as_view()), # as_view() allows APIView class to be rendered by our URLs
path('login/', views.UserLoginApiView.as_view()),
path('', include(router.urls)), # include is imported from django.urls. Blank string because we don't want to include a prefix.
] | 2.5625 | 3 |
pype9/utils/arguments.py | tclose/Pype9 | 0 | 12785827 | """
Author: <NAME> (<EMAIL>)
Copyright: 2012-2014 <NAME>.
License: This file is part of the "NineLine" package, which is released under
the MIT Licence, see LICENSE for details.
"""
# from pype9.utils.mpi import mpi_comm
import os.path
import nineml
import ninemlcatalog
from argparse import ArgumentTypeError
import pype9.utils.logging.handlers.sysout # @UnusedImport
CATALOG_PREFIX = 'catalog://'
def existing_file(fname):
if not os.path.isfile(fname):
raise ArgumentTypeError(
"'{}' does not refer to an existing file".format(fname))
return fname
def nineml_document(doc_path):
if doc_path.startswith(CATALOG_PREFIX):
model = ninemlcatalog.load(doc_path[len(CATALOG_PREFIX):])
else:
if (not doc_path.startswith('/') and
not doc_path.startswith('./') and
not doc_path.startswith('../')):
doc_path = './' + doc_path
model = nineml.read(doc_path, relative_to=os.getcwd())
return model
def nineml_model(model_path):
model = nineml_document(model_path)
if isinstance(model, nineml.Document):
model = model.as_network(
os.path.splitext(os.path.basename(model_path))[0])
return model
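# Hedged examples (both paths are hypothetical): nineml_model accepts catalog
# references via the CATALOG_PREFIX as well as filesystem paths.
#
#   model = nineml_model('catalog://network/Brunel2000/AI')
#   model = nineml_model('./my_network.xml')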
# Might be useful so have kept it here
#
# class randomseed(int):
#
# """
# Automatically generates unique random seeds if none are provided, as well
# as ensuring that unique seeds are passed to each MPI process
#
# Parameters
# ----------
# arg : int
# An existing seed to use
# mirror_mpi: bool
# Flags whether the seeds should be the same on different
# MPI nodes or not
# """
# counter = 0
#
# def __new__(cls, arg=None, mirror_mpi=False):
# if arg is None or arg == 'None' or int(arg) == 0:
# seed = int(time.time() * 256) + cls.counter
# cls.counter += 1
# else:
# seed = int(arg)
# # Ensure a different seed gets used on each MPI node
# if not mirror_mpi:
# seed = seed * mpi_comm.size + mpi_comm.rank
# return cls(seed)
| 2.03125 | 2 |
heritage/migrations/0032_interview_attendees.py | stewardshiptools/stewardshiptools | 0 | 12785828 | <reponame>stewardshiptools/stewardshiptools<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crm', '0004_auto_20160202_1146'),
('heritage', '0031_interview_other_interviewers'),
]
operations = [
migrations.AddField(
model_name='interview',
name='attendees',
field=models.ManyToManyField(to='crm.Person', related_name='interviews_attended'),
),
]
| 1.726563 | 2 |
testlibs/test_modules.py | kaistshadow/blockchain-sim | 1 | 12785829 | <reponame>kaistshadow/blockchain-sim<filename>testlibs/test_modules.py
#
# 2021-03-15
# created by <NAME>
#
import os
from subprocess import check_output
import argparse
import sys
import lxml.etree as ET
import subprocess
import math
sys.path.append("")
from testlibs import utils, test_result
def exec_shell_cmd(cmd):
if os.system(cmd) != 0:
print("error while executing '%s'" % cmd)
exit(-1)
def simulation_test_result(condition_count, node_count, test_name):
if condition_count == node_count:
print("Success %s ... " %test_name)
print("test result : %d/%d " %(condition_count,node_count))
else:
print("Fail %s ..." %test_name)
print("test result : %d/%d " %(condition_count,node_count))
sys.exit(1)
# --------------------------------------------------------------------------------------------------------------
# Regression test-01 - xmlGenerate test
# --------------------------------------------------------------------------------------------------------------
# xml existence test - If there is no file in the path, return fail
def test_xml_existence(output):
path = os.path.abspath(".")
target_folder = path + "/" + output
if os.path.isfile(target_folder):
print("Success xml existence test ...")
return target_folder
else:
print("Fail xml existence test ...")
sys.exit(1)
# --------------------------------------------------------------------------------------------------------------
# Regression test-02-1 - shadow test
# --------------------------------------------------------------------------------------------------------------
def test_shadow_output_file_existence(condition_number, node_id_list):
# plugin output check
for i in range(0,len(node_id_list)):
path_ = os.path.abspath(".")
path_ = path_ + "/shadow.data/hosts/" + node_id_list[i]
if os.path.isdir(path_):
pass
else:
print("%s file not existence shadow output file ... " %node_id_list[i])
# shadow output.txt
if condition_number == "regtest":
path = os.path.abspath(".")
else:
path = os.path.abspath("./shadow.data")
target_folder_file = path + "/output.txt"
if os.path.isfile(target_folder_file):
return target_folder_file
else:
print("Fail not existence shadow output file ... ")
# --------------------------------------------------------------------------------------------------------------
# Regression test-02-2 - shadow test
# --------------------------------------------------------------------------------------------------------------
# test1 : whether runtime setting worked or not
# test2 : whether plugin(node_id) worked or not
def test_shadow(output_file, runtime, node_id_list, shadow_output):
f = open(output_file, "r")
    # success requires result_count to reach len(node_id_list).
result_count = 0
return_count = 0
condition_runtime = 0
return_time = utils.get_time_form(runtime)
while True:
line = f.readline()
if not line: break
result = line.find("has set up the main pth thread")
if result != -1:
result = line.find("_process_start")
if result != -1:
for i in range(0,len(node_id_list)):
result = line.find(node_id_list[i])
if result != -1:
result_count += 1
break
        # Verify that the configured runtime setting took effect.
if condition_runtime == 0:
result = line.find(runtime)
if result != -1:
condition_runtime = 1
if result_count == len(node_id_list):
f.close()
print("Success shadow test ...")
return_count = 1
sys.exit(0)
else:
f.close()
print("shadow plugin error...")
sys.exit(1)
if return_count == 0:
f.close()
if result_count == len(node_id_list):
print("[shadow test] - runtime over ... ")
pass
else:
print("shadow runtime error .... ")
utils.filter_fail_shadow_test(shadow_output)
sys.exit(1)
else:
if condition_runtime != 1:
print("Fail shadow test - runtime fail ... ")
sys.exit(1)
pass
# --------------------------------------------------------------------------------------------------------------
# Regression test-02-2 - shadow test (emulation ver)
# --------------------------------------------------------------------------------------------------------------
# test1 : whether runtime setting worked or not
# test2 : whether plugin(node_id) worked or not
def emul_test_shadow(output_file, runtime, node_id_list, shadow_output):
complete_node = []
f = open(output_file, "r")
    # result_count is set to 1 once a configured node id is seen in the log.
result_count = 0
return_time = utils.get_time_form(runtime)
while True:
line = f.readline()
if not line: break
result = line.find("_process_start")
if result != -1:
result = line.find("has set up the main pth thread")
if result != -1:
result = line.find("bitcoind")
if result != -1:
complete_node.append(line.split(" ")[4].split("~")[1][:-1])
for i in range(0,len(node_id_list)):
result = line.find(node_id_list[i])
if result != -1:
result_count = 1
result = line.find(return_time)
if result != -1:
if result_count == 1:
f.close()
print("Success shadow test ...")
return complete_node, runtime
else:
f.close()
print("Fail shadow test] - runtime error ...")
utils.filter_fail_shadow_test(shadow_output)
sys.exit(1)
f.close()
print("[Fail shadow test] - plugin does not run ...")
sys.exit(1)
# --------------------------------------------------------------------------------------------------------------
# Regression test-03-1 - bitcoinApplication file existence test.
# --------------------------------------------------------------------------------------------------------------
# After shadow, check output data
def test_file_existence(node_id_list, plugin_list):
if len(node_id_list) != len(plugin_list):
sys.exit(1)
path = os.path.abspath(".")
target_folder_list = []
target_error_list = []
error_list = []
for i in range(0,len(node_id_list)):
target_path = path + "/shadow.data/hosts/" + node_id_list[i] + "/stdout-" + node_id_list[i] + "." + plugin_list[i] + ".1000.log"
target_folder_list.append(target_path)
target_path = path + "/shadow.data/hosts/" + node_id_list[i] + "/stderr-" + node_id_list[i] + "." + plugin_list[i] + ".1000.log"
target_error_list.append(target_path)
for i in range(0,len(target_folder_list)):
if os.path.isfile(target_folder_list[i]) == False:
print("Fail not existence shadow plugin output file - %s" %(target_folder_list[i]))
sys.exit(1)
        # If an error file exists, append its path to error_list.
if os.path.isfile(target_error_list[i]) == True:
error_list.append(target_error_list[i])
    # If any error files exist, print their contents and fail.
if len(error_list) != 0:
print("Fail shadow plugin running ...")
print("-------------- shadow plugin error contents --------------")
for i in range(0,len(error_list)):
f = open(error_list[i], "r")
while True:
line = f.readline().strip()
print(line)
if not line: break
f.close()
sys.exit(1)
print("Success blockchain test output file existence ...")
return target_folder_list
# --------------------------------------------------------------------------------------------------------------
# Regression test-03-2 - bitcoinApplication test
# --------------------------------------------------------------------------------------------------------------
# This test compares the shadow result log with standard args,
# and succeeds if all of the args are in the shadow result log.
def test_bitcoinApplication(output_file, args_standard, node_count):
j = 0
condition_count = 0
for z in range(0,int(node_count)):
f = open(output_file[z], "r")
while True:
line = f.readline()
if not line: break
for i in range(j,len(args_standard)):
result = line.find(args_standard[i])
if result != -1:
j += 1
f.close()
if j == len(args_standard):
condition_count += 1
simulation_test_result(condition_count, node_count, "test_bitcoinApplication")
sys.exit(0)
# --------------------------------------------------------------------------------------------------------------
# Regression test-04 - bitcoin difficulty test
# --------------------------------------------------------------------------------------------------------------
# Tests that the configured difficulty matches the simulated bitcoin difficulty.
def test_difficulty_compare(bitcoin_log, xml_difficulty, node_count):
condition_count = 0
for z in range(0,node_count):
difficulty = ""
f = open(bitcoin_log[z + node_count], "r")
while True:
line = f.readline()
if not line: break
result = line.find("difficulty")
if result != -1:
split_list = line.split(",")[4]
difficulty = split_list.split(":")[1]
break
f.close()
if str(xml_difficulty) == "1":
if difficulty == "1":
condition_count += 1
elif str(xml_difficulty) == "2":
if difficulty == "0.00390625":
condition_count += 1
elif str(xml_difficulty) == "3":
if difficulty == "0.0002441371325370145":
condition_count += 1
simulation_test_result(condition_count, node_count, "test_difficulty_compare test")
sys.exit(0)
# --------------------------------------------------------------------------------------------------------------
# Regression test-05 - wallet address test
# --------------------------------------------------------------------------------------------------------------
# bitcoin-cli validateaddress call
# Return information about the given bitcoin address.
def test_walletAddress(simulation_output_file, node_count):
condition_count = 0
for z in range(0,node_count):
f = open(simulation_output_file[z+node_count], "r")
while True:
line = f.readline()
if not line: break
result = line.find("isvalid")
if result != -1:
the_wallet_validation = line.split(",")[0].split('"')[4].split(":")[1]
if the_wallet_validation == "true":
condition_count += 1
continue
simulation_test_result(condition_count, node_count, "test_walletAddress ")
# --------------------------------------------------------------------------------------------------------------
# Regression test-06 - mining test
# --------------------------------------------------------------------------------------------------------------
# "height>0" means mining is activated and works good.
# If "height>0" is found at bitcoin log, mining works.
def test_mining(shadow_output_file, node_count):
condition_count = 0
for z in range(0,node_count):
f = open(shadow_output_file[z], "r")
while True:
line = f.readline()
if not line: break
            # height=8 in the log means at least eight blocks were mined.
result = line.find("height=8")
if result != -1:
condition_count += 1
break
simulation_test_result(condition_count, node_count, "mining test")
sys.exit(0)
# --------------------------------------------------------------------------------------------------------------
# Regression test-07 - bitcoin mainchain test
# --------------------------------------------------------------------------------------------------------------
# Get "bestblockchash" value info using rpc.output_file then check for the same value in bitcoin output log.
def test_MainChainInfo(shadow_output_file, rpc_output_file, node_count):
pork_count = 0
while True:
condition_count = utils.get_last_hashValue(shadow_output_file, rpc_output_file, node_count, pork_count)
if condition_count == node_count:
break
else:
pork_count += 1
if pork_count > 6:
print("Fail mainchain test ... (network separation) ")
elif pork_count == 0:
pass
else:
print("There is a fork ...")
condition_count = node_count
simulation_test_result(condition_count, node_count, "mainchain test")
sys.exit(0)
# --------------------------------------------------------------------------------------------------------------
# Regression test-08 - transaction test
# --------------------------------------------------------------------------------------------------------------
# If the rpc output file contains an error log, the transaction was not created properly.
def test_transaction_existence(simulation_output_file, node_count):
condition_count = 0
for z in range(0,node_count):
f = open(simulation_output_file[z+node_count], "r")
while True:
line = f.readline()
if not line: break
result = line.find('"error":null')
if result != -1:
condition_count += 1
break
f.close()
simulation_test_result(condition_count, node_count, "transaction test")
# --------------------------------------------------------------------------------------------------------------
# Regression test-09 - transaction count test (emulation ver)
# --------------------------------------------------------------------------------------------------------------
# Compares the configured number of transactions with the number of generated transactions.
def test_transaction_count(simulation_output_file):
txs_bitcoind = 0
blocks_count = 0
f = open(simulation_output_file[0], "r")
for line in f.readlines()[::-1]:
result = line.find("UpdateTip")
if result != -1:
split_list = line.split(" ")
for i in range(0,len(split_list)):
result = split_list[i].find("height=")
if result != -1:
blocks_count = int(split_list[i].split("=")[1])
continue
result = split_list[i].find("tx=")
if result != -1:
txs_bitcoind = int(split_list[i].split("=")[1])
break
if txs_bitcoind != 0:
break
f.close()
return txs_bitcoind
# --------------------------------------------------------------------------------------------------------------
# Regression test-09 - transaction count test (regtest ver)
# --------------------------------------------------------------------------------------------------------------
# count "sendtoaddress" rpc request in bitcoin log and get transaction counts in tx injector log.
# If the two are the same, true
def test_transaction_count_regtest(simulation_output_file):
txs_bitcoind = 0
blocks_count = 0
mempool_size = 0
condition_count = 0
f = open(simulation_output_file[0], "r")
for line in f.readlines()[::-1]:
result = line.find("UpdateTip")
if result != -1:
split_list = line.split(" ")
for i in range(0,len(split_list)):
result = split_list[i].find("height=")
if result != -1:
blocks_count = int(split_list[i].split("=")[1])
continue
result = split_list[i].find("tx=")
if result != -1:
txs_bitcoind = int(split_list[i].split("=")[1])
break
if txs_bitcoind != 0:
break
f.close()
# --------------------------------------------------------------------------------------------------------------
# Regression test-10 - initial coin test
# --------------------------------------------------------------------------------------------------------------
# Test to verify that the initial coins were generated.
def test_initialCoin(simulation_output_file):
f = open(simulation_output_file, "r")
block_count = 0
for line in f.readlines()[::-1]:
result = line.find("bestblockhash")
if result != -1:
f.close()
split_list = line.split(",")
for i in range(0,len(split_list)):
result = split_list[i].find("blocks")
if result != -1:
block_count = int(split_list[i].split(":")[1])
break
if block_count != 0:
f.close()
break
if block_count > 0:
print("Success InitalCoin test ...")
sys.exit(0)
else:
print("Fail InitalCoin test ...")
sys.exit(1)
# --------------------------------------------------------------------------------------------------------------
# Regression test-11 - peer connection test
# --------------------------------------------------------------------------------------------------------------
# When testing against bitcoin, the peers to connect to are configured via the "addnode" flag in the XML file.
# This test checks that those configured IP addresses match the peer IPs actually connected in the simulation, as reported by "getpeerinfo".
def test_peer_connection(plugin_output_files, IP_list, xml_file):
addnode_list = utils.get_addnode_list(IP_list, xml_file)
getpeerinfo_list = utils.get_peerinfo(plugin_output_files, IP_list)
result_count = 0
con_count = 0
for i in range(0,len(addnode_list)):
for j in range(0,len(addnode_list[i])):
if addnode_list[i][j] in getpeerinfo_list[i]:
con_count += 1
if len(addnode_list[i]) == con_count:
result_count += 1
con_count = 0
if result_count == len(addnode_list):
print("Success peer connection test ...")
sys.exit(0)
else:
print("fail peer connection test ...")
sys.exit(1)
# --------------------------------------------------------------------------------------------------------------
# Regression test-12 - load data file test
# --------------------------------------------------------------------------------------------------------------
# 1. If the plugin log does not contain a "Reindexing finished" message, the test fails.
# 2. Read the block hash values from coinflip_hash.txt and compare them with block hashes 1-7 in the plugin log.
def test_dumpfile_load(plugin_output_files, abs_path, difficulty):
print("dump test start ... ")
condition_count = 0
the_path = ""
ready_hash_list = []
f = open(plugin_output_files, "r")
while True:
line = f.readline()
if not line: break
result = line.find("Reindexing finished")
if result != -1:
condition_count = 1
f.close()
break
# check pork
event_value = 0
if condition_count == 1:
f = open(plugin_output_files, "r")
while True:
line = f.readline()
if not line: break
result = line.find("Disconnect block")
if result != -1:
event_value = 1
continue
if event_value == 1:
result = line.find("height=6")
if result != -1:
print("Fail dump file load ... ")
print("There are problems about initial block hashes ... ")
f.close()
sys.exit(1)
condition_count = 2
f.close()
if condition_count == 2:
the_path = utils.get_datadirDumpfile_path(abs_path, difficulty)
the_path = the_path + "/coinflip_hash.txt"
f = open(the_path, "r")
while True:
line = f.readline()
if not line: break
ready_hash_list.append(line.strip())
f.close()
i = 0
result_hash_list = []
f = open(plugin_output_files, "r")
while True:
line = f.readline()
if not line:break
result = line.find("UpdateTip")
if result != -1:
if ready_hash_list[i] == line.split(" ")[3].split("=")[1]:
i += 1
pass
                # Index 0 is the genesis block, so skip it.
elif i == 0:
continue
else:
print("Fail test dump file load ... ")
f.close()
sys.exit(1)
if i == 7:
print("Success test dump file load test ... ")
f.close()
break
else:
print("Fail load data file test ... (Not Reindexing finished)")
f.close()
sys.exit(1)
# --------------------------------------------------------------------------------------------------------------
# Regression test-13 - monitor node connection test
# --------------------------------------------------------------------------------------------------------------
# This tests the monitor node plugin among the plugins that get run.
# The monitor node oversees block propagation for every node of the simulated blockchain.
# The current test checks the connection between the simulated blockchain nodes and the monitor node.
def monitor_connection_test(plugin_output_files, abs_path, node_count):
condition_count = 0
# 실행되는 node 수 +
node_count = int(node_count) + 1
for i in range(0,len(plugin_output_files)):
result = plugin_output_files[i].find("monitor")
if result != -1:
f = open(plugin_output_files[i], "r")
while True:
line = f.readline()
if not line: break
result = line.find("Socket is successfully connected")
if result != -1:
condition_count += 1
if condition_count == node_count:
f.close()
break
f.close()
if condition_count == node_count:
print("Success monitor connection test ... ")
sys.exit(0)
print("Fail monitor conncetion test ... ")
print("only %d node connection ... " %condition_count)
sys.exit(1)
# --------------------------------------------------------------------------------------------------------------
# Regression test-14 monitor node block propagation test
# --------------------------------------------------------------------------------------------------------------
# The monitor node receives blocks propagated from the bitcoin nodes. This test compares the hashes of those
# propagated blocks with the block hashes recorded in the bitcoin output logs to verify that the monitor node received them correctly.
def Mointor_block_propagation_test(plugin_output_files, node_count, target_path):
each_node_block_list = utils.filter_block_hash(plugin_output_files, node_count)
monitor_result_path = target_path + "/shadow.data/monitor_result.txt"
f = open(monitor_result_path, "w")
for i in range(len(each_node_block_list)):
f.write("\t\t ---------- %d node monitor result ---------- \n\n" %i)
for j in range(len(each_node_block_list[i])):
f.write("Num %d block - %s\n" %(j, each_node_block_list[i][j]))
f.write("\n")
f.close()
node_blocklist = []
for i in range(node_count):
line = []
node_blocklist.append(line)
for i in range(0,len(plugin_output_files)):
result = plugin_output_files[i].find("stdout-bcdnode")
if result != -1:
node_blocklist[i] = test_result.filter_overlap_height(plugin_output_files[i])
for i in range(0,len(node_blocklist)):
for j in range(0,len(node_blocklist[i])-1):
            # The monitor node does not register the genesis block, so only propagated blocks are checked.
if node_blocklist[i][j+1][0] in each_node_block_list[i]:
pass
else:
print("Fail Monitor block propagation test ... ")
print("There is no block (%s) " %node_blocklist[i][j+1][0])
sys.exit(1)
print("Success Monitor block propagation test ... ")
sys.exit(0)
# --------------------------------------------------------------------------------------------------------------
# Regression test-15 - TxGenerator connection test
# --------------------------------------------------------------------------------------------------------------
# During node connection, the TxGenerator filters nodes based on the IP addresses defined in the XML file.
def TxGenerator_connection_test(plugin_output_files, xmlfile, node_count):
IP_list = utils.get_address_list(xmlfile)
condition_count = 0
for i in range(0,len(plugin_output_files)):
result = plugin_output_files[i].find("txgenerator/stdout-txgenerator.BITCOINTPS_TESTER.1000.log")
if result != -1:
f = open(plugin_output_files[i], "r")
while True:
line = f.readline()
if not line: break
result = line.find("addTarget")
if result != -1:
for i in range(0,len(IP_list)):
result = line.find(IP_list[i])
if result != -1:
condition_count += 1
if condition_count == 2:
f.close()
print("Success txGenrator connection test ...")
sys.exit(0)
else:
break
f.close()
print(condition_count)
print("Fail txGenerator connection test ... ")
sys.exit(1)
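# Hedged end-to-end sketch (file and node names are hypothetical): a typical
# regression run chains the checks above together.
#
#   node_ids = ["bcdnode0", "bcdnode1"]
#   xml_path = test_xml_existence("shadow_output.xml")
#   out_txt = test_shadow_output_file_existence("regtest", node_ids)
#   logs = test_file_existence(node_ids, ["bitcoind", "bitcoind"])
#   test_mining(logs, len(node_ids))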
| 2.203125 | 2 |
ex024-Nome com Silva.py | Mathelzu/PythonExercicios | 0 | 12785830 | nome = input('Nome: ')
a = nome.upper()
b = 'SILVA' in a
print(f'Seu nome tem Silva: {b}')
| 3.59375 | 4 |
s3dxrd/utils/topology.py | FABLE-3DXRD/scanning-xray-diffraction | 2 | 12785831 | import numpy as np
#import matplotlib.pyplot as plt
import shapely.geometry
from scipy.ndimage.morphology import binary_dilation
from scipy.ndimage import label
from multiprocessing import Pool
def voxels_to_polygon(image_stack, pixel_size, center=(0.5, 0.5)):
"""Take a stack of images and produce a stack of shapely polygons.
The images are interpreted as a solid shape with boundary along the pixel
exterior edge. Thus an image eith a single nonzero pixel will return a square
polygon with sidelength equal to the pixel_size.
IN:
image_stack: list of binary (1.0,0) numpy array 2d images each depicting
a single connected region of 1.0 surrounded by 0.0.
pixel_size: The absolute pixel size of the input images. Used to make the
output polygons coordinates real spaced.
center: the relative origin of the image, axis=0 is x and axis=1 is y
increasing with increasingf index. For instance center=(0.5,0.5)
will select the centre of the image as the orign.
OUT:
polygon_stack: list of shapely.geometry.polygons each representing the bound
of the corresponding input binary image.
"""
polygon_stack = [pixels_to_polygon(image, pixel_size, center) for image in image_stack]
return polygon_stack
def check_input(image):
"""Check that the provided image consists of a single connected domain of pixels.
"""
# Check that the input image has no floating pixels.
labeled_array, num_features = label(image.astype(int) + 1)
assert num_features == 1, "The input image must contain a single solid domain of connected pixels but it appears " \
"to have floating pixels "
#
# Check that the input image has no holes.
s = np.sum(np.abs(image.astype(int)[1:, :] - image.astype(int)[0:-1, :]), axis=0)
assert np.alltrue(
s <= 2), "The input image must contain a single solid domain of connected pixels but it appears to have holes"
#
def pixels_to_polygon(image, pixel_size, center=(0.5, 0.5)):
"""Take a single image and produce a shapely polygon.
"""
check_input(image)
expanded_image = expand_image(image, factor=3)
indices = get_image_boundary_index(expanded_image)
coordinates = indices_to_coordinates(indices, pixel_size / 3., center, expanded_image)
polygon = shapely.geometry.Polygon(coordinates)
# show_polygon_and_image(polygon, image, pixel_size, center) #<= DEBUG
return polygon
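# Hedged example (synthetic image, not from the original tests): a single
# nonzero pixel with pixel_size 1.0 should come back as a unit square polygon.
#
#   img = np.zeros((3, 3))
#   img[1, 1] = 1.0
#   poly = pixels_to_polygon(img, pixel_size=1.0)
#   assert abs(poly.area - 1.0) < 1e-9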
def expand_image(image, factor):
"""Expand 2d binary image so that each pixel is split by copying
into factor x factor number of pixels.
"""
expanded_image = np.repeat(image, factor, axis=1)
expanded_image = np.repeat(expanded_image, factor, axis=0)
return expanded_image
def get_image_boundary_index(image):
"""Find the pixel indices of the boundary pixels of a binary image.
"""
boundary_image = get_boundary_image(image)
bound_indx = np.where(boundary_image == 1)
ix, iy = bound_indx[0][0], bound_indx[1][0] # starting index
indices = [(ix, iy)]
while (not len(indices) == np.sum(boundary_image)):
# Walk around border and save boundary pixel indices
mask = np.zeros(boundary_image.shape)
mask[np.max([0, ix - 1]):ix + 2, iy] = 1
mask[ix, np.max([iy - 1]):iy + 2] = 1
neighbour_indx = np.where(boundary_image * mask)
for ix, iy in zip(neighbour_indx[0], neighbour_indx[1]):
if (ix, iy) not in indices:
indices.append((ix, iy))
break
indices = sparse_indices(indices)
return indices
def get_boundary_image(image):
"""Return a pixel image with 1 along the boundary if the assumed
object in image.
"""
k = np.ones((3, 3), dtype=int)
dilation = binary_dilation(image == 0, k, border_value=1)
boundary_image = dilation * image
return boundary_image
def sparse_indices(indices):
"""Remove uneccesary nodes in the polygon (three nodes on a line is uneccesary).
"""
new_indices = []
for i in range(0, len(indices) - 1):
if not (indices[i - 1][0] == indices[i][0] == indices[i + 1][0] or \
indices[i - 1][1] == indices[i][1] == indices[i + 1][1]):
new_indices.append(indices[i])
return new_indices
def indices_to_coordinates(indices, pixel_size, center, image):
"""Compute real space coordinates of image boundary form set of pixel indices.
"""
dx = image.shape[1] * center[0]
dy = image.shape[0] * center[1]
coordinates = []
for c in indices:
# Verified by simulated nonsymmetric grain
ycoord = pixel_size * (c[1] + 0.5 - dx + (c[1] % 3 - 1) * 0.5)
xcoord = pixel_size * (-c[0] - 0.5 + dy - (c[0] % 3 - 1) * 0.5)
coordinates.append((xcoord, ycoord))
return coordinates
def get_path_for_pos(args):
arr, all_entry, all_exit, all_nhat, all_L, all_nsegs, \
bad_lines, xray_endpoints, sample_polygon, zpos = args
for i, ang, dty in arr:
# Translate and rotate the xray endpoints according to ytrans and angle
c, s = np.cos(np.radians(-ang)), np.sin(np.radians(-ang))
rotz = np.array([[c, -s], [s, c]])
rx = rotz.dot(xray_endpoints + np.array([[0, 0], [dty, dty]]))
xray_polygon = shapely.geometry.LineString([rx[:, 0], rx[:, 1]])
# compute the intersections between beam and sample in sample coordinates
intersection_points = get_intersection(xray_polygon, sample_polygon, zpos)
if intersection_points is None:
            # If a measurement missed the sample or grazed a corner, we skip ahead
bad_lines.append(int(i))
else:
# make a measurement at the current setting
entry, exit, nhat, L, nsegs = get_quanteties(intersection_points)
# save the measurement results in global lists
all_entry.append(entry)
all_exit.append(exit)
all_nhat.append(nhat)
all_L.append(L)
all_nsegs.append(nsegs)
return all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines
def get_integral_paths(angles, ytrans, zpos, sample_polygon, nprocs, show_geom=False):
"""Compute entry-exit points for a scanrange.
"""
# Instantiate lists to contain all measurements
all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines = [], [], [], [], [], []
xray_endpoints = get_xray_endpoints(sample_polygon)
# Loop over all experimental settings
split_arrays = np.array_split(list(zip(range(len(angles)), angles, ytrans)), nprocs)
# split_arrays = np.array_split(np.array(list(enumerate(zip(angles, ytrans)))), 2)
args = [(arr, all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines,
xray_endpoints, sample_polygon, zpos) for arr in split_arrays]
with Pool(nprocs) as p:
out = p.map(get_path_for_pos, args)
# Unpack the multicore results
all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines = [], [], [], [], [], []
for o in out:
for i, l in enumerate([all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines]):
l.extend(o[i])
# repack lists of measurements into numpy arrays of desired format
entry, exit, nhat, L, nsegs = repack(all_entry, all_exit, all_nhat, all_L, all_nsegs)
return entry, exit, nhat, L, nsegs, bad_lines
def get_xray_endpoints(sample_polygon):
"""Calculate endpoitns of xray line segement. The lenght of the
line segment is adapted to make sure xray always convers the full
length of the sample.
"""
xc, yc = sample_polygon.exterior.xy
xmin = np.min(xc)
xmax = np.max(xc)
ymin = np.min(yc)
ymax = np.max(yc)
D = np.sqrt((xmax - xmin) ** 2 + (ymax - ymin) ** 2)
return np.array([[-1.1 * D, 1.1 * D], [0, 0]])
def get_intersection(xray_polygon, sample_polygon, z):
"""Compute the 3d coordinates of intersection between xray and
sample.
"""
intersection = sample_polygon.intersection(xray_polygon)
if intersection.is_empty or isinstance(intersection, shapely.geometry.point.Point):
# we missed the sample with the beam
intersection_points = None
elif isinstance(intersection, shapely.geometry.linestring.LineString):
# we got a single line segment intersection
intersection_points = np.zeros((2, 3))
intersection_points[:2, :2] = np.array(intersection.xy).T
intersection_points[:, 2] = z
elif isinstance(intersection, shapely.geometry.multilinestring.MultiLineString):
# we got multiple line segments intersection
intersection_points = np.zeros((2 * len(intersection.geoms), 3))
for i, line_segment in enumerate(intersection.geoms):
intersection_points[2 * i:2 * (i + 1), :2] = np.array(line_segment.xy).T
intersection_points[:, 2] = z
return intersection_points
def get_quanteties(intersection_points):
nsegs = intersection_points.shape[0] // 2
entry, exit = [], []
p1 = intersection_points[0, :]
p2 = intersection_points[1, :]
nhat = list((p2 - p1) / np.linalg.norm(p2 - p1))
L = 0
for i in range(nsegs):
p1 = intersection_points[2 * i, :]
p2 = intersection_points[2 * i + 1, :]
entry.extend(list(p1))
exit.extend(list(p2))
length = np.linalg.norm(p2 - p1)
L += length
return entry, exit, nhat, L, nsegs
def repack(all_entry, all_exit, all_nhat, all_L, all_nsegs):
"""Repack global measurement list into numpy arrays of desired format.
"""
N = len(all_L)
p = max(max(all_nsegs), 1)
nsegs = np.array(all_nsegs).reshape(1, N)
L = np.array(all_L).reshape(1, N)
entry = np.zeros((3 * p, N))
for i, en in enumerate(all_entry):
entry[:len(en[:]), i] = en[:]
exit = np.zeros((3 * p, N))
for i, ex in enumerate(all_exit):
exit[:len(ex[:]), i] = ex[:]
nhat = np.array(all_nhat).T
return entry, exit, nhat, L, nsegs
# def show_polygon_and_image(polygon, image, pixel_size, center):
# """Plot a image and polygon for debugging purposes
# """
# fig, ax = plt.subplots(1, 2, figsize=(12, 6))
# fig.suptitle('Center at ' + str(center))
# xc, yc = polygon.exterior.xy
# xcenter = image.shape[1] * pixel_size * center[0]
# ycenter = image.shape[0] * pixel_size * center[1]
# ax[0].imshow(image, cmap='gray')
# ax[0].set_title('Pixel image')
# ax[0].arrow(int(image.shape[1] * center[0]), int(image.shape[0] * center[1]), \
# image.shape[0] // 4, 0, color='r', head_width=0.15) # y
# ax[0].text(int(image.shape[1] * center[0]) + image.shape[1] // 4, int(image.shape[0] * center[1]) + 0.25, \
# 'y', color='r')
# ax[0].arrow(int(image.shape[1] * center[0]), int(image.shape[0] * center[1]), \
# 0, -image.shape[1] // 4, color='r', head_width=0.15) # x
# ax[0].text(int(image.shape[1] * center[0]) + 0.25, int(image.shape[0] * center[1]) - image.shape[1] // 4, \
# 'x', color='r')
# ax[1].set_title('Polygon representation')
# ax[1].fill(xc, yc, c='gray', zorder=1)
# ax[1].scatter(xc, yc, c='r', zorder=2)
# ax[1].grid(True)
# ax[1].scatter(0, 0, c='b', zorder=3)
# ax[1].set_xlim([-xcenter, image.shape[1] * pixel_size - xcenter])
# ax[1].set_ylim([-ycenter, image.shape[0] * pixel_size - ycenter])
# ax[1].set_xlabel('x')
# ax[1].set_ylabel('y')
# plt.show()
| 3.078125 | 3 |
test_lex_re.py | marcos-mv/ruspy | 0 | 12785832 | <filename>test_lex_re.py
"""
# lex-re
Convert and relate regular expressions from compiler theory to regular
expressions written in a programming language.
Create and manipulate regular expressions in Python or another programming language.
We test this skill by translating the rules for Rust's terminal symbols into
Python regular expressions. This skill checks the numeric types in
* Integers: https://doc.rust-lang.org/reference/tokens.html#integer-literals
* Floats: https://doc.rust-lang.org/reference/tokens.html#floating-point-literals
* C-style comments, both in the // to-end-of-line style
and in the /* block */ style. Rust has more sophisticated rules, but we will
ignore them in this activity.
* Identifiers: https://doc.rust-lang.org/reference/identifiers.html
(though the last one is trivial, because the reference already provides the
regular expression).
Whoever opts to implement the string and raw string rules also earns the
optional re-adv* skill, but that is tested by the re_adv_V1 file. If you are
not interested in this competence, implement strings as sequences of letters
and spaces between quotes.
"""
import pytest
import lark
@pytest.mark.parametrize("grp", "ID INT BIN_INT OCT_INT HEX_INT FLOAT".split())
def test_exemplos_positivos(grp, mod, data):
for ex in sorted(data(grp), key=len):
typ = None
if grp.endswith("INT"):
typ = int
if grp.endswith("FLOAT"):
typ = float
check_valid_token(ex, mod, grp, typ=typ)
def test_comentários(mod, data):
grp = "COMMENT"
for ex in sorted(data(grp), key=len):
print(f"Testando: {ex!r} ({grp})")
seq = mod.lex_list(ex)
if seq:
raise AssertionError(f"erro: esperava comentário, obteve sequência {seq}")
@pytest.mark.parametrize("grp", "ID INT BIN_INT OCT_INT HEX_INT FLOAT COMMENT".split())
def test_exemplos_negativos(grp, mod, data):
for ex in sorted(data(grp + "_bad"), key=len):
print(f"Testando: {ex!r} ({grp})")
try:
seq = mod.lex_list(ex)
except lark.LarkError:
continue
if grp == "COMMENT" and not seq:
raise AssertionError(f"aceitou elemento: {ex}")
elif len(seq) == 1 and seq[0].type == grp and seq[0] == ex:
raise AssertionError(f"aceitou elemento: {seq}")
def check_valid_token(ex, mod, grp, typ=None):
print(f"Testando: {ex!r} ({grp})")
seq = mod.lex_list(ex)
try:
[tk] = seq
except ValueError:
raise AssertionError(f"erro: esperava token único, obteve sequência {seq}")
if typ is not None:
val = mod.transform(tk)
assert isinstance(
val, typ
), f"tipo errado {tk} ({tk.type}): esperava {typ}, obteve {type(val)}"
return seq
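# Hedged example (the token text is hypothetical): a valid hex literal should
# lex to a single HEX_INT token and transform to a Python int.
#
#   check_valid_token("0xFF", mod, "HEX_INT", typ=int)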
| 3.125 | 3 |
moebiusgol/render.py | lorenzocerrone/moebius-game-of-life | 0 | 12785833 | <filename>moebiusgol/render.py
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from moebiusgol.utils import add_pattern_from_macro, add_pattern_from_file
from moebiusgol.gol_step import update_grid2d
import numpy as np
from PIL import Image
import os
from skimage.transform import rescale
import tqdm
import subprocess
import math
render_method = {'Basic2png', 'BasicAnimation'}
class BaseRender:
def __init__(self, config):
global_config = config['global_param']
self.canvas_size = global_config['canvas_size']
self.render_method = global_config['render']
self.max_iterations = global_config['max_iterations']
self.export_path = global_config['export_path']
self.export_size = global_config['export_size']
self.do_rescale = global_config['rescale']
self.do_crop_center = global_config['crop_center']
self.fps = global_config['fps']
self.name = global_config['name']
self.timeline = config['timeline']
self.timeline_keys = self.timeline.keys()
self.gol_state = np.zeros(self.canvas_size)
self.iteration = 0
self.fig, self.im, self.ani = None, None, None
def step(self):
if self.iteration in self.timeline.keys():
for value in self.timeline[self.iteration]:
if value['type'] == 'macro':
self.gol_state = add_pattern_from_macro(self.gol_state,
**value['kwargs'])
elif value['type'] == 'file':
self.gol_state = add_pattern_from_file(self.gol_state,
**value['kwargs'])
self.gol_state = update_grid2d(self.gol_state)
self.iteration += 1
def render_next(self, _):
self.step()
self.im.set_array(self.gol_state)
return self.im
def rescale(self, im):
min_scale = min(self.export_size[0]/self.canvas_size[0], self.export_size[1]/self.canvas_size[1])
im = rescale(im, scale=min_scale, order=0)
diff_x, diff_y = self.export_size[0] - im.shape[0], self.export_size[1] - im.shape[1]
im = np.pad(im, ((math.floor(diff_x/2), math.ceil(diff_x/2)),
(math.floor(diff_y/2), math.ceil(diff_y/2))))
return im
def crop(self, im):
im = im[self.canvas_size[0]//2 - self.export_size[0]//2:self.canvas_size[0]//2 + self.export_size[0]//2,
self.canvas_size[1]//2 - self.export_size[1]//2:self.canvas_size[1]//2 + self.export_size[1]//2]
return im
def save_png(self):
export_frames_path = os.path.join(self.export_path, 'frames')
os.makedirs(export_frames_path, exist_ok=True)
for _ in tqdm.tqdm(range(self.max_iterations)):
self.step()
if self.do_rescale:
im = self.rescale(self.gol_state)
elif self.do_crop_center:
im = self.crop(self.gol_state)
else:
                raise NotImplementedError("either 'rescale' or 'crop_center' must be enabled")
im = im[::-1]
im = Image.fromarray(255 * im)
im = im.convert("L")
file_name = os.path.join(export_frames_path, f"frame_{self.iteration:06d}.png")
im.save(file_name)
frame_naming_rule = os.path.join(export_frames_path, f"frame_%06d.png")
out_movie = os.path.join(self.export_path, f"{self.name}.mp4")
subprocess.run(['ffmpeg',
'-r', f'{self.fps}',
'-f', 'image2',
'-i', frame_naming_rule,
'-vcodec', 'libx264',
'-crf', '25',
'-pix_fmt', 'yuv420p',
'-y', out_movie])
def animate(self):
self.fig = plt.figure(figsize=(10, 10))
self.im = plt.imshow(self.gol_state, animated=True,
vmax=1,
vmin=0,
interpolation='nearest',
cmap='gray',
origin='lower')
self.ani = animation.FuncAnimation(self.fig,
self.render_next,
init_func=self.init_gol,
interval=0, frames=10, blit=False)
plt.tight_layout()
plt.show()
def init_gol(self):
return self.gol_state
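# Hedged usage sketch (config keys inferred from __init__; all values are
# hypothetical, and the timeline kwargs depend on add_pattern_from_macro):
#
#   cfg = {'global_param': {'canvas_size': (256, 256), 'render': 'Basic2png',
#                           'max_iterations': 100, 'export_path': 'out',
#                           'export_size': (512, 512), 'rescale': True,
#                           'crop_center': False, 'fps': 30, 'name': 'demo'},
#          'timeline': {0: [{'type': 'macro',
#                            'kwargs': {'pattern': 'glider', 'x': 10, 'y': 10}}]}}
#   BaseRender(cfg).save_png()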
| 2.21875 | 2 |
day/008/prime_number_checker.py | Wanzaz/100-days-of-code | 0 | 12785834 |
def prime_checker(number):
    counter = 0
    # count every divisor of number, including number itself
    for i in range(1, number + 1):
        if number % i == 0:
            counter += 1
    # a prime has exactly two divisors: 1 and itself
    if counter == 2:
        print("It's a prime number.")
    else:
        print("It's not a prime number.")
n = int(input("Check this number: "))
prime_checker(number=n)
| 4.03125 | 4 |
lightfield_plane.py | IDLabMedia/blender-lightfield-addon | 1 | 12785835 | <filename>lightfield_plane.py
import math
import random
import bpy
import bmesh
from mathutils import Color
from .lightfield import LightfieldPropertyGroup
from .camera_position import CameraPosition
class LightfieldPlane(LightfieldPropertyGroup):
def construct(self):
visuals = self.default_construct()
self.lf_type = 'PLANE'
self.obj_empty.empty_display_type = 'PLAIN_AXES'
# Update lightfield references
self.obj_visuals.add().obj_visual = visuals[0]
self.obj_visuals.add().obj_visual = visuals[1]
self.obj_visuals.add().obj_visual = visuals[2]
self.obj_grid = visuals[0]
self.set_camera_to_first_view()
def construct_visuals(self, collection):
grid = self.create_grid()
space = self.create_space()
front = self.create_front()
# Add to lightfield collection
collection.objects.link(grid)
collection.objects.link(space)
collection.objects.link(front)
return [grid, space, front]
def create_space(self):
"""
Create visual that represents the space the lightfield is occupying.
:return: Object.
"""
name = self.construct_names()['space']
bpy.ops.mesh.primitive_plane_add(location=(0, 0, 0))
p1 = bpy.context.object
dumped_mesh = p1.data
bpy.ops.mesh.primitive_plane_add(location=(0, 0, 0))
space = bpy.context.object
space.name = name
p1.select_set(True)
bpy.ops.object.join()
space.scale = [0.5] * 3
space.rotation_euler[0] = 0.5 * math.pi
# Remove mesh-data created by p1 which is not necessary anymore
bpy.data.meshes.remove(dumped_mesh)
space.data.name = name
# Unlink the object from its current collection
space.users_collection[0].objects.unlink(space)
space_mat = bpy.data.materials.new(name)
col = Color()
col.hsv = (random.random(), 1.0, 0.8)
space_mat.diffuse_color = col[:] + (0.1,)
space.data.materials.append(space_mat)
space.show_wire = True
space.hide_render = True
return space
@staticmethod
def construct_names():
base = "LFPlane"
return {'lightfield': base,
'camera': "{}_Camera".format(base),
'grid': "{}_Grid".format(base),
'space': "{}_Space".format(base),
'front': "{}_Front".format(base)}
def position_generator(self):
cube = self.cube_camera
for y in range(self.num_cams_y):
for x in range(self.num_cams_x):
# TODO: implement cube_camera in plane lightfield
yield self.get_camera_pos(x, y)
def get_camera_pos(self, x, y):
base_x = 1 / (self.num_cams_x - 1)
base_y = 1 / (self.num_cams_y - 1)
return CameraPosition("view_{:04d}f".format(y * self.num_cams_x + x),
-0.5 + x * base_x,
0.0,
0.5 - y * base_y,
alpha=0.5*math.pi)
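# Hedged illustration (lf stands for a hypothetical LightfieldPlane with
# num_cams_x == num_cams_y == 3): views are spaced across the unit plane,
# walking x left-to-right and z top-to-bottom.
#
#   lf.get_camera_pos(0, 0)  # x=-0.5, z= 0.5, name view_0000f
#   lf.get_camera_pos(2, 2)  # x= 0.5, z=-0.5, name view_0008f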
| 2.421875 | 2 |
samples-python1/101-datatypes-hard.py | LunaticNeko/chandrakasem-lectures | 0 | 12785836 | # Int and Float
x = 100
y = 22.4
xpx = x+x
xpy = x+y
# print(type(xpx), xpx)
# print(type(xpy), xpy)
very_large_number = 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
very_large_float = float(very_large_number)
back_to_int = int(very_large_float)
# print(type(very_large_number), very_large_number)
# print(type(very_large_float), very_large_float)
# print(type(back_to_int), back_to_int)
# Addressing
S1 = "This is a sentence."
# 0123456789012345678
# print(S1[0])
# print(S1[9])
# print(S1[16])
# print(S1[0:5])
# print(S1[7:])
# print(S1[:12])
L = ['Dog', "Cat", 'Rabbit']
M = ['Horse', 'Deer']
# print(type(L))
# print(L[0]) # show value of item 0 in list L
L[0] = "Giraffe"
print(L + M)
# print(L)
T1 = (202, "Second Floor Office")
T2 = (201, "Technician Office")
T3 = (101, "Secretary Office")
# print(type(T1))
print(T1 + T2)
# T1[1] = "DANCE FLOOR" # <== Will cause error
T1 = (202, "DANCE FLOOR")
print(T1)
Z = "LOD Monitor User Guide"
# Z[1] = 'C' # <== Will cause error
Z = "LCD Monitor User Guide"
print(Z)
| 3.640625 | 4 |
deskew/__init__.py | jwbargsten/deskew | 0 | 12785837 | <reponame>jwbargsten/deskew
import numpy as np
from skimage.feature import canny
from skimage.transform import hough_line, hough_line_peaks
def determine_skew(img, sigma=3.0, num_peaks=20):
# scale to 0-255
# makes deskewing possible in light-coloured images with low contrast
img = (((img - np.min(img)) / np.ptp(img)) * 255).astype("uint8")
edges = canny(img, sigma=sigma)
    # assume that pages are already in the right orientation (portrait/landscape);
    # therefore, only correct by +-30 deg (pi/6). To get the right peaks,
    # we also need to rotate theta by 90 deg (pi/2), because lines of text
    # create the peaks along the horizontal direction
theta = np.linspace(np.pi / 2 - np.pi / 6, np.pi / 2 + np.pi / 6, 180)
hspace, angles, distances = hough_line(edges, theta=theta)
_, peaks, _ = hough_line_peaks(hspace, angles, distances, num_peaks=num_peaks)
unique_peaks, count = np.unique(peaks, return_counts=True)
pidx = np.argmax(count)
the_peak_deg = np.rad2deg(unique_peaks[pidx] - np.pi / 2)
return the_peak_deg
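if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: assumes
    # scikit-image is installed and that a file "scan.png" exists
    # (both illustrative assumptions).
    from skimage.color import rgb2gray
    from skimage.io import imread
    from skimage.transform import rotate

    image = rgb2gray(imread("scan.png"))  # 2-D grayscale array
    angle = determine_skew(image)         # estimated skew in degrees
    deskewed = rotate(image, angle, resize=True, mode="edge")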
| 2.6875 | 3 |
angr_platforms/tricore/rcrr_instr.py | shahinsba/angr-platforms | 0 | 12785838 | #!/usr/bin/env python3
""" rcrr_instr.py
Implementation of RCRR format instructions.
"""
from pyvex.lifting.util import Type, Instruction
import bitstring
from .logger import log_this
class RCRR_Instructions(Instruction):
""" Insert Bit Field instruction.
op = 0x97
op2 = 0x00 3-bit
User Status Flags: no change.
"""
name = 'RCRR_Instructions ...'
op = "{0}{1}".format(bin(9)[2:].zfill(4), bin(7)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
a = int(tmp[20:24].hex, 16)
const4 = int(tmp[16:20].hex, 16)
w = int(tmp[11:16].bin.zfill(8), 2)
op2 = int(tmp[8:11].bin, 2)
d = int(tmp[4:8].hex, 16)
c = int(tmp[:4].hex, 16)
if op2 == 0:
self.name = "RCRR_INSERT"
else:
self.name = "UNKNOWN"
data = {"a": a,
"const4": const4,
"c": c,
"w": w,
"d": d,
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
def get_dst_reg(self):
return "d{0}".format(self.data['c'])
def get_const4(self):
return self.constant(self.data['const4'], Type.int_32)
def get_d_d_2(self):
return self.get("d{0}".format(self.data['d']+1), Type.int_32)
def get_d_d_1(self):
return self.get("d{0}".format(self.data['d']), Type.int_32)
def get_d_a(self):
return self.get("d{0}".format(self.data['a']), Type.int_32)
def fetch_operands(self):
return self.get_d_a(), self.get_d_d_1(), self.get_d_d_2(), self.get_const4()
def compute_result(self, *args):
d_a = args[0]
d_d_1 = args[1]
d_d_2 = args[2]
const4 = args[3]
# E[d] = d_d_2 | d_d_1
pos = d_d_1 & 0x1f
width = d_d_2 & 0x1f
#TODO if (pos + width > 32) or (width == 0):
# print("Undefined result for (pos + width > 32)!")
# exit(1)
result = ""
        if self.data['op2'] == 0:
            const_1 = self.constant(1, Type.int_32)
            # mask = ((2 ** width) - 1) << pos, computed in 32-bit arithmetic
            # so that widths up to 31 do not overflow
            mask = ((const_1 << width.cast_to(Type.int_8)) - 1) << pos.cast_to(Type.int_8)
            result = (d_a & ~mask) | ((const4 << pos.cast_to(Type.int_8)) & mask)
return result
def commit_result(self, res):
self.put(res, self.get_dst_reg())
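# Plain-Python sketch of the INSERT semantics implemented above (illustration
# only; the concrete operand values below are hypothetical, not lifter state):
def _insert_reference(d_a, const4, pos, width):
    mask = ((1 << width) - 1) << pos
    return (d_a & ~mask) | ((const4 << pos) & mask)

assert _insert_reference(0xFFFFFFFF, 0b0101, pos=4, width=4) == 0xFFFFFF5F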
| 2.609375 | 3 |
wagtail_localize_git/test/__init__.py | kaedroho/wagtail-localize-pontoo | 5 | 12785839 | <filename>wagtail_localize_git/test/__init__.py
default_app_config = "wagtail_localize_git.test.apps.WagtailLocalizeGitTestAppConfig"
| 1.046875 | 1 |
docs/Deployments/grabcut_mesher/main.py | erdc/AdhUI | 2 | 12785840 | import panel as pn
import holoviews as hv
from earthsim.grabcut import GrabCutPanel, SelectRegionPanel
from adhui import CreateMesh, ConceptualModelEditor
hv.extension('bokeh')
stages = [
('Select Region', SelectRegionPanel),
('Grabcut', GrabCutPanel),
('Path Editor', ConceptualModelEditor),
('Mesh', CreateMesh)
]
# create the pipeline
pipeline = pn.pipeline.Pipeline(stages, debug=True)
# modify button width (not exposed)
pipeline.layout[0][1]._widget_box.width = 100
pipeline.layout[0][2]._widget_box.width = 100
# return a display of the pipeline
pipeline.layout.servable()
| 2.234375 | 2 |
etk/unit_tests/test_language_identification_extractor.py | donaq/etk | 77 | 12785841 | import unittest
from etk.extractors.language_identification_extractor import LanguageIdentificationExtractor
class TestLanguageIdentificationExtractor(unittest.TestCase):
def test_langid(self):
extractor = LanguageIdentificationExtractor()
text_en = "langid.py comes pre-trained on 97 languages (ISO 639-1 codes given)"
result_en = extractor.extract(text_en, "LANGID")
self.assertEqual(result_en[0].value, "en")
text_es = "<NAME>"
result_es = extractor.extract(text_es, "LANGID")
self.assertEqual(result_es[0].value, "es")
text_de = "Ein, zwei, drei, vier"
result_de = extractor.extract(text_de, "LANGID")
self.assertEqual(result_de[0].value, "de")
text_unknown = "%$@$%##"
result_unknown = extractor.extract(text_unknown, "LANGID")
self.assertEqual(result_unknown[0].value, "en")
def test_langdetect(self):
extractor = LanguageIdentificationExtractor()
text_en = "langdetect supports 55 languages out of the box (ISO 639-1 codes)"
result_en = extractor.extract(text_en, "LANGDETECT")
self.assertEqual(result_en[0].value, "en")
text_es = "<NAME>"
result_es = extractor.extract(text_es, "LANGDETECT")
self.assertEqual(result_es[0].value, "es")
text_de = "Ein, zwei, drei, vier"
result_de = extractor.extract(text_de, "LANGDETECT")
self.assertEqual(result_de[0].value, "de")
text_unknown = "%$@$%##"
result_unknown = extractor.extract(text_unknown, "LANGDETECT")
self.assertTrue(len(result_unknown) == 0)
if __name__ == '__main__':
unittest.main()
| 2.90625 | 3 |
reference/generated/numpy-arccos-1.py | ahaldane/ahaldane.github.io | 16 | 12785842 | # We expect the arccos of 1 to be 0, and of -1 to be pi:
np.arccos([1, -1])
# array([ 0. , 3.14159265])
# Plot arccos:
import matplotlib.pyplot as plt
x = np.linspace(-1, 1, num=100)
plt.plot(x, np.arccos(x))
plt.axis('tight')
plt.show()
| 2.8125 | 3 |
953/Verifying an Alien Dictionary.py | cccccccccccccc/Myleetcode | 0 | 12785843 | <gh_stars>0
from typing import List
class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
        # map each letter to its rank in the alien alphabet
        orderdict = {c: i for i, c in enumerate(order)}
        for i in range(len(words)-1):
            word1 = words[i]
            word2 = words[i+1]
            # compare adjacent words at their first differing position
            for j in range(min(len(word1), len(word2))):
                if word1[j] != word2[j]:
                    if orderdict[word1[j]] > orderdict[word2[j]]:
                        return False
                    break
            else:
                # no differing position: word1 must not be longer than word2,
                # e.g. "apple" may not come before "app"
                if len(word1) > len(word2):
                    return False
        return True
A = Solution()
words = ["fxasxpc","dfbdrifhp","nwzgs","cmwqriv","ebulyfyve","miracx","sxckdwzv","dtijzluhts","wwbmnge","qmjwymmyox"]
order ="zkgwaverfimqxbnctdplsjyohu"
w= ["hello","leetcode"]
o = "hlabcdefgijkmnopqrstuvwxyz"
w1 = ["apple","app"]
o1 = "abcdefghijklmnopqrstuvwxyz"
print(A.isAlienSorted(w,o)) | 3.171875 | 3 |
functions.py | mimmospoto/ADM-HW2-Group9 | 0 | 12785844 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import copy
import os
import datetime
import time
from functools import reduce
# Function that provide some information about the cvs files
def infos(old_df_names, months):
"""
Print informations about databases
input:
- dataframe
- months
output:
- months, number of NaN values in each column
"""
for i in range(len(old_df_names)):
df = pd.read_csv(old_df_names[i])
print('Month %s :' %months[i])
for i in df.columns:
print('\t- {} has number of Nan : {:d} ({:.2f}%)'.format(i, int(df[i].isna().sum()), (int(df[i].isna().sum())/len(df))*100))
print('Total number of rows: {:d}'.format(len(df)))
print('\n')
return
# Function that clean the databases from NaN values
def clean_dataframe(df):
"""
Clean the dataframe, removing NaN from columns
input:
- dataframe
output:
- cleaned dataframe
"""
df.dropna(inplace = True)
return df
# Function that create new csv files
def make_new_csv(old_df_names, df_names):
"""
Make new csv files
input:
- dataframe
output:
- new csv files
"""
for i in range(len(old_df_names)):
df = pd.read_csv(old_df_names[i])
# cleaning function
df = clean_dataframe(df)
df.to_csv(df_names[i], index=False)
return
# RQ1 functions
# RQ1.1 functions
def compute_average_session(df_names):
"""
Compute average number of times users perform view/cart/purchase within each session
input:
- list of names of csv files to open
output:
- series of average of each operation
"""
# init the daily average dict
average_session_dict = {}
for i in range(len(df_names)):
average_session_dict[i] = {}
# load the ith dataframe, taking the event_type and user_session columns
df = pd.read_csv(df_names[i], usecols=['event_type', 'user_session'])
for j in df['event_type'].unique():
#print('{} of {:d} has average of : {:.2f} ' .format(j, i, float(df[df['event_type'] == j].groupby(['user_session']).count().mean())))
average_session_dict[i][j] = df[df['event_type'] == j].groupby(['user_session']).count().mean()
average_session_df = pd.DataFrame(average_session_dict).mean(axis=1)
return average_session_df
def plot_average_session(average_session_df, months):
"""
plots the average number of times users perform each operation
"""
# plot average_session_df
fig = plt.figure()
X = np.arange(len(average_session_df))
plt.bar(X, average_session_df)
plt.xticks(np.arange(len(average_session_df)),average_session_df.index)
plt.ylabel("average operation per session")
plt.xlabel("operations")
plt.title("Average number of times users perform each operation within a session")
plt.grid(color ='silver', linestyle = ':')
fig.set_figwidth(15)
fig.set_figheight(5)
return
# RQ1.2 functions
def compute_average_view_cart(df_names, months):
"""
Compute average number of times a user views a product before adding it to the cart
input:
- list of names of csv files to open
output:
    - the average number of times a product is viewed before being added to the cart
"""
# init a dataframe with index as every months and column as the mean for each user
df_mean_database = pd.DataFrame(index=months, columns=['mean'])
for i in range(len(df_names)):
# load the ith dataframe, taking the event_time, event_type, product_id, user_id columns
df = pd.read_csv(df_names[i],
usecols=['event_time','event_type', 'product_id', 'user_id'], nrows=100000,
parse_dates=['event_time'])
# cut off the 'purchase' variable from event_type
df_2 = df[df['event_type'] != 'purchase']
df_3 = df_2[df_2.event_type=='view'].groupby(by=['product_id']).agg(view=('event_type', 'count'))
df_4 = df_2[df_2.event_type=='cart'].groupby(by=['product_id']).agg(cart=('event_type', 'count'))
# get dataframe where event_type is equal to 'cart'
df_cart = df_2[df_2['event_type']=='cart']
# init a dataframe with index as every user and column as the mean for each user
df_mean_user = pd.DataFrame(index=df_cart['user_id'].unique(), columns=['mean'])
df_cart.groupby(by=['user_id']).count()
for user in df_cart['user_id'].unique():
# get dataframe with one user at a time
df_user = df_2[df_2['user_id'] == user]
# init the dict where the key are the products and the values are the mean of each product
product_dict = {}
for prod in df_user['product_id'].unique():
# get dataframe with one product at a time
df_product = df_user[df_user['product_id'] == prod]
df_product_2 = df_product.copy()
product_dict[prod] = []
# init a list to append how many times 'view' appears before 'cart' for each product
product_lst = []
# check if at least a 'view' exist in the dataframe otherwise pass
if any(df_product_2['event_type'] == 'view') == True:
df_product_2_time = df_product_2[df_product_2['event_type'] == 'view'].event_time.reset_index(drop=True)[0]
# check if there are some 'cart' event before the 'view' event (only for the first time of seeing the 'cart')
if any(df_product_2[df_product_2['event_type'] == 'cart'].event_time <= df_product_2_time) == True:
df_product_3 = df_product_2[df_product_2.event_time <= df_product_2_time]
# drop any 'cart' events at the beginning
df_product_2 = df_product_2.drop(labels=df_product_3[df_product_3['event_type'] == 'cart'].index)
# count how many times 'view' is before 'cart'
if any(df_product_2['event_type'] == 'view') == True:
for index, row in df_product_2.iterrows():
if row['event_type'] == 'cart':
                                product_lst.append(np.sum(df_product_2[df_product_2['event_type'] == 'view'].event_time < row['event_time']))
df_product_2 = df_product_2[df_product_2.event_time > row['event_time']]
# compute mean for each product
if len(product_lst) > 0:
product_dict[prod] = [i for i in product_lst if i != 0]
product_dict[prod] = np.mean(product_dict[prod])
else:
product_dict[prod].append(0)
# compute mean for each user
try:
df_mean_user.loc[user,'mean'] = round(pd.DataFrame(product_dict).mean(axis=1)[0], 2)
except ValueError:
df_mean_user.loc[user,'mean'] = round(product_dict[prod], 2)
# compute final average for a user for a product
df_mean_user.dropna(inplace=True)
mean_prod_user = np.mean(df_mean_user)
# add final average per month
df_mean_database.loc[months[i], 'mean'] = round(mean_prod_user[0], 2)
df_mean_database.dropna(inplace=True)
final_mean = np.mean(df_mean_database)
return final_mean
# RQ1.3 functions
def compute_probability_cart_purchase(df_names, months):
"""
Compute the probability that products are bought once is added to the cart
input:
- list of names of csv files to open
output:
- probability products are purchased once are added to the cart
"""
# init dictionary to merge each monthly datasets
df_database = {}
for i in range(len(df_names)):
# load the ith dataframe, taking only the event_type
df = pd.read_csv(df_names[i],
usecols=['event_type'])
# cut off the view variable from event_type
df_database[months[i]] = df[df['event_type'] != 'view']
# function to concatenate each dataset
merged_df = pd.concat([df_database[months[i]] for i in range(len(df_database))])
# compute probability as the ratio between purchase and cart events
prob = round(merged_df[merged_df['event_type'] == 'purchase'].shape[0] /
merged_df[merged_df['event_type'] == 'cart'].shape[0], 4) * 100
return prob
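# Illustration of the ratio above with hypothetical counts: 600 'purchase'
# rows and 2400 'cart' rows give round(600 / 2400, 4) * 100 = 25.0 percent.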
# RQ1.4 functions
def compute_average_time_removed_item(df_names, months):
"""
Compute the average time an item stays in the cart before being removed
input:
- list of names of csv files to open
output:
- average time
"""
df_mean_database = pd.DataFrame(index=months, columns=['mean'])
for i in range(len(df_names)):
# load the ith dataframe, taking only the
df = pd.read_csv(df_names[i],
usecols=['event_time', 'event_type', 'product_id'], nrows=100000,
parse_dates=['event_time'])
# cut off the view variable from event_type
df_2 = df[df['event_type'] != 'view']
# init the dict where the key are the products and the values are the mean of each product
product_dict = {}
# loop through the event_type 'purchase' to find unique product_id
for prod in df_2[df_2['event_type'] == 'purchase']['product_id'].unique():
df_product = df_2[df_2['product_id'] == prod]
# check if at least a 'cart' event exist
if df_product['event_type'].str.contains('cart').any():
pass
else:
continue
            # check if there are any 'purchase' events before the first 'cart' event
if any(df_product[df_product['event_type'] == 'purchase'].event_time <=
df_product[df_product['event_type'] == 'cart'].event_time.reset_index(drop=True)[0]) == True:
df_3 = df_product[df_product.event_time <= df_product[df_product['event_type'] == 'cart'].event_time.reset_index(drop=True)[0]]
                # drop any 'purchase' events at the beginning
df_product = df_product.drop(labels=df_3[df_3['event_type'] == 'purchase'].index)
            # check if there are any 'cart' events after the last 'purchase' event
if any(df_product[df_product['event_type'] == 'cart'].event_time >=
df_product[df_product['event_type'] == 'purchase'].event_time.reset_index(drop=True)[len(df_product[df_product['event_type'] == 'purchase'])-1]) == True:
df_3 = df_product[df_product.event_time >= df_product[df_product['event_type'] == 'purchase'].event_time.reset_index(drop=True)[len(df_product[df_product['event_type'] == 'purchase'])-1]]
                # drop any 'cart' events at the end
df_product = df_product.drop(labels=df_3[df_3['event_type'] == 'cart'].index)
# check if at least a 'cart' event exist
if df_product['event_type'].str.contains('cart').any():
pass
else:
continue
# check if at least a 'purchase' event exist
if df_product['event_type'].str.contains('purchase').any():
pass
else:
continue
dist_prod = df_product.event_time[df_product.event_type == 'purchase'].values - df_product.event_time[df_product.event_type == 'cart'].values
product_dict[prod] = []
product_dict[prod].append(np.mean(dist_prod))
# add final average per month
df_mean_database.loc[months[i], 'mean'] = pd.DataFrame(product_dict).mean(axis=1)[0]
    return df_mean_database
# RQ1.5 functions
def compute_average_time_first_view(df_names, months):
"""
Compute the average time an item stays in the cart between the first time view and purchase/addition to cart
input:
- list of names of csv files to open
output:
- average time
"""
df_mean_database = pd.DataFrame(index=months, columns=['mean'])
for i in range(len(df_names)):
# load the ith dataframe, taking only the
df = pd.read_csv(df_names[i],
usecols=['event_time', 'event_type', 'product_id'],
parse_dates=['event_time'])
# cut off the view variable from event_type
df_3 = df[df['event_type'] != 'view']
# init the dict where the key are the products and the values are the mean of each product
product_dict = {}
# loop through the event_type 'purchase' to find unique product_id
for prod in df_3['product_id'].unique():
df_product = df[df['product_id'] == prod]
# check if at least a 'view' event exist
if df_product['event_type'].str.contains('view').any():
pass
else:
continue
            # check if there are any 'purchase' events before the first 'view' event
if any(df_product[df_product['event_type'] == 'purchase'].event_time <=
df_product[df_product['event_type'] == 'view'].event_time.reset_index(drop=True)[0]) == True:
df_3 = df_product[df_product.event_time <= df_product[df_product['event_type'] == 'view'].event_time.reset_index(drop=True)[0]]
                # drop any 'purchase' events at the beginning
df_product = df_product.drop(labels=df_3[df_3['event_type'] == 'purchase'].index)
            # check if there are any 'cart' events before the first 'view' event
if any(df_product[df_product['event_type'] == 'cart'].event_time <=
df_product[df_product['event_type'] == 'view'].event_time.reset_index(drop=True)[0]) == True:
df_3 = df_product[df_product.event_time <= df_product[df_product['event_type'] == 'view'].event_time.reset_index(drop=True)[0]]
# drop any 'cart' events at the beginning
df_product = df_product.drop(labels=df_3[df_3['event_type'] == 'cart'].index)
# check if at least a 'purchase' event exist
if df_product['event_type'].str.contains('purchase').any():
pass
else:
continue
# check if at least a 'cart' event exist
if df_product['event_type'].str.contains('cart').any():
pass
else:
continue
product_dict[prod] = []
df_product.drop_duplicates(subset=['event_type'], keep='first', inplace=True)
df_product.reset_index(inplace=True)
product_dict[prod].append(df_product.event_time[1] - df_product.event_time[0])
# add final average per month
df_mean_database.loc[months[i], 'mean'] = pd.DataFrame(product_dict).mean(axis=1)[0]
return df_mean_database
# RQ2 functions
def compute_number_sold_per_category(df_names, months):
"""
Compute the most sold product per category
input:
- list of names of csv files to open
output:
- number of sold product per category
"""
# init a dataframe with index as months and column as most sold product
df_final = {}
for i in range(len(df_names)):
# load the ith dataframe, taking only the
df = pd.read_csv(df_names[i],
usecols=['product_id', 'category_code', 'event_type'])
df = df[df['event_type'] == 'purchase']
new = df['category_code'].str.split(".", expand=True)
df['category_1'] = new[0]
df.drop(columns=['category_code', 'event_type'], inplace=True)
df_final[months[i]] = df.groupby(by=['category_1']).count().sort_values('product_id', ascending=False)
df_final = [df_final[months[i]] for i in range(len(df_final))]
return df_final
def plot_number_sold_per_category(df_final, months):
"""
plot the number of sold product per category per month
"""
    # plot number of sold products per category per month using subplots
    fig, a = plt.subplots(4,2)
    for i in range(7):
        ax = a[i // 2][i % 2]
        df_final[i].reset_index().plot(kind='bar', y='product_id', x='category_1', ax=ax)
        ax.set(title=months[i], xlabel='Categories', ylabel='Total Sales')
        ax.tick_params(labelrotation=45)
        ax.get_legend().remove()
        ax.grid(color ='silver', linestyle = ':')
    # the eighth subplot slot has no month to show
    a[3][1].axis('off')
# Title the figure
fig.suptitle('Category of the most trending products overall', fontsize=14, fontweight='bold')
fig.set_figwidth(20)
fig.set_figheight(50)
plt.show()
return
def plot_most_visited_subcategories(df_names, months):
"""
plot the most visited subcategories
"""
# init a dataframe with index as months and column as most sold product
df_final = {}
for i in range(len(df_names)):
# load the ith dataframe, taking only the
df = pd.read_csv(df_names[i],
usecols=['event_type', 'category_code'])
# take only the view events
df = df[df['event_type'] == 'view']
# split the categories into subcategories
new = df['category_code'].str.split(".", expand=True)
df['subcategory'] = new[1]
df.drop(columns=['category_code'], inplace=True)
# group the subcategories and sort in descending order the relative values
df_final[months[i]] = df.groupby(by=['subcategory']).count().sort_values('event_type', ascending=False)
# build a pool of lists
df_final = [df_final[months[i]] for i in range(len(df_final))]
# concat each list of month
merged_df = pd.concat([df_final[i] for i in range(len(df_final))]).reset_index()
df_tot = merged_df.groupby(by=['subcategory']).sum().sort_values('event_type', ascending=False).rename(columns={'event_type': 'view'}).reset_index()
# plot most visited subcategories
fig = plt.figure()
X = np.arange(len(df_tot))
plt.barh(X, df_tot['view'])
plt.yticks(np.arange(len(df_tot)),df_tot['subcategory'])
plt.ylabel("views")
plt.xlabel("subcategories")
plt.title("Most visited subcategories")
plt.grid(color ='silver', linestyle = ':')
fig.set_figwidth(15)
fig.set_figheight(15)
plt.show()
return
def plot_10_most_sold(df_final, months):
"""
    return the 10 categories with the most sold products
"""
# concat the dataset
merged_df = pd.concat([df_final[i] for i in range(len(df_final))]).reset_index()
# group together by category in descending order
df_tot = merged_df.groupby(by=['category_1']).sum().sort_values('product_id', ascending=False).rename(columns={'event_type': 'view'})[:10]
return df_tot
# RQ3 functions
# Function used for showing the values of the bars in the plots of RQ3
def plot_values_in_barh(y):
for index, value in enumerate(y):
plt.text(value, index, str(round(value, 2)))
# Function that given a category in input, returns a plot with the average price per brand for the selected category
def plot_average_price_per_category(category, df_names):
# Initializing an empty list where we will put every grouped-by DataFrame later on
l = []
# Starting a for loop to read every DataFrame
for i in range(len(df_names)):
# Selecting the columns to use for this task
data = pd.read_csv(df_names[i], usecols=['category_code', 'brand', 'price'])
# For every category_code and brand, calculating the average price of the products, then i reset the index
# because i do not want to work with MultiIndex
a = data.groupby(['category_code', 'brand']).mean().reset_index()
# Appending the DataFrame analyzed for 1 month to the list l
l.append(a)
# Concatenating every DataFrame of each month grouped by category_code and brand in one DataFrame that will not
# be memory expensive
final = pd.concat(l)
# Grouping again by category_code and brand after the concatenation. We reset again the index for the same
# reason as before
final2 = final.groupby(['category_code', 'brand']).mean().reset_index()
# Selecting the category_code we want to analyze
fplot = final2.loc[final2['category_code'] == category]
# Setting the values to show in the plot at the end of the bars
y = list(fplot['price'])
# Assigning a variable to the plot
end = fplot.plot(x='brand', kind='barh', figsize=(20, 60))
# Returning the plot and calling the function to show the prices on the top of the bars
return end, plot_values_in_barh(y)
# Function that returns for each category, the brand with the highest price
def brand_with_highest_price_for_category(df_names):
# Initializing an empty list where we will put our Dataframes later on
l = []
# Starting a for loop to read every DataFrame
for i in range(len(df_names)):
# Selecting the columns to use for this task
data = pd.read_csv(df_names[i], usecols=['category_code', 'brand', 'price'])
# For every category_code and brand, calculating the average price of the products
a = data.groupby(['category_code', 'brand']).mean()
# Selecting the rows with the higher average price for each category
a1 = a.loc[a.groupby(level='category_code')['price'].idxmax()]
# Appending the analyzed DataFrame for 1 month to the list l
l.append(a1)
# Concatenating every DataFrame of each month grouped by category_code and brand in one DataFrame that will not
# be memory expensive
final = pd.concat(l)
# Resetting the index because i do not want to work with MultiIndex
rfinal = final.reset_index()
# Selecting again only the rows with the higher average price for category after concatenating the DataFrames
last_final = rfinal.loc[rfinal.groupby('category_code')['price'].idxmax()]
# Return the output
return last_final.sort_values(by=['price'])
# RQ4 functions
# Function that is used to see if the prices of different brands are significantly different
def average_price_per_brand(df_names):
# Initializing an empty list
l = []
# Starting the loop to read the dataframes of every month
for i in range(len(df_names)):
# Selecting just the columns referring to the brand and price
data = pd.read_csv(df_names[i], usecols=['brand', 'price'])
# Grouping by brand and calculating the average price per brand
a = data.groupby('brand').mean()
# Appending the obtained DataFrame regarding the results of one month in the starting empty list
l.append(a)
# Concatenating every DataFrame of each month in one DataFrame that will not be memory expensive
t = pd.concat(l)
# Resetting the index because i do not want to work with MultiIndex
rt = t.reset_index()
# Grouping by brand the full DataFrame regarding all months and calculating the mean price
u = rt.groupby('brand').mean()
# Returning the Dataframe, the minimum and the maximum to compare the results
return u, u.min(), u.max()
# Function that is used to reduce the number of data we want to analyze for the RQ4
def make_df_purchase(df_names, months):
df_purchase = {}
# Reading the data of all months and selecting only purchase events from the DataFrame
for i in range(len(df_names)):
data = pd.read_csv(df_names[i], usecols=['brand', 'price', 'event_type'])
df_purchase[months[i]] = data[data['event_type'] == 'purchase']
# Appending the results of every months to a dictionary
return df_purchase
# Function that returns the profit of every brand in each month
def earning_per_month(df_purchase, months):
dict_earning = {}
# Calculating the earning per month of each brand grouping by brand and doing the sum of the prices of every sold
# product
for i in range(len(df_purchase)):
data = df_purchase[months[i]]
dict_earning[months[i]] = data.groupby('brand', as_index=False).sum()
return dict_earning
# Function that given a brand in input, returns the total profit for month of that brand
def brand_per_month(brand, dict_earning, months):
df_profit = {}
# For every month selecting the profit from the dictionary of earnings created before. If there is no profit for the
# selected brand, we set it equal to 0
for i in range(len(months)):
try:
df_profit[months[i]] = dict_earning[months[i]].loc[dict_earning[months[i]].brand == brand, 'price'].values[
0]
except IndexError:
df_profit[months[i]] = 0
return df_profit
# Function that given the earnings of every brand, returns the top 3 brands that have suffered the biggest losses
# between one month and the previous one
def find_3_worst_brand(dict_earning, months):
# Selecting the dictionary obtained from the total profits of the brands and then merging them in one DataFrame
# where on the columns we have the months and on the rows we have the brands. The values are the earnings of each
# brand for every month
data_frames = [dict_earning[months[i]] for i in range(len(dict_earning))]
df_merged = reduce(lambda left, right: pd.merge(left, right, on=['brand'],
how='outer'), data_frames)
df_merged.set_index('brand', inplace=True)
df_merged.set_axis(months, axis=1, inplace=True)
# Transposing the DataFrame and applying the pct_change to calculate the percentage change between every month
# and the month before
df_pct = df_merged.T.pct_change()
worst_brand = []
worst_value = []
worst_months = []
# Selecting the minimum of the percentage change(which means the bigger loss) in our DataFrame, the brand that
# corresponds to it and the month that refers to it. We append those values to the lists we defined before
for i in range(0, 3):
worst_brand.append(df_pct.min().sort_values().index[i])
worst_value.append(round(abs(df_pct.min().sort_values()[i]) * 100, 2))
L = list(df_pct[df_pct[worst_brand[i]] == df_pct.min().sort_values()[i]].index.values)
worst_months.append(''.join(L))
# Showing the result of the request
for j in range(0, 3):
        print('{} lost {}% between {} and the month before'.format(worst_brand[j], worst_value[j], worst_months[j]),
end=' \n')
return
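# Toy illustration of the pct_change step used above (hypothetical numbers):
#
#   pd.DataFrame({'apple': [100.0, 50.0], 'acme': [10.0, 20.0]},
#                index=['Oct', 'Nov']).pct_change()
#
#          apple  acme
#   Oct      NaN   NaN
#   Nov     -0.5   1.0
#
# A value of -0.5 means the brand lost 50% of its earnings relative to the
# previous month, which is exactly what the minimum of df_pct picks out.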
#RQ5
#Function that create a plot that for each day of the week shows the hourly average of visitors
def plot_hour_avg(df_names,months):
'''
create a plot
input:
-dataframe
-months
output:
-plot
'''
for i in range(len(df_names)):
df=pd.read_csv(df_names[i],parse_dates=['event_time'],usecols=['event_time','user_id'])
        # hourly average of visitors for each day of the week
        # (pandas dt.dayofweek: Monday=0 ... Sunday=6)
        days = ['MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY',
                'FRIDAY', 'SATURDAY', 'SUNDAY']
        colors = ['green', 'red', 'yellow', 'orange', 'violet', 'grey', 'royalblue']
        plt.figure(figsize=[10.0,5.0])
        for d in range(7):
            hourly = df[df.event_time.dt.dayofweek==d].groupby(df.event_time.dt.hour).user_id.count()
            plt.plot(hourly, '-o', color=colors[d], label=days[d])
plt.xlabel('HOUR')
plt.ylabel('VISITORS')
plt.title("Daily average - %s " %months[i])
plt.xticks(range(0,24))
plt.legend()
plt.show()
return
#RQ6
#Function that calculates the overall conversion rate of the products, creates the plot of the number of purchases by category and shows the conversion rate of each category in descending order
def conversion_rate(df_names,months):
"""
calculate overall conversion rate
plot of purchase by category
calculate conversion rate for each category
input:
- dataframe
- months
output:
- overall conversion rate for each month
- conversion rate for each category of each month
- plot of purchase by category of each month
"""
for i in range(len(df_names)):
dataset=pd.read_csv(df_names[i],usecols=['event_type','category_code'])
#NUMBER OF ALL PURCHASE PRODUCTS
purchase=dataset[dataset.event_type=='purchase']
totpurc=len(purchase)
#NUMBER OF ALL VIEW PRODUCTS
view=dataset[dataset.event_type=='view']
totview=len(view)
#OVERALL CONVERSION RATE OF STORE
cr=totpurc/totview
print ('Overall conversion rate of %s'%months[i])
print (cr)
        #CREATE A NEW COLUMN WITH THE SPLIT CATEGORY NAME
new = dataset['category_code'].str.split(".", expand=True)
dataset['category_name'] = new[0]
dataset.drop(columns=['category_code'], inplace=True)
#NUMBER OF PURCHASE FOR CATEGORY
purc_4_category=dataset[dataset.event_type=='purchase'].groupby('category_name').agg(purchase=('event_type','count'))
#NUMBER OF VIEW FOR CATEGORY
view_4_category=dataset[dataset.event_type=='view'].groupby('category_name').agg(view=('event_type','count'))
#PLOT OF NUMBER OF PURCHASE FOR CATEGORY
purc_4_category.plot.bar(figsize = (18, 7), title='Number of purchase of %s'%months[i])
plt.show()
#CONVERSION RATE FOR CATEGORY
cr_4_cat=(purc_4_category.purchase/view_4_category.view)
dec=cr_4_cat.sort_values(axis=0, ascending=False)
print ('Conversion rate of each category of %s'%months[i])
print(dec, end='\n')
return
#RQ7
#Function that demonstrates the Pareto's principle
def pareto(df_names,months):
"""
Apply Pareto's principle
input:
- dataframe
- months
output:
    - demonstration of whether Pareto's principle applies for each month
"""
for i in range(len(df_names)):
dataset=pd.read_csv(df_names[i],usecols=['user_id','event_type','price'])
#PURCHASE BY USERS
purchase_by_user=dataset[dataset.event_type == 'purchase'].groupby(dataset.user_id).agg(number_of_purchases=('user_id','count'),total_spent=('price','sum'))
purchase_by_user=purchase_by_user.sort_values('total_spent',ascending=False)
#20% OF USERS
user_20=int(len(purchase_by_user)*20/100)
purch_by_user20=purchase_by_user[:user_20]
#TOTAL SPENT BY 20% OF USERS
spent_by_20=purch_by_user20.agg('sum')
#TOTAL PROFIT OF STORE
profit=dataset[dataset.event_type == 'purchase'].groupby(dataset.event_type).agg(gain=('price','sum'))
        #PERCENTAGE OF TOTAL PROFIT COMING FROM THE TOP 20% OF USERS
        percent=int((float(spent_by_20.total_spent)/float(profit.gain))*100)
        print("%d%% of the profit for the month of %s comes from 20%% of the user's purchases"%(percent,months[i]))
        if (percent >= 80):
            print ("For the month of %s Pareto's principle holds." %months[i])
        else:
            print ("For the month of %s Pareto's principle doesn't hold." %months[i])
return
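# Worked toy example (hypothetical numbers): if the top 20% of users spent
# 8,200 out of a total monthly profit of 10,000, then
# percent = int(8200 / 10000 * 100) = 82 >= 80, so the principle holds.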
| 3.375 | 3 |
app_core.py | licaiwang/Line_Bot_Heroku | 1 | 12785845 | # linebotTest1
# ngrok http 5000
import random
import re
from flask import Flask, abort, render_template, request
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import (
ImageSendMessage,
LocationSendMessage,
MessageAction,
MessageEvent,
QuickReply,
QuickReplyButton,
StickerSendMessage,
TextMessage,
LocationMessage,
TextSendMessage,
)
import clawer
from weather import searchWeather, stickerSelect
from breakfast import selectBreakfast
from travel import allSight, searchShortlest
app = Flask(__name__)
line_bot_api = LineBotApi(
"<KEY>
)
handler = WebhookHandler("ff0138a40953ad704412fb621ebbee64")
# 接收 LINE 的資訊
@app.route("/callback", methods=["POST"])
def callback():
signature = request.headers["X-Line-Signature"]
    # Get the message body the user sent
body = request.get_data(as_text=True)
try:
        # Dispatch the event to the handler
handler.handle(body, signature)
except InvalidSignatureError:
        # Return Bad Request (400)
abort(400)
    # Reply OK
return "ok"
# Decide what Line should reply with
@handler.add(MessageEvent, message=LocationMessage)
def handle_loc(event):
input_address = event.message.address
input_latitude = event.message.latitude
input_longitude = event.message.longitude
token = event.reply_token
loc = searchShortlest(input_address, input_latitude, input_longitude)
if loc:
message = [
TextSendMessage(text="為您推薦最近景點"),
LocationSendMessage(
title=loc[0], address=loc[2], latitude=loc[1][0], longitude=loc[1][1],
),
]
else:
message = TextSendMessage(text="查無最近景點!")
line_bot_api.reply_message(token, message)
@handler.add(MessageEvent, message=TextMessage)
# 加入一個 handle_message function
def handle_message(event):
"""
    Handle every incoming question and dispatch the answer
"""
input_text = event.message.text
token = event.reply_token
if isInvoice(input_text, token):
return
elif isTravel_2(input_text, token):
return
elif isWeather(input_text, token):
return
elif isTravel(input_text, token):
return
elif isBreakfast(input_text, token):
return
def isInvoice(input_text, token) -> bool:
"""
    Check whether this is an invoice-related question; if so, answer it
"""
if input_text == "@本期中獎號碼":
line_bot_api.reply_message(token, TextSendMessage(text=clawer.askPrize(0)))
return True
if input_text == "@前期中獎號碼":
line_bot_api.reply_message(token, TextSendMessage(text=clawer.askPrize(1)))
return True
if input_text == "發票獎金":
line_bot_api.reply_message(token, TextSendMessage(text=clawer.PRIZE))
return True
else:
number = re.sub("\\D", "", input_text)
if number != "" and len(number) == 3:
(isWin, content) = clawer.checkWinPrize(number)
# 0 - 沒中, 1 當期有中 , 2 前期有中
if isWin:
try:
message = [
TextSendMessage(text=content),
TextSendMessage(text=clawer.askPrize(isWin - 1)),
]
line_bot_api.reply_message(token, message)
return True
except:
return False
else:
line_bot_api.reply_message(token, TextSendMessage(text=content))
return True
return False
def isWeather(input_text, token):
"""
    Check whether this is a weather-related question; if so, answer it
"""
if "天氣" in input_text:
        result = searchWeather(input_text)
        if result:
            (loc, date, inf, tmp) = result
if "週" in input_text:
res = ""
for i in range(7):
res += f"{date[i]}:\n白天:{inf[i][0]}\n溫度{tmp[i][0]}\n晚上:{inf[i][1]}\n溫度{tmp[i][1]}\n\n"
line_bot_api.reply_message(token, TextSendMessage(text=res))
return True
else:
url = stickerSelect(inf[0][0])
            response = [
TextSendMessage(
text=f"{loc}今天的天氣狀況:\n白天:{inf[0][0]},溫度{tmp[0][0]}\n晚上:{inf[0][1]},溫度{tmp[0][1]}"
),
ImageSendMessage(original_content_url=url, preview_image_url=url),
]
            line_bot_api.reply_message(token, response)
return True
elif "天氣" in input_text:
chose = TextSendMessage(
text="哪裡的天氣呢?",
quick_reply=QuickReply(
items=[
QuickReplyButton(action=MessageAction(label="宜蘭", text="宜蘭天氣")),
QuickReplyButton(action=MessageAction(label="台北", text="台北天氣")),
QuickReplyButton(action=MessageAction(label="台南", text="台南天氣")),
]
),
)
line_bot_api.reply_message(token, chose)
return isTravel_2(input_text, token)
return False
def isBreakfast(input_text, token):
if "早餐" in input_text:
line_bot_api.reply_message(token, TextSendMessage(text=selectBreakfast()))
return True
return False
def isTravel(input_text, token):
(loc_1, loc_2) = allSight(input_text)
if loc_2:
recommand = random.choice(loc_2)
message = [
TextSendMessage(text="為您隨機推薦景點"),
LocationSendMessage(
title=recommand[0],
address=recommand[2],
latitude=recommand[1][0],
longitude=recommand[1][1],
),
]
line_bot_api.reply_message(token, message)
return True
elif loc_1:
recommand = random.choice(loc_1)
message = [
TextSendMessage(text="為您隨機推薦景點"),
LocationSendMessage(
title=recommand[0],
address=recommand[2],
latitude=recommand[1][0],
longitude=recommand[1][1],
),
]
line_bot_api.reply_message(token, message)
return True
elif "景點" in input_text:
chose = TextSendMessage(
text="哪邊的景點呢?",
quick_reply=QuickReply(
items=[
QuickReplyButton(action=MessageAction(label="北部", text="@北")),
QuickReplyButton(action=MessageAction(label="中部", text="@中")),
QuickReplyButton(action=MessageAction(label="南部", text="@南")),
QuickReplyButton(action=MessageAction(label="東部", text="@東")),
QuickReplyButton(action=MessageAction(label="離島", text="@離島")),
]
),
)
line_bot_api.reply_message(token, chose)
return isTravel_2(input_text, token)
return False
def isTravel_2(input_text, token):
if "@北" in input_text:
chose = TextSendMessage(
text="北部地區",
quick_reply=QuickReply(
items=[
QuickReplyButton(action=MessageAction(label="基隆", text="基隆")),
QuickReplyButton(action=MessageAction(label="台北", text="台北")),
QuickReplyButton(action=MessageAction(label="新北", text="新北")),
QuickReplyButton(action=MessageAction(label="桃園", text="桃園")),
QuickReplyButton(action=MessageAction(label="新竹", text="新竹")),
QuickReplyButton(action=MessageAction(label="苗栗", text="苗栗")),
]
),
)
line_bot_api.reply_message(token, chose)
return True
if "@中" in input_text:
chose = TextSendMessage(
text="中部地區",
quick_reply=QuickReply(
items=[
QuickReplyButton(action=MessageAction(label="台中", text="台中")),
QuickReplyButton(action=MessageAction(label="彰化", text="彰化")),
QuickReplyButton(action=MessageAction(label="南投", text="南投")),
]
),
)
line_bot_api.reply_message(token, chose)
return True
if "@南" in input_text:
chose = TextSendMessage(
text="南部地區",
quick_reply=QuickReply(
items=[
QuickReplyButton(action=MessageAction(label="雲林", text="雲林")),
QuickReplyButton(action=MessageAction(label="嘉義", text="嘉義")),
QuickReplyButton(action=MessageAction(label="台南", text="台南")),
QuickReplyButton(action=MessageAction(label="高雄", text="高雄")),
QuickReplyButton(action=MessageAction(label="屏東", text="屏東")),
]
),
)
line_bot_api.reply_message(token, chose)
return True
if "@東" in input_text:
chose = TextSendMessage(
text="東部地區",
quick_reply=QuickReply(
items=[
QuickReplyButton(action=MessageAction(label="台東", text="台東")),
QuickReplyButton(action=MessageAction(label="花蓮", text="花蓮")),
QuickReplyButton(action=MessageAction(label="宜蘭", text="宜蘭")),
]
),
)
line_bot_api.reply_message(token, chose)
return True
if "@離島" in input_text:
chose = TextSendMessage(
text="外島地區",
quick_reply=QuickReply(
items=[
QuickReplyButton(action=MessageAction(label="澎湖", text="澎湖")),
QuickReplyButton(action=MessageAction(label="金門", text="金門")),
QuickReplyButton(action=MessageAction(label="馬祖", text="連江")),
]
),
)
line_bot_api.reply_message(token, chose)
return True
    return False
if __name__ == "__main__":
app.run()
| 2.40625 | 2 |
mango-python/bdgenomics/mango/test/notebook_test.py | heuermh/mango | 120 | 12785846 | <reponame>heuermh/mango
#
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdgenomics.mango.test import SparkTestCase
from bdgenomics.adam.adamContext import ADAMContext
class NotebookTest(SparkTestCase):
def test_example(self):
# these variables are read into mango-python.py
spark = self.ss
testMode = True
alignmentFile = self.exampleFile("chr17.7500000-7515000.sam")
variantFile = self.exampleFile("snv.chr17.7502100-7502500.vcf")
genotypeFile = self.exampleFile("genodata.v3.vcf")
featureFile = self.exampleFile("chr17.582500-594500.bed")
# this file is converted from ipynb in make test
testFile = self.exampleFile("notebooks/mango-pileup.py")
exec(open(testFile).read())
def test_coverage_example(self):
# these variables are read into mango-python.py
spark = self.ss
testMode = True
alignmentFile = self.exampleFile("chr17.7500000-7515000.sam")
# this file is converted from mango-python.coverage.ipynb in the Makefile
testCoverageFile = self.exampleFile("notebooks/mango-python-coverage.py")
exec(open(testCoverageFile).read())
def test_alignment_example(self):
# these variables are read into mango-python.py
spark = self.ss
testMode = True
alignmentFile = self.exampleFile("chr17.7500000-7515000.sam")
# this file is converted from mango-python-alignment.ipynb in the Makefile
testAlignmentFile = self.exampleFile("notebooks/mango-python-alignment.py")
exec(open(testAlignmentFile).read())
def test_variants_example(self):
# these variables are read into mango-python.py
spark = self.ss
testMode = True
vcfFile = self.exampleFile("genodata.v3.vcf")
# this file is converted from mango-python-alignment.ipynb in the Makefile
testVariantFile = self.exampleFile("notebooks/mango-python-variants.py")
exec(open(testVariantFile).read())
| 1.8125 | 2 |
setup.py | amstan/jackclient-python | 0 | 12785847 | <filename>setup.py
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
# "import" __version__
for line in open("jack.py"):
if line.startswith("__version__"):
exec(line)
break
setup(
name="JACK-Client",
version=__version__,
py_modules=["jack"],
install_requires=['cffi'],
author="<NAME>",
author_email="<EMAIL>",
description="JACK Audio Connection Kit (JACK) Client for Python",
long_description=open("README.rst").read(),
license="MIT",
keywords="JACK audio low-latency multi-channel".split(),
url="http://jackclient-python.rtfd.org/",
platforms="any",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Multimedia :: Sound/Audio",
],
tests_require=['pytest'],
cmdclass={'test': PyTest},
)
| 1.96875 | 2 |
uwb/export.py | unfoldingWord-dev/tools | 6 | 12785848 | #!/usr/bin/env python2
# -*- coding: utf8 -*-
#
# Copyright (c) 2015 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# <NAME> <<EMAIL>>
"""
This script exports a Bible into the given format from the API.
Requires that https://github.com/Door43/USFM-Tools be checked out to
/var/www/vhosts/door43.org/USFM-Tools or be on the path
"""
import os
import re
import sys
import json
import codecs
import shutil
import argparse
import datetime
import urllib2
import tempfile
CatalogJSON='https://api.unfoldingword.org/uw/txt/2/catalog.json'
from usfm_tools.transform import UsfmTransform
def main(langcode, ver, books, format, outfile):
sys.stdout = codecs.getwriter('utf8')(sys.stdout);
# Get the JSON
catalog = json.load(urllib2.urlopen(CatalogJSON))
bible=None
for item in catalog['cat']:
if item['slug'] == 'bible':
bible = item
break
lang=None
for language in bible['langs']:
if language['lc'] == langcode:
lang=language
break
if lang is None:
print "The language code {0} is not found in the catalog at {1}. Exiting...".format(langcode, CatalogJSON)
sys.exit(1)
bible=None
for version in lang['vers']:
if version['slug'] == ver:
bible=version
break
if bible is None:
print "The Bible version {0} for language {1} is not found in the catalog at {2}. Exiting...".format(ver, langcode, CatalogJSON)
sys.exit(1)
sources = []
for source in bible['toc']:
if books is None or source['slug'] in books:
sources += [source['src']]
if not sources:
print "No sources were found for langage {0} of version {1} in {2}. Exiting...".format(langcode, ver, CatalogJSON)
sys.exit(1)
tmpdir = tempfile.mkdtemp(prefix='uwb-{0}-{1}-'.format(ver, langcode))
if os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
os.makedirs(tmpdir+"/sources")
for source in sources:
f = urllib2.urlopen(source)
with open(tmpdir+"/sources/"+os.path.basename(source), "wb") as local_file:
local_file.write(f.read())
if format == 'html':
UsfmTransform.buildSingleHtml(tmpdir+"/sources", tmpdir, "bible")
shutil.copyfile(tmpdir+'/bible.html', outfile);
if format == 'tex':
UsfmTransform.buildConTeXt(tmpdir+"/sources", tmpdir, "bible")
shutil.copyfile(tmpdir+'/working/tex/bible.tex', outfile);
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-l', '--lang', dest='langcode', default=False, required=True, help="Language Code")
parser.add_argument('-v', '--version', dest='ver', default='udb', required=True, help="Bible Version")
parser.add_argument('-b', '--book', dest='books', nargs='+', default=None, required=False, help="Bible Book(s)")
parser.add_argument('-f', '--format', dest='format', default='html', required=False, help='Format')
parser.add_argument('-o', '--outfile', dest='outfile', default=False, required=True, help="Output file")
args = parser.parse_args(sys.argv[1:])
main(args.langcode, args.ver, args.books, args.format, args.outfile)
### chown -R syncthing:syncthing /var/www/vhosts/api.unfoldingword.org/httpdocs/
| 2.796875 | 3 |
normalizing_flows/flows/affine/planar.py | TanguyUrvoy/normalizing-flows | 0 | 12785849 | <filename>normalizing_flows/flows/affine/planar.py
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from normalizing_flows.flows import AmortizedTransform
class Planar(AmortizedTransform):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# define nonlinearity function
self.h = lambda x: tf.math.tanh(x)
self.dh = lambda x: 1.0 - tf.square(tf.tanh(x))
def _alpha(self, u, w):
wu = tf.matmul(w, u)
m = -1 + tf.nn.softplus(wu)
return m - wu
    def _u_hat(self, u, w):
        # reparameterize u so that w^T u_hat >= -1, which keeps the planar
        # transformation invertible (Rezende & Mohamed, 2015)
        alpha = self._alpha(u, w)
        alpha_w = alpha*w / tf.reduce_sum(w**2.0)
        return u + tf.transpose(alpha_w, (0,2,1))
def _wzb(self, w, z, b):
wz = tf.matmul(w, z) # (b, 1, 1)
return wz + b
def _forward(self, z, args: tf.Tensor):
"""
Computes the forward pass of the transformation: z' = z + uh(wz + b)
Tensor shapes
z : (batch_size, d)
args : (batch_size, 2*d + 1)
"""
# set up parameters
d = tf.shape(z)[1]
u, w, b = args[:,:d], args[:,d:-1], args[:,-1]
u = tf.reshape(u, (-1, d, 1))
w = tf.reshape(w, (-1, 1, d))
b = tf.reshape(b, (-1, 1, 1))
z = tf.expand_dims(z, axis=-1)
# compute forward pass z_k -> z_k+1
wzb = self._wzb(w, z, b) # (batch_size, 1)
u_hat = self._u_hat(u, w)
z_ = z + tf.multiply(u_hat, self.h(wzb))
# compute log det jacobian
dh_dz = tf.multiply(self.dh(wzb), w) # (batch_size, 1, d)
r = 1.0 if self.use_residual else 0.0
ldj = tf.math.log(tf.math.abs(r + tf.matmul(dh_dz, u_hat)))
return tf.squeeze(z_, axis=-1), ldj
def _param_count(self, shape):
d = shape[-1]
return 2*d + 1
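# Minimal shape sketch (illustration only; follows the parameter layout that
# _forward above expects, args = [u (d values), w (d values), b (1 value)],
# and assumes the AmortizedTransform base class needs no extra constructor
# arguments):
#
#   d = 2
#   z = tf.random.normal((8, d))
#   args = tf.random.normal((8, 2 * d + 1))
#   z_new, ldj = Planar()._forward(z, args)   # z_new: (8, d), ldj: (8, 1, 1)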
| 2.296875 | 2 |
appengine/findit/services/test/resultdb_util_test.py | xswz8015/infra | 0 | 12785850 | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
from services import parameters
from services import resultdb
from waterfall.test import wf_testcase
from go.chromium.org.luci.resultdb.proto.v1 import (common_pb2, test_result_pb2)
from infra_api_clients.swarming import swarming_util
from services import resultdb
from services import resultdb_util
class ResultDBTest(wf_testcase.WaterfallTestCase):
@mock.patch.object(
swarming_util,
'GetInvocationNameForSwarmingTask',
return_value="inv_name")
@mock.patch.object(resultdb, 'query_resultdb')
def testGetFailedTestInStep(self, mock_result_db, *_):
failed_step = parameters.TestFailedStep()
failed_step.swarming_ids = ["1", "2"]
mock_result_db.side_effect = [
[
test_result_pb2.TestResult(
test_id="test_id_1",
tags=[
common_pb2.StringPair(key="test_name", value="test_id_1"),
])
],
[
test_result_pb2.TestResult(
test_id="test_id_2",
tags=[
common_pb2.StringPair(key="test_name", value="test_id_2"),
])
],
]
test_results = resultdb_util.get_failed_tests_in_step(failed_step)
self.assertEqual(len(test_results.test_results), 2)
failed_step.swarming_ids = []
test_results = resultdb_util.get_failed_tests_in_step(failed_step)
self.assertIsNone(test_results)
@mock.patch.object(
swarming_util, 'GetInvocationNameForSwarmingTask', return_value=None)
def testGetFailedTestInStepWithNoInvocationName(self, *_):
failed_step = parameters.TestFailedStep()
failed_step.swarming_ids = ["1", "2"]
test_results = resultdb_util.get_failed_tests_in_step(failed_step)
self.assertIsNone(test_results)
| 1.90625 | 2 |