filename | text |
---|---|
the-stack_106_29683 | """
Find the largest number in a list of numbers.
Algorithm (example list: [170, 160, 180, 165]):
Assume the first element is the maximum.
Compare the assumed maximum with the second element; if that element is larger, it becomes the new assumed maximum.
Compare the assumed maximum with the third element; if that element is larger, it becomes the new assumed maximum.
Compare the assumed maximum with the fourth element; if that element is larger, it becomes the new assumed maximum.
At the end, the assumed maximum is the actual maximum.
"""
list01 = [170, 160, 180, 165]
max_value = list01[0]
for i in range(1, len(list01)):  # i takes the values 1, 2, 3
if max_value < list01[i]:
max_value = list01[i]
print(max_value) |
the-stack_106_29684 | from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import example_helpers
import drms
# Series name, timespan and wavelength
series = 'aia.lev1_euv_12s'
series_lev1 = 'aia.lev1'
wavelen = 335
#tsel = '2015-01-01T00:00:01Z/1h'
#tsel = '2015-01-01T00:00:01Z/1d'
#tsel = '2015-01-01T00:00:01Z/1d@60s'
#tsel = '2015-01-01T00:00:01Z/7d@1h'
#tsel = '2015-01-01T00:00:01Z/30d@6h'
#tsel = '2015-01-01T00:00:01Z/100d@1d'
tsel = '2014-01-01T00:00:01Z/365d@1d'
# DRMS query string
qstr = '%s[%s][%d]' % (series, tsel, wavelen)
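# e.g. qstr == 'aia.lev1_euv_12s[2014-01-01T00:00:01Z/365d@1d][335]' for the values above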
# Some keywords we are interested in; you can use c.keys(series) to get a
# list of all available keywords of a series.
keys = ['T_REC', 'T_OBS', 'DATAMIN', 'DATAMAX', 'DATAMEAN', 'DATARMS',
'DATASKEW', 'DATAKURT', 'QUALITY']
# Create DRMS client, uses JSOC baseurl by default, set debug=True to see the
# DRMS query URLs.
c = drms.Client(debug=False)
# Get detailed information about the series. Some keywords from
# aia.lev1_euv_12s are links to keywords in aia.lev1 and unfortunately some
# entries (like note) are missing for linked keywords, so we are using the
# entries from aia.lev1 in this case.
print('Querying series info...')
si = c.info(series)
si_lev1 = c.info(series_lev1)
for k in keys:
linkinfo = si.keywords.loc[k].linkinfo
if linkinfo is not None and linkinfo.startswith('lev1->'):
note_str = si_lev1.keywords.loc[k].note
else:
note_str = si.keywords.loc[k].note
print('%10s : %s' % (k, note_str))
# Get keyword values for the selected timespan and wavelength
print('Querying keyword data...\n -> %s' % qstr)
res = c.query(qstr, key=keys)
print(' -> %d lines retrieved.' % len(res))
# Only use entries with QUALITY==0
res = res[res.QUALITY == 0]
print(' -> %d lines after QUALITY selection.' % len(res))
# Convert T_REC strings to datetime and use it as index for the series
res.index = drms.to_datetime(res.T_REC)
# Create some simple plots
ax = res[['DATAMIN', 'DATAMAX', 'DATAMEAN', 'DATARMS', 'DATASKEW']].plot(
figsize=(8, 10), subplots=True)
ax[0].set_title(qstr, fontsize='medium')
plt.tight_layout()
plt.show()
|
the-stack_106_29687 | #!/usr/bin/env python3
# Add the current folder to PYTHONPATH by Yiming
import os
import sys
sys.path.append(
os.path.abspath(
os.path.join(
os.path.abspath(os.path.join(os.getcwd(),os.pardir)), os.pardir)))
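# note: the path appended above is the grand-parent of the current working
# directory (presumably the repository root that contains the `baselines` package)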
from baselines.common.cmd_util import gym_ctrl_arg_parser, make_gym_control_env
from baselines.common import tf_util as U
from baselines import logger
def train(env_id, num_timesteps, seed):
from baselines.nac_fisher import mlp_policy, nac_fisher_simple
U.make_session(num_cpu=1).__enter__()
def policy_fn(name, ob_space, ac_space):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2)
env = make_gym_control_env(env_id, seed)
nac_fisher_simple.learn(env,policy_fn,
max_timesteps=num_timesteps,
timesteps_per_actorbatch=2048,
clip_param=0.2, entcoeff=0.0,
optim_epochs=10, optim_stepsize=2e-4, optim_batchsize=64,
gamma=0.99, lam=0.95, shift=0, schedule='linear'
)
env.close()
def main():
args = gym_ctrl_arg_parser().parse_args()
logger.configure(
format_strs=['stdout', 'log', 'csv'], log_suffix = "NAC_Fisher-"+args.env)
logger.log("Algorithm: NAC_Fisher-"+args.env)
import random
args.seed += random.randint(0, 2**32 - 1)
logger.log("Algorithm: SEED-"+str(args.seed))
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)
if __name__ == '__main__':
main()
|
the-stack_106_29688 | # ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import nemo
from nemo.core.neural_types import ChannelType, NeuralType
from tests.common_setup import NeMoUnitTest
class NeuralModulesTests(NeMoUnitTest):
def test_call_TaylorNet(self):
x_tg = nemo.core.neural_modules.NmTensor(
producer=None, producer_args=None, name=None, ntype=NeuralType(('B', 'D'), ChannelType())
)
tn = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
# note that the real port name "x" was used
y_pred = tn(x=x_tg)
self.assertEqual(y_pred.producer, tn)
self.assertEqual(y_pred.producer_args.get("x"), x_tg)
def test_simplest_example_chain(self):
data_source = nemo.backends.pytorch.tutorials.RealFunctionDataLayer(n=10000, batch_size=1)
trainable_module = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
loss = nemo.backends.pytorch.tutorials.MSELoss()
x, y = data_source()
y_pred = trainable_module(x=x)
loss_tensor = loss(predictions=y_pred, target=y)
# check producers' bookkeeping
self.assertEqual(loss_tensor.producer, loss)
self.assertEqual(loss_tensor.producer_args, {"predictions": y_pred, "target": y})
self.assertEqual(y_pred.producer, trainable_module)
self.assertEqual(y_pred.producer_args, {"x": x})
self.assertEqual(y.producer, data_source)
self.assertEqual(y.producer_args, {})
self.assertEqual(x.producer, data_source)
self.assertEqual(x.producer_args, {})
|
the-stack_106_29691 | from turtle import Turtle
from functions import setupScreen, gradient_color, format_color
screen = setupScreen(720,1280)
tl = Turtle()
tl.speed(0)
tl.hideturtle()
def run():
tl.penup()
tl.back(150)
tl.pendown()
color_ini = [0xff, 0x00, 0x99]
color_fim = [0x42, 0x86, 0xf4]
for k in range(3):
color_ini = [0x42, 0x86, 0xf4]
color_fim = [0xff, 0x00, 0x99]
for i in range(80):
color_ini = gradient_color(color_ini, color_fim)
tl.pencolor(format_color(color_ini))
tl.circle(300,45)
tl.left(45)
tl.circle(300,45)
tl.left(100+k)
screen.exitonclick()
screen.onkey(run, "r")
screen.listen()
screen.mainloop() |
the-stack_106_29695 | # ----------------------------------------------------------------------------
# Copyright (c) 2020, Franck Lejzerowicz.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import re
import random
import datetime
import itertools
import pandas as pd
from os.path import dirname, isdir, isfile, splitext
from routine_qiime2_analyses.analyses_prep import AnalysisPrep
from routine_qiime2_analyses._routine_q2_cmds import (
get_case, get_new_meta_pd, run_import, songbird_cmd,
)
from routine_qiime2_analyses._routine_q2_metadata import (
rename_duplicate_columns, get_train_perc_from_numeric,
get_cat_vars_and_vc, make_train_test_from_cat
)
from routine_qiime2_analyses._routine_q2_io_utils import (
read_meta_pd, get_analysis_folder, filter_mb_table, filter_non_mb_table,
write_filtered_tsv
)
from routine_qiime2_analyses._routine_q2_songbird import (
get_songbird_dicts, get_unique_filterings
)
class DiffModels(object):
def __init__(self, config, project) -> None:
self.cmds = {}
self.config = config
self.project = project
if config.diff_models:
songbird_dicts = get_songbird_dicts(config.diff_models)
self.songbird_models = songbird_dicts[0]
self.songbird_filtering = songbird_dicts[1]
self.unique_filtering = get_unique_filterings(songbird_dicts[1])
self.params = songbird_dicts[2]
self.models_baselines = songbird_dicts[3]
self.models = {}
self.models_issues = {}
self.songbird_datasets = songbird_dicts[4]
self.songbird_subsets = songbird_dicts[5]
self.songbirds = pd.DataFrame(columns=[
'dataset', 'is_mb', 'filter', 'prevalence', 'abundance'])
self.params_list = [
'train', 'batches', 'learns', 'epochs', 'diff_priors',
'thresh_feats', 'thresh_samples', 'summary_interval']
self.q2s_pd = pd.DataFrame()
self.songbird_pd = pd.DataFrame()
def merge_subsets_apply(self):
subsets_fp = [
[dataset, var, subset, get_case(subset, var), '']
for var, subsets in self.songbird_subsets.items()
for subset in subsets
for dataset in self.songbirds.dataset.unique()]
if subsets_fp:
subsets = pd.DataFrame(
subsets_fp, columns=['dataset', 'variable', 'factors',
'subset', 'pair'])
self.songbirds = self.songbirds.merge(
subsets, on=['dataset'], how='outer')
def get_songbirds_filts(self, project):
filts_df = []
for (dat, is_mb), filts_dats in self.unique_filtering.items():
if dat not in project.datasets:
continue
for (filt, prev, abund) in filts_dats:
filts_df.append([dat, is_mb, filt, prev, abund])
if filts_df:
self.songbirds = pd.DataFrame(filts_df, columns=[
'dataset', 'is_mb', 'filter', 'prevalence', 'abundance'])
def prep_songbirds(self, mmvec_pd, project):
self.get_songbirds_filts(project)
self.merge_subsets_apply()
self.make_datasets_paths()
self.merge_mmvecs(mmvec_pd)
def merge_mmvecs(self, mmvec_pd):
mmvecs = []
for row in mmvec_pd.values:
pair, filt, subset, dat1, dat2, prev1, abun1, prev2, abun2 = row[:9]
meta_common_fp = row[10]
omic1_common_qza = row[13]
omic2_common_qza = row[14]
mmvecs.append([dat1, filt, prev1, abun1, subset, pair,
omic1_common_qza, omic2_common_qza, meta_common_fp])
mmvecs.append([dat2, filt, prev2, abun2, subset, pair,
omic1_common_qza, omic2_common_qza, meta_common_fp])
if mmvecs and self.songbirds.shape[0]:
self.songbirds.drop(
columns=['is_mb', 'variable', 'factors'],
inplace=True)
self.songbirds = pd.concat([
self.songbirds,
pd.DataFrame(mmvecs, columns=self.songbirds.columns)])
def make_datasets_paths(self):
cmds = {}
self.get_datasets_paths()
if self.songbirds.shape[0]:
for (dataset, filter, subset), row in self.songbirds.groupby(
['dataset', 'filter', 'subset']):
row_d = row.iloc[0, :].to_dict()
tsv, qza, meta = row_d['tsv'], row_d['qza'], row_d['meta']
data = self.project.datasets[dataset]
variable, factors = row_d['variable'], row_d['factors']
meta_pd = get_new_meta_pd(
data.metadata[0], subset, variable, factors)
meta_pd.to_csv(meta, index=False, sep='\t')
if isfile(tsv) and isfile(qza):
continue
tsv_pd = data.data[0][meta_pd.sample_name.tolist()]
preval, abund = row_d['prevalence'], row_d['abundance']
if row_d['is_mb']:
tsv_pd, res = filter_mb_table(preval, abund, tsv_pd)
else:
tsv_pd, res = filter_non_mb_table(preval, abund, tsv_pd)
if self.config.force or not isfile(tsv):
write_filtered_tsv(tsv, tsv_pd)
if self.config.force or not isfile(qza):
cmd = run_import(tsv, qza, 'FeatureTable[Frequency]')
cmds.setdefault(dataset, []).append(cmd)
self.register_command('songbird_imports', cmds)
def get_datasets_paths(self):
paths = []
if self.songbirds.shape[0]:
for r, row in self.songbirds.iterrows():
dataset = row['dataset']
filter = row['filter']
subset = row['subset']
for analysis in ['mmvec', 'songbird']:
odir = get_analysis_folder(
self.config.i_datasets_folder, '%s/datasets/%s/%s' % (
analysis, dataset, subset))
rad = '%s_%s' % (dataset, filter)
tsv = '%s/tab_%s.tsv' % (odir, rad)
qza = '%s.qza' % splitext(tsv)[0]
meta = '%s/meta_%s.tsv' % (odir, rad)
if isfile(tsv) and isfile(qza) and isfile(meta):
paths.append([tsv, qza, meta])
break
elif analysis == 'songbird':
paths.append([tsv, qza, meta])
if paths:
self.songbirds = pd.concat([
self.songbirds,
pd.DataFrame(paths, columns=['tsv', 'qza', 'meta'])], axis=1)
@staticmethod
def get_traintests(meta_fp, new_meta_pd, vars, train, train_col):
if train.isdigit() or train.replace('.', '').isdigit():
train_perc = get_train_perc_from_numeric(train, new_meta_pd)
vars_pd = new_meta_pd[vars].copy()
cat_vars, cat_pd, vc, rep_d = get_cat_vars_and_vc(vars, vars_pd)
if cat_vars and vc.size < cat_pd.shape[0] * 0.5:
train_samples = make_train_test_from_cat(
cat_pd, vc, train_perc, meta_fp, cat_vars, train_col, rep_d)
else:
train_samples = random.sample(
new_meta_pd.index.tolist(),
k=int(train_perc * new_meta_pd.shape[0]))
return train_samples
return None
def make_train_test_column(self, meta_fp, train_test_d,
meta_pd, dat) -> dict:
train_tests = {}
train = train_test_d['train']
meta_tt_pd = meta_pd.set_index('sample_name').copy()
if 'datasets' in train_test_d and dat in train_test_d['datasets']:
for tt, vars in train_test_d['datasets'][dat].items():
vars_pd = meta_tt_pd[vars].copy()
vars_pd = vars_pd.loc[~vars_pd.isna().any(1)]
vars_pd = rename_duplicate_columns(vars_pd)
trains = self.get_traintests(meta_fp, vars_pd, vars,
str(train), tt)
if trains:
train_tests[tt] = trains
return train_tests
def make_train_test(self):
if self.songbirds.shape[0]:
for _, sb in self.songbirds.groupby(
['dataset', 'filter', 'subset']):
d = sb.iloc[0, :].to_dict()
fps = ['dataset', 'tsv', 'qza', 'meta']
dat, tsv, qza, meta_fp = [d[x] for x in fps]
meta_subset = read_meta_pd(meta_fp)
train_tests = self.make_train_test_column(
meta_fp, self.config.train_test_dict, meta_subset, dat)
rewrite = False
meta_subset_cols = set(meta_subset.columns)
for train_col, train_samples in train_tests.items():
if train_col not in meta_subset_cols:
rewrite = True
meta_subset[train_col] = [
'Train' if x in set(train_samples) else
'Test' for x in meta_subset.sample_name.tolist()]
if self.config.force or rewrite:
meta_subset.to_csv(meta_fp, index=False, sep='\t')
def get_params_combinations(self):
"""Make a pandas data frame from the combinations
of songbird run/hyper-parameters. It includes the
handling of user-specified 'train_column', which
always takes precedence over the default 'n_examples'.
Returns
-------
params_pd : pd.DataFrame
Combinations of parameters as rows, and
individual parameters as columns.
"""
params = []
to_combine = [self.params[param] for param in self.params_list]
for params_combination in itertools.product(*to_combine):
params.append(params_combination)
params_pd = pd.DataFrame(params, columns=self.params_list).astype(str)
return params_pd
@staticmethod
def print_message_or_not(mess, m):
if m not in mess:
mess.add(m)
def process_params_combinations(
self,
dataset: str,
meta_pd: pd.DataFrame,
params_pd: pd.DataFrame,
mess: set):
"""Filter the combinations of parameters too remove
those involving unusable train/test splits, e.g. not
having the specified or too few samples therein.
Parameters
----------
dataset : str
Dataset
meta_pd : pd.DataFrame
Dataset metadata table.
params_pd : pd.DataFrame
Combinations of parameters (rows)
mess : set
Messages to print
"""
examples = []
valid_params = []
nsams = meta_pd.shape[0]
meta_cols = meta_pd.columns
for p, params in params_pd.iterrows():
train = params['train']
if train.replace('.', '').isdigit():
if float(train) < 0.1:
valid_params.append(p)
m = '\t[skip] "%s": train %s too low (%s)' % (
dataset, '%', train)
self.print_message_or_not(mess, m)
elif float(train) > 0.95:
valid_params.append(p)
m = '\t[skip] "%s": train %s too high (%s)' % (
dataset, '%', train)
self.print_message_or_not(mess, m)
else:
examples.append(int(nsams * (1 - float(train))))
else:
if train not in set(meta_cols):
valid_params.append(p)
m = '\t[skip] Training column "%s" not in metadata' % (
train)
self.print_message_or_not(mess, m)
else:
train_vc = meta_pd[train].value_counts()
if {'Train', 'Test'}.issubset(set(train_vc.index)):
ntrain = train_vc['Train']
if nsams < (1.2 * ntrain):
valid_params.append(p)
m = '\t[skip] "%s": %s samples for %s training ' \
'samples:' % (dataset, nsams, ntrain)
self.print_message_or_not(mess, m)
else:
valid_params.append(p)
m = '\t[skip] "%s": no TrainTest in column "%s"' % (
dataset, train)
self.print_message_or_not(mess, m)
if valid_params:
params_pd.drop(index=valid_params, inplace=True)
if examples:
params_pd['examples'] = examples
@staticmethod
def get_params_dir(params):
"""
Parameters
----------
params : pd.Series
Returns
-------
params_dir : str
"""
params_dir = 'filt_f%s_s%s/%s_%s_%s_%s_%s_%s' % (
str(params['thresh_feats']),
str(params['thresh_samples']),
str(params['batches']),
str(params['learns']),
str(params['epochs']),
str(params['diff_priors'].replace('.', '')),
str(params['train'].replace('.', '')),
str(params['summary_interval'].replace('.', ''))
)
return params_dir
@staticmethod
def get_out(
odir: str,
model_null: str) -> tuple:
"""
Parameters
----------
odir : str
Output directory for a mmvec model/null pair.
model_null : str
"model" or null""
Returns
-------
mod_nul_dir : str
mod_nul_rnk : str
mod_nul_ord : str
mod_nul_stt : str
"""
mod_nul_dir = '%s/%s' % (odir, model_null)
if not isdir(mod_nul_dir):
os.makedirs(mod_nul_dir)
mod_nul_rnk = '%s/ranks.tsv' % mod_nul_dir
mod_nul_ord = '%s/ordination.txt' % mod_nul_dir
mod_nul_stt = '%s/stats.qza' % mod_nul_dir
return mod_nul_dir, mod_nul_rnk, mod_nul_ord, mod_nul_stt
@staticmethod
def get_dat_pair_dir(dat, pair):
if pair:
dat_pair = '%s_%s' % (dat, pair)
pair_dir = '%s/%s' % (dat, pair)
else:
dat_pair = '%s/unpaired' % dat
pair_dir = '%s/unpaired' % dat
return dat_pair, pair_dir
@staticmethod
def get_main_dirs(
pair_dir, filt, subset, params_dir, model, config) -> tuple:
datdir = '%s/%s/%s/%s/%s' % (pair_dir, filt, subset,
params_dir, model)
odir = get_analysis_folder(config.i_datasets_folder,
'songbird/%s' % datdir)
new_qza = '%s/tab.qza' % odir
new_meta = '%s/metadata.tsv' % odir
return datdir, odir, new_qza, new_meta
@staticmethod
def get_out_paths(odir, bodir, model_baseline, baselines) -> dict:
if model_baseline in baselines:
bdiff_qza = ''
bstat = baselines[model_baseline]
bplot = ''
else:
bdiff_qza = '%s/differentials-baseline.qza' % bodir
bstat = '%s/differentials-stats-baseline.qza' % bodir
bplot = '%s/differentials-biplot-baseline.qza' % bodir
baselines[model_baseline] = bstat
out_paths = {
'diff': '%s/differentials.tsv' % odir,
'diff_qza': '%s/differentials.qza' % odir,
'stat': '%s/differentials-stats.qza' % odir,
'plot': '%s/differentials-biplot.qza' % odir,
'tens': '%s/tensorboard.qzv' % bodir,
'html': '%s/tensorboard.html' % bodir,
'bdiff_qza': bdiff_qza,
'bstat': bstat,
'bplot': bplot
}
return out_paths
@staticmethod
def write_new_meta(meta_pd, new_meta, meta_vars, drop, params):
meta_cols = set(meta_pd.columns)
if params['train'] in meta_cols:
meta_vars.add(params['train'])
new_meta_pd = meta_pd[
(['sample_name'] + [x for x in meta_vars if x in meta_cols])
].copy()
new_meta_pd = new_meta_pd.loc[~new_meta_pd.isna().any(1)]
new_meta_pd = rename_duplicate_columns(new_meta_pd)
if drop:
to_remove = pd.concat([
new_meta_pd[meta_var].isin(var_drop)
# new_meta_pd[meta_var.lower()].isin(var_drop)
for meta_var, var_drop in drop.items()
], axis=1).any(axis=1)
new_meta_pd = new_meta_pd.loc[~to_remove]
new_meta_pd.to_csv(new_meta, index=False, sep='\t')
return new_meta_pd.shape[0]
def summarize_songbirds(self):
q2s = []
songbird = get_analysis_folder(
self.config.i_datasets_folder, 'songbird')
for root, dirs, files in os.walk(songbird):
for fil in files:
if fil == 'tensorboard.html':
path = root + '/' + fil
diff = '%s/differentials.tsv' % dirname(root)
root_split = root.split('%s/' % songbird)[-1].split('/')
d, pr, fr, sb, sr, ps, ml, be = root_split
with open(path) as f:
for line in f:
if 'Pseudo Q-squared' in line:
ls = line.split(
'Pseudo Q-squared:</a></strong> ')
q2s.append([
pr, d, fr, sb, ml, sr, ps, be, diff,
float(ls[-1].split('<')[0])
])
if q2s:
self.q2s_pd = pd.DataFrame(q2s, columns=[
'pair', 'dataset', 'filter', 'subset', 'model',
'songbird_filter', 'parameters', 'baseline', 'differentials',
'Pseudo_Q_squared'])
q2s_fp = '%s/songbird_q2.tsv' % songbird
self.q2s_pd.to_csv(q2s_fp, index=False, sep='\t')
print('[%s]\t\t==> Written: %s' % (datetime.datetime.now(), q2s_fp))
def create_songbird_feature_metadata(self):
if self.q2s_pd.shape[0]:
q2_pd = self.q2s_pd.loc[(self.q2s_pd.pair == 'no_pair') &
(self.q2s_pd.Pseudo_Q_squared > 0)]
for dat, dataset_pd in q2_pd.groupby('dataset'):
dataset_sbs = []
for r, row in dataset_pd.iterrows():
pr = 'pair=%s' % row['pair']
fr = 'filter=%s' % row['filter']
sb = 'subset=%s' % row['subset']
ml = 'model=%s' % row['model']
st = 'sb_filt=%s' % row['songbird_filter']
ps = 'params=%s' % row['parameters']
be = 'baseline=%s' % row['baseline']
q2 = '[Q2=%s]' % row['Pseudo_Q_squared']
diffs = row['differentials']
sb_pd = pd.read_csv(diffs, index_col=0, sep='\t')
sb_pd.columns = ['%s %s: %s' % (
'__'.join([dat, pr, fr, sb, ml, st, ps, be])
, q2, x) for x in sb_pd.columns]
dataset_sbs.append(sb_pd)
if len(dataset_sbs):
dataset_sbs_pd = pd.concat(dataset_sbs, axis=1, sort=False)
odir = get_analysis_folder(self.config.i_datasets_folder,
'songbird/%s' % dat)
fpo_tsv = '%s/differentials_%s.tsv' % (odir, dat)
fpo_qza = '%s/differentials_%s.qza' % (odir, dat)
dataset_sbs_pd = dataset_sbs_pd.reset_index()
dataset_sbs_pd = dataset_sbs_pd.rename(
columns={
dataset_sbs_pd.columns.tolist()[0]: 'Feature ID'})
dataset_sbs_pd.to_csv(fpo_tsv, index=True, sep='\t')
run_import(fpo_tsv, fpo_qza, 'FeatureData[Differential]')
def get_songbird_pd(self, songbird):
self.songbird_pd = pd.DataFrame(
songbird, columns=[
'dataset', 'qza', 'meta', 'filter', 'params',
'subset', 'differentials', 'baseline', 'html', 'pair'
])
def check_metadata_models(self, meta, meta_pd, songbird_models):
models = {}
for model, formula_ in songbird_models.items():
vars = set()
drop = {}
formula = formula_.strip('"').strip("'")
# print()
# print()
# print('formula:', formula)
if formula.startswith('C('):
formula_split = formula.split('C(')[-1].rsplit(')', 1)
# print('formula_split:', formula_split)
formula_split_c = formula_split[0].split(',')[0].strip().strip()
# print('formula_split_c:', formula_split_c)
formula = 'C(%s)' % formula_split[0].replace(
formula_split_c, formula_split_c)
# print('formula:', formula)
formula += formula_split[1]
# print('formula:', formula)
if 'Diff' in formula:
levels = {formula_split_c: [
x.strip().strip('"').strip("'")
for x in formula.split(
"levels=['")[-1].split("']")[0].split(",")
]}
elif "Treatment('" in formula:
levels = {formula_split_c: [
formula.split("Treatment('")[-1].split("')")[0]
]}
# print('levels:', levels)
elif 'Treatment("' in formula:
levels = {formula_split_c: [
formula.split('Treatment("')[-1].split('")')[0]
]}
# print('levels:', levels)
vars.add(formula_split_c)
vars.update(set([x for x in re.split(
'[+/:*]', formula_split[1]) if x]))
# print("vars:", vars)
else:
formula_split = re.split('[+/:*]', formula)
formula = formula
vars.update(set([x for x in formula_split]))
levels = {}
common_with_md = set(meta_pd.columns.values) & vars
if sorted(vars) != sorted(common_with_md):
only_formula = sorted(vars ^ common_with_md)
issue = 'Songbird formula term(s) missing in metadata:\n\t' \
'%s\n\t [not used]: %s=%s' % (
', '.join(only_formula), model, formula)
self.models_issues.setdefault(issue, set()).add(meta)
# print(issue)
continue
if levels:
levels_set = sorted([x for x in meta_pd[
formula_split_c].unique() if str(x) != 'nan'])
# print('levels_set:', levels_set)
if 'Diff' in formula:
cur_levels = levels[formula_split_c]
common_levels = set(levels_set) & set(cur_levels)
only_meta = set(levels_set) ^ common_levels
only_model = set(cur_levels) ^ common_levels
if len(only_model):
issue = 'Songbird formula "Diff" factor(s) missing' \
' in metadata "%s": %s' % (
formula_split_c, list(only_model))
self.models_issues.setdefault(issue, set()).add(meta)
continue
if len(only_meta):
drop[formula_split_c] = list(only_meta)
issue = 'Songbird formula "Diff" factor(s) ' \
'incomplete for metadata "%s":\n' \
'\t -> skipping samples with %s' % (
formula_split_c, list(only_meta))
self.models_issues.setdefault(issue, set()).add(meta)
elif 'Treatment(' in formula:
levels = {formula_split_c: formula.split(
"Treatment('")[-1].split("')")[0]}
# print("levels (2):", levels)
if levels[formula_split_c] not in levels_set:
issue = 'Songbird formula "Treatment" factor(s)' \
' missing in metadata "%s" [%s]' % (
formula_split_c, levels)
self.models_issues.setdefault(issue, set()).add(meta)
continue
models[model] = [formula, vars, drop]
return models
def show_models_issues(self, mess):
if mess:
for m in sorted(mess):
print('[%s]' % datetime.datetime.now(), m)
if self.models_issues:
print('\n[%s] %s Issues with model (will not run) %s' % (
datetime.datetime.now(), '#'*10, '#'*10))
for model_issue, metas in self.models_issues.items():
print('-', model_issue)
for meta in metas:
print('\t', meta.replace(self.config.i_datasets_folder, ''))
print('#'*60)
def make_qurros(self) -> None:
"""Make qurro plots"""
cmds = {}
for r, row in self.songbird_pd.iterrows():
dat = row['dataset']
tax = self.project.datasets[dat].tax[-1]
qurro_qzv = '%s_qurro.qzv' % splitext(row['differentials'])[0]
if not isfile(qurro_qzv) and isfile(row['differentials']):
cmd = 'qiime qurro differential-plot'
cmd += ' --i-table %s' % row['qza']
cmd += ' --i-ranks %s.qza' % splitext(row['differentials'])[0]
cmd += ' --m-sample-metadata-file %s' % row['meta']
cmd += ' --m-feature-metadata-file %s' % tax
cmd += ' --o-visualization %s' % qurro_qzv
cmds.setdefault(dat, []).append(cmd)
self.register_command('qurro', cmds)
def songbird(self) -> None:
"""Main script for the creation of songbird jobs.
It iterates over the rows of the table created
upfront and over each combination of parameters
and collect the output info for potential reuse
in figure generation and post-analysis.
Parameters
----------
config : Class instance of AnalysesConfig
Contains all the routine analyses config info.
project
Datasets.
"""
mess = set()
songbird = []
dat_cmds, dat_fcmds, dat_bcmds = {}, {}, {}
params_pd = self.get_params_combinations()
for r, row in self.songbirds.iterrows():
qza, pair, meta_fp = row['qza'], row['pair'], row['meta']
dat, filt, subset = row['dataset'], row['filter'], row['subset']
if dat not in self.songbird_models:
continue
dat_pair, pair_dir = self.get_dat_pair_dir(dat, pair)
meta_pd = read_meta_pd(meta_fp)
models = self.check_metadata_models(
meta_fp, meta_pd, self.songbird_models[dat])
row_params_pd = params_pd.copy()
self.process_params_combinations(dat, meta_pd, row_params_pd, mess)
for p, params in row_params_pd.iterrows():
params_dir = self.get_params_dir(params)
baselines, model_baselines = {}, {'1': '1'}
for modx, model in enumerate(models.keys()):
formula, meta_vars, drop = models[model]
datdir, odir, new_qza, new_meta = self.get_main_dirs(
pair_dir, filt, subset, params_dir, model, self.config)
nsams = self.write_new_meta(
meta_pd, new_meta, meta_vars, drop, params)
if dat in self.models_baselines and model in \
self.models_baselines[dat]:
model_baselines = self.models_baselines[dat][model]
for model_baseline in model_baselines:
bformula = model_baselines[model_baseline]
bodir = get_analysis_folder(
self.config.i_datasets_folder,
'songbird/%s/b-%s' % (datdir, model_baseline))
out_paths = self.get_out_paths(
odir, bodir, model_baseline, baselines)
# convergence = self.check_stats_convergence(out_paths)
cmd, fcmd, bcmd = songbird_cmd(
qza, new_qza, new_meta, nsams, params, formula,
bformula, out_paths)
songbird.append([
dat, new_qza, meta_fp, filt, '%s_%s' % (
params_dir.replace('/', '__'), model),
subset, out_paths['diff'], model_baseline,
out_paths['html'], pair])
if cmd:
dat_cmds.setdefault(dat, []).append(cmd)
if fcmd:
dat_fcmds.setdefault(dat, []).append(fcmd)
if bcmd:
dat_bcmds.setdefault(dat, []).append(bcmd)
if songbird:
self.get_songbird_pd(songbird)
self.show_models_issues(mess)
self.register_command('songbird_filter', dat_fcmds)
self.register_command('songbird_baselines', dat_bcmds)
self.register_command('songbird', dat_cmds)
self.summarize_songbirds()
self.create_songbird_feature_metadata()
@staticmethod
def register_command(analysis, cmds):
AnalysisPrep.analyses_commands[analysis] = cmds
|
the-stack_106_29696 | import json
import numpy as np
from pycocotools import mask
from skimage import measure
import os, json
import cv2
import pandas as pd
from shutil import copyfile
import re
from imantics import Polygons, Mask
fishial_dataset = r'resources/new_part'
os.makedirs(fishial_dataset, exist_ok=True)
mask_dir = r'resources/old_data/train_label'
img_dir = r'resources/old_data/train'
list_path_img = os.listdir(img_dir)
list_path_mask = os.listdir(mask_dir)
count = 0
result_dict = {}
list_png = os.listdir(mask_dir)
for index, mask_path in enumerate(list_png):
title, ext = os.path.splitext(os.path.basename(mask_path))
match = re.finditer("([_]{1})", title)
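# the loop below keeps the part of the title before the LAST underscore as the base image name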
base_title = ""
for i in match:
base_title = title[:i.start(0)]
if mask_path in list_path_mask and base_title + ".jpeg" in list_path_img:
fullname_mask = os.path.join(mask_dir, mask_path)
fullname_image = os.path.join(img_dir, base_title + ".jpeg")
ground_truth_binary_mask = cv2.imread(fullname_mask, 0)
img_tmp = cv2.imread(fullname_image)
w, h, _ = img_tmp.shape
ground_truth_binary_mask = cv2.resize(ground_truth_binary_mask, (h, w))
new_size_w = int(w * 0.03)
new_size_h = int(h * 0.03)
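# crop a 3% margin from each side of both the mask and the image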
ground_truth_binary_mask = ground_truth_binary_mask[new_size_w: w - new_size_w, new_size_h: h - new_size_h]
img_tmp = img_tmp[new_size_w: w - new_size_w, new_size_h: h - new_size_h]
polygons = Mask(ground_truth_binary_mask).polygons()
title, ext = os.path.splitext(os.path.basename(fullname_mask))
x_array = []
y_array = []
for i in polygons.points[0]:
x_array.append(int(i[0]))
y_array.append(int(i[1]))
if len(x_array) < 8: continue
# cv2.circle(img_tmp, (int(i[0]), int(i[1])), 2, (0, 255, 0), -1)
# cv2.imshow('image', img_tmp)
# cv2.waitKey(0)
dst_path = os.path.join(fishial_dataset, os.path.basename(fullname_image))
copyfile(fullname_image, dst_path)
result_dict.update({
title: {
"fileref": "",
"size": w*h,
"filename": os.path.basename(fullname_image),
"base64_img_data": "",
"file_attributes": {
},
"regions": {
"0": {
"shape_attributes": {
"name": "polygon",
"all_points_x": x_array,
"all_points_y": y_array
},
"region_attributes": {
}
}
}
}
})
with open(os.path.join(fishial_dataset, 'via_region_data.json'), 'w') as fp:
json.dump(result_dict, fp)
|
the-stack_106_29698 | import speech_recognition as sr
import face_recognition as fc
import numpy as np
import pandas as pd
import cv2 as cv
import time as t
import vlc
import random as r
import pyttsx3 as sx
import wikipedia
from datetime import date
from googlesearch import search
import webbrowser as wb
import requests, json
import geocoder
from pynput.mouse import Button,Controller
v=cv.VideoCapture(0)
try:
db=pd.read_csv('jarvise_image.csv')
label=list(db['labels'])
d=np.array(db)
db_image=list(d[:,1:129])
db_inf=pd.read_csv('person_imformation.csv')
log=open('log.txt','r+')
log_data=log.read()
print("i'm ready")
except:
print('file not available')
label=[]
db_image=[]
db_inf=[]
def log_(data,jarvis=True):
if jarvis==False:
log.write(user+' :'+data+'\n')
else:
log.write('Jarvis :'+data+'\n')
def take_image():
r,live=v.read()
if r==True:
face=fc.face_locations(live)
if len(face)>0:
[x1,y1,x2,y2]=face[0]
cv.rectangle(live,(y2,x1),(y1,x2),(0,0,255),3)
cv.imshow('image',live)
cv.waitKey(4)
e=fc.face_encodings(live,face)[0]
return e
else:
return take_image()
def find_label():
try:
l=label[-1]+1
except:
l=0
return l
def save_(dataframe1,dataframe2):
if len(label)>0:
dataframe2.to_csv('person_imformation.csv',mode='a',header=False)
dataframe1.to_csv('jarvise_image.csv',mode='a',header=False)
else:
dataframe2.to_csv('person_imformation.csv')
dataframe1.to_csv('jarvise_image.csv')
def say(word):
engine=sx.init()
engine.setProperty('rate', 100) # Speed percent (can go over 100)
engine.setProperty('volume', 0.9)
engine.say(word)
engine.runAndWait()
def face_recognizer():
e=take_image()
res=fc.compare_faces(db_image,e)
if True in res:
index=label[res.index(True)]
return index
else:
print("i'm not recognizing you,come in front of camera to take few pictures")
l=find_label()
E=[]
n=[]
q=[]
a=[]
count=1
while count<=5:
e=take_image()
E.append(e)
count+=1
dataframe1=pd.DataFrame(E)
dataframe1['labels']=[l,l,l,l,l]
name,quali,age=input("enter name, qualification and age separated by ','").split(',')
n.append(name)
q.append(quali)
a.append(age)
dataframe2=pd.DataFrame({'name':n,'qualification':q,'age':a})
save_(dataframe1,dataframe2)
return l
def speech_recognizer():
global word
word=input('enter...')
return word
'''def speech_recognizer():
s=sr.Recognizer()
with sr.Microphone() as source:
print('speak...')
s.adjust_for_ambient_noise(source)
audio=s.listen(source)
try:
d=s.recognize_google(audio)
d=d.lower()
return d
except Exception as e:
print("coudn't recognize")
return speech_recognizer()'''
def vlc_player(word,player=''):
ps=list #previous state
track=['D:\Downloads\Video\(643) Tu Cheez Badi 4k Video song - YouTube.MKV',
'D:\Downloads\Video\(643) Official Video- Humnava Mere Song - Jubin Nautiyal - Manoj Muntashir - Rocky - Shiv - Bhushan Kumar - YouTube.MKV',
'D:\Downloads\Video\(643) Full Video- Tera Yaar Hoon Main - Sonu Ke Titu Ki Sweety - Arijit Singh Rochak Kohli - Song 2018 - YouTube.MKV',
'D:\Downloads\Video\(643) Yo Yo Honey Singh- DIL CHORI (Video) Simar Kaur, Ishers - Hans Raj Hans - Sonu Ke Titu Ki Sweety - YouTube.MKV,',
'D:\Downloads\Video\(643) Saiyaara - Full Song - Ek Tha Tiger - Salman Khan - Katrina Kaif - Mohit Chauhan - Taraannum Mallik - YouTube.MKV',
'D:\Downloads\Video\(643) Bom Diggy Diggy (VIDEO) - Zack Knight - Jasmin Walia - Sonu Ke Titu Ki Sweety - YouTube.MKV']
v=cv.VideoCapture(0)
fd=cv.CascadeClassifier(r'C:\Users\pankaj kumar\AppData\Local\Programs\Python\Python36\Lib\site-packages\cv2\data\haarcascade_frontalface_alt2.xml')
#while True:
status,image=v.read()
if status==True:
if 'open vlc' in word or 'play song' in word:
song=track[r.randint(0,5)]
player=vlc.MediaPlayer(song)
gray_image=cv.cvtColor(image,cv.COLOR_BGR2GRAY)
face=fd.detectMultiScale(gray_image)
for [x,y,w,h]in face:
cv.rectangle(image,(x,y),(x+w,y+h),(255,255,255),1)
k=cv.waitKey(3)
cs=type(face)
if ps!=cs:
ps=cs
if type(face)==tuple:
player.pause()
else:
player.play()
return player
elif 'stop' in word or 'close' in word:
player.stop()
cv.destroyAllWindows()
#break
elif 'next' in word or 'next song' in word:
player.stop()
player=vlc.MediaPlayer(track[r.randint(0,len(track)-1)])  # random.randint is inclusive of both ends, so index 5 can also be chosen
player.play()
return player
elif 'pause' in word:
player.pause()
elif 'play' in word:
player.play()
cv.imshow('my image',image)
def browser(word):
mouse=Controller()
if 'next recommended song' in word:
mouse.position=(1000,200)
mouse.press(Button.left)
mouse.release(Button.left)
print('Jarvis :next song played')
say('next song played')
log_('next song played')
else:
try:
print('Jarvis :searching in progress...please wait!')
say('searching in progress...please wait!')
log_('searching in progress...please wait!')
#word=speech_recogniser()
for link in search(word,tld='co.in',num=1,stop=1,pause=2):
print("Jarvis :obtaied link :"+link)
log_(link)
wb.open(link)
if 'youtube' in word:
t.sleep(1)
mouse=Controller()
#mouse.position=(400,300)
#mouse.press(Button.left)
#mouse.release(Button.left)
print('jarvis :done')
print("Jarvis :"+'searching done!')
say('searching done!')
log_('searching done!')
except:
p='sorry!\n word not recognized try again.'
print(p)
log_(p)
return browser(word)
def Wikipedia():
try:
print('Jarvis :what you want to search?')
say('what you want to search?')
log_('what you want to search?')
word=speech_recognizer()
print(user+' :'+word)
wi=wikipedia.summary(word, sentences=1)
print("Jarvis :" ,wi)
say(wi)
log_(word,False)
log_(wi)
except:
print('hey!\n'+'Matching not found try with different words')
log_('hey!\n'+'Matching not found try with different words')
return Wikipedia()
def mouse_(word='a'):
while 1:
if word=='a':
word=speech_recognizer()
mouse=Controller()
if 'down' in word:
mouse.move(0,40)
elif 'up' in word or 'upper side' in word:
mouse.move(0,-40)
elif 'right' in word:
mouse.move(40,0)
elif 'left' in word:
mouse.move(-40,0)
elif 'on first link' in word :
mouse.position=(250,300)
elif 'first link' in word:
mouse.position=(250,300)
mouse.press(Button.left)
mouse.release(Button.left)
elif 'at middle' in word:
mouse.position=(670,400)
if 'left click' in word or 'click left' in word:
mouse.press(Button.left)
mouse.release(Button.left)
if'right click' in word or 'click right' in word:
mouse.press(Button.right)
mouse.release(Button.right)
elif 'stop song' in word or 'play' in word or 'paly song' in word:
mouse.position=(400,300)
mouse.press(Button.left)
mouse.release(Button.left)
else :
return word
word='a'
def conversation(matched_index):
global user
user=db_inf['name'][matched_index]
if matched_index==0:
print('welcome admin,how can i help you')
say('welcome admin,how can i help you')
else:
print('welcome to personal jarvis, how can i help you')
say('welcome to personal jarvis, how can i help you')
while True:
word=speech_recognizer()
print('Jarvis :recognized word :',word)
log_(word,False)
res=''
if 'google' in word or 'browser' in word or 'youtube' in word or 'tell me' in word or 'what' in word or 'recomended' in word or 'search' in word or 'website' in word:
browser(word)
word=mouse_()
elif 'stop song' in word or 'play' in word or 'paly song' in word:
mouse_(word)
elif 'who are you' in word or 'give your intro' in word:
res="i'm a personel jarvis of pankaj.i can also help you by providing some imformation if you want"
print('Jarvis :'+res)
say(res)
elif 'hello' in word or 'hi' in word:
res='hii!...how can i help you'
print('jarvis :'+res)
say(res)
elif 'manufacturer' in word:
res='pankaj kumar, he made me during summer training in techienest, jaipur by saurabh sir. at that time he was pursuing B.tech from nit jalandhar'
print('Jarvis :'+res)
say(res)
elif 'how are you' in word:
res='fine, and i am not interested in knowing your fucking mood. want any help then stay, otherwise get out from here'
print('Jarvis :'+res)
say(res)
elif "wikipedia" in word:
Wikipedia()
elif "today's date" in word or 'date' in word:
d=str(date.today())
res="today's date is :"+d
print('Jarvis :'+res)
say(res)
elif 'time' in word or 'current time' in word:
time=t.ctime()
time=str(time.split(' ')[4])
res='current time :'+time
print('Jarvis :'+res)
say('current time is'+str(time.split(':')[0:2]))
elif "weather" in word or "temperature" in word:
say("Tell your city")
log_("Tell your city")
city_name=speech_recognizer()
print("city you said is",city_name)
#city_name=input("enter city name to confirm")
api_key = "cca979ed5fb2c8d3a9c99594191482f9"
base_url = "http://api.openweathermap.org/data/2.5/weather?"
complete_url = base_url + "appid=" + api_key + "&q=" + city_name
json_data=requests.get(complete_url).json()
try:
temp=json_data['main']
temp=str(int(int(temp['temp'])-273.15))
temp1=json_data['weather'][0]['description']
d =" Current Temperature in "+city_name+" is "+temp+" degree celsius with "+temp1
print("Jarvis : ",d)
say(d)
log_(d)
except KeyError:
print("Key invalid or city not found")
elif "location" in word:
g = geocoder.ip('me')
lat=g.latlng
str1= "latitude position is "+str(lat[0])
str2= "longitude position is "+str(lat[1])
print("Jarvis: ",str1)
print("Jarvis: ",str2)
d= str1 +str2
log_(d)
say(str1)
say(str2)
elif 'bye' in word or 'bye jarvis' in word:
log_(word)
break
log_(res)
if matched_index==0:
if 'open vlc' in word or 'play song' in word:
#print('processing...')
#say('processing...')
global player
player=vlc_player(word)
if 'next song' in word or 'pause' in word or 'play' in word or 'stop song' in word or 'close' in word:
try:
player=vlc_player(word,player)
except:
pass
flag=1
global matched_index
while True:
if flag!=0:
matched_index=face_recognizer()
if not(str(date.today())+'*' in log_data):
print('you are first recognizer!')
log.write('**********************************************************\n')
log.write(str(date.today())+'*\n')
conversation(matched_index)
log.write(' ********* ')
log.close()
log=open('log.txt','r+')
log_data=log.read()
flag=1
|
the-stack_106_29699 | #!/usr/bin/env python
# ===========================================================================
# Copyright 2017 `Tung Thanh Le`
# Email: ttungl at gmail dot com
#
# Heterogeneous Architecture Configurations Generator for Multi2Sim simulator
# (aka, `HeteroArchGen4M2S`)
# `HeteroArchGen4M2S` is free software, which is freely to be
# redistributed and modified it under the terms of
# the GNU General Public License as published by
# the Free Software Foundation.
# For more details `http://www.gnu.org/licenses`
# `HeteroArchGen4M2S` is written to help you configure M2S
# easily, but without warranty or merchantability.
# ============================================================================
# from write_to_File import write_to_File
# Read network performance
# Input: net_report.out
# Output: network throughput and latency;
# Description:
# This program calculates the network throughput and latency from the `net_report.out` file,
# which is generated under the multi2sim directory after running the shell script in
# the run_simulation_files directory.
def read_network_performance(net_file, simtime, benchmark):
with open(net_file) as fopen:
for line in fopen:
if "AverageLatency" in line:
avg_lat = line.split()
latency = avg_lat[2]
if "Cycles" in line:
Cycles = line.split()
Cycles = Cycles[2]
break
## throughput
throughput = float(Cycles) / float(simtime)
## Write to file
writeToFile(throughput, latency, benchmark)
def writeToFile(wthroughput, wlatency, benchmark):
fw = open('results/%s_network_performance.out' % benchmark, 'w');
fw.write('Network Throughput (MBps): %03.2f \n' % wthroughput);
fw.write('Network Latency (cycles): ');
fw.write(wlatency);
fw.close()
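# Example usage (hypothetical file name and simulated time; both would normally
# come from a prior Multi2Sim run):
# read_network_performance('net_report.out', 0.5, 'fft')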
|
the-stack_106_29703 | from django import forms
PRODUCT_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 26)]
class CartAddProductForm(forms.Form):
quantity = forms.TypedChoiceField(label='Количество:', choices=PRODUCT_QUANTITY_CHOICES, coerce=int,
widget=forms.Select(
attrs={'class': 'form-control'}
))
update = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)
|
the-stack_106_29704 | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class BackupJobProtoDRToCloudParams(object):
"""Implementation of the 'BackupJobProto_DRToCloudParams' model.
A Proto needed in case objects backed up by this job need to DR to cloud.
"Fail over" signifies the mechanism to move the workload to cloud.
Attributes:
need_to_fail_over (bool): Whether the objects in this job will be
failed over to cloud. In case of VMs, we need to fetch information
about the logical volumes present on the VM. Magneto might fail
backup of a VM in case volume information can not be fetched
(maybe because the agent is not installed or if the VM is turned
off etc.). The VM will be backed up using the physical agent when
it is running in the cloud. We might choose to backup the VM in
the cloud using native API at a later point. This flag makes
sense when configuring a job to backup on-prem VMs.
"""
# Create a mapping from Model property names to API property names
_names = {
"need_to_fail_over":'needToFailOver'
}
def __init__(self,
need_to_fail_over=None):
"""Constructor for the BackupJobProtoDRToCloudParams class"""
# Initialize members of the class
self.need_to_fail_over = need_to_fail_over
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
need_to_fail_over = dictionary.get('needToFailOver')
# Return an object of this model
return cls(need_to_fail_over)
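# Example (hypothetical API payload):
# params = BackupJobProtoDRToCloudParams.from_dictionary({'needToFailOver': True})
# assert params.need_to_fail_over is True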
|
the-stack_106_29705 | from tensorflow.keras.layers import Dropout, MaxPool1D, MaxPool2D, Conv1D, Conv2D, Dense
from tensorflow.keras.models import load_model
from pathlib import Path
def writeShape(shape):
return ", ".join(str(s) for s in shape[1:])
def getName(layer):
if isinstance(layer, Conv2D):
return "Convolution 2D"
if isinstance(layer, Conv1D):
return "Convolution 1D"
if isinstance(layer, MaxPool2D):
return "MaxPooling 2D"
if isinstance(layer, MaxPool1D):
return "MaxPooling 1D"
if isinstance(layer, Dropout):
return "DropOut"
if isinstance(layer, Dense):
return "Dense"
return layer.__class__.__name__
def printModelInfo(model):
if isinstance(model, (str, Path)):
model = load_model(str(model))
i = 0
for layer in model.layers:
if layer.name == "reshape_cast":
continue
try:
active = layer.activation.__name__
except AttributeError:
active = "~"
extra = "~"
if isinstance(layer, Dropout):
extra = f"droupout: {layer.rate}"
if isinstance(layer, (MaxPool1D, MaxPool2D)):
extra = f"poolsize: {layer.pool_size}"
i += 1
print(i, getName(layer), writeShape(layer.input.shape) + "; " + writeShape(layer.output.shape), active, extra, sep=" & ", end="\\\\\n")
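# Example usage (hypothetical model path): printModelInfo('model.h5') loads the
# model and prints one LaTeX-style table row per layer
# ("index & layer name & input; output shapes & activation & extra").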
from pathlib import Path
import numpy as np
import tensorflow.keras as keras
class TrainingHistory(keras.callbacks.Callback):
def __init__(self, output):
output = Path(output)
output.mkdir(parents=True, exist_ok=True)
self.output = output / "data.txt"
def on_train_begin(self, logs={}):
self.data = []
def on_epoch_end(self, epoch, logs={}):
self.data.append([logs.get('loss'), logs.get('accuracy'), logs.get('val_loss'), logs.get('val_accuracy')])
np.savetxt(self.output, self.data) |
the-stack_106_29706 | # author: Eric S. Tellez <[email protected]>
import os
import json
import numpy as np
import logging
from itertools import combinations
try:
from tqdm import tqdm
except ImportError:
def tqdm(x, **kwargs):
return x
logging.basicConfig(format='%(asctime)s : %(levelname)s :%(message)s')
class Fixed:
def __init__(self, value):
self.value = value
self.valid_values = [value]
def neighborhood(self, v):
return []
def get_random(self):
return self.value
class SetVariable:
def __init__(self, values):
self.valid_values = list(values)
def neighborhood(self, value):
return [u for u in self.valid_values if u != value]
def get_random(self):
i = np.random.randint(len(self.valid_values))
return self.valid_values[i]
class PowersetVariable:
def __init__(self, initial_set, max_size=None):
self.valid_values = []
if max_size is None:
max_size = len(initial_set) // 2 + 1
for i in range(1, len(initial_set)+1):
for l in combinations(initial_set, i):
if len(l) <= max_size:
self.valid_values.append(l)
def mismatches(self, value):
lvalue = len(value)
for v in self.valid_values:
# if len(value.intersection(v)) == lvalue - 1 or len(value.union(v)) == lvalue + 1:
ulen = len(value.union(v))
ilen = len(value.intersection(v))
if ulen in (lvalue, lvalue + 1) and ilen in (lvalue, lvalue - 1):
yield v
def neighborhood(self, value):
L = []
for v in value:
if isinstance(v, list):
v = tuple(v)
L.append(v)
return list(self.mismatches(set(L)))
def get_random(self):
x = len(self.valid_values)
i = np.random.randint(x)
return self.valid_values[i]
OPTION_NONE = 'none'
OPTION_GROUP = 'group'
OPTION_DELETE = 'delete'
BASIC_OPTIONS = [OPTION_DELETE, OPTION_GROUP, OPTION_NONE]
def Option():
return SetVariable(BASIC_OPTIONS)
def Uniform(left, right, k=10):
d = (right - left) * np.random.random_sample(k) + left
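# k values drawn uniformly from [left, right), exposed as a categorical search variable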
return SetVariable(d)
def Normal(mean, sigma, k=10):
d = sigma * np.random.randn(k) + mean  # k samples from a normal distribution with the given mean and sigma
return SetVariable(d)
def Boolean():
return SetVariable([False, True])
TOKENLIST = [(3, 1), (2, 2), (2, 1), -3, -2, -1, 1, 2, 3, 5, 7, 9]
if "TOKENLIST" in os.environ:
def _simple_cast(x):
if isinstance(x, list):
return tuple(x)
else:
return x
TOKENLIST = [_simple_cast(x) for x in json.loads(os.environ["TOKENLIST"])]
MAXTOKENLIST = os.environ.get("MAXTOKENLIST", len(TOKENLIST)//2 + 1)
DefaultParams = dict(
num_option=Option(),
usr_option=Option(),
url_option=Option(),
emo_option=Option(),
ent_option=Fixed(OPTION_NONE),
# hashtag_option=Fixed(OPTION_NONE),
hashtag_option=Option(),
select_ent=Fixed(False),
select_suff=Fixed(False),
select_conn=Fixed(False),
lc=Boolean(),
del_dup=Boolean(),
del_punc=Boolean(),
del_diac=Boolean(),
token_list=PowersetVariable(TOKENLIST, max_size=MAXTOKENLIST),
# negative values means for absolute frequencies, positive values between 0 and 1 means for ratio
token_min_filter=Fixed(-1),
token_max_filter=Fixed(1.0),
# token_max_filter=SetVariable([0.5, 0.9, 1.0]),
# token_min_filter=SetVariable([-1, -5, -10]),
tfidf=Boolean(),
dist_vector=SetVariable([OPTION_NONE, 'plain+1', 'plain+3', 'entropy+1', 'entropy+3'])
# dist_vector=Fixed(OPTION_NONE)
)
if "PARAMS" in os.environ:
for k, v in json.loads(os.environ["PARAMS"]).items():
DefaultParams[k] = Fixed(v)
class ParameterSelection:
def __init__(self, params=None):
if (params is None) or (0 == len(params)):
params = DefaultParams
else:
for k in DefaultParams.keys():
assert k in params, "{0} is not in given parameters; {1}".format(k, params)
self.params = params
def sample_param_space(self, n):
for i in range(n):
kwargs = {}
for k, v in self.params.items():
kwargs[k] = v.get_random()
yield kwargs
def expand_neighbors(self, s, keywords=None):
if keywords is None:
keywords = set(s.keys())
for k, v in sorted(s.items()):
if k[0] == '_' or k not in keywords:
# by convention, metadata starts with underscore
continue
vtype = self.params[k]
if isinstance(vtype, Fixed):
continue
for neighbor in vtype.neighborhood(v):
x = s.copy()
x[k] = neighbor
yield(x)
def get_best(self, fun_score, cand, desc="searching for params", pool=None):
if pool is None:
# X = list(map(fun_score, cand))
X = [fun_score(x) for x in tqdm(cand, desc=desc, total=len(cand))]
else:
# X = list(pool.map(fun_score, cand))
X = [x for x in tqdm(pool.imap_unordered(fun_score, cand), desc=desc, total=len(cand))]
# a list of tuples (score, conf)
X.sort(key=lambda x: x['_score'], reverse=True)
return X
def search(self, fun_score, bsize=32, hill_climbing=True, pool=None, best_list=None):
# initial approximation, Monte Carlo based process
tabu = set() # memory for tabu search
if best_list is None:
L = []
for conf in self.sample_param_space(bsize):
code = get_filename(conf)
if code in tabu:
continue
tabu.add(code)
L.append((conf, code))
best_list = self.get_best(fun_score, L, pool=pool)
else:
for conf in best_list:
tabu.add(get_filename(conf))
def _hill_climbing(keywords, desc):
# second approximation, a hill climbing process
i = 0
while True:
i += 1
bscore = best_list[0]['_score']
L = []
for conf in self.expand_neighbors(best_list[0], keywords=keywords):
code = get_filename(conf)
if code in tabu:
continue
tabu.add(code)
L.append((conf, code))
best_list.extend(self.get_best(fun_score, L, desc=desc + " {0}".format(i), pool=pool))
best_list.sort(key=lambda x: x['_score'], reverse=True)
if bscore == best_list[0]['_score']:
break
if hill_climbing:
_hill_climbing(['token_list'], "optimizing token_list")
# _hill_climbing(['token_min_filter', 'token_max_filter'], "optimizing token max and min filters")
do_vectorizing_opt = len(self.params['token_min_filter'].valid_values) > 1 or len(self.params['token_max_filter'].valid_values) > 1
if do_vectorizing_opt:
_hill_climbing(['token_list', 'token_min_filter', 'token_max_filter', 'tfidf'], "optimizing all token parameters")
ks = list(self.params.keys())
if do_vectorizing_opt:
ks.remove('token_list')
ks.remove('token_min_filter')
ks.remove('token_max_filter')
ks.remove('tfidf')
_hill_climbing(ks, "optimizing the rest of params")
return best_list
def get_filename(kwargs, basename=None):
L = []
if basename:
L.append(basename)
for k, v in sorted(kwargs.items()):
if k[0] == '_':
continue
L.append("{0}={1}".format(k, v).replace(" ", ""))
return "-".join(L)
|
the-stack_106_29707 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import unittest.mock as mock
from shutil import rmtree
from threading import Thread
from time import sleep
from os.path import exists
import mycroft.audio
from mycroft.util import create_signal, check_for_signal
from mycroft.util.file_utils import get_temp_path
"""Tests for public audio service utils."""
done_waiting = False
def wait_while_speaking_thread():
global done_waiting
mycroft.audio.wait_while_speaking()
done_waiting = True
class TestInterface(unittest.TestCase):
def setUp(self):
if exists(get_temp_path('mycroft')):
rmtree(get_temp_path('mycroft'))
def test_is_speaking(self):
create_signal('isSpeaking')
self.assertTrue(mycroft.audio.is_speaking())
# Check that the signal hasn't been removed
self.assertTrue(check_for_signal('isSpeaking'))
self.assertFalse(mycroft.audio.is_speaking())
def test_wait_while_speaking(self):
# Check that test terminates
create_signal('isSpeaking')
Thread(target=wait_while_speaking_thread).start()
sleep(2)
self.assertFalse(done_waiting)
check_for_signal('isSpeaking')
sleep(2)
self.assertTrue(done_waiting)
@mock.patch('mycroft.audio.utils.is_speaking')
@mock.patch('mycroft.messagebus.send_func.send')
def test_stop_speaking(self, mock_send, mock_is_speaking):
"""Test that stop speak message is sent."""
mock_is_speaking.return_value = True
mycroft.audio.stop_speaking()
mock_send.assert_called_with('mycroft.audio.speech.stop')
@mock.patch('mycroft.audio.utils.is_speaking')
@mock.patch('mycroft.messagebus.send_func.send')
def test_stop_speaking_when_not(self, mock_send, mock_is_speaking):
"""Check that the stop speaking msg isn't sent when not speaking."""
mock_is_speaking.return_value = False
mycroft.audio.stop_speaking()
mock_send.assert_not_called()
if __name__ == "__main__":
unittest.main()
|
the-stack_106_29708 | import tensorflow as tf
from tensorflow.keras.layers import Conv1D, Input, Dense, Reshape, ReLU, Permute
from tensorflow.keras.layers import Conv1D, Input, LSTM, Embedding, Dense, TimeDistributed, Bidirectional, \
LayerNormalization
from tensorflow.keras.models import Model
from globals import *
def softmax(logits):
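# softmax over the last (character) dimension, preserving the original tensor shape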
shape = tf.shape(logits)
res = tf.nn.softmax(tf.reshape(logits, [-1, N_CHAR]))
return tf.reshape(res, shape)
class ResidualBlock(tf.keras.layers.Layer):
def __init__(self):
super(ResidualBlock, self).__init__()
self.relu = ReLU()
self.conv1d_1 = Conv1D(filters=DIM, kernel_size=KERNEL_SIZE, padding='same', strides=1, activation='relu')
self.conv1d_2 = Conv1D(filters=DIM, kernel_size=KERNEL_SIZE, padding='same', strides=1)
def __call__(self, X, alpha=0.3):
x = self.relu(X)
x = self.conv1d_1(x)
x = self.conv1d_2(x)
return X + alpha * x  # residual connection: the block input plus the scaled conv output
class Generator(tf.keras.Model):
def __init__(self):
"""
implementation of Generator
:param input_size: size of the sequence (input noise)
"""
super(Generator, self).__init__(name='generator')
self.model = tf.keras.models.Sequential()
self.model.add(Input(shape=(NOISE_SHAPE,), batch_size=BATCH_SIZE))
self.model.add(Dense(units=DIM * SEQ_LENGTH))
self.model.add(Reshape((SEQ_LENGTH, DIM)))
self.model.add(ResidualBlock())
self.model.add(ResidualBlock())
self.model.add(ResidualBlock())
self.model.add(ResidualBlock())
self.model.add(ResidualBlock())
self.model.add(Conv1D(filters=N_CHAR, kernel_size=1))
def call(self, inputs):
x = self.model(inputs)
x = softmax(x)
return x
class Discriminator(tf.keras.Model):
def __init__(self, clip=1):
"""
implementation of Discriminator
:param clip: value to which you clip the gradients (or False)
"""
super(Discriminator, self).__init__(name='discriminator')
self.model = tf.keras.models.Sequential()
self.model.add(Input(shape=(SEQ_LENGTH, N_CHAR), batch_size=BATCH_SIZE))
self.model.add(Conv1D(filters=DIM, kernel_size=1))
self.model.add(ResidualBlock())
self.model.add(ResidualBlock())
self.model.add(ResidualBlock())
self.model.add(ResidualBlock())
self.model.add(ResidualBlock())
self.model.add(Reshape((-1, DIM * SEQ_LENGTH)))
self.model.add(Dense(units=DIM * SEQ_LENGTH))
self.model.add(Dense(units=1))
def call(self, inputs, training=False):
"""
model's forward pass
:param inputs: input of size [batch_size, seq_length, n_char];
:param training: specifies the behavior of the call;
:return: Y: probability of each sequences being real of shape [batch_size, 1]
"""
x = self.model(inputs)
return x
class Feedback():
def __init__(self):
input = Input(shape=(MAX_LEN,))
x = Embedding(input_dim=n_words, output_dim=128, input_length=MAX_LEN)(input)
x = LayerNormalization()(x)
x = Bidirectional(LSTM(units=128, return_sequences=True, use_bias=True))(x)
x = Bidirectional(LSTM(units=128, return_sequences=True, use_bias=True))(x)
x = Bidirectional(LSTM(units=128, use_bias=True))(x)
y = Dense(n_tags, activation="sigmoid")(x)
self.model = Model(input, y)
def train(self, OPTIM="rmsprop", LOSS='binary_crossentropy', BATCH_SIZE=128, EPOCHS=5):
self.model.compile(optimizer=OPTIM, loss=LOSS, metrics=[tf.keras.metrics.Precision(),
tf.keras.metrics.Recall(),
tf.keras.metrics.Hinge()])
history = self.model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
validation_data=(X_test, y_test), verbose=1)
self.model.save(save_feedback)
return history
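# --- Added usage sketch (not part of the original module) ---
# Minimal illustration, assuming BATCH_SIZE, NOISE_SHAPE, SEQ_LENGTH and N_CHAR are
# defined in globals.py as the classes above expect; it shows a single forward pass only.
# generator = Generator()
# discriminator = Discriminator()
# noise = tf.random.normal((BATCH_SIZE, NOISE_SHAPE))
# fake_sequences = generator(noise)            # per-position softmax over N_CHAR characters
# critic_scores = discriminator(fake_sequences)  # one critic score per generated sequence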
|
the-stack_106_29715 | from Jumpscale import j
try:
import digitalocean
except:
j.builders.runtimes.python3.pip_package_install("python-digitalocean")
import digitalocean
from .DigitalOceanVM import DigitalOceanVM
from .Project import Project
class DigitalOcean(j.baseclasses.object_config):
_SCHEMATEXT = """
@url = jumpscale.digitalocean.client
name** = "" (S)
token_ = "" (S)
project_name = "" (S)
vms = (LO) !jumpscale.digitalocean.vm
@url = jumpscale.digitalocean.vm
name = "" (S)
do_id = "" (S)
meta = {} (DICT)
"""
# _CHILDCLASS = DigitalOceanVM
def _init(self, **kwargs):
self._client = None
self.reset()
def reset(self):
self._droplets = []
self._projects = []
self._digitalocean_images = None
self._digitalocean_sizes = None
self._digitalocean_regions = None
self._sshkeys = None
@property
def client(self):
"""If client not set, a new client is created
:raises RuntimeError: Auth token not configured
:return: client
:rtype:
"""
if not self._client:
self._client = digitalocean.Manager(token=self.token_)
return self._client
@property
def digitalocean_images(self):
if not self._digitalocean_images:
self._digitalocean_images = self.client.get_distro_images()
return self._digitalocean_images
@property
def digitalocean_myimages(self):
return self.client.get_images(private=True)
@property
def digitalocean_sizes(self):
if not self._digitalocean_sizes:
self._digitalocean_sizes = self.client.get_all_sizes()
return self._digitalocean_sizes
@property
def digitalocean_regions(self):
if not self._digitalocean_regions:
self._digitalocean_regions = self.client.get_all_regions()
return self._digitalocean_regions
@property
def digitalocean_region_names(self):
return [i.slug for i in self.digitalocean_regions]
@property
def sshkeys(self):
if not self._sshkeys:
self._sshkeys = self.client.get_all_sshkeys()
return self._sshkeys
def droplet_exists(self, name):
for droplet in self.droplets:
if droplet.name.lower() == name.lower():
return True
return False
def _droplet_get(self, name):
for droplet in self.droplets:
if droplet.name.lower() == name.lower():
return droplet
return False
def _sshkey_get_default(self):
sshkey_ = j.clients.sshkey.default
pubkeyonly = sshkey_.pubkey_only
for item in self.sshkeys:
if item.public_key.find(pubkeyonly) != -1:
return item
return None
def sshkey_get(self, name):
for item in self.sshkeys:
if name == item.name:
return item
raise j.exceptions.Base("did not find key:%s" % name)
def region_get(self, name):
for item in self.digitalocean_regions:
if name == item.slug:
return item
if name == item.name:
return item
raise j.exceptions.Base("did not find region:%s" % name)
@property
def digitalocean_account_images(self):
return self.digitalocean_images + self.digitalocean_myimages
def image_get(self, name):
for item in self.digitalocean_account_images:
if item.description:
name_do = item.description.lower()
else:
name_do = item.distribution + " " + item.name
if name_do.lower().find(name) != -1:
return item
raise j.exceptions.Base("did not find image:%s" % name)
def image_names_get(self, name=""):
res = []
name = name.lower()
for item in self.digitalocean_images:
if item.description:
name_do = item.description.lower()
else:
name_do = item.distribution + " " + item.name
if name_do.find(name) != -1:
res.append(name_do)
return res
def droplet_create(
self,
name="test",
sshkey=None,
region="Amsterdam 3",
image="ubuntu 18.04",
size_slug="s-1vcpu-2gb",
delete=True,
project_name=None,
):
"""
:param name:
:param sshkey:
:param region:
:param image:
:param size_slug: s-1vcpu-2gb,s-6vcpu-16gb,gd-8vcpu-32gb
:param delete:
:param project_name: project to add this droplet it. If not specified the default project will be used.
:return: droplet,sshclient
"""
project = None
if project_name:
project = self._project_get(project_name)
if not project:
raise j.exceptions.Input("could not find project with name:%s" % project_name)
delete = j.data.types.bool.clean(delete)
sshkey = j.data.types.string.clean(sshkey)
if not sshkey:
sshkey_do = self._sshkey_get_default()
if not sshkey_do:
sshkey_ = j.clients.sshkey.default
# means we did not find the sshkey on digital ocean yet, need to create
key = digitalocean.SSHKey(token=self.token_, name=sshkey_.name, public_key=sshkey_.pubkey)
key.create()
sshkey_do = self._sshkey_get_default()
assert sshkey_do
sshkey = sshkey_do.name
if self.droplet_exists(name):
dr0 = self._droplet_get(name=name)
if delete:
dr0.destroy()
else:
sshcl = j.clients.ssh.get(
name="do_%s" % name, addr=dr0.ip_address, client_type="pssh", sshkey_name=sshkey
)
sshcl.save()
return dr0, sshcl
sshkey = self.sshkey_get(sshkey)
region = self.region_get(region)
imagedo = self.image_get(image)
if region.slug not in imagedo.regions:
j.shell()
img_slug_or_id = imagedo.slug if imagedo.slug else imagedo.id
droplet = digitalocean.Droplet(
token=self.token_,
name=name,
region=region.slug,
image=img_slug_or_id,
size_slug=size_slug,
ssh_keys=[sshkey],
backups=False,
)
droplet.create()
# dr = self.get(name=name)
# dr.do_id = droplet.id
self._droplets.append(droplet)
self.reset()
if project:
project.assign_resources(["do:droplet:%s" % droplet.id])
vm = self._vm_get(name)
vm.do_id = droplet.id
self.save()
def actions_wait():
while True:
actions = droplet.get_actions()
if len(actions) == 0:
return
for action in actions:
action.load()
# Once it shows complete, droplet is up and running
print(action.status)
if action.status == "completed":
return
actions_wait()
droplet.load()
sshcl = j.clients.ssh.get(
name="do_%s" % name, addr=droplet.ip_address, client_type="pssh", sshkey_name=sshkey.name
)
sshcl.state_reset() # important otherwise the state does not correspond
sshcl.save()
return droplet, sshcl
def _vm_get(self, name, new=True):
# return the stored vm entry with this name; optionally create a new entry when missing
for vm in self.vms:
if vm.name == name:
return vm
if new:
vm = self.vms.new()
vm.name = name
return vm
return None
def _vm_exists(self, name):
return self._vm_get(name, new=False) is not None
def droplet_get(self, name):
if not self.droplet_exists(name):
raise j.exceptions.Input("could not find vm with name:%s" % name)
return self._droplet_get(name)
@property
def droplets(self):
if not self._droplets:
self._droplets = []
for d in self.client.get_all_droplets():
self._droplets.append(d)
return self._droplets
def droplets_all_delete(self, ignore=None, interactive=True):
ignore = j.data.types.bool.clean(ignore)
interactive = j.data.types.bool.clean(interactive)
if not ignore:
ignore = []
def test(ignore, name):
if name.startswith("TF-"):
return False
for item in ignore:
if name.lower().find(item.lower()) != -1:
return False
return True
todo = []
for droplet in self.droplets:
if test(ignore, droplet.name):
name = droplet.name
todo.append(droplet)
if todo != []:
todotxt = ",".join([i.name for i in todo])
if not interactive or j.tools.console.askYesNo("ok to delete:%s" % todotxt):
for droplet in todo:
droplet.destroy()
def droplets_all_shutdown(self):
for droplet in self.droplets:
droplet.shutdown()
def droplets_list(self, project=None):
"""list droplets
:param project: name of the project to filter on, defaults to None
:type project: str, optional
:raises j.exceptions.Input: raise an error if project doesn't exist.
:return: list of droplets
:rtype: [Droplet]
"""
if not project:
return self.droplets
project = self._project_get(project)
if not project:
raise j.exceptions.Input("could not find project with name:%s" % project)
return project.list_droplets()
def _projects_list(self):
return Project.list(self.client)
@property
def projects(self):
"""property to return all the cached projects
:return: list of project
:rtype: [Project]
"""
if not self._projects:
for project in self._projects_list():
self._projects.append(project)
return self._projects
def _project_get(self, name):
for project in self.projects:
if project.name.lower() == name.lower():
return project
return None
def project_create(self, name, purpose, description="", environment="", is_default=False):
"""Create a digital ocean project
:param name: name of the project
:type name: str
:param purpose: purpose of the project
:type purpose: str
:param description: description of the project, defaults to ""
:type description: str, optional
:param environment: environment of project's resources, defaults to ""
:type environment: str, optional
:param is_default: make this the default project for your user
:type is_default: bool
:return: project instance
:rtype: Project
"""
if self._project_get(name):
raise j.exceptions.Value("A project with the same name already exists")
project = Project(
token=self.token_,
name=name,
purpose=purpose,
description=description,
environment=environment,
is_default=is_default,
)
project.create()
if is_default:
project.update(is_default=True)
self._projects.append(project)
return project
def project_get(self, name):
"""Get an existing prooject
:param name: project name
:type name: str
:raises j.exceptions.Input: raises an error if there is no project with this name
:return: Project object
:rtype: Project
"""
project = self._project_get(name)
if not project:
raise j.exceptions.Input("could not find project with name:%s" % name)
return project
def project_delete(self, name):
"""Delete an exisiting project.
A project can't be deleted unless it has no resources.
:param name: project name
:type name: str
:raises j.exceptions.Input: raises an error if there is no project with this name
"""
project = self._project_get(name)
if not project:
raise j.exceptions.Input("could not find project with name:%s" % name)
project.delete()
self._projects.remove(project)
def __str__(self):
return "digital ocean client:%s" % self.name
__repr__ = __str__
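# --- Added usage sketch (not part of the original module) ---
# Hypothetical example of obtaining this client through the Jumpscale client factory and
# creating a droplet; the client name, token and droplet parameters are placeholders.
# cl = j.clients.digitalocean.get(name="main", token_="<digital ocean API token>")
# droplet, sshclient = cl.droplet_create(name="test", region="Amsterdam 3",
#                                        image="ubuntu 18.04", size_slug="s-1vcpu-2gb")
# sshclient.execute("hostname")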
|
the-stack_106_29718 | # -*- coding: utf-8 -*-
"""
sphinx.quickstart
~~~~~~~~~~~~~~~~~
Quickly setup documentation source to work with Sphinx.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
from __future__ import absolute_import
import re
import os
import sys
import optparse
import time
from os import path
from io import open
# try to import readline, unix specific enhancement
try:
import readline
if readline.__doc__ and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
except ImportError:
pass
from six import PY2, PY3, text_type, binary_type
from six.moves import input
from six.moves.urllib.parse import quote as urlquote
from docutils.utils import column_width
from sphinx import __display_version__, package_dir
from sphinx.util.osutil import make_filename
from sphinx.util.console import ( # type: ignore
purple, bold, red, turquoise, nocolor, color_terminal
)
from sphinx.util.template import SphinxRenderer
from sphinx.util import texescape
if False:
# For type annotation
from typing import Any, Callable, Dict, List, Pattern # NOQA
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
DEFAULT_VALUE = {
'path': '.',
'sep': False,
'dot': '_',
'language': None,
'suffix': '.rst',
'master': 'index',
'epub': False,
'ext_autodoc': False,
'ext_doctest': False,
'ext_todo': False,
'makefile': True,
'batchfile': True,
}
EXTENSIONS = ('autodoc', 'doctest', 'intersphinx', 'todo', 'coverage',
'imgmath', 'mathjax', 'ifconfig', 'viewcode', 'githubpages')
PROMPT_PREFIX = '> '
def mkdir_p(dir):
# type: (unicode) -> None
if path.isdir(dir):
return
os.makedirs(dir)
# function to get input from terminal -- overridden by the test suite
def term_input(prompt):
# type: (unicode) -> unicode
print(prompt, end='')
return input('')
class ValidationError(Exception):
"""Raised for validation errors."""
def is_path(x):
# type: (unicode) -> unicode
x = path.expanduser(x)
if path.exists(x) and not path.isdir(x):
raise ValidationError("Please enter a valid path name.")
return x
def allow_empty(x):
# type: (unicode) -> unicode
return x
def nonempty(x):
# type: (unicode) -> unicode
if not x:
raise ValidationError("Please enter some text.")
return x
def choice(*l):
# type: (unicode) -> Callable[[unicode], unicode]
def val(x):
# type: (unicode) -> unicode
if x not in l:
raise ValidationError('Please enter one of %s.' % ', '.join(l))
return x
return val
def boolean(x):
# type: (unicode) -> bool
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError("Please enter either 'y' or 'n'.")
return x.upper() in ('Y', 'YES')
def suffix(x):
# type: (unicode) -> unicode
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError("Please enter a file suffix, "
"e.g. '.rst' or '.txt'.")
return x
def ok(x):
# type: (unicode) -> unicode
return x
def term_decode(text):
# type: (unicode) -> unicode
if isinstance(text, text_type):
return text
# for Python 2.x, try to get a Unicode string out of it
if text.decode('ascii', 'replace').encode('ascii', 'replace') == text:
return text
if TERM_ENCODING:
text = text.decode(TERM_ENCODING)
else:
print(turquoise('* Note: non-ASCII characters entered '
'and terminal encoding unknown -- assuming '
'UTF-8 or Latin-1.'))
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
text = text.decode('latin1')
return text
def do_prompt(d, key, text, default=None, validator=nonempty):
# type: (Dict, unicode, unicode, unicode, Callable[[unicode], Any]) -> None
while True:
if default is not None:
prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) # type: unicode
else:
prompt = PROMPT_PREFIX + text + ': '
if PY2:
# for Python 2.x, try to get a Unicode string out of it
if prompt.encode('ascii', 'replace').decode('ascii', 'replace') \
!= prompt:
if TERM_ENCODING:
prompt = prompt.encode(TERM_ENCODING)
else:
print(turquoise('* Note: non-ASCII default value provided '
'and terminal encoding unknown -- assuming '
'UTF-8 or Latin-1.'))
try:
prompt = prompt.encode('utf-8')
except UnicodeEncodeError:
prompt = prompt.encode('latin1')
prompt = purple(prompt)
x = term_input(prompt).strip()
if default and not x:
x = default
x = term_decode(x)
try:
x = validator(x)
except ValidationError as err:
print(red('* ' + str(err)))
continue
break
d[key] = x
def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
# type: (unicode, Pattern) -> unicode
# remove Unicode literal prefixes
if PY3:
return rex.sub('\\1', source)
else:
return source
class QuickstartRenderer(SphinxRenderer):
def __init__(self, templatedir):
# type: (unicode) -> None
self.templatedir = templatedir or ''
super(QuickstartRenderer, self).__init__()
def render(self, template_name, context):
# type: (unicode, Dict) -> unicode
user_template = path.join(self.templatedir, path.basename(template_name))
if self.templatedir and path.exists(user_template):
return self.render_from_file(user_template, context)
else:
return super(QuickstartRenderer, self).render(template_name, context)
def ask_user(d):
# type: (Dict) -> None
"""Ask the user for quickstart values missing from *d*.
Values are:
* path: root path
* sep: separate source and build dirs (bool)
* dot: replacement for dot in _templates etc.
* project: project name
* author: author names
* version: version of project
* release: release of project
* language: document language
* suffix: source file suffix
* master: master document name
* epub: use epub (bool)
* ext_*: extensions to use (bools)
* makefile: make Makefile
* batchfile: make command file
"""
print(bold('Welcome to the Sphinx %s quickstart utility.') % __display_version__)
print('''
Please enter values for the following settings (just press Enter to
accept a default value, if one is given in brackets).''')
if 'path' in d:
print(bold('''
Selected root path: %s''' % d['path']))
else:
print('''
Enter the root path for documentation.''')
do_prompt(d, 'path', 'Root path for the documentation', '.', is_path)
while path.isfile(path.join(d['path'], 'conf.py')) or \
path.isfile(path.join(d['path'], 'source', 'conf.py')):
print()
print(bold('Error: an existing conf.py has been found in the '
'selected root path.'))
print('sphinx-quickstart will not overwrite existing Sphinx projects.')
print()
do_prompt(d, 'path', 'Please enter a new root path (or just Enter '
'to exit)', '', is_path)
if not d['path']:
sys.exit(1)
if 'sep' not in d:
print('''
You have two options for placing the build directory for Sphinx output.
Either, you use a directory "_build" within the root path, or you separate
"source" and "build" directories within the root path.''')
do_prompt(d, 'sep', 'Separate source and build directories (y/n)', 'n',
boolean)
if 'dot' not in d:
print('''
Inside the root directory, two more directories will be created; "_templates"
for custom HTML templates and "_static" for custom stylesheets and other static
files. You can enter another prefix (such as ".") to replace the underscore.''')
do_prompt(d, 'dot', 'Name prefix for templates and static dir', '_', ok)
if 'project' not in d:
print('''
The project name will occur in several places in the built documentation.''')
do_prompt(d, 'project', 'Project name')
if 'author' not in d:
do_prompt(d, 'author', 'Author name(s)')
if 'version' not in d:
print('''
Sphinx has the notion of a "version" and a "release" for the
software. Each version can have multiple releases. For example, for
Python the version is something like 2.5 or 3.0, while the release is
something like 2.5.1 or 3.0a1. If you don't need this dual structure,
just set both to the same value.''')
do_prompt(d, 'version', 'Project version', '', allow_empty)
if 'release' not in d:
do_prompt(d, 'release', 'Project release', d['version'], allow_empty)
if 'language' not in d:
print('''
If the documents are to be written in a language other than English,
you can select a language here by its language code. Sphinx will then
translate text that it generates into that language.
For a list of supported codes, see
http://sphinx-doc.org/config.html#confval-language.''')
do_prompt(d, 'language', 'Project language', 'en')
if d['language'] == 'en':
d['language'] = None
if 'suffix' not in d:
print('''
The file name suffix for source files. Commonly, this is either ".txt"
or ".rst". Only files with this suffix are considered documents.''')
do_prompt(d, 'suffix', 'Source file suffix', '.rst', suffix)
if 'master' not in d:
print('''
One document is special in that it is considered the top node of the
"contents tree", that is, it is the root of the hierarchical structure
of the documents. Normally, this is "index", but if your "index"
document is a custom template, you can also set this to another filename.''')
do_prompt(d, 'master', 'Name of your master document (without suffix)',
'index')
while path.isfile(path.join(d['path'], d['master'] + d['suffix'])) or \
path.isfile(path.join(d['path'], 'source', d['master'] + d['suffix'])):
print()
print(bold('Error: the master file %s has already been found in the '
'selected root path.' % (d['master'] + d['suffix'])))
print('sphinx-quickstart will not overwrite the existing file.')
print()
do_prompt(d, 'master', 'Please enter a new file name, or rename the '
'existing file and press Enter', d['master'])
if 'epub' not in d:
print('''
Sphinx can also add configuration for epub output:''')
do_prompt(d, 'epub', 'Do you want to use the epub builder (y/n)',
'n', boolean)
if 'ext_autodoc' not in d:
print('''
Please indicate if you want to use one of the following Sphinx extensions:''')
do_prompt(d, 'ext_autodoc', 'autodoc: automatically insert docstrings '
'from modules (y/n)', 'n', boolean)
if 'ext_doctest' not in d:
do_prompt(d, 'ext_doctest', 'doctest: automatically test code snippets '
'in doctest blocks (y/n)', 'n', boolean)
if 'ext_intersphinx' not in d:
do_prompt(d, 'ext_intersphinx', 'intersphinx: link between Sphinx '
'documentation of different projects (y/n)', 'n', boolean)
if 'ext_todo' not in d:
do_prompt(d, 'ext_todo', 'todo: write "todo" entries '
'that can be shown or hidden on build (y/n)', 'n', boolean)
if 'ext_coverage' not in d:
do_prompt(d, 'ext_coverage', 'coverage: checks for documentation '
'coverage (y/n)', 'n', boolean)
if 'ext_imgmath' not in d:
do_prompt(d, 'ext_imgmath', 'imgmath: include math, rendered '
'as PNG or SVG images (y/n)', 'n', boolean)
if 'ext_mathjax' not in d:
do_prompt(d, 'ext_mathjax', 'mathjax: include math, rendered in the '
'browser by MathJax (y/n)', 'n', boolean)
if d['ext_imgmath'] and d['ext_mathjax']:
print('''Note: imgmath and mathjax cannot be enabled at the same time.
imgmath has been deselected.''')
d['ext_imgmath'] = False
if 'ext_ifconfig' not in d:
do_prompt(d, 'ext_ifconfig', 'ifconfig: conditional inclusion of '
'content based on config values (y/n)', 'n', boolean)
if 'ext_viewcode' not in d:
do_prompt(d, 'ext_viewcode', 'viewcode: include links to the source '
'code of documented Python objects (y/n)', 'n', boolean)
if 'ext_githubpages' not in d:
do_prompt(d, 'ext_githubpages', 'githubpages: create .nojekyll file '
'to publish the document on GitHub pages (y/n)', 'n', boolean)
if 'no_makefile' in d:
d['makefile'] = False
elif 'makefile' not in d:
print('''
A Makefile and a Windows command file can be generated for you so that you
only have to run e.g. `make html' instead of invoking sphinx-build
directly.''')
do_prompt(d, 'makefile', 'Create Makefile? (y/n)', 'y', boolean)
if 'no_batchfile' in d:
d['batchfile'] = False
elif 'batchfile' not in d:
do_prompt(d, 'batchfile', 'Create Windows command file? (y/n)',
'y', boolean)
print()
def generate(d, overwrite=True, silent=False, templatedir=None):
# type: (Dict, bool, bool, unicode) -> None
"""Generate project based on values in *d*."""
template = QuickstartRenderer(templatedir=templatedir)
texescape.init()
indent = ' ' * 4
if 'mastertoctree' not in d:
d['mastertoctree'] = ''
if 'mastertocmaxdepth' not in d:
d['mastertocmaxdepth'] = 2
d['PY3'] = PY3
d['project_fn'] = make_filename(d['project'])
d['project_url'] = urlquote(d['project'].encode('idna'))
d['project_manpage'] = d['project_fn'].lower()
d['now'] = time.asctime()
d['project_underline'] = column_width(d['project']) * '='
d.setdefault('extensions', [])
for name in EXTENSIONS:
if d.get('ext_' + name):
d['extensions'].append('sphinx.ext.' + name)
d['extensions'] = (',\n' + indent).join(repr(name) for name in d['extensions'])
d['copyright'] = time.strftime('%Y') + ', ' + d['author']
d['author_texescaped'] = text_type(d['author']).\
translate(texescape.tex_escape_map)
d['project_doc'] = d['project'] + ' Documentation'
d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation').\
translate(texescape.tex_escape_map)
# escape backslashes and single quotes in strings that are put into
# a Python string literal
for key in ('project', 'project_doc', 'project_doc_texescaped',
'author', 'author_texescaped', 'copyright',
'version', 'release', 'master'):
d[key + '_str'] = d[key].replace('\\', '\\\\').replace("'", "\\'")
if not path.isdir(d['path']):
mkdir_p(d['path'])
srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']
mkdir_p(srcdir)
if d['sep']:
builddir = path.join(d['path'], 'build')
d['exclude_patterns'] = ''
else:
builddir = path.join(srcdir, d['dot'] + 'build')
exclude_patterns = map(repr, [
d['dot'] + 'build',
'Thumbs.db', '.DS_Store',
])
d['exclude_patterns'] = ', '.join(exclude_patterns)
mkdir_p(builddir)
mkdir_p(path.join(srcdir, d['dot'] + 'templates'))
mkdir_p(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath, content, newline=None):
# type: (unicode, unicode, unicode) -> None
if overwrite or not path.isfile(fpath):
print('Creating file %s.' % fpath)
with open(fpath, 'wt', encoding='utf-8', newline=newline) as f:
f.write(content)
else:
print('File %s already exists, skipping.' % fpath)
conf_path = os.path.join(templatedir, 'conf.py_t') if templatedir else None
if not conf_path or not path.isfile(conf_path):
conf_path = os.path.join(package_dir, 'templates', 'quickstart', 'conf.py_t')
with open(conf_path) as f:
conf_text = convert_python_source(f.read())
write_file(path.join(srcdir, 'conf.py'), template.render_string(conf_text, d))
masterfile = path.join(srcdir, d['master'] + d['suffix'])
write_file(masterfile, template.render('quickstart/master_doc.rst_t', d))
if d.get('make_mode') is True:
makefile_template = 'quickstart/Makefile.new_t'
batchfile_template = 'quickstart/make.bat.new_t'
else:
makefile_template = 'quickstart/Makefile_t'
batchfile_template = 'quickstart/make.bat_t'
if d['makefile'] is True:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
# use binary mode, to avoid writing \r\n on Windows
write_file(path.join(d['path'], 'Makefile'),
template.render(makefile_template, d), u'\n')
if d['batchfile'] is True:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
write_file(path.join(d['path'], 'make.bat'),
template.render(batchfile_template, d), u'\r\n')
if silent:
return
print()
print(bold('Finished: An initial directory structure has been created.'))
print('''
You should now populate your master file %s and create other documentation
source files. ''' % masterfile + ((d['makefile'] or d['batchfile']) and '''\
Use the Makefile to build the docs, like so:
make builder
''' or '''\
Use the sphinx-build command to build the docs, like so:
sphinx-build -b builder %s %s
''' % (srcdir, builddir)) + '''\
where "builder" is one of the supported builders, e.g. html, latex or linkcheck.
''')
def usage(argv, msg=None):
# type: (List[unicode], unicode) -> None
if msg:
print(msg, file=sys.stderr)
print(file=sys.stderr)
USAGE = """\
Sphinx v%s
Usage: %%prog [options] [projectdir]
""" % __display_version__
EPILOG = """\
For more information, visit <http://sphinx-doc.org/>.
"""
def valid_dir(d):
# type: (Dict) -> bool
dir = d['path']
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):
return False
if d['sep']:
dir = os.path.join('source', dir)
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
reserved_names = [
'conf.py',
d['dot'] + 'static',
d['dot'] + 'templates',
d['master'] + d['suffix'],
]
if set(reserved_names) & set(os.listdir(dir)):
return False
return True
class MyFormatter(optparse.IndentedHelpFormatter):
def format_usage(self, usage): # type: ignore
# type: (str) -> str
return usage
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
def main(argv=sys.argv[1:]):
# type: (List[str]) -> int
if not color_terminal():
nocolor()
parser = optparse.OptionParser(USAGE, epilog=EPILOG,
version='Sphinx v%s' % __display_version__,
formatter=MyFormatter())
parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
default=False,
help='quiet mode')
group = parser.add_option_group('Structure options')
group.add_option('--sep', action='store_true', dest='sep',
help='if specified, separate source and build dirs')
group.add_option('--dot', metavar='DOT', dest='dot',
help='replacement for dot in _templates etc.')
group = parser.add_option_group('Project basic options')
group.add_option('-p', '--project', metavar='PROJECT', dest='project',
help='project name')
group.add_option('-a', '--author', metavar='AUTHOR', dest='author',
help='author names')
group.add_option('-v', metavar='VERSION', dest='version',
help='version of project')
group.add_option('-r', '--release', metavar='RELEASE', dest='release',
help='release of project')
group.add_option('-l', '--language', metavar='LANGUAGE', dest='language',
help='document language')
group.add_option('--suffix', metavar='SUFFIX', dest='suffix',
help='source file suffix')
group.add_option('--master', metavar='MASTER', dest='master',
help='master document name')
group.add_option('--epub', action='store_true', dest='epub',
default=False,
help='use epub')
group = parser.add_option_group('Extension options')
for ext in EXTENSIONS:
group.add_option('--ext-' + ext, action='store_true',
dest='ext_' + ext, default=False,
help='enable %s extension' % ext)
group.add_option('--extensions', metavar='EXTENSIONS', dest='extensions',
action='append', help='enable extensions')
group = parser.add_option_group('Makefile and Batchfile creation')
group.add_option('--makefile', action='store_true', dest='makefile',
default=False,
help='create makefile')
group.add_option('--no-makefile', action='store_true', dest='no_makefile',
default=False,
help='not create makefile')
group.add_option('--batchfile', action='store_true', dest='batchfile',
default=False,
help='create batchfile')
group.add_option('--no-batchfile', action='store_true', dest='no_batchfile',
default=False,
help='not create batchfile')
group.add_option('-M', '--no-use-make-mode', action='store_false', dest='make_mode',
help='not use make-mode for Makefile/make.bat')
group.add_option('-m', '--use-make-mode', action='store_true', dest='make_mode',
default=True,
help='use make-mode for Makefile/make.bat')
group = parser.add_option_group('Project templating')
group.add_option('-t', '--templatedir', metavar='TEMPLATEDIR', dest='templatedir',
help='template directory for template files')
group.add_option('-d', metavar='NAME=VALUE', action='append', dest='variables',
help='define a template variable')
# parse options
try:
opts, args = parser.parse_args(argv)
except SystemExit as err:
return err.code
if len(args) > 0:
opts.ensure_value('path', args[0])
d = vars(opts)
# delete None or False value
d = dict((k, v) for k, v in d.items() if not (v is None or v is False))
try:
if 'quiet' in d:
if not set(['project', 'author']).issubset(d):
print('''"quiet" is specified, but any of "project" or \
"author" is not specified.''')
return 1
if set(['quiet', 'project', 'author']).issubset(d):
# quiet mode with all required params satisfied, use default
d.setdefault('version', '')
d.setdefault('release', d['version'])
d2 = DEFAULT_VALUE.copy()
d2.update(dict(("ext_" + ext, False) for ext in EXTENSIONS))
d2.update(d)
d = d2
if 'no_makefile' in d:
d['makefile'] = False
if 'no_batchfile' in d:
d['batchfile'] = False
if not valid_dir(d):
print()
print(bold('Error: specified path is not a directory, or sphinx'
' files already exist.'))
print('sphinx-quickstart only generates into an empty directory.'
' Please specify a new root path.')
return 1
else:
ask_user(d)
except (KeyboardInterrupt, EOFError):
print()
print('[Interrupted.]')
return 130 # 128 + SIGINT
# decode values in d if value is a Python string literal
for key, value in d.items():
if isinstance(value, binary_type):
d[key] = term_decode(value)
# parse extensions list
d.setdefault('extensions', [])
for ext in d['extensions'][:]:
if ',' in ext:
d['extensions'].remove(ext)
for modname in ext.split(','):
d['extensions'].append(modname)
for variable in d.get('variables', []):
try:
name, value = variable.split('=')
d[name] = value
except ValueError:
print('Invalid template variable: %s' % variable)
generate(d, templatedir=opts.templatedir)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
the-stack_106_29719 | #!/usr/bin/env python3
# Partly inspired by https://github.com/CUN-bjy/gym-ddpg-keras
import rospy
import gym
import rospkg
import time
import numpy as np
from ddpg import ActorNet, CriticNet
from ddpg_utils import MemoryBuffer, OrnsteinUhlenbeckProcess
from utils import tcolors
import rosnode
import task_arm_office # import the training environment
class ddpgAgent():
"""
Deep Deterministic Policy Gradient(DDPG) Agent
"""
def __init__(self, env_, is_discrete=False, batch_size=100, w_per=True, buffer_size=20000, outdir='./ddpg_data'):
# gym environments
self.env = env_
self.discrete = is_discrete
self.obs_dim = env_.observation_space.shape[0]
self.act_dim = env_.action_space.n if is_discrete else env_.action_space.shape[0]
self.outdir = outdir
self.action_bound = (env_.action_space.high - env_.action_space.low) / 2 if not is_discrete else 1.
self.action_shift = (env_.action_space.high + env_.action_space.low) / 2 if not is_discrete else 0.
# initialize actor & critic and their targets
self.discount_factor = 0.99
self.actor = ActorNet(self.obs_dim, self.act_dim, self.action_bound, lr_=1e-4, tau_=1e-3)
self.critic = CriticNet(self.obs_dim, self.act_dim, lr_=1e-3, tau_=1e-3, discount_factor=self.discount_factor)
# Experience Buffer
self.buffer = MemoryBuffer(buffer_size, with_per=w_per)
self.with_per = w_per
self.batch_size = batch_size
# OU-Noise-Process
self.noise = OrnsteinUhlenbeckProcess(size=self.act_dim)
###################################################
# Network Related
###################################################
def make_action(self, obs, t, noise=True):
""" predict next action from Actor's Policy
"""
action_ = self.actor.predict(obs)[0]
a = np.clip(action_ + (self.noise.generate(t) if noise else 0), -self.action_bound, self.action_bound)
return a
def update_networks(self, obs, acts, critic_target):
""" Train actor & critic from sampled experience
"""
self.critic.train(obs, acts, critic_target) # Update the critic
# get next action and Q-value Gradient
n_actions = self.actor.network.predict(obs)
q_grads = self.critic.Qgradient(obs, n_actions)
# update actor
self.actor.train(obs,self.critic.network, q_grads)
# update target networks
self.actor.target_update()
self.critic.target_update()
def replay(self, replay_num_):
if self.with_per and (self.buffer.size() <= self.batch_size): return
for _ in range(replay_num_):
# sample from buffer
states, actions, rewards, dones, new_states, idx = self.sample_batch(self.batch_size)
new_states = new_states.tolist() # Fix to "ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type list)"
# get target q-value using target network
q_vals = self.critic.target_predict([new_states, self.actor.target_predict(new_states)])
# bellman iteration for target critic value
critic_target = np.asarray(q_vals)
for i in range(q_vals.shape[0]):
if dones[i]:
critic_target[i] = rewards[i]
else:
critic_target[i] = self.discount_factor * q_vals[i] + rewards[i]
if self.with_per:
self.buffer.update(idx[i], abs(q_vals[i] - critic_target[i]))
# train(or update) the actor & critic and target networks
self.update_networks(states, actions, critic_target)
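# (Added note) critic_target above is the standard DDPG Bellman backup computed with the
# target networks: y_i = r_i for terminal transitions, otherwise
# y_i = r_i + discount_factor * Q'(s'_i, mu'(s'_i)).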
####################################################
# Buffer Related
####################################################
def memorize(self,obs,act,reward,done,new_obs):
"""store experience in the buffer
"""
if self.with_per:
q_val = self.critic.network([np.expand_dims(obs, axis=0), self.actor.predict(obs)])
next_action = self.actor.target_network.predict(np.expand_dims(new_obs, axis=0))
q_val_t = self.critic.target_predict([np.expand_dims(new_obs, axis=0), next_action])
new_val = reward + self.discount_factor * q_val_t
td_error = abs(new_val - q_val)[0]
else:
td_error = 0
self.buffer.memorize(obs,act,reward, done,new_obs, td_error)
def sample_batch(self, batch_size):
""" Sampling from the batch
"""
return self.buffer.sample_batch(batch_size)
###################################################
# Save & Load Networks
###################################################
def save_weights(self, episode):
""" Agent's Weights Saver
"""
self.actor.save_network(self.outdir, episode)
self.critic.save_network(self.outdir, episode)
rospy.loginfo('''
====================================
Saved model to disk
====================================
''')
def load_weights(self, episode):
""" Agent's Weights Loader
"""
self.actor.load_network(self.outdir, episode)
self.critic.load_network(self.outdir, episode)
rospy.loginfo('''
====================================
Loaded model from disk
====================================
''')
if __name__ == '__main__':
rospy.init_node('turtlex_arm_algorithm', anonymous=True, log_level=rospy.DEBUG)
# logging hierarchy: CRITICAL > ERROR > WARNING > INFO > DEBUG; the chosen log level outputs that levels and all the ones above it
while('/spawn_turtlex_model' in rosnode.get_node_names()):
pass
batch_size = rospy.get_param('/turtlex_arm/batch_size')
buffer_size = rospy.get_param('/turtlex_arm/replay_buffer_size')
monitor = rospy.get_param('/turtlex_arm/monitor')
max_ep_steps = rospy.get_param('/turtlex_arm/max_episode_steps')
load_model = rospy.get_param("/turtlex_arm/load_model")
n_actions = rospy.get_param('/turtlex_arm/n_actions')
n_observations = rospy.get_param('/turtlex_arm/n_observations')
is_training = rospy.get_param('/turtlex_arm/training')
if is_training:
episode_num = rospy.get_param('/turtlex_arm/episodes_training') # Get the number of episodes for training
else:
test_loops = rospy.get_param('/turtlex_arm/test_loops')
episode_num = test_loops * len(rospy.get_param('/turtlex_arm/ee_goals/x')) # Get the number of episodes for testing
# Get the world name
world_name = rospy.get_param('/turtlex_arm/world_name')
rospackage_name = "turtlex_gym"
environment_name = 'TaskArmOffice-v0'
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('results')
gym_outdir = pkg_path + '/gym/' + world_name + '_arm_ddpg'
nets_outdir = pkg_path + '/nets_train/' + world_name + '_arm_ddpg'
env = gym.make(environment_name)
if monitor: env = gym.wrappers.Monitor(env, gym_outdir, force=True)
try:
# Ensure that the action bound is symmetric
assert np.all(env.action_space.high + env.action_space.low == 0)
is_discrete = False
rospy.loginfo('Continuous Action Space')
except AttributeError:
is_discrete = True
rospy.logerr('Discrete Action Space')
# Create Agent model
agent = ddpgAgent(env, batch_size=batch_size, w_per=False, is_discrete=is_discrete, buffer_size=buffer_size, outdir=nets_outdir)
if load_model != False:
agent.load_weights(load_model)
rospy.logdebug('State Dimension: ' + str(n_observations))
rospy.logdebug('Action Dimension: ' + str(n_actions))
rospy.logdebug(f"env.action_space.high: {env.action_space.high}")
rospy.logdebug(f"env.action_space.low: {env.action_space.low}")
rospy.logdebug(f"env.observation_space.high: {env.observation_space.high}")
rospy.logdebug(f"env.observation_space.low: {env.observation_space.low}")
highest_reward = 0
start_time = time.time()
for ep in range(1, episode_num + 1):
rospy.loginfo(tcolors.CYAN + "######################## Beginning episode => " + str(ep) + tcolors.ENDC)
if monitor: env.stats_recorder.done = None
state = env.reset()
cumulated_ep_reward = 0
for step in range(1, max_ep_steps + 1):
rospy.loginfo(tcolors.CYAN + "############### Starting Step => " + str(step) + tcolors.ENDC)
#env.render() # openai_ros does not support render for the moment
action = agent.make_action(state, step)
next_state, reward, done, _ = env.step(action)
if is_training:
agent.memorize(state, action, reward, done, next_state) # store the results into buffer
state = next_state
cumulated_ep_reward += reward
if highest_reward < cumulated_ep_reward:
highest_reward = cumulated_ep_reward
if is_training:
agent.replay(1)
rospy.loginfo(tcolors.CYAN + "# State used for the action => [" + str(', '.join(map(str, state))) + "]" + tcolors.ENDC)
#rospy.loginfo(tcolors.CYAN + "# Action performed => [" + str(', '.join(map(str, action))) + "]" + tcolors.ENDC)
rospy.loginfo(tcolors.CYAN + "# Action performed => " + str(action) + tcolors.ENDC)
rospy.loginfo(tcolors.CYAN + "# Reward that action generated => " + str(reward) + tcolors.ENDC)
rospy.loginfo(tcolors.CYAN + "# Cumulated episode reward => " + str(cumulated_ep_reward) + tcolors.ENDC)
rospy.loginfo(tcolors.CYAN + "# Starting state of the next step => [" + str(', '.join(map(str, next_state))) + "]" + tcolors.ENDC)
if done:
rospy.loginfo(tcolors.CYAN + f"Episode {ep} done" + tcolors.ENDC)
rospy.loginfo(tcolors.CYAN + "############### END Step => " + str(step) + tcolors.ENDC)
break
else:
rospy.loginfo(tcolors.CYAN + f"Episode {ep} NOT done" + tcolors.ENDC)
rospy.loginfo(tcolors.CYAN + "############### END Step => " + str(step) + tcolors.ENDC)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.loginfo(tcolors.MAGENTA + "Episode: " + str(ep) + " | cumulated_ep_reward: " + str(cumulated_ep_reward) + " | highest_reward: " +
str(highest_reward) + " | Final step: " + str(step) + " | Time: %d:%02d:%02d" % (h, m, s) + "\n\n" + tcolors.ENDC)
if is_training and ep % 20 == 0 and ep != episode_num:
agent.save_weights(ep)
if not is_training:
rospy.loginfo(f"\nTest results: {env.solved_counter} / {episode_num}\n")
else:
agent.save_weights(episode_num)
env.close() # Known issue: https://stackoverflow.com/questions/64679139
|
the-stack_106_29720 | from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='spotifyscraper',
version='1.0.5',
description='Spotify Web Player Scraper using python, scrape and download song and cover from Spotify.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/AliAkhtari78/SpotifyScraper',
author='Ali Akhtari',
packages=["SpotifyScraper"],
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='spotify spotifydownloader downloader mp3downloader webscraper spotifywebscraper spotifyscraper music cover setuptools development',
python_requires='>=3.6.*, <4',
install_requires=['appdirs',
'beautifulsoup4',
'bs4',
'certifi',
'chardet',
'cssselect',
'deprecation',
'eyeD3',
'fake-useragent',
'filetype',
'idna',
'lxml',
'packaging',
'parse',
'pyee',
'pyparsing',
'pyppeteer',
'pyquery',
'PyYAML',
'requests',
'six',
'soupsieve',
'tqdm',
'urllib3',
'w3lib',
'websockets',
],
project_urls={
'Bug Reports': 'https://github.com/AliAkhtari78/SpotifyScraper/issues',
'Source': 'https://github.com/AliAkhtari78/SpotifyScraper',
'PyPi': 'https://pypi.org/project/spotifyscraper',
'Documentation': 'https://spotifyscraper.readthedocs.io/en/latest',
'Full Tutorial Blog': 'https://aliakhtari.com/Blog/SpotifyScraper',
'Author WebSite': 'https://aliakhtari.com',
},
)
|
the-stack_106_29722 | import cv2
import numpy as np
import scipy.ndimage
try:
from sklearn.externals import joblib  # scikit-learn < 0.23
except ImportError:
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases
from tools import *
from ml import *
import argparse
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--mode', '-mode', help="Mode : train or predict", type=str)
parser.add_argument('--a', '-algorithm', help="algorithm/model name", type=str)
parser.add_argument('--i', '-image', help="licence plate to read", type=str)
parser.add_argument('--model', '-model', help="Model file path", type=str)
parser.add_argument('--d', '-dataset', help="dataset folder path", type=str)
args = parser.parse_args()
if args.mode == "train":
# Load Data
data, labels = load_dataset(args.d)
# Train ML models
mlp(data, labels, "mlp.pkl")
knn(data, labels, "knn.pkl")
elif args.mode == "predict":
# Load image
img = cv2.imread(args.i)
# Apply image segmentation and extract digits
digits = histogram_of_pixel_projection(img)
# Load ML model
clf = joblib.load(args.model)
# List of predicted classes
prediction = []
for i in range(len(digits)):
# Get digit
digit = digits[i]
# Make the image squared
squared_digit = square(digit)
# Resize the image
resized_digit = cv2.resize(squared_digit, (20, 20), interpolation=cv2.INTER_AREA)
# Convert to one dim vector
one_vector_digit = np.array(resized_digit).ravel()
# Predict digit class
resultat = clf.predict([one_vector_digit])
# Append to total predictions
prediction.append(resultat[0])
print(prediction)
else:
print(" Error mode argument !")
|
the-stack_106_29723 | import re
import pytest
import pandas as pd
@pytest.mark.filterwarnings(
# openpyxl
"ignore:defusedxml.lxml is no longer supported:DeprecationWarning"
)
@pytest.mark.filterwarnings(
# html5lib
"ignore:Using or importing the ABCs from:DeprecationWarning"
)
@pytest.mark.filterwarnings(
# fastparquet
"ignore:pandas.core.index is deprecated:FutureWarning"
)
@pytest.mark.filterwarnings(
# pandas_datareader
"ignore:pandas.util.testing is deprecated:FutureWarning"
)
@pytest.mark.filterwarnings(
# https://github.com/pandas-dev/pandas/issues/35252
"ignore:Distutils:UserWarning"
)
def test_show_versions(capsys):
# gh-32041
pd.show_versions()
captured = capsys.readouterr()
result = captured.out
# check header
assert "INSTALLED VERSIONS" in result
# check full commit hash
assert re.search(r"commit\s*:\s[0-9a-f]{40}\n", result)
# check required dependency
assert re.search(r"numpy\s*:\s([0-9\.\+a-f]|dev)+\n", result)
# check optional dependency
assert re.search(r"pyarrow\s*:\s([0-9\.]+|None)\n", result)
|
the-stack_106_29724 | from app import create_app, db
from flask_script import Manager,Server
from app.models import User,Role,Comment,Blog
from flask_migrate import Migrate,MigrateCommand
#Create app instance
app = create_app('production')
app.secret_key = '12'
manager = Manager(app)
manager.add_command('server',Server)
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
@manager.shell
def make_shell_context():
return dict(app=app, db=db, User = User,Role = Role, Comment = Comment, Blog = Blog)
@manager.command
def test():
import unittest
tests=unittest.TestLoader().discover("tests")
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run() |
the-stack_106_29726 | # -*- coding: utf-8 -*-
from random import randint
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# Number before
def numeroAntes(minn, maxn):
numero = randint(minn,maxn)
pregunta = '¿Qué número está antes del ' + str(numero) + '?'
respuesta = numero - 1
return [pregunta, respuesta, None]
# Number after
def numeroDespues(minn, maxn):
numero = randint(minn,maxn)
pregunta = '¿Qué número esta después del número ' + str(numero) + '?'
respuesta = numero + 1
return [pregunta, respuesta, None]
# Number in between
def numeroEntre(minn, maxn):
numero1 = randint(minn,maxn)
numero2 = numero1 + 2
pregunta = '¿Qué número está entre el número ' + str(numero1) + ' y el ' + str(numero2) + '?'
respuesta = numero1 + 1
return [pregunta, respuesta, None]
# Place-value questions
def valorPosicional(persona, objeto, minn, maxn):
numero = randint(minn,maxn)
tamanoNumero = len(str(numero))
unidad = randint(1, tamanoNumero)
if unidad < 2: valorPosicional, respuesta = 'Unidades' , int(str(numero)[tamanoNumero - 1])
elif unidad < 3: valorPosicional, respuesta = 'Decenas', int(str(numero)[tamanoNumero - 2])# + '0')
elif unidad < 4: valorPosicional, respuesta = 'Centenas', int(str(numero)[tamanoNumero - 3])# + '00')
else : valorPosicional, respuesta = 'Millares', int(str(numero)[tamanoNumero - 4])# + '000')
if valorPosicional == 'Millares': cuanto = 'cuantos'
else: cuanto = 'cuantas'
if numero != 1:
objeto = objeto + 's'
listaModelosPreguntas = [
'Si ' + persona + ' tiene ' + str(numero) + ' ' + objeto + ', ' + cuanto + ' ' + valorPosicional + ' de ' + objeto + ' tiene?',
'Si compro ' + str(numero) + ' ' + objeto + ', ' + cuanto + ' ' + valorPosicional + ' de ' + objeto + ' compre?',
'Si regalo ' + str(numero) + ' ' + objeto + ', ' + cuanto + ' ' + valorPosicional + ' de ' + objeto + ' regale?'
]
indiceModeloPregunta = randint(0,len(listaModelosPreguntas) - 1)
pregunta = listaModelosPreguntas[indiceModeloPregunta]
imagen = "/static/svgs/" + objeto + ".svg"
return [pregunta, respuesta, imagen]
# Subtraction questions
def resta(persona, comida, minn, maxn):
# Si <persona> tiene <numero> <comida> y se come <numero>, cuanta(s) <comida> le queda(n)?
numero1 = randint(minn,maxn)
numero2 = randint(1,numero1)
respuesta = numero1-numero2
imagen = "/static/svgs/" + comida + ".svg"
if comida[len(comida)-1] == 'a': cuanto = 'cuantas'
else: cuanto = 'cuantos'
if numero1 != 1: plural = 's'
else: plural = ''
listaModelosPreguntas = [
'Si '+persona+' tiene '+str(numero1)+' '+comida+plural + ' y se come '+str(numero2)+', '+cuanto+' '+comida+'s'+' le quedan?',
'Si '+persona+' compra '+str(numero1)+' '+comida+plural + ' y luego vende '+str(numero2)+', '+cuanto+' '+comida+'s'+' le quedan?',
'Tengo '+str(numero1)+' '+comida+plural + ' y le regalo '+str(numero2)+' a '+persona+ ', ' +cuanto+' '+comida+'s'+' me quedan?'
]
indiceModeloPregunta = randint(0,len(listaModelosPreguntas) - 1)
pregunta = listaModelosPreguntas[indiceModeloPregunta]
return [pregunta,respuesta,imagen]
# Addition questions
def suma(persona1, persona2, accion, accionPasado, minn, maxn):
# <persona> <accion> <numero> veces, <persona2> <accion> <numero> veces mas que <persona>, cuantas veces <accionpasado> <persona2>?
imagen = "/static/svgs/" + accion + ".svg"
numero1 = randint(minn,maxn)
if (numero1 + maxn) > 1000:
maxn = 1000 - numero1
numero2 = randint(1,maxn)
respuesta = numero1 + numero2
pregunta = 'Si ' + persona1 + ' ' + accion + ' ' + str(numero1) + ' metros y ' + persona2 + ' ' + str(numero2) + ' cuantos metros ' + accionPasado + ' entre los dos?'
return [pregunta,respuesta,imagen]
# Multiplication questions
def multiplicacion(persona, objeto, minn, maxn):
imagen = "/static/svgs/" + objeto + ".svg"
numero1 = randint(1,maxn)
if (numero1 * maxn) > 1000:
maxn = int(1000 / numero1)
numero2 = randint(1, maxn)
if numero1 > 1: plural1 = 's'
else: plural1 = ''
if numero2 > 1: plural2 = 's'
else: plural2 = ''
respuesta = numero1 * numero2
listaModelosPreguntas = [
persona + ' tiene ' + str(numero1) + ' bolsos cada uno con ' + str(numero2) + ' ' + objeto + plural2 + ' dentro, cuantas ' + objeto + plural2 + ' compró ' + persona + '?',
'Si en una camioneta llevo ' + str(numero1) + ' cajas con ' + str(numero2) + ' ' + objeto + plural2 + ' cada una, ' + ' qué número de ' + objeto + plural2 + ' llevo?',
persona + ' recoge ' + str(numero1) + ' ' + objeto + plural1 + ' cada dia, cúal es el número de ' + objeto + plural1 + ' que tendrá ' + persona + ' después de ' + str(numero2) + ' dias?'
]
indiceModeloPregunta = randint(0,len(listaModelosPreguntas) - 1)
pregunta = listaModelosPreguntas[indiceModeloPregunta]
return [pregunta,respuesta,imagen]
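# --- Added usage sketch (not part of the original module) ---
# Example call with an arbitrary person/object pair; each generator returns
# [question, answer, image path]:
# pregunta, respuesta, imagen = multiplicacion('Ana', 'manzana', 1, 10)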
# Numeric-position questions (commented out)
# def posicionNumericaA(persona, maxn):
# numero1 = randint(1,maxn)
# if randint(1, 2) == 1: antesDespues, respuesta = 'antes', numero1 - 1
# else: antesDespues, respuesta = 'despues ', numero1 + 1
# pregunta = persona + ' tiene el turno ' + str(numero1) + ' para jugar con el balon, que turno tiene la persona ' + antesDespues + ' que ' + persona + '?'
# return [pregunta, respuesta]
# def posicionNumericaB(persona1, persona2, persona3):
# numero = 0
# if randint(1, 2) == 1:
# numero = randint(1,10)
# antesDespues, respuesta = numero + 2, numero + 1
# else:
# numero = randint(3,12)
# antesDespues, respuesta = numero - 2, numero - 1
# pregunta = 'Un tren que sale a cada hora, el cual ' + persona1 + ' lo toma a las ' + str(numero) + ' y ' + persona2 + ' toma el tren a las ' + str(antesDespues) + ' a que hora tomo el tren ' + persona3 + ' si salio entre ' + persona1 + ' y ' + persona2 +'?'
# return [pregunta, respuesta]
# Set-comparison questions
def conjuntosIguales(animal1, animal2):
totalConjuntoA = randint(1, 5)
totalConjuntoB = randint(1, 5)
pregunta = 'Hay dos manadas de animales, escúchalos y escribe si el número de animales en cada manada es diferente o es igual.'
if totalConjuntoA != totalConjuntoB:
respuesta = 'diferente'
else:
respuesta = 'igual'
imagen = "/static/svgs/" + animal1 + ".svg"
return [pregunta, respuesta, imagen , animal1, animal2, totalConjuntoA, totalConjuntoB]
def conjuntoMayor(animal1, animal2):
totalConjuntoA = randint(1, 5)
totalConjuntoB = randint(1, 5)
while totalConjuntoA == totalConjuntoB:
totalConjuntoB = randint(1, 5)
pregunta = 'Hay dos manadas de animales, escúchalos y escribe cual es el nombre del animal con mayor número'
if totalConjuntoA > totalConjuntoB:
respuesta = str(animal1)
else:
respuesta = str(animal2)
imagen = "/static/svgs/" + animal1 + ".svg"
return [pregunta, respuesta, imagen , animal1, animal2, totalConjuntoA, totalConjuntoB]
def conjuntoMenor(animal1, animal2):
totalConjuntoA = randint(1, 5)
totalConjuntoB = randint(1, 5)
while totalConjuntoA == totalConjuntoB:
totalConjuntoB = randint(1, 5)
pregunta = 'Hay dos manadas de animales, escúchalos y escribe cual es el nombre del animal con menor número'
if totalConjuntoA < totalConjuntoB:
respuesta = str(animal1)
else:
respuesta = str(animal2)
imagen = "/static/svgs/" + animal1 + ".svg"
return [pregunta, respuesta, imagen , animal1, animal2, totalConjuntoA, totalConjuntoB]
def contarSonidos(animal1, animal2):
totalConjuntoA = randint(1, 5)
totalConjuntoB = randint(1, 5)
pregunta = 'Dos animales cantan sin parar, cuenta cuantas veces cantan en total?'
respuesta = totalConjuntoA + totalConjuntoB
imagen = "/static/svgs/" + animal1 + ".svg"
return [pregunta, respuesta, imagen , animal1, animal2, totalConjuntoA, totalConjuntoB] |
the-stack_106_29728 | import taichi as ti
import sys
import math
import numpy as np
import os
import taichi as tc
import matplotlib.pyplot as plt
real = ti.f32
ti.set_default_fp(real)
max_steps = 2048
vis_interval = 64
output_vis_interval = 2
steps = 1024
assert steps * 2 <= max_steps
vis_resolution = 1024
scalar = lambda: ti.var(dt=real)
vec = lambda: ti.Vector(2, dt=real)
loss = scalar()
# ti.cfg.arch = ti.cuda
init_x = vec()
init_v = vec()
x = vec()
v = vec()
impulse = vec()
billiard_layers = 4
n_balls = 1 + (1 + billiard_layers) * billiard_layers // 2
target_ball = n_balls - 1
# target_ball = 0
goal = [0.9, 0.75]
radius = 0.03
elasticity = 0.8
@ti.layout
def place():
ti.root.dense(ti.l, max_steps).dense(ti.i, n_balls).place(x, v, impulse)
ti.root.place(init_x, init_v)
ti.root.place(loss)
ti.root.lazy_grad()
dt = 0.003
alpha = 0.00000
learning_rate = 0.01
@ti.kernel
def collide(t: ti.i32):
for i in range(n_balls):
for j in range(i):
imp = ti.Vector([0.0, 0.0])
if i != j:
dist = x[t, i] - x[t, j]
dist_norm = dist.norm()
if dist_norm < 2 * radius:
dir = ti.Vector.normalized(dist)
rela_v = v[t, i] - v[t, j]
projected_v = dir.dot(rela_v)
if projected_v < 0:
imp = -(1 + elasticity) * 0.5 * projected_v * dir
ti.atomic_add(impulse[t + 1, i], imp)
for j_ in range(n_balls - i - 1):
j = j_ + i + 1
imp = ti.Vector([0.0, 0.0])
if i != j:
dist = x[t, i] - x[t, j]
dist_norm = dist.norm()
if dist_norm < 2 * radius:
dir = ti.Vector.normalized(dist)
rela_v = v[t, i] - v[t, j]
projected_v = dir.dot(rela_v)
if projected_v < 0:
imp = -(1 + elasticity) * 0.5 * projected_v * dir
ti.atomic_add(impulse[t + 1, i], imp)
@ti.kernel
def advance(t: ti.i32):
for i in range(n_balls):
v[t, i] = v[t - 1, i] + impulse[t, i]
x[t, i] = x[t - 1, i] + dt * v[t, i]
@ti.kernel
def compute_loss(t: ti.i32):
loss[None] = ti.sqr(x[t, target_ball][0] - goal[0]) + ti.sqr(
x[t, target_ball][1] - goal[1])
@ti.kernel
def initialize():
x[0, 0] = init_x
v[0, 0] = init_v
gui = tc.core.GUI("Billiards", tc.veci(1024, 1024))
def forward(visualize=False, output=None):
initialize()
interval = vis_interval
if output:
interval = output_vis_interval
os.makedirs('billiards/{}/'.format(output), exist_ok=True)
count = 0
for i in range(billiard_layers):
for j in range(i + 1):
count += 1
x[0, count] = [i * 2 * radius + 0.5,
j * 2 * radius + 0.5 - i * radius * 0.7]
pixel_radius = int(radius * 1024) + 1
canvas = gui.get_canvas()
for t in range(1, steps):
collide(t - 1)
advance(t)
if (t + 1) % interval == 0 and visualize:
canvas.clear(0x3C733F)
canvas.circle(tc.vec(goal[0], goal[1])).radius(pixel_radius // 2).color(0x00000).finish()
for i in range(n_balls):
if i == 0:
color = 0xCCCCCC
elif i == n_balls - 1:
color = 0x3344cc
else:
color = 0xF20530
canvas.circle(tc.vec(x[t, i][0], x[t, i][1])).radius(
pixel_radius).color(color).finish()
gui.update()
if output:
gui.screenshot('billiards/{}/{:04d}.png'.format(output, t))
compute_loss(steps - 1)
@ti.kernel
def clear():
for t in range(0, max_steps):
for i in range(0, n_balls):
impulse[t, i] = ti.Vector([0.0, 0.0])
def optimize():
init_x[None] = [0.1, 0.5]
init_v[None] = [0.3, 0.0]
clear()
forward(visualize=True, output='initial')
for iter in range(200):
clear()
with ti.Tape(loss):
if iter % 20 == 0:
output = 'iter{:04d}'.format(iter)
else:
output = None
forward(visualize=True, output=output)
print('Iter=', iter, 'Loss=', loss[None])
for d in range(2):
init_x[None][d] -= learning_rate * init_x.grad[None][d]
init_v[None][d] -= learning_rate * init_v.grad[None][d]
clear()
forward(visualize=True, output='final')
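# Sweep the initial velocity angle (scaled by `zoom`) and plot the resulting
# loss landscape of the billiards objective.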
def scan(zoom):
N = 1000
angles = []
losses = []
forward(visualize=True, output='initial')
for i in range(N):
alpha = ((i + 0.5) / N - 0.5) * math.pi * zoom
init_x[None] = [0.1, 0.5]
init_v[None] = [0.3 * math.cos(alpha), 0.3 * math.sin(alpha)]
loss[None] = 0
clear()
forward(visualize=False)
print(loss[None])
losses.append(loss[None])
angles.append(math.degrees(alpha))
plt.plot(angles, losses)
fig = plt.gcf()
fig.set_size_inches(5, 3)
plt.title('Billiard Scene Objective')
plt.ylabel('Objective')
plt.xlabel('Angle of velocity')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
if len(sys.argv) > 1:
scan(float(sys.argv[1]))
else:
optimize()
|
the-stack_106_29730 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, copy, json, re
from frappe import _
from frappe.modules import get_doc_path
from frappe.core.doctype.access_log.access_log import make_access_log
from frappe.utils import cint, sanitize_html, strip_html
from six import string_types
no_cache = 1
base_template_path = "templates/www/printview.html"
standard_format = "templates/print_formats/standard.html"
def get_context(context):
"""Build context for print"""
if not ((frappe.form_dict.doctype and frappe.form_dict.name) or frappe.form_dict.doc):
return {
"body": sanitize_html("""<h1>Error</h1>
<p>Parameters doctype and name required</p>
<pre>%s</pre>""" % repr(frappe.form_dict))
}
if frappe.form_dict.doc:
doc = frappe.form_dict.doc
else:
doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
settings = frappe.parse_json(frappe.form_dict.settings)
letterhead = frappe.form_dict.letterhead or None
meta = frappe.get_meta(doc.doctype)
print_format = get_print_format_doc(None, meta = meta)
make_access_log(doctype=frappe.form_dict.doctype, document=frappe.form_dict.name, file_type='PDF', method='Print')
return {
"body": get_rendered_template(doc, print_format = print_format,
meta=meta, trigger_print = frappe.form_dict.trigger_print,
no_letterhead=frappe.form_dict.no_letterhead, letterhead=letterhead,
settings=settings),
"css": get_print_style(frappe.form_dict.style, print_format),
"comment": frappe.session.user,
"title": doc.get(meta.title_field) if meta.title_field else doc.name,
"has_rtl": True if frappe.local.lang in ["ar", "he", "fa", "ps"] else False
}
def get_print_format_doc(print_format_name, meta):
"""Returns print format document"""
if not print_format_name:
print_format_name = frappe.form_dict.format \
or meta.default_print_format or "Standard"
if print_format_name == "Standard":
return None
else:
try:
return frappe.get_doc("Print Format", print_format_name)
except frappe.DoesNotExistError:
# if old name, return standard!
return None
def get_rendered_template(doc, name=None, print_format=None, meta=None,
no_letterhead=None, letterhead=None, trigger_print=False,
settings=None):
print_settings = frappe.get_single("Print Settings").as_dict()
print_settings.update(settings or {})
if isinstance(no_letterhead, string_types):
no_letterhead = cint(no_letterhead)
elif no_letterhead is None:
no_letterhead = not cint(print_settings.with_letterhead)
doc.flags.in_print = True
doc.flags.print_settings = print_settings
if not frappe.flags.ignore_print_permissions:
validate_print_permission(doc)
if doc.meta.is_submittable:
if doc.docstatus==0 and not cint(print_settings.allow_print_for_draft):
frappe.throw(_("Not allowed to print draft documents"), frappe.PermissionError)
if doc.docstatus==2 and not cint(print_settings.allow_print_for_cancelled):
frappe.throw(_("Not allowed to print cancelled documents"), frappe.PermissionError)
doc.run_method("before_print", print_settings)
if not hasattr(doc, "print_heading"): doc.print_heading = None
if not hasattr(doc, "sub_heading"): doc.sub_heading = None
if not meta:
meta = frappe.get_meta(doc.doctype)
jenv = frappe.get_jenv()
format_data, format_data_map = [], {}
# determine template
if print_format:
doc.print_section_headings = print_format.show_section_headings
doc.print_line_breaks = print_format.line_breaks
doc.align_labels_right = print_format.align_labels_right
doc.absolute_value = print_format.absolute_value
def get_template_from_string():
return jenv.from_string(get_print_format(doc.doctype,
print_format))
if print_format.custom_format:
template = get_template_from_string()
elif print_format.format_data:
# set format data
format_data = json.loads(print_format.format_data)
for df in format_data:
format_data_map[df.get("fieldname")] = df
if "visible_columns" in df:
for _df in df.get("visible_columns"):
format_data_map[_df.get("fieldname")] = _df
doc.format_data_map = format_data_map
template = "standard"
elif print_format.standard=="Yes":
template = get_template_from_string()
else:
# fallback
template = "standard"
else:
template = "standard"
if template == "standard":
template = jenv.get_template(standard_format)
letter_head = frappe._dict(get_letter_head(doc, no_letterhead, letterhead) or {})
if letter_head.content:
letter_head.content = frappe.utils.jinja.render_template(letter_head.content, {"doc": doc.as_dict()})
if letter_head.footer:
letter_head.footer = frappe.utils.jinja.render_template(letter_head.footer, {"doc": doc.as_dict()})
convert_markdown(doc, meta)
args = {
"doc": doc,
"meta": frappe.get_meta(doc.doctype),
"layout": make_layout(doc, meta, format_data),
"no_letterhead": no_letterhead,
"trigger_print": cint(trigger_print),
"letter_head": letter_head.content,
"footer": letter_head.footer,
"print_settings": print_settings
}
html = template.render(args, filters={"len": len})
if cint(trigger_print):
html += trigger_print_script
return html
def convert_markdown(doc, meta):
	'''Convert markdown field values to HTML if necessary'''
for field in meta.fields:
if field.fieldtype=='Text Editor':
value = doc.get(field.fieldname)
if value and '<!-- markdown -->' in value:
doc.set(field.fieldname, frappe.utils.md_to_html(value))
@frappe.whitelist()
def get_html_and_style(doc, name=None, print_format=None, meta=None,
no_letterhead=None, letterhead=None, trigger_print=False, style=None,
settings=None, templates=None):
"""Returns `html` and `style` of print format, used in PDF etc"""
if isinstance(doc, string_types) and isinstance(name, string_types):
doc = frappe.get_doc(doc, name)
if isinstance(doc, string_types):
doc = frappe.get_doc(json.loads(doc))
print_format = get_print_format_doc(print_format, meta=meta or frappe.get_meta(doc.doctype))
try:
html = get_rendered_template(doc, name=name, print_format=print_format, meta=meta,
no_letterhead=no_letterhead, letterhead=letterhead, trigger_print=trigger_print,
settings=frappe.parse_json(settings))
except frappe.TemplateNotFoundError:
frappe.clear_last_message()
html = None
return {
"html": html,
"style": get_print_style(style=style, print_format=print_format)
}
@frappe.whitelist()
def get_rendered_raw_commands(doc, name=None, print_format=None, meta=None, lang=None):
"""Returns Rendered Raw Commands of print format, used to send directly to printer"""
if isinstance(doc, string_types) and isinstance(name, string_types):
doc = frappe.get_doc(doc, name)
if isinstance(doc, string_types):
doc = frappe.get_doc(json.loads(doc))
print_format = get_print_format_doc(print_format, meta=meta or frappe.get_meta(doc.doctype))
if not print_format or (print_format and not print_format.raw_printing):
frappe.throw(_("{0} is not a raw printing format.").format(print_format),
frappe.TemplateNotFoundError)
return {
"raw_commands": get_rendered_template(doc, name=name, print_format=print_format, meta=meta)
}
def validate_print_permission(doc):
if frappe.form_dict.get("key"):
if frappe.form_dict.key == doc.get_signature():
return
for ptype in ("read", "print"):
if (not frappe.has_permission(doc.doctype, ptype, doc)
and not frappe.has_website_permission(doc)):
raise frappe.PermissionError(_("No {0} permission").format(ptype))
def get_letter_head(doc, no_letterhead, letterhead=None):
if no_letterhead:
return {}
if letterhead:
return frappe.db.get_value("Letter Head", letterhead, ["content", "footer"], as_dict=True)
if doc.get("letter_head"):
return frappe.db.get_value("Letter Head", doc.letter_head, ["content", "footer"], as_dict=True)
else:
return frappe.db.get_value("Letter Head", {"is_default": 1}, ["content", "footer"], as_dict=True) or {}
def get_print_format(doctype, print_format):
if print_format.disabled:
frappe.throw(_("Print Format {0} is disabled").format(print_format.name),
frappe.DoesNotExistError)
# server, find template
path = os.path.join(get_doc_path(frappe.db.get_value("DocType", doctype, "module"),
"Print Format", print_format.name), frappe.scrub(print_format.name) + ".html")
if os.path.exists(path):
with open(path, "r") as pffile:
return pffile.read()
else:
if print_format.raw_printing:
return print_format.raw_commands
if print_format.html:
return print_format.html
frappe.throw(_("No template found at path: {0}").format(path),
frappe.TemplateNotFoundError)
def make_layout(doc, meta, format_data=None):
"""Builds a hierarchical layout object from the fields list to be rendered
by `standard.html`
:param doc: Document to be rendered.
:param meta: Document meta object (doctype).
:param format_data: Fields sequence and properties defined by Print Format Builder."""
layout, page = [], []
layout.append(page)
if format_data:
# extract print_heading_template from the first field
# and remove the field
if format_data[0].get("fieldname") == "print_heading_template":
doc.print_heading_template = format_data[0].get("options")
format_data = format_data[1:]
def get_new_section(): return {'columns': [], 'has_data': False}
def append_empty_field_dict_to_page_column(page):
""" append empty columns dict to page layout """
if not page[-1]['columns']:
page[-1]['columns'].append({'fields': []})
for df in format_data or meta.fields:
if format_data:
# embellish df with original properties
df = frappe._dict(df)
if df.fieldname:
original = meta.get_field(df.fieldname)
if original:
newdf = original.as_dict()
newdf.hide_in_print_layout = original.get('hide_in_print_layout')
newdf.update(df)
df = newdf
df.print_hide = 0
if df.fieldtype=="Section Break" or page==[]:
if len(page) > 1:
if page[-1]['has_data']==False:
# truncate last section if empty
del page[-1]
section = get_new_section()
if df.fieldtype=='Section Break' and df.label:
section['label'] = df.label
page.append(section)
elif df.fieldtype=="Column Break":
# if last column break and last column is not empty
page[-1]['columns'].append({'fields': []})
else:
# add a column if not yet added
append_empty_field_dict_to_page_column(page)
if df.fieldtype=="HTML" and df.options:
doc.set(df.fieldname, True) # show this field
if df.fieldtype=='Signature' and not doc.get(df.fieldname):
placeholder_image = '/assets/frappe/images/signature-placeholder.png'
doc.set(df.fieldname, placeholder_image)
if is_visible(df, doc) and has_value(df, doc):
append_empty_field_dict_to_page_column(page)
page[-1]['columns'][-1]['fields'].append(df)
# section has fields
page[-1]['has_data'] = True
# if table, add the row info in the field
# if a page break is found, create a new docfield
if df.fieldtype=="Table":
df.rows = []
df.start = 0
df.end = None
for i, row in enumerate(doc.get(df.fieldname)):
if row.get("page_break"):
# close the earlier row
df.end = i
# new page, with empty section and column
page = [get_new_section()]
layout.append(page)
append_empty_field_dict_to_page_column(page)
# continue the table in a new page
df = copy.copy(df)
df.start = i
df.end = None
page[-1]['columns'][-1]['fields'].append(df)
return layout
def is_visible(df, doc):
"""Returns True if docfield is visible in print layout and does not have print_hide set."""
if df.fieldtype in ("Section Break", "Column Break", "Button"):
return False
if (df.permlevel or 0) > 0 and not doc.has_permlevel_access_to(df.fieldname, df):
return False
return not doc.is_print_hide(df.fieldname, df)
def has_value(df, doc):
value = doc.get(df.fieldname)
if value in (None, ""):
return False
elif isinstance(value, string_types) and not strip_html(value).strip():
if df.fieldtype in ["Text", "Text Editor"]:
return True
return False
elif isinstance(value, list) and not len(value):
return False
return True
def get_print_style(style=None, print_format=None, for_legacy=False):
print_settings = frappe.get_doc("Print Settings")
if not style:
style = print_settings.print_style or ''
context = {
"print_settings": print_settings,
"print_style": style,
"font": get_font(print_settings, print_format, for_legacy)
}
css = frappe.get_template("templates/styles/standard.css").render(context)
if style and frappe.db.exists('Print Style', style):
css = css + '\n' + frappe.db.get_value('Print Style', style, 'css')
# move @import to top
for at_import in list(set(re.findall("(@import url\([^\)]+\)[;]?)", css))):
css = css.replace(at_import, "")
# prepend css with at_import
css = at_import + css
if print_format and print_format.css:
css += "\n\n" + print_format.css
return css
def get_font(print_settings, print_format=None, for_legacy=False):
default = 'Inter, "Helvetica Neue", Helvetica, Arial, "Open Sans", sans-serif'
if for_legacy:
return default
font = None
if print_format:
if print_format.font and print_format.font!="Default":
font = '{0}, sans-serif'.format(print_format.font)
if not font:
if print_settings.font and print_settings.font!="Default":
font = '{0}, sans-serif'.format(print_settings.font)
else:
font = default
return font
def get_visible_columns(data, table_meta, df):
"""Returns list of visible columns based on print_hide and if all columns have value."""
columns = []
doc = data[0] or frappe.new_doc(df.options)
hide_in_print_layout = df.get('hide_in_print_layout') or []
def add_column(col_df):
if col_df.fieldname in hide_in_print_layout:
return False
return is_visible(col_df, doc) \
and column_has_value(data, col_df.get("fieldname"), col_df)
if df.get("visible_columns"):
# columns specified by column builder
for col_df in df.get("visible_columns"):
# load default docfield properties
docfield = table_meta.get_field(col_df.get("fieldname"))
if not docfield:
continue
newdf = docfield.as_dict().copy()
newdf.update(col_df)
if add_column(newdf):
columns.append(newdf)
else:
for col_df in table_meta.fields:
if add_column(col_df):
columns.append(col_df)
return columns
def column_has_value(data, fieldname, col_df):
"""Check if at least one cell in column has non-zero and non-blank value"""
has_value = False
if col_df.fieldtype in ['Float', 'Currency'] and not col_df.print_hide_if_no_value:
return True
for row in data:
value = row.get(fieldname)
if value:
if isinstance(value, string_types):
if strip_html(value).strip():
has_value = True
break
else:
has_value = True
break
return has_value
trigger_print_script = """
<script>
//allow wrapping of long tr
var elements = document.getElementsByTagName("tr");
var i = elements.length;
while (i--) {
if(elements[i].clientHeight>300){
elements[i].setAttribute("style", "page-break-inside: auto;");
}
}
window.print();
// close the window after print
// NOTE: doesn't close if print is cancelled in Chrome
setTimeout(function() {
window.close();
}, 1000);
</script>
"""
|
the-stack_106_29731 | #
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2015, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
from sphinx import addnodes
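# Sphinx helper for the GROMACS docs: an "mdp" option registers its own name
# as the link target, while an "mdp-value" is registered relative to the most
# recently parsed option as "option=value".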
class MdpNodeParser(object):
def __init__(self):
self._current_option = None
def parse_option(self, env, text, nodes):
nodes += addnodes.desc_name(text, text)
self._current_option = text
return text
def parse_value(self, env, text, nodes):
nodes += addnodes.desc_name(text, text)
if self._current_option is None:
return text
return self._current_option + '=' + text
def setup(app):
mdp_parser = MdpNodeParser()
app.add_object_type('mdp', 'mdp',
indextemplate='pair: %s; mdp option',
parse_node = mdp_parser.parse_option,
objname='mdp option')
app.add_object_type('mdp-value', 'mdp-value',
parse_node = mdp_parser.parse_value,
objname='mdp value')
app.add_object_type('cmake', 'cmake',
indextemplate='pair: %s; cmake option',
objname='CMake cache variable')
|
the-stack_106_29732 | # SPDX-FileCopyrightText: Fondazione Istituto Italiano di Tecnologia
# SPDX-License-Identifier: BSD-3-Clause
import os
import yarp
import argparse
import numpy as np
from adherent.trajectory_control import trajectory_controller
from adherent.trajectory_control.utils import define_foot_name_to_index_mapping
from adherent.trajectory_control.utils import compute_initial_joint_reference
# ==================
# USER CONFIGURATION
# ==================
parser = argparse.ArgumentParser()
parser.add_argument("--trajectory_path", help="Path where the generated trajectory is stored. Relative path from script folder.",
type = str, default = "../datasets/inference/")
parser.add_argument("--time_scaling", help="Time scaling to be applied to the generated trajectory. Keep it integer.",
type=int, default=2)
parser.add_argument("--footstep_scaling", help="Footstep scaling to be applied to the generated footsteps. Keep it between 0 and 1.",
type=float, default=0.5)
parser.add_argument("--deactivate_postural", help="Deactivate usage of the postural from Adherent.", action="store_true")
args = parser.parse_args()
trajectory_path = args.trajectory_path
time_scaling = args.time_scaling
footstep_scaling = args.footstep_scaling
use_joint_references = not args.deactivate_postural
# ==================
# YARP CONFIGURATION
# ==================
# YARP initialization
yarp.Network.init(yarp.YARP_CLOCK_NETWORK)
# ===================================
# TRAJECTORY CONTROLLER CONFIGURATION
# ===================================
# Retrieve script directory
script_directory = os.path.dirname(os.path.abspath(__file__))
# Retrieve the robot model
robot_urdf = "/iit/sources/robotology-superbuild/src/icub-models/iCub/robots/iCubGenova04/model.urdf"
# Define the paths for the generated footsteps and postural
trajectory_path = os.path.join(script_directory, trajectory_path)
footsteps_path = trajectory_path + "footsteps.txt"
posturals_path = trajectory_path + "postural.txt"
# Define the beginning of the path where the trajectory control data will be stored
storage_path = os.path.join(script_directory, "../datasets/trajectory_control_simulation/sim_")
# Define the joints list used by the different components in the pipeline
controlled_joints = ['l_hip_pitch', 'l_hip_roll', 'l_hip_yaw', 'l_knee', 'l_ankle_pitch', 'l_ankle_roll', # left leg
'r_hip_pitch', 'r_hip_roll', 'r_hip_yaw', 'r_knee', 'r_ankle_pitch', 'r_ankle_roll', # right leg
'torso_pitch', 'torso_roll', 'torso_yaw', # torso
'l_shoulder_pitch', 'l_shoulder_roll', 'l_shoulder_yaw', 'l_elbow', # left arm
'r_shoulder_pitch', 'r_shoulder_roll', 'r_shoulder_yaw', 'r_elbow'] # right arm
# Define robot-specific feet mapping between feet frame names and indexes
foot_name_to_index = define_foot_name_to_index_mapping(robot="iCubV2_5")
# Define robot-specific initial joint reference
initial_joint_reference = compute_initial_joint_reference(robot="iCubV2_5")
# Instantiate the trajectory controller
controller = trajectory_controller.TrajectoryController.build(robot_urdf=robot_urdf,
footsteps_path=footsteps_path,
posturals_path=posturals_path,
storage_path = storage_path,
time_scaling=time_scaling,
footstep_scaling=footstep_scaling,
use_joint_references=use_joint_references,
controlled_joints=controlled_joints,
foot_name_to_index=foot_name_to_index,
initial_joint_reference=initial_joint_reference)
# Configure all the components of the trajectory control pipeline
controller.configure()
# ===================
# TRAJECTORY PLANNING
# ===================
# Trajectory optimization
controller.compute_dcm_trajectory()
# ==================
# TRAJECTORY CONTROL
# ==================
# Trajectory control loop running at dt = 100 Hz
for idx in np.arange(start=0, stop=controller.get_trajectory_duration(), step=controller.get_dt()):
# Measure joint values and feet wrenches
controller.read_data()
# Update the legged odometry estimator
controller.update_legged_odom()
# Advance the DCM and swing foot planners
controller.update_planners()
# Compute the desired CoM
controller.update_controllers()
# Update the feet, CoM and joint targets for the inverse kinematics
controller.update_ik_targets(idx)
# Compute the joint reference realizing the ik targets
controller.retrieve_joint_reference()
# Set the joint reference
controller.set_current_joint_reference(idx)
# Update the storage of the quantities of interest
controller.update_storage(idx)
# At the end of the control loop, store the relevant data
controller.storage.save_data_as_json()
|
the-stack_106_29733 | #!/usr/bin/env python
import torch.nn
from deepinpy.utils import utils
class Conv2dSame(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True, padding_layer=torch.nn.ReflectionPad2d):
super().__init__()
ka = kernel_size // 2
kb = ka - 1 if kernel_size % 2 == 0 else ka
self.net = torch.nn.Sequential(
padding_layer((ka,kb,ka,kb)),
torch.nn.Conv2d(in_channels, out_channels, kernel_size, bias=bias)
)
def forward(self, x):
return self.net(x)
class ResNet5Block(torch.nn.Module):
def __init__(self, num_filters=32, filter_size=3, T=4, num_filters_start=2, num_filters_end=2, batch_norm=False):
super(ResNet5Block, self).__init__()
num_filters_start = num_filters_end = 2
if batch_norm:
self.model = torch.nn.Sequential(
Conv2dSame(num_filters_start,num_filters,filter_size),
torch.nn.BatchNorm2d(num_filters),
                torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters,filter_size),
torch.nn.BatchNorm2d(num_filters),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters,filter_size),
torch.nn.BatchNorm2d(num_filters),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters,filter_size),
torch.nn.BatchNorm2d(num_filters),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters,filter_size),
torch.nn.BatchNorm2d(num_filters),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters_end,filter_size)
)
else:
self.model = torch.nn.Sequential(
Conv2dSame(num_filters_start,num_filters,filter_size),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters,filter_size),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters,filter_size),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters,filter_size),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters,filter_size),
torch.nn.ReLU(),
Conv2dSame(num_filters,num_filters_end,filter_size)
)
self.T = T
def forward(self,x,device='cpu'):
return x + self.step(x, device=device)
def step(self, x, device='cpu'):
        # reshape (batch, x, y, channel=2) -> (batch, channel=2, x, y)
x = x.permute(0, 3, 1, 2)
y = self.model(x)
return y.permute(0, 2, 3, 1)
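# Pre-activation residual block: (BatchNorm -> ReLU -> conv) twice, with an
# optional 1x1 convolution on the skip path when the input and output channel
# counts differ, and optional dropout after each convolution.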
class ResNetBlock(torch.nn.Module):
def __init__(self, in_channels=2, latent_channels=64, out_channels=64, kernel_size=3, bias=False, batch_norm=True, final_relu=True, dropout=0):
super(ResNetBlock, self).__init__()
self.batch_norm = batch_norm
self.final_relu = final_relu
# initialize conv variables
self.in_channels = in_channels
self.latent_channels = latent_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.bias = bias
if dropout > 0:
self.dropout = torch.nn.Dropout(dropout)
else:
self.dropout = None
if self.in_channels == self.out_channels:
self.conv0 = None
else:
self.conv0 = self._conv_zero(self.in_channels, self.out_channels)
self.conv1 = self._conv(self.in_channels, self.latent_channels)
self.conv2 = self._conv(self.latent_channels, self.out_channels)
if self.batch_norm:
self.bn1 = self._bn(self.in_channels)
self.bn2 = self._bn(self.latent_channels)
self.relu = self._relu()
def forward(self, x):
if self.conv0:
residual = self.conv0(x)
else:
residual = x
out = x
if self.batch_norm:
out = self.bn1(out)
out = self.relu(out)
out = self.conv1(out)
if self.dropout is not None:
out = self.dropout(out)
if self.batch_norm:
out = self.bn2(out)
if self.final_relu:
out = self.relu(out)
out = self.conv2(out)
if self.dropout is not None:
out = self.dropout(out)
out += residual
return out
def _conv(self, in_channels, out_channels):
return Conv2dSame(in_channels=in_channels,
out_channels=out_channels,
kernel_size=self.kernel_size,
bias=self.bias)
def _conv_zero(self, in_channels, out_channels):
return Conv2dSame(in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
bias=self.bias)
def _bn(self, channels):
return torch.nn.BatchNorm2d(channels)
def _relu(self):
#return torch.nn.ReLU(inplace=True)
return torch.nn.ReLU()
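# Residual network for 2-channel (real/imaginary) images: stacks `num_blocks`
# ResNetBlocks, optionally applies soft-thresholding and/or top-k
# sparsification to the activations after the middle block, and adds a global
# skip connection around the whole stack.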
class ResNet(torch.nn.Module):
def __init__(self, in_channels=2, latent_channels=64, num_blocks=3, kernel_size=7, bias=False, batch_norm=True, dropout=0, topk=None, l1lam=None):
super(ResNet, self).__init__()
self.batch_norm = batch_norm
self.num_blocks = num_blocks
# initialize conv variables
self.in_channels = in_channels
self.latent_channels = latent_channels
self.kernel_size = kernel_size
self.bias = bias
self.dropout = dropout
self.ResNetBlocks = self._build_model()
#self.ResNetBlocks = self._build_model_bottleneck()
#self.weights = self.ResNetBlocks[self.num_blocks // 2].conv2.weight
self.l1lam = l1lam
if self.l1lam:
#self.threshold = torch.nn.Threshold(self.l1lam, 0)
self.threshold = torch.nn.Softshrink(self.l1lam)
self.topk = topk
def forward(self, x):
x = x.permute(0, 3, 1, 2)
residual = x
for n in range(self.num_blocks):
x = self.ResNetBlocks[n](x)
if n == self.num_blocks // 2:
act = x
if self.l1lam:
act = self.threshold(act)
if self.topk:
act = utils.topk(act, self.topk, dim=1)
x = act
x += residual
#return x.permute(0, 2, 3, 1), act
return x.permute(0, 2, 3, 1)
def _build_model(self):
ResNetBlocks = torch.nn.ModuleList()
# first block goes from input space (2ch) to latent space (64ch)
ResNetBlocks.append(self._add_block(final_relu=True, in_channels=self.in_channels, latent_channels=self.latent_channels, out_channels=self.latent_channels))
# middle blocks go from latent space to latent space
for n in range(self.num_blocks - 2):
ResNetBlocks.append(self._add_block(final_relu=True, in_channels=self.latent_channels, latent_channels=self.latent_channels, out_channels=self.latent_channels))
# last block goes from latent space to output space (2ch) with no ReLU
ResNetBlocks.append(self._add_block(final_relu=False, in_channels=self.latent_channels, latent_channels=self.latent_channels, out_channels=self.in_channels))
return ResNetBlocks
def _add_block(self, in_channels, latent_channels, out_channels, final_relu=True):
return ResNetBlock(in_channels=in_channels,
latent_channels=latent_channels,
out_channels=out_channels,
kernel_size=self.kernel_size,
bias=self.bias,
batch_norm=self.batch_norm,
final_relu=final_relu, dropout=self.dropout)
|
the-stack_106_29734 | """
This script simply runs the intersection driving environment.
In this example, the ego vehicle first stops at the intersection and then continues to drive after 35 s.
"""
import numpy as np
import sys
sys.path.append('../src')
import parameters_intersection as p
from intersection_env import IntersectionEnv
p.sim_params['safety_check'] = False
ego_at_intersection = False
gui_params = {'use_gui': True, 'print_gui_info': True, 'draw_sensor_range': True, 'zoom_level': 3000}
np.random.seed(13)
env = IntersectionEnv(sim_params=p.sim_params, road_params=p.road_params, gui_params=gui_params)
episode_rewards = []
episode_steps = []
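# Run 100 episodes; per the module docstring, the ego vehicle is held at the
# intersection for the first 35 steps and then commanded to continue driving.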
for i in range(0, 100):
np.random.seed(i)
env.reset(ego_at_intersection=ego_at_intersection)
done = False
episode_reward = 0
step = 0
while done is False:
if step < 35:
action = 2
else:
action = 1
obs, reward, done, info = env.step(action)
episode_reward += reward
step += 1
episode_rewards.append(episode_reward)
episode_steps.append(step)
print("Episode: " + str(i))
print("Episode steps: " + str(step))
print("Episode reward: " + str(episode_reward))
print(episode_rewards)
print(episode_steps)
|
the-stack_106_29736 | # coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import json
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.module.module import runnable
from paddlehub.module.nlp_module import DataFormatError
from paddlehub.common.logger import logger
from paddlehub.module.module import moduleinfo, serving
import argparse
import os
import numpy as np
import paddle.fluid.dygraph as D
from ernie_gen_poetry.model.tokenizing_ernie import ErnieTokenizer
from ernie_gen_poetry.model.decode import beam_search_infilling
from ernie_gen_poetry.model.modeling_ernie_gen import ErnieModelForGeneration
@moduleinfo(
name="ernie_gen_poetry",
version="1.0.1",
summary=
"ERNIE-GEN is a multi-flow language generation framework for both pre-training and fine-tuning. This module has fine-tuned for poetry generation task.",
author="baidu-nlp",
author_email="",
type="nlp/text_generation",
)
class ErnieGen(hub.NLPPredictionModule):
def _initialize(self):
"""
initialize with the necessary elements
"""
assets_path = os.path.join(self.directory, "assets")
gen_checkpoint_path = os.path.join(assets_path, "ernie_gen_poetry")
ernie_cfg_path = os.path.join(assets_path, 'ernie_config.json')
with open(ernie_cfg_path, encoding='utf8') as ernie_cfg_file:
ernie_cfg = dict(json.loads(ernie_cfg_file.read()))
ernie_vocab_path = os.path.join(assets_path, 'vocab.txt')
with open(ernie_vocab_path, encoding='utf8') as ernie_vocab_file:
ernie_vocab = {
j.strip().split('\t')[0]: i
for i, j in enumerate(ernie_vocab_file.readlines())
}
with fluid.dygraph.guard(fluid.CPUPlace()):
with fluid.unique_name.guard():
self.model = ErnieModelForGeneration(ernie_cfg)
finetuned_states, _ = D.load_dygraph(gen_checkpoint_path)
self.model.set_dict(finetuned_states)
self.tokenizer = ErnieTokenizer(ernie_vocab)
self.rev_dict = {v: k for k, v in self.tokenizer.vocab.items()}
self.rev_dict[self.tokenizer.pad_id] = '' # replace [PAD]
        self.rev_dict[self.tokenizer.unk_id] = ''  # replace [UNK]
self.rev_lookup = np.vectorize(lambda i: self.rev_dict[i])
@serving
def generate(self, texts, use_gpu=False, beam_width=5):
"""
Get the continuation of the input poetry.
Args:
            texts(list): the beginning lines of poems to be continued.
            use_gpu(bool): whether to use GPU for prediction
beam_width(int): the beam search width.
Returns:
results(list): the poetry continuations.
"""
if use_gpu and "CUDA_VISIBLE_DEVICES" not in os.environ:
use_gpu = False
logger.warning(
"use_gpu has been set False as you didn't set the environment variable CUDA_VISIBLE_DEVICES while using use_gpu=True"
)
if use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
if texts and isinstance(texts, list):
predicted_data = texts
else:
raise ValueError(
"The input data is inconsistent with expectations.")
with fluid.dygraph.guard(place):
self.model.eval()
results = []
for text in predicted_data:
sample_results = []
ids, sids = self.tokenizer.encode(text)
src_ids = D.to_variable(np.expand_dims(ids, 0))
src_sids = D.to_variable(np.expand_dims(sids, 0))
output_ids = beam_search_infilling(
self.model,
src_ids,
src_sids,
eos_id=self.tokenizer.sep_id,
sos_id=self.tokenizer.cls_id,
attn_id=self.tokenizer.vocab['[MASK]'],
max_decode_len=80,
max_encode_len=20,
beam_width=beam_width,
tgt_type_id=1)
output_str = self.rev_lookup(output_ids[0].numpy())
for ostr in output_str.tolist():
if '[SEP]' in ostr:
ostr = ostr[:ostr.index('[SEP]')]
sample_results.append("".join(ostr))
results.append(sample_results)
return results
def add_module_config_arg(self):
"""
Add the command config options
"""
self.arg_config_group.add_argument(
'--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU for prediction")
self.arg_config_group.add_argument(
'--beam_width', type=int, default=5, help="the beam search width")
@runnable
def run_cmd(self, argvs):
"""
Run as a command
"""
self.parser = argparse.ArgumentParser(
description='Run the %s module.' % self.name,
prog='hub run %s' % self.name,
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(
title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options",
description=
"Run configuration for controlling module behavior, optional.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
try:
input_data = self.check_input_data(args)
except DataFormatError and RuntimeError:
self.parser.print_help()
return None
results = self.generate(
texts=input_data, use_gpu=args.use_gpu, beam_width=args.beam_width)
return results
if __name__ == "__main__":
module = ErnieGen()
for result in module.generate(['昔年旅南服,始识王荆州。', '高名出汉阴,禅阁跨香岑。'],
beam_width=5):
print(result)
|
the-stack_106_29739 | import re
import sys
import warnings
from urllib.parse import urlparse
import joblib
from googlesearch import search
from newspaper import Article
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split
warnings.filterwarnings("ignore")
def extractor(url):
"""Extractor function that gets the article body from the URL
Args:
url: URL of the article
Returns:
article: Article fetched from the URL
article_title: Title of the Article
"""
article = Article(url)
try:
article.download()
article.parse()
except:
pass
#Get the article title and convert them to lower-case
article_title = article.title
article = article.text.lower()
article = [article]
return (article, article_title)
def text_area_extractor(text):
"""
Textbox extractor function to preprocess and extract text
Args:
text: Raw Extracted Text from the News Article
Returns:
text: Preprocessed and clean text ready for analysis
"""
text = text.lower()
text = re.sub(r'[^a-zA-Z0-9\s]', ' ', text)
text = re.sub("(\\r|\r|\n)\\n$", " ", text)
text = [text]
return text
def google_search(title, url):
"""
Function to perform a Google Search with the specified title and URL
Args:
title: Title of the Article
url: URL of the specified article
Returns:
        search_urls: URLs of similar news articles found over the Web
        source_sites: Hostnames of the sites where those articles were found
"""
target = url
domain = urlparse(target).hostname
search_urls = []
source_sites = []
for i in search(title, tld = "com", num = 10, start = 1, stop = 6):
if "youtube" not in i and domain not in i:
source_sites.append(urlparse(i).hostname)
search_urls.append(i)
return search_urls, source_sites
def similarity(url_list, article):
"""
Function to check the similarity of the News Article through Cosine Similarity
Args:
url_list: List of the URLs similar to the news article
article: Preprocessed article which would be vectorized
Returns:
cosine_cleaned: Cosine Similarity Scores of each URL passed
average_score: Average value of the cosine similarity scores fetched
"""
article = article
sim_tfv = TfidfVectorizer(stop_words ="english")
sim_transform1 = sim_tfv.fit_transform(article)
cosine = []
cosine_cleaned = []
cosine_average = 0
count = 0
for i in url_list:
test_article, test_title = extractor(i)
test_article = [test_article]
sim_transform2 = sim_tfv.transform(test_article[0])
score = cosine_similarity(sim_transform1, sim_transform2)
cosine.append(score*100)
count+=1
for i in cosine:
x = str(i).replace('[','').replace(']','')
cosine_cleaned.append(x)
for i in cosine:
if i !=0:
cosine_average = cosine_average + i
else:
count-=1
average_score = cosine_average/count
average_score = str(average_score).replace('[','').replace(']','')
average_score = float(average_score)
return cosine_cleaned, average_score
def handlelink(article_link):
"""Classification function to take the article link and predict the similar news articles
Args:
article_link: URL of the article
Returns:
pred: Predicted news sources from the machine learning model
article_title: Title of the Article
article: Article fetched from the URL
        url: URL of the article
"""
job_pac = joblib.load('models/pac.pkl')
job_vec = joblib.load('models/tfv.pkl')
url = (article_link)
article, article_title = extractor(article_link)
pred = job_pac.predict(job_vec.transform(article))
return pred, article_title, article, url
def similarNews(url):
"""
Driver function to return a dictionary with all the similar news and their similarity score
Args:
url: URL of the article
Returns:
dictionary: Dictionary containing all the similar news articles and their similarity score
"""
prediction, article_title, article, url = handlelink(article_link=url)
url_list, sitename = google_search(article_title, url)
similarity_score, avgScore = similarity(url_list, article)
dictionary = dict(zip(url_list, similarity_score))
return dictionary
if __name__ == "__main__":
url=sys.argv[1]
similarNews = similarNews(url)
print ("{:<10} {:<10}".format('News Link', 'Similarity Score'))
for key, value in similarNews.items():
print ("{:<10} {:<10}".format(key, value))
|
the-stack_106_29741 | #!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm (PyTorch).
Uses Ray sampler instead of OnPolicyVectorizedSampler.
Here it runs InvertedDoublePendulum-v2 environment with 100 iterations.
"""
import torch
from metarl.experiment import LocalRunner, run_experiment
from metarl.np.baselines import LinearFeatureBaseline
from metarl.sampler import RaySampler
from metarl.tf.envs import TfEnv
from metarl.torch.algos import TRPO
from metarl.torch.policies import GaussianMLPPolicy
def run_task(snapshot_config, *_):
"""Set up environment and algorithm and run the task.
Args:
snapshot_config (metarl.experiment.SnapshotConfig): The snapshot
configuration used by LocalRunner to create the snapshotter.
If None, it will create one with default settings.
_ : Unused parameters
"""
env = TfEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(snapshot_config)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
center_adv=False)
runner.setup(algo, env, sampler_cls=RaySampler)
runner.train(n_epochs=100, batch_size=1024)
run_experiment(
run_task,
snapshot_mode='last',
seed=1,
)
|
the-stack_106_29742 | import numpy as np
from .. import util
from ..element import Element
from ..ndmapping import NdMapping, item_check, sorted_context
from .dictionary import DictInterface
from .interface import Interface, DataError
class MultiInterface(Interface):
"""
MultiInterface allows wrapping around a list of tabular datasets
including dataframes, the columnar dictionary format or 2D tabular
NumPy arrays. Using the split method the list of tabular data can
be split into individual datasets.
    The interface makes a list of tabular datasets appear as a single
    dataset. The length, shape and values methods therefore
make the data appear like a single array of concatenated subpaths,
separated by NaN values.
"""
types = ()
datatype = 'multitabular'
subtypes = ['dictionary', 'dataframe', 'array', 'dask']
multi = True
@classmethod
def init(cls, eltype, data, kdims, vdims):
new_data = []
dims = {'kdims': eltype.kdims, 'vdims': eltype.vdims}
if kdims is not None:
dims['kdims'] = kdims
if vdims is not None:
dims['vdims'] = vdims
if not isinstance(data, list):
raise ValueError('MultiInterface data must be a list tabular data types.')
prev_interface, prev_dims = None, None
for d in data:
d, interface, dims, _ = Interface.initialize(eltype, d, kdims, vdims,
datatype=cls.subtypes)
if prev_interface:
if prev_interface != interface:
raise DataError('MultiInterface subpaths must all have matching datatype.', cls)
if dims['kdims'] != prev_dims['kdims']:
raise DataError('MultiInterface subpaths must all have matching kdims.', cls)
if dims['vdims'] != prev_dims['vdims']:
raise DataError('MultiInterface subpaths must all have matching vdims.', cls)
new_data.append(d)
prev_interface, prev_dims = interface, dims
return new_data, dims, {}
@classmethod
def validate(cls, dataset, vdims=True):
if not dataset.data:
return
from holoviews.element import Polygons
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
ds.interface.validate(ds, vdims)
if isinstance(dataset, Polygons) and ds.interface is DictInterface:
holes = ds.interface.holes(ds)
if not isinstance(holes, list):
raise DataError('Polygons holes must be declared as a list-of-lists.', cls)
subholes = holes[0]
coords = ds.data[ds.kdims[0].name]
splits = np.isnan(coords.astype('float')).sum()
if len(subholes) != (splits+1):
raise DataError('Polygons with holes containing multi-geometries '
'must declare a list of holes for each geometry.', cls)
@classmethod
def _inner_dataset_template(cls, dataset):
"""
Returns a Dataset template used as a wrapper around the data
contained within the multi-interface dataset.
"""
from . import Dataset
vdims = dataset.vdims if getattr(dataset, 'level', None) is None else []
return Dataset(dataset.data[0], datatype=cls.subtypes,
kdims=dataset.kdims, vdims=vdims)
@classmethod
def dimension_type(cls, dataset, dim):
if not dataset.data:
# Note: Required to make empty datasets work at all (should fix)
# Other interfaces declare equivalent of empty array
# which defaults to float type
return float
ds = cls._inner_dataset_template(dataset)
return ds.interface.dimension_type(ds, dim)
@classmethod
def range(cls, dataset, dim):
if not dataset.data:
return (None, None)
ranges = []
ds = cls._inner_dataset_template(dataset)
# Backward compatibility for Contours/Polygons level
level = getattr(dataset, 'level', None)
dim = dataset.get_dimension(dim)
if level is not None and dim is dataset.vdims[0]:
return (level, level)
for d in dataset.data:
ds.data = d
ranges.append(ds.interface.range(ds, dim))
return util.max_range(ranges)
@classmethod
def has_holes(cls, dataset):
if not dataset.data:
return False
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
if ds.interface.has_holes(ds):
return True
return False
@classmethod
def holes(cls, dataset):
holes = []
if not dataset.data:
return holes
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
holes += ds.interface.holes(ds)
return holes
@classmethod
def isscalar(cls, dataset, dim):
"""
Tests if dimension is scalar in each subpath.
"""
if not dataset.data:
return True
ds = cls._inner_dataset_template(dataset)
isscalar = []
for d in dataset.data:
ds.data = d
isscalar.append(ds.interface.isscalar(ds, dim))
return all(isscalar)
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
"""
        Applies the selection to all the subpaths.
"""
if not dataset.data:
return []
ds = cls._inner_dataset_template(dataset)
data = []
for d in dataset.data:
ds.data = d
sel = ds.interface.select(ds, **selection)
data.append(sel)
return data
@classmethod
def select_paths(cls, dataset, selection):
"""
Allows selecting paths with usual NumPy slicing index.
"""
return [s[0] for s in np.array([{0: p} for p in dataset.data])[selection]]
@classmethod
def aggregate(cls, dataset, dimensions, function, **kwargs):
raise NotImplementedError('Aggregation currently not implemented')
@classmethod
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
# Get dimensions information
dimensions = [dataset.get_dimension(d) for d in dimensions]
kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = list if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
group_kwargs['kdims'] = kdims
group_kwargs.update(kwargs)
# Find all the keys along supplied dimensions
values = []
for d in dimensions:
if not cls.isscalar(dataset, d):
raise ValueError('MultiInterface can only apply groupby '
'on scalar dimensions, %s dimension'
'is not scalar' % d)
vals = cls.values(dataset, d, False, True)
values.append(vals)
values = tuple(values)
# Iterate over the unique entries applying selection masks
from . import Dataset
ds = Dataset(values, dimensions)
keys = (tuple(vals[i] for vals in values) for i in range(len(vals)))
grouped_data = []
for unique_key in util.unique_iterator(keys):
mask = ds.interface.select_mask(ds, dict(zip(dimensions, unique_key)))
selection = [data for data, m in zip(dataset.data, mask) if m]
group_data = group_type(selection, **group_kwargs)
grouped_data.append((unique_key, group_data))
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
return container_type(grouped_data, kdims=dimensions)
else:
return container_type(grouped_data)
@classmethod
def sample(cls, dataset, samples=[]):
raise NotImplementedError('Sampling operation on subpaths not supported')
@classmethod
def shape(cls, dataset):
"""
Returns the shape of all subpaths, making it appear like a
single array of concatenated subpaths separated by NaN values.
"""
if not dataset.data:
return (0, len(dataset.dimensions()))
rows, cols = 0, 0
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
r, cols = ds.interface.shape(ds)
rows += r
return rows+len(dataset.data)-1, cols
@classmethod
def length(cls, dataset):
"""
Returns the length of the multi-tabular dataset making it appear
like a single array of concatenated subpaths separated by NaN
values.
"""
if not dataset.data:
return 0
length = 0
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
length += ds.interface.length(ds)
return length+len(dataset.data)-1
@classmethod
def dtype(cls, dataset, dimension):
if not dataset.data:
return np.dtype('float')
ds = cls._inner_dataset_template(dataset)
return ds.interface.dtype(ds, dimension)
@classmethod
def nonzero(cls, dataset):
return bool(dataset.data)
@classmethod
def redim(cls, dataset, dimensions):
if not dataset.data:
return dataset.data
new_data = []
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
new_data.append(ds.interface.redim(ds, dimensions))
return new_data
@classmethod
def values(cls, dataset, dimension, expanded=True, flat=True, compute=True):
"""
Returns a single concatenated array of all subpaths separated
by NaN values. If expanded keyword is False an array of arrays
is returned.
"""
if not dataset.data:
return np.array([])
values = []
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
dvals = ds.interface.values(ds, dimension, expanded, flat, compute)
if not len(dvals):
continue
elif expanded:
values.append(dvals)
values.append([np.NaN])
else:
values.append(dvals)
if not values:
return np.array([])
elif expanded:
return np.concatenate(values[:-1])
else:
return np.concatenate(values)
@classmethod
def split(cls, dataset, start, end, datatype, **kwargs):
"""
Splits a multi-interface Dataset into regular Datasets using
regular tabular interfaces.
"""
objs = []
if datatype is None:
for d in dataset.data[start: end]:
objs.append(dataset.clone(d, datatype=cls.subtypes))
return objs
elif not dataset.data:
return objs
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
if datatype == 'array':
obj = ds.array(**kwargs)
elif datatype == 'dataframe':
obj = ds.dframe(**kwargs)
elif datatype == 'columns':
if ds.interface.datatype == 'dictionary':
obj = dict(ds.data)
else:
obj = ds.columns(**kwargs)
else:
raise ValueError("%s datatype not support" % datatype)
objs.append(obj)
return objs
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
if not len(dataset.data):
return dataset.data
elif values is None or util.isscalar(values):
values = [values]*len(dataset.data)
elif not len(values) == len(dataset.data):
raise ValueError('Added dimension values must be scalar or '
'match the length of the data.')
new_data = []
template = cls._inner_dataset_template(dataset)
array_type = template.interface.datatype == 'array'
for d, v in zip(dataset.data, values):
template.data = d
if array_type:
ds = template.clone(template.columns())
else:
ds = template
new_data.append(ds.interface.add_dimension(ds, dimension, dim_pos, v, vdim))
return new_data
Interface.register(MultiInterface)
|
the-stack_106_29743 | from typing import Any, Callable, Dict, List, Optional
from chia.consensus.block_record import BlockRecord
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR
from chia.full_node.full_node import FullNode
from chia.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from chia.types.blockchain_format.program import Program, SerializedProgram
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.coin_spend import CoinSpend
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.spend_bundle import SpendBundle
from chia.types.unfinished_header_block import UnfinishedHeaderBlock
from chia.util.byte_types import hexstr_to_bytes
from chia.util.ints import uint32, uint64, uint128
from chia.util.log_exceptions import log_exceptions
from chia.util.ws_message import WsRpcMessage, create_payload_dict
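# Re-derive the legacy boolean "spent" field from spent_block_index so older
# API consumers keep working.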
def coin_record_dict_backwards_compat(coin_record: Dict[str, Any]):
coin_record["spent"] = coin_record["spent_block_index"] > 0
return coin_record
class FullNodeRpcApi:
def __init__(self, service: FullNode):
self.service = service
self.service_name = "chia_full_node"
self.cached_blockchain_state: Optional[Dict] = None
def get_routes(self) -> Dict[str, Callable]:
return {
# Blockchain
"/get_blockchain_state": self.get_blockchain_state,
"/get_block": self.get_block,
"/get_blocks": self.get_blocks,
"/get_block_count_metrics": self.get_block_count_metrics,
"/get_block_record_by_height": self.get_block_record_by_height,
"/get_block_record": self.get_block_record,
"/get_block_records": self.get_block_records,
"/get_unfinished_block_headers": self.get_unfinished_block_headers,
"/get_network_space": self.get_network_space,
"/get_additions_and_removals": self.get_additions_and_removals,
# this function is just here for backwards-compatibility. It will probably
# be removed in the future
"/get_initial_freeze_period": self.get_initial_freeze_period,
"/get_network_info": self.get_network_info,
"/get_recent_signage_point_or_eos": self.get_recent_signage_point_or_eos,
# Coins
"/get_coin_records_by_puzzle_hash": self.get_coin_records_by_puzzle_hash,
"/get_coin_records_by_puzzle_hashes": self.get_coin_records_by_puzzle_hashes,
"/get_coin_record_by_name": self.get_coin_record_by_name,
"/get_coin_records_by_names": self.get_coin_records_by_names,
"/get_coin_records_by_parent_ids": self.get_coin_records_by_parent_ids,
"/get_coin_records_by_hint": self.get_coin_records_by_hint,
"/push_tx": self.push_tx,
"/get_puzzle_and_solution": self.get_puzzle_and_solution,
# Mempool
"/get_all_mempool_tx_ids": self.get_all_mempool_tx_ids,
"/get_all_mempool_items": self.get_all_mempool_items,
"/get_mempool_item_by_tx_id": self.get_mempool_item_by_tx_id,
}
async def _state_changed(self, change: str, change_data: Dict[str, Any] = None) -> List[WsRpcMessage]:
if change_data is None:
change_data = {}
payloads = []
if change == "new_peak" or change == "sync_mode":
data = await self.get_blockchain_state({})
assert data is not None
payloads.append(
create_payload_dict(
"get_blockchain_state",
data,
self.service_name,
"wallet_ui",
)
)
payloads.append(
create_payload_dict(
"get_blockchain_state",
data,
self.service_name,
"metrics",
)
)
if change in ("block", "signage_point"):
payloads.append(create_payload_dict(change, change_data, self.service_name, "metrics"))
return payloads
# this function is just here for backwards-compatibility. It will probably
# be removed in the future
async def get_initial_freeze_period(self, _: Dict):
# Mon May 03 2021 17:00:00 GMT+0000
return {"INITIAL_FREEZE_END_TIMESTAMP": 1620061200}
async def get_blockchain_state(self, _request: Dict):
"""
Returns a summary of the node's view of the blockchain.
"""
node_id = self.service.server.node_id.hex()
if self.service.initialized is False:
res: Dict = {
"blockchain_state": {
"peak": None,
"genesis_challenge_initialized": self.service.initialized,
"sync": {
"sync_mode": False,
"synced": False,
"sync_tip_height": 0,
"sync_progress_height": 0,
},
"difficulty": 0,
"sub_slot_iters": 0,
"space": 0,
"mempool_size": 0,
"mempool_cost": 0,
"mempool_min_fees": {
"cost_5000000": 0,
},
"mempool_max_total_cost": 0,
"block_max_cost": 0,
"node_id": node_id,
},
}
return res
peak: Optional[BlockRecord] = self.service.blockchain.get_peak()
if peak is not None and peak.height > 0:
difficulty = uint64(peak.weight - self.service.blockchain.block_record(peak.prev_hash).weight)
sub_slot_iters = peak.sub_slot_iters
else:
difficulty = self.service.constants.DIFFICULTY_STARTING
sub_slot_iters = self.service.constants.SUB_SLOT_ITERS_STARTING
sync_mode: bool = self.service.sync_store.get_sync_mode() or self.service.sync_store.get_long_sync()
sync_tip_height: Optional[uint32] = uint32(0)
if sync_mode:
if self.service.sync_store.get_sync_target_height() is not None:
sync_tip_height = self.service.sync_store.get_sync_target_height()
assert sync_tip_height is not None
if peak is not None:
sync_progress_height: uint32 = peak.height
# Don't display we're syncing towards 0, instead show 'Syncing height/height'
# until sync_store retrieves the correct number.
if sync_tip_height == uint32(0):
sync_tip_height = peak.height
else:
sync_progress_height = uint32(0)
else:
sync_progress_height = uint32(0)
if peak is not None and peak.height > 1:
newer_block_hex = peak.header_hash.hex()
# Average over the last day
header_hash = self.service.blockchain.height_to_hash(uint32(max(1, peak.height - 4608)))
assert header_hash is not None
older_block_hex = header_hash.hex()
space = await self.get_network_space(
{"newer_block_header_hash": newer_block_hex, "older_block_header_hash": older_block_hex}
)
else:
space = {"space": uint128(0)}
if self.service.mempool_manager is not None:
mempool_size = len(self.service.mempool_manager.mempool.spends)
mempool_cost = self.service.mempool_manager.mempool.total_mempool_cost
mempool_min_fee_5m = self.service.mempool_manager.mempool.get_min_fee_rate(5000000)
mempool_max_total_cost = self.service.mempool_manager.mempool_max_total_cost
else:
mempool_size = 0
mempool_cost = 0
mempool_min_fee_5m = 0
mempool_max_total_cost = 0
if self.service.server is not None:
is_connected = len(self.service.server.get_full_node_connections()) > 0
else:
is_connected = False
synced = await self.service.synced() and is_connected
assert space is not None
response: Dict = {
"blockchain_state": {
"peak": peak,
"genesis_challenge_initialized": self.service.initialized,
"sync": {
"sync_mode": sync_mode,
"synced": synced,
"sync_tip_height": sync_tip_height,
"sync_progress_height": sync_progress_height,
},
"difficulty": difficulty,
"sub_slot_iters": sub_slot_iters,
"space": space["space"],
"mempool_size": mempool_size,
"mempool_cost": mempool_cost,
"mempool_min_fees": {
# We may give estimates for varying costs in the future
# This Dict sets us up for that in the future
"cost_5000000": mempool_min_fee_5m,
},
"mempool_max_total_cost": mempool_max_total_cost,
"block_max_cost": self.service.constants.MAX_BLOCK_COST_CLVM,
"node_id": node_id,
},
}
self.cached_blockchain_state = dict(response["blockchain_state"])
return response
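# Illustrative consumption of the response above (a sketch, not part of the RPC
# API itself; the field values shown are hypothetical):
#
#     state = (await rpc_api.get_blockchain_state({}))["blockchain_state"]
#     if state["sync"]["synced"]:
#         print(state["peak"].height, state["space"], state["mempool_size"])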
async def get_network_info(self, request: Dict):
network_name = self.service.config["selected_network"]
address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"]
return {"network_name": network_name, "network_prefix": address_prefix}
async def get_recent_signage_point_or_eos(self, request: Dict):
if "sp_hash" not in request:
challenge_hash: bytes32 = bytes32.from_hexstr(request["challenge_hash"])
# This is the case of getting an end of slot
eos_tuple = self.service.full_node_store.recent_eos.get(challenge_hash)
if not eos_tuple:
raise ValueError(f"Did not find eos {challenge_hash.hex()} in cache")
eos, time_received = eos_tuple
# If it's still in the full node store, it's not reverted
if self.service.full_node_store.get_sub_slot(eos.challenge_chain.get_hash()):
return {"eos": eos, "time_received": time_received, "reverted": False}
# Otherwise we can backtrack from peak to find it in the blockchain
curr: Optional[BlockRecord] = self.service.blockchain.get_peak()
if curr is None:
raise ValueError("No blocks in the chain")
number_of_slots_searched = 0
while number_of_slots_searched < 10:
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
if curr.finished_challenge_slot_hashes[-1] == eos.challenge_chain.get_hash():
# Found this slot in the blockchain
return {"eos": eos, "time_received": time_received, "reverted": False}
number_of_slots_searched += len(curr.finished_challenge_slot_hashes)
curr = self.service.blockchain.try_block_record(curr.prev_hash)
if curr is None:
# Got to the beginning of the blockchain without finding the slot
return {"eos": eos, "time_received": time_received, "reverted": True}
# Backtracked through 10 slots but still did not find it
return {"eos": eos, "time_received": time_received, "reverted": True}
# Now we handle the case of getting a signage point
sp_hash: bytes32 = bytes32.from_hexstr(request["sp_hash"])
sp_tuple = self.service.full_node_store.recent_signage_points.get(sp_hash)
if sp_tuple is None:
raise ValueError(f"Did not find sp {sp_hash.hex()} in cache")
sp, time_received = sp_tuple
# If it's still in the full node store, it's not reverted
if self.service.full_node_store.get_signage_point(sp_hash):
return {"signage_point": sp, "time_received": time_received, "reverted": False}
# Otherwise we can backtrack from peak to find it in the blockchain
rc_challenge: bytes32 = sp.rc_vdf.challenge
next_b: Optional[BlockRecord] = None
curr_b_optional: Optional[BlockRecord] = self.service.blockchain.get_peak()
assert curr_b_optional is not None
curr_b: BlockRecord = curr_b_optional
for _ in range(200):
sp_total_iters = sp.cc_vdf.number_of_iterations + curr_b.ip_sub_slot_total_iters(self.service.constants)
if curr_b.reward_infusion_new_challenge == rc_challenge:
if next_b is None:
return {"signage_point": sp, "time_received": time_received, "reverted": False}
next_b_total_iters = next_b.ip_sub_slot_total_iters(self.service.constants) + next_b.ip_iters(
self.service.constants
)
return {
"signage_point": sp,
"time_received": time_received,
"reverted": sp_total_iters > next_b_total_iters,
}
if curr_b.finished_reward_slot_hashes is not None:
assert curr_b.finished_challenge_slot_hashes is not None
for eos_rc in curr_b.finished_challenge_slot_hashes:
if eos_rc == rc_challenge:
if next_b is None:
return {"signage_point": sp, "time_received": time_received, "reverted": False}
next_b_total_iters = next_b.ip_sub_slot_total_iters(self.service.constants) + next_b.ip_iters(
self.service.constants
)
return {
"signage_point": sp,
"time_received": time_received,
"reverted": sp_total_iters > next_b_total_iters,
}
next_b = curr_b
curr_b_optional = self.service.blockchain.try_block_record(curr_b.prev_hash)
if curr_b_optional is None:
break
curr_b = curr_b_optional
return {"signage_point": sp, "time_received": time_received, "reverted": True}
async def get_block(self, request: Dict) -> Optional[Dict]:
if "header_hash" not in request:
raise ValueError("No header_hash in request")
header_hash = bytes32.from_hexstr(request["header_hash"])
block: Optional[FullBlock] = await self.service.block_store.get_full_block(header_hash)
if block is None:
raise ValueError(f"Block {header_hash.hex()} not found")
return {"block": block}
async def get_blocks(self, request: Dict) -> Optional[Dict]:
if "start" not in request:
raise ValueError("No start in request")
if "end" not in request:
raise ValueError("No end in request")
exclude_hh = False
if "exclude_header_hash" in request:
exclude_hh = request["exclude_header_hash"]
exclude_reorged = False
if "exclude_reorged" in request:
exclude_reorged = request["exclude_reorged"]
start = int(request["start"])
end = int(request["end"])
block_range = []
for a in range(start, end):
block_range.append(uint32(a))
blocks: List[FullBlock] = await self.service.block_store.get_full_blocks_at(block_range)
json_blocks = []
for block in blocks:
hh: bytes32 = block.header_hash
if exclude_reorged and self.service.blockchain.height_to_hash(block.height) != hh:
# Don't include forked (reorged) blocks
continue
json = block.to_json_dict()
if not exclude_hh:
json["header_hash"] = hh.hex()
json_blocks.append(json)
return {"blocks": json_blocks}
async def get_block_count_metrics(self, request: Dict):
compact_blocks = 0
uncompact_blocks = 0
with log_exceptions(self.service.log, consume=True):
compact_blocks = await self.service.block_store.count_compactified_blocks()
uncompact_blocks = await self.service.block_store.count_uncompactified_blocks()
hint_count = 0
if self.service.hint_store is not None:
with log_exceptions(self.service.log, consume=True):
hint_count = await self.service.hint_store.count_hints()
return {
"metrics": {
"compact_blocks": compact_blocks,
"uncompact_blocks": uncompact_blocks,
"hint_count": hint_count,
}
}
async def get_block_records(self, request: Dict) -> Optional[Dict]:
if "start" not in request:
raise ValueError("No start in request")
if "end" not in request:
raise ValueError("No end in request")
start = int(request["start"])
end = int(request["end"])
records = []
peak_height = self.service.blockchain.get_peak_height()
if peak_height is None:
raise ValueError("Peak is None")
for a in range(start, end):
if peak_height < uint32(a):
self.service.log.warning("requested block is higher than the known peak")
break
header_hash: Optional[bytes32] = self.service.blockchain.height_to_hash(uint32(a))
if header_hash is None:
raise ValueError(f"Height not in blockchain: {a}")
record: Optional[BlockRecord] = self.service.blockchain.try_block_record(header_hash)
if record is None:
# Fetch from DB
record = await self.service.blockchain.block_store.get_block_record(header_hash)
if record is None:
raise ValueError(f"Block {header_hash.hex()} does not exist")
records.append(record)
return {"block_records": records}
async def get_block_record_by_height(self, request: Dict) -> Optional[Dict]:
if "height" not in request:
raise ValueError("No height in request")
height = request["height"]
header_height = uint32(int(height))
peak_height = self.service.blockchain.get_peak_height()
if peak_height is None or header_height > peak_height:
raise ValueError(f"Block height {height} not found in chain")
header_hash: Optional[bytes32] = self.service.blockchain.height_to_hash(header_height)
if header_hash is None:
raise ValueError(f"Block hash {height} not found in chain")
record: Optional[BlockRecord] = self.service.blockchain.try_block_record(header_hash)
if record is None:
# Fetch from DB
record = await self.service.blockchain.block_store.get_block_record(header_hash)
if record is None:
raise ValueError(f"Block {header_hash} does not exist")
return {"block_record": record}
async def get_block_record(self, request: Dict):
if "header_hash" not in request:
raise ValueError("header_hash not in request")
header_hash_str = request["header_hash"]
header_hash = bytes32.from_hexstr(header_hash_str)
record: Optional[BlockRecord] = self.service.blockchain.try_block_record(header_hash)
if record is None:
# Fetch from DB
record = await self.service.blockchain.block_store.get_block_record(header_hash)
if record is None:
raise ValueError(f"Block {header_hash.hex()} does not exist")
return {"block_record": record}
async def get_unfinished_block_headers(self, request: Dict) -> Optional[Dict]:
peak: Optional[BlockRecord] = self.service.blockchain.get_peak()
if peak is None:
return {"headers": []}
response_headers: List[UnfinishedHeaderBlock] = []
for ub_height, block, _ in (self.service.full_node_store.get_unfinished_blocks()).values():
if ub_height == peak.height:
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
b"",
)
response_headers.append(unfinished_header_block)
return {"headers": response_headers}
async def get_network_space(self, request: Dict) -> Optional[Dict]:
"""
Retrieves an estimate of total space validating the chain
between two block header hashes.
"""
if "newer_block_header_hash" not in request or "older_block_header_hash" not in request:
raise ValueError("Invalid request. newer_block_header_hash and older_block_header_hash required")
newer_block_hex = request["newer_block_header_hash"]
older_block_hex = request["older_block_header_hash"]
if newer_block_hex == older_block_hex:
raise ValueError("New and old must not be the same")
newer_block_bytes = bytes32.from_hexstr(newer_block_hex)
older_block_bytes = bytes32.from_hexstr(older_block_hex)
newer_block = await self.service.block_store.get_block_record(newer_block_bytes)
if newer_block is None:
raise ValueError("Newer block not found")
older_block = await self.service.block_store.get_block_record(older_block_bytes)
if older_block is None:
raise ValueError("Newer block not found")
delta_weight = newer_block.weight - older_block.weight
delta_iters = newer_block.total_iters - older_block.total_iters
weight_div_iters = delta_weight / delta_iters
additional_difficulty_constant = self.service.constants.DIFFICULTY_CONSTANT_FACTOR
eligible_plots_filter_multiplier = 2 ** self.service.constants.NUMBER_ZERO_BITS_PLOT_FILTER
network_space_bytes_estimate = (
UI_ACTUAL_SPACE_CONSTANT_FACTOR
* weight_div_iters
* additional_difficulty_constant
* eligible_plots_filter_multiplier
)
return {"space": uint128(int(network_space_bytes_estimate))}
async def get_coin_records_by_puzzle_hash(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the coins for a given puzzlehash, by default returns unspent coins.
"""
if "puzzle_hash" not in request:
raise ValueError("Puzzle hash not in request")
kwargs: Dict[str, Any] = {"include_spent_coins": False, "puzzle_hash": hexstr_to_bytes(request["puzzle_hash"])}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_puzzle_hash(**kwargs)
return {"coin_records": [coin_record_dict_backwards_compat(cr.to_json_dict()) for cr in coin_records]}
async def get_coin_records_by_puzzle_hashes(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the coins for the given puzzle hashes; by default returns unspent coins.
"""
if "puzzle_hashes" not in request:
raise ValueError("Puzzle hashes not in request")
kwargs: Dict[str, Any] = {
"include_spent_coins": False,
"puzzle_hashes": [hexstr_to_bytes(ph) for ph in request["puzzle_hashes"]],
}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_puzzle_hashes(**kwargs)
return {"coin_records": [coin_record_dict_backwards_compat(cr.to_json_dict()) for cr in coin_records]}
async def get_coin_record_by_name(self, request: Dict) -> Optional[Dict]:
"""
Retrieves a coin record by its name.
"""
if "name" not in request:
raise ValueError("Name not in request")
name = bytes32.from_hexstr(request["name"])
coin_record: Optional[CoinRecord] = await self.service.blockchain.coin_store.get_coin_record(name)
if coin_record is None:
raise ValueError(f"Coin record 0x{name.hex()} not found")
return {"coin_record": coin_record_dict_backwards_compat(coin_record.to_json_dict())}
async def get_coin_records_by_names(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the coins for given coin IDs, by default returns unspent coins.
"""
if "names" not in request:
raise ValueError("Names not in request")
kwargs: Dict[str, Any] = {
"include_spent_coins": False,
"names": [hexstr_to_bytes(name) for name in request["names"]],
}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_names(**kwargs)
return {"coin_records": [coin_record_dict_backwards_compat(cr.to_json_dict()) for cr in coin_records]}
async def get_coin_records_by_parent_ids(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the coins for given parent coin IDs, by default returns unspent coins.
"""
if "parent_ids" not in request:
raise ValueError("Parent IDs not in request")
kwargs: Dict[str, Any] = {
"include_spent_coins": False,
"parent_ids": [hexstr_to_bytes(ph) for ph in request["parent_ids"]],
}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_parent_ids(**kwargs)
return {"coin_records": [coin_record_dict_backwards_compat(cr.to_json_dict()) for cr in coin_records]}
async def get_coin_records_by_hint(self, request: Dict) -> Optional[Dict]:
"""
Retrieves coins by hint, by default returns unspent coins.
"""
if "hint" not in request:
raise ValueError("Hint not in request")
if self.service.hint_store is None:
return {"coin_records": []}
names: List[bytes32] = await self.service.hint_store.get_coin_ids(bytes32.from_hexstr(request["hint"]))
kwargs: Dict[str, Any] = {
"include_spent_coins": False,
"names": names,
}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_names(**kwargs)
return {"coin_records": [coin_record_dict_backwards_compat(cr.to_json_dict()) for cr in coin_records]}
async def push_tx(self, request: Dict) -> Optional[Dict]:
if "spend_bundle" not in request:
raise ValueError("Spend bundle not in request")
spend_bundle = SpendBundle.from_json_dict(request["spend_bundle"])
spend_name = spend_bundle.name()
if self.service.mempool_manager.get_spendbundle(spend_name) is not None:
status = MempoolInclusionStatus.SUCCESS
error = None
else:
status, error = await self.service.respond_transaction(spend_bundle, spend_name)
if status != MempoolInclusionStatus.SUCCESS:
if self.service.mempool_manager.get_spendbundle(spend_name) is not None:
# Already in mempool
status = MempoolInclusionStatus.SUCCESS
error = None
if status == MempoolInclusionStatus.FAILED:
assert error is not None
raise ValueError(f"Failed to include transaction {spend_name}, error {error.name}")
return {
"status": status.name,
}
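# Example request shape for push_tx (a sketch; the spend bundle JSON below is
# abbreviated and not a valid bundle):
#
#     await rpc_api.push_tx({"spend_bundle": {"coin_spends": [...], "aggregated_signature": "0x..."}})
#     # -> {"status": "SUCCESS"} when accepted; raises ValueError if the mempool rejects it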
async def get_puzzle_and_solution(self, request: Dict) -> Optional[Dict]:
coin_name: bytes32 = bytes32.from_hexstr(request["coin_id"])
height = request["height"]
coin_record = await self.service.coin_store.get_coin_record(coin_name)
if coin_record is None or not coin_record.spent or coin_record.spent_block_index != height:
raise ValueError(f"Invalid height {height}. coin record {coin_record}")
header_hash = self.service.blockchain.height_to_hash(height)
assert header_hash is not None
block: Optional[FullBlock] = await self.service.block_store.get_full_block(header_hash)
if block is None or block.transactions_generator is None:
raise ValueError("Invalid block or block generator")
block_generator: Optional[BlockGenerator] = await self.service.blockchain.get_block_generator(block)
assert block_generator is not None
error, puzzle, solution = get_puzzle_and_solution_for_coin(
block_generator, coin_name, self.service.constants.MAX_BLOCK_COST_CLVM
)
if error is not None:
raise ValueError(f"Error: {error}")
puzzle_ser: SerializedProgram = SerializedProgram.from_program(Program.to(puzzle))
solution_ser: SerializedProgram = SerializedProgram.from_program(Program.to(solution))
return {"coin_solution": CoinSpend(coin_record.coin, puzzle_ser, solution_ser)}
async def get_additions_and_removals(self, request: Dict) -> Optional[Dict]:
if "header_hash" not in request:
raise ValueError("No header_hash in request")
header_hash = bytes32.from_hexstr(request["header_hash"])
block: Optional[FullBlock] = await self.service.block_store.get_full_block(header_hash)
if block is None:
raise ValueError(f"Block {header_hash.hex()} not found")
async with self.service._blockchain_lock_low_priority:
if self.service.blockchain.height_to_hash(block.height) != header_hash:
raise ValueError(f"Block at {header_hash.hex()} is no longer in the blockchain (it's in a fork)")
additions: List[CoinRecord] = await self.service.coin_store.get_coins_added_at_height(block.height)
removals: List[CoinRecord] = await self.service.coin_store.get_coins_removed_at_height(block.height)
return {
"additions": [coin_record_dict_backwards_compat(cr.to_json_dict()) for cr in additions],
"removals": [coin_record_dict_backwards_compat(cr.to_json_dict()) for cr in removals],
}
async def get_all_mempool_tx_ids(self, request: Dict) -> Optional[Dict]:
ids = list(self.service.mempool_manager.mempool.spends.keys())
return {"tx_ids": ids}
async def get_all_mempool_items(self, request: Dict) -> Optional[Dict]:
spends = {}
for tx_id, item in self.service.mempool_manager.mempool.spends.items():
spends[tx_id.hex()] = item
return {"mempool_items": spends}
async def get_mempool_item_by_tx_id(self, request: Dict) -> Optional[Dict]:
if "tx_id" not in request:
raise ValueError("No tx_id in request")
tx_id: bytes32 = bytes32.from_hexstr(request["tx_id"])
item = self.service.mempool_manager.get_mempool_item(tx_id)
if item is None:
raise ValueError(f"Tx id 0x{tx_id.hex()} not in the mempool")
return {"mempool_item": item}
|
the-stack_106_29744 | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import ColumnMapExpectation, InvalidExpectationConfigurationError
try:
import sqlalchemy as sa
except ImportError:
pass
class ExpectColumnValuesToBeUnique(ColumnMapExpectation):
"""Expect each column value to be unique.
This expectation detects duplicates. All duplicated values are counted as exceptions.
For example, `[1, 2, 3, 3, 3]` will return `[3, 3, 3]` in `result.exceptions_list`, with \
`unexpected_percent = 60.0`.
expect_column_values_to_be_unique is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
}
map_metric = "column_values.unique"
success_keys = ("mostly",)
default_kwarg_values = {
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"parse_strings_as_datetimes": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
super().validate_configuration(configuration)
try:
assert (
"column" in configuration.kwargs
), "'column' parameter is required for column map expectations"
if "mostly" in configuration.kwargs:
mostly = configuration.kwargs["mostly"]
assert isinstance(
mostly, (int, float)
), "'mostly' parameter must be an integer or float"
assert 0 <= mostly <= 1, "'mostly' parameter must be between 0 and 1"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
def _atomic_prescriptive_template(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
params_with_json_schema = {
"column": {"schema": {"type": "string"}, "value": params.get("column")},
"mostly": {"schema": {"type": "number"}, "value": params.get("mostly")},
"row_condition": {
"schema": {"type": "string"},
"value": params.get("row_condition"),
},
"condition_parser": {
"schema": {"type": "string"},
"value": params.get("condition_parser"),
},
}
if include_column_name:
template_str = "$column values must be unique"
else:
template_str = "values must be unique"
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(
params["row_condition"], with_schema=True
)
template_str = conditional_template_str + ", then " + template_str
params_with_json_schema.update(conditional_params)
return (template_str, params_with_json_schema, styling)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
if include_column_name:
template_str = "$column values must be unique"
else:
template_str = "values must be unique"
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
|
the-stack_106_29748 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
long_desc = '''
This contrib extension, sphinxcontrib.httpdomain, provides a Sphinx
domain for describing RESTful HTTP APIs.
You can find the documentation from the following URL:
http://pythonhosted.org/sphinxcontrib-httpdomain/
'''
requires = [
'Sphinx >= 1.0',
'six'
]
setup(
name='sphinxcontrib-httpdomain',
version='1.3.0',
url='http://bitbucket.org/birkenfeld/sphinx-contrib',
download_url='http://pypi.python.org/pypi/sphinxcontrib-httpdomain',
license='BSD',
author='Hong Minhee',
author_email='[email protected]',
description='Sphinx domain for HTTP APIs',
long_description=long_desc,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
include_package_data=True,
install_requires=requires,
namespace_packages=['sphinxcontrib'],
)
|
the-stack_106_29750 | # Author: Aretas Gaspariunas
from typing import List, Dict, Optional, Iterable, Union, Tuple, Any
import warnings
import os
from contextlib import redirect_stderr
from anarci import anarci
import pandas as pd
from pandarallel import pandarallel
with redirect_stderr(open(os.devnull, "w")): # disable Keras messages
import keras
from pacpac.parapred.parapred import predict_sequence_probabilities as parapred
from pacpac.utils import convert_to_typed_numba_dict, rename_dict_keys
# disable TF messages
warnings.filterwarnings("ignore")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# init pandarallel
pandarallel.initialize(verbose=1)
def run_and_parse_anarci(
sequence: str,
allow: Optional[set] = {"H", "K", "L"},
scheme: Optional[str] = "imgt",
assign_germline: Optional[bool] = True,
database: Optional[str] = "ALL",
) -> Dict[str, str]:
"""
Finds V and J germline genes and assigns numbering using anarci for a given amino acid sequence.
"""
anarci_output = anarci(
[("1", sequence)],
scheme=scheme,
assign_germline=assign_germline,
output=False,
allow=allow,
database=database
)
if assign_germline:
output_dict = {
"CHAIN_TYPE": anarci_output[1][0][0]["chain_type"],
"IDENTITY_SPECIES": anarci_output[1][0][0]["germlines"]["v_gene"][0][0],
"V_GENE": anarci_output[1][0][0]["germlines"]["v_gene"][0][1],
"V_IDENTITY": round(anarci_output[1][0][0]["germlines"]["v_gene"][1], 2),
"J_GENE": anarci_output[1][0][0]["germlines"]["j_gene"][0][1],
"J_IDENTITY": round(anarci_output[1][0][0]["germlines"]["j_gene"][1], 2),
"NUMBERING": anarci_output[0][0][0][0],
}
else:
output_dict = {
"CHAIN_TYPE": anarci_output[1][0][0]["chain_type"],
"IDENTITY_SPECIES": None,
"V_GENE": None,
"V_IDENTITY": None,
"J_GENE": None,
"J_IDENTITY": None,
"NUMBERING": anarci_output[0][0][0][0],
}
return output_dict
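# Sketch of the returned dictionary for a heavy chain with assign_germline=True
# (values below are illustrative, not real ANARCI output):
#
#     {"CHAIN_TYPE": "H", "IDENTITY_SPECIES": "human", "V_GENE": "IGHV3-23*01",
#      "V_IDENTITY": 0.92, "J_GENE": "IGHJ4*02", "J_IDENTITY": 0.9,
#      "NUMBERING": [((1, " "), "E"), ((2, " "), "V"), ...]}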
def get_sequence_annotations(
sequence: str,
allow: Optional[set] = {"H", "K", "L"},
scheme: Optional[str] = "chothia",
cdr1_scheme: Optional[Dict[str, Iterable]] = {
"H": range(26, 33),
"L": range(24, 35),
},
cdr2_scheme: Optional[Dict[str, Iterable]] = {
"H": range(52, 57),
"L": range(50, 57),
},
cdr3_scheme: Optional[Dict[str, Iterable]] = {
"H": range(95, 103),
"L": range(89, 98),
},
assign_germline: Optional[bool] = True,
) -> Dict[str, Union[str, int, List[str]]]:
"""
For a VH or VL amino acid sequence, returns the three CDR sequences as determined
from the input numbering (scheme) and the given ranges.
Default ranges are Chothia CDRs.
============================================================================
Note:
* Gracefully stolen and refactored get_cdr_simple() from Parapred source code.
* Returns a dictionary with CDR sequences, numbering scheme positions for each CDR residue.
"""
anarci_output = run_and_parse_anarci(
sequence, scheme=scheme, allow=allow, assign_germline=assign_germline
)
numbering = anarci_output["NUMBERING"] # numbering starts with 1 and not 0
chain_type = anarci_output["CHAIN_TYPE"]
if chain_type == "K" and chain_type not in cdr1_scheme:
chain_type = "L"
if chain_type not in cdr1_scheme:
raise ValueError(f"chain_type {chain_type} is not in input CDR scheme")
cdr1_scheme = cdr1_scheme[chain_type]
cdr2_scheme = cdr2_scheme[chain_type]
cdr3_scheme = cdr3_scheme[chain_type]
# extract CDR sequences
cdr1, cdr2, cdr3 = "", "", ""
cdr1_numbering, cdr2_numbering, cdr3_numbering = [], [], []
for num_tuple, res in numbering:
residue_position = str(num_tuple[0]) + num_tuple[1].rstrip()
if res == "-":
continue
if num_tuple[0] in cdr1_scheme:
cdr1_numbering.append(residue_position)
cdr1 += res
elif num_tuple[0] in cdr2_scheme:
cdr2_numbering.append(residue_position)
cdr2 += res
elif num_tuple[0] in cdr3_scheme:
cdr3_numbering.append(residue_position)
cdr3 += res
annotation_dict = {
"CDR1": cdr1,
"CDR1_NUMBERING": cdr1_numbering,
"CDR2": cdr2,
"CDR2_NUMBERING": cdr2_numbering,
"CDR3": cdr3,
"CDR3_NUMBERING": cdr3_numbering,
}
annotation_dict = {**annotation_dict, **anarci_output}
del annotation_dict["NUMBERING"]
return annotation_dict
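# Minimal usage sketch (assumes `vh_seq` holds a valid VH amino acid sequence string):
#
#     annotations = get_sequence_annotations(vh_seq, scheme="chothia")
#     print(annotations["CDR3"], annotations["CDR3_NUMBERING"])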
def get_annotations(
sequence: str,
assign_germline: Optional[bool] = True,
scheme: Optional[str] = "chothia",
cdr_scheme: Optional[str] = "chothia",
num_extra_residues: Optional[int] = 2,
) -> Dict[str, str]:
"""
Annotation and CDR definition for a given VH or VL sequence.
Convenience wrapper around get_sequence_annotations() with defined CDR schemas.
"""
if cdr_scheme in ("imgt"):
scheme = "imgt"
elif cdr_scheme in ("chothia", "contact") and scheme not in ("chothia", "martin"):
scheme = "chothia"
if cdr_scheme == "chothia":
cdr1_scheme = {
"H": range(26 - num_extra_residues, 33 + num_extra_residues),
"L": range(24 - num_extra_residues, 35 + num_extra_residues),
}
cdr2_scheme = {
"H": range(52 - num_extra_residues, 57 + num_extra_residues),
"L": range(50 - num_extra_residues, 57 + num_extra_residues),
}
cdr3_scheme = {
"H": range(95 - num_extra_residues, 103 + num_extra_residues),
"L": range(89 - num_extra_residues, 98 + num_extra_residues),
}
elif cdr_scheme == "imgt":
cdr1_scheme = {
"H": range(27 - num_extra_residues, 39 + num_extra_residues),
"L": range(27 - num_extra_residues, 39 + num_extra_residues),
}
cdr2_scheme = {
"H": range(56 - num_extra_residues, 66 + num_extra_residues),
"L": range(56 - num_extra_residues, 66 + num_extra_residues),
}
cdr3_scheme = {
"H": range(105 - num_extra_residues, 118 + num_extra_residues),
"L": range(105 - num_extra_residues, 118 + num_extra_residues),
}
elif cdr_scheme == "north" and scheme == "imgt":
cdr1_scheme = {
"H": range(24 - num_extra_residues, 41 + num_extra_residues),
"L": range(24 - num_extra_residues, 41 + num_extra_residues),
}
cdr2_scheme = {
"H": range(55 - num_extra_residues, 67 + num_extra_residues),
"L": range(55 - num_extra_residues, 70 + num_extra_residues),
}
cdr3_scheme = {
"H": range(105 - num_extra_residues, 118 + num_extra_residues),
"L": range(105 - num_extra_residues, 118 + num_extra_residues),
}
elif cdr_scheme == "north" and scheme != "imgt":
cdr1_scheme = {
"H": range(21 - num_extra_residues, 38 + num_extra_residues),
"L": range(22 - num_extra_residues, 37 + num_extra_residues),
}
cdr2_scheme = {
"H": range(49 - num_extra_residues, 61 + num_extra_residues),
"L": range(47 - num_extra_residues, 59 + num_extra_residues),
}
cdr3_scheme = {
"H": range(91 - num_extra_residues, 105 + num_extra_residues),
"L": range(87 - num_extra_residues, 102 + num_extra_residues),
}
elif cdr_scheme == "contact":
cdr1_scheme = {
"H": range(30 - num_extra_residues, 36 + num_extra_residues),
"L": range(30 - num_extra_residues, 37 + num_extra_residues),
}
cdr2_scheme = {
"H": range(47 - num_extra_residues, 59 + num_extra_residues),
"L": range(46 - num_extra_residues, 56 + num_extra_residues),
}
cdr3_scheme = {
"H": range(93 - num_extra_residues, 102 + num_extra_residues),
"L": range(89 - num_extra_residues, 97 + num_extra_residues),
}
annotations = get_sequence_annotations(
sequence,
scheme=scheme,
cdr1_scheme=cdr1_scheme,
cdr2_scheme=cdr2_scheme,
cdr3_scheme=cdr3_scheme,
assign_germline=assign_germline,
)
return annotations
def annotations_for_df(
df: pd.DataFrame,
aa_sequence_col_name: str,
assign_germline: Optional[bool] = True,
scheme: Optional[str] = "chothia",
cdr_scheme: Optional[str] = "chothia",
num_extra_residues: Optional[int] = 2,
) -> pd.DataFrame:
"""
Annotates sequences in pandas dataframe with CDRs and germline genes.
"""
def assign_annotations(row):
try:
annotations = get_annotations(
row[aa_sequence_col_name],
assign_germline=assign_germline,
scheme=scheme,
cdr_scheme=cdr_scheme,
)
except TypeError:
annotations = {
"CDR1": None,
"CDR1_NUMBERING": None,
"CDR2": None,
"CDR2_NUMBERING": None,
"CDR3": None,
"CDR3_NUMBERING": None,
"CHAIN_TYPE": None,
"IDENTITY_SPECIES": None,
"V_GENE": None,
"V_IDENTITY": None,
"J_GENE": None,
"J_IDENTITY": None,
# "NUMBERING": None,
}
for key, value in annotations.items():
row[key] = value
return row
df = df.parallel_apply(assign_annotations, axis=1)
return df
def get_paratope_probabilities(cdrs: Dict[str, str]) -> Dict[str, List[tuple]]:
"""
Runs Parapred prediction on a set of CDRs.
Returns a probability dictionary giving, for each CDR residue, the probability of being part of the paratope.
Each dictionary value is a list of (position, residue, probability) tuples.
"""
paratope_probs = {}
for cdr, cdr_seq in cdrs.items():
if cdr not in ["CDR1", "CDR2", "CDR3"]:
continue
prob = parapred([cdr_seq])
paratope_probs[cdr] = [
(pos, residue, prob[0, pos]) for pos, residue in enumerate(cdr_seq)
]
return paratope_probs
def apply_numbering_scheme_positions(
prob_dict: Dict[str, List[tuple]],
numbering_dict: Dict[str, List[str]],
) -> Dict[str, List[tuple]]:
"""
Applies the numbering scheme to the get_paratope_probabilities() prediction dictionary
to enable structural equivalence when comparing paratopes.
"""
numbered_prob_dict = {}
for cdr_name, cdr in prob_dict.items():
numbered_prob_dict[cdr_name] = [
(numbering_dict[f"{cdr_name}_NUMBERING"][index], res[1], res[2])
for index, res in enumerate(cdr)
]
return numbered_prob_dict
def apply_paratope_prediction_threshold(
prob_dict: Dict[str, str], paratope_residue_threshold: float
) -> Dict[str, str]:
"""
Applies the paratope residue prediction threshold to a CDR dictionary.
Returns a dictionary of CDRs with non-paratope residues omitted.
"""
paratope_dict = {}
for cdr_name, cdr in prob_dict.items():
paratope_dict[cdr_name] = [
residue
for residue in cdr if residue[2] > paratope_residue_threshold
]
return paratope_dict
def get_paratope_string(
paratope_probs: Dict[str, List[tuple]],
paratope_residue_threshold: Optional[float] = 0.67,
) -> str:
"""
Returns paratope as a string for a given dictionary of CDRs.
Non-paratope residues replaced with '-' and CDRs separated by spaces.
"""
paratope_str = ""
for cdr_name, cdr in paratope_probs.items():
for res in cdr:
if res[2] > paratope_residue_threshold:
paratope_str += res[1]
else:
paratope_str += "-"
paratope_str += " " * 4
return paratope_str.rstrip()
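# Shape of the returned string (illustrative only): residues predicted below the
# threshold are masked with '-' and the three CDRs are joined by four spaces, e.g.
#
#     "--YSGG-    --NGN--    ARDYWG---"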
def parapred_for_df(
df: pd.DataFrame, paratope_residue_threshold: Optional[float] = 0.67
) -> pd.DataFrame:
"""
Runs parapred on CDRs in pandas dataframe.
"""
def run_parapred(cdr1, cdr2, cdr3, threshold=paratope_residue_threshold):
try:
prob_dict = get_paratope_probabilities(
{"CDR1": cdr1, "CDR2": cdr2, "CDR3": cdr3}
)
except ValueError:
prob_dict = None
return prob_dict
df["PARATOPE_PROBS"] = df[["CDR1", "CDR2", "CDR3"]].apply(
lambda x: run_parapred(*x), axis=1
)
return df
def paratopes_for_df(
df: pd.DataFrame, paratope_residue_threshold: Optional[float] = 0.67
) -> pd.DataFrame:
"""
Reformats parapred output in pandas dataframe.
"""
def reformat_parapred_output(row, threshold=paratope_residue_threshold):
if row["PARATOPE_PROBS"] is not None:
prob_dict_numbered = apply_numbering_scheme_positions(
row["PARATOPE_PROBS"],
row[["CDR1_NUMBERING", "CDR2_NUMBERING", "CDR3_NUMBERING"]],
)
paratope_dict = apply_paratope_prediction_threshold(
row["PARATOPE_PROBS"], paratope_residue_threshold
)
paratope_dict_numbered = apply_paratope_prediction_threshold(
prob_dict_numbered, paratope_residue_threshold
)
paratope = get_paratope_string(row["PARATOPE_PROBS"], threshold)
else:
prob_dict_numbered = None
paratope_dict = None
paratope_dict_numbered = None
paratope = None
return prob_dict_numbered, paratope_dict, paratope_dict_numbered, paratope
(
df["PARATOPE_PROBS_NUMBERED"],
df["PARATOPE_DICT"],
df["PARATOPE_DICT_NUMBERED"],
df["PARATOPE"],
) = zip(*df.parallel_apply(reformat_parapred_output, axis=1))
return df
def get_residue_token_dict() -> Dict[str, str]:
# as described by Wong et al., 2020
# S = small; N = nucleophilic; H = hydrophobic; A = aromatic; C = acidic; M = amine; B = basic
residue_token_dict = {
"G": "S",
"A": "S",
"S": "N",
"T": "N",
"C": "N",
"V": "H",
"L": "H",
"I": "H",
"M": "H",
"P": "H",
"F": "A",
"W": "A",
"Y": "A",
"D": "C",
"E": "C",
"N": "M",
"Q": "M",
"K": "B",
"H": "B",
"R": "B",
}
return residue_token_dict
def annotate_sequence(
sequence: str,
scheme: Optional[str] = "chothia",
cdr_scheme: Optional[str] = "chothia",
assign_germline: Optional[bool] = True,
num_extra_residues: Optional[int] = 2,
paratope_residue_threshold: Optional[float] = 0.67,
structural_equivalence: Optional[bool] = True,
tokenize: Optional[bool] = False,
) -> Dict[str, Any]:
"""
Annotates input VH or VL sequence with anarci and parapred
"""
annotations = get_annotations(
sequence,
assign_germline=assign_germline,
scheme=scheme,
cdr_scheme=cdr_scheme,
num_extra_residues=num_extra_residues,
)
prob_dict = get_paratope_probabilities(annotations)
if structural_equivalence is True:
prob_dict = apply_numbering_scheme_positions(prob_dict, annotations)
paratope = apply_paratope_prediction_threshold(
prob_dict, paratope_residue_threshold
)
annotations["PARATOPE_DICT"] = paratope
if tokenize:
residue_token_dict = get_residue_token_dict()
annotations["PARATOPE_DICT_REFORMAT"] = {
cdr: {str(residue[0]): residue_token_dict[residue[1]] for residue in value}
for cdr, value in annotations["PARATOPE_DICT"].items()
}
else:
annotations["PARATOPE_DICT_REFORMAT"] = {
cdr: {str(residue[0]): residue[1] for residue in value}
for cdr, value in annotations["PARATOPE_DICT"].items()
}
annotations["PARATOPE_LEN"] = sum(
len(res_dict) for cdr, res_dict in annotations["PARATOPE_DICT_REFORMAT"].items()
)
annotations["PARATOPE_DICT_REFORMAT"] = convert_to_typed_numba_dict(
annotations["PARATOPE_DICT_REFORMAT"]
)
return annotations
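# Usage sketch (the truncated sequence below is a placeholder, not a real antibody):
#
#     ann = annotate_sequence("EVQLVESGGGLVQPGGSLRLSCAAS...", cdr_scheme="north")
#     ann["PARATOPE_DICT_REFORMAT"]  # typed dict of CDR -> {position: residue or token}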
def paratopes_for_df_both_chains(
df: pd.DataFrame,
vl_df: pd.DataFrame,
paratope_residue_threshold: Optional[float] = 0.67,
both_chains: Optional[bool] = True,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Calculates paratopes for VH only, or for both VH and VL,
and formats the output into a single dataframe.
"""
df = parapred_for_df(df, paratope_residue_threshold=paratope_residue_threshold)
df = paratopes_for_df(df, paratope_residue_threshold=paratope_residue_threshold)
if both_chains:
vl_df = parapred_for_df(
vl_df, paratope_residue_threshold=paratope_residue_threshold
)
vl_df = paratopes_for_df(
vl_df, paratope_residue_threshold=paratope_residue_threshold
)
vl_df = vl_df[
[
"CDR1",
"CDR2",
"CDR3",
"PARATOPE_DICT",
"PARATOPE_DICT_NUMBERED",
"PARATOPE",
]
]
# add 'L' and 'H' prefixes to the VL and VH column names
vl_df.columns = ["L" + i for i in vl_df.columns]
df.rename(
columns={"CDR1": "HCDR1", "CDR2": "HCDR2", "CDR3": "HCDR3"}, inplace=True
)
# merge both dataframes
df = df.merge(vl_df, how="left", left_index=True, right_index=True)
# exclude sequences where parapred has failed for VH paratope or VL paratope
nan_df3 = df[df["PARATOPE"].isnull() | df["LPARATOPE"].isnull()]
df = df[df["PARATOPE"].notnull() | df["LPARATOPE"].notnull()]
# rename paratope dict keys
df["PARATOPE_DICT"] = [rename_dict_keys(i, "H") for i in df["PARATOPE_DICT"]]
df["LPARATOPE_DICT"] = [rename_dict_keys(i, "L") for i in df["LPARATOPE_DICT"]]
df["PARATOPE_DICT_NUMBERED"] = [
rename_dict_keys(i, "H") for i in df["PARATOPE_DICT_NUMBERED"]
]
df["LPARATOPE_DICT_NUMBERED"] = [
rename_dict_keys(i, "L") for i in df["LPARATOPE_DICT_NUMBERED"]
]
# merge paratope columns
df["PARATOPE_DICT"] = [
{**dict1, **dict2}
for dict1, dict2 in zip(df["PARATOPE_DICT"], df["LPARATOPE_DICT"])
]
df["PARATOPE_DICT_NUMBERED"] = [
{**dict1, **dict2}
for dict1, dict2 in zip(
df["PARATOPE_DICT_NUMBERED"], df["LPARATOPE_DICT_NUMBERED"]
)
]
df["PARATOPE"] = df["PARATOPE"] + " " * 4 + df["LPARATOPE"]
else:
# exclude sequences where parapred has failed
nan_df3 = df[df["PARATOPE"].isnull()]
df = df[df["PARATOPE"].notnull()]
return df, nan_df3
def tokenize_and_reformat(
df: pd.DataFrame,
structural_equivalence: Optional[bool] = True,
tokenize: Optional[bool] = False,
) -> pd.DataFrame:
"""
Reformats paratope column in the dataframe;
Counts paratope length;
Optionally tokenizes residues;
"""
paratope_dict_col = "PARATOPE_DICT"
if structural_equivalence is True:
paratope_dict_col = "PARATOPE_DICT_NUMBERED"
# reformatting paratope dict column
if tokenize:
residue_token_dict = get_residue_token_dict()
df["PARATOPE_DICT_REFORMAT"] = [
{
cdr: {
str(residue[0]): residue_token_dict[residue[1]] for residue in value
}
for cdr, value in paratope_dict.items()
}
for paratope_dict in df[paratope_dict_col]
]
else:
df["PARATOPE_DICT_REFORMAT"] = [
{
cdr: {str(residue[0]): residue[1] for residue in value}
for cdr, value in paratope_dict.items()
}
for paratope_dict in df[paratope_dict_col]
]
df["PARATOPE_DICT_REFORMAT"] = [
convert_to_typed_numba_dict(paratope_dict)
for paratope_dict in df["PARATOPE_DICT_REFORMAT"]
]
df["PARATOPE_LEN"] = [
sum(len(res_dict) for cdr, res_dict in paratope_dict.items())
for paratope_dict in df["PARATOPE_DICT_REFORMAT"]
]
return df
|
the-stack_106_29751 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Performs setup tasks for Learned Interpreters binaries."""
import dataclasses
import json
import os
import random
from typing import Any, Text
from absl import logging # pylint: disable=unused-import
import numpy as np
import tensorflow as tf
from ipagnn.adapters import adapters_lib
from ipagnn.config import overrides_lib
from ipagnn.lib import checkpoint_utils
from ipagnn.lib import config_utils
from ipagnn.lib import dataset_utils
from ipagnn.models import models_lib
gfile = tf.io.gfile
@dataclasses.dataclass
class RunConfiguration:
"""The configuration for a single experimental run."""
mode: Text
method: Text
run_dir: Text
data_dir: Text
original_checkpoint_path: Text
model: Any
info: Any # Info
config: Any # Config
adapter: Any # Adapter
dataset_info: Any # Tuple
def seed():
random.seed(0)
np.random.seed(0)
def configure(data_dir, run_dir, config, override_values, xm_parameters=None):
"""Sets up the Learned Interpreter code with the specified configuration."""
seed()
# Apply any overrides set at the command line or in the launcher.
if config.overrides != config.default_overrides:
logging.info('Applying overrides set at command line: %s', config.overrides)
overrides_lib.apply_overrides(
config, override_names=config.overrides.split(','))
config.update_from_flattened_dict(override_values)
# If a checkpoint is specified, it determines the "original run."
# Otherwise the run_dir, if already present, determines the "original run."
config_filepath = os.path.join(run_dir, 'config.json')
if checkpoint_utils.is_checkpoint_specified(config.checkpoint):
original_checkpoint_path = checkpoint_utils.get_specified_checkpoint_path(
run_dir, config.checkpoint)
original_run_dir = checkpoint_utils.get_run_dir(original_checkpoint_path)
original_config_filepath = os.path.join(original_run_dir, 'config.json')
else:
checkpoint_dir = checkpoint_utils.build_checkpoint_dir(run_dir)
original_checkpoint_path = checkpoint_utils.latest_checkpoint(
checkpoint_dir)
original_config_filepath = config_filepath
original_config_exists = gfile.exists(original_config_filepath)
# Handle any existing configs.
if original_config_exists:
original_config = config_utils.load_config(original_config_filepath)
# Handle the model config.
if config.runner.model_config == 'load':
logging.info('Loading the model config from %s', original_config_filepath)
config.model.update(original_config.model)
config.dataset.representation = original_config.dataset.representation
elif config.runner.model_config == 'assert':
same_config = config_utils.equals(config.model, original_config.model)
# Resolution:
# Either use a new run_dir, or set model_config to 'load' or 'keep'.
assert same_config, 'Model config has changed.'
else:
assert config.runner.model_config == 'keep'
# Handle the dataset config.
if config.runner.dataset_config == 'load':
logging.info('Loading the data config from %s', original_config_filepath)
config.dataset.update(original_config.dataset)
elif config.runner.dataset_config == 'assert':
same_config = config_utils.equals(config.dataset, original_config.dataset)
assert same_config, 'Dataset config has changed.'
else:
assert config.runner.dataset_config == 'keep'
elif (config.runner.model_config == 'load'
or config.runner.dataset_config == 'load'):
raise ValueError('Original model config not found.')
# In interactive mode, force batch size 1.
if config.runner.mode == 'interact':
config.dataset.batch_size = 1
config_exists = gfile.exists(config_filepath)
if not config_exists and config.runner.mode in 'train':
gfile.makedirs(run_dir)
config_utils.save_config(config, config_filepath)
# Load dataset.
if config.setup.setup_dataset:
dataset_info = dataset_utils.get_dataset(data_dir, config)
info = dataset_info.info
else:
dataset_info = None
info = None
# Create model.
if config.setup.setup_model:
model = models_lib.get_model(info, config)
else:
model = None
adapter = adapters_lib.get_default_adapter(info, config)
return RunConfiguration(
mode=config.runner.mode,
method=config.runner.method,
run_dir=run_dir,
data_dir=data_dir,
original_checkpoint_path=original_checkpoint_path,
model=model,
info=info,
config=config,
adapter=adapter,
dataset_info=dataset_info)
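# Sketch of a typical call site (paths are illustrative; `config` is the experiment
# config object used throughout this module):
#
#     run_config = configure(data_dir="/data", run_dir="/runs/exp0",
#                            config=config, override_values={})
#     model, adapter = run_config.model, run_config.adapter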
|
the-stack_106_29752 | import math
import os
from concurrent.futures import Future, ThreadPoolExecutor, as_completed, wait
from dataclasses import dataclass
from pathlib import Path
from typing import List, Literal, Sequence, Union
from google.cloud import storage as gcs
from tqdm import tqdm
from practipy.text import remove_prefix
"""
TODO:
- Return generator instead of list?
"""
@dataclass
class TransferEvent:
"""Represents a file download or upload operation."""
num_bytes: int
source_path: str
target_path: str
def catch_unauthenticated(f):
def aux(*args, **kwargs):
from google.auth.exceptions import RefreshError
try:
return f(*args, **kwargs)
except RefreshError as e:
if isinstance(e.args, tuple) and len(e.args) == 2:
if e.args[1] == {
'error': 'invalid_grant',
'error_description': 'Bad Request'
}:
cmd = "gcloud auth application-default login --no-launch-browser"
raise ValueError(f"Captured potentially known error: {e}. Please make sure that "
f"you have authenticated your machine using '{cmd}' command")
raise e
return aux
@catch_unauthenticated
def download_folder(
project: str,
source_dir: str,
target_dir: Union[Path, str],
progress_bar: bool = True,
):
"""Download all the contents of `source_dir` on GCS `target_dir` on the local
filesystem.
Note: The bucket should be included in the source path!
"""
target_dir = Path(target_dir)
# Remove any gs:// prefix and split the bucket name off the source dir
source_dir = Path(remove_prefix(source_dir, "gs://"))
bucket_name = source_dir.parts[0]
source_dir = str(source_dir.relative_to(bucket_name))
client = gcs.Client(project=project)
def download_blob(blob: gcs.Blob) -> TransferEvent:
relative_path = remove_prefix(blob.name, source_dir)
local_path = target_dir.joinpath(relative_path.strip("/"))
num_bytes = 0
# If this is an empty folder, just create it, don't download it.
if relative_path.endswith("/"):
local_path.mkdir(exist_ok=True)
# Otherwise, make sure the folder for this file exists and download the file.
elif not local_path.exists():
local_path.parent.mkdir(exist_ok=True, parents=True)
blob.download_to_filename(str(local_path))
# blob.size is unreliable and may return None for some reason...
num_bytes = local_path.stat().st_size
return TransferEvent(num_bytes, blob.name, str(local_path))
# We simply download all blobs that are prefixed with the source dir
blobs = list(client.list_blobs(bucket_name, prefix=source_dir))
# Create a ThreadPool to download multiple files in parallel
with ThreadPoolExecutor() as e:
futures = [e.submit(download_blob, blob) for blob in blobs]
if progress_bar:
network_futures_progress_bar(futures, mode="download", keep_order=False)
else:
wait(futures)
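# Usage sketch (the project, bucket and paths below are made-up examples):
#
#     download_folder("my-gcp-project", "gs://my-bucket/datasets/v1", "/tmp/datasets/v1")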
@catch_unauthenticated
def download_files(
project: str,
bucket: str,
gcs_paths: Sequence[str],
download_dir: Union[Path, str],
strip_prefix: str = "",
keep_order: bool = True,
progress_bar: bool = True,
) -> List[str]:
"""Strips `strip_prefix` from all GCS paths in `gcs_paths` and then downloads them
to `download_dir` on the local filesystem, creating it if it does not yet exist.
Returns the list of local filepaths.
Note: paths are relative to `gs://<bucket_name>`!.
"""
bucket = gcs.Client(project=project).get_bucket(bucket)
blobs = [bucket.blob(gcs_path) for gcs_path in gcs_paths]
download_dir = Path(download_dir)
def download_blob(blob: gcs.Blob) -> TransferEvent:
relative_path = remove_prefix(blob.name, strip_prefix)
local_path = download_dir.joinpath(relative_path)
num_bytes = 0
if not local_path.exists():
local_path.parent.mkdir(exist_ok=True, parents=True)
blob.download_to_filename(str(local_path))
# blob.size is unreliable and may return None for some reason...
num_bytes = local_path.stat().st_size
return TransferEvent(num_bytes, blob.name, str(local_path))
# Create a ThreadPool to download multiple files in parallel
with ThreadPoolExecutor() as e:
futures = [e.submit(download_blob, blob) for blob in blobs]
if progress_bar:
events = network_futures_progress_bar(futures, keep_order=keep_order)
else:
events = [f.result() for f in futures]
return [event.target_path for event in events]
@catch_unauthenticated
def upload_folder(
project: str,
source_dir: Union[Path, str],
target_dir: str,
progress_bar: bool = True,
) -> None:
"""Upload all the contents of `source_dir` on the local filesystem into `target_dir`
on GCS.
Note: The bucket should be included in the target path!
"""
source_dir = Path(source_dir)
# Remove any gs:// prefix and split the bucket name off the target dir
target_dir = Path(remove_prefix(target_dir, "gs://"))
bucket_name = target_dir.parts[0]
target_dir = str(target_dir.relative_to(bucket_name))
bucket = gcs.Client(project=project).get_bucket(str(bucket_name))
# Note: This will overwrite any blobs that already exist.
def upload_file(file: Path) -> TransferEvent:
blob = bucket.blob(os.path.join(target_dir, str(file.relative_to(source_dir))))
blob.upload_from_filename(str(file), checksum="md5")
return TransferEvent(file.stat().st_size, str(file), blob.name)
files = source_dir.glob("**/*")
# Create a ThreadPool to upload multiple files in parallel
with ThreadPoolExecutor() as e:
futures = [e.submit(upload_file, file) for file in files if file.is_file()]
if progress_bar:
network_futures_progress_bar(futures, mode="upload", keep_order=False)
else:
wait(futures)
@catch_unauthenticated
def upload_files(
project: str,
paths: Sequence[Union[Path, str]],
target_dir: str,
strip_prefix: str = "",
progress_bar: bool = True,
) -> None:
"""Upload all provided files from the local filesystem into `target_dir` on GCS.
`strip_prefix` is removed from each local filepath and the remainder is appended to
`target_dir` to create the target path.
Note: The bucket should be included in the target path!
"""
# Remove any gs:// prefix and split the bucket name off the target dir
target_dir = Path(remove_prefix(target_dir, "gs://"))
bucket_name = target_dir.parts[0]
target_dir = str(target_dir.relative_to(bucket_name))
bucket = gcs.Client(project=project).get_bucket(str(bucket_name))
# Note: This will overwrite any blobs that already exist.
def upload_file(file: Path) -> TransferEvent:
blob = bucket.blob(
os.path.join(target_dir, remove_prefix(str(file), strip_prefix).strip("/"))
)
blob.upload_from_filename(str(file), checksum="md5")
return TransferEvent(file.stat().st_size, str(file), blob.name)
# Create a ThreadPool to upload multiple files in parallel
with ThreadPoolExecutor() as e:
futures = [e.submit(upload_file, path) for path in paths]
if progress_bar:
network_futures_progress_bar(futures, mode="upload", keep_order=False)
else:
wait(futures)
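# Usage sketch (made-up paths; strip_prefix removes the local root so only the
# relative remainder is recreated under target_dir on GCS):
#
#     upload_files("my-gcp-project", ["/data/run1/model.pt"],
#                  "gs://my-bucket/runs", strip_prefix="/data")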
def network_futures_progress_bar(
futures: Sequence[Future],
mode: Literal["download", "upload"] = "download",
keep_order: bool = True,
) -> List[TransferEvent]:
"""Given a sequence of futures that return TransferEvents, display a progress bar
that computes the transfer speed and finally return the list of TransferEvents."""
iterable = futures if keep_order else as_completed(futures)
progress_bar = tqdm(
iterable, total=len(futures), desc=f"{mode.capitalize()}ing files"
)
total_bytes = 0
events = []
# Update every 100 events or every 1% of the total, whichever is more frequent
interval = min(100, math.ceil(len(futures) / 100.0))
for f, future in enumerate(progress_bar):
event = future.result()
events.append(event)
total_bytes += event.num_bytes
if f % interval == 0 or f == len(futures) - 1:
megabytes = total_bytes / 1048576.0 # 1024^2
speed = megabytes / progress_bar.format_dict["elapsed"]
progress_bar.set_postfix_str(
f"{mode.capitalize()}ed {megabytes:.2f} MiB at {speed:.2f} MiB/s."
)
return events
|
the-stack_106_29754 | import os
import platform
import argparse
import urllib.request
# import nightsawayforms code
import campdeets
import configuration
import defaults
import equipment
import header
import health
import kitlist
import menu
import programme
import riskassessment
import nanform
def set_up():
"""Check local environment and set up as necessary"""
# check if config directory exists and, if not, create it
try:
directory = "config"
if not os.path.exists(directory):
os.makedirs(directory)
except:
print("Failed to create config directory")
quit()
# check if data files are present and, if not, write defaults
try:
if not os.path.isfile('config/equipment.json'):
defaults.default_equip()
if not os.path.isfile('config/kitList.json'):
defaults.default_kit()
if not os.path.isfile('config/risks.json'):
defaults.default_risk()
except:
print("Failed to search config directory")
    # check if NAN form template exists and, if not, download it
try:
if not os.path.isfile('config/nanFormTemplate.docx'):
url = r"https://scouts.org.uk/media/981094/Form-NAN_Sept-2018.docx"
# download the NAN form and save it to the config directory
with urllib.request.urlopen(url) as response, \
open("config/nanFormTemplate.docx", 'wb') as outFile:
data = response.read()
outFile.write(data)
except:
print("Failed to download NAN form template")
def camp_directory():
"""Check directory for camp forms and create as necessary"""
directory = input("What directory do you want to save the camp paperwork in?" \
" (Leave blank for this directory) ")
try:
# if nothing entered, set as blank string for this directory
if not directory:
directory = ""
# else check if camp directory exists and, if not, create it
elif not os.path.exists(directory):
os.makedirs(directory)
return os.path.join(directory, '')
except:
print("Failed to create directory:", directory)
def blank_doc(directory, myGroup, event):
"""Produces a blank document using the standard header for the camp"""
print("Generating blank document...")
doc, docName = header.word_heading(myGroup, event)
# save document
try:
docName = 'Blank document' + docName
print("Saving:", docName)
doc.save(directory + docName)
except:
print("Failed to save " + docName)
def ops(config):
"""Runs the primary operations"""
# check local set up
set_up()
# get required data
if config == "ignore":
myGroup = campdeets.get_group()
else:
myGroup = configuration.config_reader()
event = campdeets.get_camp()
leader = campdeets.get_leader()
print("")
directory = camp_directory()
print("")
# write programme
doc, docName = header.word_heading(myGroup, event)
programme.programme(doc, docName, directory, event)
print("")
# write kit list
doc, docName = header.word_heading(myGroup, event)
kitlist.kit_list(doc, docName, directory, event)
print("")
# write group equipment list
wb, ws, bookName = header.excel_heading(myGroup, event)
equipment.equipment(wb, ws, bookName, directory, leader, myGroup, event)
print("")
# write menu, if applicable
if event.catering:
doc, docName = header.word_heading(myGroup, event)
menu.menu(doc, docName, directory, event)
print("")
# write risk assessment
doc, docName = header.word_heading(myGroup, event)
riskassessment.risk_assessment(doc, docName, directory, event)
print("")
# write health and emergency contact form if required
hf = input("Do you want to create a health and emergency contact form (y/n)? ")
if hf == 'y':
doc, docName = header.word_heading(myGroup, event)
health.health_and_emergency(doc, docName, directory, event)
print("")
# copy blank NAN form into the camp directory
nanform.nan_form(directory, event)
print("")
print("-" * 40)
print("Have a good camp!")
print("-" * 40, end="\n\n")
def main():
"""
Main entry point to programme.
Captures command line arguments
and acts on primary decisions
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-c",
"--config",
help="set up a new Scout Group configuration",
action="store_true"
)
parser.add_argument(
"-i",
"--ignore",
help="ignores existing configuration",
action="store_true"
)
parser.add_argument(
"-d",
"--default",
help="restore settings files to default",
action="store_true"
)
parser.add_argument(
"-b",
"--blank",
help="generate a blank document with a camp header",
action="store_true"
)
args = parser.parse_args()
# clear the terminal and show welcome screen
if platform.system() == 'Linux' or platform.system() == 'Darwin':
os.system('clear')
elif platform.system() == 'Windows':
os.system('cls')
print("-" * 40)
print("The Nights Away Form Generator")
print("-" * 40, end="\n\n")
# set up a new Scout Group configuration
if args.config:
myGroup = campdeets.get_group()
configuration.config_writer(myGroup)
# ignore any existing Scout Group configurations
elif args.ignore:
ops("ignore")
# restore default settings files
elif args.default:
defaults.restore_defaults()
# generate a blank document with a camp header
elif args.blank:
# get required data
if args.ignore:
myGroup = campdeets.get_group()
else:
myGroup = configuration.config_reader()
event = campdeets.get_camp()
print("")
directory = camp_directory()
print("")
# produce blank document
blank_doc(directory, myGroup, event)
# run normal nights away form generator
else:
ops("include")
if __name__ == '__main__':
main()
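# Example invocations (assumed shell commands and script name, based on the argparse
# flags defined in main() above):
#   python nightsawayforms.py        # normal run, using any saved configuration
#   python nightsawayforms.py -c     # set up a new Scout Group configuration
#   python nightsawayforms.py -i     # ignore an existing configuration for this run
#   python nightsawayforms.py -d     # restore the settings files to defaults
#   python nightsawayforms.py -b     # generate a blank document with a camp header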
|
the-stack_106_29758 | #!/usr/bin/env python
__author__ = "alvaro barbeira"
import logging
import os
import re
import pandas
import numpy
import gzip
from timeit import default_timer as timer
from pyarrow import parquet as pq
from genomic_tools_lib import Logging, Utilities
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.miscellaneous import matrices, PandasHelpers
from genomic_tools_lib.miscellaneous import Genomics
from genomic_tools_lib.file_formats import Parquet
class Context:
def __init__(self, args):
self.args = args
self.file_map = None
self.vmf = None
self.of = None
self.regions = None
def get_genotype_file(self, chromosome):
logging.info("Opening genotype for chromosome %d", chromosome)
g = pq.ParquetFile(self.file_map[chromosome])
return g
def __enter__(self):
logging.info("initializing resources")
logging.info("Loading regions")
regions = load_regions(self.args.region_file, self.args.chromosome)
if args.sub_batches and args.sub_batch is not None:
logging.log(9, "Selecting target regions from sub-batches")
regions = PandasHelpers.sub_batch(regions, args.sub_batches, args.sub_batch)
self.regions = regions
logging.info("Opening variants metadata")
self.vmf = pq.ParquetFile(args.parquet_genotype_metadata)
logging.info("Creating destination")
if args.text_output:
if os.path.exists(args.text_output):
raise RuntimeError("Output exists. Nope.")
Utilities.ensure_requisite_folders(args.text_output)
self.of = TextFileTools.TextDataSink(args.text_output, [("region", "id1", "id2", "value")])
self.of.initialize()
elif args.text_output_folder:
Utilities.maybe_create_folder(args.text_output_folder)
else:
raise RuntimeError("Unrecognized output specification")
if (args.parquet_genotype_folder and args.parquet_genotype_pattern):
self.file_map = get_file_map(args)
else:
raise RuntimeError("Unrecognized genotype specification")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
logging.info("finalizing resources")
if self.of:
self.of.finalize()
def sink(self, cov, ids, region):
logging.log(9, "Serializing covariance")
_region = "{}_{}_{}_{}".format(region.name, region.chr, region.start, region.stop)
if args.text_output:
if args.dapg_output:
raise RuntimeError("Not supported for this option")
else:
cov = matrices._flatten_matrix_data([(_region, ids, cov)])
self.of.sink(cov)
elif args.text_output_folder:
if args.dapg_output:
f = os.path.join(args.text_output_folder, _region) + ".txt.gz"
with gzip.open(f, "w") as o:
for i in range(0, cov.shape[0]):
l = "\t".join(["{:0.4f}".format(x) for x in cov[i]]) + "\n"
o.write(l.encode())
id = os.path.join(args.text_output_folder, _region) + ".id.txt.gz"
with gzip.open(id, "w") as o:
l = "\n".join(ids).encode()
o.write(l)
else:
cov = matrices._flatten_matrix_data_2(ids, cov)
cov = pandas.DataFrame(cov)[["id1", "id2", "value"]]
f = os.path.join(args.text_output_folder, _region) + ".txt.gz"
Utilities.save_dataframe(cov, f)
def get_file_map(args):
r = re.compile(args.parquet_genotype_pattern)
files = os.listdir(args.parquet_genotype_folder)
files = {int(r.search(f).groups()[0]):os.path.join(args.parquet_genotype_folder, f) for f in files if r.search(f)}
return files
def filter_by_frequency(vm, frequency):
return vm.loc[(frequency < vm.allele_1_frequency) &
(vm.allele_1_frequency < 1 - frequency)]
def load_regions(path, chromosome):
regions = pandas.read_table(path)
regions = regions.assign(name = ["region_{}".format(x) for x in regions.index.values])
regions.dropna(inplace=True)
regions = regions.assign(start = regions.start.astype(numpy.int32), stop = regions.stop.astype(numpy.int32))
if chromosome:
regions = regions.loc[regions.chr == "chr{}".format(chromosome)]
return regions
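# load_regions expects a tab-separated file (read with pandas.read_table) containing
# at least the columns used above: chr, start and stop. A minimal example, with
# made-up coordinates, might look like:
#
#   chr     start     stop
#   chr1    10583     1892607
#   chr1    1892607   3582736
#
# Each surviving row is given the name region_<row index>.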
#TODO remove debug code
def by_chromosome(context, chromosome):
vm = context.vmf.read_row_group(chromosome - 1).to_pandas()
if args.frequency_filter:
vm = filter_by_frequency(vm, args.frequency_filter)
g = context.get_genotype_file(chromosome)
regions = context.regions
regions = regions[regions.chr == "chr{}".format(chromosome)]
for i,region in enumerate(regions.itertuples()):
logging.log(9, "Processing region in chr %d: %d/%d", chromosome, i+1, regions.shape[0])
vmw = Genomics.entries_for_window(chromosome, region.start - args.window, region.stop + args.window, vm)
ids = vmw.id.values
logging.log(9, "%d variants", len(ids))
d = Parquet._read(g, columns=ids, skip_individuals=True)
d = numpy.array([d[x] for x in ids], dtype=numpy.float32)
if context.args.standardise_geno:
cov = numpy.corrcoef(d, ddof=1).astype(numpy.float32, copy=False)
else:
cov = numpy.cov(d).astype(numpy.float32, copy=False)
logging.log(9, "%d rows", cov.shape[0])
context.sink(cov, ids, region)
def run(args):
start = timer()
logging.info("Starting")
with Context(args) as context:
if args.chromosome:
by_chromosome(context, args.chromosome)
else:
for chromosome in range(1,23):
by_chromosome(context, chromosome)
end = timer()
logging.info("Ended in %s", str(end-start))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Generate BSLMM runs on study")
parser.add_argument("-region_file", help="LD-independent regions.")
parser.add_argument("-parquet_genotype", help="Parquet Genotype folder")
parser.add_argument("-parquet_genotype_folder", help="Parquet Genotype folder")
parser.add_argument("-parquet_genotype_pattern", help="Pattern to detect parquet genotypes by chromosome")
parser.add_argument("-parquet_genotype_metadata", help="Parquet Genotype variant metadata file")
parser.add_argument("-window", help="How far to extend in each direction when searching for variants", type=int, default=0)
parser.add_argument("-chromosome", help="Work only with one chromosome", type=int)
parser.add_argument("-text_output", help="Where to save stuff")
parser.add_argument("-text_output_folder", help="Where to save stuff")
parser.add_argument("--frequency_filter", help="Skip variants with frequency (below f) or above (1-f)", type=float)
parser.add_argument("-sub_batches", help="Split the data into subsets", type=int)
parser.add_argument("-sub_batch", help="only do this subset", type=int)
parser.add_argument("-parsimony", help="Log verbosity level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default = "10")
parser.add_argument("--dapg_output", help="Output matrices in DAP-G format", action="store_true")
parser.add_argument("--standardise_geno", help="Standardise geno, or get correlation matrix", action="store_true")
args = parser.parse_args()
Logging.configure_logging(int(args.parsimony))
    run(args)
|
the-stack_106_29760 | #!/usr/bin/env python3
from pydbusbluez.device import Device, Adapter
from pydbusbluez.error import BluezDoesNotExistError, BluezError, DBusTimeoutError
from pydbusbluez.gatt import Gatt, FormatUint8, FormatBitfield
from pydbusbluez.gatt_generic import device_information
import sys
from gi.repository.GLib import MainLoop, timeout_add_seconds
from argparse import ArgumentParser
from time import sleep
from datetime import datetime
def_adapter = "hci0"
def print_char(gatt_char, new_value):
print(
datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"New value:",
gatt_char.service.name,
gatt_char.name,
" = ",
str(gatt_char.form.decode(new_value["Value"])),
)
def read_char(gatt_char):
try:
        if gatt_char.read({"timeout": 2}) is None:
print("Warn timeout")
except DBusTimeoutError:
pass
return True
def main():
parser = ArgumentParser(description="bluetooth tester")
parser.add_argument(
"-i",
"--adapter",
metavar="hciX",
default=def_adapter,
help="bluetooh adapter to use (default={})".format(def_adapter),
)
parser.add_argument(
"-a",
"--device",
metavar="addr",
default=None,
help="device address to connect to",
)
parser.add_argument(
"-p",
"--pair",
default=False,
action="store_true",
help="Send pairing request to device, if not paired (Needs an agent)",
)
parser.add_argument(
"-k", "--keep", default=False, action="store_true", help="keep connection alive"
)
parser.add_argument(
"-l",
"--loop",
default=False,
action="store_true",
help="loop requesting info (sleeps 1s)",
)
parser.add_argument(
"-w",
"--wait",
metavar="sec",
default=1,
type=int,
help="time to wait before starting to read",
)
args = parser.parse_args()
print("Scanning on: {}".format(args.adapter))
try:
adapter = Adapter(args.adapter)
except BluezDoesNotExistError as e:
print(str(e))
sys.exit(2)
devs = adapter.devices()
dev = None
for d in devs:
da = d.address()
if da and da.upper() == args.device.upper():
print("Found {}: {}".format(args.device, d))
dev = d
if not dev:
adapter.scan()
sleep(3)
sr = adapter.devices()
for d in sr:
da = d.address()
if da and da.upper() == args.device.upper():
print("Found {}: {}".format(args.device, d))
dev = d
if not dev:
print("Could not find device nearby: {}".format(args.device))
adapter.scan(enable=False)
sys.exit(1)
adapter.scan(enable=False)
if dev.connected():
print("Already connected: {}".format(dev))
else:
if args.pair:
if not dev.paired():
print("Device is not paired")
print("Connecting/pairing to: {}".format(str(dev)))
dev.pair()
            # wait for pairing-agent
wait_secs = 60
while wait_secs > 0 and not dev.paired():
wait_secs -= 1
sleep(1)
if not dev.paired():
print("Pairing failed")
sys.exit(1)
if not dev.trusted():
dev.trust(True)
print("Device is now trusted")
if not dev.connect():
print("Connecting failed")
sys.exit(1)
gatt = Gatt(dev, [device_information])
gatt.resolve()
if not dev.services_resolved:
print("Waited not long enough for service resolving, did not find uuids")
sys.exit(1)
print("Service UUIDs resolved")
dev_info = gatt.device_information # pylint: disable=no-member
for dinfo in dev_info.chars:
if dinfo.obj:
print(dinfo.name, ":", dinfo.read(options={"timeout": 4}))
if args.wait > 0:
sleep(args.wait)
if args.loop:
loop = MainLoop.new(None, False)
for dinfo_char in dev_info.chars:
if dinfo_char.obj:
# add callback for printing if new value is avalable
dinfo_char.onPropertiesChanged(print_char)
# add cyclic read every 1 sec
timeout_add_seconds(1, read_char, dinfo_char)
try:
loop.run()
except (KeyboardInterrupt, SystemExit) as e:
print("Interupted:", str(e))
if not args.keep:
dev.disconnect()
if __name__ == "__main__":
main()
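# Example invocation (assumed script name; the adapter and device address are
# placeholders):
#   python3 device_info_reader.py -i hci0 -a AA:BB:CC:DD:EE:FF -p -l
# This scans on hci0, pairs with the device if necessary, reads the Device
# Information service once and, with -l, keeps re-reading and printing it every
# second until interrupted.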
|
the-stack_106_29762 | import typing
from .model.snowflake import Snowflake
class CacheContainer:
def __init__(self, default_expiration_time=None, **max_sizes):
self.default_expiration_time = default_expiration_time
self.__cache_dict: typing.Dict[str, typing.Union[dict, CacheStorage]] = {"guild_cache": {}}
self.max_sizes = max_sizes
def get(self, snowflake_id: typing.Union[str, int, Snowflake], storage_type: str = None, *, ignore_expiration=True):
if storage_type:
return self.get_storage(storage_type).get(snowflake_id, ignore_expiration=ignore_expiration)
for x in self.__cache_dict.values():
if isinstance(x, dict):
continue
res = x.get(snowflake_id, ignore_expiration=ignore_expiration)
if res:
return res
def get_storage(self, storage_type: str):
if storage_type == "guild_cache":
return self.__cache_dict["guild_cache"]
if storage_type not in self.__cache_dict:
self.__cache_dict[storage_type] = CacheStorage(max_size=self.max_sizes.get(storage_type, 0), root_remove=self.remove, cache_type=storage_type)
return self.__cache_dict.get(storage_type)
def get_guild_container(self, guild_id: typing.Union[str, int, Snowflake]):
guild_caches = self.get_storage("guild_cache")
guild_id = Snowflake.ensure_snowflake(guild_id)
if guild_id not in guild_caches:
guild_caches[guild_id] = GuildCacheContainer(default_expiration_time=self.default_expiration_time)
return guild_caches[guild_id]
def add(self, snowflake_id: typing.Union[str, int, Snowflake], obj_type: str, obj, expire_at=None):
if not expire_at:
expire_at = self.default_expiration_time
if obj_type not in self.__cache_dict:
self.__cache_dict[obj_type] = CacheStorage(max_size=self.max_sizes.get(obj_type, 0), root_remove=self.remove, cache_type=obj_type)
self.__cache_dict[obj_type].add(Snowflake.ensure_snowflake(snowflake_id), obj, expire_at)
def remove(self, snowflake_id: typing.Union[str, int, Snowflake], obj_type: str):
if obj_type in self.__cache_dict:
self.__cache_dict[obj_type].remove(snowflake_id)
if "guild_cache" in self.__cache_dict:
for x in self.__cache_dict["guild_cache"].values():
x.remove(snowflake_id, obj_type)
def get_size(self, cache_type: str):
storage = self.get_storage(cache_type)
return storage.size
@property
def available_cache_types(self):
return self.__cache_dict.keys()
@property
def size(self):
ret = 0
for k, v in self.__cache_dict.items():
if k == "guild_cache":
for b in v.values():
ret += b.size
continue
ret += v.size
return ret
class GuildCacheContainer(CacheContainer):
def get_guild_container(self, *args, **kwargs):
raise NotImplementedError
class CacheStorage:
def __init__(self, max_size: int = 0, root_remove=None, cache_type=None):
self.__cache_dict = {}
self.max_size = max_size
self._root_remove = root_remove
self.cache_type = cache_type
def __iter__(self):
for x in self.__cache_dict.values():
yield x
def get(self, snowflake_id: typing.Union[str, int, Snowflake], *, ignore_expiration=True):
res = self.__cache_dict.get(Snowflake.ensure_snowflake(snowflake_id))
if res: # TODO: add expiration time check
return res["value"]
def add(self, snowflake_id: typing.Union[str, int, Snowflake], obj, expire_at=None):
snowflake_id = Snowflake.ensure_snowflake(snowflake_id)
self.__cache_dict[snowflake_id] = {"value": obj, "expire_at": expire_at}
if 0 < self.max_size < self.size:
while self.size > self.max_size:
key = (*self.__cache_dict.keys(),)[0]
self._root_remove(key, self.cache_type)
# del self.__cache_dict[key]
def remove(self, snowflake_id: typing.Union[str, int, Snowflake]):
snowflake_id = Snowflake.ensure_snowflake(snowflake_id)
if snowflake_id in self.__cache_dict:
self.__cache_dict.pop(snowflake_id)
@property
def size(self):
return len(self.__cache_dict)
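# A small usage sketch (the IDs, values and size cap below are placeholders):
#
#   cache = CacheContainer(default_expiration_time=None, message=1000)
#   cache.add(1234567890, "message", {"content": "hello"})
#   cache.get(1234567890, "message")       # -> {"content": "hello"}
#   cache.get_guild_container(987654321)   # per-guild GuildCacheContainer
#
# The keyword arguments to CacheContainer (here message=1000) cap the size of the
# matching CacheStorage; once the cap is exceeded the oldest entries are evicted.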
|
the-stack_106_29764 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: conv2d using tensorcore"""
import akg.tvm as tvm
import akg.topi as topi
import akg.utils as utils
def TensorcoreConv(data, weight, stride=[1, 1], pad=[0, 0, 0, 0], dilation=[1, 1], out_dtype="float32",
name="out", target=utils.CUDA):
batch, in_h, in_w, in_c = data.shape
out_c, k_h, k_w, _ = weight.shape
pad_top, pad_bottom, pad_left, pad_right = pad
s_h, s_w = stride
d_h, d_w = dilation
k_h_d = (k_h - 1) * d_h + 1
k_w_d = (k_w - 1) * d_w + 1
o_h = (in_h + pad_top + pad_bottom - k_h_d) // s_h + 1
o_w = (in_w + pad_left + pad_right - k_w_d) // s_w + 1
has_pad = not(pad_left == 0 and pad_right == 0 and pad_top == 0 and pad_bottom == 0)
if has_pad:
data_pad = tvm.compute(
(batch, in_h+pad_top+pad_bottom, in_w+pad_left+pad_right, in_c),
lambda n, h, w, i: tvm.if_then_else(
                tvm.all(h >= pad_top, h - pad_top < in_h, w >= pad_left, w - pad_left < in_w),
data[n, h - pad_top, w - pad_left, i],
tvm.const(0.0, "float16"),
),
name="Pad",
)
else:
data_pad = data
rc = tvm.reduce_axis((0, in_c), name="rc")
rh = tvm.reduce_axis((0, k_h), name="rh")
rw = tvm.reduce_axis((0, k_w), name="rw")
if out_dtype == "float32":
out = tvm.compute(
(batch, o_h, o_w, out_c),
lambda n, h, w, o: tvm.sum(
data_pad[n, (h * s_h + rh * d_h), (w * s_w + rw * d_w), rc].astype("float32")
* weight[o, rh, rw, rc].astype("float32"),
axis=[rc, rh, rw]),
name=name
)
else:
out = tvm.compute(
(batch, o_h, o_w, out_c),
lambda n, h, w, o: tvm.sum(
data_pad[n, (h * s_h + rh * d_h), (w * s_w + rw * d_w), rc]
* weight[o, rh, rw, rc],
axis=[rc, rh, rw]),
name=name
)
return out
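# Output-shape sanity check (illustrative example, not part of the original file):
# with NHWC data of shape (16, 224, 224, 64), OHWI weight of shape (128, 3, 3, 64),
# stride [1, 1], pad [1, 1, 1, 1] and dilation [1, 1]:
#   k_h_d = k_w_d = 3, o_h = (224 + 1 + 1 - 3) // 1 + 1 = 224, o_w = 224,
# so TensorcoreConv returns a tensor of shape (16, 224, 224, 128).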
|
the-stack_106_29765 | import os
import unittest
from openprocurement.agreement.core.tests.base import BaseAgreementWebTest
from openprocurement.agreement.cfaua.tests.base import TEST_AGREEMENT
class Base(BaseAgreementWebTest):
relative_to = os.path.dirname(__file__)
initial_data = TEST_AGREEMENT
initial_auth = ('Basic', ('broker', ''))
class AgreementContractsResourceTest(Base):
def test_get_agreement_contracts(self):
resp = self.app.get('/agreements/{}/contracts'.format(self.agreement_id))
self.assertEqual(resp.status, '200 OK')
resp = self.app.get('/agreements/{}/contracts'.format('some_id'), status=404)
self.assertEqual(resp.status, '404 Not Found')
def test_get_agreement_contracts_by_id(self):
response = self.app.get('/agreements/{}/contracts/{}'.format(self.agreement_id, 'some_id'), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'contract_id'}])
while True:
resp = self.app.get('/agreements')
if len(resp.json['data']) >= 1:
break
agr_id = resp.json['data'][0]['id']
response = self.app.get('/agreements/{}/contracts'.format(agr_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
contract_id = self.initial_data['contracts'][0]['id']
response = self.app.get('/agreements/{}/contracts/{}'.format(agr_id, contract_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], self.initial_data['contracts'][0])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AgreementContractsResourceTest))
return suite
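# The suite can be run directly with the stock unittest runner (assumed invocation):
#   unittest.TextTestRunner().run(suite())
# or from the command line: python -m unittest path.to.this_module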
|
the-stack_106_29766 | import socket
import sys
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
addr = ('localhost', int(sys.argv[1]))
print >>sys.stderr, 'listening on %s port %s' % addr
sock.bind(addr)
while True:
buf, raddr = sock.recvfrom(4096)
print >>sys.stderr, buf
if buf:
sent = sock.sendto(buf, raddr)
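# Example session (assumed script name): start the echo server on port 9999 and poke
# it with netcat; note the Python 2 print syntax used above.
#   python2 udp_echo.py 9999
#   echo -n hello | nc -u -w1 localhost 9999   # the datagram is echoed back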
|
the-stack_106_29768 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
# qt_resource_data: the compiled PNG payload originally followed here as several
# hundred lines of hex escapes; the dump is truncated mid-string, so only a
# placeholder assignment is kept.
qt_resource_data = b""
\xac\x87\xf3\x49\xb1\x81\xa6\x81\x54\xd1\xaa\x5c\x34\xdf\x86\x2a\
\x64\x4b\xc8\xd6\x4e\xf2\x33\x61\x93\x26\x05\xfb\x54\xd1\x50\xa7\
\xfe\x5c\xb4\x28\xc2\xbe\x07\xa1\x41\x3e\xf5\xef\xba\x56\xad\x5a\
\x9f\x59\x4d\x43\x8d\x20\xf3\xd7\x5a\x86\x03\xd3\x17\xc6\xea\xb8\
\xb5\x85\x57\xa8\x93\x38\x41\xb6\x75\x36\xf8\xd2\x9c\x8d\x1d\x71\
\x10\xb6\xf8\xa0\x1b\xf0\x64\xe0\x95\x87\x3a\xd8\xf3\x0c\x5e\xd2\
\x2d\x63\x88\x90\xdc\x08\x6a\xd3\x30\x22\x25\xea\x83\x28\xb9\x76\
\x60\xc8\x50\x49\x6c\x76\x52\x50\x8c\x89\xe0\x8c\xce\x04\x8a\xc5\
\xa3\x27\x76\x0a\xdc\x0b\x6f\x14\x3c\xef\x47\xe6\x43\x1e\xa3\x10\
\xd6\x72\x45\x9d\x5c\xfc\xf3\xb2\xe0\xb7\x39\x07\x2b\x93\xef\x41\
\xf6\x1b\x6e\x92\x93\xc0\x83\x5f\xe1\x5c\xc1\xee\x80\xcc\x70\xd1\
\xe8\x55\x15\x74\xad\x5a\xb5\xa6\xd6\xc4\x0e\x5a\x65\xc8\x83\x17\
\x68\xec\x1c\xf3\xd0\x07\x4b\xcb\x60\x0b\x8a\xae\x9a\xff\xce\xc9\
\x26\x3a\x78\x39\x61\x63\xbe\x00\x06\xff\xc4\x24\xb3\x08\x8a\x13\
\xfd\x91\xc7\x0a\x0b\xf8\x8e\xb4\xd5\x40\x48\xcf\xae\xfc\xe2\x4e\
\xaa\x65\x56\x25\x09\x36\x82\x2c\x15\x12\xb3\x82\xc6\xdc\x8d\xfc\
\x3a\xb2\x22\x23\x4d\x23\xea\x2c\x2d\x91\x03\xcd\xe1\x10\x33\x41\
\x70\xc1\x77\x2a\xf4\xb9\xe6\x6d\x07\xe3\x92\x72\x96\x09\x82\xda\
\x63\xff\x08\x08\x20\x48\x9a\x68\x7e\xd7\x38\xde\xf4\xba\x15\x9e\
\x85\x84\xbf\x89\x6f\x97\x17\x6b\x28\xd1\xdf\xb5\x6a\xd5\xaa\xf5\
\xf1\x35\xb1\x83\xf6\xbd\x0b\x96\x20\x93\x48\x98\xa8\xb2\x2a\x43\
\x0d\x66\x46\x05\x49\x53\x57\x1b\x86\xa1\x0f\xad\x8b\x0d\x74\x28\
\x58\x9c\x6e\xe5\x0f\x24\x80\x2c\x65\xc8\x34\x89\xda\x0d\x79\xb9\
\xa2\x09\xa1\x0f\xa4\x23\x58\x24\xcf\x98\x14\x4a\xa4\xfc\xb3\x3c\
\x4d\xe7\x69\x73\x02\x41\x51\x04\xdb\xc3\x78\x61\x48\x63\xe8\x3e\
\xcb\xfb\x94\x4e\x1f\x05\x8a\x84\x03\xcb\x79\xde\x81\xf0\x08\x27\
\x8f\x17\x13\x12\x39\xd2\xe4\xe4\x2d\x0f\xa3\x11\x87\x29\x95\x3a\
\xa8\x52\xab\x56\xad\x69\x35\xf5\x90\x90\x1b\x47\x85\x99\x11\x7c\
\x70\xe7\xf1\x90\xa2\xb4\x03\x30\xc9\xe9\x9a\xff\xa0\x81\x55\x09\
\xc7\xc7\x2a\xb4\xb1\xa5\x11\x15\x62\x61\x68\x7e\x05\x82\x09\x34\
\x08\x1f\xc3\x05\xf2\x29\x63\x70\x38\x03\x24\xca\xb0\x5e\x86\x1b\
\xc3\x48\x34\x92\x10\x1c\x4f\x13\xa9\x35\x51\x21\x9a\x93\xad\x52\
\x7e\x5f\xe9\xea\xe3\x91\xc2\x30\x0c\xb0\x40\xbc\x01\xe5\x87\xf3\
\x69\xdc\x9c\xb7\x25\xdf\x2b\x69\xde\x31\x4c\x03\x92\xbd\xa4\x19\
\x24\x8e\x25\x7f\xd4\x18\xb4\xd5\xb5\x6a\xd5\xaa\xf5\x91\x35\x71\
\x50\x85\xfc\x2c\x7e\x5e\x67\x1e\xda\xb1\x59\x33\xbc\xa0\x9b\xc6\
\x64\x16\x98\xc4\xed\x59\x9e\x0a\x80\x44\x5b\xe9\x59\xd8\x51\xda\
\xd8\x3c\x85\xe8\xc4\xc2\x9f\x86\x53\x38\x2f\x03\x30\x59\xb9\xc7\
\x15\x78\xda\x50\x3c\xf6\x65\x4a\x06\x0d\x6c\xf2\xd2\xe3\xbc\xbf\
\xc4\xfc\xca\xd9\x63\xd6\xd7\xa5\x41\x71\x0c\xc2\x08\x74\xcb\x9e\
\xc1\x53\x8e\x3e\x38\x2f\x5d\xb3\x4a\x24\x33\xce\x30\x89\xe4\xc0\
\x10\x1f\x02\x10\x28\x08\x0d\x61\xea\xa3\x35\x24\x80\x99\x9c\x06\
\xe3\xcc\xf2\x3a\x23\x3e\x1d\x32\x0b\x8e\x30\xc6\x61\x28\x91\xd2\
\xe8\x8a\x68\xf8\xc8\x7c\xe4\x0f\xb8\x56\xad\x5a\xb5\x72\x4d\xe4\
\xa0\xc9\x3a\xf6\xfe\xd1\x4c\xa9\x86\xe4\xc7\x8f\xcf\xf2\x04\x7f\
\x64\xa2\xb1\xca\x2d\x48\x9b\x2c\x83\x1c\x83\x46\x62\x7c\x60\x56\
\x1e\xa4\x62\xee\x31\x32\x74\x4e\x6e\xd0\x4a\xe7\x47\x31\x87\x9d\
\xfa\xd3\xc2\x57\x70\x43\x6e\xf2\x4c\xe3\xb8\x37\xcf\x4f\x26\x50\
\x1a\x65\x76\x90\x5d\xfc\xc3\x48\xbd\x97\x3a\x6a\x4a\x6d\xb2\x7c\
\x14\x18\xb3\x28\x9a\x68\x94\xe6\x45\x29\x95\xc0\x0f\x27\x96\xf1\
\x9b\x1c\xb5\xc5\x4f\xd6\x7f\x78\x6b\xf2\x53\xf3\xce\x9b\x21\x09\
\xa6\x56\xad\x5a\xb5\x3e\xaa\xa6\x71\xd0\xda\xb4\xac\xc8\x08\x43\
\xd2\x20\x33\xb3\xe0\x67\x8b\xbf\x28\xe5\xf1\x3f\x80\xa0\x66\x46\
\x24\xa8\xbf\x45\xe7\x31\x34\xff\xad\x04\x2d\x11\x14\x32\x58\x3d\
\x5e\xf3\xc3\x11\xf3\xac\xaf\xf0\x2e\x8b\x2b\x28\x2f\x38\xf2\xab\
\x1b\xc8\x93\x7c\x2d\x55\xee\x9f\x46\x64\x88\xce\x8e\x73\x63\x2f\
\xa4\xc2\x7b\x50\x36\xb9\x86\xbd\x06\x66\x50\x02\x62\x0a\xf8\x8b\
\xb0\x6d\x8d\x0d\xf3\x46\x6f\x36\xa4\x7e\xfd\x53\xff\xae\x6b\xd5\
\xaa\xf5\x99\xd5\x44\xc3\xfe\x08\x7c\xbd\x02\x43\xec\x45\x7d\xe1\
\x1d\x2c\xdc\x40\x26\x88\xae\x41\x40\x1f\x8f\x60\xb2\x4e\x1b\x88\
\x3c\x04\x16\x9d\xb4\xc5\x00\x44\x9f\x0e\xfc\xc4\x57\x34\xe4\x4e\
\x39\xcd\x1c\xfa\x3c\xcf\x2d\x8f\x24\x5a\x43\x44\x6d\xb8\x90\x2e\
\x22\xbc\xd4\xc8\x43\x10\x42\x19\x8f\x75\xc2\x77\x10\xe7\x3c\x48\
\xc8\x8d\x73\x96\xcd\x49\x42\x40\xb9\x4f\x95\x97\xcd\x7e\x49\xac\
\x0e\x94\x01\xf4\x50\x28\x94\x01\xe2\x8b\x01\x88\x19\xa6\x1c\x95\
\x70\x39\x5e\x62\x65\x32\x13\x93\x93\x5c\x98\x0e\x71\x75\x92\xb0\
\x56\xad\x5a\xd3\x6a\x1a\x40\x5b\xe7\x35\x7b\xc9\xf1\xc7\xfa\x34\
\x2d\xc2\xe2\x67\x9f\xe5\x69\xf8\x17\xec\xf3\x25\xb3\x55\x52\x4b\
\x52\x32\xd4\x70\x62\xa7\x40\x2b\x07\xf1\x3c\x12\x35\x71\x7c\x04\
\x6a\x10\x9f\x27\xc2\x05\xc7\x29\x05\xaf\x84\xf1\x81\x1e\x29\x35\
\xb4\xb7\x3c\x58\x68\x34\x0d\x1d\xea\x07\x14\x47\x48\xc4\x76\x12\
\x81\x94\xa5\x04\xfd\xe9\x6f\x04\x1e\xc9\xe2\x63\x98\x4f\x49\x09\
\x2f\xc0\x74\x43\x34\xf0\x1e\x21\x65\xb5\x94\x37\x25\x7f\xc1\xbb\
\xc9\x16\xa6\x21\x0f\x91\x67\x1f\xa5\x5a\xb5\x6a\xd5\x9a\x50\x13\
\xc7\xdb\x78\xce\x03\x76\x6e\x89\x67\x70\x6c\xfd\x8c\x96\x3a\x90\
\x8d\x00\x1e\xbf\x15\x61\x16\x93\x26\x41\x27\xc7\xa2\x30\x28\xea\
\x46\xc6\x9e\x63\xf7\x67\xd1\x78\x14\xd5\xc7\x48\xcb\x21\x9e\x19\
\xa4\x3e\x94\x5b\x94\xef\xca\xb0\xb5\x38\x32\x07\x78\x3d\x0b\xc9\
\x20\xfc\x86\x3c\xae\x21\x43\x66\x87\x52\x4f\xb2\x95\xc8\x02\x63\
\xff\xbc\x3f\x80\x75\xd3\x34\x63\x66\x43\x06\x0b\x85\x45\xa1\x81\
\x90\x29\xfa\x8c\x0f\x6e\x4f\xde\x8b\x36\xd9\x85\x35\xa4\xae\xbd\
\x4e\x12\xd6\xaa\x55\x6b\x6a\x4d\x94\xd9\xf1\x59\xa0\x69\x8c\xc3\
\xa0\x34\xe9\x46\x7c\x9b\x99\x93\x6d\x98\xe8\x50\x25\x56\x45\xa1\
\x97\x16\x17\x3a\x11\x33\x88\x1e\x8d\x54\xa1\x23\x74\x66\xb1\x75\
\x11\x1f\xd3\x07\xea\x66\x50\x10\xce\x8b\x46\x42\xe5\xe4\xd9\x31\
\x8b\x5d\xf0\x5d\xd6\x11\x93\x23\x6c\x1b\xf4\x87\x6e\x57\xce\x29\
\xcb\x22\x12\x7f\x2b\x7c\x34\xb7\xdb\xf2\xee\xf2\xdd\x12\x0e\x12\
\xc5\xa9\x8e\xf2\xb7\xb4\x36\xe5\xba\x3a\x05\xd5\xea\x4c\x8f\xfb\
\xd2\xf2\xab\x2c\xc4\xc6\x0f\x84\x7b\x69\xdc\x9f\xe4\x73\xd5\xaa\
\x55\xab\xd6\x84\x9a\x06\xd0\x86\x49\x0c\xad\x42\x07\x45\x1a\xac\
\xda\x34\xe5\xe9\x11\xdf\x98\x24\x84\xd3\x94\x03\x04\x83\x4c\x75\
\x43\x91\x27\xd3\x78\xac\x58\xf3\x32\x29\x12\x1b\xd0\xd8\xf3\x16\
\x0b\x50\x21\xa9\xcb\x64\xdf\x68\x9e\x3b\x39\x74\x8c\x3c\x42\xc7\
\x34\x48\x72\xde\x00\x6e\x26\x39\x76\x51\x74\xb0\x55\x11\x03\x30\
\xa3\x36\x4c\xf7\xb3\xf4\x22\x29\xa1\x01\xac\xe2\x75\x04\x68\x17\
\xfa\x1b\x94\x8b\x8c\x9f\xa8\x7c\x18\x99\x1a\x76\xfd\xa1\xe7\xb5\
\x10\xdd\x22\xbf\x16\xb8\x77\x08\xcf\xc5\xfb\x11\x1f\x28\x4a\xfe\
\x4c\xba\x0a\xed\x6a\xd5\xaa\x35\xad\xa6\x9a\x25\x09\x8a\x05\x18\
\xe1\x0f\xcc\x6a\x0e\xfa\x63\x4b\xb9\xa0\x5c\x12\xc7\x69\x72\xf2\
\x48\xb0\xa9\x9b\x26\x04\xfa\x69\x9f\xe2\xa9\xd0\x95\x42\xa7\x21\
\x2d\x33\x41\x70\xcc\xac\x05\x6b\x8e\x55\x28\x46\x48\xd0\xc6\x19\
\xe7\x04\xe5\xb5\x4c\x89\xa3\xb3\xce\x99\xb5\x08\xb1\xa2\x6c\x6a\
\x2a\xe7\x96\x04\x24\x75\xce\x09\x9b\x51\xf2\x5f\xc6\x8a\xec\x32\
\x41\x53\x5e\x1b\xff\x84\xff\x51\x9a\x7c\x91\x37\x38\xa2\xaa\xf9\
\xc0\x73\x3c\x62\x43\xb2\xf1\xe4\x5e\x3b\x49\xf4\xb2\x17\x14\x73\
\x23\xac\x99\xae\x27\x84\xb5\x6a\xd5\x9a\x5c\x93\x27\x09\x3d\x46\
\x96\x61\xb6\xe9\x44\x65\x01\xb0\x66\x24\x14\x1e\x59\x9a\xd6\xa6\
\xd1\xf1\xdb\x8d\x36\x3c\x15\x32\x0c\x75\x17\x63\x23\x5f\x1a\x64\
\xe6\x76\x83\xce\x13\x2b\x0e\x34\x45\x7c\x9e\x55\xd9\x12\x23\x2e\
\x25\xa6\x77\xc6\x64\x5a\x23\xb7\xbc\x68\x5e\x8b\x2f\x92\xc6\xf0\
\x8b\x2a\x1d\xb4\x11\x5c\xc6\x9f\x63\x03\xfe\x11\x37\x42\x99\x9d\
\xa0\xfc\xf2\x21\x22\xe0\x0f\x5c\x4a\xd1\x7b\x10\x0d\x59\x88\x49\
\xe0\x31\x0a\x28\x48\xdf\x95\x2c\x5d\x11\xba\x20\x5a\x51\x4d\xce\
\xe7\xad\x55\xab\xd6\x7f\xf5\x9a\x38\x49\x18\x62\xcb\x6a\xb5\xe3\
\x19\xba\x4c\x08\xeb\x11\xf8\xe6\x3c\xa9\x88\xd1\xa1\x87\x65\x52\
\x12\x48\x87\x3c\xea\x5c\x00\x11\x38\xee\xca\xe1\x19\xc8\x0d\x27\
\x62\x3c\x68\x2c\x8a\xb9\xe8\x60\xfb\x49\xf9\x64\x92\xb0\xac\x50\
\x0a\x49\xc9\xc6\x0a\x0b\x57\x38\xea\x90\x02\x05\x79\x44\xbc\xd1\
\xec\xb3\x97\xfe\x37\x7e\x27\x08\x12\xc4\xfa\x3e\xdf\x00\xe1\x43\
\x80\xa1\x6c\xc9\x2f\x14\x73\xea\xed\x75\x91\xfd\xa5\x57\xa5\xe8\
\x16\xca\x22\xbc\x14\xfb\x92\xed\xad\xd3\xb1\x68\x04\xe6\xa6\xf7\
\xa7\x46\xcf\x3e\xf5\xef\xba\x56\xad\x5a\x9f\x59\x4d\x04\x68\x66\
\x13\x0c\x2b\x39\x94\xf4\xaa\x54\xb0\x32\x91\xb3\xe0\xa0\x9b\xa6\
\x71\xae\x4b\xe7\x6f\x69\x62\x85\x4a\xf0\x60\xfa\xe0\x0f\x3e\x97\
\xd3\x46\xc8\xe7\x5c\x6d\x57\x4e\x0e\xcb\xe1\x9e\x73\x7e\x3c\x9f\
\x82\x9b\x60\x1f\x22\x6d\x34\x1a\x6a\x92\xd8\x6e\x21\x46\xc0\x27\
\x83\xef\xd5\x92\xa4\x02\xa9\x1f\x67\xd2\x6a\x69\x72\x25\xb2\x76\
\x80\xd7\x0f\x0d\x95\xf2\x7c\x8d\x1f\x9d\xfb\x91\x08\xf2\xe4\x49\
\x59\xf1\x9d\xd8\x8f\xf2\xf7\xec\xa5\x27\xfe\x1e\x61\xdc\x83\xcb\
\x40\x64\xdb\x34\xb5\x83\xae\x55\xab\xd6\xd4\x9a\x78\x48\x68\x1a\
\xcd\x93\x84\x0e\x1a\x0d\x01\x35\x92\xb1\x0d\xf1\xf1\x17\x99\x19\
\x27\x97\x84\xa4\x82\x00\xc3\x0b\x0d\x03\x0e\xd2\x04\xee\x8a\xc7\
\x10\x80\x15\x7e\xcd\x08\xc3\xca\xe1\xdb\x22\xe2\x60\x8d\x73\xd1\
\x35\xd3\x10\x15\x18\x8a\x5a\x83\x44\xe1\x17\x04\x3a\x43\x39\x54\
\x8c\x8f\x7b\xeb\x8c\x92\x41\x70\xc2\xbf\xa5\xdb\xe5\x8e\x1f\x6f\
\x65\x34\xcf\x9d\x5c\x45\xca\x1e\x20\x1b\x4a\xba\xc9\x0c\xb5\x2a\
\x50\x89\x05\x20\xca\x13\x86\x45\x70\x9d\xd3\x5b\xd8\x11\x9b\xb7\
\x03\xaf\x9c\xcb\x9f\x00\x08\x1e\x1d\xbe\x2a\xa1\x6b\xd5\xaa\x35\
\xad\x26\x66\x12\x1a\xd5\x32\xbf\xeb\xa0\xd1\xd0\xaa\xe4\x3c\x51\
\x82\x32\x62\x88\xe2\x26\x96\x71\x17\x16\xce\xdc\x8e\x12\xce\xfd\
\x1a\xcd\x48\x15\x54\x03\xa1\x74\x92\x6a\x04\xca\x7e\x44\xc8\x07\
\x34\xd2\xb1\x52\x70\x7e\x3c\x42\x22\xa2\xb7\x6c\xa8\xaf\x45\xe3\
\x21\x87\x75\xc9\x94\x29\x71\x0c\x0a\xdd\x2f\xbc\x89\x54\xea\xd6\
\xe3\xc5\x59\xbe\xc1\x56\xfa\x36\xee\x1d\x32\xe4\x87\x63\xbb\x20\
\x48\x4a\x9e\xcd\x36\x20\xce\x0b\x50\x70\x53\x93\x46\x13\x81\xf9\
\x68\xf0\x4b\x7c\x17\x3a\x6c\x2b\xf6\xd1\x6c\x6d\x2a\x31\xe7\xf8\
\x6c\xe0\xf0\x88\x97\x96\x3e\xae\x6d\xb4\xc3\xf4\xa0\x29\x0d\x76\
\xad\x5a\xb5\x6a\x4d\xa9\x69\x9f\xbb\x91\x5e\x82\x29\x40\x36\x1a\
\xe5\x2f\xc0\x16\x8b\xa3\xd5\xc8\xae\x28\x5b\x52\x18\xac\x2f\x99\
\x52\xd9\x5a\x83\x54\x19\x27\xa1\xd1\xa0\x87\x46\xe0\xab\x0a\x9a\
\x5c\x44\xd2\x80\x07\x1a\xe0\x26\x07\x66\xa1\x1b\x4d\x9d\xaf\xbc\
\x90\x8d\xf3\x19\x3e\xe3\x25\x0c\x6e\x43\x1a\xf3\x74\x1d\xe2\x88\
\x58\x93\x0c\xf0\x84\x14\xc6\xbd\xc1\xfb\x1f\x37\x43\x69\xf2\x1b\
\x97\x75\x12\x0a\x03\xf9\x87\xce\x01\x02\xc9\x96\xc3\x68\x3d\x8a\
\xce\x52\xe9\x56\x73\x72\x21\x3c\xfc\x3d\x7c\x9f\xa5\x35\x67\x01\
\x49\x13\xf7\x22\x96\x21\x46\xc4\xb6\x8a\x5d\xa4\xe3\xcb\xad\x0c\
\x5e\xd6\xaa\x55\xab\xd6\xc7\xd7\x34\x80\xce\xc8\xa5\xc0\xd0\x1a\
\xad\x1a\xc5\xfa\x36\x13\xc1\x47\xa4\x69\x85\x28\x48\x29\x7f\x11\
\xc4\x9d\x28\xa6\x85\x00\x4e\x34\xc8\x88\xa5\x1d\x61\x5f\xfc\x4e\
\x4f\x2a\x2e\x63\xb9\x05\x06\x28\x73\xd3\x8d\x93\x39\x04\xb1\xa4\
\x30\x40\xb6\xe2\x80\x59\x29\xa7\x56\xc9\x97\xf7\xf9\xb4\x31\x36\
\xad\xd6\x95\x23\x4c\x81\xf5\x90\x93\x10\x4b\xb1\xba\x8f\x9f\xc6\
\x17\x48\xa3\x8f\x9a\xb2\xbf\x07\x7e\x2e\xb9\x5b\x27\x35\x68\x36\
\xb0\x54\xb9\x8d\xb8\x3f\x58\x46\x76\xd8\x24\x39\xdf\xf3\xa8\x62\
\xa2\xa6\x39\xd6\x4b\xa0\x19\x6e\xfd\x56\xa2\xbf\x3e\xf5\xef\xba\
\x56\xad\x5a\x9f\x59\x4d\xe4\xa0\x39\xbb\x5b\x19\x15\x1b\x42\x28\
\x85\xa1\xb8\xf0\x0c\xa5\x02\x85\xe9\xf4\x2c\xeb\x34\x0c\x89\xef\
\x1d\x37\xd1\x7d\x44\x28\xe7\x2c\x1e\x67\xc2\xc3\xc9\xa4\x08\x81\
\x0a\x90\x4c\x6e\x26\x25\x1a\x26\x48\x40\xdb\x42\x89\x11\x91\xde\
\x41\x79\xad\x21\xd4\xd0\x18\xfa\x53\x32\x90\x82\x31\x6e\x4a\x54\
\x04\x5f\x11\x30\x2d\xde\xa6\x64\x45\x8b\x2c\x01\x28\xf2\xa2\xc0\
\xf9\x27\x0a\x5e\xa3\x4e\xb8\x67\x0f\xe6\x83\xc4\x37\x35\xa4\x26\
\x3a\x88\x4c\x44\xa0\xb9\x68\xa2\xb3\xf3\x92\xb8\x9e\x32\x0d\xd2\
\x18\x89\xc0\x92\xd9\x16\x90\x35\x2a\xc5\x70\xc5\x4e\x3c\x18\xf1\
\x63\xc2\x6b\xf9\x5d\x57\x33\xbb\x5a\xb5\x6a\x4d\xad\x69\x00\xed\
\xd0\xd3\x46\xec\x71\x2a\x4d\xfa\x71\xb3\x6c\xa4\x11\xfe\x30\x4e\
\x10\xb3\x85\x06\x48\x2a\xe2\x87\xe2\x5b\xc7\xd1\xe0\x32\x5d\xcd\
\x4a\x37\x00\x9b\x6e\x24\xf3\x5b\x86\xa2\x71\x29\x3e\x55\x44\x2b\
\x2a\xae\xd3\xc1\xe8\x06\x66\xa2\x69\x78\x2f\xf5\xdd\x60\x3c\x4c\
\x46\x42\xe4\xc1\x86\x9c\x24\x9e\xc6\x0e\x99\x1d\xc6\xff\x73\x36\
\xa0\x9e\x85\x10\xfb\x59\xdd\x3b\x6f\x98\x23\x4e\x6a\x8d\x62\x45\
\x2d\x59\xb1\x49\xbf\xc1\x63\x81\x59\x19\x0d\xd4\x07\x59\xee\x1b\
\xa3\x8b\x07\x88\x4e\x49\x89\x59\x97\xad\xc1\x51\x7b\xf1\x48\x32\
\xb0\x6a\x0a\x79\xbd\xaa\xe2\xa8\x55\xab\xd6\xb4\x9a\x68\x37\x0a\
\x21\x9b\xa5\x32\xa6\x31\xc8\x93\x45\xf6\x00\xa4\x13\x1d\xb1\x56\
\x79\x12\x0f\xfc\x43\x2b\x46\xfb\x38\x35\xe4\xca\x32\x67\x93\x02\
\xb4\x82\x0c\xbc\xf8\x4c\x85\x70\x4f\x2a\x03\x1f\x2a\x45\x4a\x25\
\xbe\xa2\xc4\x6b\x69\x8c\x2a\x42\xb1\xc7\xa1\x7f\xb1\xb3\xe7\x4e\
\x16\xf3\x29\x65\x14\x30\x41\xa4\x4c\xa9\x24\xef\x4f\xb6\xd1\xd0\
\x63\x89\x86\x28\xff\xa0\x8b\x16\x66\xc3\x5a\x5b\x6c\x8f\xe4\xbb\
\x98\x61\x64\x98\x36\x10\xf7\x65\x26\x3a\xed\x3d\xd8\x1e\x30\x68\
\xe3\x43\x4a\xb7\x65\x8a\x9c\x2f\x87\x2d\x0a\x6b\xb8\xda\x42\xd7\
\xaa\x55\x6b\x5a\x4d\xd6\x41\x13\x4b\xdc\xb8\x67\x16\x37\xe4\xe4\
\xb1\xc9\x20\xd5\x00\x61\xf1\xd1\x3e\x31\xb6\x22\x43\x56\x29\xd3\
\x8f\xb1\x4f\xa6\xb4\xe5\x21\x25\x82\x0d\x55\x1c\x8b\x14\x06\x08\
\x81\xf2\x8d\x81\x87\x86\xa7\xec\xc5\xcc\xa8\x0a\x97\x67\x11\x4a\
\x27\x8d\x47\xd0\x05\xd1\x99\x3f\x49\xb3\xd8\x39\x83\x2a\x4d\x72\
\x73\xd4\xab\xf6\x89\xfb\xf6\x12\xa7\xa2\x94\x0b\xc9\xb9\x54\x88\
\x1a\x7e\x73\xd2\x26\xa7\x11\x18\x19\x4e\x31\x3a\xc9\xfe\xc0\xa3\
\xf0\xe4\xba\x52\x69\x4e\x10\xda\x12\x6c\x39\x4a\xfa\x6f\x70\x39\
\xbc\xd1\x18\xd3\x48\xdf\x2c\x7b\x89\x97\xfd\xac\x72\x1c\xb5\x6a\
\xd5\x9a\x58\x53\x47\xbd\x49\xb8\x8a\xd8\x4a\x16\x91\xb2\x4e\x67\
\x78\xec\xbc\x21\x36\xa2\xf2\x64\x4c\x7c\xf0\xd3\x81\xaa\xfc\x64\
\xef\x52\xef\x19\x1f\x71\x64\xb3\x00\x19\x13\x2b\x2a\xa9\xec\x44\
\xa7\x91\x5b\xe0\x90\x2e\xa1\xe5\xef\x8c\x81\x5a\xb5\xd0\xf9\x65\
\x9a\x04\x06\x4c\x46\x35\x39\xc3\xc4\x97\xd0\x58\x4f\x92\x66\x4b\
\x22\x91\x66\x37\x0f\xcf\xd2\x0a\x79\x95\xbc\xa1\x62\x78\x04\x9c\
\x1f\xf2\x67\xe5\x4d\x8a\xd3\x47\xca\xc1\x32\x4a\x6e\x58\xf6\x06\
\xc9\x54\x1c\x67\xd1\x16\x1b\x3c\x0c\x18\x1a\x2f\x41\x88\x71\x8b\
\xc2\x76\x46\xba\x02\x74\xad\x5a\xb5\xa6\xd5\x34\x62\x34\x31\x14\
\x1f\xca\xce\x92\x9f\x51\xfe\xe0\x4f\x1f\x86\x48\x09\x44\x8a\xde\
\xa3\x69\x66\x09\x44\xb9\x37\xf6\xac\x42\x63\x8d\x9a\xc3\x57\x80\
\x2e\x2d\x8c\xcd\x2e\x84\x61\x28\x88\x29\xf2\x10\x49\xa2\x2d\xde\
\x1a\x09\x94\x87\x9c\xc3\xf4\xf2\x41\x6b\xc1\xed\x36\x63\xa5\x75\
\x5d\x04\x58\x13\x6f\x84\x92\xe2\xad\x34\xda\xe3\x17\x96\xb7\xc6\
\x8b\x43\xac\x82\xc3\x48\x35\x7e\x83\x24\xe4\x0c\x87\xd6\x9a\x42\
\xa7\x50\xa2\x5f\x78\x25\x71\xca\x86\x93\x1f\x0e\x45\xf9\xc7\x56\
\x65\x76\xb5\x6a\xd5\x9a\x56\xd3\xdd\xec\x98\x92\xe0\xb9\xb8\x2c\
\x31\xf6\x69\xc2\x3a\x05\x61\x3b\x89\xdb\xc6\xd4\x1f\xc6\xb8\x61\
\xda\xa9\xf8\x42\x3c\xb8\xc1\x9c\x33\x77\xd3\x01\x03\xe3\x25\x99\
\x50\x3c\x92\x12\xaa\x82\xd8\x75\xa9\x33\xcd\x9d\xb3\x1e\x32\x56\
\x48\x5a\x72\x1c\x3f\x26\xdc\xf4\x89\xd1\x4e\x4a\x65\xb1\x3d\x1a\
\xc9\xfe\xf8\x55\x22\xc2\xf0\xde\x22\x45\x20\xf5\xe6\xce\x7a\xd3\
\x98\x34\x35\x43\xaa\x78\xe0\x89\x7e\x43\xeb\xa6\x84\x5d\x29\x71\
\xd7\x83\xb8\xaf\x9c\x79\xf2\xc1\x63\xd3\x88\xc8\xaf\xec\x49\xf9\
\x9f\x24\x8b\x4a\x58\x57\x35\xec\xaf\x55\xab\xd6\xd4\x9a\x48\x71\
\xf8\x53\x13\xda\x40\xbd\x40\x15\x54\x72\xa0\x8f\x35\xc3\x99\x74\
\xb5\x1c\xf2\xcd\xac\x86\x09\x34\x96\x3c\x7b\x46\x61\xbe\x5a\x90\
\x68\x57\xe7\x7c\x71\x01\xcd\xd6\x74\xc3\x3f\x87\xae\x59\x25\xbb\
\x25\xec\x00\x81\x61\xd3\x27\xdc\xc4\x19\x61\x97\xd1\x93\x6f\x2f\
\x2d\x15\x58\xf6\xa7\x46\x94\x82\x02\xe0\xf2\x0a\x21\xf9\x82\x16\
\xab\xa7\xa6\x69\x05\x9d\x8b\xe9\x68\x28\xb2\x6d\xbe\xa2\x37\xd9\
\x4d\x09\xba\x12\x4a\x4a\x3a\x12\xfc\xf5\x89\x75\x61\xec\x75\xb8\
\x4d\x93\x13\xc6\x1d\x58\x17\x90\xd2\x71\xd3\xf2\xa4\xa8\xea\xa0\
\x6b\xd5\xaa\x35\xad\x26\x8e\x7a\x0b\x6c\xa9\x61\x58\xa3\x50\x0d\
\x83\x45\xa7\xcc\x7f\x27\xeb\x8c\xc4\xcf\x0a\x7c\x8b\x90\x43\x9e\
\x2f\xae\x1a\xf2\xc2\x84\xec\x23\x9b\xa4\x82\xd4\xc1\x5b\x93\x6c\
\x46\x83\x77\x42\xf8\x7a\xb1\x2d\xf5\x72\xe2\x57\x18\x09\x46\xe5\
\xb1\xa7\xd2\x28\xc9\xf0\x43\x03\xa6\x71\x7a\x77\xb9\xf9\x92\x68\
\x95\x35\x76\x34\xb0\x28\x88\x62\x29\x0b\x0a\xa7\x21\xd7\x14\x5f\
\x6c\x49\x59\x94\x4b\xf5\x7d\xaf\xf3\x34\x62\x6c\xd5\x53\xd0\x38\
\xab\xc5\x2b\x40\xd7\xaa\x55\x6b\x5a\x4d\x54\x71\x60\xf4\x39\xc9\
\x1e\x00\x57\x59\x8f\x4c\xc5\x7b\x88\xcf\xcd\x94\x2e\x76\xfb\xb9\
\x15\x05\x0b\x22\xa3\xd5\x03\x7f\x3d\xfa\xd4\x1f\xc1\xce\x39\xc9\
\x9a\x15\xfb\x7f\x42\x5f\x6c\xb8\x6f\x75\x0c\x7f\x79\x9a\x1c\xad\
\xae\x4f\x87\x7b\x02\xe5\xf9\xba\x45\x15\x37\xcc\x6a\x8f\x6c\xf0\
\x4a\x5c\x56\xc8\x91\x2d\x23\x76\x5b\x9e\x9b\x88\x88\xa2\xc0\xf3\
\x69\x40\x3c\x7b\x30\x65\x8c\x16\x12\x86\x06\xa1\x1e\x4b\xc3\x79\
\x82\x27\xde\x12\x4f\x31\x82\x81\x21\x36\x3b\x05\xe3\xc1\x7d\x77\
\xd3\xd4\x44\x95\x5a\xb5\x6a\x4d\xab\x89\x1c\x34\xce\xbf\x18\x45\
\x01\x6a\x46\xe5\x14\x15\x04\x14\x82\x9f\xd5\x38\xc3\xe3\x67\xc4\
\x67\x6a\x58\xd5\x51\x72\xb5\xf7\x3c\xb8\x9d\xc9\x07\x97\x0d\x48\
\x85\xa2\xc5\xf9\x5b\x5c\x5f\x2c\x40\xad\xa0\x77\x48\x2e\x1b\xec\
\xad\xcc\x13\x2d\x1a\x78\x17\x9c\x86\xa4\x02\xf0\xca\xe4\x83\x38\
\x3e\x4b\xdb\xea\xac\x95\x59\xc3\xc4\x69\x24\xee\x23\xfb\xe7\x65\
\x90\xd5\xc5\x92\x0e\x4e\x7a\x24\x63\x29\x1c\xd6\xa5\xc4\x22\xc9\
\xcb\x56\x84\x8a\x7d\xba\xb5\x3d\x8f\x0e\x66\xe3\x10\xc2\x21\x21\
\xd3\xd9\x78\xc8\xb2\xe7\x86\x7c\xae\x90\x77\x07\x69\x0a\xeb\x36\
\x52\xe6\x0c\xf6\x0b\xfb\xa9\x7f\xd7\xb5\x6a\xd5\xfa\xcc\x6a\x6a\
\x07\x4d\xb1\x9b\x35\xb9\xbb\x1c\xdb\x37\x8b\x54\x18\x56\x70\x11\
\xa8\xac\x8c\x4a\x8b\xb7\x3d\xd8\x83\x6c\x88\x94\xdb\x58\x70\x05\
\xd2\xf3\xf2\xc8\xb5\xe0\xb6\x1e\x91\x27\xd2\x9a\x3a\x67\x93\xaa\
\x3a\x3b\x7e\x0a\xe2\x0a\x56\x16\xe2\x42\x9e\xcf\xc7\x7a\x23\x6d\
\x09\x95\x71\x18\x03\x81\xb3\x1f\x26\x6b\x68\x2c\x35\xa1\xc1\xb0\
\x89\x25\x1f\x08\xcf\x72\x4e\x4c\xfb\x02\x5c\x99\x52\xce\x8b\xa0\
\xbc\xf8\x87\x7c\x90\x27\x10\x1b\x64\xc3\x76\x76\x32\x58\x43\x18\
\x6e\x14\x2e\xc7\xe7\x37\x2b\x1d\x77\xad\x5a\xb5\x6a\x7d\x7c\x4d\
\x4d\xf5\x66\x05\x85\x24\x8f\x14\xf7\xfa\xe4\x7f\x6f\x7b\xcd\x1f\
\xf5\x61\x4c\x11\x1c\x7f\x9e\x47\xe0\x49\x04\x6b\xd3\x68\xc4\x63\
\x39\x78\x68\x24\x50\x0b\x9c\x65\xd5\xa4\x81\x3e\x12\x29\x9e\x92\
\x36\x73\x6c\x78\xdf\x94\xc0\xc0\x90\x1c\x4e\x65\x18\x24\x3f\x2d\
\x99\x72\x0c\x86\x4b\xa8\x72\x62\x49\xe8\xa6\xc5\xdb\x54\x25\x17\
\x7f\x1a\xc3\xf4\x68\x1e\x32\x50\x5a\xd6\x09\x85\x9e\x1f\x67\x1b\
\xd2\xe4\xfd\x84\x76\x5e\xa6\xc1\x75\x86\x60\xf9\xb8\x90\x77\x83\
\x21\xad\x9c\x52\xef\xcc\x3d\xb4\xb5\xb6\xaa\x38\x6a\xd5\xaa\x35\
\xb5\x26\x1e\x12\xa2\xc9\x55\x09\x86\x78\xde\x04\xea\x85\x20\x0e\
\x1a\x11\x8c\xfa\xbe\x17\xa7\x66\x0c\xa6\x04\xdd\xa0\x6b\x66\xea\
\x00\xfc\x2c\x66\x3e\x84\x51\x90\xe4\x6f\x92\x11\x6a\xe6\x00\x52\
\xb0\xb7\xca\x8e\xfd\xe0\x7f\x15\x74\x13\x04\x32\x57\x65\x97\xff\
\x40\x39\x5a\x0a\x3e\x9f\x6c\x77\xd7\xe8\x34\x7c\x2d\xac\x71\x84\
\xfe\x9c\x8a\xa2\x39\x0a\x0b\xfd\xb3\x64\x9a\xc8\xc8\x0c\x62\x03\
\xd3\x3d\x0b\x29\x2d\x09\x87\xc2\xb7\xb0\xa8\x43\x62\x70\xc3\x20\
\xd4\x2b\x47\xa3\x44\x94\x66\x53\x0a\xbb\xcd\x34\x0b\x53\xd0\x9e\
\x25\x2a\xc2\x4d\x53\x49\xc6\xf2\xd6\x22\x45\xab\xfd\xd4\xbf\xeb\
\x5a\xb5\x6a\x7d\x66\x35\xb1\x83\xc6\xe9\x9d\x1c\xb1\xe9\x12\x4a\
\xa8\x95\xf5\x8e\xf2\x47\xfe\x6c\x35\xc4\xa0\x9d\x99\x0a\x74\xa7\
\x1e\x3c\xac\x2e\xb2\xe2\x62\xf0\x96\x1a\xd8\xec\xd2\xa9\x41\x5f\
\x7b\x71\x91\xd6\xaa\xe1\xd8\x59\x1d\x86\x64\x80\xcc\x25\x33\xa7\
\x91\x63\xb4\xe0\x9c\xe7\x69\xa4\xdf\xc8\x89\x56\x10\x62\xf3\x49\
\x9d\x92\x34\xdb\xe1\xfb\x83\x50\x3a\x1d\x12\x3a\x1f\x92\xa8\x0e\
\xee\x74\x98\xa6\xe1\xb9\x70\xa3\x87\x18\x6f\xb9\x96\xf7\xc3\x68\
\x4c\xc8\xc9\xb9\x3a\x2d\x15\xd2\xe8\x63\x6e\xcd\x53\xf4\x55\xa8\
\x2a\x8e\x5a\xb5\x6a\x4d\xab\xc9\x32\xbb\x91\xc7\x90\x66\x61\x86\
\x11\x17\x64\x6e\x21\xc7\x63\x75\x21\x7d\xde\x17\xfa\x38\x8f\x93\
\x30\x9e\x66\x14\x65\x6d\x06\xa0\x93\xa7\x54\x1a\x68\x30\xa0\x81\
\x28\xec\x80\xe4\x01\x8a\x5f\xdd\x28\x0c\x30\xef\x0a\xbc\xd0\xcc\
\x34\x70\xa4\xa3\x14\xfc\x0a\xbc\x2e\x63\x8d\x83\x3f\x68\x6e\x75\
\x73\x92\xc0\x10\xc5\xa2\xf5\x30\xa6\xd8\x98\x36\x3f\x33\xe4\x31\
\x99\x3c\x12\x3e\x88\xff\x12\x13\xf2\x61\x7c\x62\x72\x73\x52\x5a\
\x15\x41\x1e\xc9\x07\x00\xa2\x64\x55\x5a\xf1\xb9\x56\xad\x5a\x13\
\x6b\x1a\x40\x37\x60\x06\x7c\xa2\x19\x70\x86\x06\x8c\x32\xca\x00\
\x81\xb4\x08\x93\x93\x99\x11\x26\x59\x72\x78\x95\xf8\x25\x71\x4b\
\x9b\xb4\x71\xa0\x36\x1c\x66\x5d\x2c\x3f\xc7\x88\x5a\x43\x32\x0e\
\x8d\x86\xf5\xb4\x8e\x88\xdf\x25\x5f\x53\x12\x52\x21\xb9\x6a\x50\
\xca\x49\x61\x6c\x36\x6c\x7d\xe4\x62\xaf\xcd\x3e\xcc\x5e\xf6\x03\
\x24\x5f\x25\x57\x23\xef\x87\x46\x5b\x51\x62\x33\x92\x18\x43\xfa\
\x65\x91\xf7\x61\x92\x10\xce\x21\x7c\x50\x08\x9b\xa4\x61\xa0\x9d\
\xf1\x56\x54\xd8\x69\x6e\x85\x4a\xf7\x9d\x9c\x42\xe0\x75\x2a\x8a\
\x69\xa0\x33\xe5\x8d\x26\x0d\x7f\xfb\x4f\xfd\xbb\xae\x55\xab\xd6\
\x67\x56\x53\x0f\x09\x19\xce\x80\x44\x4c\x02\x18\x0d\x44\xe3\xfe\
\x37\x42\x92\x1b\x29\x8c\x19\x0d\xad\xed\xa1\x6f\x03\xa6\x87\xd4\
\xdb\xc2\x87\x13\xae\xa5\x5a\xe2\xc0\x4d\x3e\x02\xc4\xea\x21\x53\
\x10\xf9\xbc\x2e\x04\x97\xf5\xd4\x90\x57\xff\x41\xd4\x8c\x31\x10\
\xf4\xad\xdc\x90\xc7\x3b\x92\xd8\x00\xc9\xa9\x82\x79\xd3\x70\xf3\
\x69\x38\x1b\xab\x61\x23\xc9\x4d\x37\x98\x10\x74\xc7\x16\x63\x38\
\x0c\xd6\x4e\x7a\x6d\x61\x98\x99\x6d\x31\xc5\x27\x2f\x07\x7e\xb3\
\x66\x03\x59\x04\x71\xcb\x19\xf1\x27\x49\x61\x5d\x84\xe1\xa9\x77\
\xd7\xcd\x64\xc1\x4c\xad\x5a\xb5\xfe\x8b\xd7\xc4\x44\x95\xd8\xd2\
\x7a\xa4\x6f\x83\x28\x0e\x08\x2a\xe1\x48\xa7\x34\x5a\x22\x11\xb0\
\xe9\xb3\x7f\x99\xe6\xa0\x4c\x08\xc0\xbb\x39\xd9\xbf\x51\x19\x12\
\x09\x83\xa9\xf4\x08\x04\xc5\x29\x7f\x98\xfd\x13\x7f\x8f\xf1\x74\
\xf8\x78\x4a\x90\xbb\x78\x38\x39\x53\x26\x1c\x64\x1b\xc8\xcd\x7b\
\x91\xf7\x15\xe2\xd8\xe5\x91\x96\xf2\xe6\x18\x96\x31\x53\xee\x64\
\x2e\x1b\x3a\x8d\x44\x85\x23\x0b\x66\xb0\x82\xca\x8e\xa6\x4c\x54\
\xe3\xd6\xc1\x7c\xc8\x6c\x0e\xef\x49\x7c\x79\xe4\xdf\xd2\x70\xdd\
\xca\x41\xd7\xaa\x55\x6b\x62\x4d\x34\xec\xf7\x80\x66\x69\x21\x43\
\x76\xc0\x87\x11\x1d\x69\x51\x9a\x71\x13\xac\x10\x5a\x85\x74\x6f\
\x38\x38\x8b\xf5\x85\x87\x5b\xbe\xc8\x8c\x53\x09\x43\x9d\xd6\x91\
\x43\x42\xf4\xd4\x7e\xdc\xab\x16\x24\x15\x84\x1b\xbb\xc7\x25\x9b\
\x0e\x7e\xa1\x49\x76\xd2\x94\x62\xb0\xd2\xd3\xe1\x37\x1d\x70\x44\
\x28\x2d\x73\x0e\x7c\xf5\xc2\xba\xc8\xd1\x26\xc9\x3c\x37\x04\x7c\
\xe0\x70\xe4\x69\xa9\x9d\xe7\x48\x2f\x65\xd2\xab\x32\x03\x4f\x69\
\xf5\x24\xcd\x2e\x8e\x20\x4a\x8b\x47\x6a\x40\x9e\x96\x4a\xee\xa7\
\x83\x67\x5e\xad\x5a\xb5\x6a\x7d\x6c\x4d\x04\x68\xb8\xbc\x05\x08\
\xca\x30\x34\x28\xe6\x1b\x30\x42\xd2\x8d\xf3\x94\xcc\x93\x29\x99\
\x46\x47\xa0\xb3\x12\xef\x8a\x7c\x56\x80\x79\x9a\xc6\xd6\x00\xef\
\xac\x07\x01\x86\xb2\x97\x92\xcf\x2a\x08\x3d\xb2\xbc\xc0\x5c\x8c\
\x4d\x23\x8b\x85\x80\x96\x5b\xca\x08\xee\x53\x12\x8b\x12\x3f\x54\
\xca\x29\xda\x06\x4b\xf2\x1c\x22\xe5\x63\xc6\x7c\xc2\xe9\xf3\xfd\
\x13\x25\xe3\x27\x7e\x50\xc4\x22\x88\x24\xa7\xb4\xb5\x08\x7b\x2e\
\x08\xce\xad\x72\x32\xae\xe3\xed\x04\x8f\x6a\x4a\x67\x83\xfc\x89\
\x01\x70\xcc\xdb\x8c\xe0\xbb\x12\xa3\xa7\x50\x01\xba\x56\xad\x5a\
\x53\x6b\xb2\x61\x3f\xe8\x04\xb1\x4c\xc2\x64\x5f\xe8\x23\x06\x6f\
\xef\xde\x9d\xb6\xbb\xab\xc7\xcf\xf5\x7c\xcd\x29\xdb\x09\x04\x09\
\x0a\x8d\x94\x86\x95\x10\x4a\x25\x02\x17\xfd\xb6\x70\xd3\x2a\x8c\
\x86\xfd\x48\xd1\x48\x28\xa2\x06\x83\x66\x71\xf0\xcc\x4a\x0c\x35\
\xd8\xe0\x71\x15\x4b\x10\xca\x54\x89\xe4\xaf\x80\x62\x09\xe5\x55\
\x34\x50\x28\x79\xa0\x91\xbb\x67\x5f\x7a\xe2\x44\x62\xe8\xc0\x5f\
\xdc\xad\x27\x39\x07\xfb\x3b\xe7\x9c\x6f\xb9\xd4\x07\x3b\x04\x0d\
\xb8\x2f\x74\x3a\x27\xe1\xba\x41\xec\xc1\xa3\x38\x6d\xd5\x41\xd7\
\xaa\x55\x6b\x5a\x4d\x95\xd9\x11\x3e\xba\x73\x1b\x8c\xa8\x14\xea\
\xba\xe3\xe6\xed\xdb\xed\xed\xed\xe6\xed\x0b\xf2\xdd\xd5\xf3\x3f\
\x29\xbd\x2c\xfa\x62\x8d\x93\x3a\xbc\x34\x33\x15\xe4\x42\x46\xb1\
\x3c\x8d\xcd\x7f\xb8\x80\xc8\xd7\xe2\x87\xe7\x13\xe1\x0b\x57\xd2\
\x4c\x89\x88\xd2\x42\x89\x35\x74\x19\xce\x66\x4c\x37\x1c\xe4\x1a\
\x32\xd1\xa0\xd2\x93\xc3\xd8\x12\x3a\x29\x31\xc6\xbe\x74\x62\x94\
\x0a\xfe\x3c\x64\x77\x24\x7e\x7f\x46\x30\x39\x1b\x2a\xc5\xab\xd8\
\xde\x36\x29\x0b\x91\x64\xa4\x50\xa9\x01\xeb\x65\x94\x86\xb2\x02\
\x9c\x48\x26\x6b\x74\x16\xe1\xf1\x69\xa2\xe0\x75\xad\x5a\xb5\x6a\
\x7d\x7c\x4d\x94\x16\xe0\x78\x30\xe2\xa8\x83\xc4\xb7\xb5\xa7\xfb\
\x5f\x7f\xfd\xf5\xc5\xef\xde\x85\xb6\x6d\x7e\x7f\xf1\x63\xbb\x3c\
\x3b\x7b\xfa\x85\xb0\x0c\x06\x8c\x41\xc3\x44\x00\x53\xb1\xe2\xf1\
\x0f\x76\xc3\x46\xc0\x53\xa6\xc1\x00\xa0\x16\x23\x68\xd8\x75\x72\
\x58\x14\xec\xfc\x45\x86\x87\x0b\x2a\x2d\xc9\x84\x3c\x0e\x4e\x79\
\xe8\x0f\xdf\x21\x4a\x94\x30\x1e\xf7\x2a\xf9\xfb\x13\xa9\x02\x85\
\x5a\x0e\x0c\x85\xae\xd0\xd2\xab\x93\xbc\x2e\xd9\x9c\x5a\xdb\x67\
\x1b\x3c\xb4\xf0\xe4\x8a\xb5\xb4\x48\xe8\xd8\x94\x4e\x64\x79\xb2\
\xb1\x30\x75\xe3\x25\x63\xa5\x4c\x6f\xcb\x34\x24\x15\x6a\x5e\xa0\
\x9f\xc4\x79\x04\xcc\x09\x6f\x0c\xd5\xcd\xae\x56\xad\x5a\xd3\x6a\
\x22\x07\x7d\xdc\xaa\xd0\xbb\x08\xac\xb3\xb5\x31\xf3\x97\x3f\xfd\
\xdc\x3f\x6c\x9e\x3c\xb9\xec\xad\xdb\xbc\xba\x9f\xcf\x9b\xdb\xdf\
\x7f\x69\xd6\xe7\xab\xb3\x73\xb8\x74\xfa\xc1\x42\x8e\xe1\x0d\xe0\
\x1c\xfc\x9b\x9f\xbe\xd7\x87\x8d\x9a\x2f\x9e\xfc\xe9\x5b\x3f\x3b\
\x67\xdc\x86\xd1\x06\x64\x11\x3e\x11\x17\xc5\xd9\x19\x84\xb6\x2e\
\x61\x80\x34\x16\x14\xb3\x38\x23\x3d\x07\xa5\x52\x6b\x9d\x9c\x37\
\x72\x97\xad\x68\x98\x7c\x51\x63\x3d\xb2\xe8\x43\xa0\x09\x54\x08\
\x84\x95\x80\x2e\x0f\x5d\x4a\x31\xe8\x60\x0e\xbd\x69\xb4\x4c\x2a\
\xca\x39\xa0\xe8\x46\xa8\xf0\x24\x45\x2c\xfd\x21\xd1\xac\x65\xbe\
\x46\x2e\x64\xaa\x17\x47\xad\x5a\xb5\xa6\xd5\x34\x80\xfe\xf9\x87\
\x7f\x53\xdd\xae\x3d\xbf\xfc\xf2\xef\xfe\xb1\x3f\x86\x9b\x47\x8f\
\xe9\x7a\xf9\xf2\xc7\x1f\x4f\x0f\x9b\xdf\x7e\x7e\xf1\xec\xe9\xf5\
\xd5\x6a\xdd\x26\x3f\x7c\xd6\x53\xf8\x22\x32\x53\xc9\x81\xc8\xdb\
\xe3\xfd\xab\x37\xbf\xfd\xfc\xe3\xa3\xa7\x8f\x76\x87\xe3\xe3\xaf\
\xbe\x59\x5e\x3d\x55\x7a\x86\xa3\x45\xa7\xd0\x51\x6b\x19\x71\x01\
\xa3\xc1\x8b\x29\xe4\x54\xa9\x64\x7e\x21\xf8\x4b\xa0\x8f\x4d\xca\
\x95\x62\x3c\x4e\x6a\x3b\x1e\x6f\xb4\x90\x57\x34\x43\x90\x21\xe6\
\x44\x8a\x25\x7f\x49\x7a\x45\x09\xd6\x2a\x11\x59\x23\xe6\x4a\x9a\
\x72\x69\xc6\x21\xb1\x66\xb5\xb5\xcc\xa1\x24\x86\x3c\x1d\x43\x7e\
\x68\xbd\x54\x06\x67\x88\x84\x72\x49\x57\x84\x1e\x11\xdf\xaa\x55\
\xab\x56\xad\x29\x35\xad\xad\x3b\xbc\x7f\xbf\xbf\xbb\x3f\x6d\x76\
\x6f\x7f\xf9\xbe\x25\x75\x76\x79\x45\x7a\xd6\x9d\x7a\x83\x06\xf8\
\xd8\x75\x6d\xdb\x5a\x7b\x8a\x70\xac\x34\xb3\x15\x8d\xd6\xad\x89\
\xbd\xa3\x01\x5c\x32\x94\xd9\xee\xe4\x4f\x47\x52\xed\xbf\x7f\xff\
\xf3\x6e\xf3\x70\xb8\x7f\xed\xfb\x6d\xa0\x8e\x54\x8f\x64\x55\xcf\
\xde\xce\x14\x7b\xef\x88\x67\xd0\x7b\x38\x07\x5a\x25\xa2\xa4\x93\
\x03\xbd\x88\x93\x06\x23\x83\xac\xf9\x50\xb0\xcf\x60\xa7\x25\xa6\
\x38\x9c\xf2\x3d\x0f\xb7\xc0\xe6\x03\xeb\xe8\xb8\x2c\x75\x56\x1d\
\xbd\x86\xe5\x3f\xa2\x11\xb9\x8f\x8e\xad\x7c\xf2\x7f\x26\x13\xb8\
\xb7\x8f\xad\xb5\x71\x3a\x7e\x69\x1b\x17\xe2\x2f\xe5\x8d\x75\x9c\
\xe2\x62\xe2\xfe\xc1\x7b\x8d\x45\x02\x6c\xdc\x01\xac\x4e\x3b\x08\
\xae\xe4\xc5\xc8\x9a\x71\x38\x1b\x70\x84\x6c\xd6\xc1\x64\xba\x8f\
\x77\xca\xf1\x57\x35\xd5\xbb\x56\xad\x5a\xd3\x6a\x22\x07\xed\xdc\
\xfb\xdb\x77\xf3\x63\xdf\x1d\xdf\x3f\xfb\xfa\xbb\x5e\x79\x65\xe6\
\x37\xcf\xbe\xbc\x7b\xfd\xdb\xfa\xf2\xa2\x9d\x9b\xd9\x62\xb9\xba\
\x58\x85\xc6\x87\x34\xb8\x81\x57\x31\x41\x1b\xc1\x91\x7d\x3b\x4f\
\xfb\xdd\x62\xd5\x9c\x9f\xb5\xdd\xc1\x34\xa6\x6d\xcf\x9f\x7a\xb3\
\x24\x49\x9a\x05\xb0\x85\xfe\xe8\xe2\x97\xf6\x8b\xc5\x95\xcc\xf0\
\x61\x38\x5c\xe6\xa9\x1d\x88\x0b\x41\x6c\xa6\x1e\xfa\x5e\x59\x17\
\xc2\xf1\xd8\x68\x56\x59\x70\x64\xa1\xa4\x00\x32\x8e\x13\xc8\x6b\
\x13\xe2\x97\xe1\x60\x01\x9a\x85\x88\xbe\x4a\x35\x9a\xef\x8f\x6f\
\xc9\x1e\xc8\x77\x3b\xe2\xf6\x9c\x03\x50\x7c\xb6\xdc\x00\x7b\xc1\
\x06\x20\xb6\xeb\xbc\x9f\x9d\x5d\xad\xcd\x1c\x24\x72\x80\x77\x92\
\x69\xc0\x76\xe4\xe0\x02\x8c\xa5\x8c\x3c\x40\x12\xd3\x82\xf3\x43\
\xb1\x13\x51\xc8\x27\xa8\x55\xab\x56\xad\x69\x35\x0d\xa0\xed\xa9\
\x3b\xee\xf6\x11\x85\x97\x8b\xc5\x61\x73\x08\xcd\xac\xa5\xd9\xf5\
\xc5\x13\xdf\x59\x77\x8c\x40\x77\xbc\xba\xfe\xb2\x3f\xc0\x57\x94\
\x43\xbb\xb5\x30\x06\x8e\xc7\x5b\x38\xcf\x3b\x36\xbb\xba\x7d\xbc\
\xb8\x39\xad\x7a\xa3\xf4\xba\x59\x5c\x2e\x2e\x9e\xe8\x08\x7e\xce\
\x63\xd6\x25\x42\xa9\x7f\xf5\xe2\xe7\xfb\xb7\xbf\x2f\xce\xd7\x5f\
\xfd\xf9\x1f\x9a\xe5\xaa\xc4\xa0\xc4\xbe\xb9\xef\xf8\xbc\x8e\xb5\
\x6f\x0e\xe9\x24\x11\xcc\x7b\xd7\x5b\x65\xf7\x1b\x15\xba\xb6\xb1\
\xc1\x6f\xf9\xd1\xd8\x6b\x47\xcc\xe5\x59\xc1\x78\xd5\x33\x1b\xe6\
\x7a\xb6\xfa\xcf\x17\xdb\x27\x5f\x3d\xf9\xe2\xab\x2b\x11\xc5\x19\
\x1e\x03\xa7\x6e\x77\x6f\xef\xfe\xdd\x77\x7b\x3e\xf7\x73\x1c\xd4\
\xa2\x13\x95\xcc\x67\x9a\x66\x36\x53\x6d\xd3\xb9\xeb\x7e\xde\x34\
\xf3\x45\xfc\x59\xb1\xc0\xd0\x91\x64\xa6\xc4\x37\x27\xe3\xdd\x98\
\xaa\xa1\x3c\xdc\xa8\x47\xb6\x50\xc9\x42\x5a\x1b\x13\x40\x64\x7f\
\xea\xdf\x75\xad\x5a\xb5\x3e\xb3\x9a\x06\xd0\xf3\xf9\xfa\xfc\xfc\
\x3a\xa2\xb1\x0b\xfe\xe5\x8f\xb7\x41\xdd\xb7\xe1\x18\x5b\xc4\xfd\
\xc3\xd6\x75\xd6\xf7\xbb\xdf\x7e\xfe\x9e\xda\xa6\x61\x75\x1d\x75\
\x6c\x73\xb4\x0c\xda\xee\xf6\xf4\xfa\x9d\x75\x2c\x9a\x50\xb3\x79\
\xec\x72\x1f\x8c\x77\xfd\x26\xcc\x2e\x9b\xb7\xaf\xee\x9e\x3e\xbb\
\x31\xba\x0d\x2a\xb6\xa5\xfd\xc3\xfd\xfb\x57\x2f\x5f\x6c\x6f\x6f\
\x2f\x6e\x9e\x9c\x9d\xdd\x3d\xf9\x6a\x25\x7a\x0b\x34\xc0\xfa\xb0\
\x3d\x2c\x55\xd7\xed\x6f\x5d\xdf\x79\xf1\x0f\x55\x61\xbb\x39\xfc\
\xdb\xff\xf9\xbf\x11\x72\xff\xe9\xbf\x7f\x79\x71\xee\x94\x8b\xf7\
\x63\x03\xfa\x61\x52\xb3\xf7\xb7\xfb\xc3\x49\x75\xfa\xfc\xe1\x9d\
\xda\xef\xe7\xce\xae\x56\xab\xe6\xe6\x11\x47\x08\x30\xd4\xf7\x77\
\xa6\xbb\x9f\xfb\xbd\x56\x3c\x2a\x8e\x33\x3d\x16\x93\xb0\x74\x5b\
\x29\xeb\x0f\x3a\xf6\xde\x66\xa6\x93\x06\xa3\x11\x07\x25\x62\xcc\
\x6d\x9c\x03\xa5\xc1\x7d\xb4\x25\x9f\x67\x67\x54\xca\xf1\x56\x99\
\x87\x87\x28\x91\x09\xf4\x71\xca\x78\xad\x5a\xb5\x6a\x7d\x4c\x4d\
\x03\xe8\xe5\xe5\xf5\xe6\xee\xfd\xac\x35\xed\x7c\x75\x7e\xf1\xf8\
\xb0\xdf\xe9\xee\x14\xba\xc3\xc5\xb2\x77\xa7\xfd\xfa\x4c\x35\xf6\
\xb5\xb5\xc8\x48\x8d\xd0\xec\xc8\x34\x7d\x4f\xb1\xcb\x55\xfd\x2e\
\x3e\x2f\xc2\xa0\xdd\xb7\xb3\x26\x74\xe4\x0e\xf6\x61\xe7\xf4\xcb\
\xfd\xf6\xad\xbd\x5d\xdf\x3c\xff\x7a\x71\xf3\x85\x57\xc6\x5b\x1f\
\x9b\xd5\xe3\x21\xec\x5e\xbc\x9b\x9f\xdd\x9e\x5d\xdf\xcc\xd7\x6b\
\x31\xf8\xef\xe3\x9e\x60\xf7\xf6\xf4\xb2\x09\x0f\x73\xb3\x88\x57\
\x8a\x0d\x74\x7c\xfe\x3c\x1c\xdd\xf1\xde\xba\xf6\xfd\xbb\xb7\x17\
\xcb\x2b\xe7\x1b\xcb\xd4\x8a\xed\x5c\x7b\x3a\xae\xee\xee\xee\x77\
\xdb\x7d\xb3\xf2\xa7\x1d\xbd\x7e\x63\x37\x7b\xdf\x18\xf7\xd7\xbf\
\x5c\x7d\xfb\xdd\x13\xee\x99\x4f\xbb\x85\x51\x48\x34\x74\xb3\x66\
\xae\xc9\x92\xf1\xc9\x08\x95\x9a\xb8\x6d\xc4\x3e\xd8\xd3\xf1\x74\
\xec\x16\x10\x8c\x08\xf1\x0c\xb3\xff\xf8\x5f\x0b\xcb\xd2\x3e\xc7\
\xe3\xaa\x34\xc5\x23\x67\x88\x29\x38\x3c\xc8\x94\x21\x26\x0c\x6b\
\xe4\x55\xad\x5a\xb5\xa6\xd5\x34\x80\xbe\x7a\xfe\x2c\x36\xb9\xed\
\x62\xf1\xe4\xeb\xbf\xb7\x56\x75\xf7\x0b\x7b\x5a\x33\xcb\xab\xba\
\x63\x08\xfe\xd8\x3d\xba\xba\x39\x3c\xdc\xf6\xbe\x5b\x9f\xaf\xdd\
\x29\x76\xa5\x0b\x22\x3f\x9f\xcf\x9a\x15\x87\xab\xb8\xc3\x46\xb5\
\xdd\x69\xb7\x6d\xc9\x1f\xfb\xbb\x95\x3d\xdb\xdf\x1d\xde\x1c\xde\
\xac\x16\xb3\xf5\xa3\x47\x8e\x1b\xf4\x85\x69\x8d\x6a\x54\xe8\x94\
\xb5\x47\x76\xeb\xb7\x60\x0c\x20\xf1\x68\x4d\x04\xe9\x07\x52\x27\
\xd6\x44\x74\x07\x72\x5d\x6c\xd5\x17\x0d\x47\xa6\x5c\x5d\x5d\x93\
\x32\x6f\xdf\x6e\x9c\x5d\xdc\x1f\xba\x67\x8f\x57\x7e\x7f\xda\x6c\
\x5f\xb7\xaa\x3f\x5f\xfa\x83\x7d\xdb\x34\xcb\xfd\x41\x99\xfb\x45\
\xbc\xd6\x6e\xcb\x47\x7f\x71\x7d\x7b\xd8\x92\x3f\xb1\xa8\xce\x04\
\xeb\x4f\xb6\xb7\x1a\x8d\x34\x2b\x04\xe9\xa4\x9b\xcb\x78\x33\xad\
\x9e\x3d\x6c\xbb\x4b\xfc\xa0\x92\x31\x1d\x3b\x7e\x98\x6c\x45\xda\
\xb0\x74\x90\x28\x65\xc1\xb0\xaa\x5b\x72\x08\x4b\x42\xcc\x60\x3c\
\xf2\xa9\x7f\xd7\xb5\x6a\xd5\xfa\xcc\x6a\x1a\x40\x9f\xdf\x3c\xfa\
\xe2\x9b\x6f\x62\xff\xdc\xcc\xaf\x22\xae\x29\xbd\x6c\x57\x17\xf6\
\xb4\x09\xe1\xe4\xe7\xbd\x9e\x79\x37\xbf\x3e\xdc\x9f\x5f\x9d\x2f\
\xda\x8b\xe5\xfd\xfd\xc3\x7c\xf5\xc4\x6d\x7f\x8f\x9d\xe9\xe5\xe3\
\xc7\xb3\xc5\xf2\xfd\x9b\xd8\x91\xbe\x3f\x3c\xdc\xcd\x97\x97\xfd\
\xca\x58\xdf\xed\xfa\x26\x76\xa9\x6f\x5f\xdf\xde\x7c\xb3\x6d\xe6\
\xc4\x51\x86\xb1\x99\x85\xf2\xa3\x9d\x2b\xdd\xc0\x42\x03\x5e\x45\
\xde\xf6\xda\x1d\x89\xc7\x55\x16\xce\x59\xe5\x9a\x6e\xdf\x9d\xfa\
\xfd\xed\xb6\x6f\xe7\x97\xe7\x67\x11\x78\xe3\xe2\x7d\xd3\x9c\x2e\
\xcf\x94\x3b\x6e\x8d\xed\x2e\x16\xce\x76\xc7\xd8\x33\xc7\x4e\x3b\
\x2e\x6c\xfb\x66\x73\xff\x26\xd8\xf3\xde\x9e\x33\x02\xdb\xb8\xcc\
\xae\xf7\x27\xcd\xa7\x7e\x2c\x19\xd9\x1c\xed\xfd\xc6\x1f\x0f\xfd\
\xf5\xb9\xb9\xba\x30\xcb\x96\x67\x6d\xac\xb5\xfb\xd3\xc9\x39\x65\
\xe0\x17\xca\xbd\xb0\x62\x4b\x3f\xf6\x59\x4d\xe3\x86\x9a\xbf\x05\
\x6b\x54\x4c\x42\x82\xe2\x60\xd5\xb4\x13\x99\x9e\xf7\xbe\xf2\xcf\
\xb5\x6a\xd5\xfa\xff\xa8\xc9\xa1\xb1\xab\xd5\x32\x02\xa3\x52\xfa\
\x74\xb0\xdb\xbb\xd7\x9e\x96\xed\x3c\x50\xbf\x79\xfd\xfb\xe6\xeb\
\x67\x8f\x74\x98\xd9\x2e\xec\xb6\x07\xef\x76\xc1\x99\x17\xbf\xdd\
\xfe\xf9\xab\x67\x2f\xff\xe3\x5f\xe7\x57\x17\x97\xe7\xcb\xed\xdb\
\xd0\x3f\xbc\x0f\xa7\xfb\xed\x89\x10\x81\x7d\xda\x6c\x76\xfd\x62\
\x11\x5e\xef\xbe\xd9\x91\x69\x8c\x75\xb3\xa0\xd6\xeb\x8b\xa7\xdd\
\x69\x7f\xbe\xb8\x78\x78\x75\x1b\x3c\xbb\xd0\x71\xc6\xb6\x3b\xb5\
\xfd\xab\xe5\x4c\xef\xba\xd8\x89\x77\xad\x6a\x8f\xfb\x66\xb3\x57\
\xbf\xfc\xea\x1f\x4e\xfe\xcb\xe7\x61\xd1\xec\x9b\x9e\x45\x1c\x8d\
\xd8\x35\x19\x0b\x85\x07\x6b\x90\x67\x46\xaf\x5b\x45\xb1\x25\x37\
\xce\xf5\xb3\xdd\x7e\xcb\xac\x43\xd7\x6b\xea\xfa\xbe\xdd\xdc\xfb\
\xf5\xba\x3d\x9b\x77\xb7\x6f\x4e\xbf\xbc\xe6\x58\xef\x37\xf7\xa7\
\xbf\x7b\x3e\xff\x72\xd6\xeb\x66\xd6\x18\xb5\x79\x78\x20\x5a\x84\
\xd0\xfe\x3f\xf6\xce\xe5\x47\x92\x2b\x2b\xe3\xf7\x7d\x23\x22\x1f\
\x55\x99\x5d\xfd\x70\xbb\xdb\x76\xdb\x1e\xdb\x33\x20\x8c\x07\x63\
\x6c\x09\x89\x61\x18\x69\x40\x80\x60\x39\x2b\x84\xd8\xb3\x60\xcb\
\x9f\xc0\x86\x0d\x5b\x04\x5b\x76\x20\xa4\xd1\x60\x89\x61\xec\x01\
\x99\xd7\xd8\xcc\xc3\xed\x66\xda\x76\x77\x57\xd3\xdd\x55\x5d\x95\
\x55\x99\x19\xaf\xfb\xbe\x9c\x13\x6d\xb3\x76\xae\x10\x22\xbe\x2e\
\x95\x54\x59\x99\x91\x51\xd9\xd2\x77\x7f\x71\xe3\x9c\xef\x3c\xe9\
\xf2\xc6\x66\x16\x2c\xd6\x4b\x9c\xa6\x80\x1d\x8c\x9f\x25\xdc\x0d\
\x89\xd1\x43\x33\x0e\xcb\x11\x7e\x43\x39\xde\xcc\x1c\x4a\xef\xe0\
\x22\x23\x8e\x9d\x84\xa3\x46\x8d\xda\x51\xbb\x19\xb4\x0f\x42\xc8\
\x89\x59\x6f\xf6\xb0\x6e\xc1\xf5\xf5\xea\xf2\x73\x5f\x59\x77\x5d\
\x29\xa4\x94\x59\x2a\xbe\x3a\x5d\x01\x8a\xf6\xed\xa6\xab\xd7\x52\
\xef\x3f\xfb\xfc\x9b\x9c\x76\x52\xf3\x83\x6b\xd7\x69\x74\xa9\xef\
\xab\x6a\x5e\x6f\x1e\x13\x6f\x7c\x88\xa4\x98\xa1\x95\x65\x77\x72\
\x16\x4e\x1e\x6e\xe5\xf1\x3a\x99\x4d\x3a\x3b\x02\x9f\xed\x9b\x6d\
\xf7\xc0\x25\x2e\x95\x06\xd0\xee\xb8\x04\xb6\x06\x33\xcc\xf5\x76\
\xc8\xed\x4f\xc9\xc2\x1a\x91\xc0\x8a\xf5\xaa\x33\x3e\xf1\x94\x01\
\xc5\x5b\x8c\xab\xa6\x38\x08\x1c\x9b\x18\x59\xc0\x59\xe1\x12\x8e\
\x4f\xb9\x64\xbd\x77\x58\xd7\x91\x75\xf4\xbd\x37\x0e\x1e\xb4\x8e\
\x38\x57\x3e\x38\x5c\xdd\x3f\x6a\xf6\xf7\xf5\x8d\xa7\xb5\xd6\x45\
\x48\xe7\x31\x6a\x67\xe3\x83\x33\x3b\x99\x17\x4b\x99\x02\x89\x4d\
\x6d\xe1\xa5\x43\x5e\x08\x30\x35\x25\xde\x01\x11\x8b\x61\x97\x99\
\xd3\x61\xda\x61\x86\x73\x8b\x9f\x07\xa5\x62\x1b\xfc\x90\xc9\xca\
\xe8\x40\xd9\xd8\x36\x13\x02\xa5\xa3\x41\x8f\x1a\x35\x6a\x37\xed\
\x66\xd0\x8f\x8f\x69\x49\x74\xbb\xc5\xab\xf9\xb2\xd4\xcb\xbd\x52\
\x16\xb2\xe0\x7b\x13\xef\x0e\x0e\xf6\xb6\xf5\xc6\x79\x5a\x6f\x4f\
\xbc\xdd\x54\x05\xa5\xa2\xbe\xfe\xf2\x6b\x24\xe5\x69\xa9\xb5\xa0\
\x84\x97\x17\x2e\x5d\x7b\xf4\xf0\xd3\xc4\xca\xaa\x9a\xb9\xba\xee\
\x1a\x33\x99\x15\x45\xc9\x53\x12\x76\xfd\xb0\x6b\x1f\x77\xcd\xc9\
\x72\x51\x86\x0c\x6c\xdb\xea\x58\x33\x92\xba\x46\x02\x9a\x06\x97\
\xb4\xc4\xf7\xe4\x62\xc2\x30\xa8\x4e\x25\x6a\x33\xf5\x00\xf2\x81\
\x81\x13\x53\xa5\x99\xe6\x8e\x11\x1d\xb0\xc1\x3a\xf9\xe8\x71\xd7\
\x01\x9e\x99\x52\x18\xb2\xf4\xf9\x50\x46\x1d\x7d\x00\xfc\x5d\x9f\
\x9d\xdd\xbe\x75\xd8\x9b\xe0\x1a\x7a\xb2\xee\xb7\xc6\x86\x0d\x71\
\x76\xfd\xf4\xa5\x6a\x36\x9b\x9e\x9e\xba\x00\x74\x4d\x16\xd6\x95\
\x80\xcb\xd8\xc4\x98\x8b\xe0\x9d\x28\xe8\x93\xd1\x83\x0c\x4f\x0f\
\xa7\x75\xf1\x28\x9e\x44\x70\x10\x86\x7e\x8d\xc1\x4e\xb8\x21\x1d\
\xb1\x0a\x1b\x6f\x88\x46\xf6\xf9\x78\x00\x40\xed\xff\xc9\x01\x19\
\x35\x6a\xd4\xa8\x2f\xa8\x5d\xeb\xa0\x65\x43\xc2\xc3\xb5\x9d\x9f\
\x01\x15\x3a\xac\x20\xb3\x80\x8c\x78\x53\xaf\xab\x6b\x1b\x45\x30\
\x76\xb3\xde\xb0\xec\x5c\x07\x8e\x26\x6e\xbd\xff\xf6\xb3\x57\x5f\
\x92\x6a\x4a\x3c\xcb\x4a\xe3\x7d\x45\x93\x54\x16\x8d\x27\xd3\x83\
\x1b\x9c\x81\x61\x36\x94\xeb\x59\x19\x7a\x73\xfa\x68\xd5\x92\xcc\
\xea\x93\xe6\xd2\xfe\x82\x4d\x94\xa5\x52\x70\x7a\xb6\x39\xf7\x2e\
\x4b\xc9\x67\xfb\x85\x92\x6c\x46\x49\x8a\x21\x13\xab\x14\xd0\x2b\
\x6f\xfb\x30\xe1\xd4\x11\x58\x26\xb8\xc0\x90\xbd\xa8\x87\xc0\xfc\
\x3c\x20\x6c\x88\x51\x60\x4e\x12\xe6\x7b\xe0\xdd\x3e\xac\x77\x03\
\xd8\x06\x94\xb7\x47\x87\x47\x39\x92\xe8\x90\xe3\xb5\xd4\xce\xb0\
\x96\xc8\xd5\x59\x62\xbc\x8c\x89\x63\xbb\x0b\xfc\x42\xe4\xce\x26\
\x4a\x37\xb1\x2b\x4c\xef\x15\xd7\x58\x0a\x0d\xf6\x9c\xbd\xcc\x40\
\xe7\x6e\xd8\xcf\x80\x67\xe7\x61\xe0\x17\x02\x33\x25\x22\x65\xfe\
\xa4\xa5\x05\x33\xf1\x70\xab\x25\x62\x3f\x3a\x2d\x86\x4c\x8e\x51\
\xa3\x46\x8d\xda\x41\xbb\x19\x74\x57\xaf\x49\xd8\x82\xaf\x9e\x1f\
\xdf\xe7\xa9\x0b\x5d\x9f\xf3\x83\xce\x93\x44\xb7\x3e\x84\xf5\xa6\
\x91\x8c\x35\x4d\x5b\x68\x91\x42\x16\xc1\xf6\xd3\xed\xa7\x1f\xdf\
\xdc\x5f\xea\xde\x28\x22\xb8\xab\x4f\x39\xf1\x4d\x48\xb3\xe5\x42\
\x4f\x66\xe0\x71\xb2\x77\xc9\x77\x67\xc6\x6c\x9b\x50\xe8\x79\xf4\
\xae\xeb\x7c\xdb\xd7\x12\xef\x16\x76\x00\xde\x6d\x9b\xbc\xa5\x9c\
\x07\xbd\x6d\xf6\x26\x05\xbd\xac\x94\xc8\x82\x4a\x29\x9f\x44\x78\
\xda\x94\x93\x2e\xc1\x16\xc3\x76\xd3\x14\xb3\x3d\xce\xdc\x30\x51\
\x10\x37\x3a\x86\x00\x90\x27\x61\xd0\x78\xf2\x14\x3c\x73\x98\xf5\
\x92\x7c\x00\x32\xc7\xaa\x92\xd0\x0b\x91\xf5\x6c\x76\x72\xbc\x55\
\x5a\x19\x9f\x44\x08\x2c\x67\x17\x49\xdb\x85\x4f\xef\x1e\x1f\x2c\
\xf5\xd5\x2b\x4b\x21\xe9\xe1\x27\xa7\xa5\xe6\x82\x2b\x21\x95\x71\
\x66\xb9\x98\x2c\x97\x80\xf3\x1e\xbb\xcf\x29\x00\xb2\x44\x8e\x4e\
\x43\xe6\x06\x0d\xd8\x20\x43\x86\xe9\x05\x99\xe7\x88\x35\x21\xf4\
\xf3\x73\x18\x35\x6a\xd4\xa8\x2f\xae\xdd\x0c\x9a\xd1\x46\x90\x7a\
\x42\x56\x62\x7d\x2a\xa2\xad\x98\xe7\xb1\x03\xaa\xb4\xc6\x8a\xd0\
\x3d\xb5\x9c\x77\xf5\x76\x5e\x59\xef\xea\x59\x51\xc2\xc5\x3e\xeb\
\xb7\x17\x2e\x94\xc4\x1c\x89\x04\xbe\xe9\x2f\x30\x55\x4d\xe2\xc3\
\xce\x2c\x94\x55\xb2\x03\x0a\x3e\xf7\xf5\xb4\xd4\xa7\x35\xc3\xe8\
\x8c\x50\x27\x5b\x07\x0f\x9c\x2e\x86\x18\x38\x9f\x52\x50\xaa\x04\
\x48\xb5\xce\xf5\x2e\xf7\x3d\x8d\x91\x5c\x3e\x10\xfb\x13\x19\x83\
\x03\x74\x07\x3c\x0d\xc1\x73\x9d\xb5\x92\xc2\x49\xf8\x11\xcc\x97\
\x0a\x2a\xb8\xc6\xf2\x09\x60\xed\x3c\x64\x7a\xe0\x8d\x43\x2a\x44\
\xb2\x0e\x4b\x97\x83\x8d\xb9\x0f\xb8\x7f\x4c\x3c\x81\x6b\x80\xe0\
\xa8\xd0\x00\xcd\x5d\xdf\xce\x0a\xa5\x85\xac\x3b\xec\xbc\x31\x06\
\x16\x1d\x5a\x14\x49\x17\xb0\xd4\x44\x16\x48\x17\xbb\x18\x6b\xca\
\x59\x57\xbb\xf3\xb5\x00\x8f\x9e\xcf\xa7\x5c\x60\x25\x75\x86\x8b\
\x81\x18\x42\x04\xb7\x96\x9f\x0d\x04\x88\x80\xea\x0e\x08\x9a\x0d\
\x1d\xe7\x69\x2c\x83\x1e\x35\x6a\xd4\x8e\xda\xcd\xa0\x65\x68\x6c\
\xdf\xca\x60\x94\x74\x5a\x58\x9a\x02\x20\x2f\x01\x48\x0c\xa4\x92\
\xae\x6f\x0c\x31\xf6\xe2\x54\x77\x2d\xe0\x30\x8d\x3e\x55\x3c\xcd\
\x27\x4d\x4e\xc9\x99\xb3\x1c\xb3\x52\xba\xe2\x6e\x39\xad\x94\xf2\
\xb6\x5b\x4d\x35\xd3\x8a\x38\x0f\x56\xea\xe7\x13\x95\x63\x61\xbb\
\xf3\x21\x56\x4e\x3b\xf0\xc6\x88\xd1\xf9\x5c\x6a\xdc\x25\x20\xc2\
\x1b\xdf\x8b\xb0\xae\x69\xa5\xc8\xbc\xc4\xce\x3f\x2e\x84\xb7\x3d\
\x93\x9a\x53\x8e\x63\xb7\xc8\xc4\x3b\xf0\x6c\x26\xb0\xcf\x3a\x3a\
\x8b\x5e\x0d\xae\xec\x7c\x72\x21\x18\xc2\x80\x7f\xdb\x1c\x38\x49\
\x92\xd3\xce\xd8\xb2\x10\xbd\x69\x05\xac\x12\x94\x28\xe1\x93\x87\
\x0f\x03\x4e\x0b\xd6\x11\x83\xd9\xfc\x9c\xb5\x89\xc8\x5e\xac\x37\
\x89\x15\xc4\x18\x33\x29\x2a\x25\x68\x70\x24\xf3\x64\x7d\x6e\x9a\
\xd8\x6c\x3b\xc1\xd7\xfb\xf3\x59\x35\x63\x42\x74\x9a\xc0\x6a\xa1\
\x81\xa0\xe1\xfd\xe1\x08\x82\x71\xa0\x73\xac\xdf\x26\x96\xf8\x98\
\xc7\x2a\x8e\x51\xa3\x46\xed\xa8\x1d\x5b\xbd\xe9\x69\xe6\x06\x30\
\xd1\x7a\x8a\x31\xf8\x91\x02\xe4\x72\x95\x2a\xb4\x63\xee\x93\x95\
\x39\x48\x91\xcb\xb9\xe4\x9c\x30\x22\x2f\x2c\x0b\x9e\x2d\xa5\x52\
\x4c\x35\xc1\x91\xde\x2c\x87\xaa\xe4\xda\x33\x1e\x43\xed\x8d\xf2\
\x46\x6e\xb6\x2c\xa4\xa2\x98\xe9\x90\x19\xd7\x17\x42\x00\x6a\x9e\
\x57\x9c\x9d\x9c\xdc\x17\x42\x06\x17\x04\x8e\x15\x4c\x54\x14\x99\
\xcb\x3e\xc4\xf5\xb6\x3d\x00\xd0\x55\x02\xbc\xb2\xb7\xc3\x4e\x06\
\x69\x83\xe1\xe0\xce\x40\xd3\x14\x2c\x98\xc1\x17\x47\x72\xcd\x98\
\x83\x04\x66\x1a\xb3\x46\x74\x4e\x74\x68\x11\x04\x0c\x8f\xeb\xae\
\x71\x5e\x52\x96\xba\x6d\x43\xa8\xc4\xa1\x89\x02\xae\x04\x88\x89\
\x18\x59\x07\x6f\x2a\xe9\xde\xac\x58\xce\x66\xa5\x31\xae\xd0\x24\
\x46\xcb\xf9\x34\x7a\x80\xfa\x0c\xd6\x5b\x2a\x16\x73\xc2\x2d\xe6\
\x90\xce\xcf\xeb\xf3\xa6\x10\x98\xc8\x14\x30\xb3\x89\x62\xb1\x87\
\x92\xb2\x2c\x4a\x0d\xcb\x14\x66\x4c\x47\xcc\xe2\x1b\x11\x7a\xd4\
\xa8\x51\x3b\x6a\xc7\x4e\xc2\xaa\x0f\x8a\x98\x62\x81\x71\x9c\x22\
\x93\x0c\x80\xb9\x51\x85\x04\x5e\xdd\x9f\xc4\x79\x25\x63\xc8\x36\
\xf2\xde\x18\x8c\x49\xc2\x08\x4e\xd5\xb6\x59\x89\xc2\x66\xcf\x8a\
\xd9\x87\x37\x1f\x83\xd1\x12\xaa\x7a\xe3\xf7\xf6\x66\xab\xb3\x7e\
\xd3\xba\xbd\xfd\xab\x75\x6d\xc0\xed\x95\xa6\xb3\x6a\xf1\xa4\x5d\
\x7a\x3e\x9d\x70\xbe\x5f\x96\x2a\xf4\xc4\x38\x53\xf7\x6e\x68\xa6\
\xc6\x7b\x7f\x5a\x2b\x13\x22\x53\x55\x22\xd4\x78\x0b\x8e\xa9\x39\
\xe1\x49\x38\xae\x30\xef\x83\xe9\x44\x1c\x3c\x21\x47\x8e\xdf\x32\
\xf7\x91\x1b\x47\x2c\x55\x06\x03\x4b\xa9\x75\x16\x4e\x18\x5b\x4a\
\xb8\x30\xb6\x8f\x18\x2b\x92\xa4\x50\x9c\x97\x29\x83\xb9\x0e\x05\
\x18\x74\xd8\x40\x36\x96\xed\x95\x52\xef\x51\x5e\xb5\x1b\x83\x73\
\x6c\x71\x57\x05\x1b\x07\xa5\x08\x22\x60\xef\xe0\xa6\xed\x6b\xe3\
\xf7\x97\x0b\xdc\xd8\x90\xcc\x82\x0d\xfb\x04\xa6\x0c\x7f\xe0\xba\
\xc6\xdd\x70\xad\x75\xa9\x79\x25\x25\xd3\xe3\x4c\xc2\x51\xa3\x46\
\xed\xa6\xdd\x0c\xda\xd5\x5b\xca\xc0\xcc\x28\x56\x92\xf1\xa9\x7a\
\xe6\xcd\xe5\x6c\xae\x38\x73\xab\xc7\xb7\xde\xf9\xce\x6c\x22\x39\
\x53\x24\x44\x41\x81\x46\x01\x27\x69\x12\xb9\x12\x95\x10\xaa\x08\
\xf1\x2c\x14\x8e\x5d\xc1\x7c\x68\xdb\xf0\xa2\xea\x9e\x54\x2a\x6b\
\xc1\x54\x9e\x16\x42\x08\x1c\xfc\x37\x99\x4c\x83\x4f\x1c\xa8\xbb\
\xdf\x64\xe0\x58\xf0\xf5\x28\xa6\xf3\x3d\x9c\xef\x0d\x3f\x04\x3b\
\x2b\x39\x00\xe9\xd6\x9b\xe0\x30\x61\xb9\x9a\xa4\x2b\x1a\x87\x81\
\x9f\x9d\x9f\xa4\x9c\x42\x18\x30\x39\x73\x87\xe5\x71\x60\xa4\x3c\
\x65\x65\x4c\x5a\x6d\x6b\x4b\x49\x08\x45\xc6\xaa\x8d\x04\x0f\x33\
\x5d\x50\xa1\x45\x8e\xd4\x63\x8d\x9c\x52\x15\x2c\x2d\x8a\x4b\x26\
\x31\xb6\x49\x97\x32\x67\xcf\x81\xd1\xbd\xa1\x54\x4f\x67\xa5\x0b\
\x06\x8e\xa6\x95\xd4\x32\x3b\xf0\xf4\xc0\xee\x7c\xf2\x69\x0a\x99\
\x08\x69\x43\x5a\x2e\x2e\x90\x40\x31\xa0\x14\x87\xb2\x08\x70\x7c\
\xe0\x71\x58\x9d\x7c\x08\xb6\xf7\xae\xf7\x35\x0f\x40\xd6\x2f\xfc\
\x6f\xff\x67\x8f\x1a\x35\xea\xff\x96\x76\x33\xe8\x75\x07\x8e\xd3\
\x4d\x66\x25\xd8\x2a\xcf\xf2\x99\x97\xde\x3a\xfe\xe8\xdf\x8e\x3e\
\xf9\x27\x1a\x2d\x53\x35\x17\x18\xca\x11\x69\x90\x42\xa6\x90\xd0\
\x7f\x7d\xf0\xb3\x6b\xb9\x9a\xc4\xc7\x87\x52\x29\xc6\xdb\xe0\xab\
\x4c\xe7\x52\x80\x81\x1a\x81\xad\xe2\x12\xb8\x54\xc8\x6c\xf2\x5a\
\x65\x47\x9d\x3f\xb8\x70\x15\xdc\xbf\xde\x02\xaa\xce\xc0\x7a\x61\
\x39\xd8\x5f\x5c\xc0\x4e\x6b\xda\x96\x60\xfc\x22\xce\xa7\x05\xa0\
\x34\x18\x2c\x78\xe8\x94\xe7\x10\x5c\xd7\x1a\xe7\x23\x63\x9a\x45\
\xe2\x82\x4b\x84\x25\xcc\x0f\x4d\xe0\xd4\x9b\x3e\x6c\xea\xe4\x7d\
\x65\x71\x8b\x03\xa0\x1e\xb0\x38\x34\xad\xe0\x38\x4f\xc0\xc1\xa2\
\xa0\x64\xf9\x64\xd6\x09\xc0\xb1\xcd\x16\x8b\x2d\x58\x8a\x11\xe0\
\x37\xd4\xbd\xb5\x39\x96\x9a\x96\xfb\xf3\x18\xc2\x9d\xbb\x0f\xb4\
\x66\xb3\x69\x59\x15\x65\xdf\x07\xe3\x70\xbb\x24\x18\xa3\xca\x42\
\x17\x2a\xc1\xbb\xe5\x27\xc3\x02\xc8\x93\xf8\x51\xc9\x31\x3e\x15\
\xe7\xbb\x64\xea\x13\x19\xf9\x79\xd4\xa8\x51\xbb\x6a\x37\x83\xbe\
\xff\xe8\x31\xa3\xea\x74\xdb\xd0\xcc\xfb\x70\xfa\xcc\x37\xd2\xbd\
\xdb\xb7\xb6\x77\x6f\x5e\x5c\x4e\x66\x8b\xab\x27\x47\x2b\x15\xc1\
\x1f\xa3\x97\x95\xb7\x84\x01\x9a\xf6\xf1\xfa\xeb\x6f\xb5\x75\x6b\
\xee\xdc\x4b\xbd\x59\x4e\xe7\x9c\x84\x64\x3a\x2a\x35\xa9\xd4\xfe\
\x94\x5a\x6b\xba\xfa\x4c\xf0\x60\xb1\xa7\x90\x3e\x5e\x9d\x82\xd9\
\xc6\x04\xc6\xa8\xa6\x93\x85\x52\x58\xcc\xdc\x34\xdb\x18\x09\xfc\
\xe3\x54\x28\x5d\x39\x0f\x64\x1e\x4c\x6f\x62\xb6\x11\xcb\x27\x48\
\xd7\x5b\x11\x55\x8e\x0e\x3b\xc2\x71\x88\xf6\x30\x36\x0b\x4b\xde\
\x3c\x4f\x7a\x7f\x5a\x02\xee\x0b\xae\xb6\xc6\x6e\x7b\xd7\x76\xd1\
\xf4\x3e\x53\x9b\x33\xae\x18\x25\x7c\xc3\x7a\x3d\xcf\x60\x6d\x81\
\xb3\x10\x49\x8a\xec\xfb\xc6\x78\x47\x18\x75\x3d\x50\x7b\xca\x87\
\xa6\x77\xd3\xae\x93\xd6\xf4\x93\xe9\xfc\xfa\xb5\xeb\x8b\xbd\x09\
\x97\x72\x3e\xad\x7a\x07\xa7\x1b\x42\x0c\x82\x8b\xcf\xd2\xa0\x53\
\x52\x4a\xe1\x87\x05\xd7\x0f\x18\xbc\x8a\xc5\x7e\x70\x5a\x11\x0e\
\x38\x6a\xd4\xa8\x51\xbb\x68\x37\x83\x2e\x04\xa6\xbd\x21\xf0\xfa\
\xf4\xfc\x0b\x2f\x73\xa5\x5f\x7c\xe3\x97\xdb\x2f\xbd\xb0\x3a\xfc\
\xf0\xb9\x37\x7f\xa3\xbc\x73\xc8\x84\xf8\x8f\xef\x7e\xfb\xeb\xdf\
\xfa\xc3\x93\x7b\x1f\x2f\x2e\x5e\xbc\xfb\xde\x3b\xcb\xab\x2f\x1e\
\x24\x7f\xf3\xe1\x4f\x7f\xf6\xcd\x6f\x3e\x57\xdb\xc9\x7c\xfe\xa3\
\xbf\xfd\x8b\x4b\x5f\xfd\xda\xe2\xf2\x33\x29\x77\x84\xa9\xa3\x07\
\x77\x6e\xbe\xfb\x9d\xdf\xfe\x83\x3f\x7e\xff\xdd\xef\x7f\xf0\xce\
\xb7\xdb\x7e\x2d\xc1\xe6\x9d\x78\x7c\x6a\x5f\x7a\xe5\x1a\x20\x38\
\xf0\x2c\x25\x22\x78\x58\x12\x72\x21\x95\xb7\x61\xc8\x90\x23\x38\
\xad\x1b\xbb\xf8\x38\xf1\x32\x73\x4e\x44\xe6\x92\x30\x12\x30\x7b\
\x99\x08\x60\x6a\xb0\xfc\x8a\x58\xc6\x22\x15\xd4\x18\xda\x6e\x9b\
\x75\x9b\x87\xb8\xe6\xc0\x39\x53\xba\x4c\xde\x6d\xea\x6e\x6f\x52\
\x96\x1a\x63\x46\x45\xa2\x5a\xc2\x1f\x18\xb2\xc0\x9c\x50\xcc\xd4\
\xc0\x48\x25\xbb\xdd\x98\x24\xf0\xee\xa0\x02\xef\x77\xf6\xde\xfd\
\xfb\x45\x79\xc3\xe3\x6e\x35\x53\x98\x1c\x8d\x47\x4c\x43\x0c\x07\
\x26\x28\x7d\x3e\x6c\x16\x7e\x3f\xb4\x16\x62\x4c\xa9\xe0\x92\xca\
\x1d\x87\xd7\x8c\x1a\x35\xea\xff\xbd\x76\x9b\x49\x78\x41\xcb\x65\
\x25\x2e\x16\xfc\xca\x54\xa7\x7a\x85\xfd\xd1\x87\x1f\x9d\xfd\xeb\
\xf7\x78\xdf\xfc\xd7\xdd\xc3\x7b\xb7\x3f\xba\xf2\xa5\xaf\x3c\xb5\
\x2c\xa7\xfb\x8b\x7b\x1f\xfc\x5d\xbd\x3d\x99\x5e\xba\x7a\x74\x78\
\xf8\xf0\xe3\xdb\x9d\x49\x47\xa7\xed\xbb\xdf\xfb\x3e\x5f\x5e\x5f\
\x65\x7d\xf5\x67\xde\xba\xf3\x9f\x3f\x79\xfb\xaf\xfe\xf2\xa7\xef\
\xff\xf3\xb3\x2f\xbf\x75\x72\xba\x3d\x3b\xdd\xdc\xbd\xf3\x69\xd3\
\x86\xde\x90\xf5\x26\xdb\xc4\x3a\x97\x3e\xbe\x7b\xdc\xdb\x34\x44\
\xc2\x05\xa0\x5d\x06\xac\xda\xb7\xc9\xf5\xc1\xb4\x80\xe1\xc4\x74\
\xc1\x34\xf0\x63\xf4\x3d\xf1\x7d\x8e\x26\x25\x3b\x84\x39\x47\x1f\
\x82\x4f\xc4\x66\xc2\x15\xc3\xfa\x0a\x2a\x1c\x7c\x25\x4e\x98\xf0\
\x98\xe4\xcf\x94\x2a\x53\x4a\x38\x40\xa0\xd4\xd8\x81\x38\xcc\x89\
\x1d\x06\x21\x62\xc0\x87\x52\x85\x16\x5a\x71\xa9\xa5\xe4\x02\x16\
\x24\x45\x42\x6f\xbb\x2d\x65\x21\xd2\x2e\xc6\xee\xd6\x47\xb7\x37\
\x5b\x7b\xb2\x69\x2d\xde\x12\x14\x58\xd0\x82\xdb\x31\x3c\x51\x1e\
\x32\x73\x01\x90\x1c\x4b\x3c\x04\x96\xda\x11\x2c\x34\x71\x61\xd8\
\x20\x1f\x35\x6a\xd4\xa8\x1d\xb4\x9b\x41\x17\x00\xcf\xe0\x49\xc6\
\x66\x67\x43\xb7\x19\x66\x5a\xd9\xdc\xaf\x98\x9c\x5f\xb9\x7c\xf9\
\xf2\x53\x2f\xe4\x24\xda\xc6\xb3\x48\xfa\xad\x21\xac\x7c\xf8\xe8\
\xd4\x5b\xdf\xd6\x76\x75\xda\xce\x17\xf3\x2b\xcf\xde\xa0\x3e\x3f\
\x7c\xb0\x7e\xfb\xaf\xff\xe6\xcb\xbf\xf4\xcd\xaf\xfd\xce\xef\xfd\
\xe4\xc7\x37\x85\x28\x26\xd7\x5e\xb5\xcd\xf9\xfd\x3b\xb7\x23\x65\
\xd6\x53\xe3\x58\xf4\x40\xa6\xa9\xef\xdc\xc3\xa3\xb3\xd5\x79\xbd\
\xae\xb7\x4d\xd7\x11\xac\x54\x46\x88\xe5\xd4\xd1\xf4\xb9\xaf\xe6\
\x3c\xd4\x4c\x93\x14\x69\x0c\xa4\xb7\xa4\x73\x7c\xd3\xe4\xe3\x95\
\xd9\x74\xac\xf5\xda\xe6\x32\x50\x09\xce\xaa\xd5\x64\xa8\xd0\xc0\
\x3a\x41\x87\xdb\x12\x28\x78\xc0\xc3\x1f\xe4\xa2\x16\x6c\x52\x68\
\x4c\xaa\x63\x52\x70\x85\x43\x54\x32\x66\x8d\x96\x65\x89\x41\x50\
\x19\xf7\xb0\x9b\x76\xbb\xd9\x9e\x31\x6a\x73\x72\x8c\xa4\xcd\x76\
\x7d\x5a\x5b\x63\xe9\xfd\x7b\xf7\xf1\xa3\x64\x9c\x02\xd3\x33\x41\
\x85\x82\xef\x9d\x35\xeb\xe6\xac\xb3\x5b\x1f\x0c\xd8\xb4\x2e\xc6\
\x5d\xe8\x51\xa3\x46\xed\xa6\x1d\xa7\x7a\x3b\x6e\x02\x70\x62\x95\
\x58\xc1\x8a\x29\x61\x14\x7c\x4d\x08\x72\xe3\xf5\x5f\xdb\x9c\x1d\
\xaf\x6e\xbd\x47\xa9\xd3\xcb\x0a\xe7\x92\x88\x72\xef\xe2\xf5\xb6\
\x35\x0a\x87\x5c\xf1\x97\x7e\xf1\x57\xce\xad\xbc\xf5\x8f\xdf\xf5\
\x39\x5e\x7c\xe1\x95\x6b\xcf\x5c\xfb\xf3\x3f\xfd\x93\x2b\x2f\xbe\
\xa6\x65\x79\xf7\xd6\x0f\x7f\xf3\x5b\xbf\xff\xc1\x7b\xff\x92\x7d\
\xc2\x68\xbb\x40\x42\xe4\xce\x61\xf4\x90\xe9\x43\xdb\xe6\xcd\xc6\
\x1b\x43\x1a\xc3\x4f\xb6\xf1\xe8\xdc\x5a\x52\x9a\x24\x6b\x1b\x2d\
\xe1\x96\x6a\x47\x75\xe2\xf8\x48\x17\xf9\xba\x8b\x67\x1d\x3d\xde\
\xa4\x2e\x57\x44\xed\x9b\x20\xce\x6b\xbf\xed\xa2\x0d\x84\x4b\xb2\
\x37\xa7\x7b\x9a\x96\x9c\x48\x01\x2c\x1e\xad\xb5\x94\x50\x67\xfa\
\x98\x53\x6f\x7d\xf4\x36\x47\x5f\x16\x95\x2e\xca\xbd\xd9\x62\x5a\
\xcd\x81\xb2\xf7\xf7\x16\x00\xda\xc6\x18\xca\xc8\x6c\x36\x99\x4e\
\x27\x55\xa9\x5d\xbf\xa5\x79\x5b\x55\x3d\x17\x0e\xa7\x1f\x66\xec\
\x1c\xcf\x04\x16\x24\xc7\x64\x54\x05\x65\x2a\x31\x1d\x65\xc9\xa8\
\xe0\xbd\x8d\x06\x3c\xdc\x19\x1f\xc7\x3d\xe8\x51\xa3\x46\xed\xa6\
\xdd\x36\x46\xef\x1c\xbb\xe4\x5d\x59\x4d\xf1\x0e\xdc\x54\xbe\x6a\
\xc2\xf9\x23\x0b\x2c\xbb\x3e\x7e\x74\xe5\xe5\x9f\x3f\x7d\xbc\x4a\
\xae\x9f\xef\x5d\x70\x6d\xef\xad\x69\xce\x4f\x66\x8b\x03\x06\xe8\
\x58\x54\x27\x8f\xee\xbd\xf1\xea\x1b\x5f\xfd\xfa\xaf\xbb\xf6\x64\
\x71\xe9\xe0\xf9\xe7\xbf\x7c\xe9\xe0\xfa\xdd\x0f\x7f\xdc\x9d\x77\
\xff\xfe\x83\x0f\xbe\x7e\xf1\xe9\xdb\xb7\x3f\xc6\xb2\xe9\xcc\x28\
\xc7\x12\xe4\x84\x9b\xbe\x28\x4e\x62\x48\xb1\xed\x03\x17\x20\x4e\
\x2c\x33\x2b\x2f\x25\x3c\x4b\x38\xc7\xd6\xad\xad\x74\xd6\x42\x51\
\x81\x0d\xd6\x05\x2f\x27\x42\x80\xf3\xe6\xe4\xb3\x14\xc6\x64\xa6\
\x58\x8a\x26\x87\xcc\xf1\x46\x21\x9b\x4d\xb8\x0b\xac\x0f\x18\x0f\
\x0a\x47\xa3\xc3\xfc\x41\xce\x12\x23\xd4\x00\x8f\xa7\xe8\x8d\x2b\
\x15\x5c\x04\x6c\x01\xb0\x33\x1f\x06\xd6\x62\x26\x1e\x06\x3e\x07\
\x8f\x55\xce\xc3\x54\x59\xe2\x2d\xf6\xa4\x0c\xd5\x86\x3c\xe4\x2c\
\x92\x80\x03\x68\xf8\x58\x12\x19\x06\xc9\x0e\xaf\xe0\x9a\x49\xae\
\xab\xa1\x18\x9b\x4b\xca\x46\x83\x1e\x35\x6a\xd4\x6e\xa2\xc3\x14\
\xbd\x2f\xaa\x3f\xfb\xa3\xdf\xcd\x98\x6b\x21\x72\x0a\x54\x30\x35\
\xbf\x78\xe3\x95\xd7\xce\xef\xfc\x28\x07\xf7\x73\xbf\xfa\x5b\xb7\
\x7f\xf0\xf7\x9b\xf5\x09\xf7\x7d\x10\xb3\xb0\x59\x37\x41\x17\xba\
\x88\x21\x28\x2d\xfb\x76\xc5\x27\x8b\x6c\x5c\x08\xb2\x33\x21\x24\
\xc6\x0a\xdd\x9b\xf6\xe2\x64\xf6\xd4\x2f\xbc\x0e\x4e\xf9\xc3\x77\
\xff\xa1\xd4\x7a\xdb\x34\x60\x9d\x3e\x00\x45\x27\x0d\x2f\x53\x92\
\x09\x82\xa9\x70\xd1\x09\x21\xb5\x92\xe0\xb3\x85\x56\x93\x0a\x21\
\xd8\x99\x7c\xde\xe1\xc6\xae\xe0\xe2\xbf\xd9\xbb\xb6\x5e\xb9\xca\
\xf3\xfc\x9d\xbf\xb5\xd6\xcc\xec\xbd\xbd\x31\xde\x06\xdb\x10\xc0\
\x80\xb0\x69\xc1\x04\x95\x5a\x90\xa6\x81\x00\xa9\x94\x36\x51\x29\
\x2d\xea\x41\x95\x9a\x5c\xf5\xa6\xcd\x5f\xa8\xd4\xab\xaa\x51\xa5\
\x5e\xf4\xb2\x6a\x14\xd2\x0a\xe5\x70\x11\x85\x00\x22\x84\x70\x08\
\x08\x12\x40\x1c\x62\x28\x86\x60\x8c\x6d\x8c\x0f\xdb\x7b\xf6\xcc\
\xac\xb5\xbe\x63\xdf\xe7\xdb\x7f\xa0\x73\x85\x90\xd6\x23\x18\x8d\
\xf7\xcc\xac\x59\x33\x23\x3d\xdf\xb3\x9e\xef\x7d\x9f\xd7\x48\x61\
\xad\x21\x28\xe9\x39\x8f\x42\x88\xed\xed\x2e\x95\x1d\x44\x22\xd4\
\x18\x45\x08\x3e\xc4\x28\xb5\x5c\xb4\xbe\x75\xc9\x61\xf8\xa1\x92\
\x82\xa4\xba\x57\x42\x4c\x9a\x4a\x6b\xe1\x93\x93\x49\xae\x8c\x56\
\xbc\x8b\x3e\x78\x9f\x53\x5d\x57\xce\xfb\xc4\x59\xdb\xb9\x14\xb3\
\x10\xb2\x4c\x17\x44\x5d\x5e\xd3\xd4\x65\x8a\xec\x44\x9a\x8a\xce\
\xed\xd0\xa1\x2b\x57\x26\x15\xcf\xc5\xc7\x28\xc3\xaf\x60\x92\x20\
\xe1\x94\x9e\x2f\xca\x3c\xc3\xfe\xc8\x1f\xfd\xd5\xa7\xfd\x73\x0f\
\x18\x30\xe0\xb3\x84\xe5\x2c\x0e\x1f\x59\x87\x94\x8b\x45\x10\x61\
\x11\x5a\x26\xc2\x5d\x7f\xfc\xc0\x66\xaf\x2f\xe6\xf1\x75\xb7\xfd\
\xe1\x85\x45\x3c\x3f\xbd\xb4\xe9\xe2\x7c\xfb\xdc\x76\xb6\x32\xb0\
\xad\xc5\x8c\xb5\x7e\x3a\x9b\xb7\x1d\x9f\x9d\xbf\xd0\xb5\x31\x44\
\x3f\x5b\xf4\x5b\xdd\x6c\x6b\xeb\x62\x68\xfd\x27\x97\x16\xb7\x1d\
\xbd\x73\xfb\xe2\xb9\x06\xa5\x69\xd2\x58\x66\x2b\x4e\xb7\x44\xcd\
\xa4\x61\x49\x0b\xcf\x3b\xe4\x36\x67\x8c\x09\x8f\x9e\x88\x93\xf4\
\x70\xd7\x39\x47\x6a\x94\xd7\x95\x59\x69\xc4\xa8\x56\x24\xa7\xbd\
\x60\xd3\xae\xbd\xb4\x98\x9f\xb9\xb8\x38\x7d\xa1\xff\x64\x2b\x92\
\xc6\xe5\xcc\x90\x50\x76\x24\xfa\x89\x48\xb5\x25\x6a\x6d\xe7\xbd\
\x88\xb9\xca\x79\xa4\x30\x02\x8b\x0e\xa2\x75\x85\xb1\x28\x39\x72\
\x81\xc1\xde\x59\xb0\x59\xd7\xce\x5c\x97\x04\x2f\xd6\x33\x47\xd1\
\x8a\xb2\xe3\x66\x42\xb7\x4a\xea\xf1\x68\x22\x84\xa6\x13\xe9\xfb\
\xb2\xef\x97\x3b\x85\x58\x3d\xa6\x84\xb7\x7c\x51\x8b\xbe\x56\xae\
\x96\xce\xf0\x85\xca\x5b\x22\x5e\x52\x79\xca\xe2\x54\xa4\x99\xe2\
\x8b\x4f\xfb\xb7\x1e\x30\x60\xc0\x67\x0c\x4b\x4e\x54\xf1\xc6\x43\
\x97\xa2\xe0\x38\x73\x9d\xa7\x44\x7e\x61\x6b\x1e\xb2\xe1\x24\x30\
\x2f\x4d\xbb\xe9\xbc\x22\x29\xdb\x08\xcc\xf2\xbb\x24\xac\x56\x7c\
\xc6\x03\xb1\x5c\x90\x75\x9b\x3a\x36\xeb\x0b\x27\x4e\xa4\xa4\x03\
\x04\x97\x48\xbc\xa6\x8b\x67\xcf\x85\x59\xd7\xc7\xe8\xe7\x73\x45\
\x62\x94\x47\x8d\xf8\x7d\x1d\x52\x5a\x38\x92\xb4\x08\xc2\x37\xb5\
\x41\x1a\x34\x82\x8c\x7a\x5d\x19\xe7\xfa\x10\x9c\x94\xd2\x68\x93\
\x43\x90\x39\x74\x51\x92\xb0\xb5\x0a\xbb\x8b\x0e\xe5\xd1\xa4\x9a\
\xd3\x3c\x25\x92\xd6\x91\x05\x9d\x32\xc2\x95\x88\x7f\xb9\xc5\x5c\
\x15\x96\x24\x23\xed\xdc\x33\xef\x38\xc9\x6f\x8b\xf1\xe0\xb1\x8b\
\x65\x2c\x37\x1f\x35\x35\xdc\x0c\xef\x89\xa0\x7d\x8e\xf0\x55\x52\
\xa0\xfb\xc6\x8e\x3c\x69\xfb\x10\x05\xdf\xc9\xc6\x43\xc1\xa1\x47\
\xc5\xc6\x6c\xb2\x6b\x3d\x44\xfa\x9c\x22\x24\x3a\x12\x1a\x0a\xe9\
\xbb\x4a\x48\x22\xa5\xff\x05\x1d\x00\x0d\xe5\xc3\xd0\xd8\x01\x03\
\x06\x2c\x89\x25\x8b\x73\x45\xa1\x19\x21\x15\xd7\x89\xcb\xc3\x87\
\x8f\x10\x1b\x3f\xf0\x97\x7f\x13\x30\x86\xaf\x0d\x21\x1d\x39\x7a\
\xf7\x91\xbb\xbe\xb0\x98\x76\x4f\xfe\xf0\x7f\x1e\xfc\xeb\x6f\xcc\
\xa6\xdb\x67\x4f\x9f\x54\x46\xaf\xad\xef\xb6\x23\xfb\xc3\xef\x7e\
\xef\x9a\x03\x9f\xfb\xfc\x3d\x5f\xea\x36\xb7\x9f\x7c\xfc\x27\x5f\
\x7d\xf0\xa1\x76\xfb\xd2\xfb\xc7\x4f\x7c\xf8\xf6\xff\x72\x15\xad\
\x94\x21\x26\xbe\x33\xb3\x2a\x44\xab\x89\x3a\x25\xca\x8f\xa5\x22\
\x6a\xc6\x7c\x3f\xc6\x6a\x8b\xc1\x80\x4a\x69\x0f\x0f\xc2\x7b\x04\
\x69\xa0\x31\xc4\x46\x22\x65\x29\x7a\x51\x1b\x69\x0d\xaa\x8f\x49\
\xdd\x7a\x8f\xfd\xbb\xd6\x45\x3a\x59\xe2\x7d\x25\xa4\xe4\xcc\x6a\
\x43\xdc\x9a\x13\xab\x12\xc3\x28\x14\x01\x8d\x4c\xd4\x1c\xd1\x09\
\x88\xdb\xbe\x9b\x6a\x43\xe4\x8b\xf6\x3f\x5a\x46\x44\x88\x92\xf8\
\x9e\x88\x98\x11\x99\x4b\x38\x1e\x8c\xd8\xd6\xd1\x42\xc3\xe9\x8b\
\xe0\x8c\x67\x3f\xdf\x9e\x32\xb6\x77\x65\x7d\x8d\x23\xca\x0e\x4e\
\x08\xc6\x14\x32\x3a\x79\x46\xdf\x09\xfc\xeb\x40\xdf\xd0\x72\x17\
\x2b\x03\x06\x0c\x18\xb0\x1c\x6b\x10\xc9\x34\x4d\xa5\x0d\xf1\x8f\
\xa7\xff\xde\x7e\xe7\xad\xae\x9b\x3d\xf1\xe3\x87\x9f\x7d\xfc\x07\
\xa4\x24\x4d\x33\xbe\xec\xb2\xcb\x1e\xf9\xaf\xff\x58\xdf\x7d\x79\
\x9f\xa5\xa9\xea\xe7\x9e\x7d\xe6\x8a\x7d\xfb\x7c\x88\x27\x4e\x9c\
\xf4\x2e\x5e\x71\xcd\x35\x93\xbd\x7b\xbf\xff\xf0\xc3\x93\xcb\xf7\
\xb8\x18\x57\xd7\xd6\x7e\xf9\xfc\xf3\x37\xde\x78\x83\x51\xa2\x1a\
\x59\x12\xa6\x24\x71\x41\xa3\x30\x70\x19\x31\xf1\x58\xf1\x4a\x44\
\x53\x76\xe2\x0c\x17\x16\x81\xf9\xc4\xd6\x62\x36\x6b\x1d\x0a\xe5\
\x72\xdb\xb9\x45\xe7\xb6\xe7\xed\x62\x11\x82\x27\x6e\xed\xe8\xaf\
\x8c\x49\x7a\x2f\x0c\x7f\x95\xda\x18\x6d\xad\x71\x1d\xeb\x48\xc0\
\x2f\x62\xdb\x87\x45\x8f\x07\xe9\x94\x52\x8a\xd8\x84\x14\x25\xd3\
\x09\x63\xaa\x58\xdf\x91\xfa\xce\xde\xb1\x4b\xd3\xfe\xd2\x94\x0e\
\x1b\x23\x93\x9d\xf7\xce\x27\xba\x47\x62\x79\x3e\xef\xfa\xce\xd3\
\x5b\x63\xb4\x0a\x3a\x26\x17\xc4\xfb\x82\xd1\x45\x85\x3f\x73\x66\
\x6b\xde\x1b\xa6\x6a\x65\x0c\xe6\x6b\xa1\x02\x9a\x0b\x89\x2d\x46\
\xfa\x5c\xa4\xfa\x85\x1e\xe2\x46\x07\x0c\x18\xb0\x1c\x96\x53\xd0\
\x24\x55\x19\x06\xa7\x0a\x81\xf4\x09\x4e\x9c\x23\x64\x66\xa1\x55\
\x98\xa9\xca\x0f\xdd\xf2\xbb\x99\x44\xf0\xe6\x09\x9f\xbb\x5d\xeb\
\x35\x11\xe0\xd9\x13\x6f\x3f\xf7\xc4\x85\x83\x37\xdf\xd1\x4f\x37\
\x5f\xfa\xe9\x6f\xf6\x1d\xfa\x1d\x22\xc5\xe9\xc9\x13\x2c\x87\x8d\
\xd5\x95\xe8\xd3\xa5\x53\x27\xdf\x7a\xf1\x79\x63\x89\x7e\x5d\xbd\
\x62\x7d\xcf\x02\x89\x67\x16\x6b\xe4\x77\x12\xc5\x12\x1b\xf3\x18\
\x9c\xd1\x3a\xa2\x9d\x5a\xb7\x44\xa2\x24\x67\x7b\x74\x9b\x48\xad\
\xb2\x51\x99\x14\x2c\xa9\x56\xcf\xda\xae\xd7\x5a\xf0\xed\x6c\x34\
\x53\xca\xf4\x3e\x10\x51\x12\x65\xc3\x7f\x96\xaa\x5d\x74\x11\x2d\
\x90\x88\xcb\xe8\x68\x2d\x51\x3a\x64\x89\x68\xba\x00\x0d\x5e\x4a\
\x35\x78\x66\x22\x22\x52\x23\xfa\x40\x57\x0a\xc2\x43\x0d\x7b\x7a\
\x45\x1b\x3d\xcb\x44\xe5\x65\x8e\x8b\x10\xc8\xfd\x40\xfe\xb4\x74\
\xbd\x8f\x3a\x90\xc0\x0f\xa1\x3f\x7f\x7e\xfb\xcd\x37\x4f\x1d\xbc\
\xe1\xca\xc9\x2a\x29\x74\x4c\x59\x41\x26\x87\xe0\xa6\xb1\x99\x73\
\x3a\xb8\x42\xe0\xf4\x80\x01\x03\x06\x2c\x81\xe5\x08\xba\xd4\x2f\
\x20\x36\x6e\xd1\x76\xc6\xd4\x08\x86\x23\x5d\xdb\x4c\xcc\x5a\xa3\
\xb4\x21\xe2\xda\xb7\xb1\x7f\x7c\xf9\x55\x98\xa0\x9a\x9c\x24\x6a\
\xf2\xed\xa9\xf7\x8f\x1f\xbe\xeb\x6e\x75\xc9\x7e\xf0\xf6\xb1\xc9\
\xee\xdd\x57\x1d\xba\xb9\xd9\xbd\x8b\x68\x9d\x54\xa9\xd4\x3c\xf5\
\x5b\xc7\x8f\xbd\xe6\x79\x26\xae\xed\x3d\x77\x30\x6b\x79\x2c\xb4\
\x49\x42\x74\x3e\xeb\x49\x0f\x5b\x5d\x05\xd2\xeb\x49\xf8\x44\xba\
\x55\x96\x24\x7f\xac\x08\xf3\xd6\x9b\xc8\xea\x66\x14\x7c\x4f\xef\
\x1e\x45\x72\x31\x69\xa5\x5b\x9f\xdd\xbc\x8d\x45\x1d\xa3\x8e\x0e\
\xe5\x74\xa9\xae\xeb\xe0\x63\x09\x3b\x45\xfb\xc9\xbc\x8f\x01\x4d\
\x80\xbc\x95\xa9\x4c\xc5\xe2\xda\xe8\x48\x17\x05\x70\x54\x48\xf7\
\x7a\x46\x4c\x9d\x62\xe8\x4b\x89\x4b\x49\x19\x8d\x9c\x7b\x1f\xe8\
\xe5\x42\xea\xaa\x96\xc8\x3e\x15\xc4\xc5\xe8\x6d\x11\x02\x35\x80\
\xbd\xf7\x1f\x9e\xbc\xb0\xc7\xad\xed\xbd\x72\x15\xa9\x1c\x20\xf1\
\x72\x70\x5e\x9a\xcf\x87\x99\xb1\x03\x06\x0c\x58\x12\xcb\x11\x74\
\xdb\xce\x94\x96\xe3\x71\xa5\xb5\xa2\x2b\xfd\x46\xf1\x17\x9f\x7c\
\x74\x7e\xe1\x9c\xef\xed\x2b\xcf\x3c\x76\xfc\xd7\x2f\x37\x32\xdd\
\x71\xf4\x8b\x3f\x7b\xe4\x3f\x73\xdb\xbd\xf5\xfc\xcf\x2a\x91\xb2\
\x66\xdb\x67\xde\xdb\xfc\xe8\xe4\xfa\xd8\x9e\x7a\xe7\xb5\x8d\xbd\
\x7b\xef\xbd\xef\xee\x9f\xff\xe8\xbf\xa5\x6f\x8f\xbd\xfc\x0b\x9b\
\xbc\xcf\x25\x49\xae\x67\x85\xff\x53\xe6\xa8\xbb\x48\x1a\x23\x5f\
\x89\x9d\x03\xc9\x58\xdf\x19\x25\x2d\xbd\xb1\xd0\xca\x28\x62\x49\
\xe2\x3e\x21\xa5\x8f\x41\xc2\xee\x95\xa6\xd6\xa8\x0e\x89\xd9\xd4\
\x35\x44\xb1\xa6\x07\x89\x96\x25\x88\x39\x21\x45\x3a\x26\xf8\x12\
\x46\x12\xad\x2b\x22\xde\x18\xb2\xeb\x9d\x28\x15\xcb\xac\x6c\xe8\
\xc1\x83\x0e\x18\xff\xcd\x31\xfb\x95\x1b\x29\x48\x57\x7b\xfa\x4b\
\x48\x06\xfb\x83\xba\xef\x83\x28\x33\x5c\xe8\x0d\x31\x8b\xb6\xf8\
\x2d\x4c\x8b\x9d\x3e\x43\xae\xab\xbd\xfb\xaf\xde\xb8\x6a\x43\xd7\
\x74\x9a\x55\x44\xa7\xb7\x0b\xc5\xad\x47\xc1\x5d\xe2\x89\x08\x3b\
\x0d\xad\xde\x03\x06\x0c\x58\x0e\xcb\xd5\x41\xff\xfb\xb7\x1e\x44\
\x42\x90\x21\x9a\x42\x2a\x3f\x4a\x8c\x49\xcc\xfa\x3e\x89\x86\x54\
\xaf\x8e\x2a\xba\xb9\xd4\x56\x25\x37\xe3\x8d\x65\x59\x21\x73\x33\
\xf3\x10\x25\x8b\x2a\xf3\x45\xc8\x5d\xe4\x10\xc0\x22\x08\x24\x26\
\x23\x72\xd9\x65\x19\x4a\xa5\x5b\x61\xcc\x90\x18\x5a\xa5\x89\x99\
\x49\xda\xba\x1c\x35\x92\x2c\xd0\x2e\x92\x42\xa8\xaa\x8a\xde\x18\
\xf5\x6d\x99\x88\x8f\x13\x41\x67\x6c\xf1\x31\xb1\x93\xa5\x8f\x10\
\x7e\x52\xcc\x02\x27\x57\x82\x99\x4b\xc3\x0b\xe9\x60\xf8\x2f\x44\
\xd0\x12\x81\x73\xa4\x6c\xe9\x0f\x24\x76\x17\x44\xac\xf4\x68\xd3\
\xd4\xa5\x5d\x9c\x29\xa9\x63\x42\x06\x93\x14\x92\x84\xb2\x73\x3e\
\x97\x78\xba\xba\xae\xe8\x83\x77\x6d\xa7\xa1\x92\x31\x7f\xd0\x58\
\x43\x87\x18\xd5\x35\x9d\x45\xdd\xd4\x30\x4d\x84\x3c\x70\xf0\x86\
\xfd\xd7\xed\x57\x15\xbd\xa4\x66\x38\x3d\xbf\x13\x3b\x8a\x70\x3d\
\xfa\x02\xe8\x5e\x3f\xff\xdc\x2d\xf7\x7d\xda\x3f\xf7\x80\x01\x03\
\x3e\x4b\x58\x72\x26\xa1\x24\xf1\x98\x73\xdf\x11\x8b\xd6\x9a\xb8\
\xca\x7c\xe1\x6b\xdf\x5c\x3f\x70\x7d\xa5\x64\xf2\xdc\x79\xff\xf6\
\x1b\xcf\xbd\xfe\xf3\xc7\xf7\xed\x3f\x78\xcb\xbd\x0f\xac\xac\xae\
\x48\x69\x58\xcc\x1f\x9e\x78\xff\x17\x3f\xfa\xde\xf4\xfc\x69\xc6\
\xea\x3d\x57\xdf\x74\xfb\xfd\xf7\xae\x6d\xec\x22\xa6\xfc\xe8\x9d\
\xf7\x9e\xfe\xf1\xf7\x65\xee\x44\x29\x78\x40\xde\x28\x94\x31\x02\
\x49\x49\xb7\x5a\x5d\x86\x50\xed\x30\x2c\x17\x55\x3d\x52\xd2\x14\
\xe7\x19\xee\x03\x9c\x06\x19\x19\x1e\xc7\x90\xaa\xaa\xd2\x3b\x2b\
\x8d\x2c\x09\x9f\x0c\x71\x72\x28\xcf\x20\x8d\x1c\x22\x52\x32\xea\
\xc6\x16\xbe\x8d\xf3\x59\xab\xad\xab\xad\xa8\x2b\xbb\x58\xf4\x06\
\x8d\x8b\x88\x63\xd2\x5a\xf4\x3e\xa4\x88\xad\x45\x45\x6c\xad\x35\
\xcc\x96\x14\x8d\xae\x89\x97\x69\xe1\xa8\x6b\x4b\x2b\x04\xc9\x7a\
\x52\xcd\x74\x3b\x99\x4c\x3a\xd7\x92\xa4\x86\xd7\x81\xd7\xce\xda\
\xc5\xb6\x89\x9a\xb1\x58\x42\x39\xe8\x29\x11\x25\x1e\x98\x76\x95\
\x89\xc1\xf5\xa0\xa0\x07\x0c\x18\xb0\x24\x96\x23\x68\xc9\x93\x55\
\x59\x5a\xd4\xc3\xf9\xc4\x42\xef\xb5\x19\x4d\xd6\xaf\x78\xf4\x3b\
\xff\x72\xe6\xb7\xc7\x8e\xdc\xf9\xf5\x3b\xee\xf9\xf3\x4a\xdb\xd7\
\x5f\x7e\xee\xca\xeb\x0f\x7f\x7c\xea\x83\xef\xfc\xeb\x3f\x5d\xb9\
\xb1\xef\x4f\xbf\xf1\xf7\x5f\xfb\xe6\xb7\xbe\xfb\x6f\xff\xbc\xb1\
\xef\xaa\x87\xfe\xe1\x1f\x8f\x1f\x7b\xed\x91\x6f\x7f\xfb\xda\xdf\
\xbb\xf3\x4f\x1e\xfa\xdb\xd9\xd6\xf4\x8d\x67\x7e\xa2\x75\xf1\x69\
\xb9\xf6\x41\x91\x2e\xd6\xd8\xd6\xe3\x22\x91\xe8\x06\xb9\x72\x85\
\xba\x63\x02\x86\xc8\x22\xee\x34\x73\x96\x88\x19\x51\x62\x2c\x30\
\x66\x90\x38\xda\x2a\xec\xe3\xe1\x0c\x55\x82\xe3\x2b\xc0\x8d\x90\
\xd1\x42\x40\x47\xb3\x88\x41\x2c\x31\xcc\x17\x5d\xca\x0e\x25\xd3\
\x74\xfa\x39\x40\x3b\xb3\x54\x12\x97\xe8\x28\x44\xac\x78\xbe\xd2\
\xa2\x34\x0d\x66\xe7\x51\xae\xd7\x75\x2d\x31\x32\xa7\x4f\x2e\x72\
\x1f\x1d\x3d\xd9\x25\xb8\x32\x9d\xef\x7c\xe8\x8a\x25\xcd\x34\x57\
\xf3\xe9\xec\xec\xe9\xf3\xd6\xd2\x27\xd1\x28\xe0\xe0\x70\x68\x04\
\x72\xf8\x60\x72\x30\x04\x96\x0c\xad\xde\x03\x06\x0c\x58\x0e\xcb\
\x11\x34\x09\x56\xa2\xb9\x3e\xf0\x2e\x88\x10\x05\x49\xcf\xcd\xcd\
\xad\x2b\x98\xdb\x3a\x73\xa6\xbb\xd4\xbe\xf1\xea\x4b\x9f\xbf\xe7\
\x2b\xfb\xaf\xbf\xed\xe9\xc7\x9f\x60\xc9\x77\xdb\x73\xdb\x77\x5b\
\xa7\x8e\x1f\x7b\xfd\x57\xb7\xdf\xf9\xe5\x6b\x0e\xdd\x7e\xf4\xde\
\xfb\xb3\xa8\x7e\xfd\xd8\x53\x24\x70\xaf\xde\x7f\x00\xd4\x58\xe9\
\xa6\xa9\x2d\x82\x34\x22\x31\x34\x71\xa2\x4f\xa8\x25\x66\x29\x19\
\xe2\x60\xe4\xf5\xf3\x52\x56\xcc\xa4\x04\xed\x72\xd0\x2e\x08\xd6\
\x68\x8e\xc9\x52\x60\x58\x48\x67\x43\x54\xcd\x30\x45\x05\xd5\x24\
\x70\x95\xb3\xd1\xa2\x78\x17\xa4\x5b\xe9\x25\x0c\x23\x54\x84\x3c\
\xe5\xe9\xf3\x82\x7f\x39\x87\x0b\x42\xba\x9b\x78\x1c\xc7\xcb\xb9\
\xae\x95\xf3\x74\x4c\x92\xd2\x3c\x84\x9c\x5c\x6a\x2a\x8d\x08\x0d\
\xc1\x9b\xc6\x8a\x79\xa8\x9a\x5a\x6b\x38\xe0\x24\xae\xc1\xe3\x4a\
\x5a\xdb\xc4\xe8\x10\x35\x8a\x62\x3d\xae\x69\xe1\x22\x5e\xcf\x58\
\x13\x84\x44\x03\xe2\xce\x97\xc6\x33\x47\xf5\x60\x1c\x14\xf4\x80\
\x01\x03\x96\xc3\x92\x0a\x5a\x5b\xe2\x54\x12\x85\xe8\xca\x0b\x02\
\xbb\x79\xc9\x8b\xac\x12\xe6\xf5\x8d\xf7\x5f\x73\x88\x31\xb3\x79\
\xee\x63\xa2\x4b\x62\xba\x1c\x53\x3d\x19\x83\xc2\x88\x61\xb9\x58\
\xbd\x7c\xef\xc6\x55\x07\xa6\xe7\xcf\xcf\xda\x6d\xa1\xcd\x3b\xaf\
\xbc\xba\x79\x61\xfa\xea\x0b\xcf\x72\xcf\x66\x9e\x97\xba\x34\x0c\
\x8c\xe2\x1c\xae\x01\x69\xe4\x80\x30\x38\xc5\x89\x72\xe1\x59\x2b\
\x38\x1e\x60\x3d\xa2\x59\x91\x3c\x9b\x10\x19\x66\x3a\x7d\x21\xa5\
\x76\xbe\x53\x4c\x64\x4c\xd3\x16\x85\x9e\xb3\x51\xf0\xbe\x73\x19\
\x7c\x45\xff\x84\x34\x46\xfd\x86\x32\x68\x63\x09\xc6\x4a\xdf\xf7\
\x06\x35\x73\x28\x9a\xae\x2a\xf4\x2f\xd2\x2d\xc6\xb1\xc0\x3e\x16\
\x21\x45\xe7\x64\xdf\x65\x74\x0b\x6a\x3a\x2a\xda\xc6\x6b\xc9\x94\
\x11\x74\x16\x5a\x21\x70\x83\x63\xea\xac\xc1\x2e\xa3\x64\xa6\x32\
\x76\x54\x37\x8d\xd2\xb6\x24\x30\xc1\xd9\xc0\x91\x00\xdc\x63\x65\
\xf1\x19\x1a\x55\x06\x0c\x18\xb0\x1c\x96\xec\x24\x54\xa3\x54\xa6\
\x3a\x91\x62\xac\x2b\x55\x55\x60\x5e\xfa\x73\x33\x9e\xdc\xf9\xd5\
\xbf\xd8\x38\x70\xfd\x7b\xaf\xbe\xf8\xd2\x53\x8f\xd6\x55\x85\x22\
\x33\x9e\x0e\xde\x76\xc7\xea\xfa\x9e\x9b\x6e\x3b\x1a\xfc\x3c\xf1\
\x5e\x9a\x6a\x6b\x6b\x93\x08\x56\x4e\xd6\xa4\xe5\x1f\x9f\x3d\x35\
\x5e\x5b\x9b\x5d\x70\x30\x00\x12\xfc\xdf\x42\xe5\x3c\x16\x37\x19\
\xed\x23\x24\x45\x63\x92\xb0\x56\x58\xa5\xe8\x1d\x8b\xbb\x4b\x32\
\x57\x70\x6b\x88\x75\x35\x42\x8d\x42\x42\xff\x0a\x51\xb0\xa5\x05\
\xc3\x69\x78\xd0\xb0\xae\x41\xf3\xf4\xd4\x32\xe1\xa4\xb4\x0a\xa2\
\xf0\x6d\xb5\xb6\x53\x9b\x85\xca\x35\xc9\xe7\x5a\x96\xf9\xae\xa9\
\xae\x99\x36\xa2\xae\xa5\x77\x50\xd0\x38\xa0\xce\x38\x49\x1e\x0d\
\x31\xb2\x25\xb5\x8c\x16\xc4\xd5\x15\xa3\x6c\xed\xd1\xb8\x58\xd1\
\xfa\xe4\x9c\xd7\x86\x8e\xaf\x48\x4d\x93\x56\xa6\x43\x59\x95\xa4\
\x86\xcd\x52\xdc\x98\x2c\x76\x82\xef\xca\x07\x41\xad\x9f\x1e\x08\
\x7a\xc0\x80\x01\xcb\x61\xc9\x3a\x68\x55\x07\xf4\x44\x07\x55\x0c\
\x07\xe2\x1d\x63\x20\x27\x89\x16\xbb\xe9\x05\xa5\x6f\xda\xbc\xf8\
\x89\xf0\xb3\xba\x1e\x61\xbb\x4f\xc4\x3d\x57\x6c\xdc\xfa\xfb\xf7\
\xc4\xc4\x5f\x7b\xfe\x49\x1e\x7a\x92\xc9\x3e\xb8\xc9\xc8\xde\xfa\
\x07\xf7\x5d\x75\xf3\x2d\x76\xb4\xf2\xee\x2b\x2f\x3c\xf5\x83\x87\
\x2d\xe2\x2e\x40\xc6\x5a\x12\xaf\x09\xef\x03\xa2\x2b\x04\xd3\x5a\
\x3a\x44\xe6\x6b\x6b\x58\xa3\x73\x53\x11\xc9\xc6\xaa\xc2\x7e\x9b\
\x36\x68\xe8\xa6\xa7\x7b\x47\x6a\x37\x46\x38\x15\xc8\xe6\x20\x82\
\x2f\x91\x47\xa4\xad\x69\x89\x08\x0c\x26\x30\xb8\x52\x0a\x49\x04\
\x5d\x69\x27\x2d\x06\x9d\x54\xc2\xd0\xea\x42\x1c\x4d\x74\x4c\xef\
\x62\x2d\xe9\x68\x2b\x25\x76\x1f\x63\x14\x56\xd3\x67\x43\x26\x1d\
\x6a\xa8\x25\xe2\x42\xa3\xcf\x93\x09\x5d\x3f\xd8\xde\x27\x12\xf3\
\x28\x65\xb1\xc8\xe5\xe7\xa4\xaa\x75\xc5\xe9\x59\x76\x62\x46\x63\
\xa1\x65\x71\x39\xf2\xce\x24\x81\x9d\x32\x0e\x52\xe9\x4c\x98\xa1\
\xd5\x7b\xc0\x80\x01\xcb\x62\x39\x82\x9e\x4c\x56\x23\x5d\xfe\xf7\
\x8e\x2e\xdb\x85\x62\x3b\x51\x6f\x1c\xe6\x42\xfa\xcd\x0b\x4f\x6d\
\xec\xbf\xee\xc8\x17\xef\x3f\xf5\xc1\x6b\xac\x9d\xc1\xe5\xe5\xe2\
\xbd\x5f\x3d\xbd\x79\xfa\x64\x6c\xe7\xe7\xce\x9d\xda\x7d\xf5\x8d\
\x74\x04\x84\x85\x56\xea\x8d\x5f\x3e\x71\xfa\xd4\xfb\x5f\xfa\xb3\
\xbf\x4b\xc1\xaf\xd4\x99\x04\xaa\x77\xf0\x24\x64\xee\x89\x5c\xc1\
\xce\x19\xee\x01\x96\x81\x26\x13\x15\x26\x86\xad\x3d\x62\x71\x22\
\x63\xa2\xde\x18\x03\x31\x23\x64\x77\x4e\xa3\x4a\xb3\xb9\x37\xb6\
\xb2\x4a\xa6\x90\x35\x04\xb6\xcf\x30\xa3\xa3\x94\x65\x5c\x77\x4a\
\x0a\xe3\xb5\x1d\x96\x0c\x62\x52\xc9\x2b\x5b\x55\x3c\xb0\xdc\x42\
\x42\xa3\x45\xc5\x8c\x26\xb5\x31\xb2\x8a\x30\xb2\x03\x6c\xef\xc8\
\x99\x84\xe5\x8e\x6a\x6b\xbf\x63\x93\xc4\x14\x94\x48\x06\xc1\xa4\
\x4a\x6b\xb4\x3e\xd2\x05\x41\xc4\x24\xc4\xa0\x45\x8d\xf1\xb3\x76\
\xa4\x74\x85\x49\x2c\xa5\xa0\x04\x3d\x3d\xe8\x6e\x29\x3d\x85\x9c\
\x04\xf4\xd0\xea\x3d\x60\xc0\x80\xe5\xb0\x1c\x41\xaf\x8d\x33\x91\
\xa3\x37\x98\x55\x8d\xcb\x7e\x18\x05\x88\x9c\xd8\xb5\xab\xea\xc5\
\x56\xce\xad\xd6\xd5\xee\xf5\x71\xdc\x0e\x74\x95\x4f\xca\x74\xb5\
\x4a\xed\xb9\x77\x89\xa0\x56\xc7\x51\xb1\x29\x63\x6e\x7d\xf7\x8a\
\x69\xb8\x8d\x7d\x63\x3c\x67\x5e\x6b\xb7\xbe\x12\x88\x71\x7b\x64\
\x13\x25\x90\x9a\xd2\xa9\x4d\xc5\x07\x16\x6b\xeb\x2b\x6d\x9b\x2a\
\xab\x33\x83\x0d\xdd\x54\x2a\xda\x6e\x7d\x9d\xc5\x8e\x78\x39\x12\
\x7d\x3b\x8f\x00\xa4\x4a\x28\x55\x71\x8d\x90\xea\xa4\xb0\xe5\x27\
\x4b\xb9\x9e\xce\x3b\x33\x5b\x33\xca\x39\x52\x44\x02\xdd\x56\x16\
\x6a\x96\x47\x93\x31\xf7\x2d\x7a\x13\x49\xfe\xd2\xe9\x2b\xcd\x78\
\xe3\x03\xba\x55\x72\xe9\xcf\xe6\xac\x42\x82\x33\x84\xb0\x88\xf8\
\xb7\xc5\x14\x70\x26\xc1\xb3\xb6\xa1\xdb\x40\xf7\xd1\x07\x8e\xc1\
\x5b\x5c\xd2\x52\x40\x22\x3d\x86\xe4\x6c\xa3\xc0\xcc\x85\xa0\x91\
\x54\x8a\xc1\xb3\x0c\x73\xbd\x53\x92\xa5\xc8\x64\xc0\x80\x01\x03\
\xfe\xff\x58\x92\xa0\x1b\x9f\x99\x6f\x17\x09\x39\x44\x56\xd2\x15\
\x7d\xd5\xa8\xc8\xf8\x9e\xcb\x9b\x88\x78\x7d\x14\x2c\xef\xbf\xe2\
\xf2\xd6\x22\xba\x73\x3c\xd1\x7b\x77\x13\xbd\xa1\x8c\x59\x8b\x9c\
\xe4\x56\x58\x9c\x6d\xd6\xf7\x5e\x7f\xe4\xe8\xe6\x47\x6f\x8e\xd7\
\x56\x79\x92\xa3\xc6\xec\xd9\x53\x27\x97\xfa\x4e\xfb\xe8\x3a\xd7\
\x67\xb9\x33\x40\x3b\x47\xaf\xad\xa9\xd0\x2e\x68\x0c\x11\x9e\x80\
\x53\x91\xa5\x55\x55\x6d\x02\x0b\x12\x3d\x25\x49\x2a\xe8\x6a\xa2\
\xda\x48\x34\xdd\xd4\x22\x28\x56\x6a\xec\x72\x8e\x0c\xb6\x89\x2f\
\xe3\xb5\x0b\xd1\x32\x49\xe7\x51\x8d\xec\xea\x2e\xb3\xba\xba\x96\
\x43\x2d\x54\x6c\x9a\x8a\x41\xa7\xb3\xba\x41\xc6\x48\x24\xea\xc5\
\x9b\xbb\x92\x85\x44\x1f\x2b\x6a\x05\xab\x3b\x78\x49\x64\xbc\xb2\
\x36\xd6\xcd\x58\x55\x93\x88\xb8\x3d\x95\xb9\xf5\xb8\x8a\xb0\x82\
\x3b\x21\x74\xe6\x5a\xdb\x91\xe0\x0a\x89\xfe\x0c\x67\x41\xca\xb9\
\xb4\x9a\xe7\x52\x68\xa7\x54\x1c\x2c\x8e\x01\x03\x06\x2c\x87\x25\
\xc3\x92\x78\x17\x20\x31\x3d\x7c\x5e\x0c\xc2\x56\x74\x51\xcf\x13\
\x97\x62\x21\xf2\xc2\x75\xe7\x63\xce\xeb\xd7\x1e\x3e\xe3\xe6\x22\
\x33\x12\xbe\xca\x2a\x93\x7d\x2a\x0e\x70\xe2\xfd\xd9\x63\xcf\x5c\
\x79\xeb\xd7\xaf\x3d\xf2\xe5\xc5\xc1\xc3\x56\x4f\x72\xc6\x5c\x6d\
\xae\x47\x42\x04\x4b\x1c\xd8\xf1\xb2\x99\x57\x19\x74\x78\x90\xea\
\x54\x41\xd4\xa8\x75\x56\x46\x72\x92\xd7\xdd\x78\x75\xec\x3b\xad\
\xf4\x18\x53\x4a\x24\x29\x5f\xeb\x3d\x71\xa8\x4e\xac\xcd\xc2\x38\
\x5e\x05\xd6\x2b\x12\xce\x0c\x53\xb6\xb1\x44\xa8\x54\x3a\x10\x9d\
\x64\xf0\x62\xb2\x56\xa4\x72\xab\xba\xa9\xaa\x9a\x08\xdc\x18\x6d\
\x0c\x52\xa1\x89\x6c\x63\x76\x74\x0b\xf3\x98\x84\x31\x2d\x09\x32\
\x28\x0d\x4f\x03\x7d\x26\x11\xd3\x63\x48\x5c\x7b\xcc\x0e\x57\x5c\
\x8e\xfb\x14\xa4\xa8\x4a\x8f\x0c\xad\x10\x0c\x65\x26\xdc\x71\xe2\
\xe9\xd8\x4b\x51\xba\x12\xb1\x28\x78\x74\x3f\xf2\x62\x6f\x30\x11\
\x59\xd4\xa9\xff\xb4\x7f\xeb\x01\x03\x06\x7c\xc6\xb0\x64\x99\xdd\
\x88\x08\x57\xc7\xb8\x20\xb5\x2c\x65\x22\x89\x9b\x2e\x1e\xff\xf8\
\xec\xbb\xba\xdb\x4e\x95\xb9\xf8\xd1\x9b\x29\x56\x2c\xcc\xc6\x9a\
\x5f\xf8\xed\x0b\x8b\x73\xef\x28\x25\x22\xaf\x49\x6c\xa2\x14\x59\
\xd8\xae\xdb\x3a\xf1\xd6\x4f\xd7\xf6\x1c\xe6\x7c\xe4\x66\x67\xb6\
\xb7\xdf\xde\x3a\xf5\x81\x1d\xed\x76\xae\x25\x5e\xe5\xc6\x09\xd7\
\x45\xa2\x55\x29\x6c\x55\x87\x40\xf2\xdc\x2a\x1b\x9a\xda\xf0\xec\
\x04\xb7\xd2\xe8\x1e\xf5\x6f\x55\x32\x89\x69\xd9\x33\x2d\x8c\x46\
\x76\x92\x44\x7e\x06\x17\x24\x8a\x2d\x22\x9a\x51\xe0\x17\x60\x3d\
\x10\x73\x4a\x7a\xb0\x46\x55\xa0\xb1\xe8\xe2\xf6\x5e\x3b\xc8\x5c\
\xcc\x50\x49\x08\x11\x15\x44\xaf\x88\xc6\xcb\xa5\xcc\x03\x5b\x93\
\x08\xf4\xcf\x44\xcd\x74\x8f\x14\x7b\xe2\x68\x25\xe7\x42\x33\x65\
\x65\x2c\x9c\x8b\xe5\x46\x1a\xcc\xfd\x42\xa9\x06\xfc\x6d\xba\xcb\
\x85\x81\xcf\x5c\x02\x46\x59\xe9\xf2\x2e\x79\xfd\x3b\x5d\x8d\x58\
\x70\xe8\x48\x9f\xf6\x6f\x3d\x60\xc0\x80\xcf\x18\x96\x54\xd0\x93\
\x75\x18\xaf\x2d\x91\xa9\x25\x9a\xe3\x5a\x5e\xfc\xe8\xf5\xb2\x97\
\xa6\xb2\x5c\xc9\xdb\xd3\x93\x67\x1f\x9b\xac\x8f\x2c\x4b\x9b\xa7\
\x9f\xfb\xbf\xf6\xee\x25\xb9\x6d\x23\x0a\xc3\x28\x1a\x20\x09\xc8\
\x7a\xb8\x32\xc9\xfe\x57\x93\x95\x64\xea\x51\x92\xb2\x44\xe2\x91\
\xbe\x90\x17\x10\x4f\x92\x3f\x55\xe7\x4c\x4c\xdb\x14\x5d\x94\xab\
\x3e\x42\x40\xf7\xc5\xf5\x3e\xb4\xf9\xe5\x9c\x0e\x57\xdb\x38\xda\
\x34\xd7\x74\xfc\xfb\x1f\xdf\x7e\xff\xed\xe3\xfb\x9f\xe7\xde\x93\
\x6b\xff\xc9\x7f\x1b\x9f\xe6\xe5\x75\xfb\x78\x0c\xeb\x63\x79\x1b\
\xfa\xaf\x5b\x5b\x5f\x7f\xf9\x7a\x7f\x5f\xe7\x6b\xdb\x86\xc7\x74\
\xab\xfd\x28\x53\x9b\xdb\xfc\x34\xec\xd7\x6d\x7e\x1b\x97\x9a\xc6\
\x31\x4e\x73\xcf\xe0\x65\xbb\x3f\x1e\xdf\x7a\x32\xc7\xcb\x52\x37\
\xad\x3a\xd7\x21\xf7\xb2\xf6\x68\x0f\xdb\x36\xd5\xfb\x3b\x6a\x2d\
\x47\x3f\x2a\xde\xfb\x0b\x3c\x2d\xc7\x31\x3f\x7d\xd9\x6b\x33\x77\
\x8d\xd6\x58\xb7\xf7\xfe\xfc\xf3\xee\x56\xbd\xea\xf7\x5a\xdf\x7d\
\x1c\x35\xe5\x79\x3c\x07\xd2\x5d\xcf\x02\xef\xd7\x2f\xed\xb2\x3c\
\x3f\xef\xe3\xdb\x6d\x7e\xb9\xd6\x02\x93\xa5\x9d\x63\xf0\x7a\xa9\
\xeb\xec\x4b\x3b\xa6\xcb\x32\x4c\x4b\xdd\x7e\x70\xdf\x7f\x2c\xe1\
\x68\xe3\xe7\x32\xbb\xfe\x33\x42\x6d\x3c\xd7\x67\xe0\x27\xfd\xe4\
\x4e\xc2\xeb\xdb\x36\xee\xfb\x5c\x53\x3e\x6b\x99\xf2\x6d\xdf\x6a\
\x91\x71\x2f\x63\xcd\x6b\x9b\xf6\xc7\xb4\xae\xc7\x38\xad\x6d\xee\
\xcf\xdd\xc6\x3a\x6a\xec\xc7\x9c\xe7\x8c\xba\x5a\x17\xfd\xb9\xbb\
\x63\xed\x3d\xbb\xd7\x78\x8a\x76\x5c\x3f\xd6\xc7\x56\x09\x1b\x1f\
\x75\x7d\x6e\xdc\xf6\xcb\xbd\x3f\xb8\x0c\xdb\x58\xe7\x3d\x86\x9a\
\x3d\xfd\xe8\x85\x5f\x6b\xda\x50\x7f\xfe\xb2\x5f\x6f\x8f\xe3\xb5\
\x6e\x51\x52\x4b\x35\xea\x8c\x42\xab\xd3\xc1\xfd\xe3\x61\xdf\xc7\
\xfe\x60\x3f\xa6\xe3\xbc\x67\xeb\xf4\x63\x03\xf7\xb1\xee\xc7\x63\
\xda\xc6\xa1\xff\x7d\xff\x58\x99\x9e\x5e\x9e\xaf\xcb\xf2\x7c\x5c\
\xa7\x73\xca\x52\x5d\x13\x1c\x2e\xe7\x8b\xac\x75\xeb\x96\x56\x5b\
\x5a\xea\xf5\x7e\x7c\x77\xce\x2d\x36\x7b\xbd\xdd\xa1\x76\x9c\x8f\
\xb7\xa1\x1f\x26\xf7\x03\xe9\xcf\xfb\x81\xd7\x81\x72\xff\xe2\x47\
\x2d\xfe\xa8\x39\x79\xe7\x08\xe8\xcf\x0b\x84\x35\x03\x7a\x38\x17\
\x93\xf4\x07\xd3\x5e\x7b\x66\xae\xff\xf5\xff\x35\xf0\x3f\xf3\x73\
\x81\xfe\x98\x7e\x6d\x6d\x3b\x0f\x0d\xe7\x1a\x93\x5f\xc3\x3c\xdb\
\x7b\xbb\xde\xda\x3e\x1d\xb7\xfb\xf1\xb1\x8d\xf3\x7e\x7d\x5d\xa7\
\xb6\x6f\x97\xe1\x72\x6b\x3d\x9a\xb5\xbc\xac\x72\xd6\x5b\x36\xd4\
\xc2\xe5\xcb\xd6\xbf\x6a\xfe\xde\xf6\xbf\x86\x71\x6e\xed\x63\x3a\
\x6a\xfe\xfd\xb6\xdf\xcf\xd9\x46\xed\xd8\x1e\xb7\xda\x06\xb8\xb7\
\x5b\x6f\xe7\x31\xd7\xf6\xe9\x3a\x55\x7d\x0c\x8f\x36\xbc\xcf\x97\
\x69\xdc\xdf\xeb\xec\x41\xdb\x8e\x9a\x86\x57\x69\x1c\x97\xa5\x2e\
\x14\xb6\xaf\xfb\xf0\xb1\x0d\x5b\xdd\x4f\xe0\xa8\x2b\x75\x7b\x5b\
\xcf\x9b\x67\xf5\x3f\xac\x95\xca\xb5\xab\xf0\xf6\xb2\xb4\x79\xeb\
\xbf\xef\xff\xd6\xb0\x0f\xe7\xd9\x8b\x1a\x04\xda\x86\xf3\x5c\xf2\
\xb9\xf6\xe3\xa8\xfb\x8a\xff\xd8\x63\x52\x1b\x03\x6b\x00\x5e\xff\
\x60\xd9\xd7\xfe\x19\xb1\xee\xdb\x3a\x8c\xcb\x71\xee\xe8\x6e\xe7\
\x89\x91\xd6\x2e\xeb\xb8\xd7\x98\x92\x1a\x55\xda\xab\xdd\xdf\xe6\
\xe7\x69\x8e\xf3\x5b\xf6\x39\x8d\x63\x98\x8e\x73\x3d\x34\xc0\x3f\
\xf7\x73\xe3\x46\x01\xf8\xd7\x58\xfb\x05\x10\x4a\xa0\x01\x42\x09\
\x34\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\
\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\
\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\x68\
\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\x81\x06\x08\
\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\x68\x80\x50\x02\
\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\x81\x06\x08\x25\xd0\x00\
\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\
\xa0\x01\x42\x09\x34\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\
\x20\x94\x40\x03\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\
\x09\x34\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\
\x03\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\
\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\
\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\x81\x06\
\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\x68\x80\x50\
\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\x81\x06\x08\x25\xd0\
\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\x68\x80\x50\x02\x0d\x10\
\x4a\xa0\x01\x42\x09\x34\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\
\x1a\x20\x94\x40\x03\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\
\x42\x09\x34\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\
\x40\x03\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\
\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\
\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\x81\
\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\x68\x80\
\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\x81\x06\x08\x25\
\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\x68\x80\x50\x02\x0d\
\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\
\x04\x1a\x20\x94\x40\x03\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\
\x01\x42\x09\x34\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\
\x94\x40\x03\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\
\x34\x40\x28\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\
\x84\x12\x68\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\x28\
\x81\x06\x08\x25\xd0\x00\xa1\x04\x1a\x20\x94\x40\x03\x84\x12\x68\
\x80\x50\x02\x0d\x10\x4a\xa0\x01\x42\x09\x34\x40\xa8\xbf\x01\x38\
\x62\x33\x89\x4b\xfc\xb7\x3d\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x04\x7e\
\x00\
\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x20\x00\x68\x04\x00\
\x00\x16\x00\x00\x00\x28\x00\x00\x00\x10\x00\x00\x00\x20\x00\x00\
\x00\x01\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x86\xb0\xcb\xff\x9f\xc3\xd7\xff\xa5\xc4\xd7\
\xff\x81\xa4\xbe\xff\x7b\x9b\xb8\xff\x7e\xac\xce\xff\x79\xb1\xd5\
\xff\x81\xa9\xc7\xff\x86\xb4\xcf\xff\x90\xb8\xd0\xff\xb2\xca\xd8\
\xff\xc3\xd4\xde\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x70\x8a\xa5\xff\x9c\xaa\xbb\xff\x9b\xa5\xb8\
\xff\x62\x73\x91\xff\x5a\x6d\x8f\xff\x5e\x75\x9a\xff\x5f\x79\x9f\
\xff\x59\x6f\x91\xff\x60\x73\x94\xff\x6b\x77\x95\xff\x88\x99\xb1\
\xff\xb4\xc8\xd6\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x75\x8e\xab\xff\x92\xa5\xb9\xff\x8c\x98\xac\
\xff\x6e\x7b\x93\xff\x54\x69\x87\xff\x5a\x77\x9a\xff\x5f\x75\x97\
\xff\x59\x6b\x89\xff\x5f\x70\x8f\xff\x64\x71\x8e\xff\x81\x8e\xa6\
\xff\xa9\xbc\xd4\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x7c\xa9\xcf\xff\x74\x9f\xc6\xff\x78\x9a\xbe\
\xff\x6b\x88\xa8\xff\x5f\x80\xa3\xff\x62\x93\xbe\xff\x5d\x8b\xb7\
\xff\x5f\x6f\x8a\xff\x63\x74\x95\xff\x84\x8c\xa8\xff\xa1\xa1\xba\
\xff\xb0\xb1\xcd\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x9a\xb7\xcf\xff\x7a\x9b\xb9\xff\x7a\x95\xb1\
\xff\x78\x9d\xb9\xff\x7e\xad\xcc\xff\x6d\xa3\xca\xff\x64\xa6\xd5\
\xff\x7f\x94\xad\xff\x66\x93\xba\xff\xa6\xc6\xdc\xff\xc0\xdd\xec\
\xff\xbe\xdc\xeb\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x9c\xc2\xd6\xff\xa1\xc8\xda\xff\xa4\xc9\xd9\
\xff\xb5\xda\xe6\xff\xbc\xdf\xe9\xff\xb4\xd7\xe5\xff\xad\xd5\xe5\
\xff\xb8\xd9\xe4\xff\xa7\xd4\xe7\xff\xb7\xdf\xea\xff\xbf\xe3\xec\
\xff\xbe\xe2\xeb\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xc0\xe0\xea\xff\xbe\xdf\xe9\xff\xbf\xdf\xe9\
\xff\xc2\xe1\xe9\xff\xc0\xdf\xe9\xff\xbf\xdf\xe9\xff\xc1\xdf\xe9\
\xff\xbf\xdf\xe8\xff\xbd\xdf\xe9\xff\xbe\xe0\xe9\xff\xc0\xe1\xea\
\xff\xbc\xe0\xe9\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xc1\xde\xe5\xff\xc1\xde\xe5\xff\xc1\xde\xe5\
\xff\xc1\xde\xe5\xff\xbf\xdd\xe5\xff\xc2\xde\xe6\xff\xc1\xde\xe6\
\xff\xc1\xde\xe6\xff\xc0\xde\xe5\xff\xc0\xde\xe7\xff\xbf\xde\xe7\
\xff\xc1\xde\xe7\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xc1\xdc\xe3\xff\xc0\xdb\xe3\xff\xbf\xda\xe2\
\xff\xc0\xda\xe2\xff\xc0\xdb\xe2\xff\xc2\xdb\xe3\xff\xc1\xdb\xe3\
\xff\xc0\xda\xe3\xff\xbf\xda\xe2\xff\xbf\xda\xe2\xff\xbe\xd9\xe2\
\xff\xc1\xd8\xe3\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xbd\xd9\xe1\xff\xbc\xd9\xe1\xff\xbf\xd8\xdf\
\xff\xc0\xd8\xe0\xff\xc0\xd8\xe0\xff\xc1\xd8\xdf\xff\xc2\xd7\xdf\
\xff\xc3\xd8\xdf\xff\xc3\xd7\xdf\xff\xc0\xd6\xde\xff\xc2\xd6\xde\
\xff\xc2\xd5\xde\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xbe\xd6\xdd\xff\xbe\xd6\xde\xff\xc0\xd6\xde\
\xff\xc0\xd6\xde\xff\xc1\xd4\xdc\xff\xc2\xd4\xdc\xff\xc3\xd5\xdc\
\xff\xc2\xd5\xdc\xff\xc2\xd4\xdb\xff\xc2\xd3\xda\xff\xc2\xd2\xd9\
\xff\xc2\xd1\xd9\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xc2\xd3\xda\xff\xc2\xd3\xda\xff\xc3\xd2\xd9\
\xff\xc4\xd2\xd9\xff\xc4\xd2\xda\xff\xc4\xd3\xd9\xff\xc4\xd3\xd8\
\xff\xc5\xd2\xd5\xff\xc3\xd2\xd5\xff\xc4\xd2\xd5\xff\xc4\xd1\xd5\
\xff\xc3\xce\xd1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xc6\xd1\xd4\xff\xc5\xd1\xd4\xff\xc6\xd1\xd5\
\xff\xc6\xd2\xd6\xff\xc8\xd3\xd7\xff\xc8\xd3\xd5\xff\xc9\xd0\xd2\
\xff\xcb\xd1\xd4\xff\xd3\xd8\xda\xff\xd5\xd9\xda\xff\xd2\xd6\xd8\
\xff\xd6\xd9\xdb\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xde\xe3\xe5\xff\xe1\xe6\xe7\xff\xdc\xe1\xe3\
\xff\xe3\xe7\xe8\xff\xe0\xe5\xe7\xff\xd8\xda\xda\xff\xde\xdf\xdf\
\xff\xdf\xe2\xe3\xff\xe3\xe5\xe6\xff\xdf\xe0\xe1\xff\xe0\xe0\xe1\
\xff\xde\xe0\xe1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xd8\xdc\xde\xff\xdb\xde\xe0\xff\xd7\xdb\xdc\
\xff\xdf\xe1\xe3\xff\xe0\xe1\xe2\xff\xdd\xdb\xdb\xff\xdc\xdb\xdc\
\xff\xd4\xd8\xda\xff\xda\xd9\xda\xff\xdb\xd8\xd9\xff\xd2\xcf\xd1\
\xff\xd1\xd0\xd2\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xd5\xd0\xd1\xff\xd7\xd3\xd3\xff\xde\xda\xda\
\xff\xda\xd3\xd3\xff\xde\xd7\xd7\xff\xda\xd2\xd2\xff\xda\xd4\xd4\
\xff\xde\xd9\xd9\xff\xd1\xc8\xc7\xff\xcc\xc2\xc1\xff\xc7\xbb\xba\
\xff\xc5\xbb\xbc\xff\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x03\xac\
\x41\xc0\x03\xac\x41\xc0\x03\xac\x41\xc0\x03\xac\x41\xc0\x03\xac\
\x41\xc0\x03\xac\x41\xc0\x03\xac\x41\xc0\x03\xac\x41\xc0\x03\xac\
\x41\xc0\x03\xac\x41\xc0\x03\xac\x41\xc0\x03\xac\x41\xc0\x03\xac\
\x41\xc0\x03\xac\x41\xc0\x03\xac\x41\xc0\x03\xac\x41\
"
qt_resource_name = b"\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x0e\
\x09\xb5\x12\x07\
\x00\x68\
\x00\x6f\x00\x61\x00\x5f\x00\x64\x00\x69\x00\x61\x00\x6c\x00\x6f\x00\x67\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x0f\x54\x4f\x5f\
\x00\x68\
\x00\x6f\x00\x61\x00\x2e\x00\x69\x00\x63\x00\x6f\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x32\x00\x00\x00\x00\x00\x01\x00\x00\x69\x58\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7d\x39\xb4\x5f\x27\
\x00\x00\x00\x32\x00\x00\x00\x00\x00\x01\x00\x00\x69\x58\
\x00\x00\x01\x7d\x39\xc2\x48\xd7\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
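# Illustrative usage sketch: once this generated resource module is imported, the
# embedded files are reachable through Qt's resource system under the names encoded
# in qt_resource_name above (an "icons" folder holding "hoa_dialog.png" and
# "hoa.ico"), e.g. QtGui.QIcon(":/icons/hoa.ico") or
# QtGui.QPixmap(":/icons/hoa_dialog.png"). The ":/icons/..." paths are inferred from
# the resource tree, not stated explicitly in this file.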
|
the-stack_106_29771 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590E import *
class agilent8594L(agilentBase8590E):
"Agilent 8594L IVI spectrum analyzer driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'HP8594L')
super(agilent8594L, self).__init__(*args, **kwargs)
self._input_impedance = 50
self._frequency_low = 9e3
self._frequency_high = 2.9e9
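# Illustrative usage sketch (hypothetical VISA address; instantiation through the
# python-ivi package namespace is assumed rather than documented here):
#
#   import ivi
#   sa = ivi.agilent.agilent8594L("TCPIP0::192.168.1.100::INSTR")
#   # the driver then applies the 9 kHz .. 2.9 GHz frequency limits set above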
|
the-stack_106_29772 | import random
import threading
import time
from statistics import mean
from typing import Optional
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
from selfdrive.statsd import statlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seens like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 10*3600
MIN_ON_TIME_S = 3600
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of peripheralState voltage
self.car_voltage_instant_mV = 12e3 # Last value of peripheralState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, peripheralState, ignition):
try:
now = sec_since_boot()
# If peripheralState is None, we're probably not in a car, so we don't care
if peripheralState is None or peripheralState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = peripheralState.voltage
self.car_voltage_mV = ((peripheralState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
statlog.gauge("car_voltage", self.car_voltage_mV / 1e3)
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if ignition:
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
# if integration_time_h < 0:
# raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t: float, current_power: float) -> None:
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
# if power_used < 0:
# raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self) -> int:
return int(self.power_used_uWh)
def get_car_battery_capacity(self) -> int:
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, ignition: bool, in_car: bool, offroad_timestamp: Optional[float], dp_auto_shutdown, dp_auto_shutdown_in) -> bool:
if offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
if dp_auto_shutdown:
disable_charging |= (now - offroad_timestamp) > dp_auto_shutdown_in * 60
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= not ignition
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= in_car
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, peripheralState, ignition, in_car, offroad_timestamp, started_seen, dp_auto_shutdown, dp_auto_shutdown_in):
if offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(ignition, in_car, offroad_timestamp, dp_auto_shutdown, dp_auto_shutdown_in))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
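# Illustrative driver-loop sketch (hypothetical names: `sm` stands in for a cereal
# SubMaster and the shutdown call is assumed, not defined in this file; in openpilot
# this class is normally driven from thermald):
#
#   pm = PowerMonitoring()
#   while True:
#       pm.calculate(sm['peripheralState'], ignition)
#       if pm.should_shutdown(sm['peripheralState'], ignition, in_car,
#                             offroad_timestamp, started_seen,
#                             dp_auto_shutdown, dp_auto_shutdown_in):
#           HARDWARE.shutdown()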
|
the-stack_106_29774 | """suplerlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^new$', views.new_list, name='new_list'),
url(r'^(\d+)/$', views.view_list, name='view_list'),
url(r'^(\d+)/add_item$', views.add_item, name='add_item')
]
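# Illustrative note: assuming this module is included under a "lists/" prefix in the
# project urls.py (not shown here), the named patterns can be reversed as, e.g.:
#   from django.urls import reverse
#   reverse('view_list', args=[42])  # -> '/lists/42/'
#   reverse('add_item', args=[42])   # -> '/lists/42/add_item'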
|
the-stack_106_29775 | import tensorflow as tf
import pointnet_cls_basic as pointnet
import utils.tf_util as tf_util
pcl_feat_size = 16
bn_decay = 0.9
weight_decay = 0.005
def placeholder_inputs(batch_size, num_points, num_steps):
'''
Returns placeholders for both geometry and state prediction modules.
'''
pcl_pl = tf.placeholder(tf.float32, shape=(batch_size, num_points, 3))
pcl_feat_pl = tf.placeholder(tf.float32, shape=(batch_size, pcl_feat_size)) # for test-time
lin_vel_pl = tf.placeholder(tf.float32, shape=(batch_size, num_steps, 3))
ang_vel_pl = tf.placeholder(tf.float32, shape=(batch_size, num_steps, 3))
pos_pl = tf.placeholder(tf.float32, shape=(batch_size, num_steps, 3))
delta_rot_pl = tf.placeholder(tf.float32, shape=(batch_size, num_steps, 3))
topple_label_pl = tf.placeholder(tf.float32, shape=(batch_size, num_steps))
return pcl_pl, pcl_feat_pl, lin_vel_pl, ang_vel_pl, pos_pl, delta_rot_pl, topple_label_pl
def get_geom_model(pcl, is_training):
'''
Build the graph for the shape processing branch
'''
# first get shape feature
pointnet_feat = get_pointnet_model(pcl, is_training, bn_decay=bn_decay)
# process pointnet output
pt_vec = tf_util.fully_connected(pointnet_feat, 1024, weight_decay=weight_decay, bn=True, \
is_training=is_training, scope='geom_fc1', bn_decay=bn_decay)
pt_vec = tf_util.fully_connected(pt_vec, 512, weight_decay=weight_decay, bn=True, \
is_training=is_training, scope='geom_fc2', bn_decay=bn_decay)
pt_vec = tf_util.fully_connected(pt_vec, 128, weight_decay=weight_decay, bn=True, \
is_training=is_training, scope='geom_fc3', bn_decay=bn_decay)
pt_vec = tf_util.fully_connected(pt_vec, 32, weight_decay=weight_decay, bn=True, \
is_training=is_training, scope='geom_fc4', bn_decay=bn_decay)
shape_feat = tf_util.fully_connected(pt_vec, pcl_feat_size, weight_decay=weight_decay, bn=True, \
is_training=is_training, scope='geom_fc5', bn_decay=bn_decay)
return shape_feat
def get_pointnet_model(pcl, is_training, bn_decay=None):
'''
PointNet classifier model. Returns only global feature.
'''
_, _, global_feat = pointnet.get_model(pcl, is_training, bn_decay=bn_decay)
return global_feat
def get_dynamics_model(shape_feat, lin_vel, ang_vel, cell_type, num_cells, hidden_size, dropout_keep_prob, time_steps, is_training):
'''
Build the graph for the state prediction module
'''
batch_size = shape_feat.get_shape()[0].value
# inputs are a 22-vec [lin_vel, ang_vel, shape_feat]
tile_arg = tf.stack([tf.constant(1), time_steps, tf.constant(1)])
step_shape = tf.tile(tf.expand_dims(shape_feat, 1), tile_arg)
inputs = tf.concat([lin_vel, ang_vel, step_shape], axis=2)
    # outputs are size 13: four size-3 vectors for the change in state (dv, dw, dp, dtheta) plus one topple classification logit
num_params = 13
W_hy = tf.get_variable('W_hy', shape=(hidden_size, num_params), initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
l2_normalization = tf.multiply(tf.nn.l2_loss(W_hy), weight_decay, name='weight_loss')
tf.add_to_collection('losses', l2_normalization)
b_hy = tf.get_variable('b_hy', shape=(1, num_params), initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
if cell_type=='fc':
# need to do it differently
# num_cells used as number of FC layers, each with hidden_size nodes
# inputs is B, num_steps, 12
input_feat_size = inputs.get_shape()[2].value
inputs = tf.reshape(inputs, [batch_size*time_steps, input_feat_size])
cur_input = inputs
for j in range(num_cells):
cell_name = 'cell_fc' + str(j)
# NOTE: batch norm causes some issues that really hinders training here - don't use it
cur_input = tf_util.fully_connected(cur_input, hidden_size, weight_decay=weight_decay, bn=False, \
is_training=is_training, scope=cell_name, activation_fn=tf.nn.tanh, bn_decay=bn_decay)
# final output
y = tf.matmul(cur_input, W_hy) + b_hy
y = tf.reshape(y, [batch_size, time_steps, num_params])
init_state = tf.constant(0)
return y, init_state, init_state # no state to return
# then feed to RNN with velocites
if cell_type=='rnn':
rnn_cell = [tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.BasicRNNCell(hidden_size), output_keep_prob=dropout_keep_prob) for i in range(0, num_cells)]
if cell_type=='gru':
rnn_cell = [tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.GRUCell(hidden_size, kernel_initializer=tf.contrib.layers.xavier_initializer()), output_keep_prob=dropout_keep_prob) for i in range(0, num_cells)]
if cell_type=='lstm':
rnn_cell = [tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(hidden_size, initializer=tf.contrib.layers.xavier_initializer()), output_keep_prob=dropout_keep_prob) for i in range(0, num_cells)]
if num_cells > 1:
rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_cell)
else:
rnn_cell = rnn_cell[0]
init_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# feed through RNN
# outputs are [batch, time_steps, hidden_size]
outputs, state = tf.nn.dynamic_rnn(rnn_cell, inputs, initial_state=init_state, dtype=tf.float32)
y = tf.matmul(tf.reshape(outputs, [batch_size*time_steps, hidden_size]), W_hy) + b_hy
y = tf.reshape(y, [batch_size, time_steps, num_params])
return y, state, init_state
def get_loss(pred, gt_lin_vel, gt_ang_vel, gt_pos, gt_delta_rot, topple_label, num_steps, loss_weights=None, is_training=None):
'''
Calculate loss for the given prediction and ground truth values.
Input pred is size 13: 4 size-3 vectors representing change in state: dv, dw, dp, dtheta and topple logit
(batch_size, time_steps, 13)
'''
lin_vel_weight, ang_vel_weight, pos_weight, angle_weight, axis_weight, classify_weight = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
if loss_weights != None:
lin_vel_weight, ang_vel_weight, pos_weight, angle_weight, axis_weight, classify_weight = loss_weights
# calculate change in linear vel for gt
vel_t = gt_lin_vel[:,0:(num_steps-1)]
vel_tp1 = gt_lin_vel[:, 1:]
vel_diff = vel_tp1 - vel_t
# calclate change in ang vel for gt
angvel_t = gt_ang_vel[:, 0:(num_steps-1)]
angvel_tp1 = gt_ang_vel[:, 1:]
angvel_diff = angvel_tp1 - angvel_t
# calculate change in pos for gt
pos_t = gt_pos[:, 0:(num_steps-1)]
pos_tp1 = gt_pos[:, 1:]
pos_diff = pos_tp1 - pos_t
# already have change in rot for gt (in axis-angle rep) - first entry is useless all zeros
rot_diff = gt_delta_rot[:, 1:, :]
# linear velocity
gt_diff_lin_vel = tf.norm(pred[:,:,:3] - vel_diff, axis=2)
lin_vel_rel = tf.norm(vel_diff, axis=2) + tf.norm(pred[:,:,:3], axis=2)
lin_vel_loss = tf.reduce_mean(tf.reduce_mean(gt_diff_lin_vel / lin_vel_rel, axis=1))
# angular velocity
gt_diff_ang_vel = tf.norm(pred[:,:,3:6] - angvel_diff, axis=2)
ang_vel_rel = tf.norm(angvel_diff, axis=2) + tf.norm(pred[:,:,3:6], axis=2)
ang_vel_loss = tf.reduce_mean(tf.reduce_mean(gt_diff_ang_vel / ang_vel_rel, axis=1))
# position
gt_diff_pos = tf.norm(pred[:,:,6:9] - pos_diff, axis=2)
pos_rel = tf.norm(pos_diff, axis=2) + tf.norm(pred[:,:,6:9], axis=2)
pos_loss = tf.reduce_mean(tf.reduce_mean(gt_diff_pos / pos_rel, axis=1))
# rotation
gt_diff_rot = tf.norm(pred[:,:,9:12] - rot_diff, ord=1, axis=2)
rot_rel = tf.norm(pred[:,:,9:12], ord=1, axis=2) + tf.norm(rot_diff, ord=1, axis=2)
rot_loss = tf.reduce_mean(tf.reduce_mean(gt_diff_rot / rot_rel, axis=1))
# topple classification
topple_logits = pred[:, :, 12]
ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=topple_label[:,1:], logits=topple_logits) # starts at second step since this is first prediction step
classify_loss = tf.reduce_mean(tf.reduce_mean(ce_loss, axis=1))
# final loss
loss = tf.constant(lin_vel_weight)*lin_vel_loss + tf.constant(ang_vel_weight)*ang_vel_loss + \
tf.constant(pos_weight)*pos_loss + tf.constant(angle_weight)*rot_loss + tf.constant(classify_weight)*classify_loss
# now calculate meaningful errors
# absolute error averaged over all timesteps for each sequence then the entire batch
lin_vel_err = tf.reduce_mean(tf.reduce_mean(tf.abs(pred[:,:,:3] - vel_diff), axis=1), axis=0)
ang_vel_err = tf.reduce_mean(tf.reduce_mean(tf.abs(pred[:,:,3:6] - angvel_diff), axis=1), axis=0)
pos_err = tf.reduce_mean(tf.reduce_mean(tf.abs(pred[:,:,6:9] - pos_diff), axis=1), axis=0)
pred_angle = tf.norm(pred[:,:,9:12], axis=2)
gt_angle = tf.norm(rot_diff, axis=2)
angle_err = tf.reduce_mean(tf.reduce_mean(tf.abs(gt_angle - pred_angle), axis=1), axis=0)
#axis error
axis_prod = pred[:,:,9:12] * rot_diff
dot_prod = tf.reduce_sum(axis_prod, axis=2)
denom = pred_angle * gt_angle + tf.constant(1e-6) # for stability
cos_sim = tf.constant(1.0) - (dot_prod / denom)
# pay no attention when angle is extremely small < 1 degree
zero = tf.constant(7.5e-3, dtype=tf.float32)
mask = tf.math.greater(gt_angle, 7.5e-3)
cos_sim = tf.boolean_mask(cos_sim, mask)
axis_loss = tf.reduce_mean(cos_sim)
errors = (lin_vel_err, ang_vel_err, pos_err, angle_err, axis_loss, classify_loss)
return loss, errors
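# Illustrative wiring sketch (batch size, point count and step count are assumptions;
# get_loss compares predictions against per-step differences, so the dynamics module
# is given num_steps - 1 input steps here so that it emits one prediction per transition):
#
#   pcl, pcl_feat, lin_vel, ang_vel, pos, delta_rot, topple = \
#       placeholder_inputs(batch_size=32, num_points=1024, num_steps=15)
#   is_training = tf.placeholder(tf.bool, shape=())
#   shape_feat = get_geom_model(pcl, is_training)
#   pred, state, init_state = get_dynamics_model(shape_feat, lin_vel[:, :-1], ang_vel[:, :-1],
#                                                'gru', 1, 128, 0.8, 14, is_training)
#   loss, errors = get_loss(pred, lin_vel, ang_vel, pos, delta_rot, topple, num_steps=15)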
|
the-stack_106_29776 | # -*- coding:UTF-8 -*-
import wget
import requests
import urllib.request
import numpy as np
import os
import cv2
import json
import math
#--------------Driver Library-----------------#
import RPi.GPIO as GPIO
import OLED_Driver as OLED
#--------------Image Library---------------#
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageColor
#-------------Test Display Functions---------------#
def Display_Picture(File_Name):
image = Image.open(File_Name)
OLED.Display_Image(image)
def url_to_image(url):
resp = urllib.request.urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def analyze_images(image):
faceCascadef = cv2.CascadeClassifier("haarcascades/haarcascade_frontalface_alt.xml")
facesf = faceCascadef.detectMultiScale(
image,
minNeighbors=6,
minSize=(20, 20)
)
faceCascadep = cv2.CascadeClassifier("haarcascades/haarcascade_profileface.xml")
facesp = faceCascadep.detectMultiScale(
image,
minNeighbors=6,
minSize=(20, 20)
)
faces=[*facesf,*facesp]
return faces
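# Background note on the URL templates used in main() below: IIIF Image API requests
# follow the pattern {service}/{region}/{size}/{rotation}/{quality}.{format}. So
# "<srv>/full/128,128/0/default.jpg" fetches the whole image scaled to 128x128, and
# "<srv>/x,y,w,h/128,128/0/default.jpg" crops a detected face region first. The factor
# f rescales the Haar-cascade box coordinates from the downsampled analysis image
# (requested at roughly width/f pixels) back to the full-resolution coordinate space.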
#----------------------MAIN-------------------------#
cola = ["https://iiif.manducus.net/collections/0020/collection.json"] # MKK Dortmund
colb = ["https://iiif.manducus.net/collections/0019/collection.json"] # Burg Posterstein
colc = ["https://iiif.manducus.net/collections/0008/collection.json"] # Städel Museum
cold = ["https://wellcomelibrary.org/service/collections/genres/Group%20portraits/"]
cole = ["https://wellcomelibrary.org/service/collections/genres/Portrait%20prints/"]
colf = ["https://wellcomelibrary.org/service/collections/genres/Portrait%20paintings/"]
cols = [*cola,*colb,*colc,*cold,*cole,*colf]
font = ImageFont.truetype('UbuntuMono-Bold.ttf',12)
try:
def main():
OLED.Device_Init()
while True:
for cu in cols:
col = requests.get(cu)
col = col.json()
for m in col["manifests"]:
man = requests.get(m["@id"])
man = man.json()
try:
for c in man["sequences"][0]["canvases"]:
# interlude
Display_Picture("iiiflogo128.jpg")
w = float(man["sequences"][0]["canvases"][0]["width"])
f = int(math.ceil(w/1000))
srv = c["images"][0]["resource"]["service"]["@id"]
# analyzing
if os.path.exists('temp.jpg'):
os.remove('temp.jpg')
wget.download(srv+"/full/128,128/0/default.jpg", 'temp.jpg')
image = Image.open('temp.jpg')
draw = ImageDraw.Draw(image)
draw.text((0,8), 'Analyzing', fill = "GREEN", font = font)
OLED.Display_Image(image)
uti = srv+"/full/%d,/0/native.jpg" % (int(round(w/f)))
print(uti)
img = url_to_image(uti)
fcs = analyze_images(img)
os.remove('temp.jpg')
for (x, y, w, h) in fcs:
if os.path.exists('temp.jpg'):
os.remove('temp.jpg')
wget.download(srv+"/%d,%d,%d,%d/128,128/0/default.jpg" % (x*f,y*f,w*f,h*f), 'temp.jpg')
Display_Picture('temp.jpg')
OLED.Delay(3000)
os.remove('temp.jpg')
except Exception as e:
print(e)
print("Error with %s" % m["@id"])
if __name__ == '__main__':
main()
except:
print("\r\nEnd")
if os.path.exists('temp.jpg'):
os.remove('temp.jpg')
OLED.Clear_Screen()
GPIO.cleanup()
|
the-stack_106_29778 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""A module with utility functions for working with collections."""
import itertools
from typing import Callable, Dict, Iterable, Iterator, List, Tuple, TypeVar
T = TypeVar("T")
K = TypeVar("K")
def Flatten(iterator: Iterable[Iterable[T]]) -> Iterator[T]:
"""Flattens nested iterables into one iterator.
Examples:
>>> list(Flatten([[1, 2, 3], [4, 5, 6]]))
[1, 2, 3, 4, 5, 6]
    >>> list(Flatten([range(3), range(5), range(3)]))
[0, 1, 2, 0, 1, 2, 3, 4, 0, 1, 2]
Args:
iterator: An iterator of iterators to flatten.
Yields:
Items yielded by the given iterators.
"""
for items in iterator:
for item in items:
yield item
def Trim(lst: List[T], limit: int) -> List[T]:
"""Trims a given list so that it is not longer than given limit.
Args:
lst: A list to trim.
limit: A maximum number of elements in the list after trimming.
Returns:
A suffix of the input list that was trimmed.
"""
limit = max(0, limit)
clipping = lst[limit:]
del lst[limit:]
return clipping
def Group(items: Iterable[T], key: Callable[[T], K]) -> Dict[K, List[T]]:
"""Groups items by given key function.
Args:
items: An iterable or an iterator of items.
key: A function which given each item will return the key.
Returns:
A dict with keys being each unique key and values being a list of items of
that key.
"""
result = {}
for item in items:
result.setdefault(key(item), []).append(item)
return result
def Batch(items: Iterable[T], size: int) -> Iterator[List[T]]:
"""Divide items into batches of specified size.
  If the number of items is not evenly divisible by the batch size, the last
  batch is going to be shorter.
Args:
items: An iterable or an iterator of items.
size: A size of the returned batches.
Yields:
Lists of items with specified size.
"""
batch = []
for item in items:
batch.append(item)
if len(batch) == size:
yield batch
batch = []
if batch:
yield batch
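# Example (illustrative): list(Batch([1, 2, 3, 4, 5], size=2)) returns
# [[1, 2], [3, 4], [5]] -- the final batch is shorter when the size does not divide evenly.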
def StartsWith(this: Iterable[T], that: Iterable[T]) -> bool:
"""Checks whether an items of one iterable are a prefix of another.
Args:
this: An iterable that needs to be checked.
that: An iterable of which items must match the prefix of `this`.
Returns:
`True` if `that` is a prefix of `this`, `False` otherwise.
"""
this_iter = iter(this)
that_iter = iter(that)
while True:
    try:
      that_value = next(that_iter)
    except StopIteration:
      return True
    try:
      this_value = next(this_iter)
    except StopIteration:
      return False
    if this_value != that_value:
      return False
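# Example (illustrative): StartsWith([1, 2, 3], [1, 2]) is True, whereas
# StartsWith([1, 2], [1, 2, 3]) is False because the would-be prefix is longer.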
def Unzip(iterable: Iterable[Tuple[K, T]]) -> Tuple[Iterable[K], Iterable[T]]:
"""Unzips specified iterable of pairs to pair of two iterables.
This function is an inversion of the standard `zip` function and the following
hold:
* ∀ l, r. l, r == unzip(zip(l, r))
* ∀ p. p == zip(unzip(p))
Examples:
>>> Unzip([("foo", 1), ("bar", 2), ("baz", 3)])
(["foo", "bar", "baz"], [1, 2, 3])
Args:
iterable: An iterable of pairs to unzip.
Returns:
A pair of iterables after unzipping.
"""
lefts = []
rights = []
for left, right in iterable:
lefts.append(left)
rights.append(right)
return lefts, rights
def DictProduct(dictionary: Dict[K, Iterable[T]]) -> Iterator[Dict[K, T]]:
"""Computes a cartesian product of dict with iterable values.
This utility function, accepts a dictionary with iterable values, computes
cartesian products of these values and yields dictionaries of expanded values.
Examples:
>>> list(DictProduct({"a": [1, 2], "b": [3, 4]}))
[{"a": 1, "b": 3}, {"a": 1, "b": 4}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
Args:
dictionary: A dictionary with iterable values.
Yields:
Dictionaries with values being a result of cartesian product of values of
the input dictionary.
"""
keys, values = Unzip(dictionary.items())
for product_values in itertools.product(*values):
yield dict(zip(keys, product_values))
|
the-stack_106_29781 | """Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
import collections.abc
import contextlib
import errno
import faulthandler
import fnmatch
import functools
import gc
import importlib
import importlib.util
import logging.handlers
import nntplib
import os
import platform
import re
import shutil
import socket
import stat
import struct
import subprocess
import sys
import sysconfig
import tempfile
import time
import types
import unittest
import urllib.error
import warnings
try:
import _thread, threading
except ImportError:
_thread = None
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
try:
import zlib
except ImportError:
zlib = None
try:
import gzip
except ImportError:
gzip = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
try:
import resource
except ImportError:
resource = None
__all__ = [
# globals
"PIPE_MAX_SIZE", "verbose", "max_memuse", "use_resources", "failfast",
# exceptions
"Error", "TestFailed", "ResourceDenied",
# imports
"import_module", "import_fresh_module", "CleanImport",
# modules
"unload", "forget",
# io
"record_original_stdout", "get_original_stdout", "captured_stdout",
"captured_stdin", "captured_stderr",
# filesystem
"TESTFN", "SAVEDCWD", "unlink", "rmtree", "temp_cwd", "findfile",
"create_empty_file", "can_symlink", "fs_is_case_insensitive",
# unittest
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "check_syntax_error",
"TransientResource", "time_out", "socket_peer_reset", "ioerror_peer_reset",
"transient_internet", "BasicTestRunner", "run_unittest", "run_doctest",
"skip_unless_symlink", "requires_gzip", "requires_bz2", "requires_lzma",
"bigmemtest", "bigaddrspacetest", "cpython_only", "get_attribute",
"requires_IEEE_754", "skip_unless_xattr", "requires_zlib",
"anticipate_failure", "load_package_tests", "detect_api_mismatch",
"check__all__",
# sys
"is_jython", "check_impl_detail",
# network
"HOST", "IPV6_ENABLED", "find_unused_port", "bind_port", "open_urlresource",
# processes
'temp_umask', "reap_children",
# logging
"TestHandler",
# threads
"threading_setup", "threading_cleanup", "reap_threads", "start_threads",
# miscellaneous
"check_warnings", "check_no_resource_warning", "EnvironmentVarGuard",
"run_with_locale", "swap_item",
"swap_attr", "Matcher", "set_memlimit", "SuppressCrashReport", "sortdict",
"run_with_tz",
]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not be enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect.
"""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False, *, required_on=()):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed. If a module is required on a platform but optional for
others, set required_on to an iterable of platform prefixes which will be
compared against sys.platform.
"""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError as msg:
if sys.platform.startswith(tuple(required_on)):
raise
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported.
"""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise.
"""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def anticipate_failure(condition):
"""Decorator to mark a test that is known to be broken in some cases
Any use of this decorator should have a comment identifying the
associated tracker issue.
"""
if condition:
return unittest.expectedFailure
return lambda f: f
def load_package_tests(pkg_dir, loader, standard_tests, pattern):
"""Generic load_tests implementation for simple test packages.
Most packages can implement load_tests using this function as follows:
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
"""
if pattern is None:
pattern = "test*"
top_dir = os.path.dirname( # Lib
os.path.dirname( # test
os.path.dirname(__file__))) # support
package_tests = loader.discover(start_dir=pkg_dir,
top_level_dir=top_dir,
pattern=pattern)
standard_tests.addTests(package_tests)
return standard_tests
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Import and return a module, deliberately bypassing sys.modules.
This function imports and returns a fresh copy of the named Python module
by removing the named module from sys.modules before doing the import.
Note that unlike reload, the original module is not affected by
this operation.
*fresh* is an iterable of additional module names that are also removed
from the sys.modules cache before doing the import.
*blocked* is an iterable of module names that are replaced with None
in the module cache during the import to ensure that attempts to import
them raise ImportError.
The named module and any modules named in the *fresh* and *blocked*
parameters are saved before starting the import and then reinserted into
sys.modules when the fresh import is complete.
Module and package deprecation messages are suppressed during this import
if *deprecated* is True.
This function will raise ImportError if the named module cannot be
imported.
"""
# NOTE: test_heapq, test_json and test_warnings include extra sanity checks
# to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
failfast = False
match_tests = None
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError as exc:
print("support.rmtree(): os.lstat(%r) failed with %s" % (fullname, exc),
file=sys.__stderr__)
mode = 0
if stat.S_ISDIR(mode):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except (FileNotFoundError, NotADirectoryError):
pass
def rmdir(dirname):
try:
_rmdir(dirname)
except FileNotFoundError:
pass
def rmtree(path):
try:
_rmtree(path)
except FileNotFoundError:
pass
def make_legacy_pyc(source):
"""Move a PEP 3147/488 pyc file to its legacy pyc location.
:param source: The file system path to the source file. The source file
does not need to exist, however the PEP 3147/488 pyc file must exist.
:return: The file system path to the legacy pyc file.
"""
pyc_file = importlib.util.cache_from_source(source)
up_one = os.path.dirname(os.path.abspath(source))
legacy_pyc = os.path.join(up_one, source + 'c')
os.rename(pyc_file, legacy_pyc)
return legacy_pyc
def forget(modname):
"""'Forget' a module was ever imported.
This removes the module from sys.modules and deletes any PEP 3147/488 or
legacy .pyc files.
"""
unload(modname)
for dirname in sys.path:
source = os.path.join(dirname, modname + '.py')
# It doesn't matter if they exist or not, unlink all possible
# combinations of PEP 3147/488 and legacy pyc files.
unlink(source + 'c')
for opt in ('', 1, 2):
unlink(importlib.util.cache_from_source(source, optimization=opt))
# Check whether a gui is actually available
def _is_gui_available():
if hasattr(_is_gui_available, 'result'):
return _is_gui_available.result
reason = None
if sys.platform.startswith('win'):
# if Python is running as a service (such as the buildbot service),
# gui interaction may be disallowed
import ctypes
import ctypes.wintypes
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
if not bool(uof.dwFlags & WSF_VISIBLE):
reason = "gui not available (WSF_VISIBLE flag not set)"
elif sys.platform == 'darwin':
# The Aqua Tk implementations on OS X can abort the process if
# being called in an environment where a window server connection
# cannot be made, for instance when invoked by a buildbot or ssh
# process not running under the same user id as the current console
# user. To avoid that, raise an exception if the window manager
# connection is not available.
from ctypes import cdll, c_int, pointer, Structure
from ctypes.util import find_library
app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
if app_services.CGMainDisplayID() == 0:
reason = "gui tests cannot run without OS X window manager"
else:
class ProcessSerialNumber(Structure):
_fields_ = [("highLongOfPSN", c_int),
("lowLongOfPSN", c_int)]
psn = ProcessSerialNumber()
psn_p = pointer(psn)
if ( (app_services.GetCurrentProcess(psn_p) < 0) or
(app_services.SetFrontProcess(psn_p) < 0) ):
reason = "cannot run without OS X gui process"
# check on every platform whether tkinter can actually do anything
if not reason:
try:
from tkinter import Tk
root = Tk()
root.update()
root.destroy()
except Exception as e:
err_string = str(e)
if len(err_string) > 50:
err_string = err_string[:50] + ' [...]'
reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
err_string)
_is_gui_available.reason = reason
_is_gui_available.result = not reason
return _is_gui_available.result
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
Known resources are set by regrtest.py. If not running under regrtest.py,
all resources are assumed enabled unless use_resources has been set.
"""
return use_resources is None or resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available."""
if resource == 'gui' and not _is_gui_available():
raise ResourceDenied(_is_gui_available.reason)
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the %r resource not enabled" % resource
raise ResourceDenied(msg)
def _requires_unix_version(sysname, min_version):
"""Decorator raising SkipTest if the OS is `sysname` and the version is less
than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def requires_freebsd_version(*min_version):
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is
less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
"""Decorator raising SkipTest if the OS is Linux and the Linux version is
less than `min_version`.
For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux
version is less than 2.6.32.
"""
return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
    version is less than min_version.
For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
    is less than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
# Don't use "localhost", since resolving it uses the DNS under recent
# Windows versions (see issue #18792).
HOST = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
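# Example (illustrative only): a test server socket bound to an ephemeral port
# via bind_port(), so parallel test runs never collide on a hard-coded port.
#
#     import socket
#     from test import support
#
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     port = support.bind_port(sock)   # kernel picks a free port for us
#     sock.listen(5)
#     # ... exercise client code against ('127.0.0.1', port) ...
#     sock.close()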
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def system_must_validate_cert(f):
"""Skip the test on TLS certificate validation failures."""
@functools.wraps(f)
def dec(*args, **kwargs):
try:
f(*args, **kwargs)
except IOError as e:
if "CERTIFICATE_VERIFY_FAILED" in str(e):
raise unittest.SkipTest("system does not contain "
"necessary certificates")
raise
return dec
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
# A constant likely larger than the underlying OS socket buffer size, to make
# writes blocking.
# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
# for a discussion of this number).
SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')
requires_gzip = unittest.skipUnless(gzip, 'requires gzip')
requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')
requires_lzma = unittest.skipUnless(lzma, 'requires lzma')
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or None if there is no such character.
FS_NONASCII = None
for character in (
# First try printable and common characters to have a readable filename.
    # For each character, the encodings listed are just examples of encodings
    # able to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
'\u00E6',
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
'\u0130',
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
'\u0141',
# U+03C6 (Greek Small Letter Phi): cp1253
'\u03C6',
# U+041A (Cyrillic Capital Letter Ka): cp1251
'\u041A',
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
'\u05D0',
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
'\u060C',
# U+062A (Arabic Letter Teh): cp720
'\u062A',
# U+0E01 (Thai Character Ko Kai): cp874
'\u0E01',
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
'\u00A0',
# U+20AC (Euro Sign)
'\u20AC',
):
try:
os.fsdecode(os.fsencode(character))
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name in ('nt', 'ce'):
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
'Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, TESTFN_ENCODING))
TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
try:
# ascii and utf-8 cannot encode the byte 0xff
b'\xff'.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
# 0xff will be encoded using the surrogate character u+DCFF
TESTFN_UNENCODABLE = TESTFN \
+ b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
for name in (
# b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
    # accepts it when creating a file or a directory, but may refuse to enter
    # such a directory (when the bytes name is used). So test b'\xe7' first: it is
# not decodable from cp932.
b'\xe7w\xf0',
# undecodable from ASCII, UTF-8
b'\xff',
# undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
# and cp857
    b'\xae\xd5',
# undecodable from UTF-8 (UNIX and Mac OS X)
b'\xed\xb2\x80', b'\xed\xb4\x80',
# undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
# cp1253, cp1254, cp1255, cp1257, cp1258
b'\x81\x98',
):
try:
name.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name
break
if FS_NONASCII:
TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII
else:
TESTFN_NONASCII = None
# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def temp_dir(path=None, quiet=False):
"""Return a context manager that creates a temporary directory.
Arguments:
path: the directory to create temporarily. If omitted or None,
defaults to creating a temporary directory using tempfile.mkdtemp.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, if the path is specified and cannot be
created, only a warning is issued.
"""
dir_created = False
if path is None:
path = tempfile.mkdtemp()
dir_created = True
path = os.path.realpath(path)
else:
try:
os.mkdir(path)
dir_created = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to create temp dir: ' + path,
RuntimeWarning, stacklevel=3)
try:
yield path
finally:
if dir_created:
rmtree(path)
@contextlib.contextmanager
def change_cwd(path, quiet=False):
"""Return a context manager that changes the current working directory.
Arguments:
path: the directory to use as the temporary current working directory.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, it issues only a warning and keeps the current
working directory the same.
"""
saved_dir = os.getcwd()
try:
os.chdir(path)
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change CWD to: ' + path,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
Context manager that temporarily creates and changes the CWD.
The function temporarily changes the current working directory
after creating a temporary directory in the current directory with
name *name*. If *name* is None, the temporary directory is
created using tempfile.mkdtemp.
If *quiet* is False (default) and it is not possible to
create or change the CWD, an error is raised. If *quiet* is True,
only a warning is raised and the original CWD is used.
"""
with temp_dir(path=name, quiet=quiet) as temp_path:
with change_cwd(temp_path, quiet=quiet) as cwd_dir:
yield cwd_dir
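# Example (illustrative only): running filesystem-heavy test code inside a
# throwaway working directory that is removed again afterwards.
#
#     from test import support
#
#     with support.temp_cwd() as cwd:
#         with open('scratch.txt', 'w') as f:
#             f.write('data')
#     # leaving the block restores the old cwd and deletes the temp directory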
if hasattr(os, "umask"):
@contextlib.contextmanager
def temp_umask(umask):
"""Context manager that temporarily sets the process umask."""
oldmask = os.umask(umask)
try:
yield
finally:
os.umask(oldmask)
# TEST_HOME_DIR refers to the top level directory of the "test" package
# that contains Python's regression test suite
TEST_SUPPORT_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_HOME_DIR = os.path.dirname(TEST_SUPPORT_DIR)
# TEST_DATA_DIR is used as a target download location for remote resources
TEST_DATA_DIR = os.path.join(TEST_HOME_DIR, "data")
def findfile(filename, subdir=None):
"""Try to find a file on sys.path or in the test directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path).
Setting *subdir* indicates a relative path to use to find the file
rather than looking directly in the path directories.
"""
if os.path.isabs(filename):
return filename
if subdir is not None:
filename = os.path.join(subdir, filename)
path = [TEST_HOME_DIR] + sys.path
for dn in path:
fn = os.path.join(dn, filename)
if os.path.exists(fn): return fn
return filename
def create_empty_file(filename):
"""Create an empty file. If the file already exists, truncate it."""
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(fd)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = sorted(dict.items())
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, *args, **kw):
import urllib.request, urllib.parse
check = kw.pop('check', None)
filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
fn = os.path.join(TEST_DATA_DIR, filename)
def check_valid_file(fn):
f = open(fn, *args, **kw)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
if verbose:
print('\tfetching %s ...' % url, file=get_original_stdout())
opener = urllib.request.build_opener()
if gzip:
opener.addheaders.append(('Accept-Encoding', 'gzip'))
f = opener.open(url, timeout=15)
if gzip and f.headers.get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=f)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
@contextlib.contextmanager
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swap the module, we need to look up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
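# Example (illustrative only): asserting that a block emits a
# DeprecationWarning whose message matches a regular expression
# (matched from the start of the message).
#
#     import warnings
#     from test import support
#
#     with support.check_warnings(("frobnicate", DeprecationWarning)):
#         warnings.warn("frobnicate() is deprecated", DeprecationWarning)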
@contextlib.contextmanager
def check_no_resource_warning(testcase):
"""Context manager to check that no ResourceWarning is emitted.
Usage:
with check_no_resource_warning(self):
f = open(...)
...
del f
You must remove the object which may emit ResourceWarning before
the end of the context manager.
"""
with warnings.catch_warnings(record=True) as warns:
warnings.filterwarnings('always', category=ResourceWarning)
yield
gc_collect()
testcase.assertEqual(warns, [])
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
                # In that case, we also need to delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
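# Example (illustrative only): overriding and removing environment variables
# for the duration of a block; the original values are restored on exit.
# run_under_test() is a placeholder for whatever code reads the environment.
#
#     from test import support
#
#     with support.EnvironmentVarGuard() as env:
#         env['HOME'] = '/tmp/fakehome'
#         env.unset('PYTHONPATH')
#         run_under_test()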
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(OSError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
(isinstance(err, urllib.error.HTTPError) and
500 <= err.code <= 599) or
(isinstance(err, urllib.error.URLError) and
(("ConnectionRefusedError" in err.reason) or
("TimeoutError" in err.reason))) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except nntplib.NNTPTemporaryError as err:
if verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
except OSError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], OSError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise OSError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], OSError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
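# Example (illustrative only): turning flaky network failures into
# ResourceDenied (a skip) instead of a hard test error.
#
#     import urllib.request
#     from test import support
#
#     def test_fetch(self):
#         with support.transient_internet('www.example.com'):
#             with urllib.request.urlopen('http://www.example.com/') as resp:
#                 resp.read()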
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import io
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
@contextlib.contextmanager
def disable_gc():
have_gc = gc.isenabled()
gc.disable()
try:
yield
finally:
if have_gc:
gc.enable()
def python_is_optimized():
"""Find if Python was built with optimizations."""
cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
final_opt = ""
for opt in cflags.split():
if opt.startswith('-O'):
final_opt = opt
return final_opt not in ('', '-O0', '-Og')
_header = 'nP'
_align = '0n'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_align = '0P'
_vheader = _header + 'n'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + _align)
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + _align)
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
import _testcapi
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
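# Example (illustrative only): running a single test under the first available
# locale from a candidate list; the original locale is restored afterwards.
#
#     from test import support
#
#     @support.run_with_locale('LC_NUMERIC', 'de_DE.UTF-8', 'fr_FR.UTF-8', '')
#     def test_float_is_locale_independent(self):
#         self.assertEqual(float('1.5'), 1.5)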
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
def decorator(func):
def inner(*args, **kwds):
try:
tzset = time.tzset
except AttributeError:
raise unittest.SkipTest("tzset required")
if 'TZ' in os.environ:
orig_tz = os.environ['TZ']
else:
orig_tz = None
os.environ['TZ'] = tz
tzset()
# now run the function, resetting the tz on exceptions
try:
return func(*args, **kwds)
finally:
if orig_tz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = orig_tz
time.tzset()
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
"""
def __init__(self):
self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
self.started = False
def start(self):
try:
f = open(self.procfile, 'r')
except OSError as e:
warnings.warn('/proc not available for stats: {}'.format(e),
RuntimeWarning)
sys.stderr.flush()
return
watchdog_script = findfile("memory_watchdog.py")
self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
stdin=f, stderr=subprocess.DEVNULL)
f.close()
self.started = True
def stop(self):
if self.started:
self.mem_watchdog.terminate()
self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
"""Decorator for bigmem tests.
'size' is a requested size for the test (in arbitrary, test-interpreted
units.) 'memuse' is the number of bytes per unit for the test, or a good
estimate of it. For example, a test that needs two byte buffers, of 4 GiB
each, could be decorated with @bigmemtest(size=_4G, memuse=2).
The 'size' argument is normally passed to the decorated test method as an
extra argument. If 'dry_run' is true, the value passed to the test method
may be less than the requested value. If 'dry_run' is false, it means the
test doesn't support dummy runs when -M is not specified.
"""
def decorator(f):
def wrapper(self):
size = wrapper.size
memuse = wrapper.memuse
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (size * memuse / (1024 ** 3)))
if real_max_memuse and verbose:
print()
print(" ... expected peak memory use: {peak:.1f}G"
.format(peak=size * memuse / (1024 ** 3)))
watchdog = _MemoryWatchdog()
watchdog.start()
else:
watchdog = None
try:
return f(self, maxsize)
finally:
if watchdog:
watchdog.stop()
wrapper.size = size
wrapper.memuse = memuse
return wrapper
return decorator
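# Example (illustrative only): a bigmem test declares the requested size and
# an estimate of bytes used per size unit; regrtest's -M option controls
# whether it really runs at full size.
#
#     import unittest
#     from test.support import bigmemtest, _4G
#
#     class BytesBigMemTest(unittest.TestCase):
#         @bigmemtest(size=_4G, memuse=2)
#         def test_repeat(self, size):
#             s = b'x' * size
#             self.assertEqual(len(s), size)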
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
raise unittest.SkipTest(
"not enough memory: try a 32-bit build instead")
else:
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (MAX_Py_ssize_t / (1024 ** 3)))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip(_is_gui_available.reason)
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def no_tracing(func):
"""Decorator to temporarily turn off tracing for the duration of a test."""
if not hasattr(sys, 'gettrace'):
return func
else:
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
def refcount_test(test):
"""Decorator for tests which involve reference counting.
    To start, the decorator does not run the test if it is not run by CPython.
After that, any trace function is unset during the test to prevent
unexpected refcounts caused by the trace function.
"""
return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
failfast=failfast)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose: err += "; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
def case_pred(test):
if match_tests is None:
return True
for name in test.id().split("."):
if fnmatch.fnmatchcase(name, match_tests):
return True
return False
_filter_suite(suite, case_pred)
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
# Rather than trying to enumerate all the cases where docstrings may be
# disabled, we just check for that directly
def _check_docstrings():
"""Just used to check if docstrings are enabled"""
MISSING_C_DOCSTRINGS = (check_impl_detail() and
sys.platform != 'win32' and
not sysconfig.get_config_var('WITH_DOC_STRINGS'))
HAVE_DOCSTRINGS = (_check_docstrings.__doc__ is not None and
not MISSING_C_DOCSTRINGS)
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
(module.__name__, t))
return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
return sys.modules.copy(),
def modules_cleanup(oldmodules):
# Encoders/decoders are registered permanently within the internal
# codec cache. If we destroy the corresponding modules their
# globals will be set to None which will trip up the cached functions.
encodings = [(k, v) for k, v in sys.modules.items()
if k.startswith('encodings.')]
sys.modules.clear()
sys.modules.update(encodings)
# XXX: This kind of problem can affect more than just encodings. In particular
# extension modules (such as _ssl) don't cope with reloading properly.
# Really, test modules should be cleaning out the test specific modules they
# know they added (ala test_runpy) rather than relying on this function (as
# test_importhooks and test_pkg do currently).
# Implicitly imported *real* modules should be left alone (see issue 10556).
sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
if _thread:
return _thread._count(), threading._dangling.copy()
else:
return 1, ()
def threading_cleanup(*original_values):
if not _thread:
return
_MAX_COUNT = 100
for count in range(_MAX_COUNT):
values = _thread._count(), threading._dangling
if values == original_values:
break
time.sleep(0.01)
gc_collect()
# XXX print a warning in case of failure?
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
if not _thread:
return func
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
@contextlib.contextmanager
def start_threads(threads, unlock=None):
threads = list(threads)
started = []
try:
try:
for t in threads:
t.start()
started.append(t)
except:
if verbose:
print("Can't start %d threads, only %d threads started" %
(len(threads), len(started)))
raise
yield
finally:
try:
if unlock:
unlock()
endtime = starttime = time.time()
for timeout in range(1, 16):
endtime += 60
for t in started:
t.join(max(endtime - time.time(), 0.01))
started = [t for t in started if t.isAlive()]
if not started:
break
if verbose:
print('Unable to join %d threads during a period of '
'%d minutes' % (len(started), timeout))
finally:
started = [t for t in started if t.isAlive()]
if started:
faulthandler.dump_traceback(sys.stdout)
raise AssertionError('Unable to join %d threads' % len(started))
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
delattr(obj, attr)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
"""Temporary swap out an item with a new object.
Usage:
with swap_item(obj, "item", 5):
...
This will set obj["item"] to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `item` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if item in obj:
real_val = obj[item]
obj[item] = new_val
try:
yield
finally:
obj[item] = real_val
else:
obj[item] = new_val
try:
yield
finally:
del obj[item]
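# Example (illustrative only): patching an attribute and a mapping entry for
# the duration of a block; the previous values come back when the block exits.
#
#     import io
#     import sys
#     from test import support
#
#     with support.swap_attr(sys, 'stdout', io.StringIO()):
#         print('captured')               # written to the temporary StringIO
#
#     d = {'key': 'original'}
#     with support.swap_item(d, 'key', 'patched'):
#         assert d['key'] == 'patched'
#     assert d['key'] == 'original'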
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
return stderr
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
return subprocess._args_from_interpreter_flags()
def optim_args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
optimization settings in sys.flags."""
return subprocess._optim_args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
def __init__(self, matcher):
# BufferingHandler takes a "capacity" argument
# so as to know when to flush. As we're overriding
# shouldFlush anyway, we can set a capacity of zero.
# You can call flush() manually to clear out the
# buffer.
logging.handlers.BufferingHandler.__init__(self, 0)
self.matcher = matcher
def shouldFlush(self):
return False
def emit(self, record):
self.format(record)
self.buffer.append(record.__dict__)
def matches(self, **kwargs):
"""
Look for a saved dict whose keys/values match the supplied arguments.
"""
result = False
for d in self.buffer:
if self.matcher.matches(d, **kwargs):
result = True
break
return result
class Matcher(object):
_partial_matches = ('msg', 'message')
def matches(self, d, **kwargs):
"""
Try to match a single dict with the supplied arguments.
Keys whose values are strings and which are in self._partial_matches
will be checked for partial (i.e. substring) matches. You can extend
this scheme to (for example) do regular expression matching, etc.
"""
result = True
for k in kwargs:
v = kwargs[k]
dv = d.get(k)
if not self.match_value(k, dv, v):
result = False
break
return result
def match_value(self, k, dv, v):
"""
Try to match a single stored value (dv) with a supplied value (v).
"""
if type(v) != type(dv):
result = False
elif type(dv) is not str or k not in self._partial_matches:
result = (v == dv)
else:
result = dv.find(v) >= 0
return result
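# Illustrative sketch (not part of the original helpers): the Matcher docstring
# invites extending the partial-match scheme, e.g. to regular expressions. A
# hypothetical subclass could look like:
#
#   import re
#
#   class RegexMatcher(Matcher):
#       def match_value(self, k, dv, v):
#           if k in self._partial_matches and isinstance(dv, str):
#               return re.search(v, dv) is not None
#           return Matcher.match_value(self, k, dv, v)
#
# TestHandler(RegexMatcher()) would then let tests assert
# handler.matches(msg=r"failed after \d+ retries").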
_can_symlink = None
def can_symlink():
global _can_symlink
if _can_symlink is not None:
return _can_symlink
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
_can_symlink = can
return can
def skip_unless_symlink(test):
"""Skip decorator for tests that require functional symlink"""
ok = can_symlink()
msg = "Requires functional symlink implementation"
return test if ok else unittest.skip(msg)(test)
_can_xattr = None
def can_xattr():
global _can_xattr
if _can_xattr is not None:
return _can_xattr
if not hasattr(os, "setxattr"):
can = False
else:
tmp_fp, tmp_name = tempfile.mkstemp()
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
m = re.match("2.6.(\d{1,2})", kernel_version)
can = m is None or int(m.group(1)) >= 39
except OSError:
can = False
finally:
unlink(TESTFN)
unlink(tmp_name)
_can_xattr = can
return can
def skip_unless_xattr(test):
"""Skip decorator for tests that require functional extended attributes"""
ok = can_xattr()
msg = "no non-broken extended attribute support"
return test if ok else unittest.skip(msg)(test)
def fs_is_case_insensitive(directory):
"""Detects if the file system for the specified directory is case-insensitive."""
with tempfile.NamedTemporaryFile(dir=directory) as base:
base_path = base.name
case_path = base_path.upper()
if case_path == base_path:
case_path = base_path.lower()
try:
return os.path.samefile(base_path, case_path)
except FileNotFoundError:
return False
def detect_api_mismatch(ref_api, other_api, *, ignore=()):
"""Returns the set of items in ref_api not in other_api, except for a
defined list of items to be ignored in this check.
By default this skips private attributes beginning with '_' but
includes all magic methods, i.e. those starting and ending in '__'.
"""
missing_items = set(dir(ref_api)) - set(dir(other_api))
if ignore:
missing_items -= set(ignore)
missing_items = set(m for m in missing_items
if not m.startswith('_') or m.endswith('__'))
return missing_items
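# Illustrative usage sketch (an assumption, not taken from an actual test): a
# test comparing a pure-Python module against its C accelerator might do
#
#   missing = detect_api_mismatch(py_impl, c_impl, ignore={'__about__'})
#   self.assertEqual(missing, set())
#
# where py_impl / c_impl are placeholders for the two modules under comparison.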
def check__all__(test_case, module, name_of_module=None, extra=(),
blacklist=()):
"""Assert that the __all__ variable of 'module' contains all public names.
The module's public names (its API) are detected automatically based on
whether they match the public name convention and were defined in
'module'.
The 'name_of_module' argument can specify (as a string or tuple thereof)
what module(s) an API could be defined in in order to be detected as a
public API. One case for this is when 'module' imports part of its public
API from other modules, possibly a C backend (like 'csv' and its '_csv').
The 'extra' argument can be a set of names that wouldn't otherwise be
automatically detected as "public", like objects without a proper
    '__module__' attribute. If provided, it will be added to the
automatically detected ones.
The 'blacklist' argument can be a set of names that must not be treated
as part of the public API even though their names indicate otherwise.
Usage:
import bar
import foo
import unittest
from test import support
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, foo)
class OtherTestCase(unittest.TestCase):
def test__all__(self):
extra = {'BAR_CONST', 'FOO_CONST'}
blacklist = {'baz'} # Undocumented name.
# bar imports part of its API from _bar.
support.check__all__(self, bar, ('bar', '_bar'),
extra=extra, blacklist=blacklist)
"""
if name_of_module is None:
name_of_module = (module.__name__, )
elif isinstance(name_of_module, str):
name_of_module = (name_of_module, )
expected = set(extra)
for name in dir(module):
if name.startswith('_') or name in blacklist:
continue
obj = getattr(module, name)
if (getattr(obj, '__module__', None) in name_of_module or
(not hasattr(obj, '__module__') and
not isinstance(obj, types.ModuleType))):
expected.add(name)
test_case.assertCountEqual(module.__all__, expected)
class SuppressCrashReport:
"""Try to prevent a crash report from popping up.
On Windows, don't display the Windows Error Reporting dialog. On UNIX,
disable the creation of coredump file.
"""
old_value = None
old_modes = None
def __enter__(self):
"""On Windows, disable Windows Error Reporting dialogs using
SetErrorMode.
On UNIX, try to save the previous core file size limit, then set
soft limit to 0.
"""
if sys.platform.startswith('win'):
# see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
# GetErrorMode is not available on Windows XP and Windows Server 2003,
# but SetErrorMode returns the previous value, so we can use that
import ctypes
self._k32 = ctypes.windll.kernel32
SEM_NOGPFAULTERRORBOX = 0x02
self.old_value = self._k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
self._k32.SetErrorMode(self.old_value | SEM_NOGPFAULTERRORBOX)
# Suppress assert dialogs in debug builds
# (see http://bugs.python.org/issue23314)
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
self.old_modes = {}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
old_mode = msvcrt.CrtSetReportMode(report_type,
msvcrt.CRTDBG_MODE_FILE)
old_file = msvcrt.CrtSetReportFile(report_type,
msvcrt.CRTDBG_FILE_STDERR)
self.old_modes[report_type] = old_mode, old_file
else:
if resource is not None:
try:
self.old_value = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE,
(0, self.old_value[1]))
except (ValueError, OSError):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the next manager.
value = subprocess.Popen(['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType'],
stdout=subprocess.PIPE).communicate()[0]
if value.strip() == b'developer':
print("this test triggers the Crash Reporter, "
"that is intentional", end='', flush=True)
return self
def __exit__(self, *ignore_exc):
"""Restore Windows ErrorMode or core file behavior to initial value."""
if self.old_value is None:
return
if sys.platform.startswith('win'):
self._k32.SetErrorMode(self.old_value)
if self.old_modes:
import msvcrt
for report_type, (old_mode, old_file) in self.old_modes.items():
msvcrt.CrtSetReportMode(report_type, old_mode)
msvcrt.CrtSetReportFile(report_type, old_file)
else:
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_value)
except (ValueError, OSError):
pass
def patch(test_instance, object_to_patch, attr_name, new_value):
"""Override 'object_to_patch'.'attr_name' with 'new_value'.
Also, add a cleanup procedure to 'test_instance' to restore
'object_to_patch' value for 'attr_name'.
The 'attr_name' should be a valid attribute for 'object_to_patch'.
"""
# check that 'attr_name' is a real attribute for 'object_to_patch'
# will raise AttributeError if it does not exist
getattr(object_to_patch, attr_name)
# keep a copy of the old value
attr_is_local = False
try:
old_value = object_to_patch.__dict__[attr_name]
except (AttributeError, KeyError):
old_value = getattr(object_to_patch, attr_name, None)
else:
attr_is_local = True
# restore the value when the test is done
def cleanup():
if attr_is_local:
setattr(object_to_patch, attr_name, old_value)
else:
delattr(object_to_patch, attr_name)
test_instance.addCleanup(cleanup)
# actually override the attribute
setattr(object_to_patch, attr_name, new_value)
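# Illustrative usage sketch (the target module and attribute below are
# placeholders, not requirements of this helper):
#
#   patch(self, os, 'getcwd', lambda: '/tmp')
#
# called inside a TestCase method overrides os.getcwd for the rest of the test
# and registers a cleanup that restores the original attribute afterwards.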
def run_in_subinterp(code):
"""
Run code in a subinterpreter. Raise unittest.SkipTest if the tracemalloc
module is enabled.
"""
# Issue #10915, #15751: PyGILState_*() functions don't work with
# sub-interpreters, the tracemalloc module uses these functions internally
try:
import tracemalloc
except ImportError:
pass
else:
if tracemalloc.is_tracing():
raise unittest.SkipTest("run_in_subinterp() cannot be used "
"if tracemalloc module is tracing "
"memory allocations")
import _testcapi
return _testcapi.run_in_subinterp(code)
def check_free_after_iterating(test, iter, cls, args=()):
class A(cls):
def __del__(self):
nonlocal done
done = True
try:
next(it)
except StopIteration:
pass
done = False
it = iter(A(*args))
# Issue 26494: Shouldn't crash
test.assertRaises(StopIteration, next, it)
# The sequence should be deallocated just after the end of iterating
gc_collect()
test.assertTrue(done)
|
the-stack_106_29782 | import sys
import csv
import copy
import numpy as np
import pandas as pd
from lib.visualize import TrainHistoryPlot
def from_dataframe(dataframe):
    # convert the pandas column into a plain numpy array
seq = []
for i in range(len(dataframe)):
seq.append(dataframe.iloc[i])
return np.asarray(seq)
def grab_FThis(dataframe_list, sys_argv, mode):
his = []
his_label = []
for i in range(len(dataframe_list)):
if i == 0 and mode == 'train':
his.append(from_dataframe(dataframe_list[i].iloc[:, 0]))
his.append(from_dataframe(dataframe_list[i].iloc[:, 1]))
elif i == 0 and mode == 'test':
his.append(from_dataframe(dataframe_list[i].iloc[:, 0]))
his.append(from_dataframe(dataframe_list[i].iloc[:, 2]))
elif i != 0 and mode == 'train':
if (from_dataframe(dataframe_list[i].iloc[:, 0]) == his[0]).all():
his.append(from_dataframe(dataframe_list[i].iloc[:, 1]))
else:
                raise RuntimeError('Input files do not match; all runs must use the same training setting.')
elif i != 0 and mode == 'test':
if (from_dataframe(dataframe_list[i].iloc[:, 0]) == his[0]).all():
his.append(from_dataframe(dataframe_list[i].iloc[:, 2]))
else:
                raise RuntimeError('Input files do not match; all runs must use the same training setting.')
else:
            raise RuntimeError('Please check the command-line arguments.')
his_label.append(sys_argv[i].replace('.csv', ''))
return his, his_label
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: python3 plot_his_FT.py [image name] [csv file 1] [csv file 2] ...')
exit(0)
dataframe_list = []
args = []
for i in range(2, len(sys.argv)):
dataframe_list.append(pd.read_csv(sys.argv[i]))
args.append(sys.argv[i])
his, his_label = grab_FThis(dataframe_list, args, 'train')
TrainHistoryPlot(his, his_label, sys.argv[1] + '_train_loss', 'FT_train_loss', ['iter', 'loss'])
his, his_label = grab_FThis(dataframe_list, args, 'test')
TrainHistoryPlot(his, his_label, sys.argv[1] + '_test_loss', 'FT_test_loss', ['iter', 'loss'])
print('All process done.')
|
the-stack_106_29783 | import os.path
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
class CelebaDataset(BaseDataset):
def get_paths(self, opt, phase="train"):
root = opt.dataroot
assert phase == "train", "Only training data is available for this dataset"
seg_dir = os.path.join(root, 'CelebAMask-HQ-mask')
seg_paths_all = make_dataset(seg_dir, recursive=True)
seg_paths = [p for p in seg_paths_all if p.endswith('.png')]
img_dir = os.path.join(root, 'CelebA-HQ-img')
img_paths_all = make_dataset(img_dir, recursive=True)
img_paths = [p for p in img_paths_all if p.endswith('.jpg')]
return seg_paths, None, img_paths
def paths_match(self, path1, path2):
name1 = os.path.basename(path1).split(".")[0]
name2 = os.path.basename(path2).split(".")[0]
return name1 == name2
|
the-stack_106_29784 | #!/usr/bin/env python3
# Copyright (c) 2012-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/helleniccoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
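# Illustrative note (added for clarity, not part of the original script): for
# xgettext output such as
#
#   msgid "Insufficient funds"
#   msgstr ""
#
# parse_po returns [(['"Insufficient funds"'], ['""'])] -- each message is kept
# as a list of quoted source lines, which is why EMPTY above is ['""'].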
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
sys.exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w', encoding="utf8")
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *helleniccoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("helleniccoin-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("helleniccoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("helleniccoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("helleniccoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
the-stack_106_29786 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import server_base
from heat.engine import support
cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config')
cfg.CONF.import_opt('default_user_data_format', 'heat.common.config')
cfg.CONF.import_opt('max_server_name_length', 'heat.common.config')
LOG = logging.getLogger(__name__)
class DeployedServer(server_base.BaseServer):
"""A resource for managing servers that are already deployed.
A DeployedServer resource manages resources for servers that have been
deployed externally from OpenStack. These servers can be associated with
SoftwareDeployments for further orchestration via Heat.
"""
PROPERTIES = (
NAME, METADATA, SOFTWARE_CONFIG_TRANSPORT,
DEPLOYMENT_SWIFT_DATA
) = (
'name', 'metadata', 'software_config_transport',
'deployment_swift_data'
)
_SOFTWARE_CONFIG_TRANSPORTS = (
POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE
) = (
'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE'
)
_DEPLOYMENT_SWIFT_DATA_KEYS = (
CONTAINER, OBJECT
) = (
'container', 'object',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Server name.'),
update_allowed=True
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key/value metadata to store for this server. Both '
'keys and values must be 255 characters or less. Non-string '
'values will be serialized to JSON (and the serialized '
'string must be 255 characters or less).'),
update_allowed=True,
support_status=support.SupportStatus(
status=support.DEPRECATED,
message='This property will be ignored',
version='9.0.0',
previous_status=support.SupportStatus(
status=support.SUPPORTED,
version='8.0.0'
)
)
),
SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
properties.Schema.STRING,
_('How the server should receive the metadata required for '
'software configuration. POLL_SERVER_CFN will allow calls to '
'the cfn API action DescribeStackResource authenticated with '
'the provided keypair. POLL_SERVER_HEAT will allow calls to '
'the Heat API resource-show using the provided keystone '
'credentials. POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling. ZAQAR_MESSAGE will '
'create a dedicated zaqar queue and post the metadata '
'for polling.'),
default=cfg.CONF.default_software_config_transport,
update_allowed=True,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
]
),
DEPLOYMENT_SWIFT_DATA: properties.Schema(
properties.Schema.MAP,
_('Swift container and object to use for storing deployment data '
'for the server resource. The parameter is a map value '
'with the keys "container" and "object", and the values '
'are the corresponding container and object names. The '
'software_config_transport parameter must be set to '
'POLL_TEMP_URL for swift to be used. If not specified, '
'and software_config_transport is set to POLL_TEMP_URL, a '
'container will be automatically created from the resource '
'name, and the object name will be a generated uuid.'),
support_status=support.SupportStatus(version='9.0.0'),
default={},
update_allowed=True,
schema={
CONTAINER: properties.Schema(
properties.Schema.STRING,
_('Name of the container.'),
constraints=[
constraints.Length(min=1)
]
),
OBJECT: properties.Schema(
properties.Schema.STRING,
_('Name of the object.'),
constraints=[
constraints.Length(min=1)
]
)
}
)
}
ATTRIBUTES = (
NAME_ATTR, OS_COLLECT_CONFIG
) = (
'name', 'os_collect_config'
)
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name of the server.'),
type=attributes.Schema.STRING
),
OS_COLLECT_CONFIG: attributes.Schema(
_('The os-collect-config configuration for the server\'s local '
'agent to be configured to connect to Heat to retrieve '
'deployment data.'),
type=attributes.Schema.MAP,
support_status=support.SupportStatus(version='9.0.0'),
cache_mode=attributes.Schema.CACHE_NONE
),
}
def __init__(self, name, json_snippet, stack):
super(DeployedServer, self).__init__(name, json_snippet, stack)
self._register_access_key()
def handle_create(self):
metadata = self.metadata_get(True) or {}
self.resource_id_set(self.uuid)
self._create_transport_credentials(self.properties)
self._populate_deployments_metadata(metadata, self.properties)
return self.resource_id
def _delete(self):
self._delete_queue()
self._delete_user()
self._delete_temp_url()
def resource_mapping():
return {
'OS::Heat::DeployedServer': DeployedServer,
}
|
the-stack_106_29787 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from blockflix.store.models import Staff, Country, City, Address, Actor, Category,\
Customer, Film, Language, Payment, \
Rental, Store, Role
from .factories import StaffFactory, CountryFactory, CityFactory, AddressFactory, \
ActorFactory, CategoryFactory, CustomerFactory, FilmFactory, \
LanguageFactory, PaymentFactory, RentalFactory, StoreFactory
@pytest.mark.usefixtures('db')
class TestStaff:
"""Staff tests."""
def test_get_by_id(self):
"""Get user by ID."""
user = Staff('foo', '[email protected]', first_name="Test", last_name="Test")
user.save()
retrieved = Staff.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
"""Test creation date."""
user = Staff(username='foo', email='[email protected]', first_name="Test", last_name="Test")
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
"""Test null password."""
user = Staff(username='foo', email='[email protected]', first_name="Test", last_name="Test")
user.save()
assert user.password is None
def test_factory(self, db):
"""Test user factory."""
user = StaffFactory(password='myprecious')
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
"""Check password."""
user = Staff.create(username='foo', email='[email protected]',
password='foobarbaz123', first_name="Test", last_name="Test")
assert user.check_password('foobarbaz123') is True
assert user.check_password('barfoobaz') is False
def test_full_name(self):
"""Staff full name."""
user = StaffFactory(first_name='Foo', last_name='Bar')
assert user.full_name == 'Foo Bar'
def test_roles(self):
"""Add a role to a user."""
role = Role(name='admin')
role.save()
user = StaffFactory()
user.roles.append(role)
user.save()
assert role in user.roles
def test_address(self):
"""Add a role to a user."""
address = AddressFactory(address="123 Main St")
address.save()
user = StaffFactory()
user.address = address
user.save()
assert address == user.address
def test_relationships(self):
store = StoreFactory()
store.save()
address = AddressFactory()
address.save()
staff = Staff(username='foo', email='[email protected]', first_name="Test", last_name="Test")
staff.store = store
staff.address = address
staff.save()
assert store == staff.store
assert address == staff.address
assert [staff] == store.staff
assert [staff] == address.staff
@pytest.mark.usefixtures('db')
class TestCountry:
"""Country tests."""
def test_get_by_id(self):
"""Get country by ID."""
country = Country(country="USA")
country.save()
retrieved = Country.get_by_id(country.id)
assert retrieved == country
def test_factory(self, db):
"""Test country factory."""
country = CountryFactory()
db.session.commit()
assert bool(country.country)
@pytest.mark.usefixtures('db')
class TestCity:
"""City tests."""
def test_relationships(self):
country = CountryFactory()
country.save()
city = City(city="New York")
city.country = country
city.save()
assert country == city.country
def test_factory(self, db):
"""Test country factory."""
city = CityFactory()
db.session.commit()
assert bool(city.city)
assert bool(city.country)
@pytest.mark.usefixtures('db')
class TestAddress:
"""Address tests."""
def test_relationships(self):
city = CityFactory()
city.save()
address = Address(address="123 Main St", postal_code="12345")
address.city = city
address.save()
assert city == address.city
def test_factory(self, db):
"""Test country factory."""
address = AddressFactory()
db.session.commit()
assert bool(address.address)
assert bool(address.city)
assert bool(address.postal_code)
@pytest.mark.usefixtures('db')
class TestStore:
"""Store tests."""
def test_relationships(self):
staff = StaffFactory()
staff.save()
address = AddressFactory()
address.save()
store = Store()
store.address = address
store.manager = staff
store.save()
assert staff == store.manager
assert address == store.address
assert store in address.stores
assert store in staff.managing_stores
def test_factory(self, db):
store = StoreFactory()
db.session.commit()
assert type(store.manager) == Staff
assert type(store.address) == Address
@pytest.mark.usefixtures('db')
class TestLanguage:
def test_relationships(self):
language = Language(name="Test")
language.save()
film = FilmFactory(language=language)
film.save()
assert [film] == language.films
def test_factory(self, db):
language = LanguageFactory()
db.session.commit()
assert bool(language.name)
@pytest.mark.usefixtures('db')
class TestActor:
def test_relationships(self):
actor = Actor(first_name="Test", last_name="Test")
actor.save()
film = FilmFactory(actors=[actor])
film.save()
assert [film] == actor.films
def test_factory(self, db):
actor = ActorFactory()
db.session.commit()
assert bool(actor.first_name)
assert bool(actor.last_name)
@pytest.mark.usefixtures('db')
class TestCategory:
def test_relationships(self):
category = Category(name="Test")
category.save()
film = FilmFactory(categories=[category])
film.save()
assert [film] == category.films
def test_factory(self, db):
category = CategoryFactory()
db.session.commit()
assert bool(category.name)
@pytest.mark.usefixtures('db')
class TestCustomer:
"""Customer tests."""
def test_relationships(self):
store = StoreFactory()
store.save()
address = AddressFactory()
address.save()
customer = Customer(first_name="Test", last_name="Test",
email="[email protected]", active=True)
customer.address = address
customer.store = store
customer.save()
assert store == customer.store
assert address == customer.address
assert customer in store.customers
assert customer in address.customers
def test_factory(self, db):
customer = CustomerFactory()
db.session.commit()
assert type(customer.store) == Store
assert type(customer.address) == Address
@pytest.mark.usefixtures('db')
class TestFilm:
"""Film tests."""
def test_relationships(self):
language = LanguageFactory()
language.save()
category = CategoryFactory()
category.save()
actor = ActorFactory()
actor.save()
film = Film(title="Test", description="Test",
release_year=2000, rental_duration=7, rental_rate=4.99,
replacement_cost=49.99, length=120)
film.language = language
film.categories = [category]
film.actors = [actor]
film.save()
assert language == film.language
assert category in film.categories
assert actor in film.actors
assert film in language.films
assert film in category.films
assert film in actor.films
def test_factory(self, db):
film = FilmFactory()
db.session.commit()
assert type(film.language) == Language
@pytest.mark.usefixtures('db')
class TestRental:
def test_relationships(self):
store = StoreFactory()
store.save()
staff = StaffFactory()
staff.save()
inventory = FilmFactory()
inventory.save()
store = StoreFactory()
store.save()
customer = CustomerFactory()
customer.save()
rental = Rental(inventory=inventory, customer=customer, staff=staff)
rental.save()
assert staff == rental.staff
assert inventory == rental.inventory
assert customer == rental.customer
assert [rental] == staff.rentals
assert [rental] == inventory.rentals
assert [rental] == customer.rentals
def test_factory(self, db):
rental = RentalFactory()
db.session.commit()
assert type(rental.customer) == Customer
assert type(rental.inventory) == Film
assert type(rental.staff) == Staff
@pytest.mark.usefixtures('db')
class TestPayment:
def test_relationships(self):
staff = StaffFactory()
staff.save()
rental = RentalFactory()
rental.save()
customer = CustomerFactory()
customer.save()
payment = Payment(amount=5.99, customer=customer, rental=rental, staff=staff)
assert staff == payment.staff
assert rental == payment.rental
assert customer == payment.customer
assert [payment] == staff.payments
assert [payment] == rental.payments
assert [payment] == customer.payments
def test_factory(self, db):
payment = PaymentFactory()
db.session.commit()
assert type(payment.customer) == Customer
assert type(payment.rental) == Rental
assert type(payment.staff) == Staff
|
the-stack_106_29790 | import os
from setuptools import setup, find_packages
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
about = {}
with open(os.path.join(ROOT, "awseipext", "__about__.py")) as f:
exec (f.read(), about)
setup(
name=about["__title__"],
version=about["__version__"],
author=about["__author__"],
author_email=about["__email__"],
url=about["__uri__"],
description=about["__summary__"],
license=about["__license__"],
packages=find_packages(exclude=["test*"]),
install_requires=[
'kmsauth>=0.1.5,<0.2.0',
'marshmallow>=2.9.0,<3.0.0'
],
extras_require={
'tests': [
'coverage==4.1',
'flake8==2.6.2',
'mccabe==0.5.0',
'mock==1.0.1',
'pep8==1.7.0',
'py==1.4.31',
'pyflakes==1.2.3',
'pytest==2.9.2'
]
},
entry_points={
"console_scripts": [
"awseipext = awseipext.client:main"
]
}
)
|
the-stack_106_29791 |
from fusedwind.interface import base, implement_base
def configure_planform(cls, file_base, planform_nC=6, spline_type='pchip'):
"""
method that adds a ``SplinedBladePlanform`` instance to the assembly
Parameters
----------
cls: class instance
Instance of an OpenMDAO Assembly that the analysis is run from
    file_base: str
        path to the blade planform file passed to read_blade_planform
planform_nC: int
number of spline control points for the planform variables
"""
from fusedwind.turbine.geometry import SplinedBladePlanform, read_blade_planform
cls.add('pf_splines', SplinedBladePlanform())
cls.driver.workflow.add('pf_splines')
cls.pf_splines.nC = planform_nC
cls.pf_splines.pfIn = read_blade_planform(file_base)
cls.pf_splines.configure_splines(spline_type=spline_type)
cls.create_passthrough('pf_splines.blade_length')
cls.create_passthrough('pf_splines.span_ni')
def configure_bladesurface(cls, file_base, planform_nC=6, spline_type='pchip'):
"""
method that adds a ``LoftedBladeSurface`` instance to the assembly
Parameters
----------
cls: class instance
Instance of an OpenMDAO Assembly that the analysis is run from
planform_nC: int
number of spline control points for the planform variables
"""
from fusedwind.turbine.geometry import LoftedBladeSurface
if not hasattr(cls, 'pf_splines'):
configure_planform(cls, file_base, planform_nC, spline_type=spline_type)
cls.add('blade_surface', LoftedBladeSurface())
cls.driver.workflow.add('blade_surface')
cls.connect('pf_splines.pfOut', 'blade_surface.pf')
cls.connect('span_ni', 'blade_surface.span_ni')
def configure_bladestructure(cls, file_base, structure_nC=8, spline_type='pchip'):
"""
method for configuring an assembly with
blade geometry and structural parameterization
of a blade.
Parameters
----------
cls: class instance
Instance of an OpenMDAO Assembly that the analysis is run from
file_base: str
path + filebase to the blade structure files, e.g. data/DTU10MW
planform_nC: int
number of spline control points for the planform variables
structure_nC: int
number of spline control points for the structural variables
"""
from fusedwind.turbine.blade_structure import BladeStructureReader, \
BladeStructureWriter, \
SplinedBladeStructure, \
BladeStructureCSBuilder
cls.add('st_reader', BladeStructureReader())
cls.add('st_splines', SplinedBladeStructure())
cls.add('st_builder', BladeStructureCSBuilder())
cls.add('st_writer', BladeStructureWriter())
cls.driver.workflow.add(['st_splines',
'st_builder',
'st_writer'])
cls.connect('blade_surface.pf', 'st_splines.pfIn')
cls.connect('blade_length', 'st_builder.blade_length')
# connect the blade structure vartrees through the chain
# of components
cls.connect('st_splines.st3dOut', 'st_builder.st3d')
cls.connect('st_splines.st3dOut', 'st_writer.st3d')
# connect the stacked blade surface to the st_builder component
cls.connect('blade_surface.surfnorot', 'st_builder.surface')
cls.st_reader.filebase = file_base
cls.st_reader.execute()
cls.st_splines.st3dIn = cls.st_reader.st3d.copy()
cls.st_splines.nC = structure_nC
cls.st_splines.configure_bladestructure(spline_type=spline_type)
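# Hedged usage sketch (the Assembly import and planform file path below are
# assumptions for illustration, not taken from this module): these configure_*
# helpers are meant to be called from an OpenMDAO Assembly's configure method.
#
#   from openmdao.main.api import Assembly
#
#   class BladeStructureAnalysis(Assembly):
#       def configure(self):
#           configure_bladesurface(self, 'data/blade_planform.dat', planform_nC=6)
#           configure_bladestructure(self, 'data/DTU10MW', structure_nC=8)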
|
the-stack_106_29792 |
from __future__ import absolute_import
import sys
import inspect
from collections import OrderedDict
import attr
is_py3 = sys.version_info.major >= 3
inspect_iscoroutinefunction = getattr(
inspect, 'iscoroutinefunction', lambda f: False)
class temporal_property(object):
'''Assiginable property'''
def __init__(self, getter):
self.getter = getter
def __get__(self, obj, cls):
return self.getter(obj)
@attr.s
class Argument(object):
_prefix = ''
callable = attr.ib()
position = attr.ib()
index = attr.ib()
@property
def varname(self):
return self.callable.code.co_varnames[self.index]
@property
def has_default(self):
raise NotImplementedError # pragma: no cover
@property
def has_annotation(self):
return self.varname in self.callable.annotations
@property
def annotation(self):
return self.callable.annotations[self.varname]
class PositionalArgument(Argument):
@property
def has_default(self):
boundary = self.callable.code.co_argcount - len(self.callable.defaults)
return boundary <= self.index
@property
def default(self):
boundary = self.callable.code.co_argcount - len(self.callable.defaults)
def_idx = self.index - boundary
return self.callable.defaults[def_idx]
class KeywordOnlyArgument(Argument):
@property
def has_default(self):
return self.varname in self.callable.kwdefaults
@property
def default(self):
return self.callable.kwdefaults[self.varname]
class StarArgument(PositionalArgument):
_prefix = '*'
@property
def has_default(self):
return False
class DoubleStarArgument(PositionalArgument):
_prefix = '**'
@property
def has_default(self):
return False
class ArgumentList(list):
@temporal_property
def _table(self):
self._table = {arg.varname: arg for arg in self}
return self._table
def get(self, varname):
return self._table[varname]
def keys(self):
return self._table.keys()
def __getitem__(self, item):
if isinstance(item, str):
return self.get(item)
return super(ArgumentList, self).__getitem__(item)
class Callable(object):
def __init__(self, f):
self.callable = f
self.is_wrapped_coroutine = getattr(f, '_is_coroutine', None)
self.is_coroutine = self.is_wrapped_coroutine or \
inspect_iscoroutinefunction(f)
self.decompose()
def decompose(self):
self.positional_arguments = ArgumentList()
self.keyword_only_arguments = ArgumentList()
self.star_argument = None
self.double_star_argument = None
code = self.code
var_idx = 0
for var_idx in range(code.co_argcount):
argument = PositionalArgument(self, var_idx, var_idx)
self.positional_arguments.append(argument)
has_sarg = code.co_flags & 4
pos_sarg = var_idx
has_ssarg = code.co_flags & 8
kwonlyargcount = getattr(code, 'co_kwonlyargcount', 0)
for var_idx in range(var_idx + 1, var_idx + 1 + kwonlyargcount):
argument = KeywordOnlyArgument(
self, bool(has_sarg) + var_idx, var_idx)
self.keyword_only_arguments.append(argument)
if has_sarg:
var_idx += 1
self.star_argument = StarArgument(self, pos_sarg, var_idx)
if has_ssarg:
var_idx += 1
self.double_star_argument = DoubleStarArgument(
self, var_idx, var_idx)
def iter_arguments(self):
for arg in self.positional_arguments:
yield arg
if self.star_argument:
yield self.star_argument
for arg in self.keyword_only_arguments:
yield arg
if self.double_star_argument:
yield self.double_star_argument
@temporal_property
def arguments(self):
arguments = ArgumentList(self.iter_arguments())
self.arguments = arguments
return arguments
@temporal_property
def code(self):
'''REAL __code__ for the given callable'''
c = self.callable
if self.is_wrapped_coroutine:
code = c.__wrapped__.__code__
else:
code = c.__code__
self.code = code
return code
@property
def defaults(self):
if self.is_wrapped_coroutine:
c = self.callable.__wrapped__
else:
c = self.callable
return getattr(c, '__defaults__', None) or ()
@property
def kwdefaults(self):
return getattr(self.callable, '__kwdefaults__', None) or {}
@property
def annotations(self):
return getattr(self.callable, '__annotations__', None) or {}
def kwargify(self, args, kwargs):
kwargs = kwargs.copy()
code = self.code
if len(args) > code.co_argcount and not self.star_argument:
raise TypeError(
'{}() takes {} positional arguments but {} were given'.format(
code.co_name, code.co_argcount, len(args)))
merged = OrderedDict(
(self.arguments[i].varname, arg)
for i, arg in enumerate(args[:len(self.positional_arguments)]))
i = len(merged)
while i < len(self.positional_arguments):
argument = self.positional_arguments[i]
if argument.varname in kwargs:
merged[argument.varname] = kwargs.pop(argument.varname)
elif argument.has_default:
merged[argument.varname] = argument.default
else:
missing_count = len(self.positional_arguments) - i
raise TypeError(
"{}() missing {} required positional argument: '{}'".format(
code.co_name, missing_count, ', '.join(
'{arg.varname}'.format(arg=arg)
for arg in self.positional_arguments[i:i + missing_count])))
i += 1
if self.star_argument:
merged[self.star_argument.varname] = args[i:]
unhandled_kws = []
for kw, arg in kwargs.items():
if kw in merged:
raise TypeError(
"{}() got multiple values for argument '{}'".format(
code.co_name, kw))
elif kw in self.arguments.keys():
merged[kw] = arg
else:
unhandled_kws.append(kw)
for argument in self.keyword_only_arguments:
if argument.varname in merged:
continue
if not argument.has_default:
raise TypeError(
"{}() missing 1 required keyword-only argument: '{}'".format(
code.co_name, argument.varname))
merged[argument.varname] = argument.default
if self.double_star_argument:
merged[self.double_star_argument.varname] = {
kw: kwargs[kw] for kw in unhandled_kws}
return merged
|
the-stack_106_29793 | # Copyright 2020 DeepMind Technologies Limited
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MNIST example with Haiku and JAXopt.
====================================
"""
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
from jaxopt import loss
from jaxopt import OptaxSolver
from jaxopt import PolyakSGD
from jaxopt import tree_util
import optax
import tensorflow_datasets as tfds
flags.DEFINE_float("l2reg", 1e-4, "L2 regularization.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate (used in adam).")
flags.DEFINE_bool("manual_loop", False, "Whether to use a manual training loop.")
flags.DEFINE_integer("maxiter", 100, "Maximum number of iterations.")
flags.DEFINE_float("max_step_size", 0.1, "Maximum step size (used in polyak-sgd).")
flags.DEFINE_float("momentum", 0.9, "Momentum strength (used in adam, polyak-sgd).")
flags.DEFINE_enum("solver", "adam", ["adam", "sgd", "polyak-sgd"], "Solver to use.")
FLAGS = flags.FLAGS
def load_dataset(split, *, is_training, batch_size):
"""Loads the dataset as a generator of batches."""
ds = tfds.load("mnist:3.*.*", split=split).cache().repeat()
if is_training:
ds = ds.shuffle(10 * batch_size, seed=0)
ds = ds.batch(batch_size)
return iter(tfds.as_numpy(ds))
def net_fun(batch):
"""Standard LeNet-300-100 MLP network."""
x = batch["image"].astype(jnp.float32) / 255.
mlp = hk.Sequential([
hk.Flatten(),
hk.Linear(300), jax.nn.relu,
hk.Linear(100), jax.nn.relu,
hk.Linear(10),
])
return mlp(x)
net = hk.without_apply_rng(hk.transform(net_fun))
@jax.jit
def accuracy(params, data):
predictions = net.apply(params, data)
return jnp.mean(jnp.argmax(predictions, axis=-1) == data["label"])
logistic_loss = jax.vmap(loss.multiclass_logistic_loss)
def loss_fun(params, l2reg, data):
"""Compute the loss of the network."""
logits = net.apply(params, data)
labels = data["label"]
sqnorm = tree_util.tree_l2_norm(params, squared=True)
loss_value = jnp.mean(logistic_loss(labels, logits))
return loss_value + 0.5 * l2reg * sqnorm
def main(argv):
del argv
train_ds = load_dataset("train", is_training=True, batch_size=1000)
test_ds = load_dataset("test", is_training=False, batch_size=10000)
def pre_update(params, state, *args, **kwargs):
if state.iter_num % 10 == 0:
# Periodically evaluate classification accuracy on test set.
test_accuracy = accuracy(params, next(test_ds))
test_accuracy = jax.device_get(test_accuracy)
print(f"[Step {state.iter_num}] Test accuracy: {test_accuracy:.3f}.")
return params, state
# Initialize solver and parameters.
if FLAGS.solver == "adam":
    # Equivalent to:
# opt = optax.chain(optax.scale_by_adam(b1=0.9, b2=0.999, eps=1e-8),
# optax.scale(-FLAGS.learning_rate))
opt = optax.adam(FLAGS.learning_rate)
solver = OptaxSolver(opt=opt, fun=loss_fun, maxiter=FLAGS.maxiter,
pre_update=pre_update)
elif FLAGS.solver == "sgd":
opt = optax.sgd(FLAGS.learning_rate, FLAGS.momentum)
solver = OptaxSolver(opt=opt, fun=loss_fun, maxiter=FLAGS.maxiter,
pre_update=pre_update)
elif FLAGS.solver == "polyak-sgd":
solver = PolyakSGD(fun=loss_fun, maxiter=FLAGS.maxiter,
momentum=FLAGS.momentum,
max_step_size=FLAGS.max_step_size, pre_update=pre_update)
else:
raise ValueError("Unknown solver: %s" % FLAGS.solver)
init_params = net.init(jax.random.PRNGKey(42), next(train_ds))
# Run training loop.
# In JAXopt, stochastic solvers can be run either using a manual for loop or
# using `run_iterator`. We include both here for demonstration purpose.
if FLAGS.manual_loop:
params, state = solver.init(init_params)
for _ in range(FLAGS.maxiter):
params, state = pre_update(params=params, state=state)
params, state = solver.update(params=params, state=state,
l2reg=FLAGS.l2reg,
data=next(train_ds))
else:
solver.run_iterator(init_params=init_params,
iterator=train_ds,
l2reg=FLAGS.l2reg)
if __name__ == "__main__":
app.run(main)
|
the-stack_106_29796 | """
Run this file for Problem 1
"""
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset
import logging
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt; plt.style.use('seaborn-darkgrid')
from copy import deepcopy
from pusher_goal import PusherEnv
from pusher_mod import PusherEnvModified
from bc_model import BC_Model
from ppo_model import PPO_Model
from a_c import ActorCritic
logger = logging.basicConfig(level=logging.INFO)
num_pushes_in_vid = 10
vid_path = './results/p1/p1_video_behavioralcloning.mp4'
num_episodes_to_evaluate_on = 100
batch_size = 128
num_epochs = 70
learning_rate = 2e-4
act_layer_one = 128
act_layer_two = 64
crit_layer_one = 64
crit_layer_two = 32
def main():
# Load data
expert_data = np.load("./expert.npz")
expert_data = TensorDataset(torch.tensor(expert_data["obs"]), torch.tensor(expert_data["action"]))
# Instantiate the environment (had to modify it slightly from the form given to make for easier recording later)
environment = PusherEnvModified()
policy = ActorCritic(state_space_dimension=environment.state_space_dimension,
action_space_dimension=environment.action_space_dimension,
actor_hidden_layer_units=(act_layer_one, act_layer_two),
critic_hidden_layer_units=(crit_layer_one, crit_layer_two), actor_std=4e-2,
activation=nn.Tanh)
# Use the policy from above to instantiate our behavioral cloning model
bc_model = BC_Model(policy=deepcopy(policy), batch_size=batch_size, num_epochs=num_epochs, learning_rate=learning_rate)
# Train model and save resulting policy parameters
bc_model.train(expert_data=expert_data)
bc_model.policy.save(path="./results/p1/bc_model_params.pt")
pd.DataFrame(bc_model.training_loss_list, columns=["train_loss"]).to_csv("./results/p1/bc_train_loss.csv")
pd.DataFrame(bc_model.avg_loss_list, columns=["avg_train_loss"]).to_csv("./results/p1/bc_avg_train_loss.csv")
# Plot training loss
plt.plot(bc_model.training_loss_list, label="Training loss")
plt.title("Loss as a Function of Time")
plt.xlabel("# of batches")
plt.legend()
plt.savefig("./results/p1/bc_train_loss_chart.png")
plt.close()
# Plot avg. training loss
plt.plot(bc_model.avg_loss_list, label="Average training loss per epoch")
plt.title("Avg. Loss as a Function of Time")
plt.xlabel("# of epochs")
plt.legend()
plt.savefig("./results/p1/bc_avg_train_loss_chart.png")
plt.close()
# Now use the policy from the post-training behavioral cloning model, and compare the results
produced_model = PPO_Model(environment=environment, policy=deepcopy(bc_model.policy), n_steps_per_trajectory=64)
# For comparison, we evaluate the learned policy on 100 episodes
ltwo_dist_list = []
trajectories_list = []
for i in range(num_episodes_to_evaluate_on):
_, actions, _, _ = produced_model.generate_trajectory(use_argmax=True, perform_reset=False)
trajectories_list.append(actions)
state = produced_model.environment.simulator.get_obs()
ltwo_dist_list.append(np.linalg.norm(state[3:6] - state[6:9]))
produced_model.environment.reset()
pd.DataFrame({"mean_L2_distance": np.mean(ltwo_dist_list),
"standard_L2dist": np.std(ltwo_dist_list) / np.sqrt(len(ltwo_dist_list))},
index=["BC"]).to_csv("./results/p1/bc_l2distance.csv")
# Using the trajectories generated above,
# make video showing evaluation of policy on 10 episodes
env_for_vid = PusherEnv(render=True)
env_for_vid.render()
vid_output = cv2.VideoWriter(vid_path, cv2.VideoWriter_fourcc(*'mp4v'), 30, (640, 480))
for given_trajectory in trajectories_list[:num_pushes_in_vid]:
for action in given_trajectory:
# apply action and record into video
env_for_vid.apply_action(action)
scene_image = env_for_vid.robot.cam.get_images(get_rgb=True, get_depth=False)[0]
vid_output.write(np.array(scene_image))
# Reset video environment after a given push
env_for_vid.reset()
if __name__ == "__main__":
main() |
the-stack_106_29797 | __author__ = ["Nurendra Choudhary <[email protected]>","Anoop Kunchukuttan <[email protected]>"]
__license__ = "GPLv3"
""" Transliterate texts between unicode and standard transliteration schemes.
Transliterate texts between non-latin scripts and commonly-used latin
transliteration schemes. Uses standard Unicode character blocks --
e.g. DEVANAGARI U+0900 ... U+097F -- and transliteration schemes --
e.g. the IAST convention for transliteration of Sanskrit to latin-with-dots.
The following character blocks and transliteration schemes are included:
DEVANAGARI
IAST
ITRANS -- http://www.aczoom.com/itrans/#itransencoding (Sanskrit only)
Harvard Kyoto
CYRILLIC
ISO 9:1995 (Russian only)
New character blocks and transliteration schemes can be added by creating
new CharacterBlock and TransliterationScheme objects.
USAGE
--------
Transliterate a text:
>>> import transliterator
>>> transliterator.transliterate('yogazcittavRttinirodhaH', 'harvardkyoto',
... 'devanagari', {'outputASCIIEncoded' : True})
'योगश्चित्तवृत्तिनिरोधः'
Create a new CharacterBlock and TransliterationScheme:
>>> import transliterator
>>> cb = transliterator.CharacterBlock('NEWBLOCK', range(0x901, 0x9FF))
>>> scheme = transliterator.TransliterationScheme(cb.name, 'NEWSCHEME',
... {'ab': 0x901, 'cd': 0x902})
>>> transliterator.transliterate('abcd', scheme, cb, {'outputASCIIEncoded' : True})
'ँं'
COPYRIGHT AND DISCLAIMER
------------------------------------
Transliterator is:
version 0.1 software - use at your own risk.
The IAST, ITRANS and Harvard-Kyoto transliteration schemes have been
tested for classical Sanskrit, not for any other language.
The Cyrillic alphabet and ISO 9:1995 transliteration (for Russian only)
are included but have been even more lightly tested than Devanagari.
Copyright (c) 2005 by Alan Little
By obtaining, using, and/or copying this software and/or its
associated documentation, you agree that you have read, understood,
and will comply with the following terms and conditions:
Permission to use, copy, modify, and distribute this software and
its associated documentation for any purpose and without fee is
hereby granted, provided that the above copyright notice appears in
all copies, and that both that copyright notice and this permission
notice appear in supporting documentation, and that the name of
the author not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior permission.
THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
""" TO DO
cyrillic: GOST & something ASCII-only & sensible
punctuation &/ numerals:
HK
check Itrans punctuation
implicit conversion to unicode if no to format specified
URLs for the input text
Bugs
is there a problem with implicit A before visarga?
"""
__version__ = '2.0'
import unicodedata
#from sets import Set
import sys
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
else:
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
characterBlocks = {}
_names = {}
# handle unrecognised characters
UNRECOGNISED_FAIL = 0
UNRECOGNISED_ECHO = 1
UNRECOGNISED_SUBSTITUTE = 2
# default options
options = {}
def resetOptions():
""" Reset options to their default values. """
global options
defaultOptions = {
'inputEncoding' : 'utf-8', # default input encoding for strings
'outputEncoding' : 'utf-8', # default output encoding
'substituteChar' : '?', # use to substitute unrecognised characters
'handleUnrecognised' : UNRECOGNISED_FAIL, # unrecognised characters:
# fail, echo or substitute
'outputASCIIEncoded' : False, # HTML-encoded ASCII output?
}
options = defaultOptions.copy()
resetOptions()
def _unrecognised(achr):
""" Handle unrecognised characters. """
if options['handleUnrecognised'] == UNRECOGNISED_ECHO:
return achr
elif options['handleUnrecognised'] == UNRECOGNISED_SUBSTITUTE:
return options['substituteChar']
else:
raise KeyError(achr)
def py23char(x):
return chr(x)
class TLCharacter (object):
""" Class representing a Unicode character with its equivalents.
Public attributes:
unicodeHexValue -- the numeric value of the Unicode code point.
unichr -- the character value of the Unicode code point.
name -- the name of the Unicode code point.
equivalents -- a dict containing the character's equivalents in
various transliteration schemes, in the format:
{'Scheme A': 'A', 'Scheme B': 'aah', }
where keys are TransliterationScheme names,
values are transliterated equivalents of the
character.
"""
def __init__(self, unicodeHexValue, block):
""" Set up a unicode character.
Arguments:
unicodeHexValue -- an integer that should correspond to a
Unicode code point.
block -- the CharacterBlock this character belongs to.
Raises:
ValueError -- if unicodeHexValue is not a valid code point.
"""
if unicodeHexValue < 0 or unicodeHexValue > 0x10FFFF:
raise ValueError("numeric value outside Unicode range")
self.unicodeHexValue = unicodeHexValue
""" Use name check to filter out unused characters.
unicodedata.name() raises ValueError for these
"""
self.unichr = py23char(self.unicodeHexValue)
self.name = unicodedata.name(self.unichr)
self.equivalents = {}
self._block = block
def addEquivalent(self, equivName, equivalent):
""" Add an equivalent for the character.
Arguments:
equivName -- the name of a TransliterationScheme
equivalent -- string/unicode equivalent in the named
TransliterationScheme for this code point.
"""
self.equivalents[equivName] = equivalent
class CharacterBlock(dict):
""" Dictionary-like representation of a set of unicode characters.
For our purposes, a character block corresponds to an alphabet/script
that we want to be able to transliterate to or from, e.g. Cyrillic,
Devanagari.
Keys are unicode characters.
Values are TLCharacter instances.
"""
def __init__(self, name, charRange, charClass=TLCharacter):
""" Set up a character block corresponding to a range of code points.
Keyword arguments:
name -- a string containing the name of the character block.
(should normally use a standard Unicode character block name)
range -- a list of code points. Reserved code points are ignored.
charClass -- the class to be used to create the characters.
Should be a subclass of TLCharacter.
"""
""" Ensure that any character sequence dependencies will be ok.
e.g. set up Devanagari standalone vowels before dependents.
"""
charRange.sort()
for c in charRange:
try:
tlchar = charClass(c, self)
self[tlchar.unichr] = tlchar
except ValueError: # Unicode reserved code points.
# not an error
pass
self._longestEntry = 1
self.name = name
self.transliterationSchemes = {}
self._register()
def _register(self):
characterBlocks[self.name] = self
_names[self.name.upper()] = self
def _transliterate (self, text, outFormat):
""" Transliterate the text to the target transliteration scheme."""
result = []
for c in text:
if c.isspace(): result.append(c)
try:
result.append(self[c].equivalents[outFormat.name])
except KeyError:
result.append(_unrecognised(c))
return result
def _preprocess(self, text):
""" Make our signature compatible with TransliterationScheme. """
return text
def _getNextChar(self, text, startPos):
return text[startPos]
class TransliterationScheme(dict):
""" Dictionary-like representation of a transliteration scheme.
e.g. the Harvard-Kyoto, IAST or ITRANS schemes for
transliterating Devanagari to or from the latin alphabet.
Keys are unicode strings representing the letter-equivalents used
in the transliteration scheme.
Values are TLCharacter instances.
"""
def __init__(self, blockName, schemeName, data, swapTable=None):
""" Set up a transliteration scheme.
Keyword arguments:
blockName -- a string containg the name of the character block this
transliteration scheme is used for,
e.g. 'CYRILLIC', 'DEVANAGARI'.
schemeName -- the name of the transliteration scheme.
Must be unique.
data -- a dict containing the data for the transliteration scheme.
Keys are transliterated Unicode characters or strings.
Values are integers corresponding to Unicode code points.
For examples, see the data for the built-in transliteration
schemes.
swapTable -- a dict (default None) containing any non-standard
letter combinations used in the transliteration scheme
that we want to pre-process away before transliterating.
See the ITRANS data for examples.
Raises:
KeyError: unknown block name.
TypeError: swapTable is not a dict
"""
self.block = characterBlocks[blockName]
self.name = schemeName
for equiv, unicodeHexValue in data.items():
self[equiv] = self.block[py23char(unicodeHexValue)]
self[equiv].addEquivalent(self.name, equiv)
self._longestEntry = max([len(e) for e in list(data.keys())])
if self._longestEntry > 1:
self._parseTree = {}
self._parsedata = list(data.keys())
self._parsedata.sort()
self._setupParseTree(0, len(data) - 1, 0, self._parseTree)
if swapTable is not None:
if not isinstance(swapTable, dict): raise TypeError
self.swapTable = swapTable
self._register()
def _register(self):
self.block.transliterationSchemes[self.name] = self
_names[self.name.upper()] = self
def _setupParseTree(self, rowFrom, rowTo, colIndex, tree):
""" Build the search tree for multi-character encodings.
"""
if colIndex == self._longestEntry:
return
prevchar = None
rowIndex = rowFrom
while rowIndex <= rowTo:
if colIndex < len(self._parsedata[rowIndex]):
c = self._parsedata[rowIndex][colIndex]
if c != prevchar:
tree[c] = {}
if prevchar is not None:
self._setupParseTree(rowFrom, rowIndex - 1, colIndex + 1, tree[prevchar])
rowFrom = rowIndex
prevchar = c
if rowIndex == rowTo:
self._setupParseTree(rowFrom, rowIndex, colIndex + 1, tree[prevchar])
rowIndex = rowIndex + 1
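    # Illustrative note (added for clarity, not in the original source): for a
    # scheme containing the keys 'a', 'ai' and 'au', _setupParseTree builds
    # {'a': {'i': {}, 'u': {}}}, which lets _getNextChar greedily match the
    # longest transliterated sequence starting at a given position.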
def __getNextChar(self, text, startPos, tree):
i = startPos
matched = text[i]
if i < len(text) - 1:
try:
if text[i + 1] in tree[text[i]]:
matched = matched + self.__getNextChar(text, i + 1, tree[text[i]])
except KeyError:
# Ignore. The lookup for the equivalent character later on will handle it.
pass
return matched
def _getNextChar(self, text, startPos):
if self._longestEntry > 1 and not text[startPos].isspace():
return self.__getNextChar(text, startPos, self._parseTree)
else:
return text[startPos]
def _preprocess(self, text):
if self.swapTable:
for c in self.swapTable:
if isinstance(text,bytes):
text = text.decode()
text = text.replace(c, self.swapTable[c])
return text
def _transliterate (self, text, outFormat):
""" Transliterate the text to Unicode."""
result = []
text = self._preprocess(text)
i = 0
while i < len(text):
if text[i].isspace():
result.append(text[i])
i = i+1
else:
chr = self._getNextChar(text, i)
try:
result.append(self[chr].unichr)
except KeyError:
result.append(_unrecognised(chr))
i = i + len(chr)
return result
def transliterate(text, inFormat, outFormat, requestOptions={}):
""" Transliterate a text.
Keyword arguments:
text -- a unicode string containing the text to be transliterated
inFormat -- the "from" CharacterBlock or TransliterationScheme, or its name
outFormat -- the target CharacterBlock or TransliterationScheme, or its name
requestOptions -- optional dict containing option settings that override the
defaults for this request.
Returns a unicode object containing the text transliterated into the
target character set.
Raises:
ValueError -- unrecognised input or output format.
KeyError -- a character in text is not a member of inFormat, or has no
corresponding character defined in outFormat.
"""
def asciiEncode(chr):
value = ord(chr)
if value > 255:
return '&#x%x;' % (value)
return chr
try:
options.update(requestOptions)
""" Ensure we have the correct encoding for input text. """
if isinstance(text, str):
try:
text = text.decode(options['inputEncoding'])
except:
pass
""" Look up input & output format names. """
def findFormat(fmt):
if isinstance(fmt, basestring):
try:
fmt = _names[fmt.upper()]
except KeyError:
raise ValueError('unrecognised format ' + fmt)
return fmt
inFormat = findFormat(inFormat)
outFormat = findFormat(outFormat)
""" Perform sanity checks. """
if not isinstance(text, basestring):
raise TypeError("The text must be a string or a unicode object")
def getBlock(format):
if isinstance(format, CharacterBlock):
return format
else:
return format.block
inBlock = getBlock(inFormat)
outBlock = getBlock(outFormat)
if not inBlock is outBlock:
raise ValueError("incompatible input and output formats")
if inFormat is outFormat:
# They're trying to trick us. Just do a quick sanity check & bounce it back.
if inFormat._longestEntry == 1:
[inFormat[c] for c in set(text) if not c.isspace()]
# -> KeyError for extraneous chars.
return text
""" At last we're happy. Do it. """
result = inFormat._transliterate(text, outFormat)
if options['outputASCIIEncoded']:
result = [asciiEncode(c) for c in result]
return u''.join(result).encode(options['outputEncoding'])
finally:
resetOptions()
""" DEVANAGARI PROCESSING
Specialised classes & functions to handle Devanagari.
"""
class DevanagariCharacter (TLCharacter):
""" Special processing for Devanagari characters.
"""
"""
Devanaagarii characters need to know if they are vowels or not.
Unicode Data doesn't help with this - category 'Mn' is not unique to dependent vowels
- so need to hard code the ranges
"""
_vowelOffset = 0x93E - 0x906
_depVowelRange = list(range(0x93E, 0x94D)) + [0x962,0x963]
_vowelRange = list(range(0x904, 0x915)) + [0x960,0x961]
_VIRAMA = py23char(0x94D)
_LETTER_A = py23char(0x905)
""" Unicode calls agravaha a letter. Not for our purposes:
we need to not treat it as one for handling virama & implicit 'a'
"""
_AGRAVAHA = 0x93D
_OM = 0x950
def __init__(self, unicodeHexValue, block):
""" Create an object representing a Devanagari character.
Extends TLCharacter.__init__ to distinguish Devanagari standalone
vowels, dependent vowels and consonants.
Raises
ValueError -- for characters in the Devanagari dependent vowel range.
We want these as variants of the corresponding standalone
vowels, not as separate characters.
"""
TLCharacter.__init__(self, unicodeHexValue, block)
self.isVowel = False
if unicodeHexValue in DevanagariCharacter._vowelRange:
self.isVowel = True
self._dependentVowel = None
if unicodeHexValue==0x960:
## dependency vowel sign for vocalic RR is set only when processing the vowel, since the maatra precedes the vowel in the Unicode chart
self._setDependentVowel(0x944)
if unicodeHexValue in DevanagariCharacter._depVowelRange:
vowel=None
if unicodeHexValue == 0x962:
vowel=block[py23char(0x90C)]
elif unicodeHexValue == 0x963:
vowel=block[py23char(0x961)]
elif unicodeHexValue == 0x944:
## dependency vowel sign for vocalic RR is set only when processing the vowel, since the maatra precedes the vowel in the Unicode chart
                ## That step's code is above, with documentation
pass
else:
vowel=block[py23char(unicodeHexValue - DevanagariCharacter._vowelOffset)]
if vowel is not None:
# The check condition is for 0x944, processing deferred for later
vowel._setDependentVowel(unicodeHexValue)
raise ValueError # don't create dependent vowels as separate instances
#TLCharacter.__init__(self, unicodeHexValue, block)
self.isConsonant = False
if self.isVowel == False \
and self.unichr.isalpha() \
and self.unicodeHexValue not in (DevanagariCharacter._AGRAVAHA,
DevanagariCharacter._OM):
self.isConsonant = True
def _setDependentVowel(self, unicodeHexValue):
if unicodeHexValue is not None:
if not self.isVowel: raise ValueError
self._dependentVowel = py23char(unicodeHexValue)
self._block[py23char(unicodeHexValue)] = self
class _Devanagari(object):
""" Holder class for the Devanagari transliteration algorithm. """
def _transliterate(self, text, outFormat):
""" Transliterate a devanagari text into the target format.
Transliterating a character to or from Devanagari is not a simple
lookup: it depends on the preceding and following characters.
"""
def getResult():
if curMatch.isspace():
result.append(curMatch)
return
if prevMatch in self:
prev = self[prevMatch]
else:
prev = None
if nextMatch in self:
next = self[nextMatch]
else:
next = None
try:
equiv = outFormat._equivalent(self[curMatch],
prev, #self.get(prevMatch, None),
next, #self.get(nextMatch, None),
self._implicitA)
except KeyError:
equiv = _unrecognised(curMatch)
for e in equiv:
result.append(e)
def incr(c):
if self._longestEntry == 1:
return 1
return len(c)
result = []
try:
text = text.decode()
except:
pass
text = self._preprocess(text)
i = 0
prevMatch = None
nextMatch = None
curMatch = self._getNextChar(text, i)
i = i + len(curMatch)
while i < len(text):
nextMatch = self._getNextChar(text, i)
getResult()
i = i + len(nextMatch)
prevMatch = curMatch
curMatch = nextMatch
nextMatch = None
getResult()
return result
class DevanagariCharacterBlock(CharacterBlock, _Devanagari):
""" Class representing the Devanagari Unicode character block.
"""
def __init__(self, name, charRange):
""" Set up the Devanagari character block.
        Extends CharacterBlock.__init__ by specifying that the characters
created should be instances of DevanagariCharacter.
"""
CharacterBlock.__init__(self, name, charRange, DevanagariCharacter)
self._implicitA = True # generate implicit As when transliterating
# *FROM* this scheme
def _transliterate(self, text, outFormat):
""" Need to specify which superclass _transliterate() to call. """
return _Devanagari._transliterate(self, text, outFormat)
def _equivalent(self, char, prev, next, implicitA):
""" Transliterate a Latin character equivalent to Devanagari.
Add VIRAMA for ligatures.
Convert standalone to dependent vowels.
"""
result = []
if char.isVowel == False:
result.append(char.unichr)
if char.isConsonant \
and ((next is not None and next.isConsonant) \
or next is None):
result.append(DevanagariCharacter._VIRAMA)
else:
if prev is None or prev.isConsonant == False:
result.append(char.unichr)
else:
if char._dependentVowel is not None:
result.append(char._dependentVowel)
return result
class DevanagariTransliterationScheme(TransliterationScheme, _Devanagari):
""" Class representing a Devanagari transliteration scheme. """
def __init__(self, blockName, schemeName, data, swapTable=None):
""" Set up a Devanagari transliteration scheme.
Extends TransliterationScheme.__init__
"""
TransliterationScheme.__init__\
(self, blockName, schemeName, data, swapTable)
self._implicitA = False # generate implicit As when transliterating
# *FROM* this scheme
def _transliterate(self, text, outFormat):
""" Need to specify which superclass _transliterate() to call. """
return _Devanagari._transliterate(self, text, outFormat)
def _equivalent(self, char, prev, next, implicitA):
""" Transliterate a Devanagari character to Latin.
Add implicit As unless overridden by VIRAMA.
"""
result = []
if char.unichr != DevanagariCharacter._VIRAMA:
result.append(char.equivalents[self.name])
""" Append implicit A to consonants if the next character isn't a vowel. """
if implicitA and char.isConsonant \
and ((next is not None \
and next.unichr != DevanagariCharacter._VIRAMA \
and not next.isVowel) \
or next is None):
result.append(characterBlocks['DEVANAGARI']\
[DevanagariCharacter._LETTER_A].equivalents[self.name])
return result
""" DEVANAGARI DATA
set up the Devanagari character set with three commonly used transliteration
schemes:
ITRANS
Harvard Kyoto
IAST
"""
DevanagariCharacterBlock('DEVANAGARI', list(range(0x900, 0x97F)))
HARVARDKYOTO = { \
'M': 0x902,
'H': 0x903,
'a': 0x905,
'A': 0x906,
'i': 0x907,
'I': 0x908,
'u': 0x909,
'U': 0x90A,
'R': 0x90B,
'lR': 0x90C,
'e': 0x90F,
'ai': 0x910,
'o': 0x913,
'au': 0x914,
'k': 0x915,
'kh': 0x916,
'g': 0x917,
'gh': 0x918,
'G': 0x919,
'c': 0x91A,
'ch': 0x91B,
'j': 0x91C,
'jh': 0x91D,
'J': 0x91E,
'T': 0x91F,
'Th': 0x920,
'D': 0x921,
'Dh': 0x922,
'N': 0x923,
't': 0x924,
'th': 0x925,
'd': 0x926,
'dh': 0x927,
'n': 0x928,
'p': 0x92A,
'ph': 0x92B,
'b': 0x92C,
'bh': 0x92D,
'm': 0x92E,
'y': 0x92F,
'r': 0x930,
'l': 0x932,
'v': 0x935,
'z': 0x936,
'S': 0x937,
's': 0x938,
'h': 0x939,
"'": 0x93D,
'oM': 0x950,
}
DevanagariTransliterationScheme('DEVANAGARI','HARVARDKYOTO', HARVARDKYOTO)
ITRANS = { \
'R': 0x931, # added by anoop # extension
'M': 0x902,
'.n': 0x902,
'.m': 0x902,
'H': 0x903,
'a': 0x905,
'A': 0x906,
'aa': 0x906,
'i': 0x907,
'I': 0x908,
'ii': 0x908,
'u': 0x909,
'U': 0x90A,
'uu': 0x90A,
'RRi': 0x90B,
'R^i': 0x90B,
'RRI': 0x960, # added by Anoop # extension
'R^I': 0x960,# added by Anoop # extension
'LLi': 0x90C,
'L^i': 0x90C,
'LLI': 0x961,# added by Anoop # extension
'L^I': 0x961,# added by Anoop # extension
'.e': 0x90E, # added by Anoop # extension
'e': 0x90F,
'ai': 0x910,
'.o': 0x912, # added by Anoop # extension
'o': 0x913,
'au': 0x914,
'k': 0x915,
'kh': 0x916,
'g': 0x917,
'gh': 0x918,
'~N': 0x919,
'c': 0x91A,
'ch': 0x91A,
'Ch': 0x91B,
'j': 0x91C,
'jh': 0x91D,
'~n': 0x91E,
'T': 0x91F,
'Th': 0x920,
'D': 0x921,
'Dh': 0x922,
'N': 0x923,
't': 0x924,
'z': 0x936,
'th': 0x925,
'd': 0x926,
'dh': 0x927,
'n': 0x928,
'p': 0x92A,
'ph': 0x92B,
'b': 0x92C,
'bh': 0x92D,
'm': 0x92E,
'y': 0x92F,
'r': 0x930,
'l': 0x932,
'L': 0x933, # added by anoop
'ld': 0x933, # added by anoop
'zh': 0x934, # added by anoop # extension
'v': 0x935,
'w': 0x935,
'sh': 0x936,
'Sh': 0x937,
's': 0x938,
'h': 0x939,
".a": 0x93D, # avagraha
'OM': 0x950,
'AUM': 0x950,
'.': 0x0964,
'..': 0x0965,
'0': 0x0966,
'1': 0x0967,
'2': 0x0968,
'3': 0x0969,
'4': 0x096A,
'5': 0x096B,
'6': 0x096C,
'7': 0x096D,
'8': 0x096E,
'9': 0x096F,
# non-standard/obsolete iTrans variants still used in texts from
# http://sanskrit.gde.to/
'.h': 0x903,
'N^': 0x919,
'shh': 0x937,
'JN': 0x91E,
}
""" ITrans uses some characters only in common ligatures.
The easiest way to deal with these is to replace them with their
"normal consonant" equivalents before we try to transliterate.
(This assumes we are mainly transliterating iTrans inbound, and that
the normal consonants are acceptable outbound. ITrans is not a good
choice for outbound anyway because it has so many ambiguities)
"""
_swapTable = {'GY': 'j~n', 'dny': 'j~n', 'x': 'kSh',
}
DevanagariTransliterationScheme('DEVANAGARI', 'ITRANS', ITRANS, _swapTable)
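# For example, the ITRANS input 'GYAna' is first rewritten to 'j~nAna' by the
# swap table above and only then transliterated character by character.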
IAST = { \
py23char(0x1E43): 0x902,
py23char(0x1E25): 0x903,
'a': 0x905,
py23char(0x101): 0x906,
'i': 0x907,
py23char(0x12B): 0x908,
'u': 0x909,
py23char(0x16B): 0x90A,
py23char(0x1E5B): 0x90B,
py23char(0x1E37): 0x90C,
'e': 0x90F,
'ai': 0x910,
'o': 0x913,
'au': 0x914,
'k': 0x915,
'kh': 0x916,
'g': 0x917,
'gh': 0x918,
py23char(0x1E45): 0x919,
'c': 0x91A,
'ch': 0x91B,
'j': 0x91C,
'jh': 0x91D,
py23char(0xF1): 0x91E,
py23char(0x1E6D): 0x91F,
py23char(0x1E6D) +'h': 0x920,
py23char(0x1E0D): 0x921,
py23char(0x1E0D) + 'h': 0x922,
py23char(0x1E47): 0x923,
't': 0x924,
'th': 0x925,
'd': 0x926,
'dh': 0x927,
'n': 0x928,
'p': 0x92A,
'ph': 0x92B,
'b': 0x92C,
'bh': 0x92D,
'm': 0x92E,
'y': 0x92F,
'r': 0x930,
'l': 0x932,
'v': 0x935,
py23char(0x15B): 0x936,
py23char(0x1E63): 0x937,
's': 0x938,
'h': 0x939,
"'": 0x93D, # avagraha
'O' + py23char(0x1E43): 0x950,
'.': 0x0964,
'..': 0x0965,
'0': 0x0966,
'1': 0x0967,
'2': 0x0968,
'3': 0x0969,
'4': 0x096A,
'5': 0x096B,
'6': 0x096C,
'7': 0x096D,
'8': 0x096E,
'9': 0x096F,
}
DevanagariTransliterationScheme('DEVANAGARI', 'IAST', IAST)
""" CYRILLIC DATA
set up Cyrillic and the ISO 9:1995 (Russian) transliteration scheme.
The Cyrillic Unicode range contains roughly four times the characters that
contemporary Russian uses; the rest cover other languages and historical
forms. Set the full range up in case anybody ever has occasion to use it.
"""
CharacterBlock('CYRILLIC', list(range(0x400, 0x510)))
_ISO9RUS = {\
py23char(0x0CB): 0x401, # IO
'A': 0x410,
'B': 0x411,
'V': 0x412,
'G': 0x413,
'D': 0x414,
'E': 0x415,
py23char(0x17D): 0x416, # ZHE
'Z': 0x417,
'I': 0x418,
'J': 0x419,
'K': 0x41a,
'L': 0x41b,
'M': 0x41c,
'N': 0x41d,
'O': 0x41e,
'P': 0x41f,
'R': 0x420,
'S': 0x421,
'T': 0x422,
'U': 0x423,
'F': 0x424,
'H': 0x425,
'C': 0x426, # TS
py23char(0x10C): 0x427, # CH
py23char(0x160): 0x428, # SH
py23char(0x15C): 0x429, # SHCH
py23char(0x2BA): 0x42a, # hard
'Y': 0x42b,
py23char(0x2B9): 0x42c, # soft
py23char(0x0C8): 0x42d, # YE
py23char(0x0DB): 0x42e, # YU
py23char(0x0C2): 0x42f, # YA
'a': 0x430,
'b': 0x431,
'v': 0x432,
'g': 0x433,
'd': 0x434,
'e': 0x435,
py23char(0x17E): 0x436, # zhe
'z': 0x437,
'i': 0x438,
'j': 0x439,
'k': 0x43a,
'l': 0x43b,
'm': 0x43c,
'n': 0x43d,
'o': 0x43e,
'p': 0x43f,
'r': 0x440,
's': 0x441,
't': 0x442,
'u': 0x443,
'f': 0x444,
'h': 0x445,
'c': 0x446, # ts
py23char(0x10D): 0x447, # ch
py23char(0x161): 0x448, # sh
py23char(0x15D): 0x449, # shch
# py23char(0x2BA): 0x44a, # hard - same upper & lowercase
'y': 0x44b,
# py23char(0x2B9): 0x44c, # soft - same upper & lowercase
py23char(0xE8): 0x44d, # ye
py23char(0x0FB): 0x44e, #yu
py23char(0x0E2): 0x44f, # ya
py23char(0x0EB): 0x451, # io
}
TransliterationScheme('CYRILLIC', 'ISO9RUS', _ISO9RUS)
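
if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; it relies on the option
    # handling and Python-2/3 compatibility helpers defined earlier in this
    # module).  The return value is encoded with options['outputEncoding'],
    # so it may be a bytes object rather than text.
    print(transliterate('namaste', 'ITRANS', 'DEVANAGARI'))
    print(transliterate('rAma', 'HARVARDKYOTO', 'DEVANAGARI'))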
|
the-stack_106_29798 | from codecs import open
from setuptools import setup, find_packages
from os import path
REQUIREMETS_DEV_FILE = 'requirements_dev.txt'
REQUIREMETS_TEST_FILE = 'requirements_test.txt'
REQUIREMETS_FILE = 'requirements.txt'
PROJECTNAME = 'aio_windows_patch'
VERSION = '0.0.1'
DESCRIPTION = 'simple tools'
URL = 'https://github.com/Python-Tools/aio_windows_patch'
AUTHOR = 'hsz'
AUTHOR_EMAIL = '[email protected]'
LICENSE = 'MIT'
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Documentation :: Sphinx',
]
KEYWORDS = ["asyncio", "patch"]
PACKAGES = find_packages(exclude=['contrib', 'docs', 'test'])
ZIP_SAFE = False
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, 'README.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
REQUIREMETS_DIR = path.join(HERE, "requirements")
with open(path.join(REQUIREMETS_DIR, REQUIREMETS_FILE), encoding='utf-8') as f:
REQUIREMETS = f.readlines()
with open(path.join(REQUIREMETS_DIR, REQUIREMETS_DEV_FILE), encoding='utf-8') as f:
REQUIREMETS_DEV = f.readlines()
with open(path.join(REQUIREMETS_DIR, REQUIREMETS_TEST_FILE), encoding='utf-8') as f:
REQUIREMETS_TEST = f.readlines()
setup(
name=PROJECTNAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
packages=PACKAGES,
include_package_data=True,
install_requires=REQUIREMETS,
extras_require={
'dev': REQUIREMETS_DEV,
'test': REQUIREMETS_TEST
},
zip_safe=ZIP_SAFE,
data_files=[('requirements',
['requirements/requirements.txt',
'requirements/requirements_dev.txt',
'requirements/requirements_test.txt'])]
)
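# Illustrative install commands (not part of the original file); the extras
# declared in `extras_require` above map onto the bundled requirements files:
#
#     pip install .              # runtime requirements only
#     pip install .[dev,test]    # plus development and test requirements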
|
the-stack_106_29801 | from setuptools import setup, find_packages
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
]
keywords = ['Data Structure', 'dsa', 'Algorithms', 'pydspack']
setup(
name='pydspack',
version='0.0.3',
description='A basic library of Data Structures for learning',
long_description=open('README.txt').read(),
url='',
author='Mandar Parte',
author_email='[email protected]',
license='MIT',
classifiers=classifiers,
keywords=keywords,
packages=find_packages(),
install_requires=['']
) |
the-stack_106_29803 | # -*- coding: utf-8 -*-
# vim:set et tabstop=4 shiftwidth=4 nu nowrap fileencoding=utf-8:
from unittest import TestCase
from devicehive.client.ws import WsCommand
class WsCommandCreateTestCase(TestCase):
def test_dict_expected(self):
self.assertRaises(TypeError, WsCommand.create, None)
def test_parameters_defaults_to_tuple(self):
cmd = WsCommand.create({'id': 1, 'command': 'test', })
self.assertIsInstance(cmd.parameters, (tuple, list, ))
def test_parameters_stored_in_attribute(self):
expected_value = (1, 2, 3, )
cmd = WsCommand.create({'id': 1, 'command': 'test', 'parameters': expected_value, })
self.assertEqual(expected_value, cmd.parameters)
def test_default_values_of_attributes(self):
cmd = WsCommand.create({'id': 1, 'command': 'test', })
self.assertIsNone(cmd.timestamp)
self.assertIsNone(cmd.user_id)
self.assertIsNone(cmd.lifetime)
self.assertIsNone(cmd.flags)
self.assertIsNone(cmd.status)
self.assertIsNone(cmd.result)
def test_timestamp_can_be_set(self):
expected_timestamp = 1
cmd = WsCommand.create({'id': 1, 'command': 'test', 'timestamp': expected_timestamp, })
self.assertEqual(expected_timestamp, cmd.timestamp)
def test_user_id_can_be_set(self):
expected_user_id = 1
cmd = WsCommand.create({'id': 1, 'command': 'test', 'userId': expected_user_id, })
self.assertEqual(expected_user_id, cmd.user_id)
def test_lifetime_can_be_set(self):
expected_lifetime = 1
cmd = WsCommand.create({'id': 1, 'command': 'test', 'lifetime': expected_lifetime, })
self.assertEqual(expected_lifetime, cmd.lifetime)
def test_flags_can_be_set(self):
expected_flags = 1
cmd = WsCommand.create({'id': 1, 'command': 'test', 'flags': expected_flags, })
self.assertEqual(expected_flags, cmd.flags)
def test_status_can_be_set(self):
expected_status = 1
cmd = WsCommand.create({'id': 1, 'command': 'test', 'status': expected_status, })
self.assertEqual(expected_status, cmd.status)
def test_result_can_be_set(self):
expected_result = 1
cmd = WsCommand.create({'id': 1, 'command': 'test', 'result': expected_result, })
self.assertEqual(expected_result, cmd.result)
def test_default_dict(self):
command_name = 'test'
cmd = WsCommand.create({'id': 1, 'command': command_name, })
result = cmd.to_dict()
self.assertDictEqual({'command': command_name, 'parameters': [], }, result)
def test_can_serialize_attributes(self):
command_name = 'test'
lifetime = 1
flags = 2
cmd = WsCommand.create({'id': 1, 'command': command_name, 'lifetime': lifetime, 'flags': flags, })
result = cmd.to_dict()
self.assertDictEqual({
'command': command_name,
'parameters': [],
'lifetime': lifetime,
'flags': flags,
}, result)
|
the-stack_106_29806 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import contextlib
import errno
import inspect
import itertools
import os
import os.path
import shutil
import tempfile
import xml.etree.ElementTree
import ordereddict_backport
import py
import pytest
import ruamel.yaml as yaml
from llnl.util.filesystem import mkdirp, remove_linked_tree
import spack.architecture
import spack.compilers
import spack.config
import spack.caches
import spack.database
import spack.directory_layout
import spack.environment as ev
import spack.package
import spack.package_prefs
import spack.paths
import spack.platforms.test
import spack.repo
import spack.stage
import spack.util.executable
import spack.util.gpg
from spack.util.pattern import Bunch
from spack.dependency import Dependency
from spack.fetch_strategy import FetchStrategyComposite, URLFetchStrategy
from spack.fetch_strategy import FetchError
from spack.spec import Spec
from spack.version import Version
@pytest.fixture
def no_path_access(monkeypatch):
def _can_access(path, perms):
return False
monkeypatch.setattr(os, 'access', _can_access)
#
# Disable any activate Spack environment BEFORE all tests
#
@pytest.fixture(scope='session', autouse=True)
def clean_user_environment():
env_var = ev.spack_env_var in os.environ
active = ev._active_environment
if env_var:
spack_env_value = os.environ.pop(ev.spack_env_var)
if active:
ev.deactivate()
yield
if env_var:
os.environ[ev.spack_env_var] = spack_env_value
if active:
ev.activate(active)
# Hooks to add command line options or set other custom behaviors.
# They must be placed here to be found by pytest. See:
#
# https://docs.pytest.org/en/latest/writing_plugins.html
#
def pytest_addoption(parser):
group = parser.getgroup("Spack specific command line options")
group.addoption(
'--fast', action='store_true', default=False,
help='runs only "fast" unit tests, instead of the whole suite')
def pytest_collection_modifyitems(config, items):
if not config.getoption('--fast'):
# --fast not given, run all the tests
return
slow_tests = ['db', 'network', 'maybeslow']
skip_as_slow = pytest.mark.skip(
reason='skipped slow test [--fast command line option given]'
)
for item in items:
if any(x in item.keywords for x in slow_tests):
item.add_marker(skip_as_slow)
#
# These fixtures are applied to all tests
#
@pytest.fixture(scope='function', autouse=True)
def no_chdir():
"""Ensure that no test changes Spack's working dirctory.
This prevents Spack tests (and therefore Spack commands) from
changing the working directory and causing other tests to fail
mysteriously. Tests should use ``working_dir`` or ``py.path``'s
``.as_cwd()`` instead of ``os.chdir`` to avoid failing this check.
We assert that the working directory hasn't changed, unless the
original wd somehow ceased to exist.
"""
original_wd = os.getcwd()
yield
if os.path.isdir(original_wd):
assert os.getcwd() == original_wd
@pytest.fixture(scope='function', autouse=True)
def reset_compiler_cache():
"""Ensure that the compiler cache is not shared across Spack tests
This cache can cause later tests to fail if left in a state incompatible
with the new configuration. Since tests can make almost unlimited changes
to their setup, default to not use the compiler cache across tests."""
spack.compilers._compiler_cache = {}
yield
spack.compilers._compiler_cache = {}
@pytest.fixture(scope='function', autouse=True)
def mock_stage(tmpdir_factory, monkeypatch, request):
"""Establish the temporary build_stage for the mock archive."""
# The approach with this autouse fixture is to set the stage root
# instead of using spack.config.override() to avoid configuration
# conflicts with dozens of tests that rely on other configuration
# fixtures, such as config.
if 'nomockstage' not in request.keywords:
# Set the build stage to the requested path
new_stage = tmpdir_factory.mktemp('mock-stage')
new_stage_path = str(new_stage)
# Ensure the source directory exists within the new stage path
source_path = os.path.join(new_stage_path,
spack.stage._source_path_subdir)
mkdirp(source_path)
monkeypatch.setattr(spack.stage, '_stage_root', new_stage_path)
yield new_stage_path
# Clean up the test stage directory
if os.path.isdir(new_stage_path):
shutil.rmtree(new_stage_path)
else:
# Must yield a path to avoid a TypeError on test teardown
yield str(tmpdir_factory)
@pytest.fixture(scope='session')
def ignore_stage_files():
"""Session-scoped helper for check_for_leftover_stage_files.
Used to track which leftover files in the stage have been seen.
"""
# to start with, ignore the .lock file at the stage root.
return set(['.lock', spack.stage._source_path_subdir, 'build_cache'])
def remove_whatever_it_is(path):
"""Type-agnostic remove."""
if os.path.isfile(path):
os.remove(path)
elif os.path.islink(path):
remove_linked_tree(path)
else:
shutil.rmtree(path)
@pytest.fixture
def working_env():
saved_env = os.environ.copy()
yield
# os.environ = saved_env doesn't work
# it causes module_parsing::test_module_function to fail
    # when it's run after any test using this fixture
os.environ.clear()
os.environ.update(saved_env)
@pytest.fixture(scope='function', autouse=True)
def check_for_leftover_stage_files(request, mock_stage, ignore_stage_files):
"""
Ensure that each (mock_stage) test leaves a clean stage when done.
Tests that are expected to dirty the stage can disable the check by
adding::
@pytest.mark.disable_clean_stage_check
and the associated stage files will be removed.
"""
stage_path = mock_stage
yield
files_in_stage = set()
try:
stage_files = os.listdir(stage_path)
files_in_stage = set(stage_files) - ignore_stage_files
except OSError as err:
if err.errno == errno.ENOENT:
pass
else:
raise
if 'disable_clean_stage_check' in request.keywords:
# clean up after tests that are expected to be dirty
for f in files_in_stage:
path = os.path.join(stage_path, f)
remove_whatever_it_is(path)
else:
ignore_stage_files |= files_in_stage
assert not files_in_stage
@pytest.fixture(autouse=True)
def mock_fetch_cache(monkeypatch):
"""Substitutes spack.paths.fetch_cache with a mock object that does nothing
and raises on fetch.
"""
class MockCache(object):
def store(self, copy_cmd, relative_dest):
pass
def fetcher(self, target_path, digest, **kwargs):
return MockCacheFetcher()
class MockCacheFetcher(object):
def fetch(self):
raise FetchError('Mock cache always fails for tests')
def __str__(self):
return "[mock fetch cache]"
monkeypatch.setattr(spack.caches, 'fetch_cache', MockCache())
@pytest.fixture(autouse=True)
def _skip_if_missing_executables(request):
"""Permits to mark tests with 'require_executables' and skip the
tests if the executables passed as arguments are not found.
"""
if request.node.get_marker('requires_executables'):
required_execs = request.node.get_marker('requires_executables').args
missing_execs = [
x for x in required_execs if spack.util.executable.which(x) is None
]
if missing_execs:
msg = 'could not find executables: {0}'
pytest.skip(msg.format(', '.join(missing_execs)))
# FIXME: The lines below should better be added to a fixture with
# FIXME: session-scope. Anyhow doing it is not easy, as it seems
# FIXME: there's some weird interaction with compilers during concretization.
spack.architecture.real_platform = spack.architecture.platform
spack.architecture.platform = lambda: spack.platforms.test.Test()
#
# Context managers used by fixtures
#
# Because these context managers modify global state, they should really
# ONLY be used persistently (i.e., around yield statements) in
# function-scoped fixtures, OR in autouse session- or module-scoped
# fixtures.
#
# If they're used in regular tests or in module-scoped fixtures that are
# then injected as function arguments, weird things can happen, because
# the original state won't be restored until *after* the fixture is
# destroyed. This makes sense for an autouse fixture, where you know
# everything in the module/session is going to need the modified
# behavior, but modifying global state for one function in a way that
# won't be restored until after the module or session is done essentially
# leaves garbage behind for other tests.
#
# In general, we should module- or session-scope the *STATE* required for
# these global objects, but we shouldn't module- or session-scope their
# *USE*, or things can get really confusing.
#
@contextlib.contextmanager
def use_configuration(config):
"""Context manager to swap out the global Spack configuration."""
saved = spack.config.config
spack.config.config = config
yield
spack.config.config = saved
@contextlib.contextmanager
def use_store(store):
"""Context manager to swap out the global Spack store."""
saved = spack.store.store
spack.store.store = store
yield
spack.store.store = saved
@contextlib.contextmanager
def use_repo(repo):
"""Context manager to swap out the global Spack repo path."""
with spack.repo.swap(repo):
yield
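# A sketch of the intended pattern (hypothetical fixture names, kept as a
# comment): build the expensive *state* once per session, then activate it
# with the context managers above inside a function-scoped fixture.
#
#     @pytest.fixture(scope='session')
#     def my_configuration(configuration_dir):
#         yield spack.config.Configuration(...)
#
#     @pytest.fixture(scope='function')
#     def my_config(my_configuration):
#         with use_configuration(my_configuration):
#             yield my_configuration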
#
# Test-specific fixtures
#
@pytest.fixture(scope='session')
def mock_repo_path():
yield spack.repo.RepoPath(spack.paths.mock_packages_path)
@pytest.fixture
def mock_pkg_install(monkeypatch):
def _pkg_install_fn(pkg, spec, prefix):
# sanity_check_prefix requires something in the install directory
mkdirp(prefix.bin)
monkeypatch.setattr(spack.package.PackageBase, 'install', _pkg_install_fn,
raising=False)
@pytest.fixture(scope='function')
def mock_packages(mock_repo_path, mock_pkg_install):
"""Use the 'builtin.mock' repository instead of 'builtin'"""
with use_repo(mock_repo_path):
yield mock_repo_path
@pytest.fixture(scope='function')
def mutable_mock_repo(mock_repo_path):
"""Function-scoped mock packages, for tests that need to modify them."""
mock_repo_path = spack.repo.RepoPath(spack.paths.mock_packages_path)
with use_repo(mock_repo_path):
yield mock_repo_path
@pytest.fixture(scope='session')
def linux_os():
"""Returns a named tuple with attributes 'name' and 'version'
representing the OS.
"""
platform = spack.architecture.platform()
name, version = 'debian', '6'
if platform.name == 'linux':
platform = spack.architecture.platform()
current_os = platform.operating_system('default_os')
name, version = current_os.name, current_os.version
LinuxOS = collections.namedtuple('LinuxOS', ['name', 'version'])
return LinuxOS(name=name, version=version)
@pytest.fixture(scope='session')
def configuration_dir(tmpdir_factory, linux_os):
"""Copies mock configuration files in a temporary directory. Returns the
directory path.
"""
tmpdir = tmpdir_factory.mktemp('configurations')
# <test_path>/data/config has mock config yaml files in it
# copy these to the site config.
test_config = py.path.local(spack.paths.test_path).join('data', 'config')
test_config.copy(tmpdir.join('site'))
# Create temporary 'defaults', 'site' and 'user' folders
tmpdir.ensure('user', dir=True)
# Slightly modify compilers.yaml to look like Linux
compilers_yaml = test_config.join('compilers.yaml')
content = ''.join(compilers_yaml.read()).format(linux_os)
t = tmpdir.join('site', 'compilers.yaml')
t.write(content)
yield tmpdir
# Once done, cleanup the directory
shutil.rmtree(str(tmpdir))
@pytest.fixture(scope='session')
def mock_configuration(configuration_dir):
"""Create a persistent Configuration object from the configuration_dir."""
defaults = spack.config.InternalConfigScope(
'_builtin', spack.config.config_defaults
)
test_scopes = [defaults]
test_scopes += [
spack.config.ConfigScope(name, str(configuration_dir.join(name)))
for name in ['site', 'system', 'user']]
test_scopes.append(spack.config.InternalConfigScope('command_line'))
yield spack.config.Configuration(*test_scopes)
@pytest.fixture(scope='function')
def config(mock_configuration):
"""This fixture activates/deactivates the mock configuration."""
with use_configuration(mock_configuration):
yield mock_configuration
@pytest.fixture(scope='function')
def mutable_config(tmpdir_factory, configuration_dir, monkeypatch):
"""Like config, but tests can modify the configuration."""
mutable_dir = tmpdir_factory.mktemp('mutable_config').join('tmp')
configuration_dir.copy(mutable_dir)
cfg = spack.config.Configuration(
*[spack.config.ConfigScope(name, str(mutable_dir))
for name in ['site', 'system', 'user']])
# This is essential, otherwise the cache will create weird side effects
# that will compromise subsequent tests if compilers.yaml is modified
monkeypatch.setattr(spack.compilers, '_cache_config_file', [])
with use_configuration(cfg):
yield cfg
@pytest.fixture()
def mock_low_high_config(tmpdir):
"""Mocks two configuration scopes: 'low' and 'high'."""
config = spack.config.Configuration(
*[spack.config.ConfigScope(name, str(tmpdir.join(name)))
for name in ['low', 'high']])
with use_configuration(config):
yield config
def _populate(mock_db):
r"""Populate a mock database with packages.
Here is what the mock DB looks like:
o mpileaks o mpileaks' o mpileaks''
|\ |\ |\
| o callpath | o callpath' | o callpath''
|/| |/| |/|
o | mpich o | mpich2 o | zmpi
| | o | fake
| | |
| |______________/
| .____________/
|/
o dyninst
|\
| o libdwarf
|/
o libelf
"""
def _install(spec):
s = spack.spec.Spec(spec).concretized()
pkg = spack.repo.get(s)
pkg.do_install(fake=True, explicit=True)
# Transaction used to avoid repeated writes.
with mock_db.write_transaction():
_install('mpileaks ^mpich')
_install('mpileaks ^mpich2')
_install('mpileaks ^zmpi')
_install('externaltest')
@pytest.fixture(scope='session')
def _store_dir_and_cache(tmpdir_factory):
"""Returns the directory where to build the mock database and
where to cache it.
"""
store = tmpdir_factory.mktemp('mock_store')
cache = tmpdir_factory.mktemp('mock_store_cache')
return store, cache
@pytest.fixture(scope='session')
def mock_store(tmpdir_factory, mock_repo_path, mock_configuration,
_store_dir_and_cache):
"""Creates a read-only mock database with some packages installed note
that the ref count for dyninst here will be 3, as it's recycled
across each install.
This does not actually activate the store for use by Spack -- see the
``database`` fixture for that.
"""
store_path, store_cache = _store_dir_and_cache
store = spack.store.Store(str(store_path))
# If the cache does not exist populate the store and create it
if not os.path.exists(str(store_cache.join('.spack-db'))):
with use_configuration(mock_configuration):
with use_store(store):
with use_repo(mock_repo_path):
_populate(store.db)
store_path.copy(store_cache, mode=True, stat=True)
# Make the DB filesystem read-only to ensure we can't modify entries
store_path.join('.spack-db').chmod(mode=0o555, rec=1)
yield store
store_path.join('.spack-db').chmod(mode=0o755, rec=1)
@pytest.fixture(scope='function')
def database(mock_store, mock_packages, config):
"""This activates the mock store, packages, AND config."""
with use_store(mock_store):
yield mock_store.db
@pytest.fixture(scope='function')
def mutable_database(database, _store_dir_and_cache):
"""Writeable version of the fixture, restored to its initial state
after each test.
"""
# Make the database writeable, as we are going to modify it
store_path, store_cache = _store_dir_and_cache
store_path.join('.spack-db').chmod(mode=0o755, rec=1)
yield database
# Restore the initial state by copying the content of the cache back into
# the store and making the database read-only
store_path.remove(rec=1)
store_cache.copy(store_path, mode=True, stat=True)
store_path.join('.spack-db').chmod(mode=0o555, rec=1)
@pytest.fixture()
def dirs_with_libfiles(tmpdir_factory):
lib_to_libfiles = {
'libstdc++': ['libstdc++.so', 'libstdc++.tbd'],
'libgfortran': ['libgfortran.a', 'libgfortran.dylib'],
'libirc': ['libirc.a', 'libirc.so']
}
root = tmpdir_factory.mktemp('root')
lib_to_dirs = {}
i = 0
for lib, libfiles in lib_to_libfiles.items():
dirs = []
for libfile in libfiles:
root.ensure(str(i), dir=True)
root.join(str(i)).ensure(libfile)
dirs.append(str(root.join(str(i))))
i += 1
lib_to_dirs[lib] = dirs
all_dirs = list(itertools.chain.from_iterable(lib_to_dirs.values()))
yield lib_to_dirs, all_dirs
@pytest.fixture(scope='function', autouse=True)
def disable_compiler_execution(monkeypatch):
def noop(*args):
return []
# Compiler.determine_implicit_rpaths actually runs the compiler. So this
# replaces that function with a noop that simulates finding no implicit
# RPATHs
monkeypatch.setattr(
spack.compiler.Compiler,
'_get_compiler_link_paths',
noop
)
@pytest.fixture(scope='function')
def install_mockery(tmpdir, config, mock_packages, monkeypatch):
"""Hooks a fake install directory, DB, and stage directory into Spack."""
real_store = spack.store.store
spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
# We use a fake package, so temporarily disable checksumming
with spack.config.override('config:checksum', False):
yield
tmpdir.join('opt').remove()
spack.store.store = real_store
@pytest.fixture()
def mock_fetch(mock_archive):
"""Fake the URL for a package so it downloads from a file."""
fetcher = FetchStrategyComposite()
fetcher.append(URLFetchStrategy(mock_archive.url))
@property
def fake_fn(self):
return fetcher
orig_fn = spack.package.PackageBase.fetcher
spack.package.PackageBase.fetcher = fake_fn
yield
spack.package.PackageBase.fetcher = orig_fn
class MockLayout(object):
def __init__(self, root):
self.root = root
def path_for_spec(self, spec):
return '/'.join([self.root, spec.name])
def check_installed(self, spec):
return True
@pytest.fixture()
def gen_mock_layout(tmpdir):
# Generate a MockLayout in a temporary directory. In general the prefixes
# specified by MockLayout should never be written to, but this ensures
# that even if they are, that it causes no harm
def create_layout(root):
subroot = tmpdir.mkdir(root)
return MockLayout(str(subroot))
yield create_layout
@pytest.fixture()
def module_configuration(monkeypatch, request):
"""Reads the module configuration file from the mock ones prepared
for tests and monkeypatches the right classes to hook it in.
"""
# Class of the module file writer
writer_cls = getattr(request.module, 'writer_cls')
# Module where the module file writer is defined
writer_mod = inspect.getmodule(writer_cls)
# Key for specific settings relative to this module type
writer_key = str(writer_mod.__name__).split('.')[-1]
# Root folder for configuration
root_for_conf = os.path.join(
spack.paths.test_path, 'data', 'modules', writer_key
)
def _impl(filename):
file = os.path.join(root_for_conf, filename + '.yaml')
with open(file) as f:
configuration = yaml.load(f)
def mock_config_function():
return configuration
def writer_key_function():
return mock_config_function()[writer_key]
monkeypatch.setattr(
spack.modules.common,
'configuration',
mock_config_function
)
monkeypatch.setattr(
writer_mod,
'configuration',
writer_key_function
)
monkeypatch.setattr(
writer_mod,
'configuration_registry',
{}
)
return _impl
@pytest.fixture()
def mock_gnupghome(monkeypatch):
# GNU PGP can't handle paths longer than 108 characters (wtf!@#$) so we
# have to make our own tmpdir with a shorter name than pytest's.
# This comes up because tmp paths on macOS are already long-ish, and
# pytest makes them longer.
short_name_tmpdir = tempfile.mkdtemp()
monkeypatch.setattr(spack.util.gpg, 'GNUPGHOME', short_name_tmpdir)
monkeypatch.setattr(spack.util.gpg.Gpg, '_gpg', None)
yield
# clean up, since we are doing this manually
shutil.rmtree(short_name_tmpdir)
##########
# Fake archives and repositories
##########
@pytest.fixture(scope='session', params=[('.tar.gz', 'z')])
def mock_archive(request, tmpdir_factory):
"""Creates a very simple archive directory with a configure script and a
makefile that installs to a prefix. Tars it up into an archive.
"""
tar = spack.util.executable.which('tar', required=True)
tmpdir = tmpdir_factory.mktemp('mock-archive-dir')
tmpdir.ensure(spack.stage._source_path_subdir, dir=True)
repodir = tmpdir.join(spack.stage._source_path_subdir)
# Create the configure script
configure_path = str(tmpdir.join(spack.stage._source_path_subdir,
'configure'))
with open(configure_path, 'w') as f:
f.write(
"#!/bin/sh\n"
"prefix=$(echo $1 | sed 's/--prefix=//')\n"
"cat > Makefile <<EOF\n"
"all:\n"
"\techo Building...\n\n"
"install:\n"
"\tmkdir -p $prefix\n"
"\ttouch $prefix/dummy_file\n"
"EOF\n"
)
os.chmod(configure_path, 0o755)
# Archive it
with tmpdir.as_cwd():
archive_name = '{0}{1}'.format(spack.stage._source_path_subdir,
request.param[0])
tar('-c{0}f'.format(request.param[1]), archive_name,
spack.stage._source_path_subdir)
Archive = collections.namedtuple('Archive',
['url', 'path', 'archive_file',
'expanded_archive_basedir'])
archive_file = str(tmpdir.join(archive_name))
# Return the url
yield Archive(
url=('file://' + archive_file),
archive_file=archive_file,
path=str(repodir),
expanded_archive_basedir=spack.stage._source_path_subdir)
@pytest.fixture(scope='session')
def mock_git_repository(tmpdir_factory):
"""Creates a simple git repository with two branches,
two commits and two submodules. Each submodule has one commit.
"""
git = spack.util.executable.which('git', required=True)
suburls = []
for submodule_count in range(2):
tmpdir = tmpdir_factory.mktemp('mock-git-repo-submodule-dir-{0}'
.format(submodule_count))
tmpdir.ensure(spack.stage._source_path_subdir, dir=True)
repodir = tmpdir.join(spack.stage._source_path_subdir)
suburls.append((submodule_count, 'file://' + str(repodir)))
# Initialize the repository
with repodir.as_cwd():
git('init')
git('config', 'user.name', 'Spack')
git('config', 'user.email', '[email protected]')
# r0 is just the first commit
submodule_file = 'r0_file_{0}'.format(submodule_count)
repodir.ensure(submodule_file)
git('add', submodule_file)
git('commit', '-m', 'mock-git-repo r0 {0}'.format(submodule_count))
tmpdir = tmpdir_factory.mktemp('mock-git-repo-dir')
tmpdir.ensure(spack.stage._source_path_subdir, dir=True)
repodir = tmpdir.join(spack.stage._source_path_subdir)
# Initialize the repository
with repodir.as_cwd():
git('init')
git('config', 'user.name', 'Spack')
git('config', 'user.email', '[email protected]')
url = 'file://' + str(repodir)
for number, suburl in suburls:
git('submodule', 'add', suburl,
'third_party/submodule{0}'.format(number))
# r0 is just the first commit
r0_file = 'r0_file'
repodir.ensure(r0_file)
git('add', r0_file)
git('commit', '-m', 'mock-git-repo r0')
branch = 'test-branch'
branch_file = 'branch_file'
git('branch', branch)
tag_branch = 'tag-branch'
tag_file = 'tag_file'
git('branch', tag_branch)
# Check out first branch
git('checkout', branch)
repodir.ensure(branch_file)
git('add', branch_file)
git('commit', '-m' 'r1 test branch')
# Check out a second branch and tag it
git('checkout', tag_branch)
repodir.ensure(tag_file)
git('add', tag_file)
git('commit', '-m' 'tag test branch')
tag = 'test-tag'
git('tag', tag)
git('checkout', 'master')
# R1 test is the same as test for branch
rev_hash = lambda x: git('rev-parse', x, output=str).strip()
r1 = rev_hash(branch)
r1_file = branch_file
checks = {
'master': Bunch(
revision='master', file=r0_file, args={'git': url}
),
'branch': Bunch(
revision=branch, file=branch_file, args={
'git': url, 'branch': branch
}
),
'tag-branch': Bunch(
revision=tag_branch, file=tag_file, args={
'git': url, 'branch': tag_branch
}
),
'tag': Bunch(
revision=tag, file=tag_file, args={'git': url, 'tag': tag}
),
'commit': Bunch(
revision=r1, file=r1_file, args={'git': url, 'commit': r1}
)
}
t = Bunch(checks=checks, url=url, hash=rev_hash,
path=str(repodir), git_exe=git)
yield t
@pytest.fixture(scope='session')
def mock_hg_repository(tmpdir_factory):
"""Creates a very simple hg repository with two commits."""
hg = spack.util.executable.which('hg', required=True)
tmpdir = tmpdir_factory.mktemp('mock-hg-repo-dir')
tmpdir.ensure(spack.stage._source_path_subdir, dir=True)
repodir = tmpdir.join(spack.stage._source_path_subdir)
get_rev = lambda: hg('id', '-i', output=str).strip()
# Initialize the repository
with repodir.as_cwd():
url = 'file://' + str(repodir)
hg('init')
# Commit file r0
r0_file = 'r0_file'
repodir.ensure(r0_file)
hg('add', r0_file)
hg('commit', '-m', 'revision 0', '-u', 'test')
r0 = get_rev()
# Commit file r1
r1_file = 'r1_file'
repodir.ensure(r1_file)
hg('add', r1_file)
hg('commit', '-m' 'revision 1', '-u', 'test')
r1 = get_rev()
checks = {
'default': Bunch(
revision=r1, file=r1_file, args={'hg': str(repodir)}
),
'rev0': Bunch(
revision=r0, file=r0_file, args={
'hg': str(repodir), 'revision': r0
}
)
}
t = Bunch(checks=checks, url=url, hash=get_rev, path=str(repodir))
yield t
@pytest.fixture(scope='session')
def mock_svn_repository(tmpdir_factory):
"""Creates a very simple svn repository with two commits."""
svn = spack.util.executable.which('svn', required=True)
svnadmin = spack.util.executable.which('svnadmin', required=True)
tmpdir = tmpdir_factory.mktemp('mock-svn-stage')
tmpdir.ensure(spack.stage._source_path_subdir, dir=True)
repodir = tmpdir.join(spack.stage._source_path_subdir)
url = 'file://' + str(repodir)
# Initialize the repository
with repodir.as_cwd():
# NOTE: Adding --pre-1.5-compatible works for NERSC
# Unknown if this is also an issue at other sites.
svnadmin('create', '--pre-1.5-compatible', str(repodir))
# Import a structure (first commit)
r0_file = 'r0_file'
tmpdir.ensure('tmp-path', r0_file)
tmp_path = tmpdir.join('tmp-path')
svn('import',
str(tmp_path),
url,
'-m',
'Initial import r0')
tmp_path.remove()
# Second commit
r1_file = 'r1_file'
svn('checkout', url, str(tmp_path))
tmpdir.ensure('tmp-path', r1_file)
with tmp_path.as_cwd():
svn('add', str(tmpdir.ensure('tmp-path', r1_file)))
svn('ci', '-m', 'second revision r1')
tmp_path.remove()
r0 = '1'
r1 = '2'
checks = {
'default': Bunch(
revision=r1, file=r1_file, args={'svn': url}),
'rev0': Bunch(
revision=r0, file=r0_file, args={
'svn': url, 'revision': r0})
}
def get_rev():
output = svn('info', '--xml', output=str)
info = xml.etree.ElementTree.fromstring(output)
return info.find('entry/commit').get('revision')
t = Bunch(checks=checks, url=url, hash=get_rev, path=str(repodir))
yield t
@pytest.fixture()
def mutable_mock_env_path(tmpdir_factory):
"""Fixture for mocking the internal spack environments directory."""
saved_path = spack.environment.env_path
mock_path = tmpdir_factory.mktemp('mock-env-path')
spack.environment.env_path = str(mock_path)
yield mock_path
spack.environment.env_path = saved_path
@pytest.fixture()
def installation_dir_with_headers(tmpdir_factory):
"""Mock installation tree with a few headers placed in different
subdirectories. Shouldn't be modified by tests as it is session
scoped.
"""
root = tmpdir_factory.mktemp('prefix')
# Create a few header files:
#
# <prefix>
# |-- include
# | |--boost
# | | |-- ex3.h
# | |-- ex3.h
# |-- path
# |-- to
# |-- ex1.h
# |-- subdir
# |-- ex2.h
#
root.ensure('include', 'boost', 'ex3.h')
root.ensure('include', 'ex3.h')
root.ensure('path', 'to', 'ex1.h')
root.ensure('path', 'to', 'subdir', 'ex2.h')
return root
##########
# Mock packages
##########
class MockPackage(object):
def __init__(self, name, dependencies, dependency_types, conditions=None,
versions=None):
self.name = name
self.spec = None
self.dependencies = ordereddict_backport.OrderedDict()
self._installed_upstream = False
assert len(dependencies) == len(dependency_types)
for dep, dtype in zip(dependencies, dependency_types):
d = Dependency(self, Spec(dep.name), type=dtype)
if not conditions or dep.name not in conditions:
self.dependencies[dep.name] = {Spec(name): d}
else:
dep_conditions = conditions[dep.name]
dep_conditions = dict(
(Spec(x), Dependency(self, Spec(y), type=dtype))
for x, y in dep_conditions.items())
self.dependencies[dep.name] = dep_conditions
if versions:
self.versions = versions
else:
versions = list(Version(x) for x in [1, 2, 3])
self.versions = dict((x, {'preferred': False}) for x in versions)
self.variants = {}
self.provided = {}
self.conflicts = {}
self.patches = {}
class MockPackageMultiRepo(object):
def __init__(self, packages):
self.spec_to_pkg = dict((x.name, x) for x in packages)
self.spec_to_pkg.update(
dict(('mockrepo.' + x.name, x) for x in packages))
def get(self, spec):
if not isinstance(spec, spack.spec.Spec):
spec = Spec(spec)
return self.spec_to_pkg[spec.name]
def get_pkg_class(self, name):
return self.spec_to_pkg[name]
def exists(self, name):
return name in self.spec_to_pkg
def is_virtual(self, name):
return False
def repo_for_pkg(self, name):
import collections
Repo = collections.namedtuple('Repo', ['namespace'])
return Repo('mockrepo')
##########
# Specs of various kind
##########
@pytest.fixture(
params=[
'conflict%clang',
'conflict%clang+foo',
'conflict-parent%clang',
'[email protected]^conflict~foo'
]
)
def conflict_spec(request):
"""Specs which violate constraints specified with the "conflicts"
directive in the "conflict" package.
"""
return request.param
@pytest.fixture(
params=[
'conflict%~'
]
)
def invalid_spec(request):
"""Specs that do not parse cleanly due to invalid formatting.
"""
return request.param
@pytest.fixture("module")
def mock_test_repo(tmpdir_factory):
"""Create an empty repository."""
repo_namespace = 'mock_test_repo'
repodir = tmpdir_factory.mktemp(repo_namespace)
repodir.ensure(spack.repo.packages_dir_name, dir=True)
yaml = repodir.join('repo.yaml')
yaml.write("""
repo:
namespace: mock_test_repo
""")
repo = spack.repo.RepoPath(str(repodir))
with spack.repo.swap(repo):
yield repo, repodir
shutil.rmtree(str(repodir))
##########
# Class and fixture to work around problems raising exceptions in directives,
# which cause tests like test_from_list_url to hang for Python 2.x metaclass
# processing.
#
# At this point only version and patch directive handling has been addressed.
##########
class MockBundle(object):
has_code = False
name = 'mock-bundle'
versions = {}
@pytest.fixture
def mock_directive_bundle():
"""Return a mock bundle package for directive tests."""
return MockBundle()
@pytest.fixture
def clear_directive_functions():
"""Clear all overidden directive functions for subsequent tests."""
yield
# Make sure any directive functions overidden by tests are cleared before
# proceeding with subsequent tests that may depend on the original
# functions.
spack.directives.DirectiveMeta._directives_to_be_executed = []
|
the-stack_106_29807 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
from logging import NullHandler, getLogger
from pkg_resources import resource_stream
from six import text_type
import yaml
from .serializer import get_yaml_loader
logger = getLogger(__name__)
logger.addHandler(NullHandler())
_valid_aliases = ('code', 'default', 'human', 'technical')
_Value = namedtuple('_Value', _valid_aliases)
class Config(object):
"""Application config class."""
@classmethod
def build(cls, path=None):
"""Build config instance."""
loader = get_yaml_loader()
with resource_stream('knowit', 'defaults.yml') as stream:
cfgs = [yaml.load(stream, Loader=loader)]
if path:
with open(path, 'r') as stream:
cfgs.append(yaml.load(stream, Loader=loader))
profiles_data = {}
for cfg in cfgs:
if 'profiles' in cfg:
profiles_data.update(cfg['profiles'])
knowledge_data = {}
for cfg in cfgs:
if 'knowledge' in cfg:
knowledge_data.update(cfg['knowledge'])
data = {'general': {}}
for class_name, data_map in knowledge_data.items():
data.setdefault(class_name, {})
for code, detection_values in data_map.items():
alias_map = (profiles_data.get(class_name) or {}).get(code) or {}
alias_map.setdefault('code', code)
alias_map.setdefault('default', alias_map['code'])
alias_map.setdefault('human', alias_map['default'])
alias_map.setdefault('technical', alias_map['human'])
value = _Value(**{k: v for k, v in alias_map.items() if k in _valid_aliases})
for detection_value in detection_values:
data[class_name][text_type(detection_value)] = value
config = Config()
config.__dict__ = data
return config
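# Illustrative usage sketch (comment only; the names below are limited to what
# this module defines):
#
#     config = Config.build()               # bundled defaults.yml only
#     config = Config.build('profiles.yml') # defaults merged with a user file
#
# After building, each known detection value maps to a _Value namedtuple, so
# config.__dict__[class_name][detection_value].human returns the
# human-readable alias for that value.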
|
the-stack_106_29808 | def counter(start=0):
count = start
def incr():
nonlocal count
count += 1
return count
return incr
if __name__ == '__main__':
a = counter()
print(a())
b = counter(10)
print(b())
print(a())
print(b())
|
the-stack_106_29809 | #!/usr/bin/env python3
import re
from pwn import *
p = remote("2018shell.picoctf.com",1225)
'''
pwntools official documentation: installation steps
$sudo -i
#apt-get update
#apt-get install python3 python3-pip python3-dev git libssl-dev libffi-dev build-essential
#python3 -m pip install --upgrade pip
#python3 -m pip install --upgrade pwntools
'''
temp = p.recv().decode()
print(temp)
regexrule = re.compile(r"\d{8}")
matchobj = regexrule.findall(temp)
answer = ''
for i in range(len(matchobj)): # prepend '0b', convert the binary string to an int, then to a character
if len(matchobj[i]) == 8:
answer += chr(int('0b'+matchobj[i], 2))
print(answer)
p.send(answer+"\n")
# send the answer, with the enter key (newline) included
temp = (p.recv().decode())
print(temp) # read in the next challenge text
regexrule = re.compile('the [0-9a-f]+')
matchobj = regexrule.findall(temp)
asciiq = matchobj[0][4:]
answer = ''
for i in range(len(asciiq)//2):
answer +=chr(int('0x'+asciiq[2*i : 2+2*i], 16))
print(answer)
p.send(answer+'\n')
temp = p.recv().decode()
print(temp)#
regexrule = re.compile(r"\d{3}")
matchobj = regexrule.findall(temp) # find runs of three consecutive digits (octal codes)
answer = ''
for i in range(len(matchobj)):
answer += chr(int('0o'+matchobj[i], 8))
print(answer)
p.send(answer+'\n')
temp = p.recv().decode()
print(temp)
|
the-stack_106_29810 | import inspect
from typing import Dict, Generator, Tuple, Any, Set, List, Union
from pathlib import Path
from pydantic import BaseModel
from openpyxl import Workbook
from openpyxl.worksheet.worksheet import Worksheet
from openpyxl.styles import Border, Alignment
from openpyxl.cell import Cell
BORDER_ATTRIBUTES: Set[str] = {
"left",
"right",
"top",
"bottom",
"diagonal",
"diagonal_direction",
"vertical",
"horizontal",
"diagonalUp",
"diagonalDown",
"outline",
"start",
"end",
}
ALIGNMENT_ATTRIBUTES: Set[str] = {
"horizontal",
"vertical",
"textRotation",
"wrapText",
"shrinkToFit",
"indent",
"relativeIndent",
"justifyLastLine",
"readingOrder",
"text_rotation",
"wrap_text",
"shrink_to_fit",
}
def _apply_or_to_objects(
self_var: Any, other_var: Any, attributes: Set[str]
) -> Dict[str, Any]:
return {
x: getattr(self_var, x, None) or getattr(other_var, x, None) for x in attributes
}
def _base_value_or(self_var: Any, entry_var: Any) -> Any:
"""
For some properties it is more convenient
to also have their fields merged, like:
borders and alignment
"""
if isinstance(self_var, Border):
return Border(**_apply_or_to_objects(self_var, entry_var, BORDER_ATTRIBUTES))
if isinstance(self_var, Alignment):
return Alignment(
**_apply_or_to_objects(self_var, entry_var, ALIGNMENT_ATTRIBUTES)
)
return self_var or entry_var
class BaseXLSXCellData:
"""
It is used to "or" `openpyxl.cell.Cell` properties where
the below conditions apply. Refer to `openpyxl.cell.Cell` for
supported fields.
The idea is to have cells which are additive so properties
are chained via "|" operator.
Values are defined also via class level properties so they
can easily be overwritten, via class inheritance and constructors.
All properties will be merged with "or" rule, the below is True:
BaseXLSXCellData(value="text") | BaseXLSXCellData(font=Font())
== BaseXLSXCellData(value="text", font=Font())
The "or" operator has left-to-right associativity, the below is True:
BaseXLSXCellData(value="text") | BaseXLSXCellData(value="other_text")
== BaseXLSXCellData(value="text")
"""
def __init__(self, **kwargs):
# gather all class attributes
# a class attribute is considered a member not starting with "__" or "_"
for name, value in vars(self.__class__).items():
if name.startswith("__") or name.startswith("_"):
continue
self.__setattr__(name, value)
# do not overwrite class level definition if already exists
for name, value in kwargs.items():
to_store = self.__getattribute__(name) if value is None else value
self.__setattr__(name, to_store)
def __or__(self, other):
"""
Applies or operation to all shared attributes and
copy the unshared from both.
"""
self_vars = vars(self)
other_vars = vars(other)
self_vars_keys = set(self_vars.keys())
other_vars_keys = set(other_vars.keys())
keys_in_common = self_vars_keys & other_vars_keys
keys_self_vars_to_copy = self_vars_keys - keys_in_common
keys_other_vars_to_copy = other_vars_keys - keys_in_common
# apply or to common properties
merged_values = {
k: _base_value_or(self_vars[k], other_vars[k]) for k in keys_in_common
}
# copy properties which are not shared
for key in keys_self_vars_to_copy:
merged_values[key] = self_vars[key]
for key in keys_other_vars_to_copy:
merged_values[key] = other_vars[key]
return BaseXLSXCellData(**merged_values)
def __repr__(self):
"""Only outputs not None attributes"""
formatted_vars = ", ".join(
[f"{x[0]}={x[1]}" for x in vars(self).items() if x[1] is not None]
)
return f"<{self.__class__.__name__} {formatted_vars}>"
class BaseXLSXSheet:
# name of the sheet
name: str = None
# cell style contents, using a list of tuples instead of dict
# to allow for "duplicate keys"
cell_styles: List[Tuple[str, Dict[str, BaseXLSXCellData]]] = None
# used to merge cells via ranges like A1:B2
cell_merge: Set[str] = set()
    # specify each column's width like {"B": 10}
column_dimensions: Dict[str, int] = {}
def _check_attribute(self, attribute_name: str):
if getattr(self, attribute_name) is None:
raise ValueError(f"'{attribute_name}' attribute is None, please define it")
def __init__(self):
self._check_attribute("cell_styles")
self._check_attribute("name")
def __repr__(self):
return f"<{self.__class__.__name__} name={self.name}, cell_styles={self.cell_styles}"
def assemble_data_for_template(
self, template_data: BaseModel
) -> List[Tuple[str, Dict[str, BaseXLSXCellData]]]:
"""
Expected to be implemented by the user.
Used to polpulate the sheet before applying the
static part of the template.
"""
def _update_cell(cell: Cell, data: BaseXLSXCellData) -> None:
"""Extract properties from the cell_styles and apply them to the cell"""
for name, value in vars(data).items():
cell.__setattr__(name, value)
def _update_entry_in_cell(
target: Dict[str, BaseXLSXCellData],
address: str,
new_entry: BaseXLSXCellData,
) -> None:
"""
    There may be multiple entries for the same cell, coming from different sources.
It is useful for applying styling to existing cells and storing values
"""
    existing_entry = target.get(address, None)
    target[address] = (
        new_entry if existing_entry is None else (existing_entry | new_entry)
)
def _parse_multiple_cell_ranges(
single_cells_cell_styles: Dict[str, BaseXLSXCellData],
xls_sheet: Worksheet,
entry: BaseXLSXCellData,
cell_address: str,
):
for cell_row in xls_sheet[cell_address]:
for cell in cell_row:
_update_entry_in_cell(
target=single_cells_cell_styles,
address=cell.coordinate,
new_entry=entry,
)
class BaseXLSXDocument:
def _check_attribute(self, attribute_name: str):
if getattr(self, attribute_name) is None:
raise ValueError(f"'{attribute_name}' attribute is None, please define it")
def __init__(self, *args, file_name: Union[str, Path] = None):
for k, entry in enumerate(args):
self.__dict__[f"__sheet__entry__{k}"] = entry
self.file_name = (
self.__getattribute__("file_name") if file_name is None else file_name
)
self._check_attribute("file_name")
self._sheets_by_name: Dict[str, Worksheet] = {}
def _get_sheets(self) -> Generator[Tuple[str, Any], None, None]:
for member in inspect.getmembers(self):
if isinstance(member[1], BaseXLSXSheet):
yield member
def __repr__(self):
formatted_sheets = "\n\t".join([f"{x[0]}={x[1]}" for x in self._get_sheets()])
return f"<{self.__class__.__name__}\n\t{formatted_sheets}>"
def _assemble_workbook(
self,
sheets_entries: Generator[Tuple[str, Any], None, None],
template_data: BaseModel,
) -> Workbook:
workbook = Workbook()
for _, sheet_data in sheets_entries:
sheet_data: BaseXLSXSheet = sheet_data
sheet_name = sheet_data.name
xls_sheet = workbook.create_sheet(sheet_name)
single_cells_cell_styles: Dict[str, BaseXLSXCellData] = {}
all_cells = []
data_cells = sheet_data.assemble_data_for_template(template_data)
if data_cells:
all_cells.extend(data_cells)
all_cells.extend(sheet_data.cell_styles)
for cell_address, entry in all_cells:
if ":" in cell_address:
# ranges like A1:B4 will be flattened into single cell entries
_parse_multiple_cell_ranges(
single_cells_cell_styles=single_cells_cell_styles,
xls_sheet=xls_sheet,
entry=entry,
cell_address=cell_address,
)
else:
_update_entry_in_cell(
target=single_cells_cell_styles,
address=cell_address,
new_entry=entry,
)
            # finally apply data from cell_styles to xls cells
for cell_address, entry in single_cells_cell_styles.items():
_update_cell(xls_sheet[cell_address], entry)
# apply column widths
for column, width in sheet_data.column_dimensions.items():
xls_sheet.column_dimensions[column].width = width
# apply cell merging
for to_merge in sheet_data.cell_merge:
xls_sheet.merge_cells(to_merge)
# store for future usage
            self._sheets_by_name[sheet_data.name] = xls_sheet
# remove the default sheet
sheet_to_remove = workbook.get_sheet_by_name(workbook.get_sheet_names()[0])
workbook.remove(sheet_to_remove)
return workbook
def _generate_document(self, template_data: BaseModel) -> Workbook:
return self._assemble_workbook(self._get_sheets(), template_data)
def document_path(self, base_path: Path) -> Path:
return base_path / Path(self.file_name)
def save_document(self, base_path: Path, template_data: BaseModel) -> None:
workbook = self._generate_document(template_data)
destination_path = self.document_path(base_path)
workbook.save(destination_path)
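# Illustrative sketch (assumption, not part of the original module): a minimal concrete
# sheet/document pair showing how the classes above fit together. The sheet layout,
# "_ExampleSummaryModel" and "summary.xlsx" are made-up names for demonstration only.
class _ExampleSummaryModel(BaseModel):
    title: str = "Example run"
class _ExampleSheet(BaseXLSXSheet):
    name = "summary"
    cell_styles = [("A1", BaseXLSXCellData(value="Title"))]
    column_dimensions = {"A": 20}
    def assemble_data_for_template(self, template_data: BaseModel):
        # dynamic part: place the model's title next to the static "Title" label
        return [("B1", BaseXLSXCellData(value=template_data.title))]
class _ExampleDocument(BaseXLSXDocument):
    file_name = "summary.xlsx"
    sheet1 = _ExampleSheet()
# _ExampleDocument().save_document(Path("."), _ExampleSummaryModel()) would then write
# ./summary.xlsx with the static labels and the model-driven cells merged per sheet.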
|
the-stack_106_29812 | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import hashlib
import time
from sawtooth_validator.journal.batch_injector import BatchInjector
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_validator.protobuf.transaction_pb2 import Transaction
from sawtooth_validator.protobuf.batch_pb2 import BatchHeader
from sawtooth_validator.protobuf.batch_pb2 import Batch
from sawtooth_validator.protobuf.block_pb2 import BlockHeader
from sawtooth_block_info.protobuf.block_info_pb2 import BlockInfoTxn
from sawtooth_block_info.protobuf.block_info_pb2 import BlockInfo
from sawtooth_block_info.common import FAMILY_NAME
from sawtooth_block_info.common import FAMILY_VERSION
from sawtooth_block_info.common import CONFIG_ADDRESS
from sawtooth_block_info.common import BLOCK_INFO_NAMESPACE
class BlockInfoInjector(BatchInjector):
"""Inject BlockInfo transactions at the beginning of blocks."""
def __init__(self, state_view_factory, signer):
self._state_view_factory = state_view_factory
self._signer = signer
def create_batch(self, block_info):
payload = BlockInfoTxn(block=block_info).SerializeToString()
public_key = self._signer.get_public_key().as_hex()
header = TransactionHeader(
signer_public_key=public_key,
family_name=FAMILY_NAME,
family_version=FAMILY_VERSION,
inputs=[CONFIG_ADDRESS, BLOCK_INFO_NAMESPACE],
outputs=[CONFIG_ADDRESS, BLOCK_INFO_NAMESPACE],
dependencies=[],
payload_sha512=hashlib.sha512(payload).hexdigest(),
batcher_public_key=public_key,
).SerializeToString()
transaction_signature = self._signer.sign(header)
transaction = Transaction(
header=header,
payload=payload,
header_signature=transaction_signature,
)
header = BatchHeader(
signer_public_key=public_key,
transaction_ids=[transaction_signature],
).SerializeToString()
batch_signature = self._signer.sign(header)
return Batch(
header=header,
transactions=[transaction],
header_signature=batch_signature,
)
def block_start(self, previous_block):
"""Returns an ordered list of batches to inject at the beginning of the
block. Can also return None if no batches should be injected.
Args:
previous_block (Block): The previous block.
Returns:
A list of batches to inject.
"""
previous_header_bytes = previous_block.header
previous_header = BlockHeader()
previous_header.ParseFromString(previous_header_bytes)
block_info = BlockInfo(
block_num=previous_header.block_num,
previous_block_id=previous_header.previous_block_id,
signer_public_key=previous_header.signer_public_key,
header_signature=previous_block.header_signature,
timestamp=int(time.time()))
return [self.create_batch(block_info)]
def before_batch(self, previous_block, batch):
pass
def after_batch(self, previous_block, batch):
pass
def block_end(self, previous_block, batches):
pass
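# Illustrative sketch (assumption, not part of the upstream module): building a
# standalone BlockInfo batch outside the journal. The caller supplies a
# sawtooth_signing Signer; the state view factory is not used by create_batch,
# so None is passed here purely for demonstration.
def _example_block_info_batch(signer):
    injector = BlockInfoInjector(state_view_factory=None, signer=signer)
    info = BlockInfo(block_num=1, timestamp=int(time.time()))
    return injector.create_batch(info)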
|
the-stack_106_29813 | ################################################################
## In principle all you have to setup is defined in this file ##
################################################################
from configManager import configMgr
from ROOT import kBlack,kWhite,kGray,kRed,kPink,kMagenta,kViolet,kBlue,kAzure,kCyan,kTeal,kGreen,kSpring,kYellow,kOrange
from configWriter import fitConfig,Measurement,Channel,Sample
from systematic import Systematic
from math import sqrt
import os
# Setup for ATLAS plotting
from ROOT import gROOT
#gROOT.LoadMacro("./macros/AtlasStyle.C")
import ROOT
#ROOT.SetAtlasStyle()
##########################
# Set observed and expected number of events in counting experiment
ndata = 7. # Number of events observed in data
nbkg = 5. # Number of predicted bkg events
nsig = 1. # Number of predicted signal events
nbkgErr = 1. # (Absolute) Statistical error on bkg estimate *from limited MC statistics*
nsigErr = 2. # (Absolute) Statistical error on signal estimate *from limited MC statistics*
lumiError = 0.039 # Relative luminosity uncertainty
# Set uncorrelated systematics for bkg and signal (1 +- relative uncertainties)
ucb = Systematic("uncorrl_bkg", configMgr.weights, 1.2,0.8, "user","userOverallSys") # 20% error up and down
# correlated systematic between background and signal (1 +- relative uncertainties)
# Keep SRs also in background fit configuration
configMgr.keepSignalRegionType = True
##########################
# Setting the parameters of the hypothesis test
configMgr.doExclusion=True # True=exclusion, False=discovery
#configMgr.nTOYs=5000
configMgr.calculatorType=2 # 2=asymptotic calculator, 0=frequentist calculator
configMgr.testStatType=3 # 3=one-sided profile likelihood test statistic (LHC default)
configMgr.nPoints=20       # number of signal-strength values scanned for the upper-limit determination of the signal strength.
configMgr.writeXML = True
##########################
# Give the analysis a name
configMgr.analysisName = "MyUpperLimitAnalysis_SS"
configMgr.outputFileName = "results/%s_Output.root"%configMgr.analysisName
# Define cuts
configMgr.cutsDict["UserRegion"] = "1."
# Define weights
configMgr.weights = "1."
# Define samples
bkgSample = Sample("Bkg",kGreen-9)
bkgSample.setStatConfig(True)
bkgSample.buildHisto([nbkg],"UserRegion","cuts",0.5)
bkgSample.addSystematic(ucb)
sigSample = Sample("Sig",kPink)
sigSample.setNormFactor("mu_SS",1.,0.,10.)
#sigSample.setStatConfig(True)
sigSample.setNormByTheory()
sigSample.buildHisto([nsig],"UserRegion","cuts",0.5)
dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto([ndata],"UserRegion","cuts",0.5)
# Define top-level
ana = configMgr.addFitConfig("SPlusB")
ana.addSamples([bkgSample,sigSample,dataSample])
ana.setSignalSample(sigSample)
# Define measurement
meas = ana.addMeasurement(name="NormalMeasurement",lumi=1.0,lumiErr=lumiError)
meas.addPOI("mu_SS")
meas.addParamSetting("Lumi",True)
# Add the channel
chan = ana.addChannel("cuts",["UserRegion"],1,0.5,1.5)
ana.addSignalChannels([chan])
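# Illustrative note (assumption): a config like this is normally executed through the
# HistFitter driver rather than run directly, e.g.
#   HistFitter.py -w -f -l MyUpperLimitAnalysis_SS.py
# where -w builds the workspace, -f runs the fit and -l computes the upper limit
# (exact flags may differ between HistFitter versions).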
# These lines are needed for the user analysis to run
# Make sure file is re-made when executing HistFactory
if configMgr.executeHistFactory:
if os.path.isfile("data/%s.root"%configMgr.analysisName):
os.remove("data/%s.root"%configMgr.analysisName)
|
the-stack_106_29814 | from uuid import uuid4
import pytest
from pydent.marshaller.base import add_schema
from pydent.marshaller.base import ModelRegistry
from pydent.marshaller.fields import Alias
from pydent.marshaller.fields import Callback
from pydent.marshaller.fields import Field
from pydent.marshaller.fields import Nested
from pydent.marshaller.fields import Relationship
class TestDump:
def test_dump_empty_data(self, base):
"""Dump should produce an empty dictionary."""
@add_schema
class MyModel(base):
pass
model = MyModel()
assert model.dump() == {}
def test_dump_empty_data_with_non_tracked_attrs(self, base):
"""Expect that non-tracked attributes are excluded from the dump."""
@add_schema
class MyModel(base):
pass
model = MyModel()
model.id = 4
assert model.dump() == {}
def test_dump_loaded_data(self, base):
"""Manually set data should appear in the dump."""
@add_schema
class MyModel(base):
pass
model = MyModel._set_data({"id": 5, "name": "MyName"})
assert model.dump() == {"id": 5, "name": "MyName"}
def test_dump_loaded_data_and_overwrite(self, base):
"""Manually set data can be overridden by setting attributes."""
@add_schema
class MyModel(base):
pass
model = MyModel._set_data({"id": 5, "name": "MyName"})
model.id = 6
assert model.dump() == {"id": 6, "name": "MyName"}
def test_dump_empty_field(self, base):
"""Empty fields should return an empty dictionary."""
@add_schema
class MyModel(base):
fields = dict(field=Field())
model = MyModel()
assert model.dump() == {}
def test_dump_field(self, base):
@add_schema
class MyModel(base):
fields = dict(field=Field())
model = MyModel._set_data({"name": "NAME"})
assert model.dump() == {"name": "NAME"}
def test_dump_with_new_data_key(self, base):
@add_schema
class MyModel(base):
fields = {
"field": Field(),
"source": Callback(
lambda s: getattr(s, "field"),
callback_args=(Callback.SELF,),
always_dump=True,
data_key="field",
),
}
model = MyModel({"field": 5})
assert model.field == model.source
print(model._get_data())
assert model.dump() == {"field": 5}
model.source = 6
assert model.field == model.source
assert model.dump() == {"field": 6}
model.field = 7
assert model.field == model.source
assert model.dump() == {"field": 7}
model = MyModel({"source": 5})
assert model.field == model.source
print(model._get_data())
assert model.dump() == {"field": 5}
model.source = 6
assert model.field == model.source
assert model.dump() == {"field": 6}
model.field = 7
assert model.field == model.source
assert model.dump() == {"field": 7}
print(model._get_data())
def test_alias(self, base):
"""Expect that alias fields refer to exactly the attribute set in the
alias.
That means, the 'source' field should refer to the 'field'
attribute.
"""
@add_schema
class MyModel(base):
fields = {"field": Field(), "source": Alias("field")}
model = MyModel({"field": 5})
assert model.field == model.source
assert model.dump() == {"field": 5}
model.source = 6
assert model.field == model.source
assert model.dump() == {"field": 6}
model.field = 7
assert model.field == model.source
assert model.dump() == {"field": 7}
model = MyModel({"source": 5})
assert model.field == model.source
print(model._get_data())
assert model.dump() == {"field": 5}
model.source = 6
assert model.field == model.source
assert model.dump() == {"field": 6}
model.field = 7
assert model.field == model.source
assert model.dump() == {"field": 7}
def test_dump_marshalling_field(self, base):
"""Expect the custom HTMLTag field to be properly
serialized/deserialized."""
class HTMLTag(Field):
def serialize(self, caller, val):
return "<{tag}>{val}</{tag}>".format(tag=self.data_key, val=val)
@add_schema
class MyModel(base):
fields = dict(h1=HTMLTag())
model = MyModel._set_data({"h1": "raw"})
assert model.h1 == "raw"
model.h1 = "This is a Heading 1 Title"
assert model.h1 == "This is a Heading 1 Title"
assert model.dump() == {"h1": "<h1>This is a Heading 1 Title</h1>"}
def test_always_dump(self, base):
"""Expect that fields with 'always_dump' are, by default, dumped as
        empty constructors even when they are empty."""
@add_schema
class MyModel(base):
fields = dict(
field1=Callback("find"), field2=Callback("find", always_dump=True)
)
def find(self):
return 100
m = MyModel()
assert m.dump() == {"field2": 100}
assert m.dump(include="field1") == {"field1": 100, "field2": 100}
assert m.dump(ignore="field2") == {}
def test_empty_list_field(self, base):
"""Expect."""
@add_schema
class ModelWithList(base):
fields = dict(mylist=Field())
model = ModelWithList()
model.mylist = []
assert model.mylist == []
model.mylist.append(5)
assert model.mylist == [5]
class TestNested:
"""Tests for nested serialization/deserialization."""
@pytest.fixture(scope="function")
def Company(self, base):
@add_schema
class Company(base):
pass
return Company
@pytest.fixture(scope="function")
def Publisher(self, base):
@add_schema
class Publisher(base):
fields = dict(author=Nested("Author"), company=Nested("Company"))
return Publisher
@pytest.fixture(scope="function")
def Author(self, base):
@add_schema
class Author(base):
fields = dict(
publisher=Nested("Publisher"), id=Field("id", allow_none=True)
)
return Author
def test_simple_nested(self, Author, Publisher):
author = Author._set_data(
{"name": "Richard Dawkings", "publisher": {"name": "Scotts Books"}}
)
print(author._get_data())
assert isinstance(author, Author)
assert isinstance(author.publisher, Publisher)
assert author.name == "Richard Dawkings"
assert author.publisher.name == "Scotts Books"
def test_double_nested(self, Author, Publisher, Company):
author = Author._set_data(
{
"name": "Samuel",
"publisher": {"name": "Archive 81", "company": {"name": "Damage Inc."}},
}
)
print(author._get_data())
assert isinstance(author, Author)
assert isinstance(author.publisher, Publisher)
assert isinstance(author.publisher.company, Company)
@pytest.fixture(scope="function")
def author_example_data(self):
data = {
"name": "Samuel",
"publisher": {"name": "Archive 81", "company": {"name": "Damage Inc."}},
}
return data
@pytest.fixture(scope="function")
def author_example(self, author_example_data, Author, Publisher, Company):
author = Author._set_data(author_example_data)
return author
def test_shared_data(self, author_example, author_example_data):
author = author_example
company = author.publisher.company
print(id(author_example_data["publisher"]))
assert author_example_data["publisher"] is author._get_data()["publisher"]
print(id(author._get_data()["publisher"]))
publisher = author.publisher
print(id(publisher._get_data()))
assert author._get_data()["publisher"] is publisher._get_data()
def test_double_nested_dump(self, author_example, author_example_data):
assert author_example._get_data() == author_example_data
assert author_example.publisher._get_data() == author_example_data["publisher"]
assert (
author_example.publisher.company._get_data()
== author_example_data["publisher"]["company"]
)
def test_del_nested(self, author_example, author_example_data):
author_example.name = "TIM"
assert author_example.name == "TIM"
author_example.publisher.name = "Holland"
assert author_example.publisher.name == "Holland"
author_example.publisher.company.name = "ABC"
assert author_example.publisher.company.name == "ABC"
del author_example.publisher.company
with pytest.raises(AttributeError):
author_example.publisher.company
assert "company" not in author_example._get_data()["publisher"]
assert "company" not in author_example.publisher._get_data()
def test_set_none_on_nested(self, author_example):
author_example.publisher = None
assert author_example.publisher is None
assert author_example._get_data()["publisher"] is None
def test_set_nested_attribute(self, author_example, Publisher):
author_example.publisher = None
assert author_example.publisher is None
assert author_example._get_data()["publisher"] is None
publisher = Publisher._set_data({"name": "P"})
author_example.publisher = publisher
assert author_example.publisher.name == "P"
assert author_example._get_data()["publisher"] is publisher._get_data()
def test_nested_dump(self, author_example, author_example_data):
new_company_name = str(uuid4())
expected_data = dict(author_example_data)
expected_data["publisher"]["company"]["name"] = new_company_name
author_example.publisher.company.name = new_company_name
expected_data_copy = dict(expected_data)
expected_data_copy.pop("publisher")
assert expected_data_copy == author_example.dump()
expected_data_copy = dict(expected_data["publisher"])
expected_data_copy.pop("company")
assert expected_data_copy == author_example.publisher.dump()
assert (
expected_data["publisher"]["company"]
== author_example.publisher.company.dump()
)
def test_load_a_model(self, base, author_example):
@add_schema
class AuthorList(base):
fields = dict(author=Nested("Author"))
author_list = AuthorList()
author_example.publisher.company.name = "Umbrella Corp"
author_list.author = author_example
assert author_list.author.publisher.company.name == "Umbrella Corp"
author_example.publisher.company.name = "LexCorp"
assert author_list.author.publisher.company.name == "LexCorp"
class TestRelationship:
@pytest.fixture(scope="function")
def Company(self, base):
@add_schema
class Company(base):
pass
return Company
@pytest.fixture(scope="function")
def Publisher(self, base):
@add_schema
class Publisher(base):
fields = dict(
company=Relationship(
"Company", "instantiate_model", 6, {"name": "MyCompany"}
)
)
def instantiate_model(self, model_name, model_id, name="Default"):
return ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
return Publisher
@pytest.fixture(scope="function")
def Author(self, base):
@add_schema
class Author(base):
fields = dict(
publisher=Relationship(
"Publisher", "instantiate_model", 4, {"name": "MyPublisher"}
)
)
def instantiate_model(self, model_name, model_id, name="Default"):
return ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
return Author
@pytest.mark.parametrize(
"model,include,expected",
[
("Company", None, {}),
("Publisher", None, {}),
("Author", None, {}),
pytest.param(
"Publisher",
"company",
{"company": {"id": 6, "name": "MyCompany"}},
id="include 1 layer nested",
),
pytest.param(
"Author",
"publisher",
{"publisher": {"id": 4, "name": "MyPublisher"}},
id="include 1 layer nested",
),
pytest.param(
"Author",
{"publisher": "company"},
{
"publisher": {
"id": 4,
"name": "MyPublisher",
"company": {"id": 6, "name": "MyCompany"},
}
},
id="include 2 layer nested",
),
],
)
def test_nested_dump_with_include(
self, base, Author, Publisher, Company, model, include, expected
):
instance = ModelRegistry.get_model(model)()
assert instance.dump(include=include) == expected
@pytest.mark.parametrize(
"model,only,expected",
[
pytest.param(
"Author", "publisher", {"publisher": {"name": "MyPublisher", "id": 4}}
),
pytest.param(
"Author", {"publisher": "name"}, {"publisher": {"name": "MyPublisher"}}
),
pytest.param("Author", {"publisher": "id"}, {"publisher": {"id": 4}}),
pytest.param(
"Author",
{"publisher": "company"},
{"publisher": {"company": {"name": "MyCompany", "id": 6}}},
),
pytest.param(
"Author",
{"publisher": {"company": "id"}},
{"publisher": {"company": {"id": 6}}},
),
],
)
def test_relationship_dump_with_only(
self, base, Author, Publisher, Company, model, only, expected
):
instance = ModelRegistry.get_model(model)()
assert instance.dump(only=only) == expected
def test_relationship_dump_ignore(self, base, Author):
instance = Author._set_data({"name": "MyName", "id": 5})
assert instance.dump() == {"name": "MyName", "id": 5}
assert instance.dump(ignore="name") == {"id": 5}
assert instance.dump(ignore=["name", "id"]) == {}
def test_basic_relationship(self, base):
@add_schema
class Publisher(base):
pass
@add_schema
class Author(base):
fields = dict(
publisher=Relationship(
"Publisher", "instantiate_model", 4, {"name": "MyPublisher"}
),
cache=True,
)
def instantiate_model(self, model_name, model_id, name="Default"):
return ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
author = Author()
assert author.dump() == {}
assert isinstance(
author.publisher, Publisher
), "publisher attribute should be a Publisher type"
assert (
author.publisher._get_data() is author._get_data()["publisher"]
), "data should be shared between the Author and Publisher"
assert author._get_data() == {"publisher": {"id": 4, "name": "MyPublisher"}}
assert author.dump() == {}
assert author.dump(include=["publisher"]) == author._get_data()
def test_many_relationship(self, base):
@add_schema
class Publisher(base):
pass
@add_schema
class Author(base):
fields = dict(
publisher=Relationship(
"Publisher",
"instantiate_model",
4,
{"name": "MyPublisher"},
many=True,
)
)
def instantiate_model(self, model_name, model_id, name="Default"):
models = []
for i in range(3):
models.append(
ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
)
return models
author = Author()
print(author.dump())
print(author.dump(include="publisher"))
assert len(author.publisher) == 3
assert len(author.dump(include="publisher")["publisher"]) == 3
author.publisher = [
Publisher._set_data({"id": 3}),
Publisher._set_data({"id": 5}),
]
assert len(author.publisher) == 2
assert len(author.dump(include="publisher")["publisher"]) == 2
assert author.publisher[0].id == 3
assert author.publisher[1].id == 5
author.publisher.append(Publisher())
assert len(author.publisher) == 3
assert len(author.dump(include="publisher")["publisher"]) == 3
def test_load_relationship(self, base):
@add_schema
class Publisher(base):
pass
@add_schema
class Author(base):
fields = dict(
publishers=Relationship(
"Publisher",
"instantiate_model",
4,
{"name": "MyPublisher"},
many=True,
)
)
def instantiate_model(self, model_name, model_id, name="Default"):
models = []
for i in range(3):
models.append(
ModelRegistry.get_model(model_name)._set_data(
{"id": model_id, "name": name}
)
)
return models
publisher_data = {"name": "Torr Books"}
author = Author._set_data(
{"name": "Steven King", "publishers": [publisher_data]}
)
assert len(author.publishers) == 1
assert isinstance(author.publishers[0], Publisher)
assert author.publishers[0]._get_data() is publisher_data
del author.publishers
assert len(author.publishers) == 3
assert isinstance(author.publishers[0], Publisher)
|
the-stack_106_29815 | # Copyright (c) 2008-2009 Pedro Matiello <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Radial search filter.
"""
class radius(object):
"""
Radial search filter.
    This keeps the search contained inside a specified limit (the search radius).
"""
def __init__(self, radius):
"""
Initialize the filter.
@type radius: number
@param radius: Search radius.
"""
self.graph = None
self.spanning_tree = None
self.radius = radius
self.done = False
def configure(self, graph, spanning_tree):
"""
Configure the filter.
@type graph: graph
@param graph: Graph.
@type spanning_tree: dictionary
@param spanning_tree: Spanning tree.
"""
self.graph = graph
self.spanning_tree = spanning_tree
def __call__(self, node, parent):
"""
Decide if the given node should be included in the search process.
@type node: node
@param node: Given node.
@type parent: node
@param parent: Given node's parent in the spanning tree.
@rtype: boolean
@return: Whether the given node should be included in the search process.
"""
def cost_to_root(node):
if (node is not None):
return cost_to_parent(node, st[node]) + cost_to_root(st[node])
else:
return 0
def cost_to_parent(node, parent):
if (parent is not None):
return gr.get_edge_weight(parent, node)
else:
return 0
gr = self.graph
st = self.spanning_tree
cost = cost_to_parent(node, parent) + cost_to_root(parent)
if (cost <= self.radius):
return True
else:
            return False
|
the-stack_106_29816 | #!/usr/bin/env python3
import json
import os
import subprocess
import time
import numpy as np
import unittest
from collections import Counter
from pathlib import Path
import cereal.messaging as messaging
from cereal.services import service_list
from common.basedir import BASEDIR
from common.timeout import Timeout
from common.params import Params
from selfdrive.hardware import TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.test.helpers import set_params_enabled
from tools.lib.logreader import LogReader
# Baseline CPU usage by process
PROCS = {
"selfdrive.controls.controlsd": 50.0,
"./loggerd": 45.0,
"./locationd": 9.1,
"selfdrive.controls.plannerd": 20.0,
"./_ui": 15.0,
"selfdrive.locationd.paramsd": 9.1,
"./camerad": 7.07,
"./_sensord": 6.17,
"selfdrive.controls.radard": 5.67,
"./_modeld": 4.48,
"./boardd": 3.63,
"./_dmonitoringmodeld": 2.67,
"selfdrive.thermald.thermald": 2.41,
"selfdrive.locationd.calibrationd": 2.0,
"./_soundd": 2.0,
"selfdrive.monitoring.dmonitoringd": 1.90,
"./proclogd": 1.54,
"selfdrive.logmessaged": 0.2,
"./clocksd": 0.02,
"./ubloxd": 0.02,
"selfdrive.tombstoned": 0,
"./logcatd": 0,
}
if TICI:
PROCS.update({
"./loggerd": 60.0,
"selfdrive.controls.controlsd": 28.0,
"./camerad": 31.0,
"./_ui": 21.0,
"selfdrive.controls.plannerd": 12.0,
"selfdrive.locationd.paramsd": 5.0,
"./_dmonitoringmodeld": 10.0,
"selfdrive.thermald.thermald": 1.5,
})
def cputime_total(ct):
return ct.cpuUser + ct.cpuSystem + ct.cpuChildrenUser + ct.cpuChildrenSystem
def check_cpu_usage(first_proc, last_proc):
result = "------------------------------------------------\n"
result += "------------------ CPU Usage -------------------\n"
result += "------------------------------------------------\n"
r = True
dt = (last_proc.logMonoTime - first_proc.logMonoTime) / 1e9
for proc_name, normal_cpu_usage in PROCS.items():
first, last = None, None
try:
first = [p for p in first_proc.procLog.procs if proc_name in p.cmdline][0]
last = [p for p in last_proc.procLog.procs if proc_name in p.cmdline][0]
cpu_time = cputime_total(last) - cputime_total(first)
cpu_usage = cpu_time / dt * 100.
if cpu_usage > max(normal_cpu_usage * 1.15, normal_cpu_usage + 5.0):
# cpu usage is high while playing sounds
if proc_name == "./_soundd" and cpu_usage < 25.:
continue
result += f"Warning {proc_name} using more CPU than normal\n"
r = False
elif cpu_usage < min(normal_cpu_usage * 0.65, max(normal_cpu_usage - 1.0, 0.0)):
result += f"Warning {proc_name} using less CPU than normal\n"
r = False
result += f"{proc_name.ljust(35)} {cpu_usage:.2f}%\n"
except IndexError:
result += f"{proc_name.ljust(35)} NO METRICS FOUND {first=} {last=}\n"
r = False
result += "------------------------------------------------\n"
print(result)
return r
class TestOnroad(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = "TOYOTA COROLLA TSS2 2019"
set_params_enabled()
# Make sure athena isn't running
Params().delete("DongleId")
Params().delete("AthenadPid")
os.system("pkill -9 -f athena")
logger_root = Path(ROOT)
initial_segments = set()
if logger_root.exists():
initial_segments = set(Path(ROOT).iterdir())
# start manager and run openpilot for a minute
try:
manager_path = os.path.join(BASEDIR, "selfdrive/manager/manager.py")
proc = subprocess.Popen(["python", manager_path])
sm = messaging.SubMaster(['carState'])
with Timeout(150, "controls didn't start"):
while sm.rcv_frame['carState'] < 0:
sm.update(1000)
# make sure we get at least two full segments
cls.segments = []
with Timeout(300, "timed out waiting for logs"):
while len(cls.segments) < 3:
new_paths = set()
if logger_root.exists():
new_paths = set(logger_root.iterdir()) - initial_segments
segs = [p for p in new_paths if "--" in str(p)]
cls.segments = sorted(segs, key=lambda s: int(str(s).rsplit('--')[-1]))
time.sleep(5)
finally:
proc.terminate()
if proc.wait(60) is None:
proc.kill()
cls.lr = list(LogReader(os.path.join(str(cls.segments[1]), "rlog.bz2")))
def test_cloudlog_size(self):
msgs = [m for m in self.lr if m.which() == 'logMessage']
total_size = sum(len(m.as_builder().to_bytes()) for m in msgs)
self.assertLess(total_size, 3.5e5)
cnt = Counter([json.loads(m.logMessage)['filename'] for m in msgs])
big_logs = [f for f, n in cnt.most_common(3) if n / sum(cnt.values()) > 30.]
self.assertEqual(len(big_logs), 0, f"Log spam: {big_logs}")
def test_cpu_usage(self):
proclogs = [m for m in self.lr if m.which() == 'procLog']
self.assertGreater(len(proclogs), service_list['procLog'].frequency * 45, "insufficient samples")
cpu_ok = check_cpu_usage(proclogs[0], proclogs[-1])
self.assertTrue(cpu_ok)
def test_model_timings(self):
#TODO this went up when plannerd cpu usage increased, why?
cfgs = [("modelV2", 0.035, 0.03), ("driverState", 0.025, 0.021)]
for (s, instant_max, avg_max) in cfgs:
ts = [getattr(getattr(m, s), "modelExecutionTime") for m in self.lr if m.which() == s]
self.assertLess(min(ts), instant_max, f"high '{s}' execution time: {min(ts)}")
self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}")
if __name__ == "__main__":
unittest.main()
|
the-stack_106_29817 | from typing import List
from catalyst.dl.callbacks import MeterMetricsCallback
from catalyst.tools import meters
class AUCCallback(MeterMetricsCallback):
"""Calculates the AUC per class for each loader.
.. note::
Currently, supports binary and multi-label cases.
"""
def __init__(
self,
input_key: str = "targets",
output_key: str = "logits",
prefix: str = "auc",
class_names: List[str] = None,
num_classes: int = 2,
activation: str = "Sigmoid",
):
"""
Args:
input_key (str): input key to use for auc calculation
specifies our ``y_true``
output_key (str): output key to use for auc calculation;
specifies our ``y_pred``
prefix (str): name to display for auc when printing
class_names (List[str]): class names to display in the logs.
If None, defaults to indices for each class, starting from 0
num_classes (int): Number of classes; must be > 1
activation (str): An torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax2d'``
"""
num_classes = num_classes if class_names is None else len(class_names)
meter_list = [meters.AUCMeter() for _ in range(num_classes)]
super().__init__(
metric_names=[prefix],
meter_list=meter_list,
input_key=input_key,
output_key=output_key,
class_names=class_names,
num_classes=num_classes,
activation=activation,
)
__all__ = ["AUCCallback"]
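# Illustrative sketch (assumption, not part of the upstream module): attaching the
# callback to a catalyst runner; the model, loaders and class names below are
# placeholders, not part of this file.
#
#   runner.train(
#       model=model,
#       loaders={"train": train_loader, "valid": valid_loader},
#       callbacks=[AUCCallback(input_key="targets", output_key="logits",
#                              class_names=["cat", "dog"])],
#       num_epochs=5,
#   )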
|
the-stack_106_29818 | import torch
import torchvision.datasets as dsets
from torchvision import transforms
class Data_Loader():
def __init__(self, train, dataset, image_path, image_size, batch_size, shuf=True):
self.dataset = dataset
self.path = image_path
self.imsize = image_size
self.batch = batch_size
self.shuf = shuf
self.train = train
def transform(self, resize, totensor, normalize, centercrop):
options = []
if centercrop:
options.append(transforms.CenterCrop(160))
if resize:
options.append(transforms.Resize((self.imsize,self.imsize)))
if totensor:
options.append(transforms.ToTensor())
if normalize:
options.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
transform = transforms.Compose(options)
return transform
def load_lsun(self, classes=['church_outdoor_train','classroom_train']):
transforms = self.transform(True, True, True, False)
dataset = dsets.LSUN(self.path, classes=classes, transform=transforms)
return dataset
def load_imagenet(self):
transforms = self.transform(True, True, True, True)
dataset = dsets.ImageFolder(self.path+'/imagenet', transform=transforms)
return dataset
def load_celeb(self):
transforms = self.transform(True, True, True, True)
dataset = dsets.ImageFolder(self.path+'/CelebA', transform=transforms)
return dataset
def load_off(self):
transforms = self.transform(True, True, True, False)
dataset = dsets.ImageFolder(self.path, transform=transforms)
return dataset
def loader(self):
if self.dataset == 'lsun':
dataset = self.load_lsun()
elif self.dataset == 'imagenet':
dataset = self.load_imagenet()
elif self.dataset == 'celeb':
dataset = self.load_celeb()
elif self.dataset == 'off':
dataset = self.load_off()
print('dataset',len(dataset))
loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=self.batch,
shuffle=self.shuf,
num_workers=2, # subprocess number
drop_last=True) # drop if incomplete batch size
return loader
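# Illustrative sketch (assumption, not part of the upstream module): a typical way to
# build and consume the loader above. The dataset path is a placeholder and the data
# must already exist on disk for this to run.
def _example_usage():
    data_loader = Data_Loader(train=True, dataset='celeb', image_path='./data',
                              image_size=64, batch_size=32, shuf=True)
    loader = data_loader.loader()
    for real_images, _ in loader:
        # real_images has shape (32, 3, 64, 64), normalized to roughly [-1, 1]
        break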
|
the-stack_106_29819 | # -*- coding: utf-8 -*-
import collections
import datetime
import logging
import os
import dateutil.parser
import dateutil.tz
from auth import Auth
from elasticsearch import RequestsHttpConnection
from elasticsearch.client import Elasticsearch
from six import string_types
logging.basicConfig()
elastalert_logger = logging.getLogger('elastalert')
def new_get_event_ts(ts_field):
""" Constructs a lambda that may be called to extract the timestamp field
from a given event.
:returns: A callable function that takes an event and outputs that event's
timestamp field.
"""
return lambda event: lookup_es_key(event[0], ts_field)
def _find_es_dict_by_key(lookup_dict, term):
""" Performs iterative dictionary search based upon the following conditions:
1. Subkeys may either appear behind a full stop (.) or at one lookup_dict level lower in the tree.
2. No wildcards exist within the provided ES search terms (these are treated as string literals)
This is necessary to get around inconsistencies in ES data.
For example:
{'ad.account_name': 'bob'}
Or:
{'csp_report': {'blocked_uri': 'bob.com'}}
And even:
{'juniper_duo.geoip': {'country_name': 'Democratic People's Republic of Korea'}}
We want a search term of form "key.subkey.subsubkey" to match in all cases.
:returns: A tuple with the first element being the dict that contains the key and the second
element which is the last subkey used to access the target specified by the term. None is
returned for both if the key can not be found.
"""
if term in lookup_dict:
return lookup_dict, term
# If the term does not match immediately, perform iterative lookup:
# 1. Split the search term into tokens
# 2. Recurrently concatenate these together to traverse deeper into the dictionary,
# clearing the subkey at every successful lookup.
#
# This greedy approach is correct because subkeys must always appear in order,
# preferring full stops and traversal interchangeably.
#
# Subkeys will NEVER be duplicated between an alias and a traversal.
#
# For example:
# {'foo.bar': {'bar': 'ray'}} to look up foo.bar will return {'bar': 'ray'}, not 'ray'
dict_cursor = lookup_dict
subkeys = term.split('.')
subkey = ''
while len(subkeys) > 0:
if not dict_cursor:
return {}, None
subkey += subkeys.pop(0)
if subkey in dict_cursor:
if len(subkeys) == 0:
break
dict_cursor = dict_cursor[subkey]
subkey = ''
elif len(subkeys) == 0:
# If there are no keys left to match, return None values
dict_cursor = None
subkey = None
else:
subkey += '.'
return dict_cursor, subkey
def set_es_key(lookup_dict, term, value):
""" Looks up the location that the term maps to and sets it to the given value.
:returns: True if the value was set successfully, False otherwise.
"""
value_dict, value_key = _find_es_dict_by_key(lookup_dict, term)
if value_dict is not None:
value_dict[value_key] = value
return True
return False
def lookup_es_key(lookup_dict, term):
""" Performs iterative dictionary search for the given term.
:returns: The value identified by term or None if it cannot be found.
"""
value_dict, value_key = _find_es_dict_by_key(lookup_dict, term)
return None if value_key is None else value_dict[value_key]
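# Illustrative sketch (not part of the upstream module): the lookups described in the
# docstrings above, using the same example shapes of ES data.
def _example_lookup_es_key():
    assert lookup_es_key({'ad.account_name': 'bob'}, 'ad.account_name') == 'bob'
    assert lookup_es_key({'csp_report': {'blocked_uri': 'bob.com'}},
                         'csp_report.blocked_uri') == 'bob.com'
    # a term that cannot be resolved returns None
    assert lookup_es_key({'foo': {'bar': 'ray'}}, 'foo.baz') is None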
def ts_to_dt(timestamp):
if isinstance(timestamp, datetime.datetime):
return timestamp
dt = dateutil.parser.parse(timestamp)
# Implicitly convert local timestamps to UTC
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
def dt_to_ts(dt):
if not isinstance(dt, datetime.datetime):
logging.warning('Expected datetime, got %s' % (type(dt)))
return dt
ts = dt.isoformat()
# Round microseconds to milliseconds
if dt.tzinfo is None:
# Implicitly convert local times to UTC
return ts + 'Z'
# isoformat() uses microsecond accuracy and timezone offsets
# but we should try to use millisecond accuracy and Z to indicate UTC
return ts.replace('000+00:00', 'Z').replace('+00:00', 'Z')
def ts_to_dt_with_format(timestamp, ts_format):
if isinstance(timestamp, datetime.datetime):
return timestamp
dt = datetime.datetime.strptime(timestamp, ts_format)
# Implicitly convert local timestamps to UTC
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
def dt_to_ts_with_format(dt, ts_format):
if not isinstance(dt, datetime.datetime):
logging.warning('Expected datetime, got %s' % (type(dt)))
return dt
ts = dt.strftime(ts_format)
return ts
def ts_now():
return datetime.datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
def inc_ts(timestamp, milliseconds=1):
"""Increment a timestamp by milliseconds."""
dt = ts_to_dt(timestamp)
dt += datetime.timedelta(milliseconds=milliseconds)
return dt_to_ts(dt)
def pretty_ts(timestamp, tz=True):
"""Pretty-format the given timestamp (to be printed or logged hereafter).
If tz, the timestamp will be converted to local time.
Format: YYYY-MM-DD HH:MM TZ"""
dt = timestamp
if not isinstance(timestamp, datetime.datetime):
dt = ts_to_dt(timestamp)
if tz:
dt = dt.astimezone(dateutil.tz.tzlocal())
return dt.strftime('%Y-%m-%d %H:%M %Z')
def ts_add(ts, td):
""" Allows a timedelta (td) add operation on a string timestamp (ts) """
return dt_to_ts(ts_to_dt(ts) + td)
def hashable(obj):
""" Convert obj to a hashable obj.
We use the value of some fields from Elasticsearch as keys for dictionaries. This means
that whatever Elasticsearch returns must be hashable, and it sometimes returns a list or dict."""
if not obj.__hash__:
return str(obj)
return obj
def format_index(index, start, end, add_extra=False):
""" Takes an index, specified using strftime format, start and end time timestamps,
and outputs a wildcard based index string to match all possible timestamps. """
# Convert to UTC
start -= start.utcoffset()
end -= end.utcoffset()
original_start = start
indices = set()
while start.date() <= end.date():
indices.add(start.strftime(index))
start += datetime.timedelta(days=1)
num = len(indices)
if add_extra:
while len(indices) == num:
original_start -= datetime.timedelta(days=1)
new_index = original_start.strftime(index)
assert new_index != index, "You cannot use a static index with search_extra_index"
indices.add(new_index)
return ','.join(indices)
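# Illustrative sketch (not part of the upstream module): expanding a strftime-style
# index pattern over a two-day window. The set is joined in arbitrary order, so the
# example compares sets rather than the raw string.
def _example_format_index():
    start = datetime.datetime(2019, 1, 1, tzinfo=dateutil.tz.tzutc())
    end = datetime.datetime(2019, 1, 2, tzinfo=dateutil.tz.tzutc())
    result = format_index('logstash-%Y.%m.%d', start, end)
    assert set(result.split(',')) == {'logstash-2019.01.01', 'logstash-2019.01.02'}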
class EAException(Exception):
pass
def seconds(td):
return td.seconds + td.days * 24 * 3600
def total_seconds(dt):
    # For python 2.6 compatibility
if dt is None:
return 0
elif hasattr(dt, 'total_seconds'):
return dt.total_seconds()
else:
return (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6
def dt_to_int(dt):
dt = dt.replace(tzinfo=None)
return int(total_seconds((dt - datetime.datetime.utcfromtimestamp(0))) * 1000)
def unixms_to_dt(ts):
return unix_to_dt(float(ts) / 1000)
def unix_to_dt(ts):
dt = datetime.datetime.utcfromtimestamp(float(ts))
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
def dt_to_unix(dt):
return int(total_seconds(dt - datetime.datetime(1970, 1, 1, tzinfo=dateutil.tz.tzutc())))
def dt_to_unixms(dt):
return int(dt_to_unix(dt) * 1000)
def cronite_datetime_to_timestamp(self, d):
"""
Converts a `datetime` object `d` into a UNIX timestamp.
"""
if d.tzinfo is not None:
d = d.replace(tzinfo=None) - d.utcoffset()
return total_seconds((d - datetime.datetime(1970, 1, 1)))
def add_raw_postfix(field, is_five_or_above):
if is_five_or_above:
end = '.keyword'
else:
end = '.raw'
if not field.endswith(end):
field += end
return field
def replace_dots_in_field_names(document):
""" This method destructively modifies document by replacing any dots in
field names with an underscore. """
for key, value in list(document.items()):
if isinstance(value, dict):
value = replace_dots_in_field_names(value)
if isinstance(key, string_types) and key.find('.') != -1:
del document[key]
document[key.replace('.', '_')] = value
return document
def elasticsearch_client(conf):
""" returns an Elasticsearch instance configured using an es_conn_config """
es_conn_conf = build_es_conn_config(conf)
auth = Auth()
es_conn_conf['http_auth'] = auth(host=es_conn_conf['es_host'],
username=es_conn_conf['es_username'],
password=es_conn_conf['es_password'],
aws_region=es_conn_conf['aws_region'],
profile_name=es_conn_conf['profile'])
return Elasticsearch(host=es_conn_conf['es_host'],
port=es_conn_conf['es_port'],
url_prefix=es_conn_conf['es_url_prefix'],
use_ssl=es_conn_conf['use_ssl'],
verify_certs=es_conn_conf['verify_certs'],
ca_certs=es_conn_conf['ca_certs'],
connection_class=RequestsHttpConnection,
http_auth=es_conn_conf['http_auth'],
timeout=es_conn_conf['es_conn_timeout'],
send_get_body_as=es_conn_conf['send_get_body_as'],
client_cert=es_conn_conf['client_cert'],
client_key=es_conn_conf['client_key'])
def build_es_conn_config(conf):
""" Given a conf dictionary w/ raw config properties 'use_ssl', 'es_host', 'es_port'
'es_username' and 'es_password', this will return a new dictionary
with properly initialized values for 'es_host', 'es_port', 'use_ssl' and 'http_auth' which
will be a basicauth username:password formatted string """
parsed_conf = {}
parsed_conf['use_ssl'] = os.environ.get('ES_USE_SSL', False)
parsed_conf['verify_certs'] = True
parsed_conf['ca_certs'] = None
parsed_conf['client_cert'] = None
parsed_conf['client_key'] = None
parsed_conf['http_auth'] = None
parsed_conf['es_username'] = None
parsed_conf['es_password'] = None
parsed_conf['aws_region'] = None
parsed_conf['profile'] = None
parsed_conf['es_host'] = os.environ.get('ES_HOST', conf['es_host'])
parsed_conf['es_port'] = int(os.environ.get('ES_PORT', conf['es_port']))
parsed_conf['es_url_prefix'] = ''
parsed_conf['es_conn_timeout'] = conf.get('es_conn_timeout', 20)
parsed_conf['send_get_body_as'] = conf.get('es_send_get_body_as', 'GET')
if 'es_username' in conf:
parsed_conf['es_username'] = os.environ.get('ES_USERNAME', conf['es_username'])
parsed_conf['es_password'] = os.environ.get('ES_PASSWORD', conf['es_password'])
if 'aws_region' in conf:
parsed_conf['aws_region'] = conf['aws_region']
# Deprecated
if 'boto_profile' in conf:
logging.warning('Found deprecated "boto_profile", use "profile" instead!')
parsed_conf['profile'] = conf['boto_profile']
if 'profile' in conf:
parsed_conf['profile'] = conf['profile']
if 'use_ssl' in conf:
parsed_conf['use_ssl'] = conf['use_ssl']
if 'verify_certs' in conf:
parsed_conf['verify_certs'] = conf['verify_certs']
if 'ca_certs' in conf:
parsed_conf['ca_certs'] = conf['ca_certs']
if 'client_cert' in conf:
parsed_conf['client_cert'] = conf['client_cert']
if 'client_key' in conf:
parsed_conf['client_key'] = conf['client_key']
if 'es_url_prefix' in conf:
parsed_conf['es_url_prefix'] = conf['es_url_prefix']
return parsed_conf
def parse_duration(value):
"""Convert ``unit=num`` spec into a ``timedelta`` object."""
unit, num = value.split('=')
return datetime.timedelta(**{unit: int(num)})
def parse_deadline(value):
"""Convert ``unit=num`` spec into a ``datetime`` object."""
duration = parse_duration(value)
return ts_now() + duration
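# Illustrative sketch (not part of the upstream module): the ``unit=num`` spec in use.
def _example_parse_duration():
    assert parse_duration('minutes=5') == datetime.timedelta(minutes=5)
    assert parse_duration('days=2') == datetime.timedelta(days=2)
    # parse_deadline('hours=1') would similarly return ts_now() plus one hour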
def flatten_dict(dct, delim='.', prefix=''):
ret = {}
for key, val in dct.items():
if type(val) == dict:
ret.update(flatten_dict(val, prefix=prefix + key + delim))
else:
ret[prefix + key] = val
return ret
def resolve_string(string, match, missing_text='<MISSING VALUE>'):
"""
Given a python string that may contain references to fields on the match dictionary,
the strings are replaced using the corresponding values.
However, if the referenced field is not found on the dictionary,
it is replaced by a default string.
Strings can be formatted using the old-style format ('%(field)s') or
the new-style format ('{match[field]}').
:param string: A string that may contain references to values of the 'match' dictionary.
:param match: A dictionary with the values to replace where referenced by keys in the string.
    :param missing_text: The default text to replace a formatter with if the field doesn't exist.
"""
flat_match = flatten_dict(match)
flat_match.update(match)
dd_match = collections.defaultdict(lambda: missing_text, flat_match)
dd_match['_missing_value'] = missing_text
while True:
try:
string = string % dd_match
string = string.format(**dd_match)
break
except KeyError as e:
if '{%s}' % e.message not in string:
break
string = string.replace('{%s}' % e.message, '{_missing_value}')
return string
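# Illustrative sketch (not part of the upstream module): old-style and new-style
# lookups resolved against a match dict, including an unknown field.
def _example_resolve_string():
    match = {'username': 'bob', 'source': {'ip': '10.0.0.1'}}
    # nested fields are reachable through the flattened dotted keys
    assert resolve_string('user %(username)s at %(source.ip)s', match) == 'user bob at 10.0.0.1'
    assert resolve_string('hello {username}', match) == 'hello bob'
    # unknown fields fall back to the default placeholder text
    assert resolve_string('host: %(hostname)s', match) == 'host: <MISSING VALUE>'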
|
the-stack_106_29820 | """
Utilities for end-users.
"""
from __future__ import absolute_import
import __main__
from collections import namedtuple
import logging
import traceback
import re
import os
import sys
from parso import split_lines
from jedi import Interpreter
from jedi.api.helpers import get_on_completion_name
READLINE_DEBUG = False
def setup_readline(namespace_module=__main__):
"""
Install Jedi completer to :mod:`readline`.
This function setups :mod:`readline` to use Jedi in Python interactive
shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically
``$HOME/.pythonrc.py``), you can add this piece of code::
try:
from jedi.utils import setup_readline
setup_readline()
except ImportError:
# Fallback to the stdlib readline completer if it is installed.
# Taken from http://docs.python.org/2/library/rlcompleter.html
print("Jedi is not installed, falling back to readline")
try:
import readline
import rlcompleter
readline.parse_and_bind("tab: complete")
except ImportError:
print("Readline is not installed either. No tab completion is enabled.")
This will fallback to the readline completer if Jedi is not installed.
The readline completer will only complete names in the global namespace,
so for example::
ran<TAB>
will complete to ``range``
with both Jedi and readline, but::
range(10).cou<TAB>
will show complete to ``range(10).count`` only with Jedi.
You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to
your shell profile (usually ``.bash_profile`` or ``.profile`` if you use
bash).
"""
if READLINE_DEBUG:
logging.basicConfig(
filename='/tmp/jedi.log',
filemode='a',
level=logging.DEBUG
)
class JediRL(object):
def complete(self, text, state):
"""
This complete stuff is pretty weird, a generator would make
a lot more sense, but probably due to backwards compatibility
                this is still the way it works.
The only important part is stuff in the ``state == 0`` flow,
everything else has been copied from the ``rlcompleter`` std.
library module.
"""
if state == 0:
sys.path.insert(0, os.getcwd())
# Calling python doesn't have a path, so add to sys.path.
try:
logging.debug("Start REPL completion: " + repr(text))
interpreter = Interpreter(text, [namespace_module.__dict__])
lines = split_lines(text)
position = (len(lines), len(lines[-1]))
name = get_on_completion_name(
interpreter._module_node,
lines,
position
)
before = text[:len(text) - len(name)]
completions = interpreter.completions()
logging.debug("REPL completions: %s", completions)
except:
logging.error("REPL Completion error:\n" + traceback.format_exc())
raise
finally:
sys.path.pop(0)
self.matches = [before + c.name_with_symbols for c in completions]
try:
return self.matches[state]
except IndexError:
return None
try:
# Need to import this one as well to make sure it's executed before
# this code. This didn't use to be an issue until 3.3. Starting with
# 3.4 this is different, it always overwrites the completer if it's not
# already imported here.
import rlcompleter # noqa: F401
import readline
except ImportError:
print("Jedi: Module readline not available.")
else:
readline.set_completer(JediRL().complete)
readline.parse_and_bind("tab: complete")
# jedi itself does the case matching
readline.parse_and_bind("set completion-ignore-case on")
# because it's easier to hit the tab just once
readline.parse_and_bind("set show-all-if-unmodified")
readline.parse_and_bind("set show-all-if-ambiguous on")
# don't repeat all the things written in the readline all the time
readline.parse_and_bind("set completion-prefix-display-length 2")
# No delimiters, Jedi handles that.
readline.set_completer_delims('')
def version_info():
"""
Returns a namedtuple of Jedi's version, similar to Python's
``sys.version_info``.
"""
Version = namedtuple('Version', 'major, minor, micro')
from jedi import __version__
tupl = re.findall(r'[a-z]+|\d+', __version__)
return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
|
the-stack_106_29822 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"ListingGroupFilterBiddingCategoryLevelEnum",},
)
class ListingGroupFilterBiddingCategoryLevelEnum(proto.Message):
r"""Container for enum describing the levels of bidding category
used in ListingGroupFilterDimension.
"""
class ListingGroupFilterBiddingCategoryLevel(proto.Enum):
r"""The level of the listing group filter bidding category."""
UNSPECIFIED = 0
UNKNOWN = 1
LEVEL1 = 2
LEVEL2 = 3
LEVEL3 = 4
LEVEL4 = 5
LEVEL5 = 6
__all__ = tuple(sorted(__protobuf__.manifest))
|
the-stack_106_29823 | import os
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics import RootMeanSquaredError
import pytest
def test_zero_div():
rmse = RootMeanSquaredError()
with pytest.raises(NotComputableError):
rmse.compute()
def test_compute():
rmse = RootMeanSquaredError()
y_pred = torch.Tensor([[2.0], [-2.0]])
y = torch.zeros(2)
rmse.update((y_pred, y))
assert isinstance(rmse.compute(), float)
assert rmse.compute() == 2.0
rmse.reset()
y_pred = torch.Tensor([[3.0], [-3.0]])
y = torch.zeros(2)
rmse.update((y_pred, y))
assert isinstance(rmse.compute(), float)
assert rmse.compute() == 3.0
def _test_distrib_itegration(device):
import numpy as np
import torch.distributed as dist
from ignite.engine import Engine
rank = dist.get_rank()
n_iters = 100
s = 50
offset = n_iters * s
y_true = torch.arange(0, offset * dist.get_world_size(), dtype=torch.float).to(device)
y_preds = (rank + 1) * torch.ones(offset, dtype=torch.float).to(device)
def update(engine, i):
return y_preds[i * s : (i + 1) * s], y_true[i * s + offset * rank : (i + 1) * s + offset * rank]
engine = Engine(update)
m = RootMeanSquaredError(device=device)
m.attach(engine, "rmse")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
assert "rmse" in engine.state.metrics
res = engine.state.metrics["rmse"]
y_preds_full = []
for i in range(dist.get_world_size()):
y_preds_full.append((i + 1) * torch.ones(offset))
y_preds_full = torch.stack(y_preds_full).to(device).flatten()
true_res = np.sqrt(np.mean(np.square((y_true - y_preds_full).cpu().numpy())))
assert pytest.approx(res) == true_res
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
device = "cuda:{}".format(local_rank)
_test_distrib_itegration(device)
@pytest.mark.distributed
def test_distrib_cpu(local_rank, distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_itegration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = "cpu"
_test_distrib_itegration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"])
_test_distrib_itegration(device)
|
the-stack_106_29824 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'fit_to_pages04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with fit to print."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.fit_to_pages(3, 2)
worksheet.set_paper(9)
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
|
the-stack_106_29825 | import numpy as np
import pandas as pd
from statsmodels.genmod.bayes_mixed_glm import BinomialBayesMixedGLM
def glmm_model(data, features, y, random_effects):
model = BinomialBayesMixedGLM.from_formula(f'{y} ~ {features}', random_effects, data)
result = model.fit_vb()
return result
def call_glmm_model(data, features_basic, outcome_list, random_effects, list_of_featues_to_add, feature_number=0, outcome_number=0):
features = features_basic + list_of_featues_to_add[feature_number]
outcome = outcome_list[outcome_number]
data['Mortalty'] = data['Mortalty'].astype(np.int64)
return glmm_model(data, features, outcome, random_effects)
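# Illustrative sketch (assumption, not part of the original script): the shape of the
# arguments call_glmm_model expects. The column names follow the dataset handled in
# __main__ below, and the random-effects formula treats HospID as a grouping factor;
# adjust both to the actual data.
def _example_call(df):
    features_basic = 'Age + Gender'
    list_of_featues_to_add = [' + Reoperation']
    outcome_list = ['Mortalty']
    random_effects = {'hospital': '0 + C(HospID)'}
    return call_glmm_model(df, features_basic, outcome_list, random_effects,
                           list_of_featues_to_add, feature_number=0, outcome_number=0)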
def add_Summary_Data_To_ImputedData(df):
df1 = df.groupby(['HospID', 'surgyear'])['HospID'].count().reset_index(name='total_CABG')
df2 = df.groupby(['HospID', 'surgyear'])['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(
name='Reop_CABG')
df_aggr = pd.read_csv("aggregate_csv.csv")
df3 = pd.merge(df1, df, left_on=['HospID', 'surgyear'], right_on=['HospID', 'surgyear'], how='outer')
df4 = pd.merge(df2, df3, left_on=['HospID', 'surgyear'], right_on=['HospID', 'surgyear'], how='outer')
df5 = pd.merge(df_aggr, df4, left_on=['HospID', 'surgyear'], right_on=['HospID', 'surgyear'],
how='inner') # how='left', on=['HospID','surgyear'])
del df5["Unnamed: 0"]
df5.to_csv("imputed_data_with_sum.csv")
print(df5.head(10))
return df5
def refactor_categorical_values_to_numeric_values(df, col_names):
# df = df.filter(col_names, axis=1)
for col in col_names:
try:
df = df.replace({col: {False: 0, True: 1}})
df = df.replace({col: {"No": 0, "Yes": 1}})
df = df.replace({col: {"Male": 0, "Female": 1}})
df = df.replace({col: {"Elective": 0, "Urgent": 1}})
df = df.replace({col: {"Non-Hispanic": 0, "Hispanic": 1}})
df = df.replace({col: {"Previous Non-CAB": 0, "Previous CAB": 1}})
df = df.replace({col: {"None/Trivial/Trace/Mild": 0, "Moderate/Severe": 1}})
df = df.replace({col: {"Unknown": 1, "Alive": 1, "Dead": 0}})
df = df.replace({col: {"First cardiovascular surgery": 0, "NA - Not a cardiovascular surgery": 0,
"First re-op cardiovascular surgery": 0, "Second re-op cardiovascular surgery": 1,
"Third re-op cardiovascular surgery": 1,
"Fourth or more re-op cardiovascular surgery": 1}})
df = df.replace({col: {"Never smoker": 0, "Smoker": 1}})
df = df.replace({col: {"I/II": 0, "III/IV": 1}})
df = df.replace({col: {"None": 0, "One": 1, "Two": 2, "Three": 3}})
except:
x = "none"
df.to_csv('/tmp/pycharm_project_957/imputed_data_with_float_values_glmm.csv')
def clean_data(df_to_clean):
df_to_clean.rename(columns={"EF<=35%": "EF_less_equal_35"}, inplace=True)
print("type of hosp id: ", type(df_to_clean['HospID_total_cardiac_surgery'][0]))
df_to_clean['HospID_total_cardiac_surgery'] = df_to_clean['HospID_total_cardiac_surgery'].astype(str)
df_to_clean['HospID_total_cardiac_surgery'] = df_to_clean['HospID_total_cardiac_surgery'].str.replace(',', '')
    df_to_clean['HospID_total_cardiac_surgery'] = df_to_clean['HospID_total_cardiac_surgery'].astype(float)  # np.float is a removed alias for the builtin float
df_to_clean.to_csv("imputed_data_after_cleaning_glmm.csv")
def create_reop_imputed_data(df_imputed_data):
df_imputed_data = df_imputed_data[df_imputed_data['Reoperation'] == 'Reoperation']
df_imputed_data.to_csv('/tmp/pycharm_project_957/imputed_data_reop.csv')
if __name__ == "__main__":
df_reop = pd.read_csv('/tmp/pycharm_project_957/imputed_data_reop_clean.csv')
df_reop = df_reop.dropna()
df_reop.to_csv('/tmp/pycharm_project_957/imputed_data_reop_clean.csv')
print()
# yaara's script
# df_all = pd.read_csv("/tmp/pycharm_project_957/imputed_data_sum_info_surg_and_Hosp.csv")
# df_with_sum = add_Summary_Data_To_ImputedData(df_all)
# df_columns = pd.DataFrame(df_all.columns)
# df_columns.to_csv("columns_of_imputed_data.csv")
# GLMM
# read table
# df_with_sum = pd.read_csv('/tmp/pycharm_project_957/imputed_data_with_sum.csv')
# # change categorical values to numerical values
# list_vals = ["surgyear", "Reoperation", "BMI", "Age", "Gender", "RaceCaucasian", "RaceBlack", "Ethnicity",
# "RaceOther", "FHCAD", "Diabetes", "InsulinDiab", "Dyslip", "Dialysis", "Hypertn", "InfEndo",
# "SmokingStatus", "ChrLungD", "ModSevereLungDis", "ImmSupp", "PVD", "DualAntiPlat", 'RenFail',
# "CreatLst", 'PreCVAorTIAorCVD', "POCPCI", "PrevMI", "Angina", "UnstableAngina", "HeartFail",
# "ClassNYHGroup", "Arrhythmia", "ArrhythAtrFibFlutter", "ArrhythOther", "MedACEI", "MedBeta",
# "MedNitIV", "MedASA", "MedAntiplateltNoASA", "AntiCoag", "MedInotr", "MedSter", "HDEF", "EF<=35%",
# "NumDisV", 'NumDisV_ordinal', "LeftMain", "VDInsufA", "VDStenA", "VDInsufM", "VDStenM", "VDInsufT",
# "VDStenT", "Status", 'MedHeparin', 'Mortality', 'PrCVInt']
# # list_val = ['PrCVInt']
# refactor_categorical_values_to_numeric_values(df_all, list_vals)
df = pd.read_csv("/tmp/pycharm_project_957/imputed_data_after_cleaning_glmm.csv")
df_short = df[:1000]
features_basic = '''Age+surgyear+Reoperation+BMI+Gender+RaceCaucasian+RaceBlack+Ethnicity+RaceOther+FHCAD+Diabetes+
InsulinDiab+Dyslip+Dialysis+Hypertn+InfEndo+SmokingStatus+ChrLungD+ModSevereLungDis+ImmSupp+PVD+DualAntiPlat+RenFail+
CreatLst+PreCVAorTIAorCVD+POCPCI+PrevMI+Angina+UnstableAngina+HeartFail+ClassNYHGroup+Arrhythmia+ArrhythAtrFibFlutter+
ArrhythOther+MedACEI+MedBeta+MedNitIV+MedASA+MedAntiplateltNoASA+AntiCoag+MedInotr+MedSter+HDEF+EF_less_equal_35+
NumDisV+NumDisV_ordinal+LeftMain+VDInsufA+VDStenA+VDInsufM+VDStenM+VDInsufT+VDStenT+Status+MedHeparin+PrCVInt'''
list_features_to_add = ['+HospID_total_CABG', '+HospID_total_cardiac_surgery', '+HospID_Reop_CABG']
list_of_outcome = ['Mortalty', 'Complics']
# features_CABG = features_basic + '+HospID_total_CABG'
# features_total_surgeries = features_basic + '+HospID_total_cardiac_surgery'
# features_reop = features_basic + '+HospID_Reop_CABG'
# random_effect_variables = {'HospID': '0 + C(HospID)'}
random_effect_variables = {'HospID': '0 + C(HospID)', 'surgid': '0 + C(surgid)'}
result_glmm = call_glmm_model(df_short, features_basic, list_of_outcome, random_effect_variables, list_features_to_add)
# result_glmm_total = glmm_model(df_with_sum, features_total_surgeries, 'Mortalty', random_effect_variables)
# result_glmm_reop = glmm_model(df_with_sum, features_reop, 'Mortalty', random_effect_variables)
print("result_glmm", result_glmm.summary())
|
the-stack_106_29827 |
import os
def to_head( projectpath ):
pathlayers = os.path.join( projectpath, 'layers/' ).replace('\\', '/')
return r"""
\documentclass[border=8pt, multi, tikz]{standalone}
\usepackage{import}
\subimport{"""+ pathlayers + r"""}{init}
\usetikzlibrary{positioning}
\usetikzlibrary{3d} %for including external image
"""
def to_cor():
return r"""
\def\ConvColor{rgb:yellow,5;red,2.5;white,5}
\def\ConvReluColor{rgb:yellow,5;red,5;white,5}
\def\PoolColor{rgb:red,1;black,0.3}
\def\UnpoolColor{rgb:blue,2;green,1;black,0.3}
\def\FcColor{rgb:blue,5;red,2.5;white,5}
\def\FcReluColor{rgb:blue,5;red,5;white,4}
\def\SoftmaxColor{rgb:magenta,5;black,7}
"""
def to_begin():
return r"""
\newcommand{\copymidarrow}{\tikz \draw[-Stealth,line width=0.8mm,draw={rgb:blue,4;red,1;green,1;black,3}] (-0.3,0) -- ++(0.3,0);}
\begin{document}
\begin{tikzpicture}
\tikzstyle{connection}=[ultra thick,every node/.style={sloped,allow upside down},draw=\edgecolor,opacity=0.7]
\tikzstyle{copyconnection}=[ultra thick,every node/.style={sloped,allow upside down},draw={rgb:blue,4;red,1;green,1;black,3},opacity=0.7]
"""
# layers definition
def to_input( pathfile, to='(-3,0,0)', width=8, height=8, name="temp" ):
return r"""
\node[canvas is zy plane at x=0] (""" + name + """) at """+ to +""" {\includegraphics[width="""+ str(width)+"cm"+""",height="""+ str(height)+"cm"+"""]{"""+ pathfile +"""}};
"""
# Conv
#xlabel={{"""+ str(n_filer) +""", }},
def to_Conv( name, s_filer=256, n_filer=64, offset="(0,0,0)", to="(0,0,0)", width=1, height=40, depth=40, caption=" " ):
return r"""
\pic[shift={"""+ offset +"""}] at """+ to +"""
{Box={
name=""" + name +""",
caption="""+ caption +r""",
zlabel="""+ str(s_filer) +""",
zlabel= ,
fill=\ConvColor,
height="""+ str(height) +""",
width="""+ str(width) +""",
depth="""+ str(depth) +"""
}
};
"""
# Conv,Conv,relu
# Bottleneck
# xlabel={{ """+ str(n_filer[0]) +""", """+ str(n_filer[1]) +""" }},
def to_ConvConvRelu( name, s_filer=256, n_filer=(64,64), offset="(0,0,0)", to="(0,0,0)", width=(2,2), height=40, depth=40, caption=" " ):
return r"""
\pic[shift={ """+ offset +""" }] at """+ to +"""
{RightBandedBox={
name="""+ name +""",
caption="""+ caption +""",
zlabel="""+ str(s_filer) +""",
zlabel= ,
fill=\ConvColor,
bandfill=\ConvReluColor,
height="""+ str(height) +""",
width={ """+ str(width[0]) +""" , """+ str(width[1]) +""" },
depth="""+ str(depth) +"""
}
};
"""
# Pool
def to_Pool(name, offset="(0,0,0)", to="(0,0,0)", width=1, height=32, depth=32, opacity=0.5, caption=" "):
return r"""
\pic[shift={ """+ offset +""" }] at """+ to +"""
{Box={
name="""+name+""",
caption="""+ caption +r""",
fill=\PoolColor,
opacity="""+ str(opacity) +""",
height="""+ str(height) +""",
width="""+ str(width) +""",
depth="""+ str(depth) +"""
}
};
"""
# unpool4,
def to_UnPool(name, offset="(0,0,0)", to="(0,0,0)", width=1, height=32, depth=32, opacity=0.5, caption=" "):
return r"""
\pic[shift={ """+ offset +""" }] at """+ to +"""
{Box={
name="""+ name +r""",
caption="""+ caption +r""",
fill=\UnpoolColor,
opacity="""+ str(opacity) +""",
height="""+ str(height) +""",
width="""+ str(width) +""",
depth="""+ str(depth) +"""
}
};
"""
# xlabel={{ """+ str(n_filer) + """, }},
def to_ConvRes( name, s_filer=256, n_filer=64, offset="(0,0,0)", to="(0,0,0)", width=6, height=40, depth=40, opacity=0.2, caption=" " ):
return r"""
\pic[shift={ """+ offset +""" }] at """+ to +"""
{RightBandedBox={
name="""+ name + """,
caption="""+ caption + """,
zlabel="""+ str(s_filer) +r""",
zlabel= ,
fill={rgb:white,1;black,3},
bandfill={rgb:white,1;black,2},
opacity="""+ str(opacity) +""",
height="""+ str(height) +""",
width="""+ str(width) +""",
depth="""+ str(depth) +"""
}
};
"""
# ConvSoftMax
def to_ConvSoftMax( name, s_filer=40, offset="(0,0,0)", to="(0,0,0)", width=1, height=40, depth=40, caption=" " ):
return r"""
\pic[shift={"""+ offset +"""}] at """+ to +"""
{Box={
name=""" + name +""",
caption="""+ caption +""",
zlabel="""+ str(s_filer) +""",
zlabel= ,
fill=\SoftmaxColor,
height="""+ str(height) +""",
width="""+ str(width) +""",
depth="""+ str(depth) +"""
}
};
"""
# SoftMax
def to_SoftMax( name, s_filer=10, offset="(0,0,0)", to="(0,0,0)", width=1.5, height=3, depth=25, opacity=0.8, caption=" " ):
return r"""
\pic[shift={"""+ offset +"""}] at """+ to +"""
{Box={
name=""" + name +""",
caption="""+ caption +""",
xlabel={{" ","dummy"}},
zlabel="""+ str(s_filer) +""",
fill=\SoftmaxColor,
opacity="""+ str(opacity) +""",
height="""+ str(height) +""",
width="""+ str(width) +""",
depth="""+ str(depth) +"""
}
};
"""
def to_connection( of, to):
return r"""
\draw [connection] ("""+of+"""-east) -- node {\midarrow} ("""+to+"""-west);
"""
def to_skip( of, to, pos=1.25):
return r"""
\path ("""+ of +"""-southeast) -- ("""+ of +"""-northeast) coordinate[pos="""+ str(pos) +"""] ("""+ of +"""-top) ;
\path ("""+ to +"""-south) -- ("""+ to +"""-north) coordinate[pos="""+ str(pos) +"""] ("""+ to +"""-top) ;
\draw [copyconnection] ("""+of+"""-northeast)
-- node {\copymidarrow}("""+of+"""-top)
-- node {\copymidarrow}("""+to+"""-top)
-- node {\copymidarrow} ("""+to+"""-north);
"""
def to_end():
return r"""
\end{tikzpicture}
\end{document}
"""
def to_generate( arch, pathname="file.tex" ):
with open(pathname, "w") as f:
for c in arch:
print(c)
f.write( c )
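

# Rough usage sketch (illustrative only; the layer names, sizes and output
# path below are made up): compose an architecture from the helpers above
# and emit a standalone .tex file.
if __name__ == '__main__':
    example_arch = [
        to_head('..'),
        to_cor(),
        to_begin(),
        to_Conv('conv1', s_filer=256, n_filer=64, offset="(0,0,0)", to="(0,0,0)",
                width=2, height=40, depth=40, caption="conv1"),
        to_Pool('pool1', offset="(1,0,0)", to="(conv1-east)", caption="pool1"),
        to_connection('conv1', 'pool1'),
        to_end(),
    ]
    to_generate(example_arch, 'example_arch.tex')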
|
the-stack_106_29828 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.relu6_grad_run import relu6_grad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_relu6_grad_001"
case_path = os.getcwd()
# params init
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag,opfuncname,testRunArgs, dimArgs
("relu6_grad_001", relu6_grad_run, ((1, 128), "float16")),
("relu6_grad_002", relu6_grad_run, ((8, 28, 28, 4), "float16")),
("relu6_grad_003", relu6_grad_run, ((8, 14, 14, 6), "float16")),
("relu6_grad_004", relu6_grad_run, ((8, 7, 7, 6), "float16")),
("relu6_grad_005", relu6_grad_run, ((8, 4, 4, 6), "float16")),
("relu6_grad_006", relu6_grad_run, ((8, 2, 2, 4), "float16")),
]
self.testarg_cloud = [
# testflag,opfuncname,testRunArgs, dimArgs
("relu6_grad_001", relu6_grad_run, ((1, 128), "float32")),
]
self.testarg_rpc_cloud = [
("relu6_grad_fp32_001", relu6_grad_run, ((8, 28, 28, 4), "float32")),
("relu6_grad_fp32_002", relu6_grad_run, ((8, 14, 14, 6), "float32")),
("relu6_grad_fp32_003", relu6_grad_run, ((8, 7, 7, 6), "float32")),
("relu6_grad_fp32_004", relu6_grad_run, ((8, 4, 4, 6), "float32")),
("relu6_grad_fp32_005", relu6_grad_run, ((8, 2, 2, 4), "float32")),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def test_run_cloud(self):
self.common_run(self.testarg_cloud)
def test_run_rpc_cloud(self):
self.common_run(self.testarg_rpc_cloud)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
# a=TestCase()
# a.setup()
# a.test_run()
|
the-stack_106_29829 | # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import functools
import FnAssetAPI.ui.implementation
import FnAssetAPI.ui.constants
import FnAssetAPI.ui
import ftrack_connect_foundry.ui.browser
import ftrack_connect_foundry.ui.inline_picker
import ftrack_connect_foundry.ui.workflow_relationship
import ftrack_connect_foundry.ui.registration_options
from FnAssetAPI.ui.toolkit import is_webwidget_supported
has_webwidgets = is_webwidget_supported()
class Delegate(FnAssetAPI.ui.implementation.ManagerUIDelegate):
def __init__(self, bridge):
'''Initialise delegate with *bridge*.
*bridge* should be an instance of
:py:class:`ftrack_connect_foundry.bridge.Bridge`.
'''
self._bridge = bridge
# Store mapping of widgets to their identifiers.
# Note: The widget classes are partialed with this delegate's bridge
# to provide them access to common functionality whilst maintaining
# compatibility with their parent class interfaces.
import ftrack_connect_foundry
compatible_widgets = [
ftrack_connect_foundry.ui.browser.Browser,
ftrack_connect_foundry.ui.inline_picker.InlinePicker,
ftrack_connect_foundry.ui.workflow_relationship.WorkflowRelationship,
ftrack_connect_foundry.ui.registration_options.RegistrationOptions
]
if has_webwidgets:
import ftrack_connect_foundry.ui.tasks_view
import ftrack_connect_foundry.ui.info_view
incompatible_widgets = [
ftrack_connect_foundry.ui.info_view.InfoView,
ftrack_connect_foundry.ui.info_view.WorkingTaskInfoView,
ftrack_connect_foundry.ui.tasks_view.TasksView,
]
all_widgets = compatible_widgets + incompatible_widgets
else:
all_widgets = compatible_widgets
self._widgetMapping = {}
for widgetClass in all_widgets:
identifier = widgetClass.getIdentifier()
# Bind bridge as first argument to class on instantiation.
boundWidgetClass = functools.partial(widgetClass, self._bridge)
# The returned callable is expected to be a class with certain
# class methods available. Therefore, also dynamically assign
# original class methods to wrapper.
for name in ('getIdentifier', 'getDisplayName', 'getAttributes'):
setattr(boundWidgetClass, name, getattr(widgetClass, name))
self._widgetMapping[identifier] = boundWidgetClass
super(Delegate, self).__init__()
def getWidget(self, identifier):
'''Return appropriate widget class for *identifier*.'''
return self._widgetMapping.get(identifier, None)
def getWidgets(self, host):
'''Return mapping of classes for all supported widgets.'''
return self._widgetMapping.copy()
|
the-stack_106_29830 | #
# Copyright (c) 2018 Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#
from extract.globals import *
from extract.directive import DTDirective
##
# @brief Manage directives in a default way.
#
class DTDefault(DTDirective):
def __init__(self):
pass
##
# @brief Extract directives in a default way
#
    # @param node_address Address of node owning the property definition.
# @param yaml YAML definition for the owning node.
# @param prop property name
# @param def_label Define label string of node owning the directive.
#
def extract(self, node_address, yaml, prop, def_label):
prop_def = {}
prop_alias = {}
prop_values = reduced[node_address]['props'][prop]
if isinstance(prop_values, list):
for i, prop_value in enumerate(prop_values):
prop_name = convert_string_to_label(prop)
label = def_label + '_' + prop_name
if isinstance(prop_value, str):
prop_value = "\"" + prop_value + "\""
prop_def[label + '_' + str(i)] = prop_value
else:
prop_name = convert_string_to_label(prop)
label = def_label + '_' + prop_name
if prop_values == 'parent-label':
prop_values = find_parent_prop(node_address, 'label')
if isinstance(prop_values, str):
prop_values = "\"" + prop_values + "\""
prop_def[label] = prop_values
# generate defs for node aliases
if node_address in aliases:
for i in aliases[node_address]:
alias_label = convert_string_to_label(i)
alias = alias_label + '_' + prop_name
prop_alias[alias] = label
insert_defs(node_address, prop_def, prop_alias)
##
# @brief Management information for directives handled by default.
default = DTDefault()
|
the-stack_106_29831 | # TODO deadline reminder for all students
# Copyright (c) 2021 War-Keeper
# This functionality provides various methods to manage reminders (creation, retrieval, update and deletion)
# A user can set up a reminder, check what is due this week or what is due today. They can also check all the homework due for a given course name.
# A user can also update or delete a reminder if needed.
import discord
from discord.ext import commands
import json
import os
import asyncio
import time
from datetime import datetime
class Deadline(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.reminders = json.load(open("data/remindme/reminders.json"))
self.units = {"second": 1, "minute": 60, "hour": 3600, "day": 86400, "week": 604800, "month": 2592000}
# -----------------------------------------------------------------------------------------------------------------
# Function: duedate(self, ctx, coursename: str, hwcount: str, *, date: str)
# Description: Adds the homework to json in the specified format
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# - coursename: name of the course for which homework is to be added
# - hwcount: name of the homework
# - date: due date of the assignment
# Outputs: returns either an error stating a reason for failure or returns a success message
# indicating that the reminder has been added
# -----------------------------------------------------------------------------------------------------------------
@commands.command(name="addhw",
help="add homework and due-date $addhw CLASSNAME HW_NAME MMM DD YYYY optional(HH:MM) ex. $addhw CSC510 HW2 SEP 25 2024 17:02")
async def duedate(self, ctx, coursename: str, hwcount: str, *, date: str):
author = ctx.message.author
# print('Author: '+str(author)+' coursename: '+coursename+' homework count: '+hwcount+' date: '+str(date))
try:
duedate = datetime.strptime(date, '%b %d %Y %H:%M')
# print(seconds)
except ValueError:
try:
duedate = datetime.strptime(date, '%b %d %Y')
except:
await ctx.send("Due date could not be parsed")
return
a_timedelta = duedate - datetime.today()
seconds = (time.time() + a_timedelta.total_seconds())
flag = True
if self.reminders:
for reminder in self.reminders:
if ((reminder["COURSE"] == coursename) and (reminder["HOMEWORK"] == hwcount)):
flag = False
break
if (flag):
self.reminders.append({"ID": author.id, "COURSE": coursename, "HOMEWORK": hwcount, "DUEDATE": str(duedate),
"FUTURE": seconds})
json.dump(self.reminders, open("data/remindme/reminders.json", "w"))
await ctx.send(
"A date has been added for: {} homework named: {} which is due on: {} by {}.".format(coursename,
hwcount,
str(duedate),
author))
else:
await ctx.send("This homework has already been added..!!")
@duedate.error
async def duedate_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send(
'To use the addhw command, do: $addhw CLASSNAME HW_NAME MMM DD YYYY optional(HH:MM) \n ( For example: $addhw CSC510 HW2 SEP 25 2024 17:02 )')
# -----------------------------------------------------------------------------------------------------------------
# Function: deleteReminder(self, ctx, courseName: str, hwName: str)
# Description: Delete a reminder using Classname and Homework name
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# - coursename: name of the course for which homework is to be added
# - hwName: name of the homework
# Outputs: returns either an error stating a reason for failure or
# returns a success message indicating that the reminder has been deleted
# -----------------------------------------------------------------------------------------------------------------
@commands.command(name="deletereminder", pass_context=True,
help="delete a specific reminder using course name and homework name using $deletereminder CLASSNAME HW_NAME ex. $deletereminder CSC510 HW2 ")
async def deleteReminder(self, ctx, courseName: str, hwName: str):
author = ctx.message.author
to_remove = []
for reminder in self.reminders:
# print('in json '+str(reminder["HOMEWORK"])+' hwName '+hwName)
if ((reminder["HOMEWORK"] == hwName) and (reminder["COURSE"] == courseName)):
# print('true '+hwName)
to_remove.append(reminder)
# print('to_remove '+ str(to_remove))
for reminder in to_remove:
self.reminders.remove(reminder)
if to_remove:
json.dump(self.reminders, open("data/remindme/reminders.json", "w"))
await ctx.send("Following reminder has been deleted: Course: {}, Homework Name: {}, Due Date: {}".format(
str(reminder["COURSE"]), str(reminder["HOMEWORK"]), str(reminder["DUEDATE"])))
@deleteReminder.error
async def deleteReminder_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send(
'To use the deletereminder command, do: $deletereminder CLASSNAME HW_NAME \n ( For example: $deletereminder CSC510 HW2 )')
# -----------------------------------------------------------------------------------------------------------------
# Function: changeduedate(self, ctx, classid: str, hwid: str, *, date: str)
# Description: Update the 'Due date' for a homework by providing the classname and homewwork name
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# - classid: name of the course for which homework is to be added
# - hwid: name of the homework
# - date: due date of the assignment
# Outputs: returns either an error stating a reason for failure or
# returns a success message indicating that the reminder has been updated
# -----------------------------------------------------------------------------------------------------------------
@commands.command(name="changeduedate", pass_context=True,
help="update the assignment date. $changeduedate CLASSNAME HW_NAME MMM DD YYYY optional(HH:MM) ex. $changeduedate CSC510 HW2 SEP 25 2024 17:02 ")
async def changeduedate(self, ctx, classid: str, hwid: str, *, date: str):
author = ctx.message.author
flag = False
try:
duedate = datetime.strptime(date, '%b %d %Y %H:%M')
except ValueError:
try:
duedate = datetime.strptime(date, '%b %d %Y')
except:
await ctx.send("Due date could not be parsed")
return
for reminder in self.reminders:
flag = False
if ((reminder["HOMEWORK"] == hwid) and (reminder["COURSE"] == classid)):
reminder["DUEDATE"] = str(duedate)
a_timedelta = duedate - datetime.today()
seconds = (time.time() + a_timedelta.total_seconds())
reminder["FUTURE"] = seconds
reminder["ID"] = author.id
flag = True
if (flag):
json.dump(self.reminders, open("data/remindme/reminders.json", "w"))
await ctx.send(
"{} {} has been updated with following date: {}".format(classid, hwid, reminder["DUEDATE"]))
# await ctx.send("Data updated..!!")
@changeduedate.error
async def changeduedate_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send(
'To use the changeduedate command, do: $changeduedate CLASSNAME HW_NAME MMM DD YYYY optional(HH:MM) \n ( For example: $changeduedate CSC510 HW2 SEP 25 2024 17:02 )')
# -----------------------------------------------------------------------------------------------------------------
# Function: duethisweek(self, ctx)
# Description: Displays all the homeworks that are due this week along with the coursename and due date
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# Outputs: returns either an error stating a reason for failure
# or returns a list of all the assignments that are due this week
# -----------------------------------------------------------------------------------------------------------------
@commands.command(name="duethisweek", pass_context=True,
help="check all the homeworks that are due this week $duethisweek")
async def duethisweek(self, ctx):
time = ctx.message.created_at
for reminder in self.reminders:
timeleft = datetime.strptime(reminder["DUEDATE"], '%Y-%m-%d %H:%M:%S') - time
print("timeleft: " + str(timeleft) + " days left: " + str(timeleft.days))
if timeleft.days <= 7:
await ctx.send("{} {} is due this week at {}".format(reminder["COURSE"], reminder["HOMEWORK"],
reminder["DUEDATE"]))
# -----------------------------------------------------------------------------------------------------------------
# Function: duetoday(self, ctx)
# Description: Displays all the homeworks that are due today
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# Outputs: returns either an error stating a reason for failure or
# returns a list of all the assignments that are due on the day the command is run
# -----------------------------------------------------------------------------------------------------------------
@commands.command(name="duetoday", pass_context=True, help="check all the homeworks that are due today $duetoday")
async def duetoday(self, ctx):
flag = True
for reminder in self.reminders:
timedate = datetime.strptime(reminder["DUEDATE"], '%Y-%m-%d %H:%M:%S')
if timedate.date() == ctx.message.created_at.date():
flag = False
await ctx.send(
"{} {} is due today at {}".format(reminder["COURSE"], reminder["HOMEWORK"], timedate.time()))
if (flag):
await ctx.send("You have no dues today..!!")
# -----------------------------------------------------------------------------------------------------------------
# Function: coursedue(self, ctx, courseid: str)
# Description: Displays all the homeworks that are due for a specific course
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# - courseid: name of the course for which homework is to be added
# Outputs: returns either an error stating a reason for failure or
# a list of assignments that are due for the provided courseid
# -----------------------------------------------------------------------------------------------------------------
@commands.command(name="coursedue", pass_context=True,
help="check all the homeworks that are due for a specific course $coursedue coursename ex. $coursedue CSC505")
async def coursedue(self, ctx, courseid: str):
course_due = []
for reminder in self.reminders:
if reminder["COURSE"] == courseid:
course_due.append(reminder)
await ctx.send("{} is due at {}".format(reminder["HOMEWORK"], reminder["DUEDATE"]))
if not course_due:
await ctx.send("Rejoice..!! You have no pending homeworks for {}..!!".format(courseid))
@coursedue.error
async def coursedue_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send(
'To use the coursedue command, do: $coursedue CLASSNAME \n ( For example: $coursedue CSC510 )')
# ---------------------------------------------------------------------------------
# Function: listreminders(self, ctx)
# Description: Print out all the reminders
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# Outputs: returns either an error stating a reason for failure or
# returns a list of all the assignments
# ---------------------------------------------------------------------------------
@commands.command(name="listreminders", pass_context=True, help="lists all reminders")
async def listreminders(self, ctx):
to_remove = []
for reminder in self.reminders:
# if reminder["FUTURE"] <= int(time.time()):
try:
# await ctx.send("{} homework named: {} which is due on: {} by {}".format(self.bot.get_user(reminder["ID"]), reminder["TEXT"]))
await ctx.send(
"{} homework named: {} which is due on: {} by {}".format(reminder["COURSE"], reminder["HOMEWORK"],
reminder["DUEDATE"],
self.bot.get_user(reminder["ID"])))
except (discord.errors.Forbidden, discord.errors.NotFound):
to_remove.append(reminder)
except discord.errors.HTTPException:
pass
else:
to_remove.append(reminder)
if not self.reminders:
await ctx.send("Mission Accomplished..!! You don't have any more dues..!!")
# ---------------------------------------------------------------------------------
# Function: clearallreminders(self, ctx)
# Description: Delete all the reminders
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# Outputs: returns either an error stating a reason for failure or
# returns a success message stating that reminders have been deleted
# ---------------------------------------------------------------------------------
@commands.command(name="clearreminders", pass_context=True, help="deletes all reminders")
async def clearallreminders(self, ctx):
to_remove = []
for reminder in self.reminders:
to_remove.append(reminder)
for reminder in to_remove:
self.reminders.remove(reminder)
if to_remove:
json.dump(self.reminders, open("data/remindme/reminders.json", "w"))
await ctx.send("All reminders have been cleared..!!")
# ---------------------------------------------------------------------------------
# Function: remindme(self, ctx, quantity: int, time_unit : str,*, text :str)
# Description: Personal remind me functionality
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# - ctx: used to access the values passed through the current context
# - quantity - time after which the data will be erased
# Outputs: returns either an error stating a reason for failure or
# returns a success message stating that reminders have been deleted
# ---------------------------------------------------------------------------------
@commands.command(name="remindme", pass_context=True, help="Request the bot to set a reminder for a due date")
async def remindme(self, ctx, quantity: int, time_unit: str, *, text: str):
time_unit = time_unit.lower()
author = ctx.message.author
s = ""
if time_unit.endswith("s"):
time_unit = time_unit[:-1]
s = "s"
if not time_unit in self.units:
await ctx.send("Invalid unit of time. Select from seconds/minutes/hours/days/weeks/months")
return
if quantity < 1:
await ctx.send("Quantity must not be 0 or negative")
return
if len(text) > 1960:
await ctx.send("Text is too long.")
return
seconds = self.units[time_unit] * quantity
future = int(time.time() + seconds)
self.reminders.append({"ID": author.id, "FUTURE": future, "TEXT": text})
await ctx.send("I will remind you that in {} {}.".format(str(quantity), time_unit + s))
json.dump(self.reminders, open("data/remindme/reminders.json", "w"))
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
        await ctx.send('Unidentified command. Please use $help to get the list of available commands')
# -----------------------------------------------------------------------------------------------------
# Function: delete_old_reminders(self)
# Description: asynchronously keeps on tracking the json file for expired reminders and cleans them.
# Inputs:
# - self: used to access parameters passed to the class through the constructor
# -----------------------------------------------------------------------------------------------------
async def delete_old_reminders(self):
print("inside delete old reminders")
while self is self.bot.get_cog("Deadline"):
to_remove = []
for reminder in self.reminders:
if reminder["FUTURE"] <= int(time.time()):
try:
print("Deleting an old reminder..!!")
except (discord.errors.Forbidden, discord.errors.NotFound):
to_remove.append(reminder)
except discord.errors.HTTPException:
pass
else:
to_remove.append(reminder)
for reminder in to_remove:
self.reminders.remove(reminder)
if to_remove:
json.dump(self.reminders, open("data/remindme/reminders.json", "w"))
await asyncio.sleep(5)
# -----------------------------------------------------------------------------
# checks if the folder that is going to hold json exists else creates a new one
# -----------------------------------------------------------------------------
def check_folders():
if not os.path.exists("data/remindme"):
print("Creating data/remindme folder...")
os.makedirs("data/remindme")
# ----------------------------------------------------
# checks if a json file exists else creates a new one
# ----------------------------------------------------
def check_files():
f = "data/remindme/reminders.json"
print("Creating file...")
if not os.path.exists(f):
print("Creating empty reminders.json...")
json.dump([], open(f, "w"))
# -------------------------------------
# add the file to the bot's cog system
# -------------------------------------
def setup(bot):
check_folders()
check_files()
n = Deadline(bot)
loop = asyncio.get_event_loop()
loop.create_task(n.delete_old_reminders())
bot.add_cog(n)
|
the-stack_106_29832 | # from importlib import reload
# -*- coding:utf-8 -*-
import os
import json
import threading
import numpy as np
from PIL import Image
import tensorflow as tf
from keras import losses
from keras import backend as K
from keras.utils import plot_model
from keras.preprocessing import image
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Input, Dense, Flatten
from keras.layers.core import Reshape, Masking, Lambda, Permute
from keras.layers.recurrent import GRU, LSTM
from keras.layers.wrappers import Bidirectional, TimeDistributed
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, Adam
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, TensorBoard
from chinese_ocr.densenet_common import densenet
# Width and height of the training images
img_h = 32
img_w = 280
batch_size = 20
maxlabellength = 10
def get_session(gpu_fraction=1.0):
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
def readfile(filename):
res = []
with open(filename, 'r') as f:
lines = f.readlines()
for i in lines:
res.append(i.strip())
dic = {}
for i in res:
p = i.split(' ')
dic[p[0]] = p[1:]
return dic
class random_uniform_num():
"""
    Uniformly random sampling that guarantees each sample index appears exactly once per epoch
"""
def __init__(self, total):
self.total = total
self.range = [i for i in range(total)]
np.random.shuffle(self.range)
self.index = 0
def get(self, batchsize):
r_n = []
if self.index + batchsize > self.total:
r_n_1 = self.range[self.index:self.total]
np.random.shuffle(self.range)
self.index = (self.index + batchsize) - self.total
r_n_2 = self.range[0:self.index]
r_n.extend(r_n_1)
r_n.extend(r_n_2)
else:
r_n = self.range[self.index: self.index + batchsize]
self.index = self.index + batchsize
return r_n
def gen(data_file, image_path, batchsize=128, maxlabellength=10, imagesize=(32, 280)):
image_label = readfile(data_file)
_imagefile = [i for i, j in image_label.items()]
x = np.zeros((batchsize, imagesize[0], imagesize[1], 1), dtype=np.float)
labels = np.ones([batchsize, maxlabellength]) * 10000
input_length = np.zeros([batchsize, 1])
label_length = np.zeros([batchsize, 1])
r_n = random_uniform_num(len(_imagefile))
_imagefile = np.array(_imagefile)
while 1:
shufimagefile = _imagefile[r_n.get(batchsize)]
for i, j in enumerate(shufimagefile):
img1 = Image.open(os.path.join(image_path, j)).convert('L')
img = np.array(img1, 'f') / 255.0 - 0.5
x[i] = np.expand_dims(img, axis=2)
# print('imag:shape', img.shape)
str = image_label[j]
label_length[i] = len(str)
if (len(str) <= 0):
print("len < 0", j)
input_length[i] = imagesize[1] // 8
labels[i, :len(str)] = [int(k) - 1 for k in str]
inputs = {'the_input': x,
'the_labels': labels,
'input_length': input_length,
'label_length': label_length,
}
outputs = {'ctc': np.zeros([batchsize])}
yield (inputs, outputs)
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def get_lr_metric(optimizer):
def lr(y_true, y_pred):
return optimizer.lr
return lr
def get_model(img_h, nclass):
input = Input(shape=(img_h, None, 1), name='the_input')
y_pred = densenet.dense_cnn(input, nclass)
basemodel = Model(inputs=input, outputs=y_pred)
basemodel.summary()
labels = Input(name='the_labels', shape=[None], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
optimizer = Adam(lr=0.001)
lr_metric = get_lr_metric(optimizer)
model = Model(inputs=[input, labels, input_length, label_length], outputs=loss_out)
# model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='adam', metrics=['accuracy'])
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer, metrics=['accuracy', lr_metric])
return basemodel, model
if __name__ == '__main__':
char_set = open('char_std_5990.txt', 'r', encoding='utf-8').readlines()
char_set = ''.join([ch.strip('\n') for ch in char_set][1:] + ['卍'])
nclass = len(char_set)
K.set_session(get_session())
# reload(densenet)
basemodel, model = get_model(img_h, nclass)
modelPath = './models/pretrain_model/keras.h5'
if os.path.exists(modelPath):
print("Loading model weights...")
basemodel.load_weights(modelPath)
print('done!')
train_loader = gen('data_train.txt', './images', batchsize=batch_size, maxlabellength=maxlabellength,
imagesize=(img_h, img_w))
test_loader = gen('data_test.txt', './images', batchsize=batch_size, maxlabellength=maxlabellength,
imagesize=(img_h, img_w))
# checkpoint = ModelCheckpoint(filepath='./models/weights_densenet-{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', save_best_only=False, save_weights_only=True)
checkpoint = ModelCheckpoint(filepath='./models/weights_densenet-{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss',
save_best_only=False)
lr_schedule = lambda epoch: 0.0005 * 0.4 ** epoch
learning_rate = np.array([lr_schedule(i) for i in range(10)])
changelr = LearningRateScheduler(lambda epoch: float(learning_rate[epoch]))
earlystop = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
tensorboard = TensorBoard(log_dir='./models/logs', write_graph=True)
print('-----------Start training-----------')
model.fit_generator(train_loader,
# steps_per_epoch = 3607567 // batch_size,
steps_per_epoch=100 // batch_size,
epochs=10,
initial_epoch=0,
validation_data=test_loader,
# validation_steps = 36440 // batch_size,
validation_steps=100 // batch_size,
callbacks=[checkpoint, earlystop, changelr, tensorboard])
|
the-stack_106_29835 | from __future__ import print_function
import time
import zlib
import errno
import select
import signal
import logging
import tempfile
import threading
import subprocess
from agent_module import queue, noraise
mod_name = "cli"
__version__ = (1, 0)
logger = logging.getLogger("agent.cli")
class Proc(object):
"""Background process class"""
STDOUT = 0
STDERR = 1
EXIT_CODE = 2
term_timeout = 1
kill_timeout = 1
RUNNING = 0
TERM_SEND = 1
KILL_SEND = 2
def __init__(self, cmd, timeout, input_data=None, merge_out=True):
self.input_data = input_data
if isinstance(cmd, unicode):
cmd = cmd.encode("utf8")
self.cmd = cmd
self.timeout = timeout
self.merge_out = merge_out
self.proc = None
self.end_time = None
self.input_file = None
self.output_q = None
self.state = None
def spawn(self):
if self.input_data:
self.input_file = tempfile.TemporaryFile(prefix="inp_data")
if self.merge_out:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
self.proc = subprocess.Popen(self.cmd,
shell=isinstance(self.cmd, str),
stdout=subprocess.PIPE,
stderr=stderr,
stdin=self.input_file)
self.state = self.RUNNING
self.output_q = queue.Queue()
if self.timeout:
self.end_time = time.time() + self.timeout
if self.input_data:
self.input_file.write(self.input_data)
self.input_file.close()
watch_th = threading.Thread(target=self.watch_proc_th)
watch_th.daemon = True
watch_th.start()
def on_timeout(self):
try:
if self.state == self.RUNNING:
self.term()
self.end_time = time.time() + self.term_timeout
elif self.state == self.TERM_SEND:
self.kill()
self.end_time = time.time() + self.kill_timeout
else:
assert self.state == self.KILL_SEND
raise RuntimeError("Can't kill process")
except OSError as exc:
if exc.errno == errno.ESRCH:
pass
def watch_proc_th(self):
output_size = {self.STDOUT: 0, self.STDERR: 0}
def pipe_thread(pipe, code):
while True:
data = pipe.read(1024)
if not data:
return
output_size[code] += len(data)
self.output_q.put((code, data))
ths = [threading.Thread(target=pipe_thread, args=(self.proc.stdout, self.STDOUT))]
if not self.merge_out:
ths.append(threading.Thread(target=pipe_thread, args=(self.proc.stderr, self.STDERR)))
for th in ths:
th.daemon = True
th.start()
while True:
for th in ths:
try:
th.join(timeout=self.end_time - time.time())
except:
self.on_timeout()
break
else:
break
if self.end_time is None:
self.proc.wait()
else:
self.proc.poll()
while self.proc.returncode is None:
while time.time() < self.end_time:
if self.proc.poll() is not None:
break
time.sleep(0.1)
self.on_timeout()
self.output_q.put((self.EXIT_CODE, self.proc.returncode))
logger.debug("Proc %r returns %s and provides %s bytes of output", self.cmd, self.proc.returncode, output_size)
def get_updates(self):
stdout_data = ""
stderr_data = ""
code = None
while not self.output_q.empty():
msg_code, data = self.output_q.get()
if msg_code == self.STDOUT:
assert code is None, "Data after exit_code"
stdout_data += data
elif msg_code == self.STDERR:
assert code is None, "Data after exit_code"
stderr_data += data
elif msg_code == self.EXIT_CODE:
assert code is None, "Exit code after exit_code"
code = data
else:
assert False, "Unknown typecode {0}".format(msg_code)
return code, stdout_data, stderr_data
def term(self):
self.proc.terminate()
def kill(self, signal=signal.SIGKILL):
self.proc.send_signal(signal)
procs_lock = threading.Lock()
proc_id = 0
procs = {}
@noraise
def rpc_spawn(cmd, timeout=None, input_data=None, merge_out=False):
global proc_id
logger.info("CMD start requested: %s", cmd)
proc = Proc(cmd, timeout, input_data, merge_out=merge_out)
proc.spawn()
with procs_lock:
curr_id = proc_id
proc_id += 1
procs[curr_id] = proc
return curr_id
@noraise
def rpc_killall(signal=signal.SIGKILL):
logger.info("Signal %s is requested for all procs", signal)
with procs_lock:
for proc in procs.values():
try:
proc.kill(signal)
except:
pass
@noraise
def rpc_kill(proc_id, signal=signal.SIGKILL):
with procs_lock:
proc = procs[proc_id]
logger.info("Signal %s is requested for %s", signal, proc.cmd)
proc.kill(signal)
@noraise
def rpc_get_updates(proc_id, compress_limit=None):
with procs_lock:
proc = procs[proc_id]
ecode, d_out, d_err = proc.get_updates()
if ecode is not None:
with procs_lock:
del procs[proc_id]
if compress_limit is not None:
cumulative_out = d_out + d_err
if len(cumulative_out) > compress_limit:
return ecode, True, zlib.compress(cumulative_out), len(d_out)
return ecode, False, cumulative_out, len(d_out)
return ecode, d_out, d_err
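

# Rough local usage sketch (illustrative only; assumes the rpc_* helpers
# return their normal values): spawn a shell command, poll for output until
# an exit code arrives, then print the collected output.
if __name__ == "__main__":
    pid = rpc_spawn("echo hello", timeout=10, merge_out=True)
    collected = ""
    exit_code = None
    while exit_code is None:
        time.sleep(0.1)
        exit_code, out, err = rpc_get_updates(pid)
        collected += out + err
    print("exit code:", exit_code, "output:", collected)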
|
the-stack_106_29836 | import builtins
import numpy as np
from yt._maintenance.deprecation import issue_deprecation_warning
from yt.config import ytcfg
from yt.funcs import get_brewer_cmap, mylog
from yt.units.yt_array import YTQuantity
from yt.utilities import png_writer as pw
from yt.utilities.exceptions import YTNotInsideNotebook
from yt.utilities.lib import image_utilities as au
from . import _colormap_data as cmd
from ._commons import get_canvas, validate_image_name
from .color_maps import mcm
def scale_image(image, mi=None, ma=None):
r"""Scale an image ([NxNxM] where M = 1-4) to be uint8 and values scaled
from [0,255].
Parameters
----------
image : array_like or tuple of image info
Examples
--------
>>> image = scale_image(image)
    >>> image = scale_image(image, mi=0, ma=1000)
"""
if isinstance(image, np.ndarray) and image.dtype == np.uint8:
return image
if isinstance(image, (tuple, list)):
image, mi, ma = image
if mi is None:
mi = image.min()
if ma is None:
ma = image.max()
image = (np.clip((image - mi) / (ma - mi) * 255, 0, 255)).astype("uint8")
return image
def multi_image_composite(
fn, red_channel, blue_channel, green_channel=None, alpha_channel=None
):
r"""Write an image with different color channels corresponding to different
quantities.
Accepts at least a red and a blue array, of shape (N,N) each, that are
optionally scaled and composited into a final image, written into `fn`.
Can also accept green and alpha.
Parameters
----------
fn : string
Filename to save
red_channel : array_like or tuple of image info
Array, of shape (N,N), to be written into the red channel of the output
image. If not already uint8, will be converted (and scaled) into
uint8. Optionally, you can also specify a tuple that includes scaling
information, in the form of (array_to_plot, min_value_to_scale,
max_value_to_scale).
blue_channel : array_like or tuple of image info
Array, of shape (N,N), to be written into the blue channel of the output
image. If not already uint8, will be converted (and scaled) into
uint8. Optionally, you can also specify a tuple that includes scaling
information, in the form of (array_to_plot, min_value_to_scale,
max_value_to_scale).
green_channel : array_like or tuple of image info, optional
Array, of shape (N,N), to be written into the green channel of the
output image. If not already uint8, will be converted (and scaled)
into uint8. If not supplied, will be left empty. Optionally, you can
also specify a tuple that includes scaling information, in the form of
(array_to_plot, min_value_to_scale, max_value_to_scale).
alpha_channel : array_like or tuple of image info, optional
Array, of shape (N,N), to be written into the alpha channel of the output
image. If not already uint8, will be converted (and scaled) into uint8.
If not supplied, will be made fully opaque. Optionally, you can also
specify a tuple that includes scaling information, in the form of
(array_to_plot, min_value_to_scale, max_value_to_scale).
Examples
--------
>>> red_channel = np.log10(frb["Temperature"])
>>> blue_channel = np.log10(frb["Density"])
>>> multi_image_composite("multi_channel1.png", red_channel, blue_channel)
"""
red_channel = scale_image(red_channel)
blue_channel = scale_image(blue_channel)
if green_channel is None:
green_channel = np.zeros(red_channel.shape, dtype="uint8")
else:
green_channel = scale_image(green_channel)
if alpha_channel is None:
alpha_channel = np.zeros(red_channel.shape, dtype="uint8") + 255
else:
alpha_channel = scale_image(alpha_channel)
image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
image = image.transpose().copy() # Have to make sure it's contiguous
pw.write_png(image, fn)
def write_bitmap(bitmap_array, filename, max_val=None, transpose=False):
r"""Write out a bitmapped image directly to a PNG file.
This accepts a three- or four-channel `bitmap_array`. If the image is not
already uint8, it will be scaled and converted. If it is four channel,
only the first three channels will be scaled, while the fourth channel is
assumed to be in the range of [0,1]. If it is not four channel, a fourth
alpha channel will be added and set to fully opaque. The resultant image
will be directly written to `filename` as a PNG with no colormap applied.
`max_val` is a value used if the array is passed in as anything other than
uint8; it will be the value used for scaling and clipping in the first
three channels when the array is converted. Additionally, the minimum is
assumed to be zero; this makes it primarily suited for the results of
volume rendered images, rather than misaligned projections.
Parameters
----------
bitmap_array : array_like
Array of shape (N,M,3) or (N,M,4), to be written. If it is not already
a uint8 array, it will be scaled and converted to uint8.
filename : string
Filename to save to. If None, PNG contents will be returned as a
string.
max_val : float, optional
The upper limit to clip values to in the output, if converting to uint8.
        If `bitmap_array` is already uint8, this will be ignored.
transpose : boolean, optional
If transpose is False, we assume that the incoming bitmap_array is such
that the first element resides in the upper-left corner. If True, the
first element will be placed in the lower-left corner.
"""
if len(bitmap_array.shape) != 3 or bitmap_array.shape[-1] not in (3, 4):
raise RuntimeError(
"Expecting image array of shape (N,M,3) or "
"(N,M,4), received %s" % str(bitmap_array.shape)
)
if bitmap_array.dtype != np.uint8:
s1, s2 = bitmap_array.shape[:2]
if bitmap_array.shape[-1] == 3:
alpha_channel = 255 * np.ones((s1, s2, 1), dtype="uint8")
else:
alpha_channel = (255 * bitmap_array[:, :, 3]).astype("uint8")
alpha_channel.shape = s1, s2, 1
if max_val is None:
max_val = bitmap_array[:, :, :3].max()
bitmap_array = np.clip(bitmap_array[:, :, :3] / max_val, 0.0, 1.0) * 255
bitmap_array = np.concatenate(
[bitmap_array.astype("uint8"), alpha_channel], axis=-1
)
if transpose:
bitmap_array = bitmap_array.swapaxes(0, 1).copy(order="C")
if filename is not None:
pw.write_png(bitmap_array, filename)
else:
return pw.write_png_to_string(bitmap_array.copy())
return bitmap_array
def write_image(image, filename, color_bounds=None, cmap_name=None, func=lambda x: x):
r"""Write out a floating point array directly to a PNG file, scaling it and
applying a colormap.
This function will scale an image and directly call libpng to write out a
colormapped version of that image. It is designed for rapid-fire saving of
image buffers generated using `yt.visualization.api.FixedResolutionBuffers`
and the likes.
Parameters
----------
image : array_like
This is an (unscaled) array of floating point values, shape (N,N,) to
save in a PNG file.
filename : string
Filename to save as.
color_bounds : tuple of floats, optional
The min and max to scale between. Outlying values will be clipped.
cmap_name : string, optional
An acceptable colormap. See either yt.visualization.color_maps or
https://scipy-cookbook.readthedocs.io/items/Matplotlib_Show_colormaps.html .
func : function, optional
A function to transform the buffer before applying a colormap.
Returns
-------
scaled_image : uint8 image that has been saved
Examples
--------
>>> sl = ds.slice(0, 0.5, "Density")
>>> frb1 = FixedResolutionBuffer(sl, (0.2, 0.3, 0.4, 0.5),
(1024, 1024))
>>> write_image(frb1["Density"], "saved.png")
"""
if cmap_name is None:
cmap_name = ytcfg.get("yt", "default_colormap")
if len(image.shape) == 3:
mylog.info("Using only channel 1 of supplied image")
image = image[:, :, 0]
to_plot = apply_colormap(image, color_bounds=color_bounds, cmap_name=cmap_name)
pw.write_png(to_plot, filename)
return to_plot
def apply_colormap(image, color_bounds=None, cmap_name=None, func=lambda x: x):
r"""Apply a colormap to a floating point image, scaling to uint8.
This function will scale an image and directly call libpng to write out a
colormapped version of that image. It is designed for rapid-fire saving of
image buffers generated using `yt.visualization.api.FixedResolutionBuffers`
and the likes.
Parameters
----------
image : array_like
This is an (unscaled) array of floating point values, shape (N,N,) to
save in a PNG file.
color_bounds : tuple of floats, optional
The min and max to scale between. Outlying values will be clipped.
cmap_name : string, optional
An acceptable colormap. See either yt.visualization.color_maps or
https://scipy-cookbook.readthedocs.io/items/Matplotlib_Show_colormaps.html .
func : function, optional
A function to transform the buffer before applying a colormap.
Returns
-------
to_plot : uint8 image with colorbar applied.
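
    Examples
    --------
    >>> # illustrative call; any 2D array of floats works here
    >>> buff = np.random.random((256, 256))
    >>> rgba = apply_colormap(buff, cmap_name="algae")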
"""
if cmap_name is None:
cmap_name = ytcfg.get("yt", "default_colormap")
from yt.data_objects.image_array import ImageArray
image = ImageArray(func(image))
if color_bounds is None:
mi = np.nanmin(image[~np.isinf(image)]) * image.uq
ma = np.nanmax(image[~np.isinf(image)]) * image.uq
color_bounds = mi, ma
else:
color_bounds = [YTQuantity(func(c), image.units) for c in color_bounds]
image = (image - color_bounds[0]) / (color_bounds[1] - color_bounds[0])
to_plot = map_to_colors(image, cmap_name)
to_plot = np.clip(to_plot, 0, 255)
return to_plot
def map_to_colors(buff, cmap_name):
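    """Map a buffer of normalized values onto the named colormap's lookup
    table, returning uint8 color channels. Falls back to matplotlib or
    palettable/brewer2mpl colormaps when the name is not in the extracted
    colormap tables."""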
try:
lut = cmd.color_map_luts[cmap_name]
except KeyError as e:
try:
# if cmap is tuple, then we're using palettable or brewer2mpl cmaps
if isinstance(cmap_name, tuple):
cmap = get_brewer_cmap(cmap_name)
else:
cmap = mcm.get_cmap(cmap_name)
cmap(0.0)
lut = cmap._lut.T
except ValueError:
raise KeyError(
"Your color map (%s) was not found in either the extracted"
" colormap file or matplotlib colormaps" % cmap_name
) from e
if isinstance(cmap_name, tuple):
# If we are using the colorbrewer maps, don't interpolate
shape = buff.shape
# We add float_eps so that digitize doesn't go out of bounds
x = np.mgrid[0.0 : 1.0 + np.finfo(np.float32).eps : lut[0].shape[0] * 1j]
inds = np.digitize(buff.ravel(), x)
inds.shape = (shape[0], shape[1])
mapped = np.dstack([(v[inds] * 255).astype("uint8") for v in lut])
del inds
else:
x = np.mgrid[0.0 : 1.0 : lut[0].shape[0] * 1j]
mapped = np.dstack([(np.interp(buff, x, v) * 255).astype("uint8") for v in lut])
return mapped.copy("C")
def strip_colormap_data(
fn="color_map_data.py",
cmaps=(
"jet",
"algae",
"hot",
"gist_stern",
"RdBu",
"kamae",
"kelp",
"arbre",
"octarine",
"dusk",
),
):
import pprint
from . import color_maps as rcm
f = open(fn, "w")
f.write("### Auto-generated colormap tables, taken from Matplotlib ###\n\n")
f.write("from numpy import array\n")
f.write("color_map_luts = {}\n\n\n")
if cmaps is None:
cmaps = rcm.ColorMaps
if isinstance(cmaps, str):
cmaps = [cmaps]
for cmap_name in sorted(cmaps):
vals = rcm._extract_lookup_table(cmap_name)
f.write(f"### {cmap_name} ###\n\n")
f.write(f"color_map_luts['{cmap_name}'] = \\\n")
f.write(" (\n")
for v in vals:
f.write(pprint.pformat(v, indent=3))
f.write(",\n")
f.write(" )\n\n")
f.close()
def splat_points(image, points_x, points_y, contribution=None, transposed=False):
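    """Splat normalized point coordinates onto a copy of ``image``.

    Each point deposits ``contribution / points_x.size`` (contribution
    defaults to 100.0) at its location; ``transposed`` mirrors the
    coordinates. The input image is left unchanged.
    """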
if contribution is None:
contribution = 100.0
val = contribution * 1.0 / points_x.size
if transposed:
points_y = 1.0 - points_y
points_x = 1.0 - points_x
im = image.copy()
au.add_points_to_image(im, points_x, points_y, val)
return im
def write_projection(
data,
filename,
colorbar=True,
colorbar_label=None,
title=None,
vmin=None,
vmax=None,
limits=None,
take_log=True,
figsize=(8, 6),
dpi=100,
cmap_name=None,
extent=None,
xlabel=None,
ylabel=None,
):
r"""Write a projection or volume rendering to disk with a variety of
pretty parameters such as limits, title, colorbar, etc. write_projection
uses the standard matplotlib interface to create the figure. N.B. This code
only works *after* you have created the projection using the standard
framework (i.e. the Camera interface or off_axis_projection).
Accepts an NxM sized array representing the projection itself as well
as the filename to which you will save this figure. Note that the final
resolution of your image will be a product of dpi/100 * figsize.
Parameters
----------
data : array_like
image array as output by off_axis_projection or camera.snapshot()
filename : string
the filename where the data will be saved
colorbar : boolean
do you want a colorbar generated to the right of the image?
colorbar_label : string
the label associated with your colorbar
title : string
the label at the top of the figure
vmin : float or None
the lower limit of the zaxis (part of matplotlib api)
vmax : float or None
        the upper limit of the zaxis (part of matplotlib api)
take_log : boolean
plot the log of the data array (and take the log of the limits if set)?
figsize : array_like
width, height in inches of final image
dpi : int
final image resolution in pixels / inch
cmap_name : string
The name of the colormap.
Examples
--------
>>> image = off_axis_projection(ds, c, L, W, N, "Density", no_ghost=False)
    >>> write_projection(image, 'test.png',
    ...                  colorbar_label="Column Density (cm$^{-2}$)",
    ...                  title="Offaxis Projection", vmin=1e-5, vmax=1e-3,
    ...                  take_log=True)
"""
if cmap_name is None:
cmap_name = ytcfg.get("yt", "default_colormap")
import matplotlib.colors
import matplotlib.figure
if limits is not None:
if vmin is not None or vmax is not None:
raise ValueError(
"The `limits` keyword argument is deprecated and can not "
"be used simultaneously with `vmin` or `vmax`."
)
issue_deprecation_warning(
"The `limits` keyword argument is deprecated and will "
"be removed in a future version of yt. Use `vmin` and `vmax` instead.",
since="4.0.0",
removal="4.1.0",
)
vmin, vmax = limits
# If this is rendered as log, then apply now.
if take_log:
norm_cls = matplotlib.colors.LogNorm
else:
norm_cls = matplotlib.colors.Normalize
norm = norm_cls(vmin=vmin, vmax=vmax)
# Create the figure and paint the data on
fig = matplotlib.figure.Figure(figsize=figsize)
ax = fig.add_subplot(111)
cax = ax.imshow(
data.to_ndarray(),
norm=norm,
extent=extent,
cmap=cmap_name,
)
if title:
ax.set_title(title)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
# Suppress the x and y pixel counts
if extent is None:
ax.set_xticks(())
ax.set_yticks(())
# Add a color bar and label if requested
if colorbar:
cbar = fig.colorbar(cax)
if colorbar_label:
cbar.ax.set_ylabel(colorbar_label)
filename = validate_image_name(filename)
canvas = get_canvas(fig, filename)
mylog.info("Saving plot %s", filename)
fig.tight_layout()
canvas.print_figure(filename, dpi=dpi)
return filename
def display_in_notebook(image, max_val=None):
"""
A helper function to display images in an IPython notebook
Must be run from within an IPython notebook, or else it will raise
a YTNotInsideNotebook exception.
Parameters
----------
image : array_like
This is an (unscaled) array of floating point values, shape (N,N,3) or
(N,N,4) to display in the notebook. The first three channels will be
scaled automatically.
max_val : float, optional
The upper limit to clip values of the image. Only applies to the first
three channels.
"""
if "__IPYTHON__" in dir(builtins):
from IPython.core.displaypub import publish_display_data
data = write_bitmap(image, None, max_val=max_val)
publish_display_data(
data={"image/png": data},
source="yt.visualization.image_writer.display_in_notebook",
)
else:
raise YTNotInsideNotebook
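# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal demo of how
# map_to_colors and write_projection might be driven with synthetic data. It
# assumes yt is installed; the array values, units, colormap name and output
# filename are made-up illustration choices, not defaults of this module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    from yt.units.yt_array import YTArray

    # A fake 64x64 "projection" with positive values so LogNorm stays valid.
    fake = YTArray(np.random.uniform(1e-5, 1e-3, size=(64, 64)), "g/cm**2")

    # Map a [0, 1]-normalized copy of the buffer through a colormap LUT.
    norm_buff = np.asarray((fake - fake.min()) / (fake.max() - fake.min()))
    rgba = map_to_colors(norm_buff, "hot")  # -> (64, 64, 4) uint8 image

    # Save the fake projection to disk with a colorbar and a log scale.
    write_projection(
        fake,
        "fake_projection.png",
        colorbar_label="Column Density (g cm$^{-2}$)",
        title="Synthetic projection",
        take_log=True,
    )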
|
the-stack_106_29837 | # This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from mapproxy.client.arcgis import ArcGISInfoClient
from mapproxy.layer import InfoQuery
from mapproxy.request.arcgis import ArcGISIdentifyRequest
from mapproxy.srs import SRS
from mapproxy.test.http import assert_query_eq
TESTSERVER_ADDRESS = ('127.0.0.1', 56413)
TESTSERVER_URL = 'http://%s:%s' % TESTSERVER_ADDRESS
class MockHTTPClient(object):
def __init__(self):
self.requested = []
def open(self, url, data=None):
self.requested.append(url)
result = BytesIO(b'{}')
result.seek(0)
result.headers = {}
return result
class TestArcGISInfoClient(object):
def test_fi_request(self):
req = ArcGISIdentifyRequest(url=TESTSERVER_URL + '/MapServer/export?map=foo', param={'layers':'foo'})
http = MockHTTPClient()
wms = ArcGISInfoClient(req, http_client=http, supported_srs=[SRS(4326)])
fi_req = InfoQuery((8, 50, 9, 51), (512, 512),
SRS(4326), (128, 64), 'text/plain')
wms.get_info(fi_req)
assert_query_eq(http.requested[0],
TESTSERVER_URL+'/MapServer/identify?map=foo'
'&imageDisplay=512,512,96&sr=4326&f=json'
'&layers=foo&tolerance=5&returnGeometry=false'
'&geometryType=esriGeometryPoint&geometry=8.250000,50.875000'
'&mapExtent=8,50,9,51',
fuzzy_number_compare=True)
def test_transform_fi_request_supported_srs(self):
req = ArcGISIdentifyRequest(url=TESTSERVER_URL + '/MapServer/export?map=foo', param={'layers':'foo'})
http = MockHTTPClient()
wms = ArcGISInfoClient(req, http_client=http, supported_srs=[SRS(25832)])
fi_req = InfoQuery((8, 50, 9, 51), (512, 512),
SRS(4326), (128, 64), 'text/plain')
wms.get_info(fi_req)
assert_query_eq(http.requested[0],
TESTSERVER_URL+'/MapServer/identify?map=foo'
'&imageDisplay=512,797,96&sr=25832&f=json'
'&layers=foo&tolerance=5&returnGeometry=false'
'&geometryType=esriGeometryPoint&geometry=447229.979084,5636149.370634'
'&mapExtent=428333.552496,5538630.70275,500000.0,5650300.78652',
fuzzy_number_compare=True)
|
the-stack_106_29839 | import asyncio
import os
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
from multiprocessing import Queue
from typing import Callable
from typing import Dict
from typing import Any
class Executor():
MAX_WORKERS = 10
processes = MAX_WORKERS or os.cpu_count()
executor = ThreadPoolExecutor(max_workers=processes)
def __init__(self,
interval,
func, fargs,
timeout=None,
callback_timeout=None,
daemon=False,
debug=False):
"""
Periodic process executor. Calls func and sleeps for interval,
repeatedly. Kills the process after a timeout.
Call schedule() to put it into asyncio loop.
:param interval: sleep interval between calls, in seconds. If None, Executor will only execute once.
:param func: the function to call
:param fargs: function args (tuple) or a single arg
:param timeout: kill the process after this many seconds
:param callback_timeout: will be called if the process gets killed on timeout
:param daemon:
"""
self.interval = interval
self.params = {'func': func, 'fn_args': fargs, "p_kwargs": {},
'timeout': timeout, 'callback_timeout': callback_timeout,
'daemon': daemon}
self.process = None
self.oneshot = interval is None
self.should_stop = False
self.debug = debug
async def start(self):
""" start calling the process periodically """
while not self.should_stop:
self.executor.submit(self._submit_unpack_kwargs, self.params)
if self.oneshot:
break
await asyncio.sleep(self.interval)
def stop(self):
""" terminate running process """
self.should_stop = True
if self.process:
self.process.terminate()
def _submit_unpack_kwargs(self, params):
""" unpack the kwargs and call submit """
return self._submit(**params)
def _submit(self,
func: Callable,
fn_args: Any,
p_kwargs: Dict,
timeout: float,
callback_timeout: Callable[[Any], Any],
daemon: bool):
"""
Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as func(*args, **kwargs) in a new
process.
:param func: the function to execute
:param fn_args: the arguments to pass to the function. Can be one argument
or a tuple of multiple args.
:param p_kwargs: the kwargs to pass to the function
:param timeout: after this time, the process executing the function
will be killed if it did not finish
:param callback_timeout: this function will be called with the same
arguments, if the task times out.
:param daemon: run the child process as daemon
:return: the result of the function, or None if the process failed or
timed out
"""
p_args = fn_args if isinstance(fn_args, tuple) else (fn_args,)
queue = Queue()
if self.debug:
print("Executor: starting {} {}".format(func.__name__, p_args))
p = Process(target=self._process_run,
args=(queue, func, *p_args,), kwargs=p_kwargs)
if daemon:
p.daemon = True
self.process = p
p.start()
p.join(timeout=timeout)
if not queue.empty():
return queue.get()
if callback_timeout:
callback_timeout(*p_args, **p_kwargs)
if p.is_alive():
if self.debug:
print('Executor: terminating by timeout')
p.terminate()
p.join()
@staticmethod
def _process_run(queue: Queue, func: Callable[[Any], Any] = None,
*args, **kwargs):
"""
Executes the specified function as func(*args, **kwargs).
The result will be stored in the shared dictionary
:param func: the function to execute
:param queue: a Queue
"""
queue.put(func(*args, **kwargs))
def schedule(executor: Executor) -> asyncio.Future:
"""
Put executor into asyncio loop.
:param executor:
:return: executor.start() wrapped in Future
"""
return asyncio.ensure_future(executor.start())
def spin():
asyncio.get_event_loop().run_forever()
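# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): one way the Executor,
# schedule() and spin() helpers above might be wired together. The interval,
# timeout and worker function below are illustrative values, not project
# defaults. The worker is defined at module level so it stays picklable for
# multiprocessing on spawn-based platforms.
# ---------------------------------------------------------------------------
def _demo_work(name):
    # Runs in a separate process every `interval` seconds.
    print("hello from", name)
    return name

def _demo_timeout(name):
    # Called with the same arguments if the child process is killed on timeout.
    print("timed out:", name)

if __name__ == "__main__":
    ex = Executor(interval=2.0,             # call every 2 seconds
                  func=_demo_work,
                  fargs="demo",             # a single arg is wrapped into a tuple
                  timeout=5.0,              # kill the child after 5 seconds
                  callback_timeout=_demo_timeout,
                  daemon=True,
                  debug=True)
    schedule(ex)   # wrap ex.start() into an asyncio Future on the default loop
    spin()         # run the event loop forever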
|
the-stack_106_29840 | from smilPython import *
import time
# Load an image
imIn= Image("https://smil.cmm.minesparis.psl.eu/images/DNA_small.png")
imThresh = Image(imIn)
imDist = Image(imIn)
imIn.show()
imThresh.show()
imDist.showLabel()
def displMax():
print("Distance max value: " + str(rangeVal(imDist)[1]))
links = linkManager()
links.add(imIn, threshold, imIn, 255, imThresh)
links.add(imThresh, dist, imThresh, imDist)
links.add(imDist, displMax)
for i in range(1, 10):
print("\nThreshold level: " + str(i*10))
links[0].args[1] = i*10
Gui.processEvents() # refresh images
time.sleep(1)
|
the-stack_106_29841 | ## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
import sys
from Pubnub import PubnubTornado as Pubnub
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or ''
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on)
channel = 'hello_world'
message = 'Hello World !!!'
# Asynchronous usage
def callback(message):
print(message)
pubnub.publish(channel, message, callback=callback, error=callback)
pubnub.start()
|
the-stack_106_29843 | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao ([email protected])
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import json_tricks as json
from collections import OrderedDict
import numpy as np
from scipy.io import loadmat, savemat
from dataset.JointsDataset import JointsDataset
logger = logging.getLogger(__name__)
class MPIIDataset(JointsDataset):
"""
{ 0 - r ankle,
1 - r knee,
2 - r hip,
3 - l hip,
4 - l knee,
5 - l ankle,
6 - pelvis,
7 - thorax,
8 - upper neck,
9 - head top,
10 - r wrist,
11 - r elbow,
12 - r shoulder,
13 - l shoulder,
14 - l elbow,
15 - l wrist }
"""
def __init__(self, cfg, root, image_set, is_train, transform=None):
super().__init__(cfg, root, image_set, is_train, transform)
        # see mpii.vsdx for a diagram of the joint layout
self.num_joints = 16
self.flip_pairs = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
self.parent_ids = [1, 2, 6, 6, 3, 4, 6, 6, 7, 8, 11, 12, 7, 7, 13, 14]
self.upper_body_ids = (7, 8, 9, 10, 11, 12, 13, 14, 15)
self.lower_body_ids = (0, 1, 2, 3, 4, 5, 6)
self.db = self._get_db()
if is_train and cfg.DATASET.SELECT_DATA:
self.db = self.select_data(self.db)
logger.info('=> load {} samples'.format(len(self.db)))
    # read the json annotation file and return the processed ground-truth database
def _get_db(self):
# create train/val split
        # the train/val/test subsets are handled separately; see mpii_test.json for the format
file_name = os.path.join(
self.root, 'annot', self.image_set + '.json'
)
with open(file_name) as anno_file:
anno = json.load(anno_file)
gt_db = []
for a in anno:
            # see mpii_README.md for details on the annotation fields
            image_name = a['image']  # image file name; image = {str}
            c = np.array(a['center'], dtype=np.float)  # rough center of the person in the image; center = {ndarray: (2,)}
            # make the person box square
            # TODO detail: could the box be set to another shape?
            s = np.array([a['scale'], a['scale']], dtype=np.float)  # person box height / 200 pixels; scale = {float64}
# Adjust center/scale slightly to avoid cropping limbs
            # shift the person center and enlarge the scale to avoid cropping limbs
if c[0] != -1:
c[1] = c[1] + 15 * s[1]
s = s * 1.25
            # MPII uses matlab format, where indices are 1-based,
            # so we first convert to 0-based indices
            # (matlab-style indices start at 1)
c = c - 1
            # TODO: a third (zero) column is added so the format generalizes to 3D
joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)
joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)
if self.image_set != 'test':
                joints = np.array(a['joints'])  # coordinates of all joints; joints = {ndarray: (16, 2)}
                joints[:, 0:2] = joints[:, 0:2] - 1
                joints_vis = np.array(a['joints_vis'])  # joint visibility flags; joints_vis = {ndarray: (16,)}
assert len(joints) == self.num_joints, \
'joint num diff: {} vs {}'.format(len(joints),
self.num_joints)
joints_3d[:, 0:2] = joints[:, 0:2]
joints_3d_vis[:, 0] = joints_vis[:]
joints_3d_vis[:, 1] = joints_vis[:]
image_dir = 'images.zip@' if self.data_format == 'zip' else 'images'
gt_db.append(
{
'image': os.path.join(self.root, image_dir, image_name),
'center': c,
'scale': s,
'joints_3d': joints_3d,
'joints_3d_vis': joints_3d_vis,
'filename': '',
'imgnum': 0,
}
)
return gt_db
    # evaluate the PCKh metric from the predicted joint coordinates on the test set
def evaluate(self, cfg, preds, output_dir, *args, **kwargs):
# convert 0-based index to 1-based index
preds = preds[:, :, 0:2] + 1.0
if output_dir:
pred_file = os.path.join(output_dir, 'pred.mat')
savemat(pred_file, mdict={'preds': preds})
if 'test' in cfg.DATASET.TEST_SET:
return {'Null': 0.0}, 0.0
SC_BIAS = 0.6
threshold = 0.5
gt_file = os.path.join(cfg.DATASET.ROOT,
'annot',
'gt_{}.mat'.format(cfg.DATASET.TEST_SET))
gt_dict = loadmat(gt_file)
        dataset_joints = gt_dict['dataset_joints']  # joint names; dataset_joints = {ndarray: (1, 16)}
        jnt_missing = gt_dict['jnt_missing']  # whether a joint is unannotated; jnt_missing = {ndarray: (16, 2958)}
        pos_gt_src = gt_dict['pos_gt_src']  # ground-truth joint coordinates; pos_gt_src = {ndarray: (16, 2, 2958)}
        headboxes_src = gt_dict['headboxes_src']  # top-left and bottom-right corners of the head box; headboxes_src = {ndarray: (2, 2, 2958)}
        # transpose batch x num_joints x coordinate to match the .mat layout
        pos_pred_src = np.transpose(preds, [1, 2, 0])  # predicted joint positions; pos_pred_src = {ndarray: (16, 2, 2958)}
        # extract the joint indices by name
        # head_all = np.where(dataset_joints == 'head')  # head_all: {tuple: 2}
        head = np.where(dataset_joints == 'head')[1][0]  # head index; head = {int64}: 9
lsho = np.where(dataset_joints == 'lsho')[1][0]
lelb = np.where(dataset_joints == 'lelb')[1][0]
lwri = np.where(dataset_joints == 'lwri')[1][0]
lhip = np.where(dataset_joints == 'lhip')[1][0]
lkne = np.where(dataset_joints == 'lkne')[1][0]
lank = np.where(dataset_joints == 'lank')[1][0]
rsho = np.where(dataset_joints == 'rsho')[1][0]
relb = np.where(dataset_joints == 'relb')[1][0]
rwri = np.where(dataset_joints == 'rwri')[1][0]
rkne = np.where(dataset_joints == 'rkne')[1][0]
rank = np.where(dataset_joints == 'rank')[1][0]
rhip = np.where(dataset_joints == 'rhip')[1][0]
        jnt_visible = 1 - jnt_missing  # whether each joint is annotated; jnt_visible = {ndarray: (16, 2958)}
        uv_error = pos_pred_src - pos_gt_src  # joint position error; uv_error = {ndarray: (16, 2, 2958)}
        # np.linalg.norm computes the norm, the 2-norm by default
        uv_err = np.linalg.norm(uv_error, axis=1)  # error distance (2-norm over coordinates); uv_err = {ndarray: (16, 2958)}
        headsizes = headboxes_src[1, :, :] - headboxes_src[0, :, :]  # head box sizes; headsizes = {ndarray: (2, 2958)}
        headsizes = np.linalg.norm(headsizes, axis=0)
        headsizes *= SC_BIAS  # TODO detail: the head-box diagonal (scaled by SC_BIAS) is the normalization reference; headsizes = {ndarray: (2958,)}
        # np.multiply: element-wise product; the output has the same shape as the inputs
        scale = np.multiply(headsizes, np.ones((len(uv_err), 1)))  # every joint is normalized by the head size; scale = {ndarray: (16, 2958)}
        scaled_uv_err = np.divide(uv_err, scale)
        scaled_uv_err = np.multiply(scaled_uv_err, jnt_visible)  # normalized errors; scaled_uv_err = {ndarray: (16, 2958)}
        jnt_count = np.sum(jnt_visible, axis=1)  # number of annotated samples per joint; jnt_count = {ndarray: (16,)}
        less_than_threshold = np.multiply((scaled_uv_err <= threshold),
                                          jnt_visible)  # whether each joint error is below the threshold; less_than_threshold = {ndarray: (16, 2958)}
        # per-joint percentage of samples with normalized error <= 0.5; PCKh = {ndarray: (16,)}
        PCKh = np.divide(100. * np.sum(less_than_threshold, axis=1), jnt_count)
# save
        rng = np.arange(0, 0.5 + 0.01, 0.01)  # thresholds from 0 to 0.5
        pckAll = np.zeros((len(rng), 16))  # per-joint percentage below each threshold; pckAll = {ndarray: (len(rng), 16)}
for r in range(len(rng)):
threshold = rng[r]
less_than_threshold = np.multiply(scaled_uv_err <= threshold,
jnt_visible)
pckAll[r, :] = np.divide(100. * np.sum(less_than_threshold, axis=1),
jnt_count)
PCKh = np.ma.array(PCKh, mask=False)
        PCKh.mask[6:8] = True  # TODO detail: the pelvis and thorax (indices 6-7) are masked out
jnt_count = np.ma.array(jnt_count, mask=False)
jnt_count.mask[6:8] = True
jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64)
name_value = [
('Head', PCKh[head]),
            ('Shoulder', 0.5 * (PCKh[lsho] + PCKh[rsho])),  # TODO detail: symmetric joints are averaged together
('Elbow', 0.5 * (PCKh[lelb] + PCKh[relb])),
('Wrist', 0.5 * (PCKh[lwri] + PCKh[rwri])),
('Hip', 0.5 * (PCKh[lhip] + PCKh[rhip])),
('Knee', 0.5 * (PCKh[lkne] + PCKh[rkne])),
('Ankle', 0.5 * (PCKh[lank] + PCKh[rank])),
('Mean', np.sum(PCKh * jnt_ratio)),
('[email protected]', np.sum(pckAll[11, :] * jnt_ratio))
]
        name_value = OrderedDict(name_value)  # create an ordered dict
return name_value, name_value['Mean']
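# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): a minimal, standalone
# sketch of the PCKh@0.5 rule used in evaluate() above, on made-up toy arrays.
# A prediction counts as correct when its distance to the ground truth,
# divided by SC_BIAS * (head-box diagonal), is <= 0.5. `np` is already
# imported at module level.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sc_bias, thr = 0.6, 0.5
    gt = np.array([[10.0, 10.0], [50.0, 40.0]])    # two toy joints
    pred = np.array([[12.0, 11.0], [80.0, 90.0]])  # one close, one far
    head_diag = 30.0                               # toy head-box diagonal
    err = np.linalg.norm(pred - gt, axis=1) / (sc_bias * head_diag)
    pckh = 100.0 * np.mean(err <= thr)
    print("normalized errors:", err)   # roughly [0.12, 3.24]
    print("PCKh@0.5: %.1f%%" % pckh)   # 50.0% for this toy example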
|
the-stack_106_29845 | """This module contains the general information for PkiTP ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class PkiTPConsts():
CERT_STATUS_CERT_CHAIN_TOO_LONG = "certChainTooLong"
CERT_STATUS_EMPTY_CERT = "emptyCert"
CERT_STATUS_EXPIRED = "expired"
CERT_STATUS_FAILED_TO_VERIFY_WITH_PRIVATE_KEY = "failedToVerifyWithPrivateKey"
CERT_STATUS_FAILED_TO_VERIFY_WITH_TP = "failedToVerifyWithTp"
CERT_STATUS_NOT_YET_VALID = "notYetValid"
CERT_STATUS_REVOKED = "revoked"
CERT_STATUS_SELF_SIGNED_CERTIFICATE = "selfSignedCertificate"
CERT_STATUS_UNKNOWN = "unknown"
CERT_STATUS_VALID = "valid"
INT_ID_NONE = "none"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
POLICY_OWNER_UNSPECIFIED = "unspecified"
class PkiTP(ManagedObject):
"""This is PkiTP class."""
consts = PkiTPConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("PkiTP", "pkiTP", "tp-[name]", VersionMeta.Version101a, "InputOutput", 0x7f, [], ["aaa", "admin"], [u'orgDomainGroup', u'pkiEp'], [u'faultInst'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"cert_chain": MoPropertyMeta("cert_chain", "certChain", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, [], []),
"cert_status": MoPropertyMeta("cert_status", "certStatus", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["certChainTooLong", "emptyCert", "expired", "failedToVerifyWithPrivateKey", "failedToVerifyWithTp", "notYetValid", "revoked", "selfSignedCertificate", "unknown", "valid"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"fp": MoPropertyMeta("fp", "fp", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version101a, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101a, MoPropertyMeta.NAMING, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
"num_certs": MoPropertyMeta("num_certs", "numCerts", "uint", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["local", "pending-policy", "policy", "unspecified"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"certChain": "cert_chain",
"certStatus": "cert_status",
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"fp": "fp",
"intId": "int_id",
"name": "name",
"numCerts": "num_certs",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.cert_chain = None
self.cert_status = None
self.child_action = None
self.descr = None
self.fp = None
self.int_id = None
self.num_certs = None
self.policy_level = None
self.policy_owner = None
self.status = None
ManagedObject.__init__(self, "PkiTP", parent_mo_or_dn, **kwargs)
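# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated SDK file): creating a PkiTP
# managed object locally. The parent DN, name and property values are
# illustrative placeholders; pushing the MO to a live UCS Central domain would
# additionally require a session handle (e.g. add_mo()/commit()), not shown.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # "org-root" and the certificate text below are made-up examples.
    tp = PkiTP(parent_mo_or_dn="org-root", name="example-tp",
               cert_chain="-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
               descr="Example trusted point")
    print(tp.dn)   # expected "org-root/tp-example-tp", given rn = "tp-[name]"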
|