Dataset dump: Python source files, one record per file, with per-file metadata.

Metadata columns (name: type, observed range; ⌀ = nullable):
  hexsha: string (40 chars)
  size: int64 (1 to 1.03M)
  ext: string (10 classes)
  lang: string (1 class)
  max_stars_repo_path: string (3 to 239 chars)
  max_stars_repo_name: string (5 to 130 chars)
  max_stars_repo_head_hexsha: string (40 to 78 chars)
  max_stars_repo_licenses: sequence (1 to 10 items)
  max_stars_count: int64 (1 to 191k) ⌀
  max_stars_repo_stars_event_min_datetime / _max_datetime: string (24 chars) ⌀
  max_issues_repo_path / _name / _head_hexsha / _licenses: same shapes as the max_stars_* columns
  max_issues_count: int64 (1 to 67k) ⌀
  max_issues_repo_issues_event_min_datetime / _max_datetime: string (24 chars) ⌀
  max_forks_repo_path / _name / _head_hexsha / _licenses: same shapes as the max_stars_* columns
  max_forks_count: int64 (1 to 105k) ⌀
  max_forks_repo_forks_event_min_datetime / _max_datetime: string (24 chars) ⌀
  content: string (1 to 1.03M chars)
  avg_line_length: float64 (1 to 958k)
  max_line_length: int64 (1 to 1.03M)
  alphanum_fraction: float64 (0 to 1)

Each source file below is preceded by a comment header summarizing its metadata row.

# =============================================================================
# File: openGaussBase/testcase/SQL/INNERFUNC/pg_buffercache_pages/Opengauss_Function_Innerfunc_Pg_Buffercache_Pages_Case0007.py
# Repo: opengauss-mirror/Yat @ aef107a8304b94e5d99b4f1f36eb46755eb8919e
# License: MulanPSL-1.0 | Size: 6,881 bytes | hexsha: 4a25305a7babbe322d60da76d347533910b26de0
# =============================================================================
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : pg_buffercache_pages verification
Case Name : pg_buffercache_pages function, reldatabase field verification for table objects
Description :
1. Connect to the database with gsql and create two tables in different databases
2. Query the initial buffer cache information of both tables
3. Insert different amounts of data into the two tables
4. Query the buffer cache information of both tables again
5. Query the database OIDs of the two tables
Expect :
1. Connection and table creation in different databases succeed
2. The initial buffer cache query for both tables returns nothing
3. The inserts succeed
4. Querying by table OID now returns rows, and the table with more data has more cached pages
5. The database OIDs match the reldatabase values returned in step 4
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
class PgBuffercachePagesCase0007(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info(
            'Opengauss_Function_Innerfunc_Pg_Buffercache_Pages_Case0007: initialization')
self.pri_sh = CommonSH('PrimaryDbUser')
self.pri_dbuser = Node(node='PrimaryDbUser')
self.constant = Constant()
self.db_name1 = self.pri_dbuser.db_name
self.db_name2 = 'db_pg_buffercache_pages_case0007'
self.t_name1 = 't_pg_buffercache_pages_case0007_1'
self.t_name2 = 't_pg_buffercache_pages_case0007_2'
def test_main(self):
        step_txt = '----step1: create two tables in different databases; expect: creation succeeds----'
self.log.info(step_txt)
create_sql = f'drop table if exists {self.t_name1}, {self.t_name2};' \
f'create table {self.t_name1}(id int,content text);'
create_result = self.pri_sh.execut_db_sql(create_sql)
self.log.info(create_result)
self.assertIn(self.constant.TABLE_CREATE_SUCCESS, create_result,
                      'Execution failed: ' + step_txt)
create_sql = f"drop database if exists {self.db_name2};" \
f"create database {self.db_name2};"
create_result = self.pri_sh.execut_db_sql(create_sql)
self.log.info(create_result)
self.assertIn(self.constant.CREATE_DATABASE_SUCCESS, create_result,
                      'Execution failed: ' + step_txt)
create_sql = f'create table {self.t_name2}(id int,content text);'
create_result = self.pri_sh.execut_db_sql(create_sql,
dbname=f'{self.db_name2}')
self.assertIn(self.constant.TABLE_CREATE_SUCCESS, create_result,
                      'Execution failed: ' + step_txt)
        step_txt = '----step2: initial buffer cache query for both tables; expect: 0 rows each----'
self.log.info(step_txt)
select_sql = f"select count(*) from pg_buffercache_pages() where " \
f"relfilenode in (select oid from pg_class " \
f"where relname='{self.t_name1}');"
select_result = self.pri_sh.execut_db_sql(select_sql)
self.log.info(select_result)
tmp_count1 = int(select_result.strip().splitlines()[-2])
        self.assertEqual(tmp_count1, 0, 'Execution failed: ' + step_txt)
select_sql = f"select count(*) from pg_buffercache_pages() where " \
f"relfilenode in (select oid from pg_class " \
f"where relname='{self.t_name2}');"
select_result = self.pri_sh.execut_db_sql(select_sql,
dbname=f'{self.db_name2}')
self.log.info(select_result)
tmp_count2 = int(select_result.strip().splitlines()[-2])
        self.assertEqual(tmp_count2, 0, 'Execution failed: ' + step_txt)
        step_txt = '----step3: insert different amounts of data into the two tables; expect: inserts succeed----'
self.log.info(step_txt)
insert_sql = f"insert into {self.t_name1} " \
f"values(generate_series(1, 100), 'testtext');"
insert_result = self.pri_sh.execut_db_sql(insert_sql)
self.log.info(insert_result)
self.assertIn(self.constant.INSERT_SUCCESS_MSG, insert_result,
                      'Execution failed: ' + step_txt)
insert_sql = f"insert into {self.t_name2} " \
f"values(generate_series(1, 1000), 'testtext');"
insert_result = self.pri_sh.execut_db_sql(insert_sql,
dbname=f'{self.db_name2}')
self.log.info(insert_result)
self.assertIn(self.constant.INSERT_SUCCESS_MSG, insert_result,
                      'Execution failed: ' + step_txt)
        step_txt = '----step4: query the buffer cache info for both tables again; expect: their database OIDs differ----'
self.log.info(step_txt)
select_sql = f"select distinct reldatabase " \
f"from pg_buffercache_pages() where " \
f"relfilenode in (select oid from pg_class " \
f"where relname='{self.t_name1}');"
select_result = self.pri_sh.execut_db_sql(select_sql)
self.log.info(select_result)
tmp_db_oid1 = select_result.strip().splitlines()[-2].strip()
select_sql = f"select distinct reldatabase " \
f"from pg_buffercache_pages() where " \
f"relfilenode in (select oid from pg_class " \
f"where relname='{self.t_name2}');"
select_result = self.pri_sh.execut_db_sql(select_sql,
dbname=f'{self.db_name2}')
self.log.info(select_result)
tmp_db_oid2 = select_result.strip().splitlines()[-2].strip()
        self.assertNotEqual(tmp_db_oid1, tmp_db_oid2, 'Execution failed: ' + step_txt)
        step_txt = '--step5: query the database OIDs of the two tables; expect: consistent with the reldatabase values from step 4--'
self.log.info(step_txt)
select_sql = f"select oid from pg_database " \
f"where datname = '{self.db_name1}';"
select_result = self.pri_sh.execut_db_sql(select_sql)
self.log.info(select_result)
db_oid1 = select_result.strip().splitlines()[-2].strip()
        self.assertEqual(db_oid1, tmp_db_oid1, 'Execution failed: ' + step_txt)
select_sql = f"select oid from pg_database " \
f"where datname = '{self.db_name2}';"
select_result = self.pri_sh.execut_db_sql(select_sql)
self.log.info(select_result)
db_oid2 = select_result.strip().splitlines()[-2].strip()
        self.assertEqual(db_oid2, tmp_db_oid2, 'Execution failed: ' + step_txt)
def tearDown(self):
self.log.info('----this is teardown----')
        step_txt = '----cleanup: drop the test table and database----'
self.log.info(step_txt)
drop_sql = f'drop table if exists {self.t_name1};' \
f'drop database if exists {self.db_name2}; '
drop_result = self.pri_sh.execut_db_sql(drop_sql)
self.log.info(drop_result)
self.log.info(
            'Opengauss_Function_Innerfunc_Pg_Buffercache_Pages_Case0007: finished')

# =============================================================================
# File: tests/python/gpu/test_kvstore_gpu.py
# Repo: leeesangwon/incubator-mxnet @ 0514233103baff5e1581cf2057f561f7a36616c2
# License: Apache-2.0 | Size: 6,082 bytes | hexsha: 4a2530a8a3bbb94735f282c17a76a8273e300b78
# Stars: 211 | Issues: 42 | Forks: 58
# =============================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys
import os
import mxnet as mx
import numpy as np
import pytest
from mxnet.test_utils import assert_almost_equal, default_context, environment
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
shape = (4, 4)
keys = [5, 7, 11]
str_keys = ['b', 'c', 'd']
def init_kv_with_str(stype='default', kv_type='local'):
"""init kv """
kv = mx.kv.create(kv_type)
# single
kv.init('a', mx.nd.zeros(shape, stype=stype))
# list
kv.init(str_keys, [mx.nd.zeros(shape=shape, stype=stype)] * len(keys))
return kv
# 1. Test seed 89411477 (module seed 1829754103) resulted in a py3-gpu CI runner core dump.
# 2. Test seed 1155716252 (module seed 1032824746) resulted in a py3-mkldnn-gpu error:
# src/operator/nn/mkldnn/mkldnn_base.cc:567: Check failed: similar
# Both of them are not reproducible, so this test is back on random seeds.
@pytest.mark.skipif(mx.context.num_gpus() < 2, reason="test_rsp_push_pull needs more than 1 GPU")
@pytest.mark.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14189")
@pytest.mark.serial
def test_rsp_push_pull():
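    """Push row_sparse values from CPU and GPU contexts and check that row_sparse_pull
    (and dense pull, when enabled) returns the expected rows for 'local' and 'device'
    kvstores, with and without MXNET_KVSTORE_USETREE."""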
def check_rsp_push_pull(kv_type, sparse_pull, is_push_cpu=True):
kv = init_kv_with_str('row_sparse', kv_type)
kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))
push_ctxs = [mx.cpu(i) if is_push_cpu else mx.gpu(i) for i in range(2)]
kv.push('e', [mx.nd.ones(shape, ctx=context).tostype('row_sparse') for context in push_ctxs])
def check_rsp_pull(kv, ctxs, sparse_pull, is_same_rowid=False, use_slice=False):
count = len(ctxs)
num_rows = shape[0]
row_ids = []
all_row_ids = np.arange(num_rows)
vals = [mx.nd.sparse.zeros(shape=shape, ctx=ctxs[i], stype='row_sparse') for i in range(count)]
if is_same_rowid:
row_id = np.random.randint(num_rows, size=num_rows)
row_ids = [mx.nd.array(row_id)] * count
elif use_slice:
total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
else:
for i in range(count):
row_id = np.random.randint(num_rows, size=num_rows)
row_ids.append(mx.nd.array(row_id))
row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
vals_to_pull = vals[0] if len(vals) == 1 else vals
kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
for val, row_id in zip(vals, row_ids):
retained = val.asnumpy()
excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
for row in range(num_rows):
expected_val = np.zeros_like(retained[row])
expected_val += 0 if row in excluded_row_ids else 2
assert_almost_equal(retained[row], expected_val)
if sparse_pull is True:
kv.pull('e', out=vals_to_pull, ignore_sparse=False)
for val in vals:
retained = val.asnumpy()
expected_val = np.zeros_like(retained)
expected_val[:] = 2
assert_almost_equal(retained, expected_val)
check_rsp_pull(kv, [mx.gpu(0)], sparse_pull)
check_rsp_pull(kv, [mx.cpu(0)], sparse_pull)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull, is_same_rowid=True)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull, is_same_rowid=True)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull, use_slice=True)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull, use_slice=True)
envs = [None, '1']
key = 'MXNET_KVSTORE_USETREE'
for val in envs:
with environment(key, val):
            if val == '1':
sparse_pull = False
else:
sparse_pull = True
check_rsp_push_pull('local', sparse_pull)
check_rsp_push_pull('device', sparse_pull)
check_rsp_push_pull('device', sparse_pull, is_push_cpu=False)
def test_row_sparse_pull_single_device():
kvstore = mx.kv.create('device')
copy = mx.nd.random_normal(shape=(4,4), ctx=mx.gpu(0))
grad = copy.tostype("row_sparse")
key = 0
kvstore.init(key, grad)
idx = grad.indices
kvstore.push(key, grad)
kvstore.row_sparse_pull(key, out=grad, row_ids=idx)
assert_almost_equal(grad.asnumpy(), copy.asnumpy())
@pytest.mark.serial
def test_rsp_push_pull_large_rowid():
num_rows = 793470
val = mx.nd.ones((num_rows, 1)).tostype('row_sparse').copyto(mx.gpu())
kv = mx.kv.create('device')
kv.init('a', val)
out = mx.nd.zeros((num_rows,1), stype='row_sparse').copyto(mx.gpu())
kv.push('a', val)
kv.row_sparse_pull('a', out=out, row_ids=mx.nd.arange(0, num_rows, dtype='int64'))
assert(out.indices.shape[0] == num_rows)

# =============================================================================
# File: flight-simulator/hackflight/parser/build/scripts-3.5/msppg.py
# Repo: CobraPi/Gesture-Controled-Drone @ 94053b27f1ecd4d667ea603d45a5e29ffbfe2787
# License: CC0-1.0 | Size: 27,276 bytes | hexsha: 4a253174fede91a0e5e4798fbd6515d2aa72841a
# Stars: 1
# =============================================================================
#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3
'''
msppg.py Multiwii Serial Protocol Parser Generator
Copyright (C) Rob Jones, Alec Singer, Chris Lavin, Blake Liebling, Simon D. Levy 2015
This program is part of Hackflight
This code is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this code. If not, see <http://www.gnu.org/licenses/>.
'''
PYTHON_EXAMPLES = ['getimu', 'getrc', 'imudisplay', 'blueimudisplay', 'setrc']
from sys import exit, argv
import os
import json
from pkg_resources import resource_string
def clean(string):
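    """Strip the first and last characters from the string (used to remove the
    surrounding quotes left by json.dumps)."""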
cleaned_string = string[1: len(string) - 1]
return cleaned_string
def mkdir_if_missing(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def error(errmsg):
print(errmsg)
exit(1)
def _openw(fname):
print('Creating file ' + fname)
return open(fname, 'w')
class CodeEmitter(object):
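    """Base class for the language-specific emitters: creates the output folder,
    copies the language's Makefile, and defines the MSP primitive type sizes."""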
def __init__(self, folder, ext):
mkdir_if_missing('output/%s' % folder)
self._copyfile('%s.makefile' % folder, '%s/Makefile' % folder)
self.indent = ' '
self.type2size = {'byte': 1, 'short' : 2, 'float' : 4, 'int' : 4}
def _copyfile(self, src, dst):
outfile = _openw('output/' + dst)
outfile.write(self._getsrc(src))
outfile.close()
def warning(self, cmt):
return cmt + ' AUTO-GENERATED CODE: DO NOT EDIT!!!\n\n'
# Helper for writing parameter list with type declarations
def _write_params(self, outfile, argtypes, argnames, prefix = ''):
outfile.write('(')
outfile.write(prefix)
for argtype,argname in zip(argtypes, argnames):
outfile.write(self.type2decl[argtype] + ' ' + argname)
if argname != argnames[-1]:
outfile.write(', ')
outfile.write(')')
def _paysize(self, argtypes):
return sum([self.type2size[atype] for atype in argtypes])
def _msgsize(self, argtypes):
return self._paysize(argtypes)
def _getsrc(self, filename):
return resource_string('msppg_resources', filename).decode('utf-8')
def _getargnames(self, message):
return [argname for (argname,_) in self._getargs(message)]
def _getargtypes(self, message):
return [argtype for (_,argtype) in self._getargs(message)]
def _getargs(self, message):
return [(argname,argtype) for (argname,argtype) in
zip(message[1], message[2]) if argname.lower()!='comment']
# Python emitter ============================================================================
class Python_Emitter(CodeEmitter):
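    """Emits the generated Python msppg package (parser plus message serializers)
    and copies the bundled example scripts."""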
def _copy_example(self, name):
CodeEmitter._copyfile(self, '%s.py' % name, 'python/' + ('%s.py' % name))
def __init__(self, msgdict):
CodeEmitter.__init__(self, 'python', 'py')
for example in PYTHON_EXAMPLES:
self._copy_example(example)
mkdir_if_missing('output/python/msppg')
self._copyfile('setup.py', 'python/setup.py')
self.output = _openw('output/python/msppg/__init__.py')
self._write(self.warning('#'))
self.type2pack = {'byte' : 'B', 'short' : 'h', 'float' : 'f', 'int' : 'i'}
self._write(self._getsrc('top-py') + '\n')
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
if msgid < 200:
self._write(4*self.indent + ('if self.message_id == %d:\n\n' % msgstuff[0]))
self._write(5*self.indent + ('if self.message_direction == 0:\n\n'))
self._write(6*self.indent + 'if hasattr(self, \'' + msgtype + '_Request_Handler\'):\n\n')
self._write(7*self.indent + 'self.%s_Request_Handler()\n\n' % msgtype)
self._write(5*self.indent + 'else:\n\n')
self._write(6*self.indent + 'if hasattr(self, \'' + msgtype + '_Handler\'):\n\n')
self._write(7*self.indent + 'self.%s_Handler(*struct.unpack(\'=' % msgtype)
for argtype in self._getargtypes(msgstuff):
self._write('%s' % self.type2pack[argtype])
self._write("\'" + ', self.message_buffer))\n\n')
self._write(self._getsrc('bottom-py') + '\n')
# Emit handler methods for parser
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
if msgid < 200:
self._write(self.indent + 'def set_%s_Handler(self, handler):\n\n' % msgtype)
self._write(2*self.indent + 'self.%s_Handler = handler\n\n' % msgtype)
self._write(self.indent + 'def set_%s_Request_Handler(self, handler):\n\n' % msgtype)
self._write(2*self.indent + 'self.%s_Request_Handler = handler\n\n' % msgtype)
# Emit serializer functions for module
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
self._write('def serialize_' + msgtype + '(' + ', '.join(self._getargnames(msgstuff)) + '):\n\n')
self._write(self.indent + 'message_buffer = struct.pack(\'')
for argtype in self._getargtypes(msgstuff):
self._write(self.type2pack[argtype])
self._write('\'')
for argname in self._getargnames(msgstuff):
self._write(', ' + argname)
self._write(')\n\n')
self._write(self.indent)
self._write('msg = chr(len(message_buffer)) + chr(%s) + str(message_buffer)\n\n' % msgid)
self._write(self.indent + 'return _bytes(\'$M%c\' + msg + chr(_CRC8(msg)))\n\n' %
('>' if msgid < 200 else '<'))
if msgid < 200:
self._write('def serialize_' + msgtype + '_Request():\n\n')
self._write(self.indent + 'return _bytes(\'$M<\' + chr(0) + chr(%s) + chr(%s))\n\n' % (msgid, msgid))
def _write(self, s):
self.output.write(s)
# C++ / Arduino emitter ============================================================================
class CPP_Emitter(CodeEmitter):
def __init__(self, msgdict):
CodeEmitter.__init__(self, 'cpp', 'cpp')
mkdir_if_missing('output/cpp/msppg')
# Create C++ example
self._copyfile('example.cpp', 'cpp/example.cpp')
# Create Arduino stuff
mkdir_if_missing('output/arduino')
mkdir_if_missing('output/arduino/MSPPG')
mkdir_if_missing('output/arduino/MSPPG/examples')
mkdir_if_missing('output/arduino/MSPPG/examples/imuexample')
self._copyfile('imuexample.ino', 'arduino/MSPPG/examples/imuexample/imuexample.ino')
self.type2decl = {'byte': 'byte', 'short' : 'short', 'float' : 'float', 'int' : 'int'}
self.coutput = _openw('output/cpp/msppg/msppg.cpp')
self.houtput = _openw('output/cpp/msppg/msppg.h')
self.acoutput = _openw('output/arduino/MSPPG/msppg.cpp')
self.ahoutput = _openw('output/arduino/MSPPG/msppg.h')
self._cwrite(self.warning('//'))
self._hwrite(self._getsrc('top-hpp'))
self._cwrite('\n' + self._getsrc('top-cpp'))
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
argnames = self._getargnames(msgstuff)
argtypes = self._getargtypes(msgstuff)
self._hwrite(self.indent*2 + 'static MSP_Message serialize_%s' % msgtype)
self._write_params(self.houtput, argtypes, argnames)
self._write_params(self.ahoutput, argtypes, argnames)
self._hwrite(';\n\n')
# Write handler code for incoming messages
if msgid < 200:
self._cwrite(5*self.indent + ('case %s: {\n\n' % msgdict[msgtype][0]))
nargs = len(argnames)
offset = 0
for k in range(nargs):
argname = argnames[k]
argtype = argtypes[k]
decl = self.type2decl[argtype]
self._cwrite(6*self.indent + decl + ' ' + argname + ';\n')
self._cwrite(6*self.indent +
'memcpy(&%s, &this->message_buffer[%d], sizeof(%s));\n\n' %
(argname, offset, decl))
offset += self.type2size[argtype]
self._cwrite(6*self.indent + 'this->handlerFor%s->handle_%s(' % (msgtype, msgtype))
for k in range(nargs):
self._cwrite(argnames[k])
if k < nargs-1:
self._cwrite(', ')
self._cwrite(');\n')
self._cwrite(6*self.indent + '} break;\n\n')
self._hwrite(self.indent*2 + 'static MSP_Message serialize_%s_Request();\n\n' % msgtype)
self._hwrite(self.indent*2 +
'void set_%s_Handler(class %s_Handler * handler);\n\n' % (msgtype, msgtype))
self._hwrite(self.indent + 'private:\n\n')
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
if msgid < 200:
self._hwrite(2*self.indent +
'class %s_Handler * handlerFor%s;\n\n' % (msgtype, msgtype));
self._hwrite('};\n');
self._cwrite(self._getsrc('bottom-cpp'))
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
argnames = self._getargnames(msgstuff)
argtypes = self._getargtypes(msgstuff)
# Incoming messages
if msgid < 200:
# Declare handler class
self._hwrite('\n\n' + 'class %s_Handler {\n' % msgtype)
self._hwrite('\n' + self.indent + 'public:\n\n')
self._hwrite(2*self.indent + '%s_Handler() {}\n\n' % msgtype)
self._hwrite(2*self.indent + 'virtual void handle_%s' % msgtype)
self._write_params(self.houtput, argtypes, argnames)
self._write_params(self.ahoutput, argtypes, argnames)
self._hwrite('{ }\n\n')
self._hwrite('};\n\n')
# Write handler method
self._cwrite('void MSP_Parser::set_%s_Handler(class %s_Handler * handler) {\n\n' %
(msgtype, msgtype))
self._cwrite(self.indent + 'this->handlerFor%s = handler;\n' % msgtype)
self._cwrite('}\n\n')
# Write request method
self._cwrite('MSP_Message MSP_Parser::serialize_%s_Request() {\n\n' % msgtype)
self._cwrite(self.indent + 'MSP_Message msg;\n\n')
self._cwrite(self.indent + 'msg.bytes[0] = 36;\n')
self._cwrite(self.indent + 'msg.bytes[1] = 77;\n')
                self._cwrite(self.indent + 'msg.bytes[2] = %d;\n' % (60 if msgid < 200 else 62))
self._cwrite(self.indent + 'msg.bytes[3] = 0;\n')
self._cwrite(self.indent + 'msg.bytes[4] = %d;\n' % msgid)
self._cwrite(self.indent + 'msg.bytes[5] = %d;\n\n' % msgid)
self._cwrite(self.indent + 'msg.len = 6;\n\n')
self._cwrite(self.indent + 'return msg;\n')
self._cwrite('}\n\n')
# Add parser method for serializing message
self._cwrite('MSP_Message MSP_Parser::serialize_%s' % msgtype)
self._write_params(self.coutput, argtypes, argnames)
self._write_params(self.acoutput, argtypes, argnames)
self._cwrite(' {\n\n')
self._cwrite(self.indent + 'MSP_Message msg;\n\n')
msgsize = self._msgsize(argtypes)
self._cwrite(self.indent + 'msg.bytes[0] = 36;\n')
self._cwrite(self.indent + 'msg.bytes[1] = 77;\n')
self._cwrite(self.indent + 'msg.bytes[2] = 62;\n')
self._cwrite(self.indent + 'msg.bytes[3] = %d;\n' % msgsize)
self._cwrite(self.indent + 'msg.bytes[4] = %d;\n\n' % msgid)
nargs = len(argnames)
offset = 5
for k in range(nargs):
argname = argnames[k]
argtype = argtypes[k]
decl = self.type2decl[argtype]
self._cwrite(self.indent +
'memcpy(&msg.bytes[%d], &%s, sizeof(%s));\n' % (offset, argname, decl))
offset += self.type2size[argtype]
self._cwrite('\n')
self._cwrite(self.indent +
'msg.bytes[%d] = CRC8(&msg.bytes[3], %d);\n\n' % (msgsize+5, msgsize+2))
self._cwrite(self.indent + 'msg.len = %d;\n\n' % (msgsize+6))
self._cwrite(self.indent + 'return msg;\n')
self._cwrite('}\n\n')
def _cwrite(self, s):
self.coutput.write(s)
self.acoutput.write(s)
def _hwrite(self, s):
self.houtput.write(s)
self.ahoutput.write(s)
# C emitter ===============================================================================
class C_Emitter(CodeEmitter):
def __init__(self, msgdict):
CodeEmitter.__init__(self, 'c', 'c')
mkdir_if_missing('output/c/msppg')
self._copyfile('example.c', 'c/example.c')
self.type2decl = {'byte': 'char', 'short' : 'short', 'float' : 'float', 'int' : 'int'}
self.coutput = _openw('output/c/msppg/msppg.c')
self.houtput = _openw('output/c/msppg/msppg.h')
self._cwrite(self.warning('//'))
self._hwrite(self._getsrc('top-h'))
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
if msgid < 200:
argnames = self._getargnames(msgstuff)
argtypes = self._getargtypes(msgstuff)
self._hwrite(self.indent + 'void (*handler_for_%s)' % msgtype)
self._write_params(self.houtput, argtypes, argnames)
self._hwrite(';\n')
self._hwrite(self._getsrc('bottom-h'))
self._cwrite('\n' + self._getsrc('top-c'))
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
argnames = self._getargnames(msgstuff)
argtypes = self._getargtypes(msgstuff)
self._hwrite('msp_message_t msp_serialize_%s' % msgtype)
self._write_params(self.houtput, argtypes, argnames)
self._hwrite(';\n\n')
# Write handler code for incoming messages
if msgid < 200:
self._cwrite(5*self.indent + ('case %s: {\n\n' % msgdict[msgtype][0]))
nargs = len(argnames)
offset = 0
for k in range(nargs):
argname = argnames[k]
argtype = argtypes[k]
decl = self.type2decl[argtype]
self._cwrite(6*self.indent + decl + ' ' + argname + ';\n')
self._cwrite(6*self.indent +
'memcpy(&%s, &parser->message_buffer[%d], sizeof(%s));\n\n' %
(argname, offset, decl))
offset += self.type2size[argtype]
self._cwrite(6*self.indent + 'parser->handler_for_%s(' % msgtype)
for k in range(nargs):
self._cwrite(argnames[k])
if k < nargs-1:
self._cwrite(', ')
self._cwrite(');\n')
self._cwrite(6*self.indent + '} break;\n\n')
self._hwrite('msp_message_t msp_serialize_%s_request();\n\n' % msgtype)
self._hwrite('void msp_set_%s_handler(msp_parser_t * parser, void (*handler)' % msgtype)
self._write_params(self.houtput, argtypes, argnames)
self._hwrite(');\n\n')
self._cwrite(self._getsrc('bottom-cpp'))
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
argnames = self._getargnames(msgstuff)
argtypes = self._getargtypes(msgstuff)
# Incoming messages
if msgid < 200:
# Write handler method
self._cwrite('void msp_set_%s_handler(msp_parser_t * parser, void (*handler)' % msgtype)
self._write_params(self.coutput, argtypes, argnames)
self._cwrite(') {\n\n')
self._cwrite(self.indent + 'parser->handler_for_%s = handler;\n' % msgtype)
self._cwrite('}\n\n')
# Write request method
self._cwrite('msp_message_t serialize_%s_Request() {\n\n' % msgtype)
self._cwrite(self.indent + 'msp_message_t msg;\n\n')
self._cwrite(self.indent + 'msg.bytes[0] = 36;\n')
self._cwrite(self.indent + 'msg.bytes[1] = 77;\n')
                self._cwrite(self.indent + 'msg.bytes[2] = %d;\n' % (60 if msgid < 200 else 62))
self._cwrite(self.indent + 'msg.bytes[3] = 0;\n')
self._cwrite(self.indent + 'msg.bytes[4] = %d;\n' % msgid)
self._cwrite(self.indent + 'msg.bytes[5] = %d;\n\n' % msgid)
self._cwrite(self.indent + 'msg.len = 6;\n\n')
self._cwrite(self.indent + 'return msg;\n')
self._cwrite('}\n\n')
# Add parser method for serializing message
self._cwrite('msp_message_t msp_serialize_%s' % msgtype)
self._write_params(self.coutput, argtypes, argnames)
self._cwrite(' {\n\n')
self._cwrite(self.indent + 'msp_message_t msg;\n\n')
msgsize = self._msgsize(argtypes)
self._cwrite(self.indent + 'msg.bytes[0] = 36;\n')
self._cwrite(self.indent + 'msg.bytes[1] = 77;\n')
self._cwrite(self.indent + 'msg.bytes[2] = 62;\n')
self._cwrite(self.indent + 'msg.bytes[3] = %d;\n' % msgsize)
self._cwrite(self.indent + 'msg.bytes[4] = %d;\n\n' % msgid)
nargs = len(argnames)
offset = 5
for k in range(nargs):
argname = argnames[k]
argtype = argtypes[k]
decl = self.type2decl[argtype]
self._cwrite(self.indent +
'memcpy(&msg.bytes[%d], &%s, sizeof(%s));\n' % (offset, argname, decl))
offset += self.type2size[argtype]
self._cwrite('\n')
self._cwrite(self.indent +
'msg.bytes[%d] = CRC8(&msg.bytes[3], %d);\n\n' % (msgsize+5, msgsize+2))
self._cwrite(self.indent + 'msg.len = %d;\n\n' % (msgsize+6))
self._cwrite(self.indent + 'return msg;\n')
self._cwrite('}\n\n')
def _cwrite(self, s):
self.coutput.write(s)
def _hwrite(self, s):
self.houtput.write(s)
# Java emitter =======================================================================================
class Java_Emitter(CodeEmitter):
def __init__(self, msgdict):
CodeEmitter.__init__(self, 'java', 'java')
self._copyfile('example.java', 'java/example.java')
mkdir_if_missing('output/java/edu')
mkdir_if_missing('output/java/edu/wlu')
mkdir_if_missing('output/java/edu/wlu/cs')
mkdir_if_missing('output/java/edu/wlu/cs/msppg')
self.type2decl = {'byte': 'byte', 'short' : 'short', 'float' : 'float', 'int' : 'int'}
self.type2bb = {'byte': '', 'short' : 'Short', 'float' : 'Float', 'int' : 'Int'}
self.output = _openw('output/java/edu/wlu/cs/msppg/Parser.java')
self._write(self.warning('//'))
self._write(self._getsrc('top-java'))
# Write handler cases for incoming messages
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
if msgid < 200:
self._write(6*self.indent + 'case (byte)%d:\n' % msgid)
self._write(7*self.indent + 'if (this.%s_handler != null) {\n' % msgtype)
self._write(8*self.indent + 'this.%s_handler.handle_%s(\n' % (msgtype, msgtype));
argnames = self._getargnames(msgstuff)
argtypes = self._getargtypes(msgstuff)
nargs = len(argnames)
offset = 0
for k in range(nargs):
argtype = argtypes[k]
self._write(8*self.indent + 'bb.get%s(%d)' % (self.type2bb[argtype], offset))
offset += self.type2size[argtype]
if k < nargs-1:
self._write(',\n')
self._write(');\n')
self._write(7*self.indent + '}\n')
self._write(7*self.indent + 'break;\n\n')
self._write(self._getsrc('bottom-java'))
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
argnames = self._getargnames(msgstuff)
argtypes = self._getargtypes(msgstuff)
# For messages from FC
if msgid < 200:
# Declare handler
self._write(self.indent + 'private %s_Handler %s_handler;\n\n' % (msgtype, msgtype))
self._write(self.indent +
'public void set_%s_Handler(%s_Handler handler) {\n\n' % (msgtype, msgtype))
self._write(2*self.indent + 'this.%s_handler = handler;\n' % msgtype)
self._write(self.indent + '}\n\n')
# Write serializer for requests
self._write(self.indent + 'public byte [] serialize_%s_Request() {\n\n' % msgtype)
paysize = self._paysize(argtypes)
msgsize = self._msgsize(argtypes)
self._write('\n' + 2*self.indent + 'byte [] message = new byte[6];\n\n')
self._write(2*self.indent + 'message[0] = 36;\n')
self._write(2*self.indent + 'message[1] = 77;\n')
self._write(2*self.indent + 'message[2] = 60;\n')
self._write(2*self.indent + 'message[3] = 0;\n')
self._write(2*self.indent + 'message[4] = (byte)%d;\n' % msgid)
self._write(2*self.indent + 'message[5] = (byte)%d;\n\n' % msgid)
self._write(2*self.indent + 'return message;\n')
self._write(self.indent + '}\n\n')
# Write serializer method for messages from FC
self._write(self.indent + 'public byte [] serialize_%s' % msgtype)
self._write_params(self.output, argtypes, argnames)
self._write(' {\n\n')
paysize = self._paysize(argtypes)
msgsize = self._msgsize(argtypes)
self._write(2*self.indent + 'ByteBuffer bb = newByteBuffer(%d);\n\n' % paysize)
for (argname,argtype) in zip(argnames,argtypes):
self._write(2*self.indent + 'bb.put%s(%s);\n' % (self.type2bb[argtype], argname))
self._write('\n' + 2*self.indent + 'byte [] message = new byte[%d];\n' % (msgsize+6))
self._write(2*self.indent + 'message[0] = 36;\n')
self._write(2*self.indent + 'message[1] = 77;\n')
self._write(2*self.indent + 'message[2] = %d;\n' % (62 if msgid < 200 else 60))
self._write(2*self.indent + 'message[3] = %d;\n' % msgsize)
self._write(2*self.indent + 'message[4] = (byte)%d;\n' %msgdict[msgtype][0])
self._write(2*self.indent + 'byte [] data = bb.array();\n')
self._write(2*self.indent + 'int k;\n')
self._write(2*self.indent + 'for (k=0; k<data.length; ++k) {\n')
self._write(3*self.indent + 'message[k+5] = data[k];\n')
self._write(2*self.indent + '}\n\n')
self._write(2*self.indent + 'message[%d] = CRC8(message, 3, %d);\n\n' %
(msgsize+5, msgsize+4))
self._write(2*self.indent + 'return message;\n')
self._write(self.indent + '}\n\n')
self._write('}')
self.output.close()
# Write handler classes for each type of incoming message
for msgtype in msgdict.keys():
msgstuff = msgdict[msgtype]
msgid = msgstuff[0]
if msgid < 200:
argnames = self._getargnames(msgstuff)
argtypes = self._getargtypes(msgstuff)
self.output = _openw('output/java/edu/wlu/cs/msppg/%s_Handler.java' % msgtype)
self.output.write(self.warning('//'))
self.output.write('package edu.wlu.cs.msppg;\n\n')
self.output.write('public interface %s_Handler {\n\n' % msgtype)
self.output.write(self.indent + 'public void handle_%s' % msgtype)
self._write_params(self.output, argtypes, argnames)
self.output.write(';\n')
self.output.write('}\n')
def _write(self, s):
self.output.write(s)
# main ===============================================================================================
if __name__ == '__main__':
# default to input from simple example
data = json.load(open(argv[1] if len(argv) > 1 else 'messages.json', 'r'))
# takes the types of messages from the json file
unicode_message_types = data.keys()
# make a list of messages from the JSON file
message_type_list = list()
for key in unicode_message_types:
message_type = json.dumps(key)
clean_type = clean(message_type)
message_type_list.append(clean_type)
# make dictionary of names, types for each message's components
argument_lists = list()
argument_types = list()
msgdict = {}
for msgtype in message_type_list:
argnames = list()
argtypes = list()
msgid = None
for arg in data[msgtype]:
argname = clean(clean(json.dumps(list(arg.keys()))))
argtype = arg[list(arg.keys())[0]]
if argname == 'ID':
msgid = int(argtype)
else:
argtypes.append(argtype)
argnames.append(argname)
argument_lists.append(argnames)
if msgid is None:
error('Missing ID for message ' + msgtype)
argument_types.append(argtypes)
msgdict[msgtype] = (msgid, argnames, argtypes)
# make output directory if necessary
mkdir_if_missing('output')
# Emit Python
Python_Emitter(msgdict)
# Emit C++
CPP_Emitter(msgdict)
# Emit C
C_Emitter(msgdict)
    # Emit Java
Java_Emitter(msgdict)

# =============================================================================
# File: tests/search/IndexCreatorVisitor.py
# Repo: Gawaboumga/PyMatex @ 3ccc0aa23211a064aa31a9b509b108cd606a4992
# License: MIT | Size: 3,613 bytes | hexsha: 4a2533731eeda6a951c653fb9bdcd1391b440c23
# Stars: 1
# =============================================================================
from tests import BaseTest
from pymatex.node import NodeType
from pymatex.search import IndexCreatorVisitor
class IndexCreatorVisitorTests(BaseTest.BaseTest):
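    """Unit tests for IndexCreatorVisitor: verify which node types and operand values
    get indexed, per level, for each parsed expression."""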
def test_visit_basic_addition(self):
ast = self.parse(r'3 + x')
data = {}
pk = 1
visitor = IndexCreatorVisitor(data, pk)
ast.accept(visitor)
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '3'))
self.assertTrue(self.has(data[0][NodeType.VARIABLE], pk, 'x'))
self.assertTrue(pk in data[1][NodeType.ADDITION])
def test_visit_addition_mixed_with_multiplication(self):
ast = self.parse(r'3 * x + y * 5')
data = {}
pk = 1
visitor = IndexCreatorVisitor(data, pk)
ast.accept(visitor)
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '3'))
self.assertTrue(self.has(data[0][NodeType.VARIABLE], pk, 'x'))
self.assertTrue(self.has(data[0][NodeType.VARIABLE], pk, 'y'))
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '5'))
self.assertTrue(pk in data[1][NodeType.MULTIPLICATION])
self.assertTrue(pk in data[2][NodeType.ADDITION])
def test_visit_summation(self):
ast = self.parse(r'\sum_{i=0}^{\infty} i * i')
data = {}
pk = 1
visitor = IndexCreatorVisitor(data, pk)
ast.accept(visitor)
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '0'))
self.assertTrue(self.has(data[0][NodeType.BOUNDVARIABLE], pk, 'i'))
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '\\infty'))
self.assertTrue(pk in data[1][NodeType.MULTIPLICATION])
self.assertTrue(pk in data[2][NodeType.SUMMATION])
def test_visit_summation_with_free_and_bound_variable(self):
ast = self.parse(r'\sum_{i=0}^{\infty} (i + j)')
data = {}
pk = 1
visitor = IndexCreatorVisitor(data, pk)
ast.accept(visitor)
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '0'))
self.assertTrue(self.has(data[0][NodeType.BOUNDVARIABLE], pk, 'i'))
self.assertTrue(self.has(data[0][NodeType.VARIABLE], pk, 'j'))
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '\\infty'))
self.assertTrue(pk in data[1][NodeType.ADDITION])
self.assertTrue(pk in data[2][NodeType.SUMMATION])
def test_visit_exponentiation_and_function(self):
ast = self.parse(r'e^{x!}')
data = {}
pk = 1
visitor = IndexCreatorVisitor(data, pk)
ast.accept(visitor)
self.assertTrue(self.has(data[0][NodeType.VARIABLE], pk, 'e'))
self.assertTrue(self.has(data[0][NodeType.VARIABLE], pk, 'x'))
self.assertTrue(pk in data[1][NodeType.FUNCTION])
self.assertTrue(pk in data[2][NodeType.EXPONENTIATION])
def test_visit_indexed_variable(self):
ast = self.parse(r'3*B_{2n}')
data = {}
pk = 1
visitor = IndexCreatorVisitor(data, pk)
ast.accept(visitor)
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '2'))
self.assertTrue(self.has(data[0][NodeType.CONSTANT], pk, '3'))
self.assertTrue(self.has(data[0][NodeType.VARIABLE], pk, 'n'))
self.assertTrue(pk in data[1][NodeType.MULTIPLICATION])
self.assertTrue(self.has(data[2][NodeType.INDEXEDVARIABLE], pk, 'B'))
self.assertTrue(pk in data[3][NodeType.MULTIPLICATION])
def has(self, data: dict, pk: int, constant_value: str):
self.assertIn(constant_value, data)
self.assertIn(pk, data[constant_value])
return True

# =============================================================================
# File: env/lib/python2.7/site-packages/django/contrib/admin/decorators.py
# Repo: diego-d5000/MisValesMd @ b641782bc2546776e9f55f452ec7fb48100dc482
# License: MIT | Size: 908 bytes | hexsha: 4a253413d40c1b79b932086e01316341b9122459
# =============================================================================
def register(*models, **kwargs):
"""
Registers the given model(s) classes and wrapped ModelAdmin class with
admin site:
@register(Author)
class AuthorAdmin(admin.ModelAdmin):
pass
A kwarg of `site` can be passed as the admin site, otherwise the default
admin site will be used.
"""
from django.contrib.admin import ModelAdmin
from django.contrib.admin.sites import site, AdminSite
def _model_admin_wrapper(admin_class):
admin_site = kwargs.pop('site', site)
if not isinstance(admin_site, AdminSite):
raise ValueError('site must subclass AdminSite')
if not issubclass(admin_class, ModelAdmin):
raise ValueError('Wrapped class must subclass ModelAdmin.')
admin_site.register(models, admin_class=admin_class)
return admin_class
return _model_admin_wrapper

# =============================================================================
# File: app/celery/tasks.py
# Repo: department-of-veterans-affairs/notification-api @ 698bc98d8e78a13a0b2cfc432cfc718ff1016b06
# License: MIT | Size: 21,546 bytes | hexsha: 4a2534260135bb46bf59bab894f8e9e029e7f1ca
# Stars: 10 | Issues: 554 | Forks: 4
# =============================================================================
from datetime import datetime
from collections import namedtuple, defaultdict
from flask import current_app
from notifications_utils.recipients import (
RecipientCSV
)
from notifications_utils.statsd_decorators import statsd
from notifications_utils.template import (
SMSMessageTemplate,
WithSubjectTemplate,
)
from notifications_utils.timezones import convert_utc_to_local_timezone
from sqlalchemy.exc import SQLAlchemyError
from app import (
create_uuid,
create_random_identifier,
encryption,
notify_celery,
)
from app.aws import s3
from app.celery import provider_tasks, letters_pdf_tasks, research_mode_tasks
from app.config import QueueNames
from app.dao.daily_sorted_letter_dao import dao_create_or_update_daily_sorted_letter
from app.dao.jobs_dao import (
dao_update_job,
dao_get_job_by_id,
)
from app.dao.notifications_dao import (
get_notification_by_id,
dao_update_notifications_by_reference,
dao_get_last_notification_added_for_job_id,
update_notification_status_by_reference,
dao_get_notification_history_by_reference,
)
from app.dao.provider_details_dao import get_current_provider
from app.dao.service_email_reply_to_dao import dao_get_reply_to_by_id
from app.dao.service_sms_sender_dao import dao_get_service_sms_sender_by_id, dao_get_sms_sender_by_service_id_and_number
from app.dao.services_dao import dao_fetch_service_by_id, fetch_todays_total_message_count
from app.dao.templates_dao import dao_get_template_by_id
from app.exceptions import DVLAException, NotificationTechnicalFailureException
from app.feature_flags import is_feature_enabled, FeatureFlag
from app.models import (
DVLA_RESPONSE_STATUS_SENT,
EMAIL_TYPE,
JOB_STATUS_CANCELLED,
JOB_STATUS_FINISHED,
JOB_STATUS_IN_PROGRESS,
JOB_STATUS_PENDING,
KEY_TYPE_NORMAL,
LETTER_TYPE,
NOTIFICATION_CREATED,
NOTIFICATION_DELIVERED,
NOTIFICATION_SENDING,
NOTIFICATION_TEMPORARY_FAILURE,
NOTIFICATION_TECHNICAL_FAILURE,
NOTIFICATION_RETURNED_LETTER,
SMS_TYPE,
DailySortedLetter,
)
from app.notifications.process_notifications import persist_notification
from app.service.utils import service_allowed_to_send_to
@notify_celery.task(name="process-job")
@statsd(namespace="tasks")
def process_job(job_id, sender_id=None):
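    """Load the job, check that the service is active and within its daily sending limit,
    then stream the job CSV from S3 and queue one save task per row."""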
start = datetime.utcnow()
job = dao_get_job_by_id(job_id)
if job.job_status != JOB_STATUS_PENDING:
return
service = job.service
if not service.active:
job.job_status = JOB_STATUS_CANCELLED
dao_update_job(job)
current_app.logger.warning(
"Job {} has been cancelled, service {} is inactive".format(job_id, service.id))
return
if __sending_limits_for_job_exceeded(service, job, job_id):
return
job.job_status = JOB_STATUS_IN_PROGRESS
job.processing_started = start
dao_update_job(job)
db_template = dao_get_template_by_id(job.template_id, job.template_version)
TemplateClass = get_template_class(db_template.template_type)
template = TemplateClass(db_template.__dict__)
current_app.logger.debug("Starting job {} processing {} notifications".format(job_id, job.notification_count))
for row in RecipientCSV(
s3.get_job_from_s3(str(service.id), str(job_id)),
template_type=template.template_type,
placeholders=template.placeholders
).get_rows():
process_row(row, template, job, service, sender_id=sender_id)
job_complete(job, start=start)
def job_complete(job, resumed=False, start=None):
job.job_status = JOB_STATUS_FINISHED
finished = datetime.utcnow()
job.processing_finished = finished
dao_update_job(job)
if resumed:
current_app.logger.info(
"Resumed Job {} completed at {}".format(job.id, job.created_at)
)
else:
current_app.logger.info(
"Job {} created at {} started at {} finished at {}".format(job.id, job.created_at, start, finished)
)
def process_row(row, template, job, service, sender_id=None):
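    """Encrypt the row's recipient and personalisation and dispatch the save task that
    matches the template type (SMS, email or letter)."""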
template_type = template.template_type
encrypted = encryption.encrypt({
'template': str(template.id),
'template_version': job.template_version,
'job': str(job.id),
'to': row.recipient,
'row_number': row.index,
'personalisation': dict(row.personalisation)
})
send_fns = {
SMS_TYPE: save_sms,
EMAIL_TYPE: save_email,
LETTER_TYPE: save_letter
}
send_fn = send_fns[template_type]
task_kwargs = {}
if sender_id:
task_kwargs['sender_id'] = sender_id
send_fn.apply_async(
(
str(service.id),
create_uuid(),
encrypted,
),
task_kwargs,
queue=QueueNames.DATABASE if not service.research_mode else QueueNames.RESEARCH_MODE
)
def __sending_limits_for_job_exceeded(service, job, job_id):
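    """Return True (after marking the job) when today's sent total plus this job's
    notification count would exceed the service's message limit."""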
total_sent = fetch_todays_total_message_count(service.id)
if total_sent + job.notification_count > service.message_limit:
job.job_status = 'sending limits exceeded'
job.processing_finished = datetime.utcnow()
dao_update_job(job)
current_app.logger.info(
"Job {} size {} error. Sending limits {} exceeded".format(
job_id, job.notification_count, service.message_limit)
)
return True
return False
@notify_celery.task(bind=True, name="save-sms", max_retries=5, default_retry_delay=300)
@statsd(namespace="tasks")
def save_sms(self,
service_id,
notification_id,
encrypted_notification,
sender_id=None):
notification = encryption.decrypt(encrypted_notification)
service = dao_fetch_service_by_id(service_id)
template = dao_get_template_by_id(notification['template'], version=notification['template_version'])
if sender_id:
reply_to_text = dao_get_service_sms_sender_by_id(service_id, sender_id).sms_sender
else:
reply_to_text = template.get_reply_to_text()
if not service_allowed_to_send_to(notification['to'], service, KEY_TYPE_NORMAL):
current_app.logger.debug(
"SMS {} failed as restricted service".format(notification_id)
)
return
try:
saved_notification = persist_notification(
template_id=notification['template'],
template_version=notification['template_version'],
recipient=notification['to'],
service=service,
personalisation=notification.get('personalisation'),
notification_type=SMS_TYPE,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
created_at=datetime.utcnow(),
job_id=notification.get('job', None),
job_row_number=notification.get('row_number', None),
notification_id=notification_id,
reply_to_text=reply_to_text
)
        sms_sender = dao_get_sms_sender_by_service_id_and_number(service_id, reply_to_text)
if is_feature_enabled(FeatureFlag.SMS_SENDER_RATE_LIMIT_ENABLED) and sms_sender and sms_sender.rate_limit:
provider_tasks.deliver_sms_with_rate_limiting.apply_async(
[str(saved_notification.id)],
queue=QueueNames.SEND_SMS if not service.research_mode else QueueNames.RESEARCH_MODE
)
else:
provider_tasks.deliver_sms.apply_async(
[str(saved_notification.id)],
queue=QueueNames.SEND_SMS if not service.research_mode else QueueNames.RESEARCH_MODE
)
current_app.logger.debug(
"SMS {} created at {} for job {}".format(
saved_notification.id,
saved_notification.created_at,
notification.get('job', None))
)
except SQLAlchemyError as e:
handle_exception(self, notification, notification_id, e)
@notify_celery.task(bind=True, name="save-email", max_retries=5, default_retry_delay=300)
@statsd(namespace="tasks")
def save_email(self,
service_id,
notification_id,
encrypted_notification,
sender_id=None):
notification = encryption.decrypt(encrypted_notification)
service = dao_fetch_service_by_id(service_id)
template = dao_get_template_by_id(notification['template'], version=notification['template_version'])
if sender_id:
reply_to_text = dao_get_reply_to_by_id(service_id, sender_id).email_address
else:
reply_to_text = template.get_reply_to_text()
if not service_allowed_to_send_to(notification['to'], service, KEY_TYPE_NORMAL):
current_app.logger.info("Email {} failed as restricted service".format(notification_id))
return
try:
saved_notification = persist_notification(
template_id=notification['template'],
template_version=notification['template_version'],
recipient=notification['to'],
service=service,
personalisation=notification.get('personalisation'),
notification_type=EMAIL_TYPE,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
created_at=datetime.utcnow(),
job_id=notification.get('job', None),
job_row_number=notification.get('row_number', None),
notification_id=notification_id,
reply_to_text=reply_to_text
)
provider_tasks.deliver_email.apply_async(
[str(saved_notification.id)],
queue=QueueNames.SEND_EMAIL if not service.research_mode else QueueNames.RESEARCH_MODE
)
current_app.logger.debug("Email {} created at {}".format(saved_notification.id, saved_notification.created_at))
except SQLAlchemyError as e:
handle_exception(self, notification, notification_id, e)
@notify_celery.task(bind=True, name="save-letter", max_retries=5, default_retry_delay=300)
@statsd(namespace="tasks")
def save_letter(
self,
service_id,
notification_id,
encrypted_notification,
):
notification = encryption.decrypt(encrypted_notification)
# we store the recipient as just the first item of the person's address
recipient = notification['personalisation']['addressline1']
service = dao_fetch_service_by_id(service_id)
template = dao_get_template_by_id(notification['template'], version=notification['template_version'])
try:
# if we don't want to actually send the letter, then start it off in SENDING so we don't pick it up
status = NOTIFICATION_CREATED if not service.research_mode else NOTIFICATION_SENDING
saved_notification = persist_notification(
template_id=notification['template'],
template_version=notification['template_version'],
template_postage=template.postage,
recipient=recipient,
service=service,
personalisation=notification['personalisation'],
notification_type=LETTER_TYPE,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
created_at=datetime.utcnow(),
job_id=notification['job'],
job_row_number=notification['row_number'],
notification_id=notification_id,
reference=create_random_identifier(),
reply_to_text=template.get_reply_to_text(),
status=status
)
if not service.research_mode:
letters_pdf_tasks.create_letters_pdf.apply_async(
[str(saved_notification.id)],
queue=QueueNames.CREATE_LETTERS_PDF
)
elif current_app.config['NOTIFY_ENVIRONMENT'] in ['preview', 'development']:
research_mode_tasks.create_fake_letter_response_file.apply_async(
(saved_notification.reference,),
queue=QueueNames.RESEARCH_MODE
)
else:
update_notification_status_by_reference(saved_notification.reference, 'delivered')
current_app.logger.debug("Letter {} created at {}".format(saved_notification.id, saved_notification.created_at))
except SQLAlchemyError as e:
handle_exception(self, notification, notification_id, e)
@notify_celery.task(bind=True, name='update-letter-notifications-to-sent')
@statsd(namespace="tasks")
def update_letter_notifications_to_sent_to_dvla(self, notification_references):
# This task will be called by the FTP app to update notifications as sent to DVLA
provider = get_current_provider(LETTER_TYPE)
updated_count, _ = dao_update_notifications_by_reference(
notification_references,
{
'status': NOTIFICATION_SENDING,
'sent_by': provider.identifier,
'sent_at': datetime.utcnow(),
'updated_at': datetime.utcnow()
}
)
current_app.logger.info("Updated {} letter notifications to sending".format(updated_count))
@notify_celery.task(bind=True, name='update-letter-notifications-to-error')
@statsd(namespace="tasks")
def update_letter_notifications_to_error(self, notification_references):
# This task will be called by the FTP app to update notifications as sent to DVLA
updated_count, _ = dao_update_notifications_by_reference(
notification_references,
{
'status': NOTIFICATION_TECHNICAL_FAILURE,
'updated_at': datetime.utcnow()
}
)
message = "Updated {} letter notifications to technical-failure with references {}".format(
updated_count, notification_references
)
raise NotificationTechnicalFailureException(message)
def handle_exception(task, notification, notification_id, exc):
if not get_notification_by_id(notification_id):
retry_msg = '{task} notification for job {job} row number {row} and notification id {noti}'.format(
task=task.__name__,
job=notification.get('job', None),
row=notification.get('row_number', None),
noti=notification_id
)
# Sometimes, SQS plays the same message twice. We should be able to catch an IntegrityError, but it seems
# SQLAlchemy is throwing a FlushError. So we check if the notification id already exists then do not
# send to the retry queue.
current_app.logger.exception('Retry' + retry_msg)
try:
task.retry(queue=QueueNames.RETRY, exc=exc)
except task.MaxRetriesExceededError:
current_app.logger.error('Max retry failed' + retry_msg)
def get_template_class(template_type):
if template_type == SMS_TYPE:
return SMSMessageTemplate
elif template_type in (EMAIL_TYPE, LETTER_TYPE):
# since we don't need rendering capabilities (we only need to extract placeholders) both email and letter can
# use the same base template
return WithSubjectTemplate
@notify_celery.task(bind=True, name='update-letter-notifications-statuses')
@statsd(namespace="tasks")
def update_letter_notifications_statuses(self, filename):
notification_updates = parse_dvla_file(filename)
temporary_failures = []
for update in notification_updates:
check_billable_units(update)
update_letter_notification(filename, temporary_failures, update)
if temporary_failures:
# This will alert Notify that DVLA was unable to deliver the letters, we need to investigate
message = "DVLA response file: {filename} has failed letters with notification.reference {failures}" \
.format(filename=filename, failures=temporary_failures)
raise DVLAException(message)
@notify_celery.task(bind=True, name="record-daily-sorted-counts")
@statsd(namespace="tasks")
def record_daily_sorted_counts(self, filename):
sorted_letter_counts = defaultdict(int)
notification_updates = parse_dvla_file(filename)
for update in notification_updates:
sorted_letter_counts[update.cost_threshold.lower()] += 1
unknown_status = sorted_letter_counts.keys() - {'unsorted', 'sorted'}
if unknown_status:
message = 'DVLA response file: {} contains unknown Sorted status {}'.format(
filename, unknown_status.__repr__()
)
raise DVLAException(message)
billing_date = get_billing_date_in_est_from_filename(filename)
persist_daily_sorted_letter_counts(day=billing_date,
file_name=filename,
sorted_letter_counts=sorted_letter_counts)
def parse_dvla_file(filename):
bucket_location = '{}-ftp'.format(current_app.config['NOTIFY_EMAIL_FROM_DOMAIN'])
response_file_content = s3.get_s3_file(bucket_location, filename)
try:
return process_updates_from_file(response_file_content)
except TypeError:
raise DVLAException('DVLA response file: {} has an invalid format'.format(filename))
def get_billing_date_in_est_from_filename(filename):
# exclude seconds from the date since we don't need it. We got a date ending in 60 second - which is not valid.
datetime_string = filename.split('-')[1][:-2]
datetime_obj = datetime.strptime(datetime_string, '%Y%m%d%H%M')
return convert_utc_to_local_timezone(datetime_obj).date()
def persist_daily_sorted_letter_counts(day, file_name, sorted_letter_counts):
daily_letter_count = DailySortedLetter(
billing_day=day,
file_name=file_name,
unsorted_count=sorted_letter_counts['unsorted'],
sorted_count=sorted_letter_counts['sorted']
)
dao_create_or_update_daily_sorted_letter(daily_letter_count)
def process_updates_from_file(response_file):
NotificationUpdate = namedtuple('NotificationUpdate', ['reference', 'status', 'page_count', 'cost_threshold'])
notification_updates = [NotificationUpdate(*line.split('|')) for line in response_file.splitlines()]
return notification_updates
def update_letter_notification(filename, temporary_failures, update):
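    """Mark the referenced letter notification delivered when DVLA reports it sent,
    otherwise mark it temporary-failure and record the reference for alerting."""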
if update.status == DVLA_RESPONSE_STATUS_SENT:
status = NOTIFICATION_DELIVERED
else:
status = NOTIFICATION_TEMPORARY_FAILURE
temporary_failures.append(update.reference)
updated_count, _ = dao_update_notifications_by_reference(
references=[update.reference],
update_dict={"status": status,
"updated_at": datetime.utcnow()
}
)
if not updated_count:
msg = "Update letter notification file {filename} failed: notification either not found " \
"or already updated from delivered. Status {status} for notification reference {reference}".format(
filename=filename, status=status, reference=update.reference)
current_app.logger.info(msg)
def check_billable_units(notification_update):
notification = dao_get_notification_history_by_reference(notification_update.reference)
if int(notification_update.page_count) != notification.billable_units:
msg = 'Notification with id {} has {} billable_units but DVLA says page count is {}'.format(
notification.id, notification.billable_units, notification_update.page_count)
        # Raise and immediately catch so logger.exception records a traceback for the
        # mismatch without failing the surrounding task.
        try:
raise DVLAException(msg)
except DVLAException:
current_app.logger.exception(msg)
@notify_celery.task(name='process-incomplete-jobs')
@statsd(namespace="tasks")
def process_incomplete_jobs(job_ids):
jobs = [dao_get_job_by_id(job_id) for job_id in job_ids]
# reset the processing start time so that the check_job_status scheduled task doesn't pick this job up again
for job in jobs:
job.job_status = JOB_STATUS_IN_PROGRESS
job.processing_started = datetime.utcnow()
dao_update_job(job)
current_app.logger.info("Resuming Job(s) {}".format(job_ids))
for job_id in job_ids:
process_incomplete_job(job_id)
def process_incomplete_job(job_id):
job = dao_get_job_by_id(job_id)
last_notification_added = dao_get_last_notification_added_for_job_id(job_id)
if last_notification_added:
resume_from_row = last_notification_added.job_row_number
else:
resume_from_row = -1 # The first row in the csv with a number is row 0
current_app.logger.info("Resuming job {} from row {}".format(job_id, resume_from_row))
db_template = dao_get_template_by_id(job.template_id, job.template_version)
TemplateClass = get_template_class(db_template.template_type)
template = TemplateClass(db_template.__dict__)
for row in RecipientCSV(
s3.get_job_from_s3(str(job.service_id), str(job.id)),
template_type=template.template_type,
placeholders=template.placeholders
).get_rows():
if row.index > resume_from_row:
process_row(row, template, job, job.service)
job_complete(job, resumed=True)
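# Worked example with hypothetical numbers: if the last notification persisted for the
# job has job_row_number 9, resume_from_row is 9 and only CSV rows with index 10 and
# above are re-processed; a job with no notifications yet restarts from row 0.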
@notify_celery.task(name='process-returned-letters-list')
@statsd(namespace="tasks")
def process_returned_letters_list(notification_references):
updated, updated_history = dao_update_notifications_by_reference(
notification_references,
{"status": NOTIFICATION_RETURNED_LETTER}
)
current_app.logger.info(
"Updated {} letter notifications ({} history notifications, from {} references) to returned-letter".format(
updated, updated_history, len(notification_references)
)
)
| 37.933099 | 120 | 0.704864 |
4a25360bc3bd82a7b4326f53dc1d57738cd802b1 | 7,339 | py | Python | research/delf/delf/python/training/model/export_global_model.py | jkreddy123/models | 30b6962c4772d7d7b24a68d4477979fa88a713a5 | [
"Apache-2.0"
] | null | null | null | research/delf/delf/python/training/model/export_global_model.py | jkreddy123/models | 30b6962c4772d7d7b24a68d4477979fa88a713a5 | [
"Apache-2.0"
] | null | null | null | research/delf/delf/python/training/model/export_global_model.py | jkreddy123/models | 30b6962c4772d7d7b24a68d4477979fa88a713a5 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export global feature tensorflow inference model.
The exported model may leverage image pyramids for multi-scale processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import delf_model
from delf.python.training.model import delg_model
from delf.python.training.model import export_model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt_path', '/tmp/delf-logdir/delf-weights',
'Path to saved checkpoint.')
flags.DEFINE_string('export_path', None, 'Path where model will be exported.')
flags.DEFINE_list(
'input_scales_list', None,
'Optional input image scales to use. If None (default), an input end-point '
'"input_scales" is added for the exported model. If not None, the '
'specified list of floats will be hard-coded as the desired input scales.')
flags.DEFINE_enum(
'multi_scale_pool_type', 'None', ['None', 'average', 'sum'],
"If 'None' (default), the model is exported with an output end-point "
"'global_descriptors', where the global descriptor for each scale is "
"returned separately. If not 'None', the global descriptor of each scale is"
' pooled and a 1D global descriptor is returned, with output end-point '
"'global_descriptor'.")
flags.DEFINE_boolean('normalize_global_descriptor', False,
'If True, L2-normalizes global descriptor.')
flags.DEFINE_boolean('delg_global_features', False,
'Whether the model uses a DELG-like global feature head.')
flags.DEFINE_float(
'delg_gem_power', 3.0,
'Power for Generalized Mean pooling. Used only if --delg_global_features'
    ' is present.')
flags.DEFINE_integer(
'delg_embedding_layer_dim', 2048,
    'Size of the FC whitening layer (embedding layer). Used only if '
'--delg_global_features is present.')
class _ExtractModule(tf.Module):
"""Helper module to build and save global feature model."""
def __init__(self,
multi_scale_pool_type='None',
normalize_global_descriptor=False,
input_scales_tensor=None,
delg_global_features=False,
delg_gem_power=3.0,
delg_embedding_layer_dim=2048):
"""Initialization of global feature model.
Args:
multi_scale_pool_type: Type of multi-scale pooling to perform.
normalize_global_descriptor: Whether to L2-normalize global descriptor.
input_scales_tensor: If None, the exported function to be used should be
ExtractFeatures, where an input end-point "input_scales" is added for
the exported model. If not None, the specified 1D tensor of floats will
be hard-coded as the desired input scales, in conjunction with
ExtractFeaturesFixedScales.
delg_global_features: Whether the model uses a DELG-like global feature
head.
delg_gem_power: Power for Generalized Mean pooling in the DELG model. Used
only if 'delg_global_features' is True.
delg_embedding_layer_dim: Size of the FC whitening layer (embedding
layer). Used only if 'delg_global_features' is True.
"""
self._multi_scale_pool_type = multi_scale_pool_type
self._normalize_global_descriptor = normalize_global_descriptor
if input_scales_tensor is None:
self._input_scales_tensor = []
else:
self._input_scales_tensor = input_scales_tensor
# Setup the DELF model for extraction.
if delg_global_features:
self._model = delg_model.Delg(
block3_strides=False,
name='DELG',
gem_power=delg_gem_power,
embedding_layer_dim=delg_embedding_layer_dim)
else:
self._model = delf_model.Delf(block3_strides=False, name='DELF')
def LoadWeights(self, checkpoint_path):
self._model.load_weights(checkpoint_path)
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image'),
tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
tf.TensorSpec(
shape=[None], dtype=tf.int32, name='input_global_scales_ind')
])
def ExtractFeatures(self, input_image, input_scales, input_global_scales_ind):
extracted_features = export_model_utils.ExtractGlobalFeatures(
input_image,
input_scales,
input_global_scales_ind,
lambda x: self._model.backbone.build_call(x, training=False),
multi_scale_pool_type=self._multi_scale_pool_type,
normalize_global_descriptor=self._normalize_global_descriptor)
print("inside ExtractFeatures called")
named_output_tensors = {}
if self._multi_scale_pool_type == 'None':
named_output_tensors['global_descriptors'] = tf.identity(
extracted_features, name='global_descriptors')
else:
named_output_tensors['global_descriptor'] = tf.identity(
extracted_features, name='global_descriptor')
return named_output_tensors
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image')
])
def ExtractFeaturesFixedScales(self, input_image):
return self.ExtractFeatures(input_image, self._input_scales_tensor,
tf.range(tf.size(self._input_scales_tensor)))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export_path = FLAGS.export_path
if os.path.exists(export_path):
raise ValueError('export_path %s already exists.' % export_path)
if FLAGS.input_scales_list is None:
input_scales_tensor = None
else:
input_scales_tensor = tf.constant(
[float(s) for s in FLAGS.input_scales_list],
dtype=tf.float32,
shape=[len(FLAGS.input_scales_list)],
name='input_scales')
module = _ExtractModule(FLAGS.multi_scale_pool_type,
FLAGS.normalize_global_descriptor,
input_scales_tensor, FLAGS.delg_global_features,
FLAGS.delg_gem_power, FLAGS.delg_embedding_layer_dim)
# Save the module
if FLAGS.input_scales_list is None:
served_function = module.ExtractFeatures
else:
served_function = module.ExtractFeaturesFixedScales
# Load the weights.
checkpoint_path = FLAGS.ckpt_path
module.LoadWeights(checkpoint_path)
print('Checkpoint loaded from ', checkpoint_path)
tf.saved_model.save(
module, export_path, signatures={'serving_default': served_function})
if __name__ == '__main__':
app.run(main)
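# Example invocation (paths and scale values are illustrative, not taken from this
# repository's documentation; the flags themselves are defined above):
#   python3 export_global_model.py \
#     --ckpt_path=/tmp/delf-logdir/delf-weights \
#     --export_path=/tmp/delf_global_model \
#     --input_scales_list=0.7071,1.0,1.4142 \
#     --multi_scale_pool_type=average \
#     --normalize_global_descriptor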
| 39.67027 | 80 | 0.710315 |
4a25373bbb45aded58d37b3ce2e9b4130ef30b75 | 1,680 | py | Python | scmoib/plotting/_silhouette.py | colomemaria/scmoib-package | 2e707502463a8a5fee62459e26570bee456334f4 | [
"MIT"
] | null | null | null | scmoib/plotting/_silhouette.py | colomemaria/scmoib-package | 2e707502463a8a5fee62459e26570bee456334f4 | [
"MIT"
] | null | null | null | scmoib/plotting/_silhouette.py | colomemaria/scmoib-package | 2e707502463a8a5fee62459e26570bee456334f4 | [
"MIT"
] | null | null | null | import episcanpy as epi
from anndata import AnnData
from typing import Union
# TODO remove extra parameters
def silhouette(
adata: AnnData,
cluster_annot: str,
value: str = 'X_pca',
metric: str = 'euclidean',
save: Union[str, None] = None,
palette: object = None,
key: object = None,
xlabel: object = None,
ylabel: object = None,
title: Union[str, None] = None,
size: str = 'large',
alternative_plot: bool = False,
name_cluster: bool = True,
name_cluster_pos: str = 'left'
) -> None:
"""
Parameters
----------
adata
Annotated data matrix.
cluster_annot
obs variable containing cluster annotation.
value
Obsm variable.
metric
Metric type.
save
Save the plot.
    palette, key, xlabel, ylabel, title, size, alternative_plot, name_cluster, name_cluster_pos
        Remaining plotting arguments; passed through unchanged to epi.pl.silhouette.
    """
epi.tl.silhouette(adata,
cluster_annot,
value=value,
metric=metric)
epi.pl.silhouette(adata,
cluster_annot,
value=value,
metric=metric,
key=key,
xlabel=xlabel,
ylabel=ylabel,
title=title,
size=size,
alternative_plot=alternative_plot,
name_cluster=name_cluster,
name_cluster_pos=name_cluster_pos,
palette=palette,
save=save,
)
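# Minimal usage sketch (assumes an AnnData object with an 'X_pca' embedding in .obsm
# and a 'leiden' clustering in .obs; both names are illustrative):
#   silhouette(adata, cluster_annot='leiden', value='X_pca', metric='euclidean')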
| 25.846154 | 56 | 0.491071 |
4a2537dd6156b884f52989e67ce5afd52ade7758 | 3,534 | py | Python | Tuition_Management_Software/Registration_Section/Analysis.py | Sam-Varghese/Tuition-Management-Software | 858b9b8832418ddc3f9cde29d89fd0c917614d17 | [
"MIT"
] | 1 | 2021-04-07T15:09:41.000Z | 2021-04-07T15:09:41.000Z | Tuition_Management_Software/Registration_Section/Analysis.py | Sam-Varghese/Tuition-Management-Software | 858b9b8832418ddc3f9cde29d89fd0c917614d17 | [
"MIT"
] | null | null | null | Tuition_Management_Software/Registration_Section/Analysis.py | Sam-Varghese/Tuition-Management-Software | 858b9b8832418ddc3f9cde29d89fd0c917614d17 | [
"MIT"
] | null | null | null | # Python file containing program to analyse registration records
# Importing necessary libraries
from Classes import *
from tkinter import *
from tkinter import ttk
import threading
import matplotlib.pyplot as plt
import time
import pandas as pd
lock = threading.Lock()
print('Importing necessary libraries for records button...')
table = pd.read_excel("Students_Records.xlsx")
def imports():
global Calendar, DateEntry, datetime, pd, plt, string, os, pywhatkit
from tkcalendar import Calendar, DateEntry
print('Calendar, DateEntry imported')
from datetime import datetime
print('Datetime imported')
import pandas as pd
print('Pandas imported')
import matplotlib.pyplot as plt
print('Matplotlib imported')
import string
print('String imported')
import os
print('OS imported')
import pywhatkit
print('Pywhatkit imported')
threading.Thread(target=imports).start()
def registration_analysis():
# Preparing window for analysis
print('Preparing window for registration analysis...')
reg_anal_win = Tk()
reg_anal_win_gui = window(reg_anal_win, 'Registration Analysis')
reg_anal_win_lf1 = LabelFrame(
reg_anal_win, text='Analysis Section', relief='groove', bd=10)
reg_anal_win_lf1.grid(row=1, column=0, columnspan=3, padx=10, pady=10)
# Asking for the type of analysis user wants
# Monthly analysis button program
def reg_anal_b1_func():
table['Joining Date']=pd.to_datetime(table['Joining Date'])
final_table=table.groupby(table['Joining Date'].dt.strftime('%B %Y'))
dic={}
for i in final_table:
dic[i[0]]=len(i[1].index)
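        # Illustrative shape of the result (labels depend on the spreadsheet data):
        # dic -> {'April 2021': 12, 'May 2021': 7}, i.e. students joined per month.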
plt.barh(list(dic.keys()),list(dic.values()))
plt.ylabel('Months')
plt.xlabel('Students Count')
plt.title('Monthly Analysis')
plt.gcf().canvas.set_window_title('Monthly Analysis')
plt.show()
reg_anal_b1 = ttk.Button(
reg_anal_win_lf1, text='Monthly Analysis', command=reg_anal_b1_func)
reg_anal_b1.grid(row=0, column=0, padx=10, pady=10)
# Classwise analysis button program
def reg_anal_b2_func():
unique_class=table.Class.unique()
stu_count=[]
for i in unique_class:
stu_count.append(len(table[table['Class']==i].index))
plt.barh(unique_class, stu_count)
plt.ylabel('Classes')
plt.xlabel('Strength')
plt.title('Classwise Analysis')
plt.gcf().canvas.set_window_title('Class Analysis')
plt.show()
print(list(unique_class), stu_count)
reg_anal_b2 = ttk.Button(
reg_anal_win_lf1, text='Classwise Analysis', command=reg_anal_b2_func)
reg_anal_b2.grid(row=0, column=1, padx=10, pady=10)
# Genderwise analysis button program
def reg_anal_b3_func():
graph=plt.bar(['Males', 'Females'], [len(table[table['Gender'] == 'Male'].index), len(
table[table['Gender'] == 'Female'].index)])
plt.title('Gender Analysis')
plt.xlabel('Gender')
plt.ylabel('Students Count')
plt.gcf().canvas.set_window_title('Gender Analysis')
graph[0].set_color('red')
graph[1].set_color('pink')
plt.show()
reg_anal_b3 = ttk.Button(
reg_anal_win_lf1, text='Genderwise Analysis', command=reg_anal_b3_func)
reg_anal_b3.grid(row=1, column=0, padx=10, pady=10, columnspan=2)
reg_anal_win.mainloop()
| 29.949153 | 94 | 0.651104 |
4a25383f159f7b8f9ec4fef6778b5eeeea8ce233 | 9,297 | py | Python | sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_kube_upgrade_shell.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 10 | 2020-02-07T18:57:44.000Z | 2021-09-11T10:29:34.000Z | sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_kube_upgrade_shell.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 1 | 2021-01-14T12:01:55.000Z | 2021-01-14T12:01:55.000Z | sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_kube_upgrade_shell.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 10 | 2020-10-13T08:37:46.000Z | 2022-02-09T00:21:25.000Z | #
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
from cgtsclient.tests import test_shell
from cgtsclient.v1.kube_upgrade import KubeUpgrade
class KubeUpgradeTest(test_shell.ShellTest):
def setUp(self):
super(KubeUpgradeTest, self).setUp()
def tearDown(self):
super(KubeUpgradeTest, self).tearDown()
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.list')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')
def test_kube_upgrade_show(self, mock_get_endpoint, mock_get_client,
mock_list):
mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
fake_kube_upgrade = {'from_version': 'v1.42.1',
'to_version': 'v1.42.2',
'state': 'upgrade-started',
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
'created_at': 'fake-created-time',
'updated_at': 'fake-updated-time',
}
fake_kube_upgrade_list = [KubeUpgrade(None, fake_kube_upgrade, True)]
mock_list.return_value = fake_kube_upgrade_list
self.make_env()
results = self.shell("kube-upgrade-show")
self.assertIn(fake_kube_upgrade['from_version'], results)
self.assertIn(fake_kube_upgrade['to_version'], results)
self.assertIn(fake_kube_upgrade['state'], results)
self.assertIn(fake_kube_upgrade['uuid'], results)
self.assertIn(fake_kube_upgrade['created_at'], results)
self.assertIn(fake_kube_upgrade['updated_at'], results)
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.create')
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.get')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')
def test_kube_upgrade_start(self, mock_get_endpoint, mock_get_client,
mock_get, mock_create):
mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
fake_kube_upgrade = {'from_version': 'v1.42.1',
'to_version': 'v1.42.2',
'state': 'upgrade-started',
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
'created_at': 'fake-created-time',
'updated_at': 'fake-updated-time',
}
mock_create.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
mock_get.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
self.make_env()
results = self.shell("kube-upgrade-start %s" %
fake_kube_upgrade['to_version'])
self.assertIn(fake_kube_upgrade['from_version'], results)
self.assertIn(fake_kube_upgrade['to_version'], results)
self.assertIn(fake_kube_upgrade['state'], results)
self.assertIn(fake_kube_upgrade['uuid'], results)
self.assertIn(fake_kube_upgrade['created_at'], results)
self.assertIn(fake_kube_upgrade['updated_at'], results)
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.create')
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.get')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')
def test_kube_upgrade_start_force(self, mock_get_endpoint, mock_get_client,
mock_get, mock_create):
mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
fake_kube_upgrade = {'from_version': 'v1.42.1',
'to_version': 'v1.42.2',
'state': 'upgrade-started',
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
'created_at': 'fake-created-time',
'updated_at': 'fake-updated-time',
}
mock_create.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
mock_get.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
self.make_env()
results = self.shell("kube-upgrade-start %s --force" %
fake_kube_upgrade['to_version'])
self.assertIn(fake_kube_upgrade['from_version'], results)
self.assertIn(fake_kube_upgrade['to_version'], results)
self.assertIn(fake_kube_upgrade['state'], results)
self.assertIn(fake_kube_upgrade['uuid'], results)
self.assertIn(fake_kube_upgrade['created_at'], results)
self.assertIn(fake_kube_upgrade['updated_at'], results)
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.update')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')
def test_kube_upgrade_download_images(self, mock_get_endpoint,
mock_get_client,
mock_update):
mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
fake_kube_upgrade = {'from_version': 'v1.42.1',
'to_version': 'v1.42.2',
'state': 'downloading-images',
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
'created_at': 'fake-created-time',
'updated_at': 'fake-updated-time',
}
mock_update.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
self.make_env()
results = self.shell("kube-upgrade-download-images")
self.assertIn(fake_kube_upgrade['from_version'], results)
self.assertIn(fake_kube_upgrade['to_version'], results)
self.assertIn(fake_kube_upgrade['state'], results)
self.assertIn(fake_kube_upgrade['uuid'], results)
self.assertIn(fake_kube_upgrade['created_at'], results)
self.assertIn(fake_kube_upgrade['updated_at'], results)
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.update')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')
def test_kube_upgrade_networking(self, mock_get_endpoint, mock_get_client,
mock_update):
mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
fake_kube_upgrade = {'from_version': 'v1.42.1',
'to_version': 'v1.42.2',
'state': 'upgrading-networking',
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
'created_at': 'fake-created-time',
'updated_at': 'fake-updated-time',
}
mock_update.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
self.make_env()
results = self.shell("kube-upgrade-networking")
self.assertIn(fake_kube_upgrade['from_version'], results)
self.assertIn(fake_kube_upgrade['to_version'], results)
self.assertIn(fake_kube_upgrade['state'], results)
self.assertIn(fake_kube_upgrade['uuid'], results)
self.assertIn(fake_kube_upgrade['created_at'], results)
self.assertIn(fake_kube_upgrade['updated_at'], results)
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.update')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')
def test_kube_upgrade_complete(self, mock_get_endpoint, mock_get_client,
mock_update):
mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
fake_kube_upgrade = {'from_version': 'v1.42.1',
'to_version': 'v1.42.2',
'state': 'upgrade-complete',
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
'created_at': 'fake-created-time',
'updated_at': 'fake-updated-time',
}
mock_update.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
self.make_env()
results = self.shell("kube-upgrade-complete")
self.assertIn(fake_kube_upgrade['from_version'], results)
self.assertIn(fake_kube_upgrade['to_version'], results)
self.assertIn(fake_kube_upgrade['state'], results)
self.assertIn(fake_kube_upgrade['uuid'], results)
self.assertIn(fake_kube_upgrade['created_at'], results)
self.assertIn(fake_kube_upgrade['updated_at'], results)
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.delete')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')
def test_kube_upgrade_delete(self, mock_get_endpoint, mock_get_client,
mock_delete):
mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
self.make_env()
results = self.shell("kube-upgrade-delete")
self.assertIn("Kubernetes upgrade deleted", results)
| 50.803279 | 79 | 0.613209 |
4a25384a96d55fb548fb11e7312eda0efa1fbbbc | 6,930 | py | Python | backend/still_timer_32077/settings.py | crowdbotics-apps/still-timer-32077 | 04d0d9d956a72f68830ac8e6609c12e4035223eb | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/still_timer_32077/settings.py | crowdbotics-apps/still-timer-32077 | 04d0d9d956a72f68830ac8e6609c12e4035223eb | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/still_timer_32077/settings.py | crowdbotics-apps/still-timer-32077 | 04d0d9d956a72f68830ac8e6609c12e4035223eb | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
Django settings for still_timer_32077 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'still_timer_32077.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'still_timer_32077.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
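# Illustrative environment needed to switch media storage to S3 (variable names match
# the env.str()/env.bool() reads above; the values shown are placeholders only):
#   AWS_ACCESS_KEY_ID=AKIA...  AWS_SECRET_ACCESS_KEY=...  AWS_STORAGE_BUCKET_NAME=my-bucket  AWS_STORAGE_REGION=us-east-1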
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 29.615385 | 112 | 0.731025 |
4a253a020764e9667dcf56ec87b8615862689bb0 | 23,503 | py | Python | gpMgmt/bin/gppylib/gpcatalog.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2020-07-08T13:20:27.000Z | 2020-07-08T13:20:27.000Z | gpMgmt/bin/gppylib/gpcatalog.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | [
"PostgreSQL",
"Apache-2.0"
] | 6 | 2020-06-24T18:56:06.000Z | 2022-02-26T08:53:11.000Z | gpMgmt/bin/gppylib/gpcatalog.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2021-09-17T15:46:31.000Z | 2021-09-17T15:46:31.000Z | #!/usr/bin/env python
#
# Copyright (c) 2010-2011 EMC Corporation. All Rights Reserved
#
"""
gpcatalog.py
Contains two classes representing catalog metadata:
Catalog - a container class for CatalogTables
CatalogTable - metadata about a single tables
"""
# ============================================================================
import os
import json
from gppylib import gplog
from gppylib.gpversion import GpVersion
logger = gplog.get_default_logger()
class GPCatalogException(Exception):
pass
# Hard coded since "master only" is not defined in the catalog
MASTER_ONLY_TABLES = [
'gp_configuration_history',
'gp_segment_configuration',
'pg_auth_time_constraint',
'pg_description',
'pg_partition',
'pg_partition_encoding',
'pg_partition_rule',
'pg_shdescription',
'pg_stat_last_operation',
'pg_stat_last_shoperation',
'pg_statistic',
]
# Hard coded tables that have different values on every segment
SEGMENT_LOCAL_TABLES = [
'gp_fastsequence', # AO segment row id allocations
'gp_id',
'pg_shdepend', # (not if we fix oid inconsistencies)
'pg_statistic',
]
# These catalog tables either do not use pg_depend or does not create an
# entry in pg_depend immediately when an entry is created in that
# catalog table
DEPENDENCY_EXCLUSION = [
'pg_authid',
'pg_compression',
'pg_conversion',
'pg_database',
'pg_enum',
'pg_namespace',
'pg_partition',
'pg_partition_rule',
'pg_resgroup',
'pg_resgroupcapability',
'pg_resourcetype',
'pg_resqueue',
'pg_resqueuecapability',
'pg_tablespace'
]
# ============================================================================
class GPCatalog():
"""
Catalog is a container class that contains dictionary of CatalogTable
objects.
It provides the CatalogTables with a context that they can use to
refer to other CatalogTables (e.g. describe foreign keys) and it
provides calling code with a simple wrapper for what a known catalog
layout looks like.
It supports multiple source versions of the database. It issues a
warning if there are catalog tables defined in the database that
it is unaware of, usually indicating that it is operating against
an unknown version.
"""
# --------------------------------------------------------------------
# Public API functions:
# - Catalog() - Create a Catalog object
# - getCatalogTable() - Returns a single CatalogTable
# - getCatalogTables() - Returns a list of CatalogTable
# - getCatalogVersion() - Returns a GpVersion
# --------------------------------------------------------------------
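    # Usage sketch (the connection object is assumed to come from this codebase's
    # db connection helpers; the table name below is illustrative):
    #   catalog = GPCatalog(dbConnection)
    #   pg_class = catalog.getCatalogTable('pg_class')
    #   columns = pg_class.getTableColumns(with_oid=False)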
def getCatalogTable(self, tablename):
"""
getCatalogTable(tablename) => Returns the specified CatalogTable
Raises: CatalogException when the table does not exist
"""
if tablename not in self._tables:
raise GPCatalogException("No such catalog table: %s" % str(tablename))
return self._tables[tablename]
def getCatalogTables(self):
"""
getCatalogTables() => Returns a list of CatalogTable
"""
return self._tables.values()
def getCatalogVersion(self):
"""
getCatalogVersion() => Returns the GpVersion object
"""
return self._version
# --------------------------------------------------------------------
# Private implementation functions:
# --------------------------------------------------------------------
def __init__(self, dbConnection):
"""
Catalog() constructor
1) Uses the supplied database connection to get a list of catalog tables
2) iterate through the list building up CatalogTable objects
3) Mark "master only" tables manually
4) Mark a couple primary keys manually
5) Mark foreign keys manually
6) Mark known catalog differences manually
7) Validate and return the Catalog object
"""
self._dbConnection = dbConnection
self._tables = {}
self._version = None
self._tidycat = {} # tidycat definitions from JSON file
version_query = """
SELECT version()
"""
catalog_query = """
SELECT relname, relisshared FROM pg_class
WHERE relnamespace=11 and relkind = 'r'
"""
# Read the catalog version from the database
try:
curs = self._query(version_query)
except Exception, e:
raise GPCatalogException("Error reading database version: " + str(e))
self._version = GpVersion(curs.getresult()[0][0])
# Read the list of catalog tables from the database
try:
curs = self._query(catalog_query)
except Exception, e:
raise GPCatalogException("Error reading catalog: " + str(e))
# Construct our internal representation of the catalog
for [relname, relisshared] in curs.getresult():
self._tables[relname] = GPCatalogTable(self, relname)
# Note: stupid API returns t/f for boolean value
self._tables[relname]._setShared(relisshared == 't')
# The tidycat.pl utility has been used to generate a json file
# describing aspects of the catalog that we can not currently
# interrogate from the catalog itself. This includes things
# like which tables are master only vs segment local and what
# the foreign key relationships are.
self._getJson()
# Which tables are "master only" is not derivable from the catalog
# so we have to set this manually.
self._markMasterOnlyTables()
        # We derived primary keys for most of the catalogs based on
        # unique indexes, but we have to manually set a few stragglers
self._setPrimaryKeys()
# Foreign key relationships of the catalog tables are not actually
# defined in the catalog, so must be obtained from tidycat
self._setForeignKeys()
# Most catalog tables are now ready to go, but some columns can
# not be compared directly between segments, we need to indicate
# these exceptions manually.
self._setKnownDifferences()
# Finally validate that everything looks right, this will issue
# warnings if there are any regular catalog tables that do not
# have primary keys set.
self._validate()
def _query(self, qry):
"""
Simple wrapper around querying the database connection
"""
return self._dbConnection.query(qry)
def _markMasterOnlyTables(self):
"""
We mark two types of catalog tables as "master only"
- True "master only" tables
- Tables we know to have different contents on master/segment
        While the latter are not technically "master only" they have
the property that we cannot validate cross segment consistency,
which makes them the same for our current purposes.
We may want to eventually move these other types of tables into
a different classification.
"""
for name in MASTER_ONLY_TABLES:
if name in self._tables:
self._tables[name]._setMasterOnly()
for name in SEGMENT_LOCAL_TABLES:
if name in self._tables:
self._tables[name]._setMasterOnly()
def _setPrimaryKeys(self):
"""
Most of the catalog primary keys are set automatically in
CatalogTable by looking at unique indexes over the catalogs.
However there are a couple of catalog tables that do not have
unique indexes that we still want to perform cross segment
consistency on, for them we have to manually set a primary key
"""
self._tables['gp_version_at_initdb']._setPrimaryKey(
"schemaversion productversion")
self._tables['pg_constraint']._setPrimaryKey(
"conname connamespace conrelid contypid")
self._tables['pg_depend']._setPrimaryKey(
"classid objid objsubid refclassid refobjid refobjsubid deptype")
if self._version >= "4.0":
self._tables['pg_resqueuecapability']._setPrimaryKey(
"resqueueid restypid")
def _getJson(self):
"""
Read the json file generated by tidycat which contains, among other
things, the primary key/foreign key relationships for the catalog
tables. Build the fkeys for each table and validate them against
the catalog.
"""
indir = os.path.dirname(__file__)
jname = str(self._version.getVersionRelease()) + ".json"
try:
# json doc in data subdirectory of pylib module
infil = open(os.path.join(indir, "data", jname), "r")
d = json.load(infil)
# remove the tidycat comment
if "__comment" in d:
del d["__comment"]
if "__info" in d:
del d["__info"]
infil.close()
self._tidycat = d
except Exception, e:
# older versions of product will not have tidycat defs --
# need to handle this case
logger.warn("GPCatalogTable: "+ str(e))
def _setForeignKeys(self):
"""
Setup the foreign key relationships amongst the catalogs. We
drive this based on the tidycat generate json file since this
information is not derivable from the catalog.
"""
try:
for tname, tdef in self._tidycat.iteritems():
if "foreign_keys" not in tdef:
continue
for fkdef in tdef["foreign_keys"]:
fk2 = GPCatalogTableForeignKey(tname,
fkdef[0],
fkdef[1],
fkdef[2])
self._tables[tname]._addForeignKey(fk2)
except Exception, e:
# older versions of product will not have tidycat defs --
# need to handle this case
logger.warn("GPCatalogTable: "+ str(e))
def _setKnownDifferences(self):
"""
Some catalogs have columns that, for one reason or another, we
need to mark as being different between the segments and the master.
These fall into two categories:
- Bugs (marked with the appropriate jiras)
- A small number of "special" columns
"""
# -------------
# Special cases
# -------------
# pg_class:
# - relfilenode should generally be consistent, but may not be (jira?)
# - relpages/reltuples/relfrozenxid/relminmxid are all vacumm/analyze related
# - relhasindex/relhaspkey are only cleared when vacuum completes
# - relowner has its own checks:
# => may want to separate out "owner" columns like acl and oid
self._tables['pg_class']._setKnownDifferences(
"relfilenode relpages reltuples relhasindex relhaspkey relowner relfrozenxid relminmxid relallvisible")
# pg_type: typowner has its own checks:
# => may want to separate out "owner" columns like acl and oid
self._tables['pg_type']._setKnownDifferences("typowner")
# pg_database: datfrozenxid and datminmxid are vacuum related
self._tables['pg_database']._setKnownDifferences("datfrozenxid datminmxid")
# -------------
# Issues still present in the product
# -------------
# MPP-11289 : inconsistent OIDS for table "default values"
self._tables['pg_attrdef']._setKnownDifferences("oid")
# MPP-11284 : inconsistent OIDS for constraints
self._tables['pg_constraint']._setKnownDifferences("oid")
# MPP-11282: Inconsistent oids for language callback functions
# MPP-12015: Inconsistent oids for operator communtator/negator functions
self._tables['pg_proc']._setKnownDifferences("oid prolang")
# MPP-11282: pg_language oids and callback functions
self._tables['pg_language']._setKnownDifferences("oid lanplcallfoid lanvalidator")
# MPP-12015: Inconsistent oids for operator communtator/negator functions
# MPP-12015: Inconsistent oids for operator sort/cmp operators
self._tables['pg_operator']._setKnownDifferences(
"oid oprcom oprnegate oprlsortop oprrsortop oprltcmpop oprgtcmpop")
self._tables['pg_aggregate']._setKnownDifferences("aggsortop")
# MPP-11281 : Inconsistent oids for views
self._tables['pg_rewrite']._setKnownDifferences("oid ev_action")
# MPP-11285 : Inconsistent oids for triggers
self._tables['pg_trigger']._setKnownDifferences("oid")
# MPP-11575 : Inconsistent handling of indpred for partial indexes
# indcheckxmin column related to HOT feature in pg_index is calculated
# independently for master and segment based on individual nodes
# transaction state, hence it can be different so skip it from checks.
self._tables['pg_index']._setKnownDifferences("indpred, indcheckxmin")
# This section should have exceptions for tables for which OIDs are not
# synchronized between master and segments, refer function
# RelationNeedsSynchronizedOIDs() in catalog.c
self._tables['pg_amop']._setKnownDifferences("oid, amopopr")
self._tables['pg_amproc']._setKnownDifferences("oid");
def _validate(self):
"""
Check that all tables defined in the catalog have either been marked
as "master only" or have a primary key
"""
for relname in sorted(self._tables):
if self._tables[relname].isMasterOnly():
continue
if self._tables[relname].getPrimaryKey() == []:
logger.warn("GPCatalogTable: unable to derive primary key for %s"
% str(relname))
# ============================================================================
class GPCatalogTable():
# --------------------------------------------------------------------
# Public API functions:
#
# Accessor functions
# - getTableName() - Returns the table name (string)
# - tableHasOids() - Returns if the table has oids (boolean)
# - isMasterOnly() - Returns if the table is "master only" (boolean)
# - isShared() - Returns if the table is shared (boolean)
# - getTableAcl() - Returns name of the acl column (string|None)
# - getPrimaryKey() - Returns the primary key (list)
# - getForeignKeys() - Returns a list of foreign keys (list)
# - getTableColumns() - Returns a list of table columns (list)
#
# --------------------------------------------------------------------
def getTableName(self):
return self._name
def tableHasOids(self):
return self._has_oid
def tableHasConsistentOids(self):
return (self._has_oid and 'oid' not in self._excluding)
def isMasterOnly(self):
return self._master
def isShared(self):
return self._isshared
def getTableAcl(self):
return self._acl
def getPrimaryKey(self):
return self._pkey
def getForeignKeys(self):
return self._fkey
def getTableColtypes(self):
return self._coltypes
def getTableColumns(self, with_oid=True, with_acl=True, excluding=None):
'''
Returns the list of columns this catalog table contains.
Optionally excluding:
- oid columns
- acl columns
- user specified list of excluded columns
By default excludes the "known differences" columns, to include them
pass [] as the excluding list.
'''
if excluding is None:
excluding = self._excluding
else:
excluding = set(excluding)
# Return all columns that are not excluded
return [
x for x in self._columns
if ((with_oid or x != 'oid') and
(with_acl or x != self._acl) and
(x not in excluding))
]
# --------------------------------------------------------------------
# Private Implementation functions
# --------------------------------------------------------------------
def __init__(self, parent, name, pkey=None):
"""
Create a new GPCatalogTable object
Uses the supplied database connection to identify:
- What are the columns in the table?
- Does the catalog table have an oid column?
- Does the catalog table have an acl column?
"""
assert(name != None)
# Split string input
if isinstance(pkey, str):
pkey = pkey.split()
self._parent = parent
self._name = name
self._master = False
self._isshared = False
self._pkey = list(pkey or [])
self._fkey = [] # foreign key
self._excluding = set()
self._columns = [] # initial value
self._coltypes = {}
self._acl = None # initial value
self._has_oid = False # initial value
# Query the database to lookup the catalog's definition
qry = """
select a.attname, a.atttypid, t.typname
from pg_attribute a
left outer join pg_type t on (a.atttypid = t.oid)
where attrelid = 'pg_catalog.%s'::regclass and
(attnum > 0 or attname='oid')
order by attnum
""" % name
try:
cur = parent._query(qry)
except:
# The cast to regclass will fail if the catalog table doesn't
# exist.
raise GPCatalogException("Catalog table %s does not exist" % name)
if cur.ntuples() == 0:
raise GPCatalogException("Catalog table %s does not exist" % name)
for row in cur.getresult():
(attname, atttype, typname) = row
# Mark if the catalog has an oid column
if attname == 'oid':
self._has_oid = True
# Detect the presence of an ACL column
if atttype == 1034:
self._acl = attname
# Add to the list of columns
self._columns.append(attname)
# Add to the coltypes dictionary
self._coltypes[attname] = typname
# If a primary key was not specified try to locate a unique index
# If a table has multiple matching indexes, we'll pick the first index
# order by indkey to avoid the issue of MPP-16663.
if self._pkey == []:
qry = """
SELECT attname FROM (
SELECT unnest(indkey) as keynum FROM (
SELECT indkey
FROM pg_index
WHERE indisunique and not (indkey @> '-2'::int2vector) and
indrelid = 'pg_catalog.{catname}'::regclass
ORDER BY indkey LIMIT 1
) index_keys
) unnested_index_keys
JOIN pg_attribute ON (attnum = keynum)
WHERE attrelid = 'pg_catalog.{catname}'::regclass
""".format(catname=name)
cur = parent._query(qry)
self._pkey = [row[0] for row in cur.getresult()]
# Primary key must be in the column list
for k in self._pkey:
if k not in self._columns:
raise GPCatalogException("%s.%s does not exist" % (name, k))
def __str__(self):
return self._name
def __hash__(self):
return hash(self.__str__())
def __repr__(self):
return "GPCatalogTable: %s; pkey: %s; oids: %s; acl: %s" % (
str(self._name), str(self._pkey), str(self._has_oid), str(self._acl),
)
def __cmp__(self, other):
return cmp(other, self._name)
def _setMasterOnly(self, value=True):
self._master = value
def _setShared(self, value):
self._isshared = value
def _setPrimaryKey(self, pkey=None):
# Split string input
if isinstance(pkey, str):
pkey = pkey.split()
# Check that the specified keys are real columns
pkey = list(pkey or [])
for k in pkey:
if k not in self._columns:
raise Exception("%s.%s does not exist" % (self._name, k))
self._pkey = pkey
def _addForeignKey(self, fkey):
# Check that the specified keys are real columns
for k in fkey.getColumns():
if k not in self._columns:
raise Exception("%s.%s does not exist" % (self._name, k))
self._fkey.append(fkey)
def _setKnownDifferences(self, diffs):
# Split string input
if isinstance(diffs, str):
diffs = diffs.split()
self._excluding = set(diffs or [])
# ============================================================================
class GPCatalogTableForeignKey():
"""
GPCatalogTableForeignKey is a container for a single instance of a
postgres catalog primary key/foreign key relationship. The
foreign key is a set of columns for with a table, associated with
a set of primary key columns on a primary key table.
Note that tables can self-join, so it is possible to have the
primary and foreign key tables be one and the same.
This class constructs the key, but does not validate it against
the catalog.
"""
# --------------------------------------------------------------------
# Public API functions:
#
# Accessor functions
# - getTableName() - Returns name of table with fkeys
# - getPkeyTableName() - Returns name of the pkey table for the fkeys
# - getColumns() - Returns a list of [foreign] key columns (list)
# - getPKey() - Returns a list of primary key columns (list)
#
# --------------------------------------------------------------------
def getTableName(self):
return self._tname
def getPkeyTableName(self):
return self._pktablename
def getColumns(self):
return self._columns
def getPKey(self):
return self._pkey
# --------------------------------------------------------------------
# Private Implementation functions
# --------------------------------------------------------------------
def __init__(self, tname, cols, pktablename, pkey):
"""
Create a new GPCatalogTableForeignKey object
"""
assert(tname != None)
assert(pktablename != None)
# Split string input
if isinstance(pkey, str):
pkey = pkey.split()
self._tname = tname
self._pktablename = pktablename
self._pkey = list(pkey or [])
self._columns = cols
def __str__(self):
return "%s: %s" % (self._tname, str(self._columns))
def __repr__(self):
return "GPCatalogTableForeignKey: %s; col: %s; " % (
str(self._tname), str(self._columns)
)
| 36.43876 | 115 | 0.578777 |
4a253a79b84e39b7f4e237df80f09e0236dce379 | 1,460 | py | Python | server/test_runner.py | Arun89-crypto/codechefsrm | bd793a40bf034f88deee3c98f342b86b3010d554 | [
"MIT"
] | null | null | null | server/test_runner.py | Arun89-crypto/codechefsrm | bd793a40bf034f88deee3c98f342b86b3010d554 | [
"MIT"
] | 1 | 2021-11-20T20:56:47.000Z | 2021-11-20T21:00:10.000Z | server/test_runner.py | Arun89-crypto/codechefsrm | bd793a40bf034f88deee3c98f342b86b3010d554 | [
"MIT"
] | 3 | 2021-11-20T16:48:40.000Z | 2021-12-05T13:44:17.000Z | import unittest
from tests import (
test_contact_us,
test_about_us,
test_team,
test_admin_register,
test_admin_login,
test_events,
test_access_refresh,
test_me,
)
from importlib import import_module
import argparse
def configure_options():
parser = argparse.ArgumentParser()
parser.add_argument("--module", nargs=2, default=[0, 0])
return parser
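# Illustrative invocation (module path and class name are examples, not verified against
# this repository): `python test_runner.py --module tests.test_team TestTeamPage` runs
# only that suite, while omitting --module runs every server test registered below.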
def get_server_tests(suite):
suite.addTest(unittest.makeSuite(test_contact_us.TestContactUs))
suite.addTest(unittest.makeSuite(test_about_us.TestAboutUs))
suite.addTest(unittest.makeSuite(test_team.TestTeamPage))
suite.addTest(unittest.makeSuite(test_admin_register.TestAdminRegister))
suite.addTest(unittest.makeSuite(test_admin_login.TestAdminLogin))
suite.addTest(unittest.makeSuite(test_events.TestEventsData))
suite.addTest(unittest.makeSuite(test_me.TestMe))
suite.addTest(unittest.makeSuite(test_access_refresh.TestAccessRefresh))
def main():
suite = unittest.TestSuite()
options = configure_options().parse_args()
if value := options.module[0]:
module = import_module(value)
test_class = getattr(module, options.module[1])
suite.addTest(unittest.makeSuite(test_class))
else:
get_server_tests(suite)
output = unittest.TextTestRunner(verbosity=2).run(suite)
if output.errors or output.failures:
print("Failing Tests")
if __name__ == "__main__":
main()
| 27.54717 | 76 | 0.736986 |
4a253b139096f01d4f9b234adc4a5862aab765f4 | 2,405 | py | Python | app/request.py | Cian747/job | b268aa6826fb31649f7a0f8da1bdd8b84a04e122 | [
"MIT"
] | null | null | null | app/request.py | Cian747/job | b268aa6826fb31649f7a0f8da1bdd8b84a04e122 | [
"MIT"
] | null | null | null | app/request.py | Cian747/job | b268aa6826fb31649f7a0f8da1bdd8b84a04e122 | [
"MIT"
] | null | null | null |
import urllib.request,json
from .models.job import Jobs
from flask_login import current_user
from app import db
job_api_url = None
def configure_request(app):
global job_api_url
# job_api_url = app.config['JOB_API_URL']
job_api_url='https://api.lever.co/v0/postings/leverdemo?mode=json'
def general():
'''
fetch all the general jobs
'''
get_job_url = job_api_url
with urllib.request.urlopen(get_job_url) as url:
get_job_data = url.read()
get_job_response = json.loads(get_job_data)
if get_job_response:
for job in get_job_response:
job_id = job.get('id')
commitment = job.get('categories').get('commitment')
department = job.get('categories').get('department')
team = job.get('categories').get('team')
location = job.get('categories').get('location')
descriptionPlain =job.get('descriptionPlain')
text = job.get("text")
applyUrl =job.get('applyUrl')
if location is None or commitment is None:
location = 'Remote'
commitment = 'Full time'
                job = Jobs(job_id=job_id, commitment=commitment, department=department,
                           team=team, location=location, descriptionPlain=descriptionPlain,
                           text=text, applyUrl=applyUrl)
db.session.add(job)
db.session.commit()
return job
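# Rough call flow (a sketch, not documented in this file): configure_request(app) is
# run once at start-up to set job_api_url, after which general() fetches the Lever demo
# postings, persists each one as a Jobs row, and returns the last Jobs object created.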
# def general_two():
# '''
# fetch all the general jobs
# '''
# get_job_url = job_api_url
# with urllib.request.urlopen(get_job_url) as url:
# get_job_data = url.read()
# get_job_response = json.loads(get_job_data)
# if get_job_response:
# for job in get_job_response:
# job_id = job.get('id')
# descriptionPlain = job.get("descriptionPlain")
# text = job.get("text")
# applyUrl = job.get("applyUrl")
# job_posting = Jobs(job_id = job_id, commitment = commitment, department = department,
# team = team, location = location, descriptionPlain = descriptionPlain,
# text = text, applyUrl = applyUrl)
# db.session.add(job_posting)
# db.session.commit()
# return job_posting
| 31.233766 | 179 | 0.575052 |
4a253c292120ad05258e99b546aa2163240b1241 | 4,143 | py | Python | pysnmp/HP-ICF-AUTORUN.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/HP-ICF-AUTORUN.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/HP-ICF-AUTORUN.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HP-ICF-AUTORUN (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-AUTORUN
# Produced by pysmi-0.3.4 at Mon Apr 29 19:20:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Bits, Counter32, ModuleIdentity, Counter64, Unsigned32, Gauge32, Integer32, NotificationType, iso, TimeTicks, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Bits", "Counter32", "ModuleIdentity", "Counter64", "Unsigned32", "Gauge32", "Integer32", "NotificationType", "iso", "TimeTicks", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DisplayString, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TextualConvention")
hpicfAutorun = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42))
if mibBuilder.loadTexts: hpicfAutorun.setLastUpdated('200708240000Z')
if mibBuilder.loadTexts: hpicfAutorun.setOrganization('Hewlett-Packard Company, Workgroup Networks Division')
hpicfAutorunConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 1))
hpicfUsbAutorunEnable = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfUsbAutorunEnable.setStatus('current')
hpicfUsbAutorunSecureMode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfUsbAutorunSecureMode.setStatus('current')
hpicfUsbAutorunEncryptionKey = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfUsbAutorunEncryptionKey.setStatus('current')
hpicfAutorunConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 2))
hpicfAutorunCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 2, 1))
hpicfAutorunGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 2, 2))
hpicfAutorunCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 2, 1, 1)).setObjects(("HP-ICF-AUTORUN", "hpicfAutorunConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfAutorunCompliance = hpicfAutorunCompliance.setStatus('current')
hpicfAutorunConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 42, 2, 2, 1)).setObjects(("HP-ICF-AUTORUN", "hpicfUsbAutorunEnable"), ("HP-ICF-AUTORUN", "hpicfUsbAutorunSecureMode"), ("HP-ICF-AUTORUN", "hpicfUsbAutorunEncryptionKey"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfAutorunConfigGroup = hpicfAutorunConfigGroup.setStatus('current')
mibBuilder.exportSymbols("HP-ICF-AUTORUN", PYSNMP_MODULE_ID=hpicfAutorun, hpicfUsbAutorunEnable=hpicfUsbAutorunEnable, hpicfAutorunConfigGroup=hpicfAutorunConfigGroup, hpicfUsbAutorunSecureMode=hpicfUsbAutorunSecureMode, hpicfUsbAutorunEncryptionKey=hpicfUsbAutorunEncryptionKey, hpicfAutorunCompliance=hpicfAutorunCompliance, hpicfAutorun=hpicfAutorun, hpicfAutorunConformance=hpicfAutorunConformance, hpicfAutorunConfig=hpicfAutorunConfig, hpicfAutorunCompliances=hpicfAutorunCompliances, hpicfAutorunGroups=hpicfAutorunGroups)
| 111.972973 | 529 | 0.764905 |
4a253c514d4233ae90cc235c1df7630561e933ea | 923 | py | Python | NumericalMethods/lab_3/scalar_product_method.py | fivard/Third_course | fe0a331ab5e54ac31ccb0650b6b3a03ad3ab4cf9 | [
"MIT"
] | null | null | null | NumericalMethods/lab_3/scalar_product_method.py | fivard/Third_course | fe0a331ab5e54ac31ccb0650b6b3a03ad3ab4cf9 | [
"MIT"
] | null | null | null | NumericalMethods/lab_3/scalar_product_method.py | fivard/Third_course | fe0a331ab5e54ac31ccb0650b6b3a03ad3ab4cf9 | [
"MIT"
] | null | null | null | import numpy as np
np.set_printoptions(precision=3)
given_matrix = np.array([[11, 7, 3, 7],
[7, 10, -1, 4],
[3, -1, 16, -7],
[7, 4, -7, 15]])
eps = 0.01
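# Power ("scalar product") iteration: repeatedly multiply a normalized vector by
# the matrix; the ratio of scalar products (x, e) / (e, e), with x = A @ e,
# converges to the dominant eigenvalue once successive estimates differ by < eps.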
def find_max_own_value(matrix):
current_ly = 0
prev_ly = -1
x = np.array([1, 1, 1, 1])
while abs(current_ly - prev_ly) > eps:
prev_ly = current_ly
e = np.divide(x, np.linalg.norm(x, ord=np.inf))
# print("e = ", e)
x = np.dot(matrix, e)
# print("x = ", x)
current_ly = np.dot(x, e) / np.dot(e, e)
# print("current_ly = ", current_ly)
return current_ly
ly_a_max = find_max_own_value(given_matrix)
b = np.dot(ly_a_max, np.eye(4)) - given_matrix
ly_b_max = find_max_own_value(b)
ly_a_min = ly_a_max - ly_b_max
print("max = ", ly_a_max)
print("min = ", ly_a_min)
| 26.371429 | 55 | 0.505959 |
4a253c7e2b0b9e7995a08bba39f82a1435450f7c | 19,205 | py | Python | trio/_core/tests/test_ki.py | Zac-HD/trio | 3da1c6f846169d9a04371aa2230ac5f5a266f6a5 | [
"Apache-2.0",
"MIT"
] | null | null | null | trio/_core/tests/test_ki.py | Zac-HD/trio | 3da1c6f846169d9a04371aa2230ac5f5a266f6a5 | [
"Apache-2.0",
"MIT"
] | null | null | null | trio/_core/tests/test_ki.py | Zac-HD/trio | 3da1c6f846169d9a04371aa2230ac5f5a266f6a5 | [
"Apache-2.0",
"MIT"
] | null | null | null | import outcome
import pytest
import sys
import os
import signal
import threading
import contextlib
import time
from async_generator import (
async_generator, yield_, isasyncgenfunction, asynccontextmanager
)
from ... import _core
from ...testing import wait_all_tasks_blocked
from ..._util import signal_raise, is_main_thread
from ..._timeouts import sleep
from .tutil import slow
def ki_self():
signal_raise(signal.SIGINT)
def test_ki_self():
with pytest.raises(KeyboardInterrupt):
ki_self()
async def test_ki_enabled():
# Regular tasks aren't KI-protected
assert not _core.currently_ki_protected()
# Low-level call-soon callbacks are KI-protected
token = _core.current_trio_token()
record = []
def check():
record.append(_core.currently_ki_protected())
token.run_sync_soon(check)
await wait_all_tasks_blocked()
assert record == [True]
@_core.enable_ki_protection
def protected():
assert _core.currently_ki_protected()
unprotected()
@_core.disable_ki_protection
def unprotected():
assert not _core.currently_ki_protected()
protected()
@_core.enable_ki_protection
async def aprotected():
assert _core.currently_ki_protected()
await aunprotected()
@_core.disable_ki_protection
async def aunprotected():
assert not _core.currently_ki_protected()
await aprotected()
# make sure that the decorator here overrides the automatic manipulation
# that start_soon() does:
async with _core.open_nursery() as nursery:
nursery.start_soon(aprotected)
nursery.start_soon(aunprotected)
@_core.enable_ki_protection
def gen_protected():
assert _core.currently_ki_protected()
yield
for _ in gen_protected():
pass
@_core.disable_ki_protection
def gen_unprotected():
assert not _core.currently_ki_protected()
yield
for _ in gen_unprotected():
pass
# This used to be broken due to
#
# https://bugs.python.org/issue29590
#
# Specifically, after a coroutine is resumed with .throw(), then the stack
# makes it look like the immediate caller is the function that called
# .throw(), not the actual caller. So child() here would have a caller deep in
# the guts of the run loop, and always be protected, even when it shouldn't
# have been. (Solution: we don't use .throw() anymore.)
async def test_ki_enabled_after_yield_briefly():
@_core.enable_ki_protection
async def protected():
await child(True)
@_core.disable_ki_protection
async def unprotected():
await child(False)
async def child(expected):
import traceback
traceback.print_stack()
assert _core.currently_ki_protected() == expected
await _core.checkpoint()
traceback.print_stack()
assert _core.currently_ki_protected() == expected
await protected()
await unprotected()
# This also used to be broken due to
# https://bugs.python.org/issue29590
async def test_generator_based_context_manager_throw():
@contextlib.contextmanager
@_core.enable_ki_protection
def protected_manager():
assert _core.currently_ki_protected()
try:
yield
finally:
assert _core.currently_ki_protected()
with protected_manager():
assert not _core.currently_ki_protected()
with pytest.raises(KeyError):
# This is the one that used to fail
with protected_manager():
raise KeyError
async def test_agen_protection():
@_core.enable_ki_protection
@async_generator
async def agen_protected1():
assert _core.currently_ki_protected()
try:
await yield_()
finally:
assert _core.currently_ki_protected()
@_core.disable_ki_protection
@async_generator
async def agen_unprotected1():
assert not _core.currently_ki_protected()
try:
await yield_()
finally:
assert not _core.currently_ki_protected()
# Swap the order of the decorators:
@async_generator
@_core.enable_ki_protection
async def agen_protected2():
assert _core.currently_ki_protected()
try:
await yield_()
finally:
assert _core.currently_ki_protected()
@async_generator
@_core.disable_ki_protection
async def agen_unprotected2():
assert not _core.currently_ki_protected()
try:
await yield_()
finally:
assert not _core.currently_ki_protected()
for agen_fn in [
agen_protected1,
agen_protected2,
agen_unprotected1,
agen_unprotected2,
]:
async for _ in agen_fn(): # noqa
assert not _core.currently_ki_protected()
# asynccontextmanager insists that the function passed must itself be an
# async gen function, not a wrapper around one
if isasyncgenfunction(agen_fn):
async with asynccontextmanager(agen_fn)():
assert not _core.currently_ki_protected()
# Another case that's tricky due to:
# https://bugs.python.org/issue29590
with pytest.raises(KeyError):
async with asynccontextmanager(agen_fn)():
raise KeyError
# Test the case where there's no magic local anywhere in the call stack
def test_ki_enabled_out_of_context():
assert not _core.currently_ki_protected()
def test_ki_disabled_in_del():
def nestedfunction():
return _core.currently_ki_protected()
def __del__():
assert _core.currently_ki_protected()
assert nestedfunction()
__del__()
assert not nestedfunction()
def test_ki_protection_works():
async def sleeper(name, record):
try:
while True:
await _core.checkpoint()
except _core.Cancelled:
record.add((name + " ok"))
async def raiser(name, record):
try:
# os.kill runs signal handlers before returning, so we don't need
# to worry that the handler will be delayed
print("killing, protection =", _core.currently_ki_protected())
ki_self()
except KeyboardInterrupt:
print("raised!")
# Make sure we aren't getting cancelled as well as siginted
await _core.checkpoint()
record.add((name + " raise ok"))
raise
else:
print("didn't raise!")
# If we didn't raise (b/c protected), then we *should* get
# cancelled at the next opportunity
try:
await _core.wait_task_rescheduled(
lambda _: _core.Abort.SUCCEEDED
)
except _core.Cancelled:
record.add((name + " cancel ok"))
# simulated control-C during raiser, which is *unprotected*
print("check 1")
record = set()
async def check_unprotected_kill():
async with _core.open_nursery() as nursery:
nursery.start_soon(sleeper, "s1", record)
nursery.start_soon(sleeper, "s2", record)
nursery.start_soon(raiser, "r1", record)
with pytest.raises(KeyboardInterrupt):
_core.run(check_unprotected_kill)
assert record == {"s1 ok", "s2 ok", "r1 raise ok"}
# simulated control-C during raiser, which is *protected*, so the KI gets
# delivered to the main task instead
print("check 2")
record = set()
async def check_protected_kill():
async with _core.open_nursery() as nursery:
nursery.start_soon(sleeper, "s1", record)
nursery.start_soon(sleeper, "s2", record)
nursery.start_soon(
_core.enable_ki_protection(raiser), "r1", record
)
# __aexit__ blocks, and then receives the KI
with pytest.raises(KeyboardInterrupt):
_core.run(check_protected_kill)
assert record == {"s1 ok", "s2 ok", "r1 cancel ok"}
# kill at last moment still raises (run_sync_soon until it raises an
# error, then kill)
print("check 3")
async def check_kill_during_shutdown():
token = _core.current_trio_token()
def kill_during_shutdown():
assert _core.currently_ki_protected()
try:
token.run_sync_soon(kill_during_shutdown)
except _core.RunFinishedError:
# it's too late for regular handling! handle this!
print("kill! kill!")
ki_self()
token.run_sync_soon(kill_during_shutdown)
with pytest.raises(KeyboardInterrupt):
_core.run(check_kill_during_shutdown)
# KI arrives very early, before main is even spawned
print("check 4")
class InstrumentOfDeath:
def before_run(self):
ki_self()
async def main():
await _core.checkpoint()
with pytest.raises(KeyboardInterrupt):
_core.run(main, instruments=[InstrumentOfDeath()])
# checkpoint_if_cancelled notices pending KI
print("check 5")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
ki_self()
with pytest.raises(KeyboardInterrupt):
await _core.checkpoint_if_cancelled()
_core.run(main)
# KI arrives while main task is not abortable, b/c already scheduled
print("check 6")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
ki_self()
await _core.cancel_shielded_checkpoint()
await _core.cancel_shielded_checkpoint()
await _core.cancel_shielded_checkpoint()
with pytest.raises(KeyboardInterrupt):
await _core.checkpoint()
_core.run(main)
# KI arrives while main task is not abortable, b/c refuses to be aborted
print("check 7")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
ki_self()
task = _core.current_task()
def abort(_):
_core.reschedule(task, outcome.Value(1))
return _core.Abort.FAILED
assert await _core.wait_task_rescheduled(abort) == 1
with pytest.raises(KeyboardInterrupt):
await _core.checkpoint()
_core.run(main)
# KI delivered via slow abort
print("check 8")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
ki_self()
task = _core.current_task()
def abort(raise_cancel):
result = outcome.capture(raise_cancel)
_core.reschedule(task, result)
return _core.Abort.FAILED
with pytest.raises(KeyboardInterrupt):
assert await _core.wait_task_rescheduled(abort)
await _core.checkpoint()
_core.run(main)
# KI arrives just before main task exits, so the run_sync_soon machinery
# is still functioning and will accept the callback to deliver the KI, but
# by the time the callback is actually run, main has exited and can't be
# aborted.
print("check 9")
@_core.enable_ki_protection
async def main():
ki_self()
with pytest.raises(KeyboardInterrupt):
_core.run(main)
print("check 10")
# KI in unprotected code, with
# restrict_keyboard_interrupt_to_checkpoints=True
record = []
async def main():
# We're not KI protected...
assert not _core.currently_ki_protected()
ki_self()
# ...but even after the KI, we keep running uninterrupted...
record.append("ok")
# ...until we hit a checkpoint:
with pytest.raises(KeyboardInterrupt):
await sleep(10)
_core.run(main, restrict_keyboard_interrupt_to_checkpoints=True)
assert record == ["ok"]
record = []
# Exact same code raises KI early if we leave off the argument, doesn't
# even reach the record.append call:
with pytest.raises(KeyboardInterrupt):
_core.run(main)
assert record == []
# KI arrives while main task is inside a cancelled cancellation scope
# the KeyboardInterrupt should take priority
print("check 11")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
with _core.CancelScope() as cancel_scope:
cancel_scope.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
ki_self()
with pytest.raises(KeyboardInterrupt):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
_core.run(main)
def test_ki_is_good_neighbor():
# in the unlikely event someone overwrites our signal handler, we leave
# the overwritten one be
try:
orig = signal.getsignal(signal.SIGINT)
def my_handler(signum, frame): # pragma: no cover
pass
async def main():
signal.signal(signal.SIGINT, my_handler)
_core.run(main)
assert signal.getsignal(signal.SIGINT) is my_handler
finally:
signal.signal(signal.SIGINT, orig)
# Regression test for #461
def test_ki_with_broken_threads():
thread = threading.main_thread()
# scary!
original = threading._active[thread.ident]
# put this in a try finally so we don't have a chance of cascading a
# breakage down to everything else
try:
del threading._active[thread.ident]
@_core.enable_ki_protection
async def inner():
assert signal.getsignal(
signal.SIGINT
) != signal.default_int_handler
_core.run(inner)
finally:
threading._active[thread.ident] = original
# For details on why this test is non-trivial, see:
# https://github.com/python-trio/trio/issues/42
# https://github.com/python-trio/trio/issues/109
# To make it an even better test, we should try doing
# pthread_kill(pthread_self, SIGINT)
# in the child thread, to make sure signals in non-main threads also wake up
# the main loop... but currently that test would fail (see gh-109 again).
@slow
def test_ki_wakes_us_up():
assert is_main_thread()
# This test is flaky due to a race condition on Windows; see:
# https://github.com/python-trio/trio/issues/119
# https://bugs.python.org/issue30038
# I think the only fix is to wait for fixed CPython to be released, so in
# the mean time, on affected versions we send two signals (equivalent to
# hitting control-C twice). This works because the problem is that the C
# level signal handler does
#
# write-to-fd -> set-flags
#
# and we need
#
# set-flags -> write-to-fd
#
# so running the C level signal handler twice does
#
# write-to-fd -> set-flags -> write-to-fd -> set-flags
#
# which contains the desired sequence.
#
# Affected version of CPython include:
# - all versions of 3.5 (fix will not be backported)
# - 3.6.1 and earlier
# It's fixed in 3.6.2 and 3.7+
#
# PyPy was never affected.
#
# The problem technically occurs on Unix as well, if a signal is delivered
# to a non-main thread, and if we were relying on the wakeup fd to wake
# us. Currently we don't use the wakeup fd on Unix anyway, though (see
# gh-109).
#
# There's also this theoretical problem, but hopefully it won't actually
# bite us in practice:
# https://bugs.python.org/issue31119
# https://bitbucket.org/pypy/pypy/issues/2623
import platform
buggy_wakeup_fd = (
os.name == "nt" and platform.python_implementation() == "CPython"
and sys.version_info < (3, 6, 2)
)
# lock is only needed to avoid an annoying race condition where the
# *second* ki_self() call arrives *after* the first one woke us up and its
# KeyboardInterrupt was caught, and then generates a second
# KeyboardInterrupt that aborts the test run. The kill_soon thread holds
# the lock while doing the calls to ki_self, which means that it holds it
# while the C-level signal handler is running. Then in the main thread,
# when we're woken up we know that ki_self() has been run at least once;
    # if we then take the lock it guarantees that ki_self() has been run
# twice, so if a second KeyboardInterrupt is going to arrive it should
# arrive by the time we've acquired the lock. This lets us force it to
# happen inside the pytest.raises block.
#
# It will be very nice when the buggy_wakeup_fd bug is fixed.
lock = threading.Lock()
def kill_soon():
# We want the signal to be raised after the main thread has entered
# the IO manager blocking primitive. There really is no way to
# deterministically interlock with that, so we have to use sleep and
# hope it's long enough.
time.sleep(1.1)
with lock:
print("thread doing ki_self()")
ki_self()
if buggy_wakeup_fd:
print("buggy_wakeup_fd =", buggy_wakeup_fd)
ki_self()
async def main():
thread = threading.Thread(target=kill_soon)
print("Starting thread")
thread.start()
try:
with pytest.raises(KeyboardInterrupt):
# To limit the damage on CI if this does get broken (as
# compared to sleep_forever())
print("Going to sleep")
try:
await sleep(20)
print("Woke without raising?!") # pragma: no cover
# The only purpose of this finally: block is to soak up the
# second KeyboardInterrupt that might arrive on
# buggy_wakeup_fd platforms. So it might get aborted at any
# moment randomly on some runs, so pragma: no cover avoids
# coverage flapping:
finally: # pragma: no cover
print("waiting for lock")
with lock:
print("got lock")
# And then we want to force a PyErr_CheckSignals. Which is
# not so easy on Windows. Weird kluge: builtin_repr calls
# PyObject_Repr, which does an unconditional
# PyErr_CheckSignals for some reason.
print(repr(None))
# And finally, it's possible that the signal was delivered
# but at a moment when we had KI protection enabled, so we
# need to execute a checkpoint to ensure it's delivered
# before we exit main().
await _core.checkpoint()
finally:
print("joining thread", sys.exc_info())
thread.join()
start = time.perf_counter()
try:
_core.run(main)
finally:
end = time.perf_counter()
print("duration", end - start)
print("sys.exc_info", sys.exc_info())
assert 1.0 <= (end - start) < 2
| 31.796358 | 80 | 0.635928 |
4a253d5ec9c3d0a05f4bff6f6e332ce8df33f156 | 1,031 | py | Python | test/test_1002.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | test/test_1002.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | test/test_1002.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | from unittest import TestCase
from exercicios.ex1002 import calcula_area_circunferencia
class TesteEx1002(TestCase):
def test_argumento_deve_ser_float(self):
chamada = 1
with self.assertRaises(TypeError):
calcula_area_circunferencia(chamada)
def test_area_para_raio_0_ou_negativo_deve_ser_0(self):
chamada = -1.00
esperado = 0
self.assertEqual(calcula_area_circunferencia(chamada), esperado)
def test_area_de_2_v_00_deve_ser_12_v_5664(self):
chamada = 2.00
esperado = 'A = 12.5664'
self.assertEqual(calcula_area_circunferencia(chamada), esperado)
def test_area_de_100_v_64_deve_ser_31819_v_3103(self):
chamada = 100.64
esperado = 'A = 31819.3103'
self.assertEqual(calcula_area_circunferencia(chamada), esperado)
def test_area_de_150_v_00_deve_ser_70685_v_7750(self):
chamada = 150.00
esperado = 'A = 70685.7750'
self.assertEqual(calcula_area_circunferencia(chamada), esperado)
| 30.323529 | 72 | 0.71581 |
4a253dfcdfcf466b790bbe04e87612542e92c627 | 1,079 | py | Python | scripts/xml_to_csv.py | freds0/capybara_dataset | f6ba28f22064176c574629ea5a7cc2354acbcc78 | [
"MIT"
] | 1 | 2021-12-02T19:59:05.000Z | 2021-12-02T19:59:05.000Z | scripts/xml_to_csv.py | freds0/capybara_dataset | f6ba28f22064176c574629ea5a7cc2354acbcc78 | [
"MIT"
] | null | null | null | scripts/xml_to_csv.py | freds0/capybara_dataset | f6ba28f22064176c574629ea5a7cc2354acbcc78 | [
"MIT"
] | null | null | null | import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
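# Assumes Pascal VOC style annotation XML: <size> lists width and height as its
# first two children, and each <object> has <name> as its first child and
# <bndbox> (xmin, ymin, xmax, ymax) as its fifth child, which is what the
# positional indexing member[0] and member[4][0..3] below relies on.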
def xml_to_csv(path):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def main():
image_path = os.path.join(os.getcwd(), 'test')
xml_df = xml_to_csv(image_path)
xml_df.to_csv('test_labels.csv', index=None)
print('Successfully converted xml to csv.')
main()
| 29.972222 | 90 | 0.534754 |
4a253f1b4cdf808489273729435c0bf560451acf | 7,675 | py | Python | FusionIIIT/Fusion/settings/common.py | gshubha/Fusion | 7a00a88a65b44257efefb49796a1aca8354e5e34 | [
"bzip2-1.0.6"
] | 1 | 2021-03-04T16:18:46.000Z | 2021-03-04T16:18:46.000Z | FusionIIIT/Fusion/settings/common.py | gshubha/Fusion | 7a00a88a65b44257efefb49796a1aca8354e5e34 | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/Fusion/settings/common.py | gshubha/Fusion | 7a00a88a65b44257efefb49796a1aca8354e5e34 | [
"bzip2-1.0.6"
] | null | null | null | '''
Django settings for Fusion project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
'''
import os
from celery.schedules import crontab
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
CLIENT_ID = '187004491411-frc3j36n04o9k0imgnbl02qg42vkq36f.apps.googleusercontent.com'
CLIENT_SECRET = 'enHu3RD0yBvCM_9C0HQmEp0z'
# SECURITY WARNING: don't run with debug turned on in production!
# Google authentication
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': [
'profile',
'email',
],
'AUTH_PARAMS': {
'access_type': 'online',
'hd': 'iiitdmj.ac.in',
}
}
}
# allauth settings
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/dashboard'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
# email of sender
EMAIL_HOST_USER = '[email protected]'
EMAIL_PORT = 587
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_CONFIRM_EMAIL_ON_GET = False
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/accounts/login/'
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 1
ACCOUNT_EMAIL_CONFIRMATION_HMAC = True
ACCOUNT_EMAIL_VERIFICATION = 'optional'
ACCOUNT_EMAIL_SUBJECT_PREFIX = 'Fusion: '
DEFAULT_FROM_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ACCOUNT_LOGOUT_REDIRECT_URL = '/accounts/login/'
ACCOUNT_USERNAME_MIN_LENGTH = 3
SOCIALACCOUNT_ADAPTER = 'applications.globals.adapters.MySocialAccountAdapter'
# CELERY STUFF
# CELERY_BROKER_URL = 'redis://localhost:6379'
# CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Calcutta'
CELERY_BEAT_SCHEDULE = {
'leave-migration-task': {
'task': 'applications.leave.tasks.execute_leave_migrations',
'schedule': crontab(minute='1', hour='0')
}
}
# Application definition
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.humanize',
'applications.eis',
'notification',
'notifications',
'applications.academic_procedures',
'applications.academic_information',
'applications.leave',
'applications.library',
'applications.notifications_extension',
'applications.gymkhana',
'applications.office_module',
'applications.globals',
'applications.central_mess',
'applications.complaint_system',
'applications.filetracking',
'applications.finance_accounts',
'applications.health_center',
'applications.online_cms',
    'applications.ps1',
'applications.programme_curriculum',
'applications.placement_cell',
'applications.recruitment',
'applications.scholarships',
'applications.visitor_hostel',
'applications.establishment',
'applications.estate_module',
'applications.counselling_cell',
'applications.research_procedures',
'applications.income_expenditure',
'applications.hostel_management',
'applications.hr2',
'applications.department',
    'applications.iwdModuleV2',
'allauth',
'allauth.account',
'allauth.socialaccount',
# 'allauth.socialaccount.providers.google',
'semanticuiforms',
'applications.feeds.apps.FeedsConfig',
'pagedown',
'markdown_deux',
'django_cleanup.apps.CleanupConfig',
'django_unused_media',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Fusion.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, '..', 'templates/'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Fusion.wsgi.application'
# Database template for mysql
# DATABASES = {
# 'default':
# {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'test',
# 'USER': 'root',
# 'PASSWORD': 'sksingh55',
# 'HOST': 'localhost',
# 'PORT': '3306',
# },
# }
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(PROJECT_DIR, 'fusion.db'),
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
# Default backend -- used to login by username in Django admin
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = False
USE_TZ = False
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# os.path.join(BASE_DIR, 'static/')
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static/')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media/')
MEDIA_URL = '/media/'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_REQUIRED = True
LOGIN_REDIRECT_URL = "/"
DJANGO_NOTIFICATIONS_CONFIG = {
    'USE_JSONFIELD': True,
}
CRISPY_TEMPLATE_PACK = 'semantic-ui'
CRISPY_ALLOWED_TEMPLATE_PACKS = ('semantic-ui')
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10240
YOUTUBE_DATA_API_KEY = 'api_key'
| 27.607914 | 91 | 0.704625 |
4a253f24f234e5415ffac9d7c992eb31f7a89121 | 665 | py | Python | e_commerce/cart/urls.py | MiKueen/Django | 67afc73676a9a58d4d0badc5b53410f164846b79 | [
"Apache-2.0"
] | null | null | null | e_commerce/cart/urls.py | MiKueen/Django | 67afc73676a9a58d4d0badc5b53410f164846b79 | [
"Apache-2.0"
] | 8 | 2019-08-06T02:03:39.000Z | 2021-06-10T18:07:47.000Z | e_commerce/cart/urls.py | MiKueen/Django | 67afc73676a9a58d4d0badc5b53410f164846b79 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from .views import (
add_to_cart,
delete_from_cart,
order_details,
checkout,
update_transaction_records,
success
)
app_name = 'cart'
urlpatterns = [
url(r'^add-to-cart/(?P<item_id>[-\w]+)/$', add_to_cart, name="add_to_cart"),
url(r'^order-summary/$', order_details, name="order_summary"),
url(r'^success/$', success, name='purchase_success'),
url(r'^item/delete/(?P<item_id>[-\w]+)/$', delete_from_cart, name='delete_item'),
url(r'^checkout/$', checkout, name='checkout'),
url(r'^update-transaction/(?P<token>[-\w]+)/$', update_transaction_records,
name='update_records')
] | 30.227273 | 85 | 0.655639 |
4a253fb02056eb79413cfcb2b6e1844d7bf64702 | 25,143 | py | Python | jupyterlab_gcsfilebrowser/jupyterlab_gcsfilebrowser/handlers.py | angelakuo/jupyter-extensions | 06efeb0240e7bbba59b8dc7b4941ab4301737471 | [
"Apache-2.0"
] | null | null | null | jupyterlab_gcsfilebrowser/jupyterlab_gcsfilebrowser/handlers.py | angelakuo/jupyter-extensions | 06efeb0240e7bbba59b8dc7b4941ab4301737471 | [
"Apache-2.0"
] | 1 | 2020-11-17T18:08:16.000Z | 2020-11-17T18:08:16.000Z | jupyterlab_gcsfilebrowser/jupyterlab_gcsfilebrowser/handlers.py | mwiewior/jupyter-extensions | b928265ee22246ac2761a5439b8363e98ec735e6 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
"""Request handler classes for the extensions."""
import base64
import json
import re
import tornado.gen as gen
import os
import datetime
import notebook
import nbformat
from collections import namedtuple
from notebook.base.handlers import APIHandler, app_log
from google.cloud import storage # used for connecting to GCS
from google.api_core.client_info import ClientInfo
from io import BytesIO, StringIO # used for sending GCS blobs in JSON objects
from jupyterlab_gcsfilebrowser.version import VERSION
TEMPLATE_COPY_FILE = '-Copy%s'
TEMPLATE_NEW_FILE = '%s'
NEW_FILE_NAME = 'Untitled'
NEW_DIRECTORY_NAME = 'Untitled Folder'
CHECKPOINT_FOLDER = '.ipynb_checkpoints'
CHECKPOINT_ID = '-checkpoint'
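# Checkpoint blobs follow Jupyter's layout: the checkpoint for 'bucket/dir/nb.ipynb'
# is stored as 'bucket/dir/.ipynb_checkpoints/nb.ipynb-checkpoint' (see
# checkpoint_prefix() and checkpoint_filename() below).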
class Error(Exception):
"""GCS Filebrowser exception."""
pass
class FileNotFound(Error):
"""File not found exception."""
pass
def list_dir(bucket_name, path, blobs_dir_list):
items = []
directories = set()
directory_last_modified = dict()
path = '%s%s' % (path, '' if re.match(".*/$", path) else '/')
for blob in blobs_dir_list:
relative_blob_name = re.sub(r'^' + path, '', blob.name)
relative_path_parts = [dir for dir in relative_blob_name.split('/') if dir]
if re.match(".*/$", blob.name):
      # Add the top directory to the set of directories if one exists
if relative_path_parts:
directories.add(relative_path_parts[0])
directory_last_modified[relative_path_parts[0]] = blob_last_modified(
blob)
else:
if relative_path_parts:
dir_name = relative_path_parts[0]
def blobInDir(parts):
return len(parts) > 1
if blobInDir(relative_path_parts):
directories.add(relative_path_parts[0])
directory_last_modified[relative_path_parts[0]] = blob_last_modified(
blob)
else:
items.append({
'type': 'file',
'path': ('%s/%s' % (bucket_name, blob.name)),
'name': dir_name,
'last_modified': blob_last_modified(blob),
})
if path != '/':
path = '/' + path
items = items + [{
'type': 'directory',
'path': ('%s%s%s/' % (bucket_name, path, d)),
'name': d + '/',
'last_modified': directory_last_modified[d],
} for d in directories]
return items
# TODO(cbwilkes): Add tests for parse_path.
def parse_path(path):
  # Remove any preceding '/', and split off the bucket name
bucket_paths = re.sub(r'^/', '', path).split('/', 1)
# The first token should represent the bucket name
bucket_name = bucket_paths[0]
# The rest of the string should represent the blob path, if requested
blob_path = bucket_paths[1] if len(bucket_paths) > 1 else ''
return bucket_name, blob_path
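# For example, parse_path('/my-bucket/notebooks/a.ipynb') returns
# ('my-bucket', 'notebooks/a.ipynb'), and a bare bucket path such as
# '/my-bucket' returns ('my-bucket', '').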
def prefixed_blobs(bucket_name, prefix, storage_client):
return list(storage_client.list_blobs(bucket_name, prefix=prefix))
def matching_blobs(path, storage_client):
"""Find a blob with a name that matches the exact path. That is not a
directory
Returns:
An array of matching Blobs.
"""
# TODO(cbwilkes): Add tests for matching_blobs.
# TODO(cbwilkes): Return matching blobs for directories.
bucket_name, blob_path = parse_path(path)
# List blobs in the bucket with the blob_path prefix
blobs = prefixed_blobs(bucket_name, blob_path, storage_client)
# Find a blob that is not a directory name and fully matches the blob_path
  # If there are any matches, we are retrieving a single blob
blobs_matching = [
b for b in blobs
# TODO(cbwilkes): protect against empty names
if not re.match(".*/$", b.name) and b.name == blob_path
]
return blobs_matching
def matching_directory(path, storage_client):
"""Find a blob with a name that matches the exact path. Must be a
directory
Returns:
An array of matching Blobs.
"""
# TODO(cbwilkes): Add tests for matching_blobs.
# TODO(cbwilkes): Return matching blobs for directories.
bucket_name, blob_path = parse_path(path)
# List blobs in the bucket with the blob_path prefix
blobs = prefixed_blobs(bucket_name, blob_path, storage_client)
  # Find a blob that is a directory name and fully matches the blob_path
  # If there are any matches, we are retrieving a single directory
blobs_matching = [
b for b in blobs
# TODO(cbwilkes): protect against empty names
if re.match(".*/$", b.name) and b.name == blob_path
]
return blobs_matching
def directory_exist(path, storage_client):
"""Find if directory prefix exist.
Returns:
Boolean if directory prefix exist.
"""
# TODO(cbwilkes): Add tests for matching_blobs.
# TODO(cbwilkes): Return matching blobs for directories.
bucket_name, blob_path = parse_path(path)
# List blobs in the bucket with the blob_path prefix
blobs = prefixed_blobs(bucket_name, blob_path, storage_client)
blob_pattern = re.compile("^%s.*" % blob_path) if re.match(
".*/$", path) else re.compile("^%s/.*" % blob_path)
blobs_matching = [b for b in blobs if blob_pattern.match(b.name)]
return len(blobs_matching)
def matching_directory_contents(path, storage_client):
"""Find blobs within a directory.
Returns:
An array of matching Blobs.
Raises:
ValueError if 'path' is not a directory
"""
if not path or path[-1] != '/':
raise ValueError('Error: the path does not appear to be'
' a directory ending with an trailing "/"')
# TODO(cbwilkes): Add tests for matching_blobs.
# TODO(cbwilkes): Return matching blobs for directories.
bucket_name, blob_path = parse_path(path)
# List blobs in the bucket with the blob_path prefix
blobs = prefixed_blobs(bucket_name, blob_path, storage_client)
return blobs
def matching_bucket(path, storage_client):
bucket_name, _ = parse_path(path)
# Raises google.cloud.exceptions.NotFound – If the bucket is not found.
return storage_client.get_bucket(bucket_name)
def getPathContents(path, storage_client):
path = path or '/'
addDir = '/' if re.match(".+/$", path) else ''
path = os.path.normpath(path) + addDir
if path == '/':
buckets = storage_client.list_buckets()
return {
'type':
'directory',
'content': [{
'type': 'directory',
'path': b.name + '/',
'name': b.name + '/',
'last_modified': bucket_time_created(b),
} for b in buckets]
}
else:
bucket_name, blob_path = parse_path(path)
blobs_prefixed = prefixed_blobs(bucket_name, blob_path, storage_client)
blobs_matching = matching_blobs(path, storage_client)
directories_matching = matching_directory(path, storage_client)
dir_exist = directory_exist(path, storage_client)
# Bucket root or directory within bucket
if not blob_path or dir_exist:
return {
'type': 'directory',
'content': list_dir(bucket_name, blob_path, blobs_prefixed),
}
elif len(blobs_matching) == 1: # Single blob
blob = blobs_matching[0]
file_bytes = BytesIO()
blob.download_to_file(file_bytes)
return {
'type': 'file',
'content': {
'path': ('%s/%s' % (bucket_name, blob.name)),
'type':
'file',
'mimetype':
blob.content_type,
'content':
base64.encodebytes(file_bytes.getvalue()).decode('ascii'),
'last_modified':
blob_last_modified(blob),
}
}
else:
raise FileNotFound('File "%s" not found' % path)
def delete(path, storage_client):
path = path or '/'
addDir = '/' if re.match(".+/$", path) else ''
path = os.path.normpath(path) + addDir
if path == '/':
return {}
else:
blobs_matching = matching_blobs(path, storage_client)
if len(blobs_matching) == 1: # Single blob
blob = blobs_matching[0]
blob.delete()
elif not blobs_matching:
# Fallback to deleting a directory if single blob is not found
blobs_matching = matching_directory_contents(os.path.join(path, ''),
storage_client)
for b in blobs_matching:
b.delete()
return {}
def upload(model, storage_client):
bucket_name, blob_path = parse_path(model['path'])
def uploadModel(storage_client, model, blob_path):
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_path)
if model['format'] == 'base64':
bytes_file = BytesIO(base64.b64decode(model['content']))
blob.upload_from_file(bytes_file)
elif model['format'] == 'json':
blob.upload_from_string(json.dumps(model['content']))
else:
blob.upload_from_string(model['content'])
def appendChunk(storage_client,
model,
last,
temp,
composite,
deleteLast=False):
bucket = storage_client.get_bucket(bucket_name)
uploadModel(storage_client, model, temp)
blob = bucket.blob(composite)
blob_last = bucket.blob(last)
blob_temp = bucket.blob(temp)
blob.compose([blob_last, blob_temp])
blob_temp.delete()
if deleteLast:
blob_last.delete()
if 'chunk' not in model:
uploadModel(storage_client, model, blob_path)
else:
if model['chunk'] == 1:
blob_path_composite = '%s.temporary' % (blob_path)
uploadModel(storage_client, model, blob_path_composite)
elif model['chunk'] == -1:
appendChunk(storage_client, model, '%s.temporary' % (blob_path),
'%s.temporary-%s.tmp' % (blob_path, model['chunk']),
blob_path, True)
else:
appendChunk(storage_client, model, '%s.temporary' % (blob_path),
'%s.temporary-%s.tmp' % (blob_path, model['chunk']),
'%s.temporary' % (blob_path))
bucket = storage_client.get_bucket(bucket_name)
return bucket.blob(blob_path)
def generate_next_unique_name(bucket_name,
blob_name,
storage_client,
template,
is_dir=False):
def generate_name(blob_name, name_addendum):
root, ext = os.path.splitext(blob_name)
addendum = ''
if name_addendum:
addendum = template % name_addendum
return '%s%s%s' % (root, addendum, ext)
def generate_directory_name(blob_name, name_addendum):
normpath = os.path.normpath(blob_name)
addendum = ''
if name_addendum:
addendum = template % name_addendum
return '%s%s%s' % (normpath, addendum, '/')
name_addendum = ''
if is_dir:
proposed_blob_name = generate_directory_name(blob_name, name_addendum)
while matching_directory(
os.path.normpath('/%s/%s' % (bucket_name, proposed_blob_name)) + '/',
storage_client):
if not name_addendum:
name_addendum = 1
else:
name_addendum = name_addendum + 1
proposed_blob_name = generate_directory_name(blob_name, name_addendum)
return generate_directory_name(blob_name, name_addendum)
else:
proposed_blob_name = generate_name(blob_name, name_addendum)
while matching_blobs(
os.path.normpath('/%s/%s' % (bucket_name, proposed_blob_name)),
storage_client):
if not name_addendum:
name_addendum = 1
else:
name_addendum = name_addendum + 1
proposed_blob_name = generate_name(blob_name, name_addendum)
return generate_name(blob_name, name_addendum)
def copy(path, directory, storage_client):
def copyFileName(path, directory):
_, blob_name = parse_path(path)
destination_bucket, destination_blob_name_dir = parse_path(directory)
basename = os.path.basename(blob_name)
if not basename:
raise ValueError('"path" is not a valid blob name.')
if destination_blob_name_dir:
new_blob_name = '%s/%s' % (destination_blob_name_dir, basename)
else:
new_blob_name = '%s' % (basename)
return destination_bucket, new_blob_name
if directory in ('/', ''):
raise ValueError('Error: Cannot copy file to the root directory. '
'Only GCS buckets can be created here.')
blobs_matching = matching_blobs(path, storage_client)
if not blobs_matching:
raise ValueError('Error: Blob not found "%s"' % (path))
current_bucket = matching_bucket(path, storage_client)
destination_bucket_name, new_blob_name = copyFileName(path, directory)
new_blob_name = generate_next_unique_name(destination_bucket_name,
new_blob_name, storage_client,
TEMPLATE_COPY_FILE)
destination_bucket = storage_client.get_bucket(destination_bucket_name)
return current_bucket.copy_blob(blobs_matching[0], destination_bucket,
new_blob_name)
def move(old, new, storage_client):
def add_directory_slash(path):
return '%s/' % path if not path or path[-1] != '/' else path
_, blob_path_new = parse_path(new)
if not blob_path_new:
raise ValueError('Error: Cannot copy file to the root directory. '
'Only GCS buckets can be created here.')
blobs_matching = matching_blobs(old, storage_client)
destination_bucket = matching_bucket(new, storage_client)
new_blob = matching_blobs(new, storage_client)
if new_blob:
raise ValueError('Error: Cannot move object. A destination '
'object already exist with the same name.')
new_dir = matching_directory(
add_directory_slash(re.sub(r'^%s' % old, new, new)), storage_client)
if new_dir:
raise ValueError('Error: Cannot move object. The destination '
'directory already exist with the same name. (%s)' % new)
# Fallback to moving directory if single blob is not found
if not blobs_matching:
blobs_matching = matching_directory_contents(add_directory_slash(old),
storage_client)
_, blob_path_old = parse_path(old)
for b in blobs_matching:
new_blob_name = re.sub(r'^%s' % blob_path_old, blob_path_new, b.name)
destination_bucket.rename_blob(b, new_blob_name)
return matching_directory(add_directory_slash(new), storage_client)[0]
else: # Move single blob
return destination_bucket.rename_blob(blobs_matching[0], blob_path_new)
def create_storage_client():
return storage.Client(client_info=ClientInfo(
user_agent='jupyterlab_gcsfilebrowser/{}'.format(VERSION)))
def new_file(file_type, ext, path, storage_client):
model = dict()
content = ''
file_format = 'text'
if file_type and file_type in ('notebook', 'file'):
if file_type == 'notebook':
ext = 'ipynb'
file_format = 'json'
content = {
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 4
}
new_blob_name = '%s/%s.%s' % (path, NEW_FILE_NAME, ext)
destination_bucket_name, blob_path = parse_path(new_blob_name)
new_blob_name = generate_next_unique_name(destination_bucket_name,
blob_path, storage_client,
TEMPLATE_NEW_FILE)
model['path'] = '%s/%s' % (destination_bucket_name, new_blob_name)
model['content'] = content
model['format'] = file_format
blob = upload(model, storage_client)
file_bytes = BytesIO()
blob.download_to_file(file_bytes)
return {
'type': 'file',
'content': {
'path': ('%s/%s' % (blob.bucket.name, blob.name)),
'name': os.path.basename(blob.name),
'type': 'file',
'mimetype': blob.content_type,
'content': base64.encodebytes(file_bytes.getvalue()).decode('ascii')
}
}
elif file_type and file_type == 'directory':
new_blob_name = '%s/%s/' % (path, NEW_DIRECTORY_NAME)
destination_bucket_name, blob_path = parse_path(new_blob_name)
new_blob_name = generate_next_unique_name(destination_bucket_name,
blob_path,
storage_client,
TEMPLATE_NEW_FILE,
is_dir=True)
model['path'] = '%s/%s' % (destination_bucket_name, new_blob_name)
model['content'] = ''
model['format'] = 'text'
blob = upload(model, storage_client)
return {
'type': 'directory',
'path': ('%s/%s' % (blob.bucket.name, blob.name)),
'name': blob.name,
'content': []
}
def checkpoint_prefix(path):
checkpoint_dir = os.path.join(os.path.dirname(path), CHECKPOINT_FOLDER)
return os.path.join(os.path.dirname(path), CHECKPOINT_FOLDER,
os.path.basename(path))
def checkpoint_filename(path, checkpoint_id):
checkpoint_dir = os.path.join(os.path.dirname(path), CHECKPOINT_FOLDER)
return '%s%s' % (checkpoint_prefix(path), checkpoint_id)
def create_checkpoint(path, storage_client):
checkpoint_pathname = checkpoint_filename(path, CHECKPOINT_ID)
content = getPathContents(path, storage_client)
model = {
'format': 'base64',
'content': content['content']['content'],
'path': checkpoint_pathname,
}
blob = upload(model, storage_client)
return {
'checkpoint': {
'id': CHECKPOINT_ID,
'last_modified': blob_last_modified(blob)
}
}
def list_checkpoints(path, storage_client):
bucket_name, blob_path = parse_path(checkpoint_prefix(path))
blobs = prefixed_blobs(bucket_name, blob_path, storage_client)
return {
'checkpoints': [{
'id': CHECKPOINT_ID,
'last_modified': blob_last_modified(blob)
} for blob in blobs]
}
def restore_checkpoint(path, checkpoint_id, storage_client):
checkpoint_pathname = checkpoint_filename(path, checkpoint_id)
content = getPathContents(checkpoint_pathname, storage_client)
model = {
'format': 'base64',
'content': content['content']['content'],
'path': path,
}
blob = upload(model, storage_client)
return {
'checkpoint': {
'id': CHECKPOINT_ID,
'last_modified': blob_last_modified(blob)
}
}
def delete_checkpoint(path, checkpoint_id, storage_client):
checkpoint_pathname = checkpoint_filename(path, checkpoint_id)
delete(checkpoint_pathname, storage_client)
return {}
def blob_last_modified(blob):
return blob.updated.strftime("%Y-%m-%d %H:%M:%S %z") if blob.updated else ''
def bucket_time_created(bucket):
return bucket.time_created.strftime(
"%Y-%m-%d %H:%M:%S %z") if bucket.time_created else ''
class GCSHandler(APIHandler):
"""Handles requests for GCS operations."""
storage_client = None
@gen.coroutine
def get(self, path=''):
try:
if not self.storage_client:
self.storage_client = create_storage_client()
self.finish(json.dumps(getPathContents(path, self.storage_client)))
except FileNotFound as e:
app_log.exception(str(e))
self.set_status(404, str(e))
self.finish({'error': {
'message': str(e),
'response': {
'status': 404,
},
}})
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({'error': {'message': str(e)}})
class UploadHandler(APIHandler):
storage_client = None
@gen.coroutine
def post(self, *args, **kwargs):
try:
if not self.storage_client:
self.storage_client = create_storage_client()
model = self.get_json_body()
upload(model, self.storage_client)
self.finish({})
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({'error': {'message': str(e)}})
class DeleteHandler(APIHandler):
storage_client = None
@gen.coroutine
def delete(self, path=''):
try:
if not self.storage_client:
self.storage_client = create_storage_client()
self.finish(json.dumps(delete(path, self.storage_client)))
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({'error': {'message': str(e)}})
class MoveHandler(APIHandler):
storage_client = None
@gen.coroutine
def post(self, path=''):
move_obj = self.get_json_body()
try:
if not self.storage_client:
self.storage_client = create_storage_client()
blob = move(move_obj['oldLocalPath'], move_obj['newLocalPath'],
self.storage_client)
file_bytes = BytesIO()
blob.download_to_file(file_bytes)
self.finish({
'type': 'file',
'content': {
'path': ('%s/%s' % (blob.bucket.name, blob.name)),
'name':
blob.name,
'mimetype':
blob.content_type,
'content':
base64.encodebytes(file_bytes.getvalue()).decode('ascii'),
'last_modified':
blob_last_modified(blob),
},
})
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({'error': {'message': str(e)}})
class CopyHandler(APIHandler):
storage_client = None
@gen.coroutine
def post(self, path=''):
copy_obj = self.get_json_body()
try:
if not self.storage_client:
self.storage_client = create_storage_client()
blob = copy(copy_obj['localPath'], copy_obj['toLocalDir'],
self.storage_client)
self.finish({
'type': 'file',
'path': ('%s/%s' % (blob.bucket.name, blob.name)),
'name': blob.name
})
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({'error': {'message': str(e)}})
class NewHandler(APIHandler):
storage_client = None
@gen.coroutine
def post(self, path=''):
new_obj = self.get_json_body()
try:
if not self.storage_client:
self.storage_client = create_storage_client()
self.finish(
new_file(new_obj['type'], new_obj.get('ext', None), new_obj['path'],
self.storage_client))
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({'error': {'message': str(e)}})
class CheckpointHandler(APIHandler):
storage_client = None
@gen.coroutine
def post(self, *args, **kwargs):
checkpoint_obj = self.get_json_body()
try:
if not self.storage_client:
self.storage_client = create_storage_client()
if checkpoint_obj['action'] == 'createCheckpoint':
checkpoint = create_checkpoint(checkpoint_obj['localPath'],
self.storage_client)
self.finish(checkpoint)
if checkpoint_obj['action'] == 'listCheckpoints':
checkpoints = list_checkpoints(checkpoint_obj['localPath'],
self.storage_client)
self.finish(checkpoints)
if checkpoint_obj['action'] == 'restoreCheckpoint':
checkpoint = restore_checkpoint(checkpoint_obj['localPath'],
checkpoint_obj['checkpointID'],
self.storage_client)
self.finish(checkpoint)
if checkpoint_obj['action'] == 'deleteCheckpoint':
checkpoint = delete_checkpoint(checkpoint_obj['localPath'],
checkpoint_obj['checkpointID'],
self.storage_client)
self.finish({})
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({'error': {'message': str(e)}})
class GCSNbConvert(APIHandler):
"""Handles requests for nbconvert for files in GCS."""
storage_client = None
@gen.coroutine
def get(self, *args, **kwargs):
try:
if not self.storage_client:
self.storage_client = create_storage_client()
nb = getPathContents(args[1], self.storage_client)
gcs_notebook = nbformat.reads(base64.b64decode(
nb['content']['content']).decode('utf-8'),
as_version=4)
exporter = notebook.nbconvert.handlers.get_exporter(args[0])
(output, resources) = exporter.from_notebook_node(gcs_notebook)
# Force download if requested
if self.get_argument('download', 'false').lower() == 'true':
filename = os.path.splitext(args[1])[0] + resources['output_extension']
self.set_header('Content-Disposition',
'attachment; filename="%s"' % filename)
if exporter.output_mimetype:
self.set_header('Content-Type',
'%s; charset=utf-8' % exporter.output_mimetype)
self.finish(output)
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({'error': {'message': str(e)}})
| 29.545241 | 80 | 0.632462 |
4a25404ad6d90e52b39c4d69a76254f499eb983f | 16,866 | py | Python | dschema/dschema.py | Teriks/dschema | 0e76088b2d969695ac7a7e5add7c838c429e4462 | [
"BSD-3-Clause"
] | null | null | null | dschema/dschema.py | Teriks/dschema | 0e76088b2d969695ac7a7e5add7c838c429e4462 | [
"BSD-3-Clause"
] | null | null | null | dschema/dschema.py | Teriks/dschema | 0e76088b2d969695ac7a7e5add7c838c429e4462 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2018, Teriks
# All rights reserved.
#
# dschema is distributed under the following BSD 3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from copy import deepcopy
__all__ = ['Namespace',
'ValidationError',
'MissingKeyError',
'ExtraKeysError',
'TypeValidationError',
'SchemaError',
'SchemaDefaultError',
'SchemaMissingTypeError',
'Default',
'Required',
'Dict',
'Type',
'prop',
'Validator']
class Namespace:
"""Simple dynamic object, optimized for dschema."""
def __init__(self, dictionary):
self.__dict__ = dictionary
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'Namespace({})'.format(", ".join("{}={}".format(k, repr(v)) for k, v in self.__dict__.items()))
class ValidationError(Exception):
"""Thrown on any validation error, such as missing required properties."""
def __init__(self, message):
super().__init__(message)
class ExtraKeysError(ValidationError):
"""Thrown when extra keys exist in data being validated
which do not exist in the schema."""
def __init__(self, message, keys):
super().__init__(message)
self.keys = keys
"""A set containing all the extraneous keys."""
class MissingKeyError(ValidationError):
"""Thrown when a required namespace/property defined in the schema is missing
from data being validated."""
def __init__(self, message, namespace):
super().__init__(message)
self.namespace = namespace
"""Full path to the missing key."""
class TypeValidationError(ValidationError):
"""Thrown when a type validation function throws on an incoming value."""
def __init__(self, message, type_exception=None):
super().__init__(message)
self.type_exception = type_exception
"""The exception object raised from your validation function."""
class SchemaError(Exception):
"""Thrown for errors related to the schema definition itself."""
def __init__(self, message):
super().__init__(message)
class SchemaDefaultError(SchemaError):
"""Thrown when a default value for a property/namespace is invalid for its own schema."""
def __init__(self, validation_error):
super().__init__(str(validation_error))
self.validation_error = validation_error
"""
The :py:class:`~dschema.ValidationError` instance raised when
validating the default value against the schema failed.
"""
class SchemaMissingTypeError(SchemaError):
"""Thrown when an undefined type is referenced by name in a schema."""
def __init__(self, message, typename):
super().__init__(message)
self.typename = typename
"""The referenced type name."""
#: Used to define a default value for a schema property
Default = '@default'
#: Used to indicate a given schema node/property is required
Required = '@required'
#: Used to indicate a schema property should allow a raw dictionary to pass through as a value
Dict = '@dict'
#: Used to define a custom validation function for a schema node/property
Type = '@type'
__VALID_PROP_ARGS = {'default', 'required', 'type', 'dict'}
def prop(**kwargs):
r"""Helper that returns a schema property specification from keyword arguments.
:param \**kwargs:
See below
:Keyword Arguments:
* *default* (``object``) --
Default value when none is present in the validated dictionary.
* *required* (``object``) --
Is this key/value pair required to be present?
* *type* (``callable``) --
The validation callable to use on the incoming value.
* *dict* (``bool``) --
Should the value be interpreted as a raw nested dictionary?
"""
r = dict()
for k, v in kwargs.items():
if k not in __VALID_PROP_ARGS:
raise ValueError('Unexpected parameter "{}".'.format(k))
r['@' + k] = v
return r
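# For example, prop(required=True, type=int, default=8080) expands to
# {'@required': True, '@type': int, '@default': 8080}, the same dictionary one
# could write by hand with the Required, Type and Default constants above.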
class Validator:
"""Schema validator class"""
def __init__(self, schema):
"""
:param schema: Your schema definition
"""
#: schema dictionary (reassignable).
self.schema = schema
self.types = dict()
"""Types dictionary, should contain type validator callables by name (reassignable).
Must always implement ``get(key, default)`` and ``types['key']`` like a python :py:class:`dict`."""
def add_type(self, name, validation_function):
"""Register a type validation callable that can be referenced by name in the schema.
See: :py:attr:`~dschema.Validator.types`
:param name: Name which can be referenced in the schema using a string.
:param validation_function: Associated validation function.
"""
self.types[name] = validation_function
def remove_type(self, name):
"""Remove an existing type validation callable.
See: :py:func:`~dschema.Validator.add_type` and :py:attr:`~dschema.Validator.types`
:param name: The name previously registered with :py:func:`~dschema.Validator.add_type`
"""
del self.types[name]
def _fill_schema_defaults(self, schema_node, cur_namespace, return_namespace):
schema_node = deepcopy(schema_node)
if Default in schema_node:
default = schema_node[Default]
try:
if isinstance(default, dict) and Dict not in schema_node:
if len(schema_node) > 1:
# only needed if there is more schema to apply to the default value
                        # when there is only one entry it is the @default entry, and we are processing
# the default value for a property with no @type and not a namespace
# containing properties
try:
# This modifies the dictionary parameter
self._check_schema(dictionary=default, schema=schema_node, cur_namespace=cur_namespace,
return_namespace=return_namespace, allow_extra_keys=True)
except ValidationError as e:
# repackage as a schema error
raise SchemaDefaultError(e)
return Namespace(default) if return_namespace else default
else:
if Type in schema_node:
try:
default = self._try_validate_type(schema_node[Type], default, cur_namespace)
except ValidationError as e:
# repackage as a schema error
raise SchemaDefaultError(e)
return default
finally:
del schema_node[Default]
stack = [schema_node]
while stack:
node = stack.pop()
for k, v in node.items():
if not isinstance(v, dict):
continue
if Default in v:
default = v[Default]
try:
if isinstance(default, dict) and Dict not in v:
if len(v) > 1:
# only needed if there is more schema to apply to the default value
                                # when there is only one entry it is the @default entry, and we are processing
# the default value for a property with no @type and not a namespace
# containing properties
try:
# This modifies the dictionary parameter
self._check_schema(dictionary=default, schema=v, cur_namespace=cur_namespace,
return_namespace=return_namespace, allow_extra_keys=True)
except ValidationError as e:
# repackage as a schema error
raise SchemaDefaultError(e)
node[k] = Namespace(default) if return_namespace else default
else:
if Type in v:
try:
default = self._try_validate_type(v[Type], default, cur_namespace)
except ValidationError as e:
# repackage as a schema error
raise SchemaDefaultError(e)
node[k] = default
finally:
del v[Default]
else:
stack.append(v)
if return_namespace:
if Dict in v:
del v[Dict]
else:
node[k] = Namespace(v)
elif Dict in v:
del v[Dict]
if return_namespace and Dict not in schema_node and isinstance(schema_node, dict):
return Namespace(schema_node)
else:
return schema_node
def _check_schema(self, dictionary, schema, cur_namespace, return_namespace, allow_extra_keys):
stack = [(dictionary, schema, cur_namespace)]
while stack:
dict_node, schema_node, cur_namespace = stack.pop()
dict_node_is_dict = isinstance(dict_node, dict)
for schema_key, schema_value in schema_node.items():
schema_value_is_dict = isinstance(schema_value, dict)
if not dict_node_is_dict or not (schema_key in dict_node):
if schema_value_is_dict and schema_value.get(Required, False):
cur_namespace = "{}.{}".format(cur_namespace, schema_key) if cur_namespace else schema_key
raise MissingKeyError("'{}' is required but missing.".format(cur_namespace), cur_namespace)
if dict_node_is_dict and schema_value_is_dict:
dict_node[schema_key] = self._fill_schema_defaults(schema_value, cur_namespace,
return_namespace)
continue
node_value = dict_node.get(schema_key, None)
if not schema_value_is_dict:
default = None
is_dict = schema_value == Dict
the_type = None if is_dict else schema_value
else:
if Default in schema_value:
if Required in schema_value:
raise SchemaError("Schema node '{}' cannot be required and also have a default value."
.format(schema_key))
else:
default = self._fill_schema_defaults(schema_value, cur_namespace, return_namespace)
else:
default = None
is_dict = schema_value.get(Dict, False)
the_type = schema_value.get(Type, None)
next_namespace = "{}.{}".format(cur_namespace, schema_key) if cur_namespace else schema_key
if node_value is None and default:
dict_node[schema_key] = default
continue
else:
dict_node[schema_key] = self._try_validate_type(the_type, node_value, next_namespace)
if is_dict:
continue
next_schema_node = schema_node[schema_key]
if isinstance(next_schema_node, dict):
stack.append((dict_node[schema_key], next_schema_node, next_namespace))
if return_namespace and Dict not in schema_value and isinstance(node_value, dict):
dict_node[schema_key] = Namespace(node_value)
if not allow_extra_keys and isinstance(dict_node, dict):
key_diff = dict_node.keys() - schema_node.keys()
if len(key_diff):
raise ExtraKeysError(
"Namespace '{}' contains extraneous keys: {}."
.format(cur_namespace if cur_namespace else '.', key_diff), key_diff)
elif return_namespace and isinstance(dict_node, dict):
for key in dict_node.keys() - schema_node.keys():
dict_node[key] = self._namespace_dicts(dict_node[key])
return dictionary
def _try_validate_type(self, typename, value, next_namespace):
if isinstance(typename, str):
found_type = self.types.get(typename, None)
if found_type:
typename = found_type
else:
raise SchemaMissingTypeError("'{}' schema type callable '{}' not provided."
.format(next_namespace, typename), typename)
if typename:
try:
return typename(value)
except Exception as e:
raise TypeValidationError("'{}' failed type validation: {}"
.format(next_namespace, str(e)), e)
return value
def validate(self, dictionary, copy=True, namespace=False, extra_keys=False):
"""Validate a dictionary object using the defined schema and return it a copy.
Defaults defined in the schema will be filled out if they do not exist in the incoming data.
        :param copy: Whether or not to deep copy the input dictionary before processing. If this is not
                     done, the input dictionary will be modified to a useless state. validate can
                     run faster if you do not plan to use the input dictionary again and you pass **copy=False**.
:param dictionary: (``dict``) object to validate.
:param namespace: If ``True``, return a deserialized :py:class:`dschema.Namespace` object.
:param extra_keys: Allow extra key value pairs that do not exist in the schema to pass through without
exception. In effect, only run validation on keys which are found to exist in the
schema, and let others always pass through if they have no schema defined for them.
:return: Processed input dictionary
"""
result = self._check_schema(deepcopy(dictionary) if copy else dictionary,
self.schema, '', namespace, extra_keys)
if namespace:
return Namespace(result)
else:
return result
@staticmethod
def _namespace_dicts(dictionary):
if not isinstance(dictionary, dict):
return dictionary
stack = [dictionary]
while stack:
d = stack.pop()
if isinstance(d, dict):
for k, v in d.items():
if isinstance(v, dict):
stack.append(v)
d[k] = Namespace(v)
return Namespace(dictionary)
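# A minimal usage sketch of the pieces defined above: prop() builds property
# specs, Validator checks a plain dict, fills defaults and can hand back a
# Namespace. The schema keys ("host", "port", "logging") and their defaults
# are made-up example values, not anything the library prescribes.
if __name__ == '__main__':
    example_schema = {
        'host': prop(required=True, type=str),
        'port': prop(default=8080, type=int),
        'logging': {
            'level': prop(default='INFO'),
        },
    }
    validator = Validator(example_schema)
    config = validator.validate({'host': 'localhost', 'logging': {}},
                                namespace=True)
    print(config.host, config.port, config.logging.level)  # localhost 8080 INFO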
| 40.73913 | 207 | 0.580517 |
4a25408df901febd9e970cb060d4f6e114f76849 | 398 | py | Python | search.py | AlexHerry/python | 55b1932bfb3a7cdb237267d2a24dd6910db2fd20 | [
"MIT"
] | null | null | null | search.py | AlexHerry/python | 55b1932bfb3a7cdb237267d2a24dd6910db2fd20 | [
"MIT"
] | null | null | null | search.py | AlexHerry/python | 55b1932bfb3a7cdb237267d2a24dd6910db2fd20 | [
"MIT"
] | null | null | null | #-*-coding:utf-8 -*-
import easygui
easygui.msgbox("We are looking for girls aged 10 to 12")
while(1):
    sex = easygui.enterbox("Are you a boy or a girl? If you are a boy, enter m; if you are a girl, enter f")
if sex == 'm' or sex == 'f':
        break
else:
        easygui.msgbox("Please enter a valid answer (m or f)")
if sex == 'm':
    easygui.msgbox("You cannot join our team")
else:
    age = easygui.integerbox("How old are you now?")
if age >= 10 and age <= 12:
        easygui.msgbox("You can join our team") | 26.533333 | 62 | 0.60804 |
4a2541359f1314a7bc5ff80957edc49d3e945f3f | 779 | py | Python | post/urls.py | CosBett/Prime-InstaClone | 08b8ed0c4b6da712e51290dc477063c48b408f96 | [
"Unlicense"
] | null | null | null | post/urls.py | CosBett/Prime-InstaClone | 08b8ed0c4b6da712e51290dc477063c48b408f96 | [
"Unlicense"
] | null | null | null | post/urls.py | CosBett/Prime-InstaClone | 08b8ed0c4b6da712e51290dc477063c48b408f96 | [
"Unlicense"
] | null | null | null | from django.urls import path,include
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('',views.landing_page, name = 'landing_page' ),
path('signup/',views.sign_up, name = 'signup' ),
path('home/',views.index, name = 'homepage' ),
path('', include('django.contrib.auth.urls')),
path('userprofile/<username>/', views.user_profile, name='userprofile'),
path('profile/<username>/',views.profile, name='profile'),
path('post/<id>', views.post_comments, name='comment'),
path('search/', views.search_profile, name='search'),
path('likes', views.like_post, name='likes'),
path('follow/<to_follow>', views.follow, name='follow'),
path('unfollow/<to_unfollow>', views.unfollow, name='unfollow'),
]
| 41 | 76 | 0.676508 |
4a25429ba772637bbc74a0c843cee10719f22f31 | 5,294 | py | Python | test/test_core/test_permission.py | CherryGS/nonebot_plugin_PCtrl | 96858d9deabd6c9db49db6b67157aeeead8b910a | [
"MIT"
] | 2 | 2022-01-07T11:15:00.000Z | 2022-03-18T17:22:15.000Z | test/test_core/test_permission.py | CherryGS/nonebot_plugin_PCtrl | 96858d9deabd6c9db49db6b67157aeeead8b910a | [
"MIT"
] | 1 | 2022-01-10T15:58:20.000Z | 2022-01-21T00:25:04.000Z | test/test_core/test_permission.py | CherryGS/nonebot_plugin_PCtrl | 96858d9deabd6c9db49db6b67157aeeead8b910a | [
"MIT"
] | null | null | null | from random import choices, randint, sample
import pytest
from controller.core.permission import (
PyUserPerm,
UserPerm,
del_perms,
get_perms,
insert_perm_after_query,
insert_perm_ignore,
insert_perm_update,
merge_perm,
set_perms,
)
from controller.methods import DISABLE_TYPE, ENABLE_TYPE, NO_TYPE
from pydantic import parse_obj_as
from sqlalchemy import insert, select
from sqlalchemy.ext.asyncio import AsyncSession
n = 200
string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def randstr(l):
return "".join(sample(string, l))
key_space = [i for i in range(n)]
key_space_same = [1 for _ in range(n)]
key_handle = [i for i in range(n)]
key_plugin_name = [randstr(30) for _ in range(n)]
old_ban = [NO_TYPE for _ in range(n)]
new_ban = [ENABLE_TYPE for _ in range(n)]
old_switch = [NO_TYPE for _ in range(n)]
new_switch = [ENABLE_TYPE for _ in range(n)]
old_configure = [NO_TYPE for _ in range(n)]
new_configure = [ENABLE_TYPE for _ in range(n)]
data1 = [
PyUserPerm(
space=i[0], handle=i[1], plugin_name=i[2], ban=i[3], switch=i[4], configure=i[5]
).dict()
for i in zip(
key_space, key_handle, key_plugin_name, old_ban, old_switch, old_configure
)
]
data2 = [
PyUserPerm(
space=i[0], handle=i[1], plugin_name=i[2], ban=i[3], switch=i[4], configure=i[5]
).dict()
for i in zip(
key_space, key_handle, key_plugin_name, new_ban, old_switch, old_configure
)
]
data3 = [
PyUserPerm(
space=i[0], handle=i[1], plugin_name=i[2], ban=i[3], switch=i[4], configure=i[5]
).dict()
for i in zip(
key_space_same, key_handle, key_plugin_name, new_ban, old_switch, old_configure
)
]
class Test:
""""""
async def query_data(self, session, data):
for i in data:
res = (
await session.execute(
select(UserPerm.__table__)
.where(UserPerm.space == i["space"])
.where(UserPerm.plugin_name == i["plugin_name"])
.where(UserPerm.handle == i["handle"])
.where(UserPerm.ban == i["ban"])
.where(UserPerm.switch == i["switch"])
.where(UserPerm.configure == i["configure"])
)
).all()
yield res
@pytest.mark.usefixtures("init_table", "insert_on_conflict")
async def test_insert_perm_update(self, session: AsyncSession, engine_type: int):
await insert_perm_update(engine_type, session, data1)
async for i in self.query_data(session, data1):
assert len(i) == 1
async for i in self.query_data(session, data2):
assert len(i) == 0
await insert_perm_update(
engine_type, session, data2, ign=("ban", "switch", "configure")
)
async for i in self.query_data(session, data1):
assert len(i) == 1
async for i in self.query_data(session, data2):
assert len(i) == 0
await insert_perm_update(engine_type, session, data2)
async for i in self.query_data(session, data2):
assert len(i) == 1
async for i in self.query_data(session, data1):
assert len(i) == 0
@pytest.mark.usefixtures("init_table")
async def test_set_perms(self, session: AsyncSession):
await session.execute(insert(UserPerm.__table__), data1)
await session.commit()
await set_perms(session, ((UserPerm.ban, ENABLE_TYPE),))
async for i in self.query_data(session, data2):
assert len(i) == 1
await set_perms(session, ((UserPerm.ban, NO_TYPE),))
async for i in self.query_data(session, data1):
assert len(i) == 1
@pytest.mark.usefixtures("init_table")
async def test_insert_perm_after_query(self, session: AsyncSession):
await insert_perm_after_query(session, data1[: -int(n / 2)])
async for i in self.query_data(session, data1[: -int(n / 2)]):
assert len(i) == 1
await insert_perm_after_query(session, data1)
async for i in self.query_data(session, data1):
assert len(i) == 1
@pytest.mark.usefixtures("init_table")
async def test_merge_perm(self, session: AsyncSession):
await merge_perm(session, data1)
async for i in self.query_data(session, data1):
assert len(i) == 1
await merge_perm(session, data2)
async for i in self.query_data(session, data2):
assert len(i) == 1
async for i in self.query_data(session, data1):
assert len(i) == 0
@pytest.mark.usefixtures("init_table")
async def test_get_perms(self, session: AsyncSession):
await session.execute(insert(UserPerm.__table__), data3)
await session.commit()
res = await get_perms(session, key_space_same[0])
assert res and len(res) == len(data3)
@pytest.mark.usefixtures("init_table")
async def test_del_perms(self, session: AsyncSession):
await session.execute(insert(UserPerm.__table__), data3)
await session.commit()
await del_perms(session, key_space_same[0])
res = (await session.execute(select(UserPerm.__table__))).all()
assert not res
| 33.935897 | 88 | 0.629581 |
4a25429eec76bc02932c820eb686e9be244bf8c5 | 9,450 | py | Python | messager/common/rpc/proxy.py | snowflying/messager | 83a23997047a918d82db1123b0005b34c50b1403 | [
"Apache-2.0"
] | null | null | null | messager/common/rpc/proxy.py | snowflying/messager | 83a23997047a918d82db1123b0005b34c50b1403 | [
"Apache-2.0"
] | null | null | null | messager/common/rpc/proxy.py | snowflying/messager | 83a23997047a918d82db1123b0005b34c50b1403 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A helper class for proxy objects to remote APIs.
For more information about rpc API version numbers, see:
rpc/dispatcher.py
"""
import six
from messager.common import rpc
from messager.common.rpc import common as rpc_common
from messager.common.rpc import serializer as rpc_serializer
class RpcProxy(object):
"""A helper class for rpc clients.
This class is a wrapper around the RPC client API. It allows you to
specify the topic and API version in a single place. This is intended to
be used as a base class for a class that implements the client side of an
rpc API.
"""
# The default namespace, which can be overridden in a subclass.
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None,
serializer=None):
"""Initialize an RpcProxy.
:param topic: The topic to use for all messages.
:param default_version: The default API version to request in all
outgoing messages. This can be overridden on a per-message
basis.
:param version_cap: Optionally cap the maximum version used for sent
messages.
        :param serializer: Optionally (de-)serialize entities with a
provided helper.
"""
self.topic = topic
self.default_version = default_version
self.version_cap = version_cap
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcProxy, self).__init__()
def _set_version(self, msg, vers):
"""Helper method to set the version in a message.
:param msg: The message having a version added to it.
:param vers: The version number to add to the message.
"""
v = vers if vers else self.default_version
if (self.version_cap and not
rpc_common.version_is_compatible(self.version_cap, v)):
raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
msg['version'] = v
def _get_topic(self, topic):
"""Return the topic to use for a message."""
return topic if topic else self.topic
def can_send_version(self, version):
"""Check to see if a version is compatible with the version cap."""
return (not self.version_cap or
rpc_common.version_is_compatible(self.version_cap, version))
@staticmethod
def make_namespaced_msg(method, namespace, **kwargs):
dictobj = {'method': method, 'namespace': namespace, 'args': kwargs}
return dictobj
def make_msg(self, method, **kwargs):
return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
**kwargs)
def _serialize_msg_args(self, context, kwargs):
"""Helper method called to serialize message arguments.
This calls our serializer on each argument, returning a new
set of args that have been serialized.
:param context: The request context
:param kwargs: The arguments to serialize
:returns: A new set of serialized arguments
"""
new_kwargs = dict()
for argname, arg in six.iteritems(kwargs):
new_kwargs[argname] = self.serializer.serialize_entity(context,
arg)
return new_kwargs
def call(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.call() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: The return value from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.call(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def multicall(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.multicall() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: An iterator that lets you process each of the returned values
from the remote method as they arrive.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.multicall(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def cast(self, context, msg, topic=None, version=None):
"""rpc.cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast() does not wait on any return value from the
remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast(context, self._get_topic(topic), msg)
def fanout_cast(self, context, msg, topic=None, version=None):
"""rpc.fanout_cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast() does not wait on any return value
from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast(context, self._get_topic(topic), msg)
def cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast_to_server() does not wait on any
return values.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
def fanout_cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.fanout_cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast_to_server() does not wait on any
return values.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast_to_server(context, server_params,
self._get_topic(topic), msg)
| 41.447368 | 79 | 0.637143 |
4a2543fa09bbd343fdb18bb6f06c029727f3b5ee | 9,425 | py | Python | server/organisation/fixtures/layouts.py | coll-gate/collgate | 8c2ff1c59adda2bf318040f588c05263317a2812 | [
"MIT"
] | 2 | 2017-07-04T16:19:09.000Z | 2019-08-16T04:54:47.000Z | server/organisation/fixtures/layouts.py | coll-gate/collgate | 8c2ff1c59adda2bf318040f588c05263317a2812 | [
"MIT"
] | null | null | null | server/organisation/fixtures/layouts.py | coll-gate/collgate | 8c2ff1c59adda2bf318040f588c05263317a2812 | [
"MIT"
] | 1 | 2018-04-13T08:28:09.000Z | 2018-04-13T08:28:09.000Z | # -*- coding: utf-8; -*-
#
# @file accession_layouts.py
# @brief Set up the values for the organisation layouts and descriptor model types.
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2017-01-03
# @copyright Copyright (c) 2017 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
LAYOUTS = {
'organisation': {
'id': None,
'name': 'organisation',
'target': 'organisation.organisation',
'label': {'en': 'Organisation', 'fr': 'Organisation'},
'description': "Unique layout for an organisation entity.",
'layout_content': {
'panels': [
{
'label': {'en': 'Common', 'fr': 'Commun'},
'descriptors': [
{
'name': 'acronym_1',
'mandatory': False,
'set_once': False
},
{
'name': 'code_16',
'mandatory': False,
'set_once': False
},
{
'name': 'address',
'mandatory': False,
'set_once': False
},
{
'name': 'zipcode',
'mandatory': False,
'set_once': False
},
{
'name': 'city',
'mandatory': False,
'set_once': False
}
]
}
]
}
},
'establishment': {
'id': None,
'name': 'establishment',
'target': 'organisation.establishment',
'label': {'en': 'Establishment', 'fr': 'Implantation'},
'description': "Unique layout for an establishment of an organisation entity.",
'layout_content': {
'panels': [
{
'label': {'en': 'Common', 'fr': 'Commun'},
'descriptors': [
{
'name': 'address',
'mandatory': False,
'set_once': False
},
{
'name': 'zipcode',
'mandatory': False,
'set_once': False
},
{
'name': 'city',
'mandatory': False,
'set_once': False
},
{
'name': 'code_16',
'mandatory': False,
'set_once': False
}
]
}
]
}
},
'contact': {
'id': None,
'name': 'contact',
'target': 'organisation.person',
'label': {'en': 'Person/contact', 'fr': 'Personne/contact'},
'description': "Unique layout for a person/contact of an establishment entity.",
'layout_content': {
'panels': [
{
'label': {'en': 'Common', 'fr': 'Commun'},
'descriptors': [
{
'name': 'person_title',
'mandatory': False,
'set_once': False
},
{
'name': 'first_name',
'mandatory': False,
'set_once': False
},
{
'name': 'last_name',
'mandatory': False,
'set_once': False
},
{
'name': 'address',
'mandatory': False,
'set_once': False
},
{
'name': 'zipcode',
'mandatory': False,
'set_once': False
},
{
'name': 'city',
'mandatory': False,
'set_once': False
},
{
'name': 'email_address',
'mandatory': False,
'set_once': False
},
{
'name': 'phone_number',
'mandatory': False,
'set_once': False
},
{
'name': 'fax_number',
'mandatory': False,
'set_once': False
},
{
'name': 'mobile_number',
'mandatory': False,
'set_once': False
},
{
'name': 'comment',
'mandatory': False,
'set_once': False
}
]
}
]
}
},
'donor_selector_breeder': {
'id': None,
'name': 'donor_selector_breeder',
'target': 'organisation.person',
'label': {'en': 'Donor/Selector/Breeder', 'fr': 'Donneur/Selectionneur/Obtenteur'},
'description': "Unique layout for a donor of an establishment entity.",
'layout_content': {
'panels': [
{
'label': {'en': 'Common', 'fr': 'Commun'},
'descriptors': [
{
'name': 'full_name',
'mandatory': False,
'set_once': False
},
{
'name': 'address',
'mandatory': False,
'set_once': False
},
{
'name': 'zipcode',
'mandatory': False,
'set_once': False
},
{
'name': 'city',
'mandatory': False,
'set_once': False
},
{
'name': 'email_address',
'mandatory': False,
'set_once': False
},
{
'name': 'phone_number',
'mandatory': False,
'set_once': False
},
{
'name': 'fax_number',
'mandatory': False,
'set_once': False
},
{
'name': 'mobile_number',
'mandatory': False,
'set_once': False
},
{
'name': 'comment',
'mandatory': False,
'set_once': False
}
]
}
]
}
},
'conservatory': {
'id': None,
'name': 'conservatory',
'target': 'organisation.conservatory',
'label': {'en': 'Conservatory', 'fr': 'Conservatoire'},
'description': "Unique layout for a conservatory of an establishment entity.",
'layout_content': {
'panels': [
{
'label': {'en': 'Common', 'fr': 'Commun'},
'descriptors': [
{
'name': 'address',
'mandatory': False,
'set_once': False
},
{
'name': 'zipcode',
'mandatory': False,
'set_once': False
},
{
'name': 'city',
'mandatory': False,
'set_once': False
},
{
'name': 'code_16',
'mandatory': False,
'set_once': False
}
]
}
]
}
}
}
def fixture(fixture_manager, factory_manager):
fixture_manager.create_or_update_layouts(LAYOUTS)
| 35.836502 | 91 | 0.282653 |
4a25446b884f5541bec7410f7db5b208d9ace452 | 3,361 | py | Python | models/augment_cnn.py | FduJyy/bayes_darts | fa46d73e06b710264693f108433ee1f555e97456 | [
"MIT"
] | null | null | null | models/augment_cnn.py | FduJyy/bayes_darts | fa46d73e06b710264693f108433ee1f555e97456 | [
"MIT"
] | null | null | null | models/augment_cnn.py | FduJyy/bayes_darts | fa46d73e06b710264693f108433ee1f555e97456 | [
"MIT"
] | null | null | null | """ CNN for network augmentation """
import torch
import torch.nn as nn
from models.augment_cells import AugmentCell
from models import ops
class AuxiliaryHead(nn.Module):
""" Auxiliary head in 2/3 place of network to let the gradient flow well """
def __init__(self, input_size, C, n_classes):
""" assuming input size 7x7 or 8x8 """
# assert input_size in [7, 8]
super().__init__()
self.net = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2d(5, stride=input_size-5, padding=0, count_include_pad=False), # 2x2 out
nn.Conv2d(C, 128, kernel_size=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, kernel_size=2, bias=False), # 1x1 out
nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.linear = nn.Linear(768, n_classes)
def forward(self, x):
out = self.net(x)
out = out.view(out.size(0), -1) # flatten
logits = self.linear(out)
return logits
class AugmentCNN(nn.Module):
""" Augmented CNN model """
def __init__(self, input_size, C_in, C, n_classes, n_layers, auxiliary, genotype,
stem_multiplier=3):
"""
Args:
input_size: size of height and width (assuming height = width)
C_in: # of input channels
C: # of starting model channels
"""
super().__init__()
self.C_in = C_in
self.C = C
self.n_classes = n_classes
self.n_layers = n_layers
self.genotype = genotype
# aux head position
self.aux_pos = 2*n_layers//3 if auxiliary else -1
C_cur = stem_multiplier * C
self.stem = nn.Sequential(
nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False),
nn.BatchNorm2d(C_cur)
)
C_pp, C_p, C_cur = C_cur, C_cur, C
self.cells = nn.ModuleList()
reduction_p = False
for i in range(n_layers):
if i in [n_layers//3, 2*n_layers//3]:
C_cur *= 2
reduction = True
else:
reduction = False
cell = AugmentCell(genotype, C_pp, C_p, C_cur, reduction_p, reduction)
reduction_p = reduction
self.cells.append(cell)
C_cur_out = C_cur * len(cell.concat)
C_pp, C_p = C_p, C_cur_out
if i == self.aux_pos:
# [!] this auxiliary head is ignored in computing parameter size
# by the name 'aux_head'
self.aux_head = AuxiliaryHead(input_size//4, C_p, n_classes)
self.gap = nn.AdaptiveAvgPool2d(1)
self.linear = nn.Linear(C_p, n_classes)
def forward(self, x):
s0 = s1 = self.stem(x)
aux_logits = None
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1)
if i == self.aux_pos and self.training:
aux_logits = self.aux_head(s1)
out = self.gap(s1)
out = out.view(out.size(0), -1) # flatten
logits = self.linear(out)
return logits, aux_logits
def drop_path_prob(self, p):
""" Set drop path probability """
for module in self.modules():
if isinstance(module, ops.DropPath_):
module.p = p
| 32.95098 | 95 | 0.55787 |
4a2544e1329cdb4d132f333a4a0724ad0f455bd0 | 13,855 | py | Python | salt/modules/upstart.py | bogdanr/salt | 4f198525873a1b7da3fbb9994dbb40d381494922 | [
"Apache-2.0"
] | null | null | null | salt/modules/upstart.py | bogdanr/salt | 4f198525873a1b7da3fbb9994dbb40d381494922 | [
"Apache-2.0"
] | null | null | null | salt/modules/upstart.py | bogdanr/salt | 4f198525873a1b7da3fbb9994dbb40d381494922 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Module for the management of upstart systems. The Upstart system only supports
service starting, stopping and restarting.
Currently (as of Ubuntu 12.04) there is no tool available to disable
Upstart services (like update-rc.d). This[1] is the recommended way to
disable an Upstart service. So we assume that all Upstart services
that have not been disabled in this manner are enabled.
But this is broken because we do not check to see that the dependent
services are enabled. Otherwise we would have to do something like
parse the output of "initctl show-config" to determine if all service
dependencies are enabled to start on boot. For example, see the "start
on" condition for the lightdm service below[2]. And this would be too
hard. So we wait until the upstart developers have solved this
problem. :) This is to say that an Upstart service that is enabled may
not really be enabled.
Also, when an Upstart service is enabled, should the dependent
services be enabled too? Probably not. But there should be a notice
about this, at least.
[1] http://upstart.ubuntu.com/cookbook/#disabling-a-job-from-automatically-starting
[2] example upstart configuration file::
lightdm
emits login-session-start
emits desktop-session-start
emits desktop-shutdown
start on ((((filesystem and runlevel [!06]) and started dbus) and (drm-device-added card0 PRIMARY_DEVICE_FOR_DISPLAY=1 or stopped udev-fallback-graphics)) or runlevel PREVLEVEL=S)
stop on runlevel [016]
.. warning::
This module should not be used on Red Hat systems. For these,
the :mod:`rh_service <salt.modules.rh_service>` module should be
used, as it supports the hybrid upstart/sysvinit system used in
RHEL/CentOS 6.
'''
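# Illustration of the override mechanism described above ("mysql" is just an
# example job name) -- this is what _upstart_disable()/_upstart_enable() below
# manage:
#
#     echo manual > /etc/init/mysql.override    # job no longer starts on boot
#     rm /etc/init/mysql.override               # job starts on boot again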
from __future__ import absolute_import
# Import python libs
import glob
import os
# Import salt libs
import salt.utils
import salt.modules.cmdmod
import salt.utils.systemd
__func_alias__ = {
'reload_': 'reload'
}
# Define the module's virtual name
__virtualname__ = 'service'
def __virtual__():
'''
Only work on Ubuntu
'''
# Disable on these platforms, specific service modules exist:
if salt.utils.systemd.booted(__context__):
return False
elif __grains__['os'] in ('Ubuntu', 'Linaro', 'elementary OS', 'Mint'):
return __virtualname__
elif __grains__['os'] in ('Debian', 'Raspbian'):
debian_initctl = '/sbin/initctl'
if os.path.isfile(debian_initctl):
initctl_version = salt.modules.cmdmod._run_quiet(debian_initctl + ' version')
if 'upstart' in initctl_version:
return __virtualname__
return False
def _find_utmp():
'''
Figure out which utmp file to use when determining runlevel.
Sometimes /var/run/utmp doesn't exist, /run/utmp is the new hotness.
'''
result = {}
# These are the likely locations for the file on Ubuntu
for utmp in '/var/run/utmp', '/run/utmp':
try:
result[os.stat(utmp).st_mtime] = utmp
except Exception:
pass
return result[sorted(result).pop()]
def _default_runlevel():
'''
Try to figure out the default runlevel. It is kept in
/etc/init/rc-sysinit.conf, but can be overridden with entries
in /etc/inittab, or via the kernel command-line at boot
'''
# Try to get the "main" default. If this fails, throw up our
# hands and just guess "2", because things are horribly broken
try:
with salt.utils.fopen('/etc/init/rc-sysinit.conf') as fp_:
for line in fp_:
if line.startswith('env DEFAULT_RUNLEVEL'):
runlevel = line.split('=')[-1].strip()
except Exception:
return '2'
# Look for an optional "legacy" override in /etc/inittab
try:
with salt.utils.fopen('/etc/inittab') as fp_:
for line in fp_:
if not line.startswith('#') and 'initdefault' in line:
runlevel = line.split(':')[1]
except Exception:
pass
# The default runlevel can also be set via the kernel command-line.
# Kinky.
try:
valid_strings = set(
('0', '1', '2', '3', '4', '5', '6', 's', 'S', '-s', 'single'))
with salt.utils.fopen('/proc/cmdline') as fp_:
for line in fp_:
for arg in line.strip().split():
if arg in valid_strings:
runlevel = arg
break
except Exception:
pass
return runlevel
def _runlevel():
'''
Return the current runlevel
'''
if 'upstart._runlevel' in __context__:
return __context__['upstart._runlevel']
out = __salt__['cmd.run'](['runlevel', '{0}'.format(_find_utmp())], python_shell=False)
try:
ret = out.split()[1]
except IndexError:
# The runlevel is unknown, return the default
ret = _default_runlevel()
__context__['upstart._runlevel'] = ret
return ret
def _is_symlink(name):
return os.path.abspath(name) != os.path.realpath(name)
def _service_is_upstart(name):
'''
From "Writing Jobs" at
http://upstart.ubuntu.com/getting-started.html:
Jobs are defined in files placed in /etc/init, the name of the job
is the filename under this directory without the .conf extension.
'''
return os.access('/etc/init/{0}.conf'.format(name), os.R_OK)
def _upstart_is_disabled(name):
'''
An Upstart service is assumed disabled if a manual stanza is
placed in /etc/init/[name].override.
NOTE: An Upstart service can also be disabled by placing "manual"
in /etc/init/[name].conf.
'''
return os.access('/etc/init/{0}.override'.format(name), os.R_OK)
def _upstart_is_enabled(name):
'''
Assume that if an Upstart service is not disabled then it must be
enabled.
'''
return not _upstart_is_disabled(name)
def _service_is_sysv(name):
'''
A System-V style service will have a control script in
/etc/init.d. We make sure to skip over symbolic links that point
to Upstart's /lib/init/upstart-job, and anything that isn't an
executable, like README or skeleton.
'''
script = '/etc/init.d/{0}'.format(name)
return not _service_is_upstart(name) and os.access(script, os.X_OK)
def _sysv_is_disabled(name):
'''
A System-V style service is assumed disabled if there is no
start-up link (starts with "S") to its script in /etc/init.d in
the current runlevel.
'''
return not bool(glob.glob('/etc/rc{0}.d/S*{1}'.format(_runlevel(), name)))
def _sysv_is_enabled(name):
'''
Assume that if a System-V style service is not disabled then it
must be enabled.
'''
return not _sysv_is_disabled(name)
def _iter_service_names():
'''
Detect all of the service names available to upstart via init configuration
files and via classic sysv init scripts
'''
found = set()
for line in glob.glob('/etc/init.d/*'):
name = os.path.basename(line)
found.add(name)
yield name
for line in glob.glob('/etc/init/*.conf'):
name = os.path.basename(line)[:-5]
if name in found:
continue
yield name
def get_enabled():
'''
Return the enabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
ret = set()
for name in _iter_service_names():
if _service_is_upstart(name):
if _upstart_is_enabled(name):
ret.add(name)
else:
if _service_is_sysv(name):
if _sysv_is_enabled(name):
ret.add(name)
return sorted(ret)
def get_disabled():
'''
Return the disabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
ret = set()
for name in _iter_service_names():
if _service_is_upstart(name):
if _upstart_is_disabled(name):
ret.add(name)
else:
if _service_is_sysv(name):
if _sysv_is_disabled(name):
ret.add(name)
return sorted(ret)
def available(name):
'''
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
return name in get_all()
def missing(name):
'''
The inverse of service.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
'''
return name not in get_all()
def get_all():
'''
Return all installed services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
return sorted(get_enabled() + get_disabled())
def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = ['service', name, 'start']
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def stop(name):
'''
Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
cmd = ['service', name, 'stop']
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def restart(name):
'''
Restart the named service
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
cmd = ['service', name, 'restart']
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def full_restart(name):
'''
Do a full restart (stop/start) of the named service
CLI Example:
.. code-block:: bash
salt '*' service.full_restart <service name>
'''
cmd = ['service', name, '--full-restart']
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def reload_(name):
'''
Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
cmd = ['service', name, 'reload']
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def force_reload(name):
'''
Force-reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.force_reload <service name>
'''
cmd = ['service', name, 'force-reload']
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def status(name, sig=None):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
'''
if sig:
return bool(__salt__['status.pid'](sig))
cmd = ['service', name, 'status']
if _service_is_upstart(name):
        # decide the result based on the cmd output, thus ignore the retcode,
        # which keeps the cmd output from being logged at error level even when the command fails.
return 'start/running' in __salt__['cmd.run'](cmd, python_shell=False,
ignore_retcode=True)
    # decide the result based on the retcode, thus ignore the output (set quiet),
    # because there is no way to avoid logging at error level when the
    # service is not running - retcode != 0 (which is totally relevant).
return not bool(__salt__['cmd.retcode'](cmd, python_shell=False,
                                            quiet=True))
def _get_service_exec():
'''
Debian uses update-rc.d to manage System-V style services.
http://www.debian.org/doc/debian-policy/ch-opersys.html#s9.3.3
'''
executable = 'update-rc.d'
salt.utils.check_or_die(executable)
return executable
def _upstart_disable(name):
'''
Disable an Upstart service.
'''
override = '/etc/init/{0}.override'.format(name)
with salt.utils.fopen(override, 'w') as ofile:
ofile.write('manual')
return _upstart_is_disabled(name)
def _upstart_enable(name):
'''
Enable an Upstart service.
'''
override = '/etc/init/{0}.override'.format(name)
if os.access(override, os.R_OK):
os.unlink(override)
return _upstart_is_enabled(name)
def enable(name, **kwargs):
'''
Enable the named service to start at boot
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
'''
if _service_is_upstart(name):
return _upstart_enable(name)
executable = _get_service_exec()
cmd = '{0} -f {1} enable'.format(executable, name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def disable(name, **kwargs):
'''
Disable the named service from starting on boot
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
if _service_is_upstart(name):
return _upstart_disable(name)
executable = _get_service_exec()
cmd = [executable, '-f', name, 'remove']
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def enabled(name, **kwargs):
'''
Check to see if the named service is enabled to start on boot
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
'''
if _service_is_upstart(name):
return _upstart_is_enabled(name)
else:
if _service_is_sysv(name):
return _sysv_is_enabled(name)
return None
def disabled(name):
'''
Check to see if the named service is disabled to start on boot
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
if _service_is_upstart(name):
return _upstart_is_disabled(name)
else:
if _service_is_sysv(name):
return _sysv_is_disabled(name)
return None
| 26.491396 | 183 | 0.631036 |
4a25450e14bcba9d9c9eb0bbfe38fa196caa31f1 | 5,453 | py | Python | saleor/graphql/webhook/enums.py | BearerPipelineTest/saleor | 1257145235cd2f59e932ac067ee18673fd5b6a54 | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/webhook/enums.py | BearerPipelineTest/saleor | 1257145235cd2f59e932ac067ee18673fd5b6a54 | [
"CC-BY-4.0"
] | 52 | 2021-12-27T04:33:50.000Z | 2022-03-28T04:54:44.000Z | saleor/graphql/webhook/enums.py | sakapong/saleor | a1a7a223bba28fc7941e8f270edbc1aa7c6b71f4 | [
"CC-BY-4.0"
] | null | null | null | import graphene
from ...webhook.event_types import WebhookEventAsyncType, WebhookEventSyncType
from ..core.utils import str_to_enum
checkout_updated_event_enum_description = (
"A checkout is updated. It also triggers all updates related to the checkout."
)
order_confirmed_event_enum_description = (
"An order is confirmed (status change unconfirmed -> unfulfilled) "
"by a staff user using the OrderConfirm mutation. "
"It also triggers when the user completes the checkout and the shop "
"setting `automatically_confirm_all_new_orders` is enabled."
)
order_fully_paid_event_enum_description = "Payment is made and an order is fully paid."
order_updated_event_enum_description = (
"An order is updated; triggered for all changes related to an order; "
"covers all other order webhooks, except for ORDER_CREATED."
)
WEBHOOK_EVENT_DESCRIPTION = {
WebhookEventAsyncType.CATEGORY_CREATED: "A new category created.",
WebhookEventAsyncType.CATEGORY_UPDATED: "A category is updated.",
WebhookEventAsyncType.CATEGORY_DELETED: "A category is deleted.",
WebhookEventAsyncType.CHANNEL_CREATED: "A new channel created.",
WebhookEventAsyncType.CHANNEL_UPDATED: "A channel is updated.",
WebhookEventAsyncType.CHANNEL_DELETED: "A channel is deleted.",
WebhookEventAsyncType.CHANNEL_STATUS_CHANGED: "A channel status is changed.",
WebhookEventAsyncType.CHECKOUT_CREATED: "A new checkout is created.",
WebhookEventAsyncType.CHECKOUT_UPDATED: checkout_updated_event_enum_description,
WebhookEventAsyncType.COLLECTION_CREATED: "A new collection is created.",
WebhookEventAsyncType.COLLECTION_UPDATED: "A collection is updated.",
WebhookEventAsyncType.COLLECTION_DELETED: "A collection is deleted.",
WebhookEventAsyncType.CUSTOMER_CREATED: "A new customer account is created.",
WebhookEventAsyncType.CUSTOMER_UPDATED: "A customer account is updated.",
WebhookEventAsyncType.GIFT_CARD_CREATED: "A new gift card created.",
WebhookEventAsyncType.GIFT_CARD_UPDATED: "A gift card is updated.",
WebhookEventAsyncType.GIFT_CARD_DELETED: "A gift card is deleted.",
WebhookEventAsyncType.GIFT_CARD_STATUS_CHANGED: "A gift card status is changed.",
WebhookEventAsyncType.NOTIFY_USER: "User notification triggered.",
WebhookEventAsyncType.ORDER_CREATED: "A new order is placed.",
WebhookEventAsyncType.ORDER_CONFIRMED: order_confirmed_event_enum_description,
WebhookEventAsyncType.ORDER_FULLY_PAID: order_fully_paid_event_enum_description,
WebhookEventAsyncType.ORDER_UPDATED: order_updated_event_enum_description,
WebhookEventAsyncType.ORDER_CANCELLED: "An order is cancelled.",
WebhookEventAsyncType.ORDER_FULFILLED: "An order is fulfilled.",
WebhookEventAsyncType.FULFILLMENT_CREATED: "A new fulfillment is created.",
WebhookEventAsyncType.FULFILLMENT_CANCELED: "A fulfillment is cancelled.",
WebhookEventAsyncType.PAGE_CREATED: "A new page is created.",
WebhookEventAsyncType.PAGE_UPDATED: "A page is updated.",
WebhookEventAsyncType.PAGE_DELETED: "A page is deleted.",
WebhookEventAsyncType.PRODUCT_CREATED: "A new product is created.",
WebhookEventAsyncType.PRODUCT_UPDATED: "A product is updated.",
WebhookEventAsyncType.PRODUCT_DELETED: "A product is deleted.",
WebhookEventAsyncType.PRODUCT_VARIANT_CREATED: "A new product variant is created.",
WebhookEventAsyncType.PRODUCT_VARIANT_UPDATED: "A product variant is updated.",
WebhookEventAsyncType.PRODUCT_VARIANT_DELETED: "A product variant is deleted.",
WebhookEventAsyncType.SHIPPING_PRICE_CREATED: "A new shipping price is created.",
WebhookEventAsyncType.SHIPPING_PRICE_UPDATED: "A shipping price is updated.",
WebhookEventAsyncType.SHIPPING_PRICE_DELETED: "A shipping price is deleted.",
WebhookEventAsyncType.SHIPPING_ZONE_CREATED: "A new shipping zone is created.",
WebhookEventAsyncType.SHIPPING_ZONE_UPDATED: "A shipping zone is updated.",
WebhookEventAsyncType.SHIPPING_ZONE_DELETED: "A shipping zone is deleted.",
WebhookEventAsyncType.INVOICE_REQUESTED: "An invoice for order requested.",
WebhookEventAsyncType.INVOICE_DELETED: "An invoice is deleted.",
WebhookEventAsyncType.INVOICE_SENT: "Invoice has been sent.",
WebhookEventAsyncType.ANY: "All the events.",
}
def description(enum):
if enum:
return WEBHOOK_EVENT_DESCRIPTION.get(enum.value)
return "Enum determining type of webhook."
WebhookEventTypeEnum = graphene.Enum(
"WebhookEventTypeEnum",
[
(str_to_enum(e_type[0]), e_type[0])
for e_type in (WebhookEventAsyncType.CHOICES + WebhookEventSyncType.CHOICES)
],
description=description,
)
WebhookEventTypeAsyncEnum = graphene.Enum(
"WebhookEventTypeAsyncEnum",
[(str_to_enum(e_type[0]), e_type[0]) for e_type in WebhookEventAsyncType.CHOICES],
description=description,
)
WebhookEventTypeSyncEnum = graphene.Enum(
"WebhookEventTypeSyncEnum",
[(str_to_enum(e_type[0]), e_type[0]) for e_type in WebhookEventSyncType.CHOICES],
description=description,
)
WebhookSampleEventTypeEnum = graphene.Enum(
"WebhookSampleEventTypeEnum",
[
(str_to_enum(e_type[0]), e_type[0])
for e_type in WebhookEventAsyncType.CHOICES
if e_type[0] != WebhookEventAsyncType.ANY
],
)
class EventDeliveryStatusEnum(graphene.Enum):
PENDING = "pending"
SUCCESS = "success"
FAILED = "failed"
| 46.606838 | 87 | 0.777003 |
4a25458a9c2856d534debeac5aa77b7e4b824de3 | 2,899 | py | Python | rotate/k2.py | dfm/cerot | 9c62039b21edc90bd09b6466e8000fd2fb7329ea | [
"Apache-2.0"
] | null | null | null | rotate/k2.py | dfm/cerot | 9c62039b21edc90bd09b6466e8000fd2fb7329ea | [
"Apache-2.0"
] | null | null | null | rotate/k2.py | dfm/cerot | 9c62039b21edc90bd09b6466e8000fd2fb7329ea | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division, print_function
import everest
import exoarch
import numpy as np
from scipy.signal import savgol_filter
__all__ = ["get_light_curve"]
def sigma_clip(f, thresh=5, window=49):
"""Get a binary mask of 'good' points using sigma clipping
Args:
thresh (float): The sigma clipping threshold.
window (int): The width of the smoothing window for sigma
clipping.
"""
f = f - savgol_filter(f, window, 2) + np.nanmedian(f)
mu = np.median(f)
std = np.sqrt(np.median((f - mu)**2))
return np.abs(f - mu) < thresh*std
def get_light_curve(epicid, season=None, mask_transits=True, mask_width=3,
sigma_iter=10, sigma_thresh=5.0, sigma_window=49):
"""Get the light curve for a given EPIC ID
Args:
epicid (int): The ID of the target.
mask_transits (bool): Should known candidates be masked?
mask_width (float): The half width of the transit mask in units of the
transit duration.
sigma_iter (int): The maximum number of iterations of sigma clipping to
run.
sigma_thresh (float): The sigma clipping threshold.
sigma_window (int): The width of the smoothing window for sigma
clipping.
Returns:
t (ndarray): The array of timestamps.
F (ndarray): The ``(ntime, npix)`` matrix of (normalized) pixel flux
time series.
yerr (ndarray): An estimate of the uncertainties of the SAP flux
(``sum(F, axis=1)``).
"""
star = everest.Everest(epicid, season=season, quiet=True)
t = star.apply_mask(star.time)
F = star.apply_mask(star.fpix)
# Mask any known transits
if mask_transits:
k2cand = exoarch.ExoplanetArchiveCatalog("k2candidates").df
epic = k2cand[k2cand.epic_name == "EPIC {0}".format(epicid)]
cands = epic.groupby("epic_candname").mean()
for _, cand in cands.iterrows():
t0 = cand.pl_tranmid - 2454833.0
per = cand.pl_orbper
dur = cand.pl_trandur
m = np.abs((t - t0 + 0.5*per) % per - 0.5*per) > mask_width * dur
t = t[m]
F = F[m]
# Use 1st order PLD to do some sigma clipping
fsap = np.sum(F, axis=1)
A = F / fsap[:, None]
m = np.ones_like(fsap, dtype=bool)
for i in range(sigma_iter):
w = np.linalg.solve(np.dot(A[m].T, A[m]), np.dot(A[m].T, fsap[m]))
resid = fsap - np.dot(A, w)
m_new = sigma_clip(resid, thresh=sigma_thresh, window=sigma_window)
if m.sum() == m_new.sum():
m = m_new
break
m = m_new
t = t[m]
fsap = fsap[m]
F = F[m]
# Normalize
med = np.median(fsap)
fsap /= med
F /= med
# Estimate flux uncertainty
yerr = np.nanmedian(np.abs(np.diff(fsap)))
return t, F, yerr
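# Minimal usage sketch (the EPIC ID below is an arbitrary example):
#
#     t, F, yerr = get_light_curve(201367065, mask_transits=True)
#     fsap = F.sum(axis=1)   # normalized SAP flux time series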
| 31.172043 | 79 | 0.596758 |
4a2545fee14b103dd69c1e377d63813498b6cd95 | 28 | py | Python | gui/test.py | ErlendHer/AlgoView | 946c2bb38e2ab3af011281c9672af4fcca84ae87 | [
"Apache-2.0"
] | 1 | 2020-11-26T09:37:28.000Z | 2020-11-26T09:37:28.000Z | gui/test.py | ErlendHer/AlgoView | 946c2bb38e2ab3af011281c9672af4fcca84ae87 | [
"Apache-2.0"
] | null | null | null | gui/test.py | ErlendHer/AlgoView | 946c2bb38e2ab3af011281c9672af4fcca84ae87 | [
"Apache-2.0"
] | 1 | 2020-12-09T17:18:12.000Z | 2020-12-09T17:18:12.000Z | a = 2
b = {}
print(type(b)) | 7 | 14 | 0.464286 |
4a25460b42f3b32206405e2010afb3bea3d17db4 | 2,097 | py | Python | aliyun-python-sdk-imageprocess/aliyunsdkimageprocess/request/v20200320/DetectKneeKeypointXRayRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-imageprocess/aliyunsdkimageprocess/request/v20200320/DetectKneeKeypointXRayRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-imageprocess/aliyunsdkimageprocess/request/v20200320/DetectKneeKeypointXRayRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkimageprocess.endpoint import endpoint_data
class DetectKneeKeypointXRayRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imageprocess', '2020-03-20', 'DetectKneeKeypointXRay','imageprocess')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DataFormat(self):
return self.get_body_params().get('DataFormat')
def set_DataFormat(self,DataFormat):
self.add_body_params('DataFormat', DataFormat)
def get_OrgId(self):
return self.get_body_params().get('OrgId')
def set_OrgId(self,OrgId):
self.add_body_params('OrgId', OrgId)
def get_OrgName(self):
return self.get_body_params().get('OrgName')
def set_OrgName(self,OrgName):
self.add_body_params('OrgName', OrgName)
def get_ImageUrl(self):
return self.get_body_params().get('ImageUrl')
def set_ImageUrl(self,ImageUrl):
self.add_body_params('ImageUrl', ImageUrl)
def get_TracerId(self):
return self.get_body_params().get('TracerId')
def set_TracerId(self,TracerId):
self.add_body_params('TracerId', TracerId) | 33.822581 | 99 | 0.758226 |
4a25463c2286657f04779a6626378ae0a018c74f | 175 | py | Python | Python/swapping_variable_values.py | aaaaaaaaaanyaaaaaaaaa/Hello-world | ff47220589c34c1cd4555346e92d3255b433975f | [
"MIT"
] | 3 | 2019-03-22T06:28:17.000Z | 2019-03-29T17:15:49.000Z | Python/swapping_variable_values.py | aaaaaaaaaanyaaaaaaaaa/Hello-world | ff47220589c34c1cd4555346e92d3255b433975f | [
"MIT"
] | 1 | 2015-10-18T11:24:08.000Z | 2015-10-18T11:34:07.000Z | Python/swapping_variable_values.py | aaaaaaaaaanyaaaaaaaaa/Hello-world | ff47220589c34c1cd4555346e92d3255b433975f | [
"MIT"
] | 3 | 2018-10-10T18:20:01.000Z | 2018-10-21T13:51:43.000Z | #We take 2 variables with certain values:
a= 10
b= 20
# Swapping the values of the variables:
b= a + b
a= b - a
b= b - a
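# Step by step with a = 10 and b = 20:
#   b = a + b  ->  b = 30
#   a = b - a  ->  a = 30 - 10 = 20
#   b = b - a  ->  b = 30 - 20 = 10
# so the values end up swapped without using a temporary variable.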
# Printing out to check the values:
print a
print b | 14.583333 | 41 | 0.674286 |
4a25469c1fec921850514f4a960d2879bd4361f9 | 18,859 | py | Python | python/common/diagnostic/pydevDebug/runfiles.py | CountZer0/PipelineConstructionSet | 0aa73a8a63c72989b2d1c677efd78dad4388d335 | [
"BSD-3-Clause"
] | 21 | 2015-04-27T05:01:36.000Z | 2021-11-22T13:45:14.000Z | python/common/diagnostic/pydevDebug/runfiles.py | 0xb1dd1e/PipelineConstructionSet | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | [
"BSD-3-Clause"
] | null | null | null | python/common/diagnostic/pydevDebug/runfiles.py | 0xb1dd1e/PipelineConstructionSet | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | [
"BSD-3-Clause"
] | 7 | 2015-04-11T11:37:19.000Z | 2020-05-22T09:49:04.000Z | import fnmatch
import os.path
import re
import sys
import unittest
try:
__setFalse = False
except:
import __builtin__
setattr(__builtin__, 'True', 1)
setattr(__builtin__, 'False', 0)
#=======================================================================================================================
# Jython?
#=======================================================================================================================
try:
import org.python.core.PyDictionary #@UnresolvedImport @UnusedImport -- just to check if it could be valid
def DictContains(d, key):
return d.has_key(key)
except:
try:
#Py3k does not have has_key anymore, and older versions don't have __contains__
DictContains = dict.__contains__
except:
DictContains = dict.has_key
try:
xrange
except:
#Python 3k does not have it
xrange = range
try:
enumerate
except:
def enumerate(lst):
ret = []
i=0
for element in lst:
ret.append((i, element))
i+=1
return ret
#=======================================================================================================================
# getopt code copied since gnu_getopt is not available on jython 2.1
#=======================================================================================================================
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
def gnu_getopt(args, shortopts, longopts=[]):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, ''.__class__):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i + 1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError('option --%s requires argument' % opt, opt)
optarg, args = args[0], args[1:]
elif optarg:
raise GetoptError('option --%s must not have an argument' % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError('option --%s not recognized' % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError('option --%s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError('option -%s requires argument' % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i + 1)
raise GetoptError('option -%s not recognized' % opt, opt)
#=======================================================================================================================
# End getopt code
#=======================================================================================================================
#=======================================================================================================================
# parse_cmdline
#=======================================================================================================================
def parse_cmdline():
""" parses command line and returns test directories, verbosity, test filter and test suites
usage:
runfiles.py -v|--verbosity <level> -f|--filter <regex> -t|--tests <Test.test1,Test2> dirs|files
"""
verbosity = 2
test_filter = None
tests = None
optlist, dirs = gnu_getopt(sys.argv[1:], "v:f:t:", ["verbosity=", "filter=", "tests="])
for opt, value in optlist:
if opt in ("-v", "--verbosity"):
verbosity = value
elif opt in ("-f", "--filter"):
test_filter = value.split(',')
elif opt in ("-t", "--tests"):
tests = value.split(',')
if type([]) != type(dirs):
dirs = [dirs]
ret_dirs = []
for d in dirs:
if '|' in d:
#paths may come from the ide separated by |
ret_dirs.extend(d.split('|'))
else:
ret_dirs.append(d)
return ret_dirs, int(verbosity), test_filter, tests
#=======================================================================================================================
# PydevTestRunner
#=======================================================================================================================
class PydevTestRunner:
""" finds and runs a file or directory of files as a unit test """
__py_extensions = ["*.py", "*.pyw"]
__exclude_files = ["__init__.*"]
def __init__(self, test_dir, test_filter=None, verbosity=2, tests=None):
self.test_dir = test_dir
self.__adjust_path()
self.test_filter = self.__setup_test_filter(test_filter)
self.verbosity = verbosity
self.tests = tests
def __adjust_path(self):
""" add the current file or directory to the python path """
path_to_append = None
for n in xrange(len(self.test_dir)):
dir_name = self.__unixify(self.test_dir[n])
if os.path.isdir(dir_name):
if not dir_name.endswith("/"):
self.test_dir[n] = dir_name + "/"
path_to_append = os.path.normpath(dir_name)
elif os.path.isfile(dir_name):
path_to_append = os.path.dirname(dir_name)
else:
msg = ("unknown type. \n%s\nshould be file or a directory.\n" % (dir_name))
raise RuntimeError(msg)
if path_to_append is not None:
#Add it as the last one (so, first things are resolved against the default dirs and
#if none resolves, then we try a relative import).
sys.path.append(path_to_append)
return
def __setup_test_filter(self, test_filter):
""" turn a filter string into a list of filter regexes """
if test_filter is None or len(test_filter) == 0:
return None
return [re.compile("test%s" % f) for f in test_filter]
def __is_valid_py_file(self, fname):
""" tests that a particular file contains the proper file extension
and is not in the list of files to exclude """
is_valid_fname = 0
for invalid_fname in self.__class__.__exclude_files:
is_valid_fname += int(not fnmatch.fnmatch(fname, invalid_fname))
if_valid_ext = 0
for ext in self.__class__.__py_extensions:
if_valid_ext += int(fnmatch.fnmatch(fname, ext))
return is_valid_fname > 0 and if_valid_ext > 0
def __unixify(self, s):
""" stupid windows. converts the backslash to forwardslash for consistency """
return os.path.normpath(s).replace(os.sep, "/")
def __importify(self, s, dir=False):
""" turns directory separators into dots and removes the ".py*" extension
so the string can be used as import statement """
if not dir:
dirname, fname = os.path.split(s)
if fname.count('.') > 1:
#if there's a file named xxx.xx.py, it is not a valid module, so, let's not load it...
return
imp_stmt_pieces = [dirname.replace("\\", "/").replace("/", "."), os.path.splitext(fname)[0]]
if len(imp_stmt_pieces[0]) == 0:
imp_stmt_pieces = imp_stmt_pieces[1:]
return ".".join(imp_stmt_pieces)
else: #handle dir
return s.replace("\\", "/").replace("/", ".")
def __add_files(self, pyfiles, root, files):
""" if files match, appends them to pyfiles. used by os.path.walk fcn """
for fname in files:
if self.__is_valid_py_file(fname):
name_without_base_dir = self.__unixify(os.path.join(root, fname))
pyfiles.append(name_without_base_dir)
return
def find_import_files(self):
""" return a list of files to import """
pyfiles = []
for base_dir in self.test_dir:
if os.path.isdir(base_dir):
if hasattr(os, 'walk'):
for root, dirs, files in os.walk(base_dir):
self.__add_files(pyfiles, root, files)
else:
# jython2.1 is too old for os.walk!
os.path.walk(base_dir, self.__add_files, pyfiles)
elif os.path.isfile(base_dir):
pyfiles.append(base_dir)
return pyfiles
def __get_module_from_str(self, modname, print_exception):
""" Import the module in the given import path.
* Returns the "final" module, so importing "coilib40.subject.visu"
returns the "visu" module, not the "coilib40" as returned by __import__ """
try:
mod = __import__(modname)
for part in modname.split('.')[1:]:
mod = getattr(mod, part)
return mod
except:
if print_exception:
import traceback;traceback.print_exc()
sys.stderr.write('ERROR: Module: %s could not be imported.\n' % (modname,))
return None
def find_modules_from_files(self, pyfiles):
""" returns a lisst of modules given a list of files """
#let's make sure that the paths we want are in the pythonpath...
imports = [self.__importify(s) for s in pyfiles]
system_paths = []
for s in sys.path:
system_paths.append(self.__importify(s, True))
ret = []
for imp in imports:
if imp is None:
continue #can happen if a file is not a valid module
choices = []
for s in system_paths:
if imp.startswith(s):
add = imp[len(s) + 1:]
if add:
choices.append(add)
#sys.stdout.write(' ' + add + ' ')
if not choices:
sys.stdout.write('PYTHONPATH not found for file: %s\n' % imp)
else:
for i, import_str in enumerate(choices):
mod = self.__get_module_from_str(import_str, print_exception=i == len(choices) - 1)
if mod is not None:
ret.append(mod)
break
return ret
def find_tests_from_modules(self, modules):
""" returns the unittests given a list of modules """
loader = unittest.TestLoader()
ret = []
if self.tests:
accepted_classes = {}
accepted_methods = {}
for t in self.tests:
splitted = t.split('.')
if len(splitted) == 1:
accepted_classes[t] = t
elif len(splitted) == 2:
accepted_methods[t] = t
#===========================================================================================================
# GetTestCaseNames
#===========================================================================================================
class GetTestCaseNames:
"""Yes, we need a class for that (cannot use outer context on jython 2.1)"""
def __init__(self, accepted_classes, accepted_methods):
self.accepted_classes = accepted_classes
self.accepted_methods = accepted_methods
def __call__(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass"""
testFnNames = []
className = testCaseClass.__name__
if DictContains(self.accepted_classes, className):
for attrname in dir(testCaseClass):
#If a class is chosen, we select all the 'test' methods'
if attrname.startswith('test') and hasattr(getattr(testCaseClass, attrname), '__call__'):
testFnNames.append(attrname)
else:
for attrname in dir(testCaseClass):
#If we have the class+method name, we must do a full check and have an exact match.
if DictContains(self.accepted_methods, className + '.' + attrname):
if hasattr(getattr(testCaseClass, attrname), '__call__'):
testFnNames.append(attrname)
#sorted() is not available in jython 2.1
testFnNames.sort()
return testFnNames
loader.getTestCaseNames = GetTestCaseNames(accepted_classes, accepted_methods)
ret.extend([loader.loadTestsFromModule(m) for m in modules])
return ret
def filter_tests(self, test_objs):
""" based on a filter name, only return those tests that have
the test case names that match """
test_suite = []
for test_obj in test_objs:
if isinstance(test_obj, unittest.TestSuite):
if test_obj._tests:
test_obj._tests = self.filter_tests(test_obj._tests)
if test_obj._tests:
test_suite.append(test_obj)
elif isinstance(test_obj, unittest.TestCase):
test_cases = []
for tc in test_objs:
try:
testMethodName = tc._TestCase__testMethodName
except AttributeError:
#changed in python 2.5
testMethodName = tc._testMethodName
if self.__match(self.test_filter, testMethodName) and self.__match_tests(self.tests, tc, testMethodName):
test_cases.append(tc)
return test_cases
return test_suite
def __match_tests(self, tests, test_case, test_method_name):
if not tests:
return 1
for t in tests:
class_and_method = t.split('.')
if len(class_and_method) == 1:
#only class name
if class_and_method[0] == test_case.__class__.__name__:
return 1
elif len(class_and_method) == 2:
if class_and_method[0] == test_case.__class__.__name__ and class_and_method[1] == test_method_name:
return 1
return 0
def __match(self, filter_list, name):
""" returns whether a test name matches the test filter """
if filter_list is None:
return 1
for f in filter_list:
if re.match(f, name):
return 1
return 0
def run_tests(self):
""" runs all tests """
sys.stdout.write("Finding files...\n")
files = self.find_import_files()
sys.stdout.write('%s %s\n' % (self.test_dir, '... done'))
sys.stdout.write("Importing test modules ... ")
modules = self.find_modules_from_files(files)
sys.stdout.write("done.\n")
all_tests = self.find_tests_from_modules(modules)
if self.test_filter or self.tests:
if self.test_filter:
sys.stdout.write('Test Filter: %s' % ([p.pattern for p in self.test_filter],))
if self.tests:
sys.stdout.write('Tests to run: %s' % (self.tests,))
all_tests = self.filter_tests(all_tests)
sys.stdout.write('\n')
        runner = unittest.TextTestRunner(stream=sys.stdout, descriptions=1, verbosity=self.verbosity)
runner.run(unittest.TestSuite(all_tests))
return
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
dirs, verbosity, test_filter, tests = parse_cmdline()
PydevTestRunner(dirs, test_filter, verbosity, tests).run_tests()
| 35.516008 | 125 | 0.514025 |
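As a quick, hypothetical illustration of the runner defined above (assuming the file is importable as `runfiles` and that a `tests/` directory exists), the command line `python runfiles.py -v 2 -f _smoke tests/` is roughly equivalent to:

import runfiles

# Discover test modules under tests/, keep only methods whose names start with
# "test_smoke", and run them with verbosity 2.
runner = runfiles.PydevTestRunner(['tests/'], test_filter=['_smoke'], verbosity=2)
runner.run_tests()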
4a254739cc3c94bd18fe79235462d71e0355becc | 15,776 | py | Python | yatube/posts/tests/test_views.py | annrud/post_publishing_platform | c065799203c0800ce05d038f3491fb6184b5cbc7 | [
"MIT"
] | 1 | 2021-11-09T09:40:37.000Z | 2021-11-09T09:40:37.000Z | yatube/posts/tests/test_views.py | annrud/post_publishing_platform | c065799203c0800ce05d038f3491fb6184b5cbc7 | [
"MIT"
] | null | null | null | yatube/posts/tests/test_views.py | annrud/post_publishing_platform | c065799203c0800ce05d038f3491fb6184b5cbc7 | [
"MIT"
] | null | null | null | import shutil
import tempfile
from http import HTTPStatus
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.paginator import Page
from django.test import Client, TestCase
from django.test.utils import override_settings
from django.urls import reverse
from ..forms import CommentForm, PostForm
from ..models import Follow, Group, Post, User
@override_settings(MEDIA_ROOT=tempfile.mkdtemp(dir=settings.BASE_DIR))
class PostsPagesTests(TestCase):
@classmethod
def setUpClass(cls):
"""Временная папка для медиа-файлов.
Запись в тестовую БД.
"""
super().setUpClass()
cls.small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
cls.uploaded = SimpleUploadedFile(
name='small.gif',
content=cls.small_gif,
content_type='image/gif'
)
cls.author = User.objects.create_user(
username='anna', email='[email protected]'
)
cls.group = Group.objects.create(
title='Тестовая группа 1',
slug='test_slug_1',
description='Группа 1 для проведения тестов.',
)
cls.post = Post.objects.create(
text='Текст поста 1.',
author=cls.author,
group=cls.group,
image=cls.uploaded
)
@classmethod
def tearDownClass(cls):
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
super().tearDownClass()
def setUp(self):
"""Неавторизованный пользователь,
авторизованный пользователь,
авторизованный автор поста.
"""
self.guest_client = Client()
self.user = User.objects.create_user(username='Galina')
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
self.authorized_author = Client()
self.authorized_author.force_login(self.author)
cache.clear()
def checking_post_content(self, post, response):
self.assertEqual(post.text, self.post.text)
self.assertEqual(post.author, self.author)
self.assertEqual(post.pub_date, self.post.pub_date)
self.assertEqual(post.group, self.group)
self.assertContains(response, '<img')
def checking_profile_content(self, response):
self.assertIn('following', response.context)
self.assertIsInstance(response.context['following'], bool)
self.assertIn('post_author', response.context)
post_author = response.context['post_author']
self.assertIsInstance(post_author, User)
self.assertIn('page', response.context)
page = response.context['page']
self.assertGreater(len(page), 0)
post = page[0]
self.assertIsInstance(post, Post)
self.checking_post_content(post, response)
self.assertEqual(page.paginator.count, 1)
def checking_post_page_content(self, response):
self.assertIn('form', response.context)
self.assertIn('post', response.context)
post = response.context['post']
self.assertIsInstance(post, Post)
self.checking_post_content(post, response)
self.assertEqual(post.author.posts.count(), 1)
def test_urls_exists_and_uses_correct_template_for_guest_client(self):
"""URL-адрес использует соответствующий шаблон."""
templates_url_names = {
reverse('index'): 'index.html',
reverse('group_posts',
kwargs={'slug': self.group.slug}): 'group.html',
reverse('profile',
kwargs={'username': self.author.username}): 'profile.html',
reverse('post',
kwargs={'username': self.author.username,
'post_id': self.post.id}): 'post.html',
}
for address, template in templates_url_names.items():
with self.subTest(address=address):
response = self.guest_client.get(address)
self.assertTemplateUsed(response, template)
def test_url_exists_and_uses_correct_template_for_authorized_client(self):
"""URL-адрес использует соответствующий шаблон."""
templates_url_names = {
reverse('new_post'): 'post_new.html',
reverse('follow_index'): 'follow.html',
}
for address, template in templates_url_names.items():
with self.subTest(address=address):
response = self.authorized_client.get(address)
self.assertTemplateUsed(response, template)
def test_url_exists_at_desired_location_for_authorized_author(self):
"""URL-адрес использует соответствующий шаблон."""
response = self.authorized_author.get(
reverse('post_edit',
kwargs={'username': self.author.username,
'post_id': self.post.id})
)
self.assertTemplateUsed(response, 'post_edit.html')
def test_index_shows_correct_context(self):
"""Шаблон index.html сформирован с правильным контекстом."""
response = self.guest_client.get(reverse('index'))
self.assertIn('page', response.context)
self.assertIsInstance(response.context['page'], Page)
self.assertGreater(len(response.context['page']), 0)
post = response.context['page'][0]
self.assertIsInstance(post, Post)
self.checking_post_content(post, response)
def test_group_shows_correct_context(self):
"""Шаблон group.html сформирован
с правильным контекстом.
"""
response = self.guest_client.get(
reverse('group_posts', kwargs={'slug': f'{self.group.slug}'})
)
self.assertIn('group', response.context)
group = response.context['group']
self.assertIsInstance(group, Group)
self.assertIn('page', response.context)
self.assertIsInstance(response.context['page'], Page)
self.assertGreater(len(response.context['page']), 0)
post = response.context['page'][0]
self.assertIsInstance(post, Post)
self.assertEqual(group.title, self.group.title)
self.assertEqual(group.slug, self.group.slug)
self.assertEqual(group.description, self.group.description)
self.checking_post_content(post, response)
def test_post_not_in_group_2(self):
"""Пост не попал в группу, для
которой не был предназначен.
"""
self.group_2 = Group.objects.create(
title='Тестовая группа 2',
slug='test_slug_2',
description='Группа 2 для проведения тестов.',
)
response = self.guest_client.get(
reverse('group_posts', kwargs={'slug': f'{self.group_2.slug}'})
)
self.assertIn('page', response.context)
self.assertIsInstance(response.context['page'], Page)
posts = response.context['page']
self.assertNotIn(self.post, posts)
def test_post_new_shows_correct_form(self):
"""Шаблон post_new.html выводит
правильную форму создания поста.
"""
response = self.authorized_client.get(
reverse('new_post')
)
self.assertIn('form', response.context)
self.assertIsInstance(response.context['form'], PostForm)
form_fields = {
'group': forms.fields.ChoiceField,
'text': forms.fields.CharField,
'image': forms.fields.ImageField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context['form'].fields[value]
self.assertIsInstance(form_field, expected)
def test_profile_shows_correct_context_for_guest_client(self):
"""Шаблон profile.html для анонимного пользователя
сформирован с правильным контекстом.
"""
response = self.guest_client.get(
reverse('profile', kwargs={'username': self.author.username})
)
self.assertEqual(response.context['following'], False)
self.checking_profile_content(response)
def test_profile_shows_correct_context_for_authorized_client(self):
"""Шаблон profile.html для авторизованного пользователя
сформирован с правильным контекстом.
"""
self.authorized_client.get(
reverse('profile_follow',
kwargs={'username': self.author.username}))
response = self.authorized_client.get(
reverse('profile', kwargs={'username': self.author.username})
)
self.assertEqual(response.context['following'], True)
self.checking_profile_content(response)
def test_post_shows_correct_context_for_guest_client(self):
"""Шаблон post.html для анонимного пользователя
сформирован с правильным контекстом.
"""
response = self.guest_client.get(
reverse('post',
kwargs={'username': self.author.username,
'post_id': self.post.id})
)
self.assertIsInstance(response.context['form'], CommentForm)
self.checking_post_page_content(response)
def test_post_shows_correct_context_for_authorized_client(self):
"""Шаблон post.html для авторизованного пользователя
сформирован с правильным контекстом.
"""
response = self.authorized_client.get(
reverse('post',
kwargs={'username': self.author.username,
'post_id': self.post.id})
)
self.assertIsInstance(response.context['form'], CommentForm)
form_field = response.context['form'].fields['text']
self.assertIsInstance(form_field, forms.fields.CharField)
self.checking_post_page_content(response)
def test_can_add_comment(self):
"""В шаблон post.html авторизованный пользователь
может добавить комментарий.
"""
comments_count = self.post.comments.count()
form_data = {'text': 'Это новый комментарий'}
response = self.authorized_client.post(
reverse('add_comment',
kwargs={'username': self.author.username,
'post_id': self.post.id}),
data=form_data,
follow=True
)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertRedirects(response,
reverse('post',
kwargs={'username': self.author.username,
'post_id': self.post.id}))
self.assertNotEqual(comments_count, self.post.comments.count())
def test_can_not_add_comment(self):
"""В шаблон post.html анонимный пользователь
не может добавить комментарий.
"""
comments_count = self.post.comments.count()
form_data = {'text': 'Это новый комментарий'}
response = self.guest_client.post(
reverse('add_comment',
kwargs={'username': self.author.username,
'post_id': self.post.id}),
data=form_data,
follow=True
)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertRedirects(response,
f'{reverse("login")}?next=/{self.author.username}'
f'/{self.post.id}/comment/')
self.assertEqual(comments_count, self.post.comments.count())
def test_post_edit_shows_correct_context(self):
"""Шаблон post_edit.html
сформирован с правильным контекстом.
"""
response = self.authorized_author.get(
reverse('post_edit',
kwargs={'username': self.author.username,
'post_id': self.post.id})
)
form_fields_filled = {
'group': self.group.id,
'text': self.post.text,
'image': self.post.image,
}
self.assertIn('form', response.context)
self.assertIsInstance(response.context['form'], PostForm)
self.assertIn('username', response.context)
self.assertIsInstance(response.context['username'], str)
self.assertIn('post', response.context)
post = response.context['post']
self.assertIsInstance(post, Post)
self.assertEqual(post.group.id, form_fields_filled['group'])
self.assertEqual(post.text, form_fields_filled['text'])
self.assertEqual(post.image, form_fields_filled['image'])
for value, expected in form_fields_filled.items():
with self.subTest(value=value):
form_field = response.context['form'].initial[value]
self.assertEqual(form_field, expected)
def test_follow_exist(self):
"""Авторизованный пользователь может
подписываться на автора поста.
"""
response = self.authorized_client.get(
reverse('profile_follow',
kwargs={'username': self.author.username}), follow=True)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertRedirects(
response, reverse('profile',
kwargs={'username': self.author.username})
)
follow = Follow.objects.filter(user=self.user, author=self.author)
self.assertIs(follow.exists(), True)
def test_unfollow_exist(self):
"""Авторизованный пользователь может
отписываться от автора поста.
"""
response = self.authorized_client.get(
reverse('profile_unfollow',
kwargs={'username': self.author.username}), follow=True)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertRedirects(
response, reverse('profile',
kwargs={'username': self.author.username})
)
follow = Follow.objects.filter(user=self.user, author=self.author)
self.assertIs(follow.exists(), False)
def test_follow_shows_followings_of_client(self):
"""Шаблон follow.html содержит
подписки пользователя.
"""
self.authorized_client.get(
reverse('profile_follow',
kwargs={'username': self.author.username}))
post_new = Post.objects.create(
text='Новый пост.',
author=self.author,
group=self.group,
image=self.uploaded
)
response = self.authorized_client.get(reverse('follow_index'))
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertIn('page', response.context)
self.assertIsInstance(response.context['page'], Page)
self.assertIn(post_new, response.context['page'])
def test_follow_do_not_show_followings_of_another_client(self):
"""Шаблон follow.html не содержит
подписки других пользователей.
"""
post_new = Post.objects.create(
text='Новый пост.',
author=self.author,
group=self.group,
image=self.uploaded
)
response = self.authorized_client.get(reverse('follow_index'))
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertIn('page', response.context)
self.assertIsInstance(response.context['page'], Page)
self.assertNotIn(post_new, response.context['page'])
| 40.976623 | 79 | 0.61771 |
4a25480eae4cc89441bdfba7a0937e6c8735d281 | 82 | py | Python | line/messaging/events/unfollow.py | shlee322/python-line-api | 674a9c0f4f7e4faa5d813fbd3d6e603b128d9be7 | [
"Apache-2.0"
] | 6 | 2016-10-02T15:27:55.000Z | 2016-12-01T15:08:31.000Z | line/messaging/events/unfollow.py | shlee322/python-line-api | 674a9c0f4f7e4faa5d813fbd3d6e603b128d9be7 | [
"Apache-2.0"
] | null | null | null | line/messaging/events/unfollow.py | shlee322/python-line-api | 674a9c0f4f7e4faa5d813fbd3d6e603b128d9be7 | [
"Apache-2.0"
] | 3 | 2016-10-04T16:43:48.000Z | 2020-03-01T08:08:37.000Z | from .event import Event
class UnfollowEvent(Event):
type = 'unfollow'
| 13.666667 | 28 | 0.670732 |
4a2548cb6ae24444d4e10984f4f26784ab43ef03 | 5,970 | py | Python | beta/tests/tensorflow/sparsity/magnitude/test_algorithm.py | kshpv/nncf_pytorch | 9f7035e31732f5a3c0403edce759729d9425d4a5 | [
"Apache-2.0"
] | null | null | null | beta/tests/tensorflow/sparsity/magnitude/test_algorithm.py | kshpv/nncf_pytorch | 9f7035e31732f5a3c0403edce759729d9425d4a5 | [
"Apache-2.0"
] | null | null | null | beta/tests/tensorflow/sparsity/magnitude/test_algorithm.py | kshpv/nncf_pytorch | 9f7035e31732f5a3c0403edce759729d9425d4a5 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
from addict import Dict
import numpy as np
import pytest
from pytest import approx
from beta.nncf.tensorflow.layers.wrapper import NNCFWrapper
from beta.nncf.tensorflow.sparsity.magnitude.operation import BinaryMask
from beta.nncf.tensorflow.sparsity.magnitude.algorithm import MagnitudeSparsityController
from beta.nncf.tensorflow.sparsity.magnitude.functions import normed_magnitude
from beta.tests.tensorflow.helpers import check_equal, create_compressed_model_and_algo_for_test, \
get_mock_model, get_empty_config, get_basic_conv_test_model
from beta.tests.tensorflow.sparsity.magnitude.test_helpers import get_magnitude_test_model, \
get_basic_magnitude_sparsity_config, ref_mask_2, ref_mask_1
def test_can_create_magnitude_sparse_algo__with_defaults():
model = get_magnitude_test_model()
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = \
{'schedule': 'multistep'}
sparse_model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
assert isinstance(compression_ctrl, MagnitudeSparsityController)
assert compression_ctrl.sparsity_level == approx(0.1)
conv_names = [layer.name for layer in model.layers if isinstance(layer, tf.keras.layers.Conv2D)]
wrappers = [layer for layer in sparse_model.layers if isinstance(layer, NNCFWrapper)]
correct_wrappers = [wrapper for wrapper in wrappers if wrapper.layer.name in conv_names]
assert len(conv_names) == len(wrappers)
assert len(conv_names) == len(correct_wrappers)
assert compression_ctrl.threshold == approx(0.24, 0.1)
assert isinstance(compression_ctrl.weight_importance, type(normed_magnitude))
for i, wrapper in enumerate(wrappers):
ref_mask = tf.ones_like(wrapper.weights[-1]) if i == 0 else ref_mask_2
mask = list(wrapper.ops_weights.values())[0]['mask']
op = list(wrapper.weights_attr_ops['kernel'].values())[0]
tf.assert_equal(mask, ref_mask)
assert isinstance(op, BinaryMask)
@pytest.mark.parametrize(
('weight_importance', 'sparsity_level', 'threshold'),
(
('normed_abs', None, 0.219),
('abs', None, 9),
('normed_abs', 0.5, 0.243),
('abs', 0.5, 10),
)
)
def test_magnitude_sparse_algo_sets_threshold(weight_importance, sparsity_level, threshold):
model = get_magnitude_test_model()
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = {'schedule': 'multistep',
'weight_importance': weight_importance}
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
if sparsity_level:
compression_ctrl.set_sparsity_level(sparsity_level)
assert compression_ctrl.threshold == pytest.approx(threshold, 0.01)
def test_can_create_magnitude_algo__without_levels():
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = {'schedule': 'multistep', 'multistep_steps': [1]}
_, compression_ctrl = create_compressed_model_and_algo_for_test(get_mock_model(), config)
assert compression_ctrl.sparsity_level == approx(0.1)
def test_can_not_create_magnitude_algo__with_not_matched_steps_and_levels():
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = {'schedule': 'multistep', 'multistep_sparsity_levels': [0.1],
'multistep_steps': [1, 2]}
with pytest.raises(AttributeError):
_, _ = create_compressed_model_and_algo_for_test(get_mock_model(), config)
def test_magnitude_algo_set_binary_mask_on_forward():
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = {'weight_importance': 'abs'}
sparse_model, compression_ctrl = create_compressed_model_and_algo_for_test(get_magnitude_test_model(), config)
compression_ctrl.set_sparsity_level(0.3)
check_equal(ref_mask_1, sparse_model.layers[1].weights[-1])
check_equal(ref_mask_2, sparse_model.layers[2].weights[-1])
def test_magnitude_algo_binary_masks_are_applied():
input_shape = (1, 5, 5, 1)
model = get_basic_conv_test_model(input_shape=input_shape[1:])
config = get_empty_config(input_sample_sizes=input_shape)
config.update(Dict({'compression': {'algorithm': "magnitude_sparsity"}}))
compressed_model, _ = create_compressed_model_and_algo_for_test(model, config)
conv = compressed_model.layers[1]
op_name = list(conv.ops_weights.keys())[0]
conv.ops_weights[op_name] = {'mask': tf.ones_like(conv.weights[0])}
input_ = tf.ones(input_shape)
ref_output_1 = -4 * tf.ones((1, 4, 4, 2))
output_1 = compressed_model(input_)
tf.assert_equal(output_1, ref_output_1)
np_mask = conv.ops_weights[op_name]['mask'].numpy()
np_mask[0, 1, 0, 0] = 0
np_mask[1, 0, 0, 1] = 0
conv.ops_weights[op_name] = {'mask': tf.constant(np_mask)}
ref_output_2 = - 3 * tf.ones_like(ref_output_1)
output_2 = compressed_model(input_)
tf.assert_equal(output_2, ref_output_2)
np_mask[0, 1, 0, 1] = 0
conv.ops_weights[op_name] = {'mask': tf.constant(np_mask)}
ref_output_3 = ref_output_2.numpy()
ref_output_3[..., 1] = -2 * np.ones_like(ref_output_1[..., 1])
ref_output_3 = tf.constant(ref_output_3)
output_3 = compressed_model(input_)
tf.assert_equal(output_3, ref_output_3)
| 44.887218 | 114 | 0.740536 |
4a2548f625053c4de7c52887b83958b1bc69ae8b | 370 | py | Python | healthcheck/settings/dev.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | null | null | null | healthcheck/settings/dev.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | 23 | 2020-07-16T15:40:35.000Z | 2021-12-13T13:59:30.000Z | healthcheck/settings/dev.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | 1 | 2021-02-24T04:58:40.000Z | 2021-02-24T04:58:40.000Z | from healthcheck.settings.base import * # noqa: F403
SECRET_KEY = env.str("SECRET_KEY", "testsecretkey") # noqa: F405
DEBUG = env.bool("DEBUG", True) # noqa: F405
DATABASES = {
"default": env.db( # noqa: F405
default="postgres://postgres:postgres@localhost:5432/healthcheck"
),
}
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default="*") # noqa: F405
| 33.636364 | 73 | 0.672973 |
4a254a198bdd57cf8e6bbad3b41ec240ff6d6d17 | 9,322 | py | Python | voltagebudget/neurons.py | voytekresearch/voltagebudget | 358318a3d3b28d18a3d2a7321b0691479d2536df | [
"MIT"
] | null | null | null | voltagebudget/neurons.py | voytekresearch/voltagebudget | 358318a3d3b28d18a3d2a7321b0691479d2536df | [
"MIT"
] | null | null | null | voltagebudget/neurons.py | voytekresearch/voltagebudget | 358318a3d3b28d18a3d2a7321b0691479d2536df | [
"MIT"
] | null | null | null | import inspect
import csv
import numpy as np
from brian2 import *
from copy import deepcopy
from voltagebudget.util import burst
from voltagebudget.util import pulse
def shadow_adex(N, time, ns, ts, **adex_kwargs):
"""Est. the 'shadow voltage' of the AdEx membrane voltage."""
# In the neuron can't fire, we're in the shadow realm!
V_t = 1000 # 1000 volts is infinity, for neurons.
_, _, voltages = adex(N, time, ns, ts, budget=True, V_t=V_t, **adex_kwargs)
return voltages
def _parse_membrane_param(x, N, prng):
try:
if len(x) == 2:
x_min, x_max = x
x = prng.uniform(x_min, x_max, N)
else:
raise ValueError("Parameters must be scalars, or 2 V_lement lists")
except TypeError:
pass
return x, prng
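# For illustration only (not called anywhere in this module): a scalar is passed
# through unchanged, while a 2-element [low, high] pair is expanded to N values
# drawn uniformly from that range, e.g.
#   _parse_membrane_param(10e-9, 3, prng)         -> 10e-9
#   _parse_membrane_param([5e-9, 15e-9], 3, prng) -> array of 3 samples in [5e-9, 15e-9)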
# TODO: add sigma
def adex(N,
time,
ns,
ts,
E=0,
n_cycles=1,
w_in=0.8e-9,
tau_in=5e-3,
bias_in=0.0e-9,
V_t=-50.0e-3,
V_thresh=0.0,
f=0,
A=.1e-9,
phi=0,
sigma=0,
C=200e-12,
g_l=10e-9,
V_l=-70e-3,
a=0e-9,
b=10e-12,
tau_w=30e-3,
V_rheo=-48e-3,
delta_t=2e-3,
time_step=1e-5,
budget=True,
report=None,
save_args=None,
pulse_params=None,
seed_value=42):
"""A AdEx neuron
Params
------
time : Numeric
Simulation run time (seconds)
[...]
pulse_params: None or a tuple: (I, on, off)
Inject a current I, starting at on, ending on off
seed : None, int
The random seed
"""
# -----------------------------------------------------------------
# Plant all the seeds!
seed(seed_value)
prng = np.random.RandomState(seed_value)
# Integration settings
defaultclock.dt = time_step * second
prefs.codegen.target = 'numpy'
# -----------------------------------------------------------------
if save_args is not None:
skip = ['ns', 'ts', 'save_args']
arg_names = inspect.getargspec(adex)[0]
args = []
for arg in arg_names:
if arg not in skip:
row = (arg, eval(arg))
args.append(row)
with open("{}.csv".format(save_args), "w") as fi:
writer = csv.writer(fi, delimiter=",")
writer.writerows(args)
# -----------------------------------------------------------------
# If there's no input, return empty
if (ns.shape[0] == 0) and (pulse_params is None):
return np.array([]), np.array([]), dict()
# -----------------------------------------------------------------
# Adex dynamics params
g_l, prng = _parse_membrane_param(g_l, N, prng)
V_l, prng = _parse_membrane_param(V_l, N, prng)
C, prng = _parse_membrane_param(C, N, prng)
# Potentially random synaptic params
# Note: w_in gets created after synaptic input is
# Defined.
bias_in, prng = _parse_membrane_param(bias_in, N, prng)
tau_in, prng = _parse_membrane_param(tau_in, N, prng)
# Potentially random membrane params
V_rheo, prng = _parse_membrane_param(V_rheo, N, prng)
a, prng = _parse_membrane_param(a, N, prng)
b, prng = _parse_membrane_param(b, N, prng)
delta_t, prng = _parse_membrane_param(delta_t, N, prng)
tau_w, prng = _parse_membrane_param(tau_w, N, prng)
# Fixed membrane dynamics
sigma *= siemens
V_cut = V_t + 8 * np.mean(delta_t)
V_thresh *= volt
# Oscillation params
f *= Hz
A *= amp
phi *= second
# -----------------------------------------------------------------
# Define an adex neuron, and its connections
eqs = """
dv/dt = (-g_l * (v - V_l) + (g_l * delta_t * exp((v - V_t) / delta_t)) + I_in + I_osc(t) + I_noise + I_ext + bias_in - w) / C : volt
dw/dt = (a * (v - V_l) - w) / tau_w : amp
dg_in/dt = -g_in / tau_in : siemens
dg_noise/dt = -(g_noise + (sigma * sqrt(tau_in) * xi)) / tau_in : siemens
I_in = g_in * (v - V_l) : amp
I_noise = g_noise * (v - V_l) : amp
C : farad
g_l : siemens
a : siemens
b : amp
delta_t : volt
tau_w : second
V_rheo : volt
V_l : volt
bias_in : amp
tau_in : second
"""
# Add step?
    # A step of current injection?
if pulse_params is not None:
I, t_on, t_off = pulse_params
waves = pulse(I, t_on, t_off, time, time_step)
I_sq = TimedArray(waves, dt=time_step * second)
eqs += """I_ext = I_sq(t) * amp : amp"""
else:
eqs += """I_ext = 0 * amp : amp"""
# Create osc/burst
if np.isclose(E, 0.0):
E = time
_, I_osc = burst((0, time), E, n_cycles, A,
float(f), float(phi), float(time_step))
I_osc = TimedArray(I_osc, dt=time_step * second)
# Def the population
P_n = NeuronGroup(
N,
model=eqs,
threshold='v > V_thresh',
reset="v = V_rheo; w += b",
method='euler')
# Init adex params
# Fixed voltages neuron params
V_t *= volt
V_cut *= volt
P_n.a = a * siemens
P_n.b = b * amp
P_n.delta_t = delta_t * volt
P_n.tau_w = tau_w * second
P_n.V_rheo = V_rheo * volt
P_n.C = C * farad
P_n.g_l = g_l * siemens
P_n.V_l = V_l * volt
P_n.bias_in = bias_in * amp
P_n.tau_in = tau_in * second
# Init V0, w0
V_rest = V_l + (bias_in / g_l)
P_n.v = V_rheo * volt
P_n.w = 0 * pamp
# -----------------------------------------------------------------
# Add synaptic input into the network.
if ns.size > 0:
P_stim = SpikeGeneratorGroup(np.max(ns) + 1, ns, ts * second)
C_stim = Synapses(
P_stim, P_n, model='w_in : siemens', on_pre='g_in += w_in')
C_stim.connect()
# (Finally) Potentially random weights
w_in, prng = _parse_membrane_param(w_in, len(C_stim), prng)
C_stim.w_in = w_in * siemens
# -----------------------------------------------------------------
# Record input and voltage
spikes_n = SpikeMonitor(P_n)
record = ['v', 'I_ext']
traces_n = StateMonitor(P_n, record, record=True)
# -----------------------------------------------------------------
# Build the model!
net = Network(P_n, traces_n, spikes_n)
net.store('no_stim')
# -
# Run the net without any stimulation.
# (This strictly speaking isn't
# necessary, but I can't get Brian to express the needed
# diff eq to get the osc budget term in one pass...)
net.run(time * second, report=report)
V_osc = deepcopy(np.asarray(traces_n.v_))
# -
# Run with stim
net.restore('no_stim')
# Add spikes?
if ns.size > 0:
net.add([P_stim, C_stim])
net.run(time * second, report=report)
# -----------------------------------------------------------------
# Extract data from the run model
# Spikes
ns_e = np.asarray(spikes_n.i_)
ts_e = np.asarray(spikes_n.t_)
# Define the return objects
result = [ns_e, ts_e]
# Define the terms that go into the budget
# (these get added to result at the end)
if budget:
# -
# Drop brian units...
V_cut = float(V_cut)
V_t = float(V_t)
V_thresh = float(V_thresh)
V_rheo = np.asarray(V_rheo)
V_leak = np.asarray(V_l)
# -
# Get Vm
V_m = np.asarray(traces_n.v_)
# -
# Rectify away spikes
# V_m
V_m_thresh = V_m.copy()
if V_rheo.size > 1:
# V_m
rect_mask = V_m_thresh > V_rheo[:, None]
for i in range(V_m_thresh.shape[0]):
V_m_thresh[i, rect_mask[i, :]] = V_rheo[i]
# and V_osc
rect_mask = V_osc > V_rheo[:, None]
for i in range(V_osc.shape[0]):
V_osc[i, rect_mask[i, :]] = V_rheo[i]
else:
# V_m and V_osc
V_m_thresh[V_m_thresh > V_rheo] = V_rheo
V_osc[V_osc > V_rheo] = V_rheo
# Est. Comp AFTER spikes have been removed
V_comp = V_osc - V_m_thresh
V_comp[V_comp > 0] = 0 # Nonsense < 0 values creep in. Drop 'em.
# Recenter V_osc so unit scale matches comp
if V_leak.size > 1:
V_osc = V_leak[:, None] - V_osc
else:
V_osc = V_leak - V_osc
# Est free.
if V_rheo.size > 1:
V_free = V_rheo[:, None] - V_m_thresh
else:
V_free = V_rheo - V_m_thresh
# Budget
V_rest = np.asarray(V_rest)
V_budget = V_rheo - V_rest
# -
# Build final budget for return
vs = dict(
tau_m=np.asarray(C / g_l),
times=np.asarray(traces_n.t_),
I_ext=np.asarray(traces_n.I_ext_),
V_budget=V_budget,
V_m=V_m,
V_m_thresh=V_m_thresh,
V_comp=V_comp,
V_osc=V_osc,
V_free=V_free,
V_rheo=V_rheo,
V_rest=V_rest,
V_leak=V_leak,
V_cut=V_cut,
V_thresh=V_thresh,
V_t=V_t)
# -
# Add the budget to the return var, result
result.append(vs)
return result
| 28.078313 | 136 | 0.513624 |
4a254a7a06e89fd5b3018e75708e502d983fe7ee | 4,045 | py | Python | tests/test_endpoints_cbv.py | jekel/sanic-jwt | 4f6ab07376ad90011d40e205feacae8219359d6f | [
"MIT"
] | 226 | 2017-09-05T08:23:58.000Z | 2022-03-28T09:23:47.000Z | tests/test_endpoints_cbv.py | jekel/sanic-jwt | 4f6ab07376ad90011d40e205feacae8219359d6f | [
"MIT"
] | 179 | 2017-09-27T08:33:16.000Z | 2022-01-28T20:35:23.000Z | tests/test_endpoints_cbv.py | jekel/sanic-jwt | 4f6ab07376ad90011d40e205feacae8219359d6f | [
"MIT"
] | 45 | 2017-10-14T10:26:46.000Z | 2022-02-04T15:01:20.000Z | import jwt
from sanic import Sanic
from sanic.response import json
from sanic.views import HTTPMethodView
from sanic_jwt import exceptions, Initialize
from sanic_jwt.decorators import protected
class User(object):
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def to_dict(self):
properties = ["user_id", "username"]
return {prop: getattr(self, prop, None) for prop in properties}
users = [User(1, "user1", "abcxyz"), User(2, "user2", "abcxyz")]
username_table = {u.username: u for u in users}
# userid_table = {u.user_id: u for u in users}
async def authenticate(request, *args, **kwargs):
username = request.json.get("username", None)
password = request.json.get("password", None)
if not username or not password:
raise exceptions.AuthenticationFailed("Missing username or password.")
user = username_table.get(username, None)
if user is None:
raise exceptions.AuthenticationFailed("User not found.")
if password != user.password:
raise exceptions.AuthenticationFailed("Password is incorrect.")
return user
sanic_app = Sanic("sanic-jwt-test")
sanic_jwt = Initialize(sanic_app, authenticate=authenticate)
class PublicView(HTTPMethodView):
def get(self, request):
return json({"hello": "world"})
class ProtectedView(HTTPMethodView):
decorators = [protected()]
async def get(self, request):
return json({"protected": True})
class PartiallyProtectedView(HTTPMethodView):
async def get(self, request):
return json({"protected": True})
@protected()
async def patch(self, request):
return json({"protected": True})
sanic_app.add_route(PublicView.as_view(), "/")
sanic_app.add_route(ProtectedView.as_view(), "/protected")
sanic_app.add_route(PartiallyProtectedView.as_view(), "/partially")
class TestEndpointsCBV(object):
def test_unprotected(self):
_, response = sanic_app.test_client.get("/")
assert response.status == 200
def test_protected(self):
_, response = sanic_app.test_client.get("/protected")
assert response.status == 401
assert response.json.get("exception") == "Unauthorized"
assert "Authorization header not present." in response.json.get(
"reasons"
)
def test_partially_protected(self):
_, response = sanic_app.test_client.get("/partially")
assert response.status == 200
_, response = sanic_app.test_client.patch("/partially")
assert response.status == 401
assert response.json.get("exception") == "Unauthorized"
assert "Authorization header not present." in response.json.get(
"reasons"
)
def test_auth_invalid_method(self):
_, response = sanic_app.test_client.get("/auth")
assert response.status == 405
assert b"Method GET not allowed for URL /auth" in response.body
def test_auth_proper_credentials(self):
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
access_token = response.json.get(
sanic_jwt.config.access_token_name(), None
)
payload = jwt.decode(
access_token,
sanic_jwt.config.secret(),
algorithms=sanic_jwt.config.algorithm(),
)
assert response.status == 200
assert access_token is not None
assert isinstance(payload, dict)
assert sanic_jwt.config.user_id() in payload
assert "exp" in payload
_, response = sanic_app.test_client.get(
"/protected",
headers={"Authorization": "Bearer {}".format(access_token)},
)
assert response.status == 200
_, response = sanic_app.test_client.patch(
"/partially",
headers={"Authorization": "Bearer {}".format(access_token)},
)
assert response.status == 200
| 30.186567 | 78 | 0.651422 |
4a254b3dd516a4e6308875532bce823495328fe1 | 2,294 | py | Python | Logica-Fuzzy/ex02.py | andressagomes26/Logica-Fuzzy | b0fcd987409a6d3a1ac5c877932d07ec6e75e261 | [
"MIT"
] | null | null | null | Logica-Fuzzy/ex02.py | andressagomes26/Logica-Fuzzy | b0fcd987409a6d3a1ac5c877932d07ec6e75e261 | [
"MIT"
] | null | null | null | Logica-Fuzzy/ex02.py | andressagomes26/Logica-Fuzzy | b0fcd987409a6d3a1ac5c877932d07ec6e75e261 | [
"MIT"
] | null | null | null | from ex01 import valor_min_PRESSAO, valor_medio_PRESSAO, valor_max_PRESSAO
from ex01 import valor_min_VELOCIDADE, valor_max_VELOCIDADE
'''
    STEP 2: Inference (apply the rules)
    * Apply the fuzzified values to the antecedents of the system's rules.
'''
pressao_pedal = float(input('Informe a Pressão no Pedal do freio (0 a 100): '))
velocidade_roda = float(input('Informe a Velocidade da Roda (0 a 100): '))
velocidade_carro = float(input('Informe a Velocidade do Carro (0 a 100): '))
libera_freio = 0.0
aplicar_freio = 0.0
''' Rule 1: If pressao_pedal is medium
            then aplicar_freio (apply the brake)
'''
def regra_1():
aplicar_freio = valor_medio_PRESSAO(pressao_pedal)
return aplicar_freio
''' Rule 2: If pressao_pedal = high
            and velocidade_carro = high
            and velocidade_roda = high
            then aplicar_freio
    -> aplicar_freio receives the smallest membership value (fuzzy AND)
'''
def regra_2():
aux1 = valor_max_PRESSAO(pressao_pedal)
aux2 = valor_max_VELOCIDADE(velocidade_carro)
aux3 = valor_max_VELOCIDADE(velocidade_roda)
aplicar_freio = min(aux1, aux2, aux3)
return aplicar_freio
''' Rule 3: If pressao_pedal = high
            and velocidade_carro = high
            and velocidade_roda = low
            then libera_freio
    -> libera_freio receives the smallest membership value (fuzzy AND)
'''
def regra_3():
aux4 = valor_max_PRESSAO(pressao_pedal)
aux5 = valor_max_VELOCIDADE(velocidade_carro)
aux6 = valor_min_VELOCIDADE(velocidade_roda)
libera_freio = min(aux4, aux5, aux6)
return libera_freio
''' Rule 4: If pressao_pedal = low
            then libera_freio (release the brake)
'''
def regra_4():
libera_freio = valor_min_PRESSAO(pressao_pedal)
return libera_freio
'''
    Mathematically defining 'aperta_freio':
    The results of the rules that conclude aplicar_freio are summed.
    So aplicar_freio receives the sum of the results of rule 1 and rule 2.
'''
def aperta_freio():
aplicar_freio = regra_1() + regra_2()
return aplicar_freio
'''
    Mathematically defining 'libera_freio':
    The results of the rules that conclude libera_freio are summed.
    So libera_freio receives the sum of the results of rule 3 and rule 4.
'''
def libera_freio():
libera_freio = regra_3() + regra_4()
return libera_freio | 29.410256 | 79 | 0.69442 |
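# A purely illustrative walk-through of the combination scheme above (numbers
# invented): if the memberships were
#   valor_max_PRESSAO(pressao_pedal)       = 0.8
#   valor_max_VELOCIDADE(velocidade_carro) = 0.6
#   valor_max_VELOCIDADE(velocidade_roda)  = 0.9
# then rule 2 fires with min(0.8, 0.6, 0.9) = 0.6 (fuzzy AND), and rules that
# share a conclusion are combined by summing their activations, as in
# aperta_freio() and libera_freio() above.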
4a254bb8229fb36cd4f9d5e888ea1e984fe91392 | 188 | py | Python | Processing.py | CommanderStorm/Roseninselachter-Processor | b9c264846ed39890fb3003662f063ec7123360bd | [
"Apache-2.0"
] | 1 | 2019-11-21T10:48:09.000Z | 2019-11-21T10:48:09.000Z | Processing.py | CommanderStorm/Roseninselachter-Processor | b9c264846ed39890fb3003662f063ec7123360bd | [
"Apache-2.0"
] | null | null | null | Processing.py | CommanderStorm/Roseninselachter-Processor | b9c264846ed39890fb3003662f063ec7123360bd | [
"Apache-2.0"
] | null | null | null | import json
def processing(JSONDaten_Filename):
with open(JSONDaten_Filename) as json_file:
data = json.load(json_file)
print(data['vertecies'])
print(data['edges'])
| 20.888889 | 47 | 0.696809 |
4a254ce86aa51ffc3706be1b1589cb389422091e | 1,265 | py | Python | project/tests/test__config.py | SweedInsight/flask-jwt-auth-backend | a80f0b2ce7880cdbe59931565ec9f6fcbbfa552f | [
"MIT"
] | null | null | null | project/tests/test__config.py | SweedInsight/flask-jwt-auth-backend | a80f0b2ce7880cdbe59931565ec9f6fcbbfa552f | [
"MIT"
] | null | null | null | project/tests/test__config.py | SweedInsight/flask-jwt-auth-backend | a80f0b2ce7880cdbe59931565ec9f6fcbbfa552f | [
"MIT"
] | null | null | null | # project/server/tests/test_config.py
import unittest
from flask import current_app
from flask_testing import TestCase
from project.server import app
class TestDevelopmentConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.DevelopmentConfig')
return app
def test_app_is_development(self):
self.assertTrue(app.config['DEBUG'] is True)
self.assertFalse(current_app is None)
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == 'postgresql://postgres:@localhost/flask_jwt_auth'
)
class TestTestingConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.TestingConfig')
return app
def test_app_is_testing(self):
self.assertTrue(app.config['DEBUG'])
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == 'postgresql://postgres:@localhost/flask_jwt_auth_test'
)
class TestProductionConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.ProductionConfig')
return app
def test_app_is_production(self):
self.assertTrue(app.config['DEBUG'] is False)
if __name__ == '__main__':
unittest.main()
| 27.5 | 107 | 0.702767 |
4a254d33e2e5f0965ffd89e2fae124095ca31916 | 6,857 | py | Python | eop/dataset.py | redhog/EmbarrassmentOfPandas | e0c5c38bfaa79c04424f0d8ecde2c83b7da64908 | [
"MIT"
] | null | null | null | eop/dataset.py | redhog/EmbarrassmentOfPandas | e0c5c38bfaa79c04424f0d8ecde2c83b7da64908 | [
"MIT"
] | null | null | null | eop/dataset.py | redhog/EmbarrassmentOfPandas | e0c5c38bfaa79c04424f0d8ecde2c83b7da64908 | [
"MIT"
] | 1 | 2021-04-28T22:03:09.000Z | 2021-04-28T22:03:09.000Z | import uuid
import weakref
import inspect
def valuerepr(obj):
s = str(obj)
if "\n" not in s:
return s[:30]
t = type(obj)
t = "%s.%s" % (t.__module__, t.__name__)
try:
return "<%s#%s>" % (t, hash(obj))
except:
return "<%s@%s>" % (t, id(obj))
def tagify(tag):
if isinstance(tag, dict):
return Tag(tag)
if isinstance(tag, slice):
return Tag({tag.start: tag.stop})
return tag
def to_tagset(key):
if isinstance(key, slice) and key.start is None and key.stop is None and key.step is None:
key = ()
if key is None:
key = ()
if not isinstance(key, (list, tuple, set)):
key = (key),
return frozenset(tagify(item) for item in key)
class Tag(object):
def __init__(self, *arg, **attrs):
if arg:
attrs = arg[0]
self.attrs = attrs
def __repr__(self):
return "[%s]" % ",".join("%s:%s" % (key, valuerepr(self.attrs[key])) for key in sorted(self.attrs.keys()))
def __hash__(self):
def hashorid(obj):
try:
return hash(obj)
except:
return id(obj)
return hash(",".join("%s:%s" % (key, hashorid(self.attrs[key])) for key in sorted(self.attrs.keys())))
def __eq__(self, other):
return repr(self) == repr(other)
def __getitem__(self, key):
return self.attrs[key]
class DataSetInstance(object):
def __init__(self, instance, *tags):
self.id = id(instance)
self.instance = instance
self.tags = frozenset(tags)
@property
def all_tags(self):
return frozenset.union(self.tags, inspect.getmro(type(self.instance)))
def __repr__(self):
res = valuerepr(self.instance)
if self.tags:
res += " / " + ",".join(str(tag) for tag in self.tags)
return res
class Storage(object):
def __init__(self):
self.by_tag = {}
self.datasets = {}
self.handlers = {}
def on(self, handler, *tags):
self.handlers[frozenset(tags)] = handler
def trigger(self, *tags, **kw):
for handler_tags, handler in self.handlers.items():
if len(handler_tags - frozenset(tags)) == 0:
handler(*tags, **kw)
def add(self, instance, *tags):
tags = [tagify(tag) for tag in tags]
instance = DataSetInstance(instance, *tags)
self.datasets[instance.id] = instance
for tag in instance.all_tags:
if tag not in self.by_tag:
self.by_tag[tag] = weakref.WeakSet()
self.by_tag[tag].add(instance)
self.trigger(action="add", instance=instance.instance, *instance.all_tags)
def query(self, qp):
if not qp:
return frozenset(self.datasets.values())
qs = [frozenset((self.datasets[id(t)],))
if id(t) in self.datasets
else frozenset(self.by_tag.get(tagify(t), ()))
for t in qp]
if not qs:
return frozenset()
return {instance for instance in frozenset.intersection(*qs)}
def instance_query(self, qp):
return {instance.instance for instance in self.query(qp)}
def remove(self, qp):
for instance in self.query(qp):
            del self.datasets[instance.id]
self.trigger(action="remove", instance=instance.instance, *qp)
def untag(self, qp, *tags):
for old_instance in self.query(qp):
instance = DataSetInstance(
old_instance.instance, *(old_instance.all_tags - frozenset(tags)))
self.datasets[instance.id] = instance
for tag in instance.all_tags:
self.by_tag[tag].add(instance)
self.trigger(action="untag", instance=instance.instance, tags=tags, *old_instance.all_tags)
def tag(self, qp, *tags):
for tag in tags:
if tag not in self.by_tag:
self.by_tag[tag] = weakref.WeakSet()
for old_instance in self.query(qp):
instance = DataSetInstance(
old_instance.instance, *(frozenset.union(old_instance.tags, tags)))
self.datasets[instance.id] = instance
for tag in instance.all_tags:
self.by_tag[tag].add(instance)
self.trigger(action="tag", instance=instance.instance, tags=tags, *instance.all_tags)
class DataSet(object):
def __new__(cls, items = []):
self = cls.new_from_storage_and_filter()
for tags, instance in items:
self.storage.add(instance, *tags)
return self
@classmethod
def new_from_storage_and_filter(cls, storage = None, filter=None):
self = object.__new__(cls)
self.storage = storage if storage is not None else Storage()
self.filter = filter if filter is not None else frozenset()
return self
def on(self, handler):
self.storage.on(handler, *self.filter)
def trigger(self, **kw):
self.storage.trigger(*self.filter, **kw)
@property
def tags(self):
res = self.storage.query(self.filter)
if not res: return frozenset()
return frozenset.union(*(instance.tags for instance in res))
def __call__(self, *arg):
return self.__getitem__(arg)
def __getitem__(self, qp):
return type(self).new_from_storage_and_filter(
self.storage, frozenset.union(self.filter, to_tagset(qp)))
def __setitem__(self, key, value):
# Make foo["tag1", "tag2"...] += "Ntag" work
if isinstance(value, DataSet) and id(value.storage) == id(self.storage):
return
self.storage.add(value, *frozenset.union(self.filter, to_tagset(key)))
def __delitem__(self, key):
self.storage.remove(
set.union(self.filter, frozenset.union(self.filter, to_tagset(key))))
def __repr__(self):
return "\n".join(repr(instance)
for instance in self.storage.query(self.filter))
def __len__(self):
return len(self.storage.instance_query(self.filter))
def __contains__(self, qp):
return len(self[qp]) > 0
def __iter__(self):
return iter(self.storage.instance_query(self.filter))
def __eq__(self, other):
return frozenset(self) == frozenset(other)
def __iadd__(self, tags):
self.storage.tag(self.filter, *to_tagset(tags))
return self
def __isub__(self, tags):
self.storage.untag(self.filter, *to_tagset(tags))
return self
def items(self):
return ((instance.tags, instance.instance) for instance in self.storage.query(self.filter))
def keys(self):
return iter(self.tags)
def values(self):
return iter(self)
| 32.808612 | 114 | 0.586116 |
4a254d47f70fe89c5643e5985dd52d55f47dfd33 | 42 | py | Python | hpvm/projects/predtuner/predtuner/approxes/__init__.py | vzyrianov/hpvm-autograd | 521cc3b684531548aea75f9fe3cc673aaa4a2e90 | [
"Apache-2.0"
] | null | null | null | hpvm/projects/predtuner/predtuner/approxes/__init__.py | vzyrianov/hpvm-autograd | 521cc3b684531548aea75f9fe3cc673aaa4a2e90 | [
"Apache-2.0"
] | null | null | null | hpvm/projects/predtuner/predtuner/approxes/__init__.py | vzyrianov/hpvm-autograd | 521cc3b684531548aea75f9fe3cc673aaa4a2e90 | [
"Apache-2.0"
] | null | null | null | from .approxes import get_knobs_from_file
| 21 | 41 | 0.880952 |
4a254df0989101ec50d259180e9ea8fd2cab294a | 1,238 | py | Python | imodels/experimental/bartpy/sigma.py | stjordanis/imodels | 3c31df3f3d600d3b9c07fabdffd375b93e139c50 | [
"MIT"
] | 102 | 2019-07-16T13:45:35.000Z | 2020-09-14T19:12:49.000Z | imodels/experimental/bartpy/sigma.py | stjordanis/imodels | 3c31df3f3d600d3b9c07fabdffd375b93e139c50 | [
"MIT"
] | 2 | 2020-01-03T20:47:14.000Z | 2020-01-03T21:17:39.000Z | imodels/experimental/bartpy/sigma.py | stjordanis/imodels | 3c31df3f3d600d3b9c07fabdffd375b93e139c50 | [
"MIT"
] | 8 | 2019-08-09T08:40:34.000Z | 2020-09-06T17:51:10.000Z |
class Sigma:
"""
A representation of the sigma term in the model.
Specifically, this is the sigma of y itself, i.e. the sigma in
y ~ Normal(sum_of_trees, sigma)
The default prior is an inverse gamma distribution on the variance
The parametrization is slightly different to the numpy gamma version, with the scale parameter inverted
Parameters
----------
alpha - the shape of the prior
beta - the scale of the prior
scaling_factor - the range of the original distribution
needed to rescale the variance into the original scale rather than on (-0.5, 0.5)
"""
def __init__(self, alpha: float, beta: float, scaling_factor: float, classification :bool = False):
self.alpha = alpha
self.beta = beta
self._current_value = 1.0
self.scaling_factor = scaling_factor
self._classification = classification
def set_value(self, value: float) -> None:
self._current_value = value
def current_value(self) -> float:
if self._classification:
return 1
return self._current_value
def current_unnormalized_value(self) -> float:
return self.current_value() * self.scaling_factor
| 33.459459 | 107 | 0.66559 |
4a254e332cc601cb1f42236589c430f0f4e438a2 | 2,943 | py | Python | day_6.py | kurtrm/advent_of_code_2018 | a3db9df31de730479c8c2b3f2869b274a0e024e9 | [
"MIT"
] | null | null | null | day_6.py | kurtrm/advent_of_code_2018 | a3db9df31de730479c8c2b3f2869b274a0e024e9 | [
"MIT"
] | null | null | null | day_6.py | kurtrm/advent_of_code_2018 | a3db9df31de730479c8c2b3f2869b274a0e024e9 | [
"MIT"
] | null | null | null | """
(1, 1), (8, 3) -> (5, 1), (3, 4)
(a, b), (c, d)
|a - c| + |b - d|
4
2
a a a a a . c c c c
a A a a a . c c c c
a a a d d e c c c c
a a d d d e c c C c
. . d D d e e c c c
b b . d e E e e c c
b B b . e e e e . .
b b b . e e e f f f
b b b . e e f f f f
b b b . f f f f F f
"""
import numpy as np
from scipy.spatial.distance import cityblock
from read_file import read_input
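# Quick sanity check of the Manhattan (cityblock) metric used throughout this
# file; the two points below are illustrative, not taken from the puzzle input.
assert cityblock((1, 1), (3, 4)) == abs(1 - 3) + abs(1 - 4)  # == 5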
def get_area():
"""
"""
coords_raw = read_input('input_6.txt')[:-1]
coords = [(int(coord.split(', ')[0]), int(coord.split(', ')[1])) for coord in coords_raw]
max_x = max(coords, key=lambda x: x[0])[0]
max_y = max(coords, key=lambda x: x[1])[1]
return max_x, max_y
def distances():
"""
construct area
for every point in the area, calculate distance between every input point and that point
which ever point has the smallest manhattan distance, set that coordinates value to the
integer value starting from 1
if equidistant, equal 0
"""
coords_raw = read_input('input_6.txt')[:-1]
coords = [(int(coord.split(', ')[0]), int(coord.split(', ')[1])) for coord in coords_raw]
max_x, max_y = get_area()
grid = np.zeros((max_y+1, max_x+1))
all_poss_coords = ((x, y) for x in range(max_x) for y in range(max_y))
for coord in all_poss_coords:
least = float('inf')
least_point = None
for i, point in enumerate(coords, 1):
dist = cityblock(coord, point)
if dist < least:
least = dist
least_point = i
grid[coord[1], coord[0]] = least_point
border_left = np.unique(grid[:, 0])
border_right = np.unique(grid[:, -1])
border_top = np.unique(grid[0])
border_bottom = np.unique(grid[-1])
border_nums = np.unique(np.concatenate([border_left, border_bottom, border_right, border_top]))
grid[np.isin(grid, border_nums)] = 0
return np.max(np.unique(grid, return_counts=True)[1][1:])
def safe_region():
"""
construct area
for every point in the area, calculate distance between every input point and that point
which ever point has the smallest manhattan distance, set that coordinates value to the
integer value starting from 1
if equidistant, equal 0
"""
coords_raw = read_input('input_6.txt')[:-1]
coords = [(int(coord.split(', ')[0]), int(coord.split(', ')[1])) for coord in coords_raw]
max_x, max_y = get_area()
# coords = [(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]
# max_x, max_y = 8, 9
grid = np.zeros((max_y+1, max_x+1))
all_poss_coords = ((x, y) for y in range(max_y+1) for x in range(max_x+1))
for coord in all_poss_coords:
dist = sum(cityblock(coord, point) for point in coords)
grid[coord[1], coord[0]] = dist
return len(grid[grid < 10000])
if __name__ == '__main__':
print(safe_region())
| 31.645161 | 100 | 0.584098 |
4a254e535c170f16cb768ab0327e0ad4beb92eb0 | 1,346 | py | Python | master_test1.py | DACUS1995/NAO---Robot-Human-interaction | f14233850afd83c3a450f39944745b5f7288ebd9 | [
"MIT"
] | 1 | 2019-08-30T13:07:24.000Z | 2019-08-30T13:07:24.000Z | master_test1.py | DACUS1995/NAO---Robot-Human-interaction | f14233850afd83c3a450f39944745b5f7288ebd9 | [
"MIT"
] | null | null | null | master_test1.py | DACUS1995/NAO---Robot-Human-interaction | f14233850afd83c3a450f39944745b5f7288ebd9 | [
"MIT"
] | null | null | null | import argparse
from naoqi import ALProxy
from naoqi import ALBroker
import time
IP = "172.19.8.207"
PORT = 9559
def main1(robotIP, PORT):
motionProxy = ALProxy("ALMotion", robotIP, PORT)
postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)
# Wake up robot
#motionProxy.wakeUp()
#time.sleep(5)
# Send robot to Stand Init
postureProxy.goToPosture("StandInit", 0.5)
time.sleep(2)
# Send robot to First Position
#postureProxy.goToPosture("right_hand_extended", 1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="172.19.8.207",#---modify here to update tha IP---"192.168.0.125"
help="Robot ip address")
parser.add_argument("--port", type=int, default=9559,#---modify here to update port---9559
help="Robot port number")
args = parser.parse_args()
main1(args.ip, args.port)
#---first pos
import normal_pos
#normal_pos.do()
normal_pos.do1()
time.sleep(5)
#---Take picture
path="/home/nao/Demo_SD/Pictures"
name="picture"
tts = ALProxy("ALTextToSpeech", IP, PORT)
#tts.say("I will take a picture")
import run_search_comand as run
tts.say("I will take a picture")
pictureCapture = ALProxy("ALPhotoCapture", IP, PORT)
pictureCapture.takePicture(path,name)
time.sleep(2)
run.do()
| 24.472727 | 115 | 0.679792 |
4a254ec3a5fc39927be2ca8daa0a47562fc65ff5 | 1,921 | py | Python | lm_eval/tasks/anli.py | bigscience-workshop/lm-evaluation-harness | c639c81974d6d0efea2e471f6292cf3c6ae67e4c | [
"MIT"
] | null | null | null | lm_eval/tasks/anli.py | bigscience-workshop/lm-evaluation-harness | c639c81974d6d0efea2e471f6292cf3c6ae67e4c | [
"MIT"
] | null | null | null | lm_eval/tasks/anli.py | bigscience-workshop/lm-evaluation-harness | c639c81974d6d0efea2e471f6292cf3c6ae67e4c | [
"MIT"
] | null | null | null | """
Adversarial NLI: A New Benchmark for Natural Language Understanding
https://arxiv.org/pdf/1910.14599.pdf
Adversarial NLI (ANLI) is a dataset collected via an iterative, adversarial
human-and-model-in-the-loop procedure. It consists of three rounds that progressively
increase in difficulty and complexity, and each question-answer includes annotator-
provided explanations.
Homepage: "https://github.com/facebookresearch/anli"
"""
import numpy as np
from lm_eval.base import rf, PromptSourceTask
from lm_eval.metrics import mean
_CITATION = """
@inproceedings{nie-etal-2020-adversarial,
title = "Adversarial {NLI}: A New Benchmark for Natural Language Understanding",
author = "Nie, Yixin and
Williams, Adina and
Dinan, Emily and
Bansal, Mohit and
Weston, Jason and
Kiela, Douwe",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
year = "2020",
publisher = "Association for Computational Linguistics",
}
"""
class ANLIBase(PromptSourceTask):
VERSION = 0
DATASET_PATH = "anli"
DATASET_NAME = None
SPLIT = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self.has_training_docs():
if self._training_docs is None:
self._training_docs = list(self.dataset["train_r" + str(self.SPLIT)])
return self._training_docs
def validation_docs(self):
if self.has_validation_docs():
return self.dataset["dev_r" + str(self.SPLIT)]
def test_docs(self):
if self.has_test_docs():
return self.dataset["test_r" + str(self.SPLIT)]
class ANLIRound1(ANLIBase):
SPLIT = 1
class ANLIRound2(ANLIBase):
SPLIT = 2
class ANLIRound3(ANLIBase):
SPLIT = 3
| 26.315068 | 106 | 0.690265 |
4a254f0d131438f3f59729371cae801ecdc12935 | 3,939 | py | Python | modules/icinga/files/usr/local/bin/check_pingdom.py | camdesgov/govuk-puppet | 927a0e617e7e678a0ef11a5409628487f3c828b0 | [
"MIT"
] | null | null | null | modules/icinga/files/usr/local/bin/check_pingdom.py | camdesgov/govuk-puppet | 927a0e617e7e678a0ef11a5409628487f3c828b0 | [
"MIT"
] | null | null | null | modules/icinga/files/usr/local/bin/check_pingdom.py | camdesgov/govuk-puppet | 927a0e617e7e678a0ef11a5409628487f3c828b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Testing: ./check_pingdom.py <checkid>
# Pingdom IDs are found in manifests/config/pingdom.pp
# You must have an /etc/pingdom.ini which looks like templates/etc/pingdom.ini.erb
#
import base64
import sys
import ConfigParser
import json
from urllib2 import *
# General Config
pingdom_api_host = "api.pingdom.com"
config_file = "/etc/pingdom.ini"
def unknown(message):
print "UNKNOWN: %s" % message
sys.exit(3)
def critical(message):
print "CRITICAL: %s" % message
sys.exit(2)
def warning(message):
print "WARNING: %s" % message
sys.exit(1)
def ok(message):
print "OK: %s" % message
sys.exit(0)
class Config :
pingdom_pass = None
pingdom_key = None
pingdom_user = None
def __init__(self) :
try:
config = ConfigParser.ConfigParser()
config.read(config_file)
self.pingdom_pass = config.get('DEFAULT','pingdom_pass')
self.pingdom_key = config.get('DEFAULT','pingdom_key')
self.pingdom_user = config.get('DEFAULT','pingdom_user')
except:
unknown("Could not read config file " % config_file)
def check_arguments():
if len(sys.argv) != 2:
unknown("No check ID passed as a parameter")
else:
check_id = sys.argv[1]
return check_id
def check_pingdom_up():
try:
urlopen("https://%s/" % pingdom_api_host, None, 5)
except HTTPError:
pass
except:
unknown("Pingdom API Down")
def parse_pingdom_result(json_result):
try:
message = "\n\n"
message += "Check Status: %s\n" % json_result['check']['status'].upper()
message += "Check Name: %s\n" % json_result['check']['name']
if json_result['check']['type']['http']['encryption']:
scheme = "https"
else:
scheme = "http"
message += "Check URL: %s://%s%s\n" % ( scheme,
json_result['check']['hostname'],
json_result['check']['type']['http']['url'] )
if 'shouldcontain' in json_result['check']['type']['http'].keys():
message += "Expected Text: %s\n" % json_result['check']['type']['http']['shouldcontain']
message += "Pingdom URL: https://my.pingdom.com/reports/uptime#check=%s&daterange=7days\n" % json_result['check']['id']
return message
except:
#This is extra details that are nice to have, it should never break execution
return ""
def get_pingdom_result(config,check_id):
try:
basic_auth_token = "Basic " + base64.b64encode("{0}:{1}".format(config.pingdom_user, config.pingdom_pass))
pingdom_url = "https://%s/api/2.0/checks/%s" % (pingdom_api_host, check_id)
req = Request(pingdom_url)
req.add_header("App-Key", config.pingdom_key)
req.add_header("Authorization", basic_auth_token)
try:
result = urlopen(req)
except HTTPError, e:
unknown("Could not retrieve check result (%s)" % e)
pingdom_check = json.loads(result.read())
try:
status = pingdom_check['check']['status']
message = parse_pingdom_result(pingdom_check)
if status == 'up':
ok("Pingdom reports this URL is UP" + message)
elif status in ['unknown']:
unknown("Pingdom check in unknown state" + message)
elif status in ['unconfirmed_down','paused']:
warning("Pingdom check is neither up nor down!" + message)
else:
critical("Pingdom reports this URL is not UP" + message)
except Exception, e:
unknown("Could not parse Pingdom output (%s)" % e)
except Exception, e:
unknown("Unknown %s retrieving check status" % e)
check_pingdom_up()
get_pingdom_result(Config(),check_arguments())
| 33.381356 | 129 | 0.593552 |
4a254f7bf2f79d2f3e6b8d64c203c4b9778f1740 | 446 | py | Python | users/migrations/0002_users_active_default.py | mitodl/ocw-studio | 949f96ec0647064f8d495ebdd22d66eea7d024a5 | [
"BSD-3-Clause"
] | 2 | 2020-08-07T15:55:41.000Z | 2021-08-16T18:28:09.000Z | users/migrations/0002_users_active_default.py | mitodl/ocw-studio | 949f96ec0647064f8d495ebdd22d66eea7d024a5 | [
"BSD-3-Clause"
] | 924 | 2020-08-10T17:54:19.000Z | 2022-03-31T21:15:17.000Z | users/migrations/0002_users_active_default.py | mitodl/ocw-studio | 949f96ec0647064f8d495ebdd22d66eea7d024a5 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.1.6 on 2021-04-02 17:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="user",
name="is_active",
field=models.BooleanField(
default=True, help_text="The user account is active"
),
),
]
| 21.238095 | 68 | 0.565022 |
4a254fa6c1504aa8e289dc2e95b65874a147f54d | 7,662 | py | Python | controllers/controller_commons/controller_communication_interface.py | tuncali/VeTeF | 3810cffd87fc6a9d815774cb3d914d3a47288055 | [
"Apache-2.0"
] | null | null | null | controllers/controller_commons/controller_communication_interface.py | tuncali/VeTeF | 3810cffd87fc6a9d815774cb3d914d3a47288055 | [
"Apache-2.0"
] | null | null | null | controllers/controller_commons/controller_communication_interface.py | tuncali/VeTeF | 3810cffd87fc6a9d815774cb3d914d3a47288055 | [
"Apache-2.0"
] | null | null | null | """Defines ControllerCommunicationInterface class"""
import struct
from webots_controller_parameter import WebotsControllerParameter
class ControllerCommunicationInterface(object):
"""ControllerCommunicationInterface class handles the messaging between supervisor and the vehicle controllers."""
VHC_POSITION_MESSAGE = 1
SET_CONTROLLER_PARAMETERS_MESSAGE = 2
def __init__(self):
pass
def interpret_message(self, message):
data = None
data_size = 0
(command, ) = struct.unpack('B', message[0:struct.calcsize('B')])
if command == self.VHC_POSITION_MESSAGE:
(data, data_size) = self.interpret_vehicle_position_message(message)
elif command == self.SET_CONTROLLER_PARAMETERS_MESSAGE:
(data, data_size) = self.interpret_set_controller_parameters_message(message)
return command, data, data_size
def receive_command(self, receiver):
# TODO: Change this to return an array of commands, and corresponding data
command = None
data = None
data_size = 0
message = []
if receiver.getQueueLength() > 0:
message = receiver.getData()
if len(message) > 0:
(command, data, data_size) = self.interpret_message(message)
receiver.nextPacket()
return command, data, len(message), data_size
def generate_vehicle_position_message(self, vhc_id, vhc_position):
message = struct.pack('B', self.VHC_POSITION_MESSAGE)
message += struct.pack('I', vhc_id)
message += struct.pack('ddd', vhc_position[0], vhc_position[1], vhc_position[2])
return message
def transmit_vehicle_position_message(self, emitter, vhc_id, vhc_position):
if emitter is not None:
message = self.generate_vehicle_position_message(vhc_id, vhc_position)
emitter.send(message)
def interpret_vehicle_position_message(self, message):
cur_msg_index = struct.calcsize('B') # Command is already read
vhc_position = [0.0, 0.0, 0.0]
(vhc_id,) = struct.unpack('I', message[cur_msg_index:cur_msg_index + struct.calcsize('I')])
cur_msg_index += struct.calcsize('I')
(vhc_position[0], vhc_position[1], vhc_position[2]) = \
struct.unpack('ddd', message[cur_msg_index:cur_msg_index + struct.calcsize('ddd')])
cur_msg_index += struct.calcsize('ddd')
data = VehiclePosition(vhc_id=vhc_id, vhc_position=vhc_position)
return data, cur_msg_index
def generate_controller_parameter_message(self, parameter_name='N/A', parameter_data=None):
"""Generates controller parameter message to be used inside add vehicle or set controller params messages.
Message structure: Length of parameter name string(1),
parameter name string(?), Parameter data type character(1), Length of parameter data(4), parameter data(?)"""
message = struct.pack('B', len(parameter_name))
message += struct.pack("%ds" % (len(parameter_name),), parameter_name)
data_type_name = 'x'
data_length = 0
if type(parameter_data) == list:
data_length = len(parameter_data)
if len(parameter_data) > 0:
if type(parameter_data[0]) is bool:
data_type_name = '?'
elif type(parameter_data[0]) is int:
data_type_name = 'I'
elif type(parameter_data[0]) is float:
data_type_name = 'd'
elif type(parameter_data) == str:
data_length = len(parameter_data)
data_type_name = 's'
elif type(parameter_data) is bool:
data_length = 1
data_type_name = '?'
elif type(parameter_data) is int:
data_length = 1
data_type_name = 'I'
elif type(parameter_data) is float:
data_length = 1
data_type_name = 'd'
message += struct.pack('s', data_type_name)
message += struct.pack('I', data_length)
pack_str = '%s{}'.format(data_type_name)
message += struct.pack(pack_str % data_length, *parameter_data)
return message
def interpret_controller_parameter_message(self, message):
cur_msg_index = 0
(param_name_length, ) = struct.unpack('B', message[cur_msg_index:cur_msg_index + struct.calcsize('B')])
cur_msg_index += struct.calcsize('B')
(parameter_name, ) = \
struct.unpack('%ds' % param_name_length,
message[cur_msg_index:cur_msg_index + struct.calcsize('%ds' % param_name_length)])
cur_msg_index += struct.calcsize('%ds' % param_name_length)
(data_type_name, ) = struct.unpack('s', message[cur_msg_index:cur_msg_index + struct.calcsize('s')])
cur_msg_index += struct.calcsize('s')
(data_length, ) = struct.unpack('I', message[cur_msg_index:cur_msg_index + struct.calcsize('I')])
cur_msg_index += struct.calcsize('I')
unpack_str = '%s{}'.format(data_type_name)
parameter_data = \
list(struct.unpack(unpack_str % data_length,
message[cur_msg_index:cur_msg_index + struct.calcsize(unpack_str % data_length)]))
cur_msg_index += struct.calcsize(unpack_str % data_length)
data = WebotsControllerParameter(parameter_name=parameter_name, parameter_data=parameter_data)
return data, cur_msg_index
def generate_set_controller_parameters_message(self, vhc_id=0, parameter_name='N/A', parameter_data=None):
"""Generates SET_CONTROLLER_PARAMETERS message.
Message structure: Command(1), Applicable vehicle id(4), Length of parameter name string(1),
parameter name string(?), Parameter data type character(1), Length of parameter data(4), parameter data(?)"""
message = struct.pack('B', self.SET_CONTROLLER_PARAMETERS_MESSAGE)
message += struct.pack('I', int(vhc_id))
message += self.generate_controller_parameter_message(parameter_name, parameter_data)
return message
def transmit_set_controller_parameters_message(self, emitter, vhc_id=0, parameter_name='N/A', parameter_data=None):
if emitter is not None:
message = self.generate_set_controller_parameters_message(vhc_id=vhc_id,
parameter_name=parameter_name,
parameter_data=parameter_data)
emitter.send(message)
def interpret_set_controller_parameters_message(self, message):
cur_msg_index = struct.calcsize('B')
(vhc_id, ) = struct.unpack('I', message[cur_msg_index:cur_msg_index + struct.calcsize('I')])
cur_msg_index += struct.calcsize('I')
(data, data_size) = self.interpret_controller_parameter_message(message[cur_msg_index:])
data.set_vehicle_id(vhc_id)
return data, data_size + struct.calcsize('I') + struct.calcsize('B')
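def _parameter_message_roundtrip_example():
    # Hypothetical sketch (not called by any controller): packs one controller
    # parameter using the byte layout documented in the docstrings above and
    # reads it back. The parameter name and value are illustrative only.
    cci = ControllerCommunicationInterface()
    message = cci.generate_controller_parameter_message(parameter_name='target_speed',
                                                        parameter_data=[25.0])
    parameter, consumed = cci.interpret_controller_parameter_message(message)
    return parameter, consumed == len(message)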
class VehiclePosition(object):
def __init__(self, vhc_id=None, vhc_position=None):
if vhc_id is None:
self.vehicle_id = 0
else:
self.vehicle_id = vhc_id
if vhc_position is None:
self.position = [0.0, 0.0, 0.0]
else:
self.position = vhc_position
def set_vehicle_id(self, vhc_id):
self.vehicle_id = vhc_id
def get_vehicle_id(self):
return self.vehicle_id
def set_vehicle_position(self, position):
self.position = position
def get_vehicle_position(self):
return self.position
| 47.590062 | 119 | 0.647481 |
4a254fd429fb9e84e9247dffcda5fab0b5f33c6b | 3,115 | py | Python | e-paper/RaspberryPi&JetsonNano/python/examples/epd_7in5_V2_test.py | ms900ft/slowmovieui | 1238bb55cc092889bed2123f0803c93592a43016 | [
"MIT"
] | null | null | null | e-paper/RaspberryPi&JetsonNano/python/examples/epd_7in5_V2_test.py | ms900ft/slowmovieui | 1238bb55cc092889bed2123f0803c93592a43016 | [
"MIT"
] | null | null | null | e-paper/RaspberryPi&JetsonNano/python/examples/epd_7in5_V2_test.py | ms900ft/slowmovieui | 1238bb55cc092889bed2123f0803c93592a43016 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import os
picdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'pic')
libdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'lib')
if os.path.exists(libdir):
sys.path.append(libdir)
import logging
from waveshare_epd import epd7in5_V2
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
logging.basicConfig(level=logging.DEBUG)
try:
logging.info("epd7in5_V2 Demo")
epd = epd7in5_V2.EPD()
logging.info("init and Clear")
epd.init()
epd.Clear()
font24 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 24)
font18 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 18)
# Drawing on the Horizontal image
logging.info("1.Drawing on the Horizontal image...")
Himage = Image.new('1', (epd.width, epd.height), 255) # 255: clear the frame
draw = ImageDraw.Draw(Himage)
draw.text((10, 0), 'hello world', font = font24, fill = 0)
draw.text((10, 20), '7.5inch e-Paper', font = font24, fill = 0)
draw.text((150, 0), u'微雪电子', font = font24, fill = 0)
draw.line((20, 50, 70, 100), fill = 0)
draw.line((70, 50, 20, 100), fill = 0)
draw.rectangle((20, 50, 70, 100), outline = 0)
draw.line((165, 50, 165, 100), fill = 0)
draw.line((140, 75, 190, 75), fill = 0)
draw.arc((140, 50, 190, 100), 0, 360, fill = 0)
draw.rectangle((80, 50, 130, 100), fill = 0)
draw.chord((200, 50, 250, 100), 0, 360, fill = 0)
epd.display(epd.getbuffer(Himage))
time.sleep(2)
# Drawing on the Vertical image
logging.info("2.Drawing on the Vertical image...")
Limage = Image.new('1', (epd.height, epd.width), 255) # 255: clear the frame
draw = ImageDraw.Draw(Limage)
draw.text((2, 0), 'hello world', font = font18, fill = 0)
draw.text((2, 20), '7.5inch epd', font = font18, fill = 0)
draw.text((20, 50), u'微雪电子', font = font18, fill = 0)
draw.line((10, 90, 60, 140), fill = 0)
draw.line((60, 90, 10, 140), fill = 0)
draw.rectangle((10, 90, 60, 140), outline = 0)
draw.line((95, 90, 95, 140), fill = 0)
draw.line((70, 115, 120, 115), fill = 0)
draw.arc((70, 90, 120, 140), 0, 360, fill = 0)
draw.rectangle((10, 150, 60, 200), fill = 0)
draw.chord((70, 150, 120, 200), 0, 360, fill = 0)
epd.display(epd.getbuffer(Limage))
time.sleep(2)
logging.info("3.read bmp file")
Himage = Image.open(os.path.join(picdir, '7in5_V2.bmp'))
epd.display(epd.getbuffer(Himage))
time.sleep(2)
logging.info("4.read bmp file on window")
Himage2 = Image.new('1', (epd.height, epd.width), 255) # 255: clear the frame
bmp = Image.open(os.path.join(picdir, '100x100.bmp'))
Himage2.paste(bmp, (50,10))
epd.display(epd.getbuffer(Himage2))
time.sleep(2)
logging.info("Clear...")
epd.init()
epd.Clear()
logging.info("Goto Sleep...")
epd.sleep()
except IOError as e:
logging.info(e)
except KeyboardInterrupt:
logging.info("ctrl + c:")
epd7in5.epdconfig.module_exit()
exit()
| 34.230769 | 90 | 0.622472 |
4a254ff7e4a7193b09dcce743ce741ac7b1dfcb2 | 5,459 | py | Python | test/generate.py | charles-typ/pandas | e3e4fc350aa2234569fe488889254ffb1e86d30b | [
"BSD-3-Clause"
] | null | null | null | test/generate.py | charles-typ/pandas | e3e4fc350aa2234569fe488889254ffb1e86d30b | [
"BSD-3-Clause"
] | null | null | null | test/generate.py | charles-typ/pandas | e3e4fc350aa2234569fe488889254ffb1e86d30b | [
"BSD-3-Clause"
] | null | null | null | import string
import timeit
import numpy as np
import logging
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
from pandas import DataFrame, MultiIndex, Series, concat, date_range, merge, merge_asof
from pandas import pipeline_merge
import pandas.util.testing as tm
import argparse
try:
from pandas import merge_ordered
except ImportError:
from pandas import ordered_merge as merge_ordered
parser = argparse.ArgumentParser()
parser.add_argument('-left', default=50000, type=int)
parser.add_argument('-right', default=500000, type=int)
parser.add_argument('-chunk', default=20, type=int)
args = parser.parse_args()
left_table = args.left
right_table = args.right
N = 20000000
pieces = args.chunk
logger.info("Start generating data")
indices = tm.makeStringIndex(N).values
indices2 = tm.makeStringIndex(N).values
key = np.tile(indices[:left_table], 1)
left = DataFrame(
{"key": key, "value": np.random.randn(left_table)}
)
right = {}
np.random.shuffle(indices)
for i in range(1, pieces):
right[i] = DataFrame(
{
"key": indices2[(i - 1)*right_table + 5000:i*right_table + 5000],
"value2": np.random.randn(right_table),
}
)
logger.info("Finish generating data")
logger.info("Left table size: " + str(left_table) + ", right table chunk size: " + str(right_table))
#right[12] = DataFrame(
# {
# "key": indices[(11)*1000000 + 50000:11*1000000 + 50000 + 600000],
# "value2": np.random.randn(600000),
# }
#)
print("\n")
logger.info("Start Running test for original pandas code")
prev = 0
for ttt in range(2, pieces + 1):
right_merge = DataFrame(columns=["key", "value2"])
for i in range(1, ttt):
right_merge = right_merge.append(right[i])
#print(right_merge)
start = timeit.default_timer()
result = merge(left, right_merge, how="inner")
end = timeit.default_timer()
logger.info(str(ttt - 1) + " chunks take time: " + str(end - start) + " single chunk takes time: " + str(end - start - prev))
prev = end - start
#print("******* ", end - start)
print("--------------------------------------------------------------------------------")
print("\n")
logger.info("Start Running test for original pandas code, in increment manner")
total = 0
for i in range(1, pieces):
start = timeit.default_timer()
result = merge(left, right[i], how="inner")
end = timeit.default_timer()
logger.info(str(i) + "th single chunk takes time: " + str(end - start))
total += end - start
#print("******* ", end - start)
logger.info("Original increment takes time: " + str(total))
print("--------------------------------------------------------------------------------")
print("\n")
logger.info("Start Running test for pipelined pandas code")
leftsorter = None
leftcount = None
orizer = None
intrizer = None
count = 0
for i in range(1, pieces):
start = timeit.default_timer()
result, orizer, intrizer, leftsorter, leftcount = pipeline_merge(left, right[i], factorizer=orizer, intfactorizer=intrizer, leftsorter=leftsorter, leftcount=leftcount, slices=ttt-1, how="pipeline")
end = timeit.default_timer()
count += (end - start)
logger.info(str(i) + " chunks take time " + str(end - start) + " Accum time: " + str(count))
#print("******* ", end - start)
print("--------------------------------------------------------------------------------")
print("\n")
logger.info("Start Running test for pipelined pandas with merge join code")
leftsorter = None
leftcount = None
orizer = None
intrizer = None
count = 0
for i in range(1, pieces):
start = timeit.default_timer()
result, orizer, intrizer, leftsorter, leftcount = pipeline_merge(left, right[i], factorizer=orizer, intfactorizer=intrizer, leftsorter=leftsorter, leftcount=leftcount, slices=ttt-1, how="pipeline_merge")
end = timeit.default_timer()
count += (end - start)
logger.info(str(i) + " chunks take time " + str(end - start) + " Accum time: " + str(count))
#print("******* ", end - start)
#for ttt in range(2, pieces + 1):
# leftsorter = None
# leftcount = None
# orizer = None
# intrizer = None
# start = timeit.default_timer()
# for i in range(1, ttt):
# result, orizer, intrizer, leftsorter, leftcount = pipeline_merge(left, right[i], factorizer=orizer, intfactorizer=intrizer, leftsorter=leftsorter, leftcount=leftcount, how="pipeline")
# end = timeit.default_timer()
# print(end - start)
# def time_merge_dataframe_integer_2key(self, sort):
# pipeline_merge(self.df, self.df3, how="pipeline")
#
# def time_merge_dataframe_integer_key(self, sort):
# pipeline_merge(self.df, self.df2, on="key1", how="pipeline")
# class I8Merge:
#
# params = ["inner", "outer", "left", "right"]
# param_names = ["how"]
#
# def setup(self, how):
# low, high, n = -1000, 1000, 10 ** 6
# self.left = DataFrame(
# np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG")
# )
# self.left["left"] = self.left.sum(axis=1)
# self.right = self.left.sample(frac=1).rename({"left": "right"}, axis=1)
# self.right = self.right.reset_index(drop=True)
# self.right["right"] *= -1
#
# def time_i8merge(self, how):
# merge(self.left, self.right, how=how)
#from .pandas_vb_common import setup # noqa: F401 isort:skip
| 34.99359 | 207 | 0.631618 |
4a25506c6926ec9655aec0b93c206a67531f8984 | 3,266 | py | Python | minecraft/__init__.py | ky13/pyCraft | 27ebbbb04e3c3c5ade88ec9452ccfd6261f79625 | [
"Apache-2.0"
] | 1 | 2018-12-13T08:39:26.000Z | 2018-12-13T08:39:26.000Z | minecraft/__init__.py | vEaglee/pyCraft | 316ea4d63d6be5c59fe4b121db920b117130c6e9 | [
"Apache-2.0"
] | null | null | null | minecraft/__init__.py | vEaglee/pyCraft | 316ea4d63d6be5c59fe4b121db920b117130c6e9 | [
"Apache-2.0"
] | null | null | null | """
A modern, Python3-compatible, well-documented library for communicating
with a MineCraft server.
"""
__version__ = "0.5.0"
SUPPORTED_MINECRAFT_VERSIONS = {
'1.8': 47,
'1.8.1': 47,
'1.8.2': 47,
'1.8.3': 47,
'1.8.4': 47,
'1.8.5': 47,
'1.8.6': 47,
'1.8.7': 47,
'1.8.8': 47,
'1.8.9': 47,
'1.9': 107,
'1.9.1': 108,
'1.9.2': 109,
'1.9.3': 110,
'1.9.4': 110,
'1.10': 210,
'1.10.1': 210,
'1.10.2': 210,
'16w32a': 301,
'16w32b': 302,
'16w33a': 303,
'16w35a': 304,
'16w36a': 305,
'16w38a': 306,
'16w39a': 307,
'16w39b': 308,
'16w39c': 309,
'16w40a': 310,
'16w41a': 311,
'16w42a': 312,
'16w43a': 313,
'16w44a': 313,
'1.11-pre1': 314,
'1.11': 315,
'16w50a': 316,
'1.11.1': 316,
'1.11.2': 316,
'17w06a': 317,
'17w13a': 318,
'17w13b': 319,
'17w14a': 320,
'17w15a': 321,
'17w16a': 322,
'17w16b': 323,
'17w17a': 324,
'17w17b': 325,
'17w18a': 326,
'17w18b': 327,
'1.12-pre1': 328,
'1.12-pre2': 329,
'1.12-pre3': 330,
'1.12-pre4': 331,
'1.12-pre5': 332,
'1.12-pre6': 333,
'1.12-pre7': 334,
'1.12': 335,
'17w31a': 336,
'1.12.1-pre1': 337,
'1.12.1': 338,
'1.12.2-pre1': 339,
'1.12.2-pre2': 339,
'1.12.2': 340,
'17w43a': 341,
'17w43b': 342,
'17w45a': 343,
'17w45b': 344,
'17w46a': 345,
'17w47a': 346,
'17w47b': 347,
'17w48a': 348,
'17w49a': 349,
'17w49b': 350,
'17w50a': 351,
'18w01a': 352,
'18w02a': 353,
'18w03a': 354,
'18w03b': 355,
'18w05a': 356,
'18w06a': 357,
'18w07a': 358,
'18w07b': 359,
'18w07c': 360,
'18w08a': 361,
'18w08b': 362,
'18w09a': 363,
'18w10a': 364,
'18w10b': 365,
'18w10c': 366,
'18w10d': 367,
'18w11a': 368,
'18w14a': 369,
'18w14b': 370,
'18w15a': 371,
'18w16a': 372,
'18w19a': 373,
'18w19b': 374,
'18w20a': 375,
'18w20b': 376,
'18w20c': 377,
'18w21a': 378,
'18w21b': 379,
'18w22a': 380,
'18w22b': 381,
'18w22c': 382,
'1.13-pre1': 383,
'1.13-pre2': 384,
'1.13-pre3': 385,
'1.13-pre4': 386,
'1.13-pre5': 387,
'1.13-pre6': 388,
'1.13-pre7': 389,
'1.13-pre8': 390,
'1.13-pre9': 391,
'1.13-pre10': 392,
'1.13': 393,
'18w30a': 394,
'18w30b': 395,
'18w31a': 396,
'18w32a': 397,
'18w33a': 398,
'1.13.1-pre1': 399,
'1.13.1-pre2': 400,
'1.13.1': 401,
'1.13.2-pre1': 402,
'1.13.2-pre2': 403,
'1.13.2': 404,
}
SUPPORTED_PROTOCOL_VERSIONS = \
sorted(set(SUPPORTED_MINECRAFT_VERSIONS.values()))
| 23.496403 | 71 | 0.390386 |
4a2551005c8c4924f44239999e84d6d65b6e3e7a | 9,491 | py | Python | Packs/GenericSQL/Integrations/GenericSQL/GenericSQL_test.py | mazmat-panw/content | 024a65c1dea2548e2637a9cbbe54966e9e34a722 | [
"MIT"
] | 2 | 2021-12-06T21:38:24.000Z | 2022-01-13T08:23:36.000Z | Packs/GenericSQL/Integrations/GenericSQL/GenericSQL_test.py | mazmat-panw/content | 024a65c1dea2548e2637a9cbbe54966e9e34a722 | [
"MIT"
] | 87 | 2022-02-23T12:10:53.000Z | 2022-03-31T11:29:05.000Z | Packs/GenericSQL/Integrations/GenericSQL/GenericSQL_test.py | henry-sue-pa/content | 043c6badfb4f9c80673cad9242fdea72efe301f7 | [
"MIT"
] | 2 | 2022-01-05T15:27:01.000Z | 2022-02-01T19:27:43.000Z | import os
import pytest
import sqlalchemy
from GenericSQL import Client, sql_query_execute, generate_default_port_by_dialect
class ResultMock:
def __init__(self):
pass
def fetchall(self):
return []
ARGS1 = {
'query': "select Name from city",
'limit': 5,
'skip': 0
}
ARGS2 = {
'query': "select * from mysql.user",
'limit': 1,
'skip': 0
}
ARGS3 = {
'query': "select Name from city where 1=2",
'limit': 5,
'skip': 0
}
RAW1 = [{'Name': 'Kabul'}, {'Name': 'Qandahar'}, {'Name': 'Herat'}, {'Name': 'Mazar-e-Sharif'}]
RAW2 = [{'Host': '%',
'User': 'admin',
'Select_priv': 'Y',
'Insert_priv': 'Y',
'Update_priv': 'Y',
'Delete_priv': 'Y',
'Create_priv': 'Y',
'Drop_priv': 'Y',
'Reload_priv': 'Y',
'Shutdown_priv': 'N',
'Process_priv': 'Y',
'File_priv': 'N',
'Grant_priv': 'Y',
'References_priv': 'Y',
'Index_priv': 'Y',
'Alter_priv': 'Y',
'Show_db_priv': 'Y',
'Super_priv': 'N',
'Create_tmp_table_priv': 'Y',
'Lock_tables_priv': 'Y',
'Execute_priv': 'Y',
'Repl_slave_priv': 'Y',
'Repl_client_priv': 'Y',
'Create_view_priv': 'Y',
'Show_view_priv': 'Y',
'Create_routine_priv': 'Y',
'Alter_routine_priv': 'Y',
'Create_user_priv': 'Y',
'Event_priv': 'Y',
'Trigger_priv': 'Y',
'Create_tablespace_priv': 'N',
'ssl_type': '',
'ssl_cipher': b'',
'x509_issuer': b'',
'x509_subject': b'',
'max_questions': 0,
'max_updates': 0,
'max_connections': 0,
'max_user_connections': 0,
'plugin': 'mysql_native_password',
'authentication_string': 'test',
'password_expired': 'N',
'password_last_changed': '2020-02-17 08:49:45',
'password_lifetime': None,
'account_locked': 'N',
'Create_role_priv': 'N',
'Drop_role_priv': 'N',
'Password_reuse_history': None,
'Password_reuse_time': None,
'Password_require_current': None,
'User_attributes': None}]
HEADER1 = ['Name']
HEADER2 = ['Host', 'User', 'Select_priv', 'Insert_priv', 'Update_priv', 'Delete_priv', 'Create_priv', 'Drop_priv',
'Reload_priv', 'Shutdown_priv', 'Process_priv', 'File_priv', 'Grant_priv', 'References_priv', 'Index_priv',
'Alter_priv', 'Show_db_priv', 'Super_priv', 'Create_tmp_table_priv', 'Lock_tables_priv', 'Execute_priv',
'Repl_slave_priv', 'Repl_client_priv', 'Create_view_priv', 'Show_view_priv', 'Create_routine_priv',
'Alter_routine_priv', 'Create_user_priv', 'Event_priv', 'Trigger_priv', 'Create_tablespace_priv', 'ssl_type',
'ssl_cipher', 'x509_issuer', 'x509_subject', 'max_questions', 'max_updates', 'max_connections',
'max_user_connections', 'plugin', 'authentication_string', 'password_expired', 'password_last_changed',
'password_lifetime', 'account_locked', 'Create_role_priv', 'Drop_role_priv', 'Password_reuse_history',
'Password_reuse_time', 'Password_require_current', 'User_attributes']
EXPECTED_OUTPUT1 = {
'GenericSQL(val.Query && val.Query === obj.Query)':
{'GenericSQL': {'Result': [{'Name': 'Kabul'},
{'Name': 'Qandahar'},
{'Name': 'Herat'},
{'Name': 'Mazar-e-Sharif'}],
'Headers': HEADER1,
'Query': 'select Name from city',
'InstanceName': 'sql_dialect_database'}}
}
EXPECTED_OUTPUT2 = \
{'GenericSQL(val.Query && val.Query === obj.Query)': {'GenericSQL': {'Result': [{
'Host': '%',
'User': 'admin',
'Select_priv': 'Y',
'Insert_priv': 'Y',
'Update_priv': 'Y',
'Delete_priv': 'Y',
'Create_priv': 'Y',
'Drop_priv': 'Y',
'Reload_priv': 'Y',
'Shutdown_priv': 'N',
'Process_priv': 'Y',
'File_priv': 'N',
'Grant_priv': 'Y',
'References_priv': 'Y',
'Index_priv': 'Y',
'Alter_priv': 'Y',
'Show_db_priv': 'Y',
'Super_priv': 'N',
'Create_tmp_table_priv': 'Y',
'Lock_tables_priv': 'Y',
'Execute_priv': 'Y',
'Repl_slave_priv': 'Y',
'Repl_client_priv': 'Y',
'Create_view_priv': 'Y',
'Show_view_priv': 'Y',
'Create_routine_priv': 'Y',
'Alter_routine_priv': 'Y',
'Create_user_priv': 'Y',
'Event_priv': 'Y',
'Trigger_priv': 'Y',
'Create_tablespace_priv': 'N',
'ssl_type': '',
'ssl_cipher': "b''",
'x509_issuer': "b''",
'x509_subject': "b''",
'max_questions': '0',
'max_updates': '0',
'max_connections': '0',
'max_user_connections': '0',
'plugin': 'mysql_native_password',
'authentication_string': 'test',
'password_expired': 'N',
'password_last_changed': '2020-02-17 08:49:45',
'password_lifetime': 'None',
'account_locked': 'N',
'Create_role_priv': 'N',
'Drop_role_priv': 'N',
'Password_reuse_history': 'None',
'Password_reuse_time': 'None',
'Password_require_current': 'None',
'User_attributes': 'None',
}], 'Headers': HEADER2,
'Query': 'select * from mysql.user',
'InstanceName': 'sql_dialect_database'}}}
EMPTY_OUTPUT = {
'GenericSQL(val.Query && val.Query === obj.Query)': {
'GenericSQL':
{
'Result': [],
'Headers': [],
'Query': 'select Name from city where 1=2',
'InstanceName': 'sql_dialect_database'
}
}
}
@pytest.mark.parametrize('command, args, response, expected_result, header', [
# Classic sql query, showing a table from database and convert it to readable data
(sql_query_execute, ARGS1, RAW1, EXPECTED_OUTPUT1, HEADER1),
# Simulates an mysql default tables such as "user",
# in previous bug the value- b'' couldn't be converted to a readable value and the query failed
(sql_query_execute, ARGS2, RAW2, EXPECTED_OUTPUT2, HEADER2),
])
def test_sql_queries(command, args, response, expected_result, header, mocker):
"""Unit test
Given
- select query
- raw response of the database
When
- mock the database result
Then
- convert the result to human readable table
- create the context
- validate the expected_result and the created context
"""
# needed in order not to make a connection in tests
mocker.patch.object(Client, '_create_engine_and_connect', return_value=mocker.Mock(spec=sqlalchemy.engine.base.Connection))
mocker.patch.object(Client, 'sql_query_execute_request', return_value=(response, header))
client = Client('sql_dialect', 'server_url', 'username', 'password', 'port', 'database', "", False)
result = command(client, args)
assert expected_result == result[1] # entry context is found in the 2nd place in the result of the command
def test_sql_queries_with_empty_table(mocker):
"""Unit test
Given
- query that return an empty table
- raw response of the database
When
- mock the database result
Then
- convert the result to human readable table
- create the context
- validate the expected_result and the created context
"""
mocker.patch.object(Client, '_create_engine_and_connect', return_value=mocker.Mock(spec=sqlalchemy.engine.base.Connection))
client = Client('sql_dialect', 'server_url', 'username', 'password', 'port', 'database', "", False)
mocker.patch.object(client.connection, 'execute', return_value=ResultMock())
result = sql_query_execute(client, ARGS3)
assert EMPTY_OUTPUT == result[1] # entry context is found in the 2nd place in the result of the command
def test_mysql_integration():
"""Test actual connection to mysql. Will be skipped unless MYSQL_HOST is set.
Can be used to do local debuging of connecting to MySQL by set env var MYSQL_HOST or changing the code below.
Test assumes mysql credentials: root/password
You can setup mysql locally by running:
docker run --name mysql-80 -e MYSQL_ROOT_PASSWORD=password -d mysql:8.0
And then set env var: MYSQL_HOST=localhost
"""
host = os.getenv('MYSQL_HOST', '')
if not host:
pytest.skip('Skipping mysql integration test as MYSQL_HOST is not set')
dialect = 'MySQL'
client = Client(dialect, host, 'root', 'password', generate_default_port_by_dialect(dialect), 'mysql', "", False, True)
res = client.sql_query_execute_request('show processlist', {})
assert len(res) >= 1
@pytest.mark.parametrize('connect_parameters, dialect, expected_response', [
('arg1=value1&arg2=value2', 'MySQL', {'arg1': 'value1', 'arg2': 'value2'}),
('arg1=value1&arg2=value2', 'Microsoft SQL Server', {'arg1': 'value1', 'arg2': 'value2', 'driver': 'FreeTDS'}),
('arg1=value1&arg2=value2', 'Microsoft SQL Server - MS ODBC Driver',
{'arg1': 'value1', 'arg2': 'value2', 'driver': 'ODBC Driver 17 for SQL Server'})])
def test_parse_connect_parameters(connect_parameters, dialect, expected_response):
assert Client.parse_connect_parameters(connect_parameters, dialect) == expected_response
| 37.074219 | 127 | 0.597724 |
4a2551cbd3d782426815501d197c862dafbfba17 | 958 | py | Python | greendoge/wallet/trade_record.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 44 | 2021-07-06T10:09:06.000Z | 2022-02-09T04:30:14.000Z | greendoge/wallet/trade_record.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 67 | 2021-07-06T11:57:18.000Z | 2022-02-02T16:14:15.000Z | greendoge/wallet/trade_record.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 16 | 2021-07-06T10:36:37.000Z | 2022-03-15T08:35:16.000Z | from dataclasses import dataclass
from typing import List, Optional, Tuple
from greendoge.types.blockchain_format.coin import Coin
from greendoge.types.blockchain_format.sized_bytes import bytes32
from greendoge.types.spend_bundle import SpendBundle
from greendoge.util.ints import uint8, uint32, uint64
from greendoge.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class TradeRecord(Streamable):
"""
Used for storing transaction data and status in wallets.
"""
confirmed_at_index: uint32
accepted_at_time: Optional[uint64]
created_at_time: uint64
my_offer: bool
sent: uint32
spend_bundle: SpendBundle # This in not complete spendbundle
tx_spend_bundle: Optional[SpendBundle] # this is full trade
additions: List[Coin]
removals: List[Coin]
trade_id: bytes32
status: uint32 # TradeStatus, enum not streamable
sent_to: List[Tuple[str, uint8, Optional[str]]]
| 31.933333 | 65 | 0.769311 |
4a25526a8d67085dddf76948161bcbe3cd110bae | 3,100 | py | Python | data/p2DJ/New/R2/benchmark/startQiskit_QC62.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/R2/benchmark/startQiskit_QC62.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/R2/benchmark/startQiskit_QC62.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=10
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.cx(input_qubit[0],input_qubit[1]) # number=7
prog.x(input_qubit[1]) # number=8
prog.cx(input_qubit[0],input_qubit[1]) # number=9
prog.cx(input_qubit[0],input_qubit[1]) # number=4
prog.x(input_qubit[1]) # number=5
prog.cx(input_qubit[0],input_qubit[1]) # number=6
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot = 2800
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_QC62.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 28.181818 | 82 | 0.624516 |
4a2552949421e005c478ff95f2b666fde31a09a8 | 5,179 | py | Python | src/SocialNetwork_API/services/user.py | mungpham/mungpham | 3545dafdb498503d2f138d4b7515a7ae8f195994 | [
"MIT"
] | null | null | null | src/SocialNetwork_API/services/user.py | mungpham/mungpham | 3545dafdb498503d2f138d4b7515a7ae8f195994 | [
"MIT"
] | null | null | null | src/SocialNetwork_API/services/user.py | mungpham/mungpham | 3545dafdb498503d2f138d4b7515a7ae8f195994 | [
"MIT"
] | null | null | null | import _thread
import time
import hashlib
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.models import Q
from django.conf import settings
from django.template.defaultfilters import slugify
from SocialNetwork_API.services.base import BaseService
from SocialNetwork_API.models import *
from SocialNetwork_API.const import ResourceType
class UserService(BaseService):
@classmethod
def get_all_users(cls):
try:
users = User.objects.all()
if len(users) > 0:
return users
return None
except Exception as exception:
cls.log_exception(exception)
return None
@classmethod
def get_user_friend(cls, user_id, friend_id):
try:
user_friend = Friend.objects.filter(user_id=user_id, friend_user_id=friend_id)
if len(user_friend) > 0:
return user_friend[0]
return None
except Exception as exception:
cls.log_exception(exception)
return None
@classmethod
def get_single_user(cls, user_id):
try:
return User.objects.get(pk=user_id)
except:
return None
@classmethod
def authenticate(cls, email, username, password):
try:
if email:
user = User.objects.filter(email=email)[0]
if username:
user = User.objects.filter(username=username)[0]
if user and user.check_password(password):
return user
else:
return None
except Exception as exception:
return None
@classmethod
def save(cls, user_data, instance=None):
try:
password = user_data.pop('password', None)
user = instance if instance else User()
is_new = instance is None
# Set property values
if 'username' in user_data and user.username != user_data['username']:
user.slug = slugify(user_data['username'])
for key in user_data:
setattr(user, key, user_data[key])
# Set password
if is_new:
user.set_password(password)
else:
if password:
user.set_password(password)
with transaction.atomic():
user.save()
return cls.get_user(user.id)
except Exception as exception:
raise exception
@classmethod
def user_friend(cls, user, friend):
try:
user_friend = Friend()
user_friend.user_id = user.id
user_friend.friend_user_id = friend.id
with transaction.atomic():
user_friend.save()
# # Save follow_user to arangodb
# if settings.SAVE_TO_ARANGODB:
# ArangoUserService.follow_band(band.userband.__dict__, activity.__dict__)
return True
except Exception as exception:
raise exception
@classmethod
def get_email(cls, email):
try:
user_email = UserEmail.objects.get(email=email)
if user_email:
return user_email
except Exception as e:
cls.log_exception(e)
return None
return None
@classmethod
def gen_token(cls, user_id):
text = str(user_id) + Utils.id_generator(10) + str(int(time.time()))
hash_object = hashlib.md5(text.encode('utf-8'))
return hash_object.hexdigest()
@classmethod
def get_by_email(cls, email):
try:
user = User.objects.get(email=email)
return cls.get_user(user.pk)
except User.DoesNotExist:
return None
@classmethod
def get_users(cls, *args, **kwargs):
limit = kwargs.get('limit', 20)
offset = kwargs.get('offset', 0)
search = kwargs.get('search', None)
end = offset + limit
filter = kwargs.get('filter', {})
order_by = kwargs.get('order', '-id')
includes = kwargs.get('includes', [])
users = []
if search:
term = Q(username__icontains=search)
user_ids = User.objects.values_list('id', flat=True) \
.order_by(order_by).filter(**filter).filter(term)[offset:end]
count = User.objects.values_list('id', flat=True) \
.order_by(order_by).filter(**filter).filter(term).count()
else:
user_ids = User.objects.values_list('id', flat=True).order_by(order_by).filter(**filter)[offset:end]
count = User.objects.values_list('id', flat=True).order_by(order_by).filter(**filter).count()
for id in user_ids:
users.append(cls.get_user(id, includes=includes))
return {
'result': users,
'count': count
}
@classmethod
def get_user(cls, user_id, includes=None):
try:
user = User.objects.get(pk=user_id)
except Exception as e:
cls.log_exception(e)
return None
return user | 31.198795 | 112 | 0.575208 |
4a2552c2e1a451429efc2cb457e1d002521e8ba4 | 887 | py | Python | setup.py | alastairflynn/cartograph | c538d25b438315a23ca04fbfab63c7cef082a8a8 | [
"MIT"
] | null | null | null | setup.py | alastairflynn/cartograph | c538d25b438315a23ca04fbfab63c7cef082a8a8 | [
"MIT"
] | null | null | null | setup.py | alastairflynn/cartograph | c538d25b438315a23ca04fbfab63c7cef082a8a8 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="cartograph",
version="0.0.1",
author="Alastair Flynn",
author_email="[email protected]",
description="A package for drawing map tiles",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://alastairflynn.com/cartograph",
project_urls = {'Documentation':'https://alastairflynn.com/cartograph',
'Source code':'https://github.com/alastairflynn/cartograph'},
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.5',
install_requires=['numpy>=1.3.0', 'scipy>=0.9.0', 'matplotlib>=3.0.0']
)
| 34.115385 | 81 | 0.659526 |
4a2553dcb28cf359d8cb703bababa1f262a9f332 | 4,885 | py | Python | docs/conf.py | alexwohletz/cookie_test | 6bef4af9468ca1897f3c8fe0519e49fa9eaeeaaf | [
"MIT"
] | null | null | null | docs/conf.py | alexwohletz/cookie_test | 6bef4af9468ca1897f3c8fe0519e49fa9eaeeaaf | [
"MIT"
] | null | null | null | docs/conf.py | alexwohletz/cookie_test | 6bef4af9468ca1897f3c8fe0519e49fa9eaeeaaf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cookie_test documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import cookie_test
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cookie_test'
copyright = u"2019, Audrey Roy Greenfeld"
author = u"Audrey Roy Greenfeld"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cookie_test.__version__
# The full version, including alpha/beta/rc tags.
release = cookie_test.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cookie_testdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cookie_test.tex',
u'cookie_test Documentation',
u'Audrey Roy Greenfeld', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cookie_test',
u'cookie_test Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cookie_test',
u'cookie_test Documentation',
author,
'cookie_test',
'One line description of project.',
'Miscellaneous'),
]
| 29.786585 | 77 | 0.68741 |
4a2553dfe94a433e1a41e6be317c6fc66103c950 | 92,289 | py | Python | pattern/db/__init__.py | kkoch986/pattern | db807c888dcd15f515afe31753c9b0345a11b542 | ["BSD-3-Clause"] | 2 | 2015-12-08T15:59:25.000Z | 2016-03-15T20:11:29.000Z | pattern/db/__init__.py | kkoch986/pattern | db807c888dcd15f515afe31753c9b0345a11b542 | ["BSD-3-Clause"] | null | null | null | pattern/db/__init__.py | kkoch986/pattern | db807c888dcd15f515afe31753c9b0345a11b542 | ["BSD-3-Clause"] | 2 | 2015-12-08T15:59:25.000Z | 2016-03-15T20:11:29.000Z |
#### PATTERN | DB ##################################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import os
import sys
import warnings
import re
import htmlentitydefs
import urllib
import csv
from cStringIO import StringIO
from codecs import BOM_UTF8
from datetime import datetime, timedelta
from time import mktime, strftime
from math import sqrt
from types import GeneratorType
try:
from email.utils import parsedate_tz, mktime_tz
except:
from email.Utils import parsedate_tz, mktime_tz
try:
MODULE = os.path.dirname(__file__)
except:
MODULE = ""
MYSQL = "mysql"
SQLITE = "sqlite"
def _import_db(engine=SQLITE):
""" Lazy import called from Database() or Database.new().
Depending on the type of database we either import MySQLdb or SQLite.
Note: 64-bit Python needs 64-bit MySQL, 32-bit the 32-bit version.
"""
global MySQLdb
global sqlite
if engine == MYSQL:
import MySQLdb
warnings.simplefilter("ignore", MySQLdb.Warning)
if engine == SQLITE:
try:
# Python 2.5+
import sqlite3.dbapi2 as sqlite
except:
# Python 2.4 with pysqlite2
import pysqlite2.dbapi2 as sqlite
def find(match=lambda item: False, list=[]):
""" Returns the first item in the list for which match(item) is True.
"""
for item in list:
if match(item) is True:
return item
_sum = sum # pattern.db.sum() is also a column aggregate function.
#### DATE FUNCTIONS ################################################################################
NOW, YEAR = "now", datetime.now().year
# Date formats can be found in the Python documentation:
# http://docs.python.org/library/time.html#time.strftime
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
date_formats = [
DEFAULT_DATE_FORMAT, # 2010-09-21 09:27:01 => SQLite + MySQL
"%Y-%m-%dT%H:%M:%SZ", # 2010-09-20T09:27:01Z => Bing
"%Y-%m-%dT%H:%M:%S+0000", # 2010-09-20T09:27:01+0000 => Facebook
"%Y-%m-%d %H:%M", # 2010-09-21 09:27
"%Y-%m-%d", # 2010-09-21
"%d/%m/%Y", # 21/09/2010
"%d %B %Y", # 21 September 2010
"%B %d %Y", # September 21 2010
"%B %d, %Y", # September 21, 2010
]
class DateError(Exception):
pass
class Date(datetime):
""" A convenience wrapper for datetime.datetime with a default string format.
"""
format = DEFAULT_DATE_FORMAT
def copy(self):
return date(self.timestamp)
@property
def timestamp(self):
return int(mktime(self.timetuple())) # Seconds elapsed since 1/1/1970.
def strftime(self, format):
if self.year < 1900:
# Python's strftime() doesn't handle year < 1900:
return strftime(format, (1900,) + self.timetuple()[1:]).replace("1900", str(self.year), 1)
return datetime.strftime(self, format)
def __str__(self):
return self.strftime(self.format)
def __repr__(self):
return "Date(%s)" % repr(self.__str__())
def __iadd__(self, time):
return self.__add__(time)
def __isub__(self, time):
return self.__sub__(time)
def __add__(self, time):
d = datetime.__add__(self, time)
return date(d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond, self.format)
def __sub__(self, time):
d = datetime.__sub__(self, time)
if isinstance(d, timedelta):
# Subtracting two dates returns a time().
return d
return date(d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond, self.format)
def date(*args, **kwargs):
""" Returns a Date from the given parameters:
- date(format=Date.format) => now
- date(int)
- date(string)
- date(string, format=Date.format)
- date(string, inputformat, format=Date.format)
- date(year, month, day, format=Date.format)
- date(year, month, day, hours, minutes, seconds, format=Date.format)
If a string is given without an explicit input format, all known formats will be tried.
"""
d = None
f = None
if len(args) == 0 or args[0] == NOW:
# No parameters or one parameter NOW.
d = Date.now()
elif len(args) == 1 \
and (isinstance(args[0], int) \
or isinstance(args[0], basestring) and args[0].isdigit()):
# One parameter, an int or string timestamp.
d = Date.fromtimestamp(int(args[0]))
elif len(args) == 1 and isinstance(args[0], basestring):
# One parameter, a date string for which we guess the input format (RFC2822 or known formats).
try: d = Date.fromtimestamp(mktime_tz(parsedate_tz(args[0])))
except:
for format in ("format" in kwargs and [kwargs["format"]] or []) + date_formats:
try: d = Date.strptime(args[0], format); break
except:
pass
if d is None:
raise DateError, "unknown date format for %s" % repr(args[0])
elif len(args) == 2 and isinstance(args[0], basestring):
# Two parameters, a date string and an explicit input format.
d = Date.strptime(args[0], args[1])
elif len(args) >= 3:
# 3-6 parameters: year, month, day, hours, minutes, seconds.
f = kwargs.pop("format", None)
d = Date(*args[:7], **kwargs)
else:
raise DateError, "unknown date format"
d.format = kwargs.get("format") or len(args)>7 and args[7] or f or Date.format
return d
def time(days=0, seconds=0, minutes=0, hours=0, **kwargs):
""" Returns a value that can be added to a Date object.
"""
# Other parameters: microseconds, milliseconds, weeks.
# There is no months-parameter since months have a variable amount of days (28-31).
# To increase the month of a Date:
# Date(date.year, date.month+1, date.day, format=date.format)
return timedelta(days=days, seconds=seconds, minutes=minutes, hours=hours, **kwargs)
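# A minimal sketch of how date() and time() combine (illustrative):
# >>> date("2010-09-21 09:27:01")
# Date('2010-09-21 09:27:01')
# >>> date("21/09/2010") + time(days=1)
# Date('2010-09-22 00:00:00')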
#### STRING FUNCTIONS ##############################################################################
def string(value, default=""):
""" Returns the value cast to unicode, or default if it is None/empty.
"""
# Useful for HTML interfaces.
if value is None or value == "": # Don't do value != None because this includes 0.
return default
return decode_utf8(value)
def decode_utf8(string):
""" Returns the given string as a unicode string (if possible).
"""
if isinstance(string, str):
for encoding in (("utf-8",), ("windows-1252",), ("utf-8", "ignore")):
try:
return string.decode(*encoding)
except:
pass
return string
return unicode(string)
def encode_utf8(string):
""" Returns the given string as a Python byte string (if possible).
"""
if isinstance(string, unicode):
try:
return string.encode("utf-8")
except:
return string
return str(string)
RE_AMPERSAND = re.compile("\&(?!\#)") # & not followed by #
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);') # e.g., "&#201;" => "É"
def encode_entities(string):
    """ Encodes HTML entities in the given string ("<" => "&lt;").
        For example, to display "<em>hello</em>" in a browser,
        we need to pass "&lt;em&gt;hello&lt;/em&gt;" (otherwise "hello" in italic is displayed).
    """
    if isinstance(string, (str, unicode)):
        string = RE_AMPERSAND.sub("&amp;", string)
        string = string.replace("<", "&lt;")
        string = string.replace(">", "&gt;")
        string = string.replace('"', "&quot;")
        string = string.replace("'", "&#39;")
    return string
def decode_entities(string):
""" Decodes HTML entities in the given string ("<" => "<").
"""
# http://snippets.dzone.com/posts/show/4569
def replace_entity(match):
hash, hex, name = match.group(1), match.group(2), match.group(3)
if hash == "#" or name.isdigit():
if hex == '' :
                return unichr(int(name)) # "&#38;" => "&"
            if hex in ("x","X"):
                return unichr(int('0x'+name, 16)) # "&#x26;" => "&"
        else:
            cp = htmlentitydefs.name2codepoint.get(name) # "&amp;" => "&"
return cp and unichr(cp) or match.group() # "&foo;" => "&foo;"
if isinstance(string, (str, unicode)):
return RE_UNICODE.subn(replace_entity, string)[0]
return string
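# A quick illustration of the two helpers above:
#   encode_entities("<em>hello</em>") => "&lt;em&gt;hello&lt;/em&gt;"
#   decode_entities("&lt;em&gt;hello&lt;/em&gt;") => "<em>hello</em>"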
class _Binary:
""" A wrapper for BLOB data with engine-specific encoding.
See also: Database.binary().
"""
def __init__(self, data, type=SQLITE):
self.data, self.type = str(hasattr(data, "read") and data.read() or data), type
def escape(self):
if self.type == SQLITE:
return str(self.data.encode("string-escape")).replace("'","''")
if self.type == MYSQL:
return MySQLdb.escape_string(self.data)
def _escape(value, quote=lambda string: "'%s'" % string.replace("'", "\\'")):
""" Returns the quoted, escaped string (e.g., "'a bird\'s feathers'") for database entry.
Anything that is not a string (e.g., an integer) is converted to string.
Booleans are converted to "0" and "1", None is converted to "null".
See also: Database.escape()
"""
# Note: use Database.escape() for MySQL/SQLITE-specific escape.
if isinstance(value, str):
# Strings are encoded as UTF-8.
try: value = value.encode("utf-8")
except:
pass
if value in ("current_timestamp",):
# Don't quote constants such as current_timestamp.
return value
if isinstance(value, basestring):
# Strings are quoted, single quotes are escaped according to the database engine.
return quote(value)
if isinstance(value, bool):
# Booleans are converted to "0" or "1".
return str(int(value))
if isinstance(value, (int, long, float)):
# Numbers are converted to string.
return str(value)
if isinstance(value, datetime):
# Dates are formatted as string.
return quote(value.strftime(DEFAULT_DATE_FORMAT))
if isinstance(value, type(None)):
# None is converted to NULL.
return "null"
if isinstance(value, Query):
# A Query is converted to "("+Query.SQL()+")" (=subquery).
return "(%s)" % value.SQL().rstrip(";")
if isinstance(value, _Binary):
# Binary data is escaped with attention to null bytes.
return "'%s'" % value.escape()
return value
#### LIST FUNCTIONS ################################################################################
def order(list, cmp=None, key=None, reverse=False):
""" Returns a list of indices in the order as when the given list is sorted.
For example: ["c","a","b"] => [1, 2, 0]
This means that in the sorted list, "a" (index 1) comes first and "c" (index 0) last.
"""
if cmp and key:
f = lambda i, j: cmp(key(list[i]), key(list[j]))
elif cmp:
f = lambda i, j: cmp(list[i], list[j])
elif key:
f = lambda i, j: int(key(list[i]) >= key(list[j])) * 2 - 1
else:
f = lambda i, j: int(list[i] >= list[j]) * 2 - 1
return sorted(range(len(list)), cmp=f, reverse=reverse)
_order = order
def avg(list):
""" Returns the arithmetic mean of the given list of values.
For example: mean([1,2,3,4]) = 10/4 = 2.5.
"""
return float(_sum(list)) / (len(list) or 1)
def variance(list):
""" Returns the variance of the given list of values.
The variance is the average of squared deviations from the mean.
"""
a = avg(list)
return _sum([(x-a)**2 for x in list]) / (len(list)-1 or 1)
def stdev(list):
""" Returns the standard deviation of the given list of values.
Low standard deviation => values are close to the mean.
High standard deviation => values are spread out over a large range.
"""
return sqrt(variance(list))
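# A quick illustration of the aggregates above:
#   avg([1, 2, 3, 4]) => 2.5
#   variance([1, 2, 3, 4]) => 5.0 / 3 (sample variance, n-1 denominator)
#   stdev([1, 2, 3, 4]) => sqrt(5.0 / 3), roughly 1.29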
#### SQLITE FUNCTIONS ##############################################################################
# Convenient MySQL functions not included in pysqlite2. These are created at each Database.connect().
class sqlite_first(list):
def step(self, value): self.append(value)
def finalize(self):
return self[0]
class sqlite_last(list):
def step(self, value): self.append(value)
def finalize(self):
return self[-1]
class sqlite_group_concat(list):
def step(self, value): self.append(value)
def finalize(self):
return ",".join(string(v) for v in self if v is not None)
# SQLite (and MySQL) date string format:
# yyyy-mm-dd hh:mm:ss
def sqlite_year(datestring):
return int(datestring.split(" ")[0].split("-")[0])
def sqlite_month(datestring):
return int(datestring.split(" ")[0].split("-")[1])
def sqlite_day(datestring):
return int(datestring.split(" ")[0].split("-")[2])
def sqlite_hour(datestring):
return int(datestring.split(" ")[1].split(":")[0])
def sqlite_minute(datestring):
return int(datestring.split(" ")[1].split(":")[1])
def sqlite_second(datestring):
return int(datestring.split(" ")[1].split(":")[2])
#### DATABASE ######################################################################################
class DatabaseConnectionError(Exception):
pass
class Database(object):
class Tables(dict):
# Table objects are lazily constructed when retrieved.
# This saves time because each table executes a metadata query when constructed.
def __init__(self, db, *args, **kwargs):
dict.__init__(self, *args, **kwargs); self.db=db
def __getitem__(self, k):
if dict.__getitem__(self, k) is None:
dict.__setitem__(self, k, Table(name=k, database=self.db))
return dict.__getitem__(self, k)
def __init__(self, name, host="localhost", port=3306, username="root", password="", type=SQLITE, unicode=True, **kwargs):
""" A collection of tables stored in an SQLite or MySQL database.
If the database does not exist, creates it.
If the host, user or password is wrong, raises DatabaseConnectionError.
"""
_import_db(type)
self.type = type
self.name = name
self.host = host
self.port = port
self.username = kwargs.get("user", username)
self.password = password
self._connection = None
self.connect(unicode)
# Table names are available in the Database.tables dictionary,
# table objects as attributes (e.g. Database.table_name).
q = self.type==SQLITE and "select name from sqlite_master where type='table';" or "show tables;"
self.tables = Database.Tables(self)
for name, in self.execute(q):
if not name.startswith(("sqlite_",)):
self.tables[name] = None
# The SQL syntax of the last query is kept in cache.
self._query = None
# Persistent relations between tables, stored as (table1, table2, key1, key2, join) tuples.
self.relations = []
def connect(self, unicode=True):
# Connections for threaded applications work differently,
# see http://tools.cherrypy.org/wiki/Databases
# (have one Database object for each thread).
if self._connection is not None:
return
# MySQL
if self.type == MYSQL:
try:
self._connection = MySQLdb.connect(self.host, self.username, self.password, self.name, port=self.port, use_unicode=unicode)
self._connection.autocommit(False)
except Exception, e:
# Create the database if it doesn't exist yet.
if "unknown database" not in str(e).lower():
raise DatabaseConnectionError, e[1] # Wrong host, username and/or password.
connection = MySQLdb.connect(self.host, self.username, self.password)
cursor = connection.cursor()
cursor.execute("create database if not exists `%s`;" % self.name)
cursor.close()
connection.close()
self._connection = MySQLdb.connect(self.host, self.username, self.password, self.name, port=self.port, use_unicode=unicode)
self._connection.autocommit(False)
if unicode:
self._connection.set_character_set("utf8")
# SQLite
if self.type == SQLITE:
self._connection = sqlite.connect(self.name, detect_types=sqlite.PARSE_DECLTYPES)
# Create functions that are not natively supported by the engine.
# Aggregate functions (for grouping rows) + date functions.
self._connection.create_aggregate("first", 1, sqlite_first)
self._connection.create_aggregate("last", 1, sqlite_last)
self._connection.create_aggregate("group_concat", 1, sqlite_group_concat)
self._connection.create_function("year", 1, sqlite_year)
self._connection.create_function("month", 1, sqlite_month)
self._connection.create_function("day", 1, sqlite_day)
self._connection.create_function("hour", 1, sqlite_hour)
self._connection.create_function("minute", 1, sqlite_minute)
self._connection.create_function("second", 1, sqlite_second)
# Map field type INTEGER to int (not long(), e.g., 1L).
# Map field type BOOLEAN to bool.
# Map field type DATE to str, yyyy-mm-dd hh:mm:ss.
if self.type == MYSQL:
type = MySQLdb.constants.FIELD_TYPE
self._connection.converter[type.LONG] = int
self._connection.converter[type.LONGLONG] = int
self._connection.converter[type.DECIMAL] = float
self._connection.converter[type.NEWDECIMAL] = float
self._connection.converter[type.TINY] = bool
self._connection.converter[type.TIMESTAMP] = date
if self.type == SQLITE:
sqlite.converters["TINYINT(1)"] = bool # See Binary() why this is necessary:
sqlite.converters["BLOB"] = lambda data: str(data).decode("string-escape")
sqlite.converters["TIMESTAMP"] = date
def disconnect(self):
if self._connection is not None:
self._connection.commit()
self._connection.close()
self._connection = None
@property
def connection(self):
return self._connection
@property
def connected(self):
return self._connection is not None
def __getattr__(self, k):
""" Tables are available as attributes by name, e.g., Database.persons.
"""
if k in self.tables:
return self.tables[k]
if k in self.__dict__:
return self.__dict__[k]
raise AttributeError, "'Database' object has no attribute '%s'" % k
def __len__(self):
return len(self.tables)
def __iter__(self):
return iter(self.tables.keys())
def __getitem__(self, k):
return self.tables[k]
def __nonzero__(self):
return True
# Backwards compatibility.
def _get_user(self):
return self.username
def _set_user(self, v):
self.username = v
user = property(_get_user, _set_user)
@property
def query(self):
""" Yields the last executed SQL query as a string.
"""
return self._query
def execute(self, SQL, commit=False):
""" Executes the given SQL query and return a list of rows.
With commit=True, automatically commits insert/update/delete changes.
"""
self._query = SQL
if not SQL:
return # MySQL doesn't like empty queries.
#print SQL
cursor = self._connection.cursor()
cursor.execute(SQL)
rows = list(cursor.fetchall())
cursor.close()
if commit is not False:
self._connection.commit()
return rows
def commit(self):
""" Commit all pending insert/update/delete changes.
"""
self._connection.commit()
def rollback(self):
""" Discard changes since the last commit.
"""
self._connection.rollback()
def escape(self, value):
""" Returns the quoted, escaped string (e.g., "'a bird\'s feathers'") for database entry.
Anything that is not a string (e.g., an integer) is converted to string.
Booleans are converted to "0" and "1", None is converted to "null".
"""
def quote(string):
# How to escape strings differs between database engines.
if self.type == MYSQL:
#return "'%s'" % self._connection.escape_string(string) # Doesn't like Unicode.
return "'%s'" % string.replace("'", "\\'")
if self.type == SQLITE:
return "'%s'" % string.replace("'", "''")
return _escape(value, quote)
def binary(self, data):
""" Returns the string of binary data as a value that can be inserted in a BLOB field.
"""
return _Binary(data, self.type)
blob = binary
def _field_SQL(self, table, field):
# Returns a (field, index)-tuple with SQL strings for the given field().
# The field string can be used in a CREATE TABLE or ALTER TABLE statement.
# The index string is an optional CREATE INDEX statement (or None).
auto = " auto%sincrement" % (self.type == MYSQL and "_" or "")
field = list(field) + [STRING, None, False, True][len(field)-1:]
field = list(_field(field[0], field[1], default=field[2], index=field[3], optional=field[4]))
if field[1] == "timestamp" and field[2] == "now":
field[2] = "current_timestamp"
a = b = None
a = "`%s` %s%s%s%s" % (
# '`id` integer not null primary key auto_increment'
field[0],
field[1] == STRING and field[1]() or field[1],
field[4] is False and " not null" or " null",
field[2] is not None and " default %s" % self.escape(field[2]) or "",
field[3] == PRIMARY and " primary key%s" % ("", auto)[field[1]==INTEGER] or "")
if field[3] in (UNIQUE, True):
b = "create %sindex `%s_%s` on `%s` (`%s`);" % (
field[3] == UNIQUE and "unique " or "", table, field[0], table, field[0])
return a, b
def create(self, table, fields=[], encoding="utf-8", **kwargs):
""" Creates a new table with the given fields.
The given list of fields must contain values returned from the field() function.
"""
if table in self.tables:
raise TableError, "table '%s' already exists" % (self.name + "." + table)
if table.startswith(XML_HEADER):
# From an XML-string generated with Table.xml.
return parse_xml(self, table,
table = kwargs.get("name"),
field = kwargs.get("field", lambda s: s.replace(".", "_")))
encoding = self.type == MYSQL and " default charset=" + encoding.replace("utf-8", "utf8") or ""
fields, indices = zip(*[self._field_SQL(table, f) for f in fields])
self.execute("create table `%s` (%s)%s;" % (table, ", ".join(fields), encoding))
for index in indices:
if index is not None:
self.execute(index, commit=True)
self.tables[table] = None # lazy loading
return self.tables[table]
def drop(self, table):
""" Removes the table with the given name.
"""
if isinstance(table, Table) and table.db == self:
table = table.name
if table in self.tables:
self.tables[table].database = None
self.tables.pop(table)
self.execute("drop table `%s`;" % table, commit=True)
# The SQLite version in Python 2.5 has a drop/recreate table bug.
# Reconnect. This means that any reference to Database.connection
# is no longer valid after Database.drop().
if self.type == SQLITE and sys.version < "2.6":
self.disconnect()
self.connect()
remove = drop
def link(self, table1, field1, table2, field2, join="left"):
""" Defines a relation between two tables in the database.
When executing a table query, fields from the linked table will also be available
(to disambiguate between field names, use table.field_name).
"""
if isinstance(table1, Table):
table1 = table1.name
if isinstance(table2, Table):
table2 = table2.name
self.relations.append((table1, field1, table2, field2, join))
def __repr__(self):
return "Database(name=%s, host=%s, tables=%s)" % (
repr(self.name),
repr(self.host),
repr(self.tables.keys()))
def _delete(self):
# No warning is issued, seems a bad idea to document the method.
# Anyone wanting to delete an entire database should use an editor.
if self.type == MYSQL:
self.execute("drop database `%s`" % self.name, commit=True)
self.disconnect()
if self.type == SQLITE:
self.disconnect()
os.unlink(self.name)
def __delete__(self):
try:
self.disconnect()
except:
pass
#### FIELD #########################################################################################
class _String(str):
# The STRING constant can be called with a length when passed to field(),
# for example field("language", type=STRING(2), default="en", index=True).
def __new__(self):
return str.__new__(self, "string")
def __call__(self, length=100):
return "varchar(%s)" % (length>255 and 255 or (length<1 and 1 or length))
# Field type.
# Note: SQLite string fields do not impose a string limit.
# Unicode strings have more characters than actually displayed (e.g., "&#9829;" => "♥").
# Boolean fields are stored as tinyint(1), int 0 or 1.
STRING, INTEGER, FLOAT, TEXT, BLOB, BOOLEAN, DATE = \
_String(), "integer", "float", "text", "blob", "boolean", "date"
INT, BOOL = INTEGER, BOOLEAN
# Field index.
PRIMARY = "primary"
UNIQUE = "unique"
# DATE default.
NOW = "now"
#--- FIELD- ----------------------------------------------------------------------------------------
#def field(name, type=STRING, default=None, index=False, optional=True)
def field(name, type=STRING, **kwargs):
""" Returns a table field definition that can be passed to Database.create().
The column can be indexed by setting index to True, PRIMARY or UNIQUE.
Primary key number columns are always auto-incremented.
"""
default, index, optional = (
kwargs.get("default", type == DATE and NOW or None),
kwargs.get("index", False),
kwargs.get("optional", True)
)
if type == STRING:
type = STRING()
if type == FLOAT:
type = "real"
if type == BOOLEAN:
type = "tinyint(1)"
if type == DATE:
type = "timestamp"
if str(index) in "01":
index = bool(int(index))
if str(optional) in "01":
optional = bool(int(optional))
return (name, type, default, index, optional)
_field = field
def primary_key(name="id"):
""" Returns an auto-incremented integer primary key field named "id".
"""
return field(name, INTEGER, index=PRIMARY, optional=False)
pk = primary_key
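# A minimal sketch of defining a schema with field() and pk()
# (illustrative; "pets.db" and the field names are assumptions):
# >>> db = Database("pets.db", type=SQLITE)
# >>> db.create("pets", fields=[
# >>>     pk(),
# >>>     field("name", STRING(50), index=True),
# >>>     field("type", STRING(20), default="cat"),
# >>>     field("born", DATE, default=NOW)])
# >>> db.pets.insert(name="Taxi")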
#--- FIELD SCHEMA ----------------------------------------------------------------------------------
class Schema(object):
def __init__(self, name, type, default=None, index=False, optional=True, extra=None):
""" Field info returned from a "show columns from table"-query.
Each table object has a Table.schema{} dictionary describing the fields' structure.
"""
# Determine field type (NUMBER, STRING, TEXT, BLOB or DATE).
type, length = type.lower(), None
if type.startswith(("varchar", "char")):
length = type.split("(")[-1].strip(")")
length = int(length)
type = STRING
if type.startswith("int"):
type = INTEGER
if type.startswith(("real", "double")):
type = FLOAT
if type.startswith("time"):
type = DATE
if type.startswith("text"):
type = TEXT
if type.startswith("blob"):
type = BLOB
if type.startswith("tinyint(1)"):
type = BOOLEAN
# Determine index type (PRIMARY, UNIQUE, True or False).
if isinstance(index, basestring):
if index.lower().startswith("pri"):
index = PRIMARY
if index.lower().startswith("uni"):
index = UNIQUE
if index.lower() in ("0", "1", "", "yes", "mul"):
index = index.lower() in ("1", "yes", "mul")
# SQLite dumps the date string with quotes around it:
if isinstance(default, basestring) and type == DATE:
default = default.strip("'")
default = default.replace("current_timestamp", NOW)
default = default.replace("CURRENT_TIMESTAMP", NOW)
if default is not None and type == INTEGER:
default = int(default)
if default is not None and type == FLOAT:
default = float(default)
if not default and default != 0:
default = None
self.name = name # Field name.
self.type = type # Field type: INTEGER | FLOAT | STRING | TEXT | BLOB | DATE.
self.length = length # Field length for STRING.
self.default = default # Default value.
self.index = index # PRIMARY | UNIQUE | True | False.
self.optional = str(optional) in ("0", "True", "YES")
self.extra = extra or None
def __repr__(self):
return "Schema(name=%s, type=%s, default=%s, index=%s, optional=%s)" % (
repr(self.name),
repr(self.type),
repr(self.default),
repr(self.index),
repr(self.optional))
#### TABLE #########################################################################################
ALL = "*"
class TableError(Exception):
pass
class Table(object):
class Fields(list):
# Table.fields.append() alters the table.
# New field() with optional=False must have a default value (can not be NOW).
# New field() can have index=True, but not PRIMARY or UNIQUE.
def __init__(self, table, *args, **kwargs):
list.__init__(self, *args, **kwargs); self.table=table
def append(self, field):
name, (field, index) = field[0], self.table.db._field_SQL(self.table.name, field)
self.table.db.execute("alter table `%s` add column %s;" % (self.table.name, field))
self.table.db.execute(index, commit=True)
self.table._update()
def extend(self, fields):
[self.append(f) for f in fields]
def __setitem__(self, *args, **kwargs):
raise NotImplementedError, "Table.fields only supports append()"
insert = remove = pop = __setitem__
def __init__(self, name, database):
""" A collection of rows consisting of one or more fields (i.e., table columns)
of a certain type (i.e., strings, numbers).
"""
self.database = database
self._name = name
self.fields = [] # List of field names (i.e., column names).
self.schema = {} # Dictionary of (field, Schema)-items.
self.default = {} # Default values for Table.insert().
self.primary_key = None
self._update()
def _update(self):
# Retrieve table column names.
# Table column names are available in the Table.fields list.
# Table column names should not contain unicode because they can also be function parameters.
# Table column names should avoid " ", ".", "(" and ")".
# The primary key column is stored in Table.primary_key.
self.fields = Table.Fields(self)
if self.name not in self.database.tables:
raise TableError, "table '%s' does not exist" % (self.database.name + "." + self.name)
if self.db.type == MYSQL:
q = "show columns from `%s`;" % self.name
if self.db.type == SQLITE:
q = "pragma table_info(`%s`);" % self.name
i = self.db.execute("pragma index_list(`%s`)" % self.name) # look up indices
i = dict(((v[1].replace(self.name+"_", "", 1), v[2]) for v in i))
for f in self.db.execute(q):
# [name, type, default, index, optional, extra]
if self.db.type == MYSQL:
f = [f[0], f[1], f[4], f[3], f[2], f[5]]
if self.db.type == SQLITE:
f = [f[1], f[2], f[4], f[5], f[3], ""]
f[3] = f[3] == 1 and "pri" or (f[0] in i and ("1","uni")[int(i[f[0]])] or "")
list.append(self.fields, f[0])
self.schema[f[0]] = Schema(*f)
if self.schema[f[0]].index == PRIMARY:
self.primary_key = f[0]
def _get_name(self):
return self._name
def _set_name(self, name):
# Rename the table in the database and in any Database.relations.
# SQLite and MySQL will automatically copy indices on the new table.
self.db.execute("alter table `%s` rename to `%s`;" % (self._name, name))
self.db.tables.pop(self._name)
self.db.tables[name] = self
for i, r in enumerate(self.db.relations):
if r[0] == self._name:
                self.db.relations[i] = (name, r[1], r[2], r[3], r[4])
            if r[2] == self._name:
                self.db.relations[i] = (r[0], r[1], name, r[3], r[4])
self._name = name
name = property(_get_name, _set_name)
@property
def db(self):
return self.database
@property
def pk(self):
return self.primary_key
def count(self):
""" Yields the number of rows in the table.
"""
return int(list(self.db.execute("select count(*) from `%s`;" % self.name))[0][0])
def __len__(self):
return self.count()
def __iter__(self):
return iter(self.rows())
def __getitem__(self, i):
return self.rows()[i]
def abs(self, field):
""" Returns the absolute field name (e.g., "name" => ""persons.name").
"""
return abs(self.name, field)
def rows(self):
""" Returns a list of all the rows in the table.
"""
return self.db.execute("select * from `%s`;" % self.name)
def filter(self, *args, **kwargs):
""" Returns the rows that match the given constraints (using equals + AND):
"""
# Table.filter(("name","age"), id=1)
# Table.filter(ALL, type=("cat","dog")) => "cat" OR "dog"
# Table.filter(ALL, type="cat", name="Taxi") => "cat" AND "Taxi"
# Table.filter({"type":"cat", "name":"Taxi"})
if len(args) == 0:
# No parameters: default to ALL fields.
fields = ALL
elif len(args) == 1 and not isinstance(args[0], dict):
# One parameter: field / list of fields + optional keyword filters.
fields = args[0]
elif len(args) == 1:
# One parameter: dict of filters
fields, kwargs = ALL, args[0]
elif len(args) >= 2:
# Two parameters: field(s) and dict of filters.
fields, kwargs = args[0], args[1]
fields = isinstance(fields, (list, tuple)) and ", ".join(fields) or fields or ALL
q = " and ".join(cmp(k, v, "=", self.db.escape) for k, v in kwargs.items())
q = q and " where %s" % q or ""
q = "select %s from `%s`%s;" % (fields, self.name, q)
return self.db.execute(q)
def search(self, *args, **kwargs):
""" Returns a Query object that can be used to construct complex table queries.
"""
return Query(self, *args, **kwargs)
query = search
def _insert_id(self):
# Retrieves the primary key value of the last inserted row.
if self.db.type == MYSQL:
return list(self.db.execute("select last_insert_id();"))[0][0] or None
if self.db.type == SQLITE:
return list(self.db.execute("select last_insert_rowid();"))[0][0] or None
def insert(self, *args, **kwargs):
""" Inserts a new row from the given field parameters, returns id.
"""
# Table.insert(name="Taxi", age=2, type="cat")
# Table.insert({"name":"Fricassée", "age":2, "type":"cat"})
commit = kwargs.pop("commit", True) # As fieldname, use abs(Table.name, "commit").
if len(args) == 0 and len(kwargs) == 1 and isinstance(kwargs.get("values"), dict):
kwargs = kwargs["values"]
elif len(args) == 1 and isinstance(args[0], dict):
a=args[0]; a.update(kwargs); kwargs=a
if len(self.default) > 0:
kwargs.update(self.default)
k = ", ".join("`%s`" % k for k in kwargs.keys())
v = ", ".join(self.db.escape(v) for v in kwargs.values())
q = "insert into `%s` (%s) values (%s);" % (self.name, k, v)
self.db.execute(q, commit)
return self._insert_id()
def update(self, id, *args, **kwargs):
""" Updates the row with the given id.
"""
# Table.update(1, age=3)
# Table.update(1, {"age":3})
# Table.update(all(filter(field="name", value="Taxi")), age=3)
commit = kwargs.pop("commit", True) # As fieldname, use abs(Table.name, "commit").
if len(args) == 0 and len(kwargs) == 1 and isinstance(kwargs.get("values"), dict):
kwargs = kwargs["values"]
if len(args) == 1 and isinstance(args[0], dict):
a=args[0]; a.update(kwargs); kwargs=a
kv = ", ".join("`%s`=%s" % (k, self.db.escape(v)) for k, v in kwargs.items())
q = "update `%s` set %s where %s;" % (self.name, kv,
not isinstance(id, Group) and cmp(self.primary_key, id, "=", self.db.escape) \
or id.SQL(escape=self.db.escape))
self.db.execute(q, commit)
def delete(self, id, commit=True):
""" Removes the row which primary key equals the given id.
"""
# Table.delete(1)
# Table.delete(ALL)
# Table.delete(all(("type","cat"), ("age",15,">")))
q = "delete from `%s` where %s" % (self.name,
not isinstance(id, Group) and cmp(self.primary_key, id, "=", self.db.escape) \
or id.SQL(escape=self.db.escape))
self.db.execute(q, commit)
append, edit, remove = insert, update, delete
@property
def xml(self):
return xml(self)
def datasheet(self):
return Datasheet(rows=self.rows(), fields=[(f, self.schema[f].type) for f in self.fields])
def __repr__(self):
return "Table(name=%s, count=%s, database=%s)" % (
repr(self.name),
repr(self.count()),
repr(self.db.name))
#### QUERY #########################################################################################
#--- QUERY SYNTAX ----------------------------------------------------------------------------------
BETWEEN, LIKE, IN = \
"between", "like", "in"
sql_functions = \
"first|last|count|min|max|sum|avg|stdev|group_concat|concatenate|" \
"year|month|day|hour|minute|second|" \
"length|lower|upper|substr|substring|replace|trim|round|random|rand|" \
"strftime|date_format"
def abs(table, field):
""" For a given <fieldname>, returns the absolute <tablename>.<fieldname>.
This is useful when constructing queries with relations to other tables.
"""
def _format(s):
if not "." in s:
# Field could be wrapped in a function: year(date) => year(table.date).
p = s.endswith(")") and re.match(r"^("+sql_functions+r")\(", s, re.I) or None
i = p and len(p.group(0)) or 0
return "%s%s.%s" % (s[:i], table, s[i:])
return s
if isinstance(field, (list, tuple)):
return [_format(f) for f in field]
return _format(field)
def cmp(field, value, comparison="=", escape=lambda v: _escape(v), table=""):
""" Returns an SQL WHERE comparison string using =, i=, !=, >, <, >=, <= or BETWEEN.
Strings may contain wildcards (*) at the start or at the end.
A list or tuple of values can be given when using =, != or BETWEEN.
"""
# Use absolute field names if table name is given:
if table:
field = abs(table, field)
# cmp("name", "Mar*") => "name like 'Mar%'".
if isinstance(value, basestring) and (value.startswith(("*","%")) or value.endswith(("*","%"))):
if comparison in ("=", "i=", "==", LIKE):
return "%s like %s" % (field, escape(value.replace("*","%")))
if comparison in ("!=", "<>"):
return "%s not like %s" % (field, escape(value.replace("*","%")))
# cmp("name", "markov") => "name" like 'markov'" (case-insensitive).
if isinstance(value, basestring):
if comparison == "i=":
return "%s like %s" % (field, escape(value))
# cmp("type", ("cat", "dog"), "!=") => "type not in ('cat','dog')".
# cmp("amount", (10, 100), ":") => "amount between 10 and 100".
if isinstance(value, (list, tuple)):
if find(lambda v: isinstance(v, basestring) and (v.startswith("*") or v.endswith("*")), value):
return "(%s)" % any(*[(field, v) for v in value]).sql(escape=escape)
if comparison in ("=", "==", IN):
return "%s in (%s)" % (field, ",".join(escape(v) for v in value))
if comparison in ("!=", "<>"):
return "%s not in (%s)" % (field, ",".join(escape(v) for v in value))
if comparison in (":", BETWEEN):
return "%s between %s and %s" % (field, escape(value[0]), escape(value[1]))
# cmp("type", None, "!=") => "type is not null".
if isinstance(value, type(None)):
if comparison in ("=", "=="):
return "%s is null" % field
if comparison in ("!=", "<>"):
return "%s is not null" % field
# Using a subquery:
if isinstance(value, Query):
if comparison in ("=", "==", IN):
return "%s in %s" % (field, escape(value))
if comparison in ("!=", "<>"):
return "%s not in %s" % (field, escape(value))
return "%s%s%s" % (field, comparison, escape(value))
# Functions for date fields: cmp(year("date"), 1999, ">").
def year(date):
return "year(%s)" % date
def month(date):
return "month(%s)" % date
def day(date):
return "day(%s)" % date
def hour(date):
return "hour(%s)" % date
def minute(date):
return "minute(%s)" % date
def second(date):
return "second(%s)" % date
# Aggregate functions.
def count(value):
return "count(%s)" % value
def sum(value):
return "sum(%s)" % value
#--- QUERY FILTER ----------------------------------------------------------------------------------
AND, OR = "and", "or"
def filter(field, value, comparison="="):
return (field, value, comparison)
class Group(list):
def __init__(self, *args, **kwargs):
""" A list of SQL WHERE filters combined with AND/OR logical operator.
"""
list.__init__(self, args)
self.operator = kwargs.get("operator", AND)
def SQL(self, **kwargs):
""" For example, filter for small pets with tails or wings
(which is not the same as small pets with tails or pets with wings):
>>> Group(
>>> filter("type", "pet"),
>>> filter("weight", (4,6), ":"),
>>> Group(
>>> filter("tail", True),
>>> filter("wing", True), operator=OR))
Yields:
"type='pet' and weight between 4 and 6 and (tail=1 or wing=1)"
"""
# Remember to pass the right escape() function as optional parameter.
a = []
for filter in self:
# Traverse subgroups recursively.
if isinstance(filter, Group):
a.append("(%s)" % filter.SQL(**kwargs))
continue
# Convert filter() to string with cmp() - see above.
if isinstance(filter, (list, tuple)):
a.append(cmp(*filter, **kwargs))
continue
raise TypeError, "Group can contain other Group or filter(), not %s" % type(filter)
return (" %s " % self.operator).join(a)
sql = SQL
def all(*args):
""" Returns a group of filters combined with AND.
"""
return Group(*args, **dict(operator=AND))
def any(*args):
""" Returns a group of filters combined with OR.
"""
return Group(*args, **dict(operator=OR))
# From a GET-query dict:
# all(*dict.items())
# filter() value can also be a Query with comparison=IN.
#--- QUERY -----------------------------------------------------------------------------------------
# Relations:
INNER = "inner" # The rows for which there is a match in both tables (same as join=None).
LEFT = "left" # All rows from this table, with field values from the related table when possible.
RIGHT = "right" # All rows from the related table, with field values from this table when possible.
FULL = "full" # All rows from both tables.
def relation(field1, field2, table, join=LEFT):
return (field1, field2, table, join)
# Sorting:
ASCENDING = "asc"
DESCENDING = "desc"
# Grouping:
FIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV, CONCATENATE = \
"first", "last", "count", "max", "min", "sum", "avg", "stdev", "group_concat"
class Query(object):
id, cache = 0, {}
def __init__(self, table, fields=ALL, filters=[], relations=[], sort=None, order=ASCENDING, group=None, function=FIRST, range=None):
""" A selection of rows from the given table, filtered by any() and all() constraints.
"""
# Table.search(ALL, filters=any(("type","cat"), ("type","dog")) => cats and dogs.
# Table.search(("type", "name")), group="type", function=COUNT) => all types + amount per type.
# Table.search(("name", "types.has_tail"), relations=[("types","type","id")]) => links type to types.id.
Query.id += 1
filters = Group(*filters, **dict(operator=isinstance(filters, Group) and filters.operator or AND))
self._id = Query.id
self._table = table
self.fields = fields # A field name, list of field names or ALL.
self.aliases = {} # A dictionary of field name aliases, used with Query.xml or Query-in-Query.
self.filters = filters # A group of filter() objects.
self.relations = relations # A list of relation() objects.
self.sort = sort # A field name, list of field names or field index for sorting.
self.order = order # ASCENDING or DESCENDING.
self.group = group # A field name, list of field names or field index for folding.
self.function = function # FIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV or CONCATENATE (or list).
self.range = range # A (index1, index2)-tuple. The first row in the table is 0.
@property
def table(self):
return self._table
def __len__(self):
return len(list(self.rows()))
def __iter__(self):
return iter(self.rows())
def __getitem__(self, i):
return self.rows()[i]
def SQL(self):
""" Yields the SQL syntax of the query, which can be passed to Database.execute().
The SQL string will be cached for faster reuse.
"""
#if self._id in Query.cache:
# return Query.cache[self._id]
# Construct the SELECT clause from Query.fields.
g = not isinstance(self.group, (list, tuple)) and [self.group] or self.group
g = [abs(self._table.name, f) for f in g if f is not None]
fields = not isinstance(self.fields, (list, tuple)) and [self.fields] or self.fields
fields = [f in self.aliases and "%s as %s" % (f, self.aliases[f]) or f for f in fields]
fields = abs(self._table.name, fields)
        # With a GROUP BY clause, fields not used for grouping are wrapped in the given function.
# The function can also be a list of functions for each field (FIRST by default).
if g and isinstance(self.function, basestring):
fields = [f in g and f or "%s(%s)" % (self.function, f) for f in fields]
if g and isinstance(self.function, (list, tuple)):
fields = [f in g and f or "%s(%s)" % (F,f) for F,f in zip(self.function+[FIRST]*len(fields), fields)]
q = []
q.append("select %s" % ", ".join(fields))
# Construct the FROM clause from Query.relations.
# Table relations defined on the database are taken into account,
# but overridden by relations defined on the query.
q.append("from `%s`" % self._table.name)
relations = {}
for key1, key2, table, join in (relation(*r) for r in self.relations):
table = isinstance(table, Table) and table.name or table
relations[table] = (key1, key2, join)
for table1, key1, table2, key2, join in self._table.db.relations:
if table1 == self._table.name:
relations.setdefault(table2, (key1, key2, join))
if table2 == self._table.name:
relations.setdefault(table1, (key1, key2, join==LEFT and RIGHT or (join==RIGHT and LEFT or join)))
# Define relations only for tables whose fields are actually selected.
for (table, (key1, key2, join)) in relations.items():
for f in fields:
if table + "." in f:
q.append("%sjoin `%s`" % (join and join+" " or "", table))
q.append("on %s=%s" % (abs(self._table.name, key1), abs(self._table.db[table].name, key2)))
break
# Construct the WHERE clause from Query.filters.SQL().
# Use the database's escape function and absolute field names.
if len(self.filters) > 0:
q.append("where %s" % self.filters.SQL(escape=self._table.db.escape, table=self._table.name))
# Construct the ORDER BY clause from Query.sort and Query.order.
# Construct the GROUP BY clause from Query.group.
for clause, value in (("order", self.sort), ("group", self.group)):
if isinstance(value, basestring) and value != "":
q.append("%s by %s" % (clause, abs(self._table.name, value)))
elif isinstance(value, (list, tuple)) and len(value) > 0:
q.append("%s by %s" % (clause, ", ".join(abs(self._table.name, value))))
elif isinstance(value, int):
q.append("%s by %s" % (clause, abs(self._table.name, self._table.fields[value])))
if self.sort and clause == "order":
if self.order in (ASCENDING, DESCENDING):
q.append("%s" % self.order)
elif isinstance(self.order, (list, tuple)):
q[-1] = ",".join(" ".join(v) for v in zip(q[-1].split(","), self.order))
# Construct the LIMIT clause from Query.range.
if self.range:
q.append("limit %s, %s" % (str(self.range[0]), str(self.range[1])))
q = " ".join(q) + ";"
# Cache the SQL-string for faster retrieval.
#if len(Query.cache) > 100:
# Query.cache.clear()
#Query.cache[self._id] = q # XXX cache is not updated when properties change.
return q
sql = SQL
def execute(self):
""" Executes the query and returns the matching rows from the table.
"""
return self._table.db.execute(self.SQL())
def rows(self):
""" Executes the query and returns the matching rows from the table.
"""
return self.execute()
def record(self, row):
""" Returns the given row as a dictionary of (field or alias, value)-items.
"""
return dict(zip((self.aliases.get(f,f) for f in self.fields), row))
@property
def xml(self):
return xml(self)
def __repr__(self):
return "Query(sql=%s)" % repr(self.SQL())
#### VIEW ##########################################################################################
# A representation of data based on a table in the database.
# The render() method can be overridden to output data in a certain format (e.g., HTML for a web app).
class View(object):
def __init__(self, database, table, schema=[]):
""" A representation of data.
View.render() should be overridden in a subclass.
"""
self.database = database
self._table = isinstance(table, Table) and table.name or table
self.schema = schema # A list of table fields - see field().
@property
def db(self):
return self.database
@property
def table(self):
# If it doesn't exist, create the table from View.schema.
if not self._table in self.db:
self.setup()
return self.db[self._table]
def setup(self, overwrite=False):
""" Creates the database table from View.schema, optionally overwriting the old table.
"""
if overwrite:
self.db.drop(self._table)
if not self._table in self.db:
self.db.create(self._table, self.schema)
def render(self, *path, **query):
""" This method should be overwritten to return formatted table output (XML, HTML, RSS, ...)
For web apps, the given path should list all parts in the relative URL path,
and query is a dictionary of all POST and GET variables sent from the client.
For example: http://books.com/science/new
=> ["science", "new"]
=> render() data from db.books.filter(ALL, category="science", new=True).
"""
pass
# CherryPy-specific.
def default(self, *path, **query):
return self.render(*path, **query)
default.exposed = True
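# A minimal sketch of a View subclass (illustrative; the table name, fields and
# JSON output are assumptions, not part of the library):
# class ProductView(View):
#     def __init__(self, database):
#         View.__init__(self, database, "products", schema=[
#             pk(), field("name", STRING(100)), field("price", FLOAT)])
#     def render(self, *path, **query):
#         rows = self.table.filter(ALL, **query)
#         return json([dict(zip(self.table.fields, row)) for row in rows])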
#### XML PARSER ####################################################################################
XML_HEADER = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
def _unpack_fields(table, fields=[]):
""" Replaces "*" with the actual field names.
Fields from related tables keep the "<tablename>." prefix.
"""
u = []
for f in fields:
a, b = "." in f and f.split(".", 1) or (table.name, f)
if a == table.name and b == ALL:
# <table>.*
u.extend(f for f in table.db.tables[a].fields)
elif a != table.name and b == ALL:
# <related-table>.*
u.extend("%s.%s" % (a, f) for f in table.db.tables[a].fields)
elif a != table.name:
# <related-table>.<field>
u.append("%s.%s" % (a, b))
else:
# <field>
u.append(b)
return u
def xml_format(a):
""" Returns the given attribute (string, int, float, bool, None) as a quoted unicode string.
"""
if isinstance(a, basestring):
return "\"%s\"" % encode_entities(a)
if isinstance(a, bool):
return "\"%s\"" % ("no","yes")[int(a)]
if isinstance(a, (int, long)):
return "\"%s\"" % a
if isinstance(a, float):
return "\"%s\"" % round(a, 5)
if isinstance(a, type(None)):
return "\"\""
if isinstance(a, Date):
return "\"%s\"" % str(a)
    if isinstance(a, datetime):
return "\"%s\"" % str(date(mktime(a.timetuple())))
def xml(rows):
""" Returns the rows in the given Table or Query as an XML-string, for example:
<?xml version="1.0" encoding="utf-8"?>
<table name="pets", fields="id, name, type" count="2">
<schema>
<field name="id", type="integer", index="primary", optional="no" />
<field name="name", type="string", length="50" />
<field name="type", type="string", length="50" />
</schema>
<rows>
<row id="1", name="Taxi", type="cat" />
<row id="2", name="Hofstadter", type="dog" />
</rows>
</table>
"""
if isinstance(rows, Table):
root, table, rows, fields, aliases = "table", rows, rows.rows(), rows.fields, {}
if isinstance(rows, Query):
root, table, rows, fields, aliases, = "query", rows.table, rows.rows(), rows.fields, rows.aliases
fields = _unpack_fields(table, fields)
# <table name="" fields="" count="">
# <query table="" fields="" count="">
xml = []
xml.append(XML_HEADER)
xml.append("<%s %s=%s fields=\"%s\" count=\"%s\">" % (
root,
root != "table" and "table" or "name",
xml_format(table.name), # Use Query.aliases as field names.
", ".join(encode_entities(aliases.get(f,f)) for f in fields),
len(rows)))
# <schema>
# Field information is retrieved from the (related) table schema.
# If the XML is imported as a Table, the related fields become part of it.
xml.append("\t<schema>")
for f in fields:
if f not in table.schema:
s = f.split(".")
s = table.db[s[0]].schema[s[-1]]
else:
s = table.schema[f]
# <field name="" type="" length="" default="" index="" optional="" extra="" />
xml.append("\t\t<field name=%s type=%s%s%s%s%s%s />" % (
xml_format(aliases.get(f,f)),
xml_format(s.type),
s.length is not None and " length=%s" % xml_format(s.length) or "",
s.default is not None and " default=%s" % xml_format(s.default) or "",
s.index is not False and " index=%s" % xml_format(s.index) or "",
s.optional is not True and " optional=%s" % xml_format(s.optional) or "",
s.extra is not None and " extra=%s" % xml_format(s.extra) or ""))
xml.append("\t</schema>")
xml.append("\t<rows>")
# <rows>
for r in rows:
# <row field="value" />
xml.append("\t\t<row %s />" % " ".join("%s=%s" % (aliases.get(k,k), xml_format(v)) for k, v in zip(fields, r)))
xml.append("\t</rows>")
xml.append("</%s>" % root)
xml = "\n".join(xml)
xml = encode_utf8(xml)
return xml
def parse_xml(database, xml, table=None, field=lambda s: s.replace(".", "-")):
""" Creates a new table in the given database from the given XML-string.
The XML must be in the format generated by Table.xml.
If the table already exists, raises a TableError.
The given table parameter can be used to rename the table.
The given field function can be used to rename field names.
"""
def _attr(node, attribute, default=""):
return node.getAttribute(attribute) or default
# parseString() will decode entities, no need for decode_entities().
from xml.dom.minidom import parseString
dom = parseString(encode_utf8(xml))
a = dom.getElementsByTagName("table")
b = dom.getElementsByTagName("query")
if len(a) > 0:
table = table or _attr(a[0], "name", "")
if len(b) > 0:
table = table or _attr(b[0], "table", "")
# Parse field information (i.e., field name, field type, etc.)
fields, schema, rows = [], [], []
for f in dom.getElementsByTagName("field"):
fields.append(_attr(f, "name"))
schema.append(_field(
name = field(_attr(f, "name")),
type = _attr(f, "type") == STRING and STRING(int(_attr(f, "length", 255))) or _attr(f, "type"),
default = _attr(f, "default", None),
index = _attr(f, "index", False),
optional = _attr(f, "optional", True) != "no"
))
# Integer primary key is always auto-increment.
# The id's in the new table will differ from those in the XML.
if _attr(f, "index") == PRIMARY and _attr(f, "type") == INTEGER:
fields.pop()
# Parse row data.
for r in dom.getElementsByTagName("row"):
rows.append({})
for i, f in enumerate(fields):
v = _attr(r, f, None)
if schema[i][1] == BOOLEAN:
rows[-1][f] = (0,1)[v!="no"]
else:
rows[-1][f] = v
# Create table if not exists and insert rows.
if database.connected is False:
database.connect()
if table in database:
raise TableError, "table '%s' already exists" % table
database.create(table, fields=schema)
for r in rows:
database[table].insert(r, commit=False)
database.commit()
return database[table]
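# A short sketch of exporting a table as XML and re-importing it (illustrative;
# "db1", "db2" and "pets_copy" are assumed names):
# >>> data = xml(db1.pets) # or db1.pets.xml
# >>> copy = parse_xml(db2, data, table="pets_copy")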
#### JSON PARSER ###################################################################################
class JSON:
def __init__(self):
self.float = lambda f: ("%.3f" % f).rstrip("0")
def __call__(self, obj, *args, **kwargs):
""" Returns a JSON string from the given data.
The data can be a nested structure of dict, list, str, unicode, bool, int, float and None.
"""
def _str(obj):
if isinstance(obj, type(None)):
return "null"
if isinstance(obj, bool):
return obj and "true" or "false"
if isinstance(obj, (int, long)): # Also validates bools, so those are handled first.
return str(obj)
if isinstance(obj, float):
return str(self.float(obj))
if isinstance(obj, (str, unicode)):
return '"%s"' % obj.replace('"', '\\"')
if isinstance(obj, dict):
return "{%s}" % ", ".join(['"%s": %s' % (k.replace('"', '\\"'), _str(v)) for k, v in obj.items()])
if isinstance(obj, (list, tuple, GeneratorType)):
return "[%s]" % ", ".join(_str(v) for v in obj)
raise TypeError, "can't process %s." % type(obj)
return "%s" % _str(obj)
json = JSON()
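# A quick illustration of the JSON encoder above:
#   json(["Taxi", 2, True, None, {"type": "cat"}])
#   => '["Taxi", 2, true, null, {"type": "cat"}]'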
#### DATASHEET #####################################################################################
#--- CSV -------------------------------------------------------------------------------------------
def csv_header_encode(field, type=STRING):
# csv_header_encode("age", INTEGER) => "age (INTEGER)".
t = re.sub(r"^varchar\(.*?\)", "string", (type or ""))
t = t and " (%s)" % t or ""
s = "%s%s" % (encode_utf8(field or ""), t.upper())
return s
def csv_header_decode(s):
# csv_header_decode("age (INTEGER)") => ("age", INTEGER).
p = r"STRING|INTEGER|FLOAT|TEXT|BLOB|BOOLEAN|DATE|"
p = re.match(r"(.*?) \(("+p+")\)", s)
s = s.endswith(" ()") and s[:-3] or s
return p and (string(p.group(1), default=None), p.group(2).lower()) or (string(s) or None, None)
class CSV(list):
def __init__(self, rows=[], fields=None, **kwargs):
""" A list of lists that can be exported as a comma-separated text file.
"""
fields = fields or kwargs.get("headers", None)
self.__dict__["fields"] = fields # List of (name, type)-tuples, with type = STRING, INTEGER, etc.
self.extend(rows)
def _set_headers(self, v):
self.__dict__["fields"] = v
def _get_headers(self):
return self.__dict__["fields"]
headers = property(_get_headers, _set_headers)
def save(self, path, separator=",", encoder=lambda v: v, headers=False, **kwargs):
""" Exports the table to a unicode text file at the given path.
Rows in the file are separated with a newline.
Columns in a row are separated with the given separator (by default, comma).
For data types other than string, int, float, bool or None, a custom string encoder can be given.
"""
# Optional parameters include all arguments for csv.writer(), see:
# http://docs.python.org/library/csv.html#csv.writer
kwargs.setdefault("delimiter", separator)
kwargs.setdefault("quoting", csv.QUOTE_ALL)
# csv.writer will handle str, int, float and bool:
s = StringIO()
w = csv.writer(s, **kwargs)
if headers and self.fields is not None:
w.writerows([[csv_header_encode(name, type) for name, type in self.fields]])
w.writerows([[encode_utf8(encoder(v)) for v in row] for row in self])
s = s.getvalue()
s = s.strip()
s = re.sub("([^\"]|^)\"None\"", "\\1None", s)
f = open(path, "wb")
f.write(BOM_UTF8)
f.write(s)
f.close()
@classmethod
def load(cls, path, separator=",", decoder=lambda v: v, headers=False, preprocess=lambda s: s):
""" Returns a table from the data in the given text file.
Rows are expected to be separated by a newline.
Columns are expected to be separated by the given separator (by default, comma).
Strings will be converted to int, float, bool, date or None if headers are parsed.
For other data types, a custom string decoder can be given.
"""
# Date objects are saved and loaded as strings, but it is easy to convert these back to dates:
# - set a DATE field type for the column,
# - or do Table.columns[x].map(lambda s: date(s))
data = open(path, "rb").read().replace(BOM_UTF8, "")
data = preprocess(data)
data = "\n".join(line for line in data.splitlines()) # Excel \r => \n
data = StringIO(data)
data = [row for row in csv.reader(data, delimiter=separator)]
if headers:
fields = [csv_header_decode(field) for field in data.pop(0)]
fields += [(None, None)] * (max([0]+[len(row) for row in data]) - len(fields))
else:
fields = []
if not fields:
# Cast fields using the given decoder (by default, all strings + None).
data = [[decoder(decode_utf8(v) if v != "None" else None) for v in row] for row in data]
else:
# Cast fields to their defined field type (STRING, INTEGER, ...)
for i, row in enumerate(data):
for j, v in enumerate(row):
type = fields[j][1]
if row[j] == "None":
row[j] = decoder(None)
elif type is None:
row[j] = decoder(decode_utf8(v))
elif type in (STRING, TEXT):
row[j] = decode_utf8(v)
elif type == INTEGER:
row[j] = int(row[j])
elif type == FLOAT:
row[j] = float(row[j])
elif type == BOOLEAN:
row[j] = bool(row[j])
elif type == DATE:
row[j] = date(row[j])
elif type == BLOB:
row[j] = v
else:
row[j] = decoder(decode_utf8(v))
return cls(rows=data, fields=fields)
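# A minimal sketch of round-tripping a CSV file (illustrative; "pets.csv" is an assumed path):
# >>> data = CSV(rows=[[1, u"Taxi", u"cat"]], fields=[("id", INTEGER), ("name", STRING), ("type", STRING)])
# >>> data.save("pets.csv", headers=True)
# >>> data = CSV.load("pets.csv", headers=True) # values are cast back using the header types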
#--- DATASHEET -------------------------------------------------------------------------------------
class Datasheet(CSV):
def __init__(self, rows=[], fields=None, **kwargs):
""" A matrix of rows and columns, where each row and column can be retrieved as a list.
Values can be any kind of Python object.
"""
# NumPy array, convert to list of int/float/str/bool.
if rows.__class__.__name__ == "ndarray":
rows = rows.tolist()
self.__dict__["_rows"] = DatasheetRows(self)
self.__dict__["_columns"] = DatasheetColumns(self)
self.__dict__["_m"] = 0 # Number of columns per row, see Datasheet.insert().
CSV.__init__(self, rows, fields, **kwargs)
def _get_rows(self):
return self._rows
def _set_rows(self, rows):
# Datasheet.rows property can't be set, except in special case Datasheet.rows += row.
if isinstance(rows, DatasheetRows) and rows._datasheet == self:
self._rows = rows; return
raise AttributeError, "can't set attribute"
rows = property(_get_rows, _set_rows)
def _get_columns(self):
return self._columns
def _set_columns(self, columns):
# Datasheet.columns property can't be set, except in special case Datasheet.columns += column.
if isinstance(columns, DatasheetColumns) and columns._datasheet == self:
self._columns = columns; return
raise AttributeError, "can't set attribute"
columns = cols = property(_get_columns, _set_columns)
def __getattr__(self, k):
""" Columns can be retrieved by field name, e.g., Datasheet.date.
"""
#print "Datasheet.__getattr__", k
if k in self.__dict__:
return self.__dict__[k]
for i, f in enumerate(f[0] for f in self.__dict__["fields"] or []):
if f == k:
return self.__dict__["_columns"][i]
raise AttributeError, "'Datasheet' object has no attribute '%s'" % k
def __setattr__(self, k, v):
""" Columns can be set by field name, e.g., Datasheet.date = [...].
"""
#print "Datasheet.__setattr__", k
if k in self.__dict__:
self.__dict__[k] = v
return
if k == "rows":
self._set_rows(v)
return
if k == "columns":
self._set_columns(v)
return
if k == "headers":
self._set_headers(v)
return
for i, f in enumerate(f[0] for f in self.__dict__["fields"] or []):
if f == k:
self.__dict__["_columns"].__setitem__(i, v); return
raise AttributeError, "'Datasheet' object has no attribute '%s'" % k
def __setitem__(self, index, value):
""" Sets an item or row in the matrix.
For Datasheet[i] = v, sets the row at index i to v.
For Datasheet[i,j] = v, sets the value in row i and column j to v.
"""
if isinstance(index, tuple):
list.__getitem__(self, index[0])[index[1]] = value
elif isinstance(index, int):
self.pop(index)
self.insert(index, value)
else:
raise TypeError, "Datasheet indices must be int or tuple"
def __getitem__(self, index):
""" Returns an item, row or slice from the matrix.
For Datasheet[i], returns the row at the given index.
For Datasheet[i,j], returns the value in row i and column j.
"""
if isinstance(index, (int, slice)):
# Datasheet[i] => row i.
return list.__getitem__(self, index)
if isinstance(index, tuple):
i, j = index
# Datasheet[i,j] => item from column j in row i.
# Datasheet[i,j1:j2] => columns j1-j2 from row i.
if not isinstance(i, slice):
return list.__getitem__(self, i)[j]
# Datasheet[i1:i2,j] => column j from rows i1-i2.
if not isinstance(j, slice):
return [row[j] for row in list.__getitem__(self, i)]
# Datasheet[i1:i2,j1:j2] => Datasheet with columns j1-j2 from rows i1-i2.
return Datasheet(
rows = (row[j] for row in list.__getitem__(self, i)),
fields = self.fields and self.fields[j] or self.fields)
raise TypeError, "Datasheet indices must be int, tuple or slice"
def __getslice__(self, i, j):
# Datasheet[i1:i2] => Datasheet with rows i1-i2.
return Datasheet(
rows = list.__getslice__(self, i, j),
fields = self.fields)
def __delitem__(self, index):
self.pop(index)
# datasheet1 = datasheet2 + datasheet3
# datasheet1 = [[...],[...]] + datasheet2
# datasheet1 += datasheet2
def __add__(self, datasheet):
m = self.copy(); m.extend(datasheet); return m
def __radd__(self, datasheet):
m = Datasheet(datasheet); m.extend(self); return m
def __iadd__(self, datasheet):
self.extend(datasheet); return self
def insert(self, i, row, default=None):
""" Inserts the given row into the matrix.
Missing columns at the end (right) will be filled with the default value.
"""
try:
# Copy the row (fast + safe for generators and DatasheetColumns).
row = [v for v in row]
except:
raise TypeError, "Datasheet.insert(x): x must be list"
list.insert(self, i, row)
m = max((len(self) > 1 and self._m or 0, len(row)))
if len(row) < m:
row.extend([default] * (m-len(row)))
if self._m < m:
# The given row might have more columns than the rows in the matrix.
# Performance takes a hit when these rows have to be expanded:
for row in self:
if len(row) < m:
row.extend([default] * (m-len(row)))
self.__dict__["_m"] = m
def append(self, row, default=None, _m=None):
self.insert(len(self), row, default)
def extend(self, rows, default=None):
for row in rows:
self.insert(len(self), row, default)
def group(self, j, function=FIRST, key=lambda v: v):
""" Returns a datasheet with unique values in column j by grouping rows with the given function.
The function takes a list of column values as input and returns a single value,
e.g. FIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV, CONCATENATE.
The function can also be a list of functions (one for each column).
TypeError will be raised when the function cannot handle the data in a column.
The key argument can be used to map the values in column j, for example:
key=lambda date: date.year to group Date objects by year.
"""
if isinstance(function, tuple):
function = list(function)
if not isinstance(function, list):
function = [function] * self._m
if len(function) < self._m:
function+= [FIRST] * (self._m - len(function))
for i, f in enumerate(function):
if i == j: # Group column j is always FIRST.
f = FIRST
if f == FIRST:
function[i] = lambda a: a[+0]
if f == LAST:
function[i] = lambda a: a[-1]
if f == COUNT:
function[i] = lambda a: len(a)
if f == MAX:
function[i] = lambda a: max(a)
if f == MIN:
function[i] = lambda a: min(a)
if f == SUM:
function[i] = lambda a: _sum([x for x in a if x is not None])
if f == AVG:
function[i] = lambda a: avg([x for x in a if x is not None])
if f == STDEV:
function[i] = lambda a: stdev([x for x in a if x is not None])
if f == CONCATENATE:
function[i] = lambda a: ",".join(decode_utf8(x) for x in a if x is not None)
J = j
# Map unique values in column j to a list of rows that contain this value.
g = {}; [g.setdefault(key(v), []).append(i) for i, v in enumerate(self.columns[j])]
# Map unique values in column j to a sort index in the new, grouped list.
o = [(g[v][0], v) for v in g]
o = dict([(v, i) for i, (ii,v) in enumerate(sorted(o))])
# Create a list of rows with unique values in column j,
# applying the group function to the other columns.
u = [None] * len(o)
for v in g:
# List the column values for each group row.
u[o[v]] = [[list.__getitem__(self, i)[j] for i in g[v]] for j in range(self._m)]
# Apply the group function to each row, except the unique value in column j.
u[o[v]] = [function[j](column) for j, column in enumerate(u[o[v]])]
u[o[v]][J] = v#list.__getitem__(self, i)[J]
return Datasheet(rows=u)
def map(self, function=lambda item: item):
""" Applies the given function to each item in the matrix.
"""
for i, row in enumerate(self):
for j, item in enumerate(row):
row[j] = function(item)
def slice(self, i, j, n, m):
""" Returns a new Datasheet starting at row i and column j and spanning n rows and m columns.
"""
return Datasheet(rows=[list.__getitem__(self, i)[j:j+m] for i in range(i, i+n)])
def copy(self, rows=ALL, columns=ALL):
""" Returns a new Datasheet from a selective list of row and/or column indices.
"""
if rows == ALL and columns == ALL:
return Datasheet(rows=self)
if rows == ALL:
return Datasheet(rows=zip(*(self.columns[j] for j in columns)))
if columns == ALL:
return Datasheet(rows=(self.rows[i] for i in rows))
z = zip(*(self.columns[j] for j in columns))
return Datasheet(rows=(z[i] for i in rows))
@property
def json(self):
""" Returns a JSON-string, as a list of dictionaries (if fields are defined) or as a list of lists.
This is useful for sending a Datasheet to JavaScript, for example.
"""
if self.fields is not None:
return json([dict((f[0], row[i]) for i, f in enumerate(self.fields)) for row in self])
else:
return json(self)
@property
def array(self):
""" Returns a NumPy array.
Arrays must have elements of the same type, and rows of equal size.
"""
import numpy
return numpy.array(self)
def flip(datasheet):
""" Returns a new datasheet with rows for columns and columns for rows.
"""
return Datasheet(rows=datasheet.columns)
#--- DATASHEET ROWS --------------------------------------------------------------------------------
# Datasheet.rows mimics the operations on Datasheet:
class DatasheetRows(list):
def __init__(self, datasheet):
self._datasheet = datasheet
def __setitem__(self, i, row):
self._datasheet.pop(i)
self._datasheet.insert(i, row)
def __getitem__(self, i):
return list.__getitem__(self._datasheet, i)
def __delitem__(self, i):
self.pop(i)
def __len__(self):
return len(self._datasheet)
def __iter__(self):
for i in xrange(len(self)): yield list.__getitem__(self._datasheet, i)
def __repr__(self):
return repr(self._datasheet)
def __add__(self, row):
raise TypeError, "unsupported operand type(s) for +: 'Datasheet.rows' and '%s'" % row.__class__.__name__
def __iadd__(self, row):
self.append(row); return self
def __eq__(self, rows):
return self._datasheet.__eq__(rows)
def __ne__(self, rows):
return self._datasheet.__ne__(rows)
def insert(self, i, row, default=None):
self._datasheet.insert(i, row, default)
def append(self, row, default=None):
self._datasheet.append(row, default)
def extend(self, rows, default=None):
self._datasheet.extend(rows, default)
def remove(self, row):
self._datasheet.remove(row)
def pop(self, i):
return self._datasheet.pop(i)
def count(self, row):
return self._datasheet.count(row)
def index(self, row):
return self._datasheet.index(row)
def sort(self, cmp=None, key=None, reverse=False):
self._datasheet.sort(cmp, key, reverse)
def reverse(self):
self._datasheet.reverse()
def swap(self, i1, i2):
self[i1], self[i2] = self[i2], self[i1]
#--- DATASHEET COLUMNS -----------------------------------------------------------------------------
class DatasheetColumns(list):
def __init__(self, datasheet):
self._datasheet = datasheet
self._cache = {} # Keep a reference to DatasheetColumn objects generated with Datasheet.columns[j].
# This way we can unlink them when they are deleted.
def __setitem__(self, j, column):
if self._datasheet.fields is not None and j < len(self._datasheet.fields):
# Preserve the column header if it exists.
f = self._datasheet.fields[j]
else:
f = None
self.pop(j)
self.insert(j, column, field=f)
def __getitem__(self, j):
if j < 0: j = j % len(self) # DatasheetColumns[-1]
if j >= len(self):
raise IndexError, "list index out of range"
return self._cache.setdefault(j, DatasheetColumn(self._datasheet, j))
def __delitem__(self, j):
self.pop(j)
def __len__(self):
return len(self._datasheet) > 0 and len(self._datasheet[0]) or 0
def __iter__(self):
for i in xrange(len(self)): yield self.__getitem__(i)
def __repr__(self):
return repr(list(iter(self)))
def __add__(self, column):
raise TypeError, "unsupported operand type(s) for +: 'Datasheet.columns' and '%s'" % column.__class__.__name__
def __iadd__(self, column):
self.append(column); return self
def __eq__(self, columns):
return list(self) == columns
def __ne__(self, columns):
        return not self.__eq__(columns)
def insert(self, j, column, default=None, field=None):
""" Inserts the given column into the matrix.
Missing rows at the end (bottom) will be filled with the default value.
"""
try: column = [v for v in column]
except:
raise TypeError, "Datasheet.columns.insert(x): x must be list"
column = column + [default] * (len(self._datasheet) - len(column))
if len(column) > len(self._datasheet):
self._datasheet.extend([[None]] * (len(column)-len(self._datasheet)))
for i, row in enumerate(self._datasheet):
row.insert(j, column[i])
self._datasheet.__dict__["_m"] += 1 # Increase column count.
# Add a new header.
if self._datasheet.fields is not None:
self._datasheet.fields += [(None, None)] * (len(self) - len(self._datasheet.fields) - 1)
self._datasheet.fields.insert(j, field or (None, None))
def append(self, column, default=None, field=None):
self.insert(len(self), column, default, field)
def extend(self, columns, default=None, fields=[]):
for j, column in enumerate(columns):
self.insert(len(self), column, default, j<len(fields) and fields[j] or None)
def remove(self, column):
if isinstance(column, DatasheetColumn) and column._datasheet == self._datasheet:
self.pop(column._j); return
raise ValueError, "list.remove(x): x not in list"
def pop(self, j):
column = list(self[j]) # Return a list copy.
for row in self._datasheet:
row.pop(j)
# At one point a DatasheetColumn object was created with Datasheet.columns[j].
# It might still be in use somewhere, so we unlink it from the datasheet:
self._cache[j]._datasheet = Datasheet(rows=[[v] for v in column])
self._cache[j]._j = 0
self._cache.pop(j)
for k in range(j+1, len(self)+1):
if k in self._cache:
# Shift the DatasheetColumn objects on the right to the left.
self._cache[k-1] = self._cache.pop(k)
self._cache[k-1]._j = k-1
self._datasheet.__dict__["_m"] -= 1 # Decrease column count.
# Remove the header.
if self._datasheet.fields is not None:
self._datasheet.fields.pop(j)
return column
def count(self, column):
return len([True for c in self if c == column])
def index(self, column):
if isinstance(column, DatasheetColumn) and column._datasheet == self._datasheet:
return column._j
return list(self).index(column)
def sort(self, cmp=None, key=None, reverse=False, order=None):
# This makes most sense if the order in which columns should appear is supplied.
o = order and order or _order(self, cmp, key, reverse)
for i, row in enumerate(self._datasheet):
# The main difficulty is modifying each row in-place,
# since other variables might be referring to it.
r=list(row); [row.__setitem__(i2, r[i1]) for i2, i1 in enumerate(o)]
# Reorder the datasheet headers.
if self._datasheet.fields is not None:
self._datasheet.fields = [self._datasheet.fields[i] for i in o]
def swap(self, j1, j2):
self[j1], self[j2] = self[j2], self[j1]
# Reorder the datasheet headers.
if self._datasheet.fields is not None:
self._datasheet.fields[j1], self._datasheet.fields[j2] = (
self._datasheet.fields[j2],
self._datasheet.fields[j1])
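# Example (illustrative, not part of the original module): Datasheet.columns
# behaves like a list of live column views:
#     ds.columns[0]                                         # DatasheetColumn for the first column
#     ds.columns.append(["x", "y"], field=("tag", STRING))  # add a new column (and header)
#     ds.columns.swap(0, 1)                                 # exchange two columns, headers included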
#--- DATASHEET COLUMN ------------------------------------------------------------------------------
class DatasheetColumn(list):
def __init__(self, datasheet, j):
""" A dynamic column in a Datasheet.
        If the actual column is deleted with Datasheet.columns.remove() or Datasheet.columns.pop(),
the DatasheetColumn object will be orphaned (i.e., it is no longer part of the table).
"""
self._datasheet = datasheet
self._j = j
def __getitem__(self, i):
return list.__getitem__(self._datasheet, i)[self._j]
def __setitem__(self, i, value):
list.__getitem__(self._datasheet, i)[self._j] = value
def __len__(self):
return len(self._datasheet)
def __iter__(self): # Can be put more simply but optimized for performance:
for i in xrange(len(self)): yield list.__getitem__(self._datasheet, i)[self._j]
def __repr__(self):
return repr(list(iter(self)))
def __gt__(self, column):
return list(self) > list(column)
def __lt__(self, column):
return list(self) < list(column)
def __ge__(self, column):
return list(self) >= list(column)
def __le__(self, column):
return list(self) <= list(column)
def __eq__(self, column):
return list(self) == column
def __ne__(self, column):
return not self.__eq__(column)
def __add__(self, value):
raise TypeError, "unsupported operand type(s) for +: 'Datasheet.columns[x]' and '%s'" % value.__class__.__name__
def __iadd__(self, value):
self.append(value); return self
def __contains__(self, value):
for v in self:
if v == value: return True
return False
def count(self, value):
return len([True for v in self if v == value])
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError, "list.index(x): x not in list"
def remove(self, value):
""" Removes the matrix row that has the given value in this column.
"""
for i, v in enumerate(self):
if v == value:
self._datasheet.pop(i); return
raise ValueError, "list.remove(x): x not in list"
def pop(self, i):
""" Removes the entire row from the matrix and returns the value at the given index.
"""
row = self._datasheet.pop(i); return row[self._j]
def sort(self, cmp=None, key=None, reverse=False):
""" Sorts the rows in the matrix according to the values in this column,
e.g. clicking ascending / descending on a column header in a datasheet viewer.
"""
o = order(list(self), cmp, key, reverse)
# Modify the table in place, more than one variable may be referencing it:
r=list(self._datasheet); [self._datasheet.__setitem__(i2, r[i1]) for i2, i1 in enumerate(o)]
def insert(self, i, value, default=None):
""" Inserts the given value in the column.
This will create a new row in the matrix, where other columns are set to the default.
"""
self._datasheet.insert(i, [default]*self._j + [value] + [default]*(len(self._datasheet)-self._j-1))
def append(self, value, default=None):
self.insert(len(self), value, default)
def extend(self, values, default=None):
for value in values:
self.insert(len(self), value, default)
def map(self, function=lambda value: value):
""" Applies the given function to each value in the column.
"""
for j, value in enumerate(self):
self[j] = function(value)
def swap(self, i1, i2):
self._datasheet.swap(i1, i2)
#---------------------------------------------------------------------------------------------------
_UID = 0
def uid():
global _UID; _UID+=1; return _UID
def truncate(string, length=100):
""" Returns a (head, tail)-tuple, where the head string length is less than the given length.
Preferably the string is split at a space, otherwise a hyphen ("-") is injected.
"""
if len(string) <= length:
return string, ""
n, words = 0, string.split(" ")
for i, w in enumerate(words):
if n + len(w) > length:
break
n += len(w) + 1
if i == 0 and len(w) > length:
return ( w[:length-1] + "-",
(w[length-1:] + " " + " ".join(words[1:])).strip())
return (" ".join(words[:i]),
" ".join(words[i:]))
_truncate = truncate
def pprint(datasheet, truncate=40, padding=" ", fill="."):
""" Prints a string where the rows in the datasheet are organized in outlined columns.
"""
# Calculate the width of each column, based on the longest field in each column.
# Long fields can be split across different lines, so we need to check each line.
w = [0 for column in datasheet.columns]
R = []
for i, row in enumerate(datasheet.rows):
fields = []
for j, v in enumerate(row):
# Cast each field in the row to a string.
# Strings that span beyond the maximum column width are wrapped.
# Thus, each "field" in the row is a list of lines.
head, tail = _truncate(decode_utf8(v), truncate)
lines = []
lines.append(head)
w[j] = max(w[j], len(head))
while len(tail) > 0:
head, tail = _truncate(tail, truncate)
lines.append(head)
w[j] = max(w[j], len(head))
fields.append(lines)
R.append(fields)
for i, fields in enumerate(R):
# Add empty lines to each field so they are of equal height.
n = max([len(lines) for lines in fields])
fields = [lines+[""] * (n-len(lines)) for lines in fields]
# Print the row line per line, justifying the fields with spaces.
for k in range(n):
for j, lines in enumerate(fields):
s = lines[k]
s += ((k==0 or len(lines[k]) > 0) and fill or " ") * (w[j] - len(lines[k]))
s += padding
print s,
print
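# Minimal usage sketch (illustrative only, not part of the original module;
# the sample values are made up):
if __name__ == "__main__":
    ds = Datasheet(rows=[["a", "1"], ["a", "2"], ["b", "5"]])
    ds.rows.append(["c", "7"])  # append a new row
    pprint(ds)                  # print the rows in outlined columns
    pprint(flip(ds))            # rows become columns and vice versa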
| 42.256868 | 139 | 0.559525 |
4a25549586edecb650f1b129d71d0c890956a683 | 1,565 | py | Python | quantum/common/topics.py | ericwanghp/quantum | 1c0d543552c38a5eac6dd08580b73725c5757876 | [
"Apache-2.0"
] | 1 | 2021-04-18T15:23:19.000Z | 2021-04-18T15:23:19.000Z | quantum/common/topics.py | ericwanghp/quantum | 1c0d543552c38a5eac6dd08580b73725c5757876 | [
"Apache-2.0"
] | null | null | null | quantum/common/topics.py | ericwanghp/quantum | 1c0d543552c38a5eac6dd08580b73725c5757876 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NETWORK = 'network'
SUBNET = 'subnet'
PORT = 'port'
SECURITY_GROUP = 'security_group'
CREATE = 'create'
DELETE = 'delete'
UPDATE = 'update'
AGENT = 'q-agent-notifier'
PLUGIN = 'q-plugin'
DHCP = 'q-dhcp-notifer'
LOADBALANCER_PLUGIN = 'q-loadbalancer-plugin'
L3_AGENT = 'l3_agent'
DHCP_AGENT = 'dhcp_agent'
LOADBALANCER_AGENT = 'loadbalancer_agent'
def get_topic_name(prefix, table, operation):
"""Create a topic name.
The topic name needs to be synced between the agent and the
plugin. The plugin will send a fanout message to all of the
listening agents so that the agents in turn can perform their
updates accordingly.
:param prefix: Common prefix for the plugin/agent message queues.
:param table: The table in question (NETWORK, SUBNET, PORT).
:param operation: The operation that invokes notification (CREATE,
DELETE, UPDATE)
:returns: The topic name.
"""
return '%s-%s-%s' % (prefix, table, operation)
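# Example (illustrative, not part of the original module): the fanout topic used
# to notify agents that a port was created would be
# get_topic_name(AGENT, PORT, CREATE), i.e. 'q-agent-notifier-port-create'.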
| 31.3 | 70 | 0.720767 |
4a2554c67cdf635ffce437accb9865ade0402acd | 1,912 | py | Python | taivasnet/train-linear.py | ikanher/numpy-MNIST | f41daa6181a04d82667ba4e3f694afcd4b291845 | [
"MIT"
] | null | null | null | taivasnet/train-linear.py | ikanher/numpy-MNIST | f41daa6181a04d82667ba4e3f694afcd4b291845 | [
"MIT"
] | null | null | null | taivasnet/train-linear.py | ikanher/numpy-MNIST | f41daa6181a04d82667ba4e3f694afcd4b291845 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Trains a linear model to predict y = 2x
"""
import argparse
from taivasnet.dataloaders import RegressionDataLoader
from taivasnet.networks import NeuralNet
from taivasnet.optimizers import SGD
from taivasnet.models import LinearModel
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=50, type=int, help='Number of training rounds (50)')
parser.add_argument('--lr', default=0.0000001, type=float, help='Learning rate (0.0000001)')
parser.add_argument('--batch_size', default=256, type=int, help='Mini-batch size (256)')
parser.add_argument('--load',
dest='load_weights',
action='store_true',
help='Load saved weights from file')
parser.add_argument('--save',
dest='save_weights',
action='store_true',
help='Save weights to file after training')
parser.add_argument('--weights_fname',
default='../data/saved_weights_linear.dat',
help='Path and filename for saving and loading the weights (../data/saved_weights_linear.dat)')
args = parser.parse_args()
weights_fname = args.weights_fname
# create the model
model = LinearModel()
net = NeuralNet(model=model)
if args.load_weights:
print('- Loading weights from:', weights_fname)
net.load_weights(weights_fname)
# create the optimizer
optimizer = SGD(net=net, dataloader=RegressionDataLoader(), batch_size=args.batch_size)
# fit the model
    print('- Training model for', args.epochs, 'epochs, with learning rate', args.lr)
optimizer.fit(n_epochs=args.epochs, learning_rate=args.lr)
if args.save_weights:
print("- Saving weights to:", weights_fname)
net.save_weights(weights_fname)
| 35.407407 | 119 | 0.648536 |
4a25550b22bbbc7c180dec8d64d346b29dc53e8c | 3,099 | py | Python | test/test_identity_resource.py | passbase/passbase-python | 9d5b9cf21b38c2a50fe3755084ef8291d9e2d4d9 | [
"MIT"
] | 8 | 2020-09-09T14:30:46.000Z | 2020-10-19T14:09:00.000Z | test/test_identity_resource.py | passbase/passbase-python | 9d5b9cf21b38c2a50fe3755084ef8291d9e2d4d9 | [
"MIT"
] | null | null | null | test/test_identity_resource.py | passbase/passbase-python | 9d5b9cf21b38c2a50fe3755084ef8291d9e2d4d9 | [
"MIT"
] | 1 | 2021-04-23T21:05:19.000Z | 2021-04-23T21:05:19.000Z | # coding: utf-8
"""
Verification API
# Introduction <span class=\"subtext\"> Welcome to the Passbase Verifications API docs. This documentation will help you understand our models and the Verification API with its endpoints. Based on this you can build your own system (i.e. verification) and hook it up to Passbase. In case of feedback or questions you can reach us under this email address: [[email protected]](mailto:[email protected]). </span> A User submits a video selfie and valid identifying __Resources__ during a __Verification__ guided by the Passbase client-side integration. Once all the necessary __Resources__ are submitted, __Data points__ are extracted, digitized, and authenticated. These Data points then becomes part of the User's __Identity__. The User then consents to share __Resources__ and/or __Data points__ from their Identity with you. This information is passed to you and can be used to make decisions about a User (e.g. activate account). This table below explains our terminology further. | Term | Description | |-----------------------------------------|-------------| | [Identity](#tag/identity_model) | A set of Data points and Resources related to and owned by one single User. This data can be accessed by you through a Verification. | | Data points | Any data about a User extracted from a Resource (E.g. Passport Number, or Age). | | [Resource](#tag/resource_model) | A source document used to generate the Data points for a User (E.g. Passport). | | [User](#tag/user_model) | The owner of an email address associated with an Identity. | | Verification | A transaction through which a User consents to share Data points with you. If the Data points you request are not already available in the User's Identity, the Passbase client will ask the User to submit the necessary Resource required to extract them. | | Re-authentication (login) | A transaction through which a User can certify the ownership of Personal data previously shared through an Authentication. | # Authentication <span class=\"subtext\"> There are two forms of authentication for the API: <br/>• API Key <br/>• Bearer JWT Token </span> # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import passbase
from passbase.models.identity_resource import IdentityResource # noqa: E501
from passbase.rest import ApiException
class TestIdentityResource(unittest.TestCase):
"""IdentityResource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIdentityResource(self):
"""Test IdentityResource"""
# FIXME: construct object with mandatory attributes with example values
# model = passbase.models.identity_resource.IdentityResource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 77.475 | 2,289 | 0.708616 |
4a2556315519d705c70ae91714827e8267823042 | 6,815 | py | Python | p2_continuous-control/Reacher_MultiAgent_Control_Exercise/scripts/model.py | derektan95/drl-algorithms-collection | bd4bcf82d28027c84e919df371ed20576e1c4379 | [
"MIT"
] | null | null | null | p2_continuous-control/Reacher_MultiAgent_Control_Exercise/scripts/model.py | derektan95/drl-algorithms-collection | bd4bcf82d28027c84e919df371ed20576e1c4379 | [
"MIT"
] | null | null | null | p2_continuous-control/Reacher_MultiAgent_Control_Exercise/scripts/model.py | derektan95/drl-algorithms-collection | bd4bcf82d28027c84e919df371ed20576e1c4379 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=256, fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
# Needed to inherit functionalities from nn.Module
# super(Actor, self).__init__()
super().__init__()
self.seed = torch.manual_seed(seed)
self.bn0 = nn.BatchNorm1d(state_size)
self.fc1 = nn.Linear(state_size, fc1_units)
self.bn1 = nn.BatchNorm1d(fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.bn2 = nn.BatchNorm1d(fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
# Add 1 dimension @dim=0 for batchnorm to work properly
if state.dim() == 1:
state = state.unsqueeze(0)
x = self.bn0(state)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.fc2(x)))
x = torch.tanh(self.fc3(x)) # F.tanh is deperecated
return x.squeeze() # Remove extra dimensions to output action list
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, num_atoms, fc1_units=256, fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
# Needed to inherit functionalities from nn.Module
# super(Critic, self).__init__()
super().__init__()
self.seed = torch.manual_seed(seed)
self.bn0 = nn.BatchNorm1d(state_size)
self.fc1 = nn.Linear(state_size, fc1_units)
self.bn1 = nn.BatchNorm1d(fc1_units)
self.fc2 = nn.Linear(fc1_units+action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, num_atoms)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state, action, log=False):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
x = self.bn0(state)
x = F.relu(self.bn1(self.fc1(x)))
x = torch.cat((x, action), dim=1)
x = F.relu(self.fc2(x))
        # Only calculate the type of softmax needed by the forward call, to save
# a modest amount of calculation across 1000s of timesteps.
if log:
return F.log_softmax(self.fc3(x), dim=-1)
else:
return F.softmax(self.fc3(x), dim=-1)
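# Example shapes (illustrative; the sizes below are assumptions, not taken from
# this repository):
#     actor = Actor(state_size=33, action_size=4, seed=0)
#     critic = Critic(state_size=33, action_size=4, seed=0, num_atoms=51)
#     states = torch.rand(64, 33)          # batch of 64 states
#     actions = actor(states)              # -> (64, 4), values in [-1, 1]
#     probs = critic(states, actions)      # -> (64, 51) distribution over atoms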
####################################################################################################
# class Actor(nn.Module):
# """Actor (Policy) Model."""
# def __init__(self, state_size, action_size, seed, fc1_units=400, fc2_units=300):
# """Initialize parameters and build model.
# Params
# ======
# state_size (int): Dimension of each state
# action_size (int): Dimension of each action
# seed (int): Random seed
# fc1_units (int): Number of nodes in first hidden layer
# fc2_units (int): Number of nodes in second hidden layer
# """
# # Needed to inherit functionalities from nn.Module
# # super(Actor, self).__init__()
# super().__init__()
# self.seed = torch.manual_seed(seed)
# self.fc1 = nn.Linear(state_size, fc1_units)
# self.fc2 = nn.Linear(fc1_units, fc2_units)
# self.fc3 = nn.Linear(fc2_units, action_size)
# self.reset_parameters()
# def reset_parameters(self):
# self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
# self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
# self.fc3.weight.data.uniform_(-3e-3, 3e-3)
# def forward(self, state):
# """Build an actor (policy) network that maps states -> actions."""
# x = F.relu(self.fc1(state))
# x = F.relu(self.fc2(x))
# return (self.fc3(x)).tanh() # F.tanh is deperecated
# class Critic(nn.Module):
# """Critic (Value) Model."""
# def __init__(self, state_size, action_size, seed, num_atoms, fc1_units=400, fc2_units=300):
# """Initialize parameters and build model.
# Params
# ======
# state_size (int): Dimension of each state
# action_size (int): Dimension of each action
# seed (int): Random seed
# fc1_units (int): Number of nodes in the first hidden layer
# fc2_units (int): Number of nodes in the second hidden layer
# """
# # Needed to inherit functionalities from nn.Module
# # super(Critic, self).__init__()
# super().__init__()
# self.seed = torch.manual_seed(seed)
# self.fc1 = nn.Linear(state_size, fc1_units)
# self.fc2 = nn.Linear(fc1_units+action_size, fc2_units)
# self.fc3 = nn.Linear(fc2_units, num_atoms)
# self.reset_parameters()
# def reset_parameters(self):
# self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
# self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
# self.fc3.weight.data.uniform_(-3e-3, 3e-3)
# def forward(self, state, action, log=False):
# """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
# x = F.relu(self.fc1(state))
# x = torch.cat((x, action), dim=1)
# x = F.relu(self.fc2(x))
# logits = self.fc3(x)
# if log:
# return F.log_softmax(logits, dim=-1)
# else:
# return F.softmax(logits, dim=-1) | 38.721591 | 100 | 0.587087 |
4a2556bb224fa27a6eab98083eac0a94d01937e5 | 443 | py | Python | zip_file_example/append_file/main.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | zip_file_example/append_file/main.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | zip_file_example/append_file/main.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Add a file to the archive
import zipfile
with zipfile.ZipFile('out.zip', mode='w', compression=zipfile.ZIP_DEFLATED) as f:
f.write('file_1.txt')
f.write('sub_dir/file_1.1.txt', 'file_1.1.txt')
f.write('sub_dir/file_1.1.txt') # out.zip/sub_dir/file_1.1.txt
f.write('sub_dir/file_1.1.txt', 'new_sub_dir/file_1.1.txt') # out.zip/new_sub_dir/file_1.1.txt
| 29.533333 | 99 | 0.683973 |
4a2556cc81a750c8525bf3705cda340e4e4be0fc | 14,131 | py | Python | python/level1_single_api/9_amct/amct_caffe/resnet50/src/ResNet50_sample.py | Ascend/samples | 5e060ddf8c502cf0e248ecbe1c8986e95351cbbd | [
"Apache-2.0"
] | 25 | 2020-11-20T09:01:35.000Z | 2022-03-29T10:35:38.000Z | python/level1_single_api/9_amct/amct_caffe/resnet50/src/ResNet50_sample.py | Ascend/samples | 5e060ddf8c502cf0e248ecbe1c8986e95351cbbd | [
"Apache-2.0"
] | 5 | 2021-02-28T20:49:37.000Z | 2022-03-04T21:50:27.000Z | python/level1_single_api/9_amct/amct_caffe/resnet50/src/ResNet50_sample.py | Ascend/samples | 5e060ddf8c502cf0e248ecbe1c8986e95351cbbd | [
"Apache-2.0"
] | 16 | 2020-12-06T07:26:13.000Z | 2022-03-01T07:51:55.000Z | #!/usr/bin/python3 # pylint: disable=C0103
# -*- coding: UTF-8 -*-
"""
Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use
this file except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
AMCT_CAFFE sample of resnet-50 model
"""
import os
import sys
import argparse
from pathlib import Path
import numpy as np
import cv2 # pylint: disable=E0401
MODEL_INPUT_BLOB_NAME = 'data'
MODEL_OUTPUT_BLOB_NAME = 'prob'
PATH = os.path.split(os.path.realpath(__file__))[0]
PATH = os.path.realpath(os.path.join(PATH, '..'))
TMP = os.path.join(PATH, 'tmp')
RESULT = os.path.join(PATH, 'results/calibration_results')
DATA_DIR = os.path.join(PATH, 'data/images')
LABEL_FILE = os.path.join(DATA_DIR, 'image_label.txt')
BATCH_SIZE = 32
# Configuration for ImageNet LMDB dataset
SCALE = 1
CROP_SIZE = 224
MEAN_FILE = None
MEAN_VALUE = [103.894, 116.555, 122.578]
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='ResNet50 demo')
parser.add_argument('--cpu',
dest='cpu_mode',
help='Use CPU mode.',
action='store_true')
parser.add_argument('--gpu',
dest='gpu_id',
help='GPU device id to use.',
default=None,
type=int)
parser.add_argument('--model_file',
dest='model_file',
help='Specify the model file of caffe model.',
default=None,
type=str)
parser.add_argument('--weights_file',
dest='weights_file',
help='Specify the weights file of caffe model.',
default=None,
type=str)
parser.add_argument('--caffe_dir',
dest='caffe_dir',
help='Specify the dir of caffe.',
default=None,
type=str)
parser.add_argument('--pre_test',
dest='pre_test',
help='Do test with amct caffe calibration or not.',
action='store_true')
parser.add_argument('--cfg_define',
dest='cfg_define',
help='The simple configure define file.',
default=None,
type=str)
parser.add_argument('--benchmark',
dest='benchmark',
help='Do benchmark test.',
action='store_true')
parser.add_argument('--iterations',
dest='iterations',
help='Specify iterations of test.',
default=5,
type=int)
parser.add_argument('--dataset',
dest='dataset',
help='The path of benchmark dataset.',
default=None,
type=str)
return parser.parse_args()
def args_check_caffe_dir(args):
"""check args of caffe dir
"""
if args.caffe_dir is None:
raise RuntimeError('Must specify a caffe framework dir')
caffe_dir = os.path.realpath(args.caffe_dir)
if not Path(caffe_dir).exists():
raise RuntimeError('Must specify a caffe framework dir')
caffe_exec_bin = os.path.join(caffe_dir, 'build/tools/caffe')
if not Path(caffe_exec_bin).exists():
raise RuntimeError('Must make caffe before execute demo')
pycaffe_file = os.path.join(caffe_dir, 'python/caffe/pycaffe.py')
if not Path(pycaffe_file).exists():
raise RuntimeError('Must make pycaffe before execute demo')
def args_check(args):
"""Check resnet-50 sample args"""
# --weights_file
if args.weights_file is None:
raise RuntimeError('Must specify a caffe caffemodel file!')
resnet_50_weights_file = os.path.realpath(args.weights_file)
if not Path(resnet_50_weights_file).exists():
raise RuntimeError('Must specify a caffe caffemodel file!')
args.weights_file = resnet_50_weights_file
# --model_file
if args.model_file is None:
raise RuntimeError('Must specify a caffe deploy prototxt file!')
resnet_50_model_file = os.path.realpath(args.model_file)
if not Path(resnet_50_model_file).exists():
raise RuntimeError('Must specify a caffe deploy prototxt file!')
args.model_file = resnet_50_model_file
# --iterations
if not args.benchmark and args.iterations > 5:
raise RuntimeError('Max iterations of sample dataset is 5')
if args.cpu_mode and args.gpu_id is not None:
raise RuntimeError(
'Cannot run in CPU mode and GPU mode at same time.')
# --dataset
if args.benchmark:
if args.dataset is None:
raise RuntimeError(
'Must specify a dataset path using in benchmark.')
args.dataset = os.path.realpath(args.dataset)
if not os.access(args.dataset, os.F_OK):
            raise RuntimeError('Must specify a valid dataset path.')
def add_path(path):
"""Add path to env"""
if path not in sys.path:
sys.path.insert(0, path)
QUANT_ARGS = parse_args()
args_check_caffe_dir(QUANT_ARGS)
add_path(os.path.join(QUANT_ARGS.caffe_dir, 'python'))
import caffe # pylint: disable=E0401, C0413
import amct_caffe as amct # pylint: disable=E0401, C0413
if QUANT_ARGS.benchmark:
import datasets
def mkdir(name):
"""make dir"""
if not os.access(name, os.F_OK):
os.makedirs(name)
def get_blobs_from_im(data_dir, imgs, batch_size):
"""Read image files to blobs [3, 256, 256]"""
if batch_size != len(imgs):
raise RuntimeError(
'batch_size:{} != len(imgs):{}'.format(batch_size, len(imgs)))
blobs_data = np.zeros((batch_size, 3, 256, 256), np.uint8)
for index in range(batch_size):
im_file = os.path.join(data_dir, imgs[index])
im_data = cv2.imread(im_file)
im_data = cv2.resize(
im_data, (256, 256), interpolation=cv2.INTER_CUBIC)
im_data = im_data.swapaxes(0, 2)
im_data = im_data.swapaxes(1, 2)
blobs_data[index, :, :, :] = im_data
return blobs_data
def img_preprocess(blobs_data, mean_value, crop_size):
"""Do image data pre-process"""
# crop image[height, width] to [crop_size, crop_size]
height = blobs_data.shape[2]
width = blobs_data.shape[3]
h_off = int((height - crop_size) / 2)
w_off = int((width - crop_size) / 2)
crop_data = blobs_data[:, :, h_off:(h_off + crop_size), \
w_off:(w_off + crop_size)]
# trans uint8 image data to float
crop_data = crop_data.astype(np.float32, copy=False)
# do channel-wise reduce mean value
for channel in range(crop_data.shape[1]):
crop_data[:, channel, :, :] -= mean_value[channel]
return crop_data
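# Illustrative arithmetic: for a (N, 3, 256, 256) blob and crop_size=224,
# h_off = w_off = (256 - 224) / 2 = 16, so the central 224x224 patch is kept
# before the per-channel mean subtraction.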
def img_postprocess(probs, labels):
"""Do image post-process"""
# calculate top1 and top5 accuracy
top1_get = 0
top5_get = 0
prob_size = probs.shape[1]
for index, label in enumerate(labels):
top5_record = (probs[index, :].argsort())[prob_size - 5:prob_size]
if label == top5_record[-1]:
top1_get += 1
top5_get += 1
elif label in top5_record:
top5_get += 1
return float(top1_get) / len(labels), float(top5_get) / len(labels)
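# Illustrative check (values made up; with only 3 classes the "top-5" slice
# degenerates to the 2 highest scores):
#     probs = np.array([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]])
#     img_postprocess(probs, [2, 1])   # -> (0.5, 1.0)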
def get_labels_from_txt():
"""Read all images' name and label from label_file"""
images = []
labels = []
with open(LABEL_FILE, 'r') as file_open:
lines = file_open.readlines()
for line in lines:
images.append(line.split(' ')[0])
labels.append(int(line.split(' ')[1]))
return images, labels
def run_caffe_model(model_file, weights_file, iterations):
"""run caffe model forward"""
net = caffe.Net(model_file, weights_file, caffe.TEST)
top1_total = 0
top5_total = 0
images, labels = get_labels_from_txt()
for iter_num in range(iterations):
blobs_data = get_blobs_from_im(
DATA_DIR,
images[iter_num * BATCH_SIZE:(iter_num + 1) * BATCH_SIZE],
BATCH_SIZE)
blobs_data = img_preprocess(blobs_data, [104, 117, 123], 224)
forward_kwargs = {MODEL_INPUT_BLOB_NAME: blobs_data}
blobs_out = net.forward(**forward_kwargs)
top1, top5 = img_postprocess(
blobs_out[MODEL_OUTPUT_BLOB_NAME],
labels[iter_num * BATCH_SIZE:(iter_num + 1) * BATCH_SIZE])
top1_total += top1
top5_total += top5
print('****************iteration:{}*****************'.format(iter_num))
print('top1_acc:{}'.format(top1))
print('top5_acc:{}'.format(top5))
print('******final top1:{}'.format(top1_total / iterations))
print('******final top5:{}'.format(top5_total / iterations))
def do_benchmark_test(args, model_file, weights_file, iterations=1000):
""" Calc the accuracy on the lmdb dataset"""
net = caffe.Net(model_file, weights_file, caffe.TEST)
top1_total = 0
top5_total = 0
lmdb_data = datasets.LMDBData(args.dataset)
lmdb_data.set_scale(SCALE)
lmdb_data.set_crop_size(CROP_SIZE)
if MEAN_FILE is not None:
lmdb_data.set_mean_file(MEAN_FILE)
else:
lmdb_data.set_mean_value(MEAN_VALUE)
for index in range(iterations):
data, labels = lmdb_data.get_blobs(BATCH_SIZE)
forward_kwargs = {MODEL_INPUT_BLOB_NAME: data}
blobs_out = net.forward(**forward_kwargs)
top1, top5 = img_postprocess(blobs_out[MODEL_OUTPUT_BLOB_NAME], labels)
top1_total += top1
top5_total += top5
print('*****************iteration:{}******************'.format(index))
print('top1_acc:{}'.format(top1))
print('top5_acc:{}'.format(top5))
print('******final top1:{}'.format(top1_total / iterations))
print('******final top5:{}'.format(top5_total / iterations))
return top1_total / iterations, top5_total / iterations
def main(args):
"""Main function"""
args_check(args)
mkdir(TMP)
mkdir(RESULT)
    # set_cpu_mode or set_gpu_mode decides whether to use the
    # CPU or the GPU for weights calibration, but activation calibration is
    # controlled by caffe APIs: caffe.set_mode_cpu() or set_mode_gpu().
# Need to set amct mode before the whole calibration process,
# default using CPU mode to do weights calibration.
# amct.set_gpu_mode() does not set which GPU card to use. Users can
# set GPU card in two ways:
# 1) use pycaffe API set_device(gpu_id)
# 2) use environment variable CUDA_VISIBLE_DEVICES
if args.gpu_id is not None and not args.cpu_mode:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
amct.set_gpu_mode()
else:
caffe.set_mode_cpu()
# Run pre model test
if args.pre_test:
if not args.benchmark:
run_caffe_model(
args.model_file, args.weights_file, args.iterations)
else:
do_benchmark_test(
args, args.model_file, args.weights_file, args.iterations)
print('[AMCT][INFO]Run ResNet-50 without quantize success!')
return
# Quantize configurations
config_json_file = os.path.join(TMP, 'config.json')
skip_layers = []
batch_num = 2
if args.cfg_define is not None:
        # do weights calibration with a non-uniform quantization configuration
amct.create_quant_config(config_json_file,
args.model_file,
args.weights_file,
skip_layers,
batch_num,
config_defination=args.cfg_define)
else:
amct.create_quant_config(config_json_file,
args.model_file,
args.weights_file,
skip_layers,
batch_num)
# Phase0: Init amct task
scale_offset_record_file = os.path.join(TMP, 'scale_offset_record.txt')
graph = amct.init(config_json_file,
args.model_file,
args.weights_file,
scale_offset_record_file)
# Phase1: do conv+bn+scale fusion, weights calibration and fake
# quant, insert quant and dequant layer
modified_model_file = os.path.join(TMP, 'modified_model.prototxt')
modified_weights_file = os.path.join(TMP, 'modified_model.caffemodel')
amct.quantize_model(graph, modified_model_file, modified_weights_file)
# Phase2: run caffe model to do activation calibration
if not args.benchmark:
run_caffe_model(
modified_model_file, modified_weights_file, batch_num)
else:
do_benchmark_test(
args, modified_model_file, modified_weights_file, batch_num)
# Phase3: save final model, one for caffe do fake quant test, one
# deploy model for GE
result_path = os.path.join(RESULT, 'ResNet50')
amct.save_model(graph, 'Both', result_path)
# Phase4: do final fake quant model test
fake_quant_model = os.path.join(
RESULT, 'ResNet50_fake_quant_model.prototxt')
fake_quant_weights = os.path.join(
RESULT, 'ResNet50_fake_quant_weights.caffemodel')
if not args.benchmark:
run_caffe_model(
fake_quant_model, fake_quant_weights, args.iterations)
else:
do_benchmark_test(
args, fake_quant_model, fake_quant_weights, args.iterations)
print('[AMCT][INFO]Run ResNet-50 with quantize success!')
if __name__ == '__main__':
main(QUANT_ARGS)
| 36.895561 | 79 | 0.617649 |
4a2557c30533d4931acb4d23c838e7864d13fce2 | 1,447 | py | Python | bento_map/loader.py | bento-dbaas/bento2globomap | 257d914fad1bfa364b4910f797527b287176b6db | [
"BSD-3-Clause"
] | 1 | 2018-01-18T17:31:36.000Z | 2018-01-18T17:31:36.000Z | bento_map/loader.py | bento-dbaas/bento2globomap | 257d914fad1bfa364b4910f797527b287176b6db | [
"BSD-3-Clause"
] | null | null | null | bento_map/loader.py | bento-dbaas/bento2globomap | 257d914fad1bfa364b4910f797527b287176b6db | [
"BSD-3-Clause"
] | null | null | null | from logging import info
from requests import post
from globomap_loader_api_client.auth import Auth
from globomap_loader_api_client.update import Update
from bento_map.settings import MAP_ENDPOINT, MAP_USERNAME, MAP_PASSWORD, \
BOT_ENDPOINT, DATABASE_PROVIDER
class Loader(object):
def __init__(self):
self.auth = Auth(
api_url=MAP_ENDPOINT,
username=MAP_USERNAME,
password=MAP_PASSWORD
)
self.provider = DATABASE_PROVIDER
def clear_old_data(self, model, before):
content = {
"action": "CLEAR",
"collection": model.collection,
"element": [[{
"field": "timestamp",
"operator": "<",
"value": before
}]],
"type": model.type
}
return self.__execute(content)
def update(self, model):
return self.__execute(model.content)
def __execute(self, content):
try:
info(content)
update = Update(auth=self.auth, driver_name=self.provider)
response = update.post(content)
info(response)
return response
except Exception as e:
self.notify_bot(str(e))
raise e
@staticmethod
def notify_bot(error):
json = {"message": "Error sending content to Map: {}".format(error)}
return post(BOT_ENDPOINT + "/notify", json=json)
| 29.530612 | 76 | 0.588113 |
4a2557d9fded487106240ed3a6ba926199716945 | 1,120 | py | Python | examples/invalid_sklearn_test.py | dkarageo/lovpy | 85f43c07aeed4b318238c35da606de2dc65ca24f | [
"Apache-2.0"
] | null | null | null | examples/invalid_sklearn_test.py | dkarageo/lovpy | 85f43c07aeed4b318238c35da606de2dc65ca24f | [
"Apache-2.0"
] | null | null | null | examples/invalid_sklearn_test.py | dkarageo/lovpy | 85f43c07aeed4b318238c35da606de2dc65ca24f | [
"Apache-2.0"
] | null | null | null | from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
# Load dataset
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(url, names=names)
# Split-out validation dataset
array = dataset.values
X = array[:, 0:4]
y = array[:, 4]
# X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20,
# random_state=1)
# Forgot to split them up.
X_train = X
X_validation = X
Y_train = y
Y_validation = y
# Make predictions on validation dataset
model = SVC(gamma='auto')
model.fit(X_train, Y_train)
predictions = model.predict(X_validation)
# Evaluate predictions
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| 32 | 87 | 0.742857 |
4a255858298dc2f3b25015d5bfc77c1cfe9fd23d | 4,708 | py | Python | src/robot_properties_bolt/config.py | Danfoa/robot_properties_bolt | ae4b8d5fe932c7bc2deebe6eb0a8f0c99d2c5eac | [
"BSD-3-Clause"
] | null | null | null | src/robot_properties_bolt/config.py | Danfoa/robot_properties_bolt | ae4b8d5fe932c7bc2deebe6eb0a8f0c99d2c5eac | [
"BSD-3-Clause"
] | null | null | null | src/robot_properties_bolt/config.py | Danfoa/robot_properties_bolt | ae4b8d5fe932c7bc2deebe6eb0a8f0c99d2c5eac | [
"BSD-3-Clause"
] | null | null | null | # @namespace robot_properties_bolt.config
""" This module includes configuration for the Bolt.
@file config.py
@copyright Copyright (c) 2020,
New York University and Max Planck Gesellschaft,
License BSD-3-Clause
"""
import numpy as np
from math import pi
from os.path import join, dirname
import pinocchio as se3
from pinocchio.utils import zero
from pinocchio.robot_wrapper import RobotWrapper
from robot_properties_bolt.utils import find_paths
class BoltAbstract(object):
""" Abstract class used for all Bolt robots. """
# PID gains
kp = 5.0
kd = 0.1
ki = 0.0
# The Kt constant of the motor [Nm/A]: tau = I * Kt.
motor_torque_constant = 0.025
# Control time period.
control_period = 0.001
dt = control_period
    # MaxCurrent = 12 # Amperes
max_current = 2
# Maximum torques.
max_torque = motor_torque_constant * max_current
# Maximum control one can send, here the control is the current.
max_control = max_current
    # ctrl_manager_current_to_control_gain: purpose unclear, so it is left at 1.0.
ctrl_manager_current_to_control_gain = 1.0
max_qref = pi
base_link_name = "base_link"
end_effector_names = ["FL_ANKLE", "FR_ANKLE"]
@classmethod
def buildRobotWrapper(cls):
# Rebuild the robot wrapper instead of using the existing model to
# also load the visuals.
robot = RobotWrapper.BuildFromURDF(
cls.urdf_path, cls.meshes_path, se3.JointModelFreeFlyer()
)
robot.model.rotorInertia[6:] = cls.motor_inertia
robot.model.rotorGearRatio[6:] = cls.motor_gear_ration
return robot
@classmethod
def buildSimuRobotWrapper(cls):
# Rebuild the robot wrapper instead of using the existing model to
# also load the visuals.
robot = RobotWrapper.BuildFromURDF(
cls.simu_urdf_path, cls.meshes_path, se3.JointModelFreeFlyer()
)
robot.model.rotorInertia[6:] = cls.motor_inertia
robot.model.rotorGearRatio[6:] = cls.motor_gear_ration
return robot
def joint_name_in_single_string(self):
joint_names = ""
for name in self.robot_model.names[2:]:
joint_names += name + " "
return joint_names
class BoltConfig(BoltAbstract):
robot_family = "bolt"
robot_name = "bolt"
# Here we use the same urdf as for the quadruped but without the freeflyer.
paths = find_paths(robot_name)
meshes_path = paths["package"]
dgm_yaml_path = paths["dgm_yaml"]
simu_urdf_path = paths["simu_urdf"]
urdf_path = paths["urdf"]
ctrl_path = paths["imp_ctrl_yaml"]
# The inertia of a single blmc_motor.
motor_inertia = 0.0000045
# The motor gear ratio.
motor_gear_ration = 9.0
# pinocchio model.
robot_model = se3.buildModelFromUrdf(urdf_path, se3.JointModelFreeFlyer())
robot_model.rotorInertia[6:] = motor_inertia
robot_model.rotorGearRatio[6:] = motor_gear_ration
mass = np.sum([i.mass for i in robot_model.inertias])
base_name = robot_model.frames[2].name
# The number of motors, here they are the same as there are only revolute
# joints.
nb_joints = robot_model.nv - 6
# pinocchio model.
pin_robot_wrapper = RobotWrapper.BuildFromURDF(
urdf_path, meshes_path, se3.JointModelFreeFlyer()
)
    # End-effector information
robot_model = pin_robot_wrapper.model
end_eff_ids = []
for leg in ["FL", "FR"]:
end_eff_ids.append(robot_model.getFrameId(leg + "_ANKLE"))
nb_ee = len(end_eff_ids)
joint_names = ["FL_HAA", "FL_HFE", "FL_KFE", "FR_HAA", "FR_HFE", "FR_KFE"]
# Mapping between the ctrl vector in the device and the urdf indexes.
urdf_to_dgm = tuple(range(6))
map_joint_name_to_id = {}
map_joint_limits = {}
for i, (name, lb, ub) in enumerate(
zip(
robot_model.names[1:],
robot_model.lowerPositionLimit,
robot_model.upperPositionLimit,
)
):
map_joint_name_to_id[name] = i
map_joint_limits[i] = [float(lb), float(ub)]
# Define the initial state.
initial_configuration = np.array(
[
0.0,
0.0,
0.35487417,
0.0,
0.0,
0.0,
1.0,
-0.3,
0.78539816,
-1.57079633,
0.3,
0.78539816,
-1.57079633,
]
)
initial_velocity = (6 + 6) * [
0,
]
q0 = np.zeros(robot_model.nq)
q0[:] = initial_configuration
v0 = np.zeros(robot_model.nv)
v0[:] = initial_velocity
a0 = np.zeros(robot_model.nv)
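    # Minimal usage sketch (illustrative only; requires the Bolt URDF/mesh assets
    # to be installed):
    #     robot = BoltConfig.buildRobotWrapper()
    #     q, v = BoltConfig.q0.copy(), BoltConfig.v0.copy()
    #     se3.framesForwardKinematics(robot.model, robot.data, q)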
| 28.191617 | 79 | 0.638488 |
4a2558b34e234290f1ef3f47d79218ccccd6a834 | 1,772 | py | Python | 03/queue_to_do/solution.py | kevinah95/foobar | 0118a083291e55eff7a3587b26f2d9d26b00a91d | [
"MIT"
] | null | null | null | 03/queue_to_do/solution.py | kevinah95/foobar | 0118a083291e55eff7a3587b26f2d9d26b00a91d | [
"MIT"
] | null | null | null | 03/queue_to_do/solution.py | kevinah95/foobar | 0118a083291e55eff7a3587b26f2d9d26b00a91d | [
"MIT"
] | null | null | null | def result():
from functools import reduce
product = reduce((lambda x, y: x ^ y), [0, 1, 2, 3, 4])
# product = 2
#print product ^ 6
def answer2(start,length):
first_column_array = [start]
for i in range(length - 1):
previous = first_column_array[-1]
current = previous + length
first_column_array.append(current)
#print first_column_array
diagonal = []
for i in range(1, length):
column_value = first_column_array[i]
value = column_value - (i)
diagonal.append(value)
#for i in range(length):
result = 0
for x, y in map(None, first_column_array[:-1], diagonal):
result = result^reduce((lambda x, y: x ^ y), range(x,y+1))
result = result^first_column_array[-1]
return result
def contador(start,max):
n=start
while n < max:
yield n
n=n+1
def answer(start, length):
acum = 0
first_column_value = start
for i in range(length):
next_column_value = first_column_value + length
last_column_value = next_column_value - (i+1)
#print (i,first_column_value, last_column_value)
contad = contador(first_column_value,last_column_value+1)
for i in contad:
acum = acum ^ i
#acum = acum ^ reduce((lambda x, y: x ^ y), xrange(first_column_value,last_column_value+1))
first_column_value = next_column_value
return acum
def f(a):
res = [a, 1, a+1, 0]
print res[a%4]
return res[a%4]
def getXor(a, b):
return f(b) ^ f(a-1)
def gen_nums(start, length):
l = length
ans = 0
while l > 0:
ans^= getXor(start,start+l-1)
start = start + length
l = l - 1
return ans
print gen_nums(17,4) | 26.058824 | 99 | 0.594808 |
4a2558f9c81868177345559624088c22fc3346a0 | 65 | py | Python | 13. Modules - Lab/fibonacci_sequence/__init__.py | elenaborisova/Python-Advanced | 4c266d81f294372c3599741e8ba53f59fdc834c5 | [
"MIT"
] | 2 | 2021-04-04T06:26:13.000Z | 2022-02-18T22:21:49.000Z | 13. Modules - Lab/fibonacci_sequence/__init__.py | elenaborisova/Python-Advanced | 4c266d81f294372c3599741e8ba53f59fdc834c5 | [
"MIT"
] | null | null | null | 13. Modules - Lab/fibonacci_sequence/__init__.py | elenaborisova/Python-Advanced | 4c266d81f294372c3599741e8ba53f59fdc834c5 | [
"MIT"
] | 3 | 2021-02-01T12:32:03.000Z | 2021-04-12T13:45:20.000Z | from tribonacci_sequence.sequence import create_sequence, locate
| 32.5 | 64 | 0.892308 |
4a25599927c549592b4a62a60373912ce7d7541a | 4,385 | py | Python | examples/checkpoints.py | nirs/python-ovirt-engine-sdk4 | 454e39d8d9e06511f8488a83a1b5649823cc22c6 | [
"Apache-2.0"
] | 3 | 2022-01-14T00:37:58.000Z | 2022-03-26T12:26:32.000Z | examples/checkpoints.py | nirs/python-ovirt-engine-sdk4 | 454e39d8d9e06511f8488a83a1b5649823cc22c6 | [
"Apache-2.0"
] | 29 | 2021-07-20T12:42:44.000Z | 2022-03-28T13:01:33.000Z | examples/checkpoints.py | nirs/python-ovirt-engine-sdk4 | 454e39d8d9e06511f8488a83a1b5649823cc22c6 | [
"Apache-2.0"
] | 12 | 2021-07-20T12:27:07.000Z | 2022-02-24T11:10:12.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This example shows how to manage VM checkpoints.
To remove a single checkpoint, use:
./checkpoints.py -c myengine remove vm-id checkpoint-id
To remove checkpoints older than 7 days:
./checkpoints.py -c myengine purge vm-id
To remove checkpoints older than 1 day:
./checkpoints.py -c myengine purge --days 1 vm-id
"""
import time
import datetime
from contextlib import closing
import ovirtsdk4 as sdk
from helpers import common
from helpers.common import progress
def main():
parser = common.ArgumentParser(description="Manage checkpoints")
subparsers = parser.add_subparsers(title="commands")
remove = subparsers.add_parser(
"remove",
help="Remove a VM checkpoint.")
remove.set_defaults(command=cmd_remove)
remove.add_argument(
"vm_uuid",
help="VM UUID for removing checkpoint.")
remove.add_argument(
"checkpoint_uuid",
help="The removed checkpoint UUID.")
purge = subparsers.add_parser(
"purge",
help="Remove old VM checkpoint.")
purge.set_defaults(command=cmd_purge)
purge.add_argument(
"--days",
type=int,
default=7,
help="Remove checkpoints older than specified days. If not "
"specified, remove checkpoints older than 7 days.")
purge.add_argument(
"vm_uuid",
help="VM UUID for removing checkpoint.")
args = parser.parse_args()
common.configure_logging(args)
args.command(args)
def cmd_remove(args):
progress(f"Removing VM {args.vm_uuid} checkpoint {args.checkpoint_uuid}")
# Create a connection to the server
connection = common.create_connection(args)
with closing(connection):
system_service = connection.system_service()
vm_service = system_service.vms_service().vm_service(id=args.vm_uuid)
checkpoints_service = vm_service.checkpoints_service()
checkpoint_service = checkpoints_service.checkpoint_service(id=args.checkpoint_uuid)
# Validate that the VM has the requested checkpoint
try:
checkpoint_service.get()
except sdk.NotFoundError:
raise RuntimeError(f"VM {args.vm_uuid} has no checkpoint {args.checkpoint_uuid}")
remove_checkpoint(checkpoint_service)
progress(f"Checkpoint {args.checkpoint_uuid} removed successfully")
def cmd_purge(args):
progress(f"Removing VM {args.vm_uuid} checkpoints older than {args.days} days")
# Create a connection to the server
connection = common.create_connection(args)
with closing(connection):
system_service = connection.system_service()
vm_service = system_service.vms_service().vm_service(id=args.vm_uuid)
checkpoints_service = vm_service.checkpoints_service()
now = datetime.datetime.now(datetime.timezone.utc)
for checkpoint in checkpoints_service.list():
checkpoint_age = now - checkpoint.creation_date
if checkpoint_age.days > args.days:
progress(f"Removing checkpoint {checkpoint.id}, created {checkpoint_age.days} days ago")
checkpoint_service = checkpoints_service.checkpoint_service(checkpoint.id)
remove_checkpoint(checkpoint_service)
progress(f"Checkpoint {checkpoint.id} removed successfully")
def remove_checkpoint(checkpoint_service, timeout=60):
checkpoint_service.remove()
    deadline = time.monotonic() + timeout
while True:
try:
checkpoint_service.get()
except sdk.NotFoundError:
break
if time.monotonic() > deadline:
raise RuntimeError("Timeout waiting for checkpoint removal")
time.sleep(1)
if __name__ == "__main__":
main()
| 31.099291 | 104 | 0.693501 |
4a255adb0d22a5c6c111b79160de36c13bc0c557 | 671 | py | Python | planar_ising/sparse_lu/csr_matrix.py | ValeryTyumen/planar_ising | 5a1803487e1dd59c5d5e790cc949b7234bf52ac8 | [
"MIT"
] | 8 | 2019-05-02T20:27:21.000Z | 2020-11-01T20:41:38.000Z | planar_ising/sparse_lu/csr_matrix.py | ValeryTyumen/planar_ising | 5a1803487e1dd59c5d5e790cc949b7234bf52ac8 | [
"MIT"
] | 1 | 2019-09-03T18:15:53.000Z | 2019-09-06T16:41:12.000Z | planar_ising/sparse_lu/csr_matrix.py | ValeryTyumen/planar_ising | 5a1803487e1dd59c5d5e790cc949b7234bf52ac8 | [
"MIT"
] | 3 | 2019-08-11T23:08:58.000Z | 2022-03-19T09:09:50.000Z | class CSRMatrix:
def __init__(self, signs, logs, column_indices, row_first_element_indices):
self._signs = signs
self._logs = logs
self._column_indices = column_indices
self._row_first_element_indices = row_first_element_indices
@property
def signs(self):
return self._signs
@property
def logs(self):
return self._logs
@property
def column_indices(self):
return self._column_indices
@property
def row_first_element_indices(self):
return self._row_first_element_indices
@property
def size(self):
return len(self._row_first_element_indices) - 1
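# For illustration: the index layout above follows the usual CSR convention, where
# row_first_element_indices is a row-pointer array one element longer than the number of
# rows (hence size = len(row_first_element_indices) - 1). For example, a 2x2 matrix with
# nonzeros at (0, 0) and (1, 1) would use column_indices = [0, 1] and
# row_first_element_indices = [0, 1, 2], with each nonzero carried as one entry of signs
# and one entry of logs.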
| 19.735294 | 79 | 0.673621 |
4a255b53b68dc43e854006c2893af20cc38ed5d8 | 860 | py | Python | Exercises/Exercise06/Python/helper.py | Lucksong/Exercises-Solutions | 614c3ad9945d83de21ef2bfdd38e0c67cedc4c8b | [
"CC-BY-3.0"
] | 436 | 2015-01-09T23:29:34.000Z | 2022-03-31T11:25:38.000Z | Exercises/Exercise06/Python/helper.py | Lucksong/Exercises-Solutions | 614c3ad9945d83de21ef2bfdd38e0c67cedc4c8b | [
"CC-BY-3.0"
] | 13 | 2015-04-12T12:54:32.000Z | 2020-03-07T06:52:09.000Z | Exercises/Exercise06/Python/helper.py | Lucksong/Exercises-Solutions | 614c3ad9945d83de21ef2bfdd38e0c67cedc4c8b | [
"CC-BY-3.0"
] | 195 | 2015-01-14T05:08:28.000Z | 2022-02-21T21:01:55.000Z |
from definitions import *
import numpy
# Function to compute the matrix product (sequential algorithm, dot prod)
def seq_mat_mul_sdot(N, A, B, C):
for i in range(N):
for j in range(N):
tmp = 0.0
for k in range(N):
tmp += A[i*N+k] * B[k*N+j]
C[i*N+j] = tmp
# Function to compute errors of the product matrix
def error(N, C):
cval = float(N) * AVAL * BVAL
errsq = 0.0
for i in range(N):
for j in range(N):
err = C[i*N+j] - cval
errsq += err * err
return errsq;
# Function to analyze and output results
def results(N, C, run_time):
mflops = 2.0 * N * N * N/(1000000.0* run_time)
print run_time, "seconds at", mflops, "MFLOPS"
errsq = error(N, C)
if numpy.isnan(errsq) or errsq > TOL:
print "Errors in multiplication:", errsq
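# A minimal smoke-test sketch for the helpers above, assuming AVAL, BVAL and TOL are
# supplied by definitions.py as in the rest of this file: every element of C should end
# up equal to N * AVAL * BVAL, so results() should report no multiplication errors.
if __name__ == "__main__":
    N = 4
    A = [AVAL] * (N * N)
    B = [BVAL] * (N * N)
    C = [0.0] * (N * N)
    seq_mat_mul_sdot(N, A, B, C)
    results(N, C, 1.0)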
| 26.875 | 74 | 0.57093 |
4a255cc7d2c20ccb4328e3ce001190f714c2558a | 1,358 | py | Python | pyntcloud/utils/mesh.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 1,142 | 2016-10-10T08:55:30.000Z | 2022-03-30T04:46:16.000Z | pyntcloud/utils/mesh.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 195 | 2016-10-10T08:30:37.000Z | 2022-02-17T12:51:17.000Z | pyntcloud/utils/mesh.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 215 | 2017-02-28T00:50:29.000Z | 2022-03-22T17:01:31.000Z | import numpy as np
import pandas as pd
def quadrilateral_to_triangular(mesh):
new_mesh = pd.DataFrame()
quadrilateral_vertex = mesh[["v1", "v2", "v3", "v4"]].values
triangular_vertex = np.vstack(
(quadrilateral_vertex[:, [0, 1, 2]],
quadrilateral_vertex[:, [2, 3, 0]]))
new_mesh["v1"] = triangular_vertex[:, 0]
new_mesh["v2"] = triangular_vertex[:, 1]
new_mesh["v3"] = triangular_vertex[:, 2]
if "vn1" in mesh.columns:
quadrilateral_vertex_normals = mesh[["vn1", "vn2", "vn3", "vn4"]].values
triangular_vertex_normals = np.vstack(
(quadrilateral_vertex_normals[:, [0, 1, 2]],
quadrilateral_vertex_normals[:, [2, 3, 0]]))
new_mesh["vn1"] = triangular_vertex_normals[:, 0]
new_mesh["vn2"] = triangular_vertex_normals[:, 1]
new_mesh["vn3"] = triangular_vertex_normals[:, 2]
if "vt1" in mesh.columns:
quadrilateral_vertex_texture = mesh[["vt1", "vt2", "vt3", "vt4"]].values
triangular_vertex_texture = np.vstack(
(quadrilateral_vertex_texture[:, [0, 1, 2]],
quadrilateral_vertex_texture[:, [2, 3, 0]]))
new_mesh["vt1"] = triangular_vertex_texture[:, 0]
new_mesh["vt2"] = triangular_vertex_texture[:, 1]
new_mesh["vt3"] = triangular_vertex_texture[:, 2]
return new_mesh
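# A minimal usage sketch: a single quad face (0, 1, 2, 3) is split into the two
# triangles (0, 1, 2) and (2, 3, 0).
if __name__ == "__main__":
    demo_quad = pd.DataFrame({"v1": [0], "v2": [1], "v3": [2], "v4": [3]})
    print(quadrilateral_to_triangular(demo_quad))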
| 34.820513 | 80 | 0.61782 |
4a255d8fe2e9c513b058d7721aed650507f7eb8e | 1,987 | py | Python | cluster/silhouette.py | cjku97/project5 | 256d9dedd37bab90eedb9f07e2bfe78669d1c4e1 | [
"MIT"
] | null | null | null | cluster/silhouette.py | cjku97/project5 | 256d9dedd37bab90eedb9f07e2bfe78669d1c4e1 | [
"MIT"
] | null | null | null | cluster/silhouette.py | cjku97/project5 | 256d9dedd37bab90eedb9f07e2bfe78669d1c4e1 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.spatial.distance import cdist
class Silhouette:
def __init__(self, metric: str = "euclidean"):
"""
inputs:
metric: str
the name of the distance metric to use
"""
self.metric = metric
def score(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
calculates the silhouette score for each of the observations
inputs:
X: np.ndarray
A 2D matrix where the rows are observations and columns are features.
y: np.ndarray
a 1D array representing the cluster labels for each of the observations in `X`
outputs:
np.ndarray
a 1D array with the silhouette scores for each of the observations in `X`
"""
# get matrix parameters
n_obs = len(X)
n_feats = len(X[0])
n_clusts = len(np.unique(y))
# initialize scores array
scores = np.zeros(n_obs)
# get datapoint distances
dists = cdist(X, X, metric = self.metric)
# get silhouette scores for each observation
for i in range(0, n_obs):
size_c_i = sum(y == y[i])
if size_c_i == 1:
scores[i] = 0
else:
# find intra-cluster distances
# i.e. the mean distance between i and all other data points
# in the same cluster
a_i = np.sum(dists[y == y[i]][:,i])/(size_c_i - 1)
# find inter-cluster distances
# i.e. the smallest mean distance of i to all points in any other cluster,
# of which i is not a member
b_i = np.inf
for k in range(1, n_clusts+1):
if k == y[i]:
continue
else:
b_k = np.mean(dists[y == k][:,i])
b_i = min(b_k, b_i)
scores[i] = (b_i - a_i)/max(a_i,b_i)
return(scores)
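# A minimal usage sketch: two well-separated toy clusters, with labels starting at 1
# because score() iterates cluster ids over range(1, n_clusts + 1).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = np.vstack((rng.normal(0.0, 0.1, (10, 2)), rng.normal(5.0, 0.1, (10, 2))))
    y_demo = np.array([1] * 10 + [2] * 10)
    print(Silhouette().score(X_demo, y_demo))  # all scores should be close to 1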
| 29.656716 | 94 | 0.520886 |
4a255e282e038c28df9174e2112785905c98a8ee | 75 | py | Python | Strings/F-strings/f-strings.py | fsierra2000/Python | 19c3f52797b366019794ea75508d7317ef492ed5 | [
"MIT"
] | null | null | null | Strings/F-strings/f-strings.py | fsierra2000/Python | 19c3f52797b366019794ea75508d7317ef492ed5 | [
"MIT"
] | null | null | null | Strings/F-strings/f-strings.py | fsierra2000/Python | 19c3f52797b366019794ea75508d7317ef492ed5 | [
"MIT"
] | null | null | null | name = ???
age = ???
print(f"Hello, My name is ??? and I'm ??? years old.") | 25 | 54 | 0.52 |
4a255e644419d7c8bf1c2c2a359e8d7f58db9be4 | 1,668 | py | Python | iteration.py | anishkarki/CODILITYTRAININGPYTHON | e25c629348172cb7f53c02be510fdc1d8056ee3c | [
"Apache-2.0"
] | null | null | null | iteration.py | anishkarki/CODILITYTRAININGPYTHON | e25c629348172cb7f53c02be510fdc1d8056ee3c | [
"Apache-2.0"
] | null | null | null | iteration.py | anishkarki/CODILITYTRAININGPYTHON | e25c629348172cb7f53c02be510fdc1d8056ee3c | [
"Apache-2.0"
] | null | null | null | #Binary Gap
import unittest
#from stackclass import StackDeclare
from collections import deque
MAXINT = 2147483647
class StackDeclare:
def __init__(self):
self.items = deque()
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
if (len(self.items)>0):
return self.items.pop()
else:
return None
def peek(self):
if (len(self.items)>0):
return self.items[len(self.items)-1]
else:
return None
def size(self):
return len(self.items)
def solution(N):
count=0
lastcount=0
countstack = StackDeclare()
while (N>0):
val = N//2
num = N%2
N = val
if num == 1:
if countstack.peek() != 1:
countstack.push(num)
else:
countstack.pop()
if lastcount <= count:
lastcount=count
count = 0
countstack.push(num)
elif num == 0:
if countstack.peek() == 1:
count += 1
else:
pass
return lastcount
print (solution(74901729))
class TestGap(unittest.TestCase):
def test_example(self):
self.assertEqual(5, solution(1041))
def test_example2(self):
self.assertEqual(0,solution(32))
def test_extremes(self):
self.assertEqual(0,solution(1))
self.assertEqual(1,solution(5))
self.assertEqual(0,solution(MAXINT))
if __name__ == '__main__':
unittest.main()
solution(1)
| 21.384615 | 48 | 0.52518 |
4a255eb442a823f591e9b306dfd4d26460324f16 | 13,558 | py | Python | tools/VmaDumpVis/VmaDumpVis.py | Fahien/VulkanMemoryAllocator | 01099675548f32762d4d7b15afcf17dcf358e642 | [
"MIT"
] | null | null | null | tools/VmaDumpVis/VmaDumpVis.py | Fahien/VulkanMemoryAllocator | 01099675548f32762d4d7b15afcf17dcf358e642 | [
"MIT"
] | null | null | null | tools/VmaDumpVis/VmaDumpVis.py | Fahien/VulkanMemoryAllocator | 01099675548f32762d4d7b15afcf17dcf358e642 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2018-2021 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import argparse
import json
from PIL import Image, ImageDraw, ImageFont
PROGRAM_VERSION = 'VMA Dump Visualization 2.0.1'
IMG_SIZE_X = 1200
IMG_MARGIN = 8
FONT_SIZE = 10
MAP_SIZE = 24
COLOR_TEXT_H1 = (0, 0, 0, 255)
COLOR_TEXT_H2 = (150, 150, 150, 255)
COLOR_OUTLINE = (155, 155, 155, 255)
COLOR_OUTLINE_HARD = (0, 0, 0, 255)
COLOR_GRID_LINE = (224, 224, 224, 255)
argParser = argparse.ArgumentParser(description='Visualization of Vulkan Memory Allocator JSON dump.')
argParser.add_argument('DumpFile', type=argparse.FileType(mode='r', encoding='UTF-8'), help='Path to source JSON file with memory dump created by Vulkan Memory Allocator library')
argParser.add_argument('-v', '--version', action='version', version=PROGRAM_VERSION)
argParser.add_argument('-o', '--output', required=True, help='Path to destination image file (e.g. PNG)')
args = argParser.parse_args()
data = {}
def ProcessBlock(dstBlockList, iBlockId, objBlock, sAlgorithm):
iBlockSize = int(objBlock['TotalBytes'])
arrSuballocs = objBlock['Suballocations']
dstBlockObj = {'ID': iBlockId, 'Size':iBlockSize, 'Suballocations':[]}
dstBlockObj['Algorithm'] = sAlgorithm
for objSuballoc in arrSuballocs:
dstBlockObj['Suballocations'].append((objSuballoc['Type'], int(objSuballoc['Size']), int(objSuballoc['Usage']) if ('Usage' in objSuballoc) else 0))
dstBlockList.append(dstBlockObj)
def GetDataForMemoryType(iMemTypeIndex):
global data
if iMemTypeIndex in data:
return data[iMemTypeIndex]
else:
newMemTypeData = {'DedicatedAllocations':[], 'DefaultPoolBlocks':[], 'CustomPools':{}}
data[iMemTypeIndex] = newMemTypeData
return newMemTypeData
def IsDataEmpty():
global data
for dictMemType in data.values():
if 'DedicatedAllocations' in dictMemType and len(dictMemType['DedicatedAllocations']) > 0:
return False
if 'DefaultPoolBlocks' in dictMemType and len(dictMemType['DefaultPoolBlocks']) > 0:
return False
if 'CustomPools' in dictMemType:
for lBlockList in dictMemType['CustomPools'].values():
if len(lBlockList) > 0:
return False
return True
# Returns tuple:
# [0] image height : integer
# [1] pixels per byte : float
def CalcParams():
global data
iImgSizeY = IMG_MARGIN
iImgSizeY += FONT_SIZE + IMG_MARGIN # Grid lines legend - sizes
iMaxBlockSize = 0
for dictMemType in data.values():
iImgSizeY += IMG_MARGIN + FONT_SIZE
lDedicatedAllocations = dictMemType['DedicatedAllocations']
iImgSizeY += len(lDedicatedAllocations) * (IMG_MARGIN * 2 + FONT_SIZE + MAP_SIZE)
for tDedicatedAlloc in lDedicatedAllocations:
iMaxBlockSize = max(iMaxBlockSize, tDedicatedAlloc[1])
lDefaultPoolBlocks = dictMemType['DefaultPoolBlocks']
iImgSizeY += len(lDefaultPoolBlocks) * (IMG_MARGIN * 2 + FONT_SIZE + MAP_SIZE)
for objBlock in lDefaultPoolBlocks:
iMaxBlockSize = max(iMaxBlockSize, objBlock['Size'])
dCustomPools = dictMemType['CustomPools']
for lBlocks in dCustomPools.values():
iImgSizeY += len(lBlocks) * (IMG_MARGIN * 2 + FONT_SIZE + MAP_SIZE)
for objBlock in lBlocks:
iMaxBlockSize = max(iMaxBlockSize, objBlock['Size'])
fPixelsPerByte = (IMG_SIZE_X - IMG_MARGIN * 2) / float(iMaxBlockSize)
return iImgSizeY, fPixelsPerByte
def TypeToColor(sType, iUsage):
if sType == 'FREE':
return 220, 220, 220, 255
elif sType == 'BUFFER':
if (iUsage & 0x1C0) != 0: # INDIRECT_BUFFER | VERTEX_BUFFER | INDEX_BUFFER
return 255, 148, 148, 255 # Red
elif (iUsage & 0x28) != 0: # STORAGE_BUFFER | STORAGE_TEXEL_BUFFER
return 255, 187, 121, 255 # Orange
elif (iUsage & 0x14) != 0: # UNIFORM_BUFFER | UNIFORM_TEXEL_BUFFER
return 255, 255, 0, 255 # Yellow
else:
return 255, 255, 165, 255 # Light yellow
elif sType == 'IMAGE_OPTIMAL':
if (iUsage & 0x20) != 0: # DEPTH_STENCIL_ATTACHMENT
return 246, 128, 255, 255 # Pink
elif (iUsage & 0xD0) != 0: # INPUT_ATTACHMENT | TRANSIENT_ATTACHMENT | COLOR_ATTACHMENT
return 179, 179, 255, 255 # Blue
elif (iUsage & 0x4) != 0: # SAMPLED
return 0, 255, 255, 255 # Aqua
else:
return 183, 255, 255, 255 # Light aqua
elif sType == 'IMAGE_LINEAR':
return 0, 255, 0, 255 # Green
elif sType == 'IMAGE_UNKNOWN':
return 0, 255, 164, 255 # Green/aqua
elif sType == 'UNKNOWN':
return 175, 175, 175, 255 # Gray
assert False
return 0, 0, 0, 255
def DrawDedicatedAllocationBlock(draw, y, tDedicatedAlloc):
global fPixelsPerByte
iSizeBytes = tDedicatedAlloc[1]
iSizePixels = int(iSizeBytes * fPixelsPerByte)
draw.rectangle([IMG_MARGIN, y, IMG_MARGIN + iSizePixels, y + MAP_SIZE], fill=TypeToColor(tDedicatedAlloc[0], tDedicatedAlloc[2]), outline=COLOR_OUTLINE)
def DrawBlock(draw, y, objBlock):
global fPixelsPerByte
iSizeBytes = objBlock['Size']
iSizePixels = int(iSizeBytes * fPixelsPerByte)
draw.rectangle([IMG_MARGIN, y, IMG_MARGIN + iSizePixels, y + MAP_SIZE], fill=TypeToColor('FREE', 0), outline=None)
iByte = 0
iX = 0
iLastHardLineX = -1
for tSuballoc in objBlock['Suballocations']:
sType = tSuballoc[0]
iByteEnd = iByte + tSuballoc[1]
iXEnd = int(iByteEnd * fPixelsPerByte)
if sType != 'FREE':
if iXEnd > iX + 1:
iUsage = tSuballoc[2]
draw.rectangle([IMG_MARGIN + iX, y, IMG_MARGIN + iXEnd, y + MAP_SIZE], fill=TypeToColor(sType, iUsage), outline=COLOR_OUTLINE)
# Hard line was been overwritten by rectangle outline: redraw it.
if iLastHardLineX == iX:
draw.line([IMG_MARGIN + iX, y, IMG_MARGIN + iX, y + MAP_SIZE], fill=COLOR_OUTLINE_HARD)
else:
draw.line([IMG_MARGIN + iX, y, IMG_MARGIN + iX, y + MAP_SIZE], fill=COLOR_OUTLINE_HARD)
iLastHardLineX = iX
iByte = iByteEnd
iX = iXEnd
def BytesToStr(iBytes):
if iBytes < 1024:
return "%d B" % iBytes
iBytes /= 1024
if iBytes < 1024:
return "%d KiB" % iBytes
iBytes /= 1024
if iBytes < 1024:
return "%d MiB" % iBytes
iBytes /= 1024
return "%d GiB" % iBytes
jsonSrc = json.load(args.DumpFile)
if 'DedicatedAllocations' in jsonSrc:
for tType in jsonSrc['DedicatedAllocations'].items():
sType = tType[0]
assert sType[:5] == 'Type '
iType = int(sType[5:])
typeData = GetDataForMemoryType(iType)
for objAlloc in tType[1]:
typeData['DedicatedAllocations'].append((objAlloc['Type'], int(objAlloc['Size']), int(objAlloc['Usage']) if ('Usage' in objAlloc) else 0))
if 'DefaultPools' in jsonSrc:
for tType in jsonSrc['DefaultPools'].items():
sType = tType[0]
assert sType[:5] == 'Type '
iType = int(sType[5:])
typeData = GetDataForMemoryType(iType)
for sBlockId, objBlock in tType[1]['Blocks'].items():
ProcessBlock(typeData['DefaultPoolBlocks'], int(sBlockId), objBlock, '')
if 'Pools' in jsonSrc:
objPools = jsonSrc['Pools']
for sPoolId, objPool in objPools.items():
iType = int(objPool['MemoryTypeIndex'])
typeData = GetDataForMemoryType(iType)
objBlocks = objPool['Blocks']
sAlgorithm = objPool.get('Algorithm', '')
sName = objPool.get('Name', None)
if sName:
sFullName = sPoolId + ' "' + sName + '"'
else:
sFullName = sPoolId
dstBlockArray = []
typeData['CustomPools'][sFullName] = dstBlockArray
for sBlockId, objBlock in objBlocks.items():
ProcessBlock(dstBlockArray, int(sBlockId), objBlock, sAlgorithm)
if 'DedicatedAllocations' in objPool:
for tType in objPool['DedicatedAllocations'].items():
sType = tType[0]
assert sType[:5] == 'Type '
iType = int(sType[5:])
typeData = GetDataForMemoryType(iType)
for objAlloc in tType[1]:
typeData['CustomPools'][sFullName].append((objAlloc['Type'], int(objAlloc['Size']), int(objAlloc['Usage']) if ('Usage' in objAlloc) else 0))
if IsDataEmpty():
print("There is nothing to put on the image. Please make sure you generated the stats string with detailed map enabled.")
exit(1)
iImgSizeY, fPixelsPerByte = CalcParams()
img = Image.new('RGB', (IMG_SIZE_X, iImgSizeY), 'white')
draw = ImageDraw.Draw(img)
try:
font = ImageFont.truetype('segoeuib.ttf')
except:
font = ImageFont.load_default()
y = IMG_MARGIN
# Draw grid lines
iBytesBetweenGridLines = 32
while iBytesBetweenGridLines * fPixelsPerByte < 64:
iBytesBetweenGridLines *= 2
iByte = 0
TEXT_MARGIN = 4
while True:
iX = int(iByte * fPixelsPerByte)
if iX > IMG_SIZE_X - 2 * IMG_MARGIN:
break
draw.line([iX + IMG_MARGIN, 0, iX + IMG_MARGIN, iImgSizeY], fill=COLOR_GRID_LINE)
if iByte == 0:
draw.text((iX + IMG_MARGIN + TEXT_MARGIN, y), "0", fill=COLOR_TEXT_H2, font=font)
else:
text = BytesToStr(iByte)
textSize = draw.textsize(text, font=font)
draw.text((iX + IMG_MARGIN - textSize[0] - TEXT_MARGIN, y), text, fill=COLOR_TEXT_H2, font=font)
iByte += iBytesBetweenGridLines
y += FONT_SIZE + IMG_MARGIN
# Draw main content
for iMemTypeIndex in sorted(data.keys()):
dictMemType = data[iMemTypeIndex]
draw.text((IMG_MARGIN, y), "Memory type %d" % iMemTypeIndex, fill=COLOR_TEXT_H1, font=font)
y += FONT_SIZE + IMG_MARGIN
index = 0
for tDedicatedAlloc in dictMemType['DedicatedAllocations']:
draw.text((IMG_MARGIN, y), "Dedicated allocation %d" % index, fill=COLOR_TEXT_H2, font=font)
y += FONT_SIZE + IMG_MARGIN
DrawDedicatedAllocationBlock(draw, y, tDedicatedAlloc)
y += MAP_SIZE + IMG_MARGIN
index += 1
for objBlock in dictMemType['DefaultPoolBlocks']:
draw.text((IMG_MARGIN, y), "Default pool block %d" % objBlock['ID'], fill=COLOR_TEXT_H2, font=font)
y += FONT_SIZE + IMG_MARGIN
DrawBlock(draw, y, objBlock)
y += MAP_SIZE + IMG_MARGIN
index = 0
for sPoolName, listPool in dictMemType['CustomPools'].items():
for objBlock in listPool:
if 'Algorithm' in objBlock and objBlock['Algorithm']:
sAlgorithm = ' (Algorithm: %s)' % (objBlock['Algorithm'])
else:
sAlgorithm = ''
draw.text((IMG_MARGIN, y), "Custom pool %s%s block %d" % (sPoolName, sAlgorithm, objBlock['ID']), fill=COLOR_TEXT_H2, font=font)
y += FONT_SIZE + IMG_MARGIN
            DrawBlock(draw, y, objBlock)
            y += MAP_SIZE + IMG_MARGIN
index += 1
del draw
img.save(args.output)
"""
Main data structure - variable `data` - is a dictionary. Key is integer - memory type index. Value is dictionary of:
- Fixed key 'DedicatedAllocations'. Value is list of tuples, each containing:
- [0]: Type : string
- [1]: Size : integer
- [2]: Usage : integer (0 if unknown)
- Fixed key 'DefaultPoolBlocks'. Value is list of objects, each containing dictionary with:
- Fixed key 'ID'. Value is int.
- Fixed key 'Size'. Value is int.
- Fixed key 'Suballocations'. Value is list of tuples as above.
- Fixed key 'CustomPools'. Value is dictionary.
- Key is string with pool ID/name. Value is list of objects representing memory blocks, each containing dictionary with:
- Fixed key 'ID'. Value is int.
- Fixed key 'Size'. Value is int.
- Fixed key 'Algorithm'. Optional. Value is string.
- Fixed key 'Suballocations'. Value is list of tuples as above.
- Fixed key 'DedicatedAllocations'. Value is list of tuples as above.
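For illustration, a tiny instance of this structure (one memory type, one dedicated
allocation, one default pool block, no custom pools) could look like:
    data = {0: {'DedicatedAllocations': [('BUFFER', 65536, 128)],
                'DefaultPoolBlocks': [{'ID': 0, 'Size': 1048576, 'Algorithm': '',
                    'Suballocations': [('BUFFER', 65536, 128), ('FREE', 983040, 0)]}],
                'CustomPools': {}}}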
"""
| 42.63522 | 180 | 0.639475 |
4a255ff5494941435c75c98333a91538cab19de4 | 1,421 | py | Python | na_common/dates.py | NewAcropolis/na-common | d9a7a5d46f1ec34277e752d39af5fc9fc144300e | [
"MIT"
] | null | null | null | na_common/dates.py | NewAcropolis/na-common | d9a7a5d46f1ec34277e752d39af5fc9fc144300e | [
"MIT"
] | null | null | null | na_common/dates.py | NewAcropolis/na-common | d9a7a5d46f1ec34277e752d39af5fc9fc144300e | [
"MIT"
] | null | null | null | from datetime import datetime
class EventDate:
def __init__(self, event_datetime):
self.event_datetime = datetime.strptime(event_datetime, '%Y-%m-%d %H:%M')
def get_event_date_objs(event_dates):
event_date_objs = []
for e in event_dates:
event_date_objs.append(EventDate(e['event_datetime']))
return event_date_objs
def get_nice_event_dates(event_dates):
if event_dates and type(event_dates[0]) is dict:
event_dates = get_event_date_objs(event_dates)
event_dates.sort(key=lambda k: k.event_datetime)
event_date_str = ''
_event_month = ''
_event_dates = ''
for event_date in event_dates:
m = event_date.event_datetime.strftime("%B")
d = event_date.event_datetime.strftime("%a %-d, ")
if not _event_month:
_event_month = event_date.event_datetime.strftime("%B")
if m == _event_month:
_event_dates += d
elif _event_dates:
event_date_str += _event_dates[:-2] + ' of ' + _event_month + ', '
_event_dates = d
_event_month = m
event_date_str = (event_date_str if len(event_date_str) > 2 else '') + _event_dates[:-2] + ' of ' + _event_month
event_datetime = event_dates[0].event_datetime
event_date_str += ' - ' + event_datetime.strftime(
"%-I:%M %p" if event_datetime.strftime("%M") != '00' else "%-I %p")
return event_date_str
| 31.577778 | 116 | 0.649543 |
4a2560ccbcfd07eb759472abd167e2a4914cbc8e | 328,261 | py | Python | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/_models.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 1 | 2020-03-05T18:10:35.000Z | 2020-03-05T18:10:35.000Z | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/_models.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 2 | 2020-03-03T23:11:13.000Z | 2020-03-30T18:50:55.000Z | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/_models.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AgreementContent(msrest.serialization.Model):
"""The integration account agreement content.
:param a_s2: The AS2 agreement content.
:type a_s2: ~azure.mgmt.logic.models.AS2AgreementContent
:param x12: The X12 agreement content.
:type x12: ~azure.mgmt.logic.models.X12AgreementContent
:param edifact: The EDIFACT agreement content.
:type edifact: ~azure.mgmt.logic.models.EdifactAgreementContent
"""
_attribute_map = {
'a_s2': {'key': 'aS2', 'type': 'AS2AgreementContent'},
'x12': {'key': 'x12', 'type': 'X12AgreementContent'},
'edifact': {'key': 'edifact', 'type': 'EdifactAgreementContent'},
}
def __init__(
self,
**kwargs
):
super(AgreementContent, self).__init__(**kwargs)
self.a_s2 = kwargs.get('a_s2', None)
self.x12 = kwargs.get('x12', None)
self.edifact = kwargs.get('edifact', None)
class ApiDeploymentParameterMetadata(msrest.serialization.Model):
"""The API deployment parameter metadata.
:param type: The type.
:type type: str
:param is_required: Indicates whether its required.
:type is_required: bool
:param display_name: The display name.
:type display_name: str
:param description: The description.
:type description: str
:param visibility: The visibility. Possible values include: "NotSpecified", "Default",
"Internal".
:type visibility: str or ~azure.mgmt.logic.models.ApiDeploymentParameterVisibility
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'is_required': {'key': 'isRequired', 'type': 'bool'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'visibility': {'key': 'visibility', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApiDeploymentParameterMetadata, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.is_required = kwargs.get('is_required', None)
self.display_name = kwargs.get('display_name', None)
self.description = kwargs.get('description', None)
self.visibility = kwargs.get('visibility', None)
class ApiDeploymentParameterMetadataSet(msrest.serialization.Model):
"""The API deployment parameters metadata.
:param package_content_link: The package content link parameter.
:type package_content_link: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadata
:param redis_cache_connection_string: The package content link parameter.
:type redis_cache_connection_string: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadata
"""
_attribute_map = {
'package_content_link': {'key': 'packageContentLink', 'type': 'ApiDeploymentParameterMetadata'},
'redis_cache_connection_string': {'key': 'redisCacheConnectionString', 'type': 'ApiDeploymentParameterMetadata'},
}
def __init__(
self,
**kwargs
):
super(ApiDeploymentParameterMetadataSet, self).__init__(**kwargs)
self.package_content_link = kwargs.get('package_content_link', None)
self.redis_cache_connection_string = kwargs.get('redis_cache_connection_string', None)
class Resource(msrest.serialization.Model):
"""The base resource type.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class ApiOperation(Resource):
"""The api operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: The api operations properties.
:type properties: ~azure.mgmt.logic.models.ApiOperationPropertiesDefinition
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ApiOperationPropertiesDefinition'},
}
def __init__(
self,
**kwargs
):
super(ApiOperation, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class ApiOperationAnnotation(msrest.serialization.Model):
"""The Api Operation Annotation.
:param status: The status annotation. Possible values include: "NotSpecified", "Preview",
"Production".
:type status: str or ~azure.mgmt.logic.models.StatusAnnotation
:param family: The family.
:type family: str
:param revision: The revision.
:type revision: int
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ApiOperationAnnotation, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.family = kwargs.get('family', None)
self.revision = kwargs.get('revision', None)
class ApiOperationListResult(msrest.serialization.Model):
"""The list of managed API operations.
:param value: The api operation definitions for an API.
:type value: list[~azure.mgmt.logic.models.ApiOperation]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ApiOperation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApiOperationListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class ApiOperationPropertiesDefinition(msrest.serialization.Model):
"""The api operations properties.
:param summary: The summary of the api operation.
:type summary: str
:param description: The description of the api operation.
:type description: str
:param visibility: The visibility of the api operation.
:type visibility: str
:param trigger: The trigger type of api operation.
:type trigger: str
:param trigger_hint: The trigger hint for the api operation.
:type trigger_hint: str
:param pageable: Indicates whether the api operation is pageable.
:type pageable: bool
:param annotation: The annotation of api operation.
:type annotation: ~azure.mgmt.logic.models.ApiOperationAnnotation
:param api: The api reference.
:type api: ~azure.mgmt.logic.models.ApiReference
:param inputs_definition: The operation inputs definition schema.
:type inputs_definition: ~azure.mgmt.logic.models.SwaggerSchema
:param responses_definition: The operation responses definition schemas.
:type responses_definition: dict[str, ~azure.mgmt.logic.models.SwaggerSchema]
:param is_webhook: Indicates whether the API operation is webhook or not.
:type is_webhook: bool
:param is_notification: Indicates whether the API operation is notification or not.
:type is_notification: bool
"""
_attribute_map = {
'summary': {'key': 'summary', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'visibility': {'key': 'visibility', 'type': 'str'},
'trigger': {'key': 'trigger', 'type': 'str'},
'trigger_hint': {'key': 'triggerHint', 'type': 'str'},
'pageable': {'key': 'pageable', 'type': 'bool'},
'annotation': {'key': 'annotation', 'type': 'ApiOperationAnnotation'},
'api': {'key': 'api', 'type': 'ApiReference'},
'inputs_definition': {'key': 'inputsDefinition', 'type': 'SwaggerSchema'},
'responses_definition': {'key': 'responsesDefinition', 'type': '{SwaggerSchema}'},
'is_webhook': {'key': 'isWebhook', 'type': 'bool'},
'is_notification': {'key': 'isNotification', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ApiOperationPropertiesDefinition, self).__init__(**kwargs)
self.summary = kwargs.get('summary', None)
self.description = kwargs.get('description', None)
self.visibility = kwargs.get('visibility', None)
self.trigger = kwargs.get('trigger', None)
self.trigger_hint = kwargs.get('trigger_hint', None)
self.pageable = kwargs.get('pageable', None)
self.annotation = kwargs.get('annotation', None)
self.api = kwargs.get('api', None)
self.inputs_definition = kwargs.get('inputs_definition', None)
self.responses_definition = kwargs.get('responses_definition', None)
self.is_webhook = kwargs.get('is_webhook', None)
self.is_notification = kwargs.get('is_notification', None)
class ResourceReference(msrest.serialization.Model):
"""The resource reference.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: The resource id.
:type id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceReference, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = None
self.type = None
class ApiReference(ResourceReference):
"""The Api reference.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: The resource id.
:type id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param display_name: The display name of the api.
:type display_name: str
:param description: The description of the api.
:type description: str
:param icon_uri: The icon uri of the api.
:type icon_uri: str
:param swagger: The swagger of the api.
:type swagger: object
:param brand_color: The brand color of the api.
:type brand_color: str
:param category: The tier. Possible values include: "NotSpecified", "Enterprise", "Standard",
"Premium".
:type category: str or ~azure.mgmt.logic.models.ApiTier
:param integration_service_environment: The integration service environment reference.
:type integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'icon_uri': {'key': 'iconUri', 'type': 'str'},
'swagger': {'key': 'swagger', 'type': 'object'},
'brand_color': {'key': 'brandColor', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'integration_service_environment': {'key': 'integrationServiceEnvironment', 'type': 'ResourceReference'},
}
def __init__(
self,
**kwargs
):
super(ApiReference, self).__init__(**kwargs)
self.display_name = kwargs.get('display_name', None)
self.description = kwargs.get('description', None)
self.icon_uri = kwargs.get('icon_uri', None)
self.swagger = kwargs.get('swagger', None)
self.brand_color = kwargs.get('brand_color', None)
self.category = kwargs.get('category', None)
self.integration_service_environment = kwargs.get('integration_service_environment', None)
class ApiResourceBackendService(msrest.serialization.Model):
"""The API backend service.
:param service_url: The service URL.
:type service_url: str
"""
_attribute_map = {
'service_url': {'key': 'serviceUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApiResourceBackendService, self).__init__(**kwargs)
self.service_url = kwargs.get('service_url', None)
class ApiResourceDefinitions(msrest.serialization.Model):
"""The Api resource definition.
:param original_swagger_url: The original swagger url.
:type original_swagger_url: str
:param modified_swagger_url: The modified swagger url.
:type modified_swagger_url: str
"""
_attribute_map = {
'original_swagger_url': {'key': 'originalSwaggerUrl', 'type': 'str'},
'modified_swagger_url': {'key': 'modifiedSwaggerUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApiResourceDefinitions, self).__init__(**kwargs)
self.original_swagger_url = kwargs.get('original_swagger_url', None)
self.modified_swagger_url = kwargs.get('modified_swagger_url', None)
class ApiResourceGeneralInformation(msrest.serialization.Model):
"""The API general information.
:param icon_url: The icon url.
:type icon_url: str
:param display_name: The display name.
:type display_name: str
:param description: The description.
:type description: str
:param terms_of_use_url: The terms of use url.
:type terms_of_use_url: str
:param release_tag: The release tag.
:type release_tag: str
:param tier: The tier. Possible values include: "NotSpecified", "Enterprise", "Standard",
"Premium".
:type tier: str or ~azure.mgmt.logic.models.ApiTier
"""
_attribute_map = {
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'terms_of_use_url': {'key': 'termsOfUseUrl', 'type': 'str'},
'release_tag': {'key': 'releaseTag', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApiResourceGeneralInformation, self).__init__(**kwargs)
self.icon_url = kwargs.get('icon_url', None)
self.display_name = kwargs.get('display_name', None)
self.description = kwargs.get('description', None)
self.terms_of_use_url = kwargs.get('terms_of_use_url', None)
self.release_tag = kwargs.get('release_tag', None)
self.tier = kwargs.get('tier', None)
class ApiResourceMetadata(msrest.serialization.Model):
"""The api resource metadata.
:param source: The source.
:type source: str
:param brand_color: The brand color.
:type brand_color: str
:param hide_key: The hide key.
:type hide_key: str
:param tags: A set of tags. The tags.
:type tags: dict[str, str]
:param api_type: The api type. Possible values include: "NotSpecified", "Rest", "Soap".
:type api_type: str or ~azure.mgmt.logic.models.ApiType
:param wsdl_service: The WSDL service.
:type wsdl_service: ~azure.mgmt.logic.models.WsdlService
:param wsdl_import_method: The WSDL import method. Possible values include: "NotSpecified",
"SoapToRest", "SoapPassThrough".
:type wsdl_import_method: str or ~azure.mgmt.logic.models.WsdlImportMethod
:param connection_type: The connection type.
:type connection_type: str
:param provisioning_state: The provisioning state. Possible values include: "NotSpecified",
"Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
"Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
"Unregistered", "Completed", "Renewing", "Pending", "Waiting", "InProgress".
:type provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
:param deployment_parameters: The connector deployment parameters metadata.
:type deployment_parameters: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadataSet
"""
_attribute_map = {
'source': {'key': 'source', 'type': 'str'},
'brand_color': {'key': 'brandColor', 'type': 'str'},
'hide_key': {'key': 'hideKey', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'api_type': {'key': 'ApiType', 'type': 'str'},
'wsdl_service': {'key': 'wsdlService', 'type': 'WsdlService'},
'wsdl_import_method': {'key': 'wsdlImportMethod', 'type': 'str'},
'connection_type': {'key': 'connectionType', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'deployment_parameters': {'key': 'deploymentParameters', 'type': 'ApiDeploymentParameterMetadataSet'},
}
def __init__(
self,
**kwargs
):
super(ApiResourceMetadata, self).__init__(**kwargs)
self.source = kwargs.get('source', None)
self.brand_color = kwargs.get('brand_color', None)
self.hide_key = kwargs.get('hide_key', None)
self.tags = kwargs.get('tags', None)
self.api_type = kwargs.get('api_type', None)
self.wsdl_service = kwargs.get('wsdl_service', None)
self.wsdl_import_method = kwargs.get('wsdl_import_method', None)
self.connection_type = kwargs.get('connection_type', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.deployment_parameters = kwargs.get('deployment_parameters', None)
class ApiResourcePolicies(msrest.serialization.Model):
"""The API resource policies.
:param content: The API level only policies XML as embedded content.
:type content: str
:param content_link: The content link to the policies.
:type content_link: str
"""
_attribute_map = {
'content': {'key': 'content', 'type': 'str'},
'content_link': {'key': 'contentLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApiResourcePolicies, self).__init__(**kwargs)
self.content = kwargs.get('content', None)
self.content_link = kwargs.get('content_link', None)
class ApiResourceProperties(msrest.serialization.Model):
"""The API resource properties.
:param name: The name.
:type name: str
:param connection_parameters: The connection parameters.
:type connection_parameters: dict[str, object]
:param metadata: The metadata.
:type metadata: ~azure.mgmt.logic.models.ApiResourceMetadata
:param runtime_urls: The runtime urls.
:type runtime_urls: list[str]
:param general_information: The api general information.
:type general_information: ~azure.mgmt.logic.models.ApiResourceGeneralInformation
:param capabilities: The capabilities.
:type capabilities: list[str]
:param backend_service: The backend service.
:type backend_service: ~azure.mgmt.logic.models.ApiResourceBackendService
:param policies: The policies for the API.
:type policies: ~azure.mgmt.logic.models.ApiResourcePolicies
:param api_definition_url: The API definition.
:type api_definition_url: str
:param api_definitions: The api definitions.
:type api_definitions: ~azure.mgmt.logic.models.ApiResourceDefinitions
:param integration_service_environment: The integration service environment reference.
:type integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
:param provisioning_state: The provisioning state. Possible values include: "NotSpecified",
"Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
"Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
"Unregistered", "Completed", "Renewing", "Pending", "Waiting", "InProgress".
:type provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
:param category: The category. Possible values include: "NotSpecified", "Enterprise",
"Standard", "Premium".
:type category: str or ~azure.mgmt.logic.models.ApiTier
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'connection_parameters': {'key': 'connectionParameters', 'type': '{object}'},
'metadata': {'key': 'metadata', 'type': 'ApiResourceMetadata'},
'runtime_urls': {'key': 'runtimeUrls', 'type': '[str]'},
'general_information': {'key': 'generalInformation', 'type': 'ApiResourceGeneralInformation'},
'capabilities': {'key': 'capabilities', 'type': '[str]'},
'backend_service': {'key': 'backendService', 'type': 'ApiResourceBackendService'},
'policies': {'key': 'policies', 'type': 'ApiResourcePolicies'},
'api_definition_url': {'key': 'apiDefinitionUrl', 'type': 'str'},
'api_definitions': {'key': 'apiDefinitions', 'type': 'ApiResourceDefinitions'},
'integration_service_environment': {'key': 'integrationServiceEnvironment', 'type': 'ResourceReference'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApiResourceProperties, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.connection_parameters = kwargs.get('connection_parameters', None)
self.metadata = kwargs.get('metadata', None)
self.runtime_urls = kwargs.get('runtime_urls', None)
self.general_information = kwargs.get('general_information', None)
self.capabilities = kwargs.get('capabilities', None)
self.backend_service = kwargs.get('backend_service', None)
self.policies = kwargs.get('policies', None)
self.api_definition_url = kwargs.get('api_definition_url', None)
self.api_definitions = kwargs.get('api_definitions', None)
self.integration_service_environment = kwargs.get('integration_service_environment', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.category = kwargs.get('category', None)
class ArtifactProperties(msrest.serialization.Model):
"""The artifact properties definition.
:param created_time: The artifact creation time.
:type created_time: ~datetime.datetime
:param changed_time: The artifact changed time.
:type changed_time: ~datetime.datetime
:param metadata: Any object.
:type metadata: object
"""
_attribute_map = {
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'metadata', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ArtifactProperties, self).__init__(**kwargs)
self.created_time = kwargs.get('created_time', None)
self.changed_time = kwargs.get('changed_time', None)
self.metadata = kwargs.get('metadata', None)
class ArtifactContentPropertiesDefinition(ArtifactProperties):
"""The artifact content properties definition.
:param created_time: The artifact creation time.
:type created_time: ~datetime.datetime
:param changed_time: The artifact changed time.
:type changed_time: ~datetime.datetime
:param metadata: Any object.
:type metadata: object
:param content: Any object.
:type content: object
:param content_type: The content type.
:type content_type: str
:param content_link: The content link.
:type content_link: ~azure.mgmt.logic.models.ContentLink
"""
_attribute_map = {
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'metadata', 'type': 'object'},
'content': {'key': 'content', 'type': 'object'},
'content_type': {'key': 'contentType', 'type': 'str'},
'content_link': {'key': 'contentLink', 'type': 'ContentLink'},
}
def __init__(
self,
**kwargs
):
super(ArtifactContentPropertiesDefinition, self).__init__(**kwargs)
self.content = kwargs.get('content', None)
self.content_type = kwargs.get('content_type', None)
self.content_link = kwargs.get('content_link', None)
class AS2AcknowledgementConnectionSettings(msrest.serialization.Model):
"""The AS2 agreement acknowledgement connection settings.
All required parameters must be populated in order to send to Azure.
:param ignore_certificate_name_mismatch: Required. Indicates whether to ignore mismatch in
certificate name.
:type ignore_certificate_name_mismatch: bool
:param support_http_status_code_continue: Required. Indicates whether to support HTTP status
code 'CONTINUE'.
:type support_http_status_code_continue: bool
:param keep_http_connection_alive: Required. Indicates whether to keep the connection alive.
:type keep_http_connection_alive: bool
:param unfold_http_headers: Required. Indicates whether to unfold the HTTP headers.
:type unfold_http_headers: bool
"""
_validation = {
'ignore_certificate_name_mismatch': {'required': True},
'support_http_status_code_continue': {'required': True},
'keep_http_connection_alive': {'required': True},
'unfold_http_headers': {'required': True},
}
_attribute_map = {
'ignore_certificate_name_mismatch': {'key': 'ignoreCertificateNameMismatch', 'type': 'bool'},
'support_http_status_code_continue': {'key': 'supportHttpStatusCodeContinue', 'type': 'bool'},
'keep_http_connection_alive': {'key': 'keepHttpConnectionAlive', 'type': 'bool'},
'unfold_http_headers': {'key': 'unfoldHttpHeaders', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(AS2AcknowledgementConnectionSettings, self).__init__(**kwargs)
self.ignore_certificate_name_mismatch = kwargs['ignore_certificate_name_mismatch']
self.support_http_status_code_continue = kwargs['support_http_status_code_continue']
self.keep_http_connection_alive = kwargs['keep_http_connection_alive']
self.unfold_http_headers = kwargs['unfold_http_headers']
class AS2AgreementContent(msrest.serialization.Model):
"""The integration account AS2 agreement content.
All required parameters must be populated in order to send to Azure.
:param receive_agreement: Required. The AS2 one-way receive agreement.
:type receive_agreement: ~azure.mgmt.logic.models.AS2OneWayAgreement
:param send_agreement: Required. The AS2 one-way send agreement.
:type send_agreement: ~azure.mgmt.logic.models.AS2OneWayAgreement
"""
_validation = {
'receive_agreement': {'required': True},
'send_agreement': {'required': True},
}
_attribute_map = {
'receive_agreement': {'key': 'receiveAgreement', 'type': 'AS2OneWayAgreement'},
'send_agreement': {'key': 'sendAgreement', 'type': 'AS2OneWayAgreement'},
}
def __init__(
self,
**kwargs
):
super(AS2AgreementContent, self).__init__(**kwargs)
self.receive_agreement = kwargs['receive_agreement']
self.send_agreement = kwargs['send_agreement']
class AS2EnvelopeSettings(msrest.serialization.Model):
"""The AS2 agreement envelope settings.
All required parameters must be populated in order to send to Azure.
:param message_content_type: Required. The message content type.
:type message_content_type: str
:param transmit_file_name_in_mime_header: Required. The value indicating whether to transmit
file name in mime header.
:type transmit_file_name_in_mime_header: bool
:param file_name_template: Required. The template for file name.
:type file_name_template: str
:param suspend_message_on_file_name_generation_error: Required. The value indicating whether to
suspend message on file name generation error.
:type suspend_message_on_file_name_generation_error: bool
:param autogenerate_file_name: Required. The value indicating whether to auto generate file
name.
:type autogenerate_file_name: bool
"""
_validation = {
'message_content_type': {'required': True},
'transmit_file_name_in_mime_header': {'required': True},
'file_name_template': {'required': True},
'suspend_message_on_file_name_generation_error': {'required': True},
'autogenerate_file_name': {'required': True},
}
_attribute_map = {
'message_content_type': {'key': 'messageContentType', 'type': 'str'},
'transmit_file_name_in_mime_header': {'key': 'transmitFileNameInMimeHeader', 'type': 'bool'},
'file_name_template': {'key': 'fileNameTemplate', 'type': 'str'},
'suspend_message_on_file_name_generation_error': {'key': 'suspendMessageOnFileNameGenerationError', 'type': 'bool'},
'autogenerate_file_name': {'key': 'autogenerateFileName', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(AS2EnvelopeSettings, self).__init__(**kwargs)
self.message_content_type = kwargs['message_content_type']
self.transmit_file_name_in_mime_header = kwargs['transmit_file_name_in_mime_header']
self.file_name_template = kwargs['file_name_template']
self.suspend_message_on_file_name_generation_error = kwargs['suspend_message_on_file_name_generation_error']
self.autogenerate_file_name = kwargs['autogenerate_file_name']
class AS2ErrorSettings(msrest.serialization.Model):
"""The AS2 agreement error settings.
All required parameters must be populated in order to send to Azure.
:param suspend_duplicate_message: Required. The value indicating whether to suspend duplicate
message.
:type suspend_duplicate_message: bool
:param resend_if_mdn_not_received: Required. The value indicating whether to resend message If
MDN is not received.
:type resend_if_mdn_not_received: bool
"""
_validation = {
'suspend_duplicate_message': {'required': True},
'resend_if_mdn_not_received': {'required': True},
}
_attribute_map = {
'suspend_duplicate_message': {'key': 'suspendDuplicateMessage', 'type': 'bool'},
'resend_if_mdn_not_received': {'key': 'resendIfMDNNotReceived', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(AS2ErrorSettings, self).__init__(**kwargs)
self.suspend_duplicate_message = kwargs['suspend_duplicate_message']
self.resend_if_mdn_not_received = kwargs['resend_if_mdn_not_received']
class AS2MdnSettings(msrest.serialization.Model):
"""The AS2 agreement mdn settings.
All required parameters must be populated in order to send to Azure.
:param need_mdn: Required. The value indicating whether to send or request a MDN.
:type need_mdn: bool
:param sign_mdn: Required. The value indicating whether the MDN needs to be signed or not.
:type sign_mdn: bool
:param send_mdn_asynchronously: Required. The value indicating whether to send the asynchronous
MDN.
:type send_mdn_asynchronously: bool
:param receipt_delivery_url: The receipt delivery URL.
:type receipt_delivery_url: str
:param disposition_notification_to: The disposition notification to header value.
:type disposition_notification_to: str
:param sign_outbound_mdn_if_optional: Required. The value indicating whether to sign the
outbound MDN if optional.
:type sign_outbound_mdn_if_optional: bool
:param mdn_text: The MDN text.
:type mdn_text: str
:param send_inbound_mdn_to_message_box: Required. The value indicating whether to send inbound
MDN to message box.
:type send_inbound_mdn_to_message_box: bool
:param mic_hashing_algorithm: Required. The signing or hashing algorithm. Possible values
include: "NotSpecified", "None", "MD5", "SHA1", "SHA2256", "SHA2384", "SHA2512".
:type mic_hashing_algorithm: str or ~azure.mgmt.logic.models.HashingAlgorithm
"""
_validation = {
'need_mdn': {'required': True},
'sign_mdn': {'required': True},
'send_mdn_asynchronously': {'required': True},
'sign_outbound_mdn_if_optional': {'required': True},
'send_inbound_mdn_to_message_box': {'required': True},
'mic_hashing_algorithm': {'required': True},
}
_attribute_map = {
'need_mdn': {'key': 'needMDN', 'type': 'bool'},
'sign_mdn': {'key': 'signMDN', 'type': 'bool'},
'send_mdn_asynchronously': {'key': 'sendMDNAsynchronously', 'type': 'bool'},
'receipt_delivery_url': {'key': 'receiptDeliveryUrl', 'type': 'str'},
'disposition_notification_to': {'key': 'dispositionNotificationTo', 'type': 'str'},
'sign_outbound_mdn_if_optional': {'key': 'signOutboundMDNIfOptional', 'type': 'bool'},
'mdn_text': {'key': 'mdnText', 'type': 'str'},
'send_inbound_mdn_to_message_box': {'key': 'sendInboundMDNToMessageBox', 'type': 'bool'},
'mic_hashing_algorithm': {'key': 'micHashingAlgorithm', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AS2MdnSettings, self).__init__(**kwargs)
self.need_mdn = kwargs['need_mdn']
self.sign_mdn = kwargs['sign_mdn']
self.send_mdn_asynchronously = kwargs['send_mdn_asynchronously']
self.receipt_delivery_url = kwargs.get('receipt_delivery_url', None)
self.disposition_notification_to = kwargs.get('disposition_notification_to', None)
self.sign_outbound_mdn_if_optional = kwargs['sign_outbound_mdn_if_optional']
self.mdn_text = kwargs.get('mdn_text', None)
self.send_inbound_mdn_to_message_box = kwargs['send_inbound_mdn_to_message_box']
self.mic_hashing_algorithm = kwargs['mic_hashing_algorithm']
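# Illustrative usage sketch (placeholder values): required fields are read with kwargs[...]
# above and raise KeyError when omitted, while optional fields default to None via
# kwargs.get(...), so only the required arguments need to be supplied.
#
#     from azure.mgmt.logic.models import AS2MdnSettings
#
#     mdn_settings = AS2MdnSettings(
#         need_mdn=True,
#         sign_mdn=True,
#         send_mdn_asynchronously=False,
#         sign_outbound_mdn_if_optional=False,
#         send_inbound_mdn_to_message_box=True,
#         mic_hashing_algorithm="SHA2256",
#     )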
class AS2MessageConnectionSettings(msrest.serialization.Model):
"""The AS2 agreement message connection settings.
All required parameters must be populated in order to send to Azure.
:param ignore_certificate_name_mismatch: Required. The value indicating whether to ignore
mismatch in certificate name.
:type ignore_certificate_name_mismatch: bool
:param support_http_status_code_continue: Required. The value indicating whether to support
HTTP status code 'CONTINUE'.
:type support_http_status_code_continue: bool
:param keep_http_connection_alive: Required. The value indicating whether to keep the
connection alive.
:type keep_http_connection_alive: bool
:param unfold_http_headers: Required. The value indicating whether to unfold the HTTP headers.
:type unfold_http_headers: bool
"""
_validation = {
'ignore_certificate_name_mismatch': {'required': True},
'support_http_status_code_continue': {'required': True},
'keep_http_connection_alive': {'required': True},
'unfold_http_headers': {'required': True},
}
_attribute_map = {
'ignore_certificate_name_mismatch': {'key': 'ignoreCertificateNameMismatch', 'type': 'bool'},
'support_http_status_code_continue': {'key': 'supportHttpStatusCodeContinue', 'type': 'bool'},
'keep_http_connection_alive': {'key': 'keepHttpConnectionAlive', 'type': 'bool'},
'unfold_http_headers': {'key': 'unfoldHttpHeaders', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(AS2MessageConnectionSettings, self).__init__(**kwargs)
self.ignore_certificate_name_mismatch = kwargs['ignore_certificate_name_mismatch']
self.support_http_status_code_continue = kwargs['support_http_status_code_continue']
self.keep_http_connection_alive = kwargs['keep_http_connection_alive']
self.unfold_http_headers = kwargs['unfold_http_headers']
class AS2OneWayAgreement(msrest.serialization.Model):
"""The integration account AS2 one-way agreement.
All required parameters must be populated in order to send to Azure.
:param sender_business_identity: Required. The sender business identity.
:type sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param receiver_business_identity: Required. The receiver business identity.
:type receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param protocol_settings: Required. The AS2 protocol settings.
:type protocol_settings: ~azure.mgmt.logic.models.AS2ProtocolSettings
"""
_validation = {
'sender_business_identity': {'required': True},
'receiver_business_identity': {'required': True},
'protocol_settings': {'required': True},
}
_attribute_map = {
'sender_business_identity': {'key': 'senderBusinessIdentity', 'type': 'BusinessIdentity'},
'receiver_business_identity': {'key': 'receiverBusinessIdentity', 'type': 'BusinessIdentity'},
'protocol_settings': {'key': 'protocolSettings', 'type': 'AS2ProtocolSettings'},
}
def __init__(
self,
**kwargs
):
super(AS2OneWayAgreement, self).__init__(**kwargs)
self.sender_business_identity = kwargs['sender_business_identity']
self.receiver_business_identity = kwargs['receiver_business_identity']
self.protocol_settings = kwargs['protocol_settings']
class AS2ProtocolSettings(msrest.serialization.Model):
"""The AS2 agreement protocol settings.
All required parameters must be populated in order to send to Azure.
:param message_connection_settings: Required. The message connection settings.
:type message_connection_settings: ~azure.mgmt.logic.models.AS2MessageConnectionSettings
:param acknowledgement_connection_settings: Required. The acknowledgement connection settings.
:type acknowledgement_connection_settings:
~azure.mgmt.logic.models.AS2AcknowledgementConnectionSettings
:param mdn_settings: Required. The MDN settings.
:type mdn_settings: ~azure.mgmt.logic.models.AS2MdnSettings
:param security_settings: Required. The security settings.
:type security_settings: ~azure.mgmt.logic.models.AS2SecuritySettings
:param validation_settings: Required. The validation settings.
:type validation_settings: ~azure.mgmt.logic.models.AS2ValidationSettings
:param envelope_settings: Required. The envelope settings.
:type envelope_settings: ~azure.mgmt.logic.models.AS2EnvelopeSettings
:param error_settings: Required. The error settings.
:type error_settings: ~azure.mgmt.logic.models.AS2ErrorSettings
"""
_validation = {
'message_connection_settings': {'required': True},
'acknowledgement_connection_settings': {'required': True},
'mdn_settings': {'required': True},
'security_settings': {'required': True},
'validation_settings': {'required': True},
'envelope_settings': {'required': True},
'error_settings': {'required': True},
}
_attribute_map = {
'message_connection_settings': {'key': 'messageConnectionSettings', 'type': 'AS2MessageConnectionSettings'},
'acknowledgement_connection_settings': {'key': 'acknowledgementConnectionSettings', 'type': 'AS2AcknowledgementConnectionSettings'},
'mdn_settings': {'key': 'mdnSettings', 'type': 'AS2MdnSettings'},
'security_settings': {'key': 'securitySettings', 'type': 'AS2SecuritySettings'},
'validation_settings': {'key': 'validationSettings', 'type': 'AS2ValidationSettings'},
'envelope_settings': {'key': 'envelopeSettings', 'type': 'AS2EnvelopeSettings'},
'error_settings': {'key': 'errorSettings', 'type': 'AS2ErrorSettings'},
}
def __init__(
self,
**kwargs
):
super(AS2ProtocolSettings, self).__init__(**kwargs)
self.message_connection_settings = kwargs['message_connection_settings']
self.acknowledgement_connection_settings = kwargs['acknowledgement_connection_settings']
self.mdn_settings = kwargs['mdn_settings']
self.security_settings = kwargs['security_settings']
self.validation_settings = kwargs['validation_settings']
self.envelope_settings = kwargs['envelope_settings']
self.error_settings = kwargs['error_settings']
class AS2SecuritySettings(msrest.serialization.Model):
"""The AS2 agreement security settings.
All required parameters must be populated in order to send to Azure.
    :param override_group_signing_certificate: Required. The value indicating whether to override
     the group signing certificate.
:type override_group_signing_certificate: bool
:param signing_certificate_name: The name of the signing certificate.
:type signing_certificate_name: str
:param encryption_certificate_name: The name of the encryption certificate.
:type encryption_certificate_name: str
:param enable_nrr_for_inbound_encoded_messages: Required. The value indicating whether to
enable NRR for inbound encoded messages.
:type enable_nrr_for_inbound_encoded_messages: bool
:param enable_nrr_for_inbound_decoded_messages: Required. The value indicating whether to
enable NRR for inbound decoded messages.
:type enable_nrr_for_inbound_decoded_messages: bool
:param enable_nrr_for_outbound_mdn: Required. The value indicating whether to enable NRR for
outbound MDN.
:type enable_nrr_for_outbound_mdn: bool
:param enable_nrr_for_outbound_encoded_messages: Required. The value indicating whether to
enable NRR for outbound encoded messages.
:type enable_nrr_for_outbound_encoded_messages: bool
:param enable_nrr_for_outbound_decoded_messages: Required. The value indicating whether to
enable NRR for outbound decoded messages.
:type enable_nrr_for_outbound_decoded_messages: bool
:param enable_nrr_for_inbound_mdn: Required. The value indicating whether to enable NRR for
inbound MDN.
:type enable_nrr_for_inbound_mdn: bool
:param sha2_algorithm_format: The Sha2 algorithm format. Valid values are Sha2, ShaHashSize,
ShaHyphenHashSize, Sha2UnderscoreHashSize.
:type sha2_algorithm_format: str
"""
_validation = {
'override_group_signing_certificate': {'required': True},
'enable_nrr_for_inbound_encoded_messages': {'required': True},
'enable_nrr_for_inbound_decoded_messages': {'required': True},
'enable_nrr_for_outbound_mdn': {'required': True},
'enable_nrr_for_outbound_encoded_messages': {'required': True},
'enable_nrr_for_outbound_decoded_messages': {'required': True},
'enable_nrr_for_inbound_mdn': {'required': True},
}
_attribute_map = {
'override_group_signing_certificate': {'key': 'overrideGroupSigningCertificate', 'type': 'bool'},
'signing_certificate_name': {'key': 'signingCertificateName', 'type': 'str'},
'encryption_certificate_name': {'key': 'encryptionCertificateName', 'type': 'str'},
'enable_nrr_for_inbound_encoded_messages': {'key': 'enableNRRForInboundEncodedMessages', 'type': 'bool'},
'enable_nrr_for_inbound_decoded_messages': {'key': 'enableNRRForInboundDecodedMessages', 'type': 'bool'},
'enable_nrr_for_outbound_mdn': {'key': 'enableNRRForOutboundMDN', 'type': 'bool'},
'enable_nrr_for_outbound_encoded_messages': {'key': 'enableNRRForOutboundEncodedMessages', 'type': 'bool'},
'enable_nrr_for_outbound_decoded_messages': {'key': 'enableNRRForOutboundDecodedMessages', 'type': 'bool'},
'enable_nrr_for_inbound_mdn': {'key': 'enableNRRForInboundMDN', 'type': 'bool'},
'sha2_algorithm_format': {'key': 'sha2AlgorithmFormat', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AS2SecuritySettings, self).__init__(**kwargs)
self.override_group_signing_certificate = kwargs['override_group_signing_certificate']
self.signing_certificate_name = kwargs.get('signing_certificate_name', None)
self.encryption_certificate_name = kwargs.get('encryption_certificate_name', None)
self.enable_nrr_for_inbound_encoded_messages = kwargs['enable_nrr_for_inbound_encoded_messages']
self.enable_nrr_for_inbound_decoded_messages = kwargs['enable_nrr_for_inbound_decoded_messages']
self.enable_nrr_for_outbound_mdn = kwargs['enable_nrr_for_outbound_mdn']
self.enable_nrr_for_outbound_encoded_messages = kwargs['enable_nrr_for_outbound_encoded_messages']
self.enable_nrr_for_outbound_decoded_messages = kwargs['enable_nrr_for_outbound_decoded_messages']
self.enable_nrr_for_inbound_mdn = kwargs['enable_nrr_for_inbound_mdn']
self.sha2_algorithm_format = kwargs.get('sha2_algorithm_format', None)
class AS2ValidationSettings(msrest.serialization.Model):
"""The AS2 agreement validation settings.
All required parameters must be populated in order to send to Azure.
    :param override_message_properties: Required. The value indicating whether to override incoming
     message properties with those in the agreement.
:type override_message_properties: bool
:param encrypt_message: Required. The value indicating whether the message has to be encrypted.
:type encrypt_message: bool
:param sign_message: Required. The value indicating whether the message has to be signed.
:type sign_message: bool
:param compress_message: Required. The value indicating whether the message has to be
compressed.
:type compress_message: bool
:param check_duplicate_message: Required. The value indicating whether to check for duplicate
message.
:type check_duplicate_message: bool
:param interchange_duplicates_validity_days: Required. The number of days to look back for
duplicate interchange.
:type interchange_duplicates_validity_days: int
:param check_certificate_revocation_list_on_send: Required. The value indicating whether to
check for certificate revocation list on send.
:type check_certificate_revocation_list_on_send: bool
:param check_certificate_revocation_list_on_receive: Required. The value indicating whether to
check for certificate revocation list on receive.
:type check_certificate_revocation_list_on_receive: bool
:param encryption_algorithm: Required. The encryption algorithm. Possible values include:
"NotSpecified", "None", "DES3", "RC2", "AES128", "AES192", "AES256".
:type encryption_algorithm: str or ~azure.mgmt.logic.models.EncryptionAlgorithm
:param signing_algorithm: The signing algorithm. Possible values include: "NotSpecified",
"Default", "SHA1", "SHA2256", "SHA2384", "SHA2512".
:type signing_algorithm: str or ~azure.mgmt.logic.models.SigningAlgorithm
"""
_validation = {
'override_message_properties': {'required': True},
'encrypt_message': {'required': True},
'sign_message': {'required': True},
'compress_message': {'required': True},
'check_duplicate_message': {'required': True},
'interchange_duplicates_validity_days': {'required': True},
'check_certificate_revocation_list_on_send': {'required': True},
'check_certificate_revocation_list_on_receive': {'required': True},
'encryption_algorithm': {'required': True},
}
_attribute_map = {
'override_message_properties': {'key': 'overrideMessageProperties', 'type': 'bool'},
'encrypt_message': {'key': 'encryptMessage', 'type': 'bool'},
'sign_message': {'key': 'signMessage', 'type': 'bool'},
'compress_message': {'key': 'compressMessage', 'type': 'bool'},
'check_duplicate_message': {'key': 'checkDuplicateMessage', 'type': 'bool'},
'interchange_duplicates_validity_days': {'key': 'interchangeDuplicatesValidityDays', 'type': 'int'},
'check_certificate_revocation_list_on_send': {'key': 'checkCertificateRevocationListOnSend', 'type': 'bool'},
'check_certificate_revocation_list_on_receive': {'key': 'checkCertificateRevocationListOnReceive', 'type': 'bool'},
'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'},
'signing_algorithm': {'key': 'signingAlgorithm', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AS2ValidationSettings, self).__init__(**kwargs)
self.override_message_properties = kwargs['override_message_properties']
self.encrypt_message = kwargs['encrypt_message']
self.sign_message = kwargs['sign_message']
self.compress_message = kwargs['compress_message']
self.check_duplicate_message = kwargs['check_duplicate_message']
self.interchange_duplicates_validity_days = kwargs['interchange_duplicates_validity_days']
self.check_certificate_revocation_list_on_send = kwargs['check_certificate_revocation_list_on_send']
self.check_certificate_revocation_list_on_receive = kwargs['check_certificate_revocation_list_on_receive']
self.encryption_algorithm = kwargs['encryption_algorithm']
self.signing_algorithm = kwargs.get('signing_algorithm', None)
class AssemblyCollection(msrest.serialization.Model):
"""A collection of assembly definitions.
    :param value: The list of assembly definitions.
:type value: list[~azure.mgmt.logic.models.AssemblyDefinition]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AssemblyDefinition]'},
}
def __init__(
self,
**kwargs
):
super(AssemblyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class AssemblyDefinition(Resource):
"""The assembly definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: Required. The assembly properties.
:type properties: ~azure.mgmt.logic.models.AssemblyProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'AssemblyProperties'},
}
def __init__(
self,
**kwargs
):
super(AssemblyDefinition, self).__init__(**kwargs)
self.properties = kwargs['properties']
class AssemblyProperties(ArtifactContentPropertiesDefinition):
"""The assembly properties definition.
All required parameters must be populated in order to send to Azure.
:param created_time: The artifact creation time.
:type created_time: ~datetime.datetime
:param changed_time: The artifact changed time.
:type changed_time: ~datetime.datetime
:param metadata: Any object.
:type metadata: object
:param content: Any object.
:type content: object
:param content_type: The content type.
:type content_type: str
:param content_link: The content link.
:type content_link: ~azure.mgmt.logic.models.ContentLink
:param assembly_name: Required. The assembly name.
:type assembly_name: str
:param assembly_version: The assembly version.
:type assembly_version: str
:param assembly_culture: The assembly culture.
:type assembly_culture: str
:param assembly_public_key_token: The assembly public key token.
:type assembly_public_key_token: str
"""
_validation = {
'assembly_name': {'required': True},
}
_attribute_map = {
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'metadata', 'type': 'object'},
'content': {'key': 'content', 'type': 'object'},
'content_type': {'key': 'contentType', 'type': 'str'},
'content_link': {'key': 'contentLink', 'type': 'ContentLink'},
'assembly_name': {'key': 'assemblyName', 'type': 'str'},
'assembly_version': {'key': 'assemblyVersion', 'type': 'str'},
'assembly_culture': {'key': 'assemblyCulture', 'type': 'str'},
'assembly_public_key_token': {'key': 'assemblyPublicKeyToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssemblyProperties, self).__init__(**kwargs)
self.assembly_name = kwargs['assembly_name']
self.assembly_version = kwargs.get('assembly_version', None)
self.assembly_culture = kwargs.get('assembly_culture', None)
self.assembly_public_key_token = kwargs.get('assembly_public_key_token', None)
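# Illustrative usage sketch (placeholder names and values): an AssemblyDefinition wraps its
# required AssemblyProperties, matching the 'properties' entry declared in the
# _attribute_map of AssemblyDefinition above.
#
#     from azure.mgmt.logic.models import AssemblyDefinition, AssemblyProperties
#
#     assembly = AssemblyDefinition(
#         location="westus",
#         properties=AssemblyProperties(
#             assembly_name="Contoso.Integration.Helpers",
#             assembly_version="1.0.0.0",
#         ),
#     )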
class ErrorInfo(msrest.serialization.Model):
"""The error info.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code.
:type code: str
"""
_validation = {
'code': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorInfo, self).__init__(**kwargs)
self.code = kwargs['code']
class AzureResourceErrorInfo(ErrorInfo):
"""The azure resource error info.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code.
:type code: str
:param message: Required. The error message.
:type message: str
:param details: The error details.
:type details: list[~azure.mgmt.logic.models.AzureResourceErrorInfo]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[AzureResourceErrorInfo]'},
}
def __init__(
self,
**kwargs
):
super(AzureResourceErrorInfo, self).__init__(**kwargs)
self.message = kwargs['message']
self.details = kwargs.get('details', None)
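# Illustrative usage sketch (placeholder codes and messages): the optional 'details' field
# holds nested AzureResourceErrorInfo instances, so an error can carry a tree of inner errors.
#
#     from azure.mgmt.logic.models import AzureResourceErrorInfo
#
#     error = AzureResourceErrorInfo(
#         code="WorkflowRunFailed",
#         message="The workflow run failed.",
#         details=[
#             AzureResourceErrorInfo(code="BadRequest", message="Invalid trigger input."),
#         ],
#     )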
class B2BPartnerContent(msrest.serialization.Model):
"""The B2B partner content.
:param business_identities: The list of partner business identities.
:type business_identities: list[~azure.mgmt.logic.models.BusinessIdentity]
"""
_attribute_map = {
'business_identities': {'key': 'businessIdentities', 'type': '[BusinessIdentity]'},
}
def __init__(
self,
**kwargs
):
super(B2BPartnerContent, self).__init__(**kwargs)
self.business_identities = kwargs.get('business_identities', None)
class BatchConfiguration(Resource):
"""The batch configuration resource definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: Required. The batch configuration properties.
:type properties: ~azure.mgmt.logic.models.BatchConfigurationProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'BatchConfigurationProperties'},
}
def __init__(
self,
**kwargs
):
super(BatchConfiguration, self).__init__(**kwargs)
self.properties = kwargs['properties']
class BatchConfigurationCollection(msrest.serialization.Model):
"""A collection of batch configurations.
    :param value: The list of batch configurations.
:type value: list[~azure.mgmt.logic.models.BatchConfiguration]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[BatchConfiguration]'},
}
def __init__(
self,
**kwargs
):
super(BatchConfigurationCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class BatchConfigurationProperties(ArtifactProperties):
"""The batch configuration properties definition.
All required parameters must be populated in order to send to Azure.
:param created_time: The artifact creation time.
:type created_time: ~datetime.datetime
:param changed_time: The artifact changed time.
:type changed_time: ~datetime.datetime
:param metadata: Any object.
:type metadata: object
:param batch_group_name: Required. The name of the batch group.
:type batch_group_name: str
:param release_criteria: Required. The batch release criteria.
:type release_criteria: ~azure.mgmt.logic.models.BatchReleaseCriteria
"""
_validation = {
'batch_group_name': {'required': True},
'release_criteria': {'required': True},
}
_attribute_map = {
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'metadata', 'type': 'object'},
'batch_group_name': {'key': 'batchGroupName', 'type': 'str'},
'release_criteria': {'key': 'releaseCriteria', 'type': 'BatchReleaseCriteria'},
}
def __init__(
self,
**kwargs
):
super(BatchConfigurationProperties, self).__init__(**kwargs)
self.batch_group_name = kwargs['batch_group_name']
self.release_criteria = kwargs['release_criteria']
class BatchReleaseCriteria(msrest.serialization.Model):
"""The batch release criteria.
:param message_count: The message count.
:type message_count: int
:param batch_size: The batch size in bytes.
:type batch_size: int
:param recurrence: The recurrence.
:type recurrence: ~azure.mgmt.logic.models.WorkflowTriggerRecurrence
"""
_attribute_map = {
'message_count': {'key': 'messageCount', 'type': 'int'},
'batch_size': {'key': 'batchSize', 'type': 'int'},
'recurrence': {'key': 'recurrence', 'type': 'WorkflowTriggerRecurrence'},
}
def __init__(
self,
**kwargs
):
super(BatchReleaseCriteria, self).__init__(**kwargs)
self.message_count = kwargs.get('message_count', None)
self.batch_size = kwargs.get('batch_size', None)
self.recurrence = kwargs.get('recurrence', None)
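# Illustrative usage sketch (placeholder values): every release criteria field is optional in
# the model, but at least one criterion would normally be set; the criteria object then becomes
# the required 'release_criteria' of BatchConfigurationProperties defined above.
#
#     from azure.mgmt.logic.models import BatchConfigurationProperties, BatchReleaseCriteria
#
#     batch_properties = BatchConfigurationProperties(
#         batch_group_name="DEFAULT",
#         release_criteria=BatchReleaseCriteria(message_count=10, batch_size=1024),
#     )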
class BusinessIdentity(msrest.serialization.Model):
"""The integration account partner's business identity.
All required parameters must be populated in order to send to Azure.
:param qualifier: Required. The business identity qualifier e.g. as2identity, ZZ, ZZZ, 31, 32.
:type qualifier: str
:param value: Required. The user defined business identity value.
:type value: str
"""
_validation = {
'qualifier': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'qualifier': {'key': 'qualifier', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BusinessIdentity, self).__init__(**kwargs)
self.qualifier = kwargs['qualifier']
self.value = kwargs['value']
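# Illustrative usage sketch: a business identity pairs a qualifier (e.g. "ZZ", as documented
# above) with a user-defined value; msrest's Model.serialize(), assumed to be available from
# the base class, maps the attributes to the wire names in _attribute_map.
#
#     from azure.mgmt.logic.models import BusinessIdentity
#
#     identity = BusinessIdentity(qualifier="ZZ", value="Contoso")
#     identity.serialize()  # expected: {'qualifier': 'ZZ', 'value': 'Contoso'}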
class CallbackUrl(msrest.serialization.Model):
"""The callback url.
:param value: The URL value.
:type value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CallbackUrl, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class ContentHash(msrest.serialization.Model):
"""The content hash.
:param algorithm: The algorithm of the content hash.
:type algorithm: str
:param value: The value of the content hash.
:type value: str
"""
_attribute_map = {
'algorithm': {'key': 'algorithm', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentHash, self).__init__(**kwargs)
self.algorithm = kwargs.get('algorithm', None)
self.value = kwargs.get('value', None)
class ContentLink(msrest.serialization.Model):
"""The content link.
:param uri: The content link URI.
:type uri: str
:param content_version: The content version.
:type content_version: str
:param content_size: The content size.
:type content_size: long
:param content_hash: The content hash.
:type content_hash: ~azure.mgmt.logic.models.ContentHash
:param metadata: The metadata.
:type metadata: object
"""
_attribute_map = {
'uri': {'key': 'uri', 'type': 'str'},
'content_version': {'key': 'contentVersion', 'type': 'str'},
'content_size': {'key': 'contentSize', 'type': 'long'},
'content_hash': {'key': 'contentHash', 'type': 'ContentHash'},
'metadata': {'key': 'metadata', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ContentLink, self).__init__(**kwargs)
self.uri = kwargs.get('uri', None)
self.content_version = kwargs.get('content_version', None)
self.content_size = kwargs.get('content_size', None)
self.content_hash = kwargs.get('content_hash', None)
self.metadata = kwargs.get('metadata', None)
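# Illustrative usage sketch (placeholder URI and hash): all ContentLink fields are optional; a
# typical instance carries the content URI together with its size and hash.
#
#     from azure.mgmt.logic.models import ContentHash, ContentLink
#
#     link = ContentLink(
#         uri="https://contoso.blob.core.windows.net/assemblies/helpers.dll",
#         content_size=2048,
#         content_hash=ContentHash(algorithm="md5", value="<base64-digest>"),
#     )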
class Correlation(msrest.serialization.Model):
"""The correlation property.
:param client_tracking_id: The client tracking id.
:type client_tracking_id: str
"""
_attribute_map = {
'client_tracking_id': {'key': 'clientTrackingId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Correlation, self).__init__(**kwargs)
self.client_tracking_id = kwargs.get('client_tracking_id', None)
class EdifactAcknowledgementSettings(msrest.serialization.Model):
"""The Edifact agreement acknowledgement settings.
All required parameters must be populated in order to send to Azure.
:param need_technical_acknowledgement: Required. The value indicating whether technical
acknowledgement is needed.
:type need_technical_acknowledgement: bool
:param batch_technical_acknowledgements: Required. The value indicating whether to batch the
technical acknowledgements.
:type batch_technical_acknowledgements: bool
:param need_functional_acknowledgement: Required. The value indicating whether functional
acknowledgement is needed.
:type need_functional_acknowledgement: bool
:param batch_functional_acknowledgements: Required. The value indicating whether to batch
functional acknowledgements.
:type batch_functional_acknowledgements: bool
:param need_loop_for_valid_messages: Required. The value indicating whether a loop is needed
for valid messages.
:type need_loop_for_valid_messages: bool
:param send_synchronous_acknowledgement: Required. The value indicating whether to send
synchronous acknowledgement.
:type send_synchronous_acknowledgement: bool
:param acknowledgement_control_number_prefix: The acknowledgement control number prefix.
:type acknowledgement_control_number_prefix: str
:param acknowledgement_control_number_suffix: The acknowledgement control number suffix.
:type acknowledgement_control_number_suffix: str
:param acknowledgement_control_number_lower_bound: Required. The acknowledgement control number
lower bound.
:type acknowledgement_control_number_lower_bound: int
:param acknowledgement_control_number_upper_bound: Required. The acknowledgement control number
upper bound.
:type acknowledgement_control_number_upper_bound: int
:param rollover_acknowledgement_control_number: Required. The value indicating whether to
rollover acknowledgement control number.
:type rollover_acknowledgement_control_number: bool
"""
_validation = {
'need_technical_acknowledgement': {'required': True},
'batch_technical_acknowledgements': {'required': True},
'need_functional_acknowledgement': {'required': True},
'batch_functional_acknowledgements': {'required': True},
'need_loop_for_valid_messages': {'required': True},
'send_synchronous_acknowledgement': {'required': True},
'acknowledgement_control_number_lower_bound': {'required': True},
'acknowledgement_control_number_upper_bound': {'required': True},
'rollover_acknowledgement_control_number': {'required': True},
}
_attribute_map = {
'need_technical_acknowledgement': {'key': 'needTechnicalAcknowledgement', 'type': 'bool'},
'batch_technical_acknowledgements': {'key': 'batchTechnicalAcknowledgements', 'type': 'bool'},
'need_functional_acknowledgement': {'key': 'needFunctionalAcknowledgement', 'type': 'bool'},
'batch_functional_acknowledgements': {'key': 'batchFunctionalAcknowledgements', 'type': 'bool'},
'need_loop_for_valid_messages': {'key': 'needLoopForValidMessages', 'type': 'bool'},
'send_synchronous_acknowledgement': {'key': 'sendSynchronousAcknowledgement', 'type': 'bool'},
'acknowledgement_control_number_prefix': {'key': 'acknowledgementControlNumberPrefix', 'type': 'str'},
'acknowledgement_control_number_suffix': {'key': 'acknowledgementControlNumberSuffix', 'type': 'str'},
'acknowledgement_control_number_lower_bound': {'key': 'acknowledgementControlNumberLowerBound', 'type': 'int'},
'acknowledgement_control_number_upper_bound': {'key': 'acknowledgementControlNumberUpperBound', 'type': 'int'},
'rollover_acknowledgement_control_number': {'key': 'rolloverAcknowledgementControlNumber', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(EdifactAcknowledgementSettings, self).__init__(**kwargs)
self.need_technical_acknowledgement = kwargs['need_technical_acknowledgement']
self.batch_technical_acknowledgements = kwargs['batch_technical_acknowledgements']
self.need_functional_acknowledgement = kwargs['need_functional_acknowledgement']
self.batch_functional_acknowledgements = kwargs['batch_functional_acknowledgements']
self.need_loop_for_valid_messages = kwargs['need_loop_for_valid_messages']
self.send_synchronous_acknowledgement = kwargs['send_synchronous_acknowledgement']
self.acknowledgement_control_number_prefix = kwargs.get('acknowledgement_control_number_prefix', None)
self.acknowledgement_control_number_suffix = kwargs.get('acknowledgement_control_number_suffix', None)
self.acknowledgement_control_number_lower_bound = kwargs['acknowledgement_control_number_lower_bound']
self.acknowledgement_control_number_upper_bound = kwargs['acknowledgement_control_number_upper_bound']
self.rollover_acknowledgement_control_number = kwargs['rollover_acknowledgement_control_number']
class EdifactAgreementContent(msrest.serialization.Model):
"""The Edifact agreement content.
All required parameters must be populated in order to send to Azure.
:param receive_agreement: Required. The EDIFACT one-way receive agreement.
:type receive_agreement: ~azure.mgmt.logic.models.EdifactOneWayAgreement
:param send_agreement: Required. The EDIFACT one-way send agreement.
:type send_agreement: ~azure.mgmt.logic.models.EdifactOneWayAgreement
"""
_validation = {
'receive_agreement': {'required': True},
'send_agreement': {'required': True},
}
_attribute_map = {
'receive_agreement': {'key': 'receiveAgreement', 'type': 'EdifactOneWayAgreement'},
'send_agreement': {'key': 'sendAgreement', 'type': 'EdifactOneWayAgreement'},
}
def __init__(
self,
**kwargs
):
super(EdifactAgreementContent, self).__init__(**kwargs)
self.receive_agreement = kwargs['receive_agreement']
self.send_agreement = kwargs['send_agreement']
class EdifactDelimiterOverride(msrest.serialization.Model):
"""The Edifact delimiter override settings.
All required parameters must be populated in order to send to Azure.
:param message_id: The message id.
:type message_id: str
:param message_version: The message version.
:type message_version: str
:param message_release: The message release.
:type message_release: str
:param data_element_separator: Required. The data element separator.
:type data_element_separator: int
:param component_separator: Required. The component separator.
:type component_separator: int
:param segment_terminator: Required. The segment terminator.
:type segment_terminator: int
:param repetition_separator: Required. The repetition separator.
:type repetition_separator: int
:param segment_terminator_suffix: Required. The segment terminator suffix. Possible values
include: "NotSpecified", "None", "CR", "LF", "CRLF".
:type segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix
:param decimal_point_indicator: Required. The decimal point indicator. Possible values include:
"NotSpecified", "Comma", "Decimal".
:type decimal_point_indicator: str or ~azure.mgmt.logic.models.EdifactDecimalIndicator
:param release_indicator: Required. The release indicator.
:type release_indicator: int
:param message_association_assigned_code: The message association assigned code.
:type message_association_assigned_code: str
    :param target_namespace: The target namespace on which these delimiter settings have to be
     applied.
:type target_namespace: str
"""
_validation = {
'data_element_separator': {'required': True},
'component_separator': {'required': True},
'segment_terminator': {'required': True},
'repetition_separator': {'required': True},
'segment_terminator_suffix': {'required': True},
'decimal_point_indicator': {'required': True},
'release_indicator': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'message_version': {'key': 'messageVersion', 'type': 'str'},
'message_release': {'key': 'messageRelease', 'type': 'str'},
'data_element_separator': {'key': 'dataElementSeparator', 'type': 'int'},
'component_separator': {'key': 'componentSeparator', 'type': 'int'},
'segment_terminator': {'key': 'segmentTerminator', 'type': 'int'},
'repetition_separator': {'key': 'repetitionSeparator', 'type': 'int'},
'segment_terminator_suffix': {'key': 'segmentTerminatorSuffix', 'type': 'str'},
'decimal_point_indicator': {'key': 'decimalPointIndicator', 'type': 'str'},
'release_indicator': {'key': 'releaseIndicator', 'type': 'int'},
'message_association_assigned_code': {'key': 'messageAssociationAssignedCode', 'type': 'str'},
'target_namespace': {'key': 'targetNamespace', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdifactDelimiterOverride, self).__init__(**kwargs)
self.message_id = kwargs.get('message_id', None)
self.message_version = kwargs.get('message_version', None)
self.message_release = kwargs.get('message_release', None)
self.data_element_separator = kwargs['data_element_separator']
self.component_separator = kwargs['component_separator']
self.segment_terminator = kwargs['segment_terminator']
self.repetition_separator = kwargs['repetition_separator']
self.segment_terminator_suffix = kwargs['segment_terminator_suffix']
self.decimal_point_indicator = kwargs['decimal_point_indicator']
self.release_indicator = kwargs['release_indicator']
self.message_association_assigned_code = kwargs.get('message_association_assigned_code', None)
self.target_namespace = kwargs.get('target_namespace', None)
class EdifactEnvelopeOverride(msrest.serialization.Model):
"""The Edifact envelope override settings.
    :param message_id: The message id on which these envelope settings have to be applied.
:type message_id: str
    :param message_version: The message version on which these envelope settings have to be applied.
:type message_version: str
    :param message_release: The message release version on which these envelope settings have to
     be applied.
:type message_release: str
:param message_association_assigned_code: The message association assigned code.
:type message_association_assigned_code: str
    :param target_namespace: The target namespace on which these envelope settings have to be
     applied.
:type target_namespace: str
:param functional_group_id: The functional group id.
:type functional_group_id: str
:param sender_application_qualifier: The sender application qualifier.
:type sender_application_qualifier: str
:param sender_application_id: The sender application id.
:type sender_application_id: str
:param receiver_application_qualifier: The receiver application qualifier.
:type receiver_application_qualifier: str
:param receiver_application_id: The receiver application id.
:type receiver_application_id: str
:param controlling_agency_code: The controlling agency code.
:type controlling_agency_code: str
:param group_header_message_version: The group header message version.
:type group_header_message_version: str
:param group_header_message_release: The group header message release.
:type group_header_message_release: str
:param association_assigned_code: The association assigned code.
:type association_assigned_code: str
:param application_password: The application password.
:type application_password: str
"""
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'message_version': {'key': 'messageVersion', 'type': 'str'},
'message_release': {'key': 'messageRelease', 'type': 'str'},
'message_association_assigned_code': {'key': 'messageAssociationAssignedCode', 'type': 'str'},
'target_namespace': {'key': 'targetNamespace', 'type': 'str'},
'functional_group_id': {'key': 'functionalGroupId', 'type': 'str'},
'sender_application_qualifier': {'key': 'senderApplicationQualifier', 'type': 'str'},
'sender_application_id': {'key': 'senderApplicationId', 'type': 'str'},
'receiver_application_qualifier': {'key': 'receiverApplicationQualifier', 'type': 'str'},
'receiver_application_id': {'key': 'receiverApplicationId', 'type': 'str'},
'controlling_agency_code': {'key': 'controllingAgencyCode', 'type': 'str'},
'group_header_message_version': {'key': 'groupHeaderMessageVersion', 'type': 'str'},
'group_header_message_release': {'key': 'groupHeaderMessageRelease', 'type': 'str'},
'association_assigned_code': {'key': 'associationAssignedCode', 'type': 'str'},
'application_password': {'key': 'applicationPassword', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdifactEnvelopeOverride, self).__init__(**kwargs)
self.message_id = kwargs.get('message_id', None)
self.message_version = kwargs.get('message_version', None)
self.message_release = kwargs.get('message_release', None)
self.message_association_assigned_code = kwargs.get('message_association_assigned_code', None)
self.target_namespace = kwargs.get('target_namespace', None)
self.functional_group_id = kwargs.get('functional_group_id', None)
self.sender_application_qualifier = kwargs.get('sender_application_qualifier', None)
self.sender_application_id = kwargs.get('sender_application_id', None)
self.receiver_application_qualifier = kwargs.get('receiver_application_qualifier', None)
self.receiver_application_id = kwargs.get('receiver_application_id', None)
self.controlling_agency_code = kwargs.get('controlling_agency_code', None)
self.group_header_message_version = kwargs.get('group_header_message_version', None)
self.group_header_message_release = kwargs.get('group_header_message_release', None)
self.association_assigned_code = kwargs.get('association_assigned_code', None)
self.application_password = kwargs.get('application_password', None)
class EdifactEnvelopeSettings(msrest.serialization.Model):
"""The Edifact agreement envelope settings.
All required parameters must be populated in order to send to Azure.
:param group_association_assigned_code: The group association assigned code.
:type group_association_assigned_code: str
:param communication_agreement_id: The communication agreement id.
:type communication_agreement_id: str
:param apply_delimiter_string_advice: Required. The value indicating whether to apply delimiter
string advice.
:type apply_delimiter_string_advice: bool
:param create_grouping_segments: Required. The value indicating whether to create grouping
segments.
:type create_grouping_segments: bool
:param enable_default_group_headers: Required. The value indicating whether to enable default
group headers.
:type enable_default_group_headers: bool
:param recipient_reference_password_value: The recipient reference password value.
:type recipient_reference_password_value: str
:param recipient_reference_password_qualifier: The recipient reference password qualifier.
:type recipient_reference_password_qualifier: str
:param application_reference_id: The application reference id.
:type application_reference_id: str
:param processing_priority_code: The processing priority code.
:type processing_priority_code: str
:param interchange_control_number_lower_bound: Required. The interchange control number lower
bound.
:type interchange_control_number_lower_bound: long
:param interchange_control_number_upper_bound: Required. The interchange control number upper
bound.
:type interchange_control_number_upper_bound: long
:param rollover_interchange_control_number: Required. The value indicating whether to rollover
interchange control number.
:type rollover_interchange_control_number: bool
:param interchange_control_number_prefix: The interchange control number prefix.
:type interchange_control_number_prefix: str
:param interchange_control_number_suffix: The interchange control number suffix.
:type interchange_control_number_suffix: str
:param sender_reverse_routing_address: The sender reverse routing address.
:type sender_reverse_routing_address: str
:param receiver_reverse_routing_address: The receiver reverse routing address.
:type receiver_reverse_routing_address: str
:param functional_group_id: The functional group id.
:type functional_group_id: str
:param group_controlling_agency_code: The group controlling agency code.
:type group_controlling_agency_code: str
:param group_message_version: The group message version.
:type group_message_version: str
:param group_message_release: The group message release.
:type group_message_release: str
:param group_control_number_lower_bound: Required. The group control number lower bound.
:type group_control_number_lower_bound: long
:param group_control_number_upper_bound: Required. The group control number upper bound.
:type group_control_number_upper_bound: long
:param rollover_group_control_number: Required. The value indicating whether to rollover group
control number.
:type rollover_group_control_number: bool
:param group_control_number_prefix: The group control number prefix.
:type group_control_number_prefix: str
:param group_control_number_suffix: The group control number suffix.
:type group_control_number_suffix: str
:param group_application_receiver_qualifier: The group application receiver qualifier.
:type group_application_receiver_qualifier: str
:param group_application_receiver_id: The group application receiver id.
:type group_application_receiver_id: str
:param group_application_sender_qualifier: The group application sender qualifier.
:type group_application_sender_qualifier: str
:param group_application_sender_id: The group application sender id.
:type group_application_sender_id: str
:param group_application_password: The group application password.
:type group_application_password: str
:param overwrite_existing_transaction_set_control_number: Required. The value indicating
whether to overwrite existing transaction set control number.
:type overwrite_existing_transaction_set_control_number: bool
:param transaction_set_control_number_prefix: The transaction set control number prefix.
:type transaction_set_control_number_prefix: str
:param transaction_set_control_number_suffix: The transaction set control number suffix.
:type transaction_set_control_number_suffix: str
:param transaction_set_control_number_lower_bound: Required. The transaction set control number
lower bound.
:type transaction_set_control_number_lower_bound: long
:param transaction_set_control_number_upper_bound: Required. The transaction set control number
upper bound.
:type transaction_set_control_number_upper_bound: long
:param rollover_transaction_set_control_number: Required. The value indicating whether to
rollover transaction set control number.
:type rollover_transaction_set_control_number: bool
:param is_test_interchange: Required. The value indicating whether the message is a test
interchange.
:type is_test_interchange: bool
:param sender_internal_identification: The sender internal identification.
:type sender_internal_identification: str
:param sender_internal_sub_identification: The sender internal sub identification.
:type sender_internal_sub_identification: str
:param receiver_internal_identification: The receiver internal identification.
:type receiver_internal_identification: str
:param receiver_internal_sub_identification: The receiver internal sub identification.
:type receiver_internal_sub_identification: str
"""
_validation = {
'apply_delimiter_string_advice': {'required': True},
'create_grouping_segments': {'required': True},
'enable_default_group_headers': {'required': True},
'interchange_control_number_lower_bound': {'required': True},
'interchange_control_number_upper_bound': {'required': True},
'rollover_interchange_control_number': {'required': True},
'group_control_number_lower_bound': {'required': True},
'group_control_number_upper_bound': {'required': True},
'rollover_group_control_number': {'required': True},
'overwrite_existing_transaction_set_control_number': {'required': True},
'transaction_set_control_number_lower_bound': {'required': True},
'transaction_set_control_number_upper_bound': {'required': True},
'rollover_transaction_set_control_number': {'required': True},
'is_test_interchange': {'required': True},
}
_attribute_map = {
'group_association_assigned_code': {'key': 'groupAssociationAssignedCode', 'type': 'str'},
'communication_agreement_id': {'key': 'communicationAgreementId', 'type': 'str'},
'apply_delimiter_string_advice': {'key': 'applyDelimiterStringAdvice', 'type': 'bool'},
'create_grouping_segments': {'key': 'createGroupingSegments', 'type': 'bool'},
'enable_default_group_headers': {'key': 'enableDefaultGroupHeaders', 'type': 'bool'},
'recipient_reference_password_value': {'key': 'recipientReferencePasswordValue', 'type': 'str'},
'recipient_reference_password_qualifier': {'key': 'recipientReferencePasswordQualifier', 'type': 'str'},
'application_reference_id': {'key': 'applicationReferenceId', 'type': 'str'},
'processing_priority_code': {'key': 'processingPriorityCode', 'type': 'str'},
'interchange_control_number_lower_bound': {'key': 'interchangeControlNumberLowerBound', 'type': 'long'},
'interchange_control_number_upper_bound': {'key': 'interchangeControlNumberUpperBound', 'type': 'long'},
'rollover_interchange_control_number': {'key': 'rolloverInterchangeControlNumber', 'type': 'bool'},
'interchange_control_number_prefix': {'key': 'interchangeControlNumberPrefix', 'type': 'str'},
'interchange_control_number_suffix': {'key': 'interchangeControlNumberSuffix', 'type': 'str'},
'sender_reverse_routing_address': {'key': 'senderReverseRoutingAddress', 'type': 'str'},
'receiver_reverse_routing_address': {'key': 'receiverReverseRoutingAddress', 'type': 'str'},
'functional_group_id': {'key': 'functionalGroupId', 'type': 'str'},
'group_controlling_agency_code': {'key': 'groupControllingAgencyCode', 'type': 'str'},
'group_message_version': {'key': 'groupMessageVersion', 'type': 'str'},
'group_message_release': {'key': 'groupMessageRelease', 'type': 'str'},
'group_control_number_lower_bound': {'key': 'groupControlNumberLowerBound', 'type': 'long'},
'group_control_number_upper_bound': {'key': 'groupControlNumberUpperBound', 'type': 'long'},
'rollover_group_control_number': {'key': 'rolloverGroupControlNumber', 'type': 'bool'},
'group_control_number_prefix': {'key': 'groupControlNumberPrefix', 'type': 'str'},
'group_control_number_suffix': {'key': 'groupControlNumberSuffix', 'type': 'str'},
'group_application_receiver_qualifier': {'key': 'groupApplicationReceiverQualifier', 'type': 'str'},
'group_application_receiver_id': {'key': 'groupApplicationReceiverId', 'type': 'str'},
'group_application_sender_qualifier': {'key': 'groupApplicationSenderQualifier', 'type': 'str'},
'group_application_sender_id': {'key': 'groupApplicationSenderId', 'type': 'str'},
'group_application_password': {'key': 'groupApplicationPassword', 'type': 'str'},
'overwrite_existing_transaction_set_control_number': {'key': 'overwriteExistingTransactionSetControlNumber', 'type': 'bool'},
'transaction_set_control_number_prefix': {'key': 'transactionSetControlNumberPrefix', 'type': 'str'},
'transaction_set_control_number_suffix': {'key': 'transactionSetControlNumberSuffix', 'type': 'str'},
'transaction_set_control_number_lower_bound': {'key': 'transactionSetControlNumberLowerBound', 'type': 'long'},
'transaction_set_control_number_upper_bound': {'key': 'transactionSetControlNumberUpperBound', 'type': 'long'},
'rollover_transaction_set_control_number': {'key': 'rolloverTransactionSetControlNumber', 'type': 'bool'},
'is_test_interchange': {'key': 'isTestInterchange', 'type': 'bool'},
'sender_internal_identification': {'key': 'senderInternalIdentification', 'type': 'str'},
'sender_internal_sub_identification': {'key': 'senderInternalSubIdentification', 'type': 'str'},
'receiver_internal_identification': {'key': 'receiverInternalIdentification', 'type': 'str'},
'receiver_internal_sub_identification': {'key': 'receiverInternalSubIdentification', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdifactEnvelopeSettings, self).__init__(**kwargs)
self.group_association_assigned_code = kwargs.get('group_association_assigned_code', None)
self.communication_agreement_id = kwargs.get('communication_agreement_id', None)
self.apply_delimiter_string_advice = kwargs['apply_delimiter_string_advice']
self.create_grouping_segments = kwargs['create_grouping_segments']
self.enable_default_group_headers = kwargs['enable_default_group_headers']
self.recipient_reference_password_value = kwargs.get('recipient_reference_password_value', None)
self.recipient_reference_password_qualifier = kwargs.get('recipient_reference_password_qualifier', None)
self.application_reference_id = kwargs.get('application_reference_id', None)
self.processing_priority_code = kwargs.get('processing_priority_code', None)
self.interchange_control_number_lower_bound = kwargs['interchange_control_number_lower_bound']
self.interchange_control_number_upper_bound = kwargs['interchange_control_number_upper_bound']
self.rollover_interchange_control_number = kwargs['rollover_interchange_control_number']
self.interchange_control_number_prefix = kwargs.get('interchange_control_number_prefix', None)
self.interchange_control_number_suffix = kwargs.get('interchange_control_number_suffix', None)
self.sender_reverse_routing_address = kwargs.get('sender_reverse_routing_address', None)
self.receiver_reverse_routing_address = kwargs.get('receiver_reverse_routing_address', None)
self.functional_group_id = kwargs.get('functional_group_id', None)
self.group_controlling_agency_code = kwargs.get('group_controlling_agency_code', None)
self.group_message_version = kwargs.get('group_message_version', None)
self.group_message_release = kwargs.get('group_message_release', None)
self.group_control_number_lower_bound = kwargs['group_control_number_lower_bound']
self.group_control_number_upper_bound = kwargs['group_control_number_upper_bound']
self.rollover_group_control_number = kwargs['rollover_group_control_number']
self.group_control_number_prefix = kwargs.get('group_control_number_prefix', None)
self.group_control_number_suffix = kwargs.get('group_control_number_suffix', None)
self.group_application_receiver_qualifier = kwargs.get('group_application_receiver_qualifier', None)
self.group_application_receiver_id = kwargs.get('group_application_receiver_id', None)
self.group_application_sender_qualifier = kwargs.get('group_application_sender_qualifier', None)
self.group_application_sender_id = kwargs.get('group_application_sender_id', None)
self.group_application_password = kwargs.get('group_application_password', None)
self.overwrite_existing_transaction_set_control_number = kwargs['overwrite_existing_transaction_set_control_number']
self.transaction_set_control_number_prefix = kwargs.get('transaction_set_control_number_prefix', None)
self.transaction_set_control_number_suffix = kwargs.get('transaction_set_control_number_suffix', None)
self.transaction_set_control_number_lower_bound = kwargs['transaction_set_control_number_lower_bound']
self.transaction_set_control_number_upper_bound = kwargs['transaction_set_control_number_upper_bound']
self.rollover_transaction_set_control_number = kwargs['rollover_transaction_set_control_number']
self.is_test_interchange = kwargs['is_test_interchange']
self.sender_internal_identification = kwargs.get('sender_internal_identification', None)
self.sender_internal_sub_identification = kwargs.get('sender_internal_sub_identification', None)
self.receiver_internal_identification = kwargs.get('receiver_internal_identification', None)
self.receiver_internal_sub_identification = kwargs.get('receiver_internal_sub_identification', None)
class EdifactFramingSettings(msrest.serialization.Model):
"""The Edifact agreement framing settings.
All required parameters must be populated in order to send to Azure.
:param service_code_list_directory_version: The service code list directory version.
:type service_code_list_directory_version: str
:param character_encoding: The character encoding.
:type character_encoding: str
:param protocol_version: Required. The protocol version.
:type protocol_version: int
:param data_element_separator: Required. The data element separator.
:type data_element_separator: int
:param component_separator: Required. The component separator.
:type component_separator: int
:param segment_terminator: Required. The segment terminator.
:type segment_terminator: int
:param release_indicator: Required. The release indicator.
:type release_indicator: int
:param repetition_separator: Required. The repetition separator.
:type repetition_separator: int
:param character_set: Required. The EDIFACT frame setting characterSet. Possible values
include: "NotSpecified", "UNOB", "UNOA", "UNOC", "UNOD", "UNOE", "UNOF", "UNOG", "UNOH",
"UNOI", "UNOJ", "UNOK", "UNOX", "UNOY", "KECA".
:type character_set: str or ~azure.mgmt.logic.models.EdifactCharacterSet
:param decimal_point_indicator: Required. The EDIFACT frame setting decimal indicator. Possible
values include: "NotSpecified", "Comma", "Decimal".
:type decimal_point_indicator: str or ~azure.mgmt.logic.models.EdifactDecimalIndicator
:param segment_terminator_suffix: Required. The EDIFACT frame setting segment terminator
suffix. Possible values include: "NotSpecified", "None", "CR", "LF", "CRLF".
:type segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix
"""
_validation = {
'protocol_version': {'required': True},
'data_element_separator': {'required': True},
'component_separator': {'required': True},
'segment_terminator': {'required': True},
'release_indicator': {'required': True},
'repetition_separator': {'required': True},
'character_set': {'required': True},
'decimal_point_indicator': {'required': True},
'segment_terminator_suffix': {'required': True},
}
_attribute_map = {
'service_code_list_directory_version': {'key': 'serviceCodeListDirectoryVersion', 'type': 'str'},
'character_encoding': {'key': 'characterEncoding', 'type': 'str'},
'protocol_version': {'key': 'protocolVersion', 'type': 'int'},
'data_element_separator': {'key': 'dataElementSeparator', 'type': 'int'},
'component_separator': {'key': 'componentSeparator', 'type': 'int'},
'segment_terminator': {'key': 'segmentTerminator', 'type': 'int'},
'release_indicator': {'key': 'releaseIndicator', 'type': 'int'},
'repetition_separator': {'key': 'repetitionSeparator', 'type': 'int'},
'character_set': {'key': 'characterSet', 'type': 'str'},
'decimal_point_indicator': {'key': 'decimalPointIndicator', 'type': 'str'},
'segment_terminator_suffix': {'key': 'segmentTerminatorSuffix', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdifactFramingSettings, self).__init__(**kwargs)
self.service_code_list_directory_version = kwargs.get('service_code_list_directory_version', None)
self.character_encoding = kwargs.get('character_encoding', None)
self.protocol_version = kwargs['protocol_version']
self.data_element_separator = kwargs['data_element_separator']
self.component_separator = kwargs['component_separator']
self.segment_terminator = kwargs['segment_terminator']
self.release_indicator = kwargs['release_indicator']
self.repetition_separator = kwargs['repetition_separator']
self.character_set = kwargs['character_set']
self.decimal_point_indicator = kwargs['decimal_point_indicator']
self.segment_terminator_suffix = kwargs['segment_terminator_suffix']
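
# Illustrative sketch only (hypothetical helper, not part of the generated model
# surface): building EDIFACT framing settings. The separator code points below
# are the conventional UN/EDIFACT defaults (+ : ' ? *) expressed as ASCII
# values, and "UNOB"/"Comma"/"None" are values listed in the docstring above;
# treat the concrete numbers as assumptions, not service-mandated defaults.
def _example_edifact_framing_settings():
    return EdifactFramingSettings(
        protocol_version=4,
        data_element_separator=43,   # '+'
        component_separator=58,      # ':'
        segment_terminator=39,       # "'"
        release_indicator=63,        # '?'
        repetition_separator=42,     # '*'
        character_set="UNOB",
        decimal_point_indicator="Comma",
        segment_terminator_suffix="None",
    )
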
class EdifactMessageFilter(msrest.serialization.Model):
"""The Edifact message filter for odata query.
All required parameters must be populated in order to send to Azure.
:param message_filter_type: Required. The message filter type. Possible values include:
"NotSpecified", "Include", "Exclude".
:type message_filter_type: str or ~azure.mgmt.logic.models.MessageFilterType
"""
_validation = {
'message_filter_type': {'required': True},
}
_attribute_map = {
'message_filter_type': {'key': 'messageFilterType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdifactMessageFilter, self).__init__(**kwargs)
self.message_filter_type = kwargs['message_filter_type']
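
# Illustrative sketch (hypothetical helper, not generated code): the only
# required kwarg is message_filter_type; omitting it raises KeyError in
# __init__, mirroring the 'required': True entry in _validation above.
def _example_edifact_message_filter():
    return EdifactMessageFilter(message_filter_type="Include")
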
class EdifactMessageIdentifier(msrest.serialization.Model):
"""The Edifact message identifier.
All required parameters must be populated in order to send to Azure.
:param message_id: Required. The message id to which these envelope settings are applied.
:type message_id: str
"""
_validation = {
'message_id': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdifactMessageIdentifier, self).__init__(**kwargs)
self.message_id = kwargs['message_id']
class EdifactOneWayAgreement(msrest.serialization.Model):
"""The Edifact one way agreement.
All required parameters must be populated in order to send to Azure.
:param sender_business_identity: Required. The sender business identity.
:type sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param receiver_business_identity: Required. The receiver business identity.
:type receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param protocol_settings: Required. The EDIFACT protocol settings.
:type protocol_settings: ~azure.mgmt.logic.models.EdifactProtocolSettings
"""
_validation = {
'sender_business_identity': {'required': True},
'receiver_business_identity': {'required': True},
'protocol_settings': {'required': True},
}
_attribute_map = {
'sender_business_identity': {'key': 'senderBusinessIdentity', 'type': 'BusinessIdentity'},
'receiver_business_identity': {'key': 'receiverBusinessIdentity', 'type': 'BusinessIdentity'},
'protocol_settings': {'key': 'protocolSettings', 'type': 'EdifactProtocolSettings'},
}
def __init__(
self,
**kwargs
):
super(EdifactOneWayAgreement, self).__init__(**kwargs)
self.sender_business_identity = kwargs['sender_business_identity']
self.receiver_business_identity = kwargs['receiver_business_identity']
self.protocol_settings = kwargs['protocol_settings']
class EdifactProcessingSettings(msrest.serialization.Model):
"""The Edifact agreement protocol settings.
All required parameters must be populated in order to send to Azure.
:param mask_security_info: Required. The value indicating whether to mask security information.
:type mask_security_info: bool
:param preserve_interchange: Required. The value indicating whether to preserve interchange.
:type preserve_interchange: bool
:param suspend_interchange_on_error: Required. The value indicating whether to suspend
interchange on error.
:type suspend_interchange_on_error: bool
:param create_empty_xml_tags_for_trailing_separators: Required. The value indicating whether to
create empty xml tags for trailing separators.
:type create_empty_xml_tags_for_trailing_separators: bool
:param use_dot_as_decimal_separator: Required. The value indicating whether to use dot as
decimal separator.
:type use_dot_as_decimal_separator: bool
"""
_validation = {
'mask_security_info': {'required': True},
'preserve_interchange': {'required': True},
'suspend_interchange_on_error': {'required': True},
'create_empty_xml_tags_for_trailing_separators': {'required': True},
'use_dot_as_decimal_separator': {'required': True},
}
_attribute_map = {
'mask_security_info': {'key': 'maskSecurityInfo', 'type': 'bool'},
'preserve_interchange': {'key': 'preserveInterchange', 'type': 'bool'},
'suspend_interchange_on_error': {'key': 'suspendInterchangeOnError', 'type': 'bool'},
'create_empty_xml_tags_for_trailing_separators': {'key': 'createEmptyXmlTagsForTrailingSeparators', 'type': 'bool'},
'use_dot_as_decimal_separator': {'key': 'useDotAsDecimalSeparator', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(EdifactProcessingSettings, self).__init__(**kwargs)
self.mask_security_info = kwargs['mask_security_info']
self.preserve_interchange = kwargs['preserve_interchange']
self.suspend_interchange_on_error = kwargs['suspend_interchange_on_error']
self.create_empty_xml_tags_for_trailing_separators = kwargs['create_empty_xml_tags_for_trailing_separators']
self.use_dot_as_decimal_separator = kwargs['use_dot_as_decimal_separator']
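
# Illustrative sketch (hypothetical helper, not generated code): every
# processing flag is required, so each boolean must be passed explicitly.
# The particular values below are assumptions, not recommended defaults.
def _example_edifact_processing_settings():
    return EdifactProcessingSettings(
        mask_security_info=True,
        preserve_interchange=True,
        suspend_interchange_on_error=False,
        create_empty_xml_tags_for_trailing_separators=True,
        use_dot_as_decimal_separator=False,
    )
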
class EdifactProtocolSettings(msrest.serialization.Model):
"""The Edifact agreement protocol settings.
All required parameters must be populated in order to send to Azure.
:param validation_settings: Required. The EDIFACT validation settings.
:type validation_settings: ~azure.mgmt.logic.models.EdifactValidationSettings
:param framing_settings: Required. The EDIFACT framing settings.
:type framing_settings: ~azure.mgmt.logic.models.EdifactFramingSettings
:param envelope_settings: Required. The EDIFACT envelope settings.
:type envelope_settings: ~azure.mgmt.logic.models.EdifactEnvelopeSettings
:param acknowledgement_settings: Required. The EDIFACT acknowledgement settings.
:type acknowledgement_settings: ~azure.mgmt.logic.models.EdifactAcknowledgementSettings
:param message_filter: Required. The EDIFACT message filter.
:type message_filter: ~azure.mgmt.logic.models.EdifactMessageFilter
:param processing_settings: Required. The EDIFACT processing Settings.
:type processing_settings: ~azure.mgmt.logic.models.EdifactProcessingSettings
:param envelope_overrides: The EDIFACT envelope override settings.
:type envelope_overrides: list[~azure.mgmt.logic.models.EdifactEnvelopeOverride]
:param message_filter_list: The EDIFACT message filter list.
:type message_filter_list: list[~azure.mgmt.logic.models.EdifactMessageIdentifier]
:param schema_references: Required. The EDIFACT schema references.
:type schema_references: list[~azure.mgmt.logic.models.EdifactSchemaReference]
:param validation_overrides: The EDIFACT validation override settings.
:type validation_overrides: list[~azure.mgmt.logic.models.EdifactValidationOverride]
:param edifact_delimiter_overrides: The EDIFACT delimiter override settings.
:type edifact_delimiter_overrides: list[~azure.mgmt.logic.models.EdifactDelimiterOverride]
"""
_validation = {
'validation_settings': {'required': True},
'framing_settings': {'required': True},
'envelope_settings': {'required': True},
'acknowledgement_settings': {'required': True},
'message_filter': {'required': True},
'processing_settings': {'required': True},
'schema_references': {'required': True},
}
_attribute_map = {
'validation_settings': {'key': 'validationSettings', 'type': 'EdifactValidationSettings'},
'framing_settings': {'key': 'framingSettings', 'type': 'EdifactFramingSettings'},
'envelope_settings': {'key': 'envelopeSettings', 'type': 'EdifactEnvelopeSettings'},
'acknowledgement_settings': {'key': 'acknowledgementSettings', 'type': 'EdifactAcknowledgementSettings'},
'message_filter': {'key': 'messageFilter', 'type': 'EdifactMessageFilter'},
'processing_settings': {'key': 'processingSettings', 'type': 'EdifactProcessingSettings'},
'envelope_overrides': {'key': 'envelopeOverrides', 'type': '[EdifactEnvelopeOverride]'},
'message_filter_list': {'key': 'messageFilterList', 'type': '[EdifactMessageIdentifier]'},
'schema_references': {'key': 'schemaReferences', 'type': '[EdifactSchemaReference]'},
'validation_overrides': {'key': 'validationOverrides', 'type': '[EdifactValidationOverride]'},
'edifact_delimiter_overrides': {'key': 'edifactDelimiterOverrides', 'type': '[EdifactDelimiterOverride]'},
}
def __init__(
self,
**kwargs
):
super(EdifactProtocolSettings, self).__init__(**kwargs)
self.validation_settings = kwargs['validation_settings']
self.framing_settings = kwargs['framing_settings']
self.envelope_settings = kwargs['envelope_settings']
self.acknowledgement_settings = kwargs['acknowledgement_settings']
self.message_filter = kwargs['message_filter']
self.processing_settings = kwargs['processing_settings']
self.envelope_overrides = kwargs.get('envelope_overrides', None)
self.message_filter_list = kwargs.get('message_filter_list', None)
self.schema_references = kwargs['schema_references']
self.validation_overrides = kwargs.get('validation_overrides', None)
self.edifact_delimiter_overrides = kwargs.get('edifact_delimiter_overrides', None)
class EdifactSchemaReference(msrest.serialization.Model):
"""The Edifact schema reference.
All required parameters must be populated in order to send to Azure.
:param message_id: Required. The message id.
:type message_id: str
:param message_version: Required. The message version.
:type message_version: str
:param message_release: Required. The message release version.
:type message_release: str
:param sender_application_id: The sender application id.
:type sender_application_id: str
:param sender_application_qualifier: The sender application qualifier.
:type sender_application_qualifier: str
:param association_assigned_code: The association assigned code.
:type association_assigned_code: str
:param schema_name: Required. The schema name.
:type schema_name: str
"""
_validation = {
'message_id': {'required': True},
'message_version': {'required': True},
'message_release': {'required': True},
'schema_name': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'message_version': {'key': 'messageVersion', 'type': 'str'},
'message_release': {'key': 'messageRelease', 'type': 'str'},
'sender_application_id': {'key': 'senderApplicationId', 'type': 'str'},
'sender_application_qualifier': {'key': 'senderApplicationQualifier', 'type': 'str'},
'association_assigned_code': {'key': 'associationAssignedCode', 'type': 'str'},
'schema_name': {'key': 'schemaName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdifactSchemaReference, self).__init__(**kwargs)
self.message_id = kwargs['message_id']
self.message_version = kwargs['message_version']
self.message_release = kwargs['message_release']
self.sender_application_id = kwargs.get('sender_application_id', None)
self.sender_application_qualifier = kwargs.get('sender_application_qualifier', None)
self.association_assigned_code = kwargs.get('association_assigned_code', None)
self.schema_name = kwargs['schema_name']
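
# Illustrative sketch (hypothetical helper, not generated code): a schema
# reference ties an EDIFACT message id/version/release to a schema stored in
# the integration account. "ORDERS", "D", "03B" and the schema name are
# placeholder values chosen for the example.
def _example_edifact_schema_reference():
    return EdifactSchemaReference(
        message_id="ORDERS",
        message_version="D",
        message_release="03B",
        schema_name="EDIFACT_D03B_ORDERS",
    )
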
class EdifactValidationOverride(msrest.serialization.Model):
"""The Edifact validation override settings.
All required parameters must be populated in order to send to Azure.
:param message_id: Required. The message id to which the validation settings are applied.
:type message_id: str
:param enforce_character_set: Required. The value indicating whether to validate character Set.
:type enforce_character_set: bool
:param validate_edi_types: Required. The value indicating whether to validate EDI types.
:type validate_edi_types: bool
:param validate_xsd_types: Required. The value indicating whether to validate XSD types.
:type validate_xsd_types: bool
:param allow_leading_and_trailing_spaces_and_zeroes: Required. The value indicating whether to
allow leading and trailing spaces and zeroes.
:type allow_leading_and_trailing_spaces_and_zeroes: bool
:param trailing_separator_policy: Required. The trailing separator policy. Possible values
include: "NotSpecified", "NotAllowed", "Optional", "Mandatory".
:type trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy
:param trim_leading_and_trailing_spaces_and_zeroes: Required. The value indicating whether to
trim leading and trailing spaces and zeroes.
:type trim_leading_and_trailing_spaces_and_zeroes: bool
"""
_validation = {
'message_id': {'required': True},
'enforce_character_set': {'required': True},
'validate_edi_types': {'required': True},
'validate_xsd_types': {'required': True},
'allow_leading_and_trailing_spaces_and_zeroes': {'required': True},
'trailing_separator_policy': {'required': True},
'trim_leading_and_trailing_spaces_and_zeroes': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'enforce_character_set': {'key': 'enforceCharacterSet', 'type': 'bool'},
'validate_edi_types': {'key': 'validateEDITypes', 'type': 'bool'},
'validate_xsd_types': {'key': 'validateXSDTypes', 'type': 'bool'},
'allow_leading_and_trailing_spaces_and_zeroes': {'key': 'allowLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trailing_separator_policy': {'key': 'trailingSeparatorPolicy', 'type': 'str'},
'trim_leading_and_trailing_spaces_and_zeroes': {'key': 'trimLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(EdifactValidationOverride, self).__init__(**kwargs)
self.message_id = kwargs['message_id']
self.enforce_character_set = kwargs['enforce_character_set']
self.validate_edi_types = kwargs['validate_edi_types']
self.validate_xsd_types = kwargs['validate_xsd_types']
self.allow_leading_and_trailing_spaces_and_zeroes = kwargs['allow_leading_and_trailing_spaces_and_zeroes']
self.trailing_separator_policy = kwargs['trailing_separator_policy']
self.trim_leading_and_trailing_spaces_and_zeroes = kwargs['trim_leading_and_trailing_spaces_and_zeroes']
class EdifactValidationSettings(msrest.serialization.Model):
"""The Edifact agreement validation settings.
All required parameters must be populated in order to send to Azure.
:param validate_character_set: Required. The value indicating whether to validate character set
in the message.
:type validate_character_set: bool
:param check_duplicate_interchange_control_number: Required. The value indicating whether to
check for duplicate interchange control number.
:type check_duplicate_interchange_control_number: bool
:param interchange_control_number_validity_days: Required. The validity period of interchange
control number.
:type interchange_control_number_validity_days: int
:param check_duplicate_group_control_number: Required. The value indicating whether to check
for duplicate group control number.
:type check_duplicate_group_control_number: bool
:param check_duplicate_transaction_set_control_number: Required. The value indicating whether
to check for duplicate transaction set control number.
:type check_duplicate_transaction_set_control_number: bool
:param validate_edi_types: Required. The value indicating whether to validate EDI types.
:type validate_edi_types: bool
:param validate_xsd_types: Required. The value indicating whether to validate XSD types.
:type validate_xsd_types: bool
:param allow_leading_and_trailing_spaces_and_zeroes: Required. The value indicating whether to
allow leading and trailing spaces and zeroes.
:type allow_leading_and_trailing_spaces_and_zeroes: bool
:param trim_leading_and_trailing_spaces_and_zeroes: Required. The value indicating whether to
trim leading and trailing spaces and zeroes.
:type trim_leading_and_trailing_spaces_and_zeroes: bool
:param trailing_separator_policy: Required. The trailing separator policy. Possible values
include: "NotSpecified", "NotAllowed", "Optional", "Mandatory".
:type trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy
"""
_validation = {
'validate_character_set': {'required': True},
'check_duplicate_interchange_control_number': {'required': True},
'interchange_control_number_validity_days': {'required': True},
'check_duplicate_group_control_number': {'required': True},
'check_duplicate_transaction_set_control_number': {'required': True},
'validate_edi_types': {'required': True},
'validate_xsd_types': {'required': True},
'allow_leading_and_trailing_spaces_and_zeroes': {'required': True},
'trim_leading_and_trailing_spaces_and_zeroes': {'required': True},
'trailing_separator_policy': {'required': True},
}
_attribute_map = {
'validate_character_set': {'key': 'validateCharacterSet', 'type': 'bool'},
'check_duplicate_interchange_control_number': {'key': 'checkDuplicateInterchangeControlNumber', 'type': 'bool'},
'interchange_control_number_validity_days': {'key': 'interchangeControlNumberValidityDays', 'type': 'int'},
'check_duplicate_group_control_number': {'key': 'checkDuplicateGroupControlNumber', 'type': 'bool'},
'check_duplicate_transaction_set_control_number': {'key': 'checkDuplicateTransactionSetControlNumber', 'type': 'bool'},
'validate_edi_types': {'key': 'validateEDITypes', 'type': 'bool'},
'validate_xsd_types': {'key': 'validateXSDTypes', 'type': 'bool'},
'allow_leading_and_trailing_spaces_and_zeroes': {'key': 'allowLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trim_leading_and_trailing_spaces_and_zeroes': {'key': 'trimLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trailing_separator_policy': {'key': 'trailingSeparatorPolicy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdifactValidationSettings, self).__init__(**kwargs)
self.validate_character_set = kwargs['validate_character_set']
self.check_duplicate_interchange_control_number = kwargs['check_duplicate_interchange_control_number']
self.interchange_control_number_validity_days = kwargs['interchange_control_number_validity_days']
self.check_duplicate_group_control_number = kwargs['check_duplicate_group_control_number']
self.check_duplicate_transaction_set_control_number = kwargs['check_duplicate_transaction_set_control_number']
self.validate_edi_types = kwargs['validate_edi_types']
self.validate_xsd_types = kwargs['validate_xsd_types']
self.allow_leading_and_trailing_spaces_and_zeroes = kwargs['allow_leading_and_trailing_spaces_and_zeroes']
self.trim_leading_and_trailing_spaces_and_zeroes = kwargs['trim_leading_and_trailing_spaces_and_zeroes']
self.trailing_separator_policy = kwargs['trailing_separator_policy']
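
# Illustrative sketch (hypothetical helper, not generated code): all ten
# validation settings are required. The 30-day validity window and the
# "Optional" trailing separator policy are assumptions chosen for the example.
def _example_edifact_validation_settings():
    return EdifactValidationSettings(
        validate_character_set=True,
        check_duplicate_interchange_control_number=True,
        interchange_control_number_validity_days=30,
        check_duplicate_group_control_number=True,
        check_duplicate_transaction_set_control_number=True,
        validate_edi_types=True,
        validate_xsd_types=False,
        allow_leading_and_trailing_spaces_and_zeroes=True,
        trim_leading_and_trailing_spaces_and_zeroes=True,
        trailing_separator_policy="Optional",
    )
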
class ErrorProperties(msrest.serialization.Model):
"""Error properties indicate why the Logic service was not able to process the incoming request. The reason is provided in the error message.
:param code: Error code.
:type code: str
:param message: Error message indicating why the operation failed.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorProperties, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class ErrorResponse(msrest.serialization.Model):
"""Error response indicates Logic service is not able to process the incoming request. The error property contains the error details.
:param error: The error properties.
:type error: ~azure.mgmt.logic.models.ErrorProperties
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorProperties'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
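
# Illustrative sketch (hypothetical helper, not generated code): an
# ErrorResponse simply wraps ErrorProperties; both fields are optional and the
# code/message strings below are made-up placeholders.
def _example_error_response():
    return ErrorResponse(
        error=ErrorProperties(
            code="ResourceNotFound",
            message="The workflow was not found.",
        ),
    )
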
class Expression(msrest.serialization.Model):
"""The expression.
:param text: The text.
:type text: str
:param value: Any object.
:type value: object
:param subexpressions: The sub expressions.
:type subexpressions: list[~azure.mgmt.logic.models.Expression]
:param error: The azure resource error info.
:type error: ~azure.mgmt.logic.models.AzureResourceErrorInfo
"""
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'},
'subexpressions': {'key': 'subexpressions', 'type': '[Expression]'},
'error': {'key': 'error', 'type': 'AzureResourceErrorInfo'},
}
def __init__(
self,
**kwargs
):
super(Expression, self).__init__(**kwargs)
self.text = kwargs.get('text', None)
self.value = kwargs.get('value', None)
self.subexpressions = kwargs.get('subexpressions', None)
self.error = kwargs.get('error', None)
class ExpressionRoot(Expression):
"""The expression root.
:param text: The text.
:type text: str
:param value: Any object.
:type value: object
:param subexpressions: The sub expressions.
:type subexpressions: list[~azure.mgmt.logic.models.Expression]
:param error: The azure resource error info.
:type error: ~azure.mgmt.logic.models.AzureResourceErrorInfo
:param path: The path.
:type path: str
"""
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'},
'subexpressions': {'key': 'subexpressions', 'type': '[Expression]'},
'error': {'key': 'error', 'type': 'AzureResourceErrorInfo'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ExpressionRoot, self).__init__(**kwargs)
self.path = kwargs.get('path', None)
class ExpressionTraces(msrest.serialization.Model):
"""The expression traces.
:param inputs:
:type inputs: list[~azure.mgmt.logic.models.ExpressionRoot]
"""
_attribute_map = {
'inputs': {'key': 'inputs', 'type': '[ExpressionRoot]'},
}
def __init__(
self,
**kwargs
):
super(ExpressionTraces, self).__init__(**kwargs)
self.inputs = kwargs.get('inputs', None)
class ExtendedErrorInfo(msrest.serialization.Model):
"""The extended error info.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code. Possible values include: "NotSpecified",
"IntegrationServiceEnvironmentNotFound", "InternalServerError", "InvalidOperationId".
:type code: str or ~azure.mgmt.logic.models.ErrorResponseCode
:param message: Required. The error message.
:type message: str
:param details: The error message details.
:type details: list[~azure.mgmt.logic.models.ExtendedErrorInfo]
:param inner_error: The inner error.
:type inner_error: object
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[ExtendedErrorInfo]'},
'inner_error': {'key': 'innerError', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ExtendedErrorInfo, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.details = kwargs.get('details', None)
self.inner_error = kwargs.get('inner_error', None)
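
# Illustrative sketch (hypothetical helper, not generated code): code and
# message are required; "InternalServerError" is one of the ErrorResponseCode
# values listed in the docstring above, while the message text is a placeholder.
def _example_extended_error_info():
    return ExtendedErrorInfo(
        code="InternalServerError",
        message="An unexpected error occurred.",
    )
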
class FlowAccessControlConfiguration(msrest.serialization.Model):
"""The access control configuration.
:param triggers: The access control configuration for invoking workflow triggers.
:type triggers: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy
:param contents: The access control configuration for accessing workflow run contents.
:type contents: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy
:param actions: The access control configuration for workflow actions.
:type actions: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy
:param workflow_management: The access control configuration for workflow management.
:type workflow_management: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy
"""
_attribute_map = {
'triggers': {'key': 'triggers', 'type': 'FlowAccessControlConfigurationPolicy'},
'contents': {'key': 'contents', 'type': 'FlowAccessControlConfigurationPolicy'},
'actions': {'key': 'actions', 'type': 'FlowAccessControlConfigurationPolicy'},
'workflow_management': {'key': 'workflowManagement', 'type': 'FlowAccessControlConfigurationPolicy'},
}
def __init__(
self,
**kwargs
):
super(FlowAccessControlConfiguration, self).__init__(**kwargs)
self.triggers = kwargs.get('triggers', None)
self.contents = kwargs.get('contents', None)
self.actions = kwargs.get('actions', None)
self.workflow_management = kwargs.get('workflow_management', None)
class FlowAccessControlConfigurationPolicy(msrest.serialization.Model):
"""The access control configuration policy.
:param allowed_caller_ip_addresses: The allowed caller IP address ranges.
:type allowed_caller_ip_addresses: list[~azure.mgmt.logic.models.IpAddressRange]
:param open_authentication_policies: The authentication policies for workflow.
:type open_authentication_policies: ~azure.mgmt.logic.models.OpenAuthenticationAccessPolicies
"""
_attribute_map = {
'allowed_caller_ip_addresses': {'key': 'allowedCallerIpAddresses', 'type': '[IpAddressRange]'},
'open_authentication_policies': {'key': 'openAuthenticationPolicies', 'type': 'OpenAuthenticationAccessPolicies'},
}
def __init__(
self,
**kwargs
):
super(FlowAccessControlConfigurationPolicy, self).__init__(**kwargs)
self.allowed_caller_ip_addresses = kwargs.get('allowed_caller_ip_addresses', None)
self.open_authentication_policies = kwargs.get('open_authentication_policies', None)
class FlowEndpoints(msrest.serialization.Model):
"""The flow endpoints configuration.
:param outgoing_ip_addresses: The outgoing ip address.
:type outgoing_ip_addresses: list[~azure.mgmt.logic.models.IpAddress]
:param access_endpoint_ip_addresses: The access endpoint ip address.
:type access_endpoint_ip_addresses: list[~azure.mgmt.logic.models.IpAddress]
"""
_attribute_map = {
'outgoing_ip_addresses': {'key': 'outgoingIpAddresses', 'type': '[IpAddress]'},
'access_endpoint_ip_addresses': {'key': 'accessEndpointIpAddresses', 'type': '[IpAddress]'},
}
def __init__(
self,
**kwargs
):
super(FlowEndpoints, self).__init__(**kwargs)
self.outgoing_ip_addresses = kwargs.get('outgoing_ip_addresses', None)
self.access_endpoint_ip_addresses = kwargs.get('access_endpoint_ip_addresses', None)
class FlowEndpointsConfiguration(msrest.serialization.Model):
"""The endpoints configuration.
:param workflow: The workflow endpoints.
:type workflow: ~azure.mgmt.logic.models.FlowEndpoints
:param connector: The connector endpoints.
:type connector: ~azure.mgmt.logic.models.FlowEndpoints
"""
_attribute_map = {
'workflow': {'key': 'workflow', 'type': 'FlowEndpoints'},
'connector': {'key': 'connector', 'type': 'FlowEndpoints'},
}
def __init__(
self,
**kwargs
):
super(FlowEndpointsConfiguration, self).__init__(**kwargs)
self.workflow = kwargs.get('workflow', None)
self.connector = kwargs.get('connector', None)
class GenerateUpgradedDefinitionParameters(msrest.serialization.Model):
"""The parameters to generate upgraded definition.
:param target_schema_version: The target schema version.
:type target_schema_version: str
"""
_attribute_map = {
'target_schema_version': {'key': 'targetSchemaVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GenerateUpgradedDefinitionParameters, self).__init__(**kwargs)
self.target_schema_version = kwargs.get('target_schema_version', None)
class GetCallbackUrlParameters(msrest.serialization.Model):
"""The callback url parameters.
:param not_after: The expiry time.
:type not_after: ~datetime.datetime
:param key_type: The key type. Possible values include: "NotSpecified", "Primary", "Secondary".
:type key_type: str or ~azure.mgmt.logic.models.KeyType
"""
_attribute_map = {
'not_after': {'key': 'notAfter', 'type': 'iso-8601'},
'key_type': {'key': 'keyType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GetCallbackUrlParameters, self).__init__(**kwargs)
self.not_after = kwargs.get('not_after', None)
self.key_type = kwargs.get('key_type', None)
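
# Illustrative sketch (hypothetical helper, not generated code): both fields
# are optional; not_after is serialized as ISO-8601, so a naive datetime is
# enough for the sketch, and the expiry date below is arbitrary.
def _example_get_callback_url_parameters():
    import datetime
    return GetCallbackUrlParameters(
        not_after=datetime.datetime(2030, 1, 1),
        key_type="Primary",
    )
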
class IntegrationAccount(Resource):
"""The integration account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param sku: The sku.
:type sku: ~azure.mgmt.logic.models.IntegrationAccountSku
:param integration_service_environment: The integration service environment.
:type integration_service_environment: ~azure.mgmt.logic.models.IntegrationServiceEnvironment
:param state: The workflow state. Possible values include: "NotSpecified", "Completed",
"Enabled", "Disabled", "Deleted", "Suspended".
:type state: str or ~azure.mgmt.logic.models.WorkflowState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'IntegrationAccountSku'},
'integration_service_environment': {'key': 'properties.integrationServiceEnvironment', 'type': 'IntegrationServiceEnvironment'},
'state': {'key': 'properties.state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccount, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.integration_service_environment = kwargs.get('integration_service_environment', None)
self.state = kwargs.get('state', None)
class IntegrationAccountAgreement(Resource):
"""The integration account agreement.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar created_time: The created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: The changed time.
:vartype changed_time: ~datetime.datetime
:param metadata: The metadata.
:type metadata: object
:param agreement_type: Required. The agreement type. Possible values include: "NotSpecified",
"AS2", "X12", "Edifact".
:type agreement_type: str or ~azure.mgmt.logic.models.AgreementType
:param host_partner: Required. The integration account partner that is set as host partner for
this agreement.
:type host_partner: str
:param guest_partner: Required. The integration account partner that is set as guest partner
for this agreement.
:type guest_partner: str
:param host_identity: Required. The business identity of the host partner.
:type host_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param guest_identity: Required. The business identity of the guest partner.
:type guest_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param content: Required. The agreement content.
:type content: ~azure.mgmt.logic.models.AgreementContent
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
'agreement_type': {'required': True},
'host_partner': {'required': True},
'guest_partner': {'required': True},
'host_identity': {'required': True},
'guest_identity': {'required': True},
'content': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'agreement_type': {'key': 'properties.agreementType', 'type': 'str'},
'host_partner': {'key': 'properties.hostPartner', 'type': 'str'},
'guest_partner': {'key': 'properties.guestPartner', 'type': 'str'},
'host_identity': {'key': 'properties.hostIdentity', 'type': 'BusinessIdentity'},
'guest_identity': {'key': 'properties.guestIdentity', 'type': 'BusinessIdentity'},
'content': {'key': 'properties.content', 'type': 'AgreementContent'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountAgreement, self).__init__(**kwargs)
self.created_time = None
self.changed_time = None
self.metadata = kwargs.get('metadata', None)
self.agreement_type = kwargs['agreement_type']
self.host_partner = kwargs['host_partner']
self.guest_partner = kwargs['guest_partner']
self.host_identity = kwargs['host_identity']
self.guest_identity = kwargs['guest_identity']
self.content = kwargs['content']
class IntegrationAccountAgreementFilter(msrest.serialization.Model):
"""The integration account agreement filter for odata query.
All required parameters must be populated in order to send to Azure.
:param agreement_type: Required. The agreement type of integration account agreement. Possible
values include: "NotSpecified", "AS2", "X12", "Edifact".
:type agreement_type: str or ~azure.mgmt.logic.models.AgreementType
"""
_validation = {
'agreement_type': {'required': True},
}
_attribute_map = {
'agreement_type': {'key': 'agreementType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountAgreementFilter, self).__init__(**kwargs)
self.agreement_type = kwargs['agreement_type']
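
# Illustrative sketch (hypothetical helper, not generated code): the filter
# carries only the required agreement_type and is intended for OData-style
# list filtering, as described in the docstring above.
def _example_integration_account_agreement_filter():
    return IntegrationAccountAgreementFilter(agreement_type="Edifact")
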
class IntegrationAccountAgreementListResult(msrest.serialization.Model):
"""The list of integration account agreements.
:param value: The list of integration account agreements.
:type value: list[~azure.mgmt.logic.models.IntegrationAccountAgreement]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationAccountAgreement]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountAgreementListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationAccountCertificate(Resource):
"""The integration account certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar created_time: The created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: The changed time.
:vartype changed_time: ~datetime.datetime
:param metadata: The metadata.
:type metadata: object
:param key: The key details in the key vault.
:type key: ~azure.mgmt.logic.models.KeyVaultKeyReference
:param public_certificate: The public certificate.
:type public_certificate: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'key': {'key': 'properties.key', 'type': 'KeyVaultKeyReference'},
'public_certificate': {'key': 'properties.publicCertificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountCertificate, self).__init__(**kwargs)
self.created_time = None
self.changed_time = None
self.metadata = kwargs.get('metadata', None)
self.key = kwargs.get('key', None)
self.public_certificate = kwargs.get('public_certificate', None)
class IntegrationAccountCertificateListResult(msrest.serialization.Model):
"""The list of integration account certificates.
:param value: The list of integration account certificates.
:type value: list[~azure.mgmt.logic.models.IntegrationAccountCertificate]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationAccountCertificate]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountCertificateListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationAccountListResult(msrest.serialization.Model):
"""The list of integration accounts.
:param value: The list of integration accounts.
:type value: list[~azure.mgmt.logic.models.IntegrationAccount]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationAccount]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationAccountMap(Resource):
"""The integration account map.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param map_type: Required. The map type. Possible values include: "NotSpecified", "Xslt",
"Xslt20", "Xslt30", "Liquid".
:type map_type: str or ~azure.mgmt.logic.models.MapType
:param parameters_schema: The parameters schema of integration account map.
:type parameters_schema:
~azure.mgmt.logic.models.IntegrationAccountMapPropertiesParametersSchema
:ivar created_time: The created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: The changed time.
:vartype changed_time: ~datetime.datetime
:param content: The content.
:type content: str
:param content_type: The content type.
:type content_type: str
:ivar content_link: The content link.
:vartype content_link: ~azure.mgmt.logic.models.ContentLink
:param metadata: The metadata.
:type metadata: object
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'map_type': {'required': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
'content_link': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'map_type': {'key': 'properties.mapType', 'type': 'str'},
'parameters_schema': {'key': 'properties.parametersSchema', 'type': 'IntegrationAccountMapPropertiesParametersSchema'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'content': {'key': 'properties.content', 'type': 'str'},
'content_type': {'key': 'properties.contentType', 'type': 'str'},
'content_link': {'key': 'properties.contentLink', 'type': 'ContentLink'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountMap, self).__init__(**kwargs)
self.map_type = kwargs['map_type']
self.parameters_schema = kwargs.get('parameters_schema', None)
self.created_time = None
self.changed_time = None
self.content = kwargs.get('content', None)
self.content_type = kwargs.get('content_type', None)
self.content_link = None
self.metadata = kwargs.get('metadata', None)
class IntegrationAccountMapFilter(msrest.serialization.Model):
"""The integration account map filter for odata query.
All required parameters must be populated in order to send to Azure.
:param map_type: Required. The map type of integration account map. Possible values include:
"NotSpecified", "Xslt", "Xslt20", "Xslt30", "Liquid".
:type map_type: str or ~azure.mgmt.logic.models.MapType
"""
_validation = {
'map_type': {'required': True},
}
_attribute_map = {
'map_type': {'key': 'mapType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountMapFilter, self).__init__(**kwargs)
self.map_type = kwargs['map_type']
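
# Illustrative sketch (hypothetical helper, not generated code): "Xslt" is one
# of the MapType values listed in the docstring above.
def _example_integration_account_map_filter():
    return IntegrationAccountMapFilter(map_type="Xslt")
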
class IntegrationAccountMapListResult(msrest.serialization.Model):
"""The list of integration account maps.
:param value: The list of integration account maps.
:type value: list[~azure.mgmt.logic.models.IntegrationAccountMap]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationAccountMap]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountMapListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationAccountMapPropertiesParametersSchema(msrest.serialization.Model):
"""The parameters schema of integration account map.
:param ref: The reference name.
:type ref: str
"""
_attribute_map = {
'ref': {'key': 'ref', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountMapPropertiesParametersSchema, self).__init__(**kwargs)
self.ref = kwargs.get('ref', None)
class IntegrationAccountPartner(Resource):
"""The integration account partner.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param partner_type: Required. The partner type. Possible values include: "NotSpecified",
"B2B".
:type partner_type: str or ~azure.mgmt.logic.models.PartnerType
:ivar created_time: The created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: The changed time.
:vartype changed_time: ~datetime.datetime
:param metadata: The metadata.
:type metadata: object
:param content: Required. The partner content.
:type content: ~azure.mgmt.logic.models.PartnerContent
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'partner_type': {'required': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
'content': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'partner_type': {'key': 'properties.partnerType', 'type': 'str'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'content': {'key': 'properties.content', 'type': 'PartnerContent'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountPartner, self).__init__(**kwargs)
self.partner_type = kwargs['partner_type']
self.created_time = None
self.changed_time = None
self.metadata = kwargs.get('metadata', None)
self.content = kwargs['content']
class IntegrationAccountPartnerFilter(msrest.serialization.Model):
"""The integration account partner filter for odata query.
All required parameters must be populated in order to send to Azure.
:param partner_type: Required. The partner type of integration account partner. Possible values
include: "NotSpecified", "B2B".
:type partner_type: str or ~azure.mgmt.logic.models.PartnerType
"""
_validation = {
'partner_type': {'required': True},
}
_attribute_map = {
'partner_type': {'key': 'partnerType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountPartnerFilter, self).__init__(**kwargs)
self.partner_type = kwargs['partner_type']
class IntegrationAccountPartnerListResult(msrest.serialization.Model):
"""The list of integration account partners.
:param value: The list of integration account partners.
:type value: list[~azure.mgmt.logic.models.IntegrationAccountPartner]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationAccountPartner]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountPartnerListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationAccountSchema(Resource):
"""The integration account schema.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param schema_type: Required. The schema type. Possible values include: "NotSpecified", "Xml".
:type schema_type: str or ~azure.mgmt.logic.models.SchemaType
:param target_namespace: The target namespace of the schema.
:type target_namespace: str
:param document_name: The document name.
:type document_name: str
:param file_name: The file name.
:type file_name: str
:ivar created_time: The created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: The changed time.
:vartype changed_time: ~datetime.datetime
:param metadata: The metadata.
:type metadata: object
:param content: The content.
:type content: str
:param content_type: The content type.
:type content_type: str
:ivar content_link: The content link.
:vartype content_link: ~azure.mgmt.logic.models.ContentLink
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'schema_type': {'required': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
'content_link': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'schema_type': {'key': 'properties.schemaType', 'type': 'str'},
'target_namespace': {'key': 'properties.targetNamespace', 'type': 'str'},
'document_name': {'key': 'properties.documentName', 'type': 'str'},
'file_name': {'key': 'properties.fileName', 'type': 'str'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'content': {'key': 'properties.content', 'type': 'str'},
'content_type': {'key': 'properties.contentType', 'type': 'str'},
'content_link': {'key': 'properties.contentLink', 'type': 'ContentLink'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountSchema, self).__init__(**kwargs)
self.schema_type = kwargs['schema_type']
self.target_namespace = kwargs.get('target_namespace', None)
self.document_name = kwargs.get('document_name', None)
self.file_name = kwargs.get('file_name', None)
self.created_time = None
self.changed_time = None
self.metadata = kwargs.get('metadata', None)
self.content = kwargs.get('content', None)
self.content_type = kwargs.get('content_type', None)
self.content_link = None
class IntegrationAccountSchemaFilter(msrest.serialization.Model):
"""The integration account schema filter for odata query.
All required parameters must be populated in order to send to Azure.
:param schema_type: Required. The schema type of integration account schema. Possible values
include: "NotSpecified", "Xml".
:type schema_type: str or ~azure.mgmt.logic.models.SchemaType
"""
_validation = {
'schema_type': {'required': True},
}
_attribute_map = {
'schema_type': {'key': 'schemaType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountSchemaFilter, self).__init__(**kwargs)
self.schema_type = kwargs['schema_type']
class IntegrationAccountSchemaListResult(msrest.serialization.Model):
"""The list of integration account schemas.
:param value: The list of integration account schemas.
:type value: list[~azure.mgmt.logic.models.IntegrationAccountSchema]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationAccountSchema]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountSchemaListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationAccountSession(Resource):
"""The integration account session.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar created_time: The created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: The changed time.
:vartype changed_time: ~datetime.datetime
:param content: The session content.
:type content: object
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'content': {'key': 'properties.content', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountSession, self).__init__(**kwargs)
self.created_time = None
self.changed_time = None
self.content = kwargs.get('content', None)
class IntegrationAccountSessionFilter(msrest.serialization.Model):
"""The integration account session filter.
All required parameters must be populated in order to send to Azure.
:param changed_time: Required. The changed time of integration account sessions.
:type changed_time: ~datetime.datetime
"""
_validation = {
'changed_time': {'required': True},
}
_attribute_map = {
'changed_time': {'key': 'changedTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountSessionFilter, self).__init__(**kwargs)
self.changed_time = kwargs['changed_time']
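
# Illustrative sketch (hypothetical helper, not generated code): changed_time
# is required and serialized as ISO-8601; the cut-off date below is arbitrary.
def _example_integration_account_session_filter():
    import datetime
    return IntegrationAccountSessionFilter(
        changed_time=datetime.datetime(2024, 1, 1),
    )
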
class IntegrationAccountSessionListResult(msrest.serialization.Model):
"""The list of integration account sessions.
:param value: The list of integration account sessions.
:type value: list[~azure.mgmt.logic.models.IntegrationAccountSession]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationAccountSession]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountSessionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationAccountSku(msrest.serialization.Model):
"""The integration account sku.
All required parameters must be populated in order to send to Azure.
:param name: Required. The sku name. Possible values include: "NotSpecified", "Free", "Basic",
"Standard".
:type name: str or ~azure.mgmt.logic.models.IntegrationAccountSkuName
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationAccountSku, self).__init__(**kwargs)
self.name = kwargs['name']
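
# Illustrative sketch (hypothetical helper, not generated code): "Standard" is
# one of the IntegrationAccountSkuName values listed in the docstring above.
def _example_integration_account_sku():
    return IntegrationAccountSku(name="Standard")
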
class IntegrationServiceEnvironment(Resource):
"""The integration service environment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: The integration service environment properties.
:type properties: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentProperties
:param sku: The sku.
:type sku: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'IntegrationServiceEnvironmentProperties'},
'sku': {'key': 'sku', 'type': 'IntegrationServiceEnvironmentSku'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironment, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
self.sku = kwargs.get('sku', None)
class IntegrationServiceEnvironmentAccessEndpoint(msrest.serialization.Model):
"""The integration service environment access endpoint.
:param type: The access endpoint type. Possible values include: "NotSpecified", "External",
"Internal".
:type type: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentAccessEndpointType
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentAccessEndpoint, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
class IntegrationServiceEnvironmentListResult(msrest.serialization.Model):
"""The list of integration service environments.
:param value:
:type value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironment]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationServiceEnvironment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationServiceEnvironmentNetworkDependency(msrest.serialization.Model):
"""The azure async operation resource.
:param category: The network dependency category type. Possible values include: "NotSpecified",
"AzureStorage", "AzureManagement", "AzureActiveDirectory", "SSLCertificateVerification",
"DiagnosticLogsAndMetrics", "IntegrationServiceEnvironmentConnectors", "RedisCache",
"AccessEndpoints", "RecoveryService", "SQL", "RegionalService".
:type category: str or
~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyCategoryType
:param display_name: The display name.
:type display_name: str
:param endpoints: The endpoints.
:type endpoints: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndpoint]
"""
_attribute_map = {
'category': {'key': 'category', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'endpoints': {'key': 'endpoints', 'type': '[IntegrationServiceEnvironmentNetworkEndpoint]'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentNetworkDependency, self).__init__(**kwargs)
self.category = kwargs.get('category', None)
self.display_name = kwargs.get('display_name', None)
self.endpoints = kwargs.get('endpoints', None)
class IntegrationServiceEnvironmentNetworkDependencyHealth(msrest.serialization.Model):
"""The integration service environment subnet network health.
:param error: The error if any occurred during the operation.
:type error: ~azure.mgmt.logic.models.ExtendedErrorInfo
:param state: The network dependency health state. Possible values include: "NotSpecified",
"Healthy", "Unhealthy", "Unknown".
:type state: str or
~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyHealthState
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ExtendedErrorInfo'},
'state': {'key': 'state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentNetworkDependencyHealth, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
self.state = kwargs.get('state', None)
class IntegrationServiceEnvironmentNetworkEndpoint(msrest.serialization.Model):
"""The network endpoint.
:param accessibility: The accessibility state. Possible values include: "NotSpecified",
"Unknown", "Available", "NotAvailable".
:type accessibility: str or
~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState
:param domain_name: The domain name.
:type domain_name: str
:param ports: The ports.
:type ports: list[str]
"""
_attribute_map = {
'accessibility': {'key': 'accessibility', 'type': 'str'},
'domain_name': {'key': 'domainName', 'type': 'str'},
'ports': {'key': 'ports', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentNetworkEndpoint, self).__init__(**kwargs)
self.accessibility = kwargs.get('accessibility', None)
self.domain_name = kwargs.get('domain_name', None)
self.ports = kwargs.get('ports', None)
class IntegrationServiceEnvironmentProperties(msrest.serialization.Model):
"""The integration service environment properties.
:param provisioning_state: The provisioning state. Possible values include: "NotSpecified",
"Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
"Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
"Unregistered", "Completed", "Renewing", "Pending", "Waiting", "InProgress".
:type provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
:param state: The integration service environment state. Possible values include:
"NotSpecified", "Completed", "Enabled", "Disabled", "Deleted", "Suspended".
:type state: str or ~azure.mgmt.logic.models.WorkflowState
:param integration_service_environment_id: The integration service environment id.
:type integration_service_environment_id: str
:param endpoints_configuration: The endpoints configuration.
:type endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration
:param network_configuration: The network configuration.
:type network_configuration: ~azure.mgmt.logic.models.NetworkConfiguration
"""
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'integration_service_environment_id': {'key': 'integrationServiceEnvironmentId', 'type': 'str'},
'endpoints_configuration': {'key': 'endpointsConfiguration', 'type': 'FlowEndpointsConfiguration'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentProperties, self).__init__(**kwargs)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.state = kwargs.get('state', None)
self.integration_service_environment_id = kwargs.get('integration_service_environment_id', None)
self.endpoints_configuration = kwargs.get('endpoints_configuration', None)
self.network_configuration = kwargs.get('network_configuration', None)
class IntegrationServiceEnvironmentSku(msrest.serialization.Model):
"""The integration service environment sku.
:param name: The sku name. Possible values include: "NotSpecified", "Premium", "Developer".
:type name: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuName
:param capacity: The sku capacity.
:type capacity: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentSku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.capacity = kwargs.get('capacity', None)
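# Illustrative sketch (not part of the generated client): these models are plain
# kwargs-based containers, so an integration service environment payload can be
# assembled by nesting them. The location, state, sku name and capacity used below
# are assumed example values, not taken from any real subscription.
def _example_integration_service_environment():
    sku = IntegrationServiceEnvironmentSku(name='Developer', capacity=0)
    properties = IntegrationServiceEnvironmentProperties(state='Enabled')
    # Optional kwargs that are omitted simply default to None on the instance.
    return IntegrationServiceEnvironment(location='westus', sku=sku, properties=properties)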
class IntegrationServiceEnvironmentSkuCapacity(msrest.serialization.Model):
"""The integration service environment sku capacity.
:param minimum: The minimum capacity.
:type minimum: int
:param maximum: The maximum capacity.
:type maximum: int
:param default: The default capacity.
:type default: int
:param scale_type: The sku scale type. Possible values include: "Manual", "Automatic", "None".
:type scale_type: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuScaleType
"""
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'int'},
'maximum': {'key': 'maximum', 'type': 'int'},
'default': {'key': 'default', 'type': 'int'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentSkuCapacity, self).__init__(**kwargs)
self.minimum = kwargs.get('minimum', None)
self.maximum = kwargs.get('maximum', None)
self.default = kwargs.get('default', None)
self.scale_type = kwargs.get('scale_type', None)
class IntegrationServiceEnvironmentSkuDefinition(msrest.serialization.Model):
"""The integration service environment sku definition.
:param resource_type: The resource type.
:type resource_type: str
:param sku: The sku.
:type sku: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuDefinitionSku
:param capacity: The sku capacity.
:type capacity: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuCapacity
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'IntegrationServiceEnvironmentSkuDefinitionSku'},
'capacity': {'key': 'capacity', 'type': 'IntegrationServiceEnvironmentSkuCapacity'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentSkuDefinition, self).__init__(**kwargs)
self.resource_type = kwargs.get('resource_type', None)
self.sku = kwargs.get('sku', None)
self.capacity = kwargs.get('capacity', None)
class IntegrationServiceEnvironmentSkuDefinitionSku(msrest.serialization.Model):
"""The sku.
:param name: The sku name. Possible values include: "NotSpecified", "Premium", "Developer".
:type name: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuName
:param tier: The sku tier.
:type tier: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentSkuDefinitionSku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.tier = kwargs.get('tier', None)
class IntegrationServiceEnvironmentSkuList(msrest.serialization.Model):
"""The list of integration service environment skus.
:param value: The list of integration service environment skus.
:type value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuDefinition]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationServiceEnvironmentSkuDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentSkuList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class IntegrationServiceEnvironmentSubnetNetworkHealth(msrest.serialization.Model):
"""The integration service environment subnet network health.
All required parameters must be populated in order to send to Azure.
:param outbound_network_dependencies: The outbound network dependencies.
:type outbound_network_dependencies:
list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependency]
:param outbound_network_health: The integration service environment network health.
:type outbound_network_health:
~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyHealth
:param network_dependency_health_state: Required. The integration service environment network
health state. Possible values include: "NotSpecified", "Unknown", "Available", "NotAvailable".
:type network_dependency_health_state: str or
~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState
"""
_validation = {
'network_dependency_health_state': {'required': True},
}
_attribute_map = {
'outbound_network_dependencies': {'key': 'outboundNetworkDependencies', 'type': '[IntegrationServiceEnvironmentNetworkDependency]'},
'outbound_network_health': {'key': 'outboundNetworkHealth', 'type': 'IntegrationServiceEnvironmentNetworkDependencyHealth'},
'network_dependency_health_state': {'key': 'networkDependencyHealthState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationServiceEnvironmentSubnetNetworkHealth, self).__init__(**kwargs)
self.outbound_network_dependencies = kwargs.get('outbound_network_dependencies', None)
self.outbound_network_health = kwargs.get('outbound_network_health', None)
self.network_dependency_health_state = kwargs['network_dependency_health_state']
class IpAddress(msrest.serialization.Model):
"""The ip address.
:param address: The address.
:type address: str
"""
_attribute_map = {
'address': {'key': 'address', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IpAddress, self).__init__(**kwargs)
self.address = kwargs.get('address', None)
class IpAddressRange(msrest.serialization.Model):
"""The ip address range.
:param address_range: The IP address range.
:type address_range: str
"""
_attribute_map = {
'address_range': {'key': 'addressRange', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IpAddressRange, self).__init__(**kwargs)
self.address_range = kwargs.get('address_range', None)
class JsonSchema(msrest.serialization.Model):
"""The JSON schema.
:param title: The JSON title.
:type title: str
:param content: The JSON content.
:type content: str
"""
_attribute_map = {
'title': {'key': 'title', 'type': 'str'},
'content': {'key': 'content', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JsonSchema, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.content = kwargs.get('content', None)
class KeyVaultKey(msrest.serialization.Model):
"""The key vault key.
:param kid: The key id.
:type kid: str
:param attributes: The key attributes.
:type attributes: ~azure.mgmt.logic.models.KeyVaultKeyAttributes
"""
_attribute_map = {
'kid': {'key': 'kid', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'KeyVaultKeyAttributes'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultKey, self).__init__(**kwargs)
self.kid = kwargs.get('kid', None)
self.attributes = kwargs.get('attributes', None)
class KeyVaultKeyAttributes(msrest.serialization.Model):
"""The key attributes.
:param enabled: Whether the key is enabled or not.
:type enabled: bool
:param created: When the key was created.
:type created: long
:param updated: When the key was updated.
:type updated: long
"""
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'created': {'key': 'created', 'type': 'long'},
'updated': {'key': 'updated', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultKeyAttributes, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.created = kwargs.get('created', None)
self.updated = kwargs.get('updated', None)
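# Illustrative sketch (not from the service): KeyVaultKeyAttributes serializes
# 'created'/'updated' with the 'long' type, so plain integers are expected here
# (the exact epoch convention is determined by the service). The key id below is
# a made-up placeholder.
def _example_key_vault_key():
    attributes = KeyVaultKeyAttributes(enabled=True, created=1577836800, updated=1577836800)
    return KeyVaultKey(kid='https://example-vault.vault.azure.net/keys/example-key', attributes=attributes)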
class KeyVaultKeyCollection(msrest.serialization.Model):
"""Collection of key vault keys.
:param value: The key vault keys.
:type value: list[~azure.mgmt.logic.models.KeyVaultKey]
:param skip_token: The skip token.
:type skip_token: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[KeyVaultKey]'},
'skip_token': {'key': 'skipToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultKeyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.skip_token = kwargs.get('skip_token', None)
class KeyVaultKeyReference(msrest.serialization.Model):
"""The reference to the key vault key.
All required parameters must be populated in order to send to Azure.
:param key_vault: Required. The key vault reference.
:type key_vault: ~azure.mgmt.logic.models.KeyVaultKeyReferenceKeyVault
:param key_name: Required. The private key name in key vault.
:type key_name: str
:param key_version: The private key version in key vault.
:type key_version: str
"""
_validation = {
'key_vault': {'required': True},
'key_name': {'required': True},
}
_attribute_map = {
'key_vault': {'key': 'keyVault', 'type': 'KeyVaultKeyReferenceKeyVault'},
'key_name': {'key': 'keyName', 'type': 'str'},
'key_version': {'key': 'keyVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultKeyReference, self).__init__(**kwargs)
self.key_vault = kwargs['key_vault']
self.key_name = kwargs['key_name']
self.key_version = kwargs.get('key_version', None)
class KeyVaultKeyReferenceKeyVault(msrest.serialization.Model):
"""The key vault reference.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: The resource id.
:type id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultKeyReferenceKeyVault, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = None
self.type = None
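# Illustrative sketch: required parameters in these kwargs-based models are read with
# kwargs['...'] in __init__, so omitting 'key_vault' or 'key_name' raises KeyError at
# construction time. The resource id below is a hypothetical placeholder.
def _example_key_vault_key_reference():
    vault = KeyVaultKeyReferenceKeyVault(
        id='/subscriptions/000/resourceGroups/rg/providers/Microsoft.KeyVault/vaults/example-vault'
    )
    # key_version is optional and defaults to None when omitted.
    return KeyVaultKeyReference(key_vault=vault, key_name='example-key')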
class KeyVaultReference(ResourceReference):
"""The key vault reference.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: The resource id.
:type id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultReference, self).__init__(**kwargs)
class ListKeyVaultKeysDefinition(msrest.serialization.Model):
"""The list key vault keys definition.
All required parameters must be populated in order to send to Azure.
:param key_vault: Required. The key vault reference.
:type key_vault: ~azure.mgmt.logic.models.KeyVaultReference
:param skip_token: The skip token.
:type skip_token: str
"""
_validation = {
'key_vault': {'required': True},
}
_attribute_map = {
'key_vault': {'key': 'keyVault', 'type': 'KeyVaultReference'},
'skip_token': {'key': 'skipToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListKeyVaultKeysDefinition, self).__init__(**kwargs)
self.key_vault = kwargs['key_vault']
self.skip_token = kwargs.get('skip_token', None)
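# Illustrative sketch: a list-keys request body; 'key_vault' is required (KeyError if
# missing) and 'skip_token' is only needed when continuing a previous page. The
# resource id is a hypothetical placeholder.
def _example_list_key_vault_keys_definition():
    vault = KeyVaultReference(
        id='/subscriptions/000/resourceGroups/rg/providers/Microsoft.KeyVault/vaults/example-vault'
    )
    return ListKeyVaultKeysDefinition(key_vault=vault)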
class ManagedApi(Resource):
"""The managed api definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: The api resource properties.
:type properties: ~azure.mgmt.logic.models.ApiResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ApiResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(ManagedApi, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class ManagedApiListResult(msrest.serialization.Model):
"""The list of managed APIs.
:param value: The managed APIs.
:type value: list[~azure.mgmt.logic.models.ManagedApi]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ManagedApi]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedApiListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class NetworkConfiguration(msrest.serialization.Model):
"""The network configuration.
:param virtual_network_address_space: Gets the virtual network address space.
:type virtual_network_address_space: str
:param access_endpoint: The access endpoint.
:type access_endpoint: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentAccessEndpoint
:param subnets: The subnets.
:type subnets: list[~azure.mgmt.logic.models.ResourceReference]
"""
_attribute_map = {
'virtual_network_address_space': {'key': 'virtualNetworkAddressSpace', 'type': 'str'},
'access_endpoint': {'key': 'accessEndpoint', 'type': 'IntegrationServiceEnvironmentAccessEndpoint'},
'subnets': {'key': 'subnets', 'type': '[ResourceReference]'},
}
def __init__(
self,
**kwargs
):
super(NetworkConfiguration, self).__init__(**kwargs)
self.virtual_network_address_space = kwargs.get('virtual_network_address_space', None)
self.access_endpoint = kwargs.get('access_endpoint', None)
self.subnets = kwargs.get('subnets', None)
class OpenAuthenticationAccessPolicies(msrest.serialization.Model):
"""AuthenticationPolicy of type Open.
:param policies: Open authentication policies.
:type policies: dict[str, ~azure.mgmt.logic.models.OpenAuthenticationAccessPolicy]
"""
_attribute_map = {
'policies': {'key': 'policies', 'type': '{OpenAuthenticationAccessPolicy}'},
}
def __init__(
self,
**kwargs
):
super(OpenAuthenticationAccessPolicies, self).__init__(**kwargs)
self.policies = kwargs.get('policies', None)
class OpenAuthenticationAccessPolicy(msrest.serialization.Model):
"""Open authentication access policy defined by user.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: Type of provider for OAuth. Possible values include: "AAD".
:vartype type: str or ~azure.mgmt.logic.models.OpenAuthenticationProviderType
:param claims: The access policy claims.
:type claims: list[~azure.mgmt.logic.models.OpenAuthenticationPolicyClaim]
"""
_validation = {
'type': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'claims': {'key': 'claims', 'type': '[OpenAuthenticationPolicyClaim]'},
}
def __init__(
self,
**kwargs
):
super(OpenAuthenticationAccessPolicy, self).__init__(**kwargs)
self.type = None
self.claims = kwargs.get('claims', None)
class OpenAuthenticationPolicyClaim(msrest.serialization.Model):
"""Open authentication policy claim.
:param name: The name of the claim.
:type name: str
:param value: The value of the claim.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OpenAuthenticationPolicyClaim, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.value = kwargs.get('value', None)
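# Illustrative sketch: open (OAuth) authentication policies are keyed by a policy name
# chosen by the caller, and each policy carries a list of claims. The policy name,
# claim name and claim value below are assumed placeholders.
def _example_open_authentication_policies():
    claim = OpenAuthenticationPolicyClaim(name='iss', value='https://sts.windows.net/example-tenant/')
    # 'type' on the policy is read only and populated by the service.
    policy = OpenAuthenticationAccessPolicy(claims=[claim])
    return OpenAuthenticationAccessPolicies(policies={'example-policy': policy})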
class Operation(msrest.serialization.Model):
"""Logic REST API operation.
:param origin: Operation: origin.
:type origin: str
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.logic.models.OperationDisplay
:param properties: The properties.
:type properties: object
"""
_attribute_map = {
'origin': {'key': 'origin', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.origin = kwargs.get('origin', None)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
self.properties = kwargs.get('properties', None)
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
:param provider: Service provider: Microsoft.Logic.
:type provider: str
:param resource: Resource on which the operation is performed: Profile, endpoint, etc.
:type resource: str
:param operation: Operation type: Read, write, delete, etc.
:type operation: str
:param description: Operation: description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list Logic operations. It contains a list of operations and a URL link to get the next set of results.
:param value: List of Logic operations supported by the Logic resource provider.
:type value: list[~azure.mgmt.logic.models.Operation]
:param next_link: URL to get the next set of operation list results if there are any.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class OperationResultProperties(msrest.serialization.Model):
"""The run operation result properties.
:param start_time: The start time of the workflow scope repetition.
:type start_time: ~datetime.datetime
:param end_time: The end time of the workflow scope repetition.
:type end_time: ~datetime.datetime
:param correlation: The correlation properties.
:type correlation: ~azure.mgmt.logic.models.RunActionCorrelation
:param status: The status of the workflow scope repetition. Possible values include:
"NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended",
"Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", "Ignored".
:type status: str or ~azure.mgmt.logic.models.WorkflowStatus
:param code: The workflow scope repetition code.
:type code: str
:param error: Any object.
:type error: object
"""
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'correlation': {'key': 'correlation', 'type': 'RunActionCorrelation'},
'status': {'key': 'status', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
'error': {'key': 'error', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(OperationResultProperties, self).__init__(**kwargs)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.correlation = kwargs.get('correlation', None)
self.status = kwargs.get('status', None)
self.code = kwargs.get('code', None)
self.error = kwargs.get('error', None)
class OperationResult(OperationResultProperties):
"""The operation result definition.
Variables are only populated by the server, and will be ignored when sending a request.
:param start_time: The start time of the workflow scope repetition.
:type start_time: ~datetime.datetime
:param end_time: The end time of the workflow scope repetition.
:type end_time: ~datetime.datetime
:param correlation: The correlation properties.
:type correlation: ~azure.mgmt.logic.models.RunActionCorrelation
:param status: The status of the workflow scope repetition. Possible values include:
"NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended",
"Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", "Ignored".
:type status: str or ~azure.mgmt.logic.models.WorkflowStatus
:param code: The workflow scope repetition code.
:type code: str
:param error: Any object.
:type error: object
:ivar tracking_id: Gets the tracking id.
:vartype tracking_id: str
:ivar inputs: Gets the inputs.
:vartype inputs: object
:ivar inputs_link: Gets the link to inputs.
:vartype inputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar outputs: Gets the outputs.
:vartype outputs: object
:ivar outputs_link: Gets the link to outputs.
:vartype outputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar tracked_properties: Gets the tracked properties.
:vartype tracked_properties: object
:param retry_history: Gets the retry histories.
:type retry_history: list[~azure.mgmt.logic.models.RetryHistory]
:param iteration_count:
:type iteration_count: int
"""
_validation = {
'tracking_id': {'readonly': True},
'inputs': {'readonly': True},
'inputs_link': {'readonly': True},
'outputs': {'readonly': True},
'outputs_link': {'readonly': True},
'tracked_properties': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'correlation': {'key': 'correlation', 'type': 'RunActionCorrelation'},
'status': {'key': 'status', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
'error': {'key': 'error', 'type': 'object'},
'tracking_id': {'key': 'trackingId', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': 'object'},
'inputs_link': {'key': 'inputsLink', 'type': 'ContentLink'},
'outputs': {'key': 'outputs', 'type': 'object'},
'outputs_link': {'key': 'outputsLink', 'type': 'ContentLink'},
'tracked_properties': {'key': 'trackedProperties', 'type': 'object'},
'retry_history': {'key': 'retryHistory', 'type': '[RetryHistory]'},
'iteration_count': {'key': 'iterationCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(OperationResult, self).__init__(**kwargs)
self.tracking_id = None
self.inputs = None
self.inputs_link = None
self.outputs = None
self.outputs_link = None
self.tracked_properties = None
self.retry_history = kwargs.get('retry_history', None)
self.iteration_count = kwargs.get('iteration_count', None)
class PartnerContent(msrest.serialization.Model):
"""The integration account partner content.
:param b2_b: The B2B partner content.
:type b2_b: ~azure.mgmt.logic.models.B2BPartnerContent
"""
_attribute_map = {
'b2_b': {'key': 'b2b', 'type': 'B2BPartnerContent'},
}
def __init__(
self,
**kwargs
):
super(PartnerContent, self).__init__(**kwargs)
self.b2_b = kwargs.get('b2_b', None)
class RecurrenceSchedule(msrest.serialization.Model):
"""The recurrence schedule.
:param minutes: The minutes.
:type minutes: list[int]
:param hours: The hours.
:type hours: list[int]
:param week_days: The days of the week.
:type week_days: list[str or ~azure.mgmt.logic.models.DaysOfWeek]
:param month_days: The month days.
:type month_days: list[int]
:param monthly_occurrences: The monthly occurrences.
:type monthly_occurrences: list[~azure.mgmt.logic.models.RecurrenceScheduleOccurrence]
"""
_attribute_map = {
'minutes': {'key': 'minutes', 'type': '[int]'},
'hours': {'key': 'hours', 'type': '[int]'},
'week_days': {'key': 'weekDays', 'type': '[str]'},
'month_days': {'key': 'monthDays', 'type': '[int]'},
'monthly_occurrences': {'key': 'monthlyOccurrences', 'type': '[RecurrenceScheduleOccurrence]'},
}
def __init__(
self,
**kwargs
):
super(RecurrenceSchedule, self).__init__(**kwargs)
self.minutes = kwargs.get('minutes', None)
self.hours = kwargs.get('hours', None)
self.week_days = kwargs.get('week_days', None)
self.month_days = kwargs.get('month_days', None)
self.monthly_occurrences = kwargs.get('monthly_occurrences', None)
class RecurrenceScheduleOccurrence(msrest.serialization.Model):
"""The recurrence schedule occurrence.
:param day: The day of the week. Possible values include: "Sunday", "Monday", "Tuesday",
"Wednesday", "Thursday", "Friday", "Saturday".
:type day: str or ~azure.mgmt.logic.models.DayOfWeek
:param occurrence: The occurrence.
:type occurrence: int
"""
_attribute_map = {
'day': {'key': 'day', 'type': 'str'},
'occurrence': {'key': 'occurrence', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RecurrenceScheduleOccurrence, self).__init__(**kwargs)
self.day = kwargs.get('day', None)
self.occurrence = kwargs.get('occurrence', None)
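# Illustrative sketch: a recurrence schedule describing "09:30 on the first Monday of
# the month" built from the two schedule models above; all values are example inputs.
def _example_recurrence_schedule():
    first_monday = RecurrenceScheduleOccurrence(day='Monday', occurrence=1)
    return RecurrenceSchedule(minutes=[30], hours=[9], monthly_occurrences=[first_monday])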
class RegenerateActionParameter(msrest.serialization.Model):
"""The access key regenerate action content.
:param key_type: The key type. Possible values include: "NotSpecified", "Primary", "Secondary".
:type key_type: str or ~azure.mgmt.logic.models.KeyType
"""
_attribute_map = {
'key_type': {'key': 'keyType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RegenerateActionParameter, self).__init__(**kwargs)
self.key_type = kwargs.get('key_type', None)
class RepetitionIndex(msrest.serialization.Model):
"""The workflow run action repetition index.
All required parameters must be populated in order to send to Azure.
:param scope_name: The scope.
:type scope_name: str
:param item_index: Required. The index.
:type item_index: int
"""
_validation = {
'item_index': {'required': True},
}
_attribute_map = {
'scope_name': {'key': 'scopeName', 'type': 'str'},
'item_index': {'key': 'itemIndex', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RepetitionIndex, self).__init__(**kwargs)
self.scope_name = kwargs.get('scope_name', None)
self.item_index = kwargs['item_index']
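# Illustrative sketch: 'item_index' is required (read via kwargs['item_index']), while
# 'scope_name' is optional; the scope name below is an assumed placeholder.
def _example_repetition_index():
    return RepetitionIndex(scope_name='For_each', item_index=0)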
class Request(msrest.serialization.Model):
"""A request.
:param headers: A list of all the headers attached to the request.
:type headers: object
:param uri: The destination for the request.
:type uri: str
:param method: The HTTP method used for the request.
:type method: str
"""
_attribute_map = {
'headers': {'key': 'headers', 'type': 'object'},
'uri': {'key': 'uri', 'type': 'str'},
'method': {'key': 'method', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Request, self).__init__(**kwargs)
self.headers = kwargs.get('headers', None)
self.uri = kwargs.get('uri', None)
self.method = kwargs.get('method', None)
class RequestHistory(Resource):
"""The request history.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: The request history properties.
:type properties: ~azure.mgmt.logic.models.RequestHistoryProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'RequestHistoryProperties'},
}
def __init__(
self,
**kwargs
):
super(RequestHistory, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class RequestHistoryListResult(msrest.serialization.Model):
"""The list of workflow request histories.
:param value: A list of workflow request histories.
:type value: list[~azure.mgmt.logic.models.RequestHistory]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RequestHistory]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RequestHistoryListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class RequestHistoryProperties(msrest.serialization.Model):
"""The request history.
:param start_time: The time the request started.
:type start_time: ~datetime.datetime
:param end_time: The time the request ended.
:type end_time: ~datetime.datetime
:param request: The request.
:type request: ~azure.mgmt.logic.models.Request
:param response: The response.
:type response: ~azure.mgmt.logic.models.Response
"""
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'request': {'key': 'request', 'type': 'Request'},
'response': {'key': 'response', 'type': 'Response'},
}
def __init__(
self,
**kwargs
):
super(RequestHistoryProperties, self).__init__(**kwargs)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.request = kwargs.get('request', None)
self.response = kwargs.get('response', None)
class Response(msrest.serialization.Model):
"""A response.
:param headers: A list of all the headers attached to the response.
:type headers: object
:param status_code: The status code of the response.
:type status_code: int
:param body_link: Details on the location of the body content.
:type body_link: ~azure.mgmt.logic.models.ContentLink
"""
_attribute_map = {
'headers': {'key': 'headers', 'type': 'object'},
'status_code': {'key': 'statusCode', 'type': 'int'},
'body_link': {'key': 'bodyLink', 'type': 'ContentLink'},
}
def __init__(
self,
**kwargs
):
super(Response, self).__init__(**kwargs)
self.headers = kwargs.get('headers', None)
self.status_code = kwargs.get('status_code', None)
self.body_link = kwargs.get('body_link', None)
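# Illustrative sketch: a Request/Response pair wrapped in the RequestHistoryProperties
# model defined earlier in this module. Headers are free-form objects (dicts), and the
# URI, status code and timestamps below are made-up example values.
def _example_request_history_properties():
    import datetime as _dt
    request = Request(headers={'Content-Type': 'application/json'}, uri='https://example.invalid/api', method='GET')
    response = Response(headers={'Content-Type': 'application/json'}, status_code=200)
    return RequestHistoryProperties(
        start_time=_dt.datetime(2020, 1, 1, 0, 0, 0),
        end_time=_dt.datetime(2020, 1, 1, 0, 0, 1),
        request=request,
        response=response,
    )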
class RetryHistory(msrest.serialization.Model):
"""The retry history.
:param start_time: Gets the start time.
:type start_time: ~datetime.datetime
:param end_time: Gets the end time.
:type end_time: ~datetime.datetime
:param code: Gets the status code.
:type code: str
:param client_request_id: Gets the client request Id.
:type client_request_id: str
:param service_request_id: Gets the service request Id.
:type service_request_id: str
:param error: Gets the error response.
:type error: ~azure.mgmt.logic.models.ErrorResponse
"""
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'code': {'key': 'code', 'type': 'str'},
'client_request_id': {'key': 'clientRequestId', 'type': 'str'},
'service_request_id': {'key': 'serviceRequestId', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
**kwargs
):
super(RetryHistory, self).__init__(**kwargs)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.code = kwargs.get('code', None)
self.client_request_id = kwargs.get('client_request_id', None)
self.service_request_id = kwargs.get('service_request_id', None)
self.error = kwargs.get('error', None)
class RunCorrelation(msrest.serialization.Model):
"""The correlation properties.
:param client_tracking_id: The client tracking identifier.
:type client_tracking_id: str
:param client_keywords: The client keywords.
:type client_keywords: list[str]
"""
_attribute_map = {
'client_tracking_id': {'key': 'clientTrackingId', 'type': 'str'},
'client_keywords': {'key': 'clientKeywords', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(RunCorrelation, self).__init__(**kwargs)
self.client_tracking_id = kwargs.get('client_tracking_id', None)
self.client_keywords = kwargs.get('client_keywords', None)
class RunActionCorrelation(RunCorrelation):
"""The workflow run action correlation properties.
:param client_tracking_id: The client tracking identifier.
:type client_tracking_id: str
:param client_keywords: The client keywords.
:type client_keywords: list[str]
:param action_tracking_id: The action tracking identifier.
:type action_tracking_id: str
"""
_attribute_map = {
'client_tracking_id': {'key': 'clientTrackingId', 'type': 'str'},
'client_keywords': {'key': 'clientKeywords', 'type': '[str]'},
'action_tracking_id': {'key': 'actionTrackingId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RunActionCorrelation, self).__init__(**kwargs)
self.action_tracking_id = kwargs.get('action_tracking_id', None)
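# Illustrative sketch: RunActionCorrelation extends RunCorrelation with an action
# tracking id; the identifiers below are arbitrary example strings, not real tracking ids.
def _example_run_action_correlation():
    return RunActionCorrelation(
        client_tracking_id='example-client-tracking-id',
        client_keywords=['example'],
        action_tracking_id='example-action-tracking-id',
    )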
class SetTriggerStateActionDefinition(msrest.serialization.Model):
"""The set trigger state action definition.
All required parameters must be populated in order to send to Azure.
:param source: Required. The source.
:type source: ~azure.mgmt.logic.models.WorkflowTriggerReference
"""
_validation = {
'source': {'required': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'WorkflowTriggerReference'},
}
def __init__(
self,
**kwargs
):
super(SetTriggerStateActionDefinition, self).__init__(**kwargs)
self.source = kwargs['source']
class Sku(msrest.serialization.Model):
"""The sku type.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name. Possible values include: "NotSpecified", "Free", "Shared",
"Basic", "Standard", "Premium".
:type name: str or ~azure.mgmt.logic.models.SkuName
:param plan: The reference to plan.
:type plan: ~azure.mgmt.logic.models.ResourceReference
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'plan': {'key': 'plan', 'type': 'ResourceReference'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs['name']
self.plan = kwargs.get('plan', None)
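# Illustrative sketch: 'name' is the only required field of Sku and must be one of the
# documented SkuName values; the plan reference is optional and the resource id used
# here is a hypothetical placeholder.
def _example_sku():
    plan = ResourceReference(
        id='/subscriptions/000/resourceGroups/rg/providers/Microsoft.Web/serverfarms/example-plan'
    )
    return Sku(name='Standard', plan=plan)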
class SubResource(msrest.serialization.Model):
"""The sub resource type.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SubResource, self).__init__(**kwargs)
self.id = None
class SwaggerCustomDynamicList(msrest.serialization.Model):
"""The swagger custom dynamic list.
:param operation_id: The operation id to fetch dynamic schema.
:type operation_id: str
:param built_in_operation: The built in operation.
:type built_in_operation: str
:param items_path: The path to a response property (relative to the response object, not the
response body) which contains an array of dynamic value items.
:type items_path: str
:param item_value_path: The path to a property which defines the value which should be used.
:type item_value_path: str
:param item_title_path: The path to an item property which defines the display name of the
item.
:type item_title_path: str
:param parameters: The parameters.
:type parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties]
"""
_attribute_map = {
'operation_id': {'key': 'operationId', 'type': 'str'},
'built_in_operation': {'key': 'builtInOperation', 'type': 'str'},
'items_path': {'key': 'itemsPath', 'type': 'str'},
'item_value_path': {'key': 'itemValuePath', 'type': 'str'},
'item_title_path': {'key': 'itemTitlePath', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{SwaggerCustomDynamicProperties}'},
}
def __init__(
self,
**kwargs
):
super(SwaggerCustomDynamicList, self).__init__(**kwargs)
self.operation_id = kwargs.get('operation_id', None)
self.built_in_operation = kwargs.get('built_in_operation', None)
self.items_path = kwargs.get('items_path', None)
self.item_value_path = kwargs.get('item_value_path', None)
self.item_title_path = kwargs.get('item_title_path', None)
self.parameters = kwargs.get('parameters', None)
class SwaggerCustomDynamicProperties(msrest.serialization.Model):
"""The swagger custom dynamic properties.
:param operation_id: The operation id to fetch dynamic schema.
:type operation_id: str
:param value_path: Json pointer to the dynamic schema on the response body.
:type value_path: str
:param parameters: The operation parameters.
:type parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties]
"""
_attribute_map = {
'operation_id': {'key': 'operationId', 'type': 'str'},
'value_path': {'key': 'valuePath', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{SwaggerCustomDynamicProperties}'},
}
def __init__(
self,
**kwargs
):
super(SwaggerCustomDynamicProperties, self).__init__(**kwargs)
self.operation_id = kwargs.get('operation_id', None)
self.value_path = kwargs.get('value_path', None)
self.parameters = kwargs.get('parameters', None)
class SwaggerCustomDynamicSchema(msrest.serialization.Model):
"""The swagger custom dynamic schema.
:param operation_id: The operation id to fetch dynamic schema.
:type operation_id: str
:param value_path: Json pointer to the dynamic schema on the response body.
:type value_path: str
:param parameters: The operation parameters.
:type parameters: dict[str, object]
"""
_attribute_map = {
'operation_id': {'key': 'operationId', 'type': 'str'},
'value_path': {'key': 'valuePath', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(SwaggerCustomDynamicSchema, self).__init__(**kwargs)
self.operation_id = kwargs.get('operation_id', None)
self.value_path = kwargs.get('value_path', None)
self.parameters = kwargs.get('parameters', None)
class SwaggerCustomDynamicTree(msrest.serialization.Model):
"""The swagger custom dynamic tree.
:param settings: The tree settings.
:type settings: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeSettings
:param open: The tree on-open configuration.
:type open: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeCommand
:param browse: The tree on-browse configuration.
:type browse: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeCommand
"""
_attribute_map = {
'settings': {'key': 'settings', 'type': 'SwaggerCustomDynamicTreeSettings'},
'open': {'key': 'open', 'type': 'SwaggerCustomDynamicTreeCommand'},
'browse': {'key': 'browse', 'type': 'SwaggerCustomDynamicTreeCommand'},
}
def __init__(
self,
**kwargs
):
super(SwaggerCustomDynamicTree, self).__init__(**kwargs)
self.settings = kwargs.get('settings', None)
self.open = kwargs.get('open', None)
self.browse = kwargs.get('browse', None)
class SwaggerCustomDynamicTreeCommand(msrest.serialization.Model):
"""The swagger tree command.
:param operation_id: The operation id to fetch dynamic schema.
:type operation_id: str
:param items_path: The path to a response property which contains an array of dynamic value
items.
:type items_path: str
:param item_value_path: The path to a property which defines the value which should be used.
:type item_value_path: str
:param item_title_path: The path to an item property which defines the display name of the
item.
:type item_title_path: str
:param item_full_title_path: The path to an item property which defines the display name of the
item.
:type item_full_title_path: str
:param item_is_parent: The path to an item property which defines the display name of the item.
:type item_is_parent: str
:param selectable_filter: The path to an item property which defines the display name of the
item.
:type selectable_filter: str
:param parameters: Dictionary of :code:`<SwaggerCustomDynamicTreeParameter>`.
:type parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeParameter]
"""
_attribute_map = {
'operation_id': {'key': 'operationId', 'type': 'str'},
'items_path': {'key': 'itemsPath', 'type': 'str'},
'item_value_path': {'key': 'itemValuePath', 'type': 'str'},
'item_title_path': {'key': 'itemTitlePath', 'type': 'str'},
'item_full_title_path': {'key': 'itemFullTitlePath', 'type': 'str'},
'item_is_parent': {'key': 'itemIsParent', 'type': 'str'},
'selectable_filter': {'key': 'selectableFilter', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{SwaggerCustomDynamicTreeParameter}'},
}
def __init__(
self,
**kwargs
):
super(SwaggerCustomDynamicTreeCommand, self).__init__(**kwargs)
self.operation_id = kwargs.get('operation_id', None)
self.items_path = kwargs.get('items_path', None)
self.item_value_path = kwargs.get('item_value_path', None)
self.item_title_path = kwargs.get('item_title_path', None)
self.item_full_title_path = kwargs.get('item_full_title_path', None)
self.item_is_parent = kwargs.get('item_is_parent', None)
self.selectable_filter = kwargs.get('selectable_filter', None)
self.parameters = kwargs.get('parameters', None)
class SwaggerCustomDynamicTreeParameter(msrest.serialization.Model):
"""The swagger custom dynamic tree parameter.
:param selected_item_value_path: Gets or sets a path to a property in the currently selected
item to pass as a value to a parameter for the given operation.
:type selected_item_value_path: str
:param value: The parameter value.
:type value: object
:param parameter_reference: The parameter reference.
:type parameter_reference: str
:param required: Indicates whether the parameter is required.
:type required: bool
"""
_attribute_map = {
'selected_item_value_path': {'key': 'selectedItemValuePath', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'},
'parameter_reference': {'key': 'parameterReference', 'type': 'str'},
'required': {'key': 'required', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(SwaggerCustomDynamicTreeParameter, self).__init__(**kwargs)
self.selected_item_value_path = kwargs.get('selected_item_value_path', None)
self.value = kwargs.get('value', None)
self.parameter_reference = kwargs.get('parameter_reference', None)
self.required = kwargs.get('required', None)
class SwaggerCustomDynamicTreeSettings(msrest.serialization.Model):
"""The swagger custom dynamic tree settings.
:param can_select_parent_nodes: Indicates whether parent nodes can be selected.
:type can_select_parent_nodes: bool
:param can_select_leaf_nodes: Indicates whether leaf nodes can be selected.
:type can_select_leaf_nodes: bool
"""
_attribute_map = {
'can_select_parent_nodes': {'key': 'CanSelectParentNodes', 'type': 'bool'},
'can_select_leaf_nodes': {'key': 'CanSelectLeafNodes', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(SwaggerCustomDynamicTreeSettings, self).__init__(**kwargs)
self.can_select_parent_nodes = kwargs.get('can_select_parent_nodes', None)
self.can_select_leaf_nodes = kwargs.get('can_select_leaf_nodes', None)
class SwaggerExternalDocumentation(msrest.serialization.Model):
"""The swagger external documentation.
:param description: The document description.
:type description: str
:param uri: The documentation Uri.
:type uri: str
:param extensions: The vendor extensions.
:type extensions: dict[str, object]
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'extensions': {'key': 'extensions', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(SwaggerExternalDocumentation, self).__init__(**kwargs)
self.description = kwargs.get('description', None)
self.uri = kwargs.get('uri', None)
self.extensions = kwargs.get('extensions', None)
class SwaggerSchema(msrest.serialization.Model):
"""The swagger schema.
:param ref: The reference.
:type ref: str
:param type: The type. Possible values include: "String", "Number", "Integer", "Boolean",
"Array", "File", "Object", "Null".
:type type: str or ~azure.mgmt.logic.models.SwaggerSchemaType
:param title: The title.
:type title: str
:param items: The items schema.
:type items: ~azure.mgmt.logic.models.SwaggerSchema
:param properties: The object properties.
:type properties: dict[str, ~azure.mgmt.logic.models.SwaggerSchema]
:param additional_properties: The additional properties.
:type additional_properties: object
:param required: The object required properties.
:type required: list[str]
:param max_properties: The maximum number of allowed properties.
:type max_properties: int
:param min_properties: The minimum number of allowed properties.
:type min_properties: int
:param all_of: The schemas which must pass validation when this schema is used.
:type all_of: list[~azure.mgmt.logic.models.SwaggerSchema]
:param discriminator: The discriminator.
:type discriminator: str
:param read_only: Indicates whether this property must be present in a request.
:type read_only: bool
:param xml: The xml representation format for a property.
:type xml: ~azure.mgmt.logic.models.SwaggerXml
:param external_docs: The external documentation.
:type external_docs: ~azure.mgmt.logic.models.SwaggerExternalDocumentation
:param example: The example value.
:type example: object
:param notification_url_extension: Indicates the notification url extension. If this is set,
the property's value should be a callback url for a webhook.
:type notification_url_extension: bool
:param dynamic_schema_old: The dynamic schema configuration.
:type dynamic_schema_old: ~azure.mgmt.logic.models.SwaggerCustomDynamicSchema
:param dynamic_schema_new: The dynamic schema configuration.
:type dynamic_schema_new: ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties
:param dynamic_list_new: The dynamic list.
:type dynamic_list_new: ~azure.mgmt.logic.models.SwaggerCustomDynamicList
:param dynamic_tree: The dynamic values tree configuration.
:type dynamic_tree: ~azure.mgmt.logic.models.SwaggerCustomDynamicTree
"""
_attribute_map = {
'ref': {'key': 'ref', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'title': {'key': 'title', 'type': 'str'},
'items': {'key': 'items', 'type': 'SwaggerSchema'},
'properties': {'key': 'properties', 'type': '{SwaggerSchema}'},
'additional_properties': {'key': 'additionalProperties', 'type': 'object'},
'required': {'key': 'required', 'type': '[str]'},
'max_properties': {'key': 'maxProperties', 'type': 'int'},
'min_properties': {'key': 'minProperties', 'type': 'int'},
'all_of': {'key': 'allOf', 'type': '[SwaggerSchema]'},
'discriminator': {'key': 'discriminator', 'type': 'str'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'xml': {'key': 'xml', 'type': 'SwaggerXml'},
'external_docs': {'key': 'externalDocs', 'type': 'SwaggerExternalDocumentation'},
'example': {'key': 'example', 'type': 'object'},
'notification_url_extension': {'key': 'notificationUrlExtension', 'type': 'bool'},
'dynamic_schema_old': {'key': 'dynamicSchemaOld', 'type': 'SwaggerCustomDynamicSchema'},
'dynamic_schema_new': {'key': 'dynamicSchemaNew', 'type': 'SwaggerCustomDynamicProperties'},
'dynamic_list_new': {'key': 'dynamicListNew', 'type': 'SwaggerCustomDynamicList'},
'dynamic_tree': {'key': 'dynamicTree', 'type': 'SwaggerCustomDynamicTree'},
}
def __init__(
self,
**kwargs
):
super(SwaggerSchema, self).__init__(**kwargs)
self.ref = kwargs.get('ref', None)
self.type = kwargs.get('type', None)
self.title = kwargs.get('title', None)
self.items = kwargs.get('items', None)
self.properties = kwargs.get('properties', None)
self.additional_properties = kwargs.get('additional_properties', None)
self.required = kwargs.get('required', None)
self.max_properties = kwargs.get('max_properties', None)
self.min_properties = kwargs.get('min_properties', None)
self.all_of = kwargs.get('all_of', None)
self.discriminator = kwargs.get('discriminator', None)
self.read_only = kwargs.get('read_only', None)
self.xml = kwargs.get('xml', None)
self.external_docs = kwargs.get('external_docs', None)
self.example = kwargs.get('example', None)
self.notification_url_extension = kwargs.get('notification_url_extension', None)
self.dynamic_schema_old = kwargs.get('dynamic_schema_old', None)
self.dynamic_schema_new = kwargs.get('dynamic_schema_new', None)
self.dynamic_list_new = kwargs.get('dynamic_list_new', None)
self.dynamic_tree = kwargs.get('dynamic_tree', None)
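# Illustrative sketch: a small object schema built from the SwaggerSchema model; the
# property names are arbitrary examples and only a few of the available fields are used.
def _example_swagger_schema():
    return SwaggerSchema(
        type='Object',
        title='ExamplePayload',
        properties={
            'id': SwaggerSchema(type='String'),
            'count': SwaggerSchema(type='Integer'),
        },
        required=['id'],
    )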
class SwaggerXml(msrest.serialization.Model):
"""The Swagger XML.
:param name: The xml element or attribute name.
:type name: str
:param namespace: The xml namespace.
:type namespace: str
:param prefix: The name prefix.
:type prefix: str
:param attribute: Indicates whether the property should be an attribute instead of an element.
:type attribute: bool
:param wrapped: Indicates whether the array elements are wrapped in a container element.
:type wrapped: bool
:param extensions: The vendor extensions.
:type extensions: dict[str, object]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'prefix': {'key': 'prefix', 'type': 'str'},
'attribute': {'key': 'attribute', 'type': 'bool'},
'wrapped': {'key': 'wrapped', 'type': 'bool'},
'extensions': {'key': 'extensions', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(SwaggerXml, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.namespace = kwargs.get('namespace', None)
self.prefix = kwargs.get('prefix', None)
self.attribute = kwargs.get('attribute', None)
self.wrapped = kwargs.get('wrapped', None)
self.extensions = kwargs.get('extensions', None)
class TrackingEvent(msrest.serialization.Model):
"""The tracking event.
All required parameters must be populated in order to send to Azure.
:param event_level: Required. The event level. Possible values include: "LogAlways",
"Critical", "Error", "Warning", "Informational", "Verbose".
:type event_level: str or ~azure.mgmt.logic.models.EventLevel
:param event_time: Required. The event time.
:type event_time: ~datetime.datetime
:param record_type: Required. The record type. Possible values include: "NotSpecified",
"Custom", "AS2Message", "AS2MDN", "X12Interchange", "X12FunctionalGroup", "X12TransactionSet",
"X12InterchangeAcknowledgment", "X12FunctionalGroupAcknowledgment",
"X12TransactionSetAcknowledgment", "EdifactInterchange", "EdifactFunctionalGroup",
"EdifactTransactionSet", "EdifactInterchangeAcknowledgment",
"EdifactFunctionalGroupAcknowledgment", "EdifactTransactionSetAcknowledgment".
:type record_type: str or ~azure.mgmt.logic.models.TrackingRecordType
:param record: The record.
:type record: object
:param error: The error.
:type error: ~azure.mgmt.logic.models.TrackingEventErrorInfo
"""
_validation = {
'event_level': {'required': True},
'event_time': {'required': True},
'record_type': {'required': True},
}
_attribute_map = {
'event_level': {'key': 'eventLevel', 'type': 'str'},
'event_time': {'key': 'eventTime', 'type': 'iso-8601'},
'record_type': {'key': 'recordType', 'type': 'str'},
'record': {'key': 'record', 'type': 'object'},
'error': {'key': 'error', 'type': 'TrackingEventErrorInfo'},
}
def __init__(
self,
**kwargs
):
super(TrackingEvent, self).__init__(**kwargs)
self.event_level = kwargs['event_level']
self.event_time = kwargs['event_time']
self.record_type = kwargs['record_type']
self.record = kwargs.get('record', None)
self.error = kwargs.get('error', None)
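# Illustrative sketch: event_level, event_time and record_type are required (KeyError if
# omitted); the timestamp and record payload below are example values only.
def _example_tracking_event():
    import datetime as _dt
    return TrackingEvent(
        event_level='Informational',
        event_time=_dt.datetime(2020, 1, 1, 12, 0, 0),
        record_type='Custom',
        record={'example': True},
    )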
class TrackingEventErrorInfo(msrest.serialization.Model):
"""The tracking event error info.
:param message: The message.
:type message: str
:param code: The code.
:type code: str
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackingEventErrorInfo, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
self.code = kwargs.get('code', None)
class TrackingEventsDefinition(msrest.serialization.Model):
"""The tracking events definition.
All required parameters must be populated in order to send to Azure.
:param source_type: Required. The source type.
:type source_type: str
:param track_events_options: The track events options. Possible values include: "None",
"DisableSourceInfoEnrich".
:type track_events_options: str or ~azure.mgmt.logic.models.TrackEventsOperationOptions
:param events: Required. The events.
:type events: list[~azure.mgmt.logic.models.TrackingEvent]
"""
_validation = {
'source_type': {'required': True},
'events': {'required': True},
}
_attribute_map = {
'source_type': {'key': 'sourceType', 'type': 'str'},
'track_events_options': {'key': 'trackEventsOptions', 'type': 'str'},
'events': {'key': 'events', 'type': '[TrackingEvent]'},
}
def __init__(
self,
**kwargs
):
super(TrackingEventsDefinition, self).__init__(**kwargs)
self.source_type = kwargs['source_type']
self.track_events_options = kwargs.get('track_events_options', None)
self.events = kwargs['events']
class Workflow(Resource):
"""The workflow type.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar provisioning_state: Gets the provisioning state. Possible values include: "NotSpecified",
"Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
"Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
"Unregistered", "Completed", "Renewing", "Pending", "Waiting", "InProgress".
:vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
:ivar created_time: Gets the created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: Gets the changed time.
:vartype changed_time: ~datetime.datetime
:param state: The state. Possible values include: "NotSpecified", "Completed", "Enabled",
"Disabled", "Deleted", "Suspended".
:type state: str or ~azure.mgmt.logic.models.WorkflowState
:ivar version: Gets the version.
:vartype version: str
:ivar access_endpoint: Gets the access endpoint.
:vartype access_endpoint: str
:param endpoints_configuration: The endpoints configuration.
:type endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration
:param access_control: The access control configuration.
:type access_control: ~azure.mgmt.logic.models.FlowAccessControlConfiguration
:ivar sku: The sku.
:vartype sku: ~azure.mgmt.logic.models.Sku
:param integration_account: The integration account.
:type integration_account: ~azure.mgmt.logic.models.ResourceReference
:param integration_service_environment: The integration service environment.
:type integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
:param definition: The definition.
:type definition: object
:param parameters: The parameters.
:type parameters: dict[str, ~azure.mgmt.logic.models.WorkflowParameter]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
'version': {'readonly': True},
'access_endpoint': {'readonly': True},
'sku': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'access_endpoint': {'key': 'properties.accessEndpoint', 'type': 'str'},
'endpoints_configuration': {'key': 'properties.endpointsConfiguration', 'type': 'FlowEndpointsConfiguration'},
'access_control': {'key': 'properties.accessControl', 'type': 'FlowAccessControlConfiguration'},
'sku': {'key': 'properties.sku', 'type': 'Sku'},
'integration_account': {'key': 'properties.integrationAccount', 'type': 'ResourceReference'},
'integration_service_environment': {'key': 'properties.integrationServiceEnvironment', 'type': 'ResourceReference'},
'definition': {'key': 'properties.definition', 'type': 'object'},
'parameters': {'key': 'properties.parameters', 'type': '{WorkflowParameter}'},
}
def __init__(
self,
**kwargs
):
super(Workflow, self).__init__(**kwargs)
self.provisioning_state = None
self.created_time = None
self.changed_time = None
self.state = kwargs.get('state', None)
self.version = None
self.access_endpoint = None
self.endpoints_configuration = kwargs.get('endpoints_configuration', None)
self.access_control = kwargs.get('access_control', None)
self.sku = None
self.integration_account = kwargs.get('integration_account', None)
self.integration_service_environment = kwargs.get('integration_service_environment', None)
self.definition = kwargs.get('definition', None)
self.parameters = kwargs.get('parameters', None)
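# Illustrative sketch: a minimal Workflow resource as one might pass to a
# create-or-update operation. The location, tags and the trimmed definition
# document are assumed placeholder values; 'definition' is typed as a plain
# object, so a dict is accepted.
def _example_workflow():
    return Workflow(
        location='westus',
        tags={'env': 'example'},
        definition={
            'contentVersion': '1.0.0.0',
            'triggers': {},
            'actions': {},
            'outputs': {},
        },
    )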
class WorkflowFilter(msrest.serialization.Model):
"""The workflow filter.
:param state: The state of workflows. Possible values include: "NotSpecified", "Completed",
"Enabled", "Disabled", "Deleted", "Suspended".
:type state: str or ~azure.mgmt.logic.models.WorkflowState
"""
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowFilter, self).__init__(**kwargs)
self.state = kwargs.get('state', None)
class WorkflowListResult(msrest.serialization.Model):
"""The list of workflows.
:param value: The list of workflows.
:type value: list[~azure.mgmt.logic.models.Workflow]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Workflow]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class WorkflowParameter(msrest.serialization.Model):
"""The workflow parameters.
:param type: The type. Possible values include: "NotSpecified", "String", "SecureString",
"Int", "Float", "Bool", "Array", "Object", "SecureObject".
:type type: str or ~azure.mgmt.logic.models.ParameterType
:param value: The value.
:type value: object
:param metadata: The metadata.
:type metadata: object
:param description: The description.
:type description: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'},
'metadata': {'key': 'metadata', 'type': 'object'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowParameter, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.value = kwargs.get('value', None)
self.metadata = kwargs.get('metadata', None)
self.description = kwargs.get('description', None)
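# Illustrative sketch: declaring workflow parameters keyed by parameter name,
# in the shape expected by the 'parameters' property of Workflow and
# WorkflowVersion. The parameter names and values are assumed example data;
# the type strings come from the choices listed in the docstring above.
def _example_workflow_parameters():
    return {
        'storageAccountName': WorkflowParameter(
            type='String',
            value='examplestorage',
            description='Name of the storage account used by the workflow.',
        ),
        'retryCount': WorkflowParameter(type='Int', value=3),
    }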
class WorkflowOutputParameter(WorkflowParameter):
"""The workflow output parameter.
Variables are only populated by the server, and will be ignored when sending a request.
:param type: The type. Possible values include: "NotSpecified", "String", "SecureString",
"Int", "Float", "Bool", "Array", "Object", "SecureObject".
:type type: str or ~azure.mgmt.logic.models.ParameterType
:param value: The value.
:type value: object
:param metadata: The metadata.
:type metadata: object
:param description: The description.
:type description: str
:ivar error: Gets the error.
:vartype error: object
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'},
'metadata': {'key': 'metadata', 'type': 'object'},
'description': {'key': 'description', 'type': 'str'},
'error': {'key': 'error', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(WorkflowOutputParameter, self).__init__(**kwargs)
self.error = None
class WorkflowReference(ResourceReference):
"""The workflow reference.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: The resource id.
:type id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowReference, self).__init__(**kwargs)
class WorkflowRun(SubResource):
"""The workflow run.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the workflow run name.
:vartype name: str
:ivar type: Gets the workflow run type.
:vartype type: str
:ivar wait_end_time: Gets the wait end time.
:vartype wait_end_time: ~datetime.datetime
:ivar start_time: Gets the start time.
:vartype start_time: ~datetime.datetime
:ivar end_time: Gets the end time.
:vartype end_time: ~datetime.datetime
:ivar status: Gets the status. Possible values include: "NotSpecified", "Paused", "Running",
"Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut",
"Aborted", "Ignored".
:vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus
:ivar code: Gets the code.
:vartype code: str
:ivar error: Gets the error.
:vartype error: object
:ivar correlation_id: Gets the correlation id.
:vartype correlation_id: str
:param correlation: The run correlation.
:type correlation: ~azure.mgmt.logic.models.Correlation
:ivar workflow: Gets the reference to workflow version.
:vartype workflow: ~azure.mgmt.logic.models.ResourceReference
:ivar trigger: Gets the fired trigger.
:vartype trigger: ~azure.mgmt.logic.models.WorkflowRunTrigger
:ivar outputs: Gets the outputs.
:vartype outputs: dict[str, ~azure.mgmt.logic.models.WorkflowOutputParameter]
:ivar response: Gets the response of the flow run.
:vartype response: ~azure.mgmt.logic.models.WorkflowRunTrigger
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'wait_end_time': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'status': {'readonly': True},
'code': {'readonly': True},
'error': {'readonly': True},
'correlation_id': {'readonly': True},
'workflow': {'readonly': True},
'trigger': {'readonly': True},
'outputs': {'readonly': True},
'response': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'wait_end_time': {'key': 'properties.waitEndTime', 'type': 'iso-8601'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'status': {'key': 'properties.status', 'type': 'str'},
'code': {'key': 'properties.code', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'object'},
'correlation_id': {'key': 'properties.correlationId', 'type': 'str'},
'correlation': {'key': 'properties.correlation', 'type': 'Correlation'},
'workflow': {'key': 'properties.workflow', 'type': 'ResourceReference'},
'trigger': {'key': 'properties.trigger', 'type': 'WorkflowRunTrigger'},
'outputs': {'key': 'properties.outputs', 'type': '{WorkflowOutputParameter}'},
'response': {'key': 'properties.response', 'type': 'WorkflowRunTrigger'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRun, self).__init__(**kwargs)
self.name = None
self.type = None
self.wait_end_time = None
self.start_time = None
self.end_time = None
self.status = None
self.code = None
self.error = None
self.correlation_id = None
self.correlation = kwargs.get('correlation', None)
self.workflow = None
self.trigger = None
self.outputs = None
self.response = None
class WorkflowRunAction(SubResource):
"""The workflow run action.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the workflow run action name.
:vartype name: str
:ivar type: Gets the workflow run action type.
:vartype type: str
:ivar start_time: Gets the start time.
:vartype start_time: ~datetime.datetime
:ivar end_time: Gets the end time.
:vartype end_time: ~datetime.datetime
:ivar status: Gets the status. Possible values include: "NotSpecified", "Paused", "Running",
"Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut",
"Aborted", "Ignored".
:vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus
:ivar code: Gets the code.
:vartype code: str
:ivar error: Gets the error.
:vartype error: object
:ivar tracking_id: Gets the tracking id.
:vartype tracking_id: str
:param correlation: The correlation properties.
:type correlation: ~azure.mgmt.logic.models.RunActionCorrelation
:ivar inputs_link: Gets the link to inputs.
:vartype inputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar outputs_link: Gets the link to outputs.
:vartype outputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar tracked_properties: Gets the tracked properties.
:vartype tracked_properties: object
:param retry_history: Gets the retry histories.
:type retry_history: list[~azure.mgmt.logic.models.RetryHistory]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'status': {'readonly': True},
'code': {'readonly': True},
'error': {'readonly': True},
'tracking_id': {'readonly': True},
'inputs_link': {'readonly': True},
'outputs_link': {'readonly': True},
'tracked_properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'status': {'key': 'properties.status', 'type': 'str'},
'code': {'key': 'properties.code', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'object'},
'tracking_id': {'key': 'properties.trackingId', 'type': 'str'},
'correlation': {'key': 'properties.correlation', 'type': 'RunActionCorrelation'},
'inputs_link': {'key': 'properties.inputsLink', 'type': 'ContentLink'},
'outputs_link': {'key': 'properties.outputsLink', 'type': 'ContentLink'},
'tracked_properties': {'key': 'properties.trackedProperties', 'type': 'object'},
'retry_history': {'key': 'properties.retryHistory', 'type': '[RetryHistory]'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunAction, self).__init__(**kwargs)
self.name = None
self.type = None
self.start_time = None
self.end_time = None
self.status = None
self.code = None
self.error = None
self.tracking_id = None
self.correlation = kwargs.get('correlation', None)
self.inputs_link = None
self.outputs_link = None
self.tracked_properties = None
self.retry_history = kwargs.get('retry_history', None)
class WorkflowRunActionFilter(msrest.serialization.Model):
"""The workflow run action filter.
:param status: The status of workflow run action. Possible values include: "NotSpecified",
"Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed",
"Faulted", "TimedOut", "Aborted", "Ignored".
:type status: str or ~azure.mgmt.logic.models.WorkflowStatus
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunActionFilter, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
class WorkflowRunActionListResult(msrest.serialization.Model):
"""The list of workflow run actions.
:param value: A list of workflow run actions.
:type value: list[~azure.mgmt.logic.models.WorkflowRunAction]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkflowRunAction]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunActionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class WorkflowRunActionRepetitionDefinition(Resource):
"""The workflow run action repetition definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param start_time: The start time of the workflow scope repetition.
:type start_time: ~datetime.datetime
:param end_time: The end time of the workflow scope repetition.
:type end_time: ~datetime.datetime
:param correlation: The correlation properties.
:type correlation: ~azure.mgmt.logic.models.RunActionCorrelation
:param status: The status of the workflow scope repetition. Possible values include:
"NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended",
"Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", "Ignored".
:type status: str or ~azure.mgmt.logic.models.WorkflowStatus
:param code: The workflow scope repetition code.
:type code: str
:param error: Any object.
:type error: object
:ivar tracking_id: Gets the tracking id.
:vartype tracking_id: str
:ivar inputs: Gets the inputs.
:vartype inputs: object
:ivar inputs_link: Gets the link to inputs.
:vartype inputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar outputs: Gets the outputs.
:vartype outputs: object
:ivar outputs_link: Gets the link to outputs.
:vartype outputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar tracked_properties: Gets the tracked properties.
:vartype tracked_properties: object
:param retry_history: Gets the retry histories.
:type retry_history: list[~azure.mgmt.logic.models.RetryHistory]
:param iteration_count: The iteration count.
:type iteration_count: int
:param repetition_indexes: The repetition indexes.
:type repetition_indexes: list[~azure.mgmt.logic.models.RepetitionIndex]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tracking_id': {'readonly': True},
'inputs': {'readonly': True},
'inputs_link': {'readonly': True},
'outputs': {'readonly': True},
'outputs_link': {'readonly': True},
'tracked_properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'correlation': {'key': 'properties.correlation', 'type': 'RunActionCorrelation'},
'status': {'key': 'properties.status', 'type': 'str'},
'code': {'key': 'properties.code', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'object'},
'tracking_id': {'key': 'properties.trackingId', 'type': 'str'},
'inputs': {'key': 'properties.inputs', 'type': 'object'},
'inputs_link': {'key': 'properties.inputsLink', 'type': 'ContentLink'},
'outputs': {'key': 'properties.outputs', 'type': 'object'},
'outputs_link': {'key': 'properties.outputsLink', 'type': 'ContentLink'},
'tracked_properties': {'key': 'properties.trackedProperties', 'type': 'object'},
'retry_history': {'key': 'properties.retryHistory', 'type': '[RetryHistory]'},
'iteration_count': {'key': 'properties.iterationCount', 'type': 'int'},
'repetition_indexes': {'key': 'properties.repetitionIndexes', 'type': '[RepetitionIndex]'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunActionRepetitionDefinition, self).__init__(**kwargs)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.correlation = kwargs.get('correlation', None)
self.status = kwargs.get('status', None)
self.code = kwargs.get('code', None)
self.error = kwargs.get('error', None)
self.tracking_id = None
self.inputs = None
self.inputs_link = None
self.outputs = None
self.outputs_link = None
self.tracked_properties = None
self.retry_history = kwargs.get('retry_history', None)
self.iteration_count = kwargs.get('iteration_count', None)
self.repetition_indexes = kwargs.get('repetition_indexes', None)
class WorkflowRunActionRepetitionDefinitionCollection(msrest.serialization.Model):
"""A collection of workflow run action repetitions.
:param next_link: The link used to get the next page of results.
:type next_link: str
:param value: A list of workflow run action repetitions.
:type value: list[~azure.mgmt.logic.models.WorkflowRunActionRepetitionDefinition]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[WorkflowRunActionRepetitionDefinition]'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunActionRepetitionDefinitionCollection, self).__init__(**kwargs)
self.next_link = kwargs.get('next_link', None)
self.value = kwargs.get('value', None)
class WorkflowRunActionRepetitionProperties(OperationResult):
"""The workflow run action repetition properties definition.
Variables are only populated by the server, and will be ignored when sending a request.
:param start_time: The start time of the workflow scope repetition.
:type start_time: ~datetime.datetime
:param end_time: The end time of the workflow scope repetition.
:type end_time: ~datetime.datetime
:param correlation: The correlation properties.
:type correlation: ~azure.mgmt.logic.models.RunActionCorrelation
:param status: The status of the workflow scope repetition. Possible values include:
"NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended",
"Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", "Ignored".
:type status: str or ~azure.mgmt.logic.models.WorkflowStatus
:param code: The workflow scope repetition code.
:type code: str
:param error: Any object.
:type error: object
:ivar tracking_id: Gets the tracking id.
:vartype tracking_id: str
:ivar inputs: Gets the inputs.
:vartype inputs: object
:ivar inputs_link: Gets the link to inputs.
:vartype inputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar outputs: Gets the outputs.
:vartype outputs: object
:ivar outputs_link: Gets the link to outputs.
:vartype outputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar tracked_properties: Gets the tracked properties.
:vartype tracked_properties: object
:param retry_history: Gets the retry histories.
:type retry_history: list[~azure.mgmt.logic.models.RetryHistory]
:param iteration_count: The iteration count.
:type iteration_count: int
:param repetition_indexes: The repetition indexes.
:type repetition_indexes: list[~azure.mgmt.logic.models.RepetitionIndex]
"""
_validation = {
'tracking_id': {'readonly': True},
'inputs': {'readonly': True},
'inputs_link': {'readonly': True},
'outputs': {'readonly': True},
'outputs_link': {'readonly': True},
'tracked_properties': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'correlation': {'key': 'correlation', 'type': 'RunActionCorrelation'},
'status': {'key': 'status', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
'error': {'key': 'error', 'type': 'object'},
'tracking_id': {'key': 'trackingId', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': 'object'},
'inputs_link': {'key': 'inputsLink', 'type': 'ContentLink'},
'outputs': {'key': 'outputs', 'type': 'object'},
'outputs_link': {'key': 'outputsLink', 'type': 'ContentLink'},
'tracked_properties': {'key': 'trackedProperties', 'type': 'object'},
'retry_history': {'key': 'retryHistory', 'type': '[RetryHistory]'},
'iteration_count': {'key': 'iterationCount', 'type': 'int'},
'repetition_indexes': {'key': 'repetitionIndexes', 'type': '[RepetitionIndex]'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunActionRepetitionProperties, self).__init__(**kwargs)
self.repetition_indexes = kwargs.get('repetition_indexes', None)
class WorkflowRunFilter(msrest.serialization.Model):
"""The workflow run filter.
:param status: The status of workflow run. Possible values include: "NotSpecified", "Paused",
"Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted",
"TimedOut", "Aborted", "Ignored".
:type status: str or ~azure.mgmt.logic.models.WorkflowStatus
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunFilter, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
class WorkflowRunListResult(msrest.serialization.Model):
"""The list of workflow runs.
:param value: A list of workflow runs.
:type value: list[~azure.mgmt.logic.models.WorkflowRun]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkflowRun]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class WorkflowRunTrigger(msrest.serialization.Model):
"""The workflow run trigger.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Gets the name.
:vartype name: str
:ivar inputs: Gets the inputs.
:vartype inputs: object
:ivar inputs_link: Gets the link to inputs.
:vartype inputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar outputs: Gets the outputs.
:vartype outputs: object
:ivar outputs_link: Gets the link to outputs.
:vartype outputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar scheduled_time: Gets the scheduled time.
:vartype scheduled_time: ~datetime.datetime
:ivar start_time: Gets the start time.
:vartype start_time: ~datetime.datetime
:ivar end_time: Gets the end time.
:vartype end_time: ~datetime.datetime
:ivar tracking_id: Gets the tracking id.
:vartype tracking_id: str
:param correlation: The run correlation.
:type correlation: ~azure.mgmt.logic.models.Correlation
:ivar code: Gets the code.
:vartype code: str
:ivar status: Gets the status. Possible values include: "NotSpecified", "Paused", "Running",
"Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut",
"Aborted", "Ignored".
:vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus
:ivar error: Gets the error.
:vartype error: object
:ivar tracked_properties: Gets the tracked properties.
:vartype tracked_properties: object
"""
_validation = {
'name': {'readonly': True},
'inputs': {'readonly': True},
'inputs_link': {'readonly': True},
'outputs': {'readonly': True},
'outputs_link': {'readonly': True},
'scheduled_time': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'tracking_id': {'readonly': True},
'code': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
'tracked_properties': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': 'object'},
'inputs_link': {'key': 'inputsLink', 'type': 'ContentLink'},
'outputs': {'key': 'outputs', 'type': 'object'},
'outputs_link': {'key': 'outputsLink', 'type': 'ContentLink'},
'scheduled_time': {'key': 'scheduledTime', 'type': 'iso-8601'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'tracking_id': {'key': 'trackingId', 'type': 'str'},
'correlation': {'key': 'correlation', 'type': 'Correlation'},
'code': {'key': 'code', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'object'},
'tracked_properties': {'key': 'trackedProperties', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(WorkflowRunTrigger, self).__init__(**kwargs)
self.name = None
self.inputs = None
self.inputs_link = None
self.outputs = None
self.outputs_link = None
self.scheduled_time = None
self.start_time = None
self.end_time = None
self.tracking_id = None
self.correlation = kwargs.get('correlation', None)
self.code = None
self.status = None
self.error = None
self.tracked_properties = None
class WorkflowTrigger(SubResource):
"""The workflow trigger.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the workflow trigger name.
:vartype name: str
:ivar type: Gets the workflow trigger type.
:vartype type: str
:ivar provisioning_state: Gets the provisioning state. Possible values include: "NotSpecified",
"Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
"Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
"Unregistered", "Completed".
:vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowTriggerProvisioningState
:ivar created_time: Gets the created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: Gets the changed time.
:vartype changed_time: ~datetime.datetime
:ivar state: Gets the state. Possible values include: "NotSpecified", "Completed", "Enabled",
"Disabled", "Deleted", "Suspended".
:vartype state: str or ~azure.mgmt.logic.models.WorkflowState
:ivar status: Gets the status. Possible values include: "NotSpecified", "Paused", "Running",
"Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut",
"Aborted", "Ignored".
:vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus
:ivar last_execution_time: Gets the last execution time.
:vartype last_execution_time: ~datetime.datetime
:ivar next_execution_time: Gets the next execution time.
:vartype next_execution_time: ~datetime.datetime
:ivar recurrence: Gets the workflow trigger recurrence.
:vartype recurrence: ~azure.mgmt.logic.models.WorkflowTriggerRecurrence
:ivar workflow: Gets the reference to workflow.
:vartype workflow: ~azure.mgmt.logic.models.ResourceReference
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
'state': {'readonly': True},
'status': {'readonly': True},
'last_execution_time': {'readonly': True},
'next_execution_time': {'readonly': True},
'recurrence': {'readonly': True},
'workflow': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'last_execution_time': {'key': 'properties.lastExecutionTime', 'type': 'iso-8601'},
'next_execution_time': {'key': 'properties.nextExecutionTime', 'type': 'iso-8601'},
'recurrence': {'key': 'properties.recurrence', 'type': 'WorkflowTriggerRecurrence'},
'workflow': {'key': 'properties.workflow', 'type': 'ResourceReference'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTrigger, self).__init__(**kwargs)
self.name = None
self.type = None
self.provisioning_state = None
self.created_time = None
self.changed_time = None
self.state = None
self.status = None
self.last_execution_time = None
self.next_execution_time = None
self.recurrence = None
self.workflow = None
class WorkflowTriggerCallbackUrl(msrest.serialization.Model):
"""The workflow trigger callback URL.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Gets the workflow trigger callback URL.
:vartype value: str
:ivar method: Gets the workflow trigger callback URL HTTP method.
:vartype method: str
:ivar base_path: Gets the workflow trigger callback URL base path.
:vartype base_path: str
:ivar relative_path: Gets the workflow trigger callback URL relative path.
:vartype relative_path: str
:param relative_path_parameters: Gets the workflow trigger callback URL relative path
parameters.
:type relative_path_parameters: list[str]
:param queries: Gets the workflow trigger callback URL query parameters.
:type queries: ~azure.mgmt.logic.models.WorkflowTriggerListCallbackUrlQueries
"""
_validation = {
'value': {'readonly': True},
'method': {'readonly': True},
'base_path': {'readonly': True},
'relative_path': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'method': {'key': 'method', 'type': 'str'},
'base_path': {'key': 'basePath', 'type': 'str'},
'relative_path': {'key': 'relativePath', 'type': 'str'},
'relative_path_parameters': {'key': 'relativePathParameters', 'type': '[str]'},
'queries': {'key': 'queries', 'type': 'WorkflowTriggerListCallbackUrlQueries'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerCallbackUrl, self).__init__(**kwargs)
self.value = None
self.method = None
self.base_path = None
self.relative_path = None
self.relative_path_parameters = kwargs.get('relative_path_parameters', None)
self.queries = kwargs.get('queries', None)
class WorkflowTriggerFilter(msrest.serialization.Model):
"""The workflow trigger filter.
:param state: The state of workflow trigger. Possible values include: "NotSpecified",
"Completed", "Enabled", "Disabled", "Deleted", "Suspended".
:type state: str or ~azure.mgmt.logic.models.WorkflowState
"""
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerFilter, self).__init__(**kwargs)
self.state = kwargs.get('state', None)
class WorkflowTriggerHistory(SubResource):
"""The workflow trigger history.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the workflow trigger history name.
:vartype name: str
:ivar type: Gets the workflow trigger history type.
:vartype type: str
:ivar start_time: Gets the start time.
:vartype start_time: ~datetime.datetime
:ivar end_time: Gets the end time.
:vartype end_time: ~datetime.datetime
:ivar scheduled_time: The scheduled time.
:vartype scheduled_time: ~datetime.datetime
:ivar status: Gets the status. Possible values include: "NotSpecified", "Paused", "Running",
"Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut",
"Aborted", "Ignored".
:vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus
:ivar code: Gets the code.
:vartype code: str
:ivar error: Gets the error.
:vartype error: object
:ivar tracking_id: Gets the tracking id.
:vartype tracking_id: str
:param correlation: The run correlation.
:type correlation: ~azure.mgmt.logic.models.Correlation
:ivar inputs_link: Gets the link to input parameters.
:vartype inputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar outputs_link: Gets the link to output parameters.
:vartype outputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar fired: The value indicating whether the trigger was fired.
:vartype fired: bool
:ivar run: Gets the reference to workflow run.
:vartype run: ~azure.mgmt.logic.models.ResourceReference
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'scheduled_time': {'readonly': True},
'status': {'readonly': True},
'code': {'readonly': True},
'error': {'readonly': True},
'tracking_id': {'readonly': True},
'inputs_link': {'readonly': True},
'outputs_link': {'readonly': True},
'fired': {'readonly': True},
'run': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'scheduled_time': {'key': 'properties.scheduledTime', 'type': 'iso-8601'},
'status': {'key': 'properties.status', 'type': 'str'},
'code': {'key': 'properties.code', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'object'},
'tracking_id': {'key': 'properties.trackingId', 'type': 'str'},
'correlation': {'key': 'properties.correlation', 'type': 'Correlation'},
'inputs_link': {'key': 'properties.inputsLink', 'type': 'ContentLink'},
'outputs_link': {'key': 'properties.outputsLink', 'type': 'ContentLink'},
'fired': {'key': 'properties.fired', 'type': 'bool'},
'run': {'key': 'properties.run', 'type': 'ResourceReference'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerHistory, self).__init__(**kwargs)
self.name = None
self.type = None
self.start_time = None
self.end_time = None
self.scheduled_time = None
self.status = None
self.code = None
self.error = None
self.tracking_id = None
self.correlation = kwargs.get('correlation', None)
self.inputs_link = None
self.outputs_link = None
self.fired = None
self.run = None
class WorkflowTriggerHistoryFilter(msrest.serialization.Model):
"""The workflow trigger history filter.
:param status: The status of workflow trigger history. Possible values include: "NotSpecified",
"Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed",
"Faulted", "TimedOut", "Aborted", "Ignored".
:type status: str or ~azure.mgmt.logic.models.WorkflowStatus
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerHistoryFilter, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
class WorkflowTriggerHistoryListResult(msrest.serialization.Model):
"""The list of workflow trigger histories.
:param value: A list of workflow trigger histories.
:type value: list[~azure.mgmt.logic.models.WorkflowTriggerHistory]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkflowTriggerHistory]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerHistoryListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class WorkflowTriggerListCallbackUrlQueries(msrest.serialization.Model):
"""Gets the workflow trigger callback URL query parameters.
:param api_version: The api version.
:type api_version: str
:param sp: The SAS permissions.
:type sp: str
:param sv: The SAS version.
:type sv: str
:param sig: The SAS signature.
:type sig: str
:param se: The SAS timestamp.
:type se: str
"""
_attribute_map = {
'api_version': {'key': 'api-version', 'type': 'str'},
'sp': {'key': 'sp', 'type': 'str'},
'sv': {'key': 'sv', 'type': 'str'},
'sig': {'key': 'sig', 'type': 'str'},
'se': {'key': 'se', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerListCallbackUrlQueries, self).__init__(**kwargs)
self.api_version = kwargs.get('api_version', None)
self.sp = kwargs.get('sp', None)
self.sv = kwargs.get('sv', None)
self.sig = kwargs.get('sig', None)
self.se = kwargs.get('se', None)
class WorkflowTriggerListResult(msrest.serialization.Model):
"""The list of workflow triggers.
:param value: A list of workflow triggers.
:type value: list[~azure.mgmt.logic.models.WorkflowTrigger]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkflowTrigger]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class WorkflowTriggerRecurrence(msrest.serialization.Model):
"""The workflow trigger recurrence.
:param frequency: The frequency. Possible values include: "NotSpecified", "Second", "Minute",
"Hour", "Day", "Week", "Month", "Year".
:type frequency: str or ~azure.mgmt.logic.models.RecurrenceFrequency
:param interval: The interval.
:type interval: int
:param start_time: The start time.
:type start_time: str
:param end_time: The end time.
:type end_time: str
:param time_zone: The time zone.
:type time_zone: str
:param schedule: The recurrence schedule.
:type schedule: ~azure.mgmt.logic.models.RecurrenceSchedule
"""
_attribute_map = {
'frequency': {'key': 'frequency', 'type': 'str'},
'interval': {'key': 'interval', 'type': 'int'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'RecurrenceSchedule'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerRecurrence, self).__init__(**kwargs)
self.frequency = kwargs.get('frequency', None)
self.interval = kwargs.get('interval', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.time_zone = kwargs.get('time_zone', None)
self.schedule = kwargs.get('schedule', None)
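# Illustrative sketch: a recurrence that fires once a day. 'Day' is one of the
# RecurrenceFrequency choices listed in the docstring above; the start time and
# time zone strings are assumed example data, and the optional RecurrenceSchedule
# is omitted here.
def _example_daily_recurrence():
    return WorkflowTriggerRecurrence(
        frequency='Day',
        interval=1,
        start_time='2021-01-01T00:00:00Z',
        time_zone='UTC',
    )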
class WorkflowTriggerReference(ResourceReference):
"""The workflow trigger reference.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: The resource id.
:type id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param flow_name: The workflow name.
:type flow_name: str
:param trigger_name: The workflow trigger name.
:type trigger_name: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'flow_name': {'key': 'flowName', 'type': 'str'},
'trigger_name': {'key': 'triggerName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowTriggerReference, self).__init__(**kwargs)
self.flow_name = kwargs.get('flow_name', None)
self.trigger_name = kwargs.get('trigger_name', None)
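# Illustrative sketch: referencing a specific trigger of a workflow by name.
# The subscription id, resource group, workflow name and trigger name in the
# resource id are assumed example values.
def _example_trigger_reference():
    return WorkflowTriggerReference(
        id='/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/'
           'example-rg/providers/Microsoft.Logic/workflows/example-workflow/'
           'triggers/manual',
        flow_name='example-workflow',
        trigger_name='manual',
    )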
class WorkflowVersion(Resource):
"""The workflow version.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar provisioning_state: The provisioning state. Possible values include: "NotSpecified",
"Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
"Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
"Unregistered", "Completed", "Renewing", "Pending", "Waiting", "InProgress".
:vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
:ivar created_time: Gets the created time.
:vartype created_time: ~datetime.datetime
:ivar changed_time: Gets the changed time.
:vartype changed_time: ~datetime.datetime
:param state: The state. Possible values include: "NotSpecified", "Completed", "Enabled",
"Disabled", "Deleted", "Suspended".
:type state: str or ~azure.mgmt.logic.models.WorkflowState
:ivar version: Gets the version.
:vartype version: str
:ivar access_endpoint: Gets the access endpoint.
:vartype access_endpoint: str
:param endpoints_configuration: The endpoints configuration.
:type endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration
:param access_control: The access control configuration.
:type access_control: ~azure.mgmt.logic.models.FlowAccessControlConfiguration
:ivar sku: The sku.
:vartype sku: ~azure.mgmt.logic.models.Sku
:param integration_account: The integration account.
:type integration_account: ~azure.mgmt.logic.models.ResourceReference
:param definition: The definition.
:type definition: object
:param parameters: The parameters.
:type parameters: dict[str, ~azure.mgmt.logic.models.WorkflowParameter]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
'version': {'readonly': True},
'access_endpoint': {'readonly': True},
'sku': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'access_endpoint': {'key': 'properties.accessEndpoint', 'type': 'str'},
'endpoints_configuration': {'key': 'properties.endpointsConfiguration', 'type': 'FlowEndpointsConfiguration'},
'access_control': {'key': 'properties.accessControl', 'type': 'FlowAccessControlConfiguration'},
'sku': {'key': 'properties.sku', 'type': 'Sku'},
'integration_account': {'key': 'properties.integrationAccount', 'type': 'ResourceReference'},
'definition': {'key': 'properties.definition', 'type': 'object'},
'parameters': {'key': 'properties.parameters', 'type': '{WorkflowParameter}'},
}
def __init__(
self,
**kwargs
):
super(WorkflowVersion, self).__init__(**kwargs)
self.provisioning_state = None
self.created_time = None
self.changed_time = None
self.state = kwargs.get('state', None)
self.version = None
self.access_endpoint = None
self.endpoints_configuration = kwargs.get('endpoints_configuration', None)
self.access_control = kwargs.get('access_control', None)
self.sku = None
self.integration_account = kwargs.get('integration_account', None)
self.definition = kwargs.get('definition', None)
self.parameters = kwargs.get('parameters', None)
class WorkflowVersionListResult(msrest.serialization.Model):
"""The list of workflow versions.
:param value: A list of workflow versions.
:type value: list[~azure.mgmt.logic.models.WorkflowVersion]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkflowVersion]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WorkflowVersionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class WsdlService(msrest.serialization.Model):
"""The WSDL service.
:param qualified_name: The qualified name.
:type qualified_name: str
:param endpoint_qualified_names: The list of endpoints' qualified names.
:type endpoint_qualified_names: list[str]
"""
_attribute_map = {
'qualified_name': {'key': 'qualifiedName', 'type': 'str'},
'endpoint_qualified_names': {'key': 'EndpointQualifiedNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(WsdlService, self).__init__(**kwargs)
self.qualified_name = kwargs.get('qualified_name', None)
self.endpoint_qualified_names = kwargs.get('endpoint_qualified_names', None)
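# Illustrative sketch: describing a WSDL service and its endpoints. The
# qualified names are assumed example data.
def _example_wsdl_service():
    return WsdlService(
        qualified_name='EchoService',
        endpoint_qualified_names=['EchoServiceEndpoint'],
    )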
class X12AcknowledgementSettings(msrest.serialization.Model):
"""The X12 agreement acknowledgement settings.
All required parameters must be populated in order to send to Azure.
:param need_technical_acknowledgement: Required. The value indicating whether technical
acknowledgement is needed.
:type need_technical_acknowledgement: bool
:param batch_technical_acknowledgements: Required. The value indicating whether to batch the
technical acknowledgements.
:type batch_technical_acknowledgements: bool
:param need_functional_acknowledgement: Required. The value indicating whether functional
acknowledgement is needed.
:type need_functional_acknowledgement: bool
:param functional_acknowledgement_version: The functional acknowledgement version.
:type functional_acknowledgement_version: str
:param batch_functional_acknowledgements: Required. The value indicating whether to batch
functional acknowledgements.
:type batch_functional_acknowledgements: bool
:param need_implementation_acknowledgement: Required. The value indicating whether
implementation acknowledgement is needed.
:type need_implementation_acknowledgement: bool
:param implementation_acknowledgement_version: The implementation acknowledgement version.
:type implementation_acknowledgement_version: str
:param batch_implementation_acknowledgements: Required. The value indicating whether to batch
implementation acknowledgements.
:type batch_implementation_acknowledgements: bool
:param need_loop_for_valid_messages: Required. The value indicating whether a loop is needed
for valid messages.
:type need_loop_for_valid_messages: bool
:param send_synchronous_acknowledgement: Required. The value indicating whether to send
synchronous acknowledgement.
:type send_synchronous_acknowledgement: bool
:param acknowledgement_control_number_prefix: The acknowledgement control number prefix.
:type acknowledgement_control_number_prefix: str
:param acknowledgement_control_number_suffix: The acknowledgement control number suffix.
:type acknowledgement_control_number_suffix: str
:param acknowledgement_control_number_lower_bound: Required. The acknowledgement control number
lower bound.
:type acknowledgement_control_number_lower_bound: int
:param acknowledgement_control_number_upper_bound: Required. The acknowledgement control number
upper bound.
:type acknowledgement_control_number_upper_bound: int
:param rollover_acknowledgement_control_number: Required. The value indicating whether to
rollover acknowledgement control number.
:type rollover_acknowledgement_control_number: bool
"""
_validation = {
'need_technical_acknowledgement': {'required': True},
'batch_technical_acknowledgements': {'required': True},
'need_functional_acknowledgement': {'required': True},
'batch_functional_acknowledgements': {'required': True},
'need_implementation_acknowledgement': {'required': True},
'batch_implementation_acknowledgements': {'required': True},
'need_loop_for_valid_messages': {'required': True},
'send_synchronous_acknowledgement': {'required': True},
'acknowledgement_control_number_lower_bound': {'required': True},
'acknowledgement_control_number_upper_bound': {'required': True},
'rollover_acknowledgement_control_number': {'required': True},
}
_attribute_map = {
'need_technical_acknowledgement': {'key': 'needTechnicalAcknowledgement', 'type': 'bool'},
'batch_technical_acknowledgements': {'key': 'batchTechnicalAcknowledgements', 'type': 'bool'},
'need_functional_acknowledgement': {'key': 'needFunctionalAcknowledgement', 'type': 'bool'},
'functional_acknowledgement_version': {'key': 'functionalAcknowledgementVersion', 'type': 'str'},
'batch_functional_acknowledgements': {'key': 'batchFunctionalAcknowledgements', 'type': 'bool'},
'need_implementation_acknowledgement': {'key': 'needImplementationAcknowledgement', 'type': 'bool'},
'implementation_acknowledgement_version': {'key': 'implementationAcknowledgementVersion', 'type': 'str'},
'batch_implementation_acknowledgements': {'key': 'batchImplementationAcknowledgements', 'type': 'bool'},
'need_loop_for_valid_messages': {'key': 'needLoopForValidMessages', 'type': 'bool'},
'send_synchronous_acknowledgement': {'key': 'sendSynchronousAcknowledgement', 'type': 'bool'},
'acknowledgement_control_number_prefix': {'key': 'acknowledgementControlNumberPrefix', 'type': 'str'},
'acknowledgement_control_number_suffix': {'key': 'acknowledgementControlNumberSuffix', 'type': 'str'},
'acknowledgement_control_number_lower_bound': {'key': 'acknowledgementControlNumberLowerBound', 'type': 'int'},
'acknowledgement_control_number_upper_bound': {'key': 'acknowledgementControlNumberUpperBound', 'type': 'int'},
'rollover_acknowledgement_control_number': {'key': 'rolloverAcknowledgementControlNumber', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(X12AcknowledgementSettings, self).__init__(**kwargs)
self.need_technical_acknowledgement = kwargs['need_technical_acknowledgement']
self.batch_technical_acknowledgements = kwargs['batch_technical_acknowledgements']
self.need_functional_acknowledgement = kwargs['need_functional_acknowledgement']
self.functional_acknowledgement_version = kwargs.get('functional_acknowledgement_version', None)
self.batch_functional_acknowledgements = kwargs['batch_functional_acknowledgements']
self.need_implementation_acknowledgement = kwargs['need_implementation_acknowledgement']
self.implementation_acknowledgement_version = kwargs.get('implementation_acknowledgement_version', None)
self.batch_implementation_acknowledgements = kwargs['batch_implementation_acknowledgements']
self.need_loop_for_valid_messages = kwargs['need_loop_for_valid_messages']
self.send_synchronous_acknowledgement = kwargs['send_synchronous_acknowledgement']
self.acknowledgement_control_number_prefix = kwargs.get('acknowledgement_control_number_prefix', None)
self.acknowledgement_control_number_suffix = kwargs.get('acknowledgement_control_number_suffix', None)
self.acknowledgement_control_number_lower_bound = kwargs['acknowledgement_control_number_lower_bound']
self.acknowledgement_control_number_upper_bound = kwargs['acknowledgement_control_number_upper_bound']
self.rollover_acknowledgement_control_number = kwargs['rollover_acknowledgement_control_number']
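# Illustrative sketch: X12 acknowledgement settings with every required field
# populated. Only the two acknowledgement version strings and the control
# number prefix/suffix are optional; omitting any required keyword makes
# __init__ raise KeyError. The concrete values are assumed example data.
def _example_x12_acknowledgement_settings():
    return X12AcknowledgementSettings(
        need_technical_acknowledgement=False,
        batch_technical_acknowledgements=True,
        need_functional_acknowledgement=False,
        batch_functional_acknowledgements=True,
        need_implementation_acknowledgement=False,
        batch_implementation_acknowledgements=True,
        need_loop_for_valid_messages=False,
        send_synchronous_acknowledgement=True,
        acknowledgement_control_number_lower_bound=1,
        acknowledgement_control_number_upper_bound=999999999,
        rollover_acknowledgement_control_number=True,
    )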
class X12AgreementContent(msrest.serialization.Model):
"""The X12 agreement content.
All required parameters must be populated in order to send to Azure.
:param receive_agreement: Required. The X12 one-way receive agreement.
:type receive_agreement: ~azure.mgmt.logic.models.X12OneWayAgreement
:param send_agreement: Required. The X12 one-way send agreement.
:type send_agreement: ~azure.mgmt.logic.models.X12OneWayAgreement
"""
_validation = {
'receive_agreement': {'required': True},
'send_agreement': {'required': True},
}
_attribute_map = {
'receive_agreement': {'key': 'receiveAgreement', 'type': 'X12OneWayAgreement'},
'send_agreement': {'key': 'sendAgreement', 'type': 'X12OneWayAgreement'},
}
def __init__(
self,
**kwargs
):
super(X12AgreementContent, self).__init__(**kwargs)
self.receive_agreement = kwargs['receive_agreement']
self.send_agreement = kwargs['send_agreement']
class X12DelimiterOverrides(msrest.serialization.Model):
"""The X12 delimiter override settings.
All required parameters must be populated in order to send to Azure.
:param protocol_version: The protocol version.
:type protocol_version: str
:param message_id: The message id.
:type message_id: str
:param data_element_separator: Required. The data element separator.
:type data_element_separator: int
:param component_separator: Required. The component separator.
:type component_separator: int
:param segment_terminator: Required. The segment terminator.
:type segment_terminator: int
:param segment_terminator_suffix: Required. The segment terminator suffix. Possible values
include: "NotSpecified", "None", "CR", "LF", "CRLF".
:type segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix
:param replace_character: Required. The replacement character.
:type replace_character: int
:param replace_separators_in_payload: Required. The value indicating whether to replace
separators in payload.
:type replace_separators_in_payload: bool
:param target_namespace: The target namespace on which these delimiter settings have to be
applied.
:type target_namespace: str
"""
_validation = {
'data_element_separator': {'required': True},
'component_separator': {'required': True},
'segment_terminator': {'required': True},
'segment_terminator_suffix': {'required': True},
'replace_character': {'required': True},
'replace_separators_in_payload': {'required': True},
}
_attribute_map = {
'protocol_version': {'key': 'protocolVersion', 'type': 'str'},
'message_id': {'key': 'messageId', 'type': 'str'},
'data_element_separator': {'key': 'dataElementSeparator', 'type': 'int'},
'component_separator': {'key': 'componentSeparator', 'type': 'int'},
'segment_terminator': {'key': 'segmentTerminator', 'type': 'int'},
'segment_terminator_suffix': {'key': 'segmentTerminatorSuffix', 'type': 'str'},
'replace_character': {'key': 'replaceCharacter', 'type': 'int'},
'replace_separators_in_payload': {'key': 'replaceSeparatorsInPayload', 'type': 'bool'},
'target_namespace': {'key': 'targetNamespace', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12DelimiterOverrides, self).__init__(**kwargs)
self.protocol_version = kwargs.get('protocol_version', None)
self.message_id = kwargs.get('message_id', None)
self.data_element_separator = kwargs['data_element_separator']
self.component_separator = kwargs['component_separator']
self.segment_terminator = kwargs['segment_terminator']
self.segment_terminator_suffix = kwargs['segment_terminator_suffix']
self.replace_character = kwargs['replace_character']
self.replace_separators_in_payload = kwargs['replace_separators_in_payload']
self.target_namespace = kwargs.get('target_namespace', None)
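# Illustrative sketch: delimiter overrides using the ASCII codes for '*' (42),
# ':' (58) and '~' (126), which are conventional X12 separators; the protocol
# version and message id are assumed example data.
def _example_x12_delimiter_overrides():
    return X12DelimiterOverrides(
        protocol_version='00401',
        message_id='850',
        data_element_separator=42,   # '*'
        component_separator=58,      # ':'
        segment_terminator=126,      # '~'
        segment_terminator_suffix='None',
        replace_character=36,        # '$'
        replace_separators_in_payload=False,
    )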
class X12EnvelopeOverride(msrest.serialization.Model):
"""The X12 envelope override settings.
All required parameters must be populated in order to send to Azure.
:param target_namespace: Required. The target namespace on which these envelope settings have to
be applied.
:type target_namespace: str
:param protocol_version: Required. The protocol version on which these envelope settings have to
be applied.
:type protocol_version: str
:param message_id: Required. The message id on which these envelope settings have to be applied.
:type message_id: str
:param responsible_agency_code: Required. The responsible agency code.
:type responsible_agency_code: str
:param header_version: Required. The header version.
:type header_version: str
:param sender_application_id: Required. The sender application id.
:type sender_application_id: str
:param receiver_application_id: Required. The receiver application id.
:type receiver_application_id: str
:param functional_identifier_code: The functional identifier code.
:type functional_identifier_code: str
:param date_format: Required. The date format. Possible values include: "NotSpecified",
"CCYYMMDD", "YYMMDD".
:type date_format: str or ~azure.mgmt.logic.models.X12DateFormat
:param time_format: Required. The time format. Possible values include: "NotSpecified", "HHMM",
"HHMMSS", "HHMMSSdd", "HHMMSSd".
:type time_format: str or ~azure.mgmt.logic.models.X12TimeFormat
"""
_validation = {
'target_namespace': {'required': True},
'protocol_version': {'required': True},
'message_id': {'required': True},
'responsible_agency_code': {'required': True},
'header_version': {'required': True},
'sender_application_id': {'required': True},
'receiver_application_id': {'required': True},
'date_format': {'required': True},
'time_format': {'required': True},
}
_attribute_map = {
'target_namespace': {'key': 'targetNamespace', 'type': 'str'},
'protocol_version': {'key': 'protocolVersion', 'type': 'str'},
'message_id': {'key': 'messageId', 'type': 'str'},
'responsible_agency_code': {'key': 'responsibleAgencyCode', 'type': 'str'},
'header_version': {'key': 'headerVersion', 'type': 'str'},
'sender_application_id': {'key': 'senderApplicationId', 'type': 'str'},
'receiver_application_id': {'key': 'receiverApplicationId', 'type': 'str'},
'functional_identifier_code': {'key': 'functionalIdentifierCode', 'type': 'str'},
'date_format': {'key': 'dateFormat', 'type': 'str'},
'time_format': {'key': 'timeFormat', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12EnvelopeOverride, self).__init__(**kwargs)
self.target_namespace = kwargs['target_namespace']
self.protocol_version = kwargs['protocol_version']
self.message_id = kwargs['message_id']
self.responsible_agency_code = kwargs['responsible_agency_code']
self.header_version = kwargs['header_version']
self.sender_application_id = kwargs['sender_application_id']
self.receiver_application_id = kwargs['receiver_application_id']
self.functional_identifier_code = kwargs.get('functional_identifier_code', None)
self.date_format = kwargs['date_format']
self.time_format = kwargs['time_format']
class X12EnvelopeSettings(msrest.serialization.Model):
"""The X12 agreement envelope settings.
All required parameters must be populated in order to send to Azure.
    :param control_standards_id: Required. The control standards id.
:type control_standards_id: int
:param use_control_standards_id_as_repetition_character: Required. The value indicating whether
to use control standards id as repetition character.
:type use_control_standards_id_as_repetition_character: bool
:param sender_application_id: Required. The sender application id.
:type sender_application_id: str
:param receiver_application_id: Required. The receiver application id.
:type receiver_application_id: str
:param control_version_number: Required. The control version number.
:type control_version_number: str
:param interchange_control_number_lower_bound: Required. The interchange control number lower
bound.
:type interchange_control_number_lower_bound: int
:param interchange_control_number_upper_bound: Required. The interchange control number upper
bound.
:type interchange_control_number_upper_bound: int
:param rollover_interchange_control_number: Required. The value indicating whether to rollover
interchange control number.
:type rollover_interchange_control_number: bool
:param enable_default_group_headers: Required. The value indicating whether to enable default
group headers.
:type enable_default_group_headers: bool
:param functional_group_id: The functional group id.
:type functional_group_id: str
:param group_control_number_lower_bound: Required. The group control number lower bound.
:type group_control_number_lower_bound: int
:param group_control_number_upper_bound: Required. The group control number upper bound.
:type group_control_number_upper_bound: int
:param rollover_group_control_number: Required. The value indicating whether to rollover group
control number.
:type rollover_group_control_number: bool
:param group_header_agency_code: Required. The group header agency code.
:type group_header_agency_code: str
:param group_header_version: Required. The group header version.
:type group_header_version: str
:param transaction_set_control_number_lower_bound: Required. The transaction set control number
lower bound.
:type transaction_set_control_number_lower_bound: int
:param transaction_set_control_number_upper_bound: Required. The transaction set control number
upper bound.
:type transaction_set_control_number_upper_bound: int
:param rollover_transaction_set_control_number: Required. The value indicating whether to
rollover transaction set control number.
:type rollover_transaction_set_control_number: bool
:param transaction_set_control_number_prefix: The transaction set control number prefix.
:type transaction_set_control_number_prefix: str
:param transaction_set_control_number_suffix: The transaction set control number suffix.
:type transaction_set_control_number_suffix: str
:param overwrite_existing_transaction_set_control_number: Required. The value indicating
whether to overwrite existing transaction set control number.
:type overwrite_existing_transaction_set_control_number: bool
:param group_header_date_format: Required. The group header date format. Possible values
include: "NotSpecified", "CCYYMMDD", "YYMMDD".
:type group_header_date_format: str or ~azure.mgmt.logic.models.X12DateFormat
:param group_header_time_format: Required. The group header time format. Possible values
include: "NotSpecified", "HHMM", "HHMMSS", "HHMMSSdd", "HHMMSSd".
:type group_header_time_format: str or ~azure.mgmt.logic.models.X12TimeFormat
:param usage_indicator: Required. The usage indicator. Possible values include: "NotSpecified",
"Test", "Information", "Production".
:type usage_indicator: str or ~azure.mgmt.logic.models.UsageIndicator
"""
_validation = {
'control_standards_id': {'required': True},
'use_control_standards_id_as_repetition_character': {'required': True},
'sender_application_id': {'required': True},
'receiver_application_id': {'required': True},
'control_version_number': {'required': True},
'interchange_control_number_lower_bound': {'required': True},
'interchange_control_number_upper_bound': {'required': True},
'rollover_interchange_control_number': {'required': True},
'enable_default_group_headers': {'required': True},
'group_control_number_lower_bound': {'required': True},
'group_control_number_upper_bound': {'required': True},
'rollover_group_control_number': {'required': True},
'group_header_agency_code': {'required': True},
'group_header_version': {'required': True},
'transaction_set_control_number_lower_bound': {'required': True},
'transaction_set_control_number_upper_bound': {'required': True},
'rollover_transaction_set_control_number': {'required': True},
'overwrite_existing_transaction_set_control_number': {'required': True},
'group_header_date_format': {'required': True},
'group_header_time_format': {'required': True},
'usage_indicator': {'required': True},
}
_attribute_map = {
'control_standards_id': {'key': 'controlStandardsId', 'type': 'int'},
'use_control_standards_id_as_repetition_character': {'key': 'useControlStandardsIdAsRepetitionCharacter', 'type': 'bool'},
'sender_application_id': {'key': 'senderApplicationId', 'type': 'str'},
'receiver_application_id': {'key': 'receiverApplicationId', 'type': 'str'},
'control_version_number': {'key': 'controlVersionNumber', 'type': 'str'},
'interchange_control_number_lower_bound': {'key': 'interchangeControlNumberLowerBound', 'type': 'int'},
'interchange_control_number_upper_bound': {'key': 'interchangeControlNumberUpperBound', 'type': 'int'},
'rollover_interchange_control_number': {'key': 'rolloverInterchangeControlNumber', 'type': 'bool'},
'enable_default_group_headers': {'key': 'enableDefaultGroupHeaders', 'type': 'bool'},
'functional_group_id': {'key': 'functionalGroupId', 'type': 'str'},
'group_control_number_lower_bound': {'key': 'groupControlNumberLowerBound', 'type': 'int'},
'group_control_number_upper_bound': {'key': 'groupControlNumberUpperBound', 'type': 'int'},
'rollover_group_control_number': {'key': 'rolloverGroupControlNumber', 'type': 'bool'},
'group_header_agency_code': {'key': 'groupHeaderAgencyCode', 'type': 'str'},
'group_header_version': {'key': 'groupHeaderVersion', 'type': 'str'},
'transaction_set_control_number_lower_bound': {'key': 'transactionSetControlNumberLowerBound', 'type': 'int'},
'transaction_set_control_number_upper_bound': {'key': 'transactionSetControlNumberUpperBound', 'type': 'int'},
'rollover_transaction_set_control_number': {'key': 'rolloverTransactionSetControlNumber', 'type': 'bool'},
'transaction_set_control_number_prefix': {'key': 'transactionSetControlNumberPrefix', 'type': 'str'},
'transaction_set_control_number_suffix': {'key': 'transactionSetControlNumberSuffix', 'type': 'str'},
'overwrite_existing_transaction_set_control_number': {'key': 'overwriteExistingTransactionSetControlNumber', 'type': 'bool'},
'group_header_date_format': {'key': 'groupHeaderDateFormat', 'type': 'str'},
'group_header_time_format': {'key': 'groupHeaderTimeFormat', 'type': 'str'},
'usage_indicator': {'key': 'usageIndicator', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12EnvelopeSettings, self).__init__(**kwargs)
self.control_standards_id = kwargs['control_standards_id']
self.use_control_standards_id_as_repetition_character = kwargs['use_control_standards_id_as_repetition_character']
self.sender_application_id = kwargs['sender_application_id']
self.receiver_application_id = kwargs['receiver_application_id']
self.control_version_number = kwargs['control_version_number']
self.interchange_control_number_lower_bound = kwargs['interchange_control_number_lower_bound']
self.interchange_control_number_upper_bound = kwargs['interchange_control_number_upper_bound']
self.rollover_interchange_control_number = kwargs['rollover_interchange_control_number']
self.enable_default_group_headers = kwargs['enable_default_group_headers']
self.functional_group_id = kwargs.get('functional_group_id', None)
self.group_control_number_lower_bound = kwargs['group_control_number_lower_bound']
self.group_control_number_upper_bound = kwargs['group_control_number_upper_bound']
self.rollover_group_control_number = kwargs['rollover_group_control_number']
self.group_header_agency_code = kwargs['group_header_agency_code']
self.group_header_version = kwargs['group_header_version']
self.transaction_set_control_number_lower_bound = kwargs['transaction_set_control_number_lower_bound']
self.transaction_set_control_number_upper_bound = kwargs['transaction_set_control_number_upper_bound']
self.rollover_transaction_set_control_number = kwargs['rollover_transaction_set_control_number']
self.transaction_set_control_number_prefix = kwargs.get('transaction_set_control_number_prefix', None)
self.transaction_set_control_number_suffix = kwargs.get('transaction_set_control_number_suffix', None)
self.overwrite_existing_transaction_set_control_number = kwargs['overwrite_existing_transaction_set_control_number']
self.group_header_date_format = kwargs['group_header_date_format']
self.group_header_time_format = kwargs['group_header_time_format']
self.usage_indicator = kwargs['usage_indicator']
class X12FramingSettings(msrest.serialization.Model):
"""The X12 agreement framing settings.
All required parameters must be populated in order to send to Azure.
:param data_element_separator: Required. The data element separator.
:type data_element_separator: int
:param component_separator: Required. The component separator.
:type component_separator: int
:param replace_separators_in_payload: Required. The value indicating whether to replace
separators in payload.
:type replace_separators_in_payload: bool
:param replace_character: Required. The replacement character.
:type replace_character: int
:param segment_terminator: Required. The segment terminator.
:type segment_terminator: int
:param character_set: Required. The X12 character set. Possible values include: "NotSpecified",
"Basic", "Extended", "UTF8".
:type character_set: str or ~azure.mgmt.logic.models.X12CharacterSet
:param segment_terminator_suffix: Required. The segment terminator suffix. Possible values
include: "NotSpecified", "None", "CR", "LF", "CRLF".
:type segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix
"""
_validation = {
'data_element_separator': {'required': True},
'component_separator': {'required': True},
'replace_separators_in_payload': {'required': True},
'replace_character': {'required': True},
'segment_terminator': {'required': True},
'character_set': {'required': True},
'segment_terminator_suffix': {'required': True},
}
_attribute_map = {
'data_element_separator': {'key': 'dataElementSeparator', 'type': 'int'},
'component_separator': {'key': 'componentSeparator', 'type': 'int'},
'replace_separators_in_payload': {'key': 'replaceSeparatorsInPayload', 'type': 'bool'},
'replace_character': {'key': 'replaceCharacter', 'type': 'int'},
'segment_terminator': {'key': 'segmentTerminator', 'type': 'int'},
'character_set': {'key': 'characterSet', 'type': 'str'},
'segment_terminator_suffix': {'key': 'segmentTerminatorSuffix', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12FramingSettings, self).__init__(**kwargs)
self.data_element_separator = kwargs['data_element_separator']
self.component_separator = kwargs['component_separator']
self.replace_separators_in_payload = kwargs['replace_separators_in_payload']
self.replace_character = kwargs['replace_character']
self.segment_terminator = kwargs['segment_terminator']
self.character_set = kwargs['character_set']
self.segment_terminator_suffix = kwargs['segment_terminator_suffix']
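# Editorial usage sketch (not generated SDK code): every field listed as required in
# _validation above is read with kwargs['...'], so omitting one raises a KeyError at
# construction time. The separator code points below are assumed example values
# (ASCII '*', ':', '$', '~'); the enum strings come from the docstring above.
#
#     framing = X12FramingSettings(
#         data_element_separator=42,
#         component_separator=58,
#         replace_separators_in_payload=False,
#         replace_character=36,
#         segment_terminator=126,
#         character_set='UTF8',
#         segment_terminator_suffix='CRLF',
#     )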
class X12MessageFilter(msrest.serialization.Model):
"""The X12 message filter for odata query.
All required parameters must be populated in order to send to Azure.
:param message_filter_type: Required. The message filter type. Possible values include:
"NotSpecified", "Include", "Exclude".
:type message_filter_type: str or ~azure.mgmt.logic.models.MessageFilterType
"""
_validation = {
'message_filter_type': {'required': True},
}
_attribute_map = {
'message_filter_type': {'key': 'messageFilterType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12MessageFilter, self).__init__(**kwargs)
self.message_filter_type = kwargs['message_filter_type']
class X12MessageIdentifier(msrest.serialization.Model):
"""The X12 message identifier.
All required parameters must be populated in order to send to Azure.
:param message_id: Required. The message id.
:type message_id: str
"""
_validation = {
'message_id': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12MessageIdentifier, self).__init__(**kwargs)
self.message_id = kwargs['message_id']
class X12OneWayAgreement(msrest.serialization.Model):
"""The X12 one-way agreement.
All required parameters must be populated in order to send to Azure.
:param sender_business_identity: Required. The sender business identity.
:type sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param receiver_business_identity: Required. The receiver business identity.
:type receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
:param protocol_settings: Required. The X12 protocol settings.
:type protocol_settings: ~azure.mgmt.logic.models.X12ProtocolSettings
"""
_validation = {
'sender_business_identity': {'required': True},
'receiver_business_identity': {'required': True},
'protocol_settings': {'required': True},
}
_attribute_map = {
'sender_business_identity': {'key': 'senderBusinessIdentity', 'type': 'BusinessIdentity'},
'receiver_business_identity': {'key': 'receiverBusinessIdentity', 'type': 'BusinessIdentity'},
'protocol_settings': {'key': 'protocolSettings', 'type': 'X12ProtocolSettings'},
}
def __init__(
self,
**kwargs
):
super(X12OneWayAgreement, self).__init__(**kwargs)
self.sender_business_identity = kwargs['sender_business_identity']
self.receiver_business_identity = kwargs['receiver_business_identity']
self.protocol_settings = kwargs['protocol_settings']
class X12ProcessingSettings(msrest.serialization.Model):
"""The X12 processing settings.
All required parameters must be populated in order to send to Azure.
:param mask_security_info: Required. The value indicating whether to mask security information.
:type mask_security_info: bool
:param convert_implied_decimal: Required. The value indicating whether to convert numerical
type to implied decimal.
:type convert_implied_decimal: bool
:param preserve_interchange: Required. The value indicating whether to preserve interchange.
:type preserve_interchange: bool
:param suspend_interchange_on_error: Required. The value indicating whether to suspend
interchange on error.
:type suspend_interchange_on_error: bool
:param create_empty_xml_tags_for_trailing_separators: Required. The value indicating whether to
create empty xml tags for trailing separators.
:type create_empty_xml_tags_for_trailing_separators: bool
:param use_dot_as_decimal_separator: Required. The value indicating whether to use dot as
decimal separator.
:type use_dot_as_decimal_separator: bool
"""
_validation = {
'mask_security_info': {'required': True},
'convert_implied_decimal': {'required': True},
'preserve_interchange': {'required': True},
'suspend_interchange_on_error': {'required': True},
'create_empty_xml_tags_for_trailing_separators': {'required': True},
'use_dot_as_decimal_separator': {'required': True},
}
_attribute_map = {
'mask_security_info': {'key': 'maskSecurityInfo', 'type': 'bool'},
'convert_implied_decimal': {'key': 'convertImpliedDecimal', 'type': 'bool'},
'preserve_interchange': {'key': 'preserveInterchange', 'type': 'bool'},
'suspend_interchange_on_error': {'key': 'suspendInterchangeOnError', 'type': 'bool'},
'create_empty_xml_tags_for_trailing_separators': {'key': 'createEmptyXmlTagsForTrailingSeparators', 'type': 'bool'},
'use_dot_as_decimal_separator': {'key': 'useDotAsDecimalSeparator', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(X12ProcessingSettings, self).__init__(**kwargs)
self.mask_security_info = kwargs['mask_security_info']
self.convert_implied_decimal = kwargs['convert_implied_decimal']
self.preserve_interchange = kwargs['preserve_interchange']
self.suspend_interchange_on_error = kwargs['suspend_interchange_on_error']
self.create_empty_xml_tags_for_trailing_separators = kwargs['create_empty_xml_tags_for_trailing_separators']
self.use_dot_as_decimal_separator = kwargs['use_dot_as_decimal_separator']
class X12ProtocolSettings(msrest.serialization.Model):
"""The X12 agreement protocol settings.
All required parameters must be populated in order to send to Azure.
:param validation_settings: Required. The X12 validation settings.
:type validation_settings: ~azure.mgmt.logic.models.X12ValidationSettings
:param framing_settings: Required. The X12 framing settings.
:type framing_settings: ~azure.mgmt.logic.models.X12FramingSettings
:param envelope_settings: Required. The X12 envelope settings.
:type envelope_settings: ~azure.mgmt.logic.models.X12EnvelopeSettings
:param acknowledgement_settings: Required. The X12 acknowledgment settings.
:type acknowledgement_settings: ~azure.mgmt.logic.models.X12AcknowledgementSettings
:param message_filter: Required. The X12 message filter.
:type message_filter: ~azure.mgmt.logic.models.X12MessageFilter
:param security_settings: Required. The X12 security settings.
:type security_settings: ~azure.mgmt.logic.models.X12SecuritySettings
:param processing_settings: Required. The X12 processing settings.
:type processing_settings: ~azure.mgmt.logic.models.X12ProcessingSettings
:param envelope_overrides: The X12 envelope override settings.
:type envelope_overrides: list[~azure.mgmt.logic.models.X12EnvelopeOverride]
:param validation_overrides: The X12 validation override settings.
:type validation_overrides: list[~azure.mgmt.logic.models.X12ValidationOverride]
:param message_filter_list: The X12 message filter list.
:type message_filter_list: list[~azure.mgmt.logic.models.X12MessageIdentifier]
:param schema_references: Required. The X12 schema references.
:type schema_references: list[~azure.mgmt.logic.models.X12SchemaReference]
:param x12_delimiter_overrides: The X12 delimiter override settings.
:type x12_delimiter_overrides: list[~azure.mgmt.logic.models.X12DelimiterOverrides]
"""
_validation = {
'validation_settings': {'required': True},
'framing_settings': {'required': True},
'envelope_settings': {'required': True},
'acknowledgement_settings': {'required': True},
'message_filter': {'required': True},
'security_settings': {'required': True},
'processing_settings': {'required': True},
'schema_references': {'required': True},
}
_attribute_map = {
'validation_settings': {'key': 'validationSettings', 'type': 'X12ValidationSettings'},
'framing_settings': {'key': 'framingSettings', 'type': 'X12FramingSettings'},
'envelope_settings': {'key': 'envelopeSettings', 'type': 'X12EnvelopeSettings'},
'acknowledgement_settings': {'key': 'acknowledgementSettings', 'type': 'X12AcknowledgementSettings'},
'message_filter': {'key': 'messageFilter', 'type': 'X12MessageFilter'},
'security_settings': {'key': 'securitySettings', 'type': 'X12SecuritySettings'},
'processing_settings': {'key': 'processingSettings', 'type': 'X12ProcessingSettings'},
'envelope_overrides': {'key': 'envelopeOverrides', 'type': '[X12EnvelopeOverride]'},
'validation_overrides': {'key': 'validationOverrides', 'type': '[X12ValidationOverride]'},
'message_filter_list': {'key': 'messageFilterList', 'type': '[X12MessageIdentifier]'},
'schema_references': {'key': 'schemaReferences', 'type': '[X12SchemaReference]'},
'x12_delimiter_overrides': {'key': 'x12DelimiterOverrides', 'type': '[X12DelimiterOverrides]'},
}
def __init__(
self,
**kwargs
):
super(X12ProtocolSettings, self).__init__(**kwargs)
self.validation_settings = kwargs['validation_settings']
self.framing_settings = kwargs['framing_settings']
self.envelope_settings = kwargs['envelope_settings']
self.acknowledgement_settings = kwargs['acknowledgement_settings']
self.message_filter = kwargs['message_filter']
self.security_settings = kwargs['security_settings']
self.processing_settings = kwargs['processing_settings']
self.envelope_overrides = kwargs.get('envelope_overrides', None)
self.validation_overrides = kwargs.get('validation_overrides', None)
self.message_filter_list = kwargs.get('message_filter_list', None)
self.schema_references = kwargs['schema_references']
self.x12_delimiter_overrides = kwargs.get('x12_delimiter_overrides', None)
class X12SchemaReference(msrest.serialization.Model):
"""The X12 schema reference.
All required parameters must be populated in order to send to Azure.
:param message_id: Required. The message id.
:type message_id: str
:param sender_application_id: The sender application id.
:type sender_application_id: str
:param schema_version: Required. The schema version.
:type schema_version: str
:param schema_name: Required. The schema name.
:type schema_name: str
"""
_validation = {
'message_id': {'required': True},
'schema_version': {'required': True},
'schema_name': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'sender_application_id': {'key': 'senderApplicationId', 'type': 'str'},
'schema_version': {'key': 'schemaVersion', 'type': 'str'},
'schema_name': {'key': 'schemaName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12SchemaReference, self).__init__(**kwargs)
self.message_id = kwargs['message_id']
self.sender_application_id = kwargs.get('sender_application_id', None)
self.schema_version = kwargs['schema_version']
self.schema_name = kwargs['schema_name']
class X12SecuritySettings(msrest.serialization.Model):
"""The X12 agreement security settings.
All required parameters must be populated in order to send to Azure.
:param authorization_qualifier: Required. The authorization qualifier.
:type authorization_qualifier: str
:param authorization_value: The authorization value.
:type authorization_value: str
:param security_qualifier: Required. The security qualifier.
:type security_qualifier: str
:param password_value: The password value.
:type password_value: str
"""
_validation = {
'authorization_qualifier': {'required': True},
'security_qualifier': {'required': True},
}
_attribute_map = {
'authorization_qualifier': {'key': 'authorizationQualifier', 'type': 'str'},
'authorization_value': {'key': 'authorizationValue', 'type': 'str'},
'security_qualifier': {'key': 'securityQualifier', 'type': 'str'},
'password_value': {'key': 'passwordValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12SecuritySettings, self).__init__(**kwargs)
self.authorization_qualifier = kwargs['authorization_qualifier']
self.authorization_value = kwargs.get('authorization_value', None)
self.security_qualifier = kwargs['security_qualifier']
self.password_value = kwargs.get('password_value', None)
class X12ValidationOverride(msrest.serialization.Model):
"""The X12 validation override settings.
All required parameters must be populated in order to send to Azure.
    :param message_id: Required. The message id on which the validation settings have to be applied.
:type message_id: str
:param validate_edi_types: Required. The value indicating whether to validate EDI types.
:type validate_edi_types: bool
:param validate_xsd_types: Required. The value indicating whether to validate XSD types.
:type validate_xsd_types: bool
:param allow_leading_and_trailing_spaces_and_zeroes: Required. The value indicating whether to
allow leading and trailing spaces and zeroes.
:type allow_leading_and_trailing_spaces_and_zeroes: bool
:param validate_character_set: Required. The value indicating whether to validate character
Set.
:type validate_character_set: bool
:param trim_leading_and_trailing_spaces_and_zeroes: Required. The value indicating whether to
trim leading and trailing spaces and zeroes.
:type trim_leading_and_trailing_spaces_and_zeroes: bool
:param trailing_separator_policy: Required. The trailing separator policy. Possible values
include: "NotSpecified", "NotAllowed", "Optional", "Mandatory".
:type trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy
"""
_validation = {
'message_id': {'required': True},
'validate_edi_types': {'required': True},
'validate_xsd_types': {'required': True},
'allow_leading_and_trailing_spaces_and_zeroes': {'required': True},
'validate_character_set': {'required': True},
'trim_leading_and_trailing_spaces_and_zeroes': {'required': True},
'trailing_separator_policy': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'validate_edi_types': {'key': 'validateEDITypes', 'type': 'bool'},
'validate_xsd_types': {'key': 'validateXSDTypes', 'type': 'bool'},
'allow_leading_and_trailing_spaces_and_zeroes': {'key': 'allowLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'validate_character_set': {'key': 'validateCharacterSet', 'type': 'bool'},
'trim_leading_and_trailing_spaces_and_zeroes': {'key': 'trimLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trailing_separator_policy': {'key': 'trailingSeparatorPolicy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12ValidationOverride, self).__init__(**kwargs)
self.message_id = kwargs['message_id']
self.validate_edi_types = kwargs['validate_edi_types']
self.validate_xsd_types = kwargs['validate_xsd_types']
self.allow_leading_and_trailing_spaces_and_zeroes = kwargs['allow_leading_and_trailing_spaces_and_zeroes']
self.validate_character_set = kwargs['validate_character_set']
self.trim_leading_and_trailing_spaces_and_zeroes = kwargs['trim_leading_and_trailing_spaces_and_zeroes']
self.trailing_separator_policy = kwargs['trailing_separator_policy']
class X12ValidationSettings(msrest.serialization.Model):
"""The X12 agreement validation settings.
All required parameters must be populated in order to send to Azure.
:param validate_character_set: Required. The value indicating whether to validate character set
in the message.
:type validate_character_set: bool
:param check_duplicate_interchange_control_number: Required. The value indicating whether to
check for duplicate interchange control number.
:type check_duplicate_interchange_control_number: bool
:param interchange_control_number_validity_days: Required. The validity period of interchange
control number.
:type interchange_control_number_validity_days: int
:param check_duplicate_group_control_number: Required. The value indicating whether to check
for duplicate group control number.
:type check_duplicate_group_control_number: bool
:param check_duplicate_transaction_set_control_number: Required. The value indicating whether
to check for duplicate transaction set control number.
:type check_duplicate_transaction_set_control_number: bool
    :param validate_edi_types: Required. The value indicating whether to validate EDI types.
    :type validate_edi_types: bool
    :param validate_xsd_types: Required. The value indicating whether to validate XSD types.
    :type validate_xsd_types: bool
:param allow_leading_and_trailing_spaces_and_zeroes: Required. The value indicating whether to
allow leading and trailing spaces and zeroes.
:type allow_leading_and_trailing_spaces_and_zeroes: bool
:param trim_leading_and_trailing_spaces_and_zeroes: Required. The value indicating whether to
trim leading and trailing spaces and zeroes.
:type trim_leading_and_trailing_spaces_and_zeroes: bool
:param trailing_separator_policy: Required. The trailing separator policy. Possible values
include: "NotSpecified", "NotAllowed", "Optional", "Mandatory".
:type trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy
"""
_validation = {
'validate_character_set': {'required': True},
'check_duplicate_interchange_control_number': {'required': True},
'interchange_control_number_validity_days': {'required': True},
'check_duplicate_group_control_number': {'required': True},
'check_duplicate_transaction_set_control_number': {'required': True},
'validate_edi_types': {'required': True},
'validate_xsd_types': {'required': True},
'allow_leading_and_trailing_spaces_and_zeroes': {'required': True},
'trim_leading_and_trailing_spaces_and_zeroes': {'required': True},
'trailing_separator_policy': {'required': True},
}
_attribute_map = {
'validate_character_set': {'key': 'validateCharacterSet', 'type': 'bool'},
'check_duplicate_interchange_control_number': {'key': 'checkDuplicateInterchangeControlNumber', 'type': 'bool'},
'interchange_control_number_validity_days': {'key': 'interchangeControlNumberValidityDays', 'type': 'int'},
'check_duplicate_group_control_number': {'key': 'checkDuplicateGroupControlNumber', 'type': 'bool'},
'check_duplicate_transaction_set_control_number': {'key': 'checkDuplicateTransactionSetControlNumber', 'type': 'bool'},
'validate_edi_types': {'key': 'validateEDITypes', 'type': 'bool'},
'validate_xsd_types': {'key': 'validateXSDTypes', 'type': 'bool'},
'allow_leading_and_trailing_spaces_and_zeroes': {'key': 'allowLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trim_leading_and_trailing_spaces_and_zeroes': {'key': 'trimLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trailing_separator_policy': {'key': 'trailingSeparatorPolicy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(X12ValidationSettings, self).__init__(**kwargs)
self.validate_character_set = kwargs['validate_character_set']
self.check_duplicate_interchange_control_number = kwargs['check_duplicate_interchange_control_number']
self.interchange_control_number_validity_days = kwargs['interchange_control_number_validity_days']
self.check_duplicate_group_control_number = kwargs['check_duplicate_group_control_number']
self.check_duplicate_transaction_set_control_number = kwargs['check_duplicate_transaction_set_control_number']
self.validate_edi_types = kwargs['validate_edi_types']
self.validate_xsd_types = kwargs['validate_xsd_types']
self.allow_leading_and_trailing_spaces_and_zeroes = kwargs['allow_leading_and_trailing_spaces_and_zeroes']
self.trim_leading_and_trailing_spaces_and_zeroes = kwargs['trim_leading_and_trailing_spaces_and_zeroes']
self.trailing_separator_policy = kwargs['trailing_separator_policy']
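# Editorial usage sketch (not generated SDK code): the field values below are invented
# for illustration; serialize()/as_dict() are assumed to be available from
# msrest.serialization.Model, which these classes inherit from.
if __name__ == "__main__":
    validation = X12ValidationSettings(
        validate_character_set=True,
        check_duplicate_interchange_control_number=True,
        interchange_control_number_validity_days=30,
        check_duplicate_group_control_number=False,
        check_duplicate_transaction_set_control_number=False,
        validate_edi_types=True,
        validate_xsd_types=False,
        allow_leading_and_trailing_spaces_and_zeroes=True,
        trim_leading_and_trailing_spaces_and_zeroes=True,
        trailing_separator_policy='Optional',
    )
    # _attribute_map supplies the wire-format keys, e.g. validate_character_set
    # is emitted as "validateCharacterSet".
    print(validation.serialize())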
| 41.301082 | 145 | 0.67277 |
4a25618452f41039a1ee3aa3591fd0a06267b73b | 1,638 | py | Python | setup.py | roaet/pyresty | 77a04e61e450df0331c6cb5c3eacb7aef1c87a89 | [
"Apache-2.0"
] | null | null | null | setup.py | roaet/pyresty | 77a04e61e450df0331c6cb5c3eacb7aef1c87a89 | [
"Apache-2.0"
] | null | null | null | setup.py | roaet/pyresty | 77a04e61e450df0331c6cb5c3eacb7aef1c87a89 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Justin Hammond
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
required_packages = [
"click",
"configobj",
"requests",
]
setuptools.setup(
name='pyresty',
version='0.0.1',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7'],
install_requires=required_packages,
packages=['pyresty'],
keywords='resty curl',
author='Justin Hammond',
author_email='[email protected]',
license='Apache Software License',
description='Python curl wrapper inspired by micha/resty',
long_description=open('README.md').read(),
url='https://github.com/roaet/pyresty',
zip_safe=False,
entry_points='''
[console_scripts]
pyresty = pyresty.executable:main_run
GET = pyresty.executable:do_get
POST = pyresty.executable:do_post
DELETE = pyresty.executable:do_delete
PUT = pyresty.executable:do_put
PATCH = pyresty.executable:do_patch
'''
)
| 31.5 | 69 | 0.68315 |
4a2562e824504fe0d49fb1917ab8720c17848638 | 1,036 | py | Python | fibonacci_calculator_mpu/Calculator/FibonacciService.py | MaartenUijen/fibonacci_calculator_mpu | b96b1894d4801b3332726cc503618a9d6b8bef29 | [
"MIT"
] | null | null | null | fibonacci_calculator_mpu/Calculator/FibonacciService.py | MaartenUijen/fibonacci_calculator_mpu | b96b1894d4801b3332726cc503618a9d6b8bef29 | [
"MIT"
] | null | null | null | fibonacci_calculator_mpu/Calculator/FibonacciService.py | MaartenUijen/fibonacci_calculator_mpu | b96b1894d4801b3332726cc503618a9d6b8bef29 | [
"MIT"
] | null | null | null | from typing import List
from fibonacci_calculator_mpu.Calculator.FibonacciData import FibonacciData
class FibonacciService:
def __init__(self):
""" Instance of Fibonacci Data"""
self.f = FibonacciData()
def get_fibonacci_number(self, n: int) -> int:
"""
:param n: An index.
:return: Fibonacci number based on index.
"""
return self.f.add_fibonacci_number(n)
def get_fibonacci_sequence(self, n: int) -> List[int]:
"""
:param n: An index.
:return: A list with fibonacci sequence until the given index.
"""
return self.f.build_sequence(n)
def get_fibonacci_index(self, fibonacci_number: int) -> int:
"""
:param fibonacci_number: An arbitrary number.
:return: The index corresponding to the fibonacci_number. If it is not a fibonacci
number it returns an index corresponding to the closest fibonacci number.
"""
return self.f.search_index_fibonacci_number(fibonacci_number)
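# Editorial usage sketch: the expected values assume FibonacciData implements a
# 0-indexed sequence starting 0, 1, 1, 2, ... as the docstrings above suggest.
if __name__ == "__main__":
    service = FibonacciService()
    print(service.get_fibonacci_number(10))    # 55, assuming F(0) = 0
    print(service.get_fibonacci_sequence(6))   # [0, 1, 1, 2, 3, 5, 8], assuming an inclusive index
    print(service.get_fibonacci_index(21))     # 8, the index of 21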
| 32.375 | 90 | 0.655405 |
4a25635611a816007708ca63bb8fe32173ebf4d3 | 3,633 | py | Python | Chapter_02/06_ssh_paramiko.py | dpazavalos/BlackHatPython3 | 73a84ceec6220ec51d83c36afdfb558c8ddbd5e5 | [
"MIT"
] | null | null | null | Chapter_02/06_ssh_paramiko.py | dpazavalos/BlackHatPython3 | 73a84ceec6220ec51d83c36afdfb558c8ddbd5e5 | [
"MIT"
] | null | null | null | Chapter_02/06_ssh_paramiko.py | dpazavalos/BlackHatPython3 | 73a84ceec6220ec51d83c36afdfb558c8ddbd5e5 | [
"MIT"
] | null | null | null | import threading
import paramiko
import subprocess
import socket
from typing import List, Optional
import getpass
# # #
# # # Common send/recv functions
# # #
def send_data(*, to_socket: socket.socket, data_stream: bytes,
send_timeout=2) -> None:
"""
Centralised function to handle sending data stream to receive data. Sends data in consistent
buffer sizes
Args:
to_socket:
Socket to send stream to
data_stream:
Data stream to send
send_timeout:
Set timeout for to_socket
"""
to_socket.settimeout(send_timeout)
try:
data_fragments = []
        for i in range(0, len(data_stream), 4096):
            # Break data stream into byte sized bites
            data_fragments.append(data_stream[i:i + 4096])
        if len(data_fragments[-1]) == 4096:
            # If the last fragment exactly fills the buffer, append a newline
            # so the stream does not end on a full buffer
            data_fragments.append(b'\n')
for frag in data_fragments:
to_socket.send(frag)
    except (socket.timeout, TimeoutError):
pass
def receive_data(*, from_socket: socket.socket,
from_timeout=2) -> bytes:
"""
Centralised fuction to handle receiving one or more packet buffers from TCP socket
Args:
from_socket:
Socket sending stream to this instance.
from_timeout:
Set timeout for from_socket
Returns:
Complete binary stream from socket
"""
from_socket.settimeout(from_timeout)
fragments: List[bytes] = []
try:
stream = from_socket.recv(4096)
fragments.append(stream)
while True:
if len(stream) < 4096:
break
else:
stream = from_socket.recv(4096)
fragments.append(stream)
    except (socket.timeout, TimeoutError):
pass
return b''.join(fragments)
# # #
# # # SSHv2 functions, using paramiko API
# # #
def ssh_command(*,
ip: str = None, port: int = None, user: str = None, password: str = None,
command: str = None, known_hosts: Optional[str] = None):
"""
Non-interactive SSH command client, using paramiko API. Connects and sends single command to
target ssh
Args:
ip:
target IP (Def localhost)
port:
target port (Def 22)
user:
Username to pass to target IP (Def running user)
password:
password to pass to target IP (Def '')
command:
            One shot command to pass (Def whoami)
known_hosts:
Optional key support, using absolute path to .ssh/known_hosts
"""
if not ip:
ip = 'localhost'
if not port:
port = 22
if not user:
user = getpass.getuser()
if not password:
password = ''
if not command:
command = 'whoami'
# Bind new SSH client
client = paramiko.SSHClient()
# Optional key support
if known_hosts:
client.load_host_keys(known_hosts)
# Auto add missing keys
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connect
client.connect(ip, port=port, username=user, password=password)
# request a new channel to server, session type
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
server_response = receive_data(from_socket=ssh_session)
# server_response = ssh_session.recv(1024)
print(server_response)
ssh_command(port=2222, password='toor', command='id')
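# Editorial note, a hedged alternative invocation: the host, credentials and
# known_hosts path below are placeholders, not values from this chapter.
# ssh_command(ip='192.168.0.10', port=22, user='admin', password='secret',
#             command='uname -a', known_hosts='/home/admin/.ssh/known_hosts')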
| 27.11194 | 96 | 0.597303 |
4a2563fc1af5b69a96bf429ac36ff0fb0bb7777e | 124 | py | Python | heatrapy/mats/__init__.py | djsilva99/heatrapy | 91902e8fd9e216638855856ed7c0757bbade11c2 | [
"MIT"
] | 31 | 2019-03-21T15:41:18.000Z | 2022-03-21T15:41:49.000Z | heatrapy/mats/__init__.py | danieljosesilva/heatrapy | 5d1297d67ea237b2c6537133ace8c1f4a6091518 | [
"MIT"
] | 9 | 2020-09-01T08:50:52.000Z | 2022-03-12T00:55:53.000Z | heatrapy/mats/__init__.py | djsilva99/heatrapy | 91902e8fd9e216638855856ed7c0757bbade11c2 | [
"MIT"
] | 7 | 2020-03-10T19:34:32.000Z | 2022-03-28T01:12:59.000Z | """Mats.
This submodule access the properties of materials
"""
from .calmatpro import CalMatPro
__all__ = ['CalMatPro']
| 12.4 | 49 | 0.733871 |
4a256586eb50aab307b6ea6489df3f71ff0536b5 | 1,732 | py | Python | color_histogram/core/hist_common.py | waragai-katsunori/ColorHistogram | f57c1115a94aa72387a6e40aef88b1861eb470ab | [
"MIT"
] | null | null | null | color_histogram/core/hist_common.py | waragai-katsunori/ColorHistogram | f57c1115a94aa72387a6e40aef88b1861eb470ab | [
"MIT"
] | null | null | null | color_histogram/core/hist_common.py | waragai-katsunori/ColorHistogram | f57c1115a94aa72387a6e40aef88b1861eb470ab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
## @package color_histogram.core.hist_common
#
# Common color histogram functions for 1D, 2D, 3D.
# @author tody
# @date 2015/08/29
import numpy as np
def colorCoordinates(color_ids, num_bins, color_range):
color_ids = np.array(color_ids).T
c_min, c_max = color_range
color_coordinates = c_min + (color_ids * (c_max - c_min)) / float(num_bins - 1.0)
return color_coordinates
def colorDensities(hist_bins):
hist_positive = hist_bins > 0.0
color_densities = np.float32(hist_bins[hist_positive])
density_max = np.max(color_densities)
color_densities = color_densities / density_max
return color_densities
def rgbColors(hist_bins, color_bins):
hist_positive = hist_bins > 0.0
colors = color_bins[hist_positive, :]
colors = np.clip(colors, 0.0, 1.0)
return colors
def clipLowDensity(hist_bins, color_bins, alpha):
density_mean = np.mean(hist_bins)
low_density = hist_bins < density_mean * alpha
hist_bins[low_density] = 0.0
for ci in range(3):
color_bins[low_density, ci] = 0.0
def densitySizes(color_densities, density_size_range):
density_size_min, density_size_max = density_size_range
density_size_factor = density_size_max / density_size_min
density_sizes = density_size_min * np.power(density_size_factor, color_densities)
return density_sizes
def range2ticks(tick_range, decimals=1):
ticks = np.around(tick_range, decimals=decimals)
ticks[ticks > 10] = np.rint(ticks[ticks > 10])
return ticks
def range2lims(tick_range):
unit = 0.1 * (tick_range[:, 1] - tick_range[:, 0])
lim = np.array(tick_range)
lim[:, 0] += -unit
lim[:, 1] += unit
return lim
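# Editorial self-check sketch: exercises the helpers above on random data; the
# bin count, alpha and size range are arbitrary example values.
if __name__ == "__main__":
    hist_bins = np.random.rand(16) * 10.0      # fake histogram counts
    color_bins = np.random.rand(16, 3)         # fake RGB centers per bin
    clipLowDensity(hist_bins, color_bins, 0.5)
    densities = colorDensities(hist_bins)
    colors = rgbColors(hist_bins, color_bins)
    sizes = densitySizes(densities, (10.0, 100.0))
    ids = np.where(hist_bins > 0.0)[0]
    coords = colorCoordinates(ids, num_bins=16, color_range=(0.0, 1.0))
    print(coords.shape, colors.shape, sizes.shape)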
| 26.646154 | 85 | 0.700924 |
4a2565983995548cdb6873d1cad6e45bfc941f34 | 6,811 | py | Python | main/results/experiments_falcon.py | wissembrdj/welink | ebc0cd4742578ad22014bd8067796e8cc1869f02 | [
"MIT"
] | null | null | null | main/results/experiments_falcon.py | wissembrdj/welink | ebc0cd4742578ad22014bd8067796e8cc1869f02 | [
"MIT"
] | null | null | null | main/results/experiments_falcon.py | wissembrdj/welink | ebc0cd4742578ad22014bd8067796e8cc1869f02 | [
"MIT"
] | null | null | null | import json
import csv
from result import Result
import requests
import time
import re
import io
from extract_entities import entities
writer = csv.writer(open("falcon_results_qald7.csv", 'a', newline=''))
url = 'https://labs.tib.eu/falcon/api?mode=long'
headers = {'Content-type': 'application/json'}
with open('qald-7.json', encoding='UTF-8') as data_file:
data = json.loads(data_file.read())
nb=0
for distro in data['questions']:
entities_dataset=entities(distro['query']['sparql'])
print(entities_dataset)
entity_mentions=0
correctly_linked=0
n=1
system_result=0
result=[]
tmp=time.time()
for d in distro['question']:
if d["language"]=='en':
question_en=d["string"]
query = {'text': str(question_en)}
data_json = json.dumps(query)
response = requests.post(url, data=data_json, headers=headers)
detected_entity=0
if response:
execution_time=time.time()-tmp
response_json=response.json()
if 'entities' in response_json:
if response_json['entities']:
# system_result=len(response_json['results'])
system_result=len(response_json['entities'])
for em in entities_dataset:
entity_mentions=entity_mentions+1
for i in response_json['entities']:
if i[0]==em:
correctly_linked=correctly_linked+1
result.append(i[1])
n=n+1
#print(correctly_linked, system_result, entity_mentions)
res= Result(correctly_linked, system_result, entity_mentions)
fmeasure=0
if system_result!=0:
entity_precision=res.precision()
else:
entity_precision=0
if entity_mentions!=0:
entity_recall=res.recall()
else:
entity_recall=0
if entity_recall!=0 and entity_precision!=0:
fmeasure= (2*entity_precision*entity_recall)/(entity_precision + entity_recall)
for i in result:
print("id question: ", distro['id'], "result n: ", system_result, detected_entity, result)
print("Precision:", entity_precision," Recall:", entity_recall )
print("____________________________________")
myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, fmeasure, "0", "0", execution_time] ]
myFile = open('falcon_results_qald7.csv', 'a', encoding='utf-8')
with myFile:
writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
writer.writerows(myData)
else:
#No string match
nsm=0
system_result=0
entity_precision=0
entity_recall=0
nsm=nsm+1
myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, "0", "0",nsm, execution_time] ]
print("____________________________________No string match")
myFile = open('falcon_results_qald7.csv', 'a', encoding='utf-8')
with myFile:
writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
writer.writerows(myData)
else:
#No detected named entity:
if entities_dataset:
nbem=0
system_result=0
entity_precision=0
entity_recall=0
correctly_linked=0
detected_entity=0
if 'entity mapping' in distro:
for em in distro["entity mapping"]:
nbem=nbem+1
myData=[[distro['id'],question_en,nbem,detected_entity,system_result,correctly_linked, entity_precision,entity_recall,"0", "1", "0", execution_time] ]
print("____________________________________No detected named entity")
else:
nbem=0
system_result=1
entity_precision=1
entity_recall=1
correctly_linked=1
detected_entity=0
fmeasure=1
if 'entity mapping' in distro:
for em in distro["entity mapping"]:
nbem=nbem+1
myData=[[distro['id'],question_en,nbem,detected_entity,system_result,correctly_linked, entity_precision,entity_recall,fmeasure, "3", "3", execution_time] ]
print("____________________________________No mention + No results")
myFile = open('falcon_results_qald7.csv', 'a', encoding='utf-8')
with myFile:
writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
writer.writerows(myData)
else:
#Unknown error from the web service
execution_time=time.time()-tmp
system_result=0
entity_precision=0
entity_recall=0
fmeasure= 0
entity_mentions=0
detected_entity=0
correctly_linked=0
print("____________________________________Unknown error from the web service")
myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, fmeasure, "2", "2", execution_time] ]
myFile = open('falcon_results_qald7.csv', 'a', encoding='utf-8')
with myFile:
writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
writer.writerows(myData)
print("FALCON process completed")
| 49 | 203 | 0.502129 |
4a25659fd5f6ac46b43718f03d32a72da48ee2c9 | 1,094 | py | Python | creacion_de_aplicaciones/tkinter_iris/app/gui/delete_last.py | soytupadrrre/Master_Python_Eip | c4774209d7dd15584233fe5d4cc01b1434c9316b | [
"MIT"
] | null | null | null | creacion_de_aplicaciones/tkinter_iris/app/gui/delete_last.py | soytupadrrre/Master_Python_Eip | c4774209d7dd15584233fe5d4cc01b1434c9316b | [
"MIT"
] | null | null | null | creacion_de_aplicaciones/tkinter_iris/app/gui/delete_last.py | soytupadrrre/Master_Python_Eip | c4774209d7dd15584233fe5d4cc01b1434c9316b | [
"MIT"
] | null | null | null | import tkinter as tk
from app import RestIris
class DeleteLastApp:
def __init__(self, master=None):
# build ui
self.delete_last_view = tk.Tk() if master is None else tk.Toplevel(master)
self.delete_last_view.title("Victor Luque - Tkinter - Delete Last")
self.label41 = tk.Label(self.delete_last_view)
self.label41.configure(
font="{Calibri} 16 {}", text="Eliminar última fila en Iris Dataset"
)
self.label41.grid(column="0", padx="10", pady="10", row="0")
self.button14 = tk.Button(self.delete_last_view)
self.button14.configure(default="normal", text="Eliminar")
self.button14.grid(column="0", pady="10", row="1")
self.button14.configure(command=self.send_data)
self.delete_last_view.configure(height="200", width="200")
# Main widget
self.mainwindow = self.delete_last_view
def run(self):
self.mainwindow.mainloop()
def send_data(self):
RestIris().delete_last_row()
if __name__ == "__main__":
app = DeleteLastApp()
app.run()
| 32.176471 | 82 | 0.64351 |
4a256632e734042e27d8dd34caedf694b21e5de4 | 1,459 | py | Python | Ch10-1.py | illmatictime/python-mini-project | e0b31fc78254d46c0744d690f8093289b375f600 | [
"MIT"
] | 1 | 2021-03-01T07:17:09.000Z | 2021-03-01T07:17:09.000Z | Ch10-1.py | illmatictime/python-mini-project | e0b31fc78254d46c0744d690f8093289b375f600 | [
"MIT"
] | null | null | null | Ch10-1.py | illmatictime/python-mini-project | e0b31fc78254d46c0744d690f8093289b375f600 | [
"MIT"
] | null | null | null | import string
def openFile():
while True:
try:
fileName = input("Please enter file name to process: ")
fileOpen = open(fileName)
wordTuple = dict()
for line in fileOpen:
line = line.translate(str.maketrans('', '',
string.punctuation))
line = line.translate(str.maketrans('', '', string.whitespace))
for char in line:
char = char.lower()
wordTuple[char] = wordTuple.get(char, 0) + 1
return wordTuple
except IOError:
print("File name", fileName, "cannot be found.")
def sort(wordTuple):
sortList = list()
for k, v in wordTuple.items():
sortList.append((v, k))
sortedTuple = sorted(sortList)
return sortedTuple
def printResults(sortedTuple):
print(sortedTuple)
def repeat():
repeatProgram = input("\nDo you want to try another file? (y or n) ")
if repeatProgram.upper() == "Y":
pass
elif repeatProgram.upper() == "N":
print("Thank you for playing.")
return True
# quit()
else:
print("Please enter y or n.")
        return repeat()
def main():
while True:
wordTuple = openFile()
sortedTuple = sort(wordTuple)
printResults(sortedTuple)
result = repeat()
if result == True:
break
main() | 24.728814 | 79 | 0.527759 |
4a25665fc6f3c212e7f868a7c371c4c15efdc93c | 151 | py | Python | bert_e/git_host/cache.py | tcarmet/bert-e | 8e0623d9a8c7bd111790d72307862167eca18a23 | [
"Apache-2.0"
] | null | null | null | bert_e/git_host/cache.py | tcarmet/bert-e | 8e0623d9a8c7bd111790d72307862167eca18a23 | [
"Apache-2.0"
] | null | null | null | bert_e/git_host/cache.py | tcarmet/bert-e | 8e0623d9a8c7bd111790d72307862167eca18a23 | [
"Apache-2.0"
] | null | null | null | from bert_e.lib.lru_cache import LRUCache
from collections import defaultdict
BUILD_STATUS_CACHE = defaultdict(LRUCache) # type: Dict[str, LRUCache]
| 30.2 | 71 | 0.821192 |
4a25672f05ab2e4670f29e104f5173db728c2b2c | 2,577 | py | Python | src/roles/timelord.py | MishaCatskill/lykos | d2ed4859958a24677fd4dab25391e9e6016a81f8 | [
"BSD-2-Clause"
] | null | null | null | src/roles/timelord.py | MishaCatskill/lykos | d2ed4859958a24677fd4dab25391e9e6016a81f8 | [
"BSD-2-Clause"
] | null | null | null | src/roles/timelord.py | MishaCatskill/lykos | d2ed4859958a24677fd4dab25391e9e6016a81f8 | [
"BSD-2-Clause"
] | null | null | null | import re
import random
import itertools
import math
import threading
import time
from collections import defaultdict
from src.utilities import *
from src import channels, users, debuglog, errlog, plog
from src.functions import get_players, get_all_players, get_main_role, get_reveal_role, get_target
from src.decorators import command, event_listener
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.messages import messages
from src.events import Event
TIME_ATTRIBUTES = (
("DAY_TIME_LIMIT", "TIME_LORD_DAY_LIMIT"),
("DAY_TIME_WARN", "TIME_LORD_DAY_WARN"),
("SHORT_DAY_LIMIT", "TIME_LORD_DAY_LIMIT"),
("SHORT_DAY_WARN", "TIME_LORD_DAY_WARN"),
("NIGHT_TIME_LIMIT", "TIME_LORD_NIGHT_LIMIT"),
("NIGHT_TIME_WARN", "TIME_LORD_NIGHT_WARN"),
)
@event_listener("del_player")
def on_del_player(evt, var, user, mainrole, allroles, death_triggers):
if not death_triggers or "time lord" not in allroles:
return
for attr, new_attr in TIME_ATTRIBUTES:
if attr not in var.ORIGINAL_SETTINGS:
var.ORIGINAL_SETTINGS[attr] = getattr(var, attr)
setattr(var, attr, getattr(var, new_attr))
channels.Main.send(messages["time_lord_dead"].format(var.TIME_LORD_DAY_LIMIT, var.TIME_LORD_NIGHT_LIMIT))
if var.GAMEPHASE == "day":
time_limit = var.DAY_TIME_LIMIT
time_warn = var.DAY_TIME_WARN
phase_id = "DAY_ID"
timer_name = "day_warn"
elif var.GAMEPHASE == "night":
time_limit = var.NIGHT_TIME_LIMIT
time_warn = var.NIGHT_TIME_WARN
phase_id = "NIGHT_ID"
timer_name = "night_warn"
if var.GAMEPHASE in var.TIMERS:
time_left = int((var.TIMERS[var.GAMEPHASE][1] + var.TIMERS[var.GAMEPHASE][2]) - time.time())
if time_left > time_limit > 0:
t = threading.Timer(time_limit, hurry_up, [phase_id, True])
var.TIMERS[var.GAMEPHASE] = (t, time.time(), time_limit)
t.daemon = True
t.start()
# Don't duplicate warnings, i.e. only set the warning timer if a warning was not already given
if timer_name in var.TIMERS:
timer = var.TIMERS[timer_name][0]
        if timer.is_alive():
timer.cancel()
t = threading.Timer(time_warn, hurry_up, [phase_id, False])
var.TIMERS[timer_name] = (t, time.time(), time_warn)
t.daemon = True
t.start()
debuglog("{0} (time lord) TRIGGER".format(user))
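# Editorial sketch (not lykos code): the save-then-override pattern used above,
# shown with a stand-in settings object instead of the real `var` module.
if __name__ == "__main__":
    class _FakeSettings:
        DAY_TIME_LIMIT = 720
        TIME_LORD_DAY_LIMIT = 60
        ORIGINAL_SETTINGS = {}
    _v = _FakeSettings()
    if "DAY_TIME_LIMIT" not in _v.ORIGINAL_SETTINGS:
        _v.ORIGINAL_SETTINGS["DAY_TIME_LIMIT"] = _v.DAY_TIME_LIMIT  # remember the pre-override value
    _v.DAY_TIME_LIMIT = _v.TIME_LORD_DAY_LIMIT  # shorten the phase, as a time lord death does
    print(_v.ORIGINAL_SETTINGS, _v.DAY_TIME_LIMIT)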
# vim: set sw=4 expandtab:
| 35.791667 | 109 | 0.661234 |
4a256823daed13128370692ee3314a17984f5dde | 600 | py | Python | tests/test_catalog_record_converter.py | CSCfi/etsin-finder-search | c76888de65a3c32d98f78f863850a374606420c1 | [
"MIT"
] | null | null | null | tests/test_catalog_record_converter.py | CSCfi/etsin-finder-search | c76888de65a3c32d98f78f863850a374606420c1 | [
"MIT"
] | 9 | 2017-11-11T10:35:41.000Z | 2021-01-21T10:58:50.000Z | tests/test_catalog_record_converter.py | CSCfi/etsin-finder-search | c76888de65a3c32d98f78f863850a374606420c1 | [
"MIT"
] | 2 | 2018-03-06T08:19:48.000Z | 2019-03-20T06:55:16.000Z | # This file is part of the Etsin service
#
# Copyright 2017-2018 Ministry of Education and Culture, Finland
#
# :author: CSC - IT Center for Science Ltd., Espoo Finland <[email protected]>
# :license: MIT
from etsin_finder_search.catalog_record_converter import CRConverter
from .helpers import get_test_object_from_file
def test_get_es_person_or_org_common_data_from_metax_obj():
converter = CRConverter()
output = converter.convert_metax_cr_json_to_es_data_model(get_test_object_from_file('metax_catalog_record.json'))
assert output == get_test_object_from_file('es_document.json')
| 37.5 | 117 | 0.811667 |
4a2568dc93f37d66ec01e259ce64f9620e38bfa5 | 641 | py | Python | tests/mainGUI_test.py | ryandalex1/time-block | 371c62fb713e4ae9e9a046d492b137ce3ca47134 | [
"MIT"
] | null | null | null | tests/mainGUI_test.py | ryandalex1/time-block | 371c62fb713e4ae9e9a046d492b137ce3ca47134 | [
"MIT"
] | null | null | null | tests/mainGUI_test.py | ryandalex1/time-block | 371c62fb713e4ae9e9a046d492b137ce3ca47134 | [
"MIT"
] | null | null | null | import unittest
import mainGUI
from datetime import *
class InitDate(unittest.TestCase):
def tearDown(self):
pass
def setUp(self):
self.new_date = mainGUI.Date(datetime.today())
self.new_date.startH = 7
self.new_date.endH = 15
self.new_date.startHChange = 7
self.new_date.load_date()
def test_date_init(self):
assert self.new_date.events == [], "Date not initializing correctly"
def test_load_date(self):
assert self.new_date.buttons[1]["text"] == "Nothing Scheduled", "Buttons not initializing correctly"
if __name__ == '__main__':
unittest.main()
| 23.740741 | 108 | 0.663027 |
4a256914919e4b3a194d9102f47caa4659516bf7 | 534 | py | Python | users/admin.py | silencemind/Department-Managment-System | e39a28e5043344d323a4af639dca7e79c888f259 | [
"PostgreSQL"
] | 1 | 2020-12-10T15:04:59.000Z | 2020-12-10T15:04:59.000Z | users/admin.py | silencemind/College-Managment-System | e39a28e5043344d323a4af639dca7e79c888f259 | [
"PostgreSQL"
] | 5 | 2020-11-04T07:49:11.000Z | 2021-06-10T20:22:10.000Z | users/admin.py | silencemind/Department-Managment-System | e39a28e5043344d323a4af639dca7e79c888f259 | [
"PostgreSQL"
] | null | null | null | from django.contrib import admin
from .models import Users
from django.contrib.auth.admin import UserAdmin
from department.models import *
# Register your models here.
class UsersAdmin(UserAdmin):
model = Users
fieldsets = UserAdmin.fieldsets + (
('Additional Info', {
'fields': (
('is_student',),
('is_teacher',),
                ('is_admin',)
)
}
),
)
admin.site.register(Users, UsersAdmin)
| 17.225806 | 47 | 0.522472 |
4a256b876063d229f947ae412957e5db38386078 | 1,012 | py | Python | Desafios/desafio98.py | gustavodoamaral/115_Desafios_Python | 8baa1c0353a40f7a63f442293bc0f6852fd94da0 | [
"MIT"
] | 1 | 2022-02-07T01:12:19.000Z | 2022-02-07T01:12:19.000Z | Desafios/desafio98.py | gustavodoamaral/desafios_python_gustavo_guanabara | 8baa1c0353a40f7a63f442293bc0f6852fd94da0 | [
"MIT"
] | null | null | null | Desafios/desafio98.py | gustavodoamaral/desafios_python_gustavo_guanabara | 8baa1c0353a40f7a63f442293bc0f6852fd94da0 | [
"MIT"
] | null | null | null | from time import sleep
def lin():
print(f"{'-='*30}")
def contador(inicio, fim, passo):
"""
    PERFORMS A COUNT AND SHOWS IT ON SCREEN
    PARAM. INICIO: START OF THE COUNT
    PARAM. FIM: END OF THE COUNT
    PARAM. PASSO: STEP OF THE COUNT
    RETURN: NO RETURN VALUE
"""
if passo == 0:
passo = 1
if passo < 0:
passo = passo*-1
lin()
print(f"Contagem de {inicio} até {fim} de {passo} em {passo}")
if fim < inicio:
while inicio >= fim:
sleep(0.5)
print(f"{inicio}", end=" ")
inicio -= passo
print("FIM!")
else:
while inicio <= fim:
sleep(0.5)
print(f"{inicio}", end=" ")
inicio += passo
print("FIM!")
contador(1, 10, 1)
contador(10, 0, 2)
lin()
print("Agora é sua vez de personalizar a contagem!")
inicio = int(input("Início: "))
meio = int(input("Fim: "))
passo = int(input("Passo "))
contador(inicio, meio, passo)
help(contador) | 23 | 66 | 0.527668 |
4a256c9cb0f590e1196715934749bd32ae405ed7 | 6,988 | py | Python | maint_tools/test_docstrings.py | danielmlevans/scikit-learn | 8fc351656a44789893bd8b092ea12fbbf5b803ca | [
"BSD-3-Clause"
] | null | null | null | maint_tools/test_docstrings.py | danielmlevans/scikit-learn | 8fc351656a44789893bd8b092ea12fbbf5b803ca | [
"BSD-3-Clause"
] | null | null | null | maint_tools/test_docstrings.py | danielmlevans/scikit-learn | 8fc351656a44789893bd8b092ea12fbbf5b803ca | [
"BSD-3-Clause"
] | null | null | null | import re
from inspect import signature
from typing import Optional

import pytest

from sklearn.utils import all_estimators

numpydoc_validation = pytest.importorskip("numpydoc.validate")

# List of modules ignored when checking for numpydoc validation.
DOCSTRING_IGNORE_LIST = [
    "HuberRegressor",
    "IterativeImputer",
    "KNNImputer",
    "LabelPropagation",
    "LabelSpreading",
    "LocalOutlierFactor",
    "LocallyLinearEmbedding",
    "MiniBatchKMeans",
    "MissingIndicator",
    "MultiLabelBinarizer",
    "MultiTaskElasticNet",
    "MultiTaskElasticNetCV",
    "MultiTaskLasso",
    "MultiTaskLassoCV",
    "NearestCentroid",
    "NeighborhoodComponentsAnalysis",
    "OrthogonalMatchingPursuit",
    "OrthogonalMatchingPursuitCV",
    "OutputCodeClassifier",
    "PLSCanonical",
    "PLSRegression",
    "PLSSVD",
    "PassiveAggressiveClassifier",
    "PassiveAggressiveRegressor",
    "PatchExtractor",
    "PolynomialFeatures",
    "QuadraticDiscriminantAnalysis",
    "RANSACRegressor",
    "RandomizedSearchCV",
    "RobustScaler",
    "SGDOneClassSVM",
    "SGDRegressor",
    "SelfTrainingClassifier",
    "SimpleImputer",
    "SparseRandomProjection",
    "SpectralBiclustering",
    "SpectralClustering",
    "SpectralCoclustering",
    "SpectralEmbedding",
    "SplineTransformer",
    "StackingClassifier",
    "StackingRegressor",
    "TheilSenRegressor",
    "TransformedTargetRegressor",
    "TweedieRegressor",
]

def get_all_methods():
    estimators = all_estimators()
    for name, Estimator in estimators:
        if name.startswith("_"):
            # skip private classes
            continue
        methods = []
        for name in dir(Estimator):
            if name.startswith("_"):
                continue
            method_obj = getattr(Estimator, name)
            if hasattr(method_obj, "__call__") or isinstance(method_obj, property):
                methods.append(name)
        methods.append(None)

        for method in sorted(methods, key=lambda x: str(x)):
            yield Estimator, method

def filter_errors(errors, method, Estimator=None):
    """
    Ignore some errors based on the method type.

    These rules are specific for scikit-learn."""
    for code, message in errors:
        # We ignore following error code,
        #  - RT02: The first line of the Returns section
        #    should contain only the type, ..
        #    (as we may need refer to the name of the returned
        #    object)
        #  - GL01: Docstring text (summary) should start in the line
        #    immediately after the opening quotes (not in the same line,
        #    or leaving a blank line in between)
        #  - GL02: If there's a blank line, it should be before the
        #    first line of the Returns section, not after (it allows to have
        #    short docstrings for properties).
        if code in ["RT02", "GL01", "GL02"]:
            continue

        # Ignore PR02: Unknown parameters for properties. We sometimes use
        # properties for ducktyping, i.e. SGDClassifier.predict_proba
        if code == "PR02" and Estimator is not None and method is not None:
            method_obj = getattr(Estimator, method)
            if isinstance(method_obj, property):
                continue

        # Following codes are only taken into account for the
        # top level class docstrings:
        #  - ES01: No extended summary found
        #  - SA01: See Also section not found
        #  - EX01: No examples section found
        if method is not None and code in ["EX01", "SA01", "ES01"]:
            continue

        yield code, message

def repr_errors(res, estimator=None, method: Optional[str] = None) -> str:
    """Pretty print original docstring and the obtained errors

    Parameters
    ----------
    res : dict
        result of numpydoc.validate.validate
    estimator : {estimator, None}
        estimator object or None
    method : str
        if estimator is not None, either the method name or None.

    Returns
    -------
    str
        String representation of the error.
    """
    if method is None:
        if hasattr(estimator, "__init__"):
            method = "__init__"
        elif estimator is None:
            raise ValueError("At least one of estimator, method should be provided")
        else:
            raise NotImplementedError

    if estimator is not None:
        obj = getattr(estimator, method)
        try:
            obj_signature = signature(obj)
        except TypeError:
            # In particular we can't parse the signature of properties
            obj_signature = (
                "\nParsing of the method signature failed, "
                "possibly because this is a property."
            )

        obj_name = estimator.__name__ + "." + method
    else:
        obj_signature = ""
        obj_name = method

    msg = "\n\n" + "\n\n".join(
        [
            str(res["file"]),
            obj_name + str(obj_signature),
            res["docstring"],
            "# Errors",
            "\n".join(
                " - {}: {}".format(code, message) for code, message in res["errors"]
            ),
        ]
    )
    return msg

@pytest.mark.parametrize("Estimator, method", get_all_methods())
def test_docstring(Estimator, method, request):
    base_import_path = Estimator.__module__
    import_path = [base_import_path, Estimator.__name__]
    if method is not None:
        import_path.append(method)

    import_path = ".".join(import_path)

    if Estimator.__name__ in DOCSTRING_IGNORE_LIST:
        request.applymarker(
            pytest.mark.xfail(run=False, reason="TODO pass numpydoc validation")
        )

    res = numpydoc_validation.validate(import_path)
    res["errors"] = list(filter_errors(res["errors"], method, Estimator=Estimator))

    if res["errors"]:
        msg = repr_errors(res, Estimator, method)
        raise ValueError(msg)

if __name__ == "__main__":
    import sys
    import argparse

    parser = argparse.ArgumentParser(description="Validate docstring with numpydoc.")
    parser.add_argument("import_path", help="Import path to validate")

    args = parser.parse_args()

    res = numpydoc_validation.validate(args.import_path)

    import_path_sections = args.import_path.split(".")
    # When applied to classes, detect class method. For functions
    # method = None.
    # TODO: this detection can be improved. Currently we assume that we have
    # class methods if the second path element before last is in camel case.
    if len(import_path_sections) >= 2 and re.match(
        r"(?:[A-Z][a-z]*)+", import_path_sections[-2]
    ):
        method = import_path_sections[-1]
    else:
        method = None

    res["errors"] = list(filter_errors(res["errors"], method))

    if res["errors"]:
        msg = repr_errors(res, method=args.import_path)
        print(msg)
        sys.exit(1)
    else:
        print("All docstring checks passed for {}!".format(args.import_path))
| 30.515284 | 85 | 0.629794 |