/curvlinops-for-pytorch-1.1.0.tar.gz/curvlinops-for-pytorch-1.1.0/docs/examples/basic_usage/example_matrix_vector_products.py
r"""
Matrix-vector products
======================
This tutorial gives a basic demonstration of how to set up ``LinearOperators``
for the Hessian and the GGN and how to multiply them onto a vector.
First, the imports.
"""
import matplotlib.pyplot as plt
import numpy
import torch
from torch import nn
from curvlinops import GGNLinearOperator, HessianLinearOperator
from curvlinops.examples.functorch import functorch_ggn, functorch_hessian
from curvlinops.examples.utils import report_nonclose
# make deterministic
torch.manual_seed(0)
numpy.random.seed(0)
# %%
# Setup
# -----
# Let's create some toy data, a small MLP, and use mean-squared error as loss function.
N = 4
D_in = 7
D_hidden = 5
D_out = 3
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
X = torch.rand(N, D_in).to(DEVICE)
y = torch.rand(N, D_out).to(DEVICE)
model = nn.Sequential(
nn.Linear(D_in, D_hidden),
nn.ReLU(),
nn.Linear(D_hidden, D_hidden),
nn.Sigmoid(),
nn.Linear(D_hidden, D_out),
).to(DEVICE)
params = [p for p in model.parameters() if p.requires_grad]
loss_function = nn.MSELoss(reduction="mean").to(DEVICE)
# %%
# Hessian-vector products
# -----------------------
#
# Setting up a linear operator for the Hessian is straightforward.
data = [(X, y)]
H = HessianLinearOperator(model, loss_function, params, data)
# %%
#
# We can now multiply by the Hessian. This operation is carried out in
# PyTorch under the hood, but the operator is compatible with ``scipy``, so we
# can simply multiply it onto a ``numpy`` vector.
D = H.shape[0]
v = numpy.random.rand(D)
Hv = H @ v
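# %%
#
# Since the operator behaves like a SciPy ``LinearOperator``, it can also be
# fed to iterative routines from ``scipy.sparse.linalg``. As a minimal sketch
# (not needed for the rest of the tutorial), here is how the largest Hessian
# eigenvalue could be obtained with ``eigsh``:
from scipy.sparse.linalg import eigsh

top_eigval, _ = eigsh(H, k=1, which="LA")
print("Largest Hessian eigenvalue:", top_eigval)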
# %%
#
# To verify the result, we compute the Hessian with ``functorch``, using a
# utility function from ``curvlinops.examples``:
H_mat = functorch_hessian(model, loss_function, params, data).detach().cpu().numpy()
# %%
#
# Let's check that the multiplication onto ``v`` leads to the same result:
Hv_functorch = H_mat @ v
print("Comparing Hessian-vector product with functorch's Hessian-vector product.")
report_nonclose(Hv, Hv_functorch)
# %%
# Hessian-matrix products
# -----------------------
#
# We can also compute the Hessian's matrix representation with the linear
# operator, simply by multiplying it onto the identity matrix. (Of course, this
# only works if the Hessian is small enough.)
H_mat_from_linop = H @ numpy.eye(D)
# %%
#
# This should yield the same matrix as with :code:`functorch`.
print("Comparing Hessian with functorch's Hessian.")
report_nonclose(H_mat, H_mat_from_linop)
# %%
#
# Last, here's a visualization of the Hessian.
plt.figure()
plt.title("Hessian")
plt.imshow(H_mat)
plt.colorbar()
# %%
# GGN-vector products
# -------------------
#
# Setting up a linear operator for the Fisher/GGN is identical to the Hessian case.
GGN = GGNLinearOperator(model, loss_function, params, data)
# %%
#
# Let's compute a GGN-vector product.
D = H.shape[0]
v = numpy.random.rand(D)
GGNv = GGN @ v
# %%
#
# To verify the result, we will use ``functorch`` to compute the GGN. For that,
# we use the fact that the GGN corresponds to the Hessian when the neural
# network is replaced by its linearization. This is implemented in a utility function of
# :code:`curvlinops.examples`:
GGN_mat = functorch_ggn(model, loss_function, params, data).detach().cpu().numpy()
GGNv_functorch = GGN_mat @ v
print("Comparing GGN-vector product with functorch's GGN-vector product.")
report_nonclose(GGNv, GGNv_functorch)
# %%
# GGN-matrix products
# -------------------
#
# We can also compute the GGN matrix representation with the linear operator,
# simply by multiplying it onto the identity matrix. (Of course, this only
# works if the GGN is small enough.)
GGN_mat_from_linop = GGN @ numpy.eye(D)
# %%
#
# This should yield the same matrix as with :code:`functorch`.
print("Comparing GGN with functorch's GGN.")
report_nonclose(GGN_mat, GGN_mat_from_linop)
# %%
#
# Last, here's a visualization of the GGN.
plt.figure()
plt.title("GGN")
plt.imshow(GGN_mat)
plt.colorbar()
# %%
# Visual comparison: Hessian and GGN
# ----------------------------------
#
# To conclude, let's plot both the Hessian and GGN using the same limits:
min_value = min(GGN_mat.min(), H_mat.min())
max_value = max(GGN_mat.max(), H_mat.max())
fig, ax = plt.subplots(ncols=2)
ax[0].set_title("Hessian")
ax[0].imshow(H_mat, vmin=min_value, vmax=max_value)
ax[1].set_title("GGN")
ax[1].imshow(GGN_mat, vmin=min_value, vmax=max_value)
/python3-weixin-0.0.9.zip/python3-weixin-0.0.9/py3weixin/lib/WXBizMsgCrypt.py
# TODO: refactor the encryption/decryption code
import base64
import string
import random
import hashlib
import time
import struct
from Crypto.Cipher import AES
import xml.etree.cElementTree as ET
import sys
import socket
reload(sys)
import ierror
class FormatException(Exception):
pass
def throw_exception(message, exception_class=FormatException):
"""my define raise exception function"""
raise exception_class(message)
class SHA1:
"""计算公众平台的消息签名接口"""
def getSHA1(self, token, timestamp, nonce, encrypt):
"""用SHA1算法生成安全签名
@param token: 票据
@param timestamp: 时间戳
@param encrypt: 密文
@param nonce: 随机字符串
@return: 安全签名
"""
try:
sortlist = [token, timestamp, nonce, encrypt]
sortlist.sort()
sha = hashlib.sha1()
sha.update("".join(sortlist))
return ierror.WXBizMsgCrypt_OK, sha.hexdigest()
except Exception:
return ierror.WXBizMsgCrypt_ComputeSignature_Error, None
class XMLParse:
"""提供提取消息格式中的密文及生成回复消息格式的接口"""
# xml消息模板
AES_TEXT_RESPONSE_TEMPLATE = """
<xml>
<Encrypt><![CDATA[%(msg_encrypt)s]]></Encrypt>
<MsgSignature><![CDATA[%(msg_signaturet)s]]></MsgSignature>
<TimeStamp>%(timestamp)s</TimeStamp>
<Nonce><![CDATA[%(nonce)s]]></Nonce>
</xml>
"""
def extract(self, xmltext):
"""提取出xml数据包中的加密消息
@param xmltext: 待提取的xml字符串
@return: 提取出的加密消息字符串
"""
try:
xml_tree = ET.fromstring(xmltext)
encrypt = xml_tree.find("Encrypt")
touser_name = xml_tree.find("ToUserName")
return ierror.WXBizMsgCrypt_OK, encrypt.text, touser_name.text
except Exception:
return ierror.WXBizMsgCrypt_ParseXml_Error, None, None
def generate(self, encrypt, signature, timestamp, nonce):
"""生成xml消息
@param encrypt: 加密后的消息密文
@param signature: 安全签名
@param timestamp: 时间戳
@param nonce: 随机字符串
@return: 生成的xml字符串
"""
resp_dict = {
'msg_encrypt': encrypt,
'msg_signaturet': signature,
'timestamp': timestamp,
'nonce': nonce
}
resp_xml = self.AES_TEXT_RESPONSE_TEMPLATE % resp_dict
return resp_xml
class PKCS7Encoder():
"""提供基于PKCS7算法的加解密接口"""
block_size = 32
def encode(self, text):
""" 对需要加密的明文进行填充补位
@param text: 需要进行填充补位操作的明文
@return: 补齐明文字符串
"""
text_length = len(text)
# compute the number of padding bytes needed
amount_to_pad = self.block_size - (text_length % self.block_size)
if amount_to_pad == 0:
amount_to_pad = self.block_size
# character used for padding
pad = chr(amount_to_pad)
return text + pad * amount_to_pad
def decode(self, decrypted):
"""删除解密后明文的补位字符
@param decrypted: 解密后的明文
@return: 删除补位字符后的明文
"""
pad = ord(decrypted[-1])
if pad < 1 or pad > 32:
pad = 0
return decrypted[:-pad]
class Prpcrypt(object):
"""提供接收和推送给公众平台消息的加解密接口"""
def __init__(self, key):
# self.key = base64.b64decode(key+"=")
self.key = key
# use AES in CBC mode for encryption and decryption
self.mode = AES.MODE_CBC
def encrypt(self, text, appid):
"""对明文进行加密
@param text: 需要加密的明文
@return: 加密得到的字符串
"""
# 16位随机字符串添加到明文开头
text = self.get_random_str() + struct.pack("I", socket.htonl(len(text))) + text + appid
# pad the plaintext with the custom padding scheme
pkcs7 = PKCS7Encoder()
text = pkcs7.encode(text)
# encrypt
cryptor = AES.new(self.key, self.mode, self.key[:16])
try:
ciphertext = cryptor.encrypt(text)
# Base64-encode the encrypted string
return ierror.WXBizMsgCrypt_OK, base64.b64encode(ciphertext)
except Exception:
return ierror.WXBizMsgCrypt_EncryptAES_Error, None
def decrypt(self, text, appid):
"""对解密后的明文进行补位删除
@param text: 密文
@return: 删除填充补位后的明文
"""
try:
cryptor = AES.new(self.key, self.mode, self.key[:16])
# Base64-decode the ciphertext, then decrypt with AES-CBC
plain_text = cryptor.decrypt(base64.b64decode(text))
except Exception:
return ierror.WXBizMsgCrypt_DecryptAES_Error, None
try:
pad = ord(plain_text[-1])
# strip the padding
# pkcs7 = PKCS7Encoder()
# plain_text = pkcs7.encode(plain_text)
# drop the 16-byte random prefix
content = plain_text[16:-pad]
xml_len = socket.ntohl(struct.unpack("I", content[:4])[0])
xml_content = content[4:xml_len+4]
from_appid = content[xml_len+4:]
except Exception:
return ierror.WXBizMsgCrypt_IllegalBuffer, None
if from_appid != appid:
return ierror.WXBizMsgCrypt_ValidateAppid_Error, None
return 0, xml_content
def get_random_str(self):
""" 随机生成16位字符串
@return: 16位字符串
"""
rule = string.ascii_letters + string.digits
str = random.sample(rule, 16)
return "".join(str)
class WXBizMsgCrypt(object):
# Constructor.
# @param sToken: the Token set by the developer on the official account platform
# @param sEncodingAESKey: the EncodingAESKey set by the developer on the official account platform
# @param sAppId: the AppId of the enterprise account
def __init__(self, sToken, sEncodingAESKey, sAppId):
try:
self.key = base64.b64decode(sEncodingAESKey + "=")
assert len(self.key) == 32
except:
throw_exception("[error]: EncodingAESKey unvalid !", FormatException)
self.token = sToken
self.appid = sAppId
def EncryptMsg(self, sReplyMsg, sNonce, timestamp=None):
# Encrypt and package the message that the official account replies to the user.
# @param sReplyMsg: the message to reply to the user, an XML-format string
# @param timestamp: timestamp; may be generated by the caller or taken from the URL parameter timestamp, the current time is used if None
# @param sNonce: random string; may be generated by the caller or taken from the URL parameter nonce
# sEncryptMsg: the encrypted ciphertext that can be returned to the user directly, an XML-format string containing msg_signature, timestamp, nonce and encrypt
# return: 0 and sEncryptMsg on success, the corresponding error code and None on failure
pc = Prpcrypt(self.key)
ret, encrypt = pc.encrypt(sReplyMsg, self.appid)
if ret != 0:
return ret, None
if timestamp is None:
timestamp = str(int(time.time()))
# generate the secure signature
sha1 = SHA1()
ret, signature = sha1.getSHA1(self.token, timestamp, sNonce, encrypt)
if ret != 0:
return ret, None
xmlParse = XMLParse()
return ret, xmlParse.generate(encrypt, signature, timestamp, sNonce)
def DecryptMsg(self, sPostData, sMsgSignature, sTimeStamp, sNonce):
# Verify the authenticity of the message and obtain the decrypted plaintext.
# @param sMsgSignature: signature string, corresponding to the URL parameter msg_signature
# @param sTimeStamp: timestamp, corresponding to the URL parameter timestamp
# @param sNonce: random string, corresponding to the URL parameter nonce
# @param sPostData: ciphertext, corresponding to the POST request body
# xml_content: decrypted plaintext, valid when the return code is 0
# @return: 0 on success, the corresponding error code on failure
# verify the secure signature
xmlParse = XMLParse()
ret, encrypt, touser_name = xmlParse.extract(sPostData)
if ret != 0:
return ret, None
sha1 = SHA1()
ret, signature = sha1.getSHA1(self.token, sTimeStamp, sNonce, encrypt)
if ret != 0:
return ret, None
if not signature == sMsgSignature:
return ierror.WXBizMsgCrypt_ValidateSignature_Error, None
pc = Prpcrypt(self.key)
ret, xml_content = pc.decrypt(encrypt, self.appid)
return ret, xml_content
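# A hypothetical usage sketch of the class above (all values are placeholders,
# not real credentials; a valid 43-character EncodingAESKey is required):
#
#   wxcrypt = WXBizMsgCrypt(sToken, sEncodingAESKey, sAppId)
#   ret, encrypted_xml = wxcrypt.EncryptMsg(reply_xml, nonce)
#   ret, decrypted_xml = wxcrypt.DecryptMsg(post_data, msg_signature, timestamp, nonce)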
/arctic_aarch64-1.80.1-py3-none-any.whl/arctic/_util.py
import logging
import numpy as np
import pymongo
from pandas import DataFrame
try:
from pandas.testing import assert_frame_equal
except ImportError:
from pandas.util.testing import assert_frame_equal
from ._config import FW_POINTERS_CONFIG_KEY, FwPointersCfg
logger = logging.getLogger(__name__)
NP_OBJECT_DTYPE = np.dtype('O')
# Avoid import-time extra logic
_use_new_count_api = None
def get_fwptr_config(version):
return FwPointersCfg[version.get(FW_POINTERS_CONFIG_KEY, FwPointersCfg.DISABLED.name)]
def _detect_new_count_api():
try:
mongo_v = [int(v) for v in pymongo.version.split('.')]
return (mongo_v[0], mongo_v[1]) >= (3, 7)  # count_documents() is available since pymongo 3.7
except:
return False
def indent(s, num_spaces):
s = s.split('\n')
s = [(num_spaces * ' ') + line for line in s]
s = '\n'.join(s)
return s
def are_equals(o1, o2, **kwargs):
try:
if isinstance(o1, DataFrame):
assert_frame_equal(o1, o2, **kwargs)
return True
return o1 == o2
except Exception:
return False
def enable_sharding(arctic, library_name, hashed=True, key='symbol'):
"""
Enable sharding on a library
Parameters:
-----------
arctic: `arctic.Arctic` Arctic class
library_name: `basestring` library name
hashed: `bool` if True, use hashed sharding, if False, use range sharding
See https://docs.mongodb.com/manual/core/hashed-sharding/,
as well as https://docs.mongodb.com/manual/core/ranged-sharding/ for details.
key: `basestring` key to be used for sharding. Defaults to 'symbol', applicable to
all of Arctic's built-in stores except for BSONStore, which typically uses '_id'.
See https://docs.mongodb.com/manual/core/sharding-shard-key/ for details.
"""
c = arctic._conn
lib = arctic[library_name]._arctic_lib
dbname = lib._db.name
library_name = lib.get_top_level_collection().name
try:
c.admin.command('enablesharding', dbname)
except pymongo.errors.OperationFailure as e:
if 'already enabled' not in str(e):
raise
if not hashed:
logger.info("Range sharding '" + key + "' on: " + dbname + '.' + library_name)
c.admin.command('shardCollection', dbname + '.' + library_name, key={key: 1})
else:
logger.info("Hash sharding '" + key + "' on: " + dbname + '.' + library_name)
c.admin.command('shardCollection', dbname + '.' + library_name, key={key: 'hashed'})
def mongo_count(collection, filter=None, **kwargs):
filter = {} if filter is None else filter
global _use_new_count_api
_use_new_count_api = _detect_new_count_api() if _use_new_count_api is None else _use_new_count_api
# Temporary fix for compatibility with pymongo>=3.7 that also avoids deprecation warnings
if _use_new_count_api:
# Projection is ignored for count_documents
return collection.count_documents(filter=filter, **kwargs)
else:
return collection.count(filter=filter, **kwargs)
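# Hypothetical usage sketch (database and collection names below are made up
# for illustration): the helper transparently uses count_documents() on newer
# pymongo versions and falls back to count() on older ones.
#
#   from pymongo import MongoClient
#   collection = MongoClient()['arctic']['library']
#   n_docs = mongo_count(collection, filter={'symbol': 'EURUSD'})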
/scoreplayer_external-0.2.11-py3-none-any.whl/scoreplayer_external.py
from zeroconf import ServiceBrowser, Zeroconf, ServiceStateChange, IPVersion
from pythonosc import osc_message_builder, udp_client, dispatcher, osc_server
import time
import socket
import threading
from datetime import datetime
from itertools import zip_longest
from queue import Queue
#Convenience function from itertools recipes
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
#The scoreObject class is used to provide a python representation of objects
#drawn onto the ScorePlayer canvas. It is subclassed for specific object types.
class scoreObject:
_addressPrefix = '/Renderer/Command/'
def __init__(self, name, external):
self.name = name
self.external = external
self.removed = False
#Every object can have its colour set.
def setColour(self, r, g, b, a=255):
self.sendCommand('setColour', int(r), int(g), int(b), int(a))
#The method used to actually send the OSC command via our external object.
#It checks to make sure that the object hasn't already been removed on the player.
def sendCommand(self, command, *args):
if self.removed:
print ('{} has been removed'.format(self.name))
return
self.external.sendMessage(scoreObject._addressPrefix + self.name + '/' + command, *args)
class scoreNonLineObject(scoreObject):
#Commands common to all objects but lines.
#(Abstract class not used for actual objects)
def addLayer(self, objname, part, x, y, width, height):
self.sendCommand('addLayer', objname, part, int(x), int(y), int(width), int(height))
return scoreLayerObject(objname, self.external)
def addScroller(self, objname, part, x, y, width, height, scrollerWidth, speed):
self.sendCommand('addScroller', objname, part, int(x), int(y), int(width), int(height), int(scrollerWidth), float(speed))
return scoreScrollerObject(objname, self.external)
def addText(self, objname, part, x, y, fontSize=36):
self.sendCommand('addText', objname, part, int(x), int(y), float(fontSize))
return scoreTextObject(objname, self.external)
def addGlyph(self, objname, part, x, y, glyphSize=36):
self.sendCommand('addGlyph', objname, part, int(x), int(y), float(glyphSize))
return scoreGlyphObject(objname, self.external)
def addStave(self, objname, part, x, y, width, height, lineWidth):
self.sendCommand('addStave', objname, part, int(x), int(y), int(width), int(height), int(lineWidth))
return scoreStaveObject(objname, self.external)
def addLine(self, objname, part, x1, y1, x2, y2, lineWidth):
self.sendCommand('addLine', objname, part, int(x1), int(y1), int(x2), int(y2), int(lineWidth))
return scoreLineObject(objname, self.external)
class scoreNonLineCanvasObject(scoreNonLineObject):
#Commands common to all objects but lines and the canvas.
def remove(self):
self.sendCommand('remove')
self.removed = True
def setOpacity(self, opacity):
self.sendCommand('setOpacity', float(opacity))
def fade(self, opacity, duration):
self.sendCommand('fade', float(opacity), float(duration))
def setPosition(self, x, y):
self.sendCommand('setPosition', int(x), int(y))
def move(self, x, y, duration):
self.sendCommand('move', int(x), int(y), float(duration))
class scoreCanvasObject(scoreNonLineObject):
def clear(self):
self.sendCommand('clear')
class scoreLineObject(scoreObject):
def remove(self):
self.sendCommand('remove')
self.removed = True
def setOpacity(self, opacity):
self.sendCommand('setOpacity', float(opacity))
def fade(self, opacity, duration):
self.sendCommand('fade', float(opacity), float(duration))
def setWidth(self, width):
self.sendCommand('setWidth', int(width))
def setStartPoint(self, x, y):
self.sendCommand('setStartPoint', int(x), int(y))
def setEndPoint(self, x, y):
self.sendCommand('setEndPoint', int(x), int(y))
class scoreLayerObject(scoreNonLineCanvasObject):
def loadImage(self, imgname, autosizing=0):
self.sendCommand('loadImage', imgname, autosizing)
def clearImage(self):
self.sendCommand('clearImage')
def setSize(self, width, height):
self.sendCommand('setSize', int(width), int(height))
class scoreScrollerObject(scoreLayerObject):
#This inherits from the Layer Object. This may need to change in future.
def setScrollerWidth(self, scrollerWidth):
self.sendCommand('setScrollerWidth', int(scrollerWidth))
def setScrollerPosition(self, scrollerPosition):
self.sendCommand('setScrollerPosition', int(scrollerPosition))
def setScrollerSpeed(self, scrollerSpeed):
self.sendCommand('setScrollerSpeed', float(scrollerSpeed))
def start(self):
self.sendCommand('start')
def stop(self):
self.sendCommand('stop')
class scoreTextObject(scoreNonLineCanvasObject):
def setText(self, text):
self.sendCommand('setText', text)
def setFont(self, font):
self.sendCommand('setFont', font)
def setFontSize(self, fontSize):
self.sendCommand('setFontSize', float(fontSize))
class scoreGlyphObject(scoreNonLineCanvasObject):
def setGlyph(self, glyphType):
self.sendCommand('setGlyph', glyphType)
def setGlyphSize(self, glyphSize):
self.sendCommand('setGlyphSize', float(glyphSize))
class scoreStaveObject(scoreNonLineCanvasObject):
def clear(self):
self.sendCommand('clear')
def setSize(self, width, height):
self.sendCommand('setSize', int(width), int(height))
def setLineWidth(self, lineWidth):
self.sendCommand('setLineWidth', int(lineWidth))
def setClef(self, clef, position):
self.sendCommand('setClef', clef, int(position))
def removeClef(self, position):
self.sendCommand('removeClef', int(position))
def addNotehead(self, note, position, filled=1):
self.sendCommand('addNotehead', note, int(position), int(filled))
def addNote(self, note, position, duration):
self.sendCommand('addNote', note, int(position), int(duration))
def removeNote(self, note, position):
self.sendCommand('removeNote', note, int(position))
class scorePlayerExternal:
#The protocol version expected. Current version is 16.
protocolVersion = 16
def __init__(self):
self.__services = {}
self.listeningPort = 7000
self.__service = None
self.__connectionHandler = None
self.errorHandler = None
self.disconnectionHandler = None
self.__statusHandler = None
self.__clientListHandler = None
self.playHandler = None
self.pauseHandler = None
self.resetHandler = None
self.tickHandler = None
self.loadHandler = None
self.seekHandler = None
self.seekingHandler = None
self.extMessageHandler = None
self.otherMessageHandler = None
self.printTicks = False
self.printMessages = False
self.printTimestamp = True
self.printToQueue = False
self.printQueue = Queue()
self.__location = 0
self.__zeroconf = None
self.__connected = False
self.__registrationTimer = None
#Set up our message routing and start listening on our port.
self.__dispatcher = dispatcher.Dispatcher()
self.__dispatcher.map('/Server/*', self.__printMessage)
self.__dispatcher.map('/Control/*', self.__printMessage)
self.__dispatcher.map('/External/*', self.__extMessage)
self.__dispatcher.map('/Server/RegistrationOK', self.__onConnect)
self.__dispatcher.map('/Server/BadProtocolVersion', self.__onError)
self.__dispatcher.map('/Server/Bye!', self.__onDisconnect)
self.__dispatcher.map('/External/NewServer', self.__onReconnect)
self.__dispatcher.map('/Control/Play', self.__onPlay)
self.__dispatcher.map('/Control/Pause', self.__onPause)
self.__dispatcher.map('/Control/Reset', self.__onReset)
self.__dispatcher.map('/Control/Seek', self.__whileSeeking)
self.__dispatcher.map('/Control/SeekFinished', self.__onSeek)
self.__dispatcher.map('/Tick', self.__onTick)
self.__dispatcher.map('/Server/LoadComplete', self.__onLoad)
self.__dispatcher.map('/Server/RequestRejected', self.__onError)
self.__dispatcher.map('/External/Error', self.__onError)
self.__dispatcher.map('/Status', self.__statusReceived)
self.__dispatcher.map('/Server/ClientList', self.__clientListReceived)
self.__dispatcher.set_default_handler(self.__otherMessage)
#Start our zeroconf browser.
self.__startBonjour()
#If our port is unavailable, increase the port number by 1 and try again.
while True:
try:
self.__server = osc_server.ThreadingOSCUDPServer(('0.0.0.0', self.listeningPort), self.__dispatcher)
break
except:
self.listeningPort += 1
self.__server_thread = threading.Thread(target=self.__server.serve_forever)
self.__server_thread.start()
print('Listening on port {}'.format(self.listeningPort))
def __startBonjour(self):
if self.__zeroconf == None:
#Set up our service browser
self.__services.clear()
#self.__zeroconf = Zeroconf(ip_version=IPVersion.All)
self.__zeroconf = Zeroconf()
self.__browser = ServiceBrowser(self.__zeroconf, '_decibel._udp.local.', handlers=[self.__serviceChange])
time.sleep(1)
def __stopBonjour(self):
if self.__zeroconf != None:
self.__browser.cancel()
self.__zeroconf.close()
self.__zeroconf = None
def __parseServiceName(self, service):
serverName = service.server
scoreName = service.name
if serverName.endswith('.local.'):
serverName = serverName[:-7]
scoreName = scoreName[:(scoreName.find(service.type)) - 1]
scoreName = scoreName[:(scoreName.rfind('.'))]
return [serverName, scoreName]
def findServers(self):
self.__startBonjour()
serverList = []
for service in self.__services.values():
serverDetails = self.__parseServiceName(service)
address = socket.inet_ntoa(service.addresses[0])
serverList.append([*serverDetails, address, service.port])
return serverList
def selectServer(self):
self.__startBonjour()
while True:
i = 1
print('Choose an iPad to connect to')
servers = []
#List each service we've discovered
for service in self.__services.values():
if not service is None:
serverDetails = self.__parseServiceName(service)
print('{}: {} ({})'.format(i, serverDetails[0], serverDetails[1]))
#Save the service info to an array
servers.append(service)
i += 1
print('Or\n{}: Refresh List'.format(i))
while True:
try:
selection = int(input('Enter Selection: '))
except ValueError:
print('Invalid selection')
continue
if selection == i:
print()
break
elif selection >= i or selection < 0:
print('Invalid selection')
else:
self.__service = servers[selection - 1]
return
def connect(self, connectionHandler, errorHandler=None):
if self.__service is None:
print('No server selected')
return
#Connect to our server
address = socket.inet_ntoa(self.__service.addresses[0])
return self.connectToAddress(address, self.__service.port, connectionHandler, errorHandler)
def __registrationTimeout(self):
self.__printMessage('/External/RegistrationTimeout', 'Connection timed out')
self.__onError('/External/RegistrationTimeout', 'Connection timed out')
self.__registrationTimer = None
#Connect to a specified address and port. This can be used if the required service cannot
#be found using zeroconf.
def connectToAddress(self, address, port, connectionHandler=None, errorHandler=None):
if self.__connected:
#Disconnect if we're connected.
self.disconnect()
#Stop our zeroconf browser
self.__stopBonjour()
self.__connectionHandler = connectionHandler
self.errorHandler = errorHandler
self.__client = udp_client.SimpleUDPClient(address, port)
self.__client.send_message('/Server/RegisterExternal', ['Decibel Networking Protocol v' + str(scorePlayerExternal.protocolVersion), self.listeningPort])
if self.__registrationTimer != None:
self.__registrationTimer.cancel()
self.__registrationTimer = threading.Timer(5, self.__registrationTimeout)
self.__registrationTimer.start()
if self.__connectionHandler is not None:
return scoreCanvasObject('canvas', self)
def sendMessage(self, message, *args):
if not self.__connected:
print('Not connected')
return
self.__client.send_message(message, args)
def __serviceChange(self, zeroconf, service_type, name, state_change):
if state_change is ServiceStateChange.Added:
self.__services[name] = zeroconf.get_service_info(service_type, name)
elif state_change is ServiceStateChange.Removed:
del self.__services[name]
def __onConnect(self, oscAddress):
if self.__registrationTimer != None:
self.__registrationTimer.cancel()
self.__registrationTimer = None
self.__connected = True
if self.__connectionHandler is not None:
handler = self.__connectionHandler
self.__connectionHandler = None
#time.sleep(0.1)
handler()
def __onError(self, oscAddress, *oscArgs):
if self.errorHandler is not None:
if len(oscArgs) > 0:
self.errorHandler(oscAddress, oscArgs[0])
else:
self.errorHandler(oscAddress, None)
def __onPlay(self, oscAddress):
if self.playHandler is not None:
self.playHandler()
def __onPause(self, oscAddress, location):
self.__location = location
if self.pauseHandler is not None:
self.pauseHandler(location)
def __onReset(self, oscAddress):
self.__location = 0
if self.resetHandler is not None:
self.resetHandler()
def __whileSeeking(self, oscAddress, location):
self.__location = location
if self.seekingHandler is not None:
self.seekingHandler(location)
def __onSeek(self, oscAddress):
if self.seekHandler is not None:
self.seekHandler(self.__location)
def __onTick(self, oscAddress, location):
self.__printMessage(oscAddress, location)
self.__location = location
if self.tickHandler is not None:
self.tickHandler(location)
def __extMessage(self, oscAddress, *oscArgs):
self.__printMessage(oscAddress, *oscArgs)
#Add capabilities here later
if oscAddress == "/External/NewServer" or oscAddress == "/External/Error":
#These are dealt with elsewhere.
return
if self.extMessageHandler is not None:
self.extMessageHandler(oscAddress, *oscArgs)
def __otherMessage(self, oscAddress, *oscArgs):
self.__printMessage(oscAddress, *oscArgs)
if self.otherMessageHandler is not None:
self.otherMessageHandler(oscAddress, *oscArgs)
def __onLoad(self, oscAddress):
self.__location = 0
if self.loadHandler is not None:
self.loadHandler()
def __statusReceived(self, oscAddress, *oscArgs):
self.__printMessage(oscAddress, *oscArgs)
if self.__statusHandler is not None:
if len(oscArgs) < 7:
#We don't have the right number of arguments.
return
#Format our status message into a dictionary.
status = {
"name": oscArgs[0],
"composer": oscArgs[1],
"scoreType": oscArgs[2],
"scoreVersion": oscArgs[3],
"playerState": oscArgs[4],
"location": oscArgs[5],
"duration": oscArgs[6]
}
handler = self.__statusHandler
self.__statusHandler = None
handler(status)
def __clientListReceived(self, oscAddress, *oscArgs):
if self.__clientListHandler is not None:
clientList = []
for device,version in grouper(oscArgs, 2):
currentClient = {
"deviceName": device,
"version": version
}
clientList.append(currentClient)
handler = self.__clientListHandler
self.__clientListHandler = None
handler(clientList)
def __printMessage(self, oscAddress, *oscArgs):
if oscAddress == '/Tick':
if not self.printTicks:
return
elif not self.printMessages:
return
argsString = ''
for arg in oscArgs:
if isinstance(arg, str):
argsString += '"{}", '.format(arg)
else:
argsString += '{}, '.format(arg)
if len(argsString) > 1:
argsString = argsString[:-2]
if self.printTimestamp:
outString = '{}: {} {}'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), oscAddress, argsString)
else:
outString = '{} {}'.format(oscAddress, argsString)
if self.printToQueue:
outString = outString.rstrip()
self.printQueue.put(outString)
else:
#Explicitly print our newline so it isn't separated from the message when multiple
#messages arrive at the same time.
outString = outString.rstrip() + '\n'
print(outString, end="")
#Connect our external to a new device if the old server has left the network.
def __onReconnect(self, oscAddress, address, port):
self.connectToAddress(address, port)
def __onDisconnect(self, oscAddress):
if self.disconnectionHandler is not None:
self.disconnectionHandler()
def disconnect(self):
self.sendMessage('/Server/UnregisterExternal', self.listeningPort)
self.__connected = False
def shutdown(self):
self.disconnect()
self.__server.shutdown()
#Stop bonjour on the off chance that it is running
self.__stopBonjour()
#Commands to easily send basic control signals to the iPad
def play(self):
self.sendMessage('/Control/Play')
def pause(self, location=-1):
if location == -1:
location = self.__location
self.sendMessage('/Control/Pause', float(location))
def reset(self):
self.sendMessage('/Control/Reset')
def loadScore(self, name, composer, scoreType, scoreVersion='0'):
self.sendMessage('/Server/LoadRequest', name, composer, scoreType, scoreVersion)
def getStatus(self, statusHandler):
self.__statusHandler = statusHandler
self.sendMessage('/Master/GetStatus')
def getClientList(self, clientListHandler):
self.__clientListHandler = clientListHandler
self.sendMessage('/Server/GetClientList')
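# Hypothetical usage sketch (object name, part number and coordinates are made
# up for illustration): connect to a ScorePlayer instance and draw a text object.
if __name__ == '__main__':
    external = scorePlayerExternal()
    external.selectServer()  # interactive server selection via zeroconf
    canvas = external.connect(lambda: print('Connected'))
    time.sleep(1)  # give the registration handshake a moment to complete
    label = canvas.addText('label1', 0, 100, 100, fontSize=48)
    label.setText('Hello from the external')
    external.shutdown()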
/kessel_sqladmin-0.5.0-py3-none-any.whl/kessel_sqladmin-0.5.0.dist-info/licenses/LICENSE.md
Copyright © 2022, Amin Alaee.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/django_url_simplify-0.0.2-py3-none-any.whl/django_url_simplify-0.0.2.dist-info/DESCRIPTION.rst
============
Url Simplify
============
.. image:: https://badge.fury.io/py/django-url-simplify.svg
:target: https://badge.fury.io/py/django-url-simplify
Url Simplify is a simple Django app which generates simplified base62 urls
from regular ones.
Detailed documentation is in the "docs" directory.
Quick start
-----------
1. Add ``url_simplify`` to your INSTALLED_APPS setting like this::
INSTALLED_APPS = [
...
'url_simplify',
]
2. Include the ``url_simplify`` URLconf in your project urls.py like this::
path('url_simplify/', include('url_simplify.urls')),
3. Run ``python manage.py migrate`` to create the url_simplify models.
4. Start the development server and visit http://127.0.0.1:8000/admin/
to create a url_simplify (you'll need the Admin app enabled).
5. Visit http://127.0.0.1:8000/url_simplify/ to start generating simplified urls.
/taskcc-alipay-sdk-python-3.3.398.tar.gz/taskcc-alipay-sdk-python-3.3.398/alipay/aop/api/domain/AlipayInsSceneApplicationOutsideApplyModel.py
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InsPerson import InsPerson
class AlipayInsSceneApplicationOutsideApplyModel(object):
def __init__(self):
self._applicant = None
self._biz_factor = None
self._effect_end_time = None
self._effect_start_time = None
self._insureds = None
self._out_biz_no = None
self._period = None
self._premium = None
self._prod_code = None
self._source = None
self._sum_insured = None
@property
def applicant(self):
return self._applicant
@applicant.setter
def applicant(self, value):
if isinstance(value, InsPerson):
self._applicant = value
else:
self._applicant = InsPerson.from_alipay_dict(value)
@property
def biz_factor(self):
return self._biz_factor
@biz_factor.setter
def biz_factor(self, value):
self._biz_factor = value
@property
def effect_end_time(self):
return self._effect_end_time
@effect_end_time.setter
def effect_end_time(self, value):
self._effect_end_time = value
@property
def effect_start_time(self):
return self._effect_start_time
@effect_start_time.setter
def effect_start_time(self, value):
self._effect_start_time = value
@property
def insureds(self):
return self._insureds
@insureds.setter
def insureds(self, value):
if isinstance(value, list):
self._insureds = list()
for i in value:
if isinstance(i, InsPerson):
self._insureds.append(i)
else:
self._insureds.append(InsPerson.from_alipay_dict(i))
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def period(self):
return self._period
@period.setter
def period(self, value):
self._period = value
@property
def premium(self):
return self._premium
@premium.setter
def premium(self, value):
self._premium = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def sum_insured(self):
return self._sum_insured
@sum_insured.setter
def sum_insured(self, value):
self._sum_insured = value
def to_alipay_dict(self):
params = dict()
if self.applicant:
if hasattr(self.applicant, 'to_alipay_dict'):
params['applicant'] = self.applicant.to_alipay_dict()
else:
params['applicant'] = self.applicant
if self.biz_factor:
if hasattr(self.biz_factor, 'to_alipay_dict'):
params['biz_factor'] = self.biz_factor.to_alipay_dict()
else:
params['biz_factor'] = self.biz_factor
if self.effect_end_time:
if hasattr(self.effect_end_time, 'to_alipay_dict'):
params['effect_end_time'] = self.effect_end_time.to_alipay_dict()
else:
params['effect_end_time'] = self.effect_end_time
if self.effect_start_time:
if hasattr(self.effect_start_time, 'to_alipay_dict'):
params['effect_start_time'] = self.effect_start_time.to_alipay_dict()
else:
params['effect_start_time'] = self.effect_start_time
if self.insureds:
if isinstance(self.insureds, list):
for i in range(0, len(self.insureds)):
element = self.insureds[i]
if hasattr(element, 'to_alipay_dict'):
self.insureds[i] = element.to_alipay_dict()
if hasattr(self.insureds, 'to_alipay_dict'):
params['insureds'] = self.insureds.to_alipay_dict()
else:
params['insureds'] = self.insureds
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.period:
if hasattr(self.period, 'to_alipay_dict'):
params['period'] = self.period.to_alipay_dict()
else:
params['period'] = self.period
if self.premium:
if hasattr(self.premium, 'to_alipay_dict'):
params['premium'] = self.premium.to_alipay_dict()
else:
params['premium'] = self.premium
if self.prod_code:
if hasattr(self.prod_code, 'to_alipay_dict'):
params['prod_code'] = self.prod_code.to_alipay_dict()
else:
params['prod_code'] = self.prod_code
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.sum_insured:
if hasattr(self.sum_insured, 'to_alipay_dict'):
params['sum_insured'] = self.sum_insured.to_alipay_dict()
else:
params['sum_insured'] = self.sum_insured
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsSceneApplicationOutsideApplyModel()
if 'applicant' in d:
o.applicant = d['applicant']
if 'biz_factor' in d:
o.biz_factor = d['biz_factor']
if 'effect_end_time' in d:
o.effect_end_time = d['effect_end_time']
if 'effect_start_time' in d:
o.effect_start_time = d['effect_start_time']
if 'insureds' in d:
o.insureds = d['insureds']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'period' in d:
o.period = d['period']
if 'premium' in d:
o.premium = d['premium']
if 'prod_code' in d:
o.prod_code = d['prod_code']
if 'source' in d:
o.source = d['source']
if 'sum_insured' in d:
o.sum_insured = d['sum_insured']
return o
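# Hypothetical usage sketch (field values are made up for illustration):
#
#   model = AlipayInsSceneApplicationOutsideApplyModel()
#   model.out_biz_no = '20230101000000001'
#   model.prod_code = 'EXAMPLE_PROD_CODE'
#   model.premium = '100.00'
#   params = model.to_alipay_dict()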
/pytest-collect-formatter-0.4.0.tar.gz/pytest-collect-formatter-0.4.0/README.rst
========================
pytest-collect-formatter
========================
Pytest plugin for formatting output of the collected tests.
* Free software: MIT license
Requirements
------------
* pyyaml
* dicttoxml
Installation
------------
You can install "pytest-collect-formatter" via `pip`_::
$ pip install pytest-collect-formatter
Usage
-----
* Use this plugin by running pytest as usual and pass the following options to customize the collection format;
it is best combined with the `--collect-only` option
>>> collect-formatter:
--collect-output-file=COLLECT_OUTPUT_FILE
Saves collected test items to the file
--collect-format=COLLECT_FORMAT
Saves collected test items specified format [xml, yaml, json]
--collect-type=COLLECT_TYPE
Format output results in classic pytest view or in 'path' view [classic, path], default classic
Example of usage
----------------
$ pytest tests --collect-output-file my_tests_structure.xml --collect-format xml
And you will get the my_tests_structure.xml
Examples of formats
-------------------
PATH
____
JSON
.. code-block::
[
{
"type": "path",
"title": "examples",
"children": [
{
"type": "path",
"title": "tests",
"children": [
{
"type": "path",
"title": "test_formatter",
"children": [
{
"type": "path",
"title": "test_fromatter_v1.py",
"children": [
{
"type": "pytest_unit",
"title": "TestFormatter",
"children": [
{
"type": "pytest_unit",
"title": "test_inside_class",
"children": []
},
{
...
YAML
.. code-block::
- children:
- children:
- children:
- children:
- children:
- children: []
title: test_inside_class
type: pytest_unit
- children: []
title: test_inside_class_parametrize[1]
type: pytest_unit
- children: []
title: test_inside_class_parametrize[2]
type: pytest_unit
- children: []
title: test_inside_class_parametrize[3]
...
XML
.. code-block::
<?xml version="1.0" ?>
<root>
<item>
<type>path</type>
<title>examples</title>
<children>
<item>
<type>path</type>
<title>tests</title>
<children>
<item>
<type>path</type>
<title>test_formatter</title>
<children>
<item>
<type>path</type>
<title>test_fromatter_v1.py</title>
<children>
<item>
<type>pytest_unit</type>
<title>TestFormatter</title>
<children>
<item>
<type>pytest_unit</type>
<title>test_inside_class</title>
<children/>
</item>
...
CLASSIC
_______
JSON
.. code-block::
[
{
"type": "Package",
"value": "test_formatter",
"children": [
{
"type": "Module",
"value": "test_fromatter_v1.py",
"children": [
{
"type": "Class",
"value": "TestFormatter",
"children": [
{
"type": "Function",
"value": "test_inside_class"
}
....
YAML
.. code-block::
- children:
- children:
- children:
- type: Function
value: test_inside_class
- type: Function
value: test_inside_class_parametrize[1]
- type: Function
value: test_inside_class_parametrize[2]
- type: Function
value: test_inside_class_parametrize[3]
- type: Function
value: test_inside_class_parametrize[4]
type: Class
value: TestFormatter
- type: Function
value: test_outside_of_class
type: Module
value: test_fromatter_v1.py
type: Package
value: test_formatter
....
XML
.. code-block::
<?xml version="1.0" ?>
<root>
<item>
<type>Package</type>
<value>test_formatter</value>
<children>
<item>
<type>Module</type>
<value>test_fromatter_v1.py</value>
<children>
<item>
<type>Class</type>
<value>TestFormatter</value>
<children>
<item>
<type>Function</type>
<value>test_inside_class</value>
</item>
<item>
<type>Function</type>
<value>test_inside_class_parametrize[1]</value>
</item>
<item>
<type>Function</type>
<value>test_inside_class_parametrize[2]</value>
</item>
<item>
<type>Function</type>
<value>test_inside_class_parametrize[3]</value>
</item>
<item>
<type>Function</type>
<value>test_inside_class_parametrize[4]</value>
</item>
</children>
</item>
<item>
<type>Function</type>
<value>test_outside_of_class</value>
</item>
</children>
</item>
</children>
....
More examples can be found in the examples folder, as well as in the tests structure.
Issues
------
If you encounter any problems, please `file an issue`_ along with a detailed description.
Credits
-------
.. _`file an issue`: https://github.com/pytest-dev/pytest-slack/issues
.. _`pip`: https://pypi.python.org/pypi/pip/
/grav_toolbox-0.2.3-py3-none-any.whl/gravtools/CG5_utils/cg5_survey.py
import pandas as pd
import numpy as np
import re
import datetime as dt
from gravtools.models.exceptions import InvaliFileContentError
from gravtools import settings
class DataCursor:
"""Data cursor for matplotlib plot. X and Y coordinates are printed life to the plot canvas.
From: https://stackoverflow.com/questions/4652439/is-there-a-matplotlib-equivalent-of-matlabs-datacursormode
"""
# text_template = 'x: %0.2f\ny: %0.2f'
x, y = 0.0, 0.0
xoffset, yoffset = -20, 20
text_template = 'x: %0.2f\ny: %0.2f'
def __init__(self, ax):
self.ax = ax
self.annotation = ax.annotate(self.text_template,
xy=(self.x, self.y), xytext=(self.xoffset, self.yoffset),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
)
self.annotation.set_visible(False)
def __call__(self, event):
self.event = event
# xdata, ydata = event.artist.get_data()
# self.x, self.y = xdata[event.ind], ydata[event.ind]
self.x, self.y = event.mouseevent.xdata, event.mouseevent.ydata
if self.x is not None:
self.annotation.xy = self.x, self.y
self.annotation.set_text(self.text_template % (self.x, self.y))
self.annotation.set_visible(True)
event.canvas.draw()
class CG5SurveyParameters:
"""CG-5 Survey parameters.
Scintrex CG-5 survey parameters from the 'CG-5 SURVEY' block in
the observation file (text format).
The class is initialized either by the :py:meth:`.__init__`,
method by passing the attributes as keyword arguments directly,
or by the class method :py:meth:`.populate_from_obs_file_string`
that parses the content of a CG-5 observation file (txt).
Attributes
----------
survey_name : str
Name of gravity survey.
client : str
Client of survey.
operator : str
Name of the survey's operator.
long_deg : float
Geographical longitude at begin of survey [°].
lat_deg : float
Geographical latitude at begin of survey [°].
zone : str
Timezone of all time records.
date_time : datetime object
Start epoch of the survey.
"""
def __init__(self, *args, **kwargs):
"""
Parameters
----------
*args
Variable length argument list.
**kwargs : dict
Keyword arguments that are parsed to class attributes.
"""
self.survey_name = kwargs.get('survey_name', '') # string
self.instrument_sn = kwargs.get('instrument_sn', '') # string
self.client = kwargs.get('client', '') # string
self.operator = kwargs.get('operator', '') # string
self.long_deg = kwargs.get('long_deg', np.nan) # float
self.lat_deg = kwargs.get('lat_deg', np.nan) # float
self.zone = kwargs.get('zone', '') # string
self.date_time = kwargs.get('date_time', None) # datetime object (timezone aware)
@classmethod
def create_from_obs_file_string(cls, str_obs_file):
""" Create instance of class :py:class:`.CG5SurveyParameters` by
parsing a CG-5 observation file string.
Parameters
----------
str_obs_file : str
Content of the CG-5 observation file (.txt).
Returns
-------
Object: :py:class:`.CG5SurveyParameters`
Initialized class instance.
"""
# Parse observation file string:
# expr = r'(\/\tCG-5 SURVEY\s*\n\/\s+Survey name:\s*(?P<survey_name>\S+)\s*\n\/\tInstrument S\/N:\s*(?P<instrument_sn>\S+)\s*\n\/\tClient:\s*(?P<client>\S+)\s*\n\/\tOperator:\s*(?P<operator>\S+)\s*\n\/\tDate:\s*(?P<date_year>\d{4})\/\s*(?P<date_month>\d{1,2})\/\s*(?P<date_day>\d{1,2})\s*\n\/\tTime:\s*(?P<time_hour>\d{2}):(?P<time_minu>\d{2}):(?P<time_sec>\d{2})\s*\n\/\tLONG:\s*(?P<long_num>\d*\.?\d*)\s+(?P<long_dir>[E|N|S|W])\s*\n\/\tLAT:\s*(?P<lat_num>\d*\.?\d*)\s+(?P<lat_dir>[E|N|S|W])\s*\n\/\tZONE:\s*(?P<zone>\d*)\s*\n\/\tGMT DIFF.:\s*(?P<gmt_diff>\d*\.?\d*))+\s*\n'
expr = r'(\/\tCG-5 SURVEY\s*\n\/\s+Survey name:\s*(?P<survey_name>\S+)\s*\n' \
r'\/\tInstrument S\/N:\s*(?P<instrument_sn>\S+)\s*\n' \
r'\/\tClient:\s*(?P<client>\S+)\s*\n' \
r'\/\tOperator:\s*(?P<operator>\S+)\s*\n' \
r'\/\tDate:\s*(?P<date_year>\d{4})\/\s*(?P<date_month>\d{1,2})\/\s*(?P<date_day>\d{1,2})\s*\n' \
r'\/\tTime:\s*(?P<time_hour>\d{2}):(?P<time_minu>\d{2}):(?P<time_sec>\d{2})\s*\n' \
r'\/\tLONG:\s*(?P<long_num>\d*\.?\d*)\s+(?P<long_dir>[E|N|S|W])\s*\n' \
r'\/\tLAT:\s*(?P<lat_num>\d*\.?\d*)\s+(?P<lat_dir>[E|N|S|W])\s*\n' \
r'\/\tZONE:\s*(?P<zone>\d*)\s*\n' \
r'\/\tGMT DIFF.:\s*(?P<gmt_diff>\d*\.?\d*))+\s*\n'
# Read survey blocks from obs file string (only one block allowed!):
survey_count = 0 # number of survey blocks in obs file string
for survey_block in re.finditer(expr, str_obs_file):
survey_dict = survey_block.groupdict()
survey_count += 1
if survey_count == 1: # OK => Parse data in string:
# Handle geographic locations:
longitude_deg = float(survey_dict['long_num'])
if survey_dict['long_dir'] == "W":
longitude_deg = -longitude_deg
latitude_deg = float(survey_dict['lat_num'])
if survey_dict['lat_dir'] == "S":
latitude_deg = -latitude_deg
instrument_sn = survey_dict['instrument_sn'] # instrument serial number
return cls(survey_name=survey_dict['survey_name'],
instrument_sn=instrument_sn,
client=survey_dict['client'],
operator=survey_dict['operator'],
long_deg=longitude_deg,
lat_deg=latitude_deg,
zone=survey_dict['zone'],
date_time=dt.datetime(int(survey_dict['date_year']),
int(survey_dict['date_month']),
int(survey_dict['date_day']),
int(survey_dict['time_hour']),
int(survey_dict['time_minu']),
int(survey_dict['time_sec']),
tzinfo=dt.timezone(dt.timedelta(hours=float(survey_dict['gmt_diff'])))
)
)
elif survey_count == 0: # Not available
return cls() # Initialize with default values
else: # More than 1 block found => Error!
raise InvaliFileContentError('{} "CG-5 SURVEY" blocks found in observation file, '
'but only one expected.'.format(survey_count))
# Error message if the block is found more than once.
class CG5SetupParameters:
"""CG-5 Survey parameters.
Scintrex CG-5 Setup parameters from the 'CG-5 SETUP PARAMETERS'
block in the observation file (text format).
The class is initialized either by the :py:meth:`.__init__`,
method by passing the attributes as keyword arguments directly,
or by the class method :py:meth:`.populate_from_obs_file_string`
that parses the content of a CG-5 observation file (txt).
Attributes
----------
gcal1 : float
Calibration factor GCAL1.
tiltxs : float
XXXXXXXX
tiltys : float
XXXXXXXX
tiltxo : float
XXXXXXXX
tiltyo : float
XXXXXXXX
tempco : float
XXXXXXXX
drift : float
Linear drift factor (long-term drift).
drift_date_time_start : datetime object (TZ aware)
Start epoch for the determination of th linear long-term drift.
"""
def __init__(self, *args, **kwargs):
"""
Parameters
----------
*args
Variable length argument list.
**kwargs : dict
Keyword arguments that are parsed to class attributes.
"""
self.gcal1 = kwargs.get('gcal1', np.nan) # float
self.tiltxs = kwargs.get('tiltxs', np.nan) # float
self.tiltys = kwargs.get('tiltys', np.nan) # float
self.tiltxo = kwargs.get('tiltxo', np.nan) # float
self.tiltyo = kwargs.get('tiltyo', np.nan) # float
self.tempco = kwargs.get('tempco', np.nan) # float
self.drift = kwargs.get('drift', np.nan) # float
self.drift_date_time_start = kwargs.get('drift_date_time_start', None) # datetime object (timezone aware)
@classmethod
def create_from_obs_file_string(cls, str_obs_file):
""" Create instance of class :py:class:`.CG5SetupParameters` by
parsing a CG-5 observation file string.
Parameters
----------
str_obs_file : str
Content of the CG-5 observation file (.txt).
Returns
-------
Object: :py:class:`.CG5SetupParameters`
Initializes class instance.
"""
# Parse observation file string:
expr = r"\/\tCG-5 SETUP PARAMETERS\s*\n\/\s+Gref:\s*(?P<gref>\S+)\s*\n\/\s+Gcal1:\s*(" \
r"?P<gcal1>\S+)\s*\n\/\s+TiltxS:\s*(?P<tiltxs>\S+)\s*\n\/\s+TiltyS:\s*(" \
r"?P<tiltys>\S+)\s*\n\/\s+TiltxO:\s*(?P<tiltxo>\S+)\s*\n\/\s+TiltyO:\s*(" \
r"?P<tiltyo>\S+)\s*\n\/\s+Tempco:\s*(?P<tempco>\S+)\s*\n\/\s+Drift:\s*(" \
r"?P<drift>\S+)\s*\n\/\s+DriftTime Start:\s*(?P<drift_time_start>\S+)\s*\n\/\s+DriftDate Start:\s*(" \
r"?P<drift_date_start>\S+)\s*\n"
# Read setup parameters blocks from obs file string (only one block allowed!):
block_count = 0 # number of survey blocks in obs file string
for survey_block in re.finditer(expr, str_obs_file):
setup_dict = survey_block.groupdict()
block_count += 1
if block_count == 1: # OK => Parse data in string:
return cls(gref=float(setup_dict['gref']),
gcal1=float(setup_dict['gcal1']),
tiltxs=float(setup_dict['tiltxs']),
tiltys=float(setup_dict['tiltys']),
tiltxo=float(setup_dict['tiltxo']),
tiltyo=float(setup_dict['tiltyo']),
tempco=float(setup_dict['tempco']),
drift=float(setup_dict['drift']),
drift_date_time_start=dt.datetime.strptime(
setup_dict["drift_date_start"] + setup_dict["drift_time_start"],
"%Y/%m/%d%H:%M:%S")
)
elif block_count == 0: # Not available
return cls() # Initialize with default values
else: # More than 1 block found => Error!
raise InvaliFileContentError('{} "CG-5 SETUP PARAMETERS" in observation file found, but maximum one '
'expected.'.format(block_count))
# def is_valid(self) -> bool:
# """???"""
# # TODO
# return True
class CG5OptionsParameters:
"""CG-5 instrumental options (filters, corrections, output).
Scintrex CG-5 options from the 'CG-5 OPTIONS' block in
the observation file (text format).
The class is initialized either by the :py:meth:`.__init__`,
method by passing the attributes as keyword arguments directly,
or by the class method :py:meth:`.populate_from_obs_file_string`
that parses the content of a CG-5 observation file (txt).
Attributes
----------
tide_correction : bool
Tide correction (on/off).
cont_tilt : bool
Continuous tilt correction (on/off).
auto_rejection : bool
Auto rejection of outliers (on/off).
terrain_correction : bool
Terrain correction (on/off).
seismic_filter : bool
Seismic filter (on/off).
raw_data : bool
Raw data output (on/off).
"""
def __init__(self, *args, **kwargs):
"""
Parameters
----------
*args
Variable length argument list.
**kwargs : dict
Keyword arguments that are parsed to class attributes.
"""
self.tide_correction = kwargs.get('tide_correction', None) # bool
self.cont_tilt = kwargs.get('cont_tilt', None) # bool
self.auto_rejection = kwargs.get('auto_rejection', None) # bool
self.terrain_correction = kwargs.get('terrain_correction', None) # bool
self.seismic_filter = kwargs.get('seismic_filter', None) # bool
self.raw_data = kwargs.get('raw_data', None) # bool
#
# self.tide_correction = kwargs.get('tide_correction', False) # bool
# self.cont_tilt = kwargs.get('cont_tilt', False) # bool
# self.auto_rejection = kwargs.get('auto_rejection', False) # bool
# self.terrain_correction = kwargs.get('terrain_correction', False) # bool
# self.seismic_filter = kwargs.get('seismic_filter', False) # bool
# self.raw_data = kwargs.get('raw_data', False) # bool
@classmethod
def create_from_obs_file_string(cls, str_obs_file):
""" Create instance of class :py:class:`.CG5OptionsParameters` by
parsing a CG-5 observation file string.
Parameters
----------
str_obs_file : str
Content of the CG-5 observation file (.txt).
Returns
-------
Object: :py:class:`.CG5OptionsParameters`
Initializes class instance.
"""
# Parse observation file string:
expr = r"\/\tCG-5 OPTIONS\s*\n\/\s+Tide Correction:\s*(?P<tide_correction>\S+)\s*\n\/\s+Cont. Tilt:\s*(" \
r"?P<cont_tilt>\S+)\s*\n\/\s+Auto Rejection:\s*(?P<auto_rejection>\S+)\s*\n\/\s+Terrain Corr.:\s*(" \
r"?P<terrain_correction>\S+)\s*\n\/\s+Seismic Filter:\s*(?P<seismic_filter>\S+)\s*\n\/\s+Raw Data:\s*(" \
r"?P<raw_data>\S+)\s*\n"
# Read setup parameters blocks from obs file string (only one block allowed!):
block_count = 0 # number of survey blocks in obs file string
for options_block in re.finditer(expr, str_obs_file):
options_dict = options_block.groupdict()
block_count += 1
if block_count == 1: # OK => Parse data in string:
return cls(tide_correction=options_dict['tide_correction'] == "YES", # bool
cont_tilt=options_dict['cont_tilt'] == "YES", # bool
auto_rejection=options_dict['auto_rejection'] == "YES", # bool
terrain_correction=options_dict['terrain_correction'] == "YES", # bool
seismic_filter=options_dict['seismic_filter'] == "YES", # bool
raw_data=options_dict['raw_data'] == "YES",
) # bool
elif block_count == 0: # Not available
return cls() # Initialize with default values
else: # More than 1 block found => Error!
raise InvaliFileContentError('{} "CG-5 OPTIONS" in observation file found, but maximum one '
'expected.'.format(block_count))
# def is_valid(self) -> bool:
# """???"""
# # TODO
# return True
class CG5Survey:
"""CG-5 survey data.
Class instances may contain the information available in the
following sections of Scintrex CG-5 observation files (txt format):
- Survey Parameter block (as instance of :py:obj:`.CG5SurveyParameters`)
- Setup block (as instance of :py:obj:`.CG5SetupParameters`)
- Options block (as instance of :py:obj:`.CG5OptionsParameters`)
- Observations (as pandas dataframe)
If the class is initialized without setting the observations file
attribute (`obs_filename`), no observation file is load and the
object is initialized empty.
Attributes
----------
obs_filename : str
Name (and path) to CG-5 observation file (txt format)
survey_parameters : :py:class:`.CG5SurveyParameters`
Survey Parameter of the Parameters block in the observation file.
setup_parameters : :py:class:`.CG5SetupParameters`
Setup Parameter of the Setup block in the observation file.
options : :py:class:`.CG5OptionsParameters`
Instrumental options from the Options block in the observation file.
obs_df : pandas data frame
Contains the actual observation data records with the colums defined in `self._OBS_DF_COLUMN_NAMES`.
"""
# Column names of the dataframe containing tha actual observation data:
_OBS_DF_COLUMN_NAMES = ('lat_deg', # Latitude [deg] :
'lon_deg', # Longitude [deg]
'alt_m', # Altitude [m]
'g_mgal', # Determined gravity (corrected) [mGal]
'sd_mgal', # Standard deviation of determined gravity [mGal]
'tiltx',
'tilty',
'temp',
'tide', # Tidal correction determined by the CG-5 [mGal]
'duration_sec', # Duration of the current setup [sec]
'rej', # Number of rejected single measurements
'time_str', # Reference time = mid of setup with duration `duration_sec`) (dropped later)
'dec_time_date',
'terrain', # Terrain correction [??]
'date', # Date (dropped later)
'station_name', # Station name : str
'dhf_m', # Distance between instrument top and physical reference point [m]
'dhb_m', # Distance between instrument top and ground [m]
'atm_pres_hpa', # Measured atmospheric pressure [hPa]
'setup_id', # Unique ID of this observation (=setup)
)
# obs_epoch : datetime object (added to df later)
# Rename columns: df.rename(columns = {'$b':'B'}, inplace = True)
# Non-numeric columns in the observation dataframe:
_OBS_DF_NON_NUMERIC_COLUMNS = ['station_name', 'date', 'time_str']
def __init__(self,
obs_filename='',
survey_parameters=CG5SurveyParameters(),
setup_parameters=CG5SetupParameters(),
options=CG5OptionsParameters()
):
"""
Parameters
----------
obs_filename : str, optional
Name (and path) to CG-5 observation file (txt format).
survey_parameters : :py:class:`.CG5SurveyParameters`, optional
Survey Parameter of the Parameters block in the observation file.
setup_parameters : :py:class:`.CG5SetupParameters`, optional
Setup Parameter of the Setup block in the observation file.
options : :py:class:`.CG5OptionsParameters`, optional
Instrumental options from the Options block in the observation file.
"""
self.obs_filename = obs_filename
assert isinstance(survey_parameters, CG5SurveyParameters), \
"survey_parameters is not an instance of CG5SurveyParameters"
self.survey_parameters = survey_parameters
assert isinstance(setup_parameters, CG5SetupParameters), \
"setup_parameters is not an instance of CG5SetupParameters"
self.setup_parameters = setup_parameters
assert isinstance(options, CG5OptionsParameters), \
"options is not an instance of CG5OptionsParameters"
self.options = options
# Read the observation file if a filename is available; otherwise initialize obs_df as None.
if self.obs_filename:
self.read_obs_file(obs_filename)
else:
self.obs_df = None # Initialize as None
def __str__(self):
if self.obs_df is None:
return 'Empty CG-5 Survey.'
else:
if not self.survey_parameters.survey_name:
return 'Unnamed CG-5 Survey with {} observations (file: {}).'.format(len(self.obs_df),
self.obs_filename.split('/')[-1])
else:
return 'CG-5 Survey "{}" with {} observations (file: {}).'.format(self.survey_parameters.survey_name,
len(self.obs_df),
self.obs_filename.split('/')[-1])
@staticmethod
def resolve_station_name(station_name_in):
"""Convert station name from Scintrex observation file
(as Note) to the naming convention used in the output
file (BEV conventions).
Parameters
----------
station_name_in : str
Station name string as written to the observation file
as note.
Returns
-------
Corrected station name : str
"""
station_name_in = station_name_in.upper()
# Check the first letter of the name in order to detect the station type:
if station_name_in[0] == 'S':
station_name_out = station_name_in
elif station_name_in[0] == 'P':
station_name_out = 'P ' + station_name_in[1:]
elif station_name_in[0] == 'T' and '.' in station_name_in:
[str1_tmp, str2_tmp] = station_name_in.split('.')
station_name_out = 'T{0:>4} {1:>3}'.format(str1_tmp[1:], str2_tmp)
elif station_name_in[0] == 'N':
station_name_out = station_name_in
else:
station_name_out = station_name_in.replace('.', '-')
return station_name_out.upper()
@staticmethod
def get_dhb_dhf(dh_str):
"""Convert dhb or dhf from a note in the CG-5 observation file
to an actual number. In the notes, '.' is used instead of '-' for the minus sign.
Parameters
----------
dh_str : str
Height difference [m] with '.' instead of '-'.
Returns
-------
Height difference [m] : float
"""
if dh_str.startswith('.'):
dh_str = '-' + dh_str[1:]
return float(dh_str)
def read_obs_file(self, obs_filename):
"""Read CG-5 observation file (txt) and populate the object.
Notes
-----
Ignore comment lines that start with "#".
Parameters
----------
obs_filename : str
Name (and path) to CG-5 observation file (txt format).
"""
COMMENT_MARKER = '#'
self.obs_filename = obs_filename
# with open(self.obs_filename, 'r') as content_file:
# str_obs_file = content_file.read()
# Read in file and ignore comment lines:
file_handle = open(self.obs_filename, 'r')
lines = []
for line in file_handle:
line_tmp = line.strip()
if not line_tmp.startswith(COMMENT_MARKER):
lines.append(line_tmp)
file_handle.close()
str_obs_file = '\n'.join(lines)
# Normalize the trailing end-of-line symbols so that the string ends with exactly one '\n':
# - The last character of the string has to be '\n' so that the regex works correctly!
number_of_trailing_eol_symbols = 0
str_idx = -1
while str_obs_file[str_idx] == '\n':
number_of_trailing_eol_symbols += 1
str_idx -= 1
if number_of_trailing_eol_symbols == 0:
str_obs_file += '\n'
elif number_of_trailing_eol_symbols > 1:
str_obs_file = str_obs_file[0:str_idx+2]
# ### Match blocks with regex ###
# CG-5 SURVEY block:
self.survey_parameters = CG5SurveyParameters.create_from_obs_file_string(str_obs_file)
self.setup_parameters = CG5SetupParameters.create_from_obs_file_string(str_obs_file)
self.options = CG5OptionsParameters.create_from_obs_file_string(str_obs_file)
# Get Observations
# ### 4 possibilities: ###
# Initialize empty dataframe and append observation blocks at each station
# Note: better performance is achieved by collecting the data in a list (appending) and converting it to a dataframe at once.
# See: https://stackoverflow.com/questions/13784192/creating-an-empty-pandas-dataframe-then-filling-it
obs_list = [] # Collect all obs data in this list and then convert to pd dataframe.
# 1.) Station name & dhb=dhf
expr = '\/\tNote: \t(?P<station_name>\S+)\s+(?P<dh_cm>-?[.0-9]+)\s*[\r?\n](?P<obs_data>(?:\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s*[\r?\n])+)'
for obs_block in re.finditer(expr, str_obs_file):
obs_dict = obs_block.groupdict()
station_name = self.resolve_station_name(obs_dict['station_name'])
dhf_m = float(obs_dict['dh_cm']) * 1e-2
dhb_m = float(obs_dict['dh_cm']) * 1e-2
atm_pres_hpa = None
lines = obs_dict['obs_data'].splitlines()
# Create unique ID (= UNIX timestamp of first observation) for each setup on a station:
# - To distinguish multiple setups (with multiple observations each) on multiple stations
time_str = lines[0].split()[-1] + ' ' + lines[0].split()[11]
setup_id = int(dt.datetime.timestamp(dt.datetime.strptime(time_str, "%Y/%m/%d %H:%M:%S")))
for line in lines:
line_items = line.split()
line_items.append(station_name)
line_items.append(dhf_m)
line_items.append(dhb_m)
line_items.append(atm_pres_hpa)
line_items.append(setup_id)
obs_list.append(line_items)
# 2.) Station name & dhb=dhf & pressure
expr = '\/\tNote: \t(?P<station_name>\S+)\s+(?P<dh_cm>-?[.0-9]+)\s*[\r?\n](?P<obs_data>(?:\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s*[\r?\n])+)\/\tNote: \t(?P<pres>[0-9]{3,4}[.]{0,1}[0-9]*)'
for obs_block in re.finditer(expr, str_obs_file):
obs_dict = obs_block.groupdict()
station_name = self.resolve_station_name(obs_dict['station_name'])
dhf_m = float(obs_dict['dh_cm']) * 1e-2
dhb_m = float(obs_dict['dh_cm']) * 1e-2
atm_pres_hpa = float(obs_dict['pres'])
lines = obs_dict['obs_data'].splitlines()
# Create unique ID (= UNIX timestamp of first observation) for each setup on a station:
# - To distinguish multiple setups (with multiple observations each) on multiple stations
time_str = lines[0].split()[-1] + ' ' + lines[0].split()[11]
setup_id = int(dt.datetime.timestamp(dt.datetime.strptime(time_str, "%Y/%m/%d %H:%M:%S")))
for line in lines:
line_items = line.split()
line_items.append(station_name)
line_items.append(dhf_m)
line_items.append(dhb_m)
line_items.append(atm_pres_hpa)
line_items.append(setup_id)
obs_list.append(line_items)
# 3.) Station name & dhb & dhf
expr = '\/\tNote: \t(?P<station_name>\S+)\s+(?P<dhb_cm>-?[.0-9]+)\s+(?P<dhf_cm>-?[.0-9]+)\s*[\r?\n](?P<obs_data>(?:\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s*[\r?\n])+)'
for obs_block in re.finditer(expr, str_obs_file):
obs_dict = obs_block.groupdict()
station_name = self.resolve_station_name(obs_dict['station_name'])
dhf_m = float(obs_dict['dhf_cm']) * 1e-2
dhb_m = float(obs_dict['dhb_cm']) * 1e-2
atm_pres_hpa = None
lines = obs_dict['obs_data'].splitlines()
# Create unique ID (= UNIX timestamp of first observation) for each setup on a station:
# - To distinguish multiple setups (with multiple observations each) on multiple stations
time_str = lines[0].split()[-1] + ' ' + lines[0].split()[11]
setup_id = int(dt.datetime.timestamp(dt.datetime.strptime(time_str, "%Y/%m/%d %H:%M:%S")))
for line in lines:
line_items = line.split()
line_items.append(station_name)
line_items.append(dhf_m)
line_items.append(dhb_m)
line_items.append(atm_pres_hpa)
line_items.append(setup_id)
obs_list.append(line_items)
# 4.) Station name & dhb & dhf & pressure
# expr = "\/\s+Note:\s+(?P<station_name>\S+)\s+(?P<dhb_cm>\S+)\s+(?P<dhf_cm>\S+)\s*\n(?P<obs_data>(?:\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s*[\r\n])+)"
expr = '\/\tNote: \t(?P<station_name>\S+)\s+(?P<dhb_cm>-?[.0-9]+)\s+(?P<dhf_cm>-?[.0-9]+)\s*[\r?\n](?P<obs_data>(?:\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s*[\r?\n])+)\/\tNote: \t(?P<pres>[0-9]{3,4}[.]{0,1}[0-9]*)'
for obs_block in re.finditer(expr, str_obs_file):
obs_dict = obs_block.groupdict()
station_name = self.resolve_station_name(obs_dict['station_name'])
dhf_m = float(obs_dict['dhf_cm']) * 1e-2
dhb_m = float(obs_dict['dhb_cm']) * 1e-2
atm_pres_hpa = float(obs_dict['pres'])
lines = obs_dict['obs_data'].splitlines()
# Create unique ID (= UNIX timestamp of first observation) for each setup on a station:
# - To distinguish multiple setups (with multiple observations each) on multiple stations
time_str = lines[0].split()[-1] + ' ' + lines[0].split()[11]
setup_id = int(dt.datetime.timestamp(dt.datetime.strptime(time_str, "%Y/%m/%d %H:%M:%S")))
for line in lines:
line_items = line.split()
line_items.append(station_name)
line_items.append(dhf_m)
line_items.append(dhb_m)
line_items.append(atm_pres_hpa)
line_items.append(setup_id)
obs_list.append(line_items)
# Create pandas dataframe of prepared list:
self.obs_df = pd.DataFrame(obs_list, columns=self._OBS_DF_COLUMN_NAMES)
# Remove duplicate entries that were matched both with and without pressure:
setup_ids = self.obs_df['setup_id'].unique().tolist()
setup_ids_diplicates = []
for setup_id in setup_ids:
tmp_filter = self.obs_df['setup_id'] == setup_id
if (~self.obs_df.loc[tmp_filter, 'atm_pres_hpa'].isna()).any(): # Entries with pressure found
setup_ids_diplicates.append(setup_id)
if setup_ids_diplicates:
tmp_filter = ~(self.obs_df['atm_pres_hpa'].isna() & self.obs_df['setup_id'].isin(setup_ids_diplicates))
self.obs_df = self.obs_df.loc[tmp_filter].copy(deep=True)
# Convert numeric columns to numeric dtypes:
cols = self.obs_df.columns.drop(self._OBS_DF_NON_NUMERIC_COLUMNS)
self.obs_df[cols] = self.obs_df[cols].apply(pd.to_numeric, errors='raise')
# Sort observations by time and date and reset index:
self.obs_df.sort_values(by='dec_time_date', inplace=True, ignore_index=True)
# Convert date and time to datetime objects (aware, if UTC offset is available):
if self.survey_parameters.date_time is not None:
self.obs_df['obs_epoch'] = pd.to_datetime(
self.obs_df['date'] + ' ' + self.obs_df['time_str'], format='%Y/%m/%d %H:%M:%S')
self.obs_df['obs_epoch'] = self.obs_df['obs_epoch'].dt.tz_localize('UTC') # Set timezone = UTC
self.obs_df['obs_epoch'] = self.obs_df['obs_epoch'] + pd.Timedelta(self.survey_parameters.date_time.utcoffset())
else: # tz unaware time
self.obs_df['obs_epoch'] = pd.to_datetime(
self.obs_df['date'] + ' ' + self.obs_df['time_str'], format='%Y/%m/%d %H:%M:%S')
self.obs_df.drop(columns=['time_str', 'date'], inplace=True) # Drop columns that are not required any more
def plot_g_values(self, station_names=None):
"""Plot g-values of selected or all stations in the df.
Notes
-----
This method requires matplotlib as an optional dependency!
Parameters
----------
station_names : list of str, optional
List of names of stations for which the observations will be plotted.
The default value is None, which implies that the data of all stations
is plotted.
"""
# Check if obs data is available first:
if self.obs_df is not None:
# Get list of station names and loop over them:
if station_names is None:
station_names = self.obs_df['station_name'].unique()
fig, ax = plt.subplots()
for station_name in station_names:
x = self.obs_df[self.obs_df['station_name'] == station_name].obs_epoch
y = self.obs_df[self.obs_df['station_name'] == station_name].g_mgal * 1e3 # µGal
ax.scatter(x, y, label=station_name)
ax.legend()
ax.grid(True)
ax.set_xlabel('Time')
ax.set_ylabel('g [µGal]')
ax.set_title('survey: ' + self.survey_parameters.survey_name)
# Data cursor:
fig.canvas.mpl_connect('pick_event', DataCursor(plt.gca()))
plt.show()
# Run as standalone program:
if __name__ == "__main__":
import matplotlib.pyplot as plt
path = settings.PATH_OBS_FILE_CG5 + settings.NAME_OBS_FILE_CG5
s1 = CG5Survey()
s1.read_obs_file(path)
# s1.plot_g_values(['1-164-04', '1-164-12', '1-164-11'])
# s1.plot_g_values(['TEST'])
s1.plot_g_values()
else:
pass
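# --- Illustrative sketch (not part of the original module) ----------------------
# The setup IDs created in read_obs_file are the UNIX timestamps of the first
# observation of each setup, parsed from that line's date and time fields. The
# snippet below is a minimal, standalone sketch of that derivation; the time
# string used here is made up for illustration, and `dt` is the datetime alias
# already used above.
if __name__ == "__main__":
    example_time_str = "2021/05/17 09:15:30"  # assumed "YYYY/MM/DD HH:MM:SS" from an observation line
    example_setup_id = int(dt.datetime.timestamp(dt.datetime.strptime(example_time_str, "%Y/%m/%d %H:%M:%S")))
    print('Example setup_id:', example_setup_id)  # exact value depends on the local timezone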
|
PypiClean
|
/dash_echarts-0.0.12.9-py3-none-any.whl/dash_echarts/examples/custom_profit.py
|
import dash_echarts
import dash, random
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
from dash.exceptions import PreventUpdate
def main():
'''
dash_echarts examples
name: custom profit with echarts
author: dameng <[email protected]>
'''
app = dash.Dash(__name__)
raw = [
[10, 16, 3, 'A'],
[16, 18, 15, 'B'],
[18, 26, 12, 'C'],
[26, 32, 22, 'D'],
[32, 56, 7, 'E'],
[56, 62, 17, 'F']
]
color_list = ['#4f81bd', '#c0504d', '#9bbb59', '#604a7b', '#948a54', '#e46c0b']
data = []
for i,e in enumerate(raw):
data.append({
'value': e,
'itemStyle': {
'color': color_list[i]
}
})
option = {
'title': {
'text': 'Profit',
'left': 'center'
},
'tooltip': {
},
'xAxis': {
'scale': True
},
'yAxis': {
},
'series': [{
'type': 'custom',
'renderItem': 'ri',
'label': {
'show': True,
'position': 'top'
},
'dimensions': ['from', 'to', 'profit'],
'encode': {
'x': [0, 1],
'y': 2,
'tooltip': [0, 1, 2],
'itemName': 3
},
'data': data
}]
}
app.layout = html.Div([
dash_echarts.DashECharts(
option = option,
id='echarts',
fun_keys=['renderItem'],
funs= {
'ri': '''
function renderItem(params, api) {
var yValue = api.value(2);
var start = api.coord([api.value(0), yValue]);
var size = api.size([api.value(1) - api.value(0), yValue]);
var style = api.style();
return {
type: 'rect',
shape: {
x: start[0],
y: start[1],
width: size[0],
height: size[1]
},
style: style
};
}
'''
},
style={
"width": '100vw',
"height": '100vh',
}
),
dcc.Interval(id="interval", interval=1 * 1000, n_intervals=0),
])
app.run_server(debug=True)
if __name__ == '__main__':
main()
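# --- Illustrative sketch (not part of the upstream example) ---------------------
# The dcc.Interval component above is declared but unused; together with the
# `random` import it hints at live updates. A helper like the one below could
# rebuild the custom-series data on every tick and be returned from a Dash
# callback that outputs the 'option' prop of the DashECharts component.
# Everything here (names, value ranges) is illustrative only.
def build_random_profit_data(color_list=None):
    """Return a fresh `data` list in the shape expected by the custom series above."""
    color_list = color_list or ['#4f81bd', '#c0504d', '#9bbb59', '#604a7b', '#948a54', '#e46c0b']
    data, start = [], 10
    for i, color in enumerate(color_list):
        width = random.randint(2, 25)    # segment length on the x axis (from -> to)
        profit = random.randint(3, 25)   # bar height (the 'profit' dimension)
        data.append({
            'value': [start, start + width, profit, chr(ord('A') + i)],
            'itemStyle': {'color': color},
        })
        start += width
    return data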
|
PypiClean
|
/docutools-2022.4.11-py3-none-any.whl/lcdoc/mkdocs/stats/__init__.py
|
from lcdoc import log
import json
import os
import sys
from mkdocs.config import config_options
from lcdoc.const import LogStats, PageStats, Stats
from lcdoc.mkdocs.tools import MDPlugin, app
from lcdoc.tools import dirname, exists, project, read_file, write_file
last_stats = {}
def get_fn_and_set_last(self, config):
"""On serve and for '-' we work with the cached last stats; otherwise we read the file, if present."""
fn = self.config['dump_stats']
if not fn:
app.info('no stats file configured')
return None
if fn == '-':
return fn
else:
fn = project.abs_path(fn, config, mkdirs=True)
if exists(fn):
os.rename(fn, fn + '.prev.json')
if last_stats:
return fn
l = read_file(fn + '.prev.json', dflt='')
if l:
last_stats.update(json.loads(l))
return fn
# write_file(fn, json.dumps(s, sort_keys=True, indent=4))
# app.info('Have written stats', keys=len(s), file=fn)
def get_diff(s, minval):
isminv = lambda v, m=minval: isinstance(v, float) and v == m
d, o = {'added': {}, 'changed': {}}, last_stats
d['removed'] = [k for k in o if k not in s and not isminv(o[k])]
for k, v in s.items():
vo = o.get(k)
if vo is None:
if isinstance(v, float) and v < 2 * minval:
continue
d['added'][k] = v
elif vo != v:
if isinstance(vo, float) and isinstance(v, float):
if int(vo * 10) == int(v * 10):
continue
d['changed'][k] = [vo, v]
d['changed'].pop('Filtered_0_Values', 0)
for k in {'added', 'removed', 'changed'}:
if not d.get(k):
d.pop(k)
return d
def filter_logs(sever):
"""Beyond the info level, all logs are kept in a RAM cache (log_majors).
Here we return those at and beyond `sever` (= warning, error or fatal).
"""
l = log.log_majors
if not sever or sever not in l:
return
logs = []
m = log.log_levels
[logs.append([k, l.get(k)]) for k in m[m.index(sever) :] if l.get(k)]
return logs, sum([len(i[1]) for i in logs])
def by_ts(store):
def d(l, L):
l.insert(1, log.level_by_name[L])
if not l[-1]:
l.pop()
return l
k = lambda i: i[0]
return sorted([d(l, L) for L, logs in store.items() for l in logs], key=k)
class StatsPlugin(MDPlugin):
# :docs:stats_config
C = config_options.Choice
log_maj = lambda d, C=C: C(['warning', 'error', 'fatal', 'none'], default=d)
config_scheme = (
# if not starting with "/": relative to project root.
# for stdout: set file="-"
('dump_stats', config_options.Type(str, default='build/lcd-stats.json')),
# round floats to this precision:
('round_digits', config_options.Type(int, default=2)),
# omit zero values:
('filter_0', config_options.Type(bool, default=True)),
# helpful to see changes at serve
('print_diff', config_options.Type(bool, default=True)),
# write the logs as json (same dir than fn)
('dump_logs', config_options.Type(str, default='build/lcd-logs.json')),
# print all logs from this level again at end of build:
('repeat_major_log_events', log_maj('warning')),
# fail mkdocs build on errors, you don't want broken docs published:
('fail_build_on_log_events', log_maj('error')),
)
# :docs:stats_config
def on_post_build(self, config):
from lcdoc.tools import flatten
fn = get_fn_and_set_last(self, config)
rd = self.config['round_digits']
minval = 5 * 10 ** -rd
filter_0 = self.config['filter_0']
s = {'Global': Stats, 'Pages': PageStats, 'Log': LogStats}
s = flatten(s, sep='.', tpljoin='')
if rd:
r = lambda v: round(v, rd) if type(v) == float else v
s = dict([(k, r(v)) for k, v in s.items()])
l = len(s)
if filter_0:
s = dict(filter(lambda x: x[1] > minval, s.items()))
f = l - len(s)
if f:
s['Filtered_0_Values'] = f
if last_stats and self.config['print_diff']:
diff = get_diff(s, minval=minval)
msg = 'Stats changes since last run'
msg = ('No s' + msg[1:]) if not diff else msg
app.info(msg, json=diff)
last_stats.clear()
last_stats.update(s)
kw = dict(filtered_near_zero_vals=filter_0)
if filter_0:
kw['minval'] = minval
if fn == '-':
app.info('Collected Stats', hint='pipe into jq to consolidate', **kw)
print(json.dumps(s, sort_keys=True))
elif fn:
write_file(fn, json.dumps(s, sort_keys=True, indent=4))
app.info('Have written stats', keys=len(s), file=fn, **kw)
sever = self.config['repeat_major_log_events']
logs, cnt = filter_logs(sever=sever)
if logs:
app.info('Logs of severity %s and higher' % sever, json=logs, count=cnt)
l = log.log_majors
fn = self.config['dump_logs']
if fn:
fn = project.abs_path(fn, config, mkdirs=True)
if exists(fn):
os.rename(fn, fn + '.prev.json')
ol, j = by_ts(l), json.dumps
write_file(fn, '\n'.join(j(i, default=str) for i in ol))
app.info('Dumped logs', fn=fn, count=len(ol))
bsever = self.config['fail_build_on_log_events']
if bsever != sever:
logs, cnt = filter_logs(sever=bsever)
if logs:
# won't interrupt server mode for this
m = app.error if 'serve' in sys.argv else app.die
m('Build is broken, have %s critical logs' % cnt)
[i.clear() for k, i in l.items()]
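# --- Illustrative sketch (not part of the plugin) --------------------------------
# How get_diff above reports changes between two flattened stats dicts, shown on a
# tiny hand-made example (keys and values are made up):
if __name__ == '__main__':
    last_stats.update({'Pages.a': 1.0, 'Pages.b': 0.04})
    print(get_diff({'Pages.a': 1.23, 'Pages.c': 3.0}, minval=0.05))
    # -> {'added': {'Pages.c': 3.0}, 'changed': {'Pages.a': [1.0, 1.23]}, 'removed': ['Pages.b']}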
|
PypiClean
|
/Intrst_algrms-0.7.2-py3-none-any.whl/knapsack_problem/funcs.py
|
from itertools import combinations
from typing import Tuple, List, Generator, Any, Union, Iterator
from knapsack_problem.ref_func import knapsack_standard_solution, Item
# @profile
def knapsack_1_standard_solution(items: Tuple[Item], weight_limit: int) -> Item:
"""
https://codereview.stackexchange.com/a/20581
Solve the knapsack problem by finding the most valuable subsequence
of items that weighs no more than weight_limit.
items must be a sequence of pairs (value, weight), where value is a
number and weight is a non-negative integer.
weight_limit is a non-negative integer.
Return a pair whose first element is the sum of values in the most
valuable subsequence, and whose second element is the subsequence.
"""
return knapsack_standard_solution(items, weight_limit)
# @profile
def knapsack_2_solution(items: Tuple[Item], weight_limit: int) -> Iterator[Item]:
"""
my own function, written thanks to the site:
https://foxford.ru/wiki/informatika/algoritm-ukladki-ryukzaka
"""
w = weight_limit
f = [[0] * (w + 1) for i in range(len(items) + 1)]
for i, item in enumerate(items):
weight = item.weight
value = item.value
for j in range(1, w + 1):
if j >= weight:
f[i][j] = max(f[i - 1][j], f[i - 1][j - weight] + value)
else:
f[i][j] = f[i - 1][j]
for k in reversed(range(len(items))):
if f[k][w] != f[k - 1][w]:
yield items[k]
w -= items[k].weight
# @profile
def knapsack_3_solution(items: Tuple[Item], weight_limit: int) -> Iterator[Item]:
"""
Given a list of items with name, value and weight.
Return the optimal value with total weight <= allowed weight and
list of picked items.
https://codereview.stackexchange.com/a/125386
"""
# find which item are picked
def fetch_items(k: List[List[int]], weight_limit: int, items: Tuple[Item]):
for item, weights_p, weights_n in zip(items[::-1], k[-2::-1], k[::-1]):
if weights_n[weight_limit] != weights_p[weight_limit]:
yield item
weight_limit -= item.weight
k = [
[0] * (weight_limit + 1)
for x in range(len(items) + 1)
]
for next_idx, (item, weights) in enumerate(zip(items, k), 1):
for w, current_weight in enumerate(weights[1:], 1):
if item.weight <= w:
k[next_idx][w] = max(
item.value + weights[w - item.weight],
current_weight
)
else:
k[next_idx][w] = current_weight
return fetch_items(k, weight_limit, items)
# @profile
def knapsack_4_bruteforce_solution(items: Tuple[Item], weight_limit: int) -> Union[Item, List[None]]:
"""
Brute force algorithm
http://rosettacode.org/mw/index.php?title=Knapsack_problem/0-1&action=edit&section=62
"""
def any_comb(items: Tuple[Item]) -> Generator:
"""return combinations of any length from the items"""
return (comb
for r in range(1, len(items) + 1)
for comb in combinations(items, r)
)
def total_value(comb: Tuple[Any]) -> Tuple[int, int]:
"""Totalise a particular combination of items"""
totwt = totval = 0
for _, val, wt in comb:
totwt += wt
totval += val
return (totval, -totwt) if totwt <= weight_limit else (0, 0)
bagged = max(any_comb(items), key=total_value) # max val or min wt if values equal
if bagged[0].weight <= weight_limit:
return bagged
return []
# @profile
def knapsack_5_dynamic_solution(items: Tuple[Item], weight_limit: int) -> Iterator[Item]:
"""
Dynamic programming solution
http://rosettacode.org/mw/index.php?title=Knapsack_problem/0-1&action=edit&section=63
"""
table = [[0] * (weight_limit + 1) for j in range(len(items) + 1)]
for j, item in enumerate(items, 1):
_, val, wt = item
for w in range(1, weight_limit + 1):
if wt > w:
table[j][w] = table[j - 1][w]
else:
table[j][w] = max(table[j - 1][w],
table[j - 1][w - wt] + val)
w = weight_limit
for j in range(len(items), 0, -1):
was_added = table[j][w] != table[j - 1][w]
if was_added:
item, val, wt = items[j - 1]
yield items[j - 1]
w -= wt
# @profile
def knapsack_6_recursive_dynamic_solution(items: Tuple[Item], weight_limit: int) -> Tuple[Item]:
"""
Recursive dynamic programming algorithm
http://rosettacode.org/mw/index.php?title=Knapsack_problem/0-1&action=edit&section=64
"""
def total_value(items: Tuple[Item], weight_limit: int) -> int:
return sum([x.value for x in items]) if sum([x.weight for x in items]) <= weight_limit else 0
cache = {}
def solve(items: Tuple[Item], weight_limit: int) -> Tuple:
if not items:
return ()
if (items, weight_limit) not in cache:
head = items[0]
tail = items[1:]
include = (head,) + solve(tail, weight_limit - head[2])
dont_include = solve(tail, weight_limit)
if total_value(include, weight_limit) > total_value(dont_include, weight_limit):
answer = include
else:
answer = dont_include
cache[(items, weight_limit)] = answer
return cache[(items, weight_limit)]
return solve(items, weight_limit)
# @profile
def knapsack_greedy_solution(items: Tuple[Item], weight_limit: int) -> Iterator:
"""
Return a list of items with the maximum value, subject to the
constraint that their combined weight must not exceed max_weight.
Implements the well-known "greedy approximation algorithm" for the knapsack problem
(first described by George Dantzig in 1957).
https://codereview.stackexchange.com/a/62871
"""
def efficiency(item: Item) -> float:
"""Return efficiency of item (its value per unit weight)."""
return float(item.value) / float(item.weight)
def pack(item: Item) -> bool:
# Attempt to pack item; return True if successful.
if item.weight <= pack.max_weight:
pack.max_weight -= item.weight
return True
else:
return False
pack.max_weight = weight_limit
return filter(pack, sorted(items, key=efficiency, reverse=True))
if __name__ == '__main__':
import time
from data import pack_up_static_knapsack_3
funcs = [
knapsack_1_standard_solution,
knapsack_2_solution,
knapsack_3_solution,
knapsack_4_bruteforce_solution,
knapsack_5_dynamic_solution,
knapsack_6_recursive_dynamic_solution,
knapsack_greedy_solution
]
for func in funcs:
list(func(*pack_up_static_knapsack_3()))
time.sleep(0.3)
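# --- Illustrative sketch (not part of the original module) -----------------------
# A tiny worked example of the dynamic-programming solution above. The Item type is
# assumed to be a (name, value, weight) namedtuple, mirroring how the functions above
# unpack it; the instance below is made up.
def _demo_dynamic_solution():
    from collections import namedtuple
    DemoItem = namedtuple('DemoItem', ['name', 'value', 'weight'])
    demo_items = (DemoItem('a', 60, 10), DemoItem('b', 100, 20), DemoItem('c', 120, 30))
    picked = list(knapsack_5_dynamic_solution(demo_items, weight_limit=50))
    # With a limit of 50 the optimal choice is 'b' + 'c' for a total value of 220.
    assert sum(item.value for item in picked) == 220
    return picked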
|
PypiClean
|
/OctoBot-0.4.54.tar.gz/OctoBot-0.4.54/octobot/storage/trading_metadata.py
|
import math
import numpy
import octobot_commons.enums as common_enums
import octobot_commons.constants as commons_constants
import octobot_commons.databases as commons_databases
import octobot_commons.configuration as commons_configuration
import octobot_commons.time_frame_manager as time_frame_manager
import octobot_backtesting.api as backtesting_api
import octobot_trading.api as trading_api
import octobot_trading.enums as trading_enums
import octobot.backtesting as backtesting
async def clear_run_metadata(bot_id):
run_db = commons_databases.RunDatabasesProvider.instance().get_run_db(bot_id)
await run_db.delete(common_enums.DBTables.METADATA.value, None)
async def store_run_metadata(bot_id, exchange_managers, start_time, user_inputs=None, flush=False) -> dict:
run_db = commons_databases.RunDatabasesProvider.instance().get_run_db(bot_id)
user_inputs = user_inputs or await commons_configuration.get_user_inputs(run_db)
run_dbs_identifier = commons_databases.RunDatabasesProvider.instance().get_run_databases_identifier(bot_id)
metadata = await _get_trading_metadata(exchange_managers, start_time, user_inputs, run_dbs_identifier, False, None)
await run_db.log(
common_enums.DBTables.METADATA.value,
metadata
)
if flush:
await run_db.flush()
return metadata
async def store_backtesting_run_metadata(exchange_managers, start_time, user_inputs, run_dbs_identifier, name) -> dict:
run_metadata = await _get_trading_metadata(exchange_managers, start_time, user_inputs, run_dbs_identifier, True, name)
# use local database as a lock is required
async with commons_databases.DBWriter.database(
run_dbs_identifier.get_backtesting_metadata_identifier(),
with_lock=True) as writer:
await writer.log(common_enums.DBTables.METADATA.value, run_metadata)
return run_metadata
async def _get_trading_metadata(exchange_managers, run_start_time, user_inputs, run_dbs_identifier, is_backtesting, name) \
-> dict:
trading_mode = trading_api.get_trading_modes(exchange_managers[0])[0]
multi_exchanges_data = await _get_multi_exchange_data(exchange_managers, is_backtesting)
single_exchange_data = await _get_single_exchange_data(
exchange_managers[0],
trading_mode,
run_start_time,
user_inputs,
run_dbs_identifier,
is_backtesting,
name
)
return {
**single_exchange_data,
**multi_exchanges_data,
**(await trading_mode.get_additional_metadata(is_backtesting))
}
async def _get_multi_exchange_data(exchange_managers, is_backtesting):
symbols = list(set(
symbol
for exchange_manager in exchange_managers
for symbol in trading_api.get_trading_pairs(exchange_manager)
))
time_frames = list(set(
tf.value
for exchange_manager in exchange_managers
for tf in trading_api.get_relevant_time_frames(exchange_manager)
))
data_files = list(set(
data_file
for exchange_manager in exchange_managers
for data_file in trading_api.get_backtesting_data_files(exchange_manager)
)) if is_backtesting else []
profitability = numpy.average(tuple(
float(trading_api.get_profitability_stats(exchange_manager)[0])
for exchange_manager in exchange_managers
))
profitability_percent = numpy.average(tuple(
float(trading_api.get_profitability_stats(exchange_manager)[1])
for exchange_manager in exchange_managers
))
markets_profitability = _get_markets_profitability(exchange_managers, time_frames) if is_backtesting else {}
exchange_names = [
trading_api.get_exchange_name(exchange_manager)
for exchange_manager in exchange_managers
]
future_contracts_by_exchange = _get_future_contracts_by_exchange(exchange_managers)
trades = [
trade
for exchange_manager in exchange_managers
for trade in trading_api.get_trade_history(exchange_manager, include_cancelled=False)
]
entries = [
trade
for trade in trades
if trade.status is trading_enums.OrderStatus.FILLED and trade.side is trading_enums.TradeOrderSide.BUY
]
win_rate = round(numpy.average(tuple(
float(trading_api.get_win_rate(exchange_manager) * 100)
for exchange_manager in exchange_managers
)), 3)
wins = 0 if math.isnan(win_rate) else round(win_rate * len(entries) / 100)
draw_down = round(numpy.average(tuple(
float(trading_api.get_draw_down(exchange_manager))
for exchange_manager in exchange_managers
)), 3)
r_sq_end_balance = await _get_coefficient_of_determination(exchange_managers, False)
r_sq_max_balance = await _get_coefficient_of_determination(exchange_managers, True)
duration = round(max(
backtesting_api.get_backtesting_duration(exchange_manager.exchange.backtesting)
for exchange_manager in exchange_managers
), 3) if is_backtesting else 0
if trading_api.get_is_backtesting(exchange_managers[0]):
start_time = min(
backtesting_api.get_backtesting_starting_time(exchange_manager.exchange.backtesting)
for exchange_manager in exchange_managers
)
end_time = max(
backtesting_api.get_backtesting_ending_time(exchange_manager.exchange.backtesting)
for exchange_manager in exchange_managers
)
else:
start_time = exchange_managers[0].exchange.get_exchange_current_time()
end_time = -1
origin_portfolio, end_portfolio = _get_portfolio(exchange_managers)
backtesting_only_metadata = {
common_enums.BacktestingMetadata.DURATION.value: duration,
common_enums.BacktestingMetadata.BACKTESTING_FILES.value: data_files,
} if is_backtesting else {}
return {
**backtesting_only_metadata,
**{
common_enums.BacktestingMetadata.GAINS.value: round(float(profitability), 8),
common_enums.BacktestingMetadata.PERCENT_GAINS.value: round(float(profitability_percent), 3),
common_enums.BacktestingMetadata.MARKETS_PROFITABILITY.value:
{c: f"{round(float(v) * 100, 2)}%" for c, v in markets_profitability.items()},
common_enums.BacktestingMetadata.END_PORTFOLIO.value: str(end_portfolio),
common_enums.BacktestingMetadata.START_PORTFOLIO.value: str(origin_portfolio),
common_enums.BacktestingMetadata.WIN_RATE.value: win_rate,
common_enums.BacktestingMetadata.DRAW_DOWN.value: draw_down or 0,
common_enums.BacktestingMetadata.COEFFICIENT_OF_DETERMINATION_MAX_BALANCE.value: r_sq_max_balance or 0,
common_enums.BacktestingMetadata.COEFFICIENT_OF_DETERMINATION_END_BALANCE.value: r_sq_end_balance or 0,
common_enums.BacktestingMetadata.SYMBOLS.value: symbols,
common_enums.BacktestingMetadata.TIME_FRAMES.value: time_frames,
common_enums.BacktestingMetadata.ENTRIES.value: len(entries),
common_enums.BacktestingMetadata.WINS.value: wins,
common_enums.BacktestingMetadata.LOSES.value: len(entries) - wins,
common_enums.BacktestingMetadata.TRADES.value: len(trades),
common_enums.DBRows.EXCHANGES.value: exchange_names,
common_enums.DBRows.START_TIME.value: start_time,
common_enums.DBRows.END_TIME.value: end_time,
common_enums.DBRows.FUTURE_CONTRACTS.value: future_contracts_by_exchange,
}
}
async def _get_coefficient_of_determination(exchange_managers, use_high_instead_of_end_balance):
try:
return numpy.average([
await trading_api.get_coefficient_of_determination(
exchange_manager,
use_high_instead_of_end_balance=use_high_instead_of_end_balance
)
for exchange_manager in exchange_managers
])
except KeyError:
return None
def _get_markets_profitability(exchange_managers, time_frames):
markets_profitability = {}
min_tf = time_frame_manager.find_min_time_frame(time_frames)
for exchange_manager in exchange_managers:
for symbol in trading_api.get_trading_pairs(exchange_manager):
if symbol in markets_profitability:  # already computed for this symbol
continue
try:
markets_profitability[symbol] = \
backtesting.IndependentBacktesting.get_market_delta(symbol, exchange_manager, min_tf)
except Exception:
pass
return markets_profitability
def _get_portfolio(exchange_managers):
origin_portfolio = {}
end_portfolio = {}
for exchange_manager in exchange_managers:
try:
exchange_origin_portfolio = trading_api.get_origin_portfolio(exchange_manager, as_decimal=False)
except AttributeError:
# no initialized portfolio on this exchange
continue
exchange_end_portfolio = trading_api.get_portfolio(exchange_manager, as_decimal=False)
for portfolio in (exchange_origin_portfolio, exchange_end_portfolio):
for values in portfolio.values():
values.pop("available", None)
if exchange_manager.is_future:
for position in trading_api.get_positions(exchange_manager):
try:
exchange_end_portfolio[position.get_currency()]["position"] = float(position.quantity)
except KeyError:
exchange_end_portfolio[position.get_currency()] = {
commons_constants.PORTFOLIO_AVAILABLE: 0,
commons_constants.PORTFOLIO_TOTAL: 0,
"position": float(position.quantity),
}
for exchange_portfolio, portfolio in zip((exchange_origin_portfolio, exchange_end_portfolio),
(origin_portfolio, end_portfolio)):
for currency, value_dict in exchange_portfolio.items():
try:
pf_value = portfolio[currency]
except KeyError:
pf_value = {}
portfolio[currency] = pf_value
for key, value in value_dict.items():
pf_value[key] = pf_value.get(key, 0) + value
return origin_portfolio, end_portfolio
def _get_future_contracts_by_exchange(exchange_managers):
return {
trading_api.get_exchange_name(exchange_manager): {
symbol: {
"contract_type": contract.contract_type.value,
"position_mode": contract.position_mode.value,
"margin_type": contract.margin_type.value
}
for symbol, contract in trading_api.get_pair_contracts(exchange_manager).items()
if symbol in trading_api.get_trading_pairs(exchange_manager)
and trading_api.is_handled_contract(contract)
}
for exchange_manager in exchange_managers
if exchange_manager.is_future and hasattr(exchange_manager.exchange, "pair_contracts")
}
async def _get_single_exchange_data(exchange_manager, trading_mode, run_start_time, user_inputs, run_dbs_identifier,
is_backtesting, name):
exchange_type = trading_enums.ExchangeTypes.FUTURE.value if exchange_manager.is_future \
else trading_enums.ExchangeTypes.SPOT.value
if user_inputs is None:
user_inputs = {}
formatted_user_inputs = {}
for user_input in user_inputs:
if not user_input["is_nested_config"]:
try:
formatted_user_inputs[user_input["tentacle"]][user_input["name"]] = user_input["value"]
except KeyError:
formatted_user_inputs[user_input["tentacle"]] = {
user_input["name"]: user_input["value"]
}
leverage = 0
if exchange_manager.is_future and hasattr(exchange_manager.exchange, "get_pair_future_contract"):
leverage = float(next(iter(trading_api.get_pair_contracts(exchange_manager).values())).current_leverage)
backtesting_only_metadata = {
common_enums.BacktestingMetadata.ID.value: run_dbs_identifier.backtesting_id,
common_enums.BacktestingMetadata.OPTIMIZATION_CAMPAIGN.value: run_dbs_identifier.optimization_campaign_name,
common_enums.BacktestingMetadata.USER_INPUTS.value: formatted_user_inputs,
} if is_backtesting else {}
return {
**backtesting_only_metadata,
**{
common_enums.BacktestingMetadata.TIMESTAMP.value: run_start_time,
common_enums.BacktestingMetadata.NAME.value: name or trading_mode.get_name(),
common_enums.BacktestingMetadata.LEVERAGE.value: leverage,
common_enums.DBRows.TRADING_TYPE.value: exchange_type,
common_enums.DBRows.REFERENCE_MARKET.value: trading_api.get_reference_market(exchange_manager.config),
}
}
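# --- Illustrative sketch (not part of OctoBot) ------------------------------------
# The user-input grouping performed in _get_single_exchange_data above, shown on a
# tiny hand-made list. The dict keys match the ones accessed above; the tentacle
# names and values are made up.
if __name__ == "__main__":
    example_user_inputs = [
        {"tentacle": "DCA", "name": "period", "value": 3, "is_nested_config": False},
        {"tentacle": "DCA", "name": "amount", "value": 50, "is_nested_config": False},
        {"tentacle": "RSI", "name": "length", "value": 14, "is_nested_config": False},
    ]
    formatted = {}
    for user_input in example_user_inputs:
        if not user_input["is_nested_config"]:
            formatted.setdefault(user_input["tentacle"], {})[user_input["name"]] = user_input["value"]
    print(formatted)  # {'DCA': {'period': 3, 'amount': 50}, 'RSI': {'length': 14}}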
|
PypiClean
|
/python-docx-1-0.0.2.tar.gz/python-docx-1-0.0.2/docx/oxml/ns.py
|
from __future__ import absolute_import, print_function, unicode_literals
nsmap = {
"a": "http://schemas.openxmlformats.org/drawingml/2006/main",
"c": "http://schemas.openxmlformats.org/drawingml/2006/chart",
"cp": "http://schemas.openxmlformats.org/package/2006/metadata/core-properties",
"dc": "http://purl.org/dc/elements/1.1/",
"dcmitype": "http://purl.org/dc/dcmitype/",
"dcterms": "http://purl.org/dc/terms/",
"dgm": "http://schemas.openxmlformats.org/drawingml/2006/diagram",
"m": "http://schemas.openxmlformats.org/officeDocument/2006/math",
"pic": "http://schemas.openxmlformats.org/drawingml/2006/picture",
"r": "http://schemas.openxmlformats.org/officeDocument/2006/relationships",
"sl": "http://schemas.openxmlformats.org/schemaLibrary/2006/main",
"vt" : ("http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes"),
"w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main",
'w14': "http://schemas.microsoft.com/office/word/2010/wordml",
"wp": "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing",
"xml": "http://www.w3.org/XML/1998/namespace",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
pfxmap = dict((value, key) for key, value in nsmap.items())
class NamespacePrefixedTag(str):
"""
Value object that knows the semantics of an XML tag having a namespace
prefix.
"""
def __new__(cls, nstag, *args):
return super(NamespacePrefixedTag, cls).__new__(cls, nstag)
def __init__(self, nstag):
self._pfx, self._local_part = nstag.split(':')
self._ns_uri = nsmap[self._pfx]
@property
def clark_name(self):
return '{%s}%s' % (self._ns_uri, self._local_part)
@classmethod
def from_clark_name(cls, clark_name):
nsuri, local_name = clark_name[1:].split('}')
nstag = '%s:%s' % (pfxmap[nsuri], local_name)
return cls(nstag)
@property
def local_part(self):
"""
Return the local part of the tag as a string. E.g. 'foobar' is
returned for tag 'f:foobar'.
"""
return self._local_part
@property
def nsmap(self):
"""
Return a dict having a single member, mapping the namespace prefix of
this tag to its namespace name (e.g. {'f': 'http://foo/bar'}). This
is handy for passing to xpath calls and other uses.
"""
return {self._pfx: self._ns_uri}
@property
def nspfx(self):
"""
Return the string namespace prefix for the tag, e.g. 'f' is returned
for tag 'f:foobar'.
"""
return self._pfx
@property
def nsuri(self):
"""
Return the namespace URI for the tag, e.g. 'http://foo/bar' would be
returned for tag 'f:foobar' if the 'f' prefix maps to
'http://foo/bar' in nsmap.
"""
return self._ns_uri
def nsdecls(*prefixes):
"""
Return a string containing a namespace declaration for each of the
namespace prefix strings, e.g. 'p', 'ct', passed as *prefixes*.
"""
return ' '.join(['xmlns:%s="%s"' % (pfx, nsmap[pfx]) for pfx in prefixes])
def nspfxmap(*nspfxs):
"""
Return a dict containing the subset namespace prefix mappings specified by
*nspfxs*. Any number of namespace prefixes can be supplied, e.g.
namespaces('a', 'r', 'p').
"""
return dict((pfx, nsmap[pfx]) for pfx in nspfxs)
def qn(tag):
"""
Stands for "qualified name", a utility function to turn a namespace
prefixed tag name into a Clark-notation qualified tag name for lxml. For
example, ``qn('p:cSld')`` returns ``'{http://schemas.../main}cSld'``.
"""
prefix, tagroot = tag.split(':')
uri = nsmap[prefix]
return '{%s}%s' % (uri, tagroot)
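# --- Illustrative usage sketch (not part of the module) ---------------------------
if __name__ == "__main__":
    # qn() turns a prefixed tag into Clark notation using the nsmap above:
    print(qn('w:p'))
    # -> '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}p'
    # nsdecls() builds xmlns declarations for the given prefixes:
    print(nsdecls('w', 'r'))
    # NamespacePrefixedTag exposes the same pieces as properties:
    tag = NamespacePrefixedTag('w:p')
    print(tag.clark_name, tag.nspfx, tag.local_part)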
|
PypiClean
|
/imshowpair-0.1.0.tar.gz/imshowpair-0.1.0/LICENSE.rst
|
.. -*- rst -*-
License
=======
Copyright (c) 2018-2019, Lev E. Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev E. Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
PypiClean
|
/xentica-0.2.0.tar.gz/xentica-0.2.0/CONTRIBUTING.md
|
# Contributing Guidelines
Dear fellow CA enthusiast,
If you are reading this guidelines, you are probably as passionate
about the idea of self-emergence of lifeforms in cellular automata as
we are. Or maybe you are interested in multi-dimensional CA models
that could help us understand the laws of physics underlying the
quantum level. Either way, in case you are seeking to contribute to
our project, that's how you can do it.
1. Try to install the Xentica framework along with the Moire GUI, run some
examples and give us feedback on any problems you meet in the
process. Feedback has a twofold merit. On one hand, it is the
quickest way to fix the problems occurring with particular system
setups and to make the whole core framework more accessible for
mass use. On the other hand, it is a signal that the project is
not unclaimed, which gives the core developers the inspiration to
move on.
2. In case you succeed with the previous step, you can try to build
your own CA models with Xentica, again giving us feedback on any
problems you meet. Maybe you'll also find some features missing and
have ideas for new ones. We are glad to listen to and discuss
anything that comes to your mind while using Xentica.
Sharing the source code of your own Xentica models
is highly appreciated too.
3. In the process, you'll definitely read through the official Xentica
documentation. Another way to contribute is to help us improve
this doc. If you spot typos, or syntactic or stylistic mistakes,
and know how to fix them, please let us know. The core developers are
not native English speakers, so any editorial help would be appreciated.
4. [LOCKED] Pull Requests are now locked until version 0.3. The main
reason is that the framework is in very early development and is
subject to massive and (possibly) backward-incompatible
changes. If you are really willing to contribute some code in the
future, please contact us, and we'll update you as soon as the
framework API becomes at least semi-stable and Pull Requests are
opened.
5. [LOCKED] This item is hidden even from core developers =)
The easiest way to give us feedback is by
[opening an issue](https://github.com/a5kin/xentica/issues/new)
on GitHub. When doing so, please follow the
[Code of Conduct](https://github.com/a5kin/xentica/blob/master/CODE_OF_CONDUCT.md)
we are abiding by. In general, just be benevolent and respect others. And
stay inspired by the beautiful world of multi-dimensional cellular
automata.
With Planck-level blessings,\
Xentica Core Team
|
PypiClean
|
/pulumi_kubernetes-4.2.0a1693403901-py3-none-any.whl/pulumi_kubernetes/networking/v1beta1/IngressClass.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ... import core as _core
from ... import meta as _meta
from ._inputs import *
__all__ = ['IngressClassInitArgs', 'IngressClass']
@pulumi.input_type
class IngressClassInitArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
spec: Optional[pulumi.Input['IngressClassSpecArgs']] = None):
"""
The set of arguments for constructing a IngressClass resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input['IngressClassSpecArgs'] spec: Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'networking.k8s.io/v1beta1')
if kind is not None:
pulumi.set(__self__, "kind", 'IngressClass')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['IngressClassSpecArgs']]:
"""
Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['IngressClassSpecArgs']]):
pulumi.set(self, "spec", value)
class IngressClass(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['IngressClassSpecArgs']]] = None,
__props__=None):
"""
IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[pulumi.InputType['IngressClassSpecArgs']] spec: Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[IngressClassInitArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.
:param str resource_name: The name of the resource.
:param IngressClassInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IngressClassInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['IngressClassSpecArgs']]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IngressClassInitArgs.__new__(IngressClassInitArgs)
__props__.__dict__["api_version"] = 'networking.k8s.io/v1beta1'
__props__.__dict__["kind"] = 'IngressClass'
__props__.__dict__["metadata"] = metadata
__props__.__dict__["spec"] = spec
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:networking.k8s.io/v1:IngressClass")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(IngressClass, __self__).__init__(
'kubernetes:networking.k8s.io/v1beta1:IngressClass',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'IngressClass':
"""
Get an existing IngressClass resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = IngressClassInitArgs.__new__(IngressClassInitArgs)
__props__.__dict__["api_version"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["spec"] = None
return IngressClass(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> pulumi.Output[str]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output['_meta.v1.outputs.ObjectMeta']:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def spec(self) -> pulumi.Output['outputs.IngressClassSpec']:
"""
Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
return pulumi.get(self, "spec")
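# --- Illustrative sketch (not part of the generated SDK) --------------------------
# Inside a Pulumi program, an IngressClass would typically be declared roughly like
# this; the resource name, annotation value and controller string below are made up,
# and IngressClassSpecArgs comes from this module's `_inputs`:
#
#     import pulumi_kubernetes as k8s
#
#     example = k8s.networking.v1beta1.IngressClass(
#         "example-class",
#         metadata=k8s.meta.v1.ObjectMetaArgs(
#             annotations={"ingressclass.kubernetes.io/is-default-class": "true"},
#         ),
#         spec=k8s.networking.v1beta1.IngressClassSpecArgs(
#             controller="example.com/ingress-controller",
#         ),
#     )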
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_15/models/protection_group_snapshot_post.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_15 import models
class ProtectionGroupSnapshotPost(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'created': 'int',
'destroyed': 'bool',
'pod': 'FixedReference',
'source': 'FixedReference',
'space': 'Space',
'suffix': 'str',
'time_remaining': 'int',
'eradication_config': 'ProtectionGroupEradicationConfig'
}
attribute_map = {
'id': 'id',
'name': 'name',
'created': 'created',
'destroyed': 'destroyed',
'pod': 'pod',
'source': 'source',
'space': 'space',
'suffix': 'suffix',
'time_remaining': 'time_remaining',
'eradication_config': 'eradication_config'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
created=None, # type: int
destroyed=None, # type: bool
pod=None, # type: models.FixedReference
source=None, # type: models.FixedReference
space=None, # type: models.Space
suffix=None, # type: str
time_remaining=None, # type: int
eradication_config=None, # type: models.ProtectionGroupEradicationConfig
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A user-specified name. The name must be locally unique and can be changed.
created (int): The snapshot creation time of the original snapshot source. Measured in milliseconds since the UNIX epoch.
destroyed (bool)
pod (FixedReference): The pod in which the protection group of the protection group snapshot resides.
source (FixedReference): The original protection group from which this snapshot was taken.
space (Space): Returns provisioned size and physical storage consumption data for each protection group.
suffix (str): The name suffix appended to the protection group name to make up the full protection group snapshot name in the form `PGROUP.SUFFIX`. If `suffix` is not specified, the protection group name is in the form `PGROUP.NNN`, where `NNN` is a unique monotonically increasing number. If multiple protection group snapshots are created at a time, the suffix name is appended to those snapshots.
time_remaining (int): The amount of time left until the destroyed snapshot is permanently eradicated. Measured in milliseconds. Before the `time_remaining` period has elapsed, the destroyed snapshot can be recovered by setting `destroyed=false`.
eradication_config (ProtectionGroupEradicationConfig)
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if created is not None:
self.created = created
if destroyed is not None:
self.destroyed = destroyed
if pod is not None:
self.pod = pod
if source is not None:
self.source = source
if space is not None:
self.space = space
if suffix is not None:
self.suffix = suffix
if time_remaining is not None:
self.time_remaining = time_remaining
if eradication_config is not None:
self.eradication_config = eradication_config
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ProtectionGroupSnapshotPost`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ProtectionGroupSnapshotPost`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ProtectionGroupSnapshotPost`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ProtectionGroupSnapshotPost`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProtectionGroupSnapshotPost, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProtectionGroupSnapshotPost):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
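# Example usage (a minimal sketch; the field values below are illustrative only):
#
#     snap = ProtectionGroupSnapshotPost(suffix="daily", destroyed=False)
#     snap.to_dict()    # contains only the keys that were explicitly set
#     snap["suffix"]    # dict-style access is also supported via __getitem__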
|
PypiClean
|
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/MedicalOrganization.py
|
from __future__ import annotations
from datetime import *
from time import *
from typing import *
from pydantic import *
class MedicalOrganization(BaseModel):
"""A medical organization (physical or not), such as hospital, institution or clinic.
References:
https://schema.org/MedicalOrganization
Note:
Model Depth 3
Attributes:
potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing.
url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item.
alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item.
sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
description: (Union[List[Union[str, Any]], str, Any]): A description of the item.
disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
name: (Union[List[Union[str, Any]], str, Any]): The name of the item.
additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
serviceArea: (Optional[Union[List[Union[str, Any]], str, Any]]): The geographic area where the service is provided.
founder: (Optional[Union[List[Union[str, Any]], str, Any]]): A person who founded this organization.
isicV4: (Union[List[Union[str, Any]], str, Any]): The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.
hasPOS: (Optional[Union[List[Union[str, Any]], str, Any]]): Points-of-Sales operated by the organization or person.
globalLocationNumber: (Union[List[Union[str, Any]], str, Any]): The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.
member: (Optional[Union[List[Union[str, Any]], str, Any]]): A member of an Organization or a ProgramMembership. Organizations can be members of organizations; ProgramMembership is typically for individuals.
knowsAbout: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Of a [[Person]], and less typically of an [[Organization]], to indicate a topic that is known about - suggesting possible expertise but not implying it. We do not distinguish skill levels here, or relate this to educational content, events, objectives or [[JobPosting]] descriptions.
makesOffer: (Optional[Union[List[Union[str, Any]], str, Any]]): A pointer to products or services offered by the organization or person.
ownershipFundingInfo: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): For an [[Organization]] (often but not necessarily a [[NewsMediaOrganization]]), a description of organizational ownership structure; funding and grants. In a news/media setting, this is with particular reference to editorial independence. Note that the [[funder]] is also available and can be used to make basic funder information machine-readable.
founders: (Optional[Union[List[Union[str, Any]], str, Any]]): A person who founded this organization.
legalName: (Union[List[Union[str, Any]], str, Any]): The official name of the organization, e.g. the registered company name.
actionableFeedbackPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): For a [[NewsMediaOrganization]] or other news-related [[Organization]], a statement about public engagement activities (for news media, the newsroom’s), including involving the public - digitally or otherwise -- in coverage decisions, reporting and activities after publication.
areaServed: (Union[List[Union[str, Any]], str, Any]): The geographic area where a service or offered item is provided.
parentOrganization: (Optional[Union[List[Union[str, Any]], str, Any]]): The larger organization that this organization is a [[subOrganization]] of, if any.
slogan: (Union[List[Union[str, Any]], str, Any]): A slogan or motto associated with the item.
department: (Optional[Union[List[Union[str, Any]], str, Any]]): A relationship between an organization and a department of that organization, also described as an organization (allowing different urls, logos, opening hours). For example: a store with a pharmacy, or a bakery with a cafe.
keywords: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Keywords or tags used to describe some item. Multiple textual entries in a keywords list are typically delimited by commas, or by repeating the property.
reviews: (Optional[Union[List[Union[str, Any]], str, Any]]): Review of the item.
memberOf: (Optional[Union[List[Union[str, Any]], str, Any]]): An Organization (or ProgramMembership) to which this Person or Organization belongs.
publishingPrinciples: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): The publishingPrinciples property indicates (typically via [[URL]]) a document describing the editorial principles of an [[Organization]] (or individual, e.g. a [[Person]] writing a blog) that relate to their activities as a publisher, e.g. ethics or diversity policies. When applied to a [[CreativeWork]] (e.g. [[NewsArticle]]) the principles are those of the party primarily responsible for the creation of the [[CreativeWork]].While such policies are most typically expressed in natural language, sometimes related information (e.g. indicating a [[funder]]) can be expressed using schema.org terminology.
employee: (Optional[Union[List[Union[str, Any]], str, Any]]): Someone working for this organization.
award: (Union[List[Union[str, Any]], str, Any]): An award won by or for this item.
email: (Union[List[Union[str, Any]], str, Any]): Email address.
contactPoints: (Optional[Union[List[Union[str, Any]], str, Any]]): A contact point for a person or organization.
diversityStaffingReport: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): For an [[Organization]] (often but not necessarily a [[NewsMediaOrganization]]), a report on staffing diversity issues. In a news context this might be for example ASNE or RTDNA (US) reports, or self-reported.
foundingDate: (Optional[Union[List[Union[str, Any, date]], str, Any, date]]): The date that this organization was founded.
owns: (Optional[Union[List[Union[str, Any]], str, Any]]): Products owned by the organization or person.
awards: (Union[List[Union[str, Any]], str, Any]): Awards won by or for this item.
review: (Optional[Union[List[Union[str, Any]], str, Any]]): A review of the item.
dissolutionDate: (Optional[Union[List[Union[str, Any, date]], str, Any, date]]): The date that this organization was dissolved.
funding: (Optional[Union[List[Union[str, Any]], str, Any]]): A [[Grant]] that directly or indirectly provide funding or sponsorship for this item. See also [[ownershipFundingInfo]].
interactionStatistic: (Optional[Union[List[Union[str, Any]], str, Any]]): The number of interactions for the CreativeWork using the WebSite or SoftwareApplication. The most specific child type of InteractionCounter should be used.
events: (Optional[Union[List[Union[str, Any]], str, Any]]): Upcoming or past events associated with this place or organization.
seeks: (Optional[Union[List[Union[str, Any]], str, Any]]): A pointer to products or services sought by the organization or person (demand).
employees: (Optional[Union[List[Union[str, Any]], str, Any]]): People working for this organization.
unnamedSourcesPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): For an [[Organization]] (typically a [[NewsMediaOrganization]]), a statement about policy on use of unnamed sources and the decision process required.
subOrganization: (Optional[Union[List[Union[str, Any]], str, Any]]): A relationship between two organizations where the first includes the second, e.g., as a subsidiary. See also: the more specific 'department' property.
foundingLocation: (Optional[Union[List[Union[str, Any]], str, Any]]): The place where the Organization was founded.
funder: (Optional[Union[List[Union[str, Any]], str, Any]]): A person or organization that supports (sponsors) something through some kind of financial contribution.
iso6523Code: (Union[List[Union[str, Any]], str, Any]): An organization identifier as defined in ISO 6523(-1). Note that many existing organization identifiers such as [leiCode](https://schema.org/leiCode), [duns](https://schema.org/duns) and [vatID](https://schema.org/vatID) can be expressed as an ISO 6523 identifier by setting the ICD part of the ISO 6523 identifier accordingly.
diversityPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Statement on diversity policy by an [[Organization]] e.g. a [[NewsMediaOrganization]]. For a [[NewsMediaOrganization]], a statement describing the newsroom’s diversity policy on both staffing and sources, typically providing staffing data.
hasMerchantReturnPolicy: (Optional[Union[List[Union[str, Any]], str, Any]]): Specifies a MerchantReturnPolicy that may be applicable.
event: (Optional[Union[List[Union[str, Any]], str, Any]]): Upcoming or past event associated with this place, organization, or action.
duns: (Union[List[Union[str, Any]], str, Any]): The Dun & Bradstreet DUNS number for identifying an organization or business person.
alumni: (Optional[Union[List[Union[str, Any]], str, Any]]): Alumni of an organization.
ethicsPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Statement about ethics policy, e.g. of a [[NewsMediaOrganization]] regarding journalistic and publishing practices, or of a [[Restaurant]], a page describing food source policies. In the case of a [[NewsMediaOrganization]], an ethicsPolicy is typically a statement describing the personal, organizational, and corporate standards of behavior expected by the organization.
leiCode: (Union[List[Union[str, Any]], str, Any]): An organization identifier that uniquely identifies a legal entity as defined in ISO 17442.
vatID: (Union[List[Union[str, Any]], str, Any]): The Value-added Tax ID of the organization or person.
knowsLanguage: (Union[List[Union[str, Any]], str, Any]): Of a [[Person]], and less typically of an [[Organization]], to indicate a known language. We do not distinguish skill levels or reading/writing/speaking/signing here. Use language codes from the [IETF BCP 47 standard](http://tools.ietf.org/html/bcp47).
correctionsPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): For an [[Organization]] (e.g. [[NewsMediaOrganization]]), a statement describing (in news media, the newsroom’s) disclosure and correction policy for errors.
logo: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An associated logo.
hasCredential: (Optional[Union[List[Union[str, Any]], str, Any]]): A credential awarded to the Person or Organization.
address: (Union[List[Union[str, Any]], str, Any]): Physical address of the item.
brand: (Optional[Union[List[Union[str, Any]], str, Any]]): The brand(s) associated with a product or service, or the brand(s) maintained by an organization or business person.
nonprofitStatus: (Optional[Union[List[Union[str, Any]], str, Any]]): nonprofitStatus indicates the legal status of a non-profit organization in its primary place of business.
contactPoint: (Optional[Union[List[Union[str, Any]], str, Any]]): A contact point for a person or organization.
hasOfferCatalog: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates an OfferCatalog listing for this Organization, Person, or Service.
members: (Optional[Union[List[Union[str, Any]], str, Any]]): A member of this organization.
aggregateRating: (Optional[Union[List[Union[str, Any]], str, Any]]): The overall rating, based on a collection of reviews or ratings, of the item.
faxNumber: (Union[List[Union[str, Any]], str, Any]): The fax number.
telephone: (Union[List[Union[str, Any]], str, Any]): The telephone number.
taxID: (Union[List[Union[str, Any]], str, Any]): The Tax / Fiscal ID of the organization or person, e.g. the TIN in the US or the CIF/NIF in Spain.
naics: (Union[List[Union[str, Any]], str, Any]): The North American Industry Classification System (NAICS) code for a particular organization or business person.
location: (Union[List[Union[str, Any]], str, Any]): The location of, for example, where an event is happening, where an organization is located, or where an action takes place.
numberOfEmployees: (Optional[Union[List[Union[str, Any]], str, Any]]): The number of employees in an organization, e.g. business.
sponsor: (Optional[Union[List[Union[str, Any]], str, Any]]): A person or organization that supports a thing through a pledge, promise, or financial contribution. E.g. a sponsor of a Medical Study or a corporate sponsor of an event.
healthPlanNetworkId: (Union[List[Union[str, Any]], str, Any]): Name or unique ID of network. (Networks are often reused across different insurance plans.)
medicalSpecialty: (Optional[Union[List[Union[str, Any]], str, Any]]): A medical specialty of the provider.
isAcceptingNewPatients: (Optional[Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]]): Whether the provider is accepting new patients.
"""
type_: str = Field(default="MedicalOrganization", alias="@type", const=True)
potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Indicates a potential Action, which describes an idealized action in which this thing"
"would play an 'object' role.",
)
mainEntityOfPage: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="Indicates a page (or other CreativeWork) for which this thing is the main entity being"
"described. See [background notes](/docs/datamodel.html#mainEntityBackground)"
"for details.",
)
subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A CreativeWork or Event about this Thing.",
)
url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
default=None,
description="URL of the item.",
)
alternateName: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="An alias for the item.",
)
sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
default=None,
description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the"
"URL of the item's Wikipedia page, Wikidata entry, or official website.",
)
description: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="A description of the item.",
)
disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="A sub property of description. A short description of the item used to disambiguate from"
"other, similar items. Information from other properties (in particular, name) may"
"be necessary for the description to be useful for disambiguation.",
)
identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field(
default=None,
description="The identifier property represents any kind of identifier for any kind of [[Thing]],"
"such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for"
"representing many of these, either as textual strings or as URL (URI) links. See [background"
"notes](/docs/datamodel.html#identifierBg) for more details.",
)
image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
default=None,
description="An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].",
)
name: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The name of the item.",
)
additionalType: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="An additional type for the item, typically used for adding more specific types from external"
"vocabularies in microdata syntax. This is a relationship between something and a class"
"that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'"
"attribute - for multiple types. Schema.org tools may have only weaker understanding"
"of extra types, in particular those defined externally.",
)
serviceArea: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The geographic area where the service is provided.",
)
founder: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A person who founded this organization.",
)
isicV4: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The International Standard of Industrial Classification of All Economic Activities"
"(ISIC), Revision 4 code for a particular organization, business person, or place.",
)
hasPOS: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Points-of-Sales operated by the organization or person.",
)
globalLocationNumber: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred"
"to as International Location Number or ILN) of the respective organization, person,"
"or place. The GLN is a 13-digit number used to identify parties and physical locations.",
)
member: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A member of an Organization or a ProgramMembership. Organizations can be members of"
"organizations; ProgramMembership is typically for individuals.",
)
knowsAbout: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field(
default=None,
description="Of a [[Person]], and less typically of an [[Organization]], to indicate a topic that"
"is known about - suggesting possible expertise but not implying it. We do not distinguish"
"skill levels here, or relate this to educational content, events, objectives or [[JobPosting]]"
"descriptions.",
)
makesOffer: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A pointer to products or services offered by the organization or person.",
)
ownershipFundingInfo: Union[
List[Union[str, AnyUrl, Any]], str, AnyUrl, Any
] = Field(
default=None,
description="For an [[Organization]] (often but not necessarily a [[NewsMediaOrganization]]),"
"a description of organizational ownership structure; funding and grants. In a news/media"
"setting, this is with particular reference to editorial independence. Note that the"
"[[funder]] is also available and can be used to make basic funder information machine-readable.",
)
founders: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A person who founded this organization.",
)
legalName: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The official name of the organization, e.g. the registered company name.",
)
actionableFeedbackPolicy: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="For a [[NewsMediaOrganization]] or other news-related [[Organization]], a statement"
"about public engagement activities (for news media, the newsroom’s), including involving"
"the public - digitally or otherwise -- in coverage decisions, reporting and activities"
"after publication.",
)
areaServed: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The geographic area where a service or offered item is provided.",
)
parentOrganization: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The larger organization that this organization is a [[subOrganization]] of, if any.",
)
slogan: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="A slogan or motto associated with the item.",
)
department: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A relationship between an organization and a department of that organization, also"
"described as an organization (allowing different urls, logos, opening hours). For"
"example: a store with a pharmacy, or a bakery with a cafe.",
)
keywords: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field(
default=None,
description="Keywords or tags used to describe some item. Multiple textual entries in a keywords list"
"are typically delimited by commas, or by repeating the property.",
)
reviews: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Review of the item.",
)
memberOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="An Organization (or ProgramMembership) to which this Person or Organization belongs.",
)
publishingPrinciples: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="The publishingPrinciples property indicates (typically via [[URL]]) a document describing"
"the editorial principles of an [[Organization]] (or individual, e.g. a [[Person]]"
"writing a blog) that relate to their activities as a publisher, e.g. ethics or diversity"
"policies. When applied to a [[CreativeWork]] (e.g. [[NewsArticle]]) the principles"
"are those of the party primarily responsible for the creation of the [[CreativeWork]].While"
"such policies are most typically expressed in natural language, sometimes related"
"information (e.g. indicating a [[funder]]) can be expressed using schema.org terminology.",
)
employee: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Someone working for this organization.",
)
award: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="An award won by or for this item.",
)
email: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="Email address.",
)
contactPoints: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A contact point for a person or organization.",
)
diversityStaffingReport: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="For an [[Organization]] (often but not necessarily a [[NewsMediaOrganization]]),"
"a report on staffing diversity issues. In a news context this might be for example ASNE"
"or RTDNA (US) reports, or self-reported.",
)
foundingDate: Optional[Union[List[Union[str, Any, date]], str, Any, date]] = Field(
default=None,
description="The date that this organization was founded.",
)
owns: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Products owned by the organization or person.",
)
awards: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="Awards won by or for this item.",
)
review: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A review of the item.",
)
dissolutionDate: Optional[
Union[List[Union[str, Any, date]], str, Any, date]
] = Field(
default=None,
description="The date that this organization was dissolved.",
)
funding: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A [[Grant]] that directly or indirectly provide funding or sponsorship for this item."
"See also [[ownershipFundingInfo]].",
)
interactionStatistic: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The number of interactions for the CreativeWork using the WebSite or SoftwareApplication."
"The most specific child type of InteractionCounter should be used.",
)
events: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Upcoming or past events associated with this place or organization.",
)
seeks: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A pointer to products or services sought by the organization or person (demand).",
)
employees: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="People working for this organization.",
)
unnamedSourcesPolicy: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="For an [[Organization]] (typically a [[NewsMediaOrganization]]), a statement about"
"policy on use of unnamed sources and the decision process required.",
)
subOrganization: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A relationship between two organizations where the first includes the second, e.g.,"
"as a subsidiary. See also: the more specific 'department' property.",
)
foundingLocation: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The place where the Organization was founded.",
)
funder: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A person or organization that supports (sponsors) something through some kind of financial"
"contribution.",
)
iso6523Code: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="An organization identifier as defined in ISO 6523(-1). Note that many existing organization"
"identifiers such as [leiCode](https://schema.org/leiCode), [duns](https://schema.org/duns)"
"and [vatID](https://schema.org/vatID) can be expressed as an ISO 6523 identifier"
"by setting the ICD part of the ISO 6523 identifier accordingly.",
)
diversityPolicy: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="Statement on diversity policy by an [[Organization]] e.g. a [[NewsMediaOrganization]]."
"For a [[NewsMediaOrganization]], a statement describing the newsroom’s diversity"
"policy on both staffing and sources, typically providing staffing data.",
)
hasMerchantReturnPolicy: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Specifies a MerchantReturnPolicy that may be applicable.",
)
event: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Upcoming or past event associated with this place, organization, or action.",
)
duns: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The Dun & Bradstreet DUNS number for identifying an organization or business person.",
)
alumni: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Alumni of an organization.",
)
ethicsPolicy: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="Statement about ethics policy, e.g. of a [[NewsMediaOrganization]] regarding journalistic"
"and publishing practices, or of a [[Restaurant]], a page describing food source policies."
"In the case of a [[NewsMediaOrganization]], an ethicsPolicy is typically a statement"
"describing the personal, organizational, and corporate standards of behavior expected"
"by the organization.",
)
leiCode: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="An organization identifier that uniquely identifies a legal entity as defined in ISO"
"17442.",
)
vatID: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The Value-added Tax ID of the organization or person.",
)
knowsLanguage: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="Of a [[Person]], and less typically of an [[Organization]], to indicate a known language."
"We do not distinguish skill levels or reading/writing/speaking/signing here. Use"
"language codes from the [IETF BCP 47 standard](http://tools.ietf.org/html/bcp47).",
)
correctionsPolicy: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="For an [[Organization]] (e.g. [[NewsMediaOrganization]]), a statement describing"
"(in news media, the newsroom’s) disclosure and correction policy for errors.",
)
logo: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
default=None,
description="An associated logo.",
)
hasCredential: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A credential awarded to the Person or Organization.",
)
address: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="Physical address of the item.",
)
brand: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The brand(s) associated with a product or service, or the brand(s) maintained by an organization"
"or business person.",
)
nonprofitStatus: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="nonprofitStatus indicates the legal status of a non-profit organization in its primary"
"place of business.",
)
contactPoint: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A contact point for a person or organization.",
)
hasOfferCatalog: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Indicates an OfferCatalog listing for this Organization, Person, or Service.",
)
members: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A member of this organization.",
)
aggregateRating: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The overall rating, based on a collection of reviews or ratings, of the item.",
)
faxNumber: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The fax number.",
)
telephone: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The telephone number.",
)
taxID: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The Tax / Fiscal ID of the organization or person, e.g. the TIN in the US or the CIF/NIF in"
"Spain.",
)
naics: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The North American Industry Classification System (NAICS) code for a particular organization"
"or business person.",
)
location: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The location of, for example, where an event is happening, where an organization is located,"
"or where an action takes place.",
)
numberOfEmployees: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The number of employees in an organization, e.g. business.",
)
sponsor: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A person or organization that supports a thing through a pledge, promise, or financial"
"contribution. E.g. a sponsor of a Medical Study or a corporate sponsor of an event.",
)
healthPlanNetworkId: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="Name or unique ID of network. (Networks are often reused across different insurance"
"plans.)",
)
medicalSpecialty: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A medical specialty of the provider.",
)
isAcceptingNewPatients: Optional[
Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]
] = Field(
default=None,
description="Whether the provider is accepting new patients.",
)
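# Example usage (a minimal sketch; the field values are illustrative only):
#
#     org = MedicalOrganization(name="Example Clinic", url="https://example.org")
#     org.type_                      # "MedicalOrganization", serialized under the "@type" alias
#     org.dict(exclude_none=True)    # drop the many unset optional fields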
|
PypiClean
|
/lisa_data_challenge-1.2.0-cp39-cp39-macosx_13_0_arm64.whl/ldc/waveform/lisabeta/fast_bhb.py
|
import numpy as np
import numpy.lib.recfunctions as rfn
import xarray as xr
from ldc.common.series import FrequencySeries
from ldc.lisa.orbits import AnalyticOrbits
from ldc.common import tools
from ldc.common.series import TDI
import lisaconstants
ASTRONOMICAL_YEAR = lisaconstants.SIDEREALYEAR_J2000DAY*24*60*60
AU_SI = lisaconstants.ASTRONOMICAL_UNIT
class FastBHB :
def __init__(self, bbh_type, T=6.2914560e7, delta_t=5, approx="IMRPhenomD",
orbits=None, modes=None):
"""
Parameter initialization for the black hole binaries class
T : duration in seconds (will be converted to years for lisabeta)
delta_t : time step in seconds
approx : approximant for the IMRPhenom waveform computation
orbits : LISA's orbit type
"""
#Type of BBH waveform
if bbh_type.lower() in ["mbhb","sobhb", "sbbh"] :
self.bbh_type = bbh_type
else :
raise NotImplementedError("Only MBHB and SOBHB are available.")
#Wave duration and time step
self.Tobs = T
self.Tobs_yr = self.Tobs/ASTRONOMICAL_YEAR
self.dt = delta_t
#Orbits initialization
if orbits is not None:
if not isinstance(orbits, AnalyticOrbits):
raise TypeError('FastBHB approximation requires analytic orbits')
else:
init_time = orbits.initial_position*ASTRONOMICAL_YEAR/(2 * np.pi)
lisa_params = {
'OrbitOmega' : (2*np.pi) / ASTRONOMICAL_YEAR,
'OrbitPhi0' : orbits.initial_rotation,
'Orbitt0' : init_time,
'OrbitR' : AU_SI,
'OrbitL' : orbits.arm_length}
else:
# TODO: to be removed in the end
import lisabeta.lisa.pyresponse as pyresponse
lisa_params = pyresponse.LISAconstDict["Proposal"]
#Waveform parameters used by lisabeta
self.wvf_pars = {
"minf": 1e-5, "maxf": 0.1,
"timetomerger_max": 1.0,
"fend": None, "tmin": None, "tmax": self.Tobs_yr,
"TDI": "TDIAET", "acc": 1e-4,
"order_fresnel_stencil": 0,
"approximant": approx,
"modes": modes,
"LISAconst": lisa_params,
"responseapprox": "full",
"frozenLISA": False,
"TDIrescaled": False
}
@property
def citation(self):
return 'arXiv:1806.10734'
def get_waveform(self, template=None, beta=None, lam=None,
chi1=None, chi2=None, m1=None, m2=None, Deltat=None,
phi=None, psi=None, inc=None, dist=None):
""" Return the waveform frequency range, amplitude and phase for
mode (2,2)
"""
# TODO: There might be a way to remove duplicates with get_fd_tdiaet
# Check that lisabeta is installed
# TODO: to be removed in the end
try:
import lisabeta.lisa.lisa as lisa
except ImportError:
print("Could not import lisabeta.lisa.lisa")
return None, None, None
#Rename parameters to match lisabeta notation
if template is not None:
params = self.rename_as_lisabeta(template)
else:
params = locals()
params["lambda"] = lam
#Generate the waveform of the BHB
if self.bbh_type.lower() == "mbhb" :
sig = lisa.GenerateLISATDI_SMBH(params, **self.wvf_pars)
if self.bbh_type.lower() in ["sobhb", "sbbh"]:
sig = lisa.GenerateLISATDI_SOBH(params, **self.wvf_pars)
md = (2,2)
frq = sig[md]['freq']
amp = sig[md]['amp']
phase = sig[md]['phase']
return frq, amp, phase
def get_tc(self, template):
""" Return time of coaelescence from initial frequency
"""
import lisabeta.waveforms.bbh.pyIMRPhenomD as pyIMRPhenomD
gridfreq0 = np.array([0.1])
params = self.rename_as_lisabeta(template)
syst = pyIMRPhenomD.IMRPhenomDh22AmpPhase(gridfreq0, params['m1'], params['m2'], params['chi1'],
params['chi2'], params['dist'])
tc = -syst.compute_toff(params['fstart'])
return tc
def get_fd_tdiaet(self, template=None, freqs=None, tdi2=False):
""" Return TDI A,E,T in frequency domain.
"""
# Check that lisabeta is installed
# TODO: to be removed in the end
try:
import lisabeta.lisa.lisa as lisa
except ImportError:
print("Could not import lisabeta.lisa.lisa")
return None, None, None
#Get the waveform parameters
if template is not None:
params = self.rename_as_lisabeta(template)
else:
# no individual source parameters are exposed here, so a template is required
raise ValueError("get_fd_tdiaet requires a template of source parameters")
#If the simulation relies on TDI2, use AET observables
if tdi2:
self.wvf_pars["TDI"] = "TDI2AET"
#Generate the TDI waveform of the BHB
if self.bbh_type.lower() == "mbhb" :
sig = lisa.GenerateLISATDISignal_SMBH(params, **self.wvf_pars)
if self.bbh_type.lower() in ["sobhb", "sbbh"]:
sig = lisa.GenerateLISATDISignal_SOBH(params, **self.wvf_pars)
#Signal info
df = 1/self.Tobs
mds = sig['tdi']['modes']
#Compute frequency range if not given
if freqs is None :
fmin = self.wvf_pars['minf']
fmax = self.wvf_pars['maxf']
for mod in mds:
fmin = min(fmin, np.min(sig['tdi'][mod]['freq']))
fmax = max(fmax, np.max(sig['tdi'][mod]['freq']))
fmin = np.floor(fmin/df)*df # redefine f0 to be an integer x df
freqs = np.arange(fmin, fmax, df)
linearscale = True
else:
df = freqs[1]-freqs[0]
linearscale = np.isclose(np.sum(np.diff(freqs)-df), 0)
#kmin = int(np.rint(freqs[0]/df))
#Get TDI signal
tdifreqseries = lisa.EvaluateTDIFreqseries(sig["tdi"], freqs)
#Compute the waveform in the frequency domain
A = np.zeros(len(freqs), dtype=np.complex128)
E = np.zeros(len(freqs), dtype=np.complex128)
T = np.zeros(len(freqs), dtype=np.complex128)
for md in mds:
A += np.conj(tdifreqseries[md]['chan1'])
E += np.conj(tdifreqseries[md]['chan2'])
T += np.conj(tdifreqseries[md]['chan3'])
if linearscale:
A = FrequencySeries(A, fs=freqs, name="A")
E = FrequencySeries(E, fs=freqs, name="E")
T = FrequencySeries(T, fs=freqs, name="T")
else:
fs = xr.DataArray(freqs, dims=('f'), attrs={'units': '1/s'})
A = xr.DataArray(A, dims=('f'), coords={'f': fs}, name='A', attrs={'df': df})
E = xr.DataArray(E, dims=('f'), coords={'f': fs}, name='E', attrs={'df': df})
T = xr.DataArray(T, dims=('f'), coords={'f': fs}, name='T', attrs={'df': df})
return A,E,T
def get_fd_tdixyz(self,**kwargs):
""" Return TDI X,Y,Z in frequency domain.
"""
#Get the AET combination
A,E,T = self.get_fd_tdiaet(**kwargs)
#Create a TDI object
tdi = TDI(dict(zip(['A', 'E', 'T'], [A, E, T])))
#Convert to XYZ combination
tdi.AET2XYZ()
return tdi["X"],tdi["Y"],tdi["Z"]
def get_td_tdiaet(self, **kwargs):
""" Return TDI A,E,T in time domain.
"""
#Get the waveform in the frequency domain
fA, fE, fT = self.get_fd_tdiaet(**kwargs)
#Compute the waveform in the time domain
tA = fA.ts.ifft(dt=self.dt)
tE = fE.ts.ifft(dt=self.dt)
tT = fT.ts.ifft(dt=self.dt)
return (tA,tE,tT)
def get_td_tdixyz(self, **kwargs):
""" Return TDI X,Y,Z in time domain.
"""
#Get the AET combination
A,E,T = self.get_td_tdiaet(**kwargs)
#Create a TDI object
tdi = TDI(dict(zip(['A', 'E', 'T'], [A, E, T])))
#Convert to XYZ combination
tdi.AET2XYZ()
return (tdi["X"],tdi["Y"],tdi["Z"])
def rename_as_lisabeta(self, params_i):
""" Rename fields to match lisabeta parameter names
"""
#Copy the parameters
params = params_i.copy()
#Spin projection
if 'PolarAngleOfSpin1' in params.keys():
params["Spin1"] = params['Spin1']*np.cos(params['PolarAngleOfSpin1'])
params["Spin2"] = params['Spin2']*np.cos(params['PolarAngleOfSpin2'])
k_param = ['PolarAngleOfSpin1', 'PolarAngleOfSpin2',
'ObservationDuration', 'Cadence', 'Redshift']
if self.bbh_type.lower() == "mbhb" :
psi, incl = tools.aziPolAngleL2PsiIncl(params["EclipticLatitude"],
params["EclipticLongitude"],
params['InitialPolarAngleL'],
params['InitialAzimuthalAngleL'])
params['Polarization'] = psi
params['Inclination'] = incl
k_param.extend(['InitialPolarAngleL','InitialAzimuthalAngleL'])
for k in k_param :
if k in params.keys():
params.pop(k)
#Parameters to match between LDC notation and lisabeta
dmatch = dict({'Mass1': "m1",
'Mass2': "m2",
'Spin1': "chi1",
'Spin2': "chi2",
'Distance': 'dist',
'Inclination': 'inc',
'EclipticLongitude': "lambda",
'EclipticLatitude': "beta",
"InitialFrequency": "fstart",
'Polarization': 'psi',
})
#Adding additional parameters
if self.bbh_type.lower() == "mbhb" :
dmatch['CoalescenceTime'] ='Deltat'
dmatch['PhaseAtCoalescence']='phi'
if self.bbh_type.lower() in ["sobhb", "sbbh"]:
dmatch['InitialPhase']='phi'
#Return only the relevant parameters
new_params = dict()
if isinstance(params, dict):
for k,v in params.items():
if k in dmatch.keys() :
new_params[dmatch[k]] = params[k]
else:
new_params = rfn.rename_fields(params, dmatch)
return new_params
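# Example usage (a minimal sketch; the call below is illustrative only and
# `template` would be an LDC parameter record with keys such as 'Mass1',
# 'Mass2', 'Spin1', 'Spin2', 'EclipticLatitude', 'EclipticLongitude', ...):
#
#     fast = FastBHB("mbhb", T=6.2914560e7, delta_t=5, approx="IMRPhenomD")
#     # X, Y, Z = fast.get_fd_tdixyz(template=template)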
|
PypiClean
|
/ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/docs/readme_config.txt
|
BBTools Config File Readme
Written by Brian Bushnell
Last updated May 12, 2015
A config file is a text file with a set of parameters that will be added to the command line.
The format is one parameter per line, with the # symbol indicating comments.
To use a config file, use the config=file flag. For example, take BBDuk:
bbduk.sh in=reads.fq out=trimmed.fq ref=ref.fa k=23 mink=11 hdist=1 tbo tpe
That is equivalent to:
bbduk.sh in=reads.fq out=trimmed.fq ref=ref.fa config=trimadapters.txt
...if trimadapters.txt contained these lines:
k=23
mink=11
hdist=1
tbo
tpe
Any parameter placed AFTER the config file will override the same parameter if it is in the config file.
For example, in this case k=20 will be used:
bbduk.sh in=reads.fq out=trimmed.fq ref=ref.fa config=trimadapters.txt k=20
But in this case, k=23 will be used, from the config file:
bbduk.sh in=reads.fq out=trimmed.fq ref=ref.fa k=20 config=trimadapters.txt
What are config files for? Well, mainly, to overcome difficulties like whitespace in file paths, or command lines that are too long.
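As a hypothetical illustration (the file name and paths below are made up), a config file named args.txt containing:
in=/my data/reads.fq
out=/my data/trimmed.fq
ref=ref.fa
could then be used as:
bbduk.sh config=args.txt k=23
which sidesteps quoting problems caused by the spaces in the paths.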
There are some example config files in bbmap/config/. They are not used unless you specifically tell a program to use them.
|
PypiClean
|
/pytdx-async-1.62.tar.gz/pytdx-async-1.62/pytdx/reader/lc_min_bar_reader.py
|
from __future__ import unicode_literals, division
import pandas as pd
import os
from pytdx.reader.base_reader import TdxFileNotFoundException, TdxNotAssignVipdocPathException
from pytdx.reader.base_reader import BaseReader
from collections import OrderedDict
"""
Format notes circulated online...
2. Tongdaxin (TDX) 5-minute bar *.lc5 and *.lc1 files. The file name is the stock code. Each 5-minute record is 32 bytes,
with the low byte first within each field. Bytes 00-01: date, integer; let its value be num, then the date is computed as
year = floor(num/2048) + 2004; month = floor(mod(num, 2048)/100); day = mod(mod(num, 2048), 100).
Bytes 02-03: minutes elapsed since 00:00, integer. Bytes 04-07: open price, float. Bytes 08-11: high price, float. Bytes 12-15: low price,
float. Bytes 16-19: close price, float. Bytes 20-23: turnover (amount), float. Bytes 24-27: volume (shares), integer. Bytes 28-31: (reserved).
"""
class TdxLCMinBarReader(BaseReader):
"""
Read Tongdaxin (TDX) minute bar data
"""
def parse_data_by_file(self, fname):
if not os.path.isfile(fname):
raise TdxFileNotFoundException('no tdx kline data, please check path %s' % fname)
with open(fname, 'rb') as f:
content = f.read()
raw_li = self.unpack_records("<HHfffffII", content)
data = []
for row in raw_li:
year, month, day = self._parse_date(row[0])
hour, minute = self._parse_time(row[1])
data.append(OrderedDict([
("date", "%04d-%02d-%02d %02d:%02d" % (year, month, day, hour, minute)),
("year", year),
('month', month),
('day', day),
('hour', hour),
('minute', minute),
('open', row[2]),
('high', row[3]),
('low', row[4]),
('close', row[5]),
('amount', row[6]),
('volume', row[7]),
#('unknown', row[8])
]))
return data
return []
def get_df(self, code_or_file, exchange=None):
#if exchange == None:
# only one argument was passed
data = self.parse_data_by_file(code_or_file)
#else:
# data = [self._df_convert(row) for row in self.get_kline_by_code(code_or_file, exchange)]
df = pd.DataFrame(data=data)
df.index = pd.to_datetime(df.date)
return df[['open', 'high', 'low', 'close', 'amount', 'volume']]
def _parse_date(self, num):
year = num // 2048 + 2004
month = (num % 2048) // 100
day = (num % 2048) % 100
return year, month, day
def _parse_time(self, num):
return (num // 60) , (num % 60)
if __name__ == '__main__':
reader = TdxLCMinBarReader()
df = reader.get_df("/Users/rainx/Downloads/sz000001.lc5")
print(df)
print(df['2017-07-26'].sum())
|
PypiClean
|
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/hpack.js/node_modules/readable-stream/doc/wg-meetings/2015-01-30.md
|
# streams WG Meeting 2015-01-30
## Links
* **Google Hangouts Video**: http://www.youtube.com/watch?v=I9nDOSGfwZg
* **GitHub Issue**: https://github.com/iojs/readable-stream/issues/106
* **Original Minutes Google Doc**: https://docs.google.com/document/d/17aTgLnjMXIrfjgNaTUnHQO7m3xgzHR2VXBTmi03Qii4/
## Agenda
Extracted from https://github.com/iojs/readable-stream/labels/wg-agenda prior to meeting.
* adopt a charter [#105](https://github.com/iojs/readable-stream/issues/105)
* release and versioning strategy [#101](https://github.com/iojs/readable-stream/issues/101)
* simpler stream creation [#102](https://github.com/iojs/readable-stream/issues/102)
* proposal: deprecate implicit flowing of streams [#99](https://github.com/iojs/readable-stream/issues/99)
## Minutes
### adopt a charter
* group: +1's all around
### What versioning scheme should be adopted?
* group: +1’s 3.0.0
* domenic+group: pulling in patches from other sources where appropriate
* mikeal: version independently, suggesting versions for io.js
* mikeal+domenic: work with TC to notify in advance of changes
simpler stream creation
### streamline creation of streams
* sam: streamline creation of streams
* domenic: nice simple solution posted
but, we lose the opportunity to change the model
may not be backwards incompatible (double check keys)
**action item:** domenic will check
### remove implicit flowing of streams on(‘data’)
* add isFlowing / isPaused
* mikeal: worrying that we’re documenting polyfill methods – confuses users
* domenic: more reflective API is probably good, with warning labels for users
* new section for mad scientists (reflective stream access)
* calvin: name the “third state”
* mikeal: maybe borrow the name from whatwg?
* domenic: we’re missing the “third state”
* consensus: kind of difficult to name the third state
* mikeal: figure out differences in states / compat
* mathias: always flow on data – eliminates third state
* explore what it breaks
**action items:**
* ask isaac for ability to list packages by what public io.js APIs they use (esp. Stream)
* ask rod/build for infrastructure
* **chris**: explore the “flow on data” approach
* add isPaused/isFlowing
* add new docs section
* move isPaused to that section
|
PypiClean
|
/iminuit-2.22.0.tar.gz/iminuit-2.22.0/doc/notebooks/conditional_variable.ipynb
|
# Fit PDF with conditional variable
In this example, we show an unusual fit where the total sample is not drawn from a single probability distribution, but each individual sample $x$ is drawn from a different distribution, whose parameters are determined by a conditional variable $y$.
In our example, we are drawing samples $x$ from varying Gaussian distributions. The location of each Gaussian is a function of the conditional variable $y$, but all share the same width parameter $\sigma$. We fit the shared parameter $\sigma$, but also the parameters $a$ and $b$ which determine how the location of each Gaussian depends on $y$, assuming a linear function $\mu = a + b y$.
This tutorial reproduces a [corresponding one from RooFit](https://root.cern.ch/doc/master/rf303__conditional_8C.html).
```
import iminuit
from iminuit.cost import UnbinnedNLL
from iminuit import Minuit
import numpy as np
import numba as nb
import boost_histogram as bh
import matplotlib.pyplot as plt
from scipy.stats import norm
from numba_stats import norm as norm_nb
print("iminuit version", iminuit.__version__)
rng = np.random.default_rng(1)
# conditional variable: each sample is paired with a random y parameter
y = rng.normal(0, 10, size=10000)
y = y[np.abs(y) < 10] # truncate at 10
# location of each gaussian is a function of y
def mu(y, a, b):
return a + b * y
# draw samples from Gaussians whose locations depend on y
truth = {"a": 0, "b": 0.5, "sigma": 1.0}
x = rng.normal(mu(y, truth["a"], truth["b"]), truth["sigma"])
```
The distribution in $x$ is broader than the usual Gaussian because it is a convolution of many Gaussian distributions with varying means. We can visualise this by binning the data in $x$ and $y$.
```
ax_x = bh.axis.Regular(100, -10, 10)
ax_y = bh.axis.Regular(5, -10, 10)
h = bh.Histogram(ax_x, ax_y)
h.fill(x, y)
for i, (a, b) in enumerate(ax_y):
plt.stairs(h.values()[:,i], ax_x.edges, label=f"[{a}, {b})",
fill=True, alpha=0.2)
h1 = h[:, sum]
plt.stairs(h1.values(), ax_x.edges, color="k", label="total")
plt.xlabel("x")
plt.ylabel("events")
plt.legend(title="y interval", frameon=False, handlelength=1.2);
```
## Fit with conditional variable
The random distribution of $x$ depends on the value of $y$. We can exploit that information in the likelihood function to obtain a more accurate estimate of the parameters.
```
def model(xy, a, b, sigma):
x, y = xy
mu = a + b * y
# cannot use norm.pdf from numba_stats here, because it is not vectorized in mu
return norm.pdf(x, mu, sigma)
nll = UnbinnedNLL((x, y), model)
m = Minuit(nll, 0.0, 0.0, 2.0)
m.limits["sigma"] = (0, None)
m.migrad()
# construct model representation for comparison with data histogram
a, b, sigma = m.values
# get expected content per bin from cdf, sum over the individual cdfs
v = np.diff(np.sum(norm.cdf(ax_x.edges[:,np.newaxis],
mu(y, a, b), sigma), axis=1))
plt.stairs(v, ax_x.edges, label="model", zorder=5, lw=2)
plt.errorbar(ax_x.centers, h1.values(), h1.variances() ** 0.5,
fmt="ok", label="data")
plt.xlabel("x")
plt.ylabel("events")
plt.legend(frameon=False);
```
## Fit without conditional variable
We can also ignore the dependence of $x$ on $y$ and just fit the total $x$ distribution with a model built from the distribution of $y$ values. This also works in this case, but information is lost and therefore the parameter uncertainties become larger than in the previous case.
On top of that, the calculation is much slower, because building the pdf is more expensive. We parallelise the computation with numba.
```
nb.config.THREADING_LAYER = 'workqueue'
@nb.njit(parallel=True, fastmath=True)
def model(x, a, b, sigma):
mu = a + b * y
total = np.zeros_like(x)
for i in nb.prange(len(mu)):
total += norm_nb.pdf(x, mu[i], sigma)
return total
nll = UnbinnedNLL(x, model)
m2 = Minuit(nll, 0.0, 0.0, 2.0)
m2.limits["sigma"] = (0, None)
m2.migrad()
fig, ax = plt.subplots(1, 3, figsize=(8, 2), constrained_layout=True)
for par, axi in zip(m.parameters, ax):
axi.set_title(par)
t = truth[par]
axi.axhline(t, ls="--", color="0.5")
axi.errorbar(["with\n conditional"], m.values[par],
m.errors[par], fmt="ok")
axi.errorbar(["without\n conditional"], m2.values[par],
m2.errors[par], fmt="or")
axi.set_xlim(-0.5, 1.5)
dt = 2 * m2.errors[par]
axi.set_ylim(t - dt, t + dt)
```
|
PypiClean
|
/ladybug-display-0.10.1.tar.gz/ladybug-display-0.10.1/ladybug_display/extension/study/radiation.py
|
from ladybug_geometry.bounding import bounding_box
from ladybug_geometry.geometry3d import Mesh3D
from ladybug.color import Colorset, Color
from ladybug.legend import LegendParameters
from ladybug.graphic import GraphicContainer
from ladybug.datatype.energyintensity import Radiation
from ladybug.datatype.energyflux import Irradiance
from ladybug_display.geometry3d import DisplayText3D, DisplayMesh3D, DisplayFace3D
from ladybug_display.visualization import VisualizationSet, AnalysisGeometry, \
VisualizationData, ContextGeometry
def radiation_study_to_vis_set(
radiation_study, legend_parameters=None, plot_irradiance=False,
include_title=True, include_context=False):
"""Translate radiation study into a VisualizationSet.
Args:
radiation_study: A Ladybug-Radiance RadiationStudy object.
legend_parameters: An optional LegendParameter object to change the display
of the radiation study. If None, default legend parameters will be
used. (Default: None).
plot_irradiance: Boolean to note whether the results should be plotted
with units of total Radiation (kWh/m2) [False] or with units of average
Irradiance (W/m2) [True]. (Default: False).
include_title: Boolean to note whether the title should be included
in the output visualization. (Default: True).
include_context: Boolean to note whether the context geometry should be
included in the output visualization. (Default: False).
Returns:
A VisualizationSet with the radiation study represented as an
AnalysisGeometry. This includes these objects in the following order.
- Radiation_Data -- An AnalysisGeometry for the radiation data.
- Title -- A ContextGeometry with text for the title of the study.
This layer will be excluded if include_title is False.
- Context_Geometry -- A ContextGeometry with the shading context used
in the study. This layer will be excluded when include_context is False.
"""
# get the radiation data
if plot_irradiance:
d_type, unit, title = Irradiance(), 'W/m2', 'Incident Irradiance'
rad_data = radiation_study.irradiance_values
else:
d_type, unit, title = Radiation(), 'kWh/m2', 'Incident Radiation'
rad_data = radiation_study.radiation_values
if radiation_study.is_benefit:
title = '{} Benefit/Harm'.format(title)
# process the legend parameters and override the legend colors
if legend_parameters is not None:
assert isinstance(legend_parameters, LegendParameters), \
'Expected LegendParameters. Got {}.'.format(type(legend_parameters))
l_par = legend_parameters.duplicate()
else:
l_par = LegendParameters()
if radiation_study.is_benefit:
if l_par.min is None:
l_par.min = min((min(rad_data), -max(rad_data)))
if l_par.max is None:
l_par.max = max((-min(rad_data), max(rad_data)))
if l_par.are_colors_default:
l_par.colors = reversed(Colorset.benefit_harm())
else:
if l_par.min is None:
l_par.min = 0
if l_par.max is None:
l_par.max = max(rad_data)
# create the visualization set object
vis_set = VisualizationSet('RadiationStudy', ())
vis_set.display_name = 'Radiation Study'
# create the AnalysisGeometry
vis_data = VisualizationData(rad_data, l_par, d_type, unit)
mesh_geo = AnalysisGeometry(
'Radiation_Data', [radiation_study.study_mesh], [vis_data])
mesh_geo.display_name = 'Radiation Data'
mesh_geo.display_mode = 'Surface'
vis_set.add_geometry(mesh_geo)
# create the ContextGeometry for the title
if include_title:
all_geo = (radiation_study.study_mesh,) + radiation_study.context_geometry
min_pt, max_pt = bounding_box(all_geo)
graphic = GraphicContainer(
rad_data, min_pt, max_pt, l_par, d_type, unit)
study_title = DisplayText3D(
title, graphic.lower_title_location,
graphic.legend_parameters.text_height, None,
graphic.legend_parameters.font, 'Left', 'Top')
title_geo = ContextGeometry('Title', [study_title])
vis_set.add_geometry(title_geo)
# create the ContextGeometry for the context
if include_context:
con_color = Color(125, 125, 125, 125)
con_geos = []
for geo in radiation_study.context_geometry:
if isinstance(geo, Mesh3D):
con_geos.append(DisplayMesh3D(geo, con_color))
else: # it's a Face3D
con_geos.append(DisplayFace3D(geo, con_color))
context_geo = ContextGeometry('Context_Geometry', con_geos)
context_geo.display_name = 'Context Geometry'
vis_set.add_geometry(context_geo)
return vis_set
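# The block below is an illustrative usage sketch only (not part of the
# original module): it assumes a ladybug-radiance RadiationStudy has already
# been computed elsewhere, since building one requires study geometry and a
# sky matrix that are out of scope here; the legend bounds are made up.
if __name__ == '__main__':
    from ladybug.legend import LegendParameters

    study = None  # replace with a real ladybug_radiance RadiationStudy object
    if study is not None:
        leg_par = LegendParameters(min=0, max=1400)  # hypothetical kWh/m2 bounds
        vis_set = radiation_study_to_vis_set(
            study, legend_parameters=leg_par, plot_irradiance=False,
            include_title=True, include_context=True)
        print(vis_set)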
|
PypiClean
|
/AbSort-0.0.1.tar.gz/AbSort-0.0.1/README.txt
|
AbSort
=======
AbSort is a Python package that implements 8 different stable and unstable sorting algorithms for the list data structure, with full documentation.
The package is aimed at competitive programmers and developers; choose the sorting algorithm that fits your requirements.
USAGE
======
Importing the library
---------------------
- import AbSort
Creating the object
sortObj = AbSort.SortingAlgo()
awsort
======
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sorted result
sortedResult = sortObj.awsort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it with
print(sortedResult)
SelectionSort
=============
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sorted result
sortedResult = sortObj.selectionSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it with
print(sortedResult)
mergeSort
=========
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sorted result
sortedResult = sortObj.mergeSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it with
print(sortedResult)
quickSort
=========
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sorted result
sortedResult = sortObj.quickSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it with
print(sortedResult)
bogoSort
========
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sorted result
sortedResult = sortObj.bogoSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it with
print(sortedResult)
insertionSort
=============
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sorted result
sortedResult = sortObj.insertionSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it with
print(sortedResult)
binaryInsertionSort
===================
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sorted result
sortedResult = sortObj.binaryInsertionSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it with
print(sortedResult)
bubbleSort
==========
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
- Generating the sorted result
sortedResult = sortObj.bubbleSort(myList)
- Since the sortedResult variable now contains the sorted list, we can print it with
print(sortedResult)
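Complete example
================
- Putting the pieces above together (illustrative only; it simply reuses the documented SortingAlgo methods on one list):
import AbSort
sortObj = AbSort.SortingAlgo()
myList = [490, 395, 388, 496, 497, 350, 469, 435, 310, 1, 3, 2, 7, 4]
print(sortObj.mergeSort(myList))
print(sortObj.quickSort(myList))
print(sortObj.bubbleSort(myList))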
Dependencies
============
- Python v3.x is required.
|
PypiClean
|
/tensorflow_object_detection-0.0.6.tar.gz/tensorflow_object_detection-0.0.6/tf_object_detection/research/object_detection/tpu_exporters/ssd.py
|
"""Python library for ssd model, tailored for TPU inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# pylint: disable=g-import-not-at-top
# Checking TF version, because this module relies on TPUPartitionedCall
# in tensorflow.python.tpu, which is not available until TF r1.14.
major, minor, _ = tf.__version__.split('.') # pylint: disable=protected-access
if int(major) < 1 or (int(major) == 1 and int(minor) < 14):
raise RuntimeError(
'TensorFlow version >= 1.14 is required. Found ({}).'.format(
tf.__version__)) # pylint: disable=protected-access
from tensorflow.python.framework import function
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu.ops import tpu_ops
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.tpu_exporters import utils
ANCHORS = 'anchors'
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
def get_prediction_tensor_shapes(pipeline_config):
"""Gets static shapes of tensors by building the graph on CPU.
This function builds the graph on CPU and obtains the static shapes of the
output tensors from TPUPartitionedCall. The shape information is later used to
set the shapes of tensors when the TPU graph is built. This is necessary
because tensors coming out of TPUPartitionedCall lose their shape information,
which is needed by many CPU operations later.
Args:
pipeline_config: A TrainEvalPipelineConfig proto.
Returns:
A python dict of tensors' names and their shapes.
"""
detection_model = model_builder.build(
pipeline_config.model, is_training=False)
_, input_tensors = exporter.input_placeholder_fn_map['image_tensor']()
inputs = tf.cast(input_tensors, dtype=tf.float32)
preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
prediction_dict = detection_model.predict(preprocessed_inputs,
true_image_shapes)
return {
BOX_ENCODINGS:
prediction_dict[BOX_ENCODINGS].shape.as_list(),
CLASS_PREDICTIONS_WITH_BACKGROUND:
prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND].shape.as_list(),
ANCHORS:
prediction_dict[ANCHORS].shape.as_list(),
}
def recover_shape(preprocessed_inputs, prediction_outputs, shapes_info):
"""Recovers shape from TPUPartitionedCall.
Args:
preprocessed_inputs: 4D tensor, shaped (batch, channels, height, width)
prediction_outputs: Python list of tensors, in the following order -
box_encodings - 3D tensor, shaped (code_size, batch, num_anchors);
class_predictions_with_background - 3D tensor, shaped (num_classes + 1,
batch, num_anchors); anchors - 2D tensor, shaped (4, num_anchors)
shapes_info: Python dict of tensor shapes as lists.
Returns:
preprocessed_inputs: 4D tensor, shaped (batch, height, width, channels)
box_encodings: 3D tensor, shaped (batch, num_anchors, code_size)
class_predictions_with_background: 3D tensor,
shaped (batch, num_anchors, num_classes + 1)
anchors: 2D tensor, shaped (num_anchors, 4)
"""
# Dimshuffle: (b, c, h, w) -> (b, h, w, c)
preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1])
box_encodings = tf.transpose(prediction_outputs[0], perm=[1, 2, 0])
# [None, None, detection_model._box_coder.code_size]
box_encodings.set_shape(shapes_info[BOX_ENCODINGS])
class_predictions_with_background = tf.transpose(
prediction_outputs[1], perm=[1, 2, 0])
# [None, None, num_classes + 1]
class_predictions_with_background.set_shape(
shapes_info[CLASS_PREDICTIONS_WITH_BACKGROUND])
anchors = tf.transpose(prediction_outputs[2], perm=[1, 0])
# [None, 4]
anchors.set_shape(shapes_info[ANCHORS])
return (preprocessed_inputs, box_encodings, class_predictions_with_background,
anchors)
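# Illustrative sketch only (not part of the original module): it shows, on
# made-up shapes, the transpose + set_shape pattern that recover_shape()
# applies to the real TPUPartitionedCall outputs, whose static shapes are lost.
def _recover_shape_example():
  """Rebuilds a (batch, num_anchors, code_size) tensor with a static shape
  from a TPU-style (code_size, batch, num_anchors) tensor."""
  box_encodings_tpu = tf.zeros([4, 2, 100])  # (code_size, batch, num_anchors)
  box_encodings = tf.transpose(box_encodings_tpu, perm=[1, 2, 0])
  box_encodings.set_shape([2, 100, 4])  # (batch, num_anchors, code_size)
  return box_encodings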
def build_graph(pipeline_config,
shapes_info,
input_type='encoded_image_string_tensor',
use_bfloat16=False):
"""Builds TPU serving graph of ssd to be exported.
Args:
pipeline_config: A TrainEvalPipelineConfig proto.
shapes_info: A python dict of tensors' names and their shapes, returned by
`get_prediction_tensor_shapes()`.
input_type: One of
'encoded_image_string_tensor': a 1d tensor with dtype=tf.string
'image_tensor': a 4d tensor with dtype=tf.uint8
'tf_example': a 1d tensor with dtype=tf.string
use_bfloat16: If true, use tf.bfloat16 on TPU.
Returns:
placeholder_tensor: A placeholder tensor, type determined by `input_type`.
result_tensor_dict: A python dict of tensors' names and tensors.
"""
detection_model = model_builder.build(
pipeline_config.model, is_training=False)
placeholder_tensor, input_tensors = \
exporter.input_placeholder_fn_map[input_type]()
inputs = tf.cast(input_tensors, dtype=tf.float32)
preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
# Dimshuffle: (b, h, w, c) -> (b, c, h, w)
# This is to avoid extra padding due to TPU memory layout:
# We swap larger dimensions in and smaller dimensions out, so that small
# dimensions don't get padded to tens / hundreds of times their own size.
# This trick is applied to other similar tensors below.
preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 3, 1, 2])
if use_bfloat16:
preprocessed_inputs = tf.cast(preprocessed_inputs, dtype=tf.bfloat16)
def predict_tpu_subgraph(preprocessed_inputs, true_image_shapes):
"""Wraps over the CPU version of `predict()`.
This builds a same graph as the original `predict()`, manipulates
result tensors' dimensions to be memory efficient on TPU, and
returns them as list of tensors.
Args:
preprocessed_inputs: A 4D tensor of shape (batch, channels, height, width)
true_image_shapes: True image shapes tensor.
Returns:
A Python list of tensors:
box_encodings: 3D tensor of shape (code_size, batch_size, num_anchors)
class_predictions_with_background: 3D tensor,
shape (num_classes + 1, batch_size, num_anchors)
anchors: 2D tensor of shape (4, num_anchors)
"""
# Dimshuffle: (b, c, h, w) -> (b, h, w, c)
preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1])
if use_bfloat16:
with tf.contrib.tpu.bfloat16_scope():
prediction_dict = detection_model.predict(preprocessed_inputs,
true_image_shapes)
else:
prediction_dict = detection_model.predict(preprocessed_inputs,
true_image_shapes)
# Dimshuffle: (batch, anchors, depth) -> (depth, batch, anchors)
return [
tf.transpose(prediction_dict[BOX_ENCODINGS], perm=[2, 0, 1]),
tf.transpose(
prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND], perm=[2, 0, 1]),
tf.transpose(prediction_dict[ANCHORS], perm=[1, 0]),
]
@function.Defun(capture_resource_var_by_value=False)
def predict_tpu():
return tf.contrib.tpu.rewrite(predict_tpu_subgraph,
[preprocessed_inputs, true_image_shapes])
prediction_outputs = tpu_functional.TPUPartitionedCall(
args=predict_tpu.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in predict_tpu.definition.signature.output_arg],
f=predict_tpu)
(preprocessed_inputs, box_encodings, class_predictions_with_background,
anchors) = recover_shape(preprocessed_inputs, prediction_outputs,
shapes_info)
output_tensors = {
'preprocessed_inputs': preprocessed_inputs,
BOX_ENCODINGS: box_encodings,
CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background,
ANCHORS: anchors,
}
if use_bfloat16:
output_tensors = utils.bfloat16_to_float32_nested(output_tensors)
postprocessed_tensors = detection_model.postprocess(output_tensors,
true_image_shapes)
result_tensor_dict = exporter.add_output_tensor_nodes(postprocessed_tensors,
'inference_op')
return placeholder_tensor, result_tensor_dict
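# Hedged sketch (not part of the original module) of how the two entry points
# above are typically combined; `pipeline_config` is assumed to be an
# already-parsed TrainEvalPipelineConfig proto, and the real exporter in this
# package additionally wraps the result in a SavedModel.
def _export_graph_sketch(pipeline_config):
  shapes_info = get_prediction_tensor_shapes(pipeline_config)
  # get_prediction_tensor_shapes built a CPU graph; start from a clean graph
  # before building the TPU serving graph.
  tf.reset_default_graph()
  return build_graph(pipeline_config, shapes_info,
                     input_type='image_tensor', use_bfloat16=False)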
|
PypiClean
|
/django-filebrowser-4.0.3.tar.gz/django-filebrowser-4.0.3/docs/_build/_static/js/modernizr.min.js
|
;window.Modernizr=function(a,b,c){function D(a){j.cssText=a}function E(a,b){return D(n.join(a+";")+(b||""))}function F(a,b){return typeof a===b}function G(a,b){return!!~(""+a).indexOf(b)}function H(a,b){for(var d in a){var e=a[d];if(!G(e,"-")&&j[e]!==c)return b=="pfx"?e:!0}return!1}function I(a,b,d){for(var e in a){var f=b[a[e]];if(f!==c)return d===!1?a[e]:F(f,"function")?f.bind(d||b):f}return!1}function J(a,b,c){var d=a.charAt(0).toUpperCase()+a.slice(1),e=(a+" "+p.join(d+" ")+d).split(" ");return F(b,"string")||F(b,"undefined")?H(e,b):(e=(a+" "+q.join(d+" ")+d).split(" "),I(e,b,c))}function K(){e.input=function(c){for(var d=0,e=c.length;d<e;d++)u[c[d]]=c[d]in k;return u.list&&(u.list=!!b.createElement("datalist")&&!!a.HTMLDataListElement),u}("autocomplete autofocus list placeholder max min multiple pattern required step".split(" ")),e.inputtypes=function(a){for(var d=0,e,f,h,i=a.length;d<i;d++)k.setAttribute("type",f=a[d]),e=k.type!=="text",e&&(k.value=l,k.style.cssText="position:absolute;visibility:hidden;",/^range$/.test(f)&&k.style.WebkitAppearance!==c?(g.appendChild(k),h=b.defaultView,e=h.getComputedStyle&&h.getComputedStyle(k,null).WebkitAppearance!=="textfield"&&k.offsetHeight!==0,g.removeChild(k)):/^(search|tel)$/.test(f)||(/^(url|email)$/.test(f)?e=k.checkValidity&&k.checkValidity()===!1:e=k.value!=l)),t[a[d]]=!!e;return t}("search tel url email datetime date month week time datetime-local number range color".split(" "))}var d="2.6.2",e={},f=!0,g=b.documentElement,h="modernizr",i=b.createElement(h),j=i.style,k=b.createElement("input"),l=":)",m={}.toString,n=" -webkit- -moz- -o- -ms- ".split(" "),o="Webkit Moz O ms",p=o.split(" "),q=o.toLowerCase().split(" "),r={svg:"http://www.w3.org/2000/svg"},s={},t={},u={},v=[],w=v.slice,x,y=function(a,c,d,e){var f,i,j,k,l=b.createElement("div"),m=b.body,n=m||b.createElement("body");if(parseInt(d,10))while(d--)j=b.createElement("div"),j.id=e?e[d]:h+(d+1),l.appendChild(j);return f=["­",'<style id="s',h,'">',a,"</style>"].join(""),l.id=h,(m?l:n).innerHTML+=f,n.appendChild(l),m||(n.style.background="",n.style.overflow="hidden",k=g.style.overflow,g.style.overflow="hidden",g.appendChild(n)),i=c(l,a),m?l.parentNode.removeChild(l):(n.parentNode.removeChild(n),g.style.overflow=k),!!i},z=function(b){var c=a.matchMedia||a.msMatchMedia;if(c)return c(b).matches;var d;return y("@media "+b+" { #"+h+" { position: absolute; } }",function(b){d=(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle)["position"]=="absolute"}),d},A=function(){function d(d,e){e=e||b.createElement(a[d]||"div"),d="on"+d;var f=d in e;return f||(e.setAttribute||(e=b.createElement("div")),e.setAttribute&&e.removeAttribute&&(e.setAttribute(d,""),f=F(e[d],"function"),F(e[d],"undefined")||(e[d]=c),e.removeAttribute(d))),e=null,f}var a={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return d}(),B={}.hasOwnProperty,C;!F(B,"undefined")&&!F(B.call,"undefined")?C=function(a,b){return B.call(a,b)}:C=function(a,b){return b in a&&F(a.constructor.prototype[b],"undefined")},Function.prototype.bind||(Function.prototype.bind=function(b){var c=this;if(typeof c!="function")throw new TypeError;var d=w.call(arguments,1),e=function(){if(this instanceof e){var a=function(){};a.prototype=c.prototype;var f=new a,g=c.apply(f,d.concat(w.call(arguments)));return Object(g)===g?g:f}return c.apply(b,d.concat(w.call(arguments)))};return e}),s.flexbox=function(){return J("flexWrap")},s.canvas=function(){var 
a=b.createElement("canvas");return!!a.getContext&&!!a.getContext("2d")},s.canvastext=function(){return!!e.canvas&&!!F(b.createElement("canvas").getContext("2d").fillText,"function")},s.webgl=function(){return!!a.WebGLRenderingContext},s.touch=function(){var c;return"ontouchstart"in a||a.DocumentTouch&&b instanceof DocumentTouch?c=!0:y(["@media (",n.join("touch-enabled),("),h,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(a){c=a.offsetTop===9}),c},s.geolocation=function(){return"geolocation"in navigator},s.postmessage=function(){return!!a.postMessage},s.websqldatabase=function(){return!!a.openDatabase},s.indexedDB=function(){return!!J("indexedDB",a)},s.hashchange=function(){return A("hashchange",a)&&(b.documentMode===c||b.documentMode>7)},s.history=function(){return!!a.history&&!!history.pushState},s.draganddrop=function(){var a=b.createElement("div");return"draggable"in a||"ondragstart"in a&&"ondrop"in a},s.websockets=function(){return"WebSocket"in a||"MozWebSocket"in a},s.rgba=function(){return D("background-color:rgba(150,255,150,.5)"),G(j.backgroundColor,"rgba")},s.hsla=function(){return D("background-color:hsla(120,40%,100%,.5)"),G(j.backgroundColor,"rgba")||G(j.backgroundColor,"hsla")},s.multiplebgs=function(){return D("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(j.background)},s.backgroundsize=function(){return J("backgroundSize")},s.borderimage=function(){return J("borderImage")},s.borderradius=function(){return J("borderRadius")},s.boxshadow=function(){return J("boxShadow")},s.textshadow=function(){return b.createElement("div").style.textShadow===""},s.opacity=function(){return E("opacity:.55"),/^0.55$/.test(j.opacity)},s.cssanimations=function(){return J("animationName")},s.csscolumns=function(){return J("columnCount")},s.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";return D((a+"-webkit- ".split(" ").join(b+a)+n.join(c+a)).slice(0,-a.length)),G(j.backgroundImage,"gradient")},s.cssreflections=function(){return J("boxReflect")},s.csstransforms=function(){return!!J("transform")},s.csstransforms3d=function(){var a=!!J("perspective");return a&&"webkitPerspective"in g.style&&y("@media (transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(b,c){a=b.offsetLeft===9&&b.offsetHeight===3}),a},s.csstransitions=function(){return J("transition")},s.fontface=function(){var a;return y('@font-face {font-family:"font";src:url("https://")}',function(c,d){var e=b.getElementById("smodernizr"),f=e.sheet||e.styleSheet,g=f?f.cssRules&&f.cssRules[0]?f.cssRules[0].cssText:f.cssText||"":"";a=/src/i.test(g)&&g.indexOf(d.split(" ")[0])===0}),a},s.generatedcontent=function(){var a;return y(["#",h,"{font:0/0 a}#",h,':after{content:"',l,'";visibility:hidden;font:3px/1 a}'].join(""),function(b){a=b.offsetHeight>=3}),a},s.video=function(){var a=b.createElement("video"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),c.h264=a.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,"")}catch(d){}return c},s.audio=function(){var a=b.createElement("audio"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),c.mp3=a.canPlayType("audio/mpeg;").replace(/^no$/,""),c.wav=a.canPlayType('audio/wav; 
codecs="1"').replace(/^no$/,""),c.m4a=(a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;")).replace(/^no$/,"")}catch(d){}return c},s.localstorage=function(){try{return localStorage.setItem(h,h),localStorage.removeItem(h),!0}catch(a){return!1}},s.sessionstorage=function(){try{return sessionStorage.setItem(h,h),sessionStorage.removeItem(h),!0}catch(a){return!1}},s.webworkers=function(){return!!a.Worker},s.applicationcache=function(){return!!a.applicationCache},s.svg=function(){return!!b.createElementNS&&!!b.createElementNS(r.svg,"svg").createSVGRect},s.inlinesvg=function(){var a=b.createElement("div");return a.innerHTML="<svg/>",(a.firstChild&&a.firstChild.namespaceURI)==r.svg},s.smil=function(){return!!b.createElementNS&&/SVGAnimate/.test(m.call(b.createElementNS(r.svg,"animate")))},s.svgclippaths=function(){return!!b.createElementNS&&/SVGClipPath/.test(m.call(b.createElementNS(r.svg,"clipPath")))};for(var L in s)C(s,L)&&(x=L.toLowerCase(),e[x]=s[L](),v.push((e[x]?"":"no-")+x));return e.input||K(),e.addTest=function(a,b){if(typeof a=="object")for(var d in a)C(a,d)&&e.addTest(d,a[d]);else{a=a.toLowerCase();if(e[a]!==c)return e;b=typeof b=="function"?b():b,typeof f!="undefined"&&f&&(g.className+=" "+(b?"":"no-")+a),e[a]=b}return e},D(""),i=k=null,function(a,b){function k(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function l(){var a=r.elements;return typeof a=="string"?a.split(" "):a}function m(a){var b=i[a[g]];return b||(b={},h++,a[g]=h,i[h]=b),b}function n(a,c,f){c||(c=b);if(j)return c.createElement(a);f||(f=m(c));var g;return f.cache[a]?g=f.cache[a].cloneNode():e.test(a)?g=(f.cache[a]=f.createElem(a)).cloneNode():g=f.createElem(a),g.canHaveChildren&&!d.test(a)?f.frag.appendChild(g):g}function o(a,c){a||(a=b);if(j)return a.createDocumentFragment();c=c||m(a);var d=c.frag.cloneNode(),e=0,f=l(),g=f.length;for(;e<g;e++)d.createElement(f[e]);return d}function p(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return r.shivMethods?n(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+l().join().replace(/\w+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(r,b.frag)}function q(a){a||(a=b);var c=m(a);return r.shivCSS&&!f&&!c.hasCSS&&(c.hasCSS=!!k(a,"article,aside,figcaption,figure,footer,header,hgroup,nav,section{display:block}mark{background:#FF0;color:#000}")),j||p(a,c),a}var c=a.html5||{},d=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,e=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,f,g="_html5shiv",h=0,i={},j;(function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",f="hidden"in a,j=a.childNodes.length==1||function(){b.createElement("a");var a=b.createDocumentFragment();return typeof a.cloneNode=="undefined"||typeof a.createDocumentFragment=="undefined"||typeof a.createElement=="undefined"}()}catch(c){f=!0,j=!0}})();var r={elements:c.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time 
video",shivCSS:c.shivCSS!==!1,supportsUnknownElements:j,shivMethods:c.shivMethods!==!1,type:"default",shivDocument:q,createElement:n,createDocumentFragment:o};a.html5=r,q(b)}(this,b),e._version=d,e._prefixes=n,e._domPrefixes=q,e._cssomPrefixes=p,e.mq=z,e.hasEvent=A,e.testProp=function(a){return H([a])},e.testAllProps=J,e.testStyles=y,e.prefixed=function(a,b,c){return b?J(a,b,c):J(a,"pfx")},g.className=g.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(f?" js "+v.join(" "):""),e}(this,this.document),function(a,b,c){function d(a){return"[object Function]"==o.call(a)}function e(a){return"string"==typeof a}function f(){}function g(a){return!a||"loaded"==a||"complete"==a||"uninitialized"==a}function h(){var a=p.shift();q=1,a?a.t?m(function(){("c"==a.t?B.injectCss:B.injectJs)(a.s,0,a.a,a.x,a.e,1)},0):(a(),h()):q=0}function i(a,c,d,e,f,i,j){function k(b){if(!o&&g(l.readyState)&&(u.r=o=1,!q&&h(),l.onload=l.onreadystatechange=null,b)){"img"!=a&&m(function(){t.removeChild(l)},50);for(var d in y[c])y[c].hasOwnProperty(d)&&y[c][d].onload()}}var j=j||B.errorTimeout,l=b.createElement(a),o=0,r=0,u={t:d,s:c,e:f,a:i,x:j};1===y[c]&&(r=1,y[c]=[]),"object"==a?l.data=c:(l.src=c,l.type=a),l.width=l.height="0",l.onerror=l.onload=l.onreadystatechange=function(){k.call(this,r)},p.splice(e,0,u),"img"!=a&&(r||2===y[c]?(t.insertBefore(l,s?null:n),m(k,j)):y[c].push(l))}function j(a,b,c,d,f){return q=0,b=b||"j",e(a)?i("c"==b?v:u,a,b,this.i++,c,d,f):(p.splice(this.i++,0,a),1==p.length&&h()),this}function k(){var a=B;return a.loader={load:j,i:0},a}var l=b.documentElement,m=a.setTimeout,n=b.getElementsByTagName("script")[0],o={}.toString,p=[],q=0,r="MozAppearance"in l.style,s=r&&!!b.createRange().compareNode,t=s?l:n.parentNode,l=a.opera&&"[object Opera]"==o.call(a.opera),l=!!b.attachEvent&&!l,u=r?"object":l?"script":"img",v=l?"script":u,w=Array.isArray||function(a){return"[object Array]"==o.call(a)},x=[],y={},z={timeout:function(a,b){return b.length&&(a.timeout=b[0]),a}},A,B;B=function(a){function b(a){var a=a.split("!"),b=x.length,c=a.pop(),d=a.length,c={url:c,origUrl:c,prefixes:a},e,f,g;for(f=0;f<d;f++)g=a[f].split("="),(e=z[g.shift()])&&(c=e(c,g));for(f=0;f<b;f++)c=x[f](c);return c}function g(a,e,f,g,h){var i=b(a),j=i.autoCallback;i.url.split(".").pop().split("?").shift(),i.bypass||(e&&(e=d(e)?e:e[a]||e[g]||e[a.split("/").pop().split("?")[0]]),i.instead?i.instead(a,e,f,g,h):(y[i.url]?i.noexec=!0:y[i.url]=1,f.load(i.url,i.forceCSS||!i.forceJS&&"css"==i.url.split(".").pop().split("?").shift()?"c":c,i.noexec,i.attrs,i.timeout),(d(e)||d(j))&&f.load(function(){k(),e&&e(i.origUrl,h,g),j&&j(i.origUrl,h,g),y[i.url]=2})))}function h(a,b){function c(a,c){if(a){if(e(a))c||(j=function(){var a=[].slice.call(arguments);k.apply(this,a),l()}),g(a,j,b,0,h);else if(Object(a)===a)for(n in m=function(){var b=0,c;for(c in a)a.hasOwnProperty(c)&&b++;return b}(),a)a.hasOwnProperty(n)&&(!c&&!--m&&(d(j)?j=function(){var a=[].slice.call(arguments);k.apply(this,a),l()}:j[n]=function(a){return function(){var b=[].slice.call(arguments);a&&a.apply(this,b),l()}}(k[n])),g(a[n],j,b,n,h))}else!c&&l()}var h=!!a.test,i=a.load||a.both,j=a.callback||f,k=j,l=a.complete||f,m,n;c(h?a.yep:a.nope,!!i),i&&c(i)}var i,j,l=this.yepnope.loader;if(e(a))g(a,0,l,0);else if(w(a))for(i=0;i<a.length;i++)j=a[i],e(j)?g(j,0,l,0):w(j)?B(j):Object(j)===j&&h(j,l);else 
Object(a)===a&&h(a,l)},B.addPrefix=function(a,b){z[a]=b},B.addFilter=function(a){x.push(a)},B.errorTimeout=1e4,null==b.readyState&&b.addEventListener&&(b.readyState="loading",b.addEventListener("DOMContentLoaded",A=function(){b.removeEventListener("DOMContentLoaded",A,0),b.readyState="complete"},0)),a.yepnope=k(),a.yepnope.executeStack=h,a.yepnope.injectJs=function(a,c,d,e,i,j){var k=b.createElement("script"),l,o,e=e||B.errorTimeout;k.src=a;for(o in d)k.setAttribute(o,d[o]);c=j?h:c||f,k.onreadystatechange=k.onload=function(){!l&&g(k.readyState)&&(l=1,c(),k.onload=k.onreadystatechange=null)},m(function(){l||(l=1,c(1))},e),i?k.onload():n.parentNode.insertBefore(k,n)},a.yepnope.injectCss=function(a,c,d,e,g,i){var e=b.createElement("link"),j,c=i?h:c||f;e.href=a,e.rel="stylesheet",e.type="text/css";for(j in d)e.setAttribute(j,d[j]);g||(n.parentNode.insertBefore(e,n),m(c,0))}}(this,document),Modernizr.load=function(){yepnope.apply(window,[].slice.call(arguments,0))};
|
PypiClean
|
/cubicweb-sherpa-0.11.0.tar.gz/cubicweb-sherpa-0.11.0/cubicweb_sherpa/views/__init__.py
|
from jinja2 import Environment, PackageLoader, select_autoescape
from cubicweb.view import View
from cubicweb.web.views import urlrewrite, startup, uicfg
_REWRITE_RULES = []
_JINJA_ENV = Environment(loader=PackageLoader('cubicweb_sherpa.views'),
autoescape=select_autoescape(enabled_extensions=('html',)))
def jinja_render(template_name, **ctx):
"""Return a string containing result of rendering of Jinja2's `template_name` with
`ctx` as context.
"""
template = _JINJA_ENV.get_template(template_name + '.jinja2.html')
return template.render(**ctx)
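# Illustrative use only (the template name below is hypothetical; keyword
# arguments become the Jinja2 rendering context and the template is looked up
# as '<name>.jinja2.html' in this package's template directory):
#
#     html = jinja_render('example', title='Example page',
#                         build_url=lambda *args, **kwargs: '/',
#                         data_url='/data/')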
class JinjaStaticView(View):
"""Abstract base class to render static pages from a jinja template."""
__abstract__ = True
template_name = None
title = None
def call(self, **kw):
self.w(jinja_render(self.template_name, **self.build_context()))
def build_context(self):
return {
'title': self._cw._(self.title),
'build_url': self._cw.build_url,
'data_url': self._cw.datadir_url,
}
def jinja_static_view(template_name, title=None, regid=None, path=None):
"""Generate a sub-class of :class:`JinjaStaticView` parametrized with its `template_name` and
`title`.
`__regid__` is built by prepending 'sherpa.' to `template_name` or may be explicitly specified
using `regid`.
A path to access the view is automatically generated and will match `template_name` unless
explicitly specified using the `path` argument.
"""
class_name = template_name.capitalize() + 'View'
if regid is None:
regid = 'sherpa.' + template_name
if path is None:
path = '/' + template_name
_REWRITE_RULES.append((path, {'vid': regid}))
return type(class_name, (JinjaStaticView,), {'__regid__': regid,
'template_name': template_name,
'title': title})
IndexView = jinja_static_view('index', 'view_index', regid='index', path='/')
ProjectView = jinja_static_view('project', u'Sherpa un générateur de profils')
UtilisationView = jinja_static_view('utilisation', u'Pour commencer')
SedaView = jinja_static_view('seda', u'Le SEDA')
Seda2SchemaView = jinja_static_view('schema_seda', u'Schéma du SEDA 2')
AProposView = jinja_static_view('apropos', u'À propos')
ContactView = jinja_static_view('contact', 'Contact')
ArchiveUnitView = jinja_static_view('archive_unit', u"Unités d'archive")
ARecordView = jinja_static_view('authority_record', u"Notices d'autorité")
# add our rewrite rules; this has to be done once the list is filled because of metaclass magic
class SherpaReqRewriter(urlrewrite.SimpleReqRewriter):
ignore_baseclass_rules = True
global _REWRITE_RULES
rules = tuple(_REWRITE_RULES)
del _REWRITE_RULES
uicfg.autoform_section.tag_subject_of(('CWUser', 'primary_email', '*'), 'main', 'hidden')
def authority_record_kind_vocabulary(form, field):
"""Vocabulary function for AuthorityRecord.kind skipping "unknown" value."""
rset = form._cw.execute('Any X, XN WHERE X name XN, X is AgentKind, '
'X name != "unknown-agent-kind"')
return [(entity.dc_title(), str(entity.eid)) for entity in rset.entities()]
uicfg.autoform_field_kwargs.tag_attribute(('AuthorityRecord', 'agent_kind'),
{'choices': authority_record_kind_vocabulary})
def registration_callback(vreg):
vreg.register_all(globals().values(), __name__, (IndexView,))
vreg.register_and_replace(IndexView, startup.IndexView)
from cubicweb.web.views import actions, bookmark, cwuser
vreg.unregister(actions.SelectAction)
vreg.unregister(actions.CancelSelectAction)
vreg.unregister(actions.ViewAction)
vreg.unregister(actions.MultipleEditAction)
vreg.unregister(actions.CopyAction)
vreg.unregister(actions.AddNewAction)
vreg.unregister(actions.AddRelatedActions)
vreg.unregister(actions.ViewSameCWEType)
vreg.unregister(actions.UserPreferencesAction)
vreg.unregister(actions.ManageAction)
vreg.unregister(actions.PoweredByAction)
vreg.unregister(bookmark.BookmarksBox)
vreg.unregister(cwuser.UserPreferencesEntityAction)
|
PypiClean
|
/django-rte-0.4.0.tar.gz/django-rte-0.4.0/rte/static/rte/tiny_mce/plugins/contextmenu/editor_plugin.js
|
(function(){var a=tinymce.dom.Event,c=tinymce.each,b=tinymce.DOM;tinymce.create("tinymce.plugins.ContextMenu",{init:function(e){var h=this,f,d,i;h.editor=e;d=e.settings.contextmenu_never_use_native;h.onContextMenu=new tinymce.util.Dispatcher(this);f=e.onContextMenu.add(function(j,k){if((i!==0?i:k.ctrlKey)&&!d){return}a.cancel(k);if(k.target.nodeName=="IMG"){j.selection.select(k.target)}h._getMenu(j).showMenu(k.clientX||k.pageX,k.clientY||k.pageY);a.add(j.getDoc(),"click",function(l){g(j,l)});j.nodeChanged()});e.onRemove.add(function(){if(h._menu){h._menu.removeAll()}});function g(j,k){i=0;if(k&&k.button==2){i=k.ctrlKey;return}if(h._menu){h._menu.removeAll();h._menu.destroy();a.remove(j.getDoc(),"click",g)}}e.onMouseDown.add(g);e.onKeyDown.add(g);e.onKeyDown.add(function(j,k){if(k.shiftKey&&!k.ctrlKey&&!k.altKey&&k.keyCode===121){a.cancel(k);f(j,k)}})},getInfo:function(){return{longname:"Contextmenu",author:"Moxiecode Systems AB",authorurl:"http://tinymce.moxiecode.com",infourl:"http://wiki.moxiecode.com/index.php/TinyMCE:Plugins/contextmenu",version:tinymce.majorVersion+"."+tinymce.minorVersion}},_getMenu:function(e){var g=this,d=g._menu,j=e.selection,f=j.isCollapsed(),h=j.getNode()||e.getBody(),i,k;if(d){d.removeAll();d.destroy()}k=b.getPos(e.getContentAreaContainer());d=e.controlManager.createDropMenu("contextmenu",{offset_x:k.x+e.getParam("contextmenu_offset_x",0),offset_y:k.y+e.getParam("contextmenu_offset_y",0),constrain:1,keyboard_focus:true});g._menu=d;d.add({title:"advanced.cut_desc",icon:"cut",cmd:"Cut"}).setDisabled(f);d.add({title:"advanced.copy_desc",icon:"copy",cmd:"Copy"}).setDisabled(f);d.add({title:"advanced.paste_desc",icon:"paste",cmd:"Paste"});if((h.nodeName=="A"&&!e.dom.getAttrib(h,"name"))||!f){d.addSeparator();d.add({title:"advanced.link_desc",icon:"link",cmd:e.plugins.advlink?"mceAdvLink":"mceLink",ui:true});d.add({title:"advanced.unlink_desc",icon:"unlink",cmd:"UnLink"})}d.addSeparator();d.add({title:"advanced.image_desc",icon:"image",cmd:e.plugins.advimage?"mceAdvImage":"mceImage",ui:true});d.addSeparator();i=d.addMenu({title:"contextmenu.align"});i.add({title:"contextmenu.left",icon:"justifyleft",cmd:"JustifyLeft"});i.add({title:"contextmenu.center",icon:"justifycenter",cmd:"JustifyCenter"});i.add({title:"contextmenu.right",icon:"justifyright",cmd:"JustifyRight"});i.add({title:"contextmenu.full",icon:"justifyfull",cmd:"JustifyFull"});g.onContextMenu.dispatch(g,d,h,f);return d}});tinymce.PluginManager.add("contextmenu",tinymce.plugins.ContextMenu)})();
|
PypiClean
|
/PyPermissions-0.1.4.tar.gz/PyPermissions-0.1.4/pypermissions/decorators.py
|
from pypermissions.permission import PermissionSet
def _prepare_runtime_permission(self, perm=None, runkw=None, args=None, kwargs=None):
"""This function parses the provided string arguments to decorators into the actual values for use when the
decorator is being evaluated. This allows for permissions to be created that rely on arguments that are provided to
the function.
:param perm: The permission string to parse
:param runkw: The run-time components to be inserted into the permission
:param args: The arguments provided to the decorated function
:param kwargs: The keyword arguments provided to the decorated function
:rtype: :py:class:`str`
"""
permission = perm
if not permission:
return False
for key, value in runkw.iteritems():
val_split = value.split('.')
for attr in val_split:
if attr == "self":
value = self
continue
elif attr in kwargs:
value = kwargs.get(attr)
continue
value = getattr(value, attr)
permission = permission.replace('{'+key+'}', value)
return permission
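# For illustration (hypothetical names): with perm="project.{pid}.edit" and
# runkw={"pid": "self.project_id"}, the dotted path "self.project_id" is walked
# on the decorated method's instance and the result replaces the "{pid}" tag,
# yielding e.g. "project.42.edit" when self.project_id == "42".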
def set_has_permission(perm=None, perm_set=None, on_failure=None, perm_check=None, **runkw):
"""This decorator checks if the provided permission set has the permission specified. It allows for the permission
to rely on runtime information via runkw; which be used to modify perm based on arguments provided to the decorated
function. For many use cases, this can be extended by decorating it with a custom decorator that will capture the
current user making the function call, and providing their permissions as the perm_set. The function provided for
use when the check fails will be called with the decorated functions arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param perm_check: The PermissionSet function to be used when evaluating for perm.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
def decorator(function):
def check_permission(self, *args, **kwargs):
permission = _prepare_runtime_permission(self, perm, runkw, args, kwargs)
# No permission provided, so everyone has permission.
if not permission:
return function(self, *args, **kwargs)
if not perm_set:
return on_failure(self, *args, **kwargs)
if not perm_check(perm_set, permission):
return on_failure(self, *args, **kwargs)
return function(self, *args, **kwargs)
return check_permission
return decorator
def set_grants_permission(perm=None, perm_set=None, on_failure=None, **runkw):
"""This decorator checks if the provided permission set has the permission specified. It allows for the permission
to rely on runtime information via runkw; which be used to modify perm based on arguments provided to the decorated
function. For many use cases, this can be extended by decorating it with a custom decorator that will capture the
current user making the function call, and providing their permissions as the perm_set. The function provided for
use when the check fails will be called with the decorated functions arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
return set_has_permission(perm, perm_set, on_failure, perm_check=PermissionSet.grants_permission, **runkw)
def set_has_any_permission(perm=None, perm_set=None, on_failure=None, **runkw):
"""This decorator checks if the provided permission set has a permission of the form specified. It allows for the
permission to rely on runtime information via runkw; which be used to modify perm based on arguments provided to the
decorated function. For many use cases, this can be extended by decorating it with a custom decorator that will
capture the current user making the function call, and providing their permissions as the perm_set. The function
provided for use when the check fails will be called with the decorated functions arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
return set_has_permission(perm, perm_set, on_failure, perm_check=PermissionSet.has_any_permission, **runkw)
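# Hedged usage sketch (illustrative only: the permission string, the failure
# handler and the PermissionSet construction below are assumptions; check the
# pypermissions documentation for the exact PermissionSet constructor).
def _example_usage():
    admin_set = PermissionSet({"project.*.edit"})  # assumed constructor form
    def deny(self, *args, **kwargs):
        return "permission denied"
    class Project(object):
        def __init__(self, project_id):
            self.project_id = project_id
        @set_grants_permission(perm="project.{pid}.edit", perm_set=admin_set,
                               on_failure=deny, pid="self.project_id")
        def rename(self, new_name):
            return new_name
    return Project("42").rename("renamed project")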
|
PypiClean
|
/xadmin_croxlink2-0.7.0.3.tar.gz/xadmin_croxlink2-0.7.0.3/xadmin/static/xadmin/vendor/jquery-ui/jquery.ui.sortable.min.js
|
(function(t){function e(t,e,i){return t>e&&e+i>t}function i(t){return/left|right/.test(t.css("float"))||/inline|table-cell/.test(t.css("display"))}t.widget("ui.sortable",t.ui.mouse,{version:"1.10.2",widgetEventPrefix:"sort",ready:!1,options:{appendTo:"parent",axis:!1,connectWith:!1,containment:!1,cursor:"auto",cursorAt:!1,dropOnEmpty:!0,forcePlaceholderSize:!1,forceHelperSize:!1,grid:!1,handle:!1,helper:"original",items:"> *",opacity:!1,placeholder:!1,revert:!1,scroll:!0,scrollSensitivity:20,scrollSpeed:20,scope:"default",tolerance:"intersect",zIndex:1e3,activate:null,beforeStop:null,change:null,deactivate:null,out:null,over:null,receive:null,remove:null,sort:null,start:null,stop:null,update:null},_create:function(){var t=this.options;this.containerCache={},this.element.addClass("ui-sortable"),this.refresh(),this.floating=this.items.length?"x"===t.axis||i(this.items[0].item):!1,this.offset=this.element.offset(),this._mouseInit(),this.ready=!0},_destroy:function(){this.element.removeClass("ui-sortable ui-sortable-disabled"),this._mouseDestroy();for(var t=this.items.length-1;t>=0;t--)this.items[t].item.removeData(this.widgetName+"-item");return this},_setOption:function(e,i){"disabled"===e?(this.options[e]=i,this.widget().toggleClass("ui-sortable-disabled",!!i)):t.Widget.prototype._setOption.apply(this,arguments)},_mouseCapture:function(e,i){var s=null,n=!1,a=this;return this.reverting?!1:this.options.disabled||"static"===this.options.type?!1:(this._refreshItems(e),t(e.target).parents().each(function(){return t.data(this,a.widgetName+"-item")===a?(s=t(this),!1):undefined}),t.data(e.target,a.widgetName+"-item")===a&&(s=t(e.target)),s?!this.options.handle||i||(t(this.options.handle,s).find("*").addBack().each(function(){this===e.target&&(n=!0)}),n)?(this.currentItem=s,this._removeCurrentsFromItems(),!0):!1:!1)},_mouseStart:function(e,i,s){var n,a,o=this.options;if(this.currentContainer=this,this.refreshPositions(),this.helper=this._createHelper(e),this._cacheHelperProportions(),this._cacheMargins(),this.scrollParent=this.helper.scrollParent(),this.offset=this.currentItem.offset(),this.offset={top:this.offset.top-this.margins.top,left:this.offset.left-this.margins.left},t.extend(this.offset,{click:{left:e.pageX-this.offset.left,top:e.pageY-this.offset.top},parent:this._getParentOffset(),relative:this._getRelativeOffset()}),this.helper.css("position","absolute"),this.cssPosition=this.helper.css("position"),this.originalPosition=this._generatePosition(e),this.originalPageX=e.pageX,this.originalPageY=e.pageY,o.cursorAt&&this._adjustOffsetFromHelper(o.cursorAt),this.domPosition={prev:this.currentItem.prev()[0],parent:this.currentItem.parent()[0]},this.helper[0]!==this.currentItem[0]&&this.currentItem.hide(),this._createPlaceholder(),o.containment&&this._setContainment(),o.cursor&&"auto"!==o.cursor&&(a=this.document.find("body"),this.storedCursor=a.css("cursor"),a.css("cursor",o.cursor),this.storedStylesheet=t("<style>*{ cursor: "+o.cursor+" !important; 
}</style>").appendTo(a)),o.opacity&&(this.helper.css("opacity")&&(this._storedOpacity=this.helper.css("opacity")),this.helper.css("opacity",o.opacity)),o.zIndex&&(this.helper.css("zIndex")&&(this._storedZIndex=this.helper.css("zIndex")),this.helper.css("zIndex",o.zIndex)),this.scrollParent[0]!==document&&"HTML"!==this.scrollParent[0].tagName&&(this.overflowOffset=this.scrollParent.offset()),this._trigger("start",e,this._uiHash()),this._preserveHelperProportions||this._cacheHelperProportions(),!s)for(n=this.containers.length-1;n>=0;n--)this.containers[n]._trigger("activate",e,this._uiHash(this));return t.ui.ddmanager&&(t.ui.ddmanager.current=this),t.ui.ddmanager&&!o.dropBehaviour&&t.ui.ddmanager.prepareOffsets(this,e),this.dragging=!0,this.helper.addClass("ui-sortable-helper"),this._mouseDrag(e),!0},_mouseDrag:function(e){var i,s,n,a,o=this.options,r=!1;for(this.position=this._generatePosition(e),this.positionAbs=this._convertPositionTo("absolute"),this.lastPositionAbs||(this.lastPositionAbs=this.positionAbs),this.options.scroll&&(this.scrollParent[0]!==document&&"HTML"!==this.scrollParent[0].tagName?(this.overflowOffset.top+this.scrollParent[0].offsetHeight-e.pageY<o.scrollSensitivity?this.scrollParent[0].scrollTop=r=this.scrollParent[0].scrollTop+o.scrollSpeed:e.pageY-this.overflowOffset.top<o.scrollSensitivity&&(this.scrollParent[0].scrollTop=r=this.scrollParent[0].scrollTop-o.scrollSpeed),this.overflowOffset.left+this.scrollParent[0].offsetWidth-e.pageX<o.scrollSensitivity?this.scrollParent[0].scrollLeft=r=this.scrollParent[0].scrollLeft+o.scrollSpeed:e.pageX-this.overflowOffset.left<o.scrollSensitivity&&(this.scrollParent[0].scrollLeft=r=this.scrollParent[0].scrollLeft-o.scrollSpeed)):(e.pageY-t(document).scrollTop()<o.scrollSensitivity?r=t(document).scrollTop(t(document).scrollTop()-o.scrollSpeed):t(window).height()-(e.pageY-t(document).scrollTop())<o.scrollSensitivity&&(r=t(document).scrollTop(t(document).scrollTop()+o.scrollSpeed)),e.pageX-t(document).scrollLeft()<o.scrollSensitivity?r=t(document).scrollLeft(t(document).scrollLeft()-o.scrollSpeed):t(window).width()-(e.pageX-t(document).scrollLeft())<o.scrollSensitivity&&(r=t(document).scrollLeft(t(document).scrollLeft()+o.scrollSpeed))),r!==!1&&t.ui.ddmanager&&!o.dropBehaviour&&t.ui.ddmanager.prepareOffsets(this,e)),this.positionAbs=this._convertPositionTo("absolute"),this.options.axis&&"y"===this.options.axis||(this.helper[0].style.left=this.position.left+"px"),this.options.axis&&"x"===this.options.axis||(this.helper[0].style.top=this.position.top+"px"),i=this.items.length-1;i>=0;i--)if(s=this.items[i],n=s.item[0],a=this._intersectsWithPointer(s),a&&s.instance===this.currentContainer&&n!==this.currentItem[0]&&this.placeholder[1===a?"next":"prev"]()[0]!==n&&!t.contains(this.placeholder[0],n)&&("semi-dynamic"===this.options.type?!t.contains(this.element[0],n):!0)){if(this.direction=1===a?"down":"up","pointer"!==this.options.tolerance&&!this._intersectsWithSides(s))break;this._rearrange(e,s),this._trigger("change",e,this._uiHash());break}return this._contactContainers(e),t.ui.ddmanager&&t.ui.ddmanager.drag(this,e),this._trigger("sort",e,this._uiHash()),this.lastPositionAbs=this.positionAbs,!1},_mouseStop:function(e,i){if(e){if(t.ui.ddmanager&&!this.options.dropBehaviour&&t.ui.ddmanager.drop(this,e),this.options.revert){var 
s=this,n=this.placeholder.offset(),a=this.options.axis,o={};a&&"x"!==a||(o.left=n.left-this.offset.parent.left-this.margins.left+(this.offsetParent[0]===document.body?0:this.offsetParent[0].scrollLeft)),a&&"y"!==a||(o.top=n.top-this.offset.parent.top-this.margins.top+(this.offsetParent[0]===document.body?0:this.offsetParent[0].scrollTop)),this.reverting=!0,t(this.helper).animate(o,parseInt(this.options.revert,10)||500,function(){s._clear(e)})}else this._clear(e,i);return!1}},cancel:function(){if(this.dragging){this._mouseUp({target:null}),"original"===this.options.helper?this.currentItem.css(this._storedCSS).removeClass("ui-sortable-helper"):this.currentItem.show();for(var e=this.containers.length-1;e>=0;e--)this.containers[e]._trigger("deactivate",null,this._uiHash(this)),this.containers[e].containerCache.over&&(this.containers[e]._trigger("out",null,this._uiHash(this)),this.containers[e].containerCache.over=0)}return this.placeholder&&(this.placeholder[0].parentNode&&this.placeholder[0].parentNode.removeChild(this.placeholder[0]),"original"!==this.options.helper&&this.helper&&this.helper[0].parentNode&&this.helper.remove(),t.extend(this,{helper:null,dragging:!1,reverting:!1,_noFinalSort:null}),this.domPosition.prev?t(this.domPosition.prev).after(this.currentItem):t(this.domPosition.parent).prepend(this.currentItem)),this},serialize:function(e){var i=this._getItemsAsjQuery(e&&e.connected),s=[];return e=e||{},t(i).each(function(){var i=(t(e.item||this).attr(e.attribute||"id")||"").match(e.expression||/(.+)[\-=_](.+)/);i&&s.push((e.key||i[1]+"[]")+"="+(e.key&&e.expression?i[1]:i[2]))}),!s.length&&e.key&&s.push(e.key+"="),s.join("&")},toArray:function(e){var i=this._getItemsAsjQuery(e&&e.connected),s=[];return e=e||{},i.each(function(){s.push(t(e.item||this).attr(e.attribute||"id")||"")}),s},_intersectsWith:function(t){var e=this.positionAbs.left,i=e+this.helperProportions.width,s=this.positionAbs.top,n=s+this.helperProportions.height,a=t.left,o=a+t.width,r=t.top,h=r+t.height,l=this.offset.click.top,c=this.offset.click.left,u=s+l>r&&h>s+l&&e+c>a&&o>e+c;return"pointer"===this.options.tolerance||this.options.forcePointerForContainers||"pointer"!==this.options.tolerance&&this.helperProportions[this.floating?"width":"height"]>t[this.floating?"width":"height"]?u:e+this.helperProportions.width/2>a&&o>i-this.helperProportions.width/2&&s+this.helperProportions.height/2>r&&h>n-this.helperProportions.height/2},_intersectsWithPointer:function(t){var i="x"===this.options.axis||e(this.positionAbs.top+this.offset.click.top,t.top,t.height),s="y"===this.options.axis||e(this.positionAbs.left+this.offset.click.left,t.left,t.width),n=i&&s,a=this._getDragVerticalDirection(),o=this._getDragHorizontalDirection();return n?this.floating?o&&"right"===o||"down"===a?2:1:a&&("down"===a?2:1):!1},_intersectsWithSides:function(t){var i=e(this.positionAbs.top+this.offset.click.top,t.top+t.height/2,t.height),s=e(this.positionAbs.left+this.offset.click.left,t.left+t.width/2,t.width),n=this._getDragVerticalDirection(),a=this._getDragHorizontalDirection();return this.floating&&a?"right"===a&&s||"left"===a&&!s:n&&("down"===n&&i||"up"===n&&!i)},_getDragVerticalDirection:function(){var t=this.positionAbs.top-this.lastPositionAbs.top;return 0!==t&&(t>0?"down":"up")},_getDragHorizontalDirection:function(){var t=this.positionAbs.left-this.lastPositionAbs.left;return 0!==t&&(t>0?"right":"left")},refresh:function(t){return this._refreshItems(t),this.refreshPositions(),this},_connectWith:function(){var t=this.options;return 
t.connectWith.constructor===String?[t.connectWith]:t.connectWith},_getItemsAsjQuery:function(e){var i,s,n,a,o=[],r=[],h=this._connectWith();if(h&&e)for(i=h.length-1;i>=0;i--)for(n=t(h[i]),s=n.length-1;s>=0;s--)a=t.data(n[s],this.widgetFullName),a&&a!==this&&!a.options.disabled&&r.push([t.isFunction(a.options.items)?a.options.items.call(a.element):t(a.options.items,a.element).not(".ui-sortable-helper").not(".ui-sortable-placeholder"),a]);for(r.push([t.isFunction(this.options.items)?this.options.items.call(this.element,null,{options:this.options,item:this.currentItem}):t(this.options.items,this.element).not(".ui-sortable-helper").not(".ui-sortable-placeholder"),this]),i=r.length-1;i>=0;i--)r[i][0].each(function(){o.push(this)});return t(o)},_removeCurrentsFromItems:function(){var e=this.currentItem.find(":data("+this.widgetName+"-item)");this.items=t.grep(this.items,function(t){for(var i=0;e.length>i;i++)if(e[i]===t.item[0])return!1;return!0})},_refreshItems:function(e){this.items=[],this.containers=[this];var i,s,n,a,o,r,h,l,c=this.items,u=[[t.isFunction(this.options.items)?this.options.items.call(this.element[0],e,{item:this.currentItem}):t(this.options.items,this.element),this]],d=this._connectWith();if(d&&this.ready)for(i=d.length-1;i>=0;i--)for(n=t(d[i]),s=n.length-1;s>=0;s--)a=t.data(n[s],this.widgetFullName),a&&a!==this&&!a.options.disabled&&(u.push([t.isFunction(a.options.items)?a.options.items.call(a.element[0],e,{item:this.currentItem}):t(a.options.items,a.element),a]),this.containers.push(a));for(i=u.length-1;i>=0;i--)for(o=u[i][1],r=u[i][0],s=0,l=r.length;l>s;s++)h=t(r[s]),h.data(this.widgetName+"-item",o),c.push({item:h,instance:o,width:0,height:0,left:0,top:0})},refreshPositions:function(e){this.offsetParent&&this.helper&&(this.offset.parent=this._getParentOffset());var i,s,n,a;for(i=this.items.length-1;i>=0;i--)s=this.items[i],s.instance!==this.currentContainer&&this.currentContainer&&s.item[0]!==this.currentItem[0]||(n=this.options.toleranceElement?t(this.options.toleranceElement,s.item):s.item,e||(s.width=n.outerWidth(),s.height=n.outerHeight()),a=n.offset(),s.left=a.left,s.top=a.top);if(this.options.custom&&this.options.custom.refreshContainers)this.options.custom.refreshContainers.call(this);else for(i=this.containers.length-1;i>=0;i--)a=this.containers[i].element.offset(),this.containers[i].containerCache.left=a.left,this.containers[i].containerCache.top=a.top,this.containers[i].containerCache.width=this.containers[i].element.outerWidth(),this.containers[i].containerCache.height=this.containers[i].element.outerHeight();return this},_createPlaceholder:function(e){e=e||this;var i,s=e.options;s.placeholder&&s.placeholder.constructor!==String||(i=s.placeholder,s.placeholder={element:function(){var s=e.currentItem[0].nodeName.toLowerCase(),n=t(e.document[0].createElement(s)).addClass(i||e.currentItem[0].className+" ui-sortable-placeholder").removeClass("ui-sortable-helper");return"tr"===s?n.append("<td colspan='99'> 
</td>"):"img"===s&&n.attr("src",e.currentItem.attr("src")),i||n.css("visibility","hidden"),n},update:function(t,n){(!i||s.forcePlaceholderSize)&&(n.height()||n.height(e.currentItem.innerHeight()-parseInt(e.currentItem.css("paddingTop")||0,10)-parseInt(e.currentItem.css("paddingBottom")||0,10)),n.width()||n.width(e.currentItem.innerWidth()-parseInt(e.currentItem.css("paddingLeft")||0,10)-parseInt(e.currentItem.css("paddingRight")||0,10)))}}),e.placeholder=t(s.placeholder.element.call(e.element,e.currentItem)),e.currentItem.after(e.placeholder),s.placeholder.update(e,e.placeholder)},_contactContainers:function(s){var n,a,o,r,h,l,c,u,d,p,f=null,m=null;for(n=this.containers.length-1;n>=0;n--)if(!t.contains(this.currentItem[0],this.containers[n].element[0]))if(this._intersectsWith(this.containers[n].containerCache)){if(f&&t.contains(this.containers[n].element[0],f.element[0]))continue;f=this.containers[n],m=n}else this.containers[n].containerCache.over&&(this.containers[n]._trigger("out",s,this._uiHash(this)),this.containers[n].containerCache.over=0);if(f)if(1===this.containers.length)this.containers[m].containerCache.over||(this.containers[m]._trigger("over",s,this._uiHash(this)),this.containers[m].containerCache.over=1);else{for(o=1e4,r=null,p=f.floating||i(this.currentItem),h=p?"left":"top",l=p?"width":"height",c=this.positionAbs[h]+this.offset.click[h],a=this.items.length-1;a>=0;a--)t.contains(this.containers[m].element[0],this.items[a].item[0])&&this.items[a].item[0]!==this.currentItem[0]&&(!p||e(this.positionAbs.top+this.offset.click.top,this.items[a].top,this.items[a].height))&&(u=this.items[a].item.offset()[h],d=!1,Math.abs(u-c)>Math.abs(u+this.items[a][l]-c)&&(d=!0,u+=this.items[a][l]),o>Math.abs(u-c)&&(o=Math.abs(u-c),r=this.items[a],this.direction=d?"up":"down"));if(!r&&!this.options.dropOnEmpty)return;if(this.currentContainer===this.containers[m])return;r?this._rearrange(s,r,null,!0):this._rearrange(s,null,this.containers[m].element,!0),this._trigger("change",s,this._uiHash()),this.containers[m]._trigger("change",s,this._uiHash(this)),this.currentContainer=this.containers[m],this.options.placeholder.update(this.currentContainer,this.placeholder),this.containers[m]._trigger("over",s,this._uiHash(this)),this.containers[m].containerCache.over=1}},_createHelper:function(e){var i=this.options,s=t.isFunction(i.helper)?t(i.helper.apply(this.element[0],[e,this.currentItem])):"clone"===i.helper?this.currentItem.clone():this.currentItem;return s.parents("body").length||t("parent"!==i.appendTo?i.appendTo:this.currentItem[0].parentNode)[0].appendChild(s[0]),s[0]===this.currentItem[0]&&(this._storedCSS={width:this.currentItem[0].style.width,height:this.currentItem[0].style.height,position:this.currentItem.css("position"),top:this.currentItem.css("top"),left:this.currentItem.css("left")}),(!s[0].style.width||i.forceHelperSize)&&s.width(this.currentItem.width()),(!s[0].style.height||i.forceHelperSize)&&s.height(this.currentItem.height()),s},_adjustOffsetFromHelper:function(e){"string"==typeof e&&(e=e.split(" ")),t.isArray(e)&&(e={left:+e[0],top:+e[1]||0}),"left"in e&&(this.offset.click.left=e.left+this.margins.left),"right"in e&&(this.offset.click.left=this.helperProportions.width-e.right+this.margins.left),"top"in e&&(this.offset.click.top=e.top+this.margins.top),"bottom"in e&&(this.offset.click.top=this.helperProportions.height-e.bottom+this.margins.top)},_getParentOffset:function(){this.offsetParent=this.helper.offsetParent();var 
e=this.offsetParent.offset();return"absolute"===this.cssPosition&&this.scrollParent[0]!==document&&t.contains(this.scrollParent[0],this.offsetParent[0])&&(e.left+=this.scrollParent.scrollLeft(),e.top+=this.scrollParent.scrollTop()),(this.offsetParent[0]===document.body||this.offsetParent[0].tagName&&"html"===this.offsetParent[0].tagName.toLowerCase()&&t.ui.ie)&&(e={top:0,left:0}),{top:e.top+(parseInt(this.offsetParent.css("borderTopWidth"),10)||0),left:e.left+(parseInt(this.offsetParent.css("borderLeftWidth"),10)||0)}},_getRelativeOffset:function(){if("relative"===this.cssPosition){var t=this.currentItem.position();return{top:t.top-(parseInt(this.helper.css("top"),10)||0)+this.scrollParent.scrollTop(),left:t.left-(parseInt(this.helper.css("left"),10)||0)+this.scrollParent.scrollLeft()}}return{top:0,left:0}},_cacheMargins:function(){this.margins={left:parseInt(this.currentItem.css("marginLeft"),10)||0,top:parseInt(this.currentItem.css("marginTop"),10)||0}},_cacheHelperProportions:function(){this.helperProportions={width:this.helper.outerWidth(),height:this.helper.outerHeight()}},_setContainment:function(){var e,i,s,n=this.options;"parent"===n.containment&&(n.containment=this.helper[0].parentNode),("document"===n.containment||"window"===n.containment)&&(this.containment=[0-this.offset.relative.left-this.offset.parent.left,0-this.offset.relative.top-this.offset.parent.top,t("document"===n.containment?document:window).width()-this.helperProportions.width-this.margins.left,(t("document"===n.containment?document:window).height()||document.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top]),/^(document|window|parent)$/.test(n.containment)||(e=t(n.containment)[0],i=t(n.containment).offset(),s="hidden"!==t(e).css("overflow"),this.containment=[i.left+(parseInt(t(e).css("borderLeftWidth"),10)||0)+(parseInt(t(e).css("paddingLeft"),10)||0)-this.margins.left,i.top+(parseInt(t(e).css("borderTopWidth"),10)||0)+(parseInt(t(e).css("paddingTop"),10)||0)-this.margins.top,i.left+(s?Math.max(e.scrollWidth,e.offsetWidth):e.offsetWidth)-(parseInt(t(e).css("borderLeftWidth"),10)||0)-(parseInt(t(e).css("paddingRight"),10)||0)-this.helperProportions.width-this.margins.left,i.top+(s?Math.max(e.scrollHeight,e.offsetHeight):e.offsetHeight)-(parseInt(t(e).css("borderTopWidth"),10)||0)-(parseInt(t(e).css("paddingBottom"),10)||0)-this.helperProportions.height-this.margins.top])},_convertPositionTo:function(e,i){i||(i=this.position);var s="absolute"===e?1:-1,n="absolute"!==this.cssPosition||this.scrollParent[0]!==document&&t.contains(this.scrollParent[0],this.offsetParent[0])?this.scrollParent:this.offsetParent,a=/(html|body)/i.test(n[0].tagName);return{top:i.top+this.offset.relative.top*s+this.offset.parent.top*s-("fixed"===this.cssPosition?-this.scrollParent.scrollTop():a?0:n.scrollTop())*s,left:i.left+this.offset.relative.left*s+this.offset.parent.left*s-("fixed"===this.cssPosition?-this.scrollParent.scrollLeft():a?0:n.scrollLeft())*s}},_generatePosition:function(e){var 
i,s,n=this.options,a=e.pageX,o=e.pageY,r="absolute"!==this.cssPosition||this.scrollParent[0]!==document&&t.contains(this.scrollParent[0],this.offsetParent[0])?this.scrollParent:this.offsetParent,h=/(html|body)/i.test(r[0].tagName);return"relative"!==this.cssPosition||this.scrollParent[0]!==document&&this.scrollParent[0]!==this.offsetParent[0]||(this.offset.relative=this._getRelativeOffset()),this.originalPosition&&(this.containment&&(e.pageX-this.offset.click.left<this.containment[0]&&(a=this.containment[0]+this.offset.click.left),e.pageY-this.offset.click.top<this.containment[1]&&(o=this.containment[1]+this.offset.click.top),e.pageX-this.offset.click.left>this.containment[2]&&(a=this.containment[2]+this.offset.click.left),e.pageY-this.offset.click.top>this.containment[3]&&(o=this.containment[3]+this.offset.click.top)),n.grid&&(i=this.originalPageY+Math.round((o-this.originalPageY)/n.grid[1])*n.grid[1],o=this.containment?i-this.offset.click.top>=this.containment[1]&&i-this.offset.click.top<=this.containment[3]?i:i-this.offset.click.top>=this.containment[1]?i-n.grid[1]:i+n.grid[1]:i,s=this.originalPageX+Math.round((a-this.originalPageX)/n.grid[0])*n.grid[0],a=this.containment?s-this.offset.click.left>=this.containment[0]&&s-this.offset.click.left<=this.containment[2]?s:s-this.offset.click.left>=this.containment[0]?s-n.grid[0]:s+n.grid[0]:s)),{top:o-this.offset.click.top-this.offset.relative.top-this.offset.parent.top+("fixed"===this.cssPosition?-this.scrollParent.scrollTop():h?0:r.scrollTop()),left:a-this.offset.click.left-this.offset.relative.left-this.offset.parent.left+("fixed"===this.cssPosition?-this.scrollParent.scrollLeft():h?0:r.scrollLeft())}},_rearrange:function(t,e,i,s){i?i[0].appendChild(this.placeholder[0]):e.item[0].parentNode.insertBefore(this.placeholder[0],"down"===this.direction?e.item[0]:e.item[0].nextSibling),this.counter=this.counter?++this.counter:1;var n=this.counter;this._delay(function(){n===this.counter&&this.refreshPositions(!s)})},_clear:function(t,e){this.reverting=!1;var i,s=[];if(!this._noFinalSort&&this.currentItem.parent().length&&this.placeholder.before(this.currentItem),this._noFinalSort=null,this.helper[0]===this.currentItem[0]){for(i in this._storedCSS)("auto"===this._storedCSS[i]||"static"===this._storedCSS[i])&&(this._storedCSS[i]="");this.currentItem.css(this._storedCSS).removeClass("ui-sortable-helper")}else this.currentItem.show();for(this.fromOutside&&!e&&s.push(function(t){this._trigger("receive",t,this._uiHash(this.fromOutside))}),!this.fromOutside&&this.domPosition.prev===this.currentItem.prev().not(".ui-sortable-helper")[0]&&this.domPosition.parent===this.currentItem.parent()[0]||e||s.push(function(t){this._trigger("update",t,this._uiHash())}),this!==this.currentContainer&&(e||(s.push(function(t){this._trigger("remove",t,this._uiHash())}),s.push(function(t){return function(e){t._trigger("receive",e,this._uiHash(this))}}.call(this,this.currentContainer)),s.push(function(t){return function(e){t._trigger("update",e,this._uiHash(this))}}.call(this,this.currentContainer)))),i=this.containers.length-1;i>=0;i--)e||s.push(function(t){return function(e){t._trigger("deactivate",e,this._uiHash(this))}}.call(this,this.containers[i])),this.containers[i].containerCache.over&&(s.push(function(t){return 
function(e){t._trigger("out",e,this._uiHash(this))}}.call(this,this.containers[i])),this.containers[i].containerCache.over=0);if(this.storedCursor&&(this.document.find("body").css("cursor",this.storedCursor),this.storedStylesheet.remove()),this._storedOpacity&&this.helper.css("opacity",this._storedOpacity),this._storedZIndex&&this.helper.css("zIndex","auto"===this._storedZIndex?"":this._storedZIndex),this.dragging=!1,this.cancelHelperRemoval){if(!e){for(this._trigger("beforeStop",t,this._uiHash()),i=0;s.length>i;i++)s[i].call(this,t);this._trigger("stop",t,this._uiHash())}return this.fromOutside=!1,!1}if(e||this._trigger("beforeStop",t,this._uiHash()),this.placeholder[0].parentNode.removeChild(this.placeholder[0]),this.helper[0]!==this.currentItem[0]&&this.helper.remove(),this.helper=null,!e){for(i=0;s.length>i;i++)s[i].call(this,t);this._trigger("stop",t,this._uiHash())}return this.fromOutside=!1,!0},_trigger:function(){t.Widget.prototype._trigger.apply(this,arguments)===!1&&this.cancel()},_uiHash:function(e){var i=e||this;return{helper:i.helper,placeholder:i.placeholder||t([]),position:i.position,originalPosition:i.originalPosition,offset:i.positionAbs,item:i.currentItem,sender:e?e.element:null}}})})(jQuery);
|
PypiClean
|
/sematic-0.0.2.alpha.1654672757-py3-none-any.whl/sqlalchemy/dialects/firebird/kinterbasdb.py
|
# noqa
import decimal
from re import match
from .base import FBDialect
from .base import FBExecutionContext
from ... import types as sqltypes
from ... import util
class _kinterbasdb_numeric(object):
def bind_processor(self, dialect):
def process(value):
if isinstance(value, decimal.Decimal):
return str(value)
else:
return value
return process
class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
pass
class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
pass
class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get(
"enable_rowcount", self.dialect.enable_rowcount
):
return self.cursor.rowcount
else:
return -1
class FBDialect_kinterbasdb(FBDialect):
driver = "kinterbasdb"
supports_statement_cache = True
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
execution_ctx_cls = FBExecutionContext_kinterbasdb
supports_native_decimal = True
colspecs = util.update_copy(
FBDialect.colspecs,
{
sqltypes.Numeric: _FBNumeric_kinterbasdb,
sqltypes.Float: _FBFloat_kinterbasdb,
},
)
def __init__(
self,
type_conv=200,
concurrency_level=1,
enable_rowcount=True,
retaining=False,
**kwargs
):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
self.enable_rowcount = enable_rowcount
self.type_conv = type_conv
self.concurrency_level = concurrency_level
self.retaining = retaining
if enable_rowcount:
self.supports_sane_rowcount = True
@classmethod
def dbapi(cls):
return __import__("kinterbasdb")
def do_execute(self, cursor, statement, parameters, context=None):
# kinterbasdb does not accept None, but wants an empty list
# when there are no arguments.
cursor.execute(statement, parameters or [])
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback(self.retaining)
def do_commit(self, dbapi_connection):
dbapi_connection.commit(self.retaining)
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if opts.get("port"):
opts["host"] = "%s/%s" % (opts["host"], opts["port"])
del opts["port"]
opts.update(url.query)
util.coerce_kw_type(opts, "type_conv", int)
type_conv = opts.pop("type_conv", self.type_conv)
concurrency_level = opts.pop(
"concurrency_level", self.concurrency_level
)
if self.dbapi is not None:
initialized = getattr(self.dbapi, "initialized", None)
if initialized is None:
# CVS rev 1.96 changed the name of the attribute:
# https://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
# Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
initialized = getattr(self.dbapi, "_initialized", False)
if not initialized:
self.dbapi.init(
type_conv=type_conv, concurrency_level=concurrency_level
)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
fbconn = connection.connection
version = fbconn.server_version
return self._parse_version_info(version)
def _parse_version_info(self, version):
m = match(
r"\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?", version
)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % version
)
if m.group(5) is not None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ["firebird"])
else:
return tuple([int(x) for x in m.group(1, 2, 3)] + ["interbase"])
def is_disconnect(self, e, connection, cursor):
if isinstance(
e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)
):
msg = str(e)
return (
"Error writing data to the connection" in msg
or "Unable to complete network request to host" in msg
or "Invalid connection state" in msg
or "Invalid cursor state" in msg
or "connection shutdown" in msg
)
else:
return False
dialect = FBDialect_kinterbasdb
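# Illustrative note (not part of the original module): given the version-string
# format described above, _parse_version_info() behaves roughly as follows:
#   "LI-V6.3.3.12981 Firebird 2.0"  ->  (2, 0, 12981, 'firebird')
#   "WI-V6.3.3.12981"               ->  (6, 3, 3, 'interbase')
# i.e. when the trailing "Firebird x.y" part is present, the Firebird major and
# minor plus the build number are returned; otherwise the Interbase-style triple
# is used.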
|
PypiClean
|
/django_oidc_provider2-0.9.1-py3-none-any.whl/oidc_provider/migrations/0001_initial.py
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default=b'', max_length=100)),
('client_id', models.CharField(unique=True, max_length=255)),
('client_secret', models.CharField(unique=True, max_length=255)),
('response_type', models.CharField(max_length=30, choices=[
(b'code', b'code (Authorization Code Flow)'), (b'id_token', b'id_token (Implicit Flow)'),
(b'id_token token', b'id_token token (Implicit Flow)')])),
('_redirect_uris', models.TextField(default=b'')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Code',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('expires_at', models.DateTimeField()),
('_scope', models.TextField(default=b'')),
('code', models.CharField(unique=True, max_length=255)),
('client', models.ForeignKey(to='oidc_provider.Client', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Token',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('expires_at', models.DateTimeField()),
('_scope', models.TextField(default=b'')),
('access_token', models.CharField(unique=True, max_length=255)),
('_id_token', models.TextField()),
('client', models.ForeignKey(to='oidc_provider.Client', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserInfo',
fields=[
('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
('given_name', models.CharField(max_length=255, null=True, blank=True)),
('family_name', models.CharField(max_length=255, null=True, blank=True)),
('middle_name', models.CharField(max_length=255, null=True, blank=True)),
('nickname', models.CharField(max_length=255, null=True, blank=True)),
('gender', models.CharField(max_length=100, null=True, choices=[(b'F', b'Female'), (b'M', b'Male')])),
('birthdate', models.DateField(null=True)),
('zoneinfo', models.CharField(default=b'', max_length=100, null=True, blank=True)),
('preferred_username', models.CharField(max_length=255, null=True, blank=True)),
('profile', models.URLField(default=b'', null=True, blank=True)),
('picture', models.URLField(default=b'', null=True, blank=True)),
('website', models.URLField(default=b'', null=True, blank=True)),
('email_verified', models.NullBooleanField(default=False)),
('locale', models.CharField(max_length=100, null=True, blank=True)),
('phone_number', models.CharField(max_length=255, null=True, blank=True)),
('phone_number_verified', models.NullBooleanField(default=False)),
('address_street_address', models.CharField(max_length=255, null=True, blank=True)),
('address_locality', models.CharField(max_length=255, null=True, blank=True)),
('address_region', models.CharField(max_length=255, null=True, blank=True)),
('address_postal_code', models.CharField(max_length=255, null=True, blank=True)),
('address_country', models.CharField(max_length=255, null=True, blank=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='token',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='code',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
preserve_default=True,
),
]
|
PypiClean
|
/wxPython-zombie-3.1.5.6.tar.gz/wxPython-zombie-3.1.5.6/wx/demo/ActiveX_IEHtmlWindow.py
|
# 11/18/2003 - Jeff Grimmett ([email protected])
#
# o Updated for wx namespace
import wx
if wx.Platform == '__WXMSW__':
import wx.lib.iewin as iewin
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log, frame=None):
wx.Panel.__init__(
self, parent, -1,
style=wx.TAB_TRAVERSAL|wx.CLIP_CHILDREN|wx.NO_FULL_REPAINT_ON_RESIZE
)
self.log = log
self.current = "http://wxPython.org/"
self.frame = frame
if frame:
self.titleBase = frame.GetTitle()
sizer = wx.BoxSizer(wx.VERTICAL)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
self.ie = iewin.IEHtmlWindow(self)
btn = wx.Button(self, -1, "Open", style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, self.OnOpenButton, btn)
btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
btn = wx.Button(self, -1, "Home", style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, self.OnHomeButton, btn)
btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
btn = wx.Button(self, -1, "<--", style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, self.OnPrevPageButton, btn)
btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
self.Bind(wx.EVT_UPDATE_UI, self.OnCheckCanGoBack, btn)
btn = wx.Button(self, -1, "-->", style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, self.OnNextPageButton, btn)
btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
self.Bind(wx.EVT_UPDATE_UI, self.OnCheckCanGoForward, btn)
btn = wx.Button(self, -1, "Stop", style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, self.OnStopButton, btn)
btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
btn = wx.Button(self, -1, "Search", style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, self.OnSearchPageButton, btn)
btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
btn = wx.Button(self, -1, "Refresh", style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, self.OnRefreshPageButton, btn)
btnSizer.Add(btn, 0, wx.EXPAND|wx.ALL, 2)
txt = wx.StaticText(self, -1, "Location:")
btnSizer.Add(txt, 0, wx.CENTER|wx.ALL, 2)
self.location = wx.ComboBox(
self, -1, "", style=wx.CB_DROPDOWN|wx.TE_PROCESS_ENTER
)
self.Bind(wx.EVT_COMBOBOX, self.OnLocationSelect, self.location)
self.location.Bind(wx.EVT_KEY_UP, self.OnLocationKey)
self.location.Bind(wx.EVT_CHAR, self.IgnoreReturn)
btnSizer.Add(self.location, 1, wx.EXPAND|wx.ALL, 2)
sizer.Add(btnSizer, 0, wx.EXPAND)
sizer.Add(self.ie, 1, wx.EXPAND)
self.ie.LoadUrl(self.current)
self.location.Append(self.current)
self.SetSizer(sizer)
# Since this is a wx.Window we have to call Layout ourselves
self.Bind(wx.EVT_SIZE, self.OnSize)
## Hook up the event handlers for the IE window. Using
## AddEventSink is how we tell the COM system to look in this
## object for method names matching the COM Event names. They
## are automatically looked for in the ActiveXCtrl class, (so
## deriving a new class from IEHtmlWindow would also have been
## a good approach) and now they will be looked for here too.
self.ie.AddEventSink(self)
def ShutdownDemo(self):
# put the frame title back
if self.frame:
self.frame.SetTitle(self.titleBase)
def OnSize(self, evt):
self.Layout()
def OnLocationSelect(self, evt):
url = self.location.GetStringSelection()
self.log.write('OnLocationSelect: %s\n' % url)
self.ie.Navigate(url)
def OnLocationKey(self, evt):
if evt.GetKeyCode() == wx.WXK_RETURN:
URL = self.location.GetValue()
self.location.Append(URL)
self.ie.Navigate(URL)
else:
evt.Skip()
def IgnoreReturn(self, evt):
if evt.GetKeyCode() != wx.WXK_RETURN:
evt.Skip()
def OnOpenButton(self, event):
dlg = wx.TextEntryDialog(self, "Open Location",
"Enter a full URL or local path",
self.current, wx.OK|wx.CANCEL)
dlg.CentreOnParent()
if dlg.ShowModal() == wx.ID_OK:
self.current = dlg.GetValue()
self.ie.Navigate(self.current)
dlg.Destroy()
def OnHomeButton(self, event):
self.ie.GoHome() ## ET Phone Home!
def OnPrevPageButton(self, event):
self.ie.GoBack()
def OnNextPageButton(self, event):
self.ie.GoForward()
def OnCheckCanGoBack(self, event):
event.Enable(self.ie.CanGoBack())
def OnCheckCanGoForward(self, event):
event.Enable(self.ie.CanGoForward())
def OnStopButton(self, evt):
self.ie.Stop()
def OnSearchPageButton(self, evt):
self.ie.GoSearch()
def OnRefreshPageButton(self, evt):
self.ie.Refresh(iewin.REFRESH_COMPLETELY)
# Here are some of the event methods for the IE COM events. See
# the MSDN docs for DWebBrowserEvents2 for details on what events
# are available, and what the parameters are.
def BeforeNavigate2(self, this, pDisp, URL, Flags, TargetFrameName,
PostData, Headers, Cancel):
self.log.write('BeforeNavigate2: %s\n' % URL[0])
if URL[0] == 'http://www.microsoft.com/':
if wx.MessageBox("Are you sure you want to visit Microsoft?",
style=wx.YES_NO|wx.ICON_QUESTION) == wx.NO:
# This is how you can cancel loading a page. The
# Cancel parameter is defined as an [in,out] type and
# so setting the value means it will be returned and
# checked in the COM control.
Cancel[0] = True
def NewWindow3(self, this, pDisp, Cancel, Flags, urlContext, URL):
self.log.write('NewWindow3: %s\n' % URL)
Cancel[0] = True # Veto the creation of a new window.
#def ProgressChange(self, this, progress, progressMax):
# self.log.write('ProgressChange: %d of %d\n' % (progress, progressMax))
def DocumentComplete(self, this, pDisp, URL):
self.current = URL[0]
self.location.SetValue(self.current)
def TitleChange(self, this, Text):
if self.frame:
self.frame.SetTitle(self.titleBase + ' -- ' + Text)
def StatusTextChange(self, this, Text):
if self.frame:
self.frame.SetStatusText(Text)
#----------------------------------------------------------------------
# for the demo framework...
def runTest(frame, nb, log):
if wx.Platform == '__WXMSW__':
win = TestPanel(nb, log, frame)
return win
else:
from wx.lib.msgpanel import MessagePanel
win = MessagePanel(nb, 'This demo only works on Microsoft Windows.',
'Sorry', wx.ICON_WARNING)
return win
overview = """\
<html><body>
<h2>wx.lib.iewin.IEHtmlWindow</h2>
The wx.lib.iewin.IEHtmlWindow class is one example of using ActiveX
controls from wxPython using the new wx.activex module. This allows
you to use an ActiveX control as if it were a wx.Window: you can call
its methods, set/get properties, and receive events from the ActiveX
control in a very intuitive way.
<p> Using this class is simpler than ActiveXWrapper, doesn't rely on
the win32all extensions, and is more "wx\'ish", meaning that it uses
events and so on, as would be expected from any other wx window.
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
#----------------------------------------------------------------------
|
PypiClean
|
/connected_vehicle_client-2.0.1.tar.gz/connected_vehicle_client-2.0.1/src/models/r_instance_with_unit.py
|
import datetime
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from dateutil.parser import isoparse
from ..types import UNSET, Unset
T = TypeVar("T", bound="RInstanceWithUnit")
@attr.s(auto_attribs=True)
class RInstanceWithUnit:
"""
Attributes:
value (Union[Unset, str]):
timestamp (Union[Unset, datetime.datetime]):
unit (Union[Unset, str]):
"""
value: Union[Unset, str] = UNSET
timestamp: Union[Unset, datetime.datetime] = UNSET
unit: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
value = self.value
timestamp: Union[Unset, str] = UNSET
if not isinstance(self.timestamp, Unset):
timestamp = self.timestamp.isoformat()
unit = self.unit
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if value is not UNSET:
field_dict["value"] = value
if timestamp is not UNSET:
field_dict["timestamp"] = timestamp
if unit is not UNSET:
field_dict["unit"] = unit
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
value = d.pop("value", UNSET)
_timestamp = d.pop("timestamp", UNSET)
timestamp: Union[Unset, datetime.datetime]
if isinstance(_timestamp, Unset):
timestamp = UNSET
else:
timestamp = isoparse(_timestamp)
unit = d.pop("unit", UNSET)
r_instance_with_unit = cls(
value=value,
timestamp=timestamp,
unit=unit,
)
r_instance_with_unit.additional_properties = d
return r_instance_with_unit
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
PypiClean
|
/Lagring-0.2.7.1.tar.gz/Lagring-0.2.7.1/lagring/core.py
|
import os
import io
import shutil
import uuid
from .classproperty import classproperty
from .exception import StorageException
from .logger import log
class _AssetSource:
def __init__(self, src, extension=None):
self.src = src
self.extension = self._normalise_extension(extension)
@staticmethod
def _normalise_extension(extension):
if extension:
return extension if extension[:1] == '.' else '.' + extension
else:
return ''
def __bool__(self):
return self.type is not None
@property
def type(self):
if self.src is None:
return None
elif isinstance(self.src, str):
if os.path.isfile(self.src):
return 'file'
elif os.path.isdir(self.src):
return 'directory'
else:
raise StorageException('Path not found')
elif isinstance(self.src, io.IOBase):
return 'stream'
else:
raise StorageException('Unknown source type')
@property
def stream(self):
if self.src is None:
return None
elif isinstance(self.src, str):
if os.path.isfile(self.src):
return open(self.src, 'rb')
else:
raise StorageException('File not found')
elif isinstance(self.src, io.IOBase):
return self.src
else:
raise StorageException('Unknown source type')
@property
def path(self):
if isinstance(self.src, str):
if os.path.isfile(self.src) or os.path.isdir(self.src):
return self.src
else:
return None
class LagringCore(object):
'''
Storage base functionality
'''
def __init__(self, root, url_base='', write_allowed=True):
'''
root - file storage base directory
url_base - URL base
write_allowed - set this to False for read-only access
'''
self.url_base = url_base
self.root = root
self.write_allowed = write_allowed
if not os.path.exists(self.root):
os.makedirs(self.root)
log.info('Storage root has been created in "{}"'.format(self.root))
@classproperty
def asset_source_adapter(cls):
return _AssetSource
def _put(self, src, bucket, filename):
'''
Save asset file
src - source
bucket - storage directory
filename - filename without directory
'''
bucket_path = os.path.join(self.root, bucket)
if not os.path.exists(bucket_path):
os.makedirs(bucket_path)
dest_path = os.path.join(bucket_path, filename)
type = src.type
if type == 'file':
shutil.copy(src.path, dest_path)
elif type == 'directory':
shutil.copytree(src.path, dest_path)
elif type == 'stream':
with open(dest_path, 'wb') as dest_obj:
shutil.copyfileobj(src.stream, dest_obj)
asset_path = os.path.join(bucket, filename)
log.debug('Added "{}"'.format(asset_path))
return asset_path
def _trash(self, asset_path):
'''
Delete file
'''
abs_path = self.abs_path(asset_path)
if os.path.isfile(abs_path):
os.remove(abs_path)
elif os.path.isdir(abs_path):
shutil.rmtree(abs_path)
else:
log.warning('Non-existent path to delete "{}"'.format(asset_path))
return
log.debug('Deleted "{}"'.format(asset_path))
def _bucket(self, entity_type, entity_id, asset_name):
'''
Storage bucket path
'''
hex = '{0:0{1}x}'.format(entity_id, 8)
return '{}/{}/{}/{}/{}'.format(entity_type, asset_name, hex[0:2], hex[2:4], hex[4:6])
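# Illustrative example (not part of the original class): _bucket() shards assets
# by the zero-padded hex form of the entity id, e.g. for entity_type='user',
# entity_id=0x12345678 and asset_name='avatar' the hex string is '12345678' and
# the bucket becomes 'user/avatar/12/34/56'.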
def add(self, entity_type, entity_id, asset_name, src):
'''
Add file or directory to the storage
New filename contains random part so the old version must be explicitly deleted
Returns path to the asset
'''
if not self.write_allowed:
raise StorageException("Storage is in read-only mode")
# generate the asset path
bucket = self._bucket(entity_type, entity_id, asset_name)
filename = '{}-{}{}'.format(
entity_id,
str(uuid.uuid4())[:6], # some random
src.extension)
asset_path = self._put(src, bucket, filename)
log.debug(
'Asset added: entity=({}, {}), asset_name={}'\
.format(entity_type, entity_id, asset_name))
return asset_path
def delete(self, path):
'''
Delete asset
'''
if not self.write_allowed:
raise StorageException("Storage is in read-only mode")
self._trash(path)
def url(self, path):
'''
Get asset URL
'''
return self.url_base + \
('' if self.url_base.endswith('/') else '/') + \
path
def abs_path(self, path):
'''
Get asset absolute path
'''
return os.path.join(self.root, path)
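# Rough usage sketch (illustrative only; the paths and names below are assumptions):
#   storage = LagringCore('/tmp/assets', url_base='/media')
#   src = LagringCore.asset_source_adapter('/tmp/avatar.png', extension='png')
#   asset_path = storage.add('user', 42, 'avatar', src)
#   # e.g. 'user/avatar/00/00/00/42-1a2b3c.png' (the random suffix varies)
#   storage.url(asset_path)   # '/media/user/avatar/00/00/00/42-1a2b3c.png'
#   storage.delete(asset_path)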
|
PypiClean
|
/lib_bgp_data-0.3.0-py3-none-any.whl/lib_bgp_data/api/__init__.py
|
from flask import Flask
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.routing import BaseConverter
from flasgger import Swagger
from ..utils import Thread_Safe_Logger as Logger
from ..database import Database
__author__ = "Justin Furuness"
__credits__ = ["Justin Furuness", "Reynaldo Morillo"]
__license__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "[email protected]"
__status__ = "Development"
# http://exploreflask.com/en/latest/views.html#custom-converters
class ListConverter(BaseConverter):
"""Converts a comma separated url into a list."""
def to_python(self, value):
return value.split(',')
def to_url(self, values):
return ','.join([BaseConverter.to_url(self, value)
for value in values])
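# Illustrative note (not part of the original module): with this converter a URL
# segment such as '1,2,3' is handed to the view function as ['1', '2', '3'], and
# building a URL from ['1', '2', '3'] produces '1,2,3' again.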
def create_app(args={}):
"""Creates the application and runs it."""
application = Flask(__name__)
# For flasgger docs
swagger = Swagger(application)
# Fixes the proxy problems we've been having
application.wsgi_app = ProxyFix(application.wsgi_app)
# For the list converter
application.url_map.converters['list'] = ListConverter
# Creates the database
application.db = Database(Logger(args))
# Imports all the blueprints that we have
# From the flask tutorial I watched they did it all here,
# so I assume that is correct
from .averages import averages_app
from .extrapolator_engine_results import extrapolator_engine_results_app\
as exr_app
from .hijacks import hijacks_app
from .policies import policies_app
from .relationships import relationships_app
from .roas import roas_app
from .rpki_validity_results import RPKI_app
for sub_app in [averages_app, exr_app, hijacks_app, policies_app,
relationships_app, roas_app, RPKI_app]:
# Sets the database
sub_app.db = application.db
# Registers the blueprint
application.register_blueprint(sub_app)
# Runs the application. Do NOT use in prod!!!
application.run(host='0.0.0.0', debug=True)
|
PypiClean
|
/Sympathy-4.0.1-py3-none-any.whl/sympathy/app/windows/library_view.py
|
import json
import itertools
import os
import PySide6.QtCore as QtCore
import PySide6.QtGui as QtGui
import PySide6.QtWidgets as QtWidgets
from .. import util
from .. import appcore
from .. import settings
from .. import tree_view
from sympathy.utils.prim import uri_to_path
from sympathy.app import flow
from sympathy.app.datatypes import DataType
class LibraryGroupItem(tree_view.TreeItem):
"""A LibraryItem is a folder item in the library model."""
def __init__(self, name, parent, style, tool_tip=''):
super().__init__()
self._name = name
self._parent = parent
self._style = style
self._icon = QtGui.QIcon()
self._icon.addPixmap(
style.standardPixmap(QtWidgets.QStyle.SP_DirClosedIcon),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self._icon.addPixmap(
style.standardPixmap(QtWidgets.QStyle.SP_DirOpenIcon),
QtGui.QIcon.Normal, QtGui.QIcon.On)
self._tool_tip = tool_tip or name
def icon(self):
return self._icon
def identifier(self):
return self._name
def tool_tip(self):
return self._tool_tip
class FlatLibraryGroupItem(LibraryGroupItem):
"""A FlatLibraryGroupItem is a header item for a sequence of tags."""
def __init__(self, name, parent, style):
super().__init__(name, parent, style)
self._icon = QtGui.QIcon()
def highlighted(self):
"""Highlighted text"""
return '<font color="#999999">{}</font>'.format(self._highlighted_text)
class LibraryNodeItem(tree_view.LeafItem):
"""A LibraryNodeItem is a leaf item in the library model representing a
Node.
"""
icon_cache = {}
def __init__(self, node, parent):
super().__init__()
def escape(s):
return (s.replace('&', '&')
.replace('<', '<')
.replace('>', '>'))
self._parent = parent
self._node = node
self._name = node.name
self._tool_tip = '<b>{}</b><p>{}</p>{} -> {}'.format(
self._node.name, self._node.description,
', '.join([escape(str(p.datatype)) for p in self._node.inputs]),
', '.join([escape(str(p.datatype)) for p in self._node.outputs]))
def is_leaf(self):
return True
def icon(self):
if self._node.has_svg_icon:
icon_path = uri_to_path(self._node.icon)
if icon_path in self.icon_cache:
return self.icon_cache[icon_path]
else:
result = QtGui.QIcon(icon_path)
self.icon_cache[icon_path] = result
return result
else:
return QtGui.QIcon(util.icon_path('sub_application.svg'))
def identifier(self):
return self._node.node_identifier
def tool_tip(self):
return self._tool_tip
def node(self):
return self._node
class LibraryModel(tree_view.TreeModel):
"""
The library model. Responsible for building and updating the (viewed)
library.
"""
tags = ['Unknown']
def __init__(self, library_root, style, exclude_builtins=False,
parent=None):
self._library_root = library_root
self._old_root = None
self._style = style
self._exclude_builtins = exclude_builtins
super().__init__(LibraryGroupItem('Root', None, self._style),
parent=parent)
def _build_model(self):
"""Build the tree model using path for hierarchy."""
def hidden_node(node):
if not settings.instance()['Gui/library_show_hidden']:
tags = node.tags if node.tags else self.tags
for tag in tags:
if tag.startswith('Hidden.'):
return True
return False
libraries = self._library_root.libraries
# Attempt to avoid crash in gc.
self._old_root = self._get_root()
root = LibraryGroupItem('Root', None, self._style)
self._set_root(root)
try:
self._old_root.deleteLater()
except AttributeError:
pass
for library in self._library_root.libraries:
name = library.name
child = LibraryGroupItem(name, root, self._style)
libs = {}
paths = set()
all_nodes = set()
all_nodes.update({n for n in library.nodes})
paths.update({tuple(n.path) for n in library.nodes})
# Add possibly missing levels
subpaths = set()
for path in paths:
subpaths.update({path[:i + 1] for i, _ in enumerate(path)})
paths.update(subpaths)
max_depth = max([len(n) for n in paths]) if paths else 0
root.add_child(child)
for depth_ in range(max_depth):
depth = depth_ + 1
libraries = [p for p in sorted(paths) if len(p) == depth]
for lib in libraries:
if (self._exclude_builtins and
(lib[0] == 'sympathy' or lib[0] == 'internal')):
continue
else:
if depth == 1:
parent = child
else:
parent = libs[lib[:-1]]
item = LibraryGroupItem(lib[-1], parent, self._style)
parent.add_child(item)
libs[lib] = item
for node in (n for n in all_nodes
if tuple(n.path) == lib):
if not hidden_node(node):
node_item = LibraryNodeItem(node, item)
item.add_child(node_item)
def mimeTypes(self):
return [appcore.AppCore.mime_type_node()]
def mimeData(self, indices):
nodes = []
for index in indices:
nodes.append(self.data(index, tree_view.IdentityRole))
mime_data = QtCore.QMimeData()
mime_data.setData(appcore.AppCore.mime_type_node(),
json.dumps(nodes).encode('ascii'))
return mime_data
class SeparateTagLibraryModel(LibraryModel):
def __init__(self, library_root, style, model_type='Disk', parent=None):
self._model_type = model_type
super().__init__(library_root, style, parent=parent)
def _build_tags(self, parent, tags, path, tag_mapping):
if tags and not tags.term:
for tag in tags:
child = LibraryGroupItem(
tag.name, parent, self._style, tag.desc)
parent.add_child(child)
self._build_tags(
child, tag, '.'.join([path, tag.key]) if path else tag.key,
tag_mapping)
else:
tag_mapping[path] = parent
def _build_node(self, node, tag_mapping):
tags = node.tags
if not tags:
tags = self.tags
# Filter hidden nodes.
if not settings.instance()['Gui/library_show_hidden']:
for tag in tags:
try:
if tag.startswith('Hidden.'):
return
except Exception:
return
for tag in tags:
parent = tag_mapping.get(tag, None)
if parent:
child = LibraryNodeItem(node, parent)
parent.add_child(child)
# Insert based on the first available tag.
return
for tag in self.tags:
parent = tag_mapping[tag]
child = LibraryNodeItem(node, parent)
parent.add_child(child)
def _build_model(self):
"""
Build the tree model using tags separated by libraries for hierarchy.
"""
if self._model_type == 'Disk':
return super()._build_model()
root = LibraryGroupItem('Root', None, self._style)
self._set_root(root)
for library in self._library_root.libraries:
name = library.name
child = LibraryGroupItem(name, root, self._style)
tag_mapping = {
tag: LibraryGroupItem(tag, child, self._style)
for tag in self.tags}
for value in tag_mapping.values():
child.add_child(value)
root.add_child(child)
if self._library_root.tags:
self._build_tags(child, self._library_root.tags.root, None,
tag_mapping)
for node in library.nodes:
self._build_node(node, tag_mapping)
class TagLibraryModel(SeparateTagLibraryModel):
tags = ['Unknown']
def __init__(self, library_root, style, model_type='Disk',
parent=None):
super().__init__(library_root, style,
model_type=model_type,
parent=parent)
def _build_model(self):
"""Build the tree model using path for hierarchy."""
if self._model_type in ['Disk', 'Separated']:
return super()._build_model()
elif self._model_type != 'Tag':
return
# Proceed with 'Tag Layout'.
tag_mapping = {}
libraries = self._library_root.libraries
all_nodes = set()
for lib in libraries:
all_nodes.update({n for n in lib.nodes})
root = LibraryGroupItem('Root', None, self._style)
self._set_root(root)
for tag in self.tags:
if tag not in tag_mapping:
child = LibraryGroupItem(tag, root, self._style)
root.add_child(child)
tag_mapping[tag] = child
if self._library_root.tags:
self._build_tags(root, self._library_root.tags.root, None,
tag_mapping)
for node in all_nodes:
self._build_node(node, tag_mapping)
def set_type(self, model_type):
model_type_prev = self._model_type
self._model_type = model_type
if self._model_type != model_type_prev:
self.update_model()
class FlatTagLibraryModel(TagLibraryModel):
tags = ['Unknown']
def __init__(self, library_root, style, model_type='Disk',
parent=None):
super().__init__(library_root, style,
model_type=model_type,
parent=parent)
def _all_tags(self):
def inner(tags, path, res):
if tags:
if tags.term:
res['.'.join(tag.key for tag in path)] = path
else:
for tag in tags:
inner(tag, path + [tag], res)
res = {}
inner(self._library_root.tags.root, [], res)
return res
def _build_model(self):
"""Build the tree model using path for hierarchy."""
if self._model_type in ['Disk', 'Tag', 'Separated']:
return super()._build_model()
elif self._model_type != 'FlatTag':
return
# Proceed with 'FlatTag Layout'.
tag_mapping = {}
libraries = self._library_root.libraries
all_nodes = set()
all_tags = self._all_tags()
for lib in libraries:
all_nodes.update({n for n in lib.nodes})
root = LibraryGroupItem('Root', None, self._style)
self._set_root(root)
for tag in itertools.chain(all_tags, self.tags):
if tag not in tag_mapping:
tags = all_tags.get(tag)
name = self.tags[0]
if tags:
name = '/'.join(tag.name for tag in tags)
child = FlatLibraryGroupItem(name, root, self._style)
root.add_child(child)
tag_mapping[tag] = child
for node in all_nodes:
self._build_node(node, tag_mapping)
def flags(self, index):
if self._model_type != 'FlatTag':
return super().flags(index)
if not index.isValid():
return 0
item = self._get_item(index)
if item is not None and item.is_leaf():
return (QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsDragEnabled |
QtCore.Qt.ItemIsSelectable)
else:
return QtCore.Qt.NoItemFlags
class LibraryWidget(tree_view.TreeView):
def __init__(self, parent=None):
super().__init__(parent)
self.setObjectName('Gui::MainWindow::Library::View')
class LibraryFilterProxyModel(tree_view.TreeFilterProxyModelBase):
"""
Proxy model that supplies sorting and filtering for the library model.
"""
def __init__(self, parent=None):
self._input_type = None
self._output_type = None
self._current_libraries = set()
super().__init__(parent)
def _show_item(self, item):
node = item.node()
if node is None:
return False
if not node.installed and (os.path.normcase(os.path.abspath(
os.path.dirname(node.library)))
not in self._current_libraries):
return False
input_match = self._match_port_type(self._input_type, node._inputs)
output_match = self._match_port_type(
self._output_type, node._outputs)
return input_match and output_match
def _match_port_type(self, type_, ports):
if type_ is not None:
return any(port.datatype.match(type_) for port in ports)
return True
def update_port_type(self, datatype, output):
if not isinstance(datatype, DataType):
datatype = None
if output:
self._output_type = datatype
else:
self._input_type = datatype
self.update_filter(self._filter)
return datatype
def set_current_libraries(self, libraries):
prev = self._current_libraries
self._current_libraries = set([os.path.normcase(x) for x in libraries])
if prev != self._current_libraries:
self.invalidateFilter()
self.sort(0, QtCore.Qt.AscendingOrder)
class LibraryView(tree_view.FilterTreeView):
"""Library view combination widget - library view and filter edit."""
def __init__(self, parent=None):
tree_model = LibraryFilterProxyModel()
tree_widget = LibraryWidget()
self._model_type = 'Disk'
super().__init__(tree_model, tree_widget, parent)
self.setObjectName('Gui::MainWindow::Library::ViewWidget')
self._view_style = self._view.styleSheet()
@QtCore.Slot(str)
def set_model_type(self, model_type):
self._model_type = model_type
self._setup_view()
self._model.set_type(model_type)
def set_model(self, model):
self._model_type = model._model_type
super().set_model(model)
def _setup_view(self):
if self._model_type == 'FlatTag':
self._view.setIndentation(0)
self._view.setItemsExpandable(False)
self._view.expandAll()
# this prevents showing the branch icons in the FlatTag mode
self._view.setStyleSheet(
'QTreeView::branch { border-image: url(none.png); }')
else:
self._view.setStyleSheet(self._view_style)
super()._setup_view()
def update_libraries(self, flow):
self._proxy_model.set_current_libraries(util.library_paths(flow))
@QtCore.Slot(str)
def update_input_filter(self, new_type=None):
self._update_port_filter(new_type, output=False)
@QtCore.Slot(str)
def update_output_filter(self, new_type=None):
self._update_port_filter(new_type, output=True)
def _get_port_datatype(self, new_type):
if isinstance(new_type, DataType):
return new_type
elif new_type:
return DataType.from_str(new_type)
return None
def _update_port_filter(self, new_type, output):
datatype = self._get_port_datatype(new_type)
used_datatype = self._proxy_model.update_port_type(
datatype, output=output)
self._handle_expanding(isinstance(used_datatype, DataType))
def _handle_expanding(self, state):
if state or (self._model and self._model_type == 'FlatTag'):
self._view.expandAll()
else:
self._view.collapseAll()
def _handle_switch_to_list_view(self):
self._view.setFocus()
try:
proxy_index = self._proxy_model.index(0, 0)
if self._model_type == 'FlatTag':
if not proxy_index.parent().isValid():
proxy_index = self._proxy_model.index(0, 0, proxy_index)
self._view.setCurrentIndex(proxy_index)
except Exception:
pass
@QtCore.Slot(flow.Flow)
def current_flow_changed(self, flow):
self._proxy_model.set_current_libraries(util.library_paths(flow))
if self._proxy_model._filter != '':
self._handle_expanding(self._proxy_model._filter != '')
class QuickSearchDialog(QtWidgets.QDialog):
item_accepted = QtCore.Signal(object, object, QtCore.QPointF)
def __init__(
self, library_root, flow_, port, scene_position, title=None,
parent=None):
super().__init__(
parent, QtCore.Qt.Popup | QtCore.Qt.FramelessWindowHint)
self._library_root = library_root
self._port = port
self._title = title
model_type = 'FlatTag'
self._model = FlatTagLibraryModel(
self._library_root, self.style(),
model_type=model_type,
parent=self)
self.scene_position = scene_position
self._view = LibraryView(parent=self)
self._view.current_flow_changed(flow_)
self._view.set_model(self._model)
settings_ = settings.instance()
matcher_type = settings_['Gui/library_matcher_type']
highlighter_type = settings_['Gui/library_highlighter_type']
highlighter_color = settings_['Gui/library_highlighter_color']
self._view.set_highlighter(
(matcher_type, highlighter_type, highlighter_color))
if self._port is not None:
if self._port.type == flow.Type.InputPort:
self._view.update_output_filter(self._port.datatype)
else:
self._view.update_input_filter(self._port.datatype)
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(3, 3, 3, 3)
layout.setSpacing(2)
if self._title is not None:
title_label = QtWidgets.QLabel(self._title)
layout.addWidget(title_label)
layout.addWidget(self._view)
self.setLayout(layout)
self._view.item_accepted.connect(self._accept)
def _accept(self, item):
self.item_accepted.emit(item, self._port, self.scene_position)
self.accept()
def focus_filter(self):
self._view.focus_filter()
|
PypiClean
|
/django-dojo-0.0.1.tar.gz/django-dojo-0.0.1/dojo/static/dojo/dijit/form/CheckBox.js
|
define([
"require",
"dojo/_base/declare", // declare
"dojo/dom-attr", // domAttr.set
"dojo/has", // has("dijit-legacy-requires")
"dojo/query", // query
"dojo/ready",
"./ToggleButton",
"./_CheckBoxMixin",
"dojo/text!./templates/CheckBox.html",
"dojo/NodeList-dom" // NodeList.addClass/removeClass
], function(require, declare, domAttr, has, query, ready, ToggleButton, _CheckBoxMixin, template){
// module:
// dijit/form/CheckBox
// Back compat w/1.6, remove for 2.0
if(has("dijit-legacy-requires")){
ready(0, function(){
var requires = ["dijit/form/RadioButton"];
require(requires); // use indirection so modules not rolled into a build
});
}
return declare("dijit.form.CheckBox", [ToggleButton, _CheckBoxMixin], {
// summary:
// Same as an HTML checkbox, but with fancy styling.
//
// description:
// User interacts with real html inputs.
// On onclick (which occurs by mouse click, space-bar, or
// using the arrow keys to switch the selected radio button),
// we update the state of the checkbox/radio.
//
// There are two modes:
//
// 1. High contrast mode
// 2. Normal mode
//
// In case 1, the regular html inputs are shown and used by the user.
// In case 2, the regular html inputs are invisible but still used by
// the user. They are turned quasi-invisible and overlay the background-image.
templateString: template,
baseClass: "dijitCheckBox",
_setValueAttr: function(/*String|Boolean*/ newValue, /*Boolean*/ priorityChange){
// summary:
// Handler for value= attribute to constructor, and also calls to
// set('value', val).
// description:
// During initialization, just saves as attribute to the `<input type=checkbox>`.
//
// After initialization,
// when passed a boolean, controls whether or not the CheckBox is checked.
// If passed a string, changes the value attribute of the CheckBox (the one
// specified as "value" when the CheckBox was constructed
// (ex: `<input data-dojo-type="dijit/CheckBox" value="chicken">`).
//
// `widget.set('value', string)` will check the checkbox and change the value to the
// specified string.
//
// `widget.set('value', boolean)` will change the checked state.
if(typeof newValue == "string"){
this.inherited(arguments);
newValue = true;
}
if(this._created){
this.set('checked', newValue, priorityChange);
}
},
_getValueAttr: function(){
// summary:
// Hook so get('value') works.
// description:
// If the CheckBox is checked, returns the value attribute.
// Otherwise returns false.
return (this.checked ? this.value : false);
},
// Override behavior from Button, since we don't have an iconNode
_setIconClassAttr: null,
postMixInProperties: function(){
this.inherited(arguments);
// Need to set initial checked state as part of template, so that form submit works.
// domAttr.set(node, "checked", bool) doesn't work on IE until node has been attached
// to <body>, see #8666
this.checkedAttrSetting = this.checked ? "checked" : "";
},
_fillContent: function(){
// Override Button::_fillContent() since it doesn't make sense for CheckBox,
// since CheckBox doesn't even have a container
},
_onFocus: function(){
if(this.id){
query("label[for='"+this.id+"']").addClass("dijitFocusedLabel");
}
this.inherited(arguments);
},
_onBlur: function(){
if(this.id){
query("label[for='"+this.id+"']").removeClass("dijitFocusedLabel");
}
this.inherited(arguments);
}
});
});
|
PypiClean
|
/django_sql_dashboard-1.1-py3-none-any.whl/django_sql_dashboard/admin.py
|
from html import escape
from django.contrib import admin
from django.utils.safestring import mark_safe
from .models import Dashboard, DashboardQuery
class DashboardQueryInline(admin.TabularInline):
model = DashboardQuery
extra = 1
def has_change_permission(self, request, obj=None):
if obj is None:
return True
return obj.user_can_edit(request.user)
def get_readonly_fields(self, request, obj=None):
if not request.user.has_perm("django_sql_dashboard.execute_sql"):
return ("sql",)
else:
return tuple()
@admin.register(Dashboard)
class DashboardAdmin(admin.ModelAdmin):
list_display = ("slug", "title", "owned_by", "view_policy", "view_dashboard")
inlines = [
DashboardQueryInline,
]
raw_id_fields = ("owned_by",)
fieldsets = (
(
None,
{"fields": ("slug", "title", "description", "owned_by", "created_at")},
),
(
"Permissions",
{"fields": ("view_policy", "edit_policy", "view_group", "edit_group")},
),
)
def view_dashboard(self, obj):
return mark_safe(
'<a href="{path}">{path}</a>'.format(path=escape(obj.get_absolute_url()))
)
def save_model(self, request, obj, form, change):
if not obj.owned_by_id:
obj.owned_by = request.user
obj.save()
def has_change_permission(self, request, obj=None):
if obj is None:
return True
if request.user.is_superuser:
return True
return obj.user_can_edit(request.user)
def get_readonly_fields(self, request, obj):
readonly_fields = ["created_at"]
if not request.user.is_superuser:
readonly_fields.append("owned_by")
return readonly_fields
def get_queryset(self, request):
if request.user.is_superuser:
# Superusers should be able to see all dashboards.
return super().get_queryset(request)
# Otherwise, show only the dashboards the user has edit access to.
return Dashboard.get_editable_by_user(request.user)
|
PypiClean
|
/bluesky-widgets-0.0.15.tar.gz/bluesky-widgets-0.0.15/docs/source/installation.rst
|
============
Installation
============
This project currently has no *required* dependencies. The libraries you need
will depend on which graphical frontend(s) you plan to use.
We recommend upgrading ``pip`` and ``setuptools`` first, as recent versions of
these make the installations that follow more likely to succeed::
$ pip install --upgrade pip setuptools
The examples perform data generation and access using some bluesky projects::
$ pip install suitcase-msgpack bluesky databroker ophyd
and they use Qt::
$ pip install qtpy pyqt5
Finally, install the project itself::
$ pip install bluesky-widgets
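To check that the installation worked, you can try importing the package (a
minimal sanity check; the import name ``bluesky_widgets`` is an assumption here)::
$ python -c "import bluesky_widgets"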
|
PypiClean
|
/delta_nlp-0.3.2-cp36-cp36m-macosx_10_9_x86_64.whl/delta/layers/common_layers.py
|
"""Common layers."""
import delta.compat as tf
from absl import logging
#pylint: disable=invalid-name
def conv2d(x, name, filter_size, in_channels, out_channels, strides, bias=True):
"""2D convolution."""
with tf.variable_scope(name):
kernel = tf.get_variable(
name='DW',
shape=[filter_size[0], filter_size[1], in_channels, out_channels],
dtype=tf.float32,
initializer=tf.initializers.glorot_uniform())
if bias:
b = tf.get_variable(
name='bias',
shape=[out_channels],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
out = tf.nn.conv2d(
x, kernel, [1, strides[0], strides[1], 1], padding='SAME')
if bias:
out = tf.nn.bias_add(out, b)
return out
def max_pool(x, ksize, strides):
"""Max Pooling."""
return tf.nn.max_pool(
x,
ksize=[1, ksize[0], ksize[1], 1],
strides=[1, strides[0], strides[1], 1],
padding='VALID',
name='max_pool')
def linear(x, names, shapes, has_bias=True):
"""Linear Layer."""
assert len(shapes) == 2
with tf.variable_scope(names):
weights = tf.get_variable(
name='weights',
shape=shapes,
initializer=tf.initializers.glorot_uniform())
if has_bias:
bias = tf.get_variable(
name='bias',
shape=shapes[1],
initializer=tf.initializers.glorot_uniform())
return tf.matmul(x, weights) + bias
else:
return tf.matmul(x, weights)
def attention(inputs, attention_size, time_major=False, return_alphas=False):
"""Attention layer."""
if isinstance(inputs, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
inputs = tf.concat(inputs, 2)
if time_major:
# (T,B,D) => (B,T,D)
inputs = tf.transpose(inputs, [1, 0, 2])
time_size = inputs.shape[1].value # T value - time size of the RNN layer
hidden_size = inputs.shape[2].value # D value - hidden size of the RNN layer
# Trainable parameters
W_omega = tf.get_variable(
name='W_omega',
initializer=tf.random_normal([hidden_size, attention_size], stddev=0.1))
b_omega = tf.get_variable(
name='b_omega',
initializer=tf.random_normal([attention_size], stddev=0.1))
u_omega = tf.get_variable(
name='u_omega',
initializer=tf.random_normal([attention_size, 1], stddev=0.1))
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
#v = tf.tanh(tf.tensordot(inputs, W_omega, axes=1) + b_omega)
#v = tf.sigmoid(tf.tensordot(inputs, W_omega, axes=1) + b_omega)
# (B, T, D) dot (D, Atten)
logging.info('attention inputs: {}'.format(inputs.shape))
inputs_reshaped = tf.reshape(inputs, [-1, hidden_size])
dot = tf.matmul(inputs_reshaped, W_omega)
dot = tf.reshape(dot, [-1, time_size, attention_size])
v = tf.sigmoid(dot + b_omega)
logging.info(f'attention vector: {v.shape}')
# For each of the timestamps its vector of size A from `v` is reduced with `u` vector
# (B, T, Atten) dot (Atten)
#vu = tf.tensordot(v, u_omega, axes=1) # (B,T) shape
v = tf.reshape(v, [-1, attention_size])
vu = tf.matmul(v, u_omega) # (B,T) shape
vu = tf.squeeze(vu, axis=-1)
vu = tf.reshape(vu, [-1, time_size])
logging.info(f'attention energy: {vu.shape}')
alphas = tf.nn.softmax(vu) # (B,T) shape also
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
# [batch, time] -> [batch, time, 1]
alphas = tf.expand_dims(alphas, -1)
# [batch, time, dim] -> [batch, dim]
output = tf.reduce_sum(inputs * alphas, 1)
if not return_alphas:
return output
return output, alphas
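# Shape sketch (illustrative only, values assumed): for inputs of shape
# (B=32, T=50, D=128) and attention_size A=64, `v` has shape (B*T, 64) after the
# reshape, `alphas` ends up as (32, 50, 1) after expand_dims, and the returned
# `output` is the attention-weighted sum over time with shape (32, 128).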
def embedding_look_up(text_inputs, vocab_size, embedding_size):
"""Embedding layer."""
with tf.variable_scope("embedding"):
W = tf.get_variable(
name='W',
initializer=tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
embedding_chars = tf.nn.embedding_lookup(W, text_inputs)
embedding_chars_expanded = tf.expand_dims(embedding_chars, -1)
return embedding_chars_expanded
#pylint: disable=too-many-locals
def conv_pool(embedded_chars_expanded, filter_sizes, embedding_size,
num_filters, sequence_length):
"""
text conv and max pooling to get one-dimension vector to representation of text
:param filter_sizes:
:return:
"""
pooled_outputs = []
for _, filter_size in enumerate(filter_sizes):
with tf.variable_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.get_variable(
name='W', initializer=tf.truncated_normal(filter_shape, stddev=0.1))
b = tf.get_variable(
name='b', initializer=tf.constant(0.1, shape=[num_filters]))
conv = tf.nn.conv2d(
embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
h_pool = tf.concat(pooled_outputs, 3)
h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
return h_pool_flat
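# Shape sketch (illustrative only, values assumed): with sequence_length=100,
# embedding_size=128, filter_sizes=[3, 4, 5] and num_filters=64, each branch
# pools its conv output down to (batch, 1, 1, 64); concatenating along the last
# axis and reshaping yields h_pool_flat of shape (batch, 64 * 3) = (batch, 192).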
|
PypiClean
|
/vgs_api_client-0.0.45-py3-none-any.whl/vgs_api_client/model/inline_response200.py
|
import re # noqa: F401
import sys # noqa: F401
from vgs_api_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from vgs_api_client.exceptions import ApiAttributeError
def lazy_import():
from vgs_api_client.model.revealed_data import RevealedData
globals()['RevealedData'] = RevealedData
class InlineResponse200(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ({str: (RevealedData,)},), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse200 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ({str: (RevealedData,)}): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse200 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ({str: (RevealedData,)}): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/jupyterlab_myst-2.0.2.tar.gz/jupyterlab_myst-2.0.2/tailwind.config.js
|
const colors = require('tailwindcss/colors');
module.exports = {
darkMode: ['class', '[data-jp-theme-light="false"]'],
content: [
'./src/**/*.{js,ts,jsx,tsx}',
'node_modules/myst-to-react/dist/**/*.{js,ts,jsx,tsx}',
'node_modules/@myst-theme/frontmatter/dist/**/*.{js,ts,jsx,tsx}',
    // In development only, also pick up classes from these yalc-linked folders
'.yalc/myst-to-react/dist/**/*.{js,ts,jsx,tsx}',
'.yalc/@myst-theme/frontmatter/dist/**/*.{js,ts,jsx,tsx}'
],
theme: {
extend: {
colors: {
primary: colors.blue,
success: colors.green[500]
},
// See https://github.com/tailwindlabs/tailwindcss-typography/blob/master/src/styles.js
typography: theme => ({
DEFAULT: {
css: {
p: {
color: 'var(--jp-content-font-color1)',
fontFamily: 'var(--jp-content-font-family)',
fontSize: 'var(--jp-content-font-size1)',
lineHeight: 'var(--jp-content-line-height)',
marginTop: 0,
marginBottom: '1em'
},
'h1,h2,h3,h4,h5,h6': {
lineHeight: 'var(--jp-content-heading-line-height, 1)',
fontWeight: 'var(--jp-content-heading-font-weight, 500)',
fontStyle: 'normal',
marginTop: 'var(--jp-content-heading-margin-top, 1.2em)',
marginBottom: 'var(--jp-content-heading-margin-bottom, 0.8em)',
color: 'var(--jp-content-font-color1)'
},
'h1:first-child,h2:first-child,h3:first-child,h4:first-child,h5:first-child,h6:first-child':
{
marginTop: 'calc(0.5 * var(--jp-content-heading-margin-top))'
},
'h1:last-child,h2:last-child,h3:last-child,h4:last-child,h5:last-child,h6:last-child':
{
marginBottom:
'calc(0.5 * var(--jp-content-heading-margin-bottom))'
},
h1: {
fontSize: 'var(--jp-content-font-size5)'
},
h2: {
fontSize: 'var(--jp-content-font-size4)'
},
h3: {
fontSize: 'var(--jp-content-font-size3)'
},
h4: {
fontSize: 'var(--jp-content-font-size2)'
},
h5: {
fontSize: 'var(--jp-content-font-size1)'
},
h6: {
fontSize: 'var(--jp-content-font-size0)'
},
code: {
fontWeight: 'inherit',
color: 'var(--jp-content-font-color1)',
fontFamily: 'var(--jp-code-font-family)',
fontSize: 'inherit',
lineHeight: 'var(--jp-code-line-height)',
whiteSpace: 'pre-wrap',
backgroundColor: 'var(--jp-layout-color2)',
padding: '1px 5px'
},
'code::before': {
content: ''
},
'code::after': {
content: ''
},
'blockquote p:first-of-type::before': { content: 'none' },
'blockquote p:first-of-type::after': { content: 'none' },
li: {
marginTop: '0.25rem',
marginBottom: '0.25rem'
},
a: {
textDecoration: 'none',
color: 'var(--jp-content-link-color, #1976d2)', // --md-blue-700
fontWeight: 400,
'&:hover': {
color: 'var(--jp-content-link-color, #1976d2)', // --md-blue-700
textDecoration: 'underline',
fontWeight: 400
}
},
'li > p, dd > p, header > p, footer > p': {
marginTop: '0.25rem',
marginBottom: '0.25rem'
}
}
},
stone: {
css: {
'--tw-prose-bullets': 'var(--jp-content-font-color1)'
}
}
}),
keyframes: {
load: {
'0%': { width: '0%' },
'100%': { width: '50%' }
},
fadeIn: {
'0%': { opacity: 0.0 },
'25%': { opacity: 0.25 },
'50%': { opacity: 0.5 },
'75%': { opacity: 0.75 },
'100%': { opacity: 1 }
}
},
animation: {
load: 'load 2.5s ease-out',
'fadein-fast': 'fadeIn 1s ease-out'
}
}
},
corePlugins: {
preflight: false
},
plugins: [require('@tailwindcss/typography')]
};
|
PypiClean
|
/ebook-converter-noimage-4.12.2.tar.gz/ebook-converter-noimage-4.12.2/ebook_converter/ebooks/metadata/pdf.py
|
import functools
import os
import re
import shutil
import subprocess
from ebook_converter.ptempfile import TemporaryDirectory
from ebook_converter.ebooks.metadata import (
MetaInformation, string_to_authors, check_isbn, check_doi)
def read_info(outputdir, get_cover):
''' Read info dict and cover from a pdf file named src.pdf in outputdir.
Note that this function changes the cwd to outputdir and is therefore not
thread safe. Run it using fork_job. This is necessary as there is no safe
way to pass unicode paths via command line arguments. This also ensures
that if poppler crashes, no stale file handles are left for the original
file, only for src.pdf.'''
pdfinfo = 'pdfinfo'
pdftoppm = 'pdftoppm'
source_file = os.path.join(outputdir, 'src.pdf')
cover_file = os.path.join(outputdir, 'cover')
ans = {}
try:
raw = subprocess.check_output([pdfinfo, '-enc', 'UTF-8', '-isodates',
source_file])
except subprocess.CalledProcessError as e:
print(f'pdfinfo errored out with return code: {e.returncode}')
return None
try:
info_raw = raw.decode('utf-8')
except UnicodeDecodeError:
print('pdfinfo returned no UTF-8 data')
return None
for line in info_raw.splitlines():
if ':' not in line:
continue
field, val = line.partition(':')[::2]
val = val.strip()
if field and val:
ans[field] = val.strip()
# Now read XMP metadata
# Versions of poppler before 0.47.0 used to print out both the Info dict
# and XMP metadata packet together. However, since that changed in
# https://cgit.freedesktop.org/poppler/poppler/commit/?id=c91483aceb1b640771f572cb3df9ad707e5cad0d
# we can no longer rely on it.
try:
raw = subprocess.check_output([pdfinfo, '-meta', source_file]).strip()
except subprocess.CalledProcessError as e:
print('pdfinfo failed to read XML metadata with return code: '
f'{e.returncode}')
else:
parts = re.split(br'^Metadata:', raw, 1, flags=re.MULTILINE)
if len(parts) > 1:
# old poppler < 0.47.0
raw = parts[1].strip()
if raw:
ans['xmp_metadata'] = raw
if get_cover:
try:
subprocess.check_call([pdftoppm, '-singlefile', '-jpeg',
'-cropbox', source_file, cover_file])
except subprocess.CalledProcessError as e:
print(f'pdftoppm errored out with return code: {e.returncode}')
return ans
def page_images(pdfpath, outputdir='.', first=1, last=1, image_format='jpeg',
prefix='page-images'):
pdftoppm = 'pdftoppm'
outputdir = os.path.abspath(outputdir)
args = {}
try:
subprocess.check_call([
pdftoppm, '-cropbox', '-' + image_format, '-f', str(first),
'-l', str(last), pdfpath, os.path.join(outputdir, prefix)
], **args)
except subprocess.CalledProcessError as e:
raise ValueError('Failed to render PDF, pdftoppm errorcode: %s' %
e.returncode)
def is_pdf_encrypted(path_to_pdf):
pdfinfo = 'pdfinfo'
raw = subprocess.check_output([pdfinfo, path_to_pdf])
q = re.search(br'^Encrypted:\s*(\S+)', raw, flags=re.MULTILINE)
if q is not None:
return q.group(1) == b'yes'
return False
def get_metadata(stream, cover=True):
with TemporaryDirectory('_pdf_metadata_read') as pdfpath:
stream.seek(0)
with open(os.path.join(pdfpath, 'src.pdf'), 'wb') as f:
shutil.copyfileobj(stream, f)
info = read_info(pdfpath, bool(cover))
if info is None:
raise ValueError('Could not read info dict from PDF')
covpath = os.path.join(pdfpath, 'cover.jpg')
cdata = None
if cover and os.path.exists(covpath):
with open(covpath, 'rb') as f:
cdata = f.read()
title = info.get('Title', None) or 'Unknown'
au = info.get('Author', None)
if au is None:
au = ['Unknown']
else:
au = string_to_authors(au)
mi = MetaInformation(title, au)
creator = info.get('Creator', None)
if creator:
mi.book_producer = creator
keywords = info.get('Keywords', None)
mi.tags = []
if keywords:
mi.tags = [x.strip() for x in keywords.split(',')]
isbn = [check_isbn(x) for x in mi.tags if check_isbn(x)]
if isbn:
mi.isbn = isbn = isbn[0]
mi.tags = [x for x in mi.tags if check_isbn(x) != isbn]
subject = info.get('Subject', None)
if subject:
mi.tags.insert(0, subject)
if 'xmp_metadata' in info:
from ebook_converter.ebooks.metadata.xmp import consolidate_metadata
mi = consolidate_metadata(mi, info)
# Look for recognizable identifiers in the info dict, if they were not
# found in the XMP metadata
for scheme, check_func in {'doi': check_doi, 'isbn': check_isbn}.items():
if scheme not in mi.get_identifiers():
for k, v in info.items():
if k != 'xmp_metadata':
val = check_func(v)
if val:
mi.set_identifier(scheme, val)
break
if cdata:
mi.cover_data = ('jpeg', cdata)
return mi
get_quick_metadata = functools.partial(get_metadata, cover=False)
def set_metadata(stream, mi):
return None
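# --- Usage sketch (added for illustration, not part of the original module) ---
# Assumes the poppler utilities (pdfinfo/pdftoppm) are installed and that
# 'sample.pdf' is a placeholder path; mi.title/mi.authors follow the calibre-style
# MetaInformation interface constructed above.
if __name__ == '__main__':
    with open('sample.pdf', 'rb') as stream:
        mi = get_quick_metadata(stream)
        print(mi.title, mi.authors)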
|
PypiClean
|
/proto_clip_toolkit-0.1.tar.gz/proto_clip_toolkit-0.1/proto_clip_toolkit/utils/tsne.py
|
import torch
import os
import matplotlib.pyplot as plt
import argparse
import yaml
import sys
from pathlib import Path
p = Path(__file__).parents[3]
sys.path.append(str(p))
from proto_datasets import build_dataset
from sklearn.manifold import TSNE
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from utils import build_cache_model, get_textual_memory_bank
import json
import cv2
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--config', dest='config', help='settings of Proto-CLIP in yaml format', required=True)
parser.add_argument('--splits_path', dest='splits_path', help='path to the splits file for the particular dataset', required=True)
parser.add_argument('--memory_bank_v_path', dest='memory_bank_v_path', help='path to the visual embeddings memory bank', required=True)
parser.add_argument('--memory_bank_t_path', dest='memory_bank_t_path', help='path to the textual embeddings memory bank', required=True)
parser.add_argument('--after_train', dest='after_train', help='save embeddings after training', action='store_true')
args = parser.parse_args()
return args
def parse_splits_file(config_path):
"""Returns the map class id to the class name using the splits json file."""
class_id_mapping = {}
    with open(config_path) as f:
        data = json.load(f)
for config_data in data["train"]:
class_id_mapping[config_data[1]] = config_data[2]
return class_id_mapping
def get_image_samples(txt_path, n_classes):
"""Returns a single representative image sample for each class."""
    with open(txt_path, "r") as f:
        data = [x.strip("\n") for x in f.readlines()]
output_image_locations = []
for i in range(n_classes):
        # We pick the first support-set sample of each class (assumes 16 shots per class).
        img_idx = i * 16
output_image_locations.append(data[img_idx])
return output_image_locations
def get_tsne_coordinates(z_img_proto, z_text_proto, n_class):
"""Returns the 2-dimensional t-SNE coordinates for the image and the text embeddings."""
X = torch.vstack((
z_img_proto,
z_text_proto,
# zq_imgs.view(-1, zq_imgs.shape[-1])
)).cpu().data.numpy()
tsne_X = TSNE(n_components=2, perplexity=10, random_state=1).fit_transform(X)
zi, zt = tsne_X[:n_class], tsne_X[n_class: ]
return zi, zt
def plot_tsne_after(z_img_proto, z_text_proto, txt_prompts):
"""Returns the t-SNE plot for the visual and textual embeddings after training."""
n_class = z_img_proto.shape[0]
zi, zt = get_tsne_coordinates(z_img_proto, z_text_proto, n_class)
image_locations = get_image_samples("./image_locations.txt", n_class)
_, ax = plt.subplots(figsize=(50, 50))
fontsize = 10
for idx, (x, y) in enumerate(zip(zi[:, 0], zi[:, 1])):
img = plt.imread(image_locations[idx])
img = cv2.resize(img, (48, 48))
imagebox = OffsetImage(img) # Adjust the zoom level as desired
ab = AnnotationBbox(imagebox, (x, y), frameon=False, zorder=1)
# ax.set_aspect('equal')
ax.scatter(x, y, zorder=4, s=32, c='cyan', marker=".")
ax.add_artist(ab)
ax.annotate(
txt_prompts[idx], xy=(x, y + 1), ha='center', c="crimson", fontsize=fontsize)
ax.scatter(zt[:, 0], zt[:, 1], c='aquamarine', zorder=3, marker="+", s=128)
ax.axis('off')
plt.savefig("./test_plot_after.png", dpi=300)
def plot_tsne_before(z_img_proto, z_text_proto, txt_prompts):
"""Returns the t-SNE plot for the visual and textual embeddings before training."""
n_class = z_img_proto.shape[0]
zi, zt = get_tsne_coordinates(z_img_proto, z_text_proto, n_class)
image_locations = get_image_samples("./image_locations.txt", n_class)
_, ax = plt.subplots(figsize=(50, 50))
fontsize = 25
for idx, (x, y) in enumerate(zip(zi[:, 0], zi[:, 1])):
img = plt.imread(image_locations[idx])
img = cv2.resize(img, (48, 48))
imagebox = OffsetImage(img) # Adjust the zoom level as desired
ab = AnnotationBbox(imagebox, (x, y), frameon=False, zorder=1)
ax.scatter(x, y, zorder=4, s=32, c='cyan', marker=".")
ax.add_artist(ab)
ax.scatter(zt[:, 0], zt[:, 1], c='lightseagreen', zorder=3, marker="P", s=128)
for i in range(len(txt_prompts)):
ax.annotate(
txt_prompts[i], (zt[i, 0], zt[i, 1] + 0.2), c='crimson', fontsize=fontsize)
ax.axis('off')
plt.savefig("./test_plot_before.png", dpi=300)
if __name__=="__main__":
args = get_arguments()
cfg = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
dataset = build_dataset(cfg['dataset'], cfg['root_path'], cfg['shots'])
if not args.after_train:
#This function will not work if you do not have the cache available.
#You would need to create the cache and place the initial embeddings inside it.
#An easy way to do it is to run the main.py for 1 epoch.
visual_memory_keys, visual_mem_values = build_cache_model(cfg, None, None)
text_prompts, textual_memory_bank = get_textual_memory_bank(cfg, [], None, None)
embeddings_v = visual_memory_keys.t()
embeddings_t = textual_memory_bank.t()
else:
best_model_path_v = args.memory_bank_v_path
best_model_path_t = args.memory_bank_t_path
try:
embeddings_v = torch.load(best_model_path_v)
embeddings_t = torch.load(best_model_path_t)
        except FileNotFoundError as exc:
            raise FileNotFoundError(
                f"File does not exist: {best_model_path_v} and {best_model_path_t}"
            ) from exc
    NxK, ndim = embeddings_v.shape
K = cfg['shots']
N = NxK//K
zs_imgs = embeddings_v.view(-1, K, ndim)
zs_imgs = zs_imgs / zs_imgs.norm(dim=-1, keepdim=True)
z_img_proto = zs_imgs.mean(dim=1)
z_img_proto = z_img_proto / z_img_proto.norm(dim=-1, keepdim=True)
zs_text = embeddings_t
z_text_proto = zs_text / zs_text.norm(dim=-1, keepdim=True)
class_id_mapping = parse_splits_file(args.splits_path)
if args.after_train:
plot_tsne_after(z_img_proto, z_text_proto, list(class_id_mapping.values()))
else:
plot_tsne_before(z_img_proto, z_text_proto, list(class_id_mapping.values()))
|
PypiClean
|
/nbclean-0.3.2.tar.gz/nbclean-0.3.2/examples/modify_notebooks.ipynb
|
```
import nbclean as nbc
```
# Modifying notebooks with `nbclean`
It's common to want to modify notebook code, outputs, and metadata according to guidelines you choose.
Below are a few examples of how to do this with `nbclean`. To demonstrate, we'll use a
notebook containing many different kinds of content: we'll modify some cells, delete others, and so on.
Click the button below to see the "original" version of the notebook; it has explanations for
how `nbclean` should treat each cell.
<a href="test_notebooks/test_notebook.ipynb"><button>Click here to see<br />the original notebook!</button></a>
Below we'll read in the notebook with `nbclean`, and then run several processors on it.
Note that these processors will treat cells differently depending on tags, cell content, etc.
```
# Here are paths to our notebooks
path_original_notebook = './test_notebooks/test_notebook.ipynb'
path_save = './test_notebooks/'
# Clear different parts of the notebook cells based on tags
ntbk = nbc.NotebookCleaner(path_original_notebook)
ntbk.clear('output', tag='hide_output')
ntbk.clear('content', tag='hide_content')
ntbk.clear('stderr', tag='hide_stderr')
# Removing entire cells
ntbk.remove_cells(tag='remove')
# Removing entire cells if they're empty and match a tag
ntbk.remove_cells(tag='remove_if_empty', empty=True)
# Removing entire cells based on text present in the source
ntbk.remove_cells(search_text="# HIDDEN")
# Replacing text
text_replace_begin = '### SOLUTION BEGIN'
text_replace_end = '### SOLUTION END'
ntbk.replace_text(text_replace_begin, text_replace_end)
```
Finally, we'll save the notebook to a new "cleaned" location.
```
# Now we'll save the notebook to inspect
ntbk.save(path_save + 'test_notebook_saved.ipynb')
```
<a href="test_notebooks/test_notebook_saved.ipynb"><button>Click here to see<br />the saved notebook!</button></a>
|
PypiClean
|
/flying_discs-0.3.0-py3-none-any.whl/flying_discs/morrison/coordinates.py
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence
import numpy as np
@dataclass
class MorrisonPosition2D:
x: float
z: float
vx: float
vz: float
ax: float
az: float
class MorrisonTrajectory2D:
# pylint: disable=too-many-instance-attributes, invalid-name
def __init__(self, positions: Sequence[MorrisonPosition2D]) -> None:
self.positions = positions
self.X = np.array([p.x for p in self.positions])
self.Z = np.array([p.z for p in self.positions])
self.VX = np.array([p.vx for p in self.positions])
self.VZ = np.array([p.vz for p in self.positions])
self.AX = np.array([p.ax for p in self.positions])
self.AZ = np.array([p.az for p in self.positions])
self.__n = 0 # iterator count
def __iter__(self) -> "MorrisonTrajectory2D":
self.__n = 0
return self
def __next__(self) -> MorrisonPosition2D:
if self.__n <= len(self.positions) - 1:
result = self.positions[self.__n]
self.__n += 1
return result
raise StopIteration
def __getitem__(self, i: int) -> MorrisonPosition2D:
return self.positions[i]
def __len__(self) -> int:
return len(self.positions)
    def __eq__(self, o: object) -> bool:
        if isinstance(o, MorrisonTrajectory2D):
            return self.positions == o.positions
        return self.positions == o
@dataclass
class MorrisonPosition3D:
x: float
y: float
z: float
vx: float
vy: float
vz: float
ax: float
ay: float
az: float
class MorrisonTrajectory3D:
# pylint: disable=too-many-instance-attributes, invalid-name
def __init__(self, positions: Sequence[MorrisonPosition3D]) -> None:
self.positions = positions
self.X = np.array([p.x for p in self.positions])
self.Y = np.array([p.y for p in self.positions])
self.Z = np.array([p.z for p in self.positions])
self.VX = np.array([p.vx for p in self.positions])
self.VY = np.array([p.vy for p in self.positions])
self.VZ = np.array([p.vz for p in self.positions])
self.AX = np.array([p.ax for p in self.positions])
self.AY = np.array([p.ay for p in self.positions])
self.AZ = np.array([p.az for p in self.positions])
self.__n = 0 # iterator count
def __iter__(self) -> "MorrisonTrajectory3D":
self.__n = 0
return self
def __next__(self) -> MorrisonPosition3D:
if self.__n <= len(self.positions) - 1:
result = self.positions[self.__n]
self.__n += 1
return result
raise StopIteration
def __getitem__(self, i: int) -> MorrisonPosition3D:
return self.positions[i]
def __len__(self) -> int:
return len(self.positions)
    def __eq__(self, o: object) -> bool:
        if isinstance(o, MorrisonTrajectory3D):
            return self.positions == o.positions
        return self.positions == o
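# --- Usage sketch (added for illustration, not part of the original module) ---
# The numbers are hand-picked placeholders, not the output of any Morrison-model
# computation.
if __name__ == "__main__":
    positions = [
        MorrisonPosition2D(x=0.0, z=1.0, vx=10.0, vz=2.0, ax=0.0, az=-9.81),
        MorrisonPosition2D(x=0.5, z=1.1, vx=9.9, vz=1.5, ax=0.0, az=-9.81),
    ]
    trajectory = MorrisonTrajectory2D(positions)
    print(len(trajectory))  # 2
    print(trajectory.X)     # x coordinates as a numpy array
    print(trajectory[0])    # first MorrisonPosition2D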
|
PypiClean
|
/py-ms-consulate-1.0.0.tar.gz/py-ms-consulate-1.0.0/consulate/utils.py
|
import re
from urllib import parse as _urlparse
from consulate import exceptions
DURATION_PATTERN = re.compile(r"^(?:(?:-|)(?:\d+|\d+\.\d+)(?:µs|ms|s|m|h))+$")
def is_string(value):
"""Python 2 & 3 safe way to check if a value is either an instance of str
or unicode.
:param mixed value: The value to evaluate
:rtype: bool
"""
checks = [isinstance(value, t) for t in [bytes, str]]
return any(checks)
def maybe_encode(value):
"""If the value passed in is a str, encode it as UTF-8 bytes for Python 3
:param str|bytes value: The value to maybe encode
:rtype: bytes
"""
try:
return value.encode("utf-8")
except AttributeError:
return value
def _response_error(response):
"""Return the decoded response error or status code if no content exists.
:param requests.response response: The HTTP response
:rtype: str
"""
return response.body.decode("utf-8") if hasattr(response, "body") and response.body else str(response.status_code)
def response_ok(response, raise_on_404=False):
"""Evaluate the HTTP response and raise the appropriate exception if
required.
:param requests.response response: The HTTP response
:param bool raise_on_404: Raise an exception on 404 error
:rtype: bool
:raises: consulate.exceptions.ConsulateException
"""
result = False
if response.status_code == 200:
result = True
elif response.status_code == 400:
raise exceptions.ClientError(_response_error(response))
elif response.status_code == 401:
raise exceptions.ACLDisabled(_response_error(response))
elif response.status_code == 403:
raise exceptions.Forbidden(_response_error(response))
elif response.status_code == 404 and raise_on_404:
raise exceptions.NotFound(_response_error(response))
elif response.status_code == 500:
raise exceptions.ServerError(_response_error(response))
return result
def validate_go_interval(value, _model=None):
"""Validate the value passed in returning :data:`True` if it is a Go
Duration value.
:param str value: The string to check
:param consulate.models.base.Model _model: Optional model passed in
:rtype: bool
"""
return DURATION_PATTERN.match(value) is not None
def validate_url(value, _model=None):
"""Validate that the value passed in is a URL, returning :data:`True` if
it is.
:param str value: The string to check
:param consulate.models.base.Model _model: Optional model passed in
:rtype: bool
"""
parsed = _urlparse.urlparse(value)
    return bool(parsed.scheme and parsed.netloc)
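# --- Usage sketch (added for illustration, not part of the original module) ---
# Illustrative inputs for the validators defined above.
if __name__ == "__main__":
    print(validate_go_interval("1h30m"))                    # True: valid Go duration
    print(validate_go_interval("soon"))                     # False
    print(validate_url("https://consul.example.com:8500"))  # True
    print(validate_url("not-a-url"))                        # False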
|
PypiClean
|
/globus_action_provider_tools-0.13.0rc1-py3-none-any.whl/globus_action_provider_tools/authorization.py
|
import logging
from itertools import chain
from globus_action_provider_tools.authentication import AuthState
from globus_action_provider_tools.data_types import ActionStatus
from globus_action_provider_tools.errors import AuthenticationError
log = logging.getLogger(__name__)
def authorize_action_access_or_404(status: ActionStatus, auth_state: AuthState) -> None:
"""
Determines whether or not a principal is allowed to view an ActionStatus.
If not allowed to view the ActionStatus, this function will raise an
AuthenticationError.
"""
if status.monitor_by is None:
allowed_set = set([status.creator_id])
else:
allowed_set = set(chain([status.creator_id], status.monitor_by))
authorized = auth_state.check_authorization(
allowed_set, allow_all_authenticated_users=True
)
if not authorized:
log.info(
f"None of {auth_state.effective_identity}'s identities are allowed to view "
f"{status.action_id}. User Identities={auth_state.principals} Allowed "
f"Identities={allowed_set}"
)
raise AuthenticationError(f"No Action with id {status.action_id}")
def authorize_action_management_or_404(
status: ActionStatus, auth_state: AuthState
) -> None:
"""
Determines whether or not a principal is allowed to manage an ActionStatus.
If not allowed to view the ActionStatus, this function will raise an
AuthenticationError.
"""
if status.manage_by is None:
allowed_set = set([status.creator_id])
else:
allowed_set = set(chain([status.creator_id], status.manage_by))
authorized = auth_state.check_authorization(
allowed_set, allow_all_authenticated_users=True
)
if not authorized:
log.info(
f"None of {auth_state.effective_identity}'s identities are allowed to manage "
f"{status.action_id}. User Identities={auth_state.principals} Allowed "
f"Identities={allowed_set}"
)
raise AuthenticationError(f"No Action with id {status.action_id}")
|
PypiClean
|
/easybuild-easyblocks-4.8.0.tar.gz/easybuild-easyblocks-4.8.0/easybuild/easyblocks/o/openmpi.py
|
import os
import re
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig.constants import EASYCONFIG_CONSTANTS
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.modules import get_software_root
from easybuild.tools.systemtools import check_os_dependency, get_shared_lib_ext
from easybuild.tools.toolchain.mpi import get_mpi_cmd_template
class EB_OpenMPI(ConfigureMake):
"""OpenMPI easyblock."""
def configure_step(self):
"""Custom configuration step for OpenMPI."""
def config_opt_used(key, enable_opt=False):
"""Helper function to check whether a configure option is already specified in 'configopts'."""
if enable_opt:
regex = '--(disable|enable)-%s' % key
else:
regex = '--(with|without)-%s' % key
return bool(re.search(regex, self.cfg['configopts']))
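        # e.g. config_opt_used('ucx') is True when configopts already contains
        # '--with-ucx=...' or '--without-ucx'; with enable_opt=True the same check
        # is done against '--enable-<key>' / '--disable-<key>'.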
config_opt_names = [
# suppress failure modes in relation to mpirun path
'mpirun-prefix-by-default',
# build shared libraries
'shared',
]
for key in config_opt_names:
if not config_opt_used(key, enable_opt=True):
self.cfg.update('configopts', '--enable-%s' % key)
# List of EasyBuild dependencies for which OMPI has known options
known_dependencies = ('CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX', 'UCC')
# Value to use for `--with-<dep>=<value>` if the dependency is not specified in the easyconfig
# No entry is interpreted as no option added at all
# This is to make builds reproducible even when the system libraries are changed and avoids failures
# due to e.g. finding only PMIx but not libevent on the system
unused_dep_value = dict()
# Known options since version 3.0 (no earlier ones checked)
if LooseVersion(self.version) >= LooseVersion('3.0'):
# Default to disable the option with "no"
unused_dep_value = {dep: 'no' for dep in known_dependencies}
# For these the default is to use an internal copy and not using any is not supported
for dep in ('hwloc', 'libevent', 'PMIx'):
unused_dep_value[dep] = 'internal'
# handle dependencies
for dep in known_dependencies:
opt_name = dep.lower()
# If the option is already used, don't add it
if config_opt_used(opt_name):
continue
# libfabric option renamed in OpenMPI 3.1.0 to ofi
if dep == 'libfabric' and LooseVersion(self.version) >= LooseVersion('3.1'):
opt_name = 'ofi'
# Check new option name. They are synonyms since 3.1.0 for backward compatibility
if config_opt_used(opt_name):
continue
dep_root = get_software_root(dep)
# If the dependency is loaded, specify its path, else use the "unused" value, if any
if dep_root:
opt_value = dep_root
else:
opt_value = unused_dep_value.get(dep)
if opt_value is not None:
self.cfg.update('configopts', '--with-%s=%s' % (opt_name, opt_value))
if bool(get_software_root('PMIx')) != bool(get_software_root('libevent')):
raise EasyBuildError('You must either use both PMIx and libevent as dependencies or none of them. '
'This is to enforce the same libevent is used for OpenMPI as for PMIx or '
'the behavior may be unpredictable.')
# check whether VERBS support should be enabled
if not config_opt_used('verbs'):
# for OpenMPI v4.x, the openib BTL should be disabled when UCX is used;
# this is required to avoid "error initializing an OpenFabrics device" warnings,
# see also https://www.open-mpi.org/faq/?category=all#ofa-device-error
is_ucx_enabled = ('--with-ucx' in self.cfg['configopts'] and
'--with-ucx=no' not in self.cfg['configopts'])
if LooseVersion(self.version) >= LooseVersion('4.0.0') and is_ucx_enabled:
verbs = False
else:
# auto-detect based on available OS packages
os_packages = EASYCONFIG_CONSTANTS['OS_PKG_IBVERBS_DEV'][0]
verbs = any(check_os_dependency(osdep) for osdep in os_packages)
# for OpenMPI v5.x, the verbs support is removed, only UCX is available
# see https://github.com/open-mpi/ompi/pull/6270
            if LooseVersion(self.version) < LooseVersion('5.0.0'):
if verbs:
self.cfg.update('configopts', '--with-verbs')
else:
self.cfg.update('configopts', '--without-verbs')
super(EB_OpenMPI, self).configure_step()
def test_step(self):
"""Test step for OpenMPI"""
# Default to `make check` if nothing is set. Disable with "runtest = False" in the EC
if self.cfg['runtest'] is None:
self.cfg['runtest'] = 'check'
super(EB_OpenMPI, self).test_step()
def load_module(self, *args, **kwargs):
"""
Load (temporary) module file, after resetting to initial environment.
Also put RPATH wrappers back in place if needed, to ensure that sanity check commands work as expected.
"""
super(EB_OpenMPI, self).load_module(*args, **kwargs)
# ensure RPATH wrappers are in place, otherwise compiling minimal test programs will fail
if build_option('rpath'):
if self.toolchain.options.get('rpath', True):
self.toolchain.prepare_rpath_wrappers(rpath_filter_dirs=self.rpath_filter_dirs,
rpath_include_dirs=self.rpath_include_dirs)
def sanity_check_step(self):
"""Custom sanity check for OpenMPI."""
bin_names = ['mpicc', 'mpicxx', 'mpif90', 'mpifort', 'mpirun', 'ompi_info', 'opal_wrapper']
if LooseVersion(self.version) >= LooseVersion('5.0.0'):
bin_names.append('prterun')
else:
bin_names.append('orterun')
bin_files = [os.path.join('bin', x) for x in bin_names]
shlib_ext = get_shared_lib_ext()
lib_names = ['mpi_mpifh', 'mpi', 'open-pal']
if LooseVersion(self.version) >= LooseVersion('5.0.0'):
lib_names.append('prrte')
else:
lib_names.extend(['ompitrace', 'open-rte'])
lib_files = [os.path.join('lib', 'lib%s.%s' % (x, shlib_ext)) for x in lib_names]
inc_names = ['mpi-ext', 'mpif-config', 'mpif', 'mpi', 'mpi_portable_platform']
if LooseVersion(self.version) >= LooseVersion('5.0.0'):
inc_names.append('prte')
inc_files = [os.path.join('include', x + '.h') for x in inc_names]
custom_paths = {
'files': bin_files + inc_files + lib_files,
'dirs': [],
}
# make sure MPI compiler wrappers pick up correct compilers
expected = {
'mpicc': os.getenv('CC', 'gcc'),
'mpicxx': os.getenv('CXX', 'g++'),
'mpifort': os.getenv('FC', 'gfortran'),
'mpif90': os.getenv('F90', 'gfortran'),
}
# actual pattern for gfortran is "GNU Fortran"
for key in ['mpifort', 'mpif90']:
if expected[key] == 'gfortran':
expected[key] = "GNU Fortran"
# for PGI, correct pattern is "pgfortran" with mpif90
if expected['mpif90'] == 'pgf90':
expected['mpif90'] = 'pgfortran'
custom_commands = ["%s --version | grep '%s'" % (key, expected[key]) for key in sorted(expected.keys())]
# Add minimal test program to sanity checks
# Run with correct MPI launcher
mpi_cmd_tmpl, params = get_mpi_cmd_template(toolchain.OPENMPI, dict(), mpi_version=self.version)
# Limit number of ranks to 8 to avoid it failing due to hyperthreading
ranks = min(8, self.cfg['parallel'])
for src, compiler in (('hello_c.c', 'mpicc'), ('hello_mpifh.f', 'mpifort'), ('hello_usempi.f90', 'mpif90')):
src_path = os.path.join(self.cfg['start_dir'], 'examples', src)
if os.path.exists(src_path):
test_exe = os.path.join(self.builddir, 'mpi_test_' + os.path.splitext(src)[0])
self.log.info("Adding minimal MPI test program to sanity checks: %s", test_exe)
# Build test binary
custom_commands.append("%s %s -o %s" % (compiler, src_path, test_exe))
# Run the test if chosen
if build_option('mpi_tests'):
params.update({'nr_ranks': ranks, 'cmd': test_exe})
# Allow oversubscription for this test (in case of hyperthreading)
custom_commands.append("OMPI_MCA_rmaps_base_oversubscribe=1 " + mpi_cmd_tmpl % params)
# Run with 1 process which may trigger other bugs
# See https://github.com/easybuilders/easybuild-easyconfigs/issues/12978
params['nr_ranks'] = 1
custom_commands.append(mpi_cmd_tmpl % params)
super(EB_OpenMPI, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
|
PypiClean
|
/zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/submissions/abx17.py
|
import collections
import functools
import shutil
from datetime import datetime
from pathlib import Path
from typing import Tuple, Optional
import numpy as np
import pandas as pd
from pydantic import Field
from zerospeech import validators
from zerospeech.data_loaders import load_dataframe
from zerospeech.datasets import ZRC2017Dataset
from zerospeech.generics import (
FileTypes, FileListItem, Namespace, Item, FileItem
)
from zerospeech.leaderboards import EntryDetails, LeaderboardBenchmarkName, LeaderboardEntry
from zerospeech.leaderboards.abx17 import ABX17LeaderboardEntry, ABX17LeaderboardScores
from zerospeech.misc import load_obj
from zerospeech.tasks.abx.abx17 import ABXParameters
from ._model import MetaFile, Submission, SubmissionValidation, validation_fn, add_item, ScoreDir
class ABX17SubmissionValidator(SubmissionValidation):
""" File Validation for an ABX17 submission"""
dataset: ZRC2017Dataset = Field(default_factory=lambda: ZRC2017Dataset.load())
@staticmethod
def basic_abx_checks(item_list: FileListItem, abx_item: FileItem, tag: str):
# wav_list are compared to items inside item file
df = pd.read_csv(abx_item.file, sep=' ')
f_list_checks = [
# Verify that all necessary files are present
functools.partial(
validators.file_list_stem_check,
expected=[str(f) for f in df['#file']]
)
]
additional_checks = [
# Verify that type of array is float
functools.partial(
validators.numpy_dtype_check,
dtype=np.dtype('float')
),
# Verify that array has 2 dimensions
functools.partial(
validators.numpy_dimensions_check,
ndim=2
),
# Verify that files have the same dimensions
validators.numpy_col_comparison(1)
]
results = validators.numpy_array_list_check(
item_list, f_list_checks=f_list_checks, additional_checks=additional_checks
)
# add item tag
add_item(tag, results)
return results
@validation_fn(target='english_1s')
def validate_english_1s(self, english_1s: FileListItem):
abx_item = self.dataset.index.subsets.english.items.abx_1s_item
return self.basic_abx_checks(item_list=english_1s, abx_item=abx_item,
tag='english_1s')
@validation_fn(target='english_10s')
def validate_english_10s(self, english_10s: FileListItem):
abx_item = self.dataset.index.subsets.english.items.abx_10s_item
return self.basic_abx_checks(item_list=english_10s, abx_item=abx_item,
tag='english_10s')
@validation_fn(target='english_120s')
def validate_english_120s(self, english_120s: FileListItem):
abx_item = self.dataset.index.subsets.english.items.abx_120s_item
return self.basic_abx_checks(item_list=english_120s, abx_item=abx_item,
tag='english_120s')
@validation_fn(target='french_1s')
def validate_french_1s(self, french_1s: FileListItem):
abx_item = self.dataset.index.subsets.french.items.abx_1s_item
return self.basic_abx_checks(item_list=french_1s, abx_item=abx_item,
tag='french_1s')
@validation_fn(target='french_10s')
def validate_french_10s(self, french_10s: FileListItem):
abx_item = self.dataset.index.subsets.french.items.abx_10s_item
return self.basic_abx_checks(item_list=french_10s, abx_item=abx_item,
tag='french_10s')
@validation_fn(target='french_120s')
def validate_french_120s(self, french_120s: FileListItem):
abx_item = self.dataset.index.subsets.french.items.abx_120s_item
return self.basic_abx_checks(item_list=french_120s, abx_item=abx_item,
tag='french_120s')
@validation_fn(target='mandarin_1s')
def validate_mandarin_1s(self, mandarin_1s: FileListItem):
abx_item = self.dataset.index.subsets.mandarin.items.abx_1s_item
return self.basic_abx_checks(item_list=mandarin_1s, abx_item=abx_item,
tag='mandarin_1s')
@validation_fn(target='mandarin_10s')
def validate_mandarin_10s(self, mandarin_10s: FileListItem):
abx_item = self.dataset.index.subsets.mandarin.items.abx_10s_item
return self.basic_abx_checks(item_list=mandarin_10s, abx_item=abx_item,
tag='mandarin_10s')
@validation_fn(target='mandarin_120s')
def validate_mandarin_120s(self, mandarin_120s: FileListItem):
abx_item = self.dataset.index.subsets.mandarin.items.abx_120s_item
return self.basic_abx_checks(item_list=mandarin_120s, abx_item=abx_item,
tag='mandarin_120s')
@validation_fn(target='german_1s')
def validate_german_1s(self, german_1s: FileListItem):
abx_item = self.dataset.index.subsets.german.items.abx_1s_item
return self.basic_abx_checks(item_list=german_1s, abx_item=abx_item,
tag='german_1s')
@validation_fn(target='german_10s')
def validate_german_10s(self, german_10s: FileListItem):
abx_item = self.dataset.index.subsets.german.items.abx_10s_item
return self.basic_abx_checks(item_list=german_10s, abx_item=abx_item,
tag='german_10s')
@validation_fn(target='german_120s')
def validate_german_120s(self, german_120s: FileListItem):
abx_item = self.dataset.index.subsets.german.items.abx_120s_item
return self.basic_abx_checks(item_list=german_120s, abx_item=abx_item,
tag='german_120s')
@validation_fn(target='wolof_1s')
def validate_wolof_1s(self, wolof_1s: FileListItem):
abx_item = self.dataset.index.subsets.wolof.items.abx_1s_item
return self.basic_abx_checks(item_list=wolof_1s, abx_item=abx_item,
tag='wolof_1s')
@validation_fn(target='wolof_10s')
def validate_wolof_10s(self, wolof_10s: FileListItem):
abx_item = self.dataset.index.subsets.wolof.items.abx_10s_item
return self.basic_abx_checks(item_list=wolof_10s, abx_item=abx_item,
tag='wolof_10s')
@validation_fn(target='wolof_120s')
def validate_wolof_120s(self, wolof_120s: FileListItem):
abx_item = self.dataset.index.subsets.wolof.items.abx_120s_item
return self.basic_abx_checks(item_list=wolof_120s, abx_item=abx_item,
tag='wolof_120s')
class ABX17ScoreDir(ScoreDir):
params: Optional[ABXParameters] = ABXParameters()
@property
def scores(self):
csv_file = (self.location / self.params.result_filename).with_suffix('.csv')
return load_dataframe(csv_file)
def get_details(self) -> EntryDetails:
""" Build entry details """
train_set = ""
gpu_budget = ""
if self.meta_file is not None:
train_set = self.meta_file.model_info.train_set
gpu_budget = self.meta_file.model_info.gpu_budget
return EntryDetails(
train_set=train_set,
benchmarks=[LeaderboardBenchmarkName.ABX_17],
gpu_budget=gpu_budget,
parameters=self.params.to_meta()
)
def build_scores(self) -> ABX17LeaderboardScores:
""" extract scores from csv """
        score_template = collections.defaultdict(
            lambda: collections.defaultdict(lambda: collections.defaultdict(dict))
        )
for _, row in self.scores.iterrows():
score_template[row['language']][f"t_{row['duration']}"][row['type']] = row["score"]
return ABX17LeaderboardScores.parse_obj(score_template)
def build_meta_data(self):
""" Build leaderboard metadata """
return dict(
model_id=self.meta_file.model_info.model_id,
submission_id="",
index=None,
submission_date=datetime.now(),
submitted_by=self.meta_file.username,
description=self.meta_file.model_info.system_description,
publication=dict(
author_short=self.meta_file.publication.author_label,
authors=self.meta_file.publication.authors,
paper_title=self.meta_file.publication.paper_title,
paper_ref=self.meta_file.publication.paper_url,
bib_ref=self.meta_file.publication.bib_reference,
paper_url=self.meta_file.publication.paper_url,
pub_year=self.meta_file.publication.publication_year,
team_name=self.meta_file.publication.team,
institution=self.meta_file.publication.institution,
code=self.meta_file.code_url,
DOI=self.meta_file.publication.DOI,
open_science=self.meta_file.open_source,
),
details=dict(
train_set=self.meta_file.model_info.train_set,
benchmarks=[],
gpu_budget=self.meta_file.model_info.gpu_budget,
parameters=self.params.to_meta(),
)
)
def build_leaderboard(self) -> LeaderboardEntry:
""" Build leaderboard entry for the current submission """
self.load_meta()
return ABX17LeaderboardEntry.parse_obj(
dict(
**self.build_meta_data(),
scores=self.build_scores()
)
)
class ABX17Submission(Submission):
""" Submission for ABX-17 """
sets: Tuple = ('1s', '10s', '120s')
tasks: Tuple = ('english', 'french', 'mandarin', 'german', 'wolof')
@classmethod
def load(
cls, path: Path, *,
tasks=('english', 'french', 'mandarin', 'german', 'wolof'),
sets=('1s', '10s', '120s')
):
# submission object
submission = cls(
sets=sets,
tasks=tasks,
location=path
)
# if params not set export defaults
if not submission.params_file.is_file():
params = ABXParameters()
params.result_filename = "scores.csv"
params.export(submission.params_file)
# Load items
file_ext = submission.params.score_file_type.replace('.', '')
file_ext = FileTypes(file_ext)
items = dict()
if 'english' in tasks:
if '1s' in sets:
items['english_1s'] = FileListItem.from_dir(
path / 'english/1s', f_type=file_ext
)
if '10s' in sets:
items['english_10s'] = FileListItem.from_dir(
path / 'english/10s', f_type=file_ext
)
if '120s' in sets:
items['english_120s'] = FileListItem.from_dir(
path / 'english/120s', f_type=file_ext
)
if 'french' in tasks:
if '1s' in sets:
items['french_1s'] = FileListItem.from_dir(
path / 'french/1s', f_type=file_ext
)
if '10s' in sets:
items['french_10s'] = FileListItem.from_dir(
path / 'french/10s', f_type=file_ext
)
if '120s' in sets:
items['french_120s'] = FileListItem.from_dir(
path / 'french/120s', f_type=file_ext
)
if 'mandarin' in tasks:
if '1s' in sets:
items['mandarin_1s'] = FileListItem.from_dir(
path / 'mandarin/1s', f_type=file_ext
)
if '10s' in sets:
items['mandarin_10s'] = FileListItem.from_dir(
path / 'mandarin/10s', f_type=file_ext
)
if '120s' in sets:
items['mandarin_120s'] = FileListItem.from_dir(
path / 'mandarin/120s', f_type=file_ext
)
if 'german' in tasks:
# retro-compatibility with old format
gloc = path / 'LANG1'
if not gloc.is_dir():
gloc = path / 'german'
if '1s' in sets:
items['german_1s'] = FileListItem.from_dir(
gloc / '1s', f_type=file_ext
)
if '10s' in sets:
items['german_10s'] = FileListItem.from_dir(
gloc / '10s', f_type=file_ext
)
if '120s' in sets:
items['german_120s'] = FileListItem.from_dir(
gloc / '120s', f_type=file_ext
)
if 'wolof' in tasks:
# retro-compatibility with old format
gloc = path / 'LANG2'
if not gloc.is_dir():
gloc = path / 'wolof'
if '1s' in sets:
items['wolof_1s'] = FileListItem.from_dir(
gloc / '1s', f_type=file_ext
)
if '10s' in sets:
items['wolof_10s'] = FileListItem.from_dir(
gloc / '10s', f_type=file_ext
)
if '120s' in sets:
items['wolof_120s'] = FileListItem.from_dir(
gloc / '120s', f_type=file_ext
)
submission.items = Namespace[Item](store=items)
return submission
def load_parameters(self) -> "ABXParameters":
if self.params_file.is_file():
obj = load_obj(self.params_file)
return ABXParameters.parse_obj(obj)
return ABXParameters()
def __validate_submission__(self):
""" Run validation on the submission data """
self.validation_output += ABX17SubmissionValidator().validate(self)
def get_scores(self):
""" Load score Dir"""
return ABX17ScoreDir(
submission_dir=self.location,
location=self.score_dir,
params=self.params
)
def __zippable__(self):
return [
("", self.meta_file),
("", self.params_file),
*[("english/1s/", f) for f in self.items.english_1s],
*[("english/10s/", f) for f in self.items.english_10s],
*[("english/120s/", f) for f in self.items.english_120s],
*[("french/1s/", f) for f in self.items.french_1s],
*[("french/10s/", f) for f in self.items.french_10s],
*[("french/120s/", f) for f in self.items.french_120s],
*[("mandarin/1s/", f) for f in self.items.mandarin_1s],
*[("mandarin/10s/", f) for f in self.items.mandarin_10s],
*[("mandarin/120s/", f) for f in self.items.mandarin_120s],
*[("german/1s/", f) for f in self.items.german_1s],
*[("german/10s/", f) for f in self.items.german_10s],
*[("german/120s/", f) for f in self.items.german_120s],
*[("wolof/1s/", f) for f in self.items.wolof_1s],
*[("wolof/10s/", f) for f in self.items.wolof_10s],
*[("wolof/120s/", f) for f in self.items.wolof_120s],
*[("scores/", f) for f in self.score_dir.iterdir()]
]
@classmethod
def init_dir(cls, location: Path):
# create sub-directories
location.mkdir(exist_ok=True, parents=True)
(location / 'english' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'english' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'english' / "120s").mkdir(exist_ok=True, parents=True)
(location / 'french' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'french' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'french' / "120s").mkdir(exist_ok=True, parents=True)
(location / 'mandarin' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'mandarin' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'mandarin' / "120s").mkdir(exist_ok=True, parents=True)
(location / 'german' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'german' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'german' / "120s").mkdir(exist_ok=True, parents=True)
(location / 'wolof' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'wolof' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'wolof' / "120s").mkdir(exist_ok=True, parents=True)
# scores dir
(location / 'scores').mkdir(exist_ok=True, parents=True)
# create parameters file
ABXParameters().export(location / ABXParameters.file_stem)
# create meta-template
template = MetaFile.to_template(benchmark_name="abx17")
template.to_yaml(
file=location / MetaFile.file_stem,
excluded={
"file_stem": True,
"model_info": {"model_id"},
"publication": {"bib_reference", "DOI"}
}
)
instruction_file = Path(__file__).parent / "instructions.md"
if instruction_file.is_file():
shutil.copy(instruction_file, location / 'help.md')
|
PypiClean
|
/django-codemirror2-0.2.tar.gz/django-codemirror2-0.2/codemirror2/static/codemirror2/mode/solr/solr.js
|
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("solr", function() {
"use strict";
var isStringChar = /[^\s\|\!\+\-\*\?\~\^\&\:\(\)\[\]\{\}\^\"\\]/;
var isOperatorChar = /[\|\!\+\-\*\?\~\^\&]/;
var isOperatorString = /^(OR|AND|NOT|TO)$/i;
function isNumber(word) {
return parseFloat(word, 10).toString() === word;
}
function tokenString(quote) {
return function(stream, state) {
var escaped = false, next;
while ((next = stream.next()) != null) {
if (next == quote && !escaped) break;
escaped = !escaped && next == "\\";
}
if (!escaped) state.tokenize = tokenBase;
return "string";
};
}
function tokenOperator(operator) {
return function(stream, state) {
var style = "operator";
if (operator == "+")
style += " positive";
else if (operator == "-")
style += " negative";
else if (operator == "|")
stream.eat(/\|/);
else if (operator == "&")
stream.eat(/\&/);
else if (operator == "^")
style += " boost";
state.tokenize = tokenBase;
return style;
};
}
function tokenWord(ch) {
return function(stream, state) {
var word = ch;
while ((ch = stream.peek()) && ch.match(isStringChar) != null) {
word += stream.next();
}
state.tokenize = tokenBase;
if (isOperatorString.test(word))
return "operator";
else if (isNumber(word))
return "number";
else if (stream.peek() == ":")
return "field";
else
return "string";
};
}
function tokenBase(stream, state) {
var ch = stream.next();
if (ch == '"')
state.tokenize = tokenString(ch);
else if (isOperatorChar.test(ch))
state.tokenize = tokenOperator(ch);
else if (isStringChar.test(ch))
state.tokenize = tokenWord(ch);
return (state.tokenize != tokenBase) ? state.tokenize(stream, state) : null;
}
return {
startState: function() {
return {
tokenize: tokenBase
};
},
token: function(stream, state) {
if (stream.eatSpace()) return null;
return state.tokenize(stream, state);
}
};
});
CodeMirror.defineMIME("text/x-solr", "solr");
});
|
PypiClean
|
/bmcs-0.0.2a28-py3-none-any.whl/stats/spirrid_bak/old/demo_pullout.py
|
'''
Created on May 27, 2010

@author: rostislav
'''
from stats.spirrid_bak.old.spirrid import SPIRRID
from matplotlib import pyplot as plt
from quaducom.pullout.constant_friction_finite_fiber import ConstantFrictionFiniteFiber
def run():
# Quantities for the response function
# and randomization
# construct a default response function for a single filament
rf = ConstantFrictionFiniteFiber(fu = 1200e15, qf = 1200,
L = 0.02, A = 0.00000002,
E = 210.e9, z = 0.004,
phi = 0.5, f = 0.01)
# construct the integrator and provide it with the response function.
s = SPIRRID(rf = rf,
min_eps = 0.00, max_eps = 0.0008, n_eps = 380)
# construct the random variables
n_int = 25
s.add_rv('E_mod', distribution = 'uniform', loc = 170.e9, scale = 250.e9, n_int = n_int)
s.add_rv('L', distribution = 'uniform', loc = 0.02, scale = 0.03, n_int = n_int)
s.add_rv('phi', distribution = 'sin_distr', loc = 0., scale = 1., n_int = n_int)
s.add_rv('z', distribution = 'uniform', loc = 0, scale = rf.L / 2., n_int = n_int)
# define a tables with the run configurations to start in a batch
run_list = [
(
{'cached_dG' : False,
'compiled_QdG_loop' : True,
'compiled_eps_loop' : True },
'bx-',
'$\mathrm{C}_{e,\\theta} ( q(e,\\theta) \cdot g[\\theta_1] g[\\theta_2] \dots g[\\theta_n] ) $ - %4.2f sec'
),
(
{'cached_dG' : False,
'compiled_QdG_loop' : True,
'compiled_eps_loop' : False },
'r-2',
'$\mathrm{P}_{e} ( \mathrm{C}_{\\theta} ( q(e,\\theta) \cdot g[\\theta_1] g[\\theta_2] \dots g[\\theta_n] ) ) $ - %4.2f sec',
),
(
{'cached_dG' : True,
'compiled_QdG_loop' : True,
'compiled_eps_loop' : True },
'go-',
'$\mathrm{C}_{e,\\theta} ( q(e,\\theta) \cdot G[\\theta] ) $ - %4.2f sec',
),
(
{'cached_dG' : True,
'compiled_QdG_loop' : False,
'compiled_eps_loop' : False },
'b--',
'$\mathrm{P}_{e} ( \mathrm{N}_{\\theta} ( q(e,\\theta) \cdot G[\\theta] ) ) $ - %4.2f sec'
),
]
for idx, run in enumerate(run_list):
run_options, plot_options, legend_string = run
print('run', idx, end=' ')
s.set(**run_options)
s.mean_curve.plot(plt, plot_options, linewidth = 2, label = legend_string % s.exec_time)
print('execution time', s.exec_time)
# def f():
# print 'exec_time', s.exec_time
#
# global f
#
# import cProfile
#
# cProfile.run('f()', 'spirrid.tprof')
# import pstats
# p = pstats.Stats('spirrid.tprof')
# p.strip_dirs()
# print 'cumulative'
# p.sort_stats('cumulative').print_stats(50)
# print 'time'
# p.sort_stats('time').print_stats(50)
plt.xlabel('strain [-]')
plt.ylabel('stress')
plt.legend(loc = 'lower right')
plt.title(s.rf.title)
plt.show()
if __name__ == '__main__':
run()
|
PypiClean
|
/reqmgr2ms-pileup-2.2.4rc1.tar.gz/reqmgr2ms-pileup-2.2.4rc1/src/python/WMCore/Services/DBS/DBSWriterObjects.py
|
from __future__ import print_function
from builtins import int
import logging
from DBSAPI.dbsException import *
from DBSAPI.dbsApiException import *
from DBSAPI.dbsPrimaryDataset import DbsPrimaryDataset
from DBSAPI.dbsAlgorithm import DbsAlgorithm
from DBSAPI.dbsQueryableParameterSet import DbsQueryableParameterSet
from DBSAPI.dbsProcessedDataset import DbsProcessedDataset
from DBSAPI.dbsFile import DbsFile
from DBSAPI.dbsFileBlock import DbsFileBlock
from DBSAPI.dbsStorageElement import DbsStorageElement
from DBSAPI.dbsRun import DbsRun
from DBSAPI.dbsLumiSection import DbsLumiSection
def makeTierList(dataTier):
"""
_makeTierList_
Standard tool to split data tiers if they contain - chars
*** Do not use outside of this module ***
"""
tierList = dataTier.split("-")
return tierList
def createPrimaryDataset(datasetInfo, apiRef = None):
"""
_createPrimaryDataset_
Create and return a Primary Dataset object.
If apiRef is not None, it is used to insert the dataset into the
DBS
"""
if 'PrimaryDatasetType' in datasetInfo:
PrimaryDatasetType = datasetInfo['PrimaryDatasetType']
else:
PrimaryDatasetType = 'mc'
logging.debug("Inserting PrimaryDataset %s with Type %s", datasetInfo["PrimaryDataset"], PrimaryDatasetType)
primary = DbsPrimaryDataset(Name = datasetInfo["PrimaryDataset"], Type=PrimaryDatasetType)
if apiRef != None:
apiRef.insertPrimaryDataset(primary)
return primary
def createAlgorithm(datasetInfo, configMetadata = None, apiRef = None):
"""
_createAlgorithm_
Create an algorithm assuming that datasetInfo is a
ProdCommon.MCPayloads.DatasetInfo like dictionary
"""
exeName = datasetInfo['ApplicationName']
appVersion = datasetInfo['ApplicationVersion']
appFamily = datasetInfo["ApplicationFamily"]
#
# HACK: Problem with large PSets (is this still relevant ?)
#
# Repacker jobs have no PSetContent/PSetHash
#
psetContent = datasetInfo.get('PSetContent',None)
if psetContent == None:
psetContent = "PSET_CONTENT_NOT_AVAILABLE"
psetHash = datasetInfo.get('PSetHash',None)
if psetHash == None:
psetHash = "NO_PSET_HASH"
else:
        if ";" in psetHash:
# no need for fake hash in new schema
psetHash = psetHash.split(";")[0]
psetHash = psetHash.replace("hash=", "")
## No more hacks
#msg = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"
#msg += "TEST HACK USED FOR PSetContent\n"
#msg += ">>>>>>>>>>>>>>>>>>>>>>>>>>>>"
#logging.warning(msg)
#print msg
#psetContent = "This is not a PSet"
#
# HACK: 100 char limit on cfg file name
if configMetadata != None:
cfgName = configMetadata['name']
if len(cfgName) > 100:
msg = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"
msg += "TEST HACK USED FOR Config File Name"
msg += ">>>>>>>>>>>>>>>>>>>>>>>>>>>>"
logging.warning(msg)
print(msg)
            configMetadata['name'] = cfgName[-99:]
psetInstance = DbsQueryableParameterSet(
Hash = psetHash,
Name = configMetadata['name'],
Version = configMetadata['version'],
Type = configMetadata['Type'],
Annotation = configMetadata['annotation'],
Content = psetContent,
)
algorithmInstance = DbsAlgorithm(
ExecutableName = exeName,
ApplicationVersion = appVersion,
ApplicationFamily = appFamily,
ParameterSetID = psetInstance
)
else:
psetInstance = DbsQueryableParameterSet(
Hash = psetHash)
algorithmInstance = DbsAlgorithm(
ExecutableName = exeName,
ApplicationVersion = appVersion,
ApplicationFamily = appFamily,
ParameterSetID = psetInstance
)
if apiRef != None:
apiRef.insertAlgorithm(algorithmInstance)
return algorithmInstance
def createAlgorithmForInsert(datasetInfo):
"""
_createPartialAlgorithm_
Create an Algorithm instance that uses the minimal info needed
to insert a file
"""
exeName = datasetInfo['ApplicationName']
appVersion = datasetInfo['ApplicationVersion']
appFamily = datasetInfo["ApplicationFamily"]
#
# Repacker jobs have no PsetContent/PSetHash
#
psetContent = datasetInfo.get('PSetContent',None)
if psetContent == None:
psetContent = "PSET_CONTENT_NOT_AVAILABLE"
psetHash = datasetInfo.get('PSetHash',None)
if psetHash == None:
psetHash = "NO_PSET_HASH"
else:
        if ";" in psetHash:
# no need for fake hash in new schema
psetHash = psetHash.split(";")[0]
psetHash = psetHash.replace("hash=", "")
psetInstance = DbsQueryableParameterSet(
Hash = psetHash)
algorithmInstance = DbsAlgorithm(
ExecutableName = exeName,
ApplicationVersion = appVersion,
ApplicationFamily = appFamily,
ParameterSetID = psetInstance
)
return algorithmInstance
def createMergeAlgorithm(datasetInfo, apiRef = None):
"""
_createMergeAlgorithm_
Create a DbsAlgorithm for a merge dataset
"""
exeName = datasetInfo['ApplicationName']
version = datasetInfo['ApplicationVersion']
family = datasetInfo.get('ApplicationFamily', None)
if (family == None) or not (family) :
family = datasetInfo['OutputModuleName']
mergeAlgo = DbsAlgorithm (
ExecutableName = exeName,
ApplicationVersion = version,
ApplicationFamily = family,
)
if apiRef != None:
apiRef.insertAlgorithm(mergeAlgo)
return mergeAlgo
def createProcessedDataset(primaryDataset, algorithm, datasetInfo,
apiRef = None):
"""
_createProcessedDataset_
"""
physicsGroup = datasetInfo.get("PhysicsGroup", "NoGroup")
status = datasetInfo.get("Status", "VALID")
dataTier = datasetInfo['DataTier']
globalTag = datasetInfo.get('Conditions', None)
if globalTag is None: globalTag = ''
parents = []
inputDataset = datasetInfo.get('ParentDataset', None)
if inputDataset != None:
parents.append(inputDataset)
tierList = makeTierList(datasetInfo['DataTier'])
name = datasetInfo['ProcessedDataset']
algolist=[]
if algorithm not in ('', None):
algolist=list(algorithm)
processedDataset = DbsProcessedDataset (
PrimaryDataset = primaryDataset,
AlgoList=algolist,
Name = name,
TierList = tierList,
ParentList = parents,
PhysicsGroup = physicsGroup,
Status = status,
GlobalTag = globalTag,
)
if apiRef != None:
apiRef.insertProcessedDataset(processedDataset)
#
logging.debug("PrimaryDataset: %s ProcessedDataset: %s DataTierList: %s requested by PhysicsGroup: %s ", primaryDataset['Name'], name, tierList, physicsGroup)
return processedDataset
def createDBSFiles(fjrFileInfo, jobType = None, apiRef = None):
"""
_createDBSFiles_
Create a list of DBS File instances from the file details contained
in a FwkJobRep.FileInfo instance describing an output file
Does not insert files, returns as list of DbsFile objects
Does insert runs and lumisections if DBS API reference is passed
"""
results = []
inputLFNs = [ x['LFN'] for x in fjrFileInfo.inputFiles]
checksum = fjrFileInfo.checksums['cksum']
adler32sum = fjrFileInfo.checksums.get('adler32', '')
nEvents = int(fjrFileInfo['TotalEvents'])
if len(fjrFileInfo.dataset)<=0:
logging.error("No dataset info found in FWJobReport!")
return results
# //
# // Set FileType
#//
if 'FileType' in fjrFileInfo:
fileType = fjrFileInfo['FileType']
else:
fileType = 'EDM'
#
# FIXME: at this point I should use the mc or data event type from
# the jobreport. Until this is supported by the framework,
# we use the workaround that mc job reports have an empty
# lumisections list (stripped in DBSInterface)
#
lumiList = []
if ( len(fjrFileInfo.getLumiSections()) > 0 ):
#
# insert runs (for data files from detector)
#
if ( apiRef != None ):
for runinfo in fjrFileInfo.runs:
run = DbsRun(
RunNumber = int(runinfo),
NumberOfEvents = 0,
NumberOfLumiSections = 0,
TotalLuminosity = 0,
StoreNumber = 0,
StartOfRun = 0,
EndOfRun = 0,
)
apiRef.insertRun(run)
#
# insert lumisections (for data files from detector)
# associate files with lumisections (for all data files)
#
for lumiinfo in fjrFileInfo.getLumiSections():
lumi = DbsLumiSection(
LumiSectionNumber = int(lumiinfo['LumiSectionNumber']),
StartEventNumber = 0,
EndEventNumber = 0,
LumiStartTime = 0,
LumiEndTime = 0,
RunNumber = int(lumiinfo['RunNumber']),
)
            # Isn't needed, causes monster slowdown
#if ( apiRef != None ):
# apiRef.insertLumiSection(lumi)
lumiList.append(lumi)
logging.debug("Lumi associated to file is: %s" % ([x for x in lumiList]))
# //
# // Dataset info related to files and creation of DbsFile object
#//
for dataset in fjrFileInfo.dataset:
primary = createPrimaryDataset(dataset)
if jobType == "Merge":
algo = createMergeAlgorithm(dataset)
else:
algo = createAlgorithmForInsert(dataset)
processed = createProcessedDataset(primary, algo, dataset)
dbsFileInstance = DbsFile(
Checksum = checksum,
Adler32 = adler32sum,
NumberOfEvents = nEvents,
LogicalFileName = fjrFileInfo['LFN'],
FileSize = int(fjrFileInfo['Size']),
Status = "VALID",
ValidationStatus = 'VALID',
FileType = fileType,
Dataset = processed,
TierList = makeTierList(dataset['DataTier']),
AlgoList = [algo],
LumiList = lumiList,
ParentList = inputLFNs,
BranchList = fjrFileInfo.branches,
)
results.append(dbsFileInstance)
return results
def createDBSStorageElement(pnn):
"""
_createDBSStorageElement_
"""
return DbsStorageElement(Name = pnn)
def createDBSFileBlock(blockName):
"""
_createDBSFileBlock_
return a DbsFileBlock object with the block name provided
NOTE: This method DOES NOT create a new block in DBS
"""
return DbsFileBlock(Name=blockName)
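
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The dataset keys
# and values below are placeholders chosen to match the lookups performed in
# createMergeAlgorithm/createProcessedDataset above; no DBS API reference is
# passed, so nothing is inserted server-side.
if __name__ == "__main__":
    example_dataset = {
        "ApplicationName": "cmsRun",
        "ApplicationVersion": "CMSSW_10_6_30",
        "ApplicationFamily": "output",
        "OutputModuleName": "Merged",
        "ProcessedDataset": "ExampleProcessed-v1",
        "DataTier": "AODSIM",
    }
    merge_algo = createMergeAlgorithm(example_dataset)
    file_block = createDBSFileBlock("/ExamplePrimary/ExampleProcessed-v1/AODSIM#0001")
    print(merge_algo, file_block)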
/tensorflow_cpu-2.14.0rc1-cp311-cp311-macosx_10_15_x86_64.whl/tensorflow/python/distribute/tpu_replicated_variable.py
"""A Variable class that is replicated to logical cores for model parallelism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import abc
import contextlib
from tensorflow.python.compiler.xla.experimental import xla_sharding
from tensorflow.python.distribute import tpu_util
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_tpu_partition_ops as tpu_partition_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.saved_model import save_context
from tensorflow.python.trackable import base as trackable
def _on_device_update(update_fn, var, value, **kwargs):
with ops.device(var.device):
return update_fn(var, value, **kwargs)
class TPUReplicatedVariable(variables_lib.Variable):
"""Container for replicated `Variables` that are treated as a single variable.
This class maintains a list of replicated variables that are stored on
separate logical TPU devices. The TF2XLA bridge accesses these variables as
if they were a single variable.
"""
def __init__(self, variables, name='TPUReplicatedVariable'):
"""Treats `variables` as a replicated list of `tf.Variable`s.
Example:
```
variables = [
tf.Variable(..., shape=(10, 100), dtype=tf.float32),
tf.Variable(..., shape=(10, 100), dtype=tf.float32),
tf.Variable(..., shape=(10, 100), dtype=tf.float32),
tf.Variable(..., shape=(10, 100), dtype=tf.float32),
]
replicated_variable = TPUReplicatedVariable(variables)
assert replicated_variable.shape.as_list() == [10, 100]
```
Args:
variables: A list of `ResourceVariable`s that comprise this replicated
variable. Variables should not be shared between different
`TPUReplicatedVariable` objects.
name: String. Name of this container. Defaults to "TPUReplicatedVariable".
"""
if not isinstance(variables, abc.Sequence) or not variables or any(
not isinstance(v, variables_lib.Variable) for v in variables):
raise TypeError('Argument `variables` should be a non-empty list of '
f'`variables.Variable`s. Received {variables}')
if any(v.dtype != variables[0].dtype for v in variables):
raise ValueError(
'All elements in argument `variables` must have the same dtype. '
f'Received dtypes: {[v.dtype for v in variables]}')
if any(v.shape != variables[0].shape for v in variables):
raise ValueError(
'All elements in argument `variables` must have the same shape. '
f'Received shapes: {[v.shape for v in variables]}')
self._vars = variables
self._name = name
self._common_name = self._name.split(':')[0]
self._cached_value = None
def __iter__(self):
"""Return an iterable for accessing the underlying sharded variables."""
return iter(self._vars)
@property
def name(self):
"""The name of this object. Used for checkpointing."""
return self._name
@property
def dtype(self):
"""The dtype of all `Variable`s in this object."""
return self._vars[0].dtype
@property
def is_initialized(self):
return self._vars[0].is_initialized
@property
def trainable(self):
return self._vars[0].trainable
@property
def device(self):
"""The device this variable is on."""
return self._vars[0].device
@contextlib.contextmanager
def _handle_graph(self):
with self.handle.graph.as_default():
yield
@contextlib.contextmanager
def _assign_dependencies(self):
if self._cached_value is not None:
with ops.control_dependencies([self._cached_value]):
yield
else:
yield
@property
def constraint(self):
return self._vars[0].constraint
@property
def _in_graph_mode(self):
return self._vars[0]._in_graph_mode # pylint: disable=protected-access
@property
def _unique_id(self):
return self._vars[0]._unique_id # pylint: disable=protected-access
@property
def graph(self):
return self._vars[0].graph
@property
def _shared_name(self):
return self._common_name
@property
def synchronization(self):
return variable_scope.VariableSynchronization.NONE
@property
def aggregation(self):
return variable_scope.VariableAggregation.NONE
@property
def variables(self):
"""The list of `Variables`."""
if save_context.in_save_context():
return [self._vars[0]]
return self._vars
def _export_to_saved_model_graph(self, object_map, tensor_map,
options, **kwargs):
"""For implementing `Trackable`."""
first_var = self._vars[0]
resource_list = first_var._export_to_saved_model_graph( # pylint:disable=protected-access
object_map, tensor_map, options, **kwargs)
for v in self._vars[1:]:
object_map[v] = object_map[first_var]
tensor_map[v.handle] = tensor_map[first_var.handle]
resource_list.append(v.handle)
object_map[self] = object_map[first_var]
tensor_map[self] = tensor_map[first_var.handle]
resource_list.append(self)
return resource_list
def _gather_saveables_for_saved_model(self):
return {trackable.VARIABLE_VALUE_KEY: self._vars[0]}
@property
def shape(self):
return self._vars[0].shape
@property
def handle(self):
if save_context.in_save_context() or context.executing_eagerly():
return self._vars[0].handle
if tpu_util.enclosing_tpu_context() is None:
raise NotImplementedError('TPUReplicatedVariable.handle is not available '
'outside tpu context or save context')
else:
with tpu_util.outside_or_skip_tpu_context():
packed_var = getattr(self, '_packed_var', None)
# TODO(b/202047549): Enable packed variables with soft device placement
if packed_var is None or config.get_soft_device_placement():
tensor = tpu_partition_ops.tpu_partitioned_input_v2(
[v.handle for v in self._vars],
partition_dims=[], is_packed=False)
else:
tensor = tpu_partition_ops.tpu_partitioned_input_v2(
[packed_var.packed_handle], partition_dims=[], is_packed=True)
return xla_sharding.replicate(tensor)
def _read_variable_op(self):
return gen_resource_variable_ops.read_variable_op(self.handle, self.dtype)
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# pylint: disable=protected-access
if tpu_util.enclosing_tpu_context() is None:
return self.read_value()
else:
return self._read_variable_op()
def read_value(self):
return self._vars[0].read_value()
def _update(self, update_fn, value, **kwargs):
"""Converts the value to tensor and updates the variable list."""
input_tensor = ops.convert_to_tensor(
value, name='value_in_tensor', dtype=self.dtype)
return control_flow_ops.group(
*tuple(
_on_device_update(update_fn, v, input_tensor, **kwargs)
for v in self.variables))
def assign(self, value, use_locking=False, name=None, read_value=True):
if tpu_util.enclosing_tpu_context() is None or context.executing_eagerly():
assign_fn = lambda var, *a, **ka: var.assign(*a, **ka)
return self._update(
assign_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
else:
return tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_variable_op)(
self,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def assign_sub(self, value, use_locking=False, name=None, read_value=True):
if tpu_util.enclosing_tpu_context() is None or context.executing_eagerly():
assign_sub_fn = lambda var, *a, **ka: var.assign_sub(*a, **ka)
return self._update(
assign_sub_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
else:
return tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_sub_variable_op)(
self,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def assign_add(self, value, use_locking=False, name=None, read_value=True):
if tpu_util.enclosing_tpu_context() is None or context.executing_eagerly():
assign_add_fn = lambda var, *a, **ka: var.assign_add(*a, **ka)
return self._update(
assign_add_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
else:
return tpu_util.make_raw_assign_fn(
gen_resource_variable_ops.assign_add_variable_op)(
self,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def __str__(self):
debug_str = ',\n'.join(
' %d: %s' % (i, v) for i, v in enumerate(self._vars))
return '%s:{\n%s\n}' % (self.__class__.__name__, debug_str)
def __repr__(self):
debug_repr = ',\n'.join(
' %d: %r' % (i, v) for i, v in enumerate(self._vars))
return '%s:{\n%s\n}' % (self.__class__.__name__, debug_repr)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_tpu_replicated_var(var,
dtype=None,
name=None,
as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
tensor_conversion_registry.register_tensor_conversion_function(
TPUReplicatedVariable, _tensor_conversion_tpu_replicated_var)
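
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). Outside a TPU
# context, TPUReplicatedVariable.assign() falls back to per-replica updates,
# so this also runs on CPU in eager mode; the shapes and values are arbitrary.
if __name__ == "__main__":
    import tensorflow as tf

    replicas = [tf.Variable(tf.zeros([2, 3])) for _ in range(2)]
    replicated = TPUReplicatedVariable(replicas)
    replicated.assign(tf.ones([2, 3]))  # applied to every underlying replica
    print(replicated.shape, replicated.read_value().numpy())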
/VideoToSMI_Server-0.0.4-py3-none-any.whl/VideoToSMIServer/Server.py
from videotosmi import Video
from ConfigHelper import Config
from deepgeo import Utils
import http.server as BaseHTTPServer
import shutil
import cgi
class ServerConfig:
IP = "127.0.0.1"
PORT = 80
MODEL_NAME = "mscoco"
MODEL_CONFIG_PATH = ""
MODEL_ENGINE = "maskrcnn"
FRAME_SET = 60
ROTATION= -90
FILTER = []
IS_RANDOM_NAME = True
VIDEO_FOLDER = ""
def toFile(self, path):
config = Config(self)
config.toFile(path)
def fromFile(self, path):
config = Config(path)
config.setObject(self)
def save_file(obj, image_path, filename):
full_path = image_path + "/" + filename
open(full_path, "wb").write(obj.file.read())
return full_path
class Server:
def __init__(self, config:ServerConfig):
self.config = config
self.server = self._init_Server_()
def _init_Server_(self):
CONFIG = self.config
class __(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, *args):
self.video = Video()
self.config = CONFIG
self.video.add_model(self.config.MODEL_NAME,self.config.MODEL_ENGINE,self.config.MODEL_CONFIG_PATH)
self.config.VIDEO_FOLDER = Utils.create_folder(self.config.VIDEO_FOLDER)
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)
def write_file(self, file_path):
with open(file_path, 'rb') as content:
shutil.copyfileobj(content, self.wfile)
def write_data(self, data):
try:
self.wfile.write(data)
except Exception as e:
print(e, "- falling back to UTF-8 encoding")
self.wfile.write(bytes(data, "utf-8"))
def do_GET(self):
url = str(self.path).split("/")[-1]
ext = url.split(".")[-1]
if len(url) <= 4 or ext!="smi":
self.send_response(404)
self.end_headers()
else:
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', 'application/text; charset=utf-8')
self.end_headers()
self.write_file(self.config.VIDEO_FOLDER+url)
def do_POST(self):
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
key = list(form.keys())[0]
obj = form[key]
filename = obj.filename  # original name of the uploaded file
if self.config.IS_RANDOM_NAME:
filename= Utils.create_name(filename.split(".")[-1],self.config.VIDEO_FOLDER)
img_names = save_file(obj,image_path=self.config.VIDEO_FOLDER, filename=filename)
self.video.detect(img_names,self.config.MODEL_NAME, frame_set=self.config.FRAME_SET, rotation=self.config.ROTATION, ftr=self.config.FILTER)
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', 'text/html; charset=utf-8')
self.end_headers()
file_name = img_names.split(".")[0].split("/")[-1]+".smi"
self.write_data(file_name)
return __
def Run(self):
s = BaseHTTPServer.HTTPServer((self.config.IP, self.config.PORT), self.server)
print("{}:{} ... 대기".format(str(self.config.IP), str(self.config.PORT)))
s.serve_forever()
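
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The values are
# placeholders; videotosmi, deepgeo and ConfigHelper must be importable for
# this to run.
if __name__ == "__main__":
    cfg = ServerConfig()
    cfg.IP = "0.0.0.0"
    cfg.PORT = 8080
    cfg.VIDEO_FOLDER = "./videos"
    Server(cfg).Run()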
/ansible-8.3.0-py3-none-any.whl/ansible_collections/amazon/aws/plugins/modules/backup_selection.py
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
module: backup_selection
short_description: Create, delete and modify AWS Backup selection
version_added: 6.0.0
description:
- Manages AWS Backup selections.
- For more information see the AWS documentation for backup selections
U(https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html).
options:
backup_plan_name:
description:
- Uniquely identifies the backup plan to be associated with the selection of resources.
required: true
type: str
aliases:
- plan_name
backup_selection_name:
description:
- The display name of a resource selection document. Must contain 1 to 50 alphanumeric or '-_.' characters.
required: true
type: str
aliases:
- selection_name
iam_role_arn:
description:
- The ARN of the IAM role that Backup uses to authenticate when backing up the target resource.
type: str
resources:
description:
- A list of Amazon Resource Names (ARNs) to assign to a backup plan. The maximum number of ARNs is 500 without wildcards,
or 30 ARNs with wildcards. If you need to assign many resources to a backup plan, consider a different resource selection
strategy, such as assigning all resources of a resource type or refining your resource selection using tags.
type: list
elements: str
list_of_tags:
description:
- A list of conditions that you define to assign resources to your backup plans using tags.
- Condition operators are case sensitive.
- When you specify more than one condition in I(list_of_tags), you assign all resources that match AT LEAST ONE condition (using OR logic).
type: list
elements: dict
suboptions:
condition_type:
description:
- An operation applied to a key-value pair used to assign resources to your backup plan.
- Condition only supports C(STRINGEQUALS).
type: str
condition_key:
description:
- The key in a key-value pair.
type: str
condition_value:
description:
- The value in a key-value pair.
type: str
not_resources:
description:
- A list of Amazon Resource Names (ARNs) to exclude from a backup plan. The maximum number of ARNs is 500 without wildcards,
or 30 ARNs with wildcards. If you need to exclude many resources from a backup plan, consider a different resource
selection strategy, such as assigning only one or a few resource types or refining your resource selection using tags.
type: list
elements: str
conditions:
description:
- A list of conditions (expressed as a dict) that you define to assign resources to your backup plans using tags.
- When you specify more than one condition in I(conditions), you only assign the resources that match ALL conditions (using AND logic).
- I(conditions) supports C(string_equals), C(string_like), C(string_not_equals), and C(string_not_like). I(list_of_tags) only supports C(string_equals).
type: dict
suboptions:
string_equals:
description:
- Filters the values of your tagged resources for only those resources that you tagged with the same value.
type: list
default: []
elements: dict
suboptions:
condition_key:
description:
- The key in a key-value pair.
- I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name'
type: str
condition_value:
description: The value in a key-value pair.
type: str
string_like:
description:
- Filters the values of your tagged resources for matching tag values with the use of a wildcard character (*) anywhere in the string.
For example, "prod*" or "*rod*" matches the tag value "production".
type: list
default: []
elements: dict
suboptions:
condition_key:
description:
- The key in a key-value pair.
- I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name'
type: str
condition_value:
description: The value in a key-value pair.
type: str
string_not_equals:
description:
- Filters the values of your tagged resources for only those resources that you tagged that do not have the same value.
type: list
default: []
elements: dict
suboptions:
condition_key:
description:
- The key in a key-value pair.
- I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name'
type: str
condition_value:
description: The value in a key-value pair.
type: str
string_not_like:
description:
- Filters the values of your tagged resources for non-matching tag values with the use of a wildcard character (*) anywhere in the string.
type: list
default: []
elements: dict
suboptions:
condition_key:
description:
- The key in a key-value pair.
- I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name'
type: str
condition_value:
description: The value in a key-value pair.
type: str
state:
description:
- Create, delete a backup selection.
default: present
choices: ['present', 'absent']
type: str
author:
- Kristof Imre Szabo (@krisek)
- Alina Buzachis (@alinabuzachis)
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
- amazon.aws.boto3
"""
EXAMPLES = r"""
- name: Create backup selection
amazon.aws.backup_selection:
selection_name: elastic
backup_plan_name: 1111f877-1ecf-4d79-9718-a861cd09df3b
iam_role_arn: arn:aws:iam::111122223333:role/system-backup
resources:
- arn:aws:elasticfilesystem:*:*:file-system/*
"""
RETURN = r"""
backup_selection:
description: Backup selection details.
returned: always
type: complex
contains:
backup_plan_id:
description: Backup plan id.
returned: always
type: str
sample: "1111f877-1ecf-4d79-9718-a861cd09df3b"
creation_date:
description: Backup plan creation date.
returned: always
type: str
sample: "2023-01-24T10:08:03.193000+01:00"
iam_role_arn:
description: The ARN of the IAM role that Backup uses.
returned: always
type: str
sample: "arn:aws:iam::111122223333:role/system-backup"
selection_id:
description: Backup selection id.
returned: always
type: str
sample: "1111c217-5d71-4a55-8728-5fc4e63d437b"
selection_name:
description: Backup selection name.
returned: always
type: str
sample: elastic
conditions:
description: List of conditions (expressed as a dict) that are defined to assign resources to the backup plan using tags.
returned: always
type: dict
sample: {}
list_of_tags:
description: Conditions defined to assign resources to the backup plans using tags.
returned: always
type: list
elements: dict
sample: []
not_resources:
description: List of Amazon Resource Names (ARNs) that are excluded from the backup plan.
returned: always
type: list
sample: []
resources:
description: List of Amazon Resource Names (ARNs) that are assigned to the backup plan.
returned: always
type: list
sample: []
"""
import json
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.backup import get_selection_details
from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
def check_for_update(current_selection, backup_selection_data, iam_role_arn):
update_needed = False
if current_selection[0].get("IamRoleArn", None) != iam_role_arn:
update_needed = True
fields_to_check = [
{
"field_name": "Resources",
"field_value_from_aws": json.dumps(current_selection[0].get("Resources", None), sort_keys=True),
"field_value": json.dumps(backup_selection_data.get("Resources", []), sort_keys=True),
},
{
"field_name": "ListOfTags",
"field_value_from_aws": json.dumps(current_selection[0].get("ListOfTags", None), sort_keys=True),
"field_value": json.dumps(backup_selection_data.get("ListOfTags", []), sort_keys=True),
},
{
"field_name": "NotResources",
"field_value_from_aws": json.dumps(current_selection[0].get("NotResources", None), sort_keys=True),
"field_value": json.dumps(backup_selection_data.get("NotResources", []), sort_keys=True),
},
{
"field_name": "Conditions",
"field_value_from_aws": json.dumps(current_selection[0].get("Conditions", None), sort_keys=True),
"field_value": json.dumps(backup_selection_data.get("Conditions", []), sort_keys=True),
},
]
for field_to_check in fields_to_check:
if field_to_check["field_value_from_aws"] != field_to_check["field_value"]:
if (
field_to_check["field_name"] != "Conditions"
and field_to_check["field_value_from_aws"] != "[]"
and field_to_check["field_value"] != "null"
):
# advanced settings to be updated
update_needed = True
if (
field_to_check["field_name"] == "Conditions"
and field_to_check["field_value_from_aws"]
!= '{"StringEquals": [], "StringLike": [], "StringNotEquals": [], "StringNotLike": []}'
and field_to_check["field_value"] != "null"
):
update_needed = True
return update_needed
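
# Illustrative sketch (not part of the original module and never called by it):
# check_for_update() flags an update when the stored IamRoleArn differs from
# the requested one; the ARNs and resource values below are placeholders.
def _example_check_for_update():
    current = [{
        "IamRoleArn": "arn:aws:iam::111122223333:role/old-backup-role",
        "Resources": ["arn:aws:elasticfilesystem:*:*:file-system/*"],
        "ListOfTags": [],
        "NotResources": [],
        "Conditions": {"StringEquals": [], "StringLike": [], "StringNotEquals": [], "StringNotLike": []},
    }]
    desired = {"Resources": ["arn:aws:elasticfilesystem:*:*:file-system/*"]}
    # Returns True because only the IAM role changed.
    return check_for_update(current, desired, "arn:aws:iam::111122223333:role/new-backup-role")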
def main():
argument_spec = dict(
backup_selection_name=dict(type="str", required=True, aliases=["selection_name"]),
backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]),
iam_role_arn=dict(type="str"),
resources=dict(type="list", elements="str"),
conditions=dict(
type="dict",
options=dict(
string_equals=dict(
type="list",
default=[],
elements="dict",
options=dict(
condition_key=dict(type="str", no_log=False),
condition_value=dict(type="str"),
),
),
string_like=dict(
type="list",
default=[],
elements="dict",
options=dict(
condition_key=dict(type="str", no_log=False),
condition_value=dict(type="str"),
),
),
string_not_equals=dict(
type="list",
default=[],
elements="dict",
options=dict(
condition_key=dict(type="str", no_log=False),
condition_value=dict(type="str"),
),
),
string_not_like=dict(
type="list",
default=[],
elements="dict",
options=dict(
condition_key=dict(type="str", no_log=False),
condition_value=dict(type="str"),
),
),
),
),
not_resources=dict(type="list", elements="str"),
list_of_tags=dict(
type="list",
elements="dict",
options=dict(
condition_type=dict(type="str"),
condition_key=dict(type="str", no_log=False),
condition_value=dict(type="str"),
),
),
state=dict(default="present", choices=["present", "absent"]),
)
required_if = [
("state", "present", ["backup_selection_name", "backup_plan_name", "iam_role_arn"]),
("state", "absent", ["backup_selection_name", "backup_plan_name"]),
]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
state = module.params.get("state")
backup_selection_name = module.params.get("selection_name")
backup_plan_name = module.params.get("backup_plan_name")
iam_role_arn = module.params.get("iam_role_arn")
resources = module.params.get("resources")
list_of_tags = module.params.get("list_of_tags")
not_resources = module.params.get("not_resources")
conditions = module.params.get("conditions")
try:
client = module.client("backup", retry_decorator=AWSRetry.jittered_backoff())
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to connect to AWS")
results = {"changed": False, "exists": False, "backup_selection": {}}
current_selection = get_selection_details(module, client, backup_plan_name, backup_selection_name)
if state == "present":
# build data specified by user
update_needed = False
backup_selection_data = {"SelectionName": backup_selection_name, "IamRoleArn": iam_role_arn}
if resources:
backup_selection_data["Resources"] = resources
if list_of_tags:
backup_selection_data["ListOfTags"] = snake_dict_to_camel_dict(list_of_tags, capitalize_first=True)
if not_resources:
backup_selection_data["NotResources"] = not_resources
if conditions:
backup_selection_data["Conditions"] = snake_dict_to_camel_dict(conditions, capitalize_first=True)
if current_selection:
results["exists"] = True
update_needed = check_for_update(current_selection, backup_selection_data, iam_role_arn)
if update_needed:
if module.check_mode:
results["changed"] = True
module.exit_json(**results, msg="Would have created selection if not in check mode")
try:
client.delete_backup_selection(
aws_retry=True,
SelectionId=current_selection[0]["SelectionId"],
BackupPlanId=current_selection[0]["BackupPlanId"],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to delete selection")
elif not update_needed:
results["exists"] = True
# state is present but the backup selection doesn't exist or needs to be recreated
if not current_selection or update_needed:
results["changed"] = True
results["exists"] = True
plan = get_plan_details(module, client, backup_plan_name)
if module.check_mode:
module.exit_json(**results, msg="Would have created selection if not in check mode")
try:
client.create_backup_selection(
BackupSelection=backup_selection_data, BackupPlanId=plan[0]["backup_plan_id"]
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to create selection")
new_selection = get_selection_details(module, client, backup_plan_name, backup_selection_name)
results["backup_selection"] = camel_dict_to_snake_dict(*new_selection)
elif state == "absent":
if current_selection:
results["changed"] = True
if module.check_mode:
module.exit_json(**results, msg="Would have deleted backup selection if not in check mode")
try:
client.delete_backup_selection(
aws_retry=True,
SelectionId=current_selection[0]["SelectionId"],
BackupPlanId=current_selection[0]["BackupPlanId"],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to delete selection")
module.exit_json(**results)
if __name__ == "__main__":
main()
/merqube_client_lib-0.21.0-py3-none-any.whl/merqube_client_lib/pydantic_v2_types.py
from __future__ import annotations
from datetime import date, datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from pydantic import (
BaseModel,
ConfigDict,
Field,
PositiveFloat,
RootModel,
StrictBool,
StrictStr,
conint,
constr,
)
from typing_extensions import Literal
class Role(Enum):
data_point = "data point"
verification = "verification"
calculation = "calculation"
development = "development"
administration = "administration"
class CalcFreq(Enum):
Daily = "Daily"
Daily__EOD = "Daily, EOD"
Intraday = "Intraday"
class DisplayMonth(Enum):
Jan = "Jan"
Feb = "Feb"
Mar = "Mar"
Apr = "Apr"
May = "May"
Jun = "Jun"
Jul = "Jul"
Aug = "Aug"
Sep = "Sep"
Oct = "Oct"
Nov = "Nov"
Dec = "Dec"
class FieldModel(BaseModel):
display_name: Optional[StrictStr] = None
field_name: Optional[StrictStr] = None
class PortfolioDisplay(BaseModel):
fields: Optional[List[FieldModel]] = None
class RebalFreq(Enum):
Annual = "Annual"
Daily = "Daily"
Monthly = "Monthly"
Quarterly = "Quarterly"
Semi_Annual = "Semi-Annual"
Weekly = "Weekly"
Bi_Monthly = "Bi-Monthly"
Intraday = "Intraday"
None_ = "None"
class Stage(Enum):
prod = "prod"
test = "test"
development = "development"
class WeightingMethod(Enum):
Other = "Other"
Equal = "Equal"
class IndexClassDefinitionPost(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
name: StrictStr
namespace: Optional[StrictStr] = None
index_class_args_spec: Dict[str, Any]
class IndexSpec(BaseModel):
index_class: Optional[StrictStr] = None
index_class_args: Optional[Dict[str, Any]] = Field(
None,
description="should validate against index_class_args_spec field of the IndexClassDefinition identified by index_class",
)
index_variables: Optional[Dict[str, Any]] = Field(None, description="TODO")
version: Optional[int] = None
class Type(Enum):
security = "security"
index = "index"
class Related(BaseModel):
default_display: StrictBool = False
id: StrictStr
metric: Optional[StrictStr] = None
name: StrictStr
type: Type
class ChartType(Enum):
line = "line"
spline = "spline"
area = "area"
bar = "bar"
column = "column"
class CompareMode(Enum):
absolute = "absolute"
percent = "percent"
class ConstructorType(Enum):
chart = "chart"
stockChart = "stockChart"
class Value(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
display_name: StrictStr
path: StrictStr
class Datum(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
url: StrictStr
array_field: StrictStr
values: List[Value]
key_field: StrictStr
class CustomCharts(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
name: StrictStr
xlabel: StrictStr
ylabel: StrictStr
chart_type: ChartType = ChartType.line
compare_mode: CompareMode = CompareMode.percent
constructor_type: ConstructorType = ConstructorType.stockChart
size: float = 12
data: Optional[List[Datum]] = None
class IndexBenchmark(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
ticker: StrictStr = Field(
..., description="accepts either FSYM_ID, or MQI:XXX to benchmark against another index.", example="MQI:XXX"
)
threshold: float = Field(
...,
description="the deviation that triggers an outlier event, IE, the daily difference of the index vs the daily difference of the underlying should not be more than this percent",
example=0.05,
)
index_metric: StrictStr = Field(
"price_return",
description="the metric of this index to compare against the ticker/threshold, eg can compare some other metric of this index than price_return",
example="total_return",
)
benchmark_metric: StrictStr = Field(
"price_return",
description="only accepted if the ticker is an MQI. The metric of the benchmark index to compare against; eg can compare to some other metric than price_return of the benchmark",
example="total_return",
)
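
# Hedged usage sketch (not part of the generated models); the ticker and
# threshold values are placeholders.
if __name__ == "__main__":
    _benchmark = IndexBenchmark(ticker="MQI:XXX", threshold=0.05)
    print(_benchmark.model_dump())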
class Provider(Enum):
bloomberg = "bloomberg"
reuters = "reuters"
morningstar = "morningstar"
factset = "factset"
nasdaq = "nasdaq"
wind = "wind"
secapi = "secapi"
class BloombergProviderKwargsPostType(Enum):
EOD = "EOD"
RT = "RT"
EOD_AND_RT = "EOD_AND_RT"
class BloombergProviderKwargs(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
post_type: BloombergProviderKwargsPostType
class ReutersProviderKwargsPostType(Enum):
EOD = "EOD"
RT = "RT"
EOD_AND_RT = "EOD_AND_RT"
class ReutersProviderKwargs(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
post_type: Optional[ReutersProviderKwargsPostType] = None
class SecapiProviderKwargs(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
security_type: StrictStr
security_name: StrictStr
metric: StrictStr
class NasdaqEnv(Enum):
testing = "testing"
production = "production"
class NasdaqProviderKwargs(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
nasdaq_env: NasdaqEnv = Field(..., description="Which Nasdaq environment to send data to")
eod_date_delta: Optional[int] = Field(
None,
description="How many days to offset the Nasdaq date (T if before 6:30pm EST otherwise T + 1) when getting EOD data from the Index",
)
class Compression(Enum):
NONE = "NONE"
GZIP = "GZIP"
class LayoutType(Enum):
SINGLE_FILE = "SINGLE_FILE"
YEARLY_DIR_DAILY_FILE = "YEARLY_DIR_DAILY_FILE"
class DataCollectionsSpec(BaseModel):
model_config = ConfigDict(
extra="allow",
)
index_dc_subtype: StrictStr = Field(
..., description="Index Data Collection Subtype object key moves to array after transformation"
)
auto_persist: StrictBool = Field(
False, description="Boolean to indicate whether to automatically persist the data. Default is False."
)
compression: Compression = Compression.GZIP
date_col: StrictStr = "date"
format: Literal["csv"] = "csv"
layout_type: LayoutType = Field(
LayoutType.SINGLE_FILE,
description="SINGLE_FILE: one file. YEARLY_DIR_DAILY_FILE: one directory per year, each day, a file with YYYYMMDD format.",
)
location: StrictStr
name: constr(pattern=r"^[A-Za-z0-9_]*$", min_length=3, max_length=128, strict=True)
seed_file_path: Optional[StrictStr] = Field(None, description="Seed file for the data collection.")
start_time: Optional[StrictStr] = None
class RealTimeTradeType(Enum):
ELIGIBLE = "ELIGIBLE"
INELIGIBLE = "INELIGIBLE"
class PortfolioUom(Enum):
WEIGHT = "WEIGHT"
SHARES = "SHARES"
class EquityIdentifierType(Enum):
RIC = "RIC"
CURRENCY_CODE = "CURRENCY_CODE"
SEDOL = "SEDOL"
FSYM_ID = "FSYM_ID"
TICKER = "TICKER"
BBG_TICKER = "BBG_TICKER"
CUSIP = "CUSIP"
class AssetType(Enum):
CASH = "CASH"
THIRD_PARTY_INDEX = "THIRD_PARTY_INDEX"
EQUITY = "EQUITY"
MUTUAL_FUND = "MUTUAL_FUND"
MERQUBE_INDEX = "MERQUBE_INDEX"
class SecurityCreationResponse(BaseModel):
id: Optional[StrictStr] = None
inserts: Optional[int] = None
class DataType(Enum):
string = "string"
float64 = "float64"
datetime64 = "datetime64"
int64 = "int64"
bool = "bool"
object = "object"
class MetricsSchema(BaseModel):
data_type: DataType
description: StrictStr = Field(..., description="a description of what this metric represents")
name: StrictStr = Field(..., description="metric name")
object_schema: Optional[Dict[str, Any]] = Field(
None,
description="optional - this supports complex objects, ie you can specify the metric is of this schema. THis allows you to upload a json schema itself of this metric. THis is for downstream users of this metric.",
)
class IdentifierType(Enum):
RIC = "RIC"
secapi_name = "secapi_name"
class Security(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
identifier_type: IdentifierType
identifier_value: StrictStr = Field(..., example="AAPL.QQ")
security_type: StrictStr = Field(..., example="equity")
class SecurityListPost(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
name: StrictStr = Field(..., description="unique name among all other security lists")
namespace: Optional[StrictStr] = None
realtime: Optional[StrictBool] = None
securities: List[Security]
class OptionResponse(BaseModel):
price: Optional[float] = Field(None, description="Option Price")
vol: Optional[float] = Field(None, description="Implied Volatility")
interest_rate: Optional[float] = Field(None, description="Implied Interest Rate")
dividend_yield: Optional[float] = Field(None, description="Implied Dividend Yield for the underlying")
spot: Optional[float] = Field(None, description="Underlying spot price")
delta: Optional[float] = Field(None, description="Delta of this option")
theta: Optional[float] = Field(None, description="Theta of the option being priced")
gamma: Optional[float] = Field(None, description="Gamma of the option being priced")
vega: Optional[float] = Field(None, description="Vega of the option being priced")
bid_ask_spread: Optional[float] = Field(
None, description="Spread between Bid and Ask prices of the option being priced"
)
request_id: Optional[StrictStr] = Field(None, description="Request ID sent in the request")
class FindStrikeResponse(BaseModel):
strike: Optional[float] = Field(None, description="Strike that prices to the given budget.")
spot: Optional[float] = Field(None, description="Underlying spot price.")
strike_percentage: Optional[float] = Field(
None, description="Strike as a percentage of the underlying spot price that prices to the given budget."
)
request_id: Optional[StrictStr] = Field(None, description="Request ID sent in the request")
class DeadlineType(Enum):
calculation = "calculation"
restatement = "restatement"
class OgPriority(Enum):
P1 = "P1"
P2 = "P2"
P3 = "P3"
class Deadline(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
time_of_day: constr(pattern=r"^(?:[01]\d|2[0-3]):(?:[0-5]\d):(?:[0-5]\d)$", strict=True) = Field(
..., description="the time of day of the deadline"
)
deadline_type: DeadlineType = DeadlineType.calculation
next_day: StrictBool = Field(
False,
description="set this to true if the index starts running on day T but the deadline does not pass until T+1 in that index timezone",
)
og_priority: OgPriority = OgPriority.P1
business_days_prior: int = Field(0, description="the amount of days before T to check for data existence")
metrics: Optional[List[StrictStr]] = Field(
None, description="the list of metrics to check in the deadline checker script"
)
class MetricsArrItem(BaseModel):
id: Optional[StrictStr] = Field(None, example="price_return")
value: Optional[float] = Field(None, example=1000.0000000000002)
class MetricsArr(RootModel):
root: List[MetricsArrItem]
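
# Hedged usage sketch (not part of the generated models): MetricsArr is a
# pydantic RootModel wrapping a list of MetricsArrItem entries; the metric id
# and value are placeholders.
if __name__ == "__main__":
    _metrics = MetricsArr.model_validate([{"id": "price_return", "value": 1000.0}])
    print(_metrics.root[0].id, _metrics.root[0].value)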
class MetricsDef(BaseModel):
name: Optional[StrictStr] = Field(None, example="daily_return")
type: Optional[StrictStr] = Field(None, example="double")
class ErrorCodes(BaseModel):
code: Optional[StrictStr] = None
message: Optional[StrictStr] = None
class IntradayPublishConfigBloombergTargetParams(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
rounding_decimals: Optional[int] = None
ticker: Optional[StrictStr] = None
class IntradayPublishConfigReutersTargetParams(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
rounding_decimals: Optional[int] = None
ticker: Optional[StrictStr] = None
post_type: Optional[ReutersProviderKwargsPostType] = None
class RunStateStatus(Enum):
PENDING_CREATION = "PENDING_CREATION"
SUCCEEDED = "SUCCEEDED"
FAILED = "FAILED"
RUNNING = "RUNNING"
class ClientIndexConfigBase(BaseModel):
base_date: constr(pattern=r"^[0-9]{4}[-/][0-9]{2}[-/][0-9]{2}", strict=True) = Field(
..., description="the date that the index has level 'base_value'", example="2000-01-04"
)
base_value: Optional[PositiveFloat] = Field(
None, description="set the index value on base_date to this base_value", example=1000.0
)
bbg_ticker: Optional[StrictStr] = Field(
None,
description="due to the limitations of the Bloomberg ticker creation process (no API), this must be a pre-created ticker. You can email MerQube a list of tickers to create on your behalf at [email protected]. Then, you provide those as input to these indices",
example="MY_TICKER",
)
currency: StrictStr = Field("USD", description="set to the currency of the index", example="USD")
description: StrictStr = Field(
...,
description="set to the description of the index, which will show on merqube.com",
example="My Index Description",
)
email_list: List[StrictStr] = Field(
default_factory=list,
description="list of emails to send daily dissemination reports, and the initial backtest reports, to; if not specified, no emails will be sent",
example=["[email protected]", "[email protected]"],
)
is_intraday: StrictBool = Field(False, description="set to True if the index is intraday", example=False)
name: StrictStr = Field(
...,
description="set to the name of the index. Commonly people use the ticker as the name, but that is not necessary. Must be globally unique - you will get a 409 if this index name is taken ",
example="My Index",
)
namespace: StrictStr = Field(..., description="set to the namespace of the index", example="mycompany")
title: StrictStr = Field(
..., description="set to the title of the index that shows up on merqube.com", example="My Index Title"
)
class ReinvestmentType(Enum):
AT_OPEN = "AT_OPEN"
AT_CLOSE = "AT_CLOSE"
class ClientSSTRSpecific(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
ric: StrictStr = Field(..., description="set to the RIC of the underlying equity", example="LMVH.PA")
reinvestment_type: ReinvestmentType = ReinvestmentType.AT_OPEN
class ClientSSTRConfig(ClientIndexConfigBase, ClientSSTRSpecific):
pass
model_config = ConfigDict(
extra="forbid",
)
class FeeType(Enum):
fixed = "fixed"
percentage_pre = "percentage_pre"
percentage_post = "percentage_post"
class ClientDecrementSpecific(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
fee_value: float = Field(
...,
description="set to the value of the fee to apply. For fixed, this is bps, for percentage_pre/post this is given as a percentage",
example=0.05,
)
fee_type: FeeType = Field(..., description="set to the type of fee to apply", example="fixed")
day_count_convention: StrictStr = Field(
...,
description="must either adapt to a fixed number of days in a year e.g. 'f360' or to Actual ISDA convention, i.e. 'actual'",
example="f360",
)
ric: StrictStr = Field(..., description="set to the RIC of the underlying SSTR", example="LMVH.PA")
start_date: Optional[date] = Field(
None,
description="set to the start date of the index if it is to differ from base_date. If this is specified, it must be before base_date. In this case the base_date, base_value is used as a fixed intercept, with the index level starting from start_date and passing through that intercept",
example="2004-01-04",
)
class ClientDecrementConfig(ClientIndexConfigBase, ClientDecrementSpecific):
pass
model_config = ConfigDict(
extra="forbid",
)
class LevelOverride(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
date: constr(pattern=r"^[0-9]{4}[-][0-9]{2}[-][0-9]{2}", strict=True)
level: Any
comment: Optional[StrictStr] = None
class ClientMultiEBConsituent(BaseModel):
date: constr(pattern=r"^[0-9]{4}-[0-9]{2}-[0-9]{2}", strict=True) = Field(..., example="2022-03-11")
identifier: StrictStr = Field(..., example="AAPL.OQ")
quantity: Any = Field(
..., description="the number of shares or percentage of portfolio of this constituent", example=-0.2512355
)
security_type: AssetType = Field(..., example="EQUITY")
class CorporateActions(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
reinvest_dividends: StrictBool = True
class ClientMultiEBPortUpdate(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
constituents: List[ClientMultiEBConsituent] = Field(
default_factory=list,
description="The EB portfolio directory",
example=[
{"date": "2022-03-11", "identifier": "AAPL.OQ", "quantity": -0.2512355, "security_type": "EQUITY"},
{"date": "2022-03-11", "identifier": "USD", "quantity": 60.0, "security_type": "CASH"},
],
)
class ClientMultiEBPortInitial(ClientMultiEBPortUpdate):
base_date: constr(pattern=r"^[0-9]{4}[-/][0-9]{2}[-/][0-9]{2}", strict=True) = Field(
..., description="the date that the index has level 'base_value'", example="2000-01-04"
)
base_value: PositiveFloat = Field(
..., description="set the index value on base_date to this base_value", example=1000.0
)
class MerqTimestamp(RootModel):
root: Union[
datetime,
date,
constr(
pattern=r"^[0-9]{4}-[0-9]{2}-[0-9]{2}(?:[ T][0-9]{2}:[0-9]{2}:[0-9]{2})?(?:[.][0-9]{1,6})?$", strict=True
),
] = Field(..., example=["2021-01-01", "2021-01-01T01:01:01", "2021-01-01T01:01:01.zzzzz"])
class Status(BaseModel):
created_at: Optional[StrictStr] = None
created_by: Optional[StrictStr] = None
last_modified: StrictStr
last_modified_by: Optional[StrictStr] = None
locked_after: Optional[datetime] = Field(
None,
description="If this is set (non null), the manifest is locked for all edits to any other field after this timestamp. A PUT/PATCH may be used to first unlock the manifest, by setting this field (to a max of one hour in the future), or to `null` again, to make other edits.",
)
class CalendarIdentifiers(BaseModel):
calendar_identifiers: Optional[List[constr(pattern=r"^(FUT|MIC|FX|SM|MQI):.+$", strict=True)]] = None
class HolidaysToAdd(BaseModel):
holidays_to_add: Optional[List[date]] = None
class Operator(Enum):
union = "union"
intersection = "intersection"
difference = "difference"
class NestedCalendarSchema(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
operator: Operator
children: List[Union[CalendarIdentifiers, HolidaysToAdd, NestedCalendarSchema]]
class Condition(Enum):
any = "any"
all = "all"
custom = "custom"
class WeekmaskEnum(Enum):
Mon = "Mon"
Tue = "Tue"
Wed = "Wed"
Thu = "Thu"
Fri = "Fri"
Sat = "Sat"
Sun = "Sun"
class FlatCalendarSchema(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
calendar_identifiers: List[constr(pattern=r"^(FUT|MIC|FX|SM|MQI):.+$", strict=True)] = Field(default_factory=list)
swaps_monitor_codes: List[constr(pattern=r"^[a-zA-Z]{3}$", strict=True)] = Field(default_factory=list)
condition: Condition = Condition.all
holidays_to_add: List[date] = Field(default_factory=list)
holidays_to_remove: List[date] = Field(default_factory=list)
weekmask: List[WeekmaskEnum] = Field(
[WeekmaskEnum.Mon, WeekmaskEnum.Tue, WeekmaskEnum.Wed, WeekmaskEnum.Thu, WeekmaskEnum.Fri],
description="weekmask of valid business days",
)
class HolidayCalendarSpec(RootModel):
root: Union[FlatCalendarSchema, NestedCalendarSchema]
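
# Hedged usage sketch (not part of the generated models): a flat calendar
# spec validated through the HolidayCalendarSpec root model; the identifier
# and holiday date below are placeholders.
if __name__ == "__main__":
    _cal = HolidayCalendarSpec.model_validate(
        {"calendar_identifiers": ["MIC:XNYS"], "holidays_to_add": ["2024-12-25"]}
    )
    print(type(_cal.root).__name__)  # FlatCalendarSchema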
class ArgumentsOrEnvVars(RootModel):
root: List[List[StrictStr]]
class PodKwargs(BaseModel):
name: Optional[StrictStr] = None
class SubPod(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
arguments: Optional[ArgumentsOrEnvVars] = None
command: StrictStr
pod_type: StrictStr
uuid: StrictStr
env_vars: Optional[ArgumentsOrEnvVars] = None
pod_kwargs: Optional[PodKwargs] = None
class S3Bucket(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
bucket: StrictStr
path: Optional[StrictStr] = None
class AirflowConfig(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
description: Optional[StrictStr] = Field(None, description="A short description related with the generated runner")
s3_location: Optional[S3Bucket] = Field(
None,
description="Bucket and path where the generated dag should be stored. If not defined `merq-airflow-dags-airflow2` is defined in merqutil",
)
dag: Optional[Dict[str, Any]] = Field(None, description="use this to set kwargs for the dag")
pod: Optional[Dict[str, Any]] = Field(None, description="use this to set kwargs for the pod")
env: Optional[Dict[str, Any]] = Field(None, description="use this to set ENV variables in the pod")
subpods: Optional[List[SubPod]] = None
class EmailDissapiConfig(BaseModel):
recipient_list: List[StrictStr] = Field(..., min_length=1)
email_list: Optional[List[StrictStr]] = None
bcc_list: Optional[List[StrictStr]] = None
cc_list: Optional[List[StrictStr]] = None
subject: StrictStr
content: Optional[StrictStr] = None
file_keys: Optional[List[StrictStr]] = None
class SftpDissapiConfig(BaseModel):
sftp_ids: List[StrictStr]
file_keys: List[StrictStr]
class S3DissapiConfig(BaseModel):
bucket: StrictStr
key_prefix: StrictStr
file_keys: List[StrictStr]
class DisseminationDestinations(BaseModel):
email_dissapi_configs: Optional[List[EmailDissapiConfig]] = Field(None, min_length=1)
sftp_dissapi_configs: Optional[List[SftpDissapiConfig]] = None
s3_dissapi_configs: Optional[List[S3DissapiConfig]] = None
class IndexReport(BaseModel):
uuid: StrictStr
program_args: Optional[Dict[str, Any]] = None
dissemination_destinations: Optional[DisseminationDestinations] = None
class RunConfigLabelAdditional(BaseModel):
label: StrictStr = Field(..., description="unique label of this rc")
class HolidayCalendar(BaseModel):
cal_type: Optional[StrictStr] = Field(None, example="MIC")
mic: Optional[StrictStr] = Field(None, example="XNYS")
class Schedule(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
retries: int
retry_interval_min: int
schedule_start: StrictStr
schedule_cron: Optional[StrictStr] = None
timeout_delta: Optional[StrictStr] = Field(None, example="minutes=17")
business_days_prior: Optional[int] = Field(None, example=1)
holiday_calendar: Optional[HolidayCalendar] = None
class CopyFromManifest(BaseModel):
copy_from_uuid: StrictStr = Field(..., description="the uuid of the resource to start from")
field_deletes: Optional[List[StrictStr]] = Field(None, description="fields to delete from the from-manifest")
field_updates: Optional[Dict[str, Any]] = Field(
None,
description="a dictionary of top level keys that the original manifest will be updated with (ie `manifest.update(this[field_updates])`. Top level keys can be added or replaced. To change the namespace, add namespace to the field_updates dict. The new manifest must be valid per the original resource definition.",
)
name: StrictStr = Field(..., description="the name of the new resource")
class CrudExtra(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
id: StrictStr
namespace: StrictStr
status: Status
class Administrative(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
role: Role
deadlines: Optional[List[Deadline]] = None
class IndexClassDefinitionPatchPutGet(IndexClassDefinitionPost, CrudExtra):
pass
model_config = ConfigDict(
extra="forbid",
)
class Webpage(BaseModel):
custom_charts: Optional[List[CustomCharts]] = None
visible: StrictBool = True
class IdentifierUUIDPost(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
display_name: Optional[StrictStr] = None
index_name: StrictStr
metric: Optional[StrictStr] = None
name: StrictStr = Field(..., description="unique name among all other Identifiers of the provider resource type.")
namespace: Optional[StrictStr] = None
rounding_decimals: Optional[int] = None
provider_kwargs: Optional[
Union[BloombergProviderKwargs, ReutersProviderKwargs, SecapiProviderKwargs, NasdaqProviderKwargs]
] = None
start_date: Optional[date] = None
ticker: StrictStr
class IdentifierUUIDPatchPutGet(IdentifierUUIDPost, CrudExtra):
pass
model_config = ConfigDict(
extra="forbid",
)
class IdentifierUUIDRef(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
name: StrictStr
provider: Provider
class ListDataCollections(BaseModel):
model_config = ConfigDict(
extra="allow",
)
data_collection: List[DataCollectionsSpec] = Field(default_factory=list)
class BasketPosition(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
amount: float = Field(..., title="Amount")
asset_type: AssetType
identifier: StrictStr = Field(..., title="Identifier")
identifier_type: EquityIdentifierType
position_id: Optional[StrictStr] = Field(None, title="Position Id")
class RicEquityPosition(BasketPosition):
model_config = ConfigDict(
extra="forbid",
)
real_time_trade_types: Optional[List[RealTimeTradeType]] = None
use_primary_listing: StrictBool = Field(
False,
description="use the security's primary listing rather than the one from the identifier",
title="Use Primary Listing",
)
class NewSecurity(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
name: StrictStr
namespace: StrictStr = Field(
...,
description="the calling user must have permission to this namespace to create the security in this namespace",
)
eff_ts: Optional[MerqTimestamp] = Field(
None,
description="DEPRECATED: this is no longer used for security creates, but allowed for backwards compatibility",
)
prov_ts: MerqTimestamp
source: Optional[StrictStr] = None
class SecurityMetrics(BaseModel):
eff_ts: MerqTimestamp
id: StrictStr = Field(..., example="0000699a-fcd7-45d4-8ce4-064a4ffeced0")
metric: StrictStr = Field(..., example="Level")
source: Optional[StrictStr] = Field(None, example="some_gcp_feed")
prov_ts: MerqTimestamp
value: Union[StrictStr, float, int, StrictBool, List, Dict[str, Any]]
class SecurityMetricsDeletion(BaseModel):
eff_ts: MerqTimestamp
id: StrictStr = Field(..., example="0000699a-fcd7-45d4-8ce4-064a4ffeced0")
metric: StrictStr = Field(..., example="Level")
prov_ts: Optional[MerqTimestamp] = None
source: Optional[StrictStr] = Field(None, example="some_gcp_feed")
class SecurityListPatchPutGet(SecurityListPost, CrudExtra):
pass
model_config = ConfigDict(
extra="forbid",
)
class Metrics(BaseModel):
ts: Optional[datetime] = Field(None, description="iso8601 format")
metrics: Optional[MetricsArr] = None
class Stats(BaseModel):
annual_volatility: Optional[float] = Field(None, example=3.717606464173326)
annualized_return: Optional[float] = Field(None, example=6.665050717276233)
cumulative_return: Optional[float] = Field(None, example=2.7597506251556547)
id: Optional[StrictStr] = Field(None, example="YTD")
max_drawdown: Optional[float] = Field(None, example=-1.3877029296510448)
sharpe_ratio: Optional[float] = Field(None, example=1.759543308780235)
start_date: Optional[MerqTimestamp] = None
end_date: Optional[MerqTimestamp] = None
class IntradayTickFilter(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
start_date: Optional[MerqTimestamp] = None
end_date: Optional[MerqTimestamp] = None
start_time: Optional[StrictStr] = Field(
None,
description="Inclusive start time of the day to start running this intraday index. Format is %H:%M:%S.%f. This should be in the timezone of the index based on tzinfo.",
example=74340.262345,
)
end_time: Optional[StrictStr] = Field(
None,
description="Inclusive end time of the day to stop running this intraday index. Format is %H:%M:%S.%f. This should be in the timezone of the index based on tzinfo.",
example=74340.262345,
)
days_of_week: Optional[List[int]] = Field(
None,
description="Which days of the week to run the index. Each day of the week is denoted by an integer with Monday = 0 and Sunday = 6",
)
exclude_holidays: Optional[StrictBool] = Field(
None,
description="If this is True use the index holiday calendar to decide which days to run the index. It will only run on non-holidays.",
)
class IntradayPublishConfigTargetBase(BaseModel):
active_time_ranges: Optional[List[IntradayTickFilter]] = Field(
None,
description="Time ranges during which values should be published to this target. Target level active_time_ranges override publish_config metric level active_time_ranges.",
)
class IntradayPublishConfigDbTarget(IntradayPublishConfigTargetBase):
target: Literal["db"]
class IntradayPublishConfigSecapiTarget(IntradayPublishConfigTargetBase):
target: Literal["secapi"]
class IntradayPublishConfigBloombergTarget(IntradayPublishConfigTargetBase):
target: Literal["bloomberg"]
params: Optional[IntradayPublishConfigBloombergTargetParams] = None
class IntradayPublishConfigReutersTarget(IntradayPublishConfigTargetBase):
target: Literal["reuters"]
params: Optional[IntradayPublishConfigReutersTargetParams] = None
class IntradayPublishConfigTarget(RootModel):
root: Union[
IntradayPublishConfigDbTarget,
IntradayPublishConfigSecapiTarget,
IntradayPublishConfigBloombergTarget,
IntradayPublishConfigReutersTarget,
]
class IntradayPublishConfigItem(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
default_active_time_ranges: Optional[List[IntradayTickFilter]] = Field(
None,
description="Time ranges during which values should be published for this metric. This will be overriden if a target level active_time_ranges is set.",
)
targets: List[IntradayPublishConfigTarget] = Field(..., description="List of targets to send this index metric to")
class IntradayPublishConfig(RootModel):
root: Optional[Dict[str, Union[IntradayPublishConfigItem, List[IntradayPublishConfigTarget]]]] = None
class RunState(BaseModel):
status: RunStateStatus
calculation_start_ts: Optional[
constr(pattern=r"^\d{4}-\d{2}-\d{2}(T|\s)\d{2}:\d{2}:\d{2}(\.\d{6})?$", strict=True)
] = Field(
None,
description="set to the start of the last calculation, if the status is not PENDING_CREATION (has not ever started)",
)
error: Optional[StrictStr] = Field(None, description="An error message if status is FAILED")
class ClientMultiEBSpecific(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
corporate_actions: CorporateActions = Field(
{"reinvest_dividends": True},
description="how to handle corporate actions",
example={"reinvest_dividends": True},
)
holiday_calendar: Optional[HolidayCalendarSpec] = None
level_overrides: Optional[List[LevelOverride]] = Field(
None,
description="optional level overrides for the index on certain days.",
example=[
{"date": "2022-03-18", "level": 1364.344, "comment": "test for some special day for some special reason"}
],
)
run_hour: int = Field(
..., description="set to the hour of day to run the index in the index's timezone", example=16
)
run_minute: int = Field(..., description="set to the minute of the hour to run the index ", example=30)
timezone: StrictStr = Field("US/Eastern", description="set to the timezone of the index", example="US/Eastern")
class ClientMultiEBConfig(ClientIndexConfigBase, ClientMultiEBPortInitial, ClientMultiEBSpecific):
pass
model_config = ConfigDict(
extra="forbid",
)
class RunConfig(BaseModel):
model_config = ConfigDict(
extra="allow",
)
airflow_config: Optional[AirflowConfig] = None
command: Optional[StrictStr] = None
command_arguments: Optional[ArgumentsOrEnvVars] = None
index_report_uuids: Optional[List[StrictStr]] = None
index_reports: Optional[Union[List[StrictStr], List[IndexReport]]] = None
job_enabled: StrictBool
holiday_calendar: Optional[HolidayCalendarSpec] = None
pod_image_and_tag: Optional[StrictStr] = Field(None, description="overrides the image")
schedule: Schedule
tzinfo: Optional[StrictStr] = None
class RunConfigLabel(RunConfig, RunConfigLabelAdditional):
pass
model_config = ConfigDict(
extra="allow",
)
class Intraday(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
enabled: StrictBool = Field(..., description="If the intraday process should be run or not")
security_id: Optional[StrictStr] = Field(
None, description="Security ID of the intraday_index security where ticker data is served"
)
ticker_period: Optional[float] = Field(None, description="Duration between ticker datapoints. In seconds")
tzinfo: Optional[StrictStr] = Field(None, description="Timezone of intraday tick timestamp comparisons")
active_time_ranges: Optional[List[IntradayTickFilter]] = None
publish_config: Optional[IntradayPublishConfig] = None
calculation_max_retry_delay: Optional[conint(ge=0)] = Field(
None,
description="How many seconds to keep retrying the real time calculation before setting the force flag. Defalts to 0 which means retry forever.",
)
resource_cpu_request: Optional[conint(ge=1)] = Field(
None, description="Minimum cpu resources requested in milliCPU"
)
resource_memory_request: Optional[conint(ge=0)] = Field(
None, description="Minimum memory resources requested in mebibytes."
)
heartbeat_timeout: Optional[conint(ge=1)] = Field(
None, description="Max number of seconds between heartbeats before the rtindex pod is considered unhealthy"
)
startup_failure_timeout: Optional[conint(ge=1)] = Field(
None,
description="Max number of seconds to wait for the first heartbeat from the rtindex pod before considering it a failed startup. By default it will wait 40sec",
)
data_refresh_period: Optional[conint(ge=0)] = Field(
None,
description="After the rtindex refreshes its data this is the number of seconds to wait before refreshing again. 0 means do not refresh. Defaults to 3600 seconds (60min)",
)
class IndexDefinitionPost(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
administrative: Administrative
base_date: Optional[constr(pattern=r"^[0-9]{4}[-/][0-9]{2}[-/][0-9]{2}", strict=True)] = Field(
None, description="the date that the index has level 'base_value'", example="2000-01-04"
)
base_year: Optional[int] = None
benchmark: Optional[Union[IndexBenchmark, List[IndexBenchmark]]] = None
buffer_end: Optional[float] = None
buffer_start: Optional[float] = None
calc_freq: Optional[CalcFreq] = None
currency: Optional[StrictStr] = None
custom_metrics: Optional[List[MetricsSchema]] = None
description: StrictStr
display_month: Optional[DisplayMonth] = None
documents: Optional[Dict[str, Any]] = None
excess_strategy: Optional[Literal["keep_in_cash"]] = None
family: StrictStr
family_description: Optional[StrictStr] = None
first_value_date: Optional[constr(pattern=r"^[0-9]{4}[-/][0-9]{2}[-/][0-9]{2}", strict=True)] = Field(
None, description="the first date that the index has a value; if set, will be < base_date", example="2000-01-04"
)
identifiers: Optional[List[IdentifierUUIDRef]] = Field(None, min_length=0)
index_class_uuid: Optional[StrictStr] = None
intraday: Optional[Intraday] = Field(
None, description="Only available for indexes with intraday tick computation", title="Intraday Details"
)
launch_date: StrictStr
methodology: Optional[StrictStr] = None
month: Optional[int] = None
name: constr(pattern=r"^[a-zA-Z][a-zA-Z0-9-_]{0,99}$", strict=True)
namespace: Optional[StrictStr] = None
plot_metric: Optional[StrictStr] = None
portfolio_allocation_display: Optional[StrictBool] = Field(
None, description="whether or not portfolio_allocations are shown on the website. False by default"
)
portfolio_display: Optional[PortfolioDisplay] = None
rebal_freq: Optional[RebalFreq] = None
related: Optional[List[Related]] = None
run_configuration: Optional[RunConfig] = None
run_configurations: Optional[List[RunConfigLabel]] = None
spec: Optional[IndexSpec] = None
stage: Stage
tags: Optional[StrictStr] = None
title: StrictStr
webpage: Optional[Webpage] = None
weighting_method: Optional[WeightingMethod] = None
class IndexDefinitionPatchPutGet(IndexDefinitionPost, CrudExtra):
pass
model_config = ConfigDict(
extra="forbid",
)
class EquityBasketPortfolio(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
positions: List[Union[BasketPosition, RicEquityPosition]] = Field(..., title="Positions")
timestamp: constr(pattern=r"^\d{4}-\d{2}-\d{2}(T|\s)\d{2}:\d{2}:\d{2}(\.\d{6})?$", strict=True) = Field(
..., description="the time at which the portfolio should take effect", title="Timestamp"
)
unit_of_measure: PortfolioUom
share_selection_date: Optional[
constr(pattern=r"^\d{4}-\d{2}-\d{2}(T|\s)\d{2}:\d{2}:\d{2}(\.\d{6})?$", strict=True)
] = Field(
None,
description="Share selection date, if specified, should be before the effective date (parameter 'timestamp') for this Target Portfolio. In case when the unit_of_measure is set to WEIGHT, we first determine the shares using the prices on the share selection date. If any constituent is de-listed by the share selection date, then that constituent's weight is distributed to other constituents before selecting the shares. We then apply the corporate actions from the share selection date to the day before the portfolio effective date. In case when the unit_of_measure is set to SHARES, we follow the similar steps as above, except the step of determining shares, as it is not needed.",
)
divisor: Optional[float] = Field(
None, description="A scaling factor to apply for the SHARES of the portfolio constituents"
)
class ClientTemplateResponse(BaseModel):
post_template: Optional[IndexDefinitionPost] = Field(None, description="a full index manifest that can be posted")
bbg_ident_template: Optional[IdentifierUUIDPost] = Field(
None, description="only returned if the client specified a bbg ticker"
)
target_ports: Optional[List[EquityBasketPortfolio]] = Field(
None, description="only returns for indices where the client should post the target portfolios"
)
NestedCalendarSchema.model_rebuild()
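# A minimal usage sketch of the publish-config target union above, assuming
# pydantic v2 (RootModel/ConfigDict are already used in this module). The
# `target` Literal field selects the concrete target model; the dict below is
# illustrative only.
if __name__ == "__main__":
    example_target = IntradayPublishConfigTarget.model_validate({"target": "db"})
    # The RootModel wraps the matched union member, here the "db" target.
    print(type(example_target.root).__name__, example_target.root.target)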
|
PypiClean
|
/notion-gcal-sync-1.1.12.tar.gz/notion-gcal-sync-1.1.12/notion_gcal_sync/install.py
|
import logging
import os
import sys
import click
import yaml
from notion_gcal_sync.clients.GCalClient import GCalClient
from notion_gcal_sync.config import Config
CONFIG_PATH = os.path.join(os.path.expanduser("~"), ".notion-gcal-sync")
CONFIG_FILE = os.path.join(CONFIG_PATH, "config.yml")
CONFIG_DEFAULT_FILE = os.path.join(CONFIG_PATH, "config.default.yml")
TOKEN_FILE = os.path.join(CONFIG_PATH, "token.json")
CLIENT_SECRET_FILE = os.path.join(CONFIG_PATH, "client_secret.json")
def confirm_create_path(path: str) -> bool:
logging.info("{} does not exist".format(path))
if click.confirm("Create non-existing {}?".format(path), default=True):
return True
return False
def config_path_created() -> bool:
if os.path.exists(CONFIG_PATH):
return True
if not confirm_create_path(CONFIG_PATH):
return False
logging.info("Creating {}".format(CONFIG_PATH))
os.mkdir(CONFIG_PATH)
return True
def config_file_created() -> bool:
if os.path.exists(CONFIG_FILE):
return True
if not confirm_create_path(CONFIG_FILE):
return False
logging.info("Configuring {}".format(CONFIG_FILE))
config_dict = {}
for key, val in Config().to_dict().items():
if key in ["gcal_calendars", "notion_columns"]:
continue
if key == "gcal_default_calendar_name":
gcal_mail = click.prompt(text="google_mail (e.g [email protected])", default=None)
config_dict["gcal_default_calendar_name"] = "Default"
config_dict["gcal_calendars"] = {"Default": gcal_mail}
continue
config_dict[key] = click.prompt(text="{}".format(key), default=val)
logging.info("Writing configured values to {}".format(CONFIG_FILE))
Config(**config_dict).to_yaml()
logging.info("Open {} to configure additional values.".format(CONFIG_FILE))
return True
def client_secret_created() -> bool:
if not os.path.exists(TOKEN_FILE) and os.path.exists(CLIENT_SECRET_FILE):
logging.info("{} does not exist".format(TOKEN_FILE))
logging.info("Generating token file {}".format(TOKEN_FILE))
GCalClient.get_credentials()
if os.path.exists(TOKEN_FILE):
return True
logging.error("{} nor {} exist".format(CLIENT_SECRET_FILE, TOKEN_FILE))
logging.info(
"Please follow the instructions on setting up the client_secret.json: "
"https://github.com/Ravio1i/notion-gcal-sync/blob/main/docs/setup.md#setup-credentials-for-google-calendar"
)
return False
def configure():
confirmed = config_path_created() and config_file_created() and client_secret_created()
if not confirmed:
logging.info("Exiting...")
sys.exit()
with open(CONFIG_FILE, "r") as yaml_file:
yaml_cfg = yaml.safe_load(yaml_file)
return Config(**yaml_cfg)
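# A minimal invocation sketch: configure() walks the user through creating
# CONFIG_PATH, CONFIG_FILE and the Google token file, then returns a loaded
# Config. The gcal_default_calendar_name attribute is assumed from the
# Config.to_dict() keys handled above.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    cfg = configure()
    logging.info("Loaded config with default calendar '%s'", cfg.gcal_default_calendar_name)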
|
PypiClean
|
/netoprmgr-1.3.5.tar.gz/netoprmgr-1.3.5/pip/_internal/vcs/mercurial.py
|
from __future__ import absolute_import
import logging
import os
from pip._vendor.six.moves import configparser
from pip._internal.exceptions import BadCommand, InstallationError
from pip._internal.utils.misc import display_path
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs.versioncontrol import (
VersionControl,
find_path_to_setup_from_repo_root,
vcs,
)
if MYPY_CHECK_RUNNING:
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import RevOptions
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = (
'hg', 'hg+file', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http',
)
@staticmethod
def get_base_rev_args(rev):
return [rev]
def export(self, location, url):
# type: (str, HiddenText) -> None
"""Export the Hg repository at the url to the destination location"""
with TempDirectory(kind="export") as temp_dir:
self.unpack(temp_dir.path, url=url)
self.run_command(
['archive', location], show_stdout=False, cwd=temp_dir.path
)
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(make_command('clone', '--noupdate', '-q', url, dest))
self.run_command(
make_command('update', '-q', rev_options.to_args()),
cwd=dest,
)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.RawConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url.secret)
with open(repo_config, 'w') as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning(
'Could not switch Mercurial repository to %s: %s', url, exc,
)
else:
cmd_args = make_command('update', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
self.run_command(['pull', '-q'], cwd=dest)
cmd_args = make_command('update', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
def get_remote_url(cls, location):
url = cls.run_command(
['showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if cls._is_local_repository(url):
url = path_to_url(url)
return url.strip()
@classmethod
def get_revision(cls, location):
"""
Return the repository-local changeset revision number, as an integer.
"""
current_revision = cls.run_command(
['parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
@classmethod
def get_requirement_revision(cls, location):
"""
Return the changeset identification hash, as a 40-character
hexadecimal string
"""
current_rev_hash = cls.run_command(
['parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
@classmethod
def is_commit_id_equal(cls, dest, name):
"""Always assume the versions don't match"""
return False
@classmethod
def get_subdirectory(cls, location):
"""
Return the path to setup.py, relative to the repo root.
Return None if setup.py is in the repo root.
"""
# find the repo root
repo_root = cls.run_command(
['root'], show_stdout=False, cwd=location).strip()
if not os.path.isabs(repo_root):
repo_root = os.path.abspath(os.path.join(location, repo_root))
return find_path_to_setup_from_repo_root(location, repo_root)
@classmethod
def controls_location(cls, location):
if super(Mercurial, cls).controls_location(location):
return True
try:
cls.run_command(
['identify'],
cwd=location,
show_stdout=False,
on_returncode='raise',
log_failed_cmd=False)
return True
except (BadCommand, InstallationError):
return False
vcs.register(Mercurial)
|
PypiClean
|
/cs272_project-0.2.0.tar.gz/cs272_project-0.2.0/cs272_project/model.py
|
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
logger = logging.getLogger(__name__)
from transformers import GPT2PreTrainedModel, GPT2Model
class AS2HeadModel(nn.Module):
def __init__(self, config):
super().__init__()
mlp_neurons = 1024
self.linear1 = nn.Linear(config.hidden_size, mlp_neurons)
self.linear2 = nn.Linear(mlp_neurons, 1)
self.dropout = nn.Dropout(config.resid_pdrop)
self.ln_1 = nn.LayerNorm(mlp_neurons, eps=config.layer_norm_epsilon)
self.act = F.relu
self.act_out = torch.sigmoid
def forward(self, hidden_states, cls_index=None):
output = hidden_states[:, -1, :]
output = self.linear1(output)
output = self.act(output)
output = self.ln_1(output)
output = self.dropout(output)
output = self.linear2(output)
output = self.act_out(output)
return output
class GPT2TANDAModel(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.as2_head = AS2HeadModel(config)
self.init_weights()
self.loss_fct = CrossEntropyLoss()
def get_output_embeddings(self):
return self.lm_head
def forward(
self,
input_ids=None,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
lm_labels=None,
mc_labels=None,
):
transformer_outputs = self.transformer(
input_ids,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.as2_head(hidden_states)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_labels is not None:
loss = self.loss_fct(mc_logits, mc_labels)
outputs = (loss,) + outputs
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss = self.loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
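# A minimal usage sketch for GPT2TANDAModel. It assumes the transformers
# 2.x-era GPT2Model/GPT2Config API that this module targets (forward takes
# `past=` and the config exposes `resid_pdrop`/`hidden_size`); shapes and
# values are illustrative only.
if __name__ == "__main__":
    from transformers import GPT2Config

    config = GPT2Config(n_layer=2, n_head=2, n_embd=64, vocab_size=100)
    model = GPT2TANDAModel(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 8))
    # Language-modeling loss only; the AS2 head still produces mc_logits, and
    # the same CrossEntropyLoss instance would be reused if mc_labels were given.
    outputs = model(input_ids=input_ids, lm_labels=input_ids)
    lm_loss, lm_logits, mc_logits = outputs[0], outputs[1], outputs[2]
    print(lm_loss.item(), lm_logits.shape, mc_logits.shape)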
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_4/models/username.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_4 import models
class Username(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'username': 'str'
}
attribute_map = {
'username': 'username'
}
required_args = {
}
def __init__(
self,
username=None, # type: str
):
"""
Keyword args:
username (str): The username of the user.
"""
if username is not None:
self.username = username
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Username`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Username`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Username`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Username`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Username, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Username):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
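# A minimal usage sketch: Username behaves like a small dict-backed model
# whose attributes are validated against `attribute_map` and can be read back
# either as attributes or as keys.
if __name__ == "__main__":
    user = Username(username="alice")
    print(user.to_dict())    # {'username': 'alice'}
    print(user['username'])  # alice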
|
PypiClean
|
/aim-with-auth-support-3.15.2.post12.tar.gz/aim-with-auth-support-3.15.2.post12/README.md
|
<div align="center">
<img src="https://user-images.githubusercontent.com/13848158/154338760-edfe1885-06f3-4e02-87fe-4b13a403516b.png">
<h3>An easy-to-use & supercharged open-source experiment tracker</h3>
Aim logs your training runs, provides a beautiful UI to compare them, and an API to query them programmatically.
</div>
<br/>
<img src="https://user-images.githubusercontent.com/13848158/154338753-34484cda-95b8-4da8-a610-7fdf198c05fd.png">
<p align="center">
<a href="#about-aim"><b>About</b></a> •
<a href="#why-use-aim"><b>Features</b></a> •
<a href="#demos"><b>Demos</b></a> •
<a href="https://github.com/aimhubio/aim/tree/main/examples"><b>Examples</b></a> •
<a href="#quick-start"><b>Quick Start</b></a> •
<a href="https://aimstack.readthedocs.io/en/latest/"><b>Documentation</b></a> •
<a href="#roadmap"><b>Roadmap</b></a> •
<a href="https://community.aimstack.io/"><b>Discord Community</b></a> •
<a href="https://twitter.com/aimstackio"><b>Twitter</b></a>
</p>
<div align="center">
[PyPI package](https://pypi.org/project/aim/) • [License: Apache-2.0](https://opensource.org/licenses/Apache-2.0) • [GitHub issues](http://github.com/aimhubio/aim/issues)
</div>
<div align="center">
<sub>Integrates seamlessly with your favorite tools</sub>
<br/>
<br/>
<img src="https://user-images.githubusercontent.com/13848158/155354389-d0301620-77ea-4629-a743-f7aa249e14b5.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354496-b39d7b1c-63ef-40f0-9e59-c08d2c5e337c.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354380-3755c741-6960-42ca-b93e-84a8791f088c.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354342-7df0ef5e-63d2-4df7-b9f1-d2fc0e95f53f.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354392-afbff3de-c845-4d86-855d-53df569f91d1.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354355-89210506-e7e5-4d37-b2d6-ad3fda62ef13.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354397-8af8e1d3-4067-405e-9d42-1f131663ed22.png" width="60" />
<br/>
<img src="https://user-images.githubusercontent.com/13848158/155354513-f7486146-3891-4f3f-934f-e58bbf9ce695.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354500-c0471ce6-b2ce-4172-b9e4-07a197256303.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354361-9f911785-008d-4b75-877e-651e026cf47e.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354373-1879ae61-b5d1-41f0-a4f1-04b639b6f05e.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354483-75d9853f-7154-4d95-8190-9ad7a73d6654.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354329-cf7c3352-a72a-478d-82a7-04e3833b03b7.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354349-dcdf3bc3-d7a9-4f34-8258-4824a57f59c7.png" width="60" />
<img src="https://user-images.githubusercontent.com/13848158/155354471-518f1814-7a41-4b23-9caf-e516507343f1.png" width="60" />
<img src="https://user-images.githubusercontent.com/48801049/165162736-2cc5da39-38aa-4093-874f-e56d0ba9cea2.png" width="60" />
<img src="https://user-images.githubusercontent.com/48801049/165074282-36ad18eb-1124-434d-8439-728c22cd7ac7.png" width="60" />
</div>
<div align="center">
<br/>
<kbd>
<img width="650px" src="https://user-images.githubusercontent.com/13848158/136374529-af267918-5dc6-4a4e-8ed2-f6333a332f96.gif" />
</kbd>
</div>
# About Aim
| Track and version ML runs | Visualize runs via beautiful UI | Query runs metadata via SDK |
|:--------------------:|:------------------------:|:-------------------:|
| <img width="600px" src="https://user-images.githubusercontent.com/13848158/154337794-e9310239-6614-41b3-a95b-bb91f0bb6c4f.png"> | <img width="600px" src="https://user-images.githubusercontent.com/13848158/154337788-03fe5b31-0fa3-44af-ae79-2861707d8602.png"> | <img width="600px" src="https://user-images.githubusercontent.com/13848158/154337793-85175c78-5659-4dd0-bb2d-05017278e2fa.png"> |
Aim is an open-source, self-hosted ML experiment tracking tool.
It's good at tracking lots (1000s) of training runs and it allows you to compare them with a performant and beautiful UI.
You can use not only the great Aim UI but also its SDK to query your runs' metadata programmatically.
That's especially useful for automations and additional analysis in a Jupyter Notebook.
Aim's mission is to democratize AI dev tools.
# Why use Aim?
### Compare 100s of runs in a few clicks - build models faster
- Compare, group and aggregate 100s of metrics thanks to effective visualizations.
- Analyze, learn correlations and patterns between hparams and metrics.
- Easy pythonic search to query the runs you want to explore.
### Deep dive into details of each run for easy debugging
- Hyperparameters, metrics, images, distributions, audio, text - all available at hand on an intuitive UI to understand the performance of your model.
- Easily track plots built via your favourite visualisation tools, like plotly and matplotlib (see the sketch below).
- Analyze system resource usage to effectively utilize computational resources.
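For example, a minimal sketch of tracking a matplotlib figure alongside metrics (assuming `aim.Image` accepts matplotlib figures, as in recent Aim releases; names and values are illustrative):
```python
import matplotlib.pyplot as plt
from aim import Image, Run

run = Run()
fig = plt.figure()
plt.plot([0, 1, 2, 3], [0.9, 0.5, 0.3, 0.2])
# Tracked figures appear in the Images explorer next to the run's metrics
run.track(Image(fig), name="loss_curve", step=0, context={"subset": "train"})
```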
### Have all relevant information organised and accessible for easy governance
- Centralized dashboard to holistically view all your runs, their hparams and results.
- Use SDK to query/access all your runs and tracked metadata.
- You own your data - Aim is open source and self hosted.
# Demos
| Machine translation | lightweight-GAN |
|:---:|:---:|
| <a href="http://play.aimstack.io:10001/metrics?grouping=HQGdK9Xxy35e6sY1CYkCmk1WbWMN2AsCNfJJ3d1RJYLtrVPMoF5UpGiA6CF8bEJnfzRsKpqespf3AEuKSVrhUYvYk9MxzNGA9XZWYUf6phEg8AMbZGLRVDXnAPDuo8tueqsST1ZLizWzQwDYJWHUza6pyB2Eojt9uWqNHUdb858TqDRnCJzqiVJXKXEzFWUyvU8MckJo1qpqWWCTb4GpYN6DUJZx2GXDGR21e2xxd4m7PmNUnbA9B3apLttZoipJF6c3v7tNUKmb6irpqnNB3yc57tqYDa1XZuKfDxkMtyFdQ1x95K4jjsTVwhftEWLze35QNcxNXRCGGS9o9yEfTLG26GUX2zjPZFCjjMGU6vV7z1xRccK8MyoGrLSgAQCbvk68dTGBHpXUBvCRq8N&chart=FviZzVrt4fVQPjpCLr9sVGGrcR5etSroyqambiKpm3nTgpyv4eQxKuwNX9uN8UtKmzYUhUyTMBEANHmtbwjLApkvnYeNbxGNC6PVcoqi65m1XJnSrvgt8WiD89BapFAWRUwAGx6SWD7KZPsk3RQyysU7W7FjD3Q99NusxFGhsEfD6HXc7i8xH9KHDRGjLwh6x9VTtSp4FS8HEvpLSiiJoX7LCTi8pB7dXvrQ8G5w3jPsFz4qXYFdsVaCNL1BpFFZuiqQNkfbnM84gEq7UmiV1VzM4oS3AgQHxADG3kpBVp6eKTey9F1Swd4FcUkFA9QEPjgQgqwRGjkquZ2bdDDVLBnCh7JPvboP2kifCiZZ5MDdV9MMx6PKHp4DusWyWLXiHQYPkpGPWBiuccMUXDsuJaCWJbuABdY7CyiJMv1jdHYkjabygSxehPVyEDefWAtjBfv2vaeM1xv63jadbmpKYFxft7qmuT9HvVxiGvRgs4RQFxy8K4rtFBca3HNs1mDaaY81gy9MGXyw7BS5Fniu92jaJpsWDdg6Y3AQBLZtrpJy2obEZ4yzJaCVT7JUNPAyyCUNLck393VFLoEkaD9CU5npK5R7tj1c1G3gkMNQXnSXy5NpSj8deMmXV5qz3JKu1nq2caGQKcqjzy2gLkExdm674AMFjSg9yFjK6VqASXQ17NKtWRUvaYoxGbHDAFQaMKWKh8QLm22QA9mKT8NksLptWozbgDvafnQLNMvezLU5bvKV5o75PAWYiRB56RcYfEhzaB6YWdgL7TJicyY5rFi6Az8UZ7wqB3N5iMuZdpxhKn5KbZDxyuUMuvVt24i5LVPPmmwQtqxMoJ4aLo48a2YvDW6TAkdQjNjvn6KcEEz6GTixujb1YHhMUD8v4AepWKEwKz1ddEca1P2wLQjbpihCuaqbxeohnuZZLogJdUBojBEDgrnrrVpPBaLLEkGSpkJbtrsKUuEeBo1AF3yNgHftLbynGpobVF5DhmsmddmiA6c8vSTokJxHhjpnW8mAcNHBRtmVJCT7VkdHSAhNypM4Hivwfx5jCccG9LauKmCeRMDzHiA57TX9W6ttcPHSvUyQorARQAd2oeNY4H83hZjHh9Bt8iwKZRt4xK6hrTR8tif7hq8eURXrGH9Ys7TzykXK8FHHWvLNzNnYf3E4a9NkD43MjfKvMM1hj4Q2K8MHbmRCqrmFrHP5kim9shq6mhLPTgwha32nvnrBkfPQVPwpGTzKuwE&select=CdsQ7jVNkogQhRzQR3e28Ek39AZ4Ma2y37k5zJaf9EZmQhMjy8GtGm4LGU6dRFuAVG7mYww5xDrQAE74KHQ3Kk1e6661RmcmNALAUjtHyCmrTVBMCnBGNiuq1y7EzmxoodYHU1BV1rnoefQAw2kTBtbWi11hV1P4LcwFCcXfUWF6rpRC7ehEnUCTqUV4bkGVJPLcmk9mdmiGwa2YgmnSShNGPVGZiEi1rMVECyngSRVdqdZwAeXBGWFLfqF1KbZeCo4MTF4SSmFupJ9zLhYbuojEbopyFWHQ6xs3sq9epPeaQziLM4Js7oFYRmuFWUYdFqnZngmewXWmi7tQAgVqhiT6dMjG2eTdfgX6WuRSuoHALkh2XJhHA6GfZLUcxC5Ni9YyKuBTamtaYarbNNJJ8z15WWvuUkLpjgHdEpE2h924xFdu8aoZNuiQxYGvcndaW1BTGMXS5fTKPqYfe2n8Ky2HWPkcX3hEXtyawu1F9BndKNaXLPgsdAoFBArBZnSe28YtSmTa5LRucKVBAxakvv5MWMXchAmpaGFQbZyYUoMgQLcJd7Y96x6zSR7nhwr5Ar81BrmqYz2WFLuk7osUbwsc9HbSG6CQt8p6Vg2u7DjKaZXW8pjkPHAKrHWtHEDiJPJ5rj6VsdFm3"> <img width="800px" src="https://user-images.githubusercontent.com/13848158/154340796-c9e91b13-8ee0-4a67-bcde-8cf3aaa7ba99.jpg"> </a> | <a 
href="http://play.aimstack.io:10002/images?grouping=E1zQzcmtDR3wibEa1MVysTvCyZEv1T8ixkCxTWExCyMnHtX2HyiF9eszvPgfd2xdJ5TUTKGpSs1bsLVq5tHAV3uWtsZmmckn6HjNtVCMyQDJpwhiEy5tAyw&select=2NEXuD7fFoaLcwRjymjA1wLmUrGs9s3AiXcCW82C367SwJt18CAB6xzkMGowrUDuDwggE1huaPVcQJpQUsmAQx1CnGiqCUBp2jPMd5mMNPX2QKQMcmvu9ZykBNkeBvCQFPd9ERuQD2g1EjWuvyJ3H53mAZTfp94LCXvR9CUsG5ei2CjQUzfZLM6DCyUr1GPaEVnY5f1EwzicNxXuoutkBgqCqaobJ7Do4q4eHAA6ooiWU6ekS3D2sLj6qYwhVTjfGCPfbWwBiH83nFkY3fLExzdeTY2zeUHeeYikQR9S7xHbVD8WvjekdQVp8X4dNLJZxiVmEqHpPRnU3ZrYsMhE7yFAAgjJwPNUzLTt6YFrtZBcmc4rwAC2oyrqysUSEr6gzL6LcJ6yuqDGf9D5tzftHbTLDkhc8B2sCgTS&images=9vt2MvuQj2Q7jxGQYhNH6ZnWw4CsEzubFcFotuqCHfzvuruDs6pyWfhqhinD4hCiYsAURXgJbmq2L5z4vEQMbrE7iTy8XHNndPBPyuCEvRpxGwwFkukX3YGkVhNDQmUPtBagKbsMAgUASJM8hFtKboqbu9KWTModsjd4Qag7aL1KbJCzBYmZLCpKMSf6eKUTQtfwLLWbgquEx6oahAoSujV6aZ5cjsjN4JdGtPbicySpccgLDQHaQYTHCseA6sPVaEwCsoQDJAcTnjEVFFUUUW5HbPkrNgeRKb8M9pxudrweRQ3gNukLx5yizxQKrmcKU7saxLraqYUA2y5LmEQohsWGUq8sKkvGDH6oNLx2ytJsdVM5PGieENXMAaPg3KuWYXXTwixzwscdDsHSWeiXTGj1QxUKiBCnfwkZ7pZbYMCSgczSn9WpwygrKhb2znSYhn4gFzCsdjiXPPDv9LpPzkFVbsMCvk1CadqpwxTfxNmteKm7CQVViyCrvheGAk5rKpPzaBc5agyvfKpUqgRarxojnG8a4s1Y7qFT1rNVSC13C9h5fG54dDoFHxDyvej3bVTMDYsAiie3eVA3yEskyBGwApPNtjLY2H4b9jTmR3V7jnA9moFGfwMiXUjt8eoJsWTNkqBdRGSnqdva8zi5bApQaggnLebgCRpK1g8VvPrVS3ABQC8aMZJ2vibebHePWs1ahWZ2AXUUYwcuSRkiUWHwgtG9U1x6rR41UxFFNvW9rpDsU99DWzYpdgxfU75wTEPb2qeXYPxV1zVt5ixcFfA3Lvtsp5XXyfHY9FaNFeKKzAUQXPAkMWG4yH4Tp5me8Nt4puBC4pvJrboVcQdSsYhtxj2YwUjzN7Jyn9BV28dtRFPdtFUUc9pKpLvhZAD6XPDtKqrN3pG3LwYTKAiMDtC6tHvDqhQGuJGQZH5cVyTKkT48Xup4znass8tJxUJwacVQa6x2ewyd8AXCfc4j9bPQssabADmc1ho5Eghn5qe82cEcyG1okdfBCRMfmZ5EeCeKQYmoXddxM2cAwfJzCzG9bGtaMvXk3VV8TrSiRKjg3Exbftv8gx12QAzoBP9zosuULFpEAPZF1TvHJbEUmYgu9gwuRTAS3qYiywB7dsCq8wsTr7qmwt8WFFucpte8WvrkRGYy1GA7bD6uPhvS6sr1Wv259oB7Tkr5kirMo6Vdkz8ex9zVd4h2AP1J1dy8cqXaSk5B3HTZ6n1qdAMt4faLtt8SNqg4EqcvXx6r2J1czzXAPa9oSseYifvedcMyxnWkcTvno4QA6sp6zH25ubEwPAVzZZk35nNoJPasH3PgEgLafGPLCsPDD2sku5djPjfqkbDLUWMYm7BbTr7xK8v4UoTS485rPiF6VKoNQSuEnKQMT3uNRTS4EXNMjyRfUs4gk1217EhGVLhfqiZQyG4gqEhcJE3phLydLskk36PyGEbyFyvigjwvrK6boJnFpesze6Czc13HdWbWp6LHLseYujigdmdktU6EQb5KmghstmJ9gUF14JVPjYP57xtv19UT8XDuaJfwJn9z3U17ZDFnQ5zbXKSwD9ikMEd6VFo1xLBRHSmRdFSqcC96s23qWmMhheGtv6tTQAkq7CB1J1gy3skuFJXqhs1RvFWbFFUCLmHeTCtskEsQVP5Rkzat5Jn3QtSqCiRpEGc9Ykd5bWFAaqoudGcqEt993tVfVS3ZrVKAa6NDmbtAcdnfsUZxDt2muRPJDNVCBNW5k8XvevMpMsL3uCETtdutufp1VyLur2Yyx5WA8AeeFeDBxRxad3ZHbH27XdMpxWHF26hnbQAewspG1weRpVW9Ebc4Lc53RBeu8gVmTbKydrri1FHaYySZqCxht8bN4kdqSmkymmcTN3cfRN9DmzcmfKG6GbTDeCA9oXz5cVqrGXZcAiaj1oinnByW7W8GwhtK1Tzd7LG74Nu35DUdPCJXMH2ug4SEa3yXERXCaLvAHvFZAS89e7RUPpr3nTTrQLurjHSdkJ39pwEJpDcDjeWHsJSmTG1x195e6xvMmgPxAZd3Lzyk8Cxme8p1cY7FehSbTPc3zAAwi9LDGYyoQRcdbRHPLJ2W8rt9KeNfNq9moa1RVFPCPvhGuuyycT4f4QkP4Nvy4iUCaB5d8B1hcgmtg2X9Zpg6GUR32RYneQigK6S9ZYPNnaFeCNZZrwaYjkDpKMTMB6N24JC1TEAH8en3kXzf8CpLWeJpxoyB3hcCxjFHLYaovzgfGPeFBPY6ADDUcT3xkpUUEybdxE1cX7drHvBwyGqeU5g7i424tydxqufUgPY5sF9bM6mdoA3AvqDD9B3Zai71irxYXX8e6rRck4RwptJgBMX2gbotizoz9LrUwFQ2naBfJvbfEhZNCzME8a7H2YiVcq4Z6pkfbT1uMLfaixfw8nQCzVRbJAyVZgGzVbBj242LpD48R6VmxGcU5t2XkN8hZyYdBk1Uds9QyUG9VpC8ka7HjkvxBMknk6v4BjMnHnAj4ZxDUxMWEDbWw6iWD3iYWzVn3n5dzRcAqCQv3m2ZUnwuHHCTVJVZKZVyxrFP5eznpNv87RUXMfjbXypoLJFVtMoq81y82hYRFSkbAUwzhhoXBAGeBGDmDcwky2Hf7ZmfkzDLnRke916VxhTRLr8c6nXokCn8xwweuJHFeBqx7D88gpRbn5RrnH33545zyzyNpZpabQUGY3L7G3QznVw6wCS9x7FMixW2mgCeeWFhPDiz5Kz6DyyjaT413VSoRBCRakNcitYHUXqqCUPsFmZ3LTedA8jN99fYzse5LX36TSVbjnM7XmiZ8vNoH5mUsawmvG7NXbhgoyhx4rzL7t57A4g7sQg4YhGAFzEbXrh416riiPH8r52on2VEqkjNPDnybSg3cwuR6rPfMWA7YoyEAp14aStUPaKqbM9omConMxZde5o2DpjS86G5vDBY1o7F
4LnBHLHRxKfqAkTPjvEdhaYY2uY6i598po9b2fAtpUGCbXnzcNrV5Vei5WkiQAqRT6whGr29PTLsAVGed71drx7BqzNiDcFJBL9dVrVoPqYLvrYVGi89MuuWuirD7CRhXWahysjrNpFf4aHXmuXS3UD7SFgkqAZzL1hrVq77K8UhGMMWLUzE9gjP6PH4xL6fJetKaRGZNpbsqDoKuBkBAk9j1nGpYMAyuo2H2AWUyj8PUgAbi1e4KPeqNqMVT85oZ9jkCggYczgNhT8gw5QsMarouMctMdbokxRfxz2xt9r2DuNmbEmq9e13Tqv94VrzR91R2o7pvH7YUFtJvcoJwR8K5jyof5SfKHT53zaBKxkLfCpPP3qR9ZCbAzVbreFKsQnCcZpd643VA9wtgKXxc375NwKj4QbnvafKNU9qc455d3S3o57mU4DFA7yHSqY1q41zySxfXYx4txL4TiqeyyTQu7KcHYbTUYRs69pkE1rWRW84N1qmisw2o7iLQPrhWkixrRDRk5toYWQg6ZDZExCyedYBGjsUAut"> <img width="800px" src="https://user-images.githubusercontent.com/13848158/154340790-bc7b7a21-e8a1-43a1-809d-4060b5bfb60f.jpg"> </a> |
| Training logs of a neural translation model (from WMT'19 competition). | Training logs of 'lightweight' GAN, proposed in ICLR 2021. |
| FastSpeech 2 | Simple MNIST |
|:---:|:---:|
| <a href="http://play.aimstack.io:10004/runs/d9e89aa7875e44b2ba85612a/audios"> <img width="800px" src="https://user-images.githubusercontent.com/13848158/154340778-dbe19620-2f27-4298-b0cb-caf3904760f1.jpg"> </a> | <a href="http://play.aimstack.io:10003/runs/7f083da898624a2c98e0f363/distributions"> <img width="800px" src="https://user-images.githubusercontent.com/13848158/154340785-a7e4d9fd-d048-4207-8cd1-c4edff9cca6a.jpg"> </a> |
| Training logs of Microsoft's "FastSpeech 2: Fast and High-Quality End-to-End Text to Speech". | Simple MNIST training logs. |
# Quick Start
Follow the steps below to get started with Aim.
**1. Install Aim on your training environment**
```shell
pip3 install aim
```
**2. Integrate Aim with your code**
```python
from aim import Run
# Initialize a new run
run = Run()
# Log run parameters
run["hparams"] = {
"learning_rate": 0.001,
"batch_size": 32,
}
# Log metrics
for i in range(10):
run.track(i, name='loss', step=i, context={ "subset":"train" })
run.track(i, name='acc', step=i, context={ "subset":"train" })
```
_See the full list of supported trackable objects (e.g. images, text, etc.) [here](https://aimstack.readthedocs.io/en/latest/quick_start/supported_types.html)._
**3. Run the training as usual and start Aim UI**
```shell
aim up
```
**4. Or query runs programmatically via SDK**
```python
from aim import Repo
my_repo = Repo('/path/to/aim/repo')
query = "metric.name == 'loss'" # Example query
# Get collection of metrics
for run_metrics_collection in my_repo.query_metrics(query).iter_runs():
for metric in run_metrics_collection:
# Get run params
params = metric.run[...]
# Get metric values
steps, metric_values = metric.values.sparse_numpy()
```
# Integrations
<details>
<summary>
Integrate PyTorch Lightning
</summary>
```python
from aim.pytorch_lightning import AimLogger
# ...
trainer = pl.Trainer(logger=AimLogger(experiment='experiment_name'))
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-pytorch-lightning)._
</details>
<details>
<summary>
Integrate Hugging Face
</summary>
```python
from aim.hugging_face import AimCallback
# ...
aim_callback = AimCallback(repo='/path/to/logs/dir', experiment='mnli')
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
callbacks=[aim_callback],
# ...
)
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-hugging-face)._
</details>
<details>
<summary>
Integrate Keras & tf.keras
</summary>
```python
import aim
# ...
model.fit(x_train, y_train, epochs=epochs, callbacks=[
aim.keras.AimCallback(repo='/path/to/logs/dir', experiment='experiment_name')
# Use aim.tensorflow.AimCallback in case of tf.keras:
# aim.tensorflow.AimCallback(repo='/path/to/logs/dir', experiment='experiment_name')
])
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-keras-tf-keras)._
</details>
<details>
<summary>
Integrate KerasTuner
</summary>
```python
from aim.keras_tuner import AimCallback
# ...
tuner.search(
train_ds,
validation_data=test_ds,
callbacks=[AimCallback(tuner=tuner, repo='.', experiment='keras_tuner_test')],
)
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-kerastuner)._
</details>
<details>
<summary>
Integrate XGBoost
</summary>
```python
from aim.xgboost import AimCallback
# ...
aim_callback = AimCallback(repo='/path/to/logs/dir', experiment='experiment_name')
bst = xgb.train(param, xg_train, num_round, watchlist, callbacks=[aim_callback])
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-xgboost)._
</details>
<details>
<summary>
Integrate CatBoost
</summary>
```python
from aim.catboost import AimLogger
# ...
model.fit(train_data, train_labels, log_cout=AimLogger(loss_function='Logloss'), logging_level="Info")
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-catboost)._
</details>
<details>
<summary>
Integrate fastai
</summary>
```python
from aim.fastai import AimCallback
# ...
learn = cnn_learner(dls, resnet18, pretrained=True,
loss_func=CrossEntropyLossFlat(),
metrics=accuracy, model_dir="/tmp/model/",
cbs=AimCallback(repo='.', experiment='fastai_test'))
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-fastai)._
</details>
<details>
<summary>
Integrate LightGBM
</summary>
```python
from aim.lightgbm import AimCallback
# ...
aim_callback = AimCallback(experiment='lgb_test')
aim_callback.experiment['hparams'] = params
gbm = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
callbacks=[aim_callback, lgb.early_stopping(stopping_rounds=5)])
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-lightgbm)._
</details>
<details>
<summary>
Integrate PyTorch Ignite
</summary>
```python
from aim.pytorch_ignite import AimLogger
# ...
aim_logger = AimLogger()
aim_logger.log_params({
"model": model.__class__.__name__,
"pytorch_version": str(torch.__version__),
"ignite_version": str(ignite.__version__),
})
aim_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="train",
output_transform=lambda loss: {'loss': loss}
)
# ...
```
_See documentation [here](https://aimstack.readthedocs.io/en/latest/quick_start/integrations.html#integration-with-pytorch-ignite)._
</details>
# Comparisons to familiar tools
### Tensorboard
**Training run comparison**
Order of magnitude faster training run comparison with Aim
- The tracked params are first-class citizens in Aim. You can search, group and aggregate via params - deeply explore all the tracked data (metrics, params, images) on the UI (see the query sketch below).
- With TensorBoard, users are forced to record those parameters in the training run name to be able to search and compare. This makes comparison tedious and causes usability issues on the UI when there are many experiments and params. **TensorBoard doesn't have features to group or aggregate the metrics.**
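For illustration, a minimal sketch of such a params-aware query with the Aim SDK (the repo path and hyperparameter names below are placeholders):
```python
from aim import Repo

repo = Repo("/path/to/aim/repo")
# Filter by a tracked hyperparameter and a metric name in a single pythonic query
query = "run.hparams.batch_size == 32 and metric.name == 'loss'"
for run_metrics in repo.query_metrics(query).iter_runs():
    for metric in run_metrics:
        steps, values = metric.values.sparse_numpy()
```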
**Scalability**
- Aim is built to handle 1000s of training runs - both on the backend and on the UI.
- TensorBoard becomes really slow and hard to use when a few hundred training runs are queried / compared.
**Beloved TB visualizations to be added on Aim**
- Embedding projector.
- Neural network visualization.
### MLFlow
MLFlow is an end-to-end ML Lifecycle tool.
Aim is focused on training tracking.
The main differences between Aim and MLflow are around UI scalability and run comparison features.
**Run comparison**
- Aim treats tracked parameters as first-class citizens. Users can query runs, metrics, images and filter using the params.
- MLFlow does have a search by tracked config, but there is no grouping, aggregation, or subplotting by hyperparams, and no other comparison features are available.
**UI Scalability**
- Aim UI can handle several thousand metrics at the same time smoothly with 1000s of steps. It may get shaky when you explore 1000s of metrics with 10000s of steps each. But we are constantly optimizing!
- MLflow UI becomes slow to use when there are a few hundred runs.
### Weights and Biases
**Hosted vs. self-hosted**
- Weights and Biases is a hosted closed-source MLOps platform.
- Aim is a self-hosted, free and open-source experiment tracking tool.
# Roadmap
## Detailed Sprints
:sparkle: The [Aim product roadmap](https://github.com/orgs/aimhubio/projects/3)
- The `Backlog` contains the issues we are going to choose from and prioritize weekly
- The issues are mainly prioritized by the highly-requested features
## High-level roadmap
The high-level features we are going to work on the next few months
### Done
- [x] Live updates (Shipped: _Oct 18 2021_)
- [x] Images tracking and visualization (Start: _Oct 18 2021_, Shipped: _Nov 19 2021_)
- [x] Distributions tracking and visualization (Start: _Nov 10 2021_, Shipped: _Dec 3 2021_)
- [x] Jupyter integration (Start: _Nov 18 2021_, Shipped: _Dec 3 2021_)
- [x] Audio tracking and visualization (Start: _Dec 6 2021_, Shipped: _Dec 17 2021_)
- [x] Transcripts tracking and visualization (Start: _Dec 6 2021_, Shipped: _Dec 17 2021_)
- [x] Plotly integration (Start: _Dec 1 2021_, Shipped: _Dec 17 2021_)
- [x] Colab integration (Start: _Nov 18 2021_, Shipped: _Dec 17 2021_)
- [x] Centralized tracking server (Start: _Oct 18 2021_, Shipped: _Jan 22 2022_)
- [x] Tensorboard adaptor - visualize TensorBoard logs with Aim (Start: _Dec 17 2021_, Shipped: _Feb 3 2022_)
- [x] Track git info, env vars, CLI arguments, dependencies (Start: _Jan 17 2022_, Shipped: _Feb 3 2022_)
- [x] MLFlow adaptor (visualize MLflow logs with Aim) (Start: _Feb 14 2022_, Shipped: _Feb 22 2022_)
- [x] Activeloop Hub integration (Start: _Feb 14 2022_, Shipped: _Feb 22 2022_)
- [x] PyTorch-Ignite integration (Start: _Feb 14 2022_, Shipped: _Feb 22 2022_)
- [x] Run summary and overview info (system params, CLI args, git info, ...) (Start: _Feb 14 2022_, Shipped: _Mar 9 2022_)
- [x] Add DVC related metadata into aim run (Start: _Mar 7 2022_, Shipped: _Mar 26 2022_)
- [x] Ability to attach notes to Run from UI (Start: _Mar 7 2022_, Shipped: _Apr 29 2022_)
- [x] Fairseq integration (Start: _Mar 27 2022_, Shipped: _Mar 29 2022_)
- [x] LightGBM integration (Start: _Apr 14 2022_, Shipped: _May 17 2022_)
- [x] CatBoost integration (Start: _Apr 20 2022_, Shipped: _May 17 2022_)
- [x] Run execution details(display stdout/stderr logs) (Start: _Apr 25 2022_, Shipped: _May 17 2022_)
- [x] Long sequences(up to 5M of steps) support (Start: _Apr 25 2022_, Shipped: _Jun 22 2022_)
- [x] Figures Explorer (Start: _Mar 1 2022_, Shipped: _Aug 21 2022_)
- [x] Notify on stuck runs (Start: _Jul 22 2022_, Shipped: _Aug 21 2022_)
- [x] Integration with KerasTuner (Start: _Aug 10 2022_, Shipped: _Aug 21 2022_)
- [x] Integration with WandB (Start: _Aug 15 2022_, Shipped: _Aug 21 2022_)
- [x] Stable remote tracking server (Start: _Jun 15 2022_, Shipped: _Aug 21 2022_)
- [x] Integration with fast.ai (Start: _Aug 22 2022_, Shipped: _Oct 6 2022_)
- [x] Integration with MXNet (Start: _Sep 20 2022_, Shipped: _Oct 6 2022_)
- [x] Project overview page (Start: _Sep 1 2022_, Shipped: _Oct 6 2022_)
### In Progress
- [ ] Remote tracking server scaling (Start: _Sep 1 2022_)
- [ ] Aim SDK low-level interface (Start: _Aug 22 2022_)
### To Do
**Aim UI**
- Runs management
- Runs explorer – query and visualize runs data(images, audio, distributions, ...) in a central dashboard
- Explorers
- Audio Explorer
- Text Explorer
- Distributions Explorer
- Dashboards – customizable layouts with embedded explorers
**SDK and Storage**
- Scalability
- Smooth UI and SDK experience with over 10,000 runs
- Runs management
- CLI interfaces
- Reporting - runs summary and run details in a CLI compatible format
- Manipulations – copy, move, delete runs, params and sequences
**Integrations**
- ML Frameworks:
- Shortlist: MONAI, SpaCy, Raytune, PaddlePaddle
- Datasets versioning tools
- Shortlist: HuggingFace Datasets
- Resource management tools
- Shortlist: Kubeflow, Slurm
- Workflow orchestration tools
- Others: Hydra, Google MLMD, Streamlit, ...
### On hold
- scikit-learn integration
- Cloud storage support – store runs' blob data (e.g. images) on the cloud (Start: _Mar 21 2022_)
- Artifact storage – store files, model checkpoints, and beyond (Start: _Mar 21 2022_)
## Community
### If you have questions
1. [Read the docs](https://aimstack.readthedocs.io/en/latest/)
2. [Open a feature request or report a bug](https://github.com/aimhubio/aim/issues)
3. [Join Discord community server](https://community.aimstack.io/)
|
PypiClean
|
/django-allauth-james-0.20.0.tar.gz/django-allauth-james-0.20.0/allauth/socialaccount/providers/twitter/south_migrations/0003_tosocialaccount.py
|
from south.v2 import DataMigration
class Migration(DataMigration):
depends_on = (('socialaccount', '0002_genericmodels'),)
def forwards(self, orm):
# Migrate apps
app_id_to_sapp = {}
for app in orm.TwitterApp.objects.all():
sapp = orm['socialaccount.SocialApp'].objects \
.create(site=app.site,
provider='twitter',
name=app.name,
key=app.consumer_key,
secret=app.consumer_secret)
app_id_to_sapp[app.id] = sapp
# Migrate accounts
acc_id_to_sacc = {}
for acc in orm.TwitterAccount.objects.all():
sacc = acc.socialaccount_ptr
sacc.uid = str(acc.social_id)
sacc.extra_data = { 'screen_name': acc.username,
'profile_image_url': acc.profile_image_url }
sacc.provider = 'twitter'
sacc.save()
acc_id_to_sacc[acc.id] = sacc
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialaccount': {
'Meta': {'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'socialaccount.socialapp': {
'Meta': {'object_name': 'SocialApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
},
'socialaccount.socialtoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'twitter.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount', '_ormbases': ['socialaccount.SocialAccount']},
'profile_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'social_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'socialaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['socialaccount.SocialAccount']", 'unique': 'True', 'primary_key': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'twitter.twitterapp': {
'Meta': {'object_name': 'TwitterApp'},
'access_token_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'authorize_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'consumer_secret': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'request_token_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
}
}
complete_apps = ['socialaccount', 'twitter']
|
PypiClean
|
/fds.sdk.QuotesAPIforDigitalPortals-0.11.1-py3-none-any.whl/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20046.py
|
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response20046_data import InlineResponse20046Data
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response200_meta import InlineResponse200Meta
globals()['InlineResponse20046Data'] = InlineResponse20046Data
globals()['InlineResponse200Meta'] = InlineResponse200Meta
class InlineResponse20046(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ([InlineResponse20046Data],), # noqa: E501
'meta': (InlineResponse200Meta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'meta': 'meta', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse20046 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([InlineResponse20046Data]): List of benchmarks. [optional] # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse20046 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([InlineResponse20046Data]): List of benchmarks. [optional] # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
/mxnet_cu112-2.0.0b1-py3-none-manylinux2014_x86_64.whl/mxnet/numpy/utils.py
import numpy as onp
__all__ = ['float16', 'float32', 'float64', 'uint8', 'int32', 'int8', 'int64',
'int16', 'uint16', 'uint32', 'uint64',
'bool', 'bool_', 'pi', 'inf', 'nan', 'PZERO', 'NZERO', 'newaxis',
'e', 'NINF', 'PINF', 'NAN', 'NaN',
'_STR_2_DTYPE_', '_DTYPE_2_STR_', '_type_promotion_table',
'integer_dtypes', 'floating_dtypes', 'boolean_dtypes', 'numeric_dtypes']
py_bool = bool
float16 = onp.dtype(onp.float16)
float32 = onp.dtype(onp.float32)
float64 = onp.dtype(onp.float64)
uint8 = onp.dtype(onp.uint8)
int32 = onp.dtype(onp.int32)
int8 = onp.dtype(onp.int8)
int64 = onp.dtype(onp.int64)
bool_ = onp.dtype(onp.bool_)
bool = onp.dtype(onp.bool)
int16 = onp.dtype(onp.int16)
uint16 = onp.dtype(onp.uint16)
uint32 = onp.dtype(onp.uint32)
uint64 = onp.dtype(onp.uint64)
pi = onp.pi
inf = onp.inf
nan = onp.nan
PZERO = onp.PZERO
NZERO = onp.NZERO
NINF = onp.NINF
PINF = onp.PINF
e = onp.e
NAN = onp.NAN
NaN = onp.NaN
newaxis = None
_STR_2_DTYPE_ = {'float16': float16, 'float32': float32, 'float64': float64, 'float': float64,
'int8': int8, 'int16': int16, 'int32': int32, 'int64': int64, 'int': int64,
'uint8': uint8, 'uint16': uint16, 'uint32': uint32, 'uint64': uint64,
'bool': bool, 'bool_': bool_, 'None': None}
_DTYPE_2_STR_ = {float16: 'float16', float32: 'float32', float64: 'float64', float: 'float64',
int8: 'int8', int16: 'int16', int32: 'int32', int64: 'int64', int:'int64',
uint8: 'uint8', uint16: 'uint16', uint32: 'uint32', uint64: 'uint64',
bool: 'bool', bool_: 'bool_', py_bool: 'bool', None: 'None'}
_ONP_OP_MODULES = [onp, onp.linalg, onp.random, onp.fft]
def _get_np_op(name):
"""Get official NumPy operator with `name`. If not found, raise ValueError."""
for mod in _ONP_OP_MODULES:
op = getattr(mod, name, None)
if op is not None:
return op
raise ValueError('Operator `{}` is not supported by `mxnet.numpy`.'.format(name))
_type_promotion_table = {
# signed integer type promotion
(int8, int8): int8,
(int8, int16): int16,
(int8, int32): int32,
(int8, int64): int64,
(int16, int16): int16,
(int16, int32): int32,
(int16, int64): int64,
(int32, int32): int32,
(int32, int64): int64,
(int64, int64): int64,
# unsigned integer type promotion
(uint8, uint8): uint8,
(uint8, uint16): uint16,
(uint8, uint32): uint32,
(uint8, uint64): uint64,
(uint16, uint16): uint16,
(uint16, uint32): uint32,
(uint16, uint64): uint64,
(uint32, uint32): uint32,
(uint32, uint64): uint64,
(uint64, uint64): uint64,
# mixed signed and unsigned integer type promotion
(int8, uint8): int16,
(int8, uint16): int32,
(int8, uint32): int64,
(int16, uint8): int16,
(int16, uint16): int32,
(int16, uint32): int64,
(int32, uint8): int32,
(int32, uint16): int32,
(int32, uint32): int64,
(int64, uint8): int64,
(int64, uint16): int64,
(int64, uint32): int64,
# float type promotion
(float16, float16): float16,
(float16, float32): float32,
(float16, float64): float64,
(float32, float32): float32,
(float32, float64): float64,
(float64, float64): float64,
# bool type promotion
(bool, bool): bool,
# mixed integer and float16 type promotion
(int8, float16): float16,
(int16, float16): float16,
(int32, float16): float16,
(int64, float16): float16,
(uint8, float16): float16,
(uint16, float16): float16,
(uint32, float16): float16,
(uint64, float16): float16,
    # mixed integer and float32 type promotion
    (int8, float32): float32,
    (int16, float32): float32,
    (int32, float32): float32,
    (int64, float32): float32,
    (uint8, float32): float32,
    (uint16, float32): float32,
    (uint32, float32): float32,
    (uint64, float32): float32,
# mixed integer and float64 type promotion
(int8, float64): float64,
(int16, float64): float64,
(int32, float64): float64,
(int64, float64): float64,
(uint8, float64): float64,
(uint16, float64): float64,
(uint32, float64): float64,
(uint64, float64): float64,
# mixed bool and other type promotion
(bool, int8): int8,
(bool, int16): int16,
(bool, int32): int32,
(bool, int64): int64,
(bool, uint8): uint8,
(bool, uint16): uint16,
(bool, uint32): uint32,
(bool, uint64): uint64,
(bool, float16): float16,
(bool, float32): float32,
(bool, float64): float64,
}
integer_dtypes = [
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
]
floating_dtypes = [
float16,
float32,
float64,
]
numeric_dtypes = [
*integer_dtypes,
*floating_dtypes,
]
boolean_dtypes = [
bool_,
]
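# Illustrative sanity checks for the helpers above (not part of the module's
# public API); the expected values follow directly from the tables defined here.
if __name__ == '__main__':
    assert _type_promotion_table[(int8, float32)] == float32
    assert _type_promotion_table[(bool, int64)] == int64
    inv = _get_np_op('inv')          # falls back to onp.linalg.inv
    uniform = _get_np_op('uniform')  # falls back to onp.random.uniform
    print(_DTYPE_2_STR_[_type_promotion_table[(int16, uint16)]])  # prints 'int32'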
/scikit-optimize-w-2020.1.2.tar.gz/scikit-optimize-w-2020.1.2/doc/whats_new/v0.7.rst
.. include:: _contributors.rst
.. currentmodule:: skopt
.. _changes_0_7_2:
Version 0.7.2
=============
**February 2020**
:mod:`skopt.optimizer`
----------------------
- |Feature| update_next() and get_results() added to Optimizer, and more
  examples added
:pr:`837` by :user:`Holger Nahrstaedt <holgern>` and
:user:`Sigurd Carlsen <sigurdcarlsen>`
- |Fix| Fix random forest regressor (Add missing min_impurity_decrease)
:pr:`829` by :user:`Holger Nahrstaedt <holgern>`
:mod:`skopt.utils`
------------------
- |Enhancement| Add expected_minimum_random_sampling
:pr:`830` by :user:`Holger Nahrstaedt <holgern>`
- |Fix| Return an OrderedDict in point_asdict and add some more unit tests.
:pr:`840` by :user:`Holger Nahrstaedt <holgern>`
- |Enhancement| Added `check_list_types` and `check_dimension_names`
:pr:`803` by :user:`Hvass-Labs <Hvass-Labs>` and
:user:`Holger Nahrstaedt <holgern>`
:mod:`skopt.plots`
------------------
- |Enhancement| Add more parameters to plot_objective and more plot examples
:pr:`830` by :user:`Holger Nahrstaedt <holgern>` and
:user:`Sigurd Carlsen <sigurdcarlsen>`
:mod:`skopt.searchcv`
---------------------
- |Fix| Fix searchcv rank (issue #831)
:pr:`832` by :user:`Holger Nahrstaedt <holgern>`
:mod:`skopt.space`
------------------
* |Fix| Fix integer normalize by using round()
:pr:`830` by :user:`Holger Nahrstaedt <holgern>`
Miscellaneous
-------------
* |Fix| Fix doc examples
* |Fix| Fix license detection in github
:pr:`827` by :user:`Holger Nahrstaedt <holgern>`
* |Enhancement| Add doctest to CI
.. _changes_0_7_1:
Version 0.7.1
=============
**February 2020**
:mod:`skopt.space`
------------------
* |Fix| Fix categorical space (issue #821)
:pr:`823` by :user:`Holger Nahrstaedt <holgern>`
* |Enhancement| int can be set as dtype to fix issue #790
:pr:`807` by :user:`Holger Nahrstaedt <holgern>`
* |Feature| New StringEncoder, can be used in Categoricals
* Remove string conversion in Identity
* |Enhancement| dtype can be set in Integer and Real
Miscellaneous
-------------
- Sphinx documentation
:pr:`809` by :user:`Holger Nahrstaedt <holgern>`
- notebooks are replaced by sphinx-gallery
:pr:`811` by :user:`Holger Nahrstaedt <holgern>`
- Improve sphinx doc
:pr:`819` by :user:`Holger Nahrstaedt <holgern>`
- Old pdoc scripts are removed and replaced by sphinx
:pr:`822` by :user:`Holger Nahrstaedt <holgern>`
.. _changes_0_7:
Version 0.7
===========
**January 2020**
:mod:`skopt.optimizer`
----------------------
- |Enhancement| The model queue now has a customizable size (model_queue_size).
:pr:`803` by :user:`Kajetan Tukendorf <Bacoknight>` and
:user:`Holger Nahrstaedt <holgern>`
- |Enhancement| Add log-uniform prior to Integer space
:pr:`805` by :user:`Alex Liebscher <liebscher>`
:mod:`skopt.plots`
------------------
- |Enhancement| Support for plotting categorical dimensions
:pr:`806` by :user:`jkleint <jkleint>`
:mod:`skopt.searchcv`
---------------------
- |Fix| Allow BayesSearchCV to work with sklearn 0.21.
:pr:`777` by :user:`Kit Choi <kitchoi>`
Miscellaneous
-------------
- |Fix| Reduce the number of deprecation warnings in unit tests
  :pr:`808` by :user:`Holger Nahrstaedt <holgern>`
- |Fix| Reduce the number of deprecation warnings in unit tests
  :pr:`802` by :user:`Alex Liebscher <liebscher>`
- joblib instead of sklearn.externals.joblib
:pr:`776` by :user:`Vince Jankovics <vakker>`
- Improve Travis CI unit tests (different sklearn versions are checked)
:pr:`804` by :user:`Holger Nahrstaedt <holgern>`
- Removed `versioneer` support, to keep things simple and to fix pypi deploy
:pr:`816` by :user:`Holger Nahrstaedt <holgern>`
/taskcc-alipay-sdk-python-3.3.398.tar.gz/taskcc-alipay-sdk-python-3.3.398/alipay/aop/api/request/AlipayCommerceCityfacilitatorVoucherBatchqueryRequest.py
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceCityfacilitatorVoucherBatchqueryModel import AlipayCommerceCityfacilitatorVoucherBatchqueryModel
class AlipayCommerceCityfacilitatorVoucherBatchqueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceCityfacilitatorVoucherBatchqueryModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceCityfacilitatorVoucherBatchqueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.cityfacilitator.voucher.batchquery'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
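# A minimal usage sketch (all values below are placeholders, not real Alipay
# parameters or credentials):
if __name__ == '__main__':
    request = AlipayCommerceCityfacilitatorVoucherBatchqueryRequest()
    request.biz_content = {"page_no": "1", "page_size": "20"}  # converted via from_alipay_dict
    request.notify_url = "https://example.com/notify"
    request.add_other_text_param("channel", "demo")
    params = request.get_params()
    print(params[P_METHOD])  # 'alipay.commerce.cityfacilitator.voucher.batchquery'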
/minimal-snowplow-tracker-0.0.2.tar.gz/minimal-snowplow-tracker-0.0.2/snowplow_tracker/subject.py
SUPPORTED_PLATFORMS = set(["pc", "tv", "mob", "cnsl", "iot", "web", "srv", "app"])
DEFAULT_PLATFORM = "pc"
class Subject(object):
"""
Class for an event subject, where we view events as of the form
(Subject) -> (Verb) -> (Object)
"""
def __init__(self):
self.standard_nv_pairs = {"p": DEFAULT_PLATFORM}
def set_platform(self, value):
"""
:param value: One of ["pc", "tv", "mob", "cnsl", "iot", "web", "srv", "app"]
:type value: supported_platform
:rtype: subject
"""
self.standard_nv_pairs["p"] = value
return self
def set_user_id(self, user_id):
"""
:param user_id: User ID
:type user_id: string
:rtype: subject
"""
self.standard_nv_pairs["uid"] = user_id
return self
def set_screen_resolution(self, width, height):
"""
:param width: Width of the screen
:param height: Height of the screen
:type width: int,>0
:type height: int,>0
:rtype: subject
"""
self.standard_nv_pairs["res"] = "".join([str(width), "x", str(height)])
return self
def set_viewport(self, width, height):
"""
:param width: Width of the viewport
:param height: Height of the viewport
:type width: int,>0
:type height: int,>0
:rtype: subject
"""
self.standard_nv_pairs["vp"] = "".join([str(width), "x", str(height)])
return self
def set_color_depth(self, depth):
"""
:param depth: Depth of the color on the screen
:type depth: int
:rtype: subject
"""
self.standard_nv_pairs["cd"] = depth
return self
def set_timezone(self, timezone):
"""
:param timezone: Timezone as a string
:type timezone: string
:rtype: subject
"""
self.standard_nv_pairs["tz"] = timezone
return self
def set_lang(self, lang):
"""
Set language.
:param lang: Language the application is set to
:type lang: string
:rtype: subject
"""
self.standard_nv_pairs["lang"] = lang
return self
def set_domain_user_id(self, duid):
"""
Set the domain user ID
:param duid: Domain user ID
:type duid: string
:rtype: subject
"""
self.standard_nv_pairs["duid"] = duid
return self
def set_ip_address(self, ip):
"""
        Set the IP address
:param ip: IP address
:type ip: string
:rtype: subject
"""
self.standard_nv_pairs["ip"] = ip
return self
def set_useragent(self, ua):
"""
Set the user agent
:param ua: User agent
:type ua: string
:rtype: subject
"""
self.standard_nv_pairs["ua"] = ua
return self
def set_network_user_id(self, nuid):
"""
Set the network user ID field
This overwrites the nuid field set by the collector
:param nuid: Network user ID
:type nuid: string
:rtype: subject
"""
self.standard_nv_pairs["tnuid"] = nuid
return self
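# Example: building a subject by chaining the setters above (values are
# illustrative only).
if __name__ == '__main__':
    subject = (
        Subject()
        .set_platform("web")
        .set_user_id("user-12345")
        .set_screen_resolution(1920, 1080)
        .set_lang("en")
    )
    print(subject.standard_nv_pairs)
    # {'p': 'web', 'uid': 'user-12345', 'res': '1920x1080', 'lang': 'en'}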
/cli_chess-1.1.0-py3-none-any.whl/cli_chess/menus/menu_presenter.py
from __future__ import annotations
from cli_chess.utils.logging import log
from cli_chess.utils.event import Event
from enum import Enum
from typing import TYPE_CHECKING, List
if TYPE_CHECKING:
from cli_chess.menus import MenuOption, MultiValueMenuOption, MenuCategory, MenuModel, MultiValueMenuModel, MenuView, MultiValueMenuView
class MenuPresenter:
def __init__(self, model: MenuModel, view: MenuView):
self.model = model
self.view = view
self.selection = self.model.get_menu_options()[0].option
self.e_selection_updated = Event()
def get_menu_category(self) -> MenuCategory:
"""Get the menu category"""
return self.model.get_menu_category()
def get_menu_options(self) -> List[MenuOption]:
"""Returns all menu options regardless of their enabled/visibility state"""
return self.model.get_menu_options()
def get_visible_menu_options(self) -> List[MenuOption]:
"""Returns all menu options which are visible"""
visible_options = []
for opt in self.get_menu_options():
            if opt.visible:
                visible_options.append(opt)
return visible_options
def select_handler(self, selected_option: int):
"""Called on menu item selection. Classes that inherit from
this class should override this method if specific tasks
need to execute when the selected option changes
"""
try:
self.selection = self.model.get_menu_options()[selected_option].option
log.debug(f"Menu selection: {self.selection}")
self._notify_selection_updated(self.selection)
except Exception as e:
# Todo: Print error to view element
log.exception(f"Exception caught: {e}")
raise e
def has_focus(self) -> bool:
"""Queries the view to determine if the menu has focus"""
return self.view.has_focus()
def _notify_selection_updated(self, selected_option: int) -> None:
"""Notifies listeners that the selection has been updated"""
self.e_selection_updated.notify(selected_option)
class MultiValueMenuPresenter(MenuPresenter):
def __init__(self, model: MultiValueMenuModel, view: MultiValueMenuView):
self.model = model
self.view = view
super().__init__(self.model, self.view)
def value_cycled_handler(self, selected_option: Enum) -> None:
"""Called when the selected options value is cycled. Classes that inherit from
this class should override this method if they need to
be alerted when the selected option changes
"""
pass
def get_menu_options(self) -> List[MultiValueMenuOption]:
"""Returns all menu options regardless of their enabled/visibility state"""
return super().get_menu_options()
def get_visible_menu_options(self) -> List[MultiValueMenuOption]:
"""Returns all menu options which are visible"""
return super().get_visible_menu_options()
/ndms2_client-0.1.2.tar.gz/ndms2_client-0.1.2/ndms2_client/client.py
import logging
import re
from typing import Dict, List, Tuple, Union, NamedTuple, Optional
from .connection import Connection
_LOGGER = logging.getLogger(__name__)
_VERSION_CMD = 'show version'
_ARP_CMD = 'show ip arp'
_ASSOCIATIONS_CMD = 'show associations'
_HOTSPOT_CMD = 'show ip hotspot'
_INTERFACE_CMD = 'show interface %s'
_INTERFACES_CMD = 'show interface'
_ARP_REGEX = re.compile(
r'(?P<name>.*?)\s+' +
r'(?P<ip>([0-9]{1,3}[.]){3}[0-9]{1,3})?\s+' +
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s+' +
r'(?P<interface>([^ ]+))\s+'
)
class Device(NamedTuple):
mac: str
name: str
ip: str
interface: str
class RouterInfo(NamedTuple):
name: str
fw_version: str
fw_channel: str
model: str
hw_version: str
manufacturer: str
vendor: str
region: str
@classmethod
def from_dict(cls, info: dict) -> "RouterInfo":
return RouterInfo(
name=str(info.get('description', info.get('model', 'NDMS2 Router'))),
fw_version=str(info.get('title', info.get('release'))),
fw_channel=str(info.get('sandbox', 'unknown')),
model=str(info.get('model', info.get('hw_id'))),
hw_version=str(info.get('hw_version', 'N/A')),
manufacturer=str(info.get('manufacturer')),
vendor=str(info.get('vendor')),
region=str(info.get('region', 'N/A')),
)
class InterfaceInfo(NamedTuple):
name: str
type: Optional[str]
description: Optional[str]
link: Optional[str]
connected: Optional[str]
state: Optional[str]
mtu: Optional[int]
address: Optional[str]
mask: Optional[str]
uptime: Optional[int]
security_level: Optional[str]
mac: Optional[str]
@classmethod
def from_dict(cls, info: dict) -> "InterfaceInfo":
return InterfaceInfo(
name=_str(info.get('interface-name')) or str(info['id']),
type=_str(info.get('type')),
description=_str(info.get('description')),
link=_str(info.get('link')),
connected=_str(info.get('connected')),
state=_str(info.get('state')),
mtu=_int(info.get('mtu')),
address=_str(info.get('address')),
mask=_str(info.get('mask')),
uptime=_int(info.get('uptime')),
security_level=_str(info.get('security-level')),
mac=_str(info.get('mac')),
)
class Client(object):
def __init__(self, connection: Connection):
self._connection = connection
def get_router_info(self) -> RouterInfo:
info = _parse_dict_lines(self._connection.run_command(_VERSION_CMD))
_LOGGER.debug('Raw router info: %s', str(info))
assert isinstance(info, dict), 'Router info response is not a dictionary'
return RouterInfo.from_dict(info)
def get_interfaces(self) -> List[InterfaceInfo]:
collection = _parse_collection_lines(self._connection.run_command(_INTERFACES_CMD))
_LOGGER.debug('Raw interfaces info: %s', str(collection))
assert isinstance(collection, list), 'Interfaces info response is not a collection'
return [InterfaceInfo.from_dict(info) for info in collection]
def get_interface_info(self, interface_name) -> Optional[InterfaceInfo]:
info = _parse_dict_lines(self._connection.run_command(_INTERFACE_CMD % interface_name))
_LOGGER.debug('Raw interface info: %s', str(info))
assert isinstance(info, dict), 'Interface info response is not a dictionary'
if 'id' in info:
return InterfaceInfo.from_dict(info)
return None
def get_devices(self, *, try_hotspot=True, include_arp=True, include_associated=True) -> List[Device]:
"""
Fetches a list of connected devices online
:param try_hotspot: first try `ip hotspot` command.
This is the most precise information on devices known to be online
        :param include_arp: include devices from the ARP table; used when try_hotspot
            is False or when no hotspot devices were detected
        :param include_associated: include devices from wireless association info
        :return: list of devices merged from the requested sources
"""
devices = []
if try_hotspot:
devices = _merge_devices(devices, self.get_hotspot_devices())
if len(devices) > 0:
return devices
if include_arp:
devices = _merge_devices(devices, self.get_arp_devices())
if include_associated:
devices = _merge_devices(devices, self.get_associated_devices())
return devices
def get_hotspot_devices(self) -> List[Device]:
hotspot_info = self.__get_hotspot_info()
return [Device(
mac=info.get('mac').upper(),
name=info.get('name'),
ip=info.get('ip'),
interface=info['interface'].get('name', '')
) for info in hotspot_info.values() if 'interface' in info and info.get('link') == 'up']
def get_arp_devices(self) -> List[Device]:
lines = self._connection.run_command(_ARP_CMD)
result = _parse_table_lines(lines, _ARP_REGEX)
return [Device(
mac=info.get('mac').upper(),
name=info.get('name') or None,
ip=info.get('ip'),
interface=info.get('interface')
) for info in result if info.get('mac') is not None]
def get_associated_devices(self):
associations = _parse_dict_lines(self._connection.run_command(_ASSOCIATIONS_CMD))
items = associations.get('station', [])
if not isinstance(items, list):
items = [items]
aps = set([info.get('ap') for info in items])
ap_to_bridge = {}
for ap in aps:
ap_info = _parse_dict_lines(self._connection.run_command(_INTERFACE_CMD % ap))
ap_to_bridge[ap] = ap_info.get('group') or ap_info.get('interface-name')
# try enriching the results with hotspot additional info
hotspot_info = self.__get_hotspot_info()
devices = []
for info in items:
mac = info.get('mac')
if mac is not None and info.get('authenticated') in ['1', 'yes']:
host_info = hotspot_info.get(mac)
devices.append(Device(
mac=mac.upper(),
name=host_info.get('name') if host_info else None,
ip=host_info.get('ip') if host_info else None,
interface=ap_to_bridge.get(info.get('ap'), info.get('ap'))
))
return devices
# hotspot info is only available in newest firmware (2.09 and up) and in router mode
    # however, a missing-command error just results in an empty dict being returned
def __get_hotspot_info(self):
info = _parse_dict_lines(self._connection.run_command(_HOTSPOT_CMD))
items = info.get('host', [])
if not isinstance(items, list):
items = [items]
return {item.get('mac'): item for item in items}
def _str(value: Optional[any]) -> Optional[str]:
if value is None:
return None
return str(value)
def _int(value: Optional[any]) -> Optional[int]:
if value is None:
return None
return int(value)
def _merge_devices(*lists: List[Device]) -> List[Device]:
res = {}
for l in lists:
for dev in l:
key = (dev.interface, dev.mac)
if key in res:
old_dev = res.get(key)
res[key] = Device(
mac=old_dev.mac,
name=old_dev.name or dev.name,
ip=old_dev.ip or dev.ip,
interface=old_dev.interface
)
else:
res[key] = dev
return list(res.values())
def _parse_table_lines(lines: List[str], regex: re) -> List[Dict[str, any]]:
"""Parse the lines using the given regular expression.
If a line can't be parsed it is logged and skipped in the output.
"""
results = []
for line in lines:
match = regex.search(line)
if not match:
_LOGGER.debug('Could not parse line: %s', line)
continue
results.append(match.groupdict())
return results
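# For example, a typical `show ip arp` line is parsed into a dict of the named
# groups (the sample line below is made up):
#
#   _parse_table_lines(["my-laptop 192.168.1.10 aa:bb:cc:dd:ee:ff Bridge0 "], _ARP_REGEX)
#   # -> [{'name': 'my-laptop', 'ip': '192.168.1.10',
#   #      'mac': 'aa:bb:cc:dd:ee:ff', 'interface': 'Bridge0'}]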
def _fix_continuation_lines(lines: List[str]) -> List[str]:
indent = 0
continuation_possible = False
fixed_lines = [] # type: List[str]
for line in lines:
if len(line.strip()) == 0:
continue
if continuation_possible and len(line[:indent].strip()) == 0:
prev_line = fixed_lines.pop()
line = prev_line.rstrip() + line[(indent + 1):].lstrip()
else:
assert ':' in line, 'Found a line with no colon when continuation is not possible: ' + line
colon_pos = line.index(':')
comma_pos = line.index(',') if ',' in line[:colon_pos] else None
indent = comma_pos if comma_pos is not None else colon_pos
continuation_possible = len(line[(indent + 1):].strip()) > 0
fixed_lines.append(line)
return fixed_lines
def _parse_dict_lines(lines: List[str]) -> Dict[str, any]:
response = {}
indent = 0
stack = [(None, indent, response)] # type: List[Tuple[str, int, Union[str, dict]]]
stack_level = 0
for line in _fix_continuation_lines(lines):
if len(line.strip()) == 0:
continue
_LOGGER.debug(line)
# exploding the line
colon_pos = line.index(':')
comma_pos = line.index(',') if ',' in line[:colon_pos] else None
key = line[:colon_pos].strip()
value = line[(colon_pos + 1):].strip()
new_indent = comma_pos if comma_pos is not None else colon_pos
# assuming line is like 'mac-access, id = Bridge0: ...'
if comma_pos is not None:
key = line[:comma_pos].strip()
value = {key: value} if value != '' else {}
args = line[comma_pos + 1:colon_pos].split(',')
for arg in args:
sub_key, sub_value = [p.strip() for p in arg.split('=', 1)]
value[sub_key] = sub_value
# up and down the stack
if new_indent > indent: # new line is a sub-value of parent
stack_level += 1
indent = new_indent
stack.append(None)
else:
while new_indent < indent and len(stack) > 0: # getting one level up
stack_level -= 1
stack.pop()
_, indent, _ = stack[stack_level]
if stack_level < 1:
break
assert indent == new_indent, 'Irregular indentation detected'
stack[stack_level] = key, indent, value
# current containing object
obj_key, obj_indent, obj = stack[stack_level - 1]
# we are the first child of the containing object
if not isinstance(obj, dict):
# need to convert it from empty string to empty object
assert obj == '', 'Unexpected nested object format'
_, _, parent_obj = stack[stack_level - 2]
obj = {}
# containing object might be in a list also
if isinstance(parent_obj[obj_key], list):
parent_obj[obj_key].pop()
parent_obj[obj_key].append(obj)
else:
parent_obj[obj_key] = obj
stack[stack_level - 1] = obj_key, obj_indent, obj
# current key is already in object means there should be an array of values
if key in obj:
if not isinstance(obj[key], list):
obj[key] = [obj[key]]
obj[key].append(value)
else:
obj[key] = value
return response
def _parse_collection_lines(lines: List[str]) -> List[Dict[str, any]]:
_HEADER_REGEXP = re.compile(r'^(\w+),\s*name\s*=\s*\"([^"]+)\"')
result = []
item_lines = [] # type: List[str]
for line in lines:
if len(line.strip()) == 0:
continue
match = _HEADER_REGEXP.match(line)
if match:
if len(item_lines) > 0:
result.append(_parse_dict_lines(item_lines))
item_lines = []
else:
item_lines.append(line)
if len(item_lines) > 0:
result.append(_parse_dict_lines(item_lines))
return result
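# A minimal usage sketch. `TelnetConnection` is assumed to be a concrete
# `Connection` implementation provided by the package's `connection` module;
# the host and credentials below are placeholders:
#
#     from ndms2_client.connection import TelnetConnection
#
#     connection = TelnetConnection("192.168.1.1", 23, "admin", "password")
#     connection.connect()
#     client = Client(connection)
#     print(client.get_router_info())
#     for device in client.get_devices():
#         print(device.mac, device.name, device.ip, device.interface)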
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap-table/src/locale/bootstrap-table-bg-BG.js
$.fn.bootstrapTable.locales['bg-BG'] = $.fn.bootstrapTable.locales['bg'] = {
formatCopyRows () {
return 'Copy Rows'
},
formatPrint () {
return 'Print'
},
formatLoadingMessage () {
return 'Зареждане, моля изчакайте'
},
formatRecordsPerPage (pageNumber) {
return `${pageNumber} реда на страница`
},
formatShowingRows (pageFrom, pageTo, totalRows, totalNotFiltered) {
if (totalNotFiltered !== undefined && totalNotFiltered > 0 && totalNotFiltered > totalRows) {
return `Показани редове от ${pageFrom} до ${pageTo} от ${totalRows} реда (филтрирани от общо ${totalNotFiltered} реда)`
}
return `Показани редове от ${pageFrom} до ${pageTo} от общо ${totalRows} реда`
},
formatSRPaginationPreText () {
return 'предишна страница'
},
formatSRPaginationPageText (page) {
return `до страница ${page}`
},
formatSRPaginationNextText () {
return 'следваща страница'
},
formatDetailPagination (totalRows) {
return `Показани ${totalRows} реда`
},
formatClearSearch () {
return 'Изчистване на търсенето'
},
formatSearch () {
return 'Търсене'
},
formatNoMatches () {
return 'Не са намерени съвпадащи записи'
},
formatPaginationSwitch () {
return 'Скриване/Показване на странициране'
},
formatPaginationSwitchDown () {
return 'Показване на странициране'
},
formatPaginationSwitchUp () {
return 'Скриване на странициране'
},
formatRefresh () {
return 'Обновяване'
},
formatToggle () {
return 'Превключване'
},
formatToggleOn () {
return 'Показване на изглед карта'
},
formatToggleOff () {
return 'Скриване на изглед карта'
},
formatColumns () {
return 'Колони'
},
formatColumnsToggleAll () {
return 'Превключване на всички'
},
formatFullscreen () {
return 'Цял екран'
},
formatAllRows () {
return 'Всички'
},
formatAutoRefresh () {
return 'Автоматично обновяване'
},
formatExport () {
return 'Експорт на данни'
},
formatJumpTo () {
return 'ОТИДИ'
},
formatAdvancedSearch () {
return 'Разширено търсене'
},
formatAdvancedCloseButton () {
return 'Затваряне'
},
formatFilterControlSwitch () {
return 'Hide/Show controls'
},
formatFilterControlSwitchHide () {
return 'Hide controls'
},
formatFilterControlSwitchShow () {
return 'Show controls'
}
}
$.extend($.fn.bootstrapTable.defaults, $.fn.bootstrapTable.locales['bg-BG'])
/distributions_gbi-0.1.tar.gz/distributions_gbi-0.1/distributions_gbi/Gaussiandistribution.py
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
/Pillow-SIMD-9.0.0.post0.tar.gz/Pillow-SIMD-9.0.0.post0/docs/reference/Image.rst
.. py:module:: PIL.Image
.. py:currentmodule:: PIL.Image
:py:mod:`~PIL.Image` Module
===========================
The :py:mod:`~PIL.Image` module provides a class with the same name which is
used to represent a PIL image. The module also provides a number of factory
functions, including functions to load images from files, and to create new
images.
Examples
--------
Open, rotate, and display an image (using the default viewer)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following script loads an image, rotates it 45 degrees, and displays it
using an external viewer (usually xv on Unix, and the Paint program on
Windows).
.. code-block:: python
from PIL import Image
with Image.open("hopper.jpg") as im:
im.rotate(45).show()
Create thumbnails
^^^^^^^^^^^^^^^^^
The following script creates nice thumbnails of all JPEG images in the
current directory preserving aspect ratios with 128x128 max resolution.
.. code-block:: python
from PIL import Image
import glob, os
size = 128, 128
for infile in glob.glob("*.jpg"):
file, ext = os.path.splitext(infile)
with Image.open(infile) as im:
im.thumbnail(size)
im.save(file + ".thumbnail", "JPEG")
Functions
---------
.. autofunction:: open
.. warning::
To protect against potential DOS attacks caused by "`decompression bombs`_" (i.e. malicious files
which decompress into a huge amount of data and are designed to crash or cause disruption by using up
a lot of memory), Pillow will issue a ``DecompressionBombWarning`` if the number of pixels in an
image is over a certain limit, :py:data:`PIL.Image.MAX_IMAGE_PIXELS`.
This threshold can be changed by setting :py:data:`PIL.Image.MAX_IMAGE_PIXELS`. It can be disabled
by setting ``Image.MAX_IMAGE_PIXELS = None``.
If desired, the warning can be turned into an error with
``warnings.simplefilter('error', Image.DecompressionBombWarning)`` or suppressed entirely with
``warnings.simplefilter('ignore', Image.DecompressionBombWarning)``. See also
`the logging documentation`_ to have warnings output to the logging facility instead of stderr.
If the number of pixels is greater than twice :py:data:`PIL.Image.MAX_IMAGE_PIXELS`, then a
``DecompressionBombError`` will be raised instead.
.. _decompression bombs: https://en.wikipedia.org/wiki/Zip_bomb
.. _the logging documentation: https://docs.python.org/3/library/logging.html#integration-with-the-warnings-module
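For scripts that knowingly process very large but trusted images, the limit can
be raised (or removed) before opening the file; the value below is illustrative:

.. code-block:: python

    from PIL import Image

    Image.MAX_IMAGE_PIXELS = 300_000_000  # raise the pixel-count threshold
    # Image.MAX_IMAGE_PIXELS = None       # or disable the check for trusted input

    with Image.open("very_large.tif") as im:
        im.load()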
Image processing
^^^^^^^^^^^^^^^^
.. autofunction:: alpha_composite
.. autofunction:: blend
.. autofunction:: composite
.. autofunction:: eval
.. autofunction:: merge
Constructing images
^^^^^^^^^^^^^^^^^^^
.. autofunction:: new
.. autofunction:: fromarray
.. autofunction:: frombytes
.. autofunction:: frombuffer
Generating images
^^^^^^^^^^^^^^^^^
.. autofunction:: effect_mandelbrot
.. autofunction:: effect_noise
.. autofunction:: linear_gradient
.. autofunction:: radial_gradient
Registering plugins
^^^^^^^^^^^^^^^^^^^
.. note::
These functions are for use by plugin authors. Application authors can
ignore them.
.. autofunction:: register_open
.. autofunction:: register_mime
.. autofunction:: register_save
.. autofunction:: register_save_all
.. autofunction:: register_extension
.. autofunction:: register_extensions
.. autofunction:: registered_extensions
.. autofunction:: register_decoder
.. autofunction:: register_encoder
The Image Class
---------------
.. autoclass:: PIL.Image.Image
An instance of the :py:class:`~PIL.Image.Image` class has the following
methods. Unless otherwise stated, all methods return a new instance of the
:py:class:`~PIL.Image.Image` class, holding the resulting image.
.. automethod:: PIL.Image.Image.alpha_composite
.. automethod:: PIL.Image.Image.convert
The following example converts an RGB image (linearly calibrated according to
ITU-R 709, using the D65 illuminant) to the CIE XYZ color space:
.. code-block:: python
rgb2xyz = (
0.412453, 0.357580, 0.180423, 0,
0.212671, 0.715160, 0.072169, 0,
0.019334, 0.119193, 0.950227, 0)
out = im.convert("RGB", rgb2xyz)
.. automethod:: PIL.Image.Image.copy
.. automethod:: PIL.Image.Image.crop
This crops the input image with the provided coordinates:
.. code-block:: python
from PIL import Image
with Image.open("hopper.jpg") as im:
# The crop method from the Image module takes four coordinates as input.
# The right can also be represented as (left+width)
# and lower can be represented as (upper+height).
(left, upper, right, lower) = (20, 20, 100, 100)
# Here the image "im" is cropped and assigned to new variable im_crop
im_crop = im.crop((left, upper, right, lower))
.. automethod:: PIL.Image.Image.draft
.. automethod:: PIL.Image.Image.effect_spread
.. automethod:: PIL.Image.Image.entropy
.. automethod:: PIL.Image.Image.filter
This blurs the input image using a filter from the ``ImageFilter`` module:
.. code-block:: python
from PIL import Image, ImageFilter
with Image.open("hopper.jpg") as im:
# Blur the input image using the filter ImageFilter.BLUR
im_blurred = im.filter(filter=ImageFilter.BLUR)
.. automethod:: PIL.Image.Image.frombytes
.. automethod:: PIL.Image.Image.getbands
This helps to get the bands of the input image:
.. code-block:: python
from PIL import Image
with Image.open("hopper.jpg") as im:
print(im.getbands()) # Returns ('R', 'G', 'B')
.. automethod:: PIL.Image.Image.getbbox
This helps to get the bounding box coordinates of the input image:
.. code-block:: python
from PIL import Image
with Image.open("hopper.jpg") as im:
print(im.getbbox())
# Returns four coordinates in the format (left, upper, right, lower)
.. automethod:: PIL.Image.Image.getchannel
.. automethod:: PIL.Image.Image.getcolors
.. automethod:: PIL.Image.Image.getdata
.. automethod:: PIL.Image.Image.getexif
.. automethod:: PIL.Image.Image.getextrema
.. automethod:: PIL.Image.Image.getpalette
.. automethod:: PIL.Image.Image.getpixel
.. automethod:: PIL.Image.Image.getprojection
.. automethod:: PIL.Image.Image.histogram
.. automethod:: PIL.Image.Image.paste
.. automethod:: PIL.Image.Image.point
.. automethod:: PIL.Image.Image.putalpha
.. automethod:: PIL.Image.Image.putdata
.. automethod:: PIL.Image.Image.putpalette
.. automethod:: PIL.Image.Image.putpixel
.. automethod:: PIL.Image.Image.quantize
.. automethod:: PIL.Image.Image.reduce
.. automethod:: PIL.Image.Image.remap_palette
.. automethod:: PIL.Image.Image.resize
This resizes the given image from ``(width, height)`` to ``(width/2, height/2)``:
.. code-block:: python
from PIL import Image
with Image.open("hopper.jpg") as im:
# Provide the target width and height of the image
(width, height) = (im.width // 2, im.height // 2)
im_resized = im.resize((width, height))
.. automethod:: PIL.Image.Image.rotate
This rotates the input image by ``theta`` degrees counter clockwise:
.. code-block:: python
from PIL import Image
with Image.open("hopper.jpg") as im:
# Rotate the image by 60 degrees counter clockwise
theta = 60
# Angle is in degrees counter clockwise
im_rotated = im.rotate(angle=theta)
.. automethod:: PIL.Image.Image.save
.. automethod:: PIL.Image.Image.seek
.. automethod:: PIL.Image.Image.show
.. automethod:: PIL.Image.Image.split
.. automethod:: PIL.Image.Image.tell
.. automethod:: PIL.Image.Image.thumbnail
.. automethod:: PIL.Image.Image.tobitmap
.. automethod:: PIL.Image.Image.tobytes
.. automethod:: PIL.Image.Image.transform
.. automethod:: PIL.Image.Image.transpose
This flips the input image by using the :data:`FLIP_LEFT_RIGHT` method.
.. code-block:: python
from PIL import Image
with Image.open("hopper.jpg") as im:
# Flip the image from left to right
im_flipped = im.transpose(method=Image.FLIP_LEFT_RIGHT)
# To flip the image from top to bottom,
# use the method "Image.FLIP_TOP_BOTTOM"
.. automethod:: PIL.Image.Image.verify
.. automethod:: PIL.Image.Image.load
.. automethod:: PIL.Image.Image.close
Image Attributes
----------------
Instances of the :py:class:`Image` class have the following attributes:
.. py:attribute:: Image.filename
:type: str
The filename or path of the source file. Only images created with the
factory function ``open`` have a filename attribute. If the input is a
file like object, the filename attribute is set to an empty string.
.. py:attribute:: Image.format
:type: Optional[str]
The file format of the source file. For images created by the library
itself (via a factory function, or by running a method on an existing
image), this attribute is set to :data:`None`.
.. py:attribute:: Image.mode
:type: str
Image mode. This is a string specifying the pixel format used by the image.
Typical values are “1”, “L”, “RGB”, or “CMYK.” See
:ref:`concept-modes` for a full list.
.. py:attribute:: Image.size
:type: tuple[int]
Image size, in pixels. The size is given as a 2-tuple (width, height).
.. py:attribute:: Image.width
:type: int
Image width, in pixels.
.. py:attribute:: Image.height
:type: int
Image height, in pixels.
.. py:attribute:: Image.palette
:type: Optional[PIL.ImagePalette.ImagePalette]
Colour palette table, if any. If mode is "P" or "PA", this should be an
instance of the :py:class:`~PIL.ImagePalette.ImagePalette` class.
Otherwise, it should be set to :data:`None`.
.. py:attribute:: Image.info
:type: dict
A dictionary holding data associated with the image. This dictionary is
used by file handlers to pass on various non-image information read from
the file. See documentation for the various file handlers for details.
Most methods ignore the dictionary when returning new images; since the
keys are not standardized, it’s not possible for a method to know if the
operation affects the dictionary. If you need the information later on,
keep a reference to the info dictionary returned from the open method.
Unless noted elsewhere, this dictionary does not affect saving files.
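A short sketch of reading the dictionary; which keys are present depends
entirely on the file format and plugin:

.. code-block:: python

    from PIL import Image

    with Image.open("hopper.jpg") as im:
        info = dict(im.info)  # keep a copy if the data is needed later
        print(info.get("dpi"))  # None if the file carries no DPI information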
.. py:attribute:: Image.is_animated
:type: bool
``True`` if this image has more than one frame, or ``False`` otherwise.
This attribute is only defined by image plugins that support animated images.
Plugins may leave this attribute undefined if they don't support loading
animated images, even if the given format supports animated images.
Given that this attribute is not present for all images use
``getattr(image, "is_animated", False)`` to check if Pillow is aware of multiple
frames in an image regardless of its format.
.. seealso:: :attr:`~Image.n_frames`, :func:`~Image.seek` and :func:`~Image.tell`
.. py:attribute:: Image.n_frames
:type: int
The number of frames in this image.
This attribute is only defined by image plugins that support animated images.
Plugins may leave this attribute undefined if they don't support loading
animated images, even if the given format supports animated images.
Given that this attribute is not present for all images use
``getattr(image, "n_frames", 1)`` to check the number of frames that Pillow is
aware of in an image regardless of its format.
.. seealso:: :attr:`~Image.is_animated`, :func:`~Image.seek` and :func:`~Image.tell`
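A sketch that visits every frame of a multi-frame image using the ``getattr``
pattern described above:

.. code-block:: python

    from PIL import Image

    with Image.open("animation.gif") as im:
        for index in range(getattr(im, "n_frames", 1)):
            im.seek(index)
            frame = im.copy()  # work on a copy of the current frame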
Classes
-------
.. autoclass:: PIL.Image.Exif
:members:
:undoc-members:
:show-inheritance:
.. autoclass:: PIL.Image.ImagePointHandler
.. autoclass:: PIL.Image.ImageTransformHandler
Constants
---------
.. data:: NONE
.. data:: MAX_IMAGE_PIXELS
Set to 89,478,485, approximately 0.25GB for a 24-bit (3 bpp) image.
See :py:meth:`~PIL.Image.open` for more information about how this is used.
Transpose methods
^^^^^^^^^^^^^^^^^
Used to specify the :meth:`Image.transpose` method to use.
.. data:: FLIP_LEFT_RIGHT
.. data:: FLIP_TOP_BOTTOM
.. data:: ROTATE_90
.. data:: ROTATE_180
.. data:: ROTATE_270
.. data:: TRANSPOSE
.. data:: TRANSVERSE
Transform methods
^^^^^^^^^^^^^^^^^
Used to specify the :meth:`Image.transform` method to use.
.. data:: AFFINE
Affine transform
.. data:: EXTENT
Cut out a rectangular subregion
.. data:: PERSPECTIVE
Perspective transform
.. data:: QUAD
Map a quadrilateral to a rectangle
.. data:: MESH
Map a number of source quadrilaterals in one operation
Resampling filters
^^^^^^^^^^^^^^^^^^
See :ref:`concept-filters` for details.
.. data:: NEAREST
:noindex:
.. data:: BOX
:noindex:
.. data:: BILINEAR
:noindex:
.. data:: HAMMING
:noindex:
.. data:: BICUBIC
:noindex:
.. data:: LANCZOS
:noindex:
Some filters are also available under the following names for backwards compatibility:
.. data:: NONE
:noindex:
:value: NEAREST
.. data:: LINEAR
:value: BILINEAR
.. data:: CUBIC
:value: BICUBIC
.. data:: ANTIALIAS
:value: LANCZOS
Dither modes
^^^^^^^^^^^^
Used to specify the dithering method to use for the
:meth:`~Image.convert` and :meth:`~Image.quantize` methods.
.. data:: NONE
:noindex:
No dither
.. comment: (not implemented)
.. data:: ORDERED
.. data:: RASTERIZE
.. data:: FLOYDSTEINBERG
Floyd-Steinberg dither
Palettes
^^^^^^^^
Used to specify the palette to use for the :meth:`~Image.convert` method.
.. data:: WEB
.. data:: ADAPTIVE
Quantization methods
^^^^^^^^^^^^^^^^^^^^
Used to specify the quantization method to use for the :meth:`~Image.quantize` method.
.. data:: MEDIANCUT
Median cut. Default method, except for RGBA images. This method does not support
RGBA images.
.. data:: MAXCOVERAGE
Maximum coverage. This method does not support RGBA images.
.. data:: FASTOCTREE
Fast octree. Default method for RGBA images.
.. data:: LIBIMAGEQUANT
libimagequant
Check support using :py:func:`PIL.features.check_feature`
with ``feature="libimagequant"``.
/OpenHands-0.1.4.1.tar.gz/OpenHands-0.1.4.1/openhands/models/encoder/graph/decoupled_gcn.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from omegaconf import OmegaConf
from .graph_utils import SpatialGraph
# https://github.com/jackyjsy/CVPR21Chal-SLR
def conv_init(conv):
nn.init.kaiming_normal_(conv.weight, mode="fan_out")
nn.init.constant_(conv.bias, 0)
def bn_init(bn, scale):
nn.init.constant_(bn.weight, scale)
nn.init.constant_(bn.bias, 0)
def find_drop_size(num_nodes, num_edges, K=1):
B_sum = 0
for i in range(1, K + 1):
B_sum += (2 * num_edges / num_nodes) * math.pow(
(2 * num_edges / num_nodes) - 1, i - 1
)
return B_sum
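# For example, a hypothetical skeleton graph with 25 nodes and 24 inward edges
# gives find_drop_size(25, 24) == 2 * 24 / 25 == 1.92 for K=1, matching the
# default drop_size used by TCNUnit below.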
class DropGraphTemporal(nn.Module):
def __init__(self, block_size=7):
super(DropGraphTemporal, self).__init__()
self.block_size = block_size
def forward(self, x, keep_prob):
self.keep_prob = keep_prob
if not self.training or self.keep_prob == 1:
return x
n, c, t, v = x.size()
input_abs = torch.mean(torch.mean(torch.abs(x), dim=3), dim=1).detach()
input_abs = (input_abs / torch.sum(input_abs) * input_abs.numel()).view(n, 1, t)
gamma = (1.0 - self.keep_prob) / self.block_size
input1 = x.permute(0, 1, 3, 2).contiguous().view(n, c * v, t)
M = torch.bernoulli(torch.clamp(input_abs * gamma, max=1.0)).repeat(1, c * v, 1)
m_sum = F.max_pool1d(
M, kernel_size=[self.block_size], stride=1, padding=self.block_size // 2
)
mask = (1 - m_sum).to(device=m_sum.device, dtype=m_sum.dtype)
return (
(input1 * mask * mask.numel() / mask.sum())
.view(n, c, v, t)
.permute(0, 1, 3, 2)
)
class DropGraphSpatial(nn.Module):
def __init__(self, num_points, drop_size):
super(DropGraphSpatial, self).__init__()
self.drop_size = drop_size
self.num_points = num_points
def forward(self, x, keep_prob, A):
self.keep_prob = keep_prob
if not self.training or self.keep_prob == 1:
return x
n, c, t, v = x.size()
input_abs = torch.mean(torch.mean(torch.abs(x), dim=2), dim=1).detach()
input_abs = input_abs / torch.sum(input_abs) * input_abs.numel()
gamma = (1.0 - self.keep_prob) / (1 + self.drop_size)
M_seed = torch.bernoulli(torch.clamp(input_abs * gamma, max=1.0)).to(
device=x.device, dtype=x.dtype
)
M = torch.matmul(M_seed, A)
M[M > 0.001] = 1.0
M[M < 0.5] = 0.0
mask = (1 - M).view(n, 1, 1, self.num_points)
return x * mask * mask.numel() / mask.sum()
class TCNUnit(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=9,
stride=1,
use_drop=True,
drop_size=1.92,
num_points=25,
block_size=41,
):
super(TCNUnit, self).__init__()
pad = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=(kernel_size, 1),
padding=(pad, 0),
stride=(stride, 1),
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
conv_init(self.conv)
bn_init(self.bn, 1)
self.use_drop = use_drop
if use_drop:
self.dropS = DropGraphSpatial(num_points=num_points, drop_size=drop_size)
self.dropT = DropGraphTemporal(block_size=block_size)
def forward(self, x, keep_prob=None, A=None):
x = self.bn(self.conv(x))
if self.use_drop:
x = self.dropT(self.dropS(x, keep_prob, A), keep_prob)
return x
class DecoupledGCNUnit(nn.Module):
def __init__(self, in_channels, out_channels, A, groups, num_points, num_subset=3):
super(DecoupledGCNUnit, self).__init__()
self.num_points = num_points
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.num_subset = num_subset
self.decoupled_A = nn.Parameter(
torch.tensor(
np.reshape(A, [3, 1, num_points, num_points]), dtype=torch.float32
).repeat(1, groups, 1, 1),
requires_grad=True,
)
if in_channels != out_channels:
self.down = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1), nn.BatchNorm2d(out_channels)
)
else:
self.down = lambda x: x
self.bn0 = nn.BatchNorm2d(out_channels * num_subset)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
self.linear_weight = nn.Parameter(
torch.zeros(in_channels, out_channels * num_subset), requires_grad=True
)
self.linear_bias = nn.Parameter(
torch.zeros(1, out_channels * num_subset, 1, 1), requires_grad=True
)
self.eye_list = nn.Parameter(
torch.stack([torch.eye(num_points) for _ in range(out_channels)]),
requires_grad=False,
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
conv_init(m)
elif isinstance(m, nn.BatchNorm2d):
bn_init(m, 1)
bn_init(self.bn, 1e-6)
nn.init.normal_(
self.linear_weight, 0, math.sqrt(0.5 / (out_channels * num_subset))
)
nn.init.constant_(self.linear_bias, 1e-6)
def norm(self, A):
b, c, h, w = A.size()
A = A.view(c, self.num_points, self.num_points)
D_list = torch.sum(A, 1).view(c, 1, self.num_points)
D_list_12 = (D_list + 0.001) ** (-1)
D_12 = self.eye_list * D_list_12
A = torch.bmm(A, D_12).view(b, c, h, w)
return A
def forward(self, x0):
learn_adj = self.decoupled_A.repeat(1, self.out_channels // self.groups, 1, 1)
normed_adj = torch.cat(
[
self.norm(learn_adj[0:1, ...]),
self.norm(learn_adj[1:2, ...]),
self.norm(learn_adj[2:3, ...]),
],
0,
)
x = torch.einsum("nctw,cd->ndtw", (x0, self.linear_weight)).contiguous()
x = x + self.linear_bias
x = self.bn0(x)
n, kc, t, v = x.size()
x = x.view(n, self.num_subset, kc // self.num_subset, t, v)
x = torch.einsum("nkctv,kcvw->nctw", (x, normed_adj))
x = self.bn(x)
x += self.down(x0)
x = self.relu(x)
return x
class DecoupledGCN_TCN_unit(nn.Module):
"""
Single unit of a stack of Decoupled GCN and TCN layers.
Args:
in_channels (int): Number of channels in the input sequence data
out_channels (int): Number of channels produced by the convolution
A (ndarray): 2D array containing the adjacency matrix
groups (int): Number of Decouple groups to use
num_points (int): Number of spatial joints
block_size (int): Block size used for Temporal masking in Dropgraph
        drop_size (float): Drop size used for spatial DropGraph masking
        stride (int): Stride of the temporal convolution. Default: 1
        residual (bool): To use residual connections or not. Default: ``True``
use_attention (bool): To use self attention layer or not. Default: ``True``
"""
def __init__(
self,
in_channels,
out_channels,
A,
groups,
num_points,
block_size,
drop_size,
stride=1,
residual=True,
use_attention=True,
):
super(DecoupledGCN_TCN_unit, self).__init__()
num_joints = A.shape[-1]
self.gcn1 = DecoupledGCNUnit(in_channels, out_channels, A, groups, num_points)
self.tcn1 = TCNUnit(
out_channels,
out_channels,
stride=stride,
num_points=num_points,
drop_size=drop_size,
)
self.relu = nn.ReLU()
self.A = nn.Parameter(
torch.tensor(
np.sum(
np.reshape(A.astype(np.float32), [3, num_points, num_points]),
axis=0,
),
dtype=torch.float32,
),
requires_grad=False,
)
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = TCNUnit(
in_channels, out_channels, kernel_size=1, stride=stride, use_drop=False
)
self.drop_spatial = DropGraphSpatial(num_points=num_points, drop_size=drop_size)
self.drop_temporal = DropGraphTemporal(block_size=block_size)
self.use_attention = use_attention
if self.use_attention:
self.sigmoid = nn.Sigmoid()
# Temporal attention
self.conv_ta = nn.Conv1d(out_channels, 1, 9, padding=4)
nn.init.constant_(self.conv_ta.weight, 0)
nn.init.constant_(self.conv_ta.bias, 0)
# Spatial Attention
ker_jpt = num_joints - 1 if not num_joints % 2 else num_joints
pad = (ker_jpt - 1) // 2
self.conv_sa = nn.Conv1d(out_channels, 1, ker_jpt, padding=pad)
nn.init.xavier_normal_(self.conv_sa.weight)
nn.init.constant_(self.conv_sa.bias, 0)
# Channel Attention
rr = 2
self.fc1c = nn.Linear(out_channels, out_channels // rr)
self.fc2c = nn.Linear(out_channels // rr, out_channels)
nn.init.kaiming_normal_(self.fc1c.weight)
nn.init.constant_(self.fc1c.bias, 0)
nn.init.constant_(self.fc2c.weight, 0)
nn.init.constant_(self.fc2c.bias, 0)
def forward(self, x, keep_prob):
y = self.gcn1(x)
if self.use_attention:
# spatial attention
se = y.mean(-2) # N C V
se1 = self.sigmoid(self.conv_sa(se))
y = y * se1.unsqueeze(-2) + y
# temporal attention
se = y.mean(-1)
se1 = self.sigmoid(self.conv_ta(se))
y = y * se1.unsqueeze(-1) + y
# channel attention
se = y.mean(-1).mean(-1)
se1 = self.relu(self.fc1c(se))
se2 = self.sigmoid(self.fc2c(se1))
y = y * se2.unsqueeze(-1).unsqueeze(-1) + y
y = self.tcn1(y, keep_prob, self.A)
x_skip = self.residual(x)
x_skip = self.drop_spatial(x_skip, keep_prob, self.A)
x_skip = self.drop_temporal(x_skip, keep_prob)
return self.relu(y + x_skip)
class DecoupledGCN(nn.Module):
"""
ST-GCN backbone with Decoupled GCN layers, Self Attention and DropGraph proposed in the paper:
`Skeleton Aware Multi-modal Sign Language Recognition
<https://arxiv.org/pdf/2103.08833.pdf>`_
Args:
in_channels (int): Number of channels in the input data.
graph_cfg (dict): The arguments for building the graph.
groups (int): Number of Decouple groups to use. Default: 8.
block_size (int): Block size used for Temporal masking in Dropgraph. Default: 41.
n_out_features (int): Output Embedding dimension. Default: 256.
"""
def __init__(
self,
in_channels,
graph_args,
groups=8,
block_size=41,
n_out_features = 256
):
super(DecoupledGCN, self).__init__()
graph_args = OmegaConf.to_container(graph_args)
num_points = graph_args["num_nodes"]
inward_edges = graph_args["inward_edges"]
self.graph = SpatialGraph(num_points, inward_edges)
A = self.graph.A
self.data_bn = nn.BatchNorm1d(in_channels * num_points)
drop_size = find_drop_size(self.graph.num_nodes, len(self.graph.inward_edges))
self.l1 = DecoupledGCN_TCN_unit(
in_channels,
64,
A,
groups,
num_points,
block_size,
drop_size=drop_size,
residual=False,
)
self.l2 = DecoupledGCN_TCN_unit(
64, 64, A, groups, num_points, block_size, drop_size=drop_size
)
self.l3 = DecoupledGCN_TCN_unit(
64, 64, A, groups, num_points, block_size, drop_size=drop_size
)
self.l4 = DecoupledGCN_TCN_unit(
64, 64, A, groups, num_points, block_size, drop_size=drop_size
)
self.l5 = DecoupledGCN_TCN_unit(
64, 128, A, groups, num_points, block_size, drop_size=drop_size, stride=2
)
self.l6 = DecoupledGCN_TCN_unit(
128, 128, A, groups, num_points, block_size, drop_size=drop_size
)
self.l7 = DecoupledGCN_TCN_unit(
128, 128, A, groups, num_points, block_size, drop_size=drop_size
)
self.l8 = DecoupledGCN_TCN_unit(
128, 256, A, groups, num_points, block_size, drop_size=drop_size, stride=2
)
self.l9 = DecoupledGCN_TCN_unit(
256, 256, A, groups, num_points, block_size, drop_size=drop_size
)
self.n_out_features = n_out_features
self.l10 = DecoupledGCN_TCN_unit(
256,
self.n_out_features,
A,
groups,
num_points,
block_size,
drop_size=drop_size,
)
bn_init(self.data_bn, 1)
def forward(self, x, keep_prob=0.9):
"""
Args:
x (torch.Tensor): Input graph sequence of shape :math:`(N, in\_channels, T_{in}, V_{in})`
keep_prob (float): The probability to keep the node. Default: 0.9.
Returns:
torch.Tensor: Output embedding of shape :math:`(N, n\_out\_features)`
where:
- :math:`N` is a batch size,
- :math:`T_{in}` is a length of input sequence,
- :math:`V_{in}` is the number of graph nodes,
- :math:`n\_out\_features` is the `n\_out\_features' value.
"""
N, C, T, V = x.size()
x = x.permute(0, 3, 1, 2).contiguous().view(N, V * C, T)
x = self.data_bn(x)
x = (
x.view(N, V, C, T)
.permute(0, 2, 3, 1) # NVCT -> NCTV
.contiguous()
)
x = self.l1(x, 1.0)
x = self.l2(x, 1.0)
x = self.l3(x, 1.0)
x = self.l4(x, 1.0)
x = self.l5(x, 1.0)
x = self.l6(x, 1.0)
x = self.l7(x, keep_prob)
x = self.l8(x, keep_prob)
x = self.l9(x, keep_prob)
x = self.l10(x, keep_prob)
# x.shape: (N,C,T,V)
c_new = x.size(1)
x = x.reshape(N, c_new, -1)
return x.mean(2)
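# --- Minimal usage sketch (added for illustration; not part of the original
# module). It assumes the helper classes defined earlier in this file
# (SpatialGraph, find_drop_size, DecoupledGCN_TCN_unit) behave as used above;
# the tiny 3-node graph below is purely hypothetical.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    graph_args = OmegaConf.create(
        {"num_nodes": 3, "inward_edges": [[0, 1], [1, 2]]}
    )
    backbone = DecoupledGCN(in_channels=2, graph_args=graph_args).eval()
    # (N, C, T, V): batch of 4 sequences, 2 channels, 16 frames, 3 nodes
    dummy = torch.randn(4, 2, 16, 3)
    # keep_prob=1.0 keeps the DropGraph layers inactive for this smoke test
    embedding = backbone(dummy, keep_prob=1.0)
    print(embedding.shape)  # expected: torch.Size([4, 256])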
|
PypiClean
|
/mindspore_ascend-1.10.0-cp39-none-any.whl/mindspore/_akg/akg/ops/math/ascend/approximate_equal.py
|
"""operator dsl function: approximate_equal"""
import akg.tvm
from akg.utils.kernel_exec import product_is_mini
from akg.utils import validation_check as utils
from akg.utils.format_transform import get_shape
from ..sub import sub
from ..abs import abs
from ..cast import cast
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor, (float, type(None)), (str, type(None)))
def approximate_equal(x, y, tolerance=1e-5, target=utils.CCE):
"""
abs(x-y) less than or equal to the tolerance
Args:
x (tvm.tensor.Tensor): Tensor of type float16, float32.
y (tvm.tensor.Tensor): Tensor of type float16, float32.
tolerance (float): default is 1e-5
Returns:
tvm.tensor.Tensor. If abs(x-y) less than or equal to the tolerance return True,
else return False.
Supported Platforms:
'Ascend'
"""
if tolerance < 0:
raise RuntimeError("tolerance should >= 0")
# check shape
utils.check_shape(x)
utils.check_shape(y)
shape = get_shape(x)
    if shape != get_shape(y):
        raise RuntimeError("input shape must be same, but got %s vs %s" %
                           (shape, get_shape(y)))
# check input tensor data_type
utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
utils.ops_dtype_check(y.dtype, utils.DtypeForDavinci.ALL_FLOAT)
dtype = x.dtype
    if dtype != y.dtype:
        raise RuntimeError("input type must be same, but got %s vs %s" %
                           (dtype, y.dtype))
res_vsub = sub(x, y, target)
res_vabs = abs(res_vsub, target)
    # As the vcmp_lt and vsel instructions don't support fp32 on mini,
    # cast to fp16 first. This could be simplified by methods such as "auto cast".
if product_is_mini():
dtype = "float16"
res_vabs = cast(res_vabs, dtype, target)
t = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, dtype), "t")
f = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(0, dtype), "f")
res = akg.tvm.compute(shape, lambda *indice: akg.tvm.expr.Select(
res_vabs[indice] <= akg.tvm.const(tolerance, dtype),
t[indice], f[indice]))
    # This could be simplified by letting the cast op support fp16/fp32 to bool type.
res_fp16 = cast(res, "float16", target)
res_bool = akg.tvm.compute(shape, lambda *indice: res_fp16(*indice).astype("bool"))
return res_bool
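# --- Minimal usage sketch (added for illustration; not part of the original
# file). Building and running the kernel requires an Ascend/AKG toolchain, so
# the example is left commented out; it only assumes the TVM-style placeholder
# API that akg re-exports.
#
#     lhs = akg.tvm.placeholder((16, 16), name="lhs", dtype="float16")
#     rhs = akg.tvm.placeholder((16, 16), name="rhs", dtype="float16")
#     res = approximate_equal(lhs, rhs, tolerance=1e-3)  # elementwise bool tensor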
|
PypiClean
|
/hiv-aapcnt-2019.4.tar.gz/hiv-aapcnt-2019.4/hivaapcnt/hivaapcnt.py
|
import os
import json
from copy import deepcopy
from . import data
RES_DIRECTORY = os.path.dirname(data.__file__)
class HIVAAPcnt:
singletons = {}
def __new__(cls, treatment, subtype):
resource_name = '{}{}.json'.format(treatment, subtype)
if resource_name in cls.singletons:
return cls.singletons[resource_name]
self = super(HIVAAPcnt, cls).__new__(cls)
self.__init_resource(resource_name)
cls.singletons[resource_name] = self
return self
def __init_resource(self, resource_name):
with open(os.path.join(RES_DIRECTORY, resource_name)) as fp:
aapcnts = self.__aapcnts = json.load(fp)
aapcnts_dict = {}
for aapcnt in aapcnts:
genepos = (aapcnt['gene'], aapcnt['position'])
aapcnts_dict.setdefault(genepos, {})[aapcnt['aa']] = aapcnt
self.__aapcnts_dict = aapcnts_dict
def get(self, gene=None, position=None, aa=None):
if gene is None:
# make a copy in case of any modification
result = self.__aapcnts
elif position is None:
result = [aapcnt for aapcnt in self.__aapcnts
if aapcnt['gene'] == gene]
elif aa is None:
result = self.__aapcnts_dict[(gene, position)]
else:
result = self.__aapcnts_dict[(gene, position)][aa]
return deepcopy(result)
"""
Returns the highest amino acid prevalence associated with each of
the AA in a mixture.
"""
def get_highest_aa_percent_value(self, gene, position, mixture):
pcntval = .0
gpos = (gene, position)
for aa in mixture:
aa_pcntval = self.__aapcnts_dict[gpos][aa]['percent']
pcntval = max(pcntval, aa_pcntval)
return pcntval
"""Returns True if the given mutation contains any unusual AA"""
def contains_unusual_aa(self, gene, position, aas):
gpos = (gene, position)
for aa in aas:
aapcnt = self.__aapcnts_dict[gpos][aa]
if aapcnt['isUnusual']:
return True
return False
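# --- Minimal usage sketch (added for illustration; not part of the original
# module). The treatment/subtype labels and the gene/position below are
# placeholders only; use whichever JSON resources ship with the installed
# package (the constructor loads '<treatment><subtype>.json').
if __name__ == "__main__":
    aapcnt = HIVAAPcnt("all", "all")
    rt_rows = aapcnt.get(gene="RT")  # every prevalence row for one gene
    print(len(rt_rows))
    print(aapcnt.contains_unusual_aa("RT", 184, "V"))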
|
PypiClean
|
/cosmospy_protobuf-0.3.0.tar.gz/cosmospy_protobuf-0.3.0/src/cosmospy_protobuf/cosmos/bank/v1beta1/query_pb2.py
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
_sym_db = _symbol_database.Default()
from ....cosmos.base.query.v1beta1 import pagination_pb2 as cosmos_dot_base_dot_query_dot_v1beta1_dot_pagination__pb2
from ....gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from ....google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from ....cosmos.base.v1beta1 import coin_pb2 as cosmos_dot_base_dot_v1beta1_dot_coin__pb2
from ....cosmos.bank.v1beta1 import bank_pb2 as cosmos_dot_bank_dot_v1beta1_dot_bank__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1fcosmos/bank/v1beta1/query.proto\x12\x13cosmos.bank.v1beta1\x1a*cosmos/base/query/v1beta1/pagination.proto\x1a\x14gogoproto/gogo.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1ecosmos/base/v1beta1/coin.proto\x1a\x1ecosmos/bank/v1beta1/bank.proto"?\n\x13QueryBalanceRequest\x12\x0f\n\x07address\x18\x01 \x01(\t\x12\r\n\x05denom\x18\x02 \x01(\t:\x08\x88\xa0\x1f\x00\xe8\xa0\x1f\x00"B\n\x14QueryBalanceResponse\x12*\n\x07balance\x18\x01 \x01(\x0b2\x19.cosmos.base.v1beta1.Coin"p\n\x17QueryAllBalancesRequest\x12\x0f\n\x07address\x18\x01 \x01(\t\x12:\n\npagination\x18\x02 \x01(\x0b2&.cosmos.base.query.v1beta1.PageRequest:\x08\x88\xa0\x1f\x00\xe8\xa0\x1f\x00"\xb6\x01\n\x18QueryAllBalancesResponse\x12]\n\x08balances\x18\x01 \x03(\x0b2\x19.cosmos.base.v1beta1.CoinB0\xc8\xde\x1f\x00\xaa\xdf\x1f(github.com/cosmos/cosmos-sdk/types.Coins\x12;\n\npagination\x18\x02 \x01(\x0b2\'.cosmos.base.query.v1beta1.PageResponse"v\n\x1dQuerySpendableBalancesRequest\x12\x0f\n\x07address\x18\x01 \x01(\t\x12:\n\npagination\x18\x02 \x01(\x0b2&.cosmos.base.query.v1beta1.PageRequest:\x08\x88\xa0\x1f\x00\xe8\xa0\x1f\x00"\xbc\x01\n\x1eQuerySpendableBalancesResponse\x12]\n\x08balances\x18\x01 \x03(\x0b2\x19.cosmos.base.v1beta1.CoinB0\xc8\xde\x1f\x00\xaa\xdf\x1f(github.com/cosmos/cosmos-sdk/types.Coins\x12;\n\npagination\x18\x02 \x01(\x0b2\'.cosmos.base.query.v1beta1.PageResponse"_\n\x17QueryTotalSupplyRequest\x12:\n\npagination\x18\x01 \x01(\x0b2&.cosmos.base.query.v1beta1.PageRequest:\x08\x88\xa0\x1f\x00\xe8\xa0\x1f\x00"\xb4\x01\n\x18QueryTotalSupplyResponse\x12[\n\x06supply\x18\x01 \x03(\x0b2\x19.cosmos.base.v1beta1.CoinB0\xc8\xde\x1f\x00\xaa\xdf\x1f(github.com/cosmos/cosmos-sdk/types.Coins\x12;\n\npagination\x18\x02 \x01(\x0b2\'.cosmos.base.query.v1beta1.PageResponse"%\n\x14QuerySupplyOfRequest\x12\r\n\x05denom\x18\x01 \x01(\t"H\n\x15QuerySupplyOfResponse\x12/\n\x06amount\x18\x01 \x01(\x0b2\x19.cosmos.base.v1beta1.CoinB\x04\xc8\xde\x1f\x00"\x14\n\x12QueryParamsRequest"H\n\x13QueryParamsResponse\x121\n\x06params\x18\x01 \x01(\x0b2\x1b.cosmos.bank.v1beta1.ParamsB\x04\xc8\xde\x1f\x00"X\n\x1aQueryDenomsMetadataRequest\x12:\n\npagination\x18\x01 \x01(\x0b2&.cosmos.base.query.v1beta1.PageRequest"\x92\x01\n\x1bQueryDenomsMetadataResponse\x126\n\tmetadatas\x18\x01 \x03(\x0b2\x1d.cosmos.bank.v1beta1.MetadataB\x04\xc8\xde\x1f\x00\x12;\n\npagination\x18\x02 \x01(\x0b2\'.cosmos.base.query.v1beta1.PageResponse"*\n\x19QueryDenomMetadataRequest\x12\r\n\x05denom\x18\x01 \x01(\t"S\n\x1aQueryDenomMetadataResponse\x125\n\x08metadata\x18\x01 
\x01(\x0b2\x1d.cosmos.bank.v1beta1.MetadataB\x04\xc8\xde\x1f\x002\xed\t\n\x05Query\x12\x98\x01\n\x07Balance\x12(.cosmos.bank.v1beta1.QueryBalanceRequest\x1a).cosmos.bank.v1beta1.QueryBalanceResponse"8\x82\xd3\xe4\x93\x022\x120/cosmos/bank/v1beta1/balances/{address}/by_denom\x12\x9b\x01\n\x0bAllBalances\x12,.cosmos.bank.v1beta1.QueryAllBalancesRequest\x1a-.cosmos.bank.v1beta1.QueryAllBalancesResponse"/\x82\xd3\xe4\x93\x02)\x12\'/cosmos/bank/v1beta1/balances/{address}\x12\xb7\x01\n\x11SpendableBalances\x122.cosmos.bank.v1beta1.QuerySpendableBalancesRequest\x1a3.cosmos.bank.v1beta1.QuerySpendableBalancesResponse"9\x82\xd3\xe4\x93\x023\x121/cosmos/bank/v1beta1/spendable_balances/{address}\x12\x8f\x01\n\x0bTotalSupply\x12,.cosmos.bank.v1beta1.QueryTotalSupplyRequest\x1a-.cosmos.bank.v1beta1.QueryTotalSupplyResponse"#\x82\xd3\xe4\x93\x02\x1d\x12\x1b/cosmos/bank/v1beta1/supply\x12\x8e\x01\n\x08SupplyOf\x12).cosmos.bank.v1beta1.QuerySupplyOfRequest\x1a*.cosmos.bank.v1beta1.QuerySupplyOfResponse"+\x82\xd3\xe4\x93\x02%\x12#/cosmos/bank/v1beta1/supply/{denom}\x12\x80\x01\n\x06Params\x12\'.cosmos.bank.v1beta1.QueryParamsRequest\x1a(.cosmos.bank.v1beta1.QueryParamsResponse"#\x82\xd3\xe4\x93\x02\x1d\x12\x1b/cosmos/bank/v1beta1/params\x12\xa6\x01\n\rDenomMetadata\x12..cosmos.bank.v1beta1.QueryDenomMetadataRequest\x1a/.cosmos.bank.v1beta1.QueryDenomMetadataResponse"4\x82\xd3\xe4\x93\x02.\x12,/cosmos/bank/v1beta1/denoms_metadata/{denom}\x12\xa1\x01\n\x0eDenomsMetadata\x12/.cosmos.bank.v1beta1.QueryDenomsMetadataRequest\x1a0.cosmos.bank.v1beta1.QueryDenomsMetadataResponse",\x82\xd3\xe4\x93\x02&\x12$/cosmos/bank/v1beta1/denoms_metadataB+Z)github.com/cosmos/cosmos-sdk/x/bank/typesb\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'cosmos.bank.v1beta1.query_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'Z)github.com/cosmos/cosmos-sdk/x/bank/types'
_QUERYBALANCEREQUEST._options = None
_QUERYBALANCEREQUEST._serialized_options = b'\x88\xa0\x1f\x00\xe8\xa0\x1f\x00'
_QUERYALLBALANCESREQUEST._options = None
_QUERYALLBALANCESREQUEST._serialized_options = b'\x88\xa0\x1f\x00\xe8\xa0\x1f\x00'
_QUERYALLBALANCESRESPONSE.fields_by_name['balances']._options = None
_QUERYALLBALANCESRESPONSE.fields_by_name['balances']._serialized_options = b'\xc8\xde\x1f\x00\xaa\xdf\x1f(github.com/cosmos/cosmos-sdk/types.Coins'
_QUERYSPENDABLEBALANCESREQUEST._options = None
_QUERYSPENDABLEBALANCESREQUEST._serialized_options = b'\x88\xa0\x1f\x00\xe8\xa0\x1f\x00'
_QUERYSPENDABLEBALANCESRESPONSE.fields_by_name['balances']._options = None
_QUERYSPENDABLEBALANCESRESPONSE.fields_by_name['balances']._serialized_options = b'\xc8\xde\x1f\x00\xaa\xdf\x1f(github.com/cosmos/cosmos-sdk/types.Coins'
_QUERYTOTALSUPPLYREQUEST._options = None
_QUERYTOTALSUPPLYREQUEST._serialized_options = b'\x88\xa0\x1f\x00\xe8\xa0\x1f\x00'
_QUERYTOTALSUPPLYRESPONSE.fields_by_name['supply']._options = None
_QUERYTOTALSUPPLYRESPONSE.fields_by_name['supply']._serialized_options = b'\xc8\xde\x1f\x00\xaa\xdf\x1f(github.com/cosmos/cosmos-sdk/types.Coins'
_QUERYSUPPLYOFRESPONSE.fields_by_name['amount']._options = None
_QUERYSUPPLYOFRESPONSE.fields_by_name['amount']._serialized_options = b'\xc8\xde\x1f\x00'
_QUERYPARAMSRESPONSE.fields_by_name['params']._options = None
_QUERYPARAMSRESPONSE.fields_by_name['params']._serialized_options = b'\xc8\xde\x1f\x00'
_QUERYDENOMSMETADATARESPONSE.fields_by_name['metadatas']._options = None
_QUERYDENOMSMETADATARESPONSE.fields_by_name['metadatas']._serialized_options = b'\xc8\xde\x1f\x00'
_QUERYDENOMMETADATARESPONSE.fields_by_name['metadata']._options = None
_QUERYDENOMMETADATARESPONSE.fields_by_name['metadata']._serialized_options = b'\xc8\xde\x1f\x00'
_QUERY.methods_by_name['Balance']._options = None
_QUERY.methods_by_name['Balance']._serialized_options = b'\x82\xd3\xe4\x93\x022\x120/cosmos/bank/v1beta1/balances/{address}/by_denom'
_QUERY.methods_by_name['AllBalances']._options = None
_QUERY.methods_by_name['AllBalances']._serialized_options = b"\x82\xd3\xe4\x93\x02)\x12'/cosmos/bank/v1beta1/balances/{address}"
_QUERY.methods_by_name['SpendableBalances']._options = None
_QUERY.methods_by_name['SpendableBalances']._serialized_options = b'\x82\xd3\xe4\x93\x023\x121/cosmos/bank/v1beta1/spendable_balances/{address}'
_QUERY.methods_by_name['TotalSupply']._options = None
_QUERY.methods_by_name['TotalSupply']._serialized_options = b'\x82\xd3\xe4\x93\x02\x1d\x12\x1b/cosmos/bank/v1beta1/supply'
_QUERY.methods_by_name['SupplyOf']._options = None
_QUERY.methods_by_name['SupplyOf']._serialized_options = b'\x82\xd3\xe4\x93\x02%\x12#/cosmos/bank/v1beta1/supply/{denom}'
_QUERY.methods_by_name['Params']._options = None
_QUERY.methods_by_name['Params']._serialized_options = b'\x82\xd3\xe4\x93\x02\x1d\x12\x1b/cosmos/bank/v1beta1/params'
_QUERY.methods_by_name['DenomMetadata']._options = None
_QUERY.methods_by_name['DenomMetadata']._serialized_options = b'\x82\xd3\xe4\x93\x02.\x12,/cosmos/bank/v1beta1/denoms_metadata/{denom}'
_QUERY.methods_by_name['DenomsMetadata']._options = None
_QUERY.methods_by_name['DenomsMetadata']._serialized_options = b'\x82\xd3\xe4\x93\x02&\x12$/cosmos/bank/v1beta1/denoms_metadata'
_globals['_QUERYBALANCEREQUEST']._serialized_start = 216
_globals['_QUERYBALANCEREQUEST']._serialized_end = 279
_globals['_QUERYBALANCERESPONSE']._serialized_start = 281
_globals['_QUERYBALANCERESPONSE']._serialized_end = 347
_globals['_QUERYALLBALANCESREQUEST']._serialized_start = 349
_globals['_QUERYALLBALANCESREQUEST']._serialized_end = 461
_globals['_QUERYALLBALANCESRESPONSE']._serialized_start = 464
_globals['_QUERYALLBALANCESRESPONSE']._serialized_end = 646
_globals['_QUERYSPENDABLEBALANCESREQUEST']._serialized_start = 648
_globals['_QUERYSPENDABLEBALANCESREQUEST']._serialized_end = 766
_globals['_QUERYSPENDABLEBALANCESRESPONSE']._serialized_start = 769
_globals['_QUERYSPENDABLEBALANCESRESPONSE']._serialized_end = 957
_globals['_QUERYTOTALSUPPLYREQUEST']._serialized_start = 959
_globals['_QUERYTOTALSUPPLYREQUEST']._serialized_end = 1054
_globals['_QUERYTOTALSUPPLYRESPONSE']._serialized_start = 1057
_globals['_QUERYTOTALSUPPLYRESPONSE']._serialized_end = 1237
_globals['_QUERYSUPPLYOFREQUEST']._serialized_start = 1239
_globals['_QUERYSUPPLYOFREQUEST']._serialized_end = 1276
_globals['_QUERYSUPPLYOFRESPONSE']._serialized_start = 1278
_globals['_QUERYSUPPLYOFRESPONSE']._serialized_end = 1350
_globals['_QUERYPARAMSREQUEST']._serialized_start = 1352
_globals['_QUERYPARAMSREQUEST']._serialized_end = 1372
_globals['_QUERYPARAMSRESPONSE']._serialized_start = 1374
_globals['_QUERYPARAMSRESPONSE']._serialized_end = 1446
_globals['_QUERYDENOMSMETADATAREQUEST']._serialized_start = 1448
_globals['_QUERYDENOMSMETADATAREQUEST']._serialized_end = 1536
_globals['_QUERYDENOMSMETADATARESPONSE']._serialized_start = 1539
_globals['_QUERYDENOMSMETADATARESPONSE']._serialized_end = 1685
_globals['_QUERYDENOMMETADATAREQUEST']._serialized_start = 1687
_globals['_QUERYDENOMMETADATAREQUEST']._serialized_end = 1729
_globals['_QUERYDENOMMETADATARESPONSE']._serialized_start = 1731
_globals['_QUERYDENOMMETADATARESPONSE']._serialized_end = 1814
_globals['_QUERY']._serialized_start = 1817
_globals['_QUERY']._serialized_end = 3078
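# --- Minimal usage sketch (added for illustration; not part of the generated
# file). A hypothetical example of how the generated message classes are used;
# the field names come from the descriptor above, and the address value is a
# placeholder.
#
#     from cosmospy_protobuf.cosmos.bank.v1beta1 import query_pb2
#     req = query_pb2.QueryBalanceRequest(address="cosmos1...", denom="uatom")
#     payload = req.SerializeToString()
#     print(query_pb2.QueryBalanceRequest.FromString(payload))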
|
PypiClean
|
/accelbyte_py_sdk-0.48.0.tar.gz/accelbyte_py_sdk-0.48.0/accelbyte_py_sdk/api/social/operations/user_statistic/bulk_reset_user_stat_it_ed9334.py
|
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# AccelByte Gaming Services Social Service (2.9.4)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ADTOObjectForResettingUserStatItems
from ...models import BulkStatOperationResult
from ...models import ValidationErrorEntity
class BulkResetUserStatItemValues(Operation):
"""Bulk reset user's statitem values (bulkResetUserStatItemValues)
Bulk reset user's statitem values for given namespace and user.
Other detail info:
+ *Required permission*: resource="ADMIN:NAMESPACE:{namespace}:USER:{userId}:STATITEM", action=4 (UPDATE)
+ *Returns*: bulk updated result
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:USER:{userId}:STATITEM [UPDATE]
Properties:
url: /social/v2/admin/namespaces/{namespace}/users/{userId}/statitems/value/reset/bulk
method: PUT
tags: ["UserStatistic"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH] or [BEARER_AUTH]
body: (body) OPTIONAL List[ADTOObjectForResettingUserStatItems] in body
namespace: (namespace) REQUIRED str in path
user_id: (userId) REQUIRED str in path
additional_key: (additionalKey) OPTIONAL str in query
Responses:
200: OK - List[BulkStatOperationResult] (successful operation)
422: Unprocessable Entity - ValidationErrorEntity (20002: validation error)
"""
# region fields
_url: str = "/social/v2/admin/namespaces/{namespace}/users/{userId}/statitems/value/reset/bulk"
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"], ["BEARER_AUTH"]]
_location_query: str = None
body: List[ADTOObjectForResettingUserStatItems] # OPTIONAL in [body]
namespace: str # REQUIRED in [path]
user_id: str # REQUIRED in [path]
additional_key: str # OPTIONAL in [query]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
"query": self.get_query_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return [i.to_dict() for i in self.body]
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "user_id"):
result["userId"] = self.user_id
return result
def get_query_params(self) -> dict:
result = {}
if hasattr(self, "additional_key"):
result["additionalKey"] = self.additional_key
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(
self, value: List[ADTOObjectForResettingUserStatItems]
) -> BulkResetUserStatItemValues:
self.body = value
return self
def with_namespace(self, value: str) -> BulkResetUserStatItemValues:
self.namespace = value
return self
def with_user_id(self, value: str) -> BulkResetUserStatItemValues:
self.user_id = value
return self
def with_additional_key(self, value: str) -> BulkResetUserStatItemValues:
self.additional_key = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = [
i0.to_dict(include_empty=include_empty) for i0 in self.body
]
elif include_empty:
result["body"] = []
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "user_id") and self.user_id:
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = ""
if hasattr(self, "additional_key") and self.additional_key:
result["additionalKey"] = str(self.additional_key)
elif include_empty:
result["additionalKey"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(
self, code: int, content_type: str, content: Any
) -> Tuple[
Union[None, List[BulkStatOperationResult]],
Union[None, HttpResponse, ValidationErrorEntity],
]:
"""Parse the given response.
200: OK - List[BulkStatOperationResult] (successful operation)
422: Unprocessable Entity - ValidationErrorEntity (20002: validation error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(
code=code, content_type=content_type, content=content
)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return [BulkStatOperationResult.create_from_dict(i) for i in content], None
if code == 422:
return None, ValidationErrorEntity.create_from_dict(content)
return self.handle_undocumented_response(
code=code, content_type=content_type, content=content
)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
user_id: str,
body: Optional[List[ADTOObjectForResettingUserStatItems]] = None,
additional_key: Optional[str] = None,
**kwargs,
) -> BulkResetUserStatItemValues:
instance = cls()
instance.namespace = namespace
instance.user_id = user_id
if body is not None:
instance.body = body
if additional_key is not None:
instance.additional_key = additional_key
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> BulkResetUserStatItemValues:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = [
ADTOObjectForResettingUserStatItems.create_from_dict(
i0, include_empty=include_empty
)
for i0 in dict_["body"]
]
elif include_empty:
instance.body = []
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = ""
if "additionalKey" in dict_ and dict_["additionalKey"] is not None:
instance.additional_key = str(dict_["additionalKey"])
elif include_empty:
instance.additional_key = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
"userId": "user_id",
"additionalKey": "additional_key",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"body": False,
"namespace": True,
"userId": True,
"additionalKey": False,
}
# endregion static methods
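# --- Minimal usage sketch (added for illustration; not part of the generated
# operation). The namespace and user id are placeholders; in real use the
# operation object is handed to the SDK's request runner rather than executed
# directly.
if __name__ == "__main__":
    op = BulkResetUserStatItemValues.create(
        namespace="my-namespace",
        user_id="user-123",
        additional_key="altKey",
    )
    print(op.url, op.method)
    print(op.get_all_params())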
|
PypiClean
|
/collective.logbook-1.0.0.tar.gz/collective.logbook-1.0.0/collective/logbook/utils.py
|
import logging
import re
from email.mime.text import MIMEText
from collective.logbook.config import HEX_REGEX
from collective.logbook.config import LOGGER
from collective.logbook.config import LOGLEVEL
from plone import api as ploneapi
def get_portal():
"""Get the portal object
:returns: Portal object
:rtype: object
"""
return ploneapi.portal.getSite()
def get_plone_version():
"""Get the Plone version
:returns: Plone version
:rtype: str or list
"""
return ploneapi.env.plone_version()
def is_plone5():
"""Check for Plone 5 series
:returns: True if Plone 5
:rtype: boolean
"""
version = get_plone_version()
return version.startswith('5')
def is_patch_applied():
"""Checks if the monkey patch was already applied
"""
from Products.SiteErrorLog.SiteErrorLog import SiteErrorLog
from collective.logbook.monkey import raising
actual_raising = getattr(SiteErrorLog.raising, 'im_func', SiteErrorLog.raising)
return actual_raising is raising
def is_logbook_enabled():
"""Checks if logbook logging is enabled
"""
return ploneapi.portal.get_registry_record('logbook.logbook_enabled')
def is_logbook_large_site_enabled():
"""Checks if logbook logging is enabled
"""
return ploneapi.portal.get_registry_record('logbook.logbook_large_site')
def get_logbook_log_mails():
"""Returns the emails to notify on new errors
"""
return ploneapi.portal.get_registry_record('logbook.logbook_log_mails')
def log(msg, level=LOGLEVEL):
"""Log the message
"""
# get the numeric value of the level. defaults to 0 (NOTSET)
level = logging.getLevelName(level.upper()) or 0
LOGGER.log(level, msg)
def send_email(message, subject, recipients):
"""Send the message to the list of recipients
"""
log('Sending Email to %r' % recipients)
# Handle a single recipient address gracefully
if not is_list(recipients):
recipients = [recipients]
# convert to HTML email
body = MIMEText(message, _subtype="html", _charset="utf8")
# Send email to all of the recipients
for recipient in recipients:
try:
# Note: `plone.api.portal.send_email` takes care about the fetching
# the correct sender name and email address
ploneapi.portal.send_email(
recipient=recipient,
subject=subject,
body=body,
)
# Do not create another logbook error during the message sending
except Exception as exc:
log('Failed sending email to recipient(s): {} with error: {}'
.format(','.join(recipients), str(exc)), level='error')
def is_list(thing):
""" checks if an object is a list type
>>> is_list([])
True
>>> is_list(list())
True
>>> is_list('[]')
False
>>> is_list({})
False
"""
return isinstance(thing, (list, tuple))
def filtered_error_tail(error):
""" last 5 lines of traceback with replaced oid's
"""
tb_text = error.get('tb_text', '')
tail = tb_text.splitlines()[-5:]
filtered_tail = list(map(hexfilter, tail))
return filtered_tail
def hexfilter(text):
""" unify hex numbers
"""
return HEX_REGEX.sub('0x0000000', text)
# http://grok.zope.org/documentation/how-to/automatic-form-generation
email_expr = re.compile(
r"^(\w&.%#$&'\*+-/=?^_`{}|~]+!)*[\w&.%#$&'\*+-/=?^_`{}|~]+"
r"@(([0-9a-z]([0-9a-z-]*[0-9a-z])?\.)+[a-z]{2,6}|([0-9]{1,3}"
r"\.){3}[0-9]{1,3})$", re.IGNORECASE)
check_email = email_expr.match
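# --- Minimal usage sketch (added for illustration; not part of the original
# module). It only exercises the pure helpers; importing this module still
# needs the Plone dependencies to resolve, and the error dict below is
# hypothetical.
if __name__ == "__main__":
    fake_error = {
        "tb_text": (
            "Traceback (most recent call last):\n"
            '  File "example.py", line 1, in <module>\n'
            "KeyError: <object at 0x7f3a2b1c9d40>"
        )
    }
    print(filtered_error_tail(fake_error))
    print(is_list([1, 2]), is_list("[1, 2]"))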
|
PypiClean
|
/ThreadingWrapper-0.0.1.tar.gz/ThreadingWrapper-0.0.1/README.md
|
# pythreading-wrapper
> pip install pythreading-wrapper
Usage
```python
import time

from concurrency import FunctionArgs, MultiThreading

# a long running process that returns results based on
# a parameter passed
def long_running_process(param1: int):
    time.sleep(1)
    return param1

# imagine a large data set that needs to be chunked
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

def chunk(start: int, end: int):
    return data[start:end]

if __name__ == "__main__":
    mp = MultiThreading()
    # run the process 5 times in parallel
    # the return values will be captured inside a dictionary
    # --------------------------------------------------------
    # use case 1 : getting data for different parameters passed
    args = [FunctionArgs(long_running_process, output_var=item, param1=item) for item in range(5)]
    op: dict = mp.execute_async(*args)
    assert len(op.items()) == 5  # will be True
    assert op[1] == 1  # will be 1
    assert op[2] == 2  # will be 2
    assert op[3] == 3  # will be 3
    assert op[4] == 4  # will be 4
    # --------------------------------------------------------
    # use case 2 : getting data by chunks from a large dataset
    # run the process 5 times in parallel
    # the return values will be captured inside a list
    args = [FunctionArgs(chunk, output_var='', start=item, end=item + 2) for item in range(0, 10, 2)]
    op: list = mp.execute_by_chunks_async(*args)
    print(op)
    assert data[1] in op  # op will contain data[1] == 2
    assert data[3] in op  # op will contain data[3] == 4
    assert data[5] in op  # op will contain data[5] == 6
    assert data[7] in op  # op will contain data[7] == 8
```
|
PypiClean
|
/textual_mandelbrot-0.5.0-py3-none-any.whl/textual_mandelbrot/mandelbrot.py
|
##############################################################################
# Python imports.
from __future__ import annotations
from decimal import Decimal
from operator import mul, truediv
from time import monotonic
from typing import Iterator, Callable
from typing_extensions import Self
##############################################################################
# Textual imports.
from textual.binding import Binding
from textual.color import Color
from textual.message import Message
##############################################################################
# Textual-canvas imports.
from textual_canvas import Canvas
##############################################################################
# Local imports.
from .colouring import default_map
##############################################################################
def _mandelbrot( x: Decimal, y: Decimal, multibrot: float, max_iteration: int ) -> int:
"""Return the Mandelbrot calculation for the point.
Args:
x: The x location of the point to calculate.
y: The y location of the point to calculate.
multibrot: The 'multibrot' value to use in the calculation.
max_iteration: The maximum number of iterations to calculate for.
    Returns:
        The number of iterations it took to escape, or 0 if it didn't escape.
    Note:
        The point is considered stable, i.e. to have not escaped, if
        `max_iteration` is reached without the magnitude exceeding 2.0.
    """
c1 = complex( x, y )
c2 = 0j
for n in range( max_iteration ):
if abs( c2 ) > 2:
return n
c2 = c1 + ( c2 ** multibrot )
return 0
##############################################################################
class Mandelbrot( Canvas ):
"""A Mandelbrot-plotting widget."""
DEFAULT_CSS = """
Mandelbrot {
width: 1fr;
height: 1fr;
}
"""
BINDINGS = [
Binding(
"up, w, k", "move( 0, -1 )", "Up", show=False
),
Binding(
"shift+up, W, K", "move( 0, -1, 50 )", "Up", show=False
),
Binding(
"down, s, j", "move( 0, 1 )", "Down", show=False
),
Binding(
"shift+down, S, J", "move( 0, 1, 50 )", "Down", show=False
),
Binding(
"left, a, h", "move( -1, 0 )", "Left", show=False
),
Binding(
"shift+left, A, H", "move( -1, 0, 50 )", "Left", show=False
),
Binding(
"right, d, l", "move( 1, 0 )", "Right", show=False
),
Binding(
"shift+right, D, L", "move( 1, 0, 50 )", "Right", show=False
),
Binding(
"pageup, right_square_bracket",
"zoom( -1.2 )", "In", key_display="PgUp"
),
Binding(
"pagedown, left_square_bracket",
"zoom( 1.2 )", "Out", key_display="PgDn"
),
Binding(
"ctrl+pageup, right_curly_bracket",
"zoom( -2.0 )", "In+", key_display="Ctrl+PgUp"
),
Binding(
"ctrl+pagedown, left_curly_bracket",
"zoom( 2.0 )", "Out+", key_display="Ctrl+PgDn"
),
Binding( "*, ctrl+up", "multibrot( 1 )", "Mul+" ),
Binding( "/, ctrl+down", "multibrot( -1 )", "Mul-" ),
Binding(
"ctrl+shift+up", "multibrot( 0.05 )", "Mul+", show=False
),
Binding(
"ctrl+shift+down", "multibrot( -0.05 )", "Mul-", show=False
),
Binding( "home", "zero", "0, 0", key_display="Home" ),
Binding(
"comma", "max_iter( -10 )","Res-"
),
Binding(
"less_than_sign", "max_iter( -100 )", "Res--"
),
Binding(
"full_stop", "max_iter( 10 )", "Res+"
),
Binding(
"greater_than_sign", "max_iter( 100 )", "Res++"
),
Binding(
"ctrl+r", "reset", "Reset"
),
Binding(
"escape", "app.quit", "Exit"
)
]
"""Keyboard bindings for the widget."""
class Changed( Message ):
"""Message sent when the range of the display changes.
This will be sent if the user (un)zooms or moves the display.
"""
def __init__( self, mandelbrot: Mandelbrot, elapsed: float ) -> None:
"""Initialise the message.
Args:
mandelbrot: The Mandelbrot causing the message.
elapsed: The time elapsed while calculating the plot.
"""
super().__init__()
self.mandelbrot: Mandelbrot = mandelbrot
"""The Mandelbrot widget that caused the event."""
self.elapsed = elapsed
"""The time that elapsed during the drawing of the current view."""
@property
def control(self) -> Mandelbrot:
"""Alias for the reference to the Mandelbrot widget."""
return self.mandelbrot
def __init__(
self,
width: int,
height: int,
colour_source: Callable[ [ int, int ], Color ] = default_map,
name: str | None = None,
id: str | None = None, # pylint:disable=redefined-builtin
classes: str | None = None,
disabled: bool = False
):
"""Initialise the canvas.
Args:
width: The width of the Mandelbrot set canvas.
height: The height of the Mandelbrot set canvas.
colour_source: Optional function for providing colours.
name: The name of the Mandelbrot widget.
id: The ID of the Mandelbrot widget in the DOM.
classes: The CSS classes of the Mandelbrot widget.
disabled: Whether the Mandelbrot widget is disabled or not.
"""
super().__init__( width, height, name=name, id=id, classes=classes, disabled=disabled )
self._max_iteration: int = 80
"""Maximum number of iterations to perform."""
self._multibrot: Decimal = Decimal( 2.0 )
"""The 'multibrot' value."""
self._from_x: Decimal = Decimal( -2.5 )
"""Start X position for the plot."""
self._to_x: Decimal = Decimal( 1.5 )
"""End X position for the plot."""
self._from_y: Decimal = Decimal( -1.5 )
"""Start Y position for the plot."""
self._to_y: Decimal = Decimal( 1.5 )
"""End Y position for the plot."""
self._colour_source = colour_source
"""Source of colour for the plot."""
@property
def max_iteration( self ) -> int:
"""Maximum number of iterations to perform."""
return self._max_iteration
@property
def multibrot( self ) -> Decimal:
"""The 'multibrot' value."""
return self._multibrot
@property
def from_x( self ) -> Decimal:
"""Start X position for the plot."""
return self._from_x
@property
def to_x( self ) -> Decimal:
"""End X position for the plot."""
return self._to_x
@property
def from_y( self ) -> Decimal:
"""Start Y position for the plot."""
return self._from_y
@property
def to_y( self ) -> Decimal:
"""End Y position for the plot."""
return self._to_y
def reset( self ) -> Self:
"""Reset the plot.
Returns:
Self.
"""
self._max_iteration = 80
self._multibrot = Decimal( 2 )
self._from_x = Decimal( -2.5 )
self._to_x = Decimal( 1.5 )
self._from_y = Decimal( -1.5 )
self._to_y = Decimal( 1.5 )
return self
def set_colour_source( self, colour_source: Callable[ [ int, int ], Color ] ) -> Self:
"""Set a new colour source.
Args:
colour_source: The new colour source.
Returns:
Self.
"""
self._colour_source = colour_source
return self.plot()
def _frange( self, r_from: Decimal, r_to: Decimal, size: int ) -> Iterator[ Decimal ]:
"""Generate a float range for the plot.
Args:
r_from: The value to generate from.
r_to: The value to generate to.
size: The size of canvas in the desired direction.
Yields:
Values between the range to fit the plot.
"""
steps = 0
step = Decimal( r_to - r_from ) / Decimal( size )
n = Decimal( r_from )
while n < r_to and steps < size:
yield n
n += step
steps += 1
def plot( self ) -> Self:
"""Plot the Mandelbrot set using the current conditions.
Returns:
Self.
"""
start = monotonic()
with self.app.batch_update():
for x_pixel, x_point in enumerate( self._frange( self._from_x, self._to_x, self.width ) ):
for y_pixel, y_point in enumerate( self._frange( self._from_y, self._to_y, self.height ) ):
self.set_pixel(
x_pixel, y_pixel,
self._colour_source(
_mandelbrot( x_point, y_point, float( self._multibrot ), self._max_iteration ),
self._max_iteration
)
)
self.post_message( self.Changed( self, monotonic() - start ) )
return self
def on_mount( self ) -> None:
"""Get the plotter going once the DOM is ready."""
self.plot()
def action_move( self, x: int, y: int, steps: int=5 ) -> None:
"""Move the Mandelbrot Set within the view.
Args:
x: The amount and direction to move in X.
            y: The amount and direction to move in Y.
            steps: How many steps it takes to traverse the visible range
                (each call moves the view by 1/steps of it).
        """
x_step = Decimal( x * ( ( self._to_x - self._from_x ) / steps ) )
y_step = Decimal( y * ( ( self._to_y - self._from_y ) / steps ) )
self._from_x += x_step
self._to_x += x_step
self._from_y += y_step
self._to_y += y_step
self.plot()
def action_zero( self ) -> None:
"""Move the view to 0, 0."""
width = ( self._to_x - self._from_x ) / Decimal( 2 )
height = ( self._to_y - self._from_y ) / Decimal( 2 )
self._from_x = -width
self._to_x = width
self._from_y = -height
self._to_y = height
self.plot()
@staticmethod
def _scale( from_pos: Decimal, to_pos: Decimal, zoom: Decimal ) -> tuple[ Decimal, Decimal ]:
"""Scale a dimension.
Args:
from_pos: The start position of the dimension.
            to_pos: The end position of the dimension.
            zoom: The zoom factor; negative values zoom in, positive zoom out.
Returns:
The new start and end positions.
"""
# Figure the operator from the sign.
by = truediv if zoom < 0 else mul
# We don't need the sign anymore.
zoom = Decimal( abs( zoom ) )
# Calculate the old and new dimensions.
old_dim = to_pos - from_pos
new_dim = Decimal( by( old_dim, zoom ) )
# Return the adjusted points.
return (
from_pos + Decimal( ( old_dim - new_dim ) / 2 ),
to_pos - Decimal( ( old_dim - new_dim ) / 2 )
)
def action_zoom( self, zoom: Decimal ) -> None:
"""Zoom in our out.
Args:
zoom: The amount to zoom by.
"""
self._from_x, self._to_x = self._scale( self._from_x, self._to_x, zoom )
self._from_y, self._to_y = self._scale( self._from_y, self._to_y, zoom )
self.plot()
def action_max_iter( self, change: int ) -> None:
"""Change the maximum number of iterations for a calculation.
Args:
change: The amount to change by.
"""
# Keep a lower bound for the max iteration.
if ( self._max_iteration + change ) >= 10:
self._max_iteration += change
self.plot()
else:
self.app.bell()
def action_multibrot( self, change: Decimal ) -> None:
"""Change the 'multibrot' modifier.
Args:
change: The amount to change by.
"""
if ( self._multibrot + Decimal( change ) ) > 0:
self._multibrot += Decimal( change )
self.plot()
else:
self.app.bell()
def action_reset( self ) -> None:
"""Reset the display of the Mandelbrot set back to initial conditions."""
self.reset().plot()
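# --- Minimal usage sketch (added for illustration; not part of the original
# module). It assumes the standard Textual App/compose API; the widget is
# otherwise used exactly as defined above.
if __name__ == "__main__":
    from textual.app import App, ComposeResult

    class MandelbrotDemo( App[ None ] ):
        """Tiny demo application that just shows the plot."""

        def compose( self ) -> ComposeResult:
            yield Mandelbrot( 80, 50 )

    MandelbrotDemo().run()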
### mandelbrot.py ends here
|
PypiClean
|
/solentware_base-5.0-py3-none-any.whl/solentware_base/core/wherevalues.py
|
import re
DOUBLE_QUOTE_STRING = r'".*?"'
SINGLE_QUOTE_STRING = r"'.*?'"
IN = r"in"
TO = r"to"
NOT = r"not"
LIKE = r"like"
FROM = r"from"
ABOVE = r"above"
BELOW = r"below"
STRING = r"[^\s]+"
LEADING_SPACE = r"(?<=\s)"
TRAILING_SPACE = r"(?=\s)"
WHEREVALUES_RE = re.compile(
r"|".join(
(
DOUBLE_QUOTE_STRING,
SINGLE_QUOTE_STRING,
NOT.join((LEADING_SPACE, TRAILING_SPACE)),
LIKE.join((LEADING_SPACE, TRAILING_SPACE)),
FROM.join((LEADING_SPACE, TRAILING_SPACE)),
ABOVE.join((LEADING_SPACE, TRAILING_SPACE)),
BELOW.join((LEADING_SPACE, TRAILING_SPACE)),
TO.join((LEADING_SPACE, TRAILING_SPACE)),
IN.join((LEADING_SPACE, TRAILING_SPACE)),
STRING,
)
).join((r"(", r")")),
flags=re.IGNORECASE | re.DOTALL,
)
KEYWORDS = frozenset(
(
TO,
IN,
NOT,
LIKE,
FROM,
ABOVE,
BELOW,
)
)
class WhereValuesError(Exception):
"""Exception for WhereValues class."""
class WhereValues:
"""Find index values matching the query in statement."""
def __init__(self, statement):
"""Create WhereValues instance for statement."""
self.statement = statement
self.tokens = None
self.node = None
self._error_token_offset = None
self._not = False
self._processors = None
def lex(self):
"""Split instance's statement into tokens."""
tokens = []
strings = []
for word in WHEREVALUES_RE.split(self.statement):
if word.lower() in KEYWORDS:
if strings:
tokens.append(" ".join([_trim(s) for s in strings if s]))
strings.clear()
tokens.append(word.lower())
elif word.strip():
strings.append(word.strip())
if strings:
tokens.append(" ".join([_trim(s) for s in strings if s]))
strings.clear()
self.tokens = tokens
def parse(self):
"""Parse instance's tokens to create node structure to do query.
The structure is simple, consisting of a single node, a ValuesClause
object.
"""
self.node = ValuesClause()
state = self._set_fieldname
for item, token in enumerate(self.tokens):
state = state(token)
if not state:
self._error_token_offset = item
break
else:
self.node.valid_phrase = True
def validate(self, db, dbset):
"""Verify self's statement has a valid search for db and dbset.
db - the database.
dbset - the table in the database.
The field must exist in table dbset of database db.
        One only of above_value and from_value can be given.
        One only of below_value and to_value can be given.
"""
if self._error_token_offset is not None:
return self.tokens[: self._error_token_offset]
if self.node is None:
return None
node = self.node
# Valid values are None or a compiled regular expression.
# The attribute is bound to the string which failed to compile if the
# compilation failed.
if isinstance(node.like_pattern, str):
return False
if not node.valid_phrase:
return node.valid_phrase
if node.field is None:
return False
if not db.exists(dbset, node.field):
return False
if node.above_value is not None and node.from_value is not None:
return False
if node.below_value is not None and node.to_value is not None:
return False
return True
def evaluate(self, processors):
"""Evaluate the query using the processor.
processors - A FindValues object.
The processor will know how to access the field in the statement.
The answer to the query defined in instance's statement is put in the
self.node.result attribute.
"""
if self.node is None:
return
self._processors = processors
try:
self.node.evaluate_node_result(processors)
finally:
self._processors = None
def error(self, token):
"""Return False, token is an unexpected keyword or value."""
return False
def _set_fieldname(self, token):
"""Set field name and return method to process next token."""
if token.lower() in KEYWORDS:
return self.error(token)
self.node.field = token
return self._set_not_from_to_like_in_
def _set_from_value(self, token):
"""Set from value and return method to process next token."""
if token.lower() in KEYWORDS:
return self.error(token)
self.node.from_value = token
return self._set_not_to_like_in_
def _set_above_value(self, token):
"""Set above value and return method to process next token."""
if token.lower() in KEYWORDS:
return self.error(token)
self.node.above_value = token
return self._set_not_to_like_in_
def _set_to_value(self, token):
"""Set to value and return method to process next token."""
if token.lower() in KEYWORDS:
return self.error(token)
self.node.to_value = token
return self._set_not_like_in_
def _set_below_value(self, token):
"""Set to value and return method to process next token."""
if token.lower() in KEYWORDS:
return self.error(token)
self.node.below_value = token
return self._set_not_like_in_
def _set_like_value(self, token):
"""Set like value and return method to process next token."""
# If 'token' really must be one of the keywords the construct
# "fieldname from 'token' to 'token'" must be used to achieve the
# same result.
# Normally "fieldname like \At\Z" will do.
if token.lower() in KEYWORDS:
return self.error(token)
try:
self.node.like_pattern = re.compile(token)
        except re.error:
self.node.like_pattern = token
if self._not:
self.node.like = False
self._not = False
return self._set_not_in_
def _set_in__value(self, token):
"""Set 'in set' value and return method to process next token."""
if token.lower() in KEYWORDS:
return self.error(token)
self.node.in__set = token
if self._not:
self.node.in_ = False
self._not = False
return self._finish
def _set_not_from_to_like_in_(self, token):
"""Set not or condition and return method to process next token.
'from', 'above', 'to', 'below', 'like', and 'in', are accepted
conditions.
"""
if token.lower() == NOT:
self._not = True
return self._set_like_in_
if token.lower() == FROM:
return self._set_from_value
if token.lower() == ABOVE:
return self._set_above_value
if token.lower() == TO:
return self._set_to_value
if token.lower() == BELOW:
return self._set_below_value
if token.lower() == LIKE:
return self._set_like_value
if token.lower() == IN:
return self._set_in__value
return self.error(token)
def _set_not_to_like_in_(self, token):
"""Set not or condition and return method to process next token.
'to', 'below', 'like', and 'in', are accepted conditions.
"""
if token.lower() == NOT:
self._not = True
return self._set_like_in_
if token.lower() == TO:
return self._set_to_value
if token.lower() == BELOW:
return self._set_below_value
if token.lower() == LIKE:
return self._set_like_value
if token.lower() == IN:
return self._set_in__value
return self.error(token)
def _set_not_like_in_(self, token):
"""Set not or condition and return method to process next token.
'like' and 'in' are accepted conditions.
"""
if token.lower() == NOT:
self._not = True
return self._set_like_in_
if token.lower() == LIKE:
return self._set_like_value
if token.lower() == IN:
return self._set_in__value
return self.error(token)
def _set_not_in_(self, token):
"""Set not or condition and return method to process next token.
'in' is accepted condition.
"""
if token.lower() == NOT:
self._not = True
return self._set_in_
if token.lower() == IN:
return self._set_in__value
return self.error(token)
def _set_like_in_(self, token):
"""Set condition and return method to process next token.
'like' and 'in' are accepted conditions.
"""
if token.lower() == LIKE:
return self._set_like_value
if token.lower() == IN:
return self._set_in__value
return self.error(token)
def _set_in_(self, token):
"""Set condition and return method to process next token.
'in' is accepted condition.
"""
if token.lower() == IN:
return self._set_in__value
return self.error(token)
def _finish(self, token):
"""Set error if any token found after final valid token."""
return self.error(token)
class ValuesClause:
"""Phrase in WhereValues specification.
The WhereValues parser binds ValuesClause attributes to the field name,
condition, and values, found in a phrase of a 'find values' statement;
and states whether the attributes describe a valid phrase.
The attributes are:
valid_phrase - True if the phrase can be evaluated.
field - Name of field on database whose value is compared.
above_value - field value matches if greater than above_value.
below_value - field value matches if less than below_value.
from_value - field value matches if greater than or equal from_value.
to_value - field value matches if less than or equal to_value.
like - True if field value matches if it matches like_pattern.
like_pattern - Regular expression to evaluate 'like'.
in_ - True if field value matches if it is in the in__set set of values.
in__set - Iterable of values to evaluate 'in'.
result - List of values found when node is evaluated.
The syntax of the value selection statement leads to these possibilities:
Range is defined by one of the valuesclause attribute sets:
above_value and below_value are not None
above_value and to_value are not None
from_value and to_value are not None
from_value and below_value are not None
above_value is not None
to_value is not None
from_value is not None
below_value is not None
above_value, to_value, from_value, and below_value, are None,
Filters are defined by one of the valuesclause attribute sets:
like is False and like_pattern is None
like is True and like_pattern is not None
in_ is False and in__set is None
in_ is True and in__set is an iterable
Any pairing of the 'like' and 'in_' attribute sets above.
A range and a filter may appear in the same phrase.
"""
def __init__(self):
"""Initialiase a node.
valid_phrase is set False, like and in_ are set True, and the rest are
set None.
"""
self.valid_phrase = False
# Phrase
self.field = None
self.above_value = None
self.below_value = None
self.from_value = None
self.to_value = None
self.like = True
self.like_pattern = None
self.in_ = True
self.in__set = None
# Evaluation
self.result = None
def evaluate_node_result(self, processors):
"""Evaluate self's phrase with the processors FindValues object.
Call processor's find_values() method to evaluate node's phrase and
bind node's result attribute to the answer.
"""
if self.valid_phrase:
processors.find_values(self)
def apply_pattern_and_set_filters_to_value(self, value):
"""Apply 'like' and 'value set' constraints to value.
This method is intended for use as a callback by a FindValues object.
The underlying database engine may, or may not, have internal methods
able to do either or both these functions.
This method assumes the use of Python regular expressions to do 'like'
constraints and Python set operations to do 'value set' constraints.
"""
if self.like_pattern:
if not self.like_pattern.search(value):
if self.like:
return False
elif not self.like:
return False
if self.in__set is not None:
if self.in_:
return value in self.in__set
return value not in self.in__set
return True
def _trim(string):
"""Return string with one leading and trailing ' or " removed.
The two quote characters allow values containing spaces.
"""
if string[0] in "'\"":
return string[1:-1]
return string
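# --- Minimal usage sketch (added for illustration; not part of the original
# module). It only exercises the lexer and parser; evaluate() would need a
# FindValues processor backed by a real database.
if __name__ == "__main__":
    query = WhereValues("surname from 'A' to 'B' like ^Smi")
    query.lex()
    query.parse()
    print(query.tokens)
    node = query.node
    print(node.valid_phrase, node.field, node.from_value, node.to_value)
    print(node.like_pattern.pattern if node.like_pattern else None)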
|
PypiClean
|
/apache-flink-1.17.1.tar.gz/apache-flink-1.17.1/pyflink/fn_execution/beam/beam_boot.py
|
import argparse
import logging
import os
import sys
import grpc
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_fn_api_pb2 import \
StartWorkerRequest
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_fn_api_pb2_grpc import (
BeamFnExternalWorkerPoolStub)
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_provision_api_pb2 \
import GetProvisionInfoRequest
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_provision_api_pb2_grpc \
import ProvisionServiceStub
from apache_beam.portability.api.org.apache.beam.model.pipeline.v1.endpoints_pb2 import (
ApiServiceDescriptor)
from google.protobuf import json_format, text_format
def check_not_empty(check_str, error_message):
if check_str == "":
logging.fatal(error_message)
exit(1)
python_exec = sys.executable
if __name__ == "__main__":
# print INFO and higher level messages
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--id", default="", help="Local identifier (required).")
parser.add_argument("--provision_endpoint", default="",
help="Provision endpoint (required).")
parser.add_argument("--semi_persist_dir", default="/tmp",
help="Local semi-persistent directory (optional).")
args = parser.parse_known_args()[0]
worker_id = args.id
provision_endpoint = args.provision_endpoint
semi_persist_dir = args.semi_persist_dir
check_not_empty(worker_id, "No id provided.")
check_not_empty(provision_endpoint, "No provision endpoint provided.")
logging.info("Initializing Python harness: %s" % " ".join(sys.argv))
if 'PYTHON_LOOPBACK_SERVER_ADDRESS' in os.environ:
logging.info("Starting up Python harness in loopback mode.")
params = dict(os.environ)
params.update({'SEMI_PERSISTENT_DIRECTORY': semi_persist_dir})
with grpc.insecure_channel(os.environ['PYTHON_LOOPBACK_SERVER_ADDRESS']) as channel:
client = BeamFnExternalWorkerPoolStub(channel=channel)
request = StartWorkerRequest(
worker_id=worker_id,
provision_endpoint=ApiServiceDescriptor(url=provision_endpoint),
params=params)
response = client.StartWorker(request)
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
else:
logging.info("Starting up Python harness in a standalone process.")
metadata = [("worker_id", worker_id)]
# read job information from provision stub
with grpc.insecure_channel(provision_endpoint) as channel:
client = ProvisionServiceStub(channel=channel)
info = client.GetProvisionInfo(GetProvisionInfoRequest(), metadata=metadata).info
options = json_format.MessageToJson(info.pipeline_options)
logging_endpoint = info.logging_endpoint.url
control_endpoint = info.control_endpoint.url
os.environ["WORKER_ID"] = worker_id
os.environ["PIPELINE_OPTIONS"] = options
os.environ["SEMI_PERSISTENT_DIRECTORY"] = semi_persist_dir
os.environ["LOGGING_API_SERVICE_DESCRIPTOR"] = text_format.MessageToString(
ApiServiceDescriptor(url=logging_endpoint))
os.environ["CONTROL_API_SERVICE_DESCRIPTOR"] = text_format.MessageToString(
ApiServiceDescriptor(url=control_endpoint))
env = dict(os.environ)
if "FLINK_BOOT_TESTING" in os.environ and os.environ["FLINK_BOOT_TESTING"] == "1":
logging.info("Shut down Python harness due to FLINK_BOOT_TESTING is set.")
exit(0)
from pyflink.fn_execution.beam import beam_sdk_worker_main
beam_sdk_worker_main.main()
|
PypiClean
|
/Pyomo-6.6.2-cp39-cp39-win_amd64.whl/pyomo/common/deprecation.py
|
import logging
import functools
import inspect
import itertools
import sys
import textwrap
import types
from pyomo.common.errors import DeveloperError
_doc_flag = '.. deprecated::'
def default_deprecation_msg(obj, user_msg, version, remove_in):
"""Generate the default deprecation message.
See deprecated() function for argument details.
"""
if user_msg is None:
if inspect.isclass(obj):
_obj = ' class'
elif inspect.ismethod(obj):
_obj = ' method'
elif inspect.isfunction(obj) or inspect.isbuiltin(obj):
_obj = ' function'
else:
            # either @deprecated() was applied to an unknown type, or this
            # was called from deprecation_warning()
_obj = ''
_qual = getattr(obj, '__qualname__', '') or ''
if _qual.endswith('.__init__') or _qual.endswith('.__new__'):
_obj = f' class ({_qual.rsplit(".", 1)[0]})'
elif _qual and _obj:
_obj += f' ({_qual})'
user_msg = (
'This%s has been deprecated and may be removed in a '
'future release.' % (_obj,)
)
comment = []
if version:
comment.append('deprecated in %s' % (version,))
if remove_in:
comment.append('will be removed in (or after) %s' % (remove_in))
if comment:
return user_msg + " (%s)" % (', '.join(comment),)
else:
return user_msg
def _deprecation_docstring(obj, msg, version, remove_in):
# Note that _deprecation_docstring is guaranteed to be called by
# @deprecated in all situations where we would be creating a
# meaningful deprecation message (classes, functions, and methods),
# so this is a convenient place to check that the version is
# specified.
if version is None:
raise DeveloperError("@deprecated(): missing 'version' argument")
return (
f'{_doc_flag} {version}\n'
f' {default_deprecation_msg(obj, msg, None, remove_in)}\n'
)
def _wrap_class(cls, msg, logger, version, remove_in):
_doc = None
# Note: __new_member__ is where enum.Enum buries the user's original
# __new__ method
for field in ('__new__', '__init__', '__new_member__'):
_funcDoc = getattr(getattr(cls, field, None), '__doc__', '') or ''
_flagIdx = _funcDoc.find(_doc_flag)
if _flagIdx >= 0:
_doc = _funcDoc[_flagIdx:]
break
# Note: test 'msg is not None' to revert back to the user-supplied
# message. Checking the fields above is still useful as it lets us know
# if there is already a deprecation message on either new or init.
if msg is not None or _doc is None:
_doc = _deprecation_docstring(cls, msg, version, remove_in)
if cls.__doc__:
_doc = cls.__doc__ + '\n\n' + _doc
cls.__doc__ = 'DEPRECATED.\n\n' + _doc
if _flagIdx < 0:
# No deprecation message on __init__ or __new__: go through and
# find the "most derived" implementation of either __new__ or
# __init__ and wrap that (breaking ties in favor of __init__)
field = '__init__'
for c in reversed(cls.__mro__):
for f in ('__new__', '__init__'):
if getattr(c, f, None) is not getattr(cls, f, None):
field = f
setattr(
cls, field, _wrap_func(getattr(cls, field), msg, logger, version, remove_in)
)
return cls
def _wrap_func(func, msg, logger, version, remove_in):
message = default_deprecation_msg(func, msg, version, remove_in)
@functools.wraps(
func, assigned=('__module__', '__name__', '__qualname__', '__annotations__')
)
def wrapper(*args, **kwargs):
cf = _find_calling_frame(1)
deprecation_warning(message, logger, version='', calling_frame=cf)
return func(*args, **kwargs)
wrapper.__doc__ = 'DEPRECATED.\n\n'
_doc = func.__doc__ or ''
if _doc:
wrapper.__doc__ += _doc + '\n\n'
wrapper.__doc__ += _deprecation_docstring(func, msg, version, remove_in)
return wrapper
def _find_calling_frame(module_offset):
g = [globals()]
calling_frame = inspect.currentframe().f_back
while calling_frame is not None:
if calling_frame.f_globals is g[-1]:
calling_frame = calling_frame.f_back
elif len(g) < module_offset:
g.append(calling_frame.f_globals)
else:
break
return calling_frame
def in_testing_environment():
"""Return True if we are currently running in a "testing" environment
This currently includes if nose, nose2, pytest, or Sphinx are
running (imported).
"""
return any(mod in sys.modules for mod in ('nose', 'nose2', 'pytest', 'sphinx'))
def deprecation_warning(
msg, logger=None, version=None, remove_in=None, calling_frame=None
):
"""Standardized formatter for deprecation warnings
This is a standardized routine for formatting deprecation warnings
so that things look consistent and "nice".
Args:
msg (str): the deprecation message to format
logger (str): the logger to use for emitting the warning
(default: the calling pyomo package, or "pyomo")
version (str): [required] the version in which the decorated
object was deprecated. General practice is to set version
to the current development version (from `pyomo --version`)
during development and update it to the actual release as
part of the release process.
remove_in (str): the version in which the decorated object will be
removed from the code.
calling_frame (frame): the original frame context that triggered
the deprecation warning.
Example
-------
>>> from pyomo.common.deprecation import deprecation_warning
>>> deprecation_warning('This functionality is deprecated.', version='1.2.3')
WARNING: DEPRECATED: This functionality is deprecated. (deprecated in 1.2.3) ...
"""
if version is None:
raise DeveloperError("deprecation_warning() missing 'version' argument")
if logger is None:
if calling_frame is not None:
cf = calling_frame
else:
# The relevant module is the one that holds the
# function/method that called deprecation_warning
cf = _find_calling_frame(1)
if cf is not None:
logger = cf.f_globals.get('__name__', None)
if logger is not None and not logger.startswith('pyomo'):
logger = None
if logger is None:
logger = 'pyomo'
if isinstance(logger, str):
logger = logging.getLogger(logger)
msg = textwrap.fill(
f'DEPRECATED: {default_deprecation_msg(None, msg, version, remove_in)}',
width=70,
)
if calling_frame is None:
# The useful thing to let the user know is what called the
# function that generated the deprecation warning. The current
# globals() is *this* module. Walking up the stack to find the
# frame where the globals() changes tells us the module that is
# issuing the deprecation warning. As we assume that *that*
# module will not trigger its own deprecation warnings, we will
# walk farther up until the globals() changes again.
calling_frame = _find_calling_frame(2)
if calling_frame is not None:
info = inspect.getframeinfo(calling_frame)
msg += "\n(called from %s:%s)" % (info.filename.strip(), info.lineno)
if deprecation_warning.emitted_warnings is not None:
if msg in deprecation_warning.emitted_warnings:
return
deprecation_warning.emitted_warnings.add(msg)
logger.warning(msg)
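# In testing environments (nose, nose2, pytest, or Sphinx) deduplication is
# disabled so that every call emits its warning; otherwise each unique
# message is only logged once per process.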
if in_testing_environment():
deprecation_warning.emitted_warnings = None
else:
deprecation_warning.emitted_warnings = set()
def deprecated(msg=None, logger=None, version=None, remove_in=None):
"""Decorator to indicate that a function, method, or class is deprecated.
This decorator will cause a warning to be logged when the wrapped
function or method is called, or when the deprecated class is
constructed. This decorator also updates the target object's
docstring to indicate that it is deprecated.
Args:
msg (str): a custom deprecation message (default: "This
{function|class} has been deprecated and may be
removed in a future release.")
logger (str): the logger to use for emitting the warning
(default: the calling pyomo package, or "pyomo")
version (str): [required] the version in which the decorated
object was deprecated. General practice is to set version
to the current development version (from `pyomo --version`)
during development and update it to the actual release as
part of the release process.
remove_in (str): the version in which the decorated object will be
removed from the code.
Example
-------
>>> from pyomo.common.deprecation import deprecated
>>> @deprecated(version='1.2.3')
... def sample_function(x):
... return 2*x
>>> sample_function(5)
WARNING: DEPRECATED: This function (sample_function) has been deprecated and
may be removed in a future release. (deprecated in 1.2.3) ...
10
"""
def wrap(obj):
if inspect.isclass(obj):
return _wrap_class(obj, msg, logger, version, remove_in)
else:
return _wrap_func(obj, msg, logger, version, remove_in)
return wrap
def _import_object(name, target, version, remove_in, msg):
from importlib import import_module
modname, targetname = target.rsplit('.', 1)
_object = getattr(import_module(modname), targetname)
if msg is None:
if inspect.isclass(_object):
_type = 'class'
elif inspect.isfunction(_object):
_type = 'function'
else:
_type = 'attribute'
msg = (
f"the '{name}' {_type} has been moved to '{target}'."
" Please update your import."
)
deprecation_warning(msg, version=version, remove_in=remove_in)
return _object
def relocated_module(new_name, msg=None, logger=None, version=None, remove_in=None):
"""Provide a deprecation path for moved / renamed modules
Upon import, the old module (that called `relocated_module()`) will
be replaced in `sys.modules` by an alias that points directly to the
new module. As a result, the old module should have only two lines
of executable Python code (the import of `relocated_module` and the
call to it).
Parameters
----------
new_name: str
The new (fully-qualified) module name
msg: str
A custom deprecation message.
logger: str
The logger to use for emitting the warning (default: the calling
pyomo package, or "pyomo")
version: str [required]
The version in which the module was renamed or moved. General
practice is to set version to the current development version
(from `pyomo --version`) during development and update it to the
actual release as part of the release process.
remove_in: str
The version in which the module will be removed from the code.
Example
-------
>>> from pyomo.common.deprecation import relocated_module
>>> relocated_module('pyomo.common.deprecation', version='1.2.3')
WARNING: DEPRECATED: The '...' module has been moved to
'pyomo.common.deprecation'. Please update your import.
(deprecated in 1.2.3) ...
"""
from importlib import import_module
new_module = import_module(new_name)
# The relevant module (the one being deprecated) is the one that
# holds the function/method that called deprecated_module(). The
# relevant calling frame for the deprecation warning is the first
# frame in the stack that doesn't look like the importer (i.e., the
# thing that imported the deprecated module).
cf = _find_calling_frame(1)
old_name = cf.f_globals.get('__name__', '<stdin>')
cf = cf.f_back
if cf is not None:
importer = cf.f_back.f_globals['__name__'].split('.')[0]
while cf is not None and cf.f_globals['__name__'].split('.')[0] == importer:
cf = cf.f_back
if cf is None:
cf = _find_calling_frame(1)
sys.modules[old_name] = new_module
if msg is None:
msg = (
f"The '{old_name}' module has been moved to '{new_name}'. "
'Please update your import.'
)
deprecation_warning(msg, logger, version, remove_in, cf)
def relocated_module_attribute(
local, target, version, remove_in=None, msg=None, f_globals=None
):
"""Provide a deprecation path for moved / renamed module attributes
This function declares that a local module attribute has been moved
to another location. For Python 3.7+, it leverages a
module.__getattr__ method to manage the deferred import of the
object from the new location (on request), as well as emitting the
deprecation warning.
Parameters
----------
local: str
The original (local) name of the relocated attribute
target: str
The new absolute import name of the relocated attribute
version: str
The Pyomo version when this move was released
(passed to deprecation_warning)
remove_in: str
The Pyomo version when this deprecation path will be removed
(passed to deprecation_warning)
msg: str
If not None, then this specifies a custom deprecation message to
be emitted when the attribute is accessed from its original
location.
"""
if version is None:
raise DeveloperError("relocated_module_attribute(): missing 'version' argument")
# Historical note: This method only works for Python >= 3.7. There
# were backports to previous Python interpreters, but were removed
# after SHA 4e04819aaeefc2c08b7718460918885e12343451
if f_globals is None:
f_globals = inspect.currentframe().f_back.f_globals
if f_globals['__name__'].startswith('importlib.'):
raise DeveloperError(
"relocated_module_attribute() called from a cythonized "
"module without passing f_globals"
)
_relocated = f_globals.get('__relocated_attrs__', None)
if _relocated is None:
f_globals['__relocated_attrs__'] = _relocated = {}
_mod_getattr = f_globals.get('__getattr__', None)
def __getattr__(name):
info = _relocated.get(name, None)
if info is not None:
target_obj = _import_object(name, *info)
f_globals[name] = target_obj
return target_obj
elif _mod_getattr is not None:
return _mod_getattr(name)
raise AttributeError(
"module '%s' has no attribute '%s'" % (f_globals['__name__'], name)
)
f_globals['__getattr__'] = __getattr__
_relocated[local] = (target, version, remove_in, msg)
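# Illustrative sketch (hypothetical module and attribute names): a module that
# used to define 'OldName' and now re-exports it from 'mypkg.new_location'
# could declare the move at module scope so that the first access resolves
# lazily through the injected __getattr__ and emits a deprecation warning:
#
#     from pyomo.common.deprecation import relocated_module_attribute
#     relocated_module_attribute(
#         'OldName', 'mypkg.new_location.OldName', version='1.2.3')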
class RenamedClass(type):
"""Metaclass to provide a deprecation path for renamed classes
This metaclass provides a mechanism for renaming old classes while
still preserving isinstance / issubclass relationships.
Examples
--------
>>> from pyomo.common.deprecation import RenamedClass
>>> class NewClass(object):
... pass
>>> class OldClass(metaclass=RenamedClass):
... __renamed__new_class__ = NewClass
... __renamed__version__ = '6.0'
Deriving from the old class generates a warning:
>>> class DerivedOldClass(OldClass):
... pass
WARNING: DEPRECATED: Declaring class 'DerivedOldClass' derived from
'OldClass'. The class 'OldClass' has been renamed to 'NewClass'.
(deprecated in 6.0) ...
As does instantiating the old class:
>>> old = OldClass()
WARNING: DEPRECATED: Instantiating class 'OldClass'. The class
'OldClass' has been renamed to 'NewClass'. (deprecated in 6.0) ...
Finally, `isinstance` and `issubclass` still work, for example:
>>> isinstance(old, NewClass)
True
>>> class NewSubclass(NewClass):
... pass
>>> new = NewSubclass()
>>> isinstance(new, OldClass)
WARNING: DEPRECATED: Checking type relative to 'OldClass'. The class
'OldClass' has been renamed to 'NewClass'. (deprecated in 6.0) ...
True
"""
def __new__(cls, name, bases, classdict, *args, **kwargs):
new_class = classdict.get('__renamed__new_class__', None)
if new_class is not None:
def __renamed__new__(cls, *args, **kwargs):
cls.__renamed__warning__("Instantiating class '%s'." % (cls.__name__,))
return new_class(*args, **kwargs)
classdict['__new__'] = __renamed__new__
def __renamed__warning__(msg):
version = classdict.get('__renamed__version__')
remove_in = classdict.get('__renamed__remove_in__')
deprecation_warning(
"%s The class '%s' has been renamed to '%s'."
% (msg, name, new_class.__name__),
version=version,
remove_in=remove_in,
calling_frame=_find_calling_frame(1),
)
classdict['__renamed__warning__'] = __renamed__warning__
if not classdict.get('__renamed__version__'):
raise DeveloperError(
"Declaring class '%s' using the RenamedClass metaclass, "
"but without specifying the __renamed__version__ class "
"attribute" % (name,)
)
renamed_bases = []
for base in bases:
new_class = getattr(base, '__renamed__new_class__', None)
if new_class is not None:
base.__renamed__warning__(
"Declaring class '%s' derived from '%s'." % (name, base.__name__)
)
base = new_class
# Flag that this class is derived from a renamed class
classdict.setdefault('__renamed__new_class__', None)
# Avoid duplicates (in case someone does a diamond between
# the renamed class and [a class derived from] the new
# class)
if base not in renamed_bases:
renamed_bases.append(base)
# Add the new class as a "base class" of the renamed class (this
# makes issubclass(renamed, new_class) work correctly). As we
# still never create an actual instance of renamed, this doesn't
# affect the API)
if new_class is not None and new_class not in renamed_bases:
renamed_bases.append(new_class)
if new_class is None and '__renamed__new_class__' not in classdict:
if not any(
hasattr(base, '__renamed__new_class__')
for mro in itertools.chain.from_iterable(
base.__mro__ for base in renamed_bases
)
):
raise TypeError(
"Declaring class '%s' using the RenamedClass metaclass, "
"but without specifying the __renamed__new_class__ class "
"attribute" % (name,)
)
return super().__new__(
cls, name, tuple(renamed_bases), classdict, *args, **kwargs
)
def __instancecheck__(cls, instance):
# Note: the warning is issued by subclasscheck
return any(
cls.__subclasscheck__(c) for c in {type(instance), instance.__class__}
)
def __subclasscheck__(cls, subclass):
if hasattr(cls, '__renamed__warning__'):
cls.__renamed__warning__(
"Checking type relative to '%s'." % (cls.__name__,)
)
if subclass is cls:
return True
elif getattr(cls, '__renamed__new_class__') is not None:
return issubclass(subclass, getattr(cls, '__renamed__new_class__'))
else:
return super().__subclasscheck__(subclass)
|
PypiClean
|
/MESSI_RCCLab-0.0.12.tar.gz/MESSI_RCCLab-0.0.12/README.md
|
# MESSI
## Multi Ensamble Strategy for Structural Elucidation
Authors: Ariel M. Sarotti & María M. Zanardi
Usage: `MESSI_RCCLab: Multi Ensamble Strategy for Structural Elucidation`
### Installation Requirements
**MESSI.py** needs Python 3.8 or later to work. You can install the module from the command line using:
`pip3 install MESSI`
### User Guide
You need to create a folder containing the following files:
1. The Gaussian output files of the NMR and NBO calculations (all conformers for all isomers).
2. The Excel file containing the experimental data and the labels of each nucleus associated with each experimental value.
See the [project repository](https://github.com/Sarotti-Lab/ML_J_DP4) for more details.
(We still have to set up the GitHub repository.)
|
PypiClean
|
/sIBL_GUI-4.0.8.tar.gz/sIBL_GUI-4.0.8/sibl_gui/components/addons/loaderScriptOptions/loaderScriptOptions.py
|
#**********************************************************************************************************************
#*** Future imports.
#**********************************************************************************************************************
from __future__ import unicode_literals
#**********************************************************************************************************************
#*** External imports.
#**********************************************************************************************************************
import os
import re
import sys
if sys.version_info[:2] <= (2, 6):
from ordereddict import OrderedDict
else:
from collections import OrderedDict
from PyQt4.QtCore import QString
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QColor
from PyQt4.QtGui import QComboBox
from PyQt4.QtGui import QDoubleSpinBox
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QLineEdit
from PyQt4.QtGui import QPalette
#**********************************************************************************************************************
#*** Internal imports.
#**********************************************************************************************************************
import foundations.common
import foundations.exceptions
import foundations.io
import foundations.parsers
import foundations.strings
import foundations.verbose
import umbra.exceptions
import umbra.ui.common
from foundations.parsers import SectionsFileParser
from manager.qwidgetComponent import QWidgetComponentFactory
from sibl_gui.components.addons.loaderScriptOptions.views import TemplatesAttributes_QTableWidget
from umbra.globals.constants import Constants
from umbra.ui.widgets.variable_QPushButton import Variable_QPushButton
#**********************************************************************************************************************
#*** Module attributes.
#**********************************************************************************************************************
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = ["LOGGER", "COMPONENT_UI_FILE", "LoaderScriptOptions"]
LOGGER = foundations.verbose.installLogger()
COMPONENT_UI_FILE = os.path.join(os.path.dirname(__file__), "ui", "Loader_Script_Options.ui")
#**********************************************************************************************************************
#*** Module classes and definitions.
#**********************************************************************************************************************
class LoaderScriptOptions(QWidgetComponentFactory(uiFile=COMPONENT_UI_FILE)):
"""
    | Defines the :mod:`sibl_gui.components.addons.loaderScriptOptions.loaderScriptOptions` Component Interface class.
| It provides override keys on request for the :mod:`sibl_gui.components.addons.loaderScript.loaderScript` Component.
| It exposes Templates files **Common Attributes** and **Additional Attributes** sections so that
the user can configure the behavior of the Loader Script.
"""
def __init__(self, parent=None, name=None, *args, **kwargs):
"""
Initializes the class.
:param parent: Object parent.
:type parent: QObject
:param name: Component name.
:type name: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
super(LoaderScriptOptions, self).__init__(parent, name, *args, **kwargs)
# --- Setting class attributes. ---
self.deactivatable = True
self.__dockArea = 2
self.__engine = None
self.__templatesOutliner = None
self.__loaderScript = None
self.__views = None
self.__commonView = None
self.__additionalView = None
self.__namespaceSplitter = "|"
self.__templatesSettingsDirectory = "templates/"
self.__templateSettingsFile = None
self.__templateCommonAttributesSection = "Common Attributes"
self.__templateAdditionalAttributesSection = "Additional Attributes"
self.__templateScriptSection = "Script"
self.__optionsToolboxesHeaders = ["Value"]
self.__uiLightGrayColor = QColor(240, 240, 240)
self.__uiDarkGrayColor = QColor(160, 160, 160)
self.__enumSplitter = ";"
#******************************************************************************************************************
#*** Attributes properties.
#******************************************************************************************************************
@property
def dockArea(self):
"""
Property for **self.__dockArea** attribute.
:return: self.__dockArea.
:rtype: int
"""
return self.__dockArea
@dockArea.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def dockArea(self, value):
"""
Setter for **self.__dockArea** attribute.
:param value: Attribute value.
:type value: int
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "dockArea"))
@dockArea.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def dockArea(self):
"""
Deleter for **self.__dockArea** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "dockArea"))
@property
def engine(self):
"""
Property for **self.__engine** attribute.
:return: self.__engine.
:rtype: QObject
"""
return self.__engine
@engine.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def engine(self, value):
"""
Setter for **self.__engine** attribute.
:param value: Attribute value.
:type value: QObject
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "engine"))
@engine.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def engine(self):
"""
Deleter for **self.__engine** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "engine"))
@property
def templatesOutliner(self):
"""
Property for **self.__templatesOutliner** attribute.
:return: self.__templatesOutliner.
:rtype: QWidget
"""
return self.__templatesOutliner
@templatesOutliner.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templatesOutliner(self, value):
"""
Setter for **self.__templatesOutliner** attribute.
:param value: Attribute value.
:type value: QWidget
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "templatesOutliner"))
@templatesOutliner.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templatesOutliner(self):
"""
Deleter for **self.__templatesOutliner** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "templatesOutliner"))
@property
def loaderScript(self):
"""
Property for **self.__loaderScript** attribute.
:return: self.__loaderScript.
:rtype: QWidget
"""
return self.__loaderScript
@loaderScript.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def loaderScript(self, value):
"""
Setter for **self.__loaderScript** attribute.
:param value: Attribute value.
:type value: QWidget
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "loaderScript"))
@loaderScript.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def loaderScript(self):
"""
Deleter for **self.__loaderScript** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "loaderScript"))
@property
def views(self):
"""
Property for **self.__views** attribute.
:return: self.__views.
:rtype: tuple
"""
return self.__views
@views.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def views(self, value):
"""
Setter for **self.__views** attribute.
:param value: Attribute value.
:type value: tuple
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "views"))
@views.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def views(self):
"""
Deleter for **self.__views** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "views"))
@property
def commonView(self):
"""
Property for **self.__commonView** attribute.
:return: self.__commonView.
:rtype: QListView
"""
return self.__commonView
@commonView.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def commonView(self, value):
"""
Setter for **self.__commonView** attribute.
:param value: Attribute value.
:type value: QListView
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "commonView"))
@commonView.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def commonView(self):
"""
Deleter for **self.__commonView** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "view"))
@property
def additionalView(self):
"""
Property for **self.__additionalView** attribute.
:return: self.__additionalView.
:rtype: QListView
"""
return self.__additionalView
@additionalView.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def additionalView(self, value):
"""
Setter for **self.__additionalView** attribute.
:param value: Attribute value.
:type value: QListView
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "additionalView"))
@additionalView.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def additionalView(self):
"""
Deleter for **self.__additionalView** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "view"))
@property
def namespaceSplitter(self):
"""
Property for **self.__namespaceSplitter** attribute.
:return: self.__namespaceSplitter.
:rtype: unicode
"""
return self.__namespaceSplitter
@namespaceSplitter.setter
@foundations.exceptions.handleExceptions(AssertionError)
def namespaceSplitter(self, value):
"""
Setter for **self.__namespaceSplitter** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"namespaceSplitter", value)
assert len(value) == 1, "'{0}' attribute: '{1}' has multiples characters!".format(
"namespaceSplitter", value)
assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
"namespaceSplitter", value)
self.__namespaceSplitter = value
@namespaceSplitter.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def namespaceSplitter(self):
"""
Deleter for **self.__namespaceSplitter** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "namespaceSplitter"))
@property
def templatesSettingsDirectory(self):
"""
Property for **self.__templatesSettingsDirectory** attribute.
:return: self.__templatesSettingsDirectory.
:rtype: unicode
"""
return self.__templatesSettingsDirectory
@templatesSettingsDirectory.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templatesSettingsDirectory(self, value):
"""
Setter for **self.__templatesSettingsDirectory** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "templatesSettingsDirectory"))
@templatesSettingsDirectory.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templatesSettingsDirectory(self):
"""
Deleter for **self.__templatesSettingsDirectory** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "templatesSettingsDirectory"))
@property
def templateSettingsFile(self):
"""
Property for **self.__templateSettingsFile** attribute.
:return: self.__templateSettingsFile.
:rtype: unicode
"""
return self.__templateSettingsFile
@templateSettingsFile.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templateSettingsFile(self, value):
"""
Setter for **self.__templateSettingsFile** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "templateSettingsFile"))
@templateSettingsFile.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templateSettingsFile(self):
"""
Deleter for **self.__templateSettingsFile** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "templateSettingsFile"))
@property
def templateCommonAttributesSection(self):
"""
Property for **self.__templateCommonAttributesSection** attribute.
:return: self.__templateCommonAttributesSection.
:rtype: unicode
"""
return self.__templateCommonAttributesSection
@templateCommonAttributesSection.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templateCommonAttributesSection(self, value):
"""
Setter for **self.__templateCommonAttributesSection** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "templateCommonAttributesSection"))
@templateCommonAttributesSection.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templateCommonAttributesSection(self):
"""
Deleter for **self.__templateCommonAttributesSection** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "templateCommonAttributesSection"))
@property
def templateAdditionalAttributesSection(self):
"""
Property for **self.__templateAdditionalAttributesSection** attribute.
:return: self.__templateAdditionalAttributesSection.
:rtype: unicode
"""
return self.__templateAdditionalAttributesSection
@templateAdditionalAttributesSection.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templateAdditionalAttributesSection(self, value):
"""
Setter for **self.__templateAdditionalAttributesSection** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "templateAdditionalAttributesSection"))
@templateAdditionalAttributesSection.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templateAdditionalAttributesSection(self):
"""
Deleter for **self.__templateAdditionalAttributesSection** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(
self.__class__.__name__, "templateAdditionalAttributesSection"))
@property
def templateScriptSection(self):
"""
Property for **self.__templateScriptSection** attribute.
:return: self.__templateScriptSection.
:rtype: unicode
"""
return self.__templateScriptSection
@templateScriptSection.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templateScriptSection(self, value):
"""
Setter for **self.__templateScriptSection** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "templateScriptSection"))
@templateScriptSection.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def templateScriptSection(self):
"""
Deleter for **self.__templateScriptSection** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "templateScriptSection"))
@property
def optionsToolboxesHeaders(self):
"""
Property for **self.__optionsToolboxesHeaders** attribute.
:return: self.__optionsToolboxesHeaders.
:rtype: list
"""
return self.__optionsToolboxesHeaders
@optionsToolboxesHeaders.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def optionsToolboxesHeaders(self, value):
"""
Setter for **self.__optionsToolboxesHeaders** attribute.
:param value: Attribute value.
:type value: list
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "optionsToolboxesHeaders"))
@optionsToolboxesHeaders.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def optionsToolboxesHeaders(self):
"""
Deleter for **self.__optionsToolboxesHeaders** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "optionsToolboxesHeaders"))
@property
def uiLightGrayColor(self):
"""
Property for **self.__uiLightGrayColor** attribute.
:return: self.__uiLightGrayColor.
:rtype: QColor
"""
return self.__uiLightGrayColor
@uiLightGrayColor.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def uiLightGrayColor(self, value):
"""
Setter for **self.__uiLightGrayColor** attribute.
:param value: Attribute value.
:type value: QColor
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "uiLightGrayColor"))
@uiLightGrayColor.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def uiLightGrayColor(self):
"""
Deleter for **self.__uiLightGrayColor** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "uiLightGrayColor"))
@property
def uiDarkGrayColor(self):
"""
Property for **self.__uiDarkGrayColor** attribute.
:return: self.__uiDarkGrayColor.
:rtype: QColor
"""
return self.__uiDarkGrayColor
@uiDarkGrayColor.setter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def uiDarkGrayColor(self, value):
"""
Setter for **self.__uiDarkGrayColor** attribute.
:param value: Attribute value.
:type value: QColor
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "uiDarkGrayColor"))
@uiDarkGrayColor.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def uiDarkGrayColor(self):
"""
Deleter for **self.__uiDarkGrayColor** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "uiDarkGrayColor"))
@property
def enumSplitter(self):
"""
Property for **self.__enumSplitter** attribute.
:return: self.__enumSplitter.
:rtype: unicode
"""
return self.__enumSplitter
@enumSplitter.setter
@foundations.exceptions.handleExceptions(AssertionError)
def enumSplitter(self, value):
"""
Setter for **self.__enumSplitter** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"enumSplitter", value)
assert len(value) == 1, "'{0}' attribute: '{1}' has multiples characters!".format("enumSplitter", value)
assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
"enumSplitter", value)
self.__enumSplitter = value
@enumSplitter.deleter
@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
def enumSplitter(self):
"""
Deleter for **self.__enumSplitter** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "enumSplitter"))
#******************************************************************************************************************
#*** Class methods.
#******************************************************************************************************************
def activate(self, engine):
"""
Activates the Component.
:param engine: Engine to attach the Component to.
:type engine: QObject
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Activating '{0}' Component.".format(self.__class__.__name__))
self.__engine = engine
self.__templatesOutliner = self.__engine.componentsManager["core.templatesOutliner"]
self.__loaderScript = self.__engine.componentsManager["addons.loaderScript"]
self.__templatesSettingsDirectory = os.path.join(self.__engine.userApplicationDataDirectory,
Constants.settingsDirectory,
self.__templatesSettingsDirectory)
not foundations.common.pathExists(self.__templatesSettingsDirectory) and \
os.makedirs(self.__templatesSettingsDirectory)
self.__templateSettingsFile = None
self.activated = True
return True
def deactivate(self):
"""
Deactivates the Component.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Deactivating '{0}' Component.".format(self.__class__.__name__))
self.__engine = None
self.__templatesOutliner = None
self.__loaderScript = None
self.__templatesSettingsDirectory = os.path.basename(os.path.abspath(self.__templatesSettingsDirectory))
self.__templateSettingsFile = None
self.activated = False
return True
def initializeUi(self):
"""
Initializes the Component ui.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__))
umbra.ui.common.setToolBoxHeight(self.Loader_Script_Options_toolBox)
self.Common_Attributes_tableWidget.setParent(None)
self.Common_Attributes_tableWidget = TemplatesAttributes_QTableWidget(self, message="No Attributes to view!")
self.Common_Attributes_tableWidget.setObjectName("Common_Attributes_tableWidget")
self.Common_Attributes_page_gridLayout.addWidget(self.Common_Attributes_tableWidget, 0, 0)
self.__commonView = self.Common_Attributes_tableWidget
self.Additional_Attributes_tableWidget.setParent(None)
self.Additional_Attributes_tableWidget = TemplatesAttributes_QTableWidget(self, message="No Attributes to view!")
self.Additional_Attributes_tableWidget.setObjectName("Additional_Attributes_tableWidget")
self.Additional_Attributes_page_gridLayout.addWidget(self.Additional_Attributes_tableWidget, 0, 0)
self.__additionalView = self.Additional_Attributes_tableWidget
self.__views = (self.__commonView, self.__additionalView)
# Signals / Slots.
self.__templatesOutliner.view.selectionModel().selectionChanged.connect(
self.__templatesOutliner_view_selectionModel__selectionChanged)
self.initializedUi = True
return True
def uninitializeUi(self):
"""
Uninitializes the Component ui.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Uninitializing '{0}' Component ui.".format(self.__class__.__name__))
self.__views = None
self.__commonView = None
self.__additionalView = None
# Signals / Slots.
self.__templatesOutliner.view.selectionModel().selectionChanged.disconnect(
self.__templatesOutliner_view_selectionModel__selectionChanged)
self.initializedUi = False
return True
def addWidget(self):
"""
Adds the Component Widget to the engine.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Adding '{0}' Component Widget.".format(self.__class__.__name__))
self.__engine.addDockWidget(Qt.DockWidgetArea(self.__dockArea), self)
return True
def removeWidget(self):
"""
Removes the Component Widget from the engine.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Removing '{0}' Component Widget.".format(self.__class__.__name__))
self.__engine.removeDockWidget(self)
self.setParent(None)
return True
@foundations.exceptions.handleExceptions(ValueError)
def __view_setUi(self, section, view, overrides):
"""
Defines and sets the given View.
:param section: Section attributes.
:type section: dict
:param view: Table Widget.
:type view: QTableWidget
:param overrides: Attributes overrides.
:type overrides: dict
"""
LOGGER.debug("> Updating '{0}'.".format(view.objectName()))
view.hide()
self.__view_clearUi(view)
view.setRowCount(len(section))
view.setColumnCount(len(self.__optionsToolboxesHeaders))
view.horizontalHeader().setStretchLastSection(True)
view.setHorizontalHeaderLabels(self.__optionsToolboxesHeaders)
view.horizontalHeader().hide()
palette = QPalette()
palette.setColor(QPalette.Base, Qt.transparent)
view.setPalette(palette)
verticalHeaderLabels = []
for row, attribute in enumerate(section):
LOGGER.debug("> Current attribute: '{0}'.".format(attribute))
overridesValue = overrides[attribute] if attribute in overrides else None
LOGGER.debug("> Settings value: '{0}'.".format(overridesValue or Constants.nullObject))
attributeCompound = foundations.parsers.getAttributeCompound(attribute, section[attribute])
            if attributeCompound.alias:
verticalHeaderLabels.append(attributeCompound.alias)
else:
verticalHeaderLabels.append(foundations.strings.getNiceName(attributeCompound.name))
LOGGER.debug("> Attribute type: '{0}'.".format(attributeCompound.type))
if attributeCompound.type == "Boolean":
state = True if int(overridesValue if overridesValue is not None else attributeCompound.value) else False
item = Variable_QPushButton(self,
state,
(self.__uiLightGrayColor, self.__uiDarkGrayColor),
("True", "False"))
item.setObjectName("Spread_Sheet_pushButton")
item.setChecked(state)
# Signals / Slots.
item.clicked.connect(self.__view__valueChanged)
elif attributeCompound.type == "Float":
item = QDoubleSpinBox()
item.setMinimum(0)
item.setMaximum(65535)
item.setValue(float(overridesValue if overridesValue is not None else attributeCompound.value))
# Signals / Slots.
item.valueChanged.connect(self.__view__valueChanged)
elif attributeCompound.type == "Enum":
item = QComboBox()
comboBoxItems = [enumItem.strip() for enumItem in attributeCompound.value.split(self.__enumSplitter)]
item.addItems(comboBoxItems)
if overridesValue in comboBoxItems:
item.setCurrentIndex(comboBoxItems.index(overridesValue))
# Signals / Slots.
item.currentIndexChanged.connect(self.__view__valueChanged)
elif attributeCompound.type == "String":
item = QLineEdit(QString(overridesValue if overridesValue is not None else attributeCompound.value))
item.setAlignment(Qt.AlignCenter)
# Signals / Slots.
item.editingFinished.connect(self.__view__valueChanged)
else:
item = QLabel(QString("Attribute Type Error!"))
item.setStyleSheet("QLabel {background-color: rgb(210, 64, 32);}")
item.setAlignment(Qt.AlignCenter)
item.data = attributeCompound
view.setCellWidget(row, 0, item)
view.setVerticalHeaderLabels(verticalHeaderLabels)
view.show()
def __view_clearUi(self, view):
"""
Defines and sets the given View.
:param view: Table Widget.
:type view: QTableWidget
"""
LOGGER.debug("> Clearing '{0}'.".format(view.objectName()))
view.clear()
view.setRowCount(0)
view.setColumnCount(0)
def __views_setUi(self):
"""
Sets the Views.
"""
selectedTemplates = self.__templatesOutliner.getSelectedTemplates()
template = foundations.common.getFirstItem(selectedTemplates)
if not (template and foundations.common.pathExists(template.path)):
for view in self.__views:
self.__view_clearUi(view)
return
LOGGER.debug("> Attempting to read '{0}' Template settings file.".format(template.name))
commonAttributesOverrides = {}
additionalAttributesOverrides = {}
templateSettingsDirectory = os.path.join(self.__templatesSettingsDirectory, template.software, template.name)
currentTemplateSettingsDirectory = os.path.join(templateSettingsDirectory, template.release)
self.__templateSettingsFile = os.path.join(templateSettingsDirectory,
template.release,
os.path.basename(template.path))
not foundations.common.pathExists(currentTemplateSettingsDirectory) and \
foundations.io.setDirectory(currentTemplateSettingsDirectory)
templateSettingsFile = None
if foundations.common.pathExists(self.__templateSettingsFile):
templateSettingsFile = self.__templateSettingsFile
else:
for version in sorted((
path for path in os.listdir(templateSettingsDirectory)
if re.search(r"\d\.\d\.\d", path)), reverse=True, key=lambda x:(foundations.strings.getVersionRank(x))):
path = os.path.join(templateSettingsDirectory, version, os.path.basename(template.path))
if foundations.common.pathExists(path):
templateSettingsFile = path
break
if templateSettingsFile:
LOGGER.debug("> Accessing '{0}' Template settings file: '{1}'.".format(template.name, templateSettingsFile))
templateSettingsSectionsFileParser = SectionsFileParser(templateSettingsFile)
templateSettingsSectionsFileParser.parse()
commonAttributesOverrides.update(
templateSettingsSectionsFileParser.sections[self.__templateCommonAttributesSection])
additionalAttributesOverrides.update(
templateSettingsSectionsFileParser.sections[self.__templateAdditionalAttributesSection])
else:
LOGGER.debug("> No Template settings file found for : '{0}'.".format(template.name))
LOGGER.debug("> Parsing '{0}' Template for '{1}' and '{2}' section.".format(
template.name, self.__templateCommonAttributesSection, self.__templateAdditionalAttributesSection))
templateSectionsFileParser = SectionsFileParser(template.path)
templateSectionsFileParser.parse(rawSections=(self.__templateScriptSection))
self.__view_setUi(templateSectionsFileParser.sections.get(self.__templateCommonAttributesSection, {}),
self.__commonView, commonAttributesOverrides)
self.__view_setUi(templateSectionsFileParser.sections.get(self.__templateAdditionalAttributesSection, {}),
self.__additionalView, additionalAttributesOverrides)
def __view__valueChanged(self, *args):
"""
Defines the slot triggered by a View when value changed.
:param \*args: Arguments.
:type \*args: \*
"""
LOGGER.debug("> Initializing '{0}' Template settings file content.".format(self.__templateSettingsFile))
templateSettingsSectionsFileParser = SectionsFileParser(self.__templateSettingsFile)
templateSettingsSectionsFileParser.sections = OrderedDict()
for section, view in OrderedDict([(self.__templateCommonAttributesSection,
self.Common_Attributes_tableWidget),
(self.__templateAdditionalAttributesSection,
self.Additional_Attributes_tableWidget)]).iteritems():
templateSettingsSectionsFileParser.sections[section] = OrderedDict()
for row in range(view.rowCount()):
widget = view.cellWidget(row, 0)
if type(widget) is Variable_QPushButton:
value = widget.text() == "True" and "1" or "0"
elif type(widget) is QDoubleSpinBox:
value = foundations.strings.toString(widget.value())
elif type(widget) is QComboBox:
value = foundations.strings.toString(widget.currentText())
else:
value = foundations.strings.toString(widget.text())
templateSettingsSectionsFileParser.sections[
section][foundations.namespace.removeNamespace(widget.data.name)] = value
templateSettingsSectionsFileParser.write()
def __templatesOutliner_view_selectionModel__selectionChanged(self, selectedItems, deselectedItems):
"""
Defines the slot triggered by **templatesOutliner.view** Model when selection changed
:param selectedItems: Selected items.
:type selectedItems: QItemSelection
:param deselectedItems: Deselected items.
:type deselectedItems: QItemSelection
"""
self.__views_setUi()
def __updateOverrideKeys(self, view):
"""
Updates the Loader Script Component override keys.
:param view: Table Widget.
:type view: QTableWidget
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Updating override keys with '{0}' attributes.".format(view.objectName()))
for row in range(view.rowCount()):
widget = view.cellWidget(row, 0)
if type(widget) is Variable_QPushButton:
value = widget.text() == "True" and "1" or "0"
elif type(widget) is QDoubleSpinBox:
value = foundations.strings.toString(widget.value())
elif type(widget) is QComboBox:
value = foundations.strings.toString(widget.currentText())
else:
value = foundations.strings.toString(widget.text())
widget.data.value = value
LOGGER.debug("> Adding '{0}' override key with value: '{1}'.".format(widget.data.name, widget.data.value))
self.__loaderScript.overrideKeys[widget.data.name] = widget.data
return True
@foundations.exceptions.handleExceptions(umbra.exceptions.notifyExceptionHandler, Exception)
def getOverrideKeys(self):
"""
Gets override keys.
:return: Method success.
:rtype: bool
"""
LOGGER.info("{0} | Updating Loader Script override keys!".format(self.__class__.__name__))
success = True
success *= self.__updateOverrideKeys(self.Common_Attributes_tableWidget) or False
success *= self.__updateOverrideKeys(self.Additional_Attributes_tableWidget) or False
if success:
return True
else:
raise Exception("{0} | Exception raised while retrieving override keys!".format(self.__class__.__name__))
|
PypiClean
|
/python-openid3-2.2.6.zip/python-openid3-2.2.6/openid/test/discoverdata.py
|
import urllib.parse
import os.path
from openid.yadis.discover import DiscoveryResult, DiscoveryFailure
from openid.yadis.constants import YADIS_HEADER_NAME
tests_dir = os.path.dirname(__file__)
data_path = os.path.join(tests_dir, 'data')
testlist = [
# success, input_name, id_name, result_name
(True, "equiv", "equiv", "xrds"),
(True, "header", "header", "xrds"),
(True, "lowercase_header", "lowercase_header", "xrds"),
(True, "xrds", "xrds", "xrds"),
(True, "xrds_ctparam", "xrds_ctparam", "xrds_ctparam"),
(True, "xrds_ctcase", "xrds_ctcase", "xrds_ctcase"),
(False, "xrds_html", "xrds_html", "xrds_html"),
(True, "redir_equiv", "equiv", "xrds"),
(True, "redir_header", "header", "xrds"),
(True, "redir_xrds", "xrds", "xrds"),
(False, "redir_xrds_html", "xrds_html", "xrds_html"),
(True, "redir_redir_equiv", "equiv", "xrds"),
(False, "404_server_response", None, None),
(False, "404_with_header", None, None),
(False, "404_with_meta", None, None),
(False, "201_server_response", None, None),
(False, "500_server_response", None, None),
]
def getDataName(*components):
sanitized = []
for part in components:
if part in ['.', '..']:
raise ValueError
elif part:
sanitized.append(part)
if not sanitized:
raise ValueError
return os.path.join(data_path, *sanitized)
def getExampleXRDS():
filename = getDataName('example-xrds.xml')
    return open(filename).read()
example_xrds = getExampleXRDS()
default_test_file = getDataName('test1-discover.txt')
discover_tests = {}
def readTests(filename):
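    # Test cases are stored in a single file as form-feed separated records
    # ('\f\n'); the first line of each record is the test name and the rest
    # is its content.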
    data = open(filename).read()
tests = {}
for case in data.split('\f\n'):
(name, content) = case.split('\n', 1)
tests[name] = content
return tests
def getData(filename, name):
global discover_tests
try:
file_tests = discover_tests[filename]
except KeyError:
file_tests = discover_tests[filename] = readTests(filename)
return file_tests[name]
def fillTemplate(test_name, template, base_url, example_xrds):
mapping = [
('URL_BASE/', base_url),
('<XRDS Content>', example_xrds),
('YADIS_HEADER', YADIS_HEADER_NAME),
('NAME', test_name),
]
for k, v in mapping:
template = template.replace(k, v)
return template
def generateSample(test_name, base_url,
example_xrds=example_xrds,
filename=default_test_file):
try:
template = getData(filename, test_name)
except IOError as why:
        import errno
        if why.errno == errno.ENOENT:
            raise KeyError(filename)
else:
raise
return fillTemplate(test_name, template, base_url, example_xrds)
def generateResult(base_url, input_name, id_name, result_name, success):
input_url = urllib.parse.urljoin(base_url, input_name)
# If the name is None then we expect the protocol to fail, which
# we represent by None
if id_name is None:
assert result_name is None
return input_url, DiscoveryFailure
result = generateSample(result_name, base_url)
headers, content = result.split('\n\n', 1)
header_lines = headers.split('\n')
for header_line in header_lines:
if header_line.startswith('Content-Type:'):
_, ctype = header_line.split(':', 1)
ctype = ctype.strip()
break
else:
ctype = None
id_url = urllib.parse.urljoin(base_url, id_name)
result = DiscoveryResult(input_url)
result.normalized_uri = id_url
if success:
result.xrds_uri = urllib.parse.urljoin(base_url, result_name)
result.content_type = ctype
result.response_text = content
return input_url, result
|
PypiClean
|
/pyFipper-0.1.3.tar.gz/pyFipper-0.1.3/pyrogram/methods/messages/stream_media.py
|
import math
from typing import Union, Optional, AsyncGenerator
import pyrogram
from pyrogram import types
from pyrogram.file_id import FileId
class StreamMedia:
async def stream_media(
self: "pyrogram.Client",
message: Union["types.Message", str],
limit: int = 0,
offset: int = 0
    ) -> Optional[AsyncGenerator[bytes, None]]:
"""Stream the media from a message chunk by chunk.
You can use this method to partially download a file into memory or to selectively download chunks of file.
The chunk maximum size is 1 MiB (1024 * 1024 bytes).
.. include:: /_includes/usable-by/users-bots.rst
Parameters:
message (:obj:`~pyrogram.types.Message` | ``str``):
Pass a Message containing the media, the media itself (message.audio, message.video, ...) or a file id
as string.
limit (``int``, *optional*):
Limit the amount of chunks to stream.
Defaults to 0 (stream the whole media).
offset (``int``, *optional*):
How many chunks to skip before starting to stream.
Defaults to 0 (start from the beginning).
Returns:
``Generator``: A generator yielding bytes chunk by chunk
Example:
.. code-block:: python
# Stream the whole media
async for chunk in app.stream_media(message):
print(len(chunk))
# Stream the first 3 chunks only
async for chunk in app.stream_media(message, limit=3):
print(len(chunk))
# Stream the rest of the media by skipping the first 3 chunks
async for chunk in app.stream_media(message, offset=3):
print(len(chunk))
# Stream the last 3 chunks only (negative offset)
async for chunk in app.stream_media(message, offset=-3):
print(len(chunk))
"""
available_media = ("audio", "document", "photo", "sticker", "animation", "video", "voice", "video_note",
"new_chat_photo")
if isinstance(message, types.Message):
for kind in available_media:
media = getattr(message, kind, None)
if media is not None:
break
else:
raise ValueError("This message doesn't contain any downloadable media")
else:
media = message
if isinstance(media, str):
file_id_str = media
else:
file_id_str = media.file_id
file_id_obj = FileId.decode(file_id_str)
file_size = getattr(media, "file_size", 0)
if offset < 0:
if file_size == 0:
raise ValueError("Negative offsets are not supported for file ids, pass a Message object instead")
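            # With a fixed 1 MiB chunk size, a negative offset is mapped to a
            # positive chunk index: offset = -n becomes
            # ceil(file_size / 1 MiB) - n, i.e. the last n chunks.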
chunks = math.ceil(file_size / 1024 / 1024)
offset += chunks
async for chunk in self.get_file(file_id_obj, file_size, limit, offset):
yield chunk
|
PypiClean
|
/www_q_z/www_sinochemitc_com.py
|
import pandas as pd
import re
from selenium import webdriver
from bs4 import BeautifulSoup
from lmf.dbv2 import db_write
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import json
import time
from zhulong4.util.etl import est_html, est_meta, add_info,est_meta_large
_name_ = "www_sinochemitc_com"
def f1(driver, num):
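    # Scrape listing page `num`: navigate to the "-<num>.html" URL if needed,
    # wait for the announcement list to refresh, and return a DataFrame with
    # [title, publish date, detail-page href, info] per row.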
locator = (By.XPATH, "//div[@class='zbdt-news-module-content']/div[1]//a")
WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))
url = driver.current_url
try:
locator = (By.XPATH, "//span[@class='i-pager-info-c']")
cnum = WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator)).text.strip()
except:
cnum = 1
if num != int(cnum):
val = driver.find_element_by_xpath("//div[@class='zbdt-news-module-content']/div[1]//a").get_attribute('href')[-15:]
if num == 1:
            url = re.sub(r"-[0-9]*\.html", "-1.html", url)
        else:
            s = "-%d.html" % (num) if num > 1 else "-1.html"
            url = re.sub(r"-[0-9]*\.html", s, url)
driver.get(url)
locator = (By.XPATH, "//div[@class='zbdt-news-module-content']/div[1]//a[not(contains(@href, '%s'))]" % val)
WebDriverWait(driver, 20).until(EC.presence_of_element_located(locator))
page = driver.page_source
soup = BeautifulSoup(page, "html.parser")
div = soup.find("div", class_='zbdt-news-module-content')
lis = div.find_all('div', class_='zbdt-news-item')
data = []
for li in lis:
a = li.find("a")
try:
title = a['title'].strip()
except:
title = a.text.strip()
link = a["href"]
if 'http' in link:
href = link
else:
href = 'http://www.sinochemitc.com/s/' + link
span = li.find('div', class_='zbdt-news-item-date').text.strip()
tmp = [title, span, href]
data.append(tmp)
df = pd.DataFrame(data=data)
df['info'] = None
return df
def f2(driver):
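    # Read the total number of listing pages from the pager widget
    # (defaulting to 1 if it is absent) and close the driver.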
locator = (By.XPATH, "//div[@class='zbdt-news-module-content']/div[1]//a")
WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))
try:
locator = (By.XPATH, "//span[@class='i-pager-info-p']")
total = WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator)).text.strip()
num = re.findall(r'(\d+)', total)[0]
except:
num = 1
driver.quit()
return int(num)
def f3(driver, url):
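    # Fetch a detail page, wait until its content stops growing, and return
    # the parsed announcement body (the 'Gnews-detail' div).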
driver.get(url)
locator = (By.XPATH, "//div[@class='Gnews-detail']")
WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located(locator))
before = len(driver.page_source)
time.sleep(0.1)
after = len(driver.page_source)
i = 0
while before != after:
before = len(driver.page_source)
time.sleep(0.1)
after = len(driver.page_source)
i += 1
if i > 5: break
page = driver.page_source
soup = BeautifulSoup(page, 'html.parser')
div = soup.find('div', class_='Gnews-detail')
return div
data = [
["qy_zhaobiao_gg",
"http://www.sinochemitc.com/l/7239-18882-1.html",
["name", "ggstart_time", "href", "info"], f1, f2],
["qy_gqita_zhong_liu_gg",
"http://www.sinochemitc.com/l/7241-18885-1.html",
["name", "ggstart_time", "href", "info"], f1, f2],
]
def work(conp, **args):
est_meta_large(conp, data=data, diqu="中国中化集团有限公司", **args)
est_html(conp, f=f3, **args)
if __name__ == '__main__':
work(conp=["postgres", "since2015", "192.168.3.171", "guoziqiang3", "www_sinochemitc_com"])
# driver = webdriver.Chrome()
# url = "http://www.sinochemitc.com/l/7239-18882-1.html"
# driver.get(url)
# df = f2(driver)
# print(df)
#
# driver=webdriver.Chrome()
# url = "http://www.sinochemitc.com/l/7239-18882-1.html"
# driver.get(url)
# for i in range(11, 13):
# df=f1(driver, i)
# print(df.values)
# for i in df[2].values:
# f = f3(driver, i)
# print(f)
|
PypiClean
|
/kraken-4.3.11.tar.gz/kraken-4.3.11/docs/training.rst
|
.. _training:
Training kraken
===============
kraken is an optical character recognition package that can be trained fairly
easily for a large number of scripts. In contrast to other systems requiring
segmentation down to glyph level before classification, it is uniquely suited
for the recognition of connected scripts, because the neural network is trained
to assign the correct characters to unsegmented training data.
Both segmentation, the process of finding lines and regions on a page image, and
recognition, the conversion of line images into text, can be trained in kraken.
To train models for either we require training data, i.e. examples of page
segmentations and transcriptions that are similar to what we want to be able to
recognize. For segmentation the examples are the location of baselines, i.e.
the imaginary lines the text is written on, and polygons of regions. For
recognition these are the text contained in a line. There are multiple ways to
supply training data but the easiest is through PageXML or ALTO files.
Installing kraken
-----------------
The easiest way to install and use kraken is through `conda
<https://www.anaconda.com/download/>`_. kraken works both on Linux and Mac OS
X. After installing conda, download the environment file and create the
environment for kraken:
.. code-block:: console
$ wget https://raw.githubusercontent.com/mittagessen/kraken/main/environment.yml
$ conda env create -f environment.yml
Each time you want to use the kraken environment in a shell, it has to be
activated first:
.. code-block:: console
$ conda activate kraken
Image acquisition and preprocessing
-----------------------------------
First, a number of high quality scans, preferably color or grayscale and at
least 300dpi, are required. Scans should be in a lossless image format such as
TIFF or PNG; images in PDF files have to be extracted beforehand using a tool
such as ``pdftocairo`` or ``pdfimages``. While each of these requirements can
be relaxed to a degree, the final accuracy will suffer to some extent. For
example, only slightly compressed JPEG scans are generally suitable for
training and recognition.
Depending on the source of the scans some preprocessing such as splitting scans
into pages, correcting skew and warp, and removing speckles can be advisable
although it isn't strictly necessary, as the segmenter can be trained to handle
noisy material with high accuracy. A fairly user-friendly piece of software for
semi-automatic batch processing of image scans is `Scantailor
<http://scantailor.org>`_, although most work can also be done using a standard
image editor.
The total number of scans required depends on the kind of model to train
(segmentation or recognition), the complexity of the layout or the nature of
the script to recognize. Only features that are found in the training data can
later be recognized, so it is important that the coverage of typographic
features is exhaustive. Training a small segmentation model for a particular
kind of material might require fewer than a few hundred samples, while a general
model can well go into the thousands of pages. Likewise a specific recognition
model for printed script with a small grapheme inventory such as Arabic or
Hebrew requires around 800 lines, with manuscripts, complex scripts (such as
polytonic Greek), and general models for multiple typefaces and hands needing
more training data for the same accuracy.
There is no hard rule for the amount of training data and it may be required to
retrain a model after the initial training data proves insufficient. Most
``western`` texts contain between 25 and 40 lines per page, so upward of
30 pages have to be preprocessed and later transcribed.
Annotation and transcription
----------------------------
kraken does not provide internal tools for the annotation and transcription of
baselines, regions, and text. There are a number of tools available that can
create ALTO and PageXML files containing the requisite information for either
segmentation or recognition training: `escriptorium
<https://escripta.hypotheses.org>`_ integrates kraken tightly including
training and inference, `Aletheia
<https://www.primaresearch.org/tools/Aletheia>`_ is a powerful desktop
application that can create fine grained annotations.
.. _compilation:
Dataset Compilation
-------------------
.. _training_step:
Training
--------
The training data, e.g. a collection of PageXML documents, obtained through
annotation and transcription, may now be used to train segmentation and/or
recognition models.
The training data in ``output_dir`` may now be used to train a new model by
invoking the ``ketos train`` command. Just hand a list of images to the command
such as:
.. code-block:: console
$ ketos train output_dir/*.png
to start training.
A number of lines will be split off into a separate held-out set that is used
to estimate the actual recognition accuracy achieved in the real world. These
are never shown to the network during training but will be recognized
periodically to evaluate the accuracy of the model. By default the validation
set will comprise 10% of the training data.
Basic model training is mostly automatic, although there are multiple parameters
that can be adjusted (a combined example follows the option list below):
--output
Sets the prefix for models generated during training. They will be saved as
``prefix_epochs.mlmodel``.
--report
How often evaluation passes are run on the validation set. It is an
integer equal to or larger than 1, with 1 meaning a report is created each
time the complete training set has been seen by the network.
--savefreq
How often intermediate models are saved to disk. It is an integer with
the same semantics as ``--report``.
--load
Continuing training is possible by loading an existing model file with
``--load``. To continue training from a base model with another
training set refer to the full :ref:`ketos <ketos>` documentation.
--preload
Enables/disables preloading of the training set into memory for
accelerated training. The default setting preloads data sets with less
than 2500 lines, explicitly adding ``--preload`` will preload arbitrary
sized sets. ``--no-preload`` disables preloading in all circumstances.
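For instance, several of these options can be combined in a single invocation;
the output prefix, frequencies, and file names below are purely illustrative:
.. code-block:: console
$ ketos train --output mymodel --report 1 --savefreq 5 --preload output_dir/*.png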
Training a network will take some time on a modern computer, even with the
default parameters. While the exact time required is unpredictable, as training
is a somewhat random process, a rough guide is that accuracy seldom improves
after 50 epochs, which are typically reached after between 8 and 24 hours of training.
When to stop training is a matter of experience; the default setting employs a
fairly reliable approach known as `early stopping
<https://en.wikipedia.org/wiki/Early_stopping>`_ that stops training as soon as
the error rate on the validation set doesn't improve anymore. This will
prevent `overfitting <https://en.wikipedia.org/wiki/Overfitting>`_, i.e.
fitting the model to recognize only the training data properly instead of the
general patterns contained therein.
.. code-block:: console
$ ketos train output_dir/*.png
Building training set [####################################] 100%
Building validation set [####################################] 100%
[270.2364] alphabet mismatch {'9', '8', '݂', '3', '݀', '4', '1', '7', '5', '\xa0'}
Initializing model ✓
Accuracy report (0) -1.5951 3680 9550
epoch 0/-1 [####################################] 788/788
Accuracy report (1) 0.0245 3504 3418
epoch 1/-1 [####################################] 788/788
Accuracy report (2) 0.8445 3504 545
epoch 2/-1 [####################################] 788/788
Accuracy report (3) 0.9541 3504 161
epoch 3/-1 [------------------------------------] 13/788 0d 00:22:09
...
By now there should be a couple of models ``model_name-1.mlmodel``,
``model_name-2.mlmodel``, ... in the directory the script was executed in. Let's
take a look at each part of the output.
.. code-block:: console
Building training set [####################################] 100%
Building validation set [####################################] 100%
shows the progress of loading the training and validation set into memory. This
might take a while as preprocessing the whole set and putting it into memory is
computationally intensive. Loading can be sped up by disabling preloading, at the
cost of performing preprocessing repeatedly during the training process.
.. code-block:: console
[270.2364] alphabet mismatch {'9', '8', '݂', '3', '݀', '4', '1', '7', '5', '\xa0'}
is a warning about missing characters in either the validation or training set,
i.e. that the alphabets of the sets are not equal. Increasing the size of the
validation set will often remedy this warning.
.. code-block:: console
Accuracy report (2) 0.8445 3504 545
This line shows the results of the validation set evaluation. The error after 2
epochs is 545 incorrect characters out of 3504 characters in the validation set,
for a character accuracy of 84.4%. The error should decrease fairly rapidly. If
accuracy remains around 0.30, something is amiss, e.g. non-reordered
right-to-left text or wildly incorrect transcriptions. Abort training, correct the
error(s) and start again.
After training is finished the best model is saved as
``model_name_best.mlmodel``. It is highly recommended to also archive the
training log and data for later reference.
``ketos`` can also produce more verbose output with training set and network
information by appending one or more ``-v`` to the command:
.. code-block:: console
$ ketos -vv train syr/*.png
[0.7272] Building ground truth set from 876 line images
[0.7281] Taking 88 lines from training for evaluation
...
[0.8479] Training set 788 lines, validation set 88 lines, alphabet 48 symbols
[0.8481] alphabet mismatch {'\xa0', '0', ':', '݀', '܇', '݂', '5'}
[0.8482] grapheme count
[0.8484] SPACE 5258
[0.8484] ܐ 3519
[0.8485] ܘ 2334
[0.8486] ܝ 2096
[0.8487] ܠ 1754
[0.8487] ܢ 1724
[0.8488] ܕ 1697
[0.8489] ܗ 1681
[0.8489] ܡ 1623
[0.8490] ܪ 1359
[0.8491] ܬ 1339
[0.8491] ܒ 1184
[0.8492] ܥ 824
[0.8492] . 811
[0.8493] COMBINING DOT BELOW 646
[0.8493] ܟ 599
[0.8494] ܫ 577
[0.8495] COMBINING DIAERESIS 488
[0.8495] ܚ 431
[0.8496] ܦ 428
[0.8496] ܩ 307
[0.8497] COMBINING DOT ABOVE 259
[0.8497] ܣ 256
[0.8498] ܛ 204
[0.8498] ܓ 176
[0.8499] ܀ 132
[0.8499] ܙ 81
[0.8500] * 66
[0.8501] ܨ 59
[0.8501] ܆ 40
[0.8502] [ 40
[0.8503] ] 40
[0.8503] 1 18
[0.8504] 2 11
[0.8504] ܇ 9
[0.8505] 3 8
[0.8505] 6
[0.8506] 5 5
[0.8506] NO-BREAK SPACE 4
[0.8507] 0 4
[0.8507] 6 4
[0.8508] : 4
[0.8508] 8 4
[0.8509] 9 3
[0.8510] 7 3
[0.8510] 4 3
[0.8511] SYRIAC FEMININE DOT 1
[0.8511] SYRIAC RUKKAKHA 1
[0.8512] Encoding training set
[0.9315] Creating new model [1,1,0,48 Lbx100 Do] with 49 outputs
[0.9318] layer type params
[0.9350] 0 rnn direction b transposed False summarize False out 100 legacy None
[0.9361] 1 dropout probability 0.5 dims 1
[0.9381] 2 linear augmented False out 49
[0.9918] Constructing RMSprop optimizer (lr: 0.001, momentum: 0.9)
[0.9920] Set OpenMP threads to 4
[0.9920] Moving model to device cpu
[0.9924] Starting evaluation run
indicates that the training is running on 788 transcribed lines and a
validation set of 88 lines. 49 different classes, i.e. Unicode code points,
were found in these 788 lines. These affect the output size of the network;
obviously only these 49 different classes/code points can later be output by
the network. Importantly, we can see that certain characters occur markedly
less often than others. Characters like the Syriac feminine dot and numerals
that occur fewer than 10 times will most likely not be recognized well by the
trained net.
Evaluation and Validation
-------------------------
While output during training is detailed enough to know when to stop training,
one usually wants to know the specific kinds of errors to expect. Doing more
in-depth error analysis also makes it possible to pinpoint weaknesses in the
training data, e.g. above-average error rates for numerals indicate either a lack of
representation of numerals in the training data or erroneous transcription in
the first place.
First the trained model has to be applied to some line transcriptions with the
``ketos test`` command:
.. code-block:: console
$ ketos test -m syriac_best.mlmodel lines/*.png
Loading model syriac_best.mlmodel ✓
Evaluating syriac_best.mlmodel
Evaluating [#-----------------------------------] 3% 00:04:56
...
After all lines have been processed, an evaluation report will be printed:
.. code-block:: console
=== report ===
35619 Characters
336 Errors
99.06% Accuracy
157 Insertions
81 Deletions
98 Substitutions
Count Missed %Right
27046 143 99.47% Syriac
7015 52 99.26% Common
1558 60 96.15% Inherited
Errors Correct-Generated
25 { } - { COMBINING DOT BELOW }
25 { COMBINING DOT BELOW } - { }
15 { . } - { }
15 { COMBINING DIAERESIS } - { }
12 { ܢ } - { }
10 { } - { . }
8 { COMBINING DOT ABOVE } - { }
8 { ܝ } - { }
7 { ZERO WIDTH NO-BREAK SPACE } - { }
7 { ܆ } - { }
7 { SPACE } - { }
7 { ܣ } - { }
6 { } - { ܝ }
6 { COMBINING DOT ABOVE } - { COMBINING DIAERESIS }
5 { ܙ } - { }
5 { ܬ } - { }
5 { } - { ܢ }
4 { NO-BREAK SPACE } - { }
4 { COMBINING DIAERESIS } - { COMBINING DOT ABOVE }
4 { } - { ܒ }
4 { } - { COMBINING DIAERESIS }
4 { ܗ } - { }
4 { } - { ܬ }
4 { } - { ܘ }
4 { ܕ } - { ܢ }
3 { } - { ܕ }
3 { ܐ } - { }
3 { ܗ } - { ܐ }
3 { ܝ } - { ܢ }
3 { ܀ } - { . }
3 { } - { ܗ }
.....
The first section of the report consists of a simple accounting of the number
of characters in the ground truth, the errors in the recognition output and the
resulting accuracy in per cent.
The next table lists the number of insertions (characters occurring in the
ground truth but not in the recognition output), substitutions (misrecognized
characters), and deletions (superfluous characters recognized by the model).
Next is a grouping of errors (insertions and substitutions) by Unicode script.
The final part of the report consists of errors sorted by frequency and a
per-character accuracy report. Importantly, most errors are incorrect recognition of
combining marks such as dots and diaereses. These may have several sources:
different dot placement in training and validation set, incorrect transcription
such as non-systematic transcription, or unclean speckled scans. Depending on
the error source, correction most often involves adding more training data and
fixing transcriptions. Sometimes it may even be advisable to remove
unrepresentative data from the training set.
Recognition
-----------
The ``kraken`` utility is employed for all non-training related tasks. Optical
character recognition is a multi-step process consisting of binarization
(conversion of input images to black and white), page segmentation (extracting
lines from the image), and recognition (converting line image to character
sequences). All of these may be run in a single call like this:
.. code-block:: console
$ kraken -i INPUT_IMAGE OUTPUT_FILE binarize segment ocr -m MODEL_FILE
producing a text file from the input image. There are also `hocr
<http://hocr.info>`_ and `ALTO <https://www.loc.gov/standards/alto/>`_ output
formats available through the appropriate switches:
.. code-block:: console
$ kraken -i ... ocr -h
$ kraken -i ... ocr -a
For debugging purposes it is sometimes helpful to run each step manually and
inspect intermediate results:
.. code-block:: console
$ kraken -i INPUT_IMAGE BW_IMAGE binarize
$ kraken -i BW_IMAGE LINES segment
$ kraken -i BW_IMAGE OUTPUT_FILE ocr -l LINES ...
It is also possible to recognize more than one file at a time by just chaining
``-i ... ...`` clauses like this:
.. code-block:: console
$ kraken -i input_1 output_1 -i input_2 output_2 ...
Finally, there is a central repository containing freely available models.
Getting a list of all available models:
.. code-block:: console
$ kraken list
Retrieving model metadata for a particular model:
.. code-block:: console
$ kraken show arabic-alam-al-kutub
name: arabic-alam-al-kutub.mlmodel
An experimental model for Classical Arabic texts.
Network trained on 889 lines of [0] as a test case for a general Classical
Arabic model. Ground truth was prepared by Sarah Savant
<[email protected]> and Maxim Romanov <[email protected]>.
Vocalization was omitted in the ground truth. Training was stopped at ~35000
iterations with an accuracy of 97%.
[0] Ibn al-Faqīh (d. 365 AH). Kitāb al-buldān. Edited by Yūsuf al-Hādī, 1st
edition. Bayrūt: ʿĀlam al-kutub, 1416 AH/1996 CE.
alphabet: !()-.0123456789:[] «»،؟ءابةتثجحخدذرزسشصضطظعغفقكلمنهوىي ARABIC
MADDAH ABOVE, ARABIC HAMZA ABOVE, ARABIC HAMZA BELOW
and actually fetching the model:
.. code-block:: console
$ kraken get arabic-alam-al-kutub
The downloaded model can then be used for recognition by the name shown in its metadata, e.g.:
.. code-block:: console
$ kraken -i INPUT_IMAGE OUTPUT_FILE binarize segment ocr -m arabic-alam-al-kutub.mlmodel
For more documentation see the kraken `website <http://kraken.re>`_.
|
PypiClean
|
/etsy_apiv3-2.1.5-py3-none-any.whl/etsy_apiv3/resources/ReceiptResource.py
|
from dataclasses import dataclass
from etsy_apiv3.utils import EtsySession, Response
from etsy_apiv3.models import Receipt, ReceiptType, Transaction
from typing import Union
from etsy_apiv3.utils.EtsyOauth2Session import EtsyOauth2Session
@dataclass
class ReceiptResource:
"""
Receipt Resource is the utility class to get receipts from Etsy
"""
session: Union[EtsySession, EtsyOauth2Session]
def get_receipts(self, shop_id: int, limit=25, offset=0, type: ReceiptType = ReceiptType.UNSHIPPED, **kwargs) -> Response[Receipt]:
"""
Get Shop Receipts By Shop ID And Receipt Type
Args:
shop_id (int): Your SHOP ID
limit (int, optional): Limit Of Receipt Objects. Defaults to 25.
offset (int, optional): How many receipts to skip. Defaults to 0.
type (ReceiptType, optional): Receipt type to find. Defaults to ReceiptType.UNSHIPPED.
Returns:
Response[Receipt]: Create Response Object Derived from the Receipt Object from json
"""
endpoint = f"shops/{shop_id}/receipts"
params = {"limit": limit, "offset": offset}
params.update(type)
params.update(kwargs)
json = self.session.request(endpoint=endpoint, params=params)
return Response[Receipt](**json)
def get_receipt_by_id(self, shop_id: int, receipt_id: int) -> Receipt:
"""
Find One Receipt By Shop ID And Receipt ID
Args:
shop_id (int): Your SHOP ID
receipt_id (int): Receipt ID
Returns:
Receipt: Create Receipt Object from json
"""
endpoint = f"shops/{shop_id}/receipts/{receipt_id}"
json = self.session.request(endpoint=endpoint)
return Receipt(**json)
async def aget_receipt_by_id(self, shop_id: int, receipt_id: int) -> Receipt:
endpoint = f"shops/{shop_id}/receipts/{receipt_id}"
json = await self.session.async_request(endpoint)
return Receipt(**json)
async def aget_receipts(self, shop_id: int, limit: int = 25, offset: int = 0, type: ReceiptType = ReceiptType.UNSHIPPED, **kwargs):
"""
Get Shop Receipts By Shop ID And Receipt Type
Args:
shop_id (int): Your SHOP ID
limit (int, optional): Limit Of Receipt Objects. Defaults to 25.
offset (int, optional): How many receipts to skip. Defaults to 0.
type (ReceiptType, optional): Receipt type to find. Defaults to ReceiptType.UNSHIPPED.
Returns:
Response[Receipt]: Create Response Object Derived from the Receipt Object from json
"""
endpoint = f"shops/{shop_id}/receipts"
params = {"limit": limit, "offset": offset}
params.update(type)
params.update(kwargs)
json = await self.session.async_request(endpoint=endpoint, params=params)
return Response[Receipt](**json)
def create_shipment(self, shop_id: int, receipt_id: int, tracking_number: str, carrier_name: str, send_bcc: bool = True, note_to_buyer: str = "") -> Receipt:
"""
Adds tracking information to the receipt object
Args:
shop_id (int): SHOP ID
receipt_id (int): Target Receipt ID
tracking_number (str): Tracking Number
carrier_name (str): Carrier Name Ex: UPS
send_bcc (bool, optional): Send Mail. Defaults to True.
note_to_buyer (str, optional): Note To Buyer From Seller. Defaults to "".
Returns:
Receipt: Receipt Object from json
"""
endpoint = f"shops/{shop_id}/receipts/{receipt_id}/tracking"
data = {
"tracking_code":tracking_number, "carrier_name":carrier_name,
"send_bcc":send_bcc, "note_to_buyer":note_to_buyer
}
json = self.session.request(endpoint=endpoint, method="POST", data=data)
return Receipt(**json)
def update_shop_receipt(self, shop_id: int, receipt_id: int, was_shipped: bool = None, was_paid: bool = None) -> Receipt:
endpoint = f"shops/{shop_id}/receipts/{receipt_id}"
data = {
"was_shipped": was_shipped,
"was_paid": was_paid
}
response = self.session.request(endpoint, "PUT", data=data)
return Receipt(**response)
def get_transactions_by_listing(self, shop_id: int, listing_id: int, limit: int = 25, offset: int = 0) -> Response[Transaction]:
endpoint = f"shops/{shop_id}/listings/{listing_id}/transactions"
params = {
"limit": limit,
"offset": offset
}
response = self.session.request(endpoint, params=params)
return Response[Transaction](**response)
def get_transactions_by_receipt(self, shop_id: int, receipt_id: int) -> Response[Transaction]:
endpoint = f"shops/{shop_id}/receipts/{receipt_id}/transactions"
response = self.session.request(endpoint)
return Response[Transaction](**response)
def get_transaction_by_transaction_id(self, shop_id: int, transaction_id: int) -> Transaction:
endpoint = f"shops/{shop_id}/transactions/{transaction_id}"
response = self.session.request(endpoint)
return Transaction(**response)
def get_receipt_transactions_by_shop(self, shop_id: int, limit: int = 25, offset: int = 0) -> Response[Transaction]:
endpoint = f"shops/{shop_id}/transactions"
params = {
"limit": limit,
"offset": offset
}
response = self.session.request(endpoint, params=params)
return Response[Transaction](**response)
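# Illustrative usage sketch (not part of the library; how the session is constructed
# and the exact shape of the returned Response object are assumptions -- see
# EtsySession/EtsyOauth2Session and the Response model for the real details):
#
# session = EtsyOauth2Session(...)  # authenticated session (construction is an assumption)
# receipts = ReceiptResource(session=session)
# unshipped = receipts.get_receipts(shop_id=12345678, limit=50)
# one = receipts.get_receipt_by_id(shop_id=12345678, receipt_id=2987654321)
# shipped = receipts.create_shipment(
#     shop_id=12345678, receipt_id=2987654321,
#     tracking_number="1Z999AA10123456784", carrier_name="UPS",
# )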
|
PypiClean
|
/pulumi_oci-1.9.0a1693465256.tar.gz/pulumi_oci-1.9.0a1693465256/pulumi_oci/database/get_data_guard_associations.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetDataGuardAssociationsResult',
'AwaitableGetDataGuardAssociationsResult',
'get_data_guard_associations',
'get_data_guard_associations_output',
]
@pulumi.output_type
class GetDataGuardAssociationsResult:
"""
A collection of values returned by getDataGuardAssociations.
"""
def __init__(__self__, data_guard_associations=None, database_id=None, filters=None, id=None):
if data_guard_associations and not isinstance(data_guard_associations, list):
raise TypeError("Expected argument 'data_guard_associations' to be a list")
pulumi.set(__self__, "data_guard_associations", data_guard_associations)
if database_id and not isinstance(database_id, str):
raise TypeError("Expected argument 'database_id' to be a str")
pulumi.set(__self__, "database_id", database_id)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
@property
@pulumi.getter(name="dataGuardAssociations")
def data_guard_associations(self) -> Sequence['outputs.GetDataGuardAssociationsDataGuardAssociationResult']:
"""
The list of data_guard_associations.
"""
return pulumi.get(self, "data_guard_associations")
@property
@pulumi.getter(name="databaseId")
def database_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the reporting database.
"""
return pulumi.get(self, "database_id")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetDataGuardAssociationsFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
class AwaitableGetDataGuardAssociationsResult(GetDataGuardAssociationsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDataGuardAssociationsResult(
data_guard_associations=self.data_guard_associations,
database_id=self.database_id,
filters=self.filters,
id=self.id)
def get_data_guard_associations(database_id: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetDataGuardAssociationsFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDataGuardAssociationsResult:
"""
This data source provides the list of Data Guard Associations in Oracle Cloud Infrastructure Database service.
Lists all Data Guard associations for the specified database.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_data_guard_associations = oci.Database.get_data_guard_associations(database_id=oci_database_database["test_database"]["id"])
```
:param str database_id: The database [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
"""
__args__ = dict()
__args__['databaseId'] = database_id
__args__['filters'] = filters
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('oci:Database/getDataGuardAssociations:getDataGuardAssociations', __args__, opts=opts, typ=GetDataGuardAssociationsResult).value
return AwaitableGetDataGuardAssociationsResult(
data_guard_associations=pulumi.get(__ret__, 'data_guard_associations'),
database_id=pulumi.get(__ret__, 'database_id'),
filters=pulumi.get(__ret__, 'filters'),
id=pulumi.get(__ret__, 'id'))
@_utilities.lift_output_func(get_data_guard_associations)
def get_data_guard_associations_output(database_id: Optional[pulumi.Input[str]] = None,
filters: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['GetDataGuardAssociationsFilterArgs']]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDataGuardAssociationsResult]:
"""
This data source provides the list of Data Guard Associations in Oracle Cloud Infrastructure Database service.
Lists all Data Guard associations for the specified database.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_data_guard_associations = oci.Database.get_data_guard_associations(database_id=oci_database_database["test_database"]["id"])
```
:param str database_id: The database [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
"""
...
|
PypiClean
|
/bittrex-websocket-aio-0.0.0.3.0.tar.gz/bittrex-websocket-aio-0.0.0.3.0/README.rst
|
|logo| bittrex-websocket-aio
============================
|pypi-v| |pypi-pyversions| |pypi-l| |pypi-wheel|
.. |pypi-v| image:: https://img.shields.io/pypi/v/bittrex-websocket-aio.svg
:target: https://pypi.python.org/pypi/bittrex-websocket-aio
.. |pypi-pyversions| image:: https://img.shields.io/pypi/pyversions/bittrex-websocket-aio.svg
:target: https://pypi.python.org/pypi/bittrex-websocket-aio
.. |pypi-l| image:: https://img.shields.io/pypi/l/bittrex-websocket-aio.svg
:target: https://pypi.python.org/pypi/bittrex-websocket-aio
.. |pypi-wheel| image:: https://img.shields.io/pypi/wheel/bittrex-websocket-aio.svg
:target: https://pypi.python.org/pypi/bittrex-websocket-aio
.. |logo| image:: /resources/py_btrx.svg
:width: 60px
What is ``bittrex-websocket-aio``?
----------------------------------
Python Bittrex WebSocket (PBW) is the first unofficial Python wrapper for
the `Bittrex Websocket API <https://github.com/Bittrex/bittrex.github.io>`_.
It provides users with a simple and easy to use interface to the `Bittrex Exchange <https://bittrex.com>`_.
Users can use it to access real-time public data (e.g. exchange status, summary ticks and order fills) and account-level data such as order and balance status. The goal of the package is to serve as a foundation block upon which users can build their applications. Example usages include maintaining live order books, recording trade history, analysing order flow and many more.
This version is built upon ``asyncio``, which is Python's standard asynchronous I/O framework.
If you are looking for a ``non-async`` version or you are using Python 2.7, then take a look at my other library: `bittrex-websocket <https://github.com/slazarov/python-bittrex-websocket>`_.
--------------
Documentation
http://python-bittrex-websocket-docs.readthedocs.io/en/latest/
Getting started/How-to
http://python-bittrex-websocket-docs.readthedocs.io/en/latest/howto.html
Methods
http://python-bittrex-websocket-docs.readthedocs.io/en/latest/methods.html
Changelog
http://python-bittrex-websocket-docs.readthedocs.io/en/latest/changelog.html#bittrex-websocket-aio
I am constantly working on new features. Make sure you stay up to date by regularly checking the official docs!
**Having an issue or a question? Found a bug or perhaps you want to contribute? Open an issue!**
Quick Start
-----------
.. code:: bash
pip install bittrex-websocket-aio
.. code:: python
#!/usr/bin/python
# /examples/ticker_updates.py
# Sample script showing how subscribe_to_exchange_deltas() works.
# Overview:
# ---------
# 1) Creates a custom ticker_updates_container dict.
# 2) Subscribes to N tickers and starts receiving market data.
# 3) When information is received, checks if the ticker is
# in ticker_updates_container and adds it if not.
# 4) Disconnects when it has data information for each ticker.
from bittrex_websocket.websocket_client import BittrexSocket
from time import sleep
def main():
class MySocket(BittrexSocket):
async def on_public(self, msg):
name = msg['M']
if name not in ticker_updates_container:
ticker_updates_container[name] = msg
print('Just received market update for {}.'.format(name))
# Create container
ticker_updates_container = {}
# Create the socket instance
ws = MySocket()
# Enable logging
ws.enable_log()
# Define tickers
tickers = ['BTC-ETH', 'BTC-NEO', 'BTC-ZEC', 'ETH-NEO', 'ETH-ZEC']
# Subscribe to ticker information
for ticker in tickers:
sleep(0.01)
ws.subscribe_to_exchange_deltas([ticker])
    # Users can also subscribe to all tickers in a single call without introducing
    # delays, but adding a small delay between calls is the recommended way when
    # you are subscribing to a large list of tickers.
    # ws.subscribe_to_exchange_deltas(tickers)
while len(ticker_updates_container) < len(tickers):
sleep(1)
else:
print('We have received updates for all tickers. Closing...')
ws.disconnect()
sleep(10)
if __name__ == "__main__":
main()
Disclaimer
----------
I am not associated with Bittrex. Use the library at your own risk, I don't bear any responsibility if you end up losing your money.
|
PypiClean
|
/django_bpp-1.0.9-py3-none-any.whl/django_bpp/staticroot/os-homedir/readme.md
|
# os-homedir [](https://travis-ci.org/sindresorhus/os-homedir)
> Node.js 4 [`os.homedir()`](https://nodejs.org/api/os.html#os_os_homedir) [ponyfill](https://ponyfill.com)
## Install
```
$ npm install --save os-homedir
```
## Usage
```js
const osHomedir = require('os-homedir');
console.log(osHomedir());
//=> '/Users/sindresorhus'
```
## Related
- [user-home](https://github.com/sindresorhus/user-home) - Same as this module but caches the result
- [home-or-tmp](https://github.com/sindresorhus/home-or-tmp) - Get the user home directory with fallback to the system temp directory
## License
MIT © [Sindre Sorhus](https://sindresorhus.com)
|
PypiClean
|
/ansible-cmdb-1.30.tar.gz/ansible-cmdb-1.30/src/ansiblecmdb/util.py
|
import copy
import os
import stat
def is_executable(path):
"""
Determine whether `path` points to an executable file.
"""
return stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
def deepupdate(target, src, overwrite=True):
"""Deep update target list, dict or set or other iterable with src
For each k,v in src: if k doesn't exist in target, it is deep copied from
src to target. Otherwise, if v is a list, target[k] is extended with
src[k]. If v is a set, target[k] is updated with v, If v is a dict,
recursively deep-update it. If `overwrite` is False, existing values in
target will not be overwritten.
Examples:
>>> t = {'name': 'Ferry', 'hobbies': ['programming', 'sci-fi']}
>>> deepupdate(t, {'hobbies': ['gaming']})
    >>> print(t)
{'name': 'Ferry', 'hobbies': ['programming', 'sci-fi', 'gaming']}
"""
for k, v in src.items():
if type(v) == list:
if not k in target:
target[k] = copy.deepcopy(v)
elif overwrite is True:
target[k].extend(v)
elif type(v) == dict:
if not k in target:
target[k] = copy.deepcopy(v)
else:
deepupdate(target[k], v, overwrite=overwrite)
elif type(v) == set:
if not k in target:
target[k] = v.copy()
elif overwrite is True:
if type(target[k]) == list:
target[k].extend(v)
elif type(target[k]) == set:
target[k].update(v)
else:
raise TypeError("Cannot update {} with {}".format(type(target[k]), type(v)))
else:
if k not in target or overwrite is True:
target[k] = copy.copy(v)
def find_path(dirs, path_to_find):
"""
Go through a bunch of dirs and see if dir+path_to_find exists there.
Returns the first dir that matches. Otherwise, return None.
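    Example (illustrative; the result depends on the local filesystem):
    >>> find_path(["/nonexistent", "/usr"], "bin") # doctest: +SKIP
    '/usr'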
"""
for dir in dirs:
if os.path.exists(os.path.join(dir, path_to_find)):
return dir
return None
def to_bool(s):
"""
Convert string `s` into a boolean. `s` can be 'true', 'True', 1, 'false',
'False', 0.
Examples:
>>> to_bool("true")
True
>>> to_bool("0")
False
>>> to_bool(True)
True
"""
if isinstance(s, bool):
return s
elif s.lower() in ['true', '1']:
return True
elif s.lower() in ['false', '0']:
return False
else:
raise ValueError("Can't cast '%s' to bool" % (s))
|
PypiClean
|
/yt-dlp-custom-0.0.1.tar.gz/yt-dlp-custom-0.0.1/yt_dlp/extractor/ninegag.py
|
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
traverse_obj,
unescapeHTML,
url_or_none,
)
class NineGagIE(InfoExtractor):
IE_NAME = '9gag'
IE_DESC = '9GAG'
_VALID_URL = r'https?://(?:www\.)?9gag\.com/gag/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://9gag.com/gag/ae5Ag7B',
'info_dict': {
'id': 'ae5Ag7B',
'ext': 'webm',
'title': 'Capybara Agility Training',
'upload_date': '20191108',
'timestamp': 1573237208,
'thumbnail': 'https://img-9gag-fun.9cache.com/photo/ae5Ag7B_460s.jpg',
'categories': ['Awesome'],
'tags': ['Awesome'],
'duration': 44,
'like_count': int,
'dislike_count': int,
'comment_count': int,
}
}, {
# HTML escaped title
'url': 'https://9gag.com/gag/av5nvyb',
'only_matching': True,
}, {
# Non Anonymous Uploader
'url': 'https://9gag.com/gag/ajgp66G',
'info_dict': {
'id': 'ajgp66G',
'ext': 'webm',
'title': 'Master Shifu! Or Splinter! You decide:',
'upload_date': '20220806',
'timestamp': 1659803411,
'thumbnail': 'https://img-9gag-fun.9cache.com/photo/ajgp66G_460s.jpg',
'categories': ['Funny'],
'tags': ['Funny'],
'duration': 26,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'uploader': 'Peter Klaus',
'uploader_id': 'peterklaus12',
'uploader_url': 'https://9gag.com/u/peterklaus12',
}
}]
def _real_extract(self, url):
post_id = self._match_id(url)
post = self._download_json(
'https://9gag.com/v1/post', post_id, query={
'id': post_id
})['data']['post']
if post.get('type') != 'Animated':
raise ExtractorError(
'The given url does not contain a video',
expected=True)
duration = None
formats = []
thumbnails = []
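        # post['images'] maps rendition keys to either static images (jpg/png, used
        # as thumbnails) or animated renditions (webm/mp4) that carry per-codec URLs
        # (vp8/vp9/h265), which are turned into downloadable formats below.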
for key, image in (post.get('images') or {}).items():
image_url = url_or_none(image.get('url'))
if not image_url:
continue
ext = determine_ext(image_url)
image_id = key.strip('image')
common = {
'url': image_url,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
}
if ext in ('jpg', 'png'):
webp_url = image.get('webpUrl')
if webp_url:
t = common.copy()
t.update({
'id': image_id + '-webp',
'url': webp_url,
})
thumbnails.append(t)
common.update({
'id': image_id,
'ext': ext,
})
thumbnails.append(common)
elif ext in ('webm', 'mp4'):
if not duration:
duration = int_or_none(image.get('duration'))
common['acodec'] = 'none' if image.get('hasAudio') == 0 else None
for vcodec in ('vp8', 'vp9', 'h265'):
c_url = image.get(vcodec + 'Url')
if not c_url:
continue
c_f = common.copy()
c_f.update({
'format_id': image_id + '-' + vcodec,
'url': c_url,
'vcodec': vcodec,
})
formats.append(c_f)
common.update({
'ext': ext,
'format_id': image_id,
})
formats.append(common)
section = traverse_obj(post, ('postSection', 'name'))
tags = None
post_tags = post.get('tags')
if post_tags:
tags = []
for tag in post_tags:
tag_key = tag.get('key')
if not tag_key:
continue
tags.append(tag_key)
return {
'id': post_id,
'title': unescapeHTML(post.get('title')),
'timestamp': int_or_none(post.get('creationTs')),
'duration': duration,
'uploader': traverse_obj(post, ('creator', 'fullName')),
'uploader_id': traverse_obj(post, ('creator', 'username')),
'uploader_url': url_or_none(traverse_obj(post, ('creator', 'profileUrl'))),
'formats': formats,
'thumbnails': thumbnails,
'like_count': int_or_none(post.get('upVoteCount')),
'dislike_count': int_or_none(post.get('downVoteCount')),
'comment_count': int_or_none(post.get('commentsCount')),
'age_limit': 18 if post.get('nsfw') == 1 else None,
'categories': [section] if section else None,
'tags': tags,
}
|
PypiClean
|
/Brian2GeNN-1.7.0-py3-none-any.whl/brian2genn/sphinxext/docscrape.py
|
import inspect
import pydoc
import re
import textwrap
from warnings import warn
from sphinx.pycode import ModuleAnalyzer
class Reader:
"""A line-based string reader."""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split("\n") # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ""
def seek_next_non_empty_line(self):
for line in self[self._l :]:
if line.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start : self._l]
self._l += 1
if self.eof():
return self[start : self._l + 1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return line.strip() and (len(line.lstrip()) == len(line))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ""
def is_empty(self):
return not "".join(self._str).strip()
class NumpyDocString:
def __init__(self, docstring, config=None):
docstring = textwrap.dedent(docstring).split("\n")
self._doc = Reader(docstring)
self._parsed_data = {
"Signature": "",
"Summary": [""],
"Extended Summary": [],
"Parameters": [],
"Returns": [],
"Raises": [],
"Warns": [],
"Other Parameters": [],
"Attributes": [],
"Methods": [],
"See Also": [],
"Notes": [],
"Warnings": [],
"References": "",
"Examples": "",
"index": {},
}
self._parse()
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
warn(f"Unknown section {key}")
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith(".. index::"):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith("-" * len(l1)) or l2.startswith("=" * len(l1))
def _strip(self, doc):
start = stop = 0
for i, line in enumerate(doc):
if line.strip():
start = i
break
for i, line in enumerate(doc[::-1]):
if line.strip():
stop = i
break
return doc[start:-stop]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += [""]
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith(".."): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if " : " in header:
arg_name, arg_type = header.split(" : ")[:2]
else:
arg_name, arg_type = header, ""
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name, arg_type, desc))
return params
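    # Matches either a ':role:`name`' cross-reference or a bare 'name', as used in
    # "See Also" entries; group 'role' holds the role, 'name'/'name2' hold the target.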
_name_rgx = re.compile(
r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*",
re.X,
)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError(f"{text} is not a item name")
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end() :].strip().startswith(":"):
push_item(current_func, rest)
current_func, line = line[: m.end()], line[m.end() :]
rest = [line.split(":", 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(" "):
push_item(current_func, rest)
current_func = None
if "," in line:
for func in line.split(","):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split("::")
if len(section) > 1:
out["default"] = strip_each_in(section[1].split(","))[0]
for line in content:
line = line.split(":")
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(","))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile(r"^([\w., ]+=)?\s*[\w.]+\(.*\)$").match(summary_str):
self["Signature"] = summary_str
if not self._is_at_section():
self["Summary"] = self._doc.read_to_next_empty_line()
else:
self["Summary"] = summary
if not self._is_at_section():
self["Extended Summary"] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for section, content in self._read_sections():
if not section.startswith(".."):
section = " ".join([s.capitalize() for s in section.split(" ")])
if section in (
"Parameters",
"Returns",
"Raises",
"Warns",
"Other Parameters",
"Attributes",
"Methods",
):
self[section] = self._parse_param_list(content)
elif section.startswith(".. index::"):
self["index"] = self._parse_index(section, content)
elif section == "See Also":
self["See Also"] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol="-"):
return [name, len(name) * symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [" " * indent + line]
return out
def _str_signature(self):
if self["Signature"]:
return [self["Signature"].replace("*", r"\*")] + [""]
else:
return [""]
def _str_summary(self):
if self["Summary"]:
return self["Summary"] + [""]
else:
return []
def _str_extended_summary(self):
if self["Extended Summary"]:
return self["Extended Summary"] + [""]
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param, param_type, desc in self[name]:
out += [f"{param} : {param_type}"]
out += self._str_indent(desc)
out += [""]
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += [""]
return out
def _str_see_also(self, func_role):
if not self["See Also"]:
return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self["See Also"]:
if role:
link = f":{role}:`{func}`"
elif func_role:
link = f":{func_role}:`{func}`"
else:
link = f"`{func}`_"
if desc or last_had_desc:
out += [""]
out += [link]
else:
out[-1] += f", {link}"
if desc:
out += self._str_indent([" ".join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += [""]
return out
def _str_index(self):
idx = self["index"]
out = []
out += [f".. index:: {idx.get('default', '')}"]
for section, references in idx.items():
if section == "default":
continue
out += [f" :{section}: {', '.join(references)}"]
return out
def __str__(self, func_role=""):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in (
"Parameters",
"Returns",
"Other Parameters",
"Raises",
"Warns",
):
out += self._str_param_list(param_list)
out += self._str_section("Warnings")
out += self._str_see_also(func_role)
for s in ("Notes", "References", "Examples"):
out += self._str_section(s)
for param_list in ("Attributes", "Methods"):
out += self._str_param_list(param_list)
out += self._str_index()
return "\n".join(out)
def indent(str, indent=4):
indent_str = " " * indent
if str is None:
return indent_str
lines = str.split("\n")
return "\n".join(indent_str + line for line in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style="-"):
return f"{text}\n{style * len(text)}\n"
class FunctionDoc(NumpyDocString):
def __init__(self, func, role="func", doc=None, config=None):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ""
NumpyDocString.__init__(self, doc)
if not self["Signature"] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = str(inspect.signature(func))
argspec = argspec.replace("*", r"\*")
signature = f"{func_name}{argspec}"
except (TypeError, ValueError):
signature = f"{func_name}()"
self["Signature"] = signature
def get_func(self):
func_name = getattr(self._f, "__name__", self.__class__.__name__)
if inspect.isclass(self._f):
if callable(self._f):
func = self._f.__call__
else:
func = self._f.__init__
else:
func = self._f
return func, func_name
def __str__(self):
out = ""
_, func_name = self.get_func()
roles = {"func": "function", "meth": "method"}
if self._role:
if self._role not in roles:
print(f"Warning: invalid role {self._role}")
out += f".. {roles.get(self._role, '')}:: {func_name}\n \n\n"
out += super().__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ["__call__"]
def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config=None):
if not inspect.isclass(cls) and cls is not None:
raise ValueError(f"Expected a class or None, but got {cls!r}")
self._cls = cls
if modulename and not modulename.endswith("."):
modulename += "."
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if not self["Methods"]:
self["Methods"] = [(name, "", "") for name in sorted(self.methods)]
if not self["Attributes"]:
self["Attributes"] = [(name, "", "") for name in sorted(self.properties)]
@property
def methods(self):
if self._cls is None:
return []
methods = [
name
for name, func in self._cls.__dict__.items()
if (
(not name.startswith("_") or name in self.extra_public_methods)
and (
(callable(func) and not isinstance(func, type))
or inspect.ismethoddescriptor(func)
)
)
]
return methods
@property
def properties(self):
if self._cls is None:
return []
analyzer = ModuleAnalyzer.for_module(self._cls.__module__)
instance_members = {
attr_name
for (class_name, attr_name) in analyzer.find_attr_docs().keys()
if class_name == self._cls.__name__
}
class_members = {
name
for name, func in self._cls.__dict__.items()
if not name.startswith("_")
and (func is None or inspect.isdatadescriptor(func))
}
return instance_members | class_members
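# Illustrative usage sketch (not part of the module): parse a NumPy-style docstring
# and read a parsed section back; the exact contents depend on the docstring given.
#
#   doc = NumpyDocString(some_function.__doc__)
#   doc["Summary"]      # list of summary lines
#   doc["Parameters"]   # list of (name, type, description-lines) tuples
#   fdoc = FunctionDoc(some_function)
#   print(fdoc)         # reST-formatted output including the signature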