id | text | dataset_id
---|---|---|
/eversend_api-0.1.0-py3-none-any.whl/eversend/eversend.py
|
import requests
class Eversend:
def __init__(self, clientId, clientSecret, version):
""""
Initialize Eversend Class by providing the ClientID and ClientSecret from your Business Account Settings
"""
self.clientId = clientId
self.clientSecret = clientSecret
self.version = version
self.base_url = 'https://api.eversend.com/v'+self.version
self.token = None
self.headers = {
'clientId': self.clientId,
'clientSecret': self.clientSecret
}
self.get_token()
self.token_header = {}
if self.token:
self.token_header = {
"Authorization": 'Bearer '+self.token
}
def get_token(self):
r = requests.get(self.base_url, headers=self.headers)
resp = r.json()
if resp['status'] == 200:
self.token = resp['token']
def get_wallets(self):
"""
Get All available Wallets
"""
url = self.base_url + '/wallets'
r = requests.get(url, headers=self.token_header)
return r.json()
def get_wallet(self, walletId):
"""
Get a specific Wallet using WalletID or Currency
"""
url = self.base_url + '/wallets?walletId='+walletId
r = requests.get(url, headers=self.token_header)
return r.json()
def activate_wallet(self, wallet):
"""
Activate a specific wallet by the currency
:param wallet: UGX, KES, USD, NGN
"""
url = self.base_url + '/wallets/activate'
payload = {
'wallet': wallet
}
r = requests.post(url, json=payload, headers=self.token_header)
return r.json()
def deactivate_wallet(self, wallet):
"""
Deactivate a specific wallet by the currency
:param wallet: UGX, KES, USD, NGN
"""
url = self.base_url + '/wallets/deactivate'
payload = {
'wallet': wallet
}
r = requests.post(url, json=payload, headers=self.token_header)
return r.json()
def account_profile(self):
"""
Get your complete account profile
"""
url = self.base_url + "/account"
r = requests.get(url, headers=self.token_header)
return r.json()
def create_quotation(self, _from, amount, _to):
"""
Create a quotation for currency conversion
:param _from: UGX, KES, USD, NGN
:param amount: Amount as a float
:param _to: UGX, KES, NGN, USD
"""
url = self.base_url + '/exchanges/quotation'
payload = {
'from': _from,
'Amount': amount,
'To': _to
}
r = requests.post(url, json=payload, headers=self.token_header)
return r.json()
def create_exchange(self, quote_token):
"""
Create a currency exchange using a quotation token
:param quote_token: Quotation reference to make a currency exchange
"""
url = self.base_url + '/exchanges'
payload = {
'token': quote_token
}
r = requests.post(url, json=payload, headers=self.token_header)
return r.json()
def get_transaction(self, transactionId):
url = self.base_url + "/transactions?transactionId="+transactionId
r = requests.get(url, headers=self.token_header)
return r.json()
def transactions(self, search,range,limit, page, _from, _to, _type, currency, status):
"""
View All Transactions
:param search: Transaction ID or detail
:param range: Options are day, week, month, year
:param limit: Number of items returned; integer starting from 1, default is 10
:param page: Page number; integer, default is 1
:param _from: String Date as YYYY-MM-dd
:param _to: String Date as YYYY-MM-dd
:param _type: Options are payout, exchange, collection
:param currency: Options are UGX, USD, NGN, GHS, KES ...
:param status: Options are pending, successful, failed
"""
url = self.base_url + "/transactions"
parameters = {
'search': search,
'range': range,
'limit': limit,
'page': page,
'from': _from,
'to': _to,
'type': _type,
'currency': currency,
'status': status
}
r = requests.get(url, params=parameters, headers=self.token_header)
return r.json()
def collection_fees(self, method, currency, amount):
"""
Check collection fees
:param method: Options are momo, bank
:param currency: Options are UGX, KES, NGN, USD, GHS ..
:param amount: Amount as a number
"""
url = self.base_url + "/collection/fees"
payload = {
"method": method,
"currency": currency,
"amount": amount
}
r = requests.post(url, json=payload, headers=self.token_header)
return r.json()
def collection_otp(self, phone):
"""
Send an OTP before collection
:param phone: phone number in international format
"""
url = self.base_url + "/collection/otp"
payload = {
"phone": phone
}
r = requests.post(url, json=payload, headers=self.token_header)
return r.json()
def mobile_money_collection(self, phone, amount, country, currency, transactionRef, customer=None, otp=None):
"""
Initiate Mobile Money Collection Request
:param phone: Phone Number in international format
:param amount: Amount as a number
:param country: Options are UG, KE, GH, RW
:param currency: Options are UGX, KES, GHS, RWF
:param customer: JSON Object for customer information e.g. '{"email":"[email protected]"}'
:param transactionRef: Unique alphanumeric string set by the client
:param otp: A JSON object with pinId from Get Collection OTP and pin from customer e.g {"pinId":"132466gdfsfsrey1535", "pin":"123456"}
"""
url = self.base_url + "/collection/momo"
payload = {
"phone": phone,
"amount": amount,
"country": country,
"currency": currency,
"transactionRef": transactionRef,
"customer": customer,
"otp": otp
}
r = requests.post(url, json=payload, headers=self.token_header)
return r.json()
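# A minimal usage sketch of the client above. The credentials and API version are
# placeholders, not real values; a valid Eversend business account is assumed and
# every method returns the decoded JSON response as a dict.
if __name__ == "__main__":
    client = Eversend(clientId="your-client-id", clientSecret="your-client-secret", version="1")
    wallets = client.get_wallets()                         # list all wallets
    quote = client.create_quotation("UGX", 1000.0, "KES")  # quote a UGX -> KES conversion
    print(wallets)
    print(quote)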
|
PypiClean
|
/straattaal_bijbel-9786796767868973145897342348293748973489579438579384579384.1.tar.gz/straattaal_bijbel-9786796767868973145897342348293748973489579438579384579384.1/README.md
|
# straattaal-bijbel
To make an account go to https://www.dagelijkswoord.nl/verspreiden/json-feed and scroll down until you see "API Key aanvragen".
## Example
```
import straattaal_bijbel
verse = straattaal_bijbel.Verse(username="username", password="password") # https://www.dagelijkswoord.nl/verspreiden/json-feed
dictionary = straattaal_bijbel.Dictionary()
dictionary.load('dictionary')
dictionary.apply(verse)
print(verse.text)
```
## Example with Text-to-Speech
```
import straattaal_bijbel
verse = straattaal_bijbel.Verse(username="username", password="password") # https://www.dagelijkswoord.nl/verspreiden/json-feed
dictionary = straattaal_bijbel.Dictionary()
dictionary.load('dictionary')
dictionary.apply(verse)
tts = straattaal_bijbel.TextToSpeech()
tts.say(verse, language="nl", slow=False)
print(verse.text)
```
|
PypiClean
|
/martin-test-package-11122-0.0.1.tar.gz/martin-test-package-11122-0.0.1/example_pkg/__init__.py
|
import os
import sys
print("inside martin's test pip package")
def renderSingle():
os.system('ffmpeg -loop 1 -framerate 2 -i "front.png" -i "testWAVfile.wav" -vf "scale=2*trunc(iw/2):2*trunc(ih/2),setsar=1,format=yuv420p" -c:v libx264 -preset medium -tune stillimage -crf 18 -c:a aac -shortest -vf scale=1920:1080 "outputVideoPy.mp4"')
print("the script has the name %s" % (sys.argv[0]))
#set default args
outputFilename = None
outputResolution = None
#get option flags
if '-outputResolution' in sys.argv:
print("-outputResolution")
if '-outputFilename' in sys.argv:
print("-outputFilename")
#get required flags
if '-img' in sys.argv:
print("-img")
if '-song' in sys.argv:
print('-song')
#get songFilepath
songFilepathIndex = sys.argv.index('-song')+1
songFilepath = sys.argv[songFilepathIndex]
print("songFilepath = ", songFilepath)
#get fileFormat from songFilepath
indexOfLastPeriod = songFilepath.rfind(".")
fileFormat = songFilepath[indexOfLastPeriod:]
print("fileFormat = ", fileFormat)
#render single
renderSingle()
elif '-songs' in sys.argv:
print("-songs (multiple)")
'''
ffmpeg
-loop 1
-framerate 2
-i "front.png"
-i "testWAVfile.wav"
-vf "scale=2*trunc(iw/2):2*trunc(ih/2),setsar=1,format=yuv420p"
-c:v libx264
-preset medium
-tune stillimage
-crf 18
-c:a aac
-shortest
-vf scale=1920:1080
"outputVideo.mp4"
#mp3
ffmpeg
-loop 1
-framerate 2
-i "front.png"
-i "testmp3file.MP3"
-vf "scale=2*trunc(iw/2):2*trunc(ih/2),setsar=1"
-c:v libx264
-preset medium
-tune stillimage
-crf 18
-c:a copy
-shortest
-pix_fmt yuv420p "$2/$5.mp4"
#flac
ffmpeg
-loop 1
-framerate 2
-i "$2/$3"
-i "$2/$4"
-vf "scale=2*trunc(iw/2):2*trunc(ih/2),setsar=1"
-c:v libx264
-preset medium
-tune stillimage
-crf 18
-c:a copy
-shortest
-pix_fmt yuv420p
-strict -2 "$2/$5.mp4"
#wav
ffmpeg -loop 1 -framerate 2 -i "front.png" -i "testWAVfile.wav" -vf "scale=2*trunc(iw/2):2*trunc(ih/2),setsar=1,format=yuv420p" -c:v libx264 -preset medium -tune stillimage -crf 18 -c:a aac -shortest -vf scale=1920:1080 "outputVideo.mp4"
'''
print("\n\n\n")
|
PypiClean
|
/openerp-server-5.0.0-3.tar.gz/openerp-server-5.0.0-3/bin/addons/account/report/rml_parse.py
|
from report import report_sxw
import xml.dom.minidom
import os, time
import osv
import re
import tools
import pooler
import sys
class rml_parse(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(rml_parse, self).__init__(cr, uid, name, context=None)
self.localcontext.update({
'comma_me': self.comma_me,
'format_date': self._get_and_change_date_format_for_swiss,
'strip_name' : self._strip_name,
'explode_name' : self._explode_name,
})
def comma_me(self,amount):
#print "#" + str(amount) + "#"
if not amount:
amount = 0.0
if type(amount) is float :
amount = str('%.2f'%amount)
else :
amount = str(amount)
if (amount == '0'):
return ' '
orig = amount
new = re.sub("^(-?\d+)(\d{3})", "\g<1>'\g<2>", amount)
if orig == new:
return new
else:
return self.comma_me(new)
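# Example: comma_me(1234567.8) returns "1'234'567.80" (Swiss-style apostrophe thousands separators).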
def _ellipsis(self, string, maxlen=100, ellipsis = '...'):
ellipsis = ellipsis or ''
try:
return string[:maxlen - len(ellipsis) ] + (ellipsis, '')[len(string) < maxlen]
except Exception, e:
return False
def _strip_name(self, name, maxlen=50):
return self._ellipsis(name, maxlen, '...')
def _get_and_change_date_format_for_swiss (self,date_to_format):
date_formatted=''
if date_to_format:
date_formatted = time.strftime('%d.%m.%Y', time.strptime(date_to_format, '%Y-%m-%d'))
return date_formatted
def _explode_name(self,chaine,length):
# Check whether the string is shorter than the allowed length
full_string = ''
if (len(str(chaine)) <= length):
return chaine
#
else:
chaine = unicode(chaine,'utf8').encode('iso-8859-1')
rup = 0
for carac in chaine:
rup = rup + 1
if rup == length:
full_string = full_string + '\n'
full_string = full_string + carac
rup = 0
else:
full_string = full_string + carac
return full_string
def makeAscii(self,str):
try:
Stringer = str.encode("utf-8")
except UnicodeDecodeError:
try:
Stringer = str.encode("utf-16")
except UnicodeDecodeError:
print "UTF_16 Error"
Stringer = str
else:
return Stringer
else:
return Stringer
return Stringer
def explode_this(self,chaine,length):
#chaine = self.repair_string(chaine)
chaine = chaine.rstrip()
ast = list(chaine)
i = length
while i <= len(ast):
ast.insert(i,'\n')
i = i + length
chaine = str("".join(ast))
return chaine
def repair_string(self,chaine):
ast = list(chaine)
UnicodeAst = []
_previouslyfound = False
i = 0
#print str(ast)
while i < len(ast):
elem = ast[i]
try:
Stringer = elem.encode("utf-8")
except UnicodeDecodeError:
to_reencode = elem + ast[i+1]
print str(to_reencode)
Good_char = to_reencode.decode('utf-8')
UnicodeAst.append(Good_char)
i += 2
else:
UnicodeAst.append(elem)
i += 1
return "".join(UnicodeAst)
def ReencodeAscii(self,str):
print sys.stdin.encoding
try:
Stringer = str.decode("ascii")
except UnicodeEncodeError:
print "REENCODING ERROR"
return str.encode("ascii")
except UnicodeDecodeError:
print "DECODING ERROR"
return str.encode("ascii")
else:
print Stringer
return Stringer
# def _add_header(self, node):
# rml_head = tools.file_open('specific_param/report/header/corporate_rml_header_ch.rml').read()
# head_dom = xml.dom.minidom.parseString(rml_head)
# #for frame in head_dom.getElementsByTagName('frame'):
# # frame.parentNode.removeChild(frame)
# node2 = head_dom.documentElement
# for tag in node2.childNodes:
# if tag.nodeType==tag.ELEMENT_NODE:
# found = self._find_node(node, tag.localName)
# # rml_frames = found.getElementsByTagName('frame')
# if found:
# if tag.hasAttribute('position') and (tag.getAttribute('position')=='inside'):
# found.appendChild(tag)
# else:
# found.parentNode.replaceChild(tag, found)
# # for frame in rml_frames:
# # tag.appendChild(frame)
# return True
|
PypiClean
|
/ka-lite-0.17.6b4.tar.gz/ka-lite-0.17.6b4/kalite/packages/bundled/django/contrib/localflavor/hk/forms.py
|
from __future__ import absolute_import, unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import CharField
from django.forms import ValidationError
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
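# Matches an optional "852" country prefix (with an optional hyphen) followed by two
# groups of four digits separated by an optional hyphen or dot, e.g. "852-2345-6789".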
hk_phone_digits_re = re.compile(r'^(?:852-?)?(\d{4})[-\.]?(\d{4})$')
hk_special_numbers = ('999', '992', '112')
hk_phone_prefixes = ('2', '3', '5', '6', '8', '9')
hk_formats = ['XXXX-XXXX', '852-XXXX-XXXX', '(+852) XXXX-XXXX',
'XXXX XXXX', 'XXXXXXXX']
class HKPhoneNumberField(CharField):
"""
Validate Hong Kong phone number.
The input format can be any one of the following:
'XXXX-XXXX', '852-XXXX-XXXX', '(+852) XXXX-XXXX',
'XXXX XXXX', or 'XXXXXXXX'.
The output format is 'XXXX-XXXX'.
Note: The phone number must not start with 999, 992, or 112.
It should start with 2, 3, 5, 6, 8, or 9.
Ref - http://en.wikipedia.org/wiki/Telephone_numbers_in_Hong_Kong
"""
default_error_messages = {
'disguise': _('Phone number should not start with ' \
'one of the following: %s.' % \
', '.join(hk_special_numbers)),
'invalid': _('Phone number must be in one of the following formats: '
'%s.' % ', '.join(hk_formats)),
'prefix': _('Phone number should start with ' \
'one of the following: %s.' % \
', '.join(hk_phone_prefixes)),
}
def __init__(self, *args, **kwargs):
super(HKPhoneNumberField, self).__init__(*args, **kwargs)
def clean(self, value):
super(HKPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\(|\)|\s+|\+)', '', smart_text(value))
m = hk_phone_digits_re.search(value)
if not m:
raise ValidationError(self.error_messages['invalid'])
value = '%s-%s' % (m.group(1), m.group(2))
for special in hk_special_numbers:
if value.startswith(special):
raise ValidationError(self.error_messages['disguise'])
prefix_found = map(lambda prefix: value.startswith(prefix),
hk_phone_prefixes)
if not any(prefix_found):
raise ValidationError(self.error_messages['prefix'])
return value
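# A minimal usage sketch of the field above inside a plain Django form. The form
# class and field name are illustrative only and not part of the original module.
from django import forms

class HKContactForm(forms.Form):
    phone = HKPhoneNumberField()

# With Django settings configured, HKContactForm(data={'phone': '(+852) 2345 6789'})
# validates and cleans the phone value to '2345-6789'.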
|
PypiClean
|
/musma_ray-1.0.0.2-py3-none-any.whl/rllib/examples/env/debug_counter_env.py
|
import gym
import numpy as np
from ray.rllib.env.multi_agent_env import MultiAgentEnv
class DebugCounterEnv(gym.Env):
"""Simple Env that yields a ts counter as observation (0-based).
Actions have no effect.
The episode length is always 15.
Reward is always: current ts % 3.
"""
def __init__(self, config=None):
config = config or {}
self.action_space = gym.spaces.Discrete(2)
self.observation_space = gym.spaces.Box(0, 100, (1,), dtype=np.float32)
self.start_at_t = int(config.get("start_at_t", 0))
self.i = self.start_at_t
def reset(self):
self.i = self.start_at_t
return self._get_obs()
def step(self, action):
self.i += 1
return self._get_obs(), float(self.i % 3), self.i >= 15 + self.start_at_t, {}
def _get_obs(self):
return np.array([self.i], dtype=np.float32)
class MultiAgentDebugCounterEnv(MultiAgentEnv):
def __init__(self, config):
super().__init__()
self._skip_env_checking = True
self.num_agents = config["num_agents"]
self.base_episode_len = config.get("base_episode_len", 103)
# Actions are always:
# (episodeID, envID) as floats.
self.action_space = gym.spaces.Box(-float("inf"), float("inf"), shape=(2,))
# Observation dims:
# 0=agent ID.
# 1=episode ID (0.0 for obs after reset).
# 2=env ID (0.0 for obs after reset).
# 3=ts (of the agent).
self.observation_space = gym.spaces.Box(float("-inf"), float("inf"), (4,))
self.timesteps = [0] * self.num_agents
self.dones = set()
def reset(self):
self.timesteps = [0] * self.num_agents
self.dones = set()
return {
i: np.array([i, 0.0, 0.0, 0.0], dtype=np.float32)
for i in range(self.num_agents)
}
def step(self, action_dict):
obs, rew, done = {}, {}, {}
for i, action in action_dict.items():
self.timesteps[i] += 1
obs[i] = np.array([i, action[0], action[1], self.timesteps[i]])
rew[i] = self.timesteps[i] % 3
done[i] = True if self.timesteps[i] > self.base_episode_len + i else False
if done[i]:
self.dones.add(i)
done["__all__"] = len(self.dones) == self.num_agents
return obs, rew, done, {}
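# A short rollout sketch for the single-agent env above, using the old gym API that
# this class implements (reset() returns an obs, step() returns a 4-tuple). The
# config value is illustrative; actions are ignored by the env.
if __name__ == "__main__":
    env = DebugCounterEnv({"start_at_t": 0})
    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
        print(obs, reward, done)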
|
PypiClean
|
/auto_augment-1.0.0-cp38-cp38-manylinux1_x86_64.whl/auto_augment/third_party/PaddleDetection/docs/featured_model/OIDV5_BASELINE_MODEL.md
|
# CascadeCA RCNN
## Introduction
CascadeCA RCNN was the best single model from Baidu's vision technology department in the Google AI Open Images 2019 - Object Detection competition; this single model helped the team take second place among more than 500 participating teams. Open Images Dataset V5 (OIDV5) contains 500 categories, 1.73 million training images, and more than 14 million annotated bounding boxes, making it the largest publicly available object detection dataset known to date. Dataset homepage: [https://storage.googleapis.com/openimages/web/index.html](https://storage.googleapis.com/openimages/web/index.html). The team's technical report for the competition: [https://arxiv.org/pdf/1911.07171.pdf](https://arxiv.org/pdf/1911.07171.pdf)

## Method Description
This model combines several of the stronger detection methods available. Specifically, it uses ResNet200-vd as the backbone of the detection model (its ImageNet classification pre-trained weights can be downloaded [here](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_en.md)), together with CascadeCA RCNN, Feature Pyramid Networks, Non-local, Deformable V2, and related techniques. Note that the standard Cascade RCNN predicts only 2 boxes (foreground and background, using the scores to decide which class the foreground finally belongs to), whereas this model predicts a separate box for every class (Cascade Class Aware). The final model architecture is shown in the figure below.

Because class imbalance in OIDV5 is severe, a dynamic sampling strategy was used during training to select samples; multi-scale training was used to cope with the very wide range of bounding-box areas; in addition, the team replaced Smooth L1 loss with Libra loss to compute the box regression loss; at inference time, SoftNMS post-processing was used so that more boxes can be recalled.
The Objects365 Dataset and OIDV5 share roughly 189 categories, so the two datasets were merged for training in order to expand the OIDV5 training data; the resulting model and its performance are shown in the table below. More details on model training and ensembling strategies can be found in the [OIDV5 technical report](https://arxiv.org/pdf/1911.07171.pdf).
The OIDV5 training results are as follows.
| Model architecture | Public/Private Score | Download |
| :-----------------: | :--------: | :----------------------------------------------------------: |
| CascadeCARCNN-FPN-Dcnv2-Nonlocal ResNet200-vd | 0.62690/0.59459 | [Model](https://paddlemodels.bj.bcebos.com/object_detection/oidv5_cascade_rcnn_cls_aware_r200_vd_fpn_dcnv2_nonlocal_softnms.tar) |
In addition, to verify the performance of this architecture, the team also trained models on COCO2017 and the Objects365 Dataset; the models and their validation-set metrics are listed below.
| Model architecture | Dataset | Validation mAP | Download |
| :-----------------: | :--------: | :--------: | :----------------------------------------------------------: |
| CascadeCARCNN-FPN-Dcnv2-Nonlocal ResNet200-vd | COCO2017 | 51.7% | [Model](https://paddlemodels.bj.bcebos.com/object_detection/cascade_rcnn_cls_aware_r200_vd_fpn_dcnv2_nonlocal_softnms.tar) |
| CascadeCARCNN-FPN-Dcnv2-Nonlocal ResNet200-vd | Objects365 | 34.5% | [Model](https://paddlemodels.bj.bcebos.com/object_detection/obj365_cascade_rcnn_cls_aware_r200_vd_fpn_dcnv2_nonlocal_softnms.tar) |
COCO and the Objects365 Dataset use the same data format; currently only inference and evaluation are supported for them.
## Usage
The OIDV5 dataset format differs from COCO, so currently only single-image inference is supported. For evaluating OIDV5 models, refer to the [documentation](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/challenge_evaluation.md).
1. Download the model and extract it.
2. Run the inference script.
```bash
python -u tools/infer.py -c configs/oidv5/cascade_rcnn_cls_aware_r200_vd_fpn_dcnv2_nonlocal_softnms.yml -o weights=./oidv5_cascade_rcnn_cls_aware_r200_vd_fpn_dcnv2_nonlocal_softnms/ --infer_img=demo/000000570688.jpg
```
The model directory in the command should be adjusted to wherever you placed the downloaded model.
The detection result images can be viewed in the `output` folder.
## Detection Results

|
PypiClean
|
/docker-banner-gen-0.0.7.tar.gz/docker-banner-gen-0.0.7/RELEASE.md
|
# PyPi
Preparation:
* increment version in `setup.py`
* add new changelog section in `CHANGES.rst`
* commit/push all changes
Commands for releasing on pypi.org (requires twine >= 1.8.0):
```commandline
find -name "*~" -delete
rm dist/*
./venv/bin/python setup.py clean
./venv/bin/python setup.py sdist
./venv/bin/twine upload dist/*
```
# Debian
Generate Debian package with the following commands (requires `python3-all` and `python3-stdeb`):
```commandline
rm -Rf deb_dist/*
python3 setup.py --command-packages=stdeb.command bdist_deb
```
# Github
Steps:
* start new release (version: `vX.Y.Z`)
* enter release notes, i.e., significant changes since last release
* upload `docker-banner-gen-X.Y.Z.tar.gz` previously generated with `setup.py`
* upload `deb_dist/python3-docker-banner-gen_X.Y.Z-1_all.deb` previously generated with `stdeb`
* publish
|
PypiClean
|
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/deprecation.py
|
import sys
import warnings
from importlib import import_module
from typing import Any, Dict
if False:
# For type annotation
from typing import Type # for python3.5.1
class RemovedInSphinx40Warning(DeprecationWarning):
pass
class RemovedInSphinx50Warning(PendingDeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInSphinx40Warning
def deprecated_alias(modname: str, objects: Dict, warning: "Type[Warning]") -> None:
module = import_module(modname)
sys.modules[modname] = _ModuleWrapper(module, modname, objects, warning) # type: ignore
class _ModuleWrapper:
def __init__(self, module: Any, modname: str, objects: Dict, warning: "Type[Warning]"
) -> None:
self._module = module
self._modname = modname
self._objects = objects
self._warning = warning
def __getattr__(self, name: str) -> Any:
if name in self._objects:
warnings.warn("%s.%s is deprecated. Check CHANGES for Sphinx "
"API modifications." % (self._modname, name),
self._warning, stacklevel=3)
return self._objects[name]
return getattr(self._module, name)
class DeprecatedDict(dict):
"""A deprecated dict which warns on each access."""
def __init__(self, data: Dict, message: str, warning: "Type[Warning]") -> None:
self.message = message
self.warning = warning
super().__init__(data)
def __setitem__(self, key: str, value: Any) -> None:
warnings.warn(self.message, self.warning, stacklevel=2)
super().__setitem__(key, value)
def setdefault(self, key: str, default: Any = None) -> Any:
warnings.warn(self.message, self.warning, stacklevel=2)
return super().setdefault(key, default)
def __getitem__(self, key: str) -> None:
warnings.warn(self.message, self.warning, stacklevel=2)
return super().__getitem__(key)
def get(self, key: str, default: Any = None) -> Any:
warnings.warn(self.message, self.warning, stacklevel=2)
return super().get(key, default)
def update(self, other: Dict) -> None: # type: ignore
warnings.warn(self.message, self.warning, stacklevel=2)
super().update(other)
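# A hedged usage sketch for deprecated_alias(): the alias name 'LegacyDict' is made
# up for illustration. Wrapping this module makes module.LegacyDict resolve to
# DeprecatedDict while emitting RemovedInSphinx40Warning on access.
if __name__ == "__main__":
    deprecated_alias(__name__, {'LegacyDict': DeprecatedDict}, RemovedInSphinx40Warning)
    wrapped = import_module(__name__)
    warnings.simplefilter('always')
    print(wrapped.LegacyDict)  # warns, then prints the DeprecatedDict class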
|
PypiClean
|
/landscape_api_py3-0.9.0.tar.gz/landscape_api_py3-0.9.0/docs/installation.rst
|
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install Landscape API (Python 3), run this command in your terminal:
.. code-block:: console
$ pip install landscape_api_py3
This is the preferred method to install Landscape API (Python 3), as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for Landscape API (Python 3) can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/jurya/landscape_api_py3
Or download the `tarball`_:
.. code-block:: console
$ curl -OJL https://github.com/jurya/landscape_api_py3/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/jurya/landscape_api_py3
.. _tarball: https://github.com/jurya/landscape_api_py3/tarball/master
|
PypiClean
|
/mle_logging-0.0.5-py3-none-any.whl/mle_logging/merge/merge_logs.py
|
import os
import time
from typing import Union
from .merge_hdf5 import merge_hdf5_files
def merge_seed_logs(
merged_path: str,
experiment_dir: str,
num_logs: Union[int, None] = None,
delete_files: bool = True,
) -> None:
"""Merge all .hdf5 files for different seeds into single log."""
# Collect paths in log dir until the num_logs is found
log_dir = os.path.join(experiment_dir, "logs")
while True:
log_paths = [os.path.join(log_dir, log) for log in os.listdir(log_dir)]
if num_logs is not None:
if len(log_paths) == num_logs:
# Delete joined log if at some point over-eagerly merged
if merged_path in log_paths:
os.remove(merged_path)
break
else:
time.sleep(1)
else:
break
merge_hdf5_files(merged_path, log_paths, delete_files=delete_files)
def merge_config_logs(experiment_dir: str, all_run_ids: list) -> None:
"""Scavenge the experiment dictonaries & load in logs."""
all_folders = [x[0] for x in os.walk(experiment_dir)][1:]
# Get rid of timestring in beginning & collect all folders/hdf5 files
hyperp_results_folder = []
# Need to make sure that run_ids & experiment folder match!
for run_id in all_run_ids:
for f in all_folders:
path, file = os.path.split(f)
if file == run_id:
hyperp_results_folder.append(f)
continue
# Collect all paths to the .hdf5 file
log_paths = []
for i in range(len(hyperp_results_folder)):
log_d_t = os.path.join(hyperp_results_folder[i], "logs/")
for file in os.listdir(log_d_t):
fname, fext = os.path.splitext(file)
if file.endswith(".hdf5"):
if fname in all_run_ids or fname == "log":
log_paths.append(os.path.join(log_d_t, file))
# Merge individual run results into a single hdf5 file
assert len(log_paths) == len(all_run_ids)
meta_log_fname = os.path.join(experiment_dir, "meta_log.hdf5")
merge_hdf5_files(meta_log_fname, log_paths, file_ids=all_run_ids)
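# A hedged usage sketch: the directory layout and run ids below are illustrative.
# merge_seed_logs() blocks until exactly num_logs .hdf5 files exist under
# <experiment_dir>/logs/ before merging them into a single file.
if __name__ == "__main__":
    merge_seed_logs(
        merged_path="experiments/run_1/logs/seeds_merged.hdf5",
        experiment_dir="experiments/run_1",
        num_logs=3,
        delete_files=False,
    )
    merge_config_logs(experiment_dir="experiments", all_run_ids=["run_1", "run_2"])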
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_22/models/admin_settings_response.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_22 import models
class AdminSettingsResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[AdminSettings]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.AdminSettings]
):
"""
Keyword args:
items (list[AdminSettings])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminSettingsResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminSettingsResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminSettingsResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminSettingsResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AdminSettingsResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdminSettingsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
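# A small construction/serialization sketch: AdminSettings items would normally come
# from the API; here the list is left empty just to show the dict-like interface.
if __name__ == "__main__":
    resp = AdminSettingsResponse(items=[])
    print(resp.keys())     # dict_keys(['items'])
    print(resp.to_dict())  # {'items': []}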
|
PypiClean
|
/pytorch/cppcore.py
|
from cumm import tensorview as tv
import torch
from typing import Dict, Optional, List, Union
from spconv.constants import AllocKeys
from spconv.cppconstants import COMPILED_CUDA_ARCHS
import sys
from spconv.core_cc.csrc.sparse.alloc import ExternalAllocator
from spconv.core_cc.csrc.sparse.convops import ExternalSpconvMatmul
from spconv.core_cc.cumm.common import CompileInfo
import warnings
import numpy as np
_TORCH_DTYPE_TO_TV = {
torch.float32: tv.float32,
torch.float64: tv.float64,
torch.float16: tv.float16,
torch.int32: tv.int32,
torch.int64: tv.int64,
torch.int8: tv.int8,
torch.int16: tv.int16,
torch.uint8: tv.uint8,
torch.qint8: tv.int8,
}
_TORCH_UINT_WORKAROUNDS = {
tv.uint32: tv.int32,
tv.uint16: tv.int16,
tv.uint64: tv.int64
}
_TH_QTYPES = {torch.qint8}
_TV_DTYPE_TO_TORCH = {v: k for k, v in _TORCH_DTYPE_TO_TV.items()}
_TV_DTYPE_TO_TORCH.update({
tv.uint32: torch.int32,
tv.uint16: torch.int16,
tv.uint64: torch.int64
})
_TV_DTYPE_TO_TORCHQ = _TV_DTYPE_TO_TORCH.copy()
_TV_DTYPE_TO_TORCHQ[tv.int8] = torch.qint8
_ALL_INTS = {
tv.int32, tv.int16, tv.int8, tv.int64, tv.uint64, tv.uint8, tv.uint32,
tv.uint16
}
def torch_tensor_to_tv(ten: torch.Tensor,
dtype: Optional[int] = None,
shape: Optional[List[int]] = None,
stride: Optional[List[int]] = None):
# assert ten.is_contiguous(), "must be contiguous tensor"
ptr = ten.data_ptr()
device = ten.device
if device.type == "cpu":
tv_device = -1
elif device.type == "cuda":
tv_device = 0
else:
raise NotImplementedError
if dtype is None:
dtype = _TORCH_DTYPE_TO_TV[ten.dtype]
if stride is None:
stride = list(ten.stride())
if shape is None:
shape = list(ten.shape)
else:
if not ten.is_contiguous():
msg = "if you provide custom shape for non-contig tensor, stride must not None"
assert stride is not None, msg
else:
# custom shape, if tensor is contiguous, we use from_blob and calc strides
return tv.from_blob(ptr, shape, dtype, tv_device)
return tv.from_blob_strided(ptr, shape, stride, dtype, tv_device)
def torch_tensors_to_tv(*tens: torch.Tensor):
return (torch_tensor_to_tv(t) for t in tens)
def get_current_stream():
return torch.cuda.current_stream().cuda_stream
def get_arch():
arch = torch.cuda.get_device_capability()
if not CompileInfo.arch_is_compatible(arch) and not CompileInfo.algo_can_use_ptx((0, 0), arch):
warnings.warn(
f"[WARNING]your gpu arch {arch} isn't compiled in prebuilt, "
f"may cause invalid device function error. "
f"available: {COMPILED_CUDA_ARCHS}")
return arch
class TorchAllocator(ExternalAllocator):
def __init__(self, gpudevice: torch.device, is_quantized: bool = False) -> None:
super().__init__()
self.gpudevice = gpudevice
self.cpudevice = torch.device("cpu")
self.allocated: Dict[Union[str, int], torch.Tensor] = {}
self.is_quantized = is_quantized
self._tv_dtype_to_torch = _TV_DTYPE_TO_TORCH
if is_quantized:
self._tv_dtype_to_torch = _TV_DTYPE_TO_TORCHQ
def zeros(self, name: str, shape: List[int], dtype: int,
device: int, stream: int = 0, is_temp_memory: bool = False, scale: float = 1.0) -> tv.Tensor:
# TODO: free memory by name if it's already been freed by pointer.
# provide a name if you want to access it after c++ function exit.
dtype_bkp = dtype
th_dtype = self._tv_dtype_to_torch[dtype]
if device == -1:
dev = self.cpudevice
else:
dev = self.gpudevice
if self.is_quantized:
ten = torch._empty_affine_quantized(shape, scale=scale, zero_point=0, dtype=th_dtype, device=dev)
else:
ten = torch.empty(shape, dtype=th_dtype, device=dev).zero_()
ten_tv = torch_tensor_to_tv(ten, dtype_bkp)
if self.is_quantized:
# no _zeros_affine_quantized available, so we need to zero_ here.
ctx = tv.Context()
ctx.set_cuda_stream(stream)
ten_tv.zero_(ctx)
self.allocated[ten_tv.byte_pointer()] = ten
if name and not is_temp_memory:
self.allocated[name] = ten
return ten_tv
def empty(self, name: str, shape: List[int], dtype: int,
device: int, stream: int = 0, is_temp_memory: bool = False, scale: float = 1.0) -> tv.Tensor:
dtype_bkp = dtype
th_dtype = self._tv_dtype_to_torch[dtype]
if device == -1:
dev = self.cpudevice
else:
dev = self.gpudevice
if self.is_quantized:
ten = torch._empty_affine_quantized(shape, scale=scale, zero_point=0, dtype=th_dtype, device=dev)
else:
ten = torch.empty(shape, dtype=th_dtype, device=dev)
ten_tv = torch_tensor_to_tv(ten, dtype_bkp)
self.allocated[ten_tv.byte_pointer()] = ten
if name and not is_temp_memory:
self.allocated[name] = ten
return ten_tv
def full_int(self, name: str, shape: List[int], value: int, dtype: int,
device: int, stream: int = 0, is_temp_memory: bool = False) -> tv.Tensor:
if dtype in _TORCH_UINT_WORKAROUNDS and value < 0:
raise NotImplementedError("you can't use full for unsigned dtypes")
dtype_bkp = dtype
th_dtype = self._tv_dtype_to_torch[dtype]
if device == -1:
dev = self.cpudevice
else:
dev = self.gpudevice
if self.is_quantized:
assert th_dtype not in _TH_QTYPES
ten = torch.full(shape, value, dtype=th_dtype, device=dev)
ten_tv = torch_tensor_to_tv(ten, dtype_bkp)
self.allocated[ten_tv.byte_pointer()] = ten
if name and not is_temp_memory:
self.allocated[name] = ten
return ten_tv
def full_float(self, name: str, shape: List[int], value: float, dtype: int,
device: int, stream: int = 0, is_temp_memory: bool = False) -> tv.Tensor:
if dtype in _TORCH_UINT_WORKAROUNDS and value < 0:
raise NotImplementedError("you can't use full for unsigned dtypes")
dtype_bkp = dtype
th_dtype = self._tv_dtype_to_torch[dtype]
if device == -1:
dev = self.cpudevice
else:
dev = self.gpudevice
if self.is_quantized:
assert th_dtype not in _TH_QTYPES
ten = torch.full(shape, value, dtype=th_dtype, device=dev)
ten_tv = torch_tensor_to_tv(ten, dtype_bkp)
self.allocated[ten_tv.byte_pointer()] = ten
if name and not is_temp_memory:
self.allocated[name] = ten
return ten_tv
def get_tensor_by_name(self, name: str):
return torch_tensor_to_tv(self.allocated[name])
def free(self, ten: tv.Tensor):
if ten.storage_bytesize() != ten.bytesize():
raise ValueError("you can't free a sliced tensor.")
if ten.byte_pointer() in self.allocated:
self.allocated.pop(ten.byte_pointer())
return
raise ValueError("can't find your tensor in cache.")
def free_noexcept(self, ten: tv.Tensor):
# for c++ scope guard, free will be called in c++ destructor
if ten.storage_bytesize() != ten.bytesize():
return
if ten.byte_pointer() in self.allocated:
self.allocated.pop(ten.byte_pointer())
return
class TorchSpconvMatmul(ExternalSpconvMatmul):
def __init__(self, alloc: TorchAllocator) -> None:
super().__init__()
self.alloc = alloc
def indice_conv_init_gemm(self, features_n: str, filters_n: str,
all_weight_is_krsc: bool, is_kc_not_ck: bool,
kv_center: int, out_channel: int, stream_int: int = 0):
features = self.alloc.allocated[features_n]
filters = self.alloc.allocated[filters_n]
if not all_weight_is_krsc:
filters = filters.reshape(-1, *filters.shape[-2:])
if not is_kc_not_ck:
out_features = torch.mm(features, filters[kv_center])
else:
out_features = torch.mm(features, filters[kv_center].T)
else:
filters = filters.reshape(out_channel, -1, filters.shape[-1])
if features.is_cuda or (features.dtype != torch.float16):
out_features = torch.mm(features, filters[:, kv_center].T)
else:
# PyTorch 1.12 doesn't support CPU half-precision mm.
# We only need CPU fp16 mm for tests.
out_features = torch.empty((features.shape[0], out_channel),
dtype=features.dtype,
device=features.device)
features_np = torch_tensor_to_tv(features).numpy_view()
filters_np = torch_tensor_to_tv(filters).numpy_view()
out_features_np = torch_tensor_to_tv(out_features).numpy_view()
np.matmul(features_np,
filters_np[:, kv_center].T,
out=out_features_np)
self.alloc.allocated[AllocKeys.OutFeatures] = out_features
# print(filters.shape, features.shape, all_weight_is_krsc, out_features.shape, out_features.is_contiguous())
return torch_tensor_to_tv(out_features)
def indice_conv_cpu_gemm(self, inp_buffer_n: str, out_buffer_n: str, filters_n: str,
all_weight_is_krsc: bool,
is_kc_not_ck: bool, nhot: int, index: int):
kv_dim = 1 if all_weight_is_krsc else 0
inp_buffer = self.alloc.allocated[inp_buffer_n]
filters = self.alloc.allocated[filters_n]
if not all_weight_is_krsc:
filters = filters.reshape(-1, *filters.shape[-2:])
else:
filters = filters.reshape(filters.shape[0], -1, filters.shape[-1])
out_buffer = self.alloc.allocated[out_buffer_n]
filters_i = filters.select(kv_dim, index)
filters_cur = filters_i if not is_kc_not_ck else filters_i.T
if inp_buffer.dtype == torch.float16:
inp_buffer_np = torch_tensor_to_tv(inp_buffer).numpy_view()
filters_np = torch_tensor_to_tv(filters).numpy_view()
filters_i_np = filters_np[
index] if not all_weight_is_krsc else filters_np[:, index]
filters_cur_np = filters_i_np if not is_kc_not_ck else filters_i_np.T
out_buffer_np = torch_tensor_to_tv(out_buffer).numpy_view()
np.matmul(inp_buffer_np[:nhot],
filters_cur_np,
out=out_buffer_np[:nhot])
else:
torch.mm(inp_buffer[:nhot], filters_cur, out=out_buffer[:nhot])
def indice_conv_bwd_init_gemm(self, features_n: str, filters_n: str,
out_bp_n: str, dfilters_n: str,
all_weight_is_krsc: bool, is_kc_not_ck: bool,
kv_center: int, stream_int: int = 0):
features = self.alloc.allocated[features_n]
filters = self.alloc.allocated[filters_n]
out_bp = self.alloc.allocated[out_bp_n]
dfilters = self.alloc.allocated[dfilters_n]
if not all_weight_is_krsc:
filters = filters.reshape(-1, *filters.shape[-2:])
dfilters = dfilters.reshape(-1, *filters.shape[-2:])
else:
filters = filters.reshape(filters.shape[0], -1, filters.shape[-1])
dfilters = dfilters.reshape(filters.shape[0], -1, filters.shape[-1])
if not all_weight_is_krsc:
if not is_kc_not_ck:
torch.mm(features.T, out_bp, out=dfilters[kv_center])
din = torch.mm(out_bp, filters[kv_center].T)
else:
torch.mm(out_bp.T, features, out=dfilters[kv_center])
din = torch.mm(out_bp, filters[kv_center])
else:
# KN @ NC
torch.mm(out_bp.T, features, out=dfilters[:, kv_center])
# NK @ KC
din = torch.mm(out_bp, filters[:, kv_center])
self.alloc.allocated[AllocKeys.DIn] = din
return torch_tensor_to_tv(din)
def indice_conv_bwd_cpu_gemm(self, inp_buffer_n: str,
out_buffer_n: str, filters_n: str, dfilters_n: str,all_weight_is_krsc: bool,
is_kc_not_ck: bool, nhot: int, index: int):
kv_dim = 1 if all_weight_is_krsc else 0
inp_buffer = self.alloc.allocated[inp_buffer_n]
out_buffer = self.alloc.allocated[out_buffer_n]
filters = self.alloc.allocated[filters_n]
dfilters = self.alloc.allocated[dfilters_n]
if not all_weight_is_krsc:
filters = filters.reshape(-1, *filters.shape[-2:])
dfilters = dfilters.reshape(-1, *filters.shape[-2:])
else:
filters = filters.reshape(filters.shape[0], -1, filters.shape[-1])
dfilters = dfilters.reshape(filters.shape[0], -1, filters.shape[-1])
filters_i = filters.select(kv_dim, index)
dfilters_i = dfilters.select(kv_dim, index)
filters_KC = filters_i if is_kc_not_ck else filters_i.T
if is_kc_not_ck:
# KN @ NC
torch.mm(out_buffer[:nhot].T, inp_buffer[:nhot], out=dfilters_i)
else:
# CN @ NK
torch.mm(inp_buffer[:nhot].T, out_buffer[:nhot], out=dfilters_i)
# NK @ KC
torch.mm(out_buffer[:nhot], filters_KC, out=inp_buffer[:nhot])
if __name__ == "__main__":
a = torch.rand(2, 2)
atv = torch_tensor_to_tv(a)
print(atv.numpy_view())
|
PypiClean
|
/secretflow_ray-2.2.0-cp38-cp38-macosx_10_16_x86_64.whl/secretflow_ray-2.2.0.data/purelib/ray/rllib/connectors/agent/pipeline.py
|
import logging
from typing import Any, List
from ray.rllib.connectors.connector import (
AgentConnector,
Connector,
ConnectorContext,
ConnectorPipeline,
get_connector,
register_connector,
)
from ray.rllib.utils.typing import ActionConnectorDataType, AgentConnectorDataType
from ray.util.annotations import PublicAPI
logger = logging.getLogger(__name__)
@PublicAPI(stability="alpha")
class AgentConnectorPipeline(ConnectorPipeline, AgentConnector):
def __init__(self, ctx: ConnectorContext, connectors: List[Connector]):
super().__init__(ctx, connectors)
def reset(self, env_id: str):
for c in self.connectors:
c.reset(env_id)
def on_policy_output(self, output: ActionConnectorDataType):
for c in self.connectors:
c.on_policy_output(output)
def __call__(
self, acd_list: List[AgentConnectorDataType]
) -> List[AgentConnectorDataType]:
ret = acd_list
for c in self.connectors:
ret = c(ret)
return ret
def to_state(self):
children = []
for c in self.connectors:
state = c.to_state()
assert isinstance(state, tuple) and len(state) == 2, (
"Serialized connector state must be in the format of "
f"Tuple[name: str, params: Any]. Instead we got {state}"
f"for connector {c.__name__}."
)
children.append(state)
return AgentConnectorPipeline.__name__, children
@staticmethod
def from_state(ctx: ConnectorContext, params: List[Any]):
assert (
type(params) == list
), "AgentConnectorPipeline takes a list of connector params."
connectors = []
for state in params:
try:
name, subparams = state
connectors.append(get_connector(ctx, name, subparams))
except Exception as e:
logger.error(f"Failed to de-serialize connector state: {state}")
raise e
return AgentConnectorPipeline(ctx, connectors)
register_connector(AgentConnectorPipeline.__name__, AgentConnectorPipeline)
|
PypiClean
|
/panel-1.2.2.tar.gz/panel-1.2.2/examples/reference/widgets/DatetimePicker.ipynb
|
```
import datetime
import panel as pn
pn.extension()
```
The ``DatetimePicker`` widget allows selecting a datetime value using a text box and the browser's datetime-picking utility.
Discover more on using widgets to add interactivity to your applications in the [how-to guides on interactivity](../how_to/interactivity/index.md). Alternatively, learn [how to set up callbacks and (JS-)links between parameters](../../how_to/links/index.md) or [how to use them as part of declarative UIs with Param](../../how_to/param/index.html).
#### Parameters:
For details on other options for customizing the component see the [layout](../../how_to/layout/index.md) and [styling](../../how_to/styling/index.md) how-to guides.
##### Core
* **``value``** (datetime): The selected value as a datetime type
* **``start``** (date or datetime): Inclusive lower bound of the allowed date selection.
* **``end``** (date or datetime): Inclusive upper bound of the allowed date selection.
* **``disabled_dates``** (list): Dates to make unavailable for selection; others will be available
* **``enabled_dates``** (list): Dates to make available for selection; others will be unavailable
* **``enable_time``** (boolean): Enable editing of the time in the widget, default is True
* **``enable_seconds``** (boolean): Enable editing of seconds in the widget, default is True
* **``military_time``** (boolean): Enable 24-hour time in the widget, default is True
##### Display
* **``disabled``** (boolean): Whether the widget is editable
* **``visible``** (boolean): Whether the widget is visible
* **``name``** (str): The title of the widget
___
``DatetimePicker`` uses a browser-dependent calendar widget to select the datetime:
```
datetime_picker = pn.widgets.DatetimePicker(name='Datetime Picker', value=datetime.datetime(2021, 3, 2, 12, 10))
datetime_picker
```
``DatetimePicker.value`` returns a datetime type that can be read out or set like other widgets:
```
datetime_picker.value
```
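The bound and time-related options documented above can be combined when constructing the widget; a small illustrative example (the dates below are arbitrary):
```
pn.widgets.DatetimePicker(
    name='Bounded Picker',
    value=datetime.datetime(2021, 3, 2, 12, 10),
    start=datetime.datetime(2021, 3, 1),
    end=datetime.datetime(2021, 3, 31),
    enable_seconds=False
)
```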
### Controls
The `DatetimePicker` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively:
```
pn.Row(datetime_picker.controls(jslink=True), datetime_picker)
```
|
PypiClean
|
/docker-harpoon-0.17.0.tar.gz/docker-harpoon-0.17.0/harpoon/actions.py
|
from harpoon.container_manager import make_server, Manager, wait_for_server
from harpoon.option_spec.harpoon_specs import HarpoonSpec
from harpoon.errors import BadOption, HarpoonError
from harpoon.ship.context import ContextBuilder
from harpoon.ship.builder import Builder
from harpoon.ship.syncer import Syncer
from docker.errors import APIError as DockerAPIError
from delfick_project.norms import sb, Meta
from urllib.parse import urlparse
from functools import partial
from textwrap import dedent
from itertools import chain
import docker.errors
import itertools
import threading
import requests
import logging
import signal
import socket
import shutil
import errno
import os
import re
log = logging.getLogger("harpoon.actions")
info = {"is_default": True}
default_actions = []
available_actions = {}
class an_action(object):
"""Records a task in the ``available_actions`` dictionary"""
def __init__(self, needs_image=False):
self.needs_image = needs_image
def __call__(self, func):
available_actions[func.__name__] = func
func.needs_image = self.needs_image
if info["is_default"]:
default_actions.append(func.__name__)
return func
@an_action(needs_image=True)
def push(collector, image, **kwargs):
"""Push an image"""
if not image.image_index:
raise BadOption(
"The chosen image does not have a image_index configuration", wanted=image.name
)
tag = kwargs["artifact"]
if tag is sb.NotSpecified:
tag = collector.configuration["harpoon"].tag
if tag is not sb.NotSpecified:
image.tag = tag
Builder().make_image(image, collector.configuration["images"], pushing=True)
Syncer().push(image)
@an_action()
def push_all(collector, **kwargs):
"""Push all the images"""
configuration = collector.configuration
configuration["harpoon"].do_push = True
configuration["harpoon"].only_pushable = True
make_all(collector, **kwargs)
@an_action()
def pull_arbitrary(collector, image, **kwargs):
"""Pull an arbitrary image"""
image_index_of = lambda image: urlparse("https://{0}".format(image)).netloc
if image.startswith("file://"):
parsed = urlparse(image)
filename = parsed.netloc + parsed.path
if not os.path.exists(filename):
raise HarpoonError("Provided file doesn't exist!", wanted=image)
with open(filename) as fle:
image_indexes = [(line.strip(), image_index_of(line.strip())) for line in fle]
else:
image_indexes = [(image, image_index_of(image))]
authentication = collector.configuration.get("authentication", sb.NotSpecified)
for index, (image, image_index) in enumerate(image_indexes):
tag = sb.NotSpecified
if ":" in image:
image, tag = image.split(":", 1)
image = {
"image_name": image,
"tag": tag,
"harpoon": collector.configuration["harpoon"],
"commands": ["FROM scratch"],
"image_index": image_index,
"assume_role": sb.NotSpecified,
"authentication": authentication,
}
meta = Meta(collector.configuration, []).at("images").at("__arbitrary_{0}__".format(index))
image = HarpoonSpec().image_spec.normalise(meta, image)
Syncer().pull(image)
@an_action(needs_image=True)
def pull(collector, image, **kwargs):
"""Pull an image"""
if not image.image_index:
raise BadOption(
"The chosen image does not have a image_index configuration", wanted=image.name
)
tag = kwargs["artifact"]
if tag is sb.NotSpecified:
collector.configuration["harpoon"].tag
if tag is not sb.NotSpecified:
image.tag = tag
log.info("Pulling tag: %s", tag)
Syncer().pull(image, ignore_missing=image.harpoon.ignore_missing)
@an_action(needs_image=True)
def pull_dependencies(collector, image, **kwargs):
"""Pull an image's dependent images"""
for dep in image.commands.dependent_images:
kwargs["image"] = dep
pull_arbitrary(collector, **kwargs)
@an_action(needs_image=True)
def pull_parent(collector, image, **kwargs):
"""DEPRECATED - use pull_dependencies instead"""
log.warning("DEPRECATED - use pull_dependencies instead")
pull_dependencies(collector, image, **kwargs)
@an_action()
def pull_all(collector, image, **kwargs):
"""Pull all the images"""
images = collector.configuration["images"]
for layer in Builder().layered(images, only_pushable=True):
for image_name, image in layer:
log.info("Pulling %s", image_name)
pull(collector, image, **kwargs)
@an_action()
def pull_all_external(collector, **kwargs):
"""Pull all the external dependencies of all the images"""
deps = set()
images = collector.configuration["images"]
for layer in Builder().layered(images):
for image_name, image in layer:
for dep in image.commands.external_dependencies:
deps.add(dep)
for dep in sorted(deps):
kwargs["image"] = dep
pull_arbitrary(collector, **kwargs)
@an_action()
def pull_parents(collector, **kwargs):
"""DEPRECATED - use pull_all_external instead"""
log.warning("DEPRECATED - use pull_all_external instead")
pull_all_external(collector, **kwargs)
@an_action(needs_image=True)
def make(collector, image, **kwargs):
"""Just create an image"""
tag = kwargs.get("artifact", sb.NotSpecified)
if tag is sb.NotSpecified:
tag = collector.configuration["harpoon"].tag
if tag is not sb.NotSpecified:
image.tag = tag
Builder().make_image(image, collector.configuration["images"])
print("Created image {0}".format(image.image_name))
@an_action()
def make_all(collector, **kwargs):
"""Creates all the images in layered order"""
configuration = collector.configuration
push = configuration["harpoon"].do_push
only_pushable = configuration["harpoon"].only_pushable
if push:
only_pushable = True
tag = kwargs.get("artifact", sb.NotSpecified)
if tag is sb.NotSpecified:
tag = configuration["harpoon"].tag
images = configuration["images"]
for layer in Builder().layered(images, only_pushable=only_pushable):
for _, image in layer:
if tag is not sb.NotSpecified:
image.tag = tag
Builder().make_image(image, images, ignore_deps=True, ignore_parent=True)
print("Created image {0}".format(image.image_name))
if push and image.image_index:
Syncer().push(image)
@an_action()
def make_pushable(collector, **kwargs):
"""Make only the pushable images and their dependencies"""
configuration = collector.configuration
configuration["harpoon"].do_push = True
configuration["harpoon"].only_pushable = True
make_all(collector, **kwargs)
@an_action(needs_image=True)
def run(collector, image, **kwargs):
"""Run specified task in this image"""
image.build_and_run(collector.configuration["images"])
@an_action()
def list_tasks(collector, tasks, **kwargs):
"""List the available_tasks"""
print("Available tasks to choose from are:")
print("Use the --task option to choose one")
print("")
keygetter = lambda item: item[1].label
tasks = sorted(tasks.items(), key=keygetter)
for label, items in itertools.groupby(tasks, keygetter):
print("--- {0}".format(label))
print("----{0}".format("-" * len(label)))
sorted_tasks = sorted(list(items), key=lambda item: len(item[0]))
max_length = max(len(name) for name, _ in sorted_tasks)
for key, task in sorted_tasks:
desc = dedent(task.description or "").strip().split("\n")[0]
print("\t{0}{1} :-: {2}".format(" " * (max_length - len(key)), key, desc))
print("")
@an_action()
def delete_untagged(collector, **kwargs):
"""Find the untagged images and remove them"""
configuration = collector.configuration
docker_api = configuration["harpoon"].docker_api
images = docker_api.images()
found = False
for image in images:
if image["RepoTags"] == ["<none>:<none>"]:
found = True
image_id = image["Id"]
log.info("Deleting untagged image\thash=%s", image_id)
try:
docker_api.remove_image(image["Id"])
except DockerAPIError as error:
log.error("Failed to delete image\thash=%s\terror=%s", image_id, error)
if not found:
log.info("Didn't find any untagged images to delete!")
@an_action()
def show(collector, **kwargs):
"""Show what images we have"""
configuration = collector.configuration
flat = configuration.get("harpoon.flat", False)
only_pushable = configuration.get("harpoon.only_pushable", False)
for index, layer in enumerate(
Builder().layered(configuration["images"], only_pushable=only_pushable)
):
if flat:
for _, image in layer:
print(image.image_name)
else:
print("Layer {0}".format(index))
for _, image in layer:
print(" {0}".format(image.display_line()))
print("")
@an_action()
def show_pushable(collector, **kwargs):
"""Show what images we have"""
collector.configuration["harpoon"].only_pushable = True
show(collector, **kwargs)
@an_action(needs_image=True)
def print_dockerfile(collector, image, **kwargs):
"""Print a dockerfile for the specified image"""
print("\n".join(image.docker_file.docker_lines))
@an_action(needs_image=True)
def get_docker_context(collector, image, **kwargs):
"""Output the context that would be sent to docker if we made this image"""
with image.make_context() as context:
context.close()
shutil.copyfile(context.name, os.environ.get("FILENAME", f"./context_{image.name}.tar"))
@an_action()
def print_all_dockerfiles(collector, **kwargs):
"""Print all the dockerfiles"""
for name, image in collector.configuration["images"].items():
print("{0}".format(name))
print("-" * len(name))
kwargs["image"] = image
print_dockerfile(collector, **kwargs)
@an_action()
def read_login(collector, image, **kwargs):
"""Login to a docker registry with read permissions"""
docker_api = collector.configuration["harpoon"].docker_api
collector.configuration["authentication"].login(
docker_api, image, is_pushing=False, global_docker=True
)
@an_action()
def write_login(collector, image, **kwargs):
"""Login to a docker registry with write permissions"""
docker_api = collector.configuration["harpoon"].docker_api
collector.configuration["authentication"].login(
docker_api, image, is_pushing=True, global_docker=True
)
@an_action(needs_image=True)
def untag(collector, image, artifact, **kwargs):
"""Tag an image!"""
if artifact in (None, "", sb.NotSpecified):
artifact = collector.configuration["harpoon"].tag
if artifact is sb.NotSpecified:
raise BadOption("Please specify a tag using the artifact or tag options")
image.tag = artifact
image_name = image.image_name_with_tag
log.info("Removing image\timage={0}".format(image_name))
try:
image.harpoon.docker_api.remove_image(image_name)
except docker.errors.ImageNotFound:
log.warning("No image was found to remove")
@an_action(needs_image=True)
def tag(collector, image, artifact, **kwargs):
"""Tag an image!"""
if artifact in (None, "", sb.NotSpecified):
raise BadOption("Please specify a tag using the artifact option")
if image.image_index in (None, "", sb.NotSpecified):
raise BadOption("Please specify an image with an image_index option")
tag = image.image_name
if collector.configuration["harpoon"].tag is not sb.NotSpecified:
tag = "{0}:{1}".format(tag, collector.configuration["harpoon"].tag)
else:
tag = "{0}:latest".format(tag)
images = image.harpoon.docker_api.images()
current_tags = chain.from_iterable(
image_conf["RepoTags"] for image_conf in images if image_conf["RepoTags"] is not None
)
if tag not in current_tags:
raise BadOption("Please build or pull the image down to your local cache before tagging it")
for image_conf in images:
if image_conf["RepoTags"] is not None:
if tag in image_conf["RepoTags"]:
image_id = image_conf["Id"]
break
log.info("Tagging {0} ({1}) as {2}".format(image_id, image.image_name, artifact))
image.harpoon.docker_api.tag(image_id, repository=image.image_name, tag=artifact, force=True)
image.tag = artifact
Syncer().push(image)
@an_action(needs_image=True)
def retrieve(collector, image, artifact, **kwargs):
"""Retrieve a file/folder from an image"""
if artifact in (None, "", sb.NotSpecified):
raise BadOption("Please specify what to retrieve using the artifact option")
if collector.configuration["harpoon"].tag is not sb.NotSpecified:
image.tag = collector.configuration["harpoon"].tag
# make sure the image is built
if os.environ.get("NO_BUILD") is None:
Builder().make_image(image, collector.configuration["images"])
content = {
"conf": image,
"docker_api": collector.configuration["harpoon"].docker_api,
"images": collector.configuration["images"],
"image": image.image_name_with_tag,
"path": artifact,
}
# Get us our gold!
with ContextBuilder().the_context(content) as fle:
shutil.copyfile(fle.name, os.environ.get("FILENAME", "./retrieved.tar.gz"))
@an_action()
def container_manager(collector, image, **kwargs):
"""
Start a web server that you can request containers from.
Usage is like::
harpoon container_manager pathtofile
Or::
harpoon container_manager pathtofile:port
Or::
harpoon container_manager :port
If pathtofile is specified then we will fork the process, start the web
server in the forked process, write the port of the web server and pid on
separate lines to the file specified by pathtofile and quit
If port is not specified, then we will bind to an available port, otherwise
we bind the web server to the specified port.
If no argument is specified, it's the same as saying::
harpoon container_manager :4545
"""
if image in (None, "", sb.NotSpecified):
image = ":4545"
m = re.match(r"([^:]+)?(?::(\d+))?", image)
if not m:
raise HarpoonError("First argument to container_manager was invalid")
groups = m.groups()
filename = groups[0]
port = int(groups[1] or 0)
if not port:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("0.0.0.0", 0))
port = s.getsockname()[1]
if filename:
pid = os.fork()
if pid != 0:
with open(filename, "w") as fle:
fle.write(str(port))
fle.write("\n")
fle.write(str(pid))
fle.write("\n")
wait_for_server(port)
return
image_puller = partial(pull_arbitrary, collector)
manager = Manager(
collector.configuration["harpoon"],
collector.configuration["images"],
image_puller=image_puller,
)
def shutdown(signum, frame):
if not manager.shutting_down:
url = "http://127.0.0.1:{0}/shutdown".format(port)
thread = threading.Thread(target=requests.get, args=(url,))
thread.daemon = True
thread.start()
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
# Start our server
try:
server = make_server(manager, ("0.0.0.0", port))
log.info("Serving container manager on 0.0.0.0:{0}".format(port))
server.serve_forever()
except OSError as error:
if error.errno == errno.EADDRINUSE:
raise HarpoonError(
"Container manager couldn't start because port was already in use", wanted=port
)
raise
# Make it so future use of @an_action doesn't result in more default tasks
info["is_default"] = False
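# Example (hedged): a standalone illustration of how container_manager's first argument
# is parsed by the regex above -- "pathtofile:4545" yields a filename plus an explicit
# port, ":4545" yields only a port, and "pathtofile" alone leaves the port at 0 so a
# free port is picked at runtime. Purely illustrative; not part of the action itself.
if __name__ == "__main__":
    for example_arg in ("pathtofile:4545", ":4545", "pathtofile"):
        example_match = re.match(r"([^:]+)?(?::(\d+))?", example_arg)
        example_file, example_port = example_match.groups()
        print(example_arg, "->", example_file, int(example_port or 0) or "auto")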
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/me/drives/item/list/content_types/item/unpublish/unpublish_request_builder.py
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from ........models.o_data_errors import o_data_error
class UnpublishRequestBuilder():
"""
Provides operations to call the unpublish method.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new UnpublishRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/me/drives/{drive%2Did}/list/contentTypes/{contentType%2Did}/microsoft.graph.unpublish"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_post_request_information(self,request_configuration: Optional[UnpublishRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Unpublish a [contentType][] from a content type hub site.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
return request_info
async def post(self,request_configuration: Optional[UnpublishRequestBuilderPostRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> None:
"""
Unpublish a [contentType][] from a content type hub site.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
"""
request_info = self.create_post_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_no_response_content_async(request_info, response_handler, error_mapping)
@dataclass
class UnpublishRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
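# Example (hedged): a minimal sketch of driving this builder directly. It assumes
# `adapter` is an already-authenticated kiota RequestAdapter obtained from the Graph
# client, and the path-parameter keys below simply mirror the URL template above with
# placeholder ids. In normal use the builder is reached through the fluent client
# chain rather than constructed by hand.
async def _example_unpublish(adapter: RequestAdapter) -> None:
    builder = UnpublishRequestBuilder(adapter, {
        "drive%2Did": "DRIVE_ID",                # placeholder drive id
        "contentType%2Did": "CONTENT_TYPE_ID",   # placeholder content type id
    })
    # Sends POST .../microsoft.graph.unpublish; 4XX/5XX responses raise ODataError.
    await builder.post()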
/trains_agent-0.16.3-py3-none-any.whl/trains_agent/backend_api/services/v2_4/queues.py
import six
import types
from datetime import datetime
import enum
from dateutil.parser import parse as parse_datetime
from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum
class QueueMetrics(NonStrictDataModel):
"""
:param queue: ID of the queue
:type queue: str
    :param dates: List of timestamps (in seconds from epoch) in ascending order.
The timestamps are separated by the requested interval. Timestamps where no
queue status change was recorded are omitted.
:type dates: Sequence[int]
:param avg_waiting_times: List of average waiting times for tasks in the queue.
The points correspond to the timestamps in the dates list. If more than one
value exists for the given interval then the maximum value is taken.
:type avg_waiting_times: Sequence[float]
:param queue_lengths: List of tasks counts in the queue. The points correspond
to the timestamps in the dates list. If more than one value exists for the
given interval then the count that corresponds to the maximum average value is
taken.
:type queue_lengths: Sequence[int]
"""
_schema = {
'properties': {
'avg_waiting_times': {
'description': 'List of average waiting times for tasks in the queue. The points correspond to the timestamps in the dates list. If more than one value exists for the given interval then the maximum value is taken.',
'items': {'type': 'number'},
'type': ['array', 'null'],
},
'dates': {
                'description': 'List of timestamps (in seconds from epoch) in ascending order. The timestamps are separated by the requested interval. Timestamps where no queue status change was recorded are omitted.',
'items': {'type': 'integer'},
'type': ['array', 'null'],
},
'queue': {'description': 'ID of the queue', 'type': ['string', 'null']},
'queue_lengths': {
'description': 'List of tasks counts in the queue. The points correspond to the timestamps in the dates list. If more than one value exists for the given interval then the count that corresponds to the maximum average value is taken.',
'items': {'type': 'integer'},
'type': ['array', 'null'],
},
},
'type': 'object',
}
def __init__(
self, queue=None, dates=None, avg_waiting_times=None, queue_lengths=None, **kwargs):
super(QueueMetrics, self).__init__(**kwargs)
self.queue = queue
self.dates = dates
self.avg_waiting_times = avg_waiting_times
self.queue_lengths = queue_lengths
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('dates')
def dates(self):
return self._property_dates
@dates.setter
def dates(self, value):
if value is None:
self._property_dates = None
return
self.assert_isinstance(value, "dates", (list, tuple))
value = [int(v) if isinstance(v, float) and v.is_integer() else v for v in value]
self.assert_isinstance(value, "dates", six.integer_types, is_array=True)
self._property_dates = value
@schema_property('avg_waiting_times')
def avg_waiting_times(self):
return self._property_avg_waiting_times
@avg_waiting_times.setter
def avg_waiting_times(self, value):
if value is None:
self._property_avg_waiting_times = None
return
self.assert_isinstance(value, "avg_waiting_times", (list, tuple))
self.assert_isinstance(value, "avg_waiting_times", six.integer_types + (float,), is_array=True)
self._property_avg_waiting_times = value
@schema_property('queue_lengths')
def queue_lengths(self):
return self._property_queue_lengths
@queue_lengths.setter
def queue_lengths(self, value):
if value is None:
self._property_queue_lengths = None
return
self.assert_isinstance(value, "queue_lengths", (list, tuple))
value = [int(v) if isinstance(v, float) and v.is_integer() else v for v in value]
self.assert_isinstance(value, "queue_lengths", six.integer_types, is_array=True)
self._property_queue_lengths = value
class Entry(NonStrictDataModel):
"""
:param task: Queued task ID
:type task: str
:param added: Time this entry was added to the queue
:type added: datetime.datetime
"""
_schema = {
'properties': {
'added': {
'description': 'Time this entry was added to the queue',
'format': 'date-time',
'type': ['string', 'null'],
},
'task': {'description': 'Queued task ID', 'type': ['string', 'null']},
},
'type': 'object',
}
def __init__(
self, task=None, added=None, **kwargs):
super(Entry, self).__init__(**kwargs)
self.task = task
self.added = added
@schema_property('task')
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property('added')
def added(self):
return self._property_added
@added.setter
def added(self, value):
if value is None:
self._property_added = None
return
self.assert_isinstance(value, "added", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_added = value
class Queue(NonStrictDataModel):
"""
:param id: Queue id
:type id: str
:param name: Queue name
:type name: str
:param user: Associated user id
:type user: str
:param company: Company id
:type company: str
:param created: Queue creation time
:type created: datetime.datetime
:param tags: User-defined tags
:type tags: Sequence[str]
:param system_tags: System tags. This field is reserved for system use, please
don't use it.
:type system_tags: Sequence[str]
:param entries: List of ordered queue entries
:type entries: Sequence[Entry]
"""
_schema = {
'properties': {
'company': {'description': 'Company id', 'type': ['string', 'null']},
'created': {
'description': 'Queue creation time',
'format': 'date-time',
'type': ['string', 'null'],
},
'entries': {
'description': 'List of ordered queue entries',
'items': {'$ref': '#/definitions/entry'},
'type': ['array', 'null'],
},
'id': {'description': 'Queue id', 'type': ['string', 'null']},
'name': {'description': 'Queue name', 'type': ['string', 'null']},
'system_tags': {
'description': "System tags. This field is reserved for system use, please don't use it.",
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'tags': {
'description': 'User-defined tags',
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'user': {
'description': 'Associated user id',
'type': ['string', 'null'],
},
},
'type': 'object',
}
def __init__(
self, id=None, name=None, user=None, company=None, created=None, tags=None, system_tags=None, entries=None, **kwargs):
super(Queue, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.company = company
self.created = created
self.tags = tags
self.system_tags = system_tags
self.entries = entries
@schema_property('id')
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property('name')
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property('user')
def user(self):
return self._property_user
@user.setter
def user(self, value):
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property('company')
def company(self):
return self._property_company
@company.setter
def company(self, value):
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property('created')
def created(self):
return self._property_created
@created.setter
def created(self, value):
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property('tags')
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property('system_tags')
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property('entries')
def entries(self):
return self._property_entries
@entries.setter
def entries(self, value):
if value is None:
self._property_entries = None
return
self.assert_isinstance(value, "entries", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Entry.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "entries", Entry, is_array=True)
self._property_entries = value
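# Example (hedged): a minimal sketch of building a Queue from raw payload values. The
# ids and timestamp are placeholders; the point is that the `entries` setter above
# turns raw dicts into Entry instances, and Entry's `added` setter parses the ISO
# timestamp into a datetime.
def _example_queue_from_payload():
    return Queue(
        id="QUEUE_ID",
        name="default",
        entries=[{"task": "TASK_ID", "added": "2021-07-05T12:00:00"}],
    )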
class AddTaskRequest(Request):
"""
Adds a task entry to the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "add_task"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'queue': {'description': 'Queue id', 'type': 'string'},
'task': {'description': 'Task id', 'type': 'string'},
},
'required': ['queue', 'task'],
'type': 'object',
}
def __init__(
self, queue, task, **kwargs):
super(AddTaskRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('task')
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
class AddTaskResponse(Response):
"""
Response of queues.add_task endpoint.
:param added: Number of tasks added (0 or 1)
:type added: int
"""
_service = "queues"
_action = "add_task"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'added': {
'description': 'Number of tasks added (0 or 1)',
'enum': [0, 1],
'type': ['integer', 'null'],
},
},
'type': 'object',
}
def __init__(
self, added=None, **kwargs):
super(AddTaskResponse, self).__init__(**kwargs)
self.added = added
@schema_property('added')
def added(self):
return self._property_added
@added.setter
def added(self, value):
if value is None:
self._property_added = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "added", six.integer_types)
self._property_added = value
class CreateRequest(Request):
"""
Create a new queue
    :param name: Queue name. Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
"""
_service = "queues"
_action = "create"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'name': {
'description': 'Queue name Unique within the company.',
'type': 'string',
},
'system_tags': {
'description': "System tags list. This field is reserved for system use, please don't use it.",
'items': {'type': 'string'},
'type': 'array',
},
'tags': {
'description': 'User-defined tags list',
'items': {'type': 'string'},
'type': 'array',
},
},
'required': ['name'],
'type': 'object',
}
def __init__(
self, name, tags=None, system_tags=None, **kwargs):
super(CreateRequest, self).__init__(**kwargs)
self.name = name
self.tags = tags
self.system_tags = system_tags
@schema_property('name')
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property('tags')
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property('system_tags')
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
class CreateResponse(Response):
"""
Response of queues.create endpoint.
:param id: New queue ID
:type id: str
"""
_service = "queues"
_action = "create"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'id': {'description': 'New queue ID', 'type': ['string', 'null']},
},
'type': 'object',
}
def __init__(
self, id=None, **kwargs):
super(CreateResponse, self).__init__(**kwargs)
self.id = id
@schema_property('id')
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
class DeleteRequest(Request):
"""
    Deletes a queue. If the queue is not empty and force is not set to true, the queue will not be deleted.
:param queue: Queue id
:type queue: str
:param force: Force delete of non-empty queue. Defaults to false
:type force: bool
"""
_service = "queues"
_action = "delete"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'force': {
'default': False,
'description': 'Force delete of non-empty queue. Defaults to false',
'type': 'boolean',
},
'queue': {'description': 'Queue id', 'type': 'string'},
},
'required': ['queue'],
'type': 'object',
}
def __init__(
self, queue, force=False, **kwargs):
super(DeleteRequest, self).__init__(**kwargs)
self.queue = queue
self.force = force
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('force')
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
class DeleteResponse(Response):
"""
Response of queues.delete endpoint.
:param deleted: Number of queues deleted (0 or 1)
:type deleted: int
"""
_service = "queues"
_action = "delete"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'deleted': {
'description': 'Number of queues deleted (0 or 1)',
'enum': [0, 1],
'type': ['integer', 'null'],
},
},
'type': 'object',
}
def __init__(
self, deleted=None, **kwargs):
super(DeleteResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property('deleted')
def deleted(self):
return self._property_deleted
@deleted.setter
def deleted(self, value):
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
class GetAllRequest(Request):
"""
Get all queues
:param name: Get only queues whose name matches this pattern (python regular
expression syntax)
:type name: str
:param id: List of Queue IDs used to filter results
:type id: Sequence[str]
:param tags: User-defined tags list used to filter results. Prepend '-' to tag
name to indicate exclusion
:type tags: Sequence[str]
:param system_tags: System tags list used to filter results. Prepend '-' to
system tag name to indicate exclusion
:type system_tags: Sequence[str]
:param page: Page number, returns a specific page out of the result list of
results.
:type page: int
:param page_size: Page size, specifies the number of results returned in each
page (last page may contain fewer results)
:type page_size: int
:param order_by: List of field names to order by. When search_text is used,
'@text_score' can be used as a field representing the text score of returned
documents. Use '-' prefix to specify descending order. Optional, recommended
when using page
:type order_by: Sequence[str]
:param search_text: Free text search query
:type search_text: str
:param only_fields: List of document field names (nesting is supported using
'.', e.g. execution.model_labels). If provided, this list defines the query's
projection (only these fields will be returned for each result entry)
:type only_fields: Sequence[str]
"""
_service = "queues"
_action = "get_all"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'id': {
'description': 'List of Queue IDs used to filter results',
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'name': {
'description': 'Get only queues whose name matches this pattern (python regular expression syntax)',
'type': ['string', 'null'],
},
'only_fields': {
'description': "List of document field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry)",
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'order_by': {
'description': "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page",
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'page': {
'description': 'Page number, returns a specific page out of the result list of results.',
'minimum': 0,
'type': ['integer', 'null'],
},
'page_size': {
'description': 'Page size, specifies the number of results returned in each page (last page may contain fewer results)',
'minimum': 1,
'type': ['integer', 'null'],
},
'search_text': {
'description': 'Free text search query',
'type': ['string', 'null'],
},
'system_tags': {
'description': "System tags list used to filter results. Prepend '-' to system tag name to indicate exclusion",
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'tags': {
'description': "User-defined tags list used to filter results. Prepend '-' to tag name to indicate exclusion",
'items': {'type': 'string'},
'type': ['array', 'null'],
},
},
'type': 'object',
}
def __init__(
self, name=None, id=None, tags=None, system_tags=None, page=None, page_size=None, order_by=None, search_text=None, only_fields=None, **kwargs):
super(GetAllRequest, self).__init__(**kwargs)
self.name = name
self.id = id
self.tags = tags
self.system_tags = system_tags
self.page = page
self.page_size = page_size
self.order_by = order_by
self.search_text = search_text
self.only_fields = only_fields
@schema_property('name')
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property('id')
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", (list, tuple))
self.assert_isinstance(value, "id", six.string_types, is_array=True)
self._property_id = value
@schema_property('tags')
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property('system_tags')
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property('page')
def page(self):
return self._property_page
@page.setter
def page(self, value):
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property('page_size')
def page_size(self):
return self._property_page_size
@page_size.setter
def page_size(self, value):
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property('order_by')
def order_by(self):
return self._property_order_by
@order_by.setter
def order_by(self, value):
if value is None:
self._property_order_by = None
return
self.assert_isinstance(value, "order_by", (list, tuple))
self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
self._property_order_by = value
@schema_property('search_text')
def search_text(self):
return self._property_search_text
@search_text.setter
def search_text(self, value):
if value is None:
self._property_search_text = None
return
self.assert_isinstance(value, "search_text", six.string_types)
self._property_search_text = value
@schema_property('only_fields')
def only_fields(self):
return self._property_only_fields
@only_fields.setter
def only_fields(self, value):
if value is None:
self._property_only_fields = None
return
self.assert_isinstance(value, "only_fields", (list, tuple))
self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
self._property_only_fields = value
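# Example (hedged): a paged lookup of queues whose name starts with "gpu", ordered by
# name. All values are placeholders; per the schema above, page numbering starts at 0
# and page_size must be at least 1.
def _example_get_all_request():
    return GetAllRequest(
        name=r"^gpu",
        page=0,
        page_size=50,
        order_by=["name"],
        only_fields=["id", "name", "entries"],
    )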
class GetAllResponse(Response):
"""
Response of queues.get_all endpoint.
:param queues: Queues list
:type queues: Sequence[Queue]
"""
_service = "queues"
_action = "get_all"
_version = "2.4"
_schema = {
'definitions': {
'entry': {
'properties': {
'added': {
'description': 'Time this entry was added to the queue',
'format': 'date-time',
'type': ['string', 'null'],
},
'task': {
'description': 'Queued task ID',
'type': ['string', 'null'],
},
},
'type': 'object',
},
'queue': {
'properties': {
'company': {
'description': 'Company id',
'type': ['string', 'null'],
},
'created': {
'description': 'Queue creation time',
'format': 'date-time',
'type': ['string', 'null'],
},
'entries': {
'description': 'List of ordered queue entries',
'items': {'$ref': '#/definitions/entry'},
'type': ['array', 'null'],
},
'id': {'description': 'Queue id', 'type': ['string', 'null']},
'name': {
'description': 'Queue name',
'type': ['string', 'null'],
},
'system_tags': {
'description': "System tags. This field is reserved for system use, please don't use it.",
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'tags': {
'description': 'User-defined tags',
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'user': {
'description': 'Associated user id',
'type': ['string', 'null'],
},
},
'type': 'object',
},
},
'properties': {
'queues': {
'description': 'Queues list',
'items': {'$ref': '#/definitions/queue'},
'type': ['array', 'null'],
},
},
'type': 'object',
}
def __init__(
self, queues=None, **kwargs):
super(GetAllResponse, self).__init__(**kwargs)
self.queues = queues
@schema_property('queues')
def queues(self):
return self._property_queues
@queues.setter
def queues(self, value):
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Queue.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "queues", Queue, is_array=True)
self._property_queues = value
class GetByIdRequest(Request):
"""
Gets queue information
:param queue: Queue ID
:type queue: str
"""
_service = "queues"
_action = "get_by_id"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {'queue': {'description': 'Queue ID', 'type': 'string'}},
'required': ['queue'],
'type': 'object',
}
def __init__(
self, queue, **kwargs):
super(GetByIdRequest, self).__init__(**kwargs)
self.queue = queue
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
class GetByIdResponse(Response):
"""
Response of queues.get_by_id endpoint.
:param queue: Queue info
:type queue: Queue
"""
_service = "queues"
_action = "get_by_id"
_version = "2.4"
_schema = {
'definitions': {
'entry': {
'properties': {
'added': {
'description': 'Time this entry was added to the queue',
'format': 'date-time',
'type': ['string', 'null'],
},
'task': {
'description': 'Queued task ID',
'type': ['string', 'null'],
},
},
'type': 'object',
},
'queue': {
'properties': {
'company': {
'description': 'Company id',
'type': ['string', 'null'],
},
'created': {
'description': 'Queue creation time',
'format': 'date-time',
'type': ['string', 'null'],
},
'entries': {
'description': 'List of ordered queue entries',
'items': {'$ref': '#/definitions/entry'},
'type': ['array', 'null'],
},
'id': {'description': 'Queue id', 'type': ['string', 'null']},
'name': {
'description': 'Queue name',
'type': ['string', 'null'],
},
'system_tags': {
'description': "System tags. This field is reserved for system use, please don't use it.",
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'tags': {
'description': 'User-defined tags',
'items': {'type': 'string'},
'type': ['array', 'null'],
},
'user': {
'description': 'Associated user id',
'type': ['string', 'null'],
},
},
'type': 'object',
},
},
'properties': {
'queue': {
'description': 'Queue info',
'oneOf': [{'$ref': '#/definitions/queue'}, {'type': 'null'}],
},
},
'type': 'object',
}
def __init__(
self, queue=None, **kwargs):
super(GetByIdResponse, self).__init__(**kwargs)
self.queue = queue
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
if isinstance(value, dict):
value = Queue.from_dict(value)
else:
self.assert_isinstance(value, "queue", Queue)
self._property_queue = value
class GetDefaultRequest(Request):
"""
"""
_service = "queues"
_action = "get_default"
_version = "2.4"
_schema = {
'additionalProperties': False,
'definitions': {},
'properties': {},
'type': 'object',
}
class GetDefaultResponse(Response):
"""
Response of queues.get_default endpoint.
:param id: Queue id
:type id: str
:param name: Queue name
:type name: str
"""
_service = "queues"
_action = "get_default"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'id': {'description': 'Queue id', 'type': ['string', 'null']},
'name': {'description': 'Queue name', 'type': ['string', 'null']},
},
'type': 'object',
}
def __init__(
self, id=None, name=None, **kwargs):
super(GetDefaultResponse, self).__init__(**kwargs)
self.id = id
self.name = name
@schema_property('id')
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property('name')
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
class GetNextTaskRequest(Request):
"""
Gets the next task from the top of the queue (FIFO). The task entry is removed from the queue.
:param queue: Queue id
:type queue: str
"""
_service = "queues"
_action = "get_next_task"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {'queue': {'description': 'Queue id', 'type': 'string'}},
'required': ['queue'],
'type': 'object',
}
def __init__(
self, queue, **kwargs):
super(GetNextTaskRequest, self).__init__(**kwargs)
self.queue = queue
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
class GetNextTaskResponse(Response):
"""
Response of queues.get_next_task endpoint.
:param entry: Entry information
:type entry: Entry
"""
_service = "queues"
_action = "get_next_task"
_version = "2.4"
_schema = {
'definitions': {
'entry': {
'properties': {
'added': {
'description': 'Time this entry was added to the queue',
'format': 'date-time',
'type': ['string', 'null'],
},
'task': {
'description': 'Queued task ID',
'type': ['string', 'null'],
},
},
'type': 'object',
},
},
'properties': {
'entry': {
'description': 'Entry information',
'oneOf': [{'$ref': '#/definitions/entry'}, {'type': 'null'}],
},
},
'type': 'object',
}
def __init__(
self, entry=None, **kwargs):
super(GetNextTaskResponse, self).__init__(**kwargs)
self.entry = entry
@schema_property('entry')
def entry(self):
return self._property_entry
@entry.setter
def entry(self, value):
if value is None:
self._property_entry = None
return
if isinstance(value, dict):
value = Entry.from_dict(value)
else:
self.assert_isinstance(value, "entry", Entry)
self._property_entry = value
class GetQueueMetricsRequest(Request):
"""
    Returns metrics of the company queues. The metrics are averaged over the specified interval.
:param from_date: Starting time (in seconds from epoch) for collecting metrics
:type from_date: float
:param to_date: Ending time (in seconds from epoch) for collecting metrics
:type to_date: float
:param interval: Time interval in seconds for a single metrics point. The
minimal value is 1
:type interval: int
:param queue_ids: List of queue ids to collect metrics for. If not provided or
        empty then the average metrics across all the company queues will be
returned.
:type queue_ids: Sequence[str]
"""
_service = "queues"
_action = "get_queue_metrics"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'from_date': {
'description': 'Starting time (in seconds from epoch) for collecting metrics',
'type': 'number',
},
'interval': {
'description': 'Time interval in seconds for a single metrics point. The minimal value is 1',
'type': 'integer',
},
'queue_ids': {
                'description': 'List of queue ids to collect metrics for. If not provided or empty then the average metrics across all the company queues will be returned.',
'items': {'type': 'string'},
'type': 'array',
},
'to_date': {
'description': 'Ending time (in seconds from epoch) for collecting metrics',
'type': 'number',
},
},
'required': ['from_date', 'to_date', 'interval'],
'type': 'object',
}
def __init__(
self, from_date, to_date, interval, queue_ids=None, **kwargs):
super(GetQueueMetricsRequest, self).__init__(**kwargs)
self.from_date = from_date
self.to_date = to_date
self.interval = interval
self.queue_ids = queue_ids
@schema_property('from_date')
def from_date(self):
return self._property_from_date
@from_date.setter
def from_date(self, value):
if value is None:
self._property_from_date = None
return
self.assert_isinstance(value, "from_date", six.integer_types + (float,))
self._property_from_date = value
@schema_property('to_date')
def to_date(self):
return self._property_to_date
@to_date.setter
def to_date(self, value):
if value is None:
self._property_to_date = None
return
self.assert_isinstance(value, "to_date", six.integer_types + (float,))
self._property_to_date = value
@schema_property('interval')
def interval(self):
return self._property_interval
@interval.setter
def interval(self, value):
if value is None:
self._property_interval = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "interval", six.integer_types)
self._property_interval = value
@schema_property('queue_ids')
def queue_ids(self):
return self._property_queue_ids
@queue_ids.setter
def queue_ids(self, value):
if value is None:
self._property_queue_ids = None
return
self.assert_isinstance(value, "queue_ids", (list, tuple))
self.assert_isinstance(value, "queue_ids", six.string_types, is_array=True)
self._property_queue_ids = value
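# Example (hedged): building a metrics request covering the last 24 hours at one-hour
# resolution. Epoch-second floats are what the from_date/to_date setters accept; the
# queue id is a placeholder, and omitting queue_ids averages across all company queues.
def _example_queue_metrics_request():
    import time
    return GetQueueMetricsRequest(
        from_date=time.time() - 24 * 3600,
        to_date=time.time(),
        interval=3600,
        queue_ids=["QUEUE_ID"],
    )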
class GetQueueMetricsResponse(Response):
"""
Response of queues.get_queue_metrics endpoint.
:param queues: List of the requested queues with their metrics. If no queue ids
        were requested then the 'all' queue is returned with the metrics averaged across
all the company queues.
:type queues: Sequence[QueueMetrics]
"""
_service = "queues"
_action = "get_queue_metrics"
_version = "2.4"
_schema = {
'definitions': {
'queue_metrics': {
'properties': {
'avg_waiting_times': {
'description': 'List of average waiting times for tasks in the queue. The points correspond to the timestamps in the dates list. If more than one value exists for the given interval then the maximum value is taken.',
'items': {'type': 'number'},
'type': ['array', 'null'],
},
'dates': {
                        'description': 'List of timestamps (in seconds from epoch) in ascending order. The timestamps are separated by the requested interval. Timestamps where no queue status change was recorded are omitted.',
'items': {'type': 'integer'},
'type': ['array', 'null'],
},
'queue': {
'description': 'ID of the queue',
'type': ['string', 'null'],
},
'queue_lengths': {
'description': 'List of tasks counts in the queue. The points correspond to the timestamps in the dates list. If more than one value exists for the given interval then the count that corresponds to the maximum average value is taken.',
'items': {'type': 'integer'},
'type': ['array', 'null'],
},
},
'type': 'object',
},
},
'properties': {
'queues': {
'description': "List of the requested queues with their metrics. If no queue ids were requested then 'all' queue is returned with the metrics averaged accross all the company queues.",
'items': {'$ref': '#/definitions/queue_metrics'},
'type': ['array', 'null'],
},
},
'type': 'object',
}
def __init__(
self, queues=None, **kwargs):
super(GetQueueMetricsResponse, self).__init__(**kwargs)
self.queues = queues
@schema_property('queues')
def queues(self):
return self._property_queues
@queues.setter
def queues(self, value):
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [QueueMetrics.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "queues", QueueMetrics, is_array=True)
self._property_queues = value
class MoveTaskBackwardRequest(Request):
"""
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
    :param count: Number of positions in the queue to move the task backward
relative to the current position. Optional, the default value is 1.
:type count: int
"""
_service = "queues"
_action = "move_task_backward"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'count': {
'description': 'Number of positions in the queue to move the task forward relative to the current position. Optional, the default value is 1.',
'type': 'integer',
},
'queue': {'description': 'Queue id', 'type': 'string'},
'task': {'description': 'Task id', 'type': 'string'},
},
'required': ['queue', 'task'],
'type': 'object',
}
def __init__(
self, queue, task, count=None, **kwargs):
super(MoveTaskBackwardRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
self.count = count
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('task')
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property('count')
def count(self):
return self._property_count
@count.setter
def count(self, value):
if value is None:
self._property_count = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "count", six.integer_types)
self._property_count = value
class MoveTaskBackwardResponse(Response):
"""
Response of queues.move_task_backward endpoint.
:param position: The new position of the task entry in the queue (index, -1
represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_backward"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'position': {
'description': 'The new position of the task entry in the queue (index, -1 represents bottom of queue)',
'type': ['integer', 'null'],
},
},
'type': 'object',
}
def __init__(
self, position=None, **kwargs):
super(MoveTaskBackwardResponse, self).__init__(**kwargs)
self.position = position
@schema_property('position')
def position(self):
return self._property_position
@position.setter
def position(self, value):
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
class MoveTaskForwardRequest(Request):
"""
Moves a task entry one step forward towards the top of the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
:param count: Number of positions in the queue to move the task forward
relative to the current position. Optional, the default value is 1.
:type count: int
"""
_service = "queues"
_action = "move_task_forward"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'count': {
'description': 'Number of positions in the queue to move the task forward relative to the current position. Optional, the default value is 1.',
'type': 'integer',
},
'queue': {'description': 'Queue id', 'type': 'string'},
'task': {'description': 'Task id', 'type': 'string'},
},
'required': ['queue', 'task'],
'type': 'object',
}
def __init__(
self, queue, task, count=None, **kwargs):
super(MoveTaskForwardRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
self.count = count
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('task')
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property('count')
def count(self):
return self._property_count
@count.setter
def count(self, value):
if value is None:
self._property_count = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "count", six.integer_types)
self._property_count = value
class MoveTaskForwardResponse(Response):
"""
Response of queues.move_task_forward endpoint.
:param position: The new position of the task entry in the queue (index, -1
represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_forward"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'position': {
'description': 'The new position of the task entry in the queue (index, -1 represents bottom of queue)',
'type': ['integer', 'null'],
},
},
'type': 'object',
}
def __init__(
self, position=None, **kwargs):
super(MoveTaskForwardResponse, self).__init__(**kwargs)
self.position = position
@schema_property('position')
def position(self):
return self._property_position
@position.setter
def position(self, value):
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
class MoveTaskToBackRequest(Request):
"""
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "move_task_to_back"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'queue': {'description': 'Queue id', 'type': 'string'},
'task': {'description': 'Task id', 'type': 'string'},
},
'required': ['queue', 'task'],
'type': 'object',
}
def __init__(
self, queue, task, **kwargs):
super(MoveTaskToBackRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('task')
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
class MoveTaskToBackResponse(Response):
"""
Response of queues.move_task_to_back endpoint.
:param position: The new position of the task entry in the queue (index, -1
represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_to_back"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'position': {
'description': 'The new position of the task entry in the queue (index, -1 represents bottom of queue)',
'type': ['integer', 'null'],
},
},
'type': 'object',
}
def __init__(
self, position=None, **kwargs):
super(MoveTaskToBackResponse, self).__init__(**kwargs)
self.position = position
@schema_property('position')
def position(self):
return self._property_position
@position.setter
def position(self, value):
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
class MoveTaskToFrontRequest(Request):
"""
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "move_task_to_front"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'queue': {'description': 'Queue id', 'type': 'string'},
'task': {'description': 'Task id', 'type': 'string'},
},
'required': ['queue', 'task'],
'type': 'object',
}
def __init__(
self, queue, task, **kwargs):
super(MoveTaskToFrontRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('task')
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
class MoveTaskToFrontResponse(Response):
"""
Response of queues.move_task_to_front endpoint.
:param position: The new position of the task entry in the queue (index, -1
represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_to_front"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'position': {
'description': 'The new position of the task entry in the queue (index, -1 represents bottom of queue)',
'type': ['integer', 'null'],
},
},
'type': 'object',
}
def __init__(
self, position=None, **kwargs):
super(MoveTaskToFrontResponse, self).__init__(**kwargs)
self.position = position
@schema_property('position')
def position(self):
return self._property_position
@position.setter
def position(self, value):
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
class RemoveTaskRequest(Request):
"""
Removes a task entry from the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "remove_task"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'queue': {'description': 'Queue id', 'type': 'string'},
'task': {'description': 'Task id', 'type': 'string'},
},
'required': ['queue', 'task'],
'type': 'object',
}
def __init__(
self, queue, task, **kwargs):
super(RemoveTaskRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('task')
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
class RemoveTaskResponse(Response):
"""
Response of queues.remove_task endpoint.
:param removed: Number of tasks removed (0 or 1)
:type removed: int
"""
_service = "queues"
_action = "remove_task"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'removed': {
'description': 'Number of tasks removed (0 or 1)',
'enum': [0, 1],
'type': ['integer', 'null'],
},
},
'type': 'object',
}
def __init__(
self, removed=None, **kwargs):
super(RemoveTaskResponse, self).__init__(**kwargs)
self.removed = removed
@schema_property('removed')
def removed(self):
return self._property_removed
@removed.setter
def removed(self, value):
if value is None:
self._property_removed = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "removed", six.integer_types)
self._property_removed = value
class UpdateRequest(Request):
"""
Update queue information
:param queue: Queue id
:type queue: str
    :param name: Queue name. Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
"""
_service = "queues"
_action = "update"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'name': {
'description': 'Queue name Unique within the company.',
'type': 'string',
},
'queue': {'description': 'Queue id', 'type': 'string'},
'system_tags': {
'description': "System tags list. This field is reserved for system use, please don't use it.",
'items': {'type': 'string'},
'type': 'array',
},
'tags': {
'description': 'User-defined tags list',
'items': {'type': 'string'},
'type': 'array',
},
},
'required': ['queue'],
'type': 'object',
}
def __init__(
self, queue, name=None, tags=None, system_tags=None, **kwargs):
super(UpdateRequest, self).__init__(**kwargs)
self.queue = queue
self.name = name
self.tags = tags
self.system_tags = system_tags
@schema_property('queue')
def queue(self):
return self._property_queue
@queue.setter
def queue(self, value):
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property('name')
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property('tags')
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property('system_tags')
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
class UpdateResponse(Response):
"""
Response of queues.update endpoint.
:param updated: Number of queues updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "queues"
_action = "update"
_version = "2.4"
_schema = {
'definitions': {},
'properties': {
'fields': {
'additionalProperties': True,
'description': 'Updated fields names and values',
'type': ['object', 'null'],
},
'updated': {
'description': 'Number of queues updated (0 or 1)',
'enum': [0, 1],
'type': ['integer', 'null'],
},
},
'type': 'object',
}
def __init__(
self, updated=None, fields=None, **kwargs):
super(UpdateResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property('updated')
def updated(self):
return self._property_updated
@updated.setter
def updated(self, value):
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property('fields')
def fields(self):
return self._property_fields
@fields.setter
def fields(self, value):
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
response_mapping = {
GetByIdRequest: GetByIdResponse,
GetAllRequest: GetAllResponse,
GetDefaultRequest: GetDefaultResponse,
CreateRequest: CreateResponse,
UpdateRequest: UpdateResponse,
DeleteRequest: DeleteResponse,
AddTaskRequest: AddTaskResponse,
GetNextTaskRequest: GetNextTaskResponse,
RemoveTaskRequest: RemoveTaskResponse,
MoveTaskForwardRequest: MoveTaskForwardResponse,
MoveTaskBackwardRequest: MoveTaskBackwardResponse,
MoveTaskToFrontRequest: MoveTaskToFrontResponse,
MoveTaskToBackRequest: MoveTaskToBackResponse,
GetQueueMetricsRequest: GetQueueMetricsResponse,
}
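# Example (hedged): a minimal sketch of how these models pair up. The transport is a
# configured backend_api session (not shown here); this only builds a request and
# resolves its response class through `response_mapping`, which is what the session
# layer uses when decoding replies. The queue and task ids are placeholders.
def _example_add_task_pairing():
    request = AddTaskRequest(queue="QUEUE_ID", task="TASK_ID")
    response_cls = response_mapping[type(request)]
    assert response_cls is AddTaskResponse
    return request, response_cls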
/drypatrick-2021.7.5.tar.gz/drypatrick-2021.7.5/homeassistant/components/vilfo/sensor.py
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import ATTR_ICON
from .const import (
ATTR_API_DATA_FIELD,
ATTR_DEVICE_CLASS,
ATTR_LABEL,
ATTR_UNIT,
DOMAIN,
ROUTER_DEFAULT_MODEL,
ROUTER_DEFAULT_NAME,
ROUTER_MANUFACTURER,
SENSOR_TYPES,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add Vilfo Router entities from a config_entry."""
vilfo = hass.data[DOMAIN][config_entry.entry_id]
sensors = []
for sensor_type in SENSOR_TYPES:
sensors.append(VilfoRouterSensor(sensor_type, vilfo))
async_add_entities(sensors, True)
class VilfoRouterSensor(SensorEntity):
"""Define a Vilfo Router Sensor."""
def __init__(self, sensor_type, api):
"""Initialize."""
self.api = api
self.sensor_type = sensor_type
self._device_info = {
"identifiers": {(DOMAIN, api.host, api.mac_address)},
"name": ROUTER_DEFAULT_NAME,
"manufacturer": ROUTER_MANUFACTURER,
"model": ROUTER_DEFAULT_MODEL,
"sw_version": api.firmware_version,
}
self._unique_id = f"{self.api.unique_id}_{self.sensor_type}"
self._state = None
@property
def available(self):
"""Return whether the sensor is available or not."""
return self.api.available
@property
def device_info(self):
"""Return the device info."""
return self._device_info
@property
def device_class(self):
"""Return the device class."""
return SENSOR_TYPES[self.sensor_type].get(ATTR_DEVICE_CLASS)
@property
def icon(self):
"""Return the icon for the sensor."""
return SENSOR_TYPES[self.sensor_type][ATTR_ICON]
@property
def name(self):
"""Return the name of the sensor."""
parent_device_name = self._device_info["name"]
sensor_name = SENSOR_TYPES[self.sensor_type][ATTR_LABEL]
return f"{parent_device_name} {sensor_name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return SENSOR_TYPES[self.sensor_type].get(ATTR_UNIT)
async def async_update(self):
"""Update the router data."""
await self.api.async_update()
self._state = self.api.data.get(
SENSOR_TYPES[self.sensor_type][ATTR_API_DATA_FIELD]
)
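# Illustrative sketch (not part of the integration): each SENSOR_TYPES entry consumed
# above is expected to be a dict exposing these keys (ATTR_DEVICE_CLASS is optional,
# since it is read with .get()). The concrete values here ("Load", "mdi:memory", "%",
# "load") are hypothetical examples, not the real definitions from .const.
_EXAMPLE_SENSOR_TYPE = {
    ATTR_LABEL: "Load",
    ATTR_ICON: "mdi:memory",
    ATTR_UNIT: "%",
    ATTR_API_DATA_FIELD: "load",
}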
|
PypiClean
|
/pulumi_aws-6.1.0a1693529760.tar.gz/pulumi_aws-6.1.0a1693529760/pulumi_aws/redshift/hsm_configuration.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HsmConfigurationArgs', 'HsmConfiguration']
@pulumi.input_type
class HsmConfigurationArgs:
def __init__(__self__, *,
description: pulumi.Input[str],
hsm_configuration_identifier: pulumi.Input[str],
hsm_ip_address: pulumi.Input[str],
hsm_partition_name: pulumi.Input[str],
hsm_partition_password: pulumi.Input[str],
hsm_server_public_certificate: pulumi.Input[str],
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a HsmConfiguration resource.
:param pulumi.Input[str] description: A text description of the HSM configuration to be created.
:param pulumi.Input[str] hsm_configuration_identifier: The identifier to be assigned to the new Amazon Redshift HSM configuration.
:param pulumi.Input[str] hsm_ip_address: The IP address that the Amazon Redshift cluster must use to access the HSM.
:param pulumi.Input[str] hsm_partition_name: The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
:param pulumi.Input[str] hsm_partition_password: The password required to access the HSM partition.
:param pulumi.Input[str] hsm_server_public_certificate: The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "hsm_configuration_identifier", hsm_configuration_identifier)
pulumi.set(__self__, "hsm_ip_address", hsm_ip_address)
pulumi.set(__self__, "hsm_partition_name", hsm_partition_name)
pulumi.set(__self__, "hsm_partition_password", hsm_partition_password)
pulumi.set(__self__, "hsm_server_public_certificate", hsm_server_public_certificate)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def description(self) -> pulumi.Input[str]:
"""
A text description of the HSM configuration to be created.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: pulumi.Input[str]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="hsmConfigurationIdentifier")
def hsm_configuration_identifier(self) -> pulumi.Input[str]:
"""
The identifier to be assigned to the new Amazon Redshift HSM configuration.
"""
return pulumi.get(self, "hsm_configuration_identifier")
@hsm_configuration_identifier.setter
def hsm_configuration_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "hsm_configuration_identifier", value)
@property
@pulumi.getter(name="hsmIpAddress")
def hsm_ip_address(self) -> pulumi.Input[str]:
"""
The IP address that the Amazon Redshift cluster must use to access the HSM.
"""
return pulumi.get(self, "hsm_ip_address")
@hsm_ip_address.setter
def hsm_ip_address(self, value: pulumi.Input[str]):
pulumi.set(self, "hsm_ip_address", value)
@property
@pulumi.getter(name="hsmPartitionName")
def hsm_partition_name(self) -> pulumi.Input[str]:
"""
The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
"""
return pulumi.get(self, "hsm_partition_name")
@hsm_partition_name.setter
def hsm_partition_name(self, value: pulumi.Input[str]):
pulumi.set(self, "hsm_partition_name", value)
@property
@pulumi.getter(name="hsmPartitionPassword")
def hsm_partition_password(self) -> pulumi.Input[str]:
"""
The password required to access the HSM partition.
"""
return pulumi.get(self, "hsm_partition_password")
@hsm_partition_password.setter
def hsm_partition_password(self, value: pulumi.Input[str]):
pulumi.set(self, "hsm_partition_password", value)
@property
@pulumi.getter(name="hsmServerPublicCertificate")
def hsm_server_public_certificate(self) -> pulumi.Input[str]:
"""
The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.
"""
return pulumi.get(self, "hsm_server_public_certificate")
@hsm_server_public_certificate.setter
def hsm_server_public_certificate(self, value: pulumi.Input[str]):
pulumi.set(self, "hsm_server_public_certificate", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _HsmConfigurationState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
hsm_configuration_identifier: Optional[pulumi.Input[str]] = None,
hsm_ip_address: Optional[pulumi.Input[str]] = None,
hsm_partition_name: Optional[pulumi.Input[str]] = None,
hsm_partition_password: Optional[pulumi.Input[str]] = None,
hsm_server_public_certificate: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering HsmConfiguration resources.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the Hsm Client Certificate.
:param pulumi.Input[str] description: A text description of the HSM configuration to be created.
:param pulumi.Input[str] hsm_configuration_identifier: The identifier to be assigned to the new Amazon Redshift HSM configuration.
:param pulumi.Input[str] hsm_ip_address: The IP address that the Amazon Redshift cluster must use to access the HSM.
:param pulumi.Input[str] hsm_partition_name: The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
:param pulumi.Input[str] hsm_partition_password: The password required to access the HSM partition.
:param pulumi.Input[str] hsm_server_public_certificate: The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if description is not None:
pulumi.set(__self__, "description", description)
if hsm_configuration_identifier is not None:
pulumi.set(__self__, "hsm_configuration_identifier", hsm_configuration_identifier)
if hsm_ip_address is not None:
pulumi.set(__self__, "hsm_ip_address", hsm_ip_address)
if hsm_partition_name is not None:
pulumi.set(__self__, "hsm_partition_name", hsm_partition_name)
if hsm_partition_password is not None:
pulumi.set(__self__, "hsm_partition_password", hsm_partition_password)
if hsm_server_public_certificate is not None:
pulumi.set(__self__, "hsm_server_public_certificate", hsm_server_public_certificate)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the Hsm Client Certificate.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A text description of the HSM configuration to be created.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="hsmConfigurationIdentifier")
def hsm_configuration_identifier(self) -> Optional[pulumi.Input[str]]:
"""
The identifier to be assigned to the new Amazon Redshift HSM configuration.
"""
return pulumi.get(self, "hsm_configuration_identifier")
@hsm_configuration_identifier.setter
def hsm_configuration_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hsm_configuration_identifier", value)
@property
@pulumi.getter(name="hsmIpAddress")
def hsm_ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The IP address that the Amazon Redshift cluster must use to access the HSM.
"""
return pulumi.get(self, "hsm_ip_address")
@hsm_ip_address.setter
def hsm_ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hsm_ip_address", value)
@property
@pulumi.getter(name="hsmPartitionName")
def hsm_partition_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
"""
return pulumi.get(self, "hsm_partition_name")
@hsm_partition_name.setter
def hsm_partition_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hsm_partition_name", value)
@property
@pulumi.getter(name="hsmPartitionPassword")
def hsm_partition_password(self) -> Optional[pulumi.Input[str]]:
"""
The password required to access the HSM partition.
"""
return pulumi.get(self, "hsm_partition_password")
@hsm_partition_password.setter
def hsm_partition_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hsm_partition_password", value)
@property
@pulumi.getter(name="hsmServerPublicCertificate")
def hsm_server_public_certificate(self) -> Optional[pulumi.Input[str]]:
"""
The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.
"""
return pulumi.get(self, "hsm_server_public_certificate")
@hsm_server_public_certificate.setter
def hsm_server_public_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hsm_server_public_certificate", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class HsmConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
hsm_configuration_identifier: Optional[pulumi.Input[str]] = None,
hsm_ip_address: Optional[pulumi.Input[str]] = None,
hsm_partition_name: Optional[pulumi.Input[str]] = None,
hsm_partition_password: Optional[pulumi.Input[str]] = None,
hsm_server_public_certificate: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Creates an HSM configuration that contains the information required by an Amazon Redshift cluster to store and use database encryption keys in a Hardware Security Module (HSM).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.redshift.HsmConfiguration("example",
description="example",
hsm_configuration_identifier="example",
hsm_ip_address="10.0.0.1",
hsm_partition_name="aws",
hsm_partition_password="example",
hsm_server_public_certificate="example")
```
## Import
Using `pulumi import`, import Redshift HSM Client Certificates using `hsm_configuration_identifier`. For example:
```sh
$ pulumi import aws:redshift/hsmConfiguration:HsmConfiguration example example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A text description of the HSM configuration to be created.
:param pulumi.Input[str] hsm_configuration_identifier: The identifier to be assigned to the new Amazon Redshift HSM configuration.
:param pulumi.Input[str] hsm_ip_address: The IP address that the Amazon Redshift cluster must use to access the HSM.
:param pulumi.Input[str] hsm_partition_name: The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
:param pulumi.Input[str] hsm_partition_password: The password required to access the HSM partition.
:param pulumi.Input[str] hsm_server_public_certificate: The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HsmConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates an HSM configuration that contains the information required by an Amazon Redshift cluster to store and use database encryption keys in a Hardware Security Module (HSM).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.redshift.HsmConfiguration("example",
description="example",
hsm_configuration_identifier="example",
hsm_ip_address="10.0.0.1",
hsm_partition_name="aws",
hsm_partition_password="example",
hsm_server_public_certificate="example")
```
## Import
Using `pulumi import`, import Redshift HSM Client Certificates using `hsm_configuration_identifier`. For example:
```sh
$ pulumi import aws:redshift/hsmConfiguration:HsmConfiguration example example
```
:param str resource_name: The name of the resource.
:param HsmConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HsmConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
hsm_configuration_identifier: Optional[pulumi.Input[str]] = None,
hsm_ip_address: Optional[pulumi.Input[str]] = None,
hsm_partition_name: Optional[pulumi.Input[str]] = None,
hsm_partition_password: Optional[pulumi.Input[str]] = None,
hsm_server_public_certificate: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HsmConfigurationArgs.__new__(HsmConfigurationArgs)
if description is None and not opts.urn:
raise TypeError("Missing required property 'description'")
__props__.__dict__["description"] = description
if hsm_configuration_identifier is None and not opts.urn:
raise TypeError("Missing required property 'hsm_configuration_identifier'")
__props__.__dict__["hsm_configuration_identifier"] = hsm_configuration_identifier
if hsm_ip_address is None and not opts.urn:
raise TypeError("Missing required property 'hsm_ip_address'")
__props__.__dict__["hsm_ip_address"] = hsm_ip_address
if hsm_partition_name is None and not opts.urn:
raise TypeError("Missing required property 'hsm_partition_name'")
__props__.__dict__["hsm_partition_name"] = hsm_partition_name
if hsm_partition_password is None and not opts.urn:
raise TypeError("Missing required property 'hsm_partition_password'")
__props__.__dict__["hsm_partition_password"] = None if hsm_partition_password is None else pulumi.Output.secret(hsm_partition_password)
if hsm_server_public_certificate is None and not opts.urn:
raise TypeError("Missing required property 'hsm_server_public_certificate'")
__props__.__dict__["hsm_server_public_certificate"] = hsm_server_public_certificate
__props__.__dict__["tags"] = tags
__props__.__dict__["arn"] = None
__props__.__dict__["tags_all"] = None
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["hsmPartitionPassword"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(HsmConfiguration, __self__).__init__(
'aws:redshift/hsmConfiguration:HsmConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
hsm_configuration_identifier: Optional[pulumi.Input[str]] = None,
hsm_ip_address: Optional[pulumi.Input[str]] = None,
hsm_partition_name: Optional[pulumi.Input[str]] = None,
hsm_partition_password: Optional[pulumi.Input[str]] = None,
hsm_server_public_certificate: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'HsmConfiguration':
"""
Get an existing HsmConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the Hsm Client Certificate.
:param pulumi.Input[str] description: A text description of the HSM configuration to be created.
:param pulumi.Input[str] hsm_configuration_identifier: The identifier to be assigned to the new Amazon Redshift HSM configuration.
:param pulumi.Input[str] hsm_ip_address: The IP address that the Amazon Redshift cluster must use to access the HSM.
:param pulumi.Input[str] hsm_partition_name: The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
:param pulumi.Input[str] hsm_partition_password: The password required to access the HSM partition.
:param pulumi.Input[str] hsm_server_public_certificate: The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HsmConfigurationState.__new__(_HsmConfigurationState)
__props__.__dict__["arn"] = arn
__props__.__dict__["description"] = description
__props__.__dict__["hsm_configuration_identifier"] = hsm_configuration_identifier
__props__.__dict__["hsm_ip_address"] = hsm_ip_address
__props__.__dict__["hsm_partition_name"] = hsm_partition_name
__props__.__dict__["hsm_partition_password"] = hsm_partition_password
__props__.__dict__["hsm_server_public_certificate"] = hsm_server_public_certificate
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return HsmConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the Hsm Client Certificate.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
A text description of the HSM configuration to be created.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="hsmConfigurationIdentifier")
def hsm_configuration_identifier(self) -> pulumi.Output[str]:
"""
The identifier to be assigned to the new Amazon Redshift HSM configuration.
"""
return pulumi.get(self, "hsm_configuration_identifier")
@property
@pulumi.getter(name="hsmIpAddress")
def hsm_ip_address(self) -> pulumi.Output[str]:
"""
The IP address that the Amazon Redshift cluster must use to access the HSM.
"""
return pulumi.get(self, "hsm_ip_address")
@property
@pulumi.getter(name="hsmPartitionName")
def hsm_partition_name(self) -> pulumi.Output[str]:
"""
The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
"""
return pulumi.get(self, "hsm_partition_name")
@property
@pulumi.getter(name="hsmPartitionPassword")
def hsm_partition_password(self) -> pulumi.Output[str]:
"""
The password required to access the HSM partition.
"""
return pulumi.get(self, "hsm_partition_password")
@property
@pulumi.getter(name="hsmServerPublicCertificate")
def hsm_server_public_certificate(self) -> pulumi.Output[str]:
"""
The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.
"""
return pulumi.get(self, "hsm_server_public_certificate")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
|
PypiClean
|
/ganymede_py-0.0.24-py3-none-any.whl/ganymede/editor/storage.py
|
from google.cloud import storage
from typing import List
def get_project(project_name: str) -> str:
"""
Gets project name
Parameters
----------
project_name : str
current project name
Returns
-------
str
project name in bucket
"""
if project_name == 'ganymede-core-dev':
project_name = 'ganymede-dev'
return project_name
def get_bucket_name(bucket_source: str, project: str) -> str:
"""
Gets bucket name
Parameters
----------
bucket_source : str
Either 'input' or 'output' bucket
project : str
Ganymede project name
Returns
-------
str
Bucket name to access
Raises
------
ValueError
Invalid bucket source; has to be either 'input' or 'output'
"""
bucket_source = bucket_source.strip().lower()
if bucket_source == 'input':
bucket_name = f"ganymede-{project}-lab-ingest"
elif bucket_source == 'output':
bucket_name = f"ganymede-{project}-output"
else:
raise ValueError("Data source must either be 'input' or 'output'")
return bucket_name
def list_data(bucket_source='input') -> List[str]:
"""
Retrieves listing of available files from cloud storage
Parameters
----------
    bucket_source: str
        Bucket to retrieve the file listing from; either 'input' or 'output'
Returns
-------
List[str]
list of files available for retrieval
"""
client = storage.Client()
project = get_project(client.project)
bucket_name = get_bucket_name(bucket_source, project)
try:
file_list = []
for blob in client.list_blobs(bucket_name):
file_list.append(blob.name)
return file_list
except Exception as e:
        print('\033[91m' + 'File list from cloud storage could not be retrieved. '
              'See the following error.\n' + str(e))
def get_data(filename: str, bucket_source='input') -> bytes:
"""
Retrieves data from cloud storage
Parameters
----------
filename : str
File to retrieve
bucket_source: str
Bucket to retrieve file from; either 'input' or 'output'
Returns
-------
bytes
file contents
"""
client = storage.Client()
project = get_project(client.project)
bucket_name = get_bucket_name(bucket_source, project)
try:
bucket = client.get_bucket(bucket_name)
return bucket.blob(filename).download_as_string()
except Exception as e:
print('\033[91m' + 'Data retrieval was unsuccessful. '
'See the following error.\n' + str(e))
def save_data_from_file(file_to_upload: str,
dest_blob_name: str,
dest_bucket='output',
timeout=60):
"""
Store file in cloud storage
Parameters
----------
file_to_upload : str
Filename of local file to upload to cloud storage
dest_blob_name : str
Path of destination blob
dest_bucket: str
Bucket to store file in; either 'input' or 'output'
timeout: int
Number of seconds before upload timeout
"""
client = storage.Client()
project = get_project(client.project)
bucket_name = get_bucket_name(dest_bucket, project)
try:
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(dest_blob_name)
blob.upload_from_filename(file_to_upload, timeout=timeout)
    except Exception as e:
        print('\033[91m' + 'Data storage was unsuccessful. '
              'See the following error.\n' + str(e))
        return
    print(f'File {file_to_upload} successfully uploaded to {dest_blob_name} in bucket {bucket_name}')
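# Illustrative usage sketch (assumes Google Cloud credentials are already configured
# for the active project; the file names used below are hypothetical placeholders).
if __name__ == '__main__':
    # List the ingest bucket, fetch one file, then upload a local file to the output bucket.
    print(list_data(bucket_source='input'))
    contents = get_data('example.csv', bucket_source='input')
    if contents is not None:
        print(f'Fetched {len(contents)} bytes')
    save_data_from_file('example_local.csv', 'uploads/example_local.csv', dest_bucket='output')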
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/request/AlipayCommerceEducateCampusSchoolcardUnsignRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceEducateCampusSchoolcardUnsignModel import AlipayCommerceEducateCampusSchoolcardUnsignModel
class AlipayCommerceEducateCampusSchoolcardUnsignRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceEducateCampusSchoolcardUnsignModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceEducateCampusSchoolcardUnsignModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.educate.campus.schoolcard.unsign'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
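# Illustrative sketch (not part of the SDK): a request object is typically populated
# and then serialized with get_params() before being signed and sent by an Alipay
# client. The notify URL below is a hypothetical placeholder.
#
#   model = AlipayCommerceEducateCampusSchoolcardUnsignModel()
#   request = AlipayCommerceEducateCampusSchoolcardUnsignRequest(biz_model=model)
#   request.notify_url = 'https://example.com/alipay/notify'
#   params = request.get_params()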
|
PypiClean
|
/aws_role_credentials-0.6.4.tar.gz/aws_role_credentials-0.6.4/aws_role_credentials/cli.py
|
from __future__ import print_function
import sys
import argparse
import logging
import os
from os.path import expanduser
from aws_role_credentials import metadata
from aws_role_credentials.actions import Actions
log = logging.getLogger('aws_role_credentials')
def configurelogging():
log.setLevel(logging.DEBUG)
stderrlog = logging.StreamHandler()
stderrlog.setFormatter(logging.Formatter("%(message)s"))
log.addHandler(stderrlog)
def read_stdin():
try:
return ''.join([line for line in sys.stdin])
except KeyboardInterrupt:
sys.stdout.flush()
pass
def token_action(args):
if args['exec_command']:
return Actions.exec_handler(**args)
return Actions.credentials_handler(**args)
def saml_action(args):
args['assertion'] = read_stdin()
token_action(args)(Actions.saml_token(**args))
def user_action(args):
token_action(args)(Actions.user_token(**args))
def create_parser(prog, epilog,
saml_action=saml_action,
user_action=user_action):
arg_parser = argparse.ArgumentParser(
prog=prog,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=metadata.description,
epilog=epilog)
subparsers = arg_parser.add_subparsers()
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
'-V', '--version',
action='version',
version='{0} {1}'.format(metadata.project, metadata.version))
parent_parser.add_argument(
'--profile', type=str,
default='sts',
help='Use a specific profile in your credential file.')
parent_parser.add_argument(
'--region', type=str,
default='us-east-1',
help='The region to use. Overrides config/env settings.')
parent_parser.add_argument(
'--role-arn', type=str,
help='Optional role ARN to use when multiple roles are available.')
parent_parser.add_argument(
'--exec', type=str,
dest='exec_command',
help='If present then the string is read as a command to execute with the AWS credentials set as environment variables.')
parent_parser.add_argument(
'-q', '--quiet',
action='store_true',
help='Do not print helpful info including token expiration on successful authentication.')
saml_parser = subparsers.add_parser('saml',
description='Assume role using SAML assertion',
parents=[parent_parser])
saml_parser.set_defaults(func=saml_action)
user_parser = subparsers.add_parser('user',
description='Assume role using IAM user',
parents=[parent_parser])
user_parser.add_argument(
'role_arn', type=str,
help='The arn of the role to assume',
)
user_parser.add_argument(
'session_name', type=str,
help='An identifier for the assumed role session.')
user_parser.add_argument(
'--mfa-serial-number', type=str,
help='An identifier of the MFA device that is associated with the user.')
user_parser.add_argument(
'--mfa-token', type=str,
help='The value provided by the MFA device.')
user_parser.set_defaults(func=user_action)
return arg_parser
def main(argv):
    """Program entry point.
    :param argv: command-line arguments
    :type argv: :class:`list`
    """
    configurelogging()
author_strings = []
for name, email in zip(metadata.authors, metadata.emails):
author_strings.append('Author: {0} <{1}>'.format(name, email))
epilog = '''
{project} {version}
{authors}
URL: <{url}>
'''.format(
project=metadata.project,
version=metadata.version,
authors='\n'.join(author_strings),
url=metadata.url)
arg_parser = create_parser(argv[0], epilog)
config = arg_parser.parse_args(args=argv[1:])
log.info(epilog)
credentials_dir = expanduser('~/.aws')
if not os.path.exists(credentials_dir):
os.makedirs(credentials_dir)
config.credentials_filename = os.path.join(credentials_dir, 'credentials')
config.func(vars(config))
return 0
def entry_point():
"""Zero-argument entry point for use with setuptools/distribute."""
raise SystemExit(main(sys.argv))
if __name__ == '__main__':
entry_point()
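# Illustrative usage sketch (the executable name, shown here as `aws_role_credentials`,
# depends on the package's console-script entry point; the role ARN, session name and
# assertion file are hypothetical placeholders):
#
#   aws_role_credentials user arn:aws:iam::123456789012:role/example-role example-session
#   cat assertion.b64 | aws_role_credentials saml --profile sts --region us-east-1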
|
PypiClean
|
/gamification-engine-0.4.0.tar.gz/gamification-engine-0.4.0/gengine/app/jsscripts/node_modules/tar/lib/extract.js
|
module.exports = Extract
var tar = require("../tar.js")
, fstream = require("fstream")
, inherits = require("inherits")
, path = require("path")
function Extract (opts) {
if (!(this instanceof Extract)) return new Extract(opts)
tar.Parse.apply(this)
if (typeof opts !== "object") {
opts = { path: opts }
}
// better to drop in cwd? seems more standard.
opts.path = opts.path || path.resolve("node-tar-extract")
opts.type = "Directory"
opts.Directory = true
// similar to --strip or --strip-components
opts.strip = +opts.strip
if (!opts.strip || opts.strip <= 0) opts.strip = 0
this._fst = fstream.Writer(opts)
this.pause()
var me = this
// Hardlinks in tarballs are relative to the root
// of the tarball. So, they need to be resolved against
// the target directory in order to be created properly.
me.on("entry", function (entry) {
// if there's a "strip" argument, then strip off that many
// path components.
if (opts.strip) {
var p = entry.path.split("/").slice(opts.strip).join("/")
entry.path = entry.props.path = p
if (entry.linkpath) {
var lp = entry.linkpath.split("/").slice(opts.strip).join("/")
entry.linkpath = entry.props.linkpath = lp
}
}
if (entry.type === "Link") {
entry.linkpath = entry.props.linkpath =
path.join(opts.path, path.join("/", entry.props.linkpath))
}
if (entry.type === "SymbolicLink") {
var dn = path.dirname(entry.path) || ""
var linkpath = entry.props.linkpath
var target = path.resolve(opts.path, dn, linkpath)
if (target.indexOf(opts.path) !== 0) {
linkpath = path.join(opts.path, path.join("/", linkpath))
}
entry.linkpath = entry.props.linkpath = linkpath
}
})
this._fst.on("ready", function () {
me.pipe(me._fst, { end: false })
me.resume()
})
this._fst.on('error', function(err) {
me.emit('error', err)
})
this._fst.on('drain', function() {
me.emit('drain')
})
// this._fst.on("end", function () {
// console.error("\nEEEE Extract End", me._fst.path)
// })
this._fst.on("close", function () {
// console.error("\nEEEE Extract End", me._fst.path)
me.emit("finish")
me.emit("end")
me.emit("close")
})
}
inherits(Extract, tar.Parse)
Extract.prototype._streamEnd = function () {
var me = this
if (!me._ended || me._entry) me.error("unexpected eof")
me._fst.end()
// my .end() is coming later.
}
|
PypiClean
|
/qumulo_api-6.2.1-py3-none-any.whl/qumulo/rest/object_replication.py
|
from typing import Dict, Optional, Union
import qumulo.lib.request as request
from qumulo.lib.auth import Credentials
@request.request
def create_object_relationship(
conninfo: request.Connection,
_credentials: Optional[Credentials],
direction: str,
object_store_address: str,
bucket: str,
object_folder: str,
region: str,
access_key_id: str,
secret_access_key: str,
local_directory_id: Optional[str] = None,
local_directory_path: Optional[str] = None,
port: Optional[int] = None,
ca_certificate: Optional[str] = None,
bucket_style: Optional[str] = None,
) -> request.RestResponse:
"""
@p direction One of "COPY_TO_OBJECT", "COPY_FROM_OBJECT"
@p bucket_style One of "BUCKET_STYLE_PATH", "BUCKET_STYLE_VIRTUAL_HOSTED"
"""
method = 'POST'
uri = '/v3/replication/object-relationships/'
body: Dict[str, Union[str, int]] = {
'direction': direction,
'object_store_address': object_store_address,
'bucket': bucket,
'object_folder': object_folder,
'region': region,
'access_key_id': access_key_id,
'secret_access_key': secret_access_key,
}
if local_directory_id is not None:
body['local_directory_id'] = local_directory_id
if local_directory_path is not None:
body['local_directory_path'] = local_directory_path
if port is not None:
body['port'] = port
if ca_certificate is not None:
body['ca_certificate'] = ca_certificate
if bucket_style is not None:
body['bucket_style'] = bucket_style
return conninfo.send_request(method, uri, body=body)
@request.request
def get_object_relationship(
conninfo: request.Connection, _credentials: Optional[Credentials], relationship_id: str
) -> request.RestResponse:
method = 'GET'
uri = '/v3/replication/object-relationships/{}'
return conninfo.send_request(method, uri.format(relationship_id))
@request.request
def list_object_relationships(
conninfo: request.Connection, _credentials: Optional[Credentials]
) -> request.RestResponse:
method = 'GET'
uri = '/v3/replication/object-relationships/'
return conninfo.send_request(method, uri)
@request.request
def get_object_relationship_status(
conninfo: request.Connection, _credentials: Optional[Credentials], relationship_id: str
) -> request.RestResponse:
method = 'GET'
uri = '/v3/replication/object-relationships/{}/status'
return conninfo.send_request(method, uri.format(relationship_id))
@request.request
def list_object_relationship_statuses(
conninfo: request.Connection, _credentials: Optional[Credentials]
) -> request.RestResponse:
method = 'GET'
uri = '/v3/replication/object-relationships/status/'
return conninfo.send_request(method, uri)
@request.request
def abort_object_replication(
conninfo: request.Connection, _credentials: Optional[Credentials], relationship_id: str
) -> request.RestResponse:
method = 'POST'
uri = f'/v3/replication/object-relationships/{relationship_id}/abort-replication'
return conninfo.send_request(method, uri)
@request.request
def delete_object_relationship(
conninfo: request.Connection,
_credentials: Optional[Credentials],
relationship_id: str,
if_match: Optional[str] = None,
) -> request.RestResponse:
method = 'DELETE'
uri = f'/v3/replication/object-relationships/{relationship_id}'
return conninfo.send_request(method, uri, if_match=if_match)
@request.request
def replicate_object_relationship(
conninfo: request.Connection, _credentials: Optional[Credentials], relationship_id: str
) -> request.RestResponse:
method = 'POST'
uri = f'/v3/replication/object-relationships/{relationship_id}/replicate'
return conninfo.send_request(method, uri)
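# Illustrative sketch (not part of the REST module): a typical call once a Connection
# and Credentials have been obtained elsewhere in the SDK. All argument values below
# are hypothetical placeholders.
#
#   create_object_relationship(
#       conninfo, credentials,
#       direction='COPY_TO_OBJECT',
#       object_store_address='s3.us-west-2.amazonaws.com',
#       bucket='example-bucket',
#       object_folder='backups/',
#       region='us-west-2',
#       access_key_id='AKIA...',
#       secret_access_key='...',
#       local_directory_path='/replication/source',
#   )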
|
PypiClean
|
/vioneta_agro_frontend-20230809.1-py3-none-any.whl/hass_frontend/frontend_es5/8630-6C4qoAfpDeA.js
|
(self.webpackChunkvioneta_agro_frontend=self.webpackChunkvioneta_agro_frontend||[]).push([[8630],{53725:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e,t){if(null==e)throw new TypeError("assign requires that input parameter not be null or undefined");for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n]);return e},e.exports=t.default},20508:function(e,t,n){"use strict";var r=n(28847).default;Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){return(0,i.default)({},e)};var i=r(n(53725));e.exports=t.default},59699:function(e,t,n){"use strict";n.d(t,{Z:function(){return a}});var r=n(90394),i=n(39244),s=n(23682),o=36e5;function a(e,t){(0,s.Z)(2,arguments);var n=(0,r.Z)(t);return(0,i.Z)(e,n*o)}},39244:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(90394),i=n(34327),s=n(23682);function o(e,t){(0,s.Z)(2,arguments);var n=(0,i.Z)(e).getTime(),o=(0,r.Z)(t);return new Date(n+o)}},32182:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(90394),i=n(34327),s=n(23682);function o(e,t){(0,s.Z)(2,arguments);var n=(0,i.Z)(e),o=(0,r.Z)(t);if(isNaN(o))return new Date(NaN);if(!o)return n;var a=n.getDate(),u=new Date(n.getTime());return u.setMonth(n.getMonth()+o+1,0),a>=u.getDate()?u:(n.setFullYear(u.getFullYear(),u.getMonth(),a),n)}},73826:function(e,t,n){"use strict";n.d(t,{f:function(){return f}});var r=n(40039),i=n(33368),s=n(71650),o=n(82390),a=n(69205),u=n(70906),c=n(91808),d=n(34541),l=n(47838),h=n(79932),f=function(e){var t=(0,c.Z)(null,(function(e,t){var n=function(t){(0,a.Z)(r,t);var n=(0,u.Z)(r);function r(){var t;(0,s.Z)(this,r);for(var i=arguments.length,a=new Array(i),u=0;u<i;u++)a[u]=arguments[u];return t=n.call.apply(n,[this].concat(a)),e((0,o.Z)(t)),t}return(0,i.Z)(r)}(t);return{F:n,d:[{kind:"field",decorators:[(0,h.Cb)({attribute:!1})],key:"hass",value:void 0},{kind:"field",key:"hassSubscribeRequiredHostProps",value:void 0},{kind:"field",key:"__unsubs",value:void 0},{kind:"method",key:"connectedCallback",value:function(){(0,d.Z)((0,l.Z)(n.prototype),"connectedCallback",this).call(this),this.__checkSubscribed()}},{kind:"method",key:"disconnectedCallback",value:function(){if((0,d.Z)((0,l.Z)(n.prototype),"disconnectedCallback",this).call(this),this.__unsubs){for(;this.__unsubs.length;){var e=this.__unsubs.pop();e instanceof Promise?e.then((function(e){return e()})):e()}this.__unsubs=void 0}}},{kind:"method",key:"updated",value:function(e){if((0,d.Z)((0,l.Z)(n.prototype),"updated",this).call(this,e),e.has("hass"))this.__checkSubscribed();else if(this.hassSubscribeRequiredHostProps){var t,i=(0,r.Z)(e.keys());try{for(i.s();!(t=i.n()).done;){var s=t.value;if(this.hassSubscribeRequiredHostProps.includes(s))return void this.__checkSubscribed()}}catch(o){i.e(o)}finally{i.f()}}}},{kind:"method",key:"hassSubscribe",value:function(){return[]}},{kind:"method",key:"__checkSubscribed",value:function(){var e,t=this;void 0!==this.__unsubs||!this.isConnected||void 0===this.hass||null!==(e=this.hassSubscribeRequiredHostProps)&&void 0!==e&&e.some((function(e){return void 0===t[e]}))||(this.__unsubs=this.hassSubscribe())}}]}}),e);return t}},61046:function(e,t,n){"use strict";n.r(t),n.d(t,{HuiEnergyCompareCard:function(){return g}});var r,i,s,o,a=n(88962),u=n(33368),c=n(71650),d=n(82390),l=n(69205),h=n(70906),f=n(91808),v=n(24112),p=n(93752),k=n(68144),_=n(79932),b=n(12198),y=n(55424),Z=n(73826),g=(0,f.Z)([(0,_.Mo)("hui-energy-compare-card")],(function(e,t){var n=function(t){(0,l.Z)(r,t);var 
n=(0,h.Z)(r);function r(){var t;(0,c.Z)(this,r);for(var i=arguments.length,s=new Array(i),o=0;o<i;o++)s[o]=arguments[o];return t=n.call.apply(n,[this].concat(s)),e((0,d.Z)(t)),t}return(0,u.Z)(r)}(t);return{F:n,d:[{kind:"field",decorators:[(0,_.Cb)({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[(0,_.SB)()],key:"_config",value:void 0},{kind:"field",decorators:[(0,_.SB)()],key:"_start",value:void 0},{kind:"field",decorators:[(0,_.SB)()],key:"_end",value:void 0},{kind:"field",decorators:[(0,_.SB)()],key:"_startCompare",value:void 0},{kind:"field",decorators:[(0,_.SB)()],key:"_endCompare",value:void 0},{kind:"field",decorators:[(0,_.Cb)({type:Boolean,reflect:!0})],key:"hidden",value:function(){return!0}},{kind:"method",key:"getCardSize",value:function(){return 1}},{kind:"method",key:"setConfig",value:function(e){this._config=e}},{kind:"field",key:"hassSubscribeRequiredHostProps",value:function(){return["_config"]}},{kind:"method",key:"hassSubscribe",value:function(){var e=this;return[(0,y.UB)(this.hass,{key:this._config.collection_key}).subscribe((function(t){return e._update(t)}))]}},{kind:"method",key:"render",value:function(){if(!this._startCompare||!this._endCompare)return k.Ld;var e=(0,v.Z)(this._endCompare,this._startCompare);return(0,k.dy)(r||(r=(0,a.Z)([' <ha-alert dismissable @alert-dismissed-clicked="','"> '," </ha-alert> "])),this._stopCompare,this.hass.localize("ui.panel.energy.compare.info",{start:(0,k.dy)(i||(i=(0,a.Z)(["<b>","","</b>"])),(0,b.p6)(this._start,this.hass.locale,this.hass.config),e>0?" -\n ".concat((0,b.p6)(this._end||(0,p.Z)(new Date),this.hass.locale,this.hass.config)):""),end:(0,k.dy)(s||(s=(0,a.Z)(["<b>","","</b>"])),(0,b.p6)(this._startCompare,this.hass.locale,this.hass.config),e>0?" -\n ".concat((0,b.p6)(this._endCompare,this.hass.locale,this.hass.config)):"")}))}},{kind:"method",key:"_update",value:function(e){this._start=e.start,this._end=e.end,this._startCompare=e.startCompare,this._endCompare=e.endCompare,this.hidden=!this._startCompare}},{kind:"method",key:"_stopCompare",value:function(){var e=(0,y.UB)(this.hass,{key:this._config.collection_key});e.setCompare(!1),e.refresh()}},{kind:"get",static:!0,key:"styles",value:function(){return(0,k.iv)(o||(o=(0,a.Z)(["mwc-button{width:max-content}"])))}}]}}),(0,Z.f)(k.oi))},28847:function(e){e.exports=function(e){return e&&e.__esModule?e:{default:e}},e.exports.__esModule=!0,e.exports.default=e.exports},23158:function(e,t,n){"use strict";n.d(t,{Z:function(){return s}});var r=n(89273),i=n(36857);function s(e,t,n){var s=(0,i.Z)(e,n),o=(0,r.Z)(t,s,!0),a=new Date(s.getTime()-o),u=new Date(0);return u.setFullYear(a.getUTCFullYear(),a.getUTCMonth(),a.getUTCDate()),u.setHours(a.getUTCHours(),a.getUTCMinutes(),a.getUTCSeconds(),a.getUTCMilliseconds()),u}},25101:function(e,t,n){"use strict";n.d(t,{Z:function(){return u}});var r=n(20508),i=n(36857),s=n(57944),o=n(89273),a=n(74101);function u(e,t,n){if("string"==typeof e&&!e.match(s.Z)){var u=r(n);return u.timeZone=t,(0,i.Z)(e,u)}var c=(0,i.Z)(e,n),d=(0,a.Z)(c.getFullYear(),c.getMonth(),c.getDate(),c.getHours(),c.getMinutes(),c.getSeconds(),c.getMilliseconds()).getTime(),l=(0,o.Z)(t,new Date(d));return new Date(d+l)}}}]);
//# sourceMappingURL=8630-6C4qoAfpDeA.js.map
|
PypiClean
|
/Cachalot-1.5.0.tar.gz/Cachalot-1.5.0/CONTRIBUTING.md
|
# Contributor's Guide
First of all, thank you for contributing to Cachalot!
This document provides guidelines for contributing to the project. They are written to ensure its consistency and maintainability. All contributions are welcome, as long as you follow these guidelines. If you have any questions, please [contact me](incoming+radek-sprta/[email protected]).
There are many ways to contribute to the project, including, but not limited to, submitting bug reports and feature requests, improving documentation or writing code.
## How to Report Bugs
Bug reports are hugely important, but please make sure to avoid duplicate reports. Before you submit one, please check [Cachalot issues](https://gitlab.com/radek-sprta/cachalot/issues), **both open and closed**, and make sure that the bug has not been reported before.
When filing an issue, include answers to the following five questions:
1. What version of Cachalot are you using?
2. What operating system and Python version are you using?
3. What did you do?
4. What was the expected result?
5. What was the actual result?
## How to Suggest a Feature
If you have a suggestion for a feature or an enhancement, please feel free to [open an issue](https://gitlab.com/radek-sprta/cachalot/issues). In your feature request, include the following:
1. What should the feature do?
2. Why do you need it?
3. How should it work?
Note that I reserve the right to reject a feature request if I feel it does not align with the project's goals.
## Contributing Code
If this is your first time contributing code on Gitlab, take a look at Gitlab's [How to create a merge request](https://docs.gitlab.com/ee/gitlab-basics/add-merge-request.html). After you have read it, follow this checklist to make a merge request:
1. Fork the repository.
2. Set up the development environment using `poetry install --dev`.
3. Run the tests using `pytest tests` to make sure they pass on your system.
4. Write tests for the feature you are adding. They should fail.
5. Add your feature.
6. Run the test suite again, ensuring all tests, including the ones you have written, pass.
7. Make a merge request on Gitlab.
### Code Style
- Cachalot adheres to the [PEP 8](https://www.python.org/dev/peps/pep-0008/) and [PEP 257](https://www.python.org/dev/peps/pep-0257/) coding conventions.
- It also takes advantage of type hints. For an explanation of how they work, read [PEP 483](https://www.python.org/dev/peps/pep-0483/).
- Use [Google style docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html).
- Use imports for packages and modules only.
- Write your commit message in the imperative: "Fix bug" and not "Fixed bug" or "Fixes bug." This convention matches up with commit messages generated by commands like git merge and git revert.
## Updating Documentation
If you have found any mistakes, want to add examples, or just improve the documentation in general, you are more than welcome! Just make your change and send a merge request.
## Closing Words
Thank you for taking the time to read the Contributor's Guide!
Radek Sprta
Maintainer
|
PypiClean
|
/sdksio_juniper_mist_sdk-1.0.0-py3-none-any.whl/mistapi/models/wlan_airwatch.py
|
from mistapi.api_helper import APIHelper
class WlanAirwatch(object):
"""Implementation of the 'wlan_airwatch' model.
airwatch wlan settings
Attributes:
api_key (string): API Key
console_url (string): console URL
enabled (bool): TODO: type description here.
password (string): password
username (string): username
"""
# Create a mapping from Model property names to API property names
_names = {
"api_key": 'api_key',
"console_url": 'console_url',
"enabled": 'enabled',
"password": 'password',
"username": 'username'
}
_optionals = [
'api_key',
'console_url',
'enabled',
'password',
'username',
]
def __init__(self,
api_key=APIHelper.SKIP,
console_url=APIHelper.SKIP,
enabled=APIHelper.SKIP,
password=APIHelper.SKIP,
username=APIHelper.SKIP):
"""Constructor for the WlanAirwatch class"""
# Initialize members of the class
if api_key is not APIHelper.SKIP:
self.api_key = api_key
if console_url is not APIHelper.SKIP:
self.console_url = console_url
if enabled is not APIHelper.SKIP:
self.enabled = enabled
if password is not APIHelper.SKIP:
self.password = password
if username is not APIHelper.SKIP:
self.username = username
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
api_key = dictionary.get("api_key") if dictionary.get("api_key") else APIHelper.SKIP
console_url = dictionary.get("console_url") if dictionary.get("console_url") else APIHelper.SKIP
enabled = dictionary.get("enabled") if "enabled" in dictionary.keys() else APIHelper.SKIP
password = dictionary.get("password") if dictionary.get("password") else APIHelper.SKIP
username = dictionary.get("username") if dictionary.get("username") else APIHelper.SKIP
# Return an object of this model
return cls(api_key,
console_url,
enabled,
password,
username)
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/request/MybankCreditSceneprodLoanApplyRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MybankCreditSceneprodLoanApplyModel import MybankCreditSceneprodLoanApplyModel
class MybankCreditSceneprodLoanApplyRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, MybankCreditSceneprodLoanApplyModel):
self._biz_content = value
else:
self._biz_content = MybankCreditSceneprodLoanApplyModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'mybank.credit.sceneprod.loan.apply'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
PypiClean
|
/meta-edc-0.2.41.tar.gz/meta-edc-0.2.41/meta_prn/migrations/0002_auto_20191024_1000.py
|
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("meta_prn", "0001_initial")]
operations = [
migrations.AlterField(
model_name="endofstudy",
name="site",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="sites.Site",
),
),
migrations.AlterField(
model_name="losstofollowup",
name="site",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="sites.Site",
),
),
migrations.AlterField(
model_name="onschedule",
name="site",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="sites.Site",
),
),
migrations.AlterField(
model_name="protocoldeviationviolation",
name="site",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="sites.Site",
),
),
migrations.AlterField(
model_name="unblindingrequest",
name="site",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="sites.Site",
),
),
migrations.AlterField(
model_name="unblindingreview",
name="site",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="sites.Site",
),
),
]
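# Illustrative note (not part of the migration itself): like any Django migration, this
# one is applied with the project's standard management command, e.g.
#
#   python manage.py migrate meta_prn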
|
PypiClean
|
/bard_py-0.1.0.tar.gz/bard_py-0.1.0/README.md
|
# Bard.py
[](https://github.com/vsakkas/bard.py/releases/tag/v0.1.0)
[](https://www.python.org/downloads/)
[](https://github.com/vsakkas/bard.py/blob/master/LICENSE)
Python client for Bard.
> **Note**
> This is an **unofficial** client.
## Requirements
- Python 3.10 or newer
- Google account with access to [Bard](https://bard.google.com/)
## Installation
To install Bard.py, run the following command:
```bash
pip install bard-py
```
or, if you use [poetry](https://python-poetry.org/):
```bash
poetry add bard-py
```
## License
This project is licensed under the MIT License - see the [LICENSE](https://github.com/vsakkas/bard.py/blob/master/LICENSE) file for details.
|
PypiClean
|
/pubtatortool-0.6.5.tar.gz/pubtatortool-0.6.5/README.md
|
# The PubTator Parsing Tool
A Python package for loading and manipulating PubTator files as Python objects.
## Installation
This package is on the Python Package Index. You can install it using `pip install pubtatortool`.
## Usage
For basic word tokenization and simple operations:
```python
from pubtatortool import PubTatorCorpus
train_corpus = PubTatorCorpus(['train_corpus_part_1.txt',
'train_corpus_part_2.txt'])
dev_corpus = PubTatorCorpus(['dev_corpus.txt'])
test_corpus = PubTatorCorpus(['test_corpus.txt'])
```
For wordpiece tokenization and the full ability to encode and decode text for use with machine learning models:
```python
from pubtatortool import PubTatorCorpus
from pubtatortool.tokenization import get_tokenizer
tokenizer = get_tokenizer(tokenization='wordpiece', vocab='bert-base-cased')
train_corpus = PubTatorCorpus(['train_corpus_part_1.txt',
'train_corpus_part_2.txt'], tokenizer)
dev_corpus = PubTatorCorpus(['dev_corpus.txt'], tokenizer)
test_corpus = PubTatorCorpus(['test_corpus.txt'], tokenizer)
```
You can then serialize a corpus using Pickle, iterate over its documents using `corpus.document_list`, and perform various operations on documents regardless of the tokenization policy, even a lossy one, without worrying about mentions becoming decoupled from the text.
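Serialization itself needs nothing beyond the standard `pickle` module. A minimal sketch, reusing the `train_corpus` object built above (the file name is arbitrary):
```python
import pickle

# Save the corpus so it does not have to be re-parsed next time.
with open('train_corpus.pkl', 'wb') as corpus_file:
    pickle.dump(train_corpus, corpus_file)

# Later: restore it and iterate over its documents.
with open('train_corpus.pkl', 'rb') as corpus_file:
    train_corpus = pickle.load(corpus_file)

for doc in train_corpus.document_list:
    pass  # process each document
```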
For example, you can create a TSV-formatted file from a PubTator file in 10 lines of code:
```python
from pubtatortool import PubTatorCorpus
from pubtatortool.tokenization import get_tokenizer
tokenizer = get_tokenizer(tokenization='wordpiece', vocab='bert-base-cased')
corpus = PubTatorCorpus(['mycorpus.txt'], tokenizer)
with open('outfile.txt', 'w') as outfile:
for doc in corpus.document_list:
for sentence, targets in zip(doc.sentences, doc.sentence_targets()):
for token, label in zip(sentence, targets):
print("{tok}\t{lab}".format(tok=token, lab=label),
file=outfile)
print('', file=outfile)
```
|
PypiClean
|
/wholecell-lens-0.0.28.tar.gz/wholecell-lens-0.0.28/lens/processes/transport_lookup.py
|
from __future__ import absolute_import, division, print_function
import os
import csv
from scipy import constants
from lens.actor.process import Process
from lens.environment.make_media import Media
from lens.environment.look_up import LookUp
from lens.utils.rate_law_utilities import load_reactions
from lens.utils.rate_law_utilities import get_reactions_from_exchange
from lens.utils.rate_law_utilities import get_molecules_from_reactions
from lens.data.spreadsheets import JsonReader
from itertools import ifilter
from lens.utils.units import units
EXTERNAL_MOLECULES_FILE = os.path.join('lens', 'data', 'flat', 'wcEcoli_environment_molecules.tsv')
TRANSPORT_IDS_FILE = os.path.join('lens', 'data', 'flat', 'wcEcoli_transport_reactions.tsv')
TSV_DIALECT = csv.excel_tab
amino_acids = [
'L-ALPHA-ALANINE',
'ARG',
'ASN',
'L-ASPARTATE',
'CYS',
'GLT',
'GLN',
'GLY',
'HIS',
'ILE',
'LEU',
'LYS',
'MET',
'PHE',
'PRO',
'SER',
'THR',
'TRP',
'TYR',
'L-SELENOCYSTEINE',
'VAL'
]
additional_exchange = ['OXYGEN-MOLECULE', 'GLC']
external_molecule_ids = additional_exchange + amino_acids
# add [p] label. TODO (Eran) -- fix this
external_molecule_ids_p = [mol_id + '[p]' for mol_id in external_molecule_ids]
COUNTS_UNITS = units.mmol
VOLUME_UNITS = units.L
TIME_UNITS = units.s
FLUX_UNITS = COUNTS_UNITS / VOLUME_UNITS / TIME_UNITS
class TransportLookup(Process):
def __init__(self, initial_parameters={}):
self.media_id = 'minimal' # initial_parameters.get('media_id', 'minimal')
self.lookup_type = 'average' # initial_parameters.get('lookup', 'average')
self.nAvogadro = constants.N_A * 1/units.mol
self.external_molecule_ids = external_molecule_ids
# load all reactions and maps
self.load_data()
# external_molecule_ids declares which molecules' exchange will be applied
self.transport_reaction_ids = get_reactions_from_exchange(self.all_transport_reactions, external_molecule_ids_p)
all_molecule_ids = get_molecules_from_reactions(self.transport_reaction_ids, self.all_transport_reactions)
internal_molecule_ids = [mol_id for mol_id in all_molecule_ids if mol_id not in external_molecule_ids_p]
# make look up object
self.look_up = LookUp()
roles = {
'external': self.external_molecule_ids,
'internal': internal_molecule_ids + ['volume']}
parameters = {}
parameters.update(initial_parameters)
super(TransportLookup, self).__init__(roles, parameters)
def default_state(self):
media_id = 'minimal_plus_amino_acids'
make_media = Media()
media = make_media.get_saved_media(media_id)
# declare the states
environment_state = media
environment_state['volume'] = 10
cell_state = {'volume': 1}
return {
'external': environment_state,
'internal': cell_state}
def default_emitter_keys(self):
keys = {
'internal': [],
'external': self.external_molecule_ids
}
return keys
def default_updaters(self):
'''
Define the updater type for each state in roles.
The default updater is to pass a delta.'''
updater_types = {
'internal': {}, # reactions set values directly
'external': {mol_id: 'accumulate' for mol_id in self.external_molecule_ids}} # all external values use the 'accumulate' updater
return updater_types
def next_update(self, timestep, states):
volume = states['internal']['volume'] * units.fL
mmol_to_counts = self.nAvogadro.to('1/mmol') * volume.to('L')
# get transport fluxes
transport_fluxes = self.look_up.look_up(
self.lookup_type,
self.media_id,
self.transport_reaction_ids)
# time step dependences
# TODO (Eran) -- load units in look_up
transport_fluxes = {key: value * (FLUX_UNITS) * timestep * TIME_UNITS
for key, value in transport_fluxes.iteritems()}
# convert to counts
delta_counts = self.flux_to_counts(transport_fluxes, mmol_to_counts)
# Get the deltas for environmental molecules
environment_deltas = {}
for molecule_id in delta_counts.keys():
if molecule_id in self.molecule_to_external_map:
external_molecule_id = self.molecule_to_external_map[molecule_id]
environment_deltas[external_molecule_id] = delta_counts[molecule_id]
return {'external': environment_deltas}
# TODO (Eran) -- make this a util
def flux_to_counts(self, fluxes, conversion):
rxn_counts = {reaction_id: int(conversion * flux) for reaction_id, flux in fluxes.iteritems()}
delta_counts = {}
for reaction_id, rxn_count in rxn_counts.iteritems():
stoichiometry = self.all_transport_reactions[reaction_id]['stoichiometry']
substrate_counts = {substrate_id: coeff * rxn_count for substrate_id, coeff in stoichiometry.iteritems()}
# add to delta_counts
for substrate, delta in substrate_counts.iteritems():
if substrate in delta_counts:
delta_counts[substrate] += delta
else:
delta_counts[substrate] = delta
return delta_counts
def load_data(self):
'''
- Loads all reactions, including locations for enzymes.
- Separates out the transport reactions as a class dictionary
- Makes mappings from molecule ids with location tags to external molecules without location tags
'''
# use rate_law_utilities to get all_reactions
all_reactions = load_reactions()
# make dict of reactions in TRANSPORT_IDS_FILE
self.all_transport_reactions = {}
with open(TRANSPORT_IDS_FILE, 'rU') as tsvfile:
reader = JsonReader(
ifilter(lambda x: x.lstrip()[0] != '#', tsvfile), # Strip comments
dialect = TSV_DIALECT)
for row in reader:
reaction_id = row['reaction id']
stoichiometry = all_reactions[reaction_id]['stoichiometry']
reversible = all_reactions[reaction_id]['is reversible']
transporters_loc = all_reactions[reaction_id]['catalyzed by']
self.all_transport_reactions[reaction_id] = {
'stoichiometry': stoichiometry,
'is reversible': reversible,
'catalyzed by': transporters_loc,
}
# Make map of external molecule_ids with a location tag (as used in reaction stoichiometry) to molecule_ids in the environment
self.molecule_to_external_map = {}
self.external_to_molecule_map = {}
with open(EXTERNAL_MOLECULES_FILE, 'rU') as tsvfile:
reader = JsonReader(
ifilter(lambda x: x.lstrip()[0] != '#', tsvfile), # Strip comments
dialect = TSV_DIALECT)
for row in reader:
molecule_id = row['molecule id']
location = row['exchange molecule location']
self.molecule_to_external_map[molecule_id + location] = molecule_id
self.external_to_molecule_map[molecule_id] = molecule_id + location
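# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); it assumes the
# flat data files referenced above are available on disk:
# process = TransportLookup()
# state = process.default_state()
# update = process.next_update(1.0, state)  # {'external': {...molecule deltas...}}
# ---------------------------------------------------------------------------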
|
PypiClean
|
/PCSE-5.5.5.tar.gz/PCSE-5.5.5/pcse/base/variablekiosk.py
|
from .. import exceptions as exc
class VariableKiosk(dict):
"""VariableKiosk for registering and publishing state variables in PCSE.
No parameters are needed for instantiating the VariableKiosk.
All variables that are
defined within PCSE will be registered within the VariableKiosk, while
usually only a small subset of those will be published with the kiosk.
The value of the published
variables can be retrieved with the bracket notation as the variableKiosk
is essentially a (somewhat fancy) dictionary.
Registering/deregistering rate and state variables goes through the
`self.register_variable()` and `self.deregister_variable()` methods while the
`set_variable()` method is used to update a value of a published variable.
In general, none of these methods need to be called by users directly as
the logic within the `StatesTemplate` and `RatesTemplate` takes care of
this.
Finally, the `variable_exists()` can be used to check if a variable is
registered, while the `flush_states()` and `flush_rates()` are used to
remove (flush) the values of any published state and rate variables.
example::
>>> import pcse
>>> from pcse.base import VariableKiosk
>>>
>>> v = VariableKiosk()
>>> id0 = 0
>>> v.register_variable(id0, "VAR1", type="S", publish=True)
>>> v.register_variable(id0, "VAR2", type="S", publish=False)
>>>
>>> id1 = 1
>>> v.register_variable(id1, "VAR3", type="R", publish=True)
>>> v.register_variable(id1, "VAR4", type="R", publish=False)
>>>
>>> v.set_variable(id0, "VAR1", 1.35)
>>> v.set_variable(id1, "VAR3", 310.56)
>>>
>>> print(v)
Contents of VariableKiosk:
* Registered state variables: 2
* Published state variables: 1 with values:
- variable VAR1, value: 1.35
* Registered rate variables: 2
* Published rate variables: 1 with values:
- variable VAR3, value: 310.56
>>> print v["VAR3"]
310.56
>>> v.set_variable(id0, "VAR3", 750.12)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pcse/base.py", line 148, in set_variable
raise exc.VariableKioskError(msg % varname)
pcse.exceptions.VariableKioskError: Unregistered object tried to set the value of variable 'VAR3': access denied.
>>>
>>> v.flush_rates()
>>> print(v)
Contents of VariableKiosk:
* Registered state variables: 2
* Published state variables: 1 with values:
- variable VAR1, value: 1.35
* Registered rate variables: 2
* Published rate variables: 1 with values:
- variable VAR3, value: undefined
>>> v.flush_states()
>>> print(v)
Contents of VariableKiosk:
* Registered state variables: 2
* Published state variables: 1 with values:
- variable VAR1, value: undefined
* Registered rate variables: 2
* Published rate variables: 1 with values:
- variable VAR3, value: undefined
"""
def __init__(self):
dict.__init__(self)
self.registered_states = {}
self.registered_rates = {}
self.published_states = {}
self.published_rates = {}
def __setitem__(self, item, value):
msg = "See set_variable() for setting a variable."
raise RuntimeError(msg)
def __contains__(self, item):
"""Checks if item is in self.registered_states or self.registered_rates.
"""
return dict.__contains__(self, item)
def __getattr__(self, item):
"""Allow use of attribute notation (eg "kiosk.LAI") on published rates or states.
"""
return dict.__getitem__(self, item)
def __str__(self):
msg = "Contents of VariableKiosk:\n"
msg += " * Registered state variables: %i\n" % len(self.registered_states)
msg += " * Published state variables: %i with values:\n" % len(self.published_states)
for varname in self.published_states:
if varname in self:
value = self[varname]
else:
value = "undefined"
msg += " - variable %s, value: %s\n" % (varname, value)
msg += " * Registered rate variables: %i\n" % len(self.registered_rates)
msg += " * Published rate variables: %i with values:\n" % len(self.published_rates)
for varname in self.published_rates:
if varname in self:
value = self[varname]
else:
value = "undefined"
msg += " - variable %s, value: %s\n" % (varname, value)
return msg
def register_variable(self, oid, varname, type, publish=False):
"""Register a varname from object with id, with given type
:param oid: Object id (from python builtin id() function) of the
state/rate object registering this variable.
:param varname: Name of the variable to be registered, e.g. "DVS"
:param type: Either "R" (rate) or "S" (state) variable, is handled
automatically by the states/rates template class.
:param publish: True if variable should be published in the kiosk,
defaults to False
"""
self._check_duplicate_variable(varname)
if type.upper() == "R":
self.registered_rates[varname] = oid
if publish is True:
self.published_rates[varname] = oid
elif type.upper() == "S":
self.registered_states[varname] = oid
if publish is True:
self.published_states[varname] = oid
else:
msg = "Variable type should be 'S'|'R'"
raise exc.VariableKioskError(msg)
def deregister_variable(self, oid, varname):
"""Object with id(object) asks to deregister varname from kiosk
:param oid: Object id (from python builtin id() function) of the
state/rate object registering this variable.
:param varname: Name of the variable to be registered, e.g. "DVS"
"""
if varname in self.registered_states:
# print "Deregistering '%s'" % varname
if oid != self.registered_states[varname]:
msg = "Wrong object tried to deregister variable '%s'." \
% varname
raise exc.VariableKioskError(msg)
else:
self.registered_states.pop(varname)
if varname in self.published_states:
self.published_states.pop(varname)
elif varname in self.registered_rates:
# print "Deregistering '%s'" % varname
if oid != self.registered_rates[varname]:
msg = "Wrong object tried to deregister variable '%s'." \
% varname
raise exc.VariableKioskError(msg)
else:
self.registered_rates.pop(varname)
if varname in self.published_rates:
self.published_rates.pop(varname)
else:
msg = "Failed to deregister variabe '%s'!" % varname
raise exc.VariableKioskError(msg)
# Finally remove the value from the internal dictionary
if varname in self:
self.pop(varname)
def _check_duplicate_variable(self, varname):
"""Checks if variables are not registered twice.
"""
if varname in self.registered_rates or \
varname in self.registered_states:
msg = "Duplicate state/rate variable '%s' encountered!"
raise exc.VariableKioskError(msg % varname)
def set_variable(self, id, varname, value):
"""Let object with id, set the value of variable varname
:param id: Object id (from python builtin id() function) of the
state/rate object registering this variable.
:param varname: Name of the variable to be updated
:param value: Value to be assigned to the variable.
"""
if varname in self.published_rates:
if self.published_rates[varname] == id:
dict.__setitem__(self, varname, value)
else:
msg = "Unregistered object tried to set the value " + \
"of variable '%s': access denied."
raise exc.VariableKioskError(msg % varname)
elif varname in self.published_states:
if self.published_states[varname] == id:
dict.__setitem__(self, varname, value)
else:
msg = "Unregistered object tried to set the value of variable " \
"%s: access denied."
raise exc.VariableKioskError(msg % varname)
else:
msg = "Variable '%s' not published in VariableKiosk."
raise exc.VariableKioskError(msg % varname)
def variable_exists(self, varname):
""" Returns True if the state/rate variable is registered in the kiosk.
:param varname: Name of the variable to be checked for registration.
"""
if varname in self.registered_rates or \
varname in self.registered_states:
return True
else:
return False
def flush_rates(self):
"""flush the values of all published rate variable from the kiosk.
"""
for key in self.published_rates.keys():
self.pop(key, None)
def flush_states(self):
"""flush the values of all state variable from the kiosk.
"""
for key in self.published_states.keys():
self.pop(key, None)
|
PypiClean
|
/retro_data_structures-0.23.0-py3-none-any.whl/retro_data_structures/properties/echoes/objects/SporbProjectile.py
|
import dataclasses
import struct
import typing
from retro_data_structures.game_check import Game
from retro_data_structures.properties.base_property import BaseObjectType
from retro_data_structures.properties.echoes.archetypes.ActorParameters import ActorParameters
from retro_data_structures.properties.echoes.archetypes.EditorProperties import EditorProperties
from retro_data_structures.properties.echoes.archetypes.PatternedAITypedef import PatternedAITypedef
from retro_data_structures.properties.echoes.core.AssetId import AssetId, default_asset_id
@dataclasses.dataclass()
class SporbProjectile(BaseObjectType):
editor_properties: EditorProperties = dataclasses.field(default_factory=EditorProperties)
patterned: PatternedAITypedef = dataclasses.field(default_factory=PatternedAITypedef)
actor_information: ActorParameters = dataclasses.field(default_factory=ActorParameters)
ball_spit_particle_effect: AssetId = dataclasses.field(metadata={'asset_types': ['PART']}, default=default_asset_id)
ball_escape_particle_effect: AssetId = dataclasses.field(metadata={'asset_types': ['PART']}, default=default_asset_id)
@classmethod
def game(cls) -> Game:
return Game.ECHOES
def get_name(self) -> typing.Optional[str]:
return self.editor_properties.name
def set_name(self, name: str) -> None:
self.editor_properties.name = name
@classmethod
def object_type(cls) -> str:
return 'SPBP'
@classmethod
def modules(cls) -> typing.List[str]:
return ['Sporb.rel']
@classmethod
def from_stream(cls, data: typing.BinaryIO, size: typing.Optional[int] = None, default_override: typing.Optional[dict] = None):
struct_id, size, property_count = struct.unpack(">LHH", data.read(8))
assert struct_id == 0xFFFFFFFF
root_size_start = data.tell() - 2
present_fields = default_override or {}
for _ in range(property_count):
property_id, property_size = struct.unpack(">LH", data.read(6))
start = data.tell()
try:
property_name, decoder = _property_decoder[property_id]
present_fields[property_name] = decoder(data, property_size)
except KeyError:
raise RuntimeError(f"Unknown property: 0x{property_id:08x}")
assert data.tell() - start == property_size
assert data.tell() - root_size_start == size
return cls(**present_fields)
def to_stream(self, data: typing.BinaryIO, default_override: typing.Optional[dict] = None):
default_override = default_override or {}
data.write(b'\xff\xff\xff\xff') # struct object id
root_size_offset = data.tell()
data.write(b'\x00\x00') # placeholder for root struct size
data.write(b'\x00\x05') # 5 properties
data.write(b'%ZE\x80') # 0x255a4580
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.editor_properties.to_stream(data)
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b'\xb3wGP') # 0xb3774750
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.patterned.to_stream(data)
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b'~9\x7f\xed') # 0x7e397fed
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.actor_information.to_stream(data)
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b'g}O\xc2') # 0x677d4fc2
data.write(b'\x00\x04') # size
data.write(struct.pack(">L", self.ball_spit_particle_effect))
data.write(b'\xaa\xff\x18\x84') # 0xaaff1884
data.write(b'\x00\x04') # size
data.write(struct.pack(">L", self.ball_escape_particle_effect))
struct_end_offset = data.tell()
data.seek(root_size_offset)
data.write(struct.pack(">H", struct_end_offset - root_size_offset - 2))
data.seek(struct_end_offset)
@classmethod
def from_json(cls, data: dict):
return cls(
editor_properties=EditorProperties.from_json(data['editor_properties']),
patterned=PatternedAITypedef.from_json(data['patterned']),
actor_information=ActorParameters.from_json(data['actor_information']),
ball_spit_particle_effect=data['ball_spit_particle_effect'],
ball_escape_particle_effect=data['ball_escape_particle_effect'],
)
def to_json(self) -> dict:
return {
'editor_properties': self.editor_properties.to_json(),
'patterned': self.patterned.to_json(),
'actor_information': self.actor_information.to_json(),
'ball_spit_particle_effect': self.ball_spit_particle_effect,
'ball_escape_particle_effect': self.ball_escape_particle_effect,
}
def _dependencies_for_editor_properties(self, asset_manager):
yield from self.editor_properties.dependencies_for(asset_manager)
def _dependencies_for_patterned(self, asset_manager):
yield from self.patterned.dependencies_for(asset_manager)
def _dependencies_for_actor_information(self, asset_manager):
yield from self.actor_information.dependencies_for(asset_manager)
def _dependencies_for_ball_spit_particle_effect(self, asset_manager):
yield from asset_manager.get_dependencies_for_asset(self.ball_spit_particle_effect)
def _dependencies_for_ball_escape_particle_effect(self, asset_manager):
yield from asset_manager.get_dependencies_for_asset(self.ball_escape_particle_effect)
def dependencies_for(self, asset_manager):
for method, field_name, field_type in [
(self._dependencies_for_editor_properties, "editor_properties", "EditorProperties"),
(self._dependencies_for_patterned, "patterned", "PatternedAITypedef"),
(self._dependencies_for_actor_information, "actor_information", "ActorParameters"),
(self._dependencies_for_ball_spit_particle_effect, "ball_spit_particle_effect", "AssetId"),
(self._dependencies_for_ball_escape_particle_effect, "ball_escape_particle_effect", "AssetId"),
]:
try:
yield from method(asset_manager)
except Exception as e:
raise Exception(
f"Error finding dependencies for SporbProjectile.{field_name} ({field_type}): {e}"
)
def _decode_editor_properties(data: typing.BinaryIO, property_size: int):
return EditorProperties.from_stream(data, property_size)
def _decode_patterned(data: typing.BinaryIO, property_size: int):
return PatternedAITypedef.from_stream(data, property_size)
def _decode_actor_information(data: typing.BinaryIO, property_size: int):
return ActorParameters.from_stream(data, property_size)
def _decode_ball_spit_particle_effect(data: typing.BinaryIO, property_size: int):
return struct.unpack(">L", data.read(4))[0]
def _decode_ball_escape_particle_effect(data: typing.BinaryIO, property_size: int):
return struct.unpack(">L", data.read(4))[0]
_property_decoder: typing.Dict[int, typing.Tuple[str, typing.Callable[[typing.BinaryIO, int], typing.Any]]] = {
0x255a4580: ('editor_properties', _decode_editor_properties),
0xb3774750: ('patterned', _decode_patterned),
0x7e397fed: ('actor_information', _decode_actor_information),
0x677d4fc2: ('ball_spit_particle_effect', _decode_ball_spit_particle_effect),
0xaaff1884: ('ball_escape_particle_effect', _decode_ball_escape_particle_effect),
}
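# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated module). The file name
# is a placeholder for a binary blob containing this object's property data:
# with open("sporb_projectile.bin", "rb") as stream:
#     obj = SporbProjectile.from_stream(stream)
# print(obj.to_json())
# ---------------------------------------------------------------------------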
|
PypiClean
|
/django-pragmatic-4.8.0.tar.gz/django-pragmatic-4.8.0/pragmatic/signals.py
|
from collections import defaultdict
from functools import wraps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import signals as django_signals
from django.db.models.signals import pre_init, post_init, post_save, pre_save, pre_delete, post_delete, post_migrate, \
pre_migrate
from django.utils.timezone import now
APM_DEBUG = getattr(settings, 'APM_DEBUG', False)
def add_apm_custom_context(type, value):
from elasticapm.traces import execution_context
transaction = execution_context.get_transaction()
if not transaction:
return
if 'custom' not in transaction.context:
transaction.context['custom'] = {}
if type not in transaction.context['custom']:
transaction.context['custom'][type] = [value]
else:
transaction.context['custom'][type].append(value)
def apm_custom_context(type, instance_attr='instance'):
"""
A decorator for connecting functions to APM. Used by passing the context type:
@apm_custom_context('signals')
def my_custom_signal(sender, instance, **kwargs):
...
@apm_custom_context('tasks')
def my_custom_task(arg1, arg2):
...
"""
def _decorator(func):
"""
Decorator to send custom information to APM.
"""
def wrapper(*args, **kwargs):
try:
from elasticapm.traces import execution_context
apm_message = None
if type == 'signals':
instance = kwargs.get(instance_attr, None)
if instance:
signal = kwargs.get('signal')
signal_name = SignalsHelper.get_signal_name(signal)
apm_message = f'[{signal_name}]\t{func.__module__}.{func.__qualname__}({instance.__class__.__name__}: {instance.id})'.strip()
elif type == 'tasks':
# execute task with given arguments
arguments = str(args)
apm_message = f'{func.__module__}.{func.__qualname__}{arguments}'
if apm_message:
if APM_DEBUG:
print(f'apm_message [{type}]:', apm_message)
add_apm_custom_context(type, apm_message)
return func(*args, **kwargs)
except ImportError:
# elasticapm is not installed
return func(*args, **kwargs)
# return wrapper
return wraps(func)(wrapper) # important to preserve function signature!
return _decorator
class SignalsHelper(object):
@staticmethod
def add_task_to_instance(instance, func, arguments, attr_name):
# get existing tasks
tasks = getattr(instance, attr_name, [])
# prepare task
task = (func, arguments)
# add task to the list
tasks.append(task)
# save tasks into instance
setattr(instance, attr_name, tasks)
@staticmethod
def add_task_and_connect(sender, instance, func, arguments, signal_type='post_save'):
attr_name = f'{signal_type}_signal_tasks'
receiver_name = f'{signal_type}_tasks_receiver'
if signal_type in ['post_save', 'post_delete', 'm2m_changed']:
signal = getattr(django_signals, signal_type)
else:
raise NotImplementedError()
receiver = getattr(SignalsHelper, receiver_name)
SignalsHelper.add_task_to_instance(instance, func, arguments, attr_name)
signal.connect(receiver=receiver, sender=sender, weak=True)
@staticmethod
@apm_custom_context('signals')
def post_save_tasks_receiver(sender, instance, **kwargs):
SignalsHelper.execute_instance_tasks(instance, 'post_save_signal_tasks')
@staticmethod
@apm_custom_context('signals')
def post_delete_tasks_receiver(sender, instance, **kwargs):
SignalsHelper.execute_instance_tasks(instance, 'post_delete_signal_tasks')
@staticmethod
@apm_custom_context('signals')
def m2m_changed_tasks_receiver(sender, instance, **kwargs):
SignalsHelper.execute_instance_tasks(instance, 'm2m_changed_signal_tasks')
@staticmethod
def execute_task(task):
# execute task with given arguments
func = task[0]
arguments = task[1]
func(*arguments)
@staticmethod
def execute_instance_tasks(instance, attr_name):
# start timer
start = now()
# get instance tasks
tasks = getattr(instance, attr_name, [])
total_tasks = len(tasks)
if APM_DEBUG:
SignalsHelper._print('>>> SignalsHelper instance tasks [{} in total]: {}'.format(total_tasks, tasks), total_tasks > 0)
else:
SignalsHelper._print('>>> SignalsHelper instance tasks [{} in total]'.format(total_tasks), total_tasks > 0)
# clean instance tasks: this allows calling own save() for model instances
setattr(instance, attr_name, [])
for task in tasks:
SignalsHelper.execute_task(task)
# end timer
duration = (now() - start).total_seconds()
SignalsHelper._print('SignalsHelper.execute_instance_tasks took {} seconds'.format(duration))
@staticmethod
def get_db_instance(instance):
try:
model = type(instance)
return model._default_manager.get(pk=instance.pk)
except ObjectDoesNotExist:
# object did not exist before
return None
@staticmethod
def attribute_changed(instance, diff_fields, diff_contains={}):
'''
diff_fields: list of field names
diff_contains: either {field_name: [value_1, value_2, ...]} or {field_name: {'from': [old_value_1, ...], 'to': [new_value_1, ...]}}
'''
obj = SignalsHelper.get_db_instance(instance)
if not obj:
# new object
return True
# object existed before, check difference
for field in diff_fields:
saved_value = getattr(obj, field)
instance_value = getattr(instance, field)
if saved_value != instance_value:
try:
# get specific values for field if supplied
diff_values = diff_contains[field]
except KeyError:
return True
if isinstance(diff_values, dict):
from_values = diff_values.get('from', [])
to_values = diff_values.get('to', [])
if from_values and to_values:
# from and to values provided
if saved_value in from_values and instance_value in to_values:
return True
elif from_values:
# only from values provided
if saved_value in from_values:
return True
elif to_values:
# only to values provided
if instance_value in to_values:
return True
else:
# empty dict provided
return True
elif isinstance(diff_values, list):
if not diff_values:
# empty list provided
return True
elif saved_value in diff_values or instance_value in diff_values:
# either old or new value is in provided values
return True
return False
@staticmethod
def get_signal_name(signal):
return next((v for v, k in django_signals.__dict__.items() if k == signal), str(signal))
@staticmethod
def _print(message, force_print=False):
if (settings.DEBUG or force_print) and getattr(settings, 'TEST_PRINT_TASKS', True):
print(message)
class temporary_disconnect_signal:
""" Temporarily disconnect a model from a signal """
def __init__(self, signal, receiver, sender, dispatch_uid=None):
self.signal = signal
self.receiver = receiver
self.sender = sender
self.dispatch_uid = dispatch_uid
self.entered_connected = False
def __enter__(self):
# check if receiver is connected same way as signal.disconnect
from django.dispatch.dispatcher import _make_id
if self.dispatch_uid:
lookup_key = (self.dispatch_uid, _make_id(self.sender))
else:
lookup_key = (_make_id(self.receiver), _make_id(self.sender))
for index in range(len(self.signal.receivers)):
(r_key, _) = self.signal.receivers[index]
if r_key == lookup_key:
self.entered_connected = True
break
if self.entered_connected:
self.signal.disconnect(
receiver=self.receiver,
sender=self.sender,
dispatch_uid=self.dispatch_uid,
)
def __exit__(self, type, value, traceback):
if self.entered_connected:
self.signal.connect(
receiver=self.receiver,
sender=self.sender,
dispatch_uid=self.dispatch_uid,
weak=False
)
class disable_signals:
def __init__(self, disabled_signals=None):
self.stashed_signals = defaultdict(list)
self.disabled_signals = disabled_signals or [
pre_init, post_init,
pre_save, post_save,
pre_delete, post_delete,
pre_migrate, post_migrate,
]
def __enter__(self):
for signal in self.disabled_signals:
self.disconnect(signal)
def __exit__(self, exc_type, exc_val, exc_tb):
for signal in list(self.stashed_signals):
self.reconnect(signal)
def disconnect(self, signal):
self.stashed_signals[signal] = signal.receivers
signal.receivers = []
def reconnect(self, signal):
signal.receivers = self.stashed_signals.get(signal, [])
del self.stashed_signals[signal]
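# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The model and
# receiver names are hypothetical; only the context managers defined above
# are real:
# from django.db.models.signals import post_save
# from myapp.models import Invoice          # hypothetical model
# from myapp.signals import invoice_saved   # hypothetical receiver
#
# # Temporarily disconnect a single receiver during a bulk update:
# with temporary_disconnect_signal(post_save, invoice_saved, Invoice):
#     Invoice.objects.update(processed=True)
#
# # Or silence all common model signals, e.g. inside a data migration:
# with disable_signals():
#     Invoice.objects.create(number='INV-001')
# ---------------------------------------------------------------------------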
|
PypiClean
|
/external_counter_dilshan-1.0.4-py3-none-any.whl/external_counter/external_counter.py
|
import serial
import warnings
class ExternalCounter:
DEFAULT_BAUD_RATE = 115200
PACKET_ID = 94
MODE_CLEAR_DISPLAY = 48
MODE_DISPLAY = 49
MODE_SET_TIME = 50
MODE_SET_DATE = 51
MODE_IDLE = 52
MODE_SET_IDLE_MSG = 53
MODE_SET_IDLE_TYPE = 54
MODE_BELL = 55
"""
Idle mode with maximum power saving; the display is completely shut down.
"""
IDLE_NONE = 0
"""
Display system time in idle mode.
"""
IDLE_TIME = 1
"""
Display system date in the idle mode.
"""
IDLE_DATE = 2
"""
Display custom message in the idle mode.
"""
IDLE_MSG = 3
device_port = ''
serial_handle = None
def __init__(self, port):
self.device_port = port
self.open_device()
@staticmethod
def __string_to_byte_array(instr):
return_array = []
for array_char in instr:
return_array.append(ord(array_char))
pad_count = 10 - len(return_array)
if pad_count > 0:
return_array.extend([0] * pad_count)
return return_array
def __is_device_available(self):
if not self.serial_handle.is_open:
raise Exception("Unable to open communication port")
return True
def __send_extended_byte_array(self, mode, data_buffer):
if not self.is_open():
self.open_device()
data_packet = [self.PACKET_ID, mode]
if len(data_buffer) > 0:
data_packet.extend(data_buffer)
if self.__is_device_available():
self.serial_handle.write(data_packet)
def is_open(self):
"""
Check whether the device communication channel is open or initialized.
:return: True if device communication channel is available. False if device communication channel is closed or
not initialized.
"""
return (self.serial_handle is not None) and self.serial_handle.is_open
def clear_display(self):
"""
Clear all 10 digits of the display panel.
:return: None
"""
self.__send_extended_byte_array(self.MODE_CLEAR_DISPLAY, [])
def show_message(self, msg):
"""
Display specified string in display panel.
:param msg: Message to display in the panel. The maximum allowed length of this string is 10.
:return: None
"""
if len(msg) == 0:
self.clear_display()
else:
if len(msg) > 10:
warnings.warn("Input string is truncated to 10 characters")
output_buffer = msg[:10]
else:
output_buffer = msg
output_array = self.__string_to_byte_array(output_buffer)
assert len(output_array) == 10, "Invalid message buffer size"
self.__send_extended_byte_array(self.MODE_DISPLAY, output_array)
def show_number(self, number):
"""
Display specified number in the display panel.
:param number: Number to display in the panel.
:return: None
"""
self.show_message(str(number))
def set_time(self, time_info):
"""
Set time of the display unit.
:param time_info: Time object to update the system time of the display unit.
:return: None
"""
time_buffer = [time_info.hour, time_info.minute, time_info.second]
self.__send_extended_byte_array(self.MODE_SET_TIME, time_buffer)
def set_date(self, date_info):
"""
Set date of the display unit.
:param date_info: Date object to update the system date of the display unit.
:return: None
"""
date_buffer = [date_info.day, date_info.month, (date_info.year - 2000)]
self.__send_extended_byte_array(self.MODE_SET_DATE, date_buffer)
def set_datetime(self, datetime_info):
"""
Update system date and time of the display unit.
:param datetime_info: Date/Time object to update the system date and time.
:return: None
"""
self.set_time(datetime_info)
self.set_date(datetime_info)
def to_idle(self):
"""
Forcefully switch display panel to idle mode.
:return: None
"""
self.__send_extended_byte_array(self.MODE_IDLE, [])
def set_idle_message(self, msg):
"""
Set idle message of the display unit.
:param msg: Idle message to display in the panel. The maximum allowed length of this string is 10.
:return: None
"""
if len(msg) > 10:
warnings.warn("Input string is truncated to 10 characters")
output_buffer = msg[:10]
else:
output_buffer = msg
output_array = self.__string_to_byte_array(output_buffer)
assert len(output_array) == 10, "Invalid message buffer size"
self.__send_extended_byte_array(self.MODE_SET_IDLE_MSG, output_array)
def set_idle_number(self, number):
"""
Set idle message of the display unit as number.
:param number: Idle message to display in the panel as number.
:return: None
"""
self.set_idle_message(str(number))
def set_idle_mode(self, mode):
"""
Set the default idle mode of the display panel.
:param mode: Idle mode to set as a default.
:return: None
"""
assert self.IDLE_NONE <= mode <= self.IDLE_MSG, "Invalid idle mode"
self.__send_extended_byte_array(self.MODE_SET_IDLE_TYPE, [mode])
def bell(self):
"""
Activate audio buzzer in the display panel.
:return: None
"""
self.__send_extended_byte_array(self.MODE_BELL, [])
def open_device(self, port=''):
"""
Open communication channel with the display panel.
:param port: Serial communication port to link with the display panel.
:return: None
"""
if port != '':
self.device_port = port
if self.is_open():
self.serial_handle.close()
if not self.device_port.strip():
raise Exception("Communication port is not defined or object is not initialized")
self.serial_handle = serial.Serial()
self.serial_handle.port = self.device_port
self.serial_handle.baudrate = self.DEFAULT_BAUD_RATE
self.serial_handle.open()
def close_device(self):
"""
Close communication channel with the display panel.
:return: None
"""
if self.is_open():
self.serial_handle.close()
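# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The serial port
# name is a placeholder and depends on the host system (e.g. 'COM3' on Windows):
# from datetime import datetime
#
# counter = ExternalCounter('/dev/ttyUSB0')
# counter.set_datetime(datetime.now())              # sync the panel clock
# counter.show_message('HELLO')                     # up to 10 characters
# counter.set_idle_mode(ExternalCounter.IDLE_TIME)  # show the time when idle
# counter.bell()
# counter.close_device()
# ---------------------------------------------------------------------------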
|
PypiClean
|
/PhotosAPI_Client-0.5.0.tar.gz/PhotosAPI_Client-0.5.0/photosapi_client/api/default/photo_find_albums_album_photos_get.py
|
from http import HTTPStatus
from typing import Any, Dict, Optional, Union, cast
import httpx
from ... import errors
from ...client import AuthenticatedClient, Client
from ...models.search_results_photo import SearchResultsPhoto
from ...types import UNSET, Response, Unset
def _get_kwargs(
album: str,
*,
client: AuthenticatedClient,
q: Union[Unset, None, str] = UNSET,
caption: Union[Unset, None, str] = UNSET,
token: Union[Unset, None, str] = UNSET,
page: Union[Unset, None, int] = 1,
page_size: Union[Unset, None, int] = 100,
lat: Union[Unset, None, float] = UNSET,
lng: Union[Unset, None, float] = UNSET,
radius: Union[Unset, None, int] = UNSET,
) -> Dict[str, Any]:
url = "{}/albums/{album}/photos".format(client.base_url, album=album)
headers: Dict[str, str] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
params: Dict[str, Any] = {}
params["q"] = q
params["caption"] = caption
params["token"] = token
params["page"] = page
params["page_size"] = page_size
params["lat"] = lat
params["lng"] = lng
params["radius"] = radius
params = {k: v for k, v in params.items() if v is not UNSET and v is not None}
return {
"method": "get",
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"follow_redirects": client.follow_redirects,
"params": params,
}
def _parse_response(*, client: Client, response: httpx.Response) -> Optional[Union[Any, SearchResultsPhoto]]:
if response.status_code == HTTPStatus.OK:
response_200 = SearchResultsPhoto.from_dict(response.json())
return response_200
if response.status_code == HTTPStatus.BAD_REQUEST:
response_400 = cast(Any, None)
return response_400
if response.status_code == HTTPStatus.UNAUTHORIZED:
response_401 = cast(Any, None)
return response_401
if response.status_code == HTTPStatus.NOT_FOUND:
response_404 = cast(Any, None)
return response_404
if response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY:
response_422 = cast(Any, None)
return response_422
if client.raise_on_unexpected_status:
raise errors.UnexpectedStatus(response.status_code, response.content)
else:
return None
def _build_response(*, client: Client, response: httpx.Response) -> Response[Union[Any, SearchResultsPhoto]]:
return Response(
status_code=HTTPStatus(response.status_code),
content=response.content,
headers=response.headers,
parsed=_parse_response(client=client, response=response),
)
def sync_detailed(
album: str,
*,
client: AuthenticatedClient,
q: Union[Unset, None, str] = UNSET,
caption: Union[Unset, None, str] = UNSET,
token: Union[Unset, None, str] = UNSET,
page: Union[Unset, None, int] = 1,
page_size: Union[Unset, None, int] = 100,
lat: Union[Unset, None, float] = UNSET,
lng: Union[Unset, None, float] = UNSET,
radius: Union[Unset, None, int] = UNSET,
) -> Response[Union[Any, SearchResultsPhoto]]:
"""Photo Find
Find a photo by filename, caption, location or token
Args:
album (str):
q (Union[Unset, None, str]):
caption (Union[Unset, None, str]):
token (Union[Unset, None, str]):
page (Union[Unset, None, int]): Default: 1.
page_size (Union[Unset, None, int]): Default: 100.
lat (Union[Unset, None, float]):
lng (Union[Unset, None, float]):
radius (Union[Unset, None, int]):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Response[Union[Any, SearchResultsPhoto]]
"""
kwargs = _get_kwargs(
album=album,
client=client,
q=q,
caption=caption,
token=token,
page=page,
page_size=page_size,
lat=lat,
lng=lng,
radius=radius,
)
response = httpx.request(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(client=client, response=response)
def sync(
album: str,
*,
client: AuthenticatedClient,
q: Union[Unset, None, str] = UNSET,
caption: Union[Unset, None, str] = UNSET,
token: Union[Unset, None, str] = UNSET,
page: Union[Unset, None, int] = 1,
page_size: Union[Unset, None, int] = 100,
lat: Union[Unset, None, float] = UNSET,
lng: Union[Unset, None, float] = UNSET,
radius: Union[Unset, None, int] = UNSET,
) -> Optional[Union[Any, SearchResultsPhoto]]:
"""Photo Find
Find a photo by filename, caption, location or token
Args:
album (str):
q (Union[Unset, None, str]):
caption (Union[Unset, None, str]):
token (Union[Unset, None, str]):
page (Union[Unset, None, int]): Default: 1.
page_size (Union[Unset, None, int]): Default: 100.
lat (Union[Unset, None, float]):
lng (Union[Unset, None, float]):
radius (Union[Unset, None, int]):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Union[Any, SearchResultsPhoto]
"""
return sync_detailed(
album=album,
client=client,
q=q,
caption=caption,
token=token,
page=page,
page_size=page_size,
lat=lat,
lng=lng,
radius=radius,
).parsed
async def asyncio_detailed(
album: str,
*,
client: AuthenticatedClient,
q: Union[Unset, None, str] = UNSET,
caption: Union[Unset, None, str] = UNSET,
token: Union[Unset, None, str] = UNSET,
page: Union[Unset, None, int] = 1,
page_size: Union[Unset, None, int] = 100,
lat: Union[Unset, None, float] = UNSET,
lng: Union[Unset, None, float] = UNSET,
radius: Union[Unset, None, int] = UNSET,
) -> Response[Union[Any, SearchResultsPhoto]]:
"""Photo Find
Find a photo by filename, caption, location or token
Args:
album (str):
q (Union[Unset, None, str]):
caption (Union[Unset, None, str]):
token (Union[Unset, None, str]):
page (Union[Unset, None, int]): Default: 1.
page_size (Union[Unset, None, int]): Default: 100.
lat (Union[Unset, None, float]):
lng (Union[Unset, None, float]):
radius (Union[Unset, None, int]):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Response[Union[Any, SearchResultsPhoto]]
"""
kwargs = _get_kwargs(
album=album,
client=client,
q=q,
caption=caption,
token=token,
page=page,
page_size=page_size,
lat=lat,
lng=lng,
radius=radius,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.request(**kwargs)
return _build_response(client=client, response=response)
async def asyncio(
album: str,
*,
client: AuthenticatedClient,
q: Union[Unset, None, str] = UNSET,
caption: Union[Unset, None, str] = UNSET,
token: Union[Unset, None, str] = UNSET,
page: Union[Unset, None, int] = 1,
page_size: Union[Unset, None, int] = 100,
lat: Union[Unset, None, float] = UNSET,
lng: Union[Unset, None, float] = UNSET,
radius: Union[Unset, None, int] = UNSET,
) -> Optional[Union[Any, SearchResultsPhoto]]:
"""Photo Find
Find a photo by filename, caption, location or token
Args:
album (str):
q (Union[Unset, None, str]):
caption (Union[Unset, None, str]):
token (Union[Unset, None, str]):
page (Union[Unset, None, int]): Default: 1.
page_size (Union[Unset, None, int]): Default: 100.
lat (Union[Unset, None, float]):
lng (Union[Unset, None, float]):
radius (Union[Unset, None, int]):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Union[Any, SearchResultsPhoto]
"""
return (
await asyncio_detailed(
album=album,
client=client,
q=q,
caption=caption,
token=token,
page=page,
page_size=page_size,
lat=lat,
lng=lng,
radius=radius,
)
).parsed
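# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated module). The base URL,
# token and album name are placeholders; AuthenticatedClient is the client
# class generated alongside this module:
# client = AuthenticatedClient(base_url="https://photos.example.com", token="<access token>")
# results = sync(album="holidays", client=client, q="beach", page=1, page_size=50)
# print(results)  # SearchResultsPhoto instance or None
# ---------------------------------------------------------------------------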
|
PypiClean
|
/ressources/lib/node_modules/highcharts/js/es-modules/indicators/roc.src.js
|
'use strict';
import H from '../parts/Globals.js';
import '../parts/Utilities.js';
var seriesType = H.seriesType,
isArray = H.isArray;
// Utils:
function populateAverage(xVal, yVal, i, period, index) {
/**
* Calculated as:
* (Closing Price [today] - Closing Price [n days ago]) /
* Closing Price [n days ago] * 100
*
* Return y as null when avoiding division by zero
*/
var nDaysAgoY,
rocY;
if (index < 0) {
// y data given as an array of values
nDaysAgoY = yVal[i - period];
rocY = nDaysAgoY ?
(yVal[i] - nDaysAgoY) / nDaysAgoY * 100 :
null;
} else {
// y data given as an array of arrays and the index should be used
nDaysAgoY = yVal[i - period][index];
rocY = nDaysAgoY ?
(yVal[i][index] - nDaysAgoY) / nDaysAgoY * 100 :
null;
}
return [xVal[i], rocY];
}
/**
* The ROC series type.
*
* @constructor seriesTypes.roc
* @augments seriesTypes.sma
*/
seriesType('roc', 'sma',
/**
* Rate of change indicator (ROC). The indicator value for each point
* is defined as:
*
* `(C - Cn) / Cn * 100`
*
* where: `C` is the close value of the point of the same x in the
* linked series and `Cn` is the close value of the point `n` periods
* ago. `n` is set through [period](#plotOptions.roc.params.period).
*
* This series requires `linkedTo` option to be set.
*
* @extends plotOptions.sma
* @product highstock
* @sample {highstock} stock/indicators/roc
* Rate of change indicator
* @since 6.0.0
* @optionparent plotOptions.roc
*/
{
params: {
index: 3,
period: 9
}
}, {
nameBase: 'Rate of Change',
getValues: function (series, params) {
var period = params.period,
xVal = series.xData,
yVal = series.yData,
yValLen = yVal ? yVal.length : 0,
ROC = [],
xData = [],
yData = [],
i,
index = -1,
ROCPoint;
// Period is used as a number of time periods ago, so we need more
// (at least 1 more) data than the period value
if (xVal.length <= period) {
return false;
}
// Switch index for OHLC / Candlestick / Arearange
if (isArray(yVal[0])) {
index = params.index;
}
// i = period <-- skip first N-points
// Calculate value one-by-one for each period in visible data
for (i = period; i < yValLen; i++) {
ROCPoint = populateAverage(xVal, yVal, i, period, index);
ROC.push(ROCPoint);
xData.push(ROCPoint[0]);
yData.push(ROCPoint[1]);
}
return {
values: ROC,
xData: xData,
yData: yData
};
}
});
/**
* A `ROC` series. If the [type](#series.wma.type) option is not
* specified, it is inherited from [chart.type](#chart.type).
*
* Rate of change indicator (ROC). The indicator value for each point
* is defined as:
*
* `(C - Cn) / Cn * 100`
*
* where: `C` is the close value of the point of the same x in the
* linked series and `Cn` is the close value of the point `n` periods
* ago. `n` is set through [period](#series.roc.params.period).
*
* This series requires `linkedTo` option to be set.
*
* @type {Object}
* @since 6.0.0
* @extends series,plotOptions.roc
* @excluding data,dataParser,dataURL
* @product highstock
* @apioption series.roc
*/
/**
* @extends series.sma.data
* @product highstock
* @apioption series.roc.data
*/
|
PypiClean
|
/dbt_dameng-1.4.5-py3-none-any.whl/dbt/adapters/dameng/impl.py
|
from typing import (
Optional, List, Set, Dict, Any, Union, Iterable
)
from itertools import chain
import agate
import dbt.exceptions
from dbt.exceptions import (
CrossDbReferenceProhibitedError,
IndexConfigNotDictError,
IndexConfigError,
DbtRuntimeError,
UnexpectedDbReferenceError,
)
from dbt.adapters.dameng.keyword_catalog import KEYWORDS
from dbt.adapters.base.relation import BaseRelation, InformationSchema
from dbt.adapters.base.impl import GET_CATALOG_MACRO_NAME
from dbt.adapters.sql import SQLAdapter
from dbt.adapters.base.meta import available
from dbt.adapters.dameng import DamengAdapterConnectionManager
from dbt.adapters.dameng.column import DamengColumn
from dbt.adapters.dameng.relation import DamengRelation
from dbt.contracts.graph.manifest import Manifest
from dbt.events import AdapterLogger
from dbt.exceptions import raise_compiler_error
from dbt.utils import filter_null_values
logger = AdapterLogger("dameng")
COLUMNS_EQUAL_SQL = '''
with diff_count as (
SELECT
1 as id,
COUNT(*) as num_missing FROM (
(SELECT {columns} FROM {relation_a} {except_op}
SELECT {columns} FROM {relation_b})
MINUS
(SELECT {columns} FROM {relation_b} {except_op}
SELECT {columns} FROM {relation_a})
) a
), table_a_56c36b as (
SELECT COUNT(*) as num_rows FROM {relation_a}
), table_b_56c36b as (
SELECT COUNT(*) as num_rows FROM {relation_b}
), row_count_diff as (
select
1 as id,
table_a_56c36b.num_rows - table_b_56c36b.num_rows as difference
from table_a_56c36b, table_b_56c36b
)
select
row_count_diff.difference as row_count_difference,
diff_count.num_missing as num_mismatched
from row_count_diff
join diff_count using (id)
'''.strip()
LIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'
GET_DATABASE_MACRO_NAME = 'get_database_name'
class DamengAdapter(SQLAdapter):
ConnectionManager = DamengAdapterConnectionManager
Relation = DamengRelation
Column = DamengColumn
def debug_query(self) -> None:
self.execute("select 1 as id from dual")
@classmethod
def date_function(cls):
return 'CURRENT_DATE'
@classmethod
def convert_text_type(cls, agate_table, col_idx):
column = agate_table.columns[col_idx]
lens = [len(d.encode("utf-8")) for d in column.values_without_nulls()]
max_len = max(lens) if lens else 64
length = max_len if max_len > 16 else 16
return "varchar2({})".format(length)
@classmethod
def convert_date_type(cls, agate_table, col_idx):
return "timestamp"
@classmethod
def convert_datetime_type(cls, agate_table, col_idx):
return "timestamp"
@classmethod
def convert_boolean_type(cls, agate_table, col_idx):
return "number(1)"
@classmethod
def convert_number_type(cls, agate_table, col_idx):
decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))
return "number"
@classmethod
def convert_time_type(cls, agate_table, col_idx):
return "timestamp"
@available
def verify_database(self, database):
if database.startswith('"'):
database = database.strip('"')
expected = self.config.credentials.database
if expected and database.lower() != expected.lower():
raise dbt.exceptions.DbtRuntimeError(
'Cross-db references not allowed in {} ({} vs {})'
.format(self.type(), database, expected)
)
# return an empty string on success so macros can call this
return ''
def _make_match_kwargs(self, database, schema, identifier):
quoting = self.config.quoting
if identifier is not None and quoting["identifier"] is False:
identifier = identifier.upper()
if schema is not None and quoting["schema"] is False:
schema = schema.upper()
if database is not None and quoting["database"] is False:
database = database.upper()
return filter_null_values(
{"identifier": identifier, "schema": schema, "database": database}
)
def get_rows_different_sql(
self,
relation_a: DamengRelation,
relation_b: DamengRelation,
column_names: Optional[List[str]] = None,
except_operator: str = 'MINUS',
) -> str:
"""Generate SQL for a query that returns a single row with a two
columns: the number of rows that are different between the two
relations and the number of mismatched rows.
"""
# This method only really exists for test reasons.
names: List[str]
if column_names is None:
columns = self.get_columns_in_relation(relation_a)
# names = sorted((self.quote(c.name) for c in columns)
names = sorted((c.name for c in columns))
else:
# names = sorted((self.quote(n) for n in column_names))
names = sorted((n for n in column_names))
columns_csv = ', '.join(names)
sql = COLUMNS_EQUAL_SQL.format(
columns=columns_csv,
relation_a=str(relation_a),
relation_b=str(relation_b),
except_op=except_operator,
)
return sql
def timestamp_add_sql(
self, add_to: str, number: int = 1, interval: str = 'hour'
) -> str:
# for backwards compatibility, we're compelled to set some sort of
# default. A lot of searching has lead me to believe that the
# '+ interval' syntax used in postgres/redshift is relatively common
# and might even be the SQL standard's intention.
return f"{add_to} + interval '{number}' {interval}"
def get_relation(self, database: str, schema: str, identifier: str) -> Optional[BaseRelation]:
if database == 'None':
database = self.config.credentials.database
return super().get_relation(database, schema, identifier)
def _get_one_catalog(
self,
information_schema: InformationSchema,
schemas: Set[str],
manifest: Manifest,
) -> agate.Table:
kwargs = {"information_schema": information_schema, "schemas": schemas}
table = self.execute_macro(
GET_CATALOG_MACRO_NAME,
kwargs=kwargs,
# pass in the full manifest so we get any local project
# overrides
manifest=manifest,
)
# In case database is not defined, we can use the configured database which we set as part of credentials
for node in chain(manifest.nodes.values(), manifest.sources.values()):
if not node.database or node.database == 'None':
node.database = self.config.credentials.database
results = self._catalog_filter_table(table, manifest)
return results
def _get_catalog_schemas(self, manifest):
# this adapter only allows one database (the main one)
schemas = super()._get_catalog_schemas(manifest)
try:
return schemas.flatten()
except DbtRuntimeError as exc:
raise CrossDbReferenceProhibitedError(self.type(), exc.msg)
def list_relations_without_caching(
self, schema_relation: BaseRelation,
) -> List[BaseRelation]:
# Set database if not supplied
if not self.config.credentials.database:
self.config.credentials.database = self.execute_macro(GET_DATABASE_MACRO_NAME)
kwargs = {'schema_relation': schema_relation}
results = self.execute_macro(
LIST_RELATIONS_MACRO_NAME,
kwargs=kwargs
)
relations = []
for _database, name, _schema, _type in results:
try:
_type = self.Relation.get_relation_type(_type)
except ValueError:
_type = self.Relation.External
relations.append(self.Relation.create(
database=_database,
schema=_schema,
identifier=name,
quote_policy=self.config.quoting,
type=_type
))
return relations
@staticmethod
def is_valid_identifier(identifier) -> bool:
"""Returns True if an identifier is valid
An identifier is considered valid if the following conditions are True
1. First character is alphabetic
2. Rest of the characters is either alphanumeric or any one of the literals '#', '$', '_'
"""
# The first character should be alphabetic
if not identifier[0].isalpha():
return False
# Rest of the characters is either alphanumeric or any one of the literals '#', '$', '_'
idx = 1
while idx < len(identifier):
identifier_chr = identifier[idx]
if not identifier_chr.isalnum() and identifier_chr not in ('#', '$', '_'):
return False
idx += 1
return True
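# Illustrative examples (not part of the original adapter):
#   is_valid_identifier("order_total")  -> True
#   is_valid_identifier("2nd_column")   -> False (first character not alphabetic)
#   is_valid_identifier("amount-usd")   -> False ('-' is not an allowed character)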
@available
def should_identifier_be_quoted(self,
identifier,
models_column_dict=None) -> bool:
"""Returns True if identifier should be quoted else False
An identifier should be quoted in the following 3 cases:
- 1. Identifier is a reserved keyword
- 2. Identifier is not valid according to the following rules
- First character is alphabetic
- Rest of the characters is either alphanumeric or any one of the literals '#', '$', '_'
- 3. User has enabled quoting for the column in the model configuration
"""
if identifier.upper() in KEYWORDS:
return True
elif not self.is_valid_identifier(identifier):
return True
elif models_column_dict and identifier in models_column_dict:
return models_column_dict[identifier].get('quote', False)
elif models_column_dict and self.quote(identifier) in models_column_dict:
return models_column_dict[self.quote(identifier)].get('quote', False)
return False
@available
def check_and_quote_identifier(self, identifier, models_column_dict=None) -> str:
if self.should_identifier_be_quoted(identifier, models_column_dict):
return self.quote(identifier)
else:
return identifier
@available
def quote_seed_column(
self, column: str, quote_config: Optional[bool]
) -> str:
quote_columns: bool = False
if isinstance(quote_config, bool):
quote_columns = quote_config
elif self.should_identifier_be_quoted(column):
quote_columns = True
elif quote_config is None:
pass
else:
raise dbt.exceptions.CompilationError(f'The seed configuration value of "quote_columns" '
f'has an invalid type {type(quote_config)}')
if quote_columns:
return self.quote(column)
else:
return column
def valid_incremental_strategies(self):
return ["append", "merge"]
# def standardize_grants_dict(self, grants_table: agate.Table) -> dict:
# """Translate the result of `show grants` (or equivalent) to match the
# grants which a user would configure in their project.
# Ideally, the SQL to show grants should also be filtering:
# filter OUT any grants TO the current user/role (e.g. OWNERSHIP).
# If that's not possible in SQL, it can be done in this method instead.
# :param grants_table: An agate table containing the query result of
# the SQL returned by get_show_grant_sql
# :return: A standardized dictionary matching the `grants` config
# :rtype: dict
# """
# unsupported_privileges = ["INDEX", "READ", "WRITE"]
#
# grants_dict: Dict[str, List[str]] = {}
# for row in grants_table:
# grantee = row["grantor"]
# privilege = row["privilege"]
#
# # skip unsupported privileges
# if privilege in unsupported_privileges:
# continue
#
# if privilege in grants_dict.keys():
# grants_dict[privilege].append(grantee)
# else:
# grants_dict.update({privilege: [grantee]})
# return grants_dict
# def list_schemas(self):
# # connection = self.acquire_connection(database)
# # cursor = connection.cursor()
# # cursor.execute("SHOW SCHEMAS")
# # return [row[0] for row in cursor.fetchall()]
# query = """
# SELECT distinct A.NAME SCHEMA_NAME FROM SYSOBJECTS A,DBA_USERS B
# WHERE A.PID=B.USER_ID AND A.TYPE$='SCH' """
# res = self.execute(query)
# schemas = []
# for row in res:
# schemas.append(row[0])
# return schemas
# def create_schema(self, database, if_not_exists=False):
# # connection = self.acquire_connection(schema)
# # cursor = connection.cursor()
# database = str(database).split(".")[0]
# query = f"CREATE SCHEMA {'IF NOT EXISTS ' if if_not_exists else ''}{database}"
# # cursor.execute(query)
# self.execute(query)
# def list_relations(self, schema):
# connection = self.acquire_connection(schema)
# cursor = connection.cursor()
# cursor.execute("SHOW TABLES")
# results = cursor.fetchall()
# relations = []
# for row in results:
# relations.append({
# 'schema': schema,
# 'name': row[0],
# 'type': 'table'
# })
# return relations
# def get_columns_in_relation(self, relation):
# connection = self.acquire_connection(relation.get('schema'))
# cursor = connection.cursor()
# cursor.execute(f"DESCRIBE {relation.get('name')};")
# results = cursor.fetchall()
# columns = []
# for row in results:
# columns.append(DamengColumn(
# name=row[0],
# data_type=row[1],
# table_name=relation.get('name'),
# table_schema=relation.get('schema')
# ))
# return columns
# def get_rows(self, schema, identifier):
# connection = self.acquire_connection(schema)
# cursor = connection.cursor()
# cursor.execute(f"SELECT * FROM {identifier};")
# results = cursor.fetchall()
# column_names = [desc[0] for desc in cursor.description]
# rows = []
# for row in results:
# rows.append({column_names[i]: row[i] for i in range(len(column_names))})
# return rows
|
PypiClean
|
/tedana-23.0.1.tar.gz/tedana-23.0.1/.github/ISSUE_TEMPLATE/issue_template.md
|
---
name: Issue Template
about: File an issue
title: ''
labels: ''
assignees: ''
---
<!--
This is a suggested issue template for tedana.
If there is other information that would be helpful to include, please do not hesitate to add it!
Before submitting, please check to make sure that the issue is not already addressed; if there is a related issue, then please cross-reference it by #.
If this is a usage question, please check out NeuroStars here:
https://neurostars.org/
and tag your topic with "tedana"
-->
<!--
Summarize the issue in 1-2 sentences, linking other issues if they are relevant
Note: simply typing # will prompt you for open issues to select from
-->
### Summary
<!--
If needed, add additional detail for:
1. Recreating a bug/problem
2. Any additional context necessary to understand the issue
-->
### Additional Detail
<!--
If desired, add suggested next steps.
If you foresee them in a particular order or priority, please use numbering
-->
### Next Steps
<!--
Thank you for submitting your issue!
If you do not receive a response within a calendar week, please post a comment on this issue to catch our attention.
Some issues may not be resolved right away due to the volunteer nature of the project; thank you for your patience!
-->
|
PypiClean
|
/zenoss.toolbox-0.5.2.tar.gz/zenoss.toolbox-0.5.2/README.rst
|
zenoss.toolbox
==============
*Current version: 0.5.2*
Utilities for analyzing/debugging Zenoss environments as well as tools to increase/maintain performance.
*How do I Install and Use the zenoss.toolbox Package?*
* https://support.zenoss.com/hc/en-us/articles/203117595
Tools Included
==============
zodbscan
--------
*How do I use zodbscan to Scan zodb for Dangling References?*
* https://support.zenoss.com/hc/en-us/articles/203118175
findposkeyerror
---------------
*How do I use findposkeyerror to detect and fix POSKeyErrors in zodb?*
* https://support.zenoss.com/hc/en-us/articles/203117795
zenrelationscan
---------------
*How do I use zenrelationscan to scan/fix ZenRelations*
* https://support.zenoss.com/hc/en-us/articles/203121165
zencatalogscan
--------------
*How do I use zencatalogscan to detect and fix unresolvable object references in catalogs?*
* https://support.zenoss.com/hc/en-us/articles/203118075
zenindextool
------------
*How to use zenindextool to reindex top-level organizers*
* https://support.zenoss.com/hc/en-us/articles/203263689
zennetworkclean
---------------
*How to use zennetworkclean to remove unused network information*
* https://support.zenoss.com/hc/en-us/articles/203263699
Author: Brian Bibeault ([email protected])
|
PypiClean
|
/wandb-ng-0.0.44.tar.gz/wandb-ng-0.0.44/wandb/vendor/prompt_toolkit/layout/menus.py
|
from __future__ import unicode_literals
from six.moves import zip_longest, range
from prompt_toolkit.filters import HasCompletions, IsDone, Condition, to_cli_filter
from prompt_toolkit.mouse_events import MouseEventType
from prompt_toolkit.token import Token
from prompt_toolkit.utils import get_cwidth
from .containers import Window, HSplit, ConditionalContainer, ScrollOffsets
from .controls import UIControl, UIContent
from .dimension import LayoutDimension
from .margins import ScrollbarMargin
from .screen import Point, Char
import math
__all__ = (
'CompletionsMenu',
'MultiColumnCompletionsMenu',
)
class CompletionsMenuControl(UIControl):
"""
Helper for drawing the complete menu to the screen.
:param scroll_offset: Number (integer) representing the preferred amount of
completions to be displayed before and after the current one. When this
is a very high number, the current completion will be shown in the
middle most of the time.
"""
# Preferred minimum size of the menu control.
# The CompletionsMenu class defines a width of 8, and there is a scrollbar
# of 1.)
MIN_WIDTH = 7
def __init__(self):
self.token = Token.Menu.Completions
def has_focus(self, cli):
return False
def preferred_width(self, cli, max_available_width):
complete_state = cli.current_buffer.complete_state
if complete_state:
menu_width = self._get_menu_width(500, complete_state)
menu_meta_width = self._get_menu_meta_width(500, complete_state)
return menu_width + menu_meta_width
else:
return 0
def preferred_height(self, cli, width, max_available_height, wrap_lines):
complete_state = cli.current_buffer.complete_state
if complete_state:
return len(complete_state.current_completions)
else:
return 0
def create_content(self, cli, width, height):
"""
Create a UIContent object for this control.
"""
complete_state = cli.current_buffer.complete_state
if complete_state:
completions = complete_state.current_completions
index = complete_state.complete_index # Can be None!
# Calculate width of completions menu.
menu_width = self._get_menu_width(width, complete_state)
menu_meta_width = self._get_menu_meta_width(width - menu_width, complete_state)
show_meta = self._show_meta(complete_state)
def get_line(i):
c = completions[i]
is_current_completion = (i == index)
result = self._get_menu_item_tokens(c, is_current_completion, menu_width)
if show_meta:
result += self._get_menu_item_meta_tokens(c, is_current_completion, menu_meta_width)
return result
return UIContent(get_line=get_line,
cursor_position=Point(x=0, y=index or 0),
line_count=len(completions),
default_char=Char(' ', self.token))
return UIContent()
def _show_meta(self, complete_state):
"""
Return ``True`` if we need to show a column with meta information.
"""
return any(c.display_meta for c in complete_state.current_completions)
def _get_menu_width(self, max_width, complete_state):
"""
Return the width of the main column.
"""
return min(max_width, max(self.MIN_WIDTH, max(get_cwidth(c.display)
for c in complete_state.current_completions) + 2))
def _get_menu_meta_width(self, max_width, complete_state):
"""
Return the width of the meta column.
"""
if self._show_meta(complete_state):
return min(max_width, max(get_cwidth(c.display_meta)
for c in complete_state.current_completions) + 2)
else:
return 0
def _get_menu_item_tokens(self, completion, is_current_completion, width):
if is_current_completion:
token = self.token.Completion.Current
else:
token = self.token.Completion
text, tw = _trim_text(completion.display, width - 2)
padding = ' ' * (width - 2 - tw)
return [(token, ' %s%s ' % (text, padding))]
def _get_menu_item_meta_tokens(self, completion, is_current_completion, width):
if is_current_completion:
token = self.token.Meta.Current
else:
token = self.token.Meta
text, tw = _trim_text(completion.display_meta, width - 2)
padding = ' ' * (width - 2 - tw)
return [(token, ' %s%s ' % (text, padding))]
def mouse_handler(self, cli, mouse_event):
"""
Handle mouse events: clicking and scrolling.
"""
b = cli.current_buffer
if mouse_event.event_type == MouseEventType.MOUSE_UP:
# Select completion.
b.go_to_completion(mouse_event.position.y)
b.complete_state = None
elif mouse_event.event_type == MouseEventType.SCROLL_DOWN:
            # Scrolling down selects the next completion.
b.complete_next(count=3, disable_wrap_around=True)
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
            # Scrolling up selects the previous completion.
b.complete_previous(count=3, disable_wrap_around=True)
def _trim_text(text, max_width):
"""
Trim the text to `max_width`, append dots when the text is too long.
Returns (text, width) tuple.
"""
width = get_cwidth(text)
# When the text is too wide, trim it.
if width > max_width:
# When there are no double width characters, just use slice operation.
if len(text) == width:
trimmed_text = (text[:max(1, max_width-3)] + '...')[:max_width]
return trimmed_text, len(trimmed_text)
# Otherwise, loop until we have the desired width. (Rather
# inefficient, but ok for now.)
else:
trimmed_text = ''
for c in text:
if get_cwidth(trimmed_text + c) <= max_width - 3:
trimmed_text += c
trimmed_text += '...'
return (trimmed_text, get_cwidth(trimmed_text))
else:
return text, width
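# Hedged illustration of _trim_text(), with values worked out from the code above
# (not taken from prompt_toolkit's documentation):
#   _trim_text('hello', 10)       -> ('hello', 5)      # fits, returned unchanged
#   _trim_text('hello world', 8)  -> ('hello...', 8)   # trimmed to max_width and suffixed with dots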
class CompletionsMenu(ConditionalContainer):
def __init__(self, max_height=None, scroll_offset=0, extra_filter=True, display_arrows=False):
extra_filter = to_cli_filter(extra_filter)
display_arrows = to_cli_filter(display_arrows)
super(CompletionsMenu, self).__init__(
content=Window(
content=CompletionsMenuControl(),
width=LayoutDimension(min=8),
height=LayoutDimension(min=1, max=max_height),
scroll_offsets=ScrollOffsets(top=scroll_offset, bottom=scroll_offset),
right_margins=[ScrollbarMargin(display_arrows=display_arrows)],
dont_extend_width=True,
),
# Show when there are completions but not at the point we are
# returning the input.
filter=HasCompletions() & ~IsDone() & extra_filter)
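# Hedged usage sketch (not from the library documentation): the menu is normally
# embedded in a larger layout; constructing it only uses the arguments defined above.
#   menu = CompletionsMenu(max_height=16, scroll_offset=1, display_arrows=True)
# How the menu is attached to the rest of the layout (e.g. as a float below the
# prompt) depends on the surrounding application code.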
class MultiColumnCompletionMenuControl(UIControl):
"""
Completion menu that displays all the completions in several columns.
When there are more completions than space for them to be displayed, an
arrow is shown on the left or right side.
`min_rows` indicates how many rows will be available in any possible case.
    When this is larger than one, it will try to use fewer columns and more
    rows until this value is reached.
    Be careful about passing in a value that is too large: if fewer than the given
    number of rows are available, more columns would have been required, but
    `preferred_width` doesn't know about that and reports a value that is too small.
    This results in fewer completions being displayed and additional scrolling.
(It's a limitation of how the layout engine currently works: first the
widths are calculated, then the heights.)
:param suggested_max_column_width: The suggested max width of a column.
        The column can still be bigger than this, but if there is room for two
        columns of this width, we will display two columns. This avoids a single
        very wide completion significantly reducing the number of columns.
"""
_required_margin = 3 # One extra padding on the right + space for arrows.
def __init__(self, min_rows=3, suggested_max_column_width=30):
assert isinstance(min_rows, int) and min_rows >= 1
self.min_rows = min_rows
self.suggested_max_column_width = suggested_max_column_width
self.token = Token.Menu.Completions
self.scroll = 0
# Info of last rendering.
self._rendered_rows = 0
self._rendered_columns = 0
self._total_columns = 0
self._render_pos_to_completion = {}
self._render_left_arrow = False
self._render_right_arrow = False
self._render_width = 0
def reset(self):
self.scroll = 0
def has_focus(self, cli):
return False
def preferred_width(self, cli, max_available_width):
"""
Preferred width: prefer to use at least min_rows, but otherwise as much
as possible horizontally.
"""
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
result = int(column_width * math.ceil(len(complete_state.current_completions) / float(self.min_rows)))
# When the desired width is still more than the maximum available,
# reduce by removing columns until we are less than the available
# width.
while result > column_width and result > max_available_width - self._required_margin:
result -= column_width
return result + self._required_margin
def preferred_height(self, cli, width, max_available_height, wrap_lines):
"""
Preferred height: as much as needed in order to display all the completions.
"""
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
column_count = max(1, (width - self._required_margin) // column_width)
return int(math.ceil(len(complete_state.current_completions) / float(column_count)))
def create_content(self, cli, width, height):
"""
Create a UIContent object for this menu.
"""
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
self._render_pos_to_completion = {}
def grouper(n, iterable, fillvalue=None):
" grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx "
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def is_current_completion(completion):
" Returns True when this completion is the currently selected one. "
                return complete_state.complete_index is not None and completion == complete_state.current_completion
# Space required outside of the regular columns, for displaying the
# left and right arrow.
HORIZONTAL_MARGIN_REQUIRED = 3
if complete_state:
# There should be at least one column, but it cannot be wider than
# the available width.
column_width = min(width - HORIZONTAL_MARGIN_REQUIRED, column_width)
# However, when the columns tend to be very wide, because there are
# some very wide entries, shrink it anyway.
if column_width > self.suggested_max_column_width:
# `column_width` can still be bigger that `suggested_max_column_width`,
# but if there is place for two columns, we divide by two.
column_width //= (column_width // self.suggested_max_column_width)
visible_columns = max(1, (width - self._required_margin) // column_width)
columns_ = list(grouper(height, complete_state.current_completions))
rows_ = list(zip(*columns_))
# Make sure the current completion is always visible: update scroll offset.
selected_column = (complete_state.complete_index or 0) // height
self.scroll = min(selected_column, max(self.scroll, selected_column - visible_columns + 1))
render_left_arrow = self.scroll > 0
render_right_arrow = self.scroll < len(rows_[0]) - visible_columns
# Write completions to screen.
tokens_for_line = []
for row_index, row in enumerate(rows_):
tokens = []
middle_row = row_index == len(rows_) // 2
# Draw left arrow if we have hidden completions on the left.
if render_left_arrow:
tokens += [(Token.Scrollbar, '<' if middle_row else ' ')]
# Draw row content.
for column_index, c in enumerate(row[self.scroll:][:visible_columns]):
if c is not None:
tokens += self._get_menu_item_tokens(c, is_current_completion(c), column_width)
# Remember render position for mouse click handler.
for x in range(column_width):
self._render_pos_to_completion[(column_index * column_width + x, row_index)] = c
else:
tokens += [(self.token.Completion, ' ' * column_width)]
# Draw trailing padding. (_get_menu_item_tokens only returns padding on the left.)
tokens += [(self.token.Completion, ' ')]
# Draw right arrow if we have hidden completions on the right.
if render_right_arrow:
tokens += [(Token.Scrollbar, '>' if middle_row else ' ')]
# Newline.
tokens_for_line.append(tokens)
else:
tokens = []
self._rendered_rows = height
self._rendered_columns = visible_columns
self._total_columns = len(columns_)
self._render_left_arrow = render_left_arrow
self._render_right_arrow = render_right_arrow
self._render_width = column_width * visible_columns + render_left_arrow + render_right_arrow + 1
def get_line(i):
return tokens_for_line[i]
return UIContent(get_line=get_line, line_count=len(rows_))
def _get_column_width(self, complete_state):
"""
Return the width of each column.
"""
return max(get_cwidth(c.display) for c in complete_state.current_completions) + 1
def _get_menu_item_tokens(self, completion, is_current_completion, width):
if is_current_completion:
token = self.token.Completion.Current
else:
token = self.token.Completion
text, tw = _trim_text(completion.display, width)
padding = ' ' * (width - tw - 1)
return [(token, ' %s%s' % (text, padding))]
def mouse_handler(self, cli, mouse_event):
"""
        Handle scroll and click events.
"""
b = cli.current_buffer
def scroll_left():
b.complete_previous(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = max(0, self.scroll - 1)
def scroll_right():
b.complete_next(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = min(self._total_columns - self._rendered_columns, self.scroll + 1)
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
scroll_right()
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
scroll_left()
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
x = mouse_event.position.x
y = mouse_event.position.y
# Mouse click on left arrow.
if x == 0:
if self._render_left_arrow:
scroll_left()
# Mouse click on right arrow.
elif x == self._render_width - 1:
if self._render_right_arrow:
scroll_right()
# Mouse click on completion.
else:
completion = self._render_pos_to_completion.get((x, y))
if completion:
b.apply_completion(completion)
class MultiColumnCompletionsMenu(HSplit):
"""
Container that displays the completions in several columns.
When `show_meta` (a :class:`~prompt_toolkit.filters.CLIFilter`) evaluates
to True, it shows the meta information at the bottom.
"""
def __init__(self, min_rows=3, suggested_max_column_width=30, show_meta=True, extra_filter=True):
show_meta = to_cli_filter(show_meta)
extra_filter = to_cli_filter(extra_filter)
# Display filter: show when there are completions but not at the point
# we are returning the input.
full_filter = HasCompletions() & ~IsDone() & extra_filter
any_completion_has_meta = Condition(lambda cli:
any(c.display_meta for c in cli.current_buffer.complete_state.current_completions))
# Create child windows.
completions_window = ConditionalContainer(
content=Window(
content=MultiColumnCompletionMenuControl(
min_rows=min_rows, suggested_max_column_width=suggested_max_column_width),
width=LayoutDimension(min=8),
height=LayoutDimension(min=1)),
filter=full_filter)
meta_window = ConditionalContainer(
content=Window(content=_SelectedCompletionMetaControl()),
filter=show_meta & full_filter & any_completion_has_meta)
# Initialise split.
super(MultiColumnCompletionsMenu, self).__init__([
completions_window,
meta_window
])
class _SelectedCompletionMetaControl(UIControl):
"""
Control that shows the meta information of the selected token.
"""
def preferred_width(self, cli, max_available_width):
"""
Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.)
"""
if cli.current_buffer.complete_state:
state = cli.current_buffer.complete_state
return 2 + max(get_cwidth(c.display_meta) for c in state.current_completions)
else:
return 0
def preferred_height(self, cli, width, max_available_height, wrap_lines):
return 1
def create_content(self, cli, width, height):
tokens = self._get_tokens(cli)
def get_line(i):
return tokens
return UIContent(get_line=get_line, line_count=1 if tokens else 0)
def _get_tokens(self, cli):
token = Token.Menu.Completions.MultiColumnMeta
state = cli.current_buffer.complete_state
if state and state.current_completion and state.current_completion.display_meta:
return [(token, ' %s ' % state.current_completion.display_meta)]
return []
|
PypiClean
|
/imdabessay-1.0.3-py3-none-any.whl/mksmith/__main__.py
|
import sys
# Reader imports
import reader
from reader import feed
from reader import viewer
imdabes_sun = """
IMDABES
..
,,,,,,, .,,
.,,,,, ,,,,,,,, ,,,,,,,
,,,,,,, .,,,,,,,,,, .,,,,,,,,,.
,,,,,. ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,....,,,,,,,,,,...,,,,,,,,,,,,,,,,
,,,,,,,,,....,,,.......................,,,,,,,,,,,,,,
,,,,,,,,...................................,,,,,,,,
,,,,,,,,.......................,%..........,,,,,,,,.
,,,,(((,.......@@@@@@@@........&@@@@@@@@,.(@@@@@@@@(,,,
,/@@@&@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/,,,,,,,,,,,,.
,,%&,,,,........#@@@@@@@,.......%@@@@@@@@,.....,,,,,,,,,,,
,,,,,,,,,...............,........*@@@@@@@@......,,,,,,,,,,,
,,,,,,,,,,.......................................,,,,,,,,,,,
.,,,,,,,,,,,.......................................,,,,,,,,
.,,,,,,,,,,,,......................................,,,,,,,.
.,,,,,,,,,,,,,..........*@@ ., * , @ ,..........,,,,,,,.
,,,,,,,,,,,.........@@@@@@@@@@@@@@@@@.........,,,,,,,,
,,,,,,,,,........@@@@@@@%%%%%%@@@@@........,,,,,,,,,
,,,,,,,,........(@@@ @#%%%%%&@@@@(......,,,,,,,,,,,,
,,,,,,,,...........*.% ( % ( .......,,,,,,,,,,,,,,
.,,,,,,,,,,,,,,,,.....................,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,.......,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,, ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,. .,,,,,,,
,,,. ,,,,
"""
def main(): # type: () -> None
print(imdabes_sun)
if __name__ == "__main__":
main()
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/models/managed_tenants/tenant_detailed_information.py
|
from __future__ import annotations
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .. import entity
from .. import entity
class TenantDetailedInformation(entity.Entity):
def __init__(self,) -> None:
"""
Instantiates a new tenantDetailedInformation and sets the default values.
"""
super().__init__()
# The city where the managed tenant is located. Optional. Read-only.
self._city: Optional[str] = None
# The code for the country where the managed tenant is located. Optional. Read-only.
self._country_code: Optional[str] = None
# The name for the country where the managed tenant is located. Optional. Read-only.
self._country_name: Optional[str] = None
# The default domain name for the managed tenant. Optional. Read-only.
self._default_domain_name: Optional[str] = None
# The display name for the managed tenant.
self._display_name: Optional[str] = None
# The business industry associated with the managed tenant. Optional. Read-only.
self._industry_name: Optional[str] = None
# The OdataType property
self.odata_type: Optional[str] = None
# The region where the managed tenant is located. Optional. Read-only.
self._region: Optional[str] = None
# The business segment associated with the managed tenant. Optional. Read-only.
self._segment_name: Optional[str] = None
# The Azure Active Directory tenant identifier for the managed tenant.
self._tenant_id: Optional[str] = None
# The vertical associated with the managed tenant. Optional. Read-only.
self._vertical_name: Optional[str] = None
@property
def city(self,) -> Optional[str]:
"""
Gets the city property value. The city where the managed tenant is located. Optional. Read-only.
Returns: Optional[str]
"""
return self._city
@city.setter
def city(self,value: Optional[str] = None) -> None:
"""
Sets the city property value. The city where the managed tenant is located. Optional. Read-only.
Args:
value: Value to set for the city property.
"""
self._city = value
@property
def country_code(self,) -> Optional[str]:
"""
Gets the countryCode property value. The code for the country where the managed tenant is located. Optional. Read-only.
Returns: Optional[str]
"""
return self._country_code
@country_code.setter
def country_code(self,value: Optional[str] = None) -> None:
"""
Sets the countryCode property value. The code for the country where the managed tenant is located. Optional. Read-only.
Args:
value: Value to set for the country_code property.
"""
self._country_code = value
@property
def country_name(self,) -> Optional[str]:
"""
Gets the countryName property value. The name for the country where the managed tenant is located. Optional. Read-only.
Returns: Optional[str]
"""
return self._country_name
@country_name.setter
def country_name(self,value: Optional[str] = None) -> None:
"""
Sets the countryName property value. The name for the country where the managed tenant is located. Optional. Read-only.
Args:
value: Value to set for the country_name property.
"""
self._country_name = value
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> TenantDetailedInformation:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: TenantDetailedInformation
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return TenantDetailedInformation()
@property
def default_domain_name(self,) -> Optional[str]:
"""
Gets the defaultDomainName property value. The default domain name for the managed tenant. Optional. Read-only.
Returns: Optional[str]
"""
return self._default_domain_name
@default_domain_name.setter
def default_domain_name(self,value: Optional[str] = None) -> None:
"""
Sets the defaultDomainName property value. The default domain name for the managed tenant. Optional. Read-only.
Args:
value: Value to set for the default_domain_name property.
"""
self._default_domain_name = value
@property
def display_name(self,) -> Optional[str]:
"""
Gets the displayName property value. The display name for the managed tenant.
Returns: Optional[str]
"""
return self._display_name
@display_name.setter
def display_name(self,value: Optional[str] = None) -> None:
"""
Sets the displayName property value. The display name for the managed tenant.
Args:
value: Value to set for the display_name property.
"""
self._display_name = value
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from .. import entity
fields: Dict[str, Callable[[Any], None]] = {
"city": lambda n : setattr(self, 'city', n.get_str_value()),
"countryCode": lambda n : setattr(self, 'country_code', n.get_str_value()),
"countryName": lambda n : setattr(self, 'country_name', n.get_str_value()),
"defaultDomainName": lambda n : setattr(self, 'default_domain_name', n.get_str_value()),
"displayName": lambda n : setattr(self, 'display_name', n.get_str_value()),
"industryName": lambda n : setattr(self, 'industry_name', n.get_str_value()),
"region": lambda n : setattr(self, 'region', n.get_str_value()),
"segmentName": lambda n : setattr(self, 'segment_name', n.get_str_value()),
"tenantId": lambda n : setattr(self, 'tenant_id', n.get_str_value()),
"verticalName": lambda n : setattr(self, 'vertical_name', n.get_str_value()),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
@property
def industry_name(self,) -> Optional[str]:
"""
Gets the industryName property value. The business industry associated with the managed tenant. Optional. Read-only.
Returns: Optional[str]
"""
return self._industry_name
@industry_name.setter
def industry_name(self,value: Optional[str] = None) -> None:
"""
Sets the industryName property value. The business industry associated with the managed tenant. Optional. Read-only.
Args:
value: Value to set for the industry_name property.
"""
self._industry_name = value
@property
def region(self,) -> Optional[str]:
"""
Gets the region property value. The region where the managed tenant is located. Optional. Read-only.
Returns: Optional[str]
"""
return self._region
@region.setter
def region(self,value: Optional[str] = None) -> None:
"""
Sets the region property value. The region where the managed tenant is located. Optional. Read-only.
Args:
value: Value to set for the region property.
"""
self._region = value
@property
def segment_name(self,) -> Optional[str]:
"""
Gets the segmentName property value. The business segment associated with the managed tenant. Optional. Read-only.
Returns: Optional[str]
"""
return self._segment_name
@segment_name.setter
def segment_name(self,value: Optional[str] = None) -> None:
"""
Sets the segmentName property value. The business segment associated with the managed tenant. Optional. Read-only.
Args:
value: Value to set for the segment_name property.
"""
self._segment_name = value
def serialize(self,writer: SerializationWriter) -> None:
"""
        Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
super().serialize(writer)
writer.write_str_value("city", self.city)
writer.write_str_value("countryCode", self.country_code)
writer.write_str_value("countryName", self.country_name)
writer.write_str_value("defaultDomainName", self.default_domain_name)
writer.write_str_value("displayName", self.display_name)
writer.write_str_value("industryName", self.industry_name)
writer.write_str_value("region", self.region)
writer.write_str_value("segmentName", self.segment_name)
writer.write_str_value("tenantId", self.tenant_id)
writer.write_str_value("verticalName", self.vertical_name)
@property
def tenant_id(self,) -> Optional[str]:
"""
Gets the tenantId property value. The Azure Active Directory tenant identifier for the managed tenant.
Returns: Optional[str]
"""
return self._tenant_id
@tenant_id.setter
def tenant_id(self,value: Optional[str] = None) -> None:
"""
Sets the tenantId property value. The Azure Active Directory tenant identifier for the managed tenant.
Args:
value: Value to set for the tenant_id property.
"""
self._tenant_id = value
@property
def vertical_name(self,) -> Optional[str]:
"""
Gets the verticalName property value. The vertical associated with the managed tenant. Optional. Read-only.
Returns: Optional[str]
"""
return self._vertical_name
@vertical_name.setter
def vertical_name(self,value: Optional[str] = None) -> None:
"""
Sets the verticalName property value. The vertical associated with the managed tenant. Optional. Read-only.
Args:
value: Value to set for the vertical_name property.
"""
self._vertical_name = value
|
PypiClean
|
/wasp-launcher-0.0.2.tar.gz/wasp-launcher-0.0.2/wasp_launcher/static/angular/angular-1.6.1/i18n/angular-locale_ar-ss.js
|
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"\u0635",
"\u0645"
],
"DAY": [
"\u0627\u0644\u0623\u062d\u062f",
"\u0627\u0644\u0627\u062b\u0646\u064a\u0646",
"\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621",
"\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621",
"\u0627\u0644\u062e\u0645\u064a\u0633",
"\u0627\u0644\u062c\u0645\u0639\u0629",
"\u0627\u0644\u0633\u0628\u062a"
],
"ERANAMES": [
"\u0642\u0628\u0644 \u0627\u0644\u0645\u064a\u0644\u0627\u062f",
"\u0645\u064a\u0644\u0627\u062f\u064a"
],
"ERAS": [
"\u0642.\u0645",
"\u0645"
],
"FIRSTDAYOFWEEK": 0,
"MONTH": [
"\u064a\u0646\u0627\u064a\u0631",
"\u0641\u0628\u0631\u0627\u064a\u0631",
"\u0645\u0627\u0631\u0633",
"\u0623\u0628\u0631\u064a\u0644",
"\u0645\u0627\u064a\u0648",
"\u064a\u0648\u0646\u064a\u0648",
"\u064a\u0648\u0644\u064a\u0648",
"\u0623\u063a\u0633\u0637\u0633",
"\u0633\u0628\u062a\u0645\u0628\u0631",
"\u0623\u0643\u062a\u0648\u0628\u0631",
"\u0646\u0648\u0641\u0645\u0628\u0631",
"\u062f\u064a\u0633\u0645\u0628\u0631"
],
"SHORTDAY": [
"\u0627\u0644\u0623\u062d\u062f",
"\u0627\u0644\u0627\u062b\u0646\u064a\u0646",
"\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621",
"\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621",
"\u0627\u0644\u062e\u0645\u064a\u0633",
"\u0627\u0644\u062c\u0645\u0639\u0629",
"\u0627\u0644\u0633\u0628\u062a"
],
"SHORTMONTH": [
"\u064a\u0646\u0627\u064a\u0631",
"\u0641\u0628\u0631\u0627\u064a\u0631",
"\u0645\u0627\u0631\u0633",
"\u0623\u0628\u0631\u064a\u0644",
"\u0645\u0627\u064a\u0648",
"\u064a\u0648\u0646\u064a\u0648",
"\u064a\u0648\u0644\u064a\u0648",
"\u0623\u063a\u0633\u0637\u0633",
"\u0633\u0628\u062a\u0645\u0628\u0631",
"\u0623\u0643\u062a\u0648\u0628\u0631",
"\u0646\u0648\u0641\u0645\u0628\u0631",
"\u062f\u064a\u0633\u0645\u0628\u0631"
],
"STANDALONEMONTH": [
"\u064a\u0646\u0627\u064a\u0631",
"\u0641\u0628\u0631\u0627\u064a\u0631",
"\u0645\u0627\u0631\u0633",
"\u0623\u0628\u0631\u064a\u0644",
"\u0645\u0627\u064a\u0648",
"\u064a\u0648\u0646\u064a\u0648",
"\u064a\u0648\u0644\u064a\u0648",
"\u0623\u063a\u0633\u0637\u0633",
"\u0633\u0628\u062a\u0645\u0628\u0631",
"\u0623\u0643\u062a\u0648\u0628\u0631",
"\u0646\u0648\u0641\u0645\u0628\u0631",
"\u062f\u064a\u0633\u0645\u0628\u0631"
],
"WEEKENDRANGE": [
5,
6
],
"fullDate": "EEEE\u060c d MMMM\u060c y",
"longDate": "d MMMM\u060c y",
"medium": "dd\u200f/MM\u200f/y h:mm:ss a",
"mediumDate": "dd\u200f/MM\u200f/y",
"mediumTime": "h:mm:ss a",
"short": "d\u200f/M\u200f/y h:mm a",
"shortDate": "d\u200f/M\u200f/y",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u00a3",
"DECIMAL_SEP": "\u066b",
"GROUP_SEP": "\u066c",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-\u00a4\u00a0",
"negSuf": "",
"posPre": "\u00a4\u00a0",
"posSuf": ""
}
]
},
"id": "ar-ss",
"localeID": "ar_SS",
"pluralCat": function(n, opt_precision) { if (n == 0) { return PLURAL_CATEGORY.ZERO; } if (n == 1) { return PLURAL_CATEGORY.ONE; } if (n == 2) { return PLURAL_CATEGORY.TWO; } if (n % 100 >= 3 && n % 100 <= 10) { return PLURAL_CATEGORY.FEW; } if (n % 100 >= 11 && n % 100 <= 99) { return PLURAL_CATEGORY.MANY; } return PLURAL_CATEGORY.OTHER;}
});
}]);
|
PypiClean
|
/safegate_pro-2021.7.6-py3-none-any.whl/homeassistant/components/kostal_plenticore/sensor.py
|
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any, Callable
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_DEVICE_CLASS, ATTR_ICON, ATTR_UNIT_OF_MEASUREMENT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTR_ENABLED_DEFAULT,
DOMAIN,
SENSOR_PROCESS_DATA,
SENSOR_SETTINGS_DATA,
)
from .helper import (
PlenticoreDataFormatter,
ProcessDataUpdateCoordinator,
SettingDataUpdateCoordinator,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Add kostal plenticore Sensors."""
plenticore = hass.data[DOMAIN][entry.entry_id]
entities = []
available_process_data = await plenticore.client.get_process_data()
process_data_update_coordinator = ProcessDataUpdateCoordinator(
hass,
_LOGGER,
"Process Data",
timedelta(seconds=10),
plenticore,
)
for module_id, data_id, name, sensor_data, fmt in SENSOR_PROCESS_DATA:
if (
module_id not in available_process_data
or data_id not in available_process_data[module_id]
):
_LOGGER.debug(
"Skipping non existing process data %s/%s", module_id, data_id
)
continue
entities.append(
PlenticoreDataSensor(
process_data_update_coordinator,
entry.entry_id,
entry.title,
module_id,
data_id,
name,
sensor_data,
PlenticoreDataFormatter.get_method(fmt),
plenticore.device_info,
)
)
available_settings_data = await plenticore.client.get_settings()
settings_data_update_coordinator = SettingDataUpdateCoordinator(
hass,
_LOGGER,
"Settings Data",
timedelta(seconds=300),
plenticore,
)
for module_id, data_id, name, sensor_data, fmt in SENSOR_SETTINGS_DATA:
if module_id not in available_settings_data or data_id not in (
setting.id for setting in available_settings_data[module_id]
):
_LOGGER.debug(
"Skipping non existing setting data %s/%s", module_id, data_id
)
continue
entities.append(
PlenticoreDataSensor(
settings_data_update_coordinator,
entry.entry_id,
entry.title,
module_id,
data_id,
name,
sensor_data,
PlenticoreDataFormatter.get_method(fmt),
plenticore.device_info,
)
)
async_add_entities(entities)
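# Hedged note: the SENSOR_PROCESS_DATA / SENSOR_SETTINGS_DATA definitions live in
# this integration's const module and are unpacked above as
#   (module_id, data_id, name, sensor_data, fmt)
# so a hypothetical entry could look like
#   ("devices:local", "Dc_P", "Solar Power", {ATTR_UNIT_OF_MEASUREMENT: "W"}, "format_round")
# where the module/data ids, unit and formatter name are illustrative only.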
class PlenticoreDataSensor(CoordinatorEntity, SensorEntity):
"""Representation of a Plenticore data Sensor."""
def __init__(
self,
coordinator,
entry_id: str,
platform_name: str,
module_id: str,
data_id: str,
sensor_name: str,
sensor_data: dict[str, Any],
formatter: Callable[[str], Any],
device_info: DeviceInfo,
):
"""Create a new Sensor Entity for Plenticore process data."""
super().__init__(coordinator)
self.entry_id = entry_id
self.platform_name = platform_name
self.module_id = module_id
self.data_id = data_id
self._sensor_name = sensor_name
self._sensor_data = sensor_data
self._formatter = formatter
self._device_info = device_info
@property
def available(self) -> bool:
"""Return if entity is available."""
return (
super().available
and self.coordinator.data is not None
and self.module_id in self.coordinator.data
and self.data_id in self.coordinator.data[self.module_id]
)
async def async_added_to_hass(self) -> None:
"""Register this entity on the Update Coordinator."""
await super().async_added_to_hass()
self.coordinator.start_fetch_data(self.module_id, self.data_id)
async def async_will_remove_from_hass(self) -> None:
"""Unregister this entity from the Update Coordinator."""
self.coordinator.stop_fetch_data(self.module_id, self.data_id)
await super().async_will_remove_from_hass()
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return self._device_info
@property
def unique_id(self) -> str:
"""Return the unique id of this Sensor Entity."""
return f"{self.entry_id}_{self.module_id}_{self.data_id}"
@property
def name(self) -> str:
"""Return the name of this Sensor Entity."""
return f"{self.platform_name} {self._sensor_name}"
@property
def unit_of_measurement(self) -> str | None:
"""Return the unit of this Sensor Entity or None."""
return self._sensor_data.get(ATTR_UNIT_OF_MEASUREMENT)
@property
def icon(self) -> str | None:
"""Return the icon name of this Sensor Entity or None."""
return self._sensor_data.get(ATTR_ICON)
@property
def device_class(self) -> str | None:
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._sensor_data.get(ATTR_DEVICE_CLASS)
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._sensor_data.get(ATTR_ENABLED_DEFAULT, False)
@property
def state(self) -> Any | None:
"""Return the state of the sensor."""
if self.coordinator.data is None:
# None is translated to STATE_UNKNOWN
return None
raw_value = self.coordinator.data[self.module_id][self.data_id]
return self._formatter(raw_value) if self._formatter else raw_value
|
PypiClean
|
/steam_idle-1.0.1.tar.gz/steam_idle-1.0.1/README.rst
|
.. image:: https://landscape.io/github/jayme-github/steam_idle/master/landscape.svg?style=flat
:target: https://landscape.io/github/jayme-github/steam_idle/master
:alt: Code Health
============
Steam Idle
============
This is mostly a rewrite of `Idle Master (Python branch) <https://github.com/jshackles/idle_master_py>`_.
It will idle all games with play time < 2 hours in parallel until they are out of the refund period (2+ hours).
After that, it will idle each game sequentially until all cards have been dropped.
I did this rewrite because I didn't want to poke around with cookies (and I thought idle_master_py was unmaintained).
Installation
============
.. code-block:: sh
pip install steam_idle
Requirements
============
* `steamweb <https://github.com/jayme-github/steamweb>`_>=0.6
* pycrypto>=2.6.1
* requests>=2.7.0
* future>=0.14.3 (python 2.x)
Usage
=====
Just run *steam_idle_cli.py* and follow the instructions:
.. code-block:: sh
steam_idle_cli.py
GUI (Qt) version
================
For a (Qt 4) GUI version please see `steam_idle_qt <https://github.com/jayme-github/steam_idle_qt>`_.
License
=======
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. A copy of the GNU General Public License can be found at http://www.gnu.org/licenses/. For your convenience, a copy of this license is included.
|
PypiClean
|
/baiduads_sdk_auto-2023.1.0-py3-none-any.whl/baiduads/titlerecommend/model/get_title_request.py
|
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.titlerecommend.model.filter_condition import FilterCondition
globals()['FilterCondition'] = FilterCondition
class GetTitleRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'adgroup_id': (int,), # noqa: E501
'query': (str,), # noqa: E501
'limit': (int,), # noqa: E501
'filters': ([FilterCondition],), # noqa: E501
'org_title': (str,), # noqa: E501
'src': (str,), # noqa: E501
'query_type': (int,), # noqa: E501
'length_level': (int,), # noqa: E501
'sort_field': (str,), # noqa: E501
'sort_order': (str,), # noqa: E501
'page_no': (int,), # noqa: E501
'page_size': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'adgroup_id': 'adgroupId', # noqa: E501
'query': 'query', # noqa: E501
'limit': 'limit', # noqa: E501
'filters': 'filters', # noqa: E501
'org_title': 'orgTitle', # noqa: E501
'src': 'src', # noqa: E501
'query_type': 'queryType', # noqa: E501
'length_level': 'lengthLevel', # noqa: E501
'sort_field': 'sortField', # noqa: E501
'sort_order': 'sortOrder', # noqa: E501
'page_no': 'pageNo', # noqa: E501
'page_size': 'pageSize', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GetTitleRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
adgroup_id (int): [optional] # noqa: E501
query (str): [optional] # noqa: E501
limit (int): [optional] # noqa: E501
filters ([FilterCondition]): [optional] # noqa: E501
org_title (str): [optional] # noqa: E501
src (str): [optional] # noqa: E501
query_type (int): [optional] # noqa: E501
length_level (int): [optional] # noqa: E501
sort_field (str): [optional] # noqa: E501
sort_order (str): [optional] # noqa: E501
page_no (int): [optional] # noqa: E501
page_size (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GetTitleRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
adgroup_id (int): [optional] # noqa: E501
query (str): [optional] # noqa: E501
limit (int): [optional] # noqa: E501
filters ([FilterCondition]): [optional] # noqa: E501
org_title (str): [optional] # noqa: E501
src (str): [optional] # noqa: E501
query_type (int): [optional] # noqa: E501
length_level (int): [optional] # noqa: E501
sort_field (str): [optional] # noqa: E501
sort_order (str): [optional] # noqa: E501
page_no (int): [optional] # noqa: E501
page_size (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/keras-maskrcnn-0.2.2.tar.gz/keras-maskrcnn-0.2.2/keras_maskrcnn/models/__init__.py
|
import keras_retinanet.models
class Backbone(keras_retinanet.models.Backbone):
""" This class stores additional information on backbones.
"""
def __init__(self, backbone_name):
super(Backbone, self).__init__(backbone_name)
# a dictionary mapping custom layer names to the correct classes
from ..layers.roi import RoiAlign
from ..layers.upsample import Upsample
from ..layers.misc import Shape, ConcatenateBoxes, Cast
from .. import losses
self.custom_objects.update({
'RoiAlign' : RoiAlign,
'Upsample' : Upsample,
'Shape' : Shape,
'ConcatenateBoxes' : ConcatenateBoxes,
'ConcatenateBoxesMasks' : ConcatenateBoxes, # legacy
'_mask_conditional' : losses.mask(),
'Cast' : Cast,
})
def maskrcnn(self, *args, **kwargs):
""" Returns a maskrcnn model using the correct backbone.
"""
raise NotImplementedError('maskrcnn method not implemented.')
def backbone(backbone_name):
""" Returns a backbone object for the given backbone_name.
"""
if 'resnet' in backbone_name:
from .resnet import ResNetBackbone as b
else:
raise NotImplementedError('Backbone class for \'{}\' not implemented.'.format(backbone_name))
return b(backbone_name)
def load_model(filepath, backbone_name='resnet50'):
""" Loads a retinanet model using the correct custom objects.
# Arguments
filepath: one of the following:
- string, path to the saved model, or
- h5py.File object from which to load the model
backbone_name: Backbone with which the model was trained.
# Returns
A keras.models.Model object.
# Raises
ImportError: if h5py is not available.
ValueError: In case of an invalid savefile.
"""
import keras.models
return keras.models.load_model(filepath, custom_objects=backbone(backbone_name).custom_objects)
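# Hedged usage sketch (the snapshot path is a placeholder, not a shipped file):
#   from keras_maskrcnn import models
#   model = models.load_model('/path/to/snapshot.h5', backbone_name='resnet50')
# The custom_objects needed to deserialize layers such as RoiAlign and Upsample
# are provided by the Backbone subclass selected via backbone_name.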
|
PypiClean
|
/braincube_connector-2.5.2-py3-none-any.whl/braincube_connector/data/data.py
|
import json
from typing import Any, Dict, List
import pandas as pd
from braincube_connector import client, parameters, tools
from braincube_connector.data import conditions
DATA_PATH = "braindata/{mb_id}/LF"
DATACOL = "data"
def _expand_var_id(long_mb_id: str, var_id: int) -> str:
"""Extend a variable name to include its memory bases id.
Args:
long_mb_id: Memory bases bcId extended with the 'mb' keyword.
var_id: Variable bcId.
Returns:
An extended variable id 'long_mb_id/dvar_id'.
"""
return "{mb}/d{var}".format(mb=long_mb_id, var=var_id)
def _to_datetime(dates: List["str"]):
"""Convert DATE str to a datetime object.
Args:
dates: A braincube styled date string.
Returns:
A datetime object.
"""
dates = pd.to_datetime(dates, errors="coerce", format="%Y%m%d_%H%M%S").to_series()
return [pandas_timestamp_to_datetime(timestamp) for timestamp in dates]
def pandas_timestamp_to_datetime(timestamp):
"""Convert pandas timestamp to datetime, with NaT handling.
Args:
timestamp: Pandas timestamp to convert to a python datetime.
Returns:
A python datetime object.
"""
if pd.isnull(timestamp):
return None
return pd.Timestamp.to_pydatetime(timestamp)
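# Hedged example of the date handling above (the string format follows the
# "%Y%m%d_%H%M%S" pattern used in _to_datetime; values are invented):
#   _to_datetime(["20210301_120000", "not-a-date"])
#   -> [datetime.datetime(2021, 3, 1, 12, 0), None]   # unparseable entries become None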
def _extract_format_data(raw_dataset: Dict[str, Any]) -> Dict[int, Any]:
"""Extract the requested data from the json.
    The function extracts the data keys and types and converts the columns
    using the types.
    Args:
        raw_dataset: An unformatted dataset received from braindata.
    Returns:
        A formatted dictionary {column_key: formatted column data}
"""
formatted_dataset = {}
for col in raw_dataset["datadefs"]:
col_id = int(col["id"].split("/d")[1])
if col["type"] == "DATETIME" and parameters.get_parameter("parse_date"):
formatted_dataset[col_id] = _to_datetime(col[DATACOL])
elif col["type"] == "NUMERIC":
try:
formatted_dataset[col_id] = list(map(int, col[DATACOL]))
except ValueError:
formatted_dataset[col_id] = list(map(float, col[DATACOL]))
else:
formatted_dataset[col_id] = col[DATACOL]
return formatted_dataset
def get_braindata_memory_base_info(
braincube_path: str, memory_base_bcid: str, braincube_name=""
) -> Dict[str, str]:
"""Get the memory base informations from the braindata.
Args:
braincube_path: Path of the memory base's parent braincube.
memory_base_bcid: memory base's bcid.
braincube_name: Name of the braincube to use to replace the `{braincube-name}` placeholder
Returns:
        Json dictionary with the memory base information.
"""
long_mb_id = "mb{bcid}".format(bcid=memory_base_bcid)
braindata_info_path = "braindata/{mb_id}/simple".format(mb_id=long_mb_id)
data_path = tools.join_path([braincube_path, braindata_info_path.format()])
return client.request_ws(data_path, braincube_name=braincube_name)
def collect_data(
variable_ids: List[int],
memory_base: "MemoryBase", # type: ignore # noqa
filters: List[Dict[str, Any]] = None,
) -> Dict[int, Any]:
"""Get data from the memory bases.
Args:
variable_ids: bcIds of variables for which the data are collected.
memory_base: A memory base on which to collect the data.
filters: List of filter to apply to the request.
Returns:
A dictionary of data list.
"""
long_mb_id = "mb{bcid}".format(bcid=memory_base.get_bcid())
long_variable_ids = [_expand_var_id(long_mb_id, vv) for vv in variable_ids]
data_path = tools.join_path(
[memory_base.get_braincube_path(), DATA_PATH.format(mb_id=long_mb_id)]
)
body_data = {
"order": memory_base.get_order_variable_long_id(),
"definitions": long_variable_ids,
"context": {"dataSource": long_mb_id},
}
filters = conditions.combine_filters(filters) # Merge filters in one filter
if len(filters) == 1:
body_data["context"]["filter"] = filters[0] # type: ignore
return _extract_format_data(
client.request_ws(
data_path,
body_data=json.dumps(body_data),
rtype="POST",
braincube_name=memory_base.get_braincube_name(),
)
)
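# Hedged usage sketch (variable bcIds and the memory base object are placeholders;
# in practice the MemoryBase instance comes from the braincube_connector entities):
#   dataset = collect_data([2000001, 2000002], memory_base=mb)
#   # -> {2000001: [...], 2000002: [...]}, keyed by variable bcId as built by
#   #    _extract_format_data() above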
|
PypiClean
|
/MiWork-2021.2.20.20.8.11-py3-none-any.whl/miwork/dt_enum.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from enum import Enum
class MessageType(Enum):
"""消息的类型
支持:文本、图片、富文本、分享群聊卡片、卡片消息
"""
text = 'text' # 文本
image = 'image' # 图片
post = 'post' # 富文本
share_chat = 'share_chat' # 分享群名片
card = 'interactive' # 卡片消息
forward = 'forward' # 转发消息
class UrgentType(Enum):
"""消息加急类型
支持:飞书内部、短信、电话
"""
app = 'app' # 飞书内部
sms = 'sms' # 短信
phone = 'phone' # 电话
class I18NType(Enum):
"""国际化消息的类型
支持:中文、英文、日文
"""
zh_cn = 'zh_cn'
ja_jp = 'ja_jp'
en_us = 'en_us'
class ImageColor(Enum):
    """Header color of a card message.
    """
    orange = 'orange'
    red = 'red'
    yellow = 'yellow'
    gray = 'gray'
    blue = 'blue'
    green = 'green'
class MethodType(Enum):
    """Request type of a card message button.
    """
    post = 'post'  # send a POST request
    get = 'get'  # send a GET request
    jump = 'jump'  # jump to the specified url
class CalendarRole(Enum):
    reader = 'reader'  # subscriber, can view event details
    free_busy_reader = 'free_busy_reader'  # guest, can only see "busy/free"
class CalendarEventVisibility(Enum):
    """Visibility of a calendar event.
    Supported: only show others whether you are "busy"; public, show event details; visible only to yourself
    """
    default = 'default'  # default, only show others whether you are "busy"
    public = 'public'  # public, show event details
    private = 'private'  # visible only to yourself
class ApprovalUploadFileType(Enum):
image = 'image'
attachment = 'attachment'
class EventType(Enum):
    """Event type
    https://open.feishu.cn/document/uYjL24iN/uUTNz4SN1MjL1UzM
    """
    url_verification = 'url_verification'  # a verification request
    app_ticket = 'app_ticket'  # after a tenant admin enables an ISV app, app_ticket events are sent periodically to the callback address
    app_open = 'app_open'  # pushed when an enterprise admin enables the app in the admin console
    message = 'message'  # a message sent by a user to the app, either in a direct chat with the bot or in a group chat with the bot
    user_add = 'user_add'  # contact directory change
    user_update = 'user_update'
    user_leave = 'user_leave'
    dept_add = 'dept_add'
    dept_update = 'dept_update'
    dept_delete = 'dept_delete'
    contact_scope_change = 'contact_scope_change'
    approval = 'approval'  # approval passed
    leave_approval = 'leave_approval'  # leave approval
    work_approval = 'work_approval'  # overtime approval
    shift_approval = 'shift_approval'  # shift-change approval
    remedy_approval = 'remedy_approval'  # attendance-correction approval
    trip_approval = 'trip_approval'  # business-trip approval
    remove_bot = 'remove_bot'  # bot removed from a chat
    add_bot = 'add_bot'  # bot added to a chat
    p2p_chat_create = 'p2p_chat_create'  # a user opens the bot's chat window for the first time
    add_user_to_chat = 'add_user_to_chat'  # a user joins a group chat
    remove_user_from_chat = 'remove_user_from_chat'  # a user leaves a group chat
    revoke_add_user_from_chat = 'revoke_add_user_from_chat'  # adding a user to a chat is revoked
    unknown = 'unknown'
class ApprovalInstanceStatus(Enum):
    pending = 'PENDING'  # pending review
    approved = 'APPROVED'  # approved
    rejected = 'REJECTED'  # rejected
    canceled = 'CANCELED'  # canceled
    deleted = 'DELETED'  # deleted
class ApprovalTaskStatus(Enum):
    pending = 'PENDING'  # under review
    approved = 'APPROVED'  # approved
    rejected = 'REJECTED'  # rejected
    transfered = 'TRANSFERRED'  # transferred
    canceled = 'DONE'  # done
class ApprovalTaskTypeStatus(Enum):
    or_sign = 'OR'  # OR sign: one approver's approval is enough to pass the node
    and_sign = 'AND'  # AND sign (countersign): all approvers must approve to pass the node
    auto_pass = 'AUTO_PASS'  # auto pass
    auto_reject = 'AUTO_REJECT'  # auto reject
    sequential = 'SEQUENTIAL'  # in sequence
class ApprovalTimelineType(Enum):
    """Timeline activity type"""
    start = 'START'  # approval started
    passed = 'PASS'  # passed
    reject = 'REJECT'  # rejected
    auto_pass = 'AUTO_PASS'  # auto pass
    auto_reject = 'AUTO_REJECT'  # auto reject
    remove_repeat = 'REMOVE_REPEAT'  # deduplicated
    transfer = 'TRANSFER'  # transferred
    add_approver_before = 'ADD_APPROVER_BEFORE'  # add approver before (pre-sign)
    add_approver = 'ADD_APPROVER'  # add approver in parallel
    add_approver_after = 'ADD_APPROVER_AFTER'  # add approver after (post-sign)
    delete_approver = 'DELETE_APPROVER'  # remove approver
    rollback_selected = 'ROLLBACK_SELECTED'  # roll back to a specified node
    rollback = 'ROLLBACK'  # roll back all
    cancel = 'CANCEL'  # withdrawn
    delete = 'DELETE'  # deleted
    cc = 'CC'  # carbon copy
class PayPricePlanType(Enum):
    """Pricing plan type
    """
    trial = 'trial'  # trial
    permanent = 'permanent'  # one-time payment
    per_year = 'per_year'  # yearly payment per enterprise
    per_month = 'per_month'  # monthly payment per enterprise
    per_seat_per_year = 'per_seat_per_year'  # per-seat yearly payment
    per_seat_per_month = 'per_seat_per_month'  # per-seat monthly payment
    permanent_count = 'permanent_count'  # pay per use
class PayBuyType(Enum):
    """Purchase type
    """
    buy = 'buy'  # regular purchase
    # upgrade purchase: only available when price_plan_type is per_year, per_month, per_seat_per_year, or per_seat_per_month
    upgrade = 'upgrade'
    renew = 'renew'  # renewal purchase
class PayStatus(Enum):
    """Current order status
    """
    normal = 'normal'  # normal
    refund = 'refund'  # refunded
    all = 'all'  # all, used when querying
class MeetingReplyStatus(Enum):
    """Reply status; NOT_CHECK_IN means not checked in, ENDED_BEFORE_DUE means ended ahead of schedule
    """
    not_check_in = 'NOT_CHECK_IN'  # not checked in
    ended_before_due = 'ENDED_BEFORE_DUE'  # ended ahead of schedule
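# --- Usage sketch (illustrative, not part of the original module) ---
# The enum members carry the string values expected by the Feishu/Lark open
# API, so payloads are usually built from ``.value`` (the payload keys below
# are illustrative only):
#
#     payload = {
#         'msg_type': MessageType.text.value,   # -> 'text'
#         'urgent_type': UrgentType.app.value,  # -> 'app'
#     }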
|
PypiClean
|
/MOM-Tapyr-1.6.2.tar.gz/MOM-Tapyr-1.6.2/_Attr/Date_Time.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from _MOM import MOM
from _TFL import TFL
from _TFL.pyk import pyk
from _MOM._Attr.Type import *
from _MOM._Attr.Structured import *
from _MOM._Attr import Attr
from _TFL.I18N import _, _T
from _TFL.Regexp import *
import datetime
import itertools
import logging
import time
class _A_DT_ (_A_Structured_) :
"""Common base class for date-valued and time-valued attributes of an object."""
needs_raw_value = False
range_inclusive_p = True
_tuple_off = 0
@property
def output_format (self) :
return self._output_format ()
# end def output_format
def as_code (self, value) :
return self.__super.as_code (self.as_string (value))
# end def as_code
@TFL.Meta.Class_and_Instance_Method
def as_string (soc, value) :
if value is not None :
return pyk.text_type (value.strftime (soc._output_format ()))
return ""
# end def as_string
@TFL.Meta.Class_and_Instance_Method
def value_range (soc, head, tail, obj) :
### `value_range` is inclusive
from _CAL._DTW_ import _DTW_
import _CAL.Date_Time
d = soc.value_range_delta (obj)
n = _DTW_.new_dtw (head)
t = _DTW_.new_dtw (tail)
if not soc.range_inclusive_p :
t -= d
while n <= t :
yield n._body
try :
n += d
except OverflowError :
break
# end def value_range
@TFL.Meta.Class_and_Instance_Method
def _from_string (soc, s, obj = None) :
s = s.strip ()
if s :
for f in soc.input_formats :
try :
result = time.strptime (s, f)
except ValueError :
pass
else :
break
else :
raise MOM.Error.Attribute_Syntax (obj, soc, s)
return soc.P_Type (* result [soc._tuple_off:soc._tuple_len])
# end def _from_string
@TFL.Meta.Class_and_Instance_Method
def _output_format (soc) :
return soc.input_formats [0]
# end def _output_format
# end class _A_DT_
class _A_Date_ (_A_DT_) :
"""Common base class for date-valued attributes of an object."""
not_in_future = False
not_in_past = False
class _Attributes (_A_DT_._Attributes) :
class day (A_Int) :
"""Day specified by date."""
kind = Attr.Query
query = Q.day
# end class day
class month (A_Int) :
"""Month specified by date."""
kind = Attr.Query
query = Q.month
# end class month
class year (A_Int) :
"""Year specified by date."""
kind = Attr.Query
query = Q.year
# end class year
# end class _Attributes
class _Doc_Map_ (_A_DT_._Doc_Map_) :
not_in_future = """
A true value of `not_in_future` means that the date/time value of
the attribute must not lie in the future at the moment it is set.
"""
not_in_past = """
A true value of `not_in_past` means that the date/time value of
the attribute must not lie in the past at the moment it is set.
"""
# end class _Doc_Map_
def _checkers (self, e_type, kind) :
for c in self.__super._checkers (e_type, kind) :
yield c
if self.not_in_future :
name = self.name
p_name = "%s__not_in_future" % name
check = MOM.Pred.Condition.__class__ \
( p_name, (MOM.Pred.Condition, )
, dict
( assertion = "%s <= now" % (name, )
, attributes = (name, )
, bindings = dict
( now = "Q.%s.NOW" % (self.Q_Name, )
)
, kind = MOM.Pred.Object
, name = p_name
, __doc__ = _ ("Value must not be in the future")
)
)
yield check
if self.not_in_past :
name = self.name
p_name = "%s__not_in_past" % name
check = MOM.Pred.Condition.__class__ \
( p_name, (MOM.Pred.Condition, )
, dict
( assertion = "%s >= now" % (name, )
, attributes = (name, )
, bindings = dict
( now = "Q.%s.NOW" % (self.Q_Name, )
)
, guard = "not playback_p"
, guard_attr = ("playback_p", )
, kind = MOM.Pred.Object_Init
, name = p_name
, __doc__ =
_ ("Value must be in the future, not the past")
)
)
yield check
# end def _checkers
# end class _A_Date_
class _A_Time_ (_A_DT_) :
"""Common base class for time-valued attributes of an object."""
example = "06:42"
completer = MOM.Attr.Completer_Spec (2)
typ = _ ("Time")
P_Type = datetime.time
Q_Ckd_Type = MOM.Attr.Querier.Time
Q_Name = "TIME"
ui_length = 8
_midnight_pat = Regexp (r"^24(:00){0,2}$")
_tuple_len = 6
_tuple_off = 3
class _Attributes (_A_DT_._Attributes) :
class hour (A_Int) :
"""Hour specified by time."""
kind = Attr.Query
query = Q.hour
# end class hour
class minute (A_Int) :
"""Minute specified by time."""
kind = Attr.Query
query = Q.minute
# end class minute
class second (A_Int) :
"""Second specified by time."""
kind = Attr.Query
query = Q.second
# end class second
# end class _Attributes
class Pickler (TFL.Meta.Object) :
Type = datetime.time
@classmethod
def as_cargo (cls, attr_kind, attr_type, value) :
return value
# end def as_cargo
@classmethod
def from_cargo (cls, scope, attr_kind, attr_type, cargo) :
if cargo is not None :
if isinstance (cargo, datetime.datetime) :
cargo = cargo.time ()
return cargo
# end def from_cargo
# end class Pickler
def as_rest_cargo_ckd (self, obj, * args, ** kw) :
value = self.kind.get_value (obj)
if value is not None :
return pyk.text_type (value.strftime ("%H:%M:%S"))
# end def as_rest_cargo_ckd
@TFL.Meta.Class_and_Instance_Method
def as_string (soc, value) :
if value is not None and value == soc.P_Type.max :
result = "24:00"
else :
### when called for the class, `soc.__super` doesn't
### work while `super (_A_Time_, soc)` does
result = super (_A_Time_, soc).as_string (value)
if result.endswith (":00") :
result = result [:-3]
return result
# end def as_string
@TFL.Meta.Class_and_Instance_Method
def cooked (soc, value) :
if isinstance (value, datetime.datetime) :
value = value.time ()
elif isinstance (value, pyk.string_types) :
try :
value = soc._from_string (value)
except ValueError :
raise TypeError (_T ("Time expected, got %r") % (value, ))
elif not isinstance (value, datetime.time) :
raise TypeError (_T ("Time expected, got %r") % (value, ))
return value
# end def cooked
@classmethod
def now (cls) :
return datetime.datetime.now ().time ()
# end def now
@TFL.Meta.Class_and_Instance_Method
def value_range_delta (soc, obj) :
from _CAL.Delta import Time_Delta
return Time_Delta (1)
# end def value_range_delta
@TFL.Meta.Class_and_Instance_Method
def _from_string (soc, value, obj = None) :
try :
return super (_A_Time_, soc)._from_string (value, obj)
except Exception :
if soc._midnight_pat.match (value) :
return soc.P_Type.max
raise
# end def _from_string
# end class _A_Time_
class A_Date (_A_Date_) :
"""Date value."""
example = "2010-10-10"
completer = MOM.Attr.Completer_Spec (4)
typ = _ ("Date")
P_Type = datetime.date
Q_Ckd_Type = MOM.Attr.Querier.Date
Q_Name = "DATE"
syntax = _ ("yyyy-mm-dd")
ui_length = 12
input_formats = \
( "%Y-%m-%d", "%Y/%m/%d", "%Y%m%d", "%Y %m %d"
, "%d/%m/%Y", "%d.%m.%Y", "%d-%m-%Y"
)
_tuple_len = 3
class _Doc_Map_ (_A_Date_._Doc_Map_) :
input_formats = """
The possible strftime-formats used to convert raw values to cooked
values.
"""
# end class _Doc_Map_
def as_rest_cargo_ckd (self, obj, * args, ** kw) :
value = self.kind.get_value (obj)
if value is not None :
return pyk.text_type (value.strftime ("%Y-%m-%d"))
# end def as_rest_cargo_ckd
@TFL.Meta.Class_and_Instance_Method
def cooked (soc, value) :
if isinstance (value, datetime.datetime) :
value = value.date ()
elif isinstance (value, pyk.string_types) :
try :
value = soc._from_string (value)
except ValueError :
msg = "Date expected, got %r" % (value, )
raise MOM.Error.Attribute_Syntax (None, soc, value, msg)
elif not isinstance (value, datetime.date) :
raise TypeError ("Date expected, got %r" % (value, ))
return value
# end def cooked
@classmethod
def now (cls) :
return datetime.datetime.now ().date ()
# end def now
@TFL.Meta.Class_and_Instance_Method
def value_range_delta (self, obj) :
from _CAL.Delta import Date_Delta
return Date_Delta (1)
# end def value_range_delta
# end class A_Date
class A_Date_List (_A_Typed_List_) :
"""List of dates."""
typ = _ ("Date_List")
C_Type = A_Date
# end class A_Date_List
class A_Time (_A_Time_) :
"""Time value."""
syntax = _ ("hh:mm:ss, the seconds `ss` are optional")
input_formats = ("%H:%M:%S", "%H:%M")
# end class A_Time
class A_Time_X (_A_Time_) :
"""Time value."""
syntax = _ \
("hh:mm:ss, the minutes `mm` and seconds `ss` are optional")
input_formats = ("%H:%M:%S", "%H:%M", "%M")
# end class A_Time_X
class A_Time_List (_A_Typed_List_) :
"""List of times."""
typ = _ ("Time_List")
C_range_sep = Regexp (r"(?: ?(?:-|–|\.\.) ?)")
C_Type = A_Time
# end class A_Time_List
class A_Date_Time (_A_Date_) :
"""Date-time value."""
example = "2010-10-10 06:42"
typ = _ ("Date-Time")
P_Type = datetime.datetime
Q_Name = "DATE_TIME"
syntax = _ ("yyyy-mm-dd hh:mm:ss, the time `hh:mm:ss` is optional")
ui_length = 22
rfc3339_format = "%Y-%m-%dT%H:%M:%S"
input_formats = tuple \
( itertools.chain
( * ( (f + " %H:%M:%S", f + " %H:%M", f)
for f in A_Date.input_formats
)
)
) + (rfc3339_format, )
utcoffset_fmt = "%+03d:%02d"
utcoffset_pat = Regexp (r" *[-+](?P<oh>\d{2}):(?P<om>\d{2}) *$")
_tuple_len = 6
### plain old inheritance doesn't work here because
### _M_Structured_ doesn't support that
_Attributes = _A_Date_._Attributes.__class__ \
( "_Attributes"
, (_A_DT_._Attributes, )
, dict (_A_Date_._Attributes.__dict__, ** _A_Time_._Attributes.__dict__)
)
def as_rest_cargo_ckd (self, obj, * args, ** kw) :
### formatted according to ISO 8601, RFC 3339
value = self.kind.get_value (obj)
if value is not None :
offset = TFL.user_config.time_zone.utcoffset (value)
v = value + offset
oh, os = divmod (offset.total_seconds (), 3600)
om = os // 60
fmt = self.rfc3339_format + (self.utcoffset_fmt % (oh, om))
return v.strftime (fmt)
# end def as_rest_cargo_ckd
@TFL.Meta.Class_and_Instance_Method
def as_string (soc, value) :
if value is not None :
### In Python 3.5, `bool (t)` is never False -> compare to `t.min`
v = value + TFL.user_config.time_zone.utcoffset (value)
t = v.time ()
fmt = A_Date.input_formats [0] if t == t.min \
else soc._output_format ()
result = v.strftime (fmt)
if result.endswith (":00") :
result = result [:-3]
return result
return ""
# end def as_string
@TFL.Meta.Class_and_Instance_Method
def cooked (soc, value) :
if not isinstance (value, datetime.datetime) :
if isinstance (value, datetime.date) :
value = datetime.datetime (value.year, value.month, value.day)
elif isinstance (value, pyk.string_types) :
try :
value = soc._from_string (value)
except ValueError :
raise TypeError \
(_T ("Date/time expected, got %r") % (value, ))
else :
raise TypeError (_T ("Date/time expected, got %r") % (value, ))
return value
# end def cooked
@classmethod
def now (cls) :
return datetime.datetime.utcnow ()
# end def now
@TFL.Meta.Class_and_Instance_Method
def value_range_delta (self, obj) :
from _CAL.Delta import Date_Time_Delta
return Date_Time_Delta (1)
# end def value_range_delta
@TFL.Meta.Class_and_Instance_Method
def _from_string (soc, s, obj = None) :
utcoffset = None
utcoffset_pat = soc.utcoffset_pat
if utcoffset_pat.search (s) :
oh = int (utcoffset_pat.oh)
om = int (utcoffset_pat.om)
s = s [: utcoffset_pat.start ()]
utcoffset = datetime.timedelta (0, (oh * 60 + om) * 60)
result = super (A_Date_Time, soc)._from_string (s, obj)
if utcoffset is None :
utcoffset = TFL.user_config.time_zone.utcoffset (result)
result -= utcoffset
return result
# end def _from_string
# end class A_Date_Time
class A_Date_Time_List (_A_Typed_List_) :
"""List of date/time elements."""
typ = _ ("Date_Time_List")
C_Type = A_Date_Time
# end class A_Date_Time_List
__sphinx__members = attr_types_of_module ()
__all__ = __sphinx__members
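### --- Illustrative sketch (not part of the original module) ---
### `_A_DT_._from_string` tries each entry of `input_formats` with
### `time.strptime` and builds `P_Type` from the matching `struct_time` slice
### (`_tuple_off` to `_tuple_len`).  Outside the MOM framework the same idea,
### applied to `A_Date`, looks roughly like:
###
###     import datetime, time
###     def parse_date (s, formats = A_Date.input_formats) :
###         for f in formats :
###             try :
###                 t = time.strptime (s.strip (), f)
###             except ValueError :
###                 continue
###             return datetime.date (* t [:3])
###         raise ValueError ("no format matched %r" % (s, ))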
if __name__ != "__main__" :
MOM.Attr._Export (* __all__)
### __END__ MOM.Attr.Date_Time
|
PypiClean
|
/pulumi_google_native-0.31.2a1689827148.tar.gz/pulumi_google_native-0.31.2a1689827148/pulumi_google_native/compute/alpha/interconnect.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['InterconnectArgs', 'Interconnect']
@pulumi.input_type
class InterconnectArgs:
def __init__(__self__, *,
admin_enabled: Optional[pulumi.Input[bool]] = None,
customer_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
interconnect_type: Optional[pulumi.Input['InterconnectInterconnectType']] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
link_type: Optional[pulumi.Input['InterconnectLinkType']] = None,
location: Optional[pulumi.Input[str]] = None,
macsec: Optional[pulumi.Input['InterconnectMacsecArgs']] = None,
macsec_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
noc_contact_email: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
remote_location: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
requested_features: Optional[pulumi.Input[Sequence[pulumi.Input['InterconnectRequestedFeaturesItem']]]] = None,
requested_link_count: Optional[pulumi.Input[int]] = None):
"""
        The set of arguments for constructing an Interconnect resource.
:param pulumi.Input[bool] admin_enabled: Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.
:param pulumi.Input[str] customer_name: Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
        :param pulumi.Input['InterconnectInterconnectType'] interconnect_type: Type of interconnect, which can take one of the following values: - PARTNER: A partner-managed interconnection shared between customers through a partner. - DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
:param pulumi.Input['InterconnectLinkType'] link_type: Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.
:param pulumi.Input[str] location: URL of the InterconnectLocation object that represents where this connection is to be provisioned.
:param pulumi.Input['InterconnectMacsecArgs'] macsec: Configuration to enable Media Access Control security (MACsec) on the Interconnect between Google and your on-premises router.
:param pulumi.Input[bool] macsec_enabled: Enable or disable MACsec on this Interconnect. MACsec enablement will fail if the macsec object is not specified.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[str] noc_contact_email: Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. This field is required for users who sign up for Cloud Interconnect using workforce identity federation.
:param pulumi.Input[str] remote_location: Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.
:param pulumi.Input[str] request_id: An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).
:param pulumi.Input[Sequence[pulumi.Input['InterconnectRequestedFeaturesItem']]] requested_features: Optional. List of features requested for this interconnect, which can take one of the following values: - MACSEC If specified then the interconnect will be created on MACsec capable hardware ports. If not specified, the default value is false, which will allocate non-MACsec capable ports first if available. This parameter can only be provided during interconnect INSERT and cannot be changed using interconnect PATCH. Please review Interconnect Pricing for implications on enabling this flag.
:param pulumi.Input[int] requested_link_count: Target number of physical links in the link bundle, as requested by the customer.
"""
if admin_enabled is not None:
pulumi.set(__self__, "admin_enabled", admin_enabled)
if customer_name is not None:
pulumi.set(__self__, "customer_name", customer_name)
if description is not None:
pulumi.set(__self__, "description", description)
if interconnect_type is not None:
pulumi.set(__self__, "interconnect_type", interconnect_type)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if link_type is not None:
pulumi.set(__self__, "link_type", link_type)
if location is not None:
pulumi.set(__self__, "location", location)
if macsec is not None:
pulumi.set(__self__, "macsec", macsec)
if macsec_enabled is not None:
pulumi.set(__self__, "macsec_enabled", macsec_enabled)
if name is not None:
pulumi.set(__self__, "name", name)
if noc_contact_email is not None:
pulumi.set(__self__, "noc_contact_email", noc_contact_email)
if project is not None:
pulumi.set(__self__, "project", project)
if remote_location is not None:
pulumi.set(__self__, "remote_location", remote_location)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
if requested_features is not None:
pulumi.set(__self__, "requested_features", requested_features)
if requested_link_count is not None:
pulumi.set(__self__, "requested_link_count", requested_link_count)
@property
@pulumi.getter(name="adminEnabled")
def admin_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.
"""
return pulumi.get(self, "admin_enabled")
@admin_enabled.setter
def admin_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "admin_enabled", value)
@property
@pulumi.getter(name="customerName")
def customer_name(self) -> Optional[pulumi.Input[str]]:
"""
Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.
"""
return pulumi.get(self, "customer_name")
@customer_name.setter
def customer_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="interconnectType")
def interconnect_type(self) -> Optional[pulumi.Input['InterconnectInterconnectType']]:
"""
        Type of interconnect, which can take one of the following values: - PARTNER: A partner-managed interconnection shared between customers through a partner. - DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.
"""
return pulumi.get(self, "interconnect_type")
@interconnect_type.setter
def interconnect_type(self, value: Optional[pulumi.Input['InterconnectInterconnectType']]):
pulumi.set(self, "interconnect_type", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="linkType")
def link_type(self) -> Optional[pulumi.Input['InterconnectLinkType']]:
"""
Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.
"""
return pulumi.get(self, "link_type")
@link_type.setter
def link_type(self, value: Optional[pulumi.Input['InterconnectLinkType']]):
pulumi.set(self, "link_type", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
URL of the InterconnectLocation object that represents where this connection is to be provisioned.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def macsec(self) -> Optional[pulumi.Input['InterconnectMacsecArgs']]:
"""
Configuration to enable Media Access Control security (MACsec) on the Interconnect between Google and your on-premises router.
"""
return pulumi.get(self, "macsec")
@macsec.setter
def macsec(self, value: Optional[pulumi.Input['InterconnectMacsecArgs']]):
pulumi.set(self, "macsec", value)
@property
@pulumi.getter(name="macsecEnabled")
def macsec_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable or disable MACsec on this Interconnect. MACsec enablement will fail if the macsec object is not specified.
"""
return pulumi.get(self, "macsec_enabled")
@macsec_enabled.setter
def macsec_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "macsec_enabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="nocContactEmail")
def noc_contact_email(self) -> Optional[pulumi.Input[str]]:
"""
Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. This field is required for users who sign up for Cloud Interconnect using workforce identity federation.
"""
return pulumi.get(self, "noc_contact_email")
@noc_contact_email.setter
def noc_contact_email(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "noc_contact_email", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="remoteLocation")
def remote_location(self) -> Optional[pulumi.Input[str]]:
"""
Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.
"""
return pulumi.get(self, "remote_location")
@remote_location.setter
def remote_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "remote_location", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
"""
An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).
"""
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
@property
@pulumi.getter(name="requestedFeatures")
def requested_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InterconnectRequestedFeaturesItem']]]]:
"""
Optional. List of features requested for this interconnect, which can take one of the following values: - MACSEC If specified then the interconnect will be created on MACsec capable hardware ports. If not specified, the default value is false, which will allocate non-MACsec capable ports first if available. This parameter can only be provided during interconnect INSERT and cannot be changed using interconnect PATCH. Please review Interconnect Pricing for implications on enabling this flag.
"""
return pulumi.get(self, "requested_features")
@requested_features.setter
def requested_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InterconnectRequestedFeaturesItem']]]]):
pulumi.set(self, "requested_features", value)
@property
@pulumi.getter(name="requestedLinkCount")
def requested_link_count(self) -> Optional[pulumi.Input[int]]:
"""
Target number of physical links in the link bundle, as requested by the customer.
"""
return pulumi.get(self, "requested_link_count")
@requested_link_count.setter
def requested_link_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "requested_link_count", value)
class Interconnect(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_enabled: Optional[pulumi.Input[bool]] = None,
customer_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
interconnect_type: Optional[pulumi.Input['InterconnectInterconnectType']] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
link_type: Optional[pulumi.Input['InterconnectLinkType']] = None,
location: Optional[pulumi.Input[str]] = None,
macsec: Optional[pulumi.Input[pulumi.InputType['InterconnectMacsecArgs']]] = None,
macsec_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
noc_contact_email: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
remote_location: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
requested_features: Optional[pulumi.Input[Sequence[pulumi.Input['InterconnectRequestedFeaturesItem']]]] = None,
requested_link_count: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Creates an Interconnect in the specified project using the data included in the request.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] admin_enabled: Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.
:param pulumi.Input[str] customer_name: Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
        :param pulumi.Input['InterconnectInterconnectType'] interconnect_type: Type of interconnect, which can take one of the following values: - PARTNER: A partner-managed interconnection shared between customers through a partner. - DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
:param pulumi.Input['InterconnectLinkType'] link_type: Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.
:param pulumi.Input[str] location: URL of the InterconnectLocation object that represents where this connection is to be provisioned.
:param pulumi.Input[pulumi.InputType['InterconnectMacsecArgs']] macsec: Configuration to enable Media Access Control security (MACsec) on the Interconnect between Google and your on-premises router.
:param pulumi.Input[bool] macsec_enabled: Enable or disable MACsec on this Interconnect. MACsec enablement will fail if the macsec object is not specified.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[str] noc_contact_email: Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. This field is required for users who sign up for Cloud Interconnect using workforce identity federation.
:param pulumi.Input[str] remote_location: Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.
:param pulumi.Input[str] request_id: An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).
:param pulumi.Input[Sequence[pulumi.Input['InterconnectRequestedFeaturesItem']]] requested_features: Optional. List of features requested for this interconnect, which can take one of the following values: - MACSEC If specified then the interconnect will be created on MACsec capable hardware ports. If not specified, the default value is false, which will allocate non-MACsec capable ports first if available. This parameter can only be provided during interconnect INSERT and cannot be changed using interconnect PATCH. Please review Interconnect Pricing for implications on enabling this flag.
:param pulumi.Input[int] requested_link_count: Target number of physical links in the link bundle, as requested by the customer.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[InterconnectArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates an Interconnect in the specified project using the data included in the request.
:param str resource_name: The name of the resource.
:param InterconnectArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InterconnectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_enabled: Optional[pulumi.Input[bool]] = None,
customer_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
interconnect_type: Optional[pulumi.Input['InterconnectInterconnectType']] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
link_type: Optional[pulumi.Input['InterconnectLinkType']] = None,
location: Optional[pulumi.Input[str]] = None,
macsec: Optional[pulumi.Input[pulumi.InputType['InterconnectMacsecArgs']]] = None,
macsec_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
noc_contact_email: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
remote_location: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
requested_features: Optional[pulumi.Input[Sequence[pulumi.Input['InterconnectRequestedFeaturesItem']]]] = None,
requested_link_count: Optional[pulumi.Input[int]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InterconnectArgs.__new__(InterconnectArgs)
__props__.__dict__["admin_enabled"] = admin_enabled
__props__.__dict__["customer_name"] = customer_name
__props__.__dict__["description"] = description
__props__.__dict__["interconnect_type"] = interconnect_type
__props__.__dict__["labels"] = labels
__props__.__dict__["link_type"] = link_type
__props__.__dict__["location"] = location
__props__.__dict__["macsec"] = macsec
__props__.__dict__["macsec_enabled"] = macsec_enabled
__props__.__dict__["name"] = name
__props__.__dict__["noc_contact_email"] = noc_contact_email
__props__.__dict__["project"] = project
__props__.__dict__["remote_location"] = remote_location
__props__.__dict__["request_id"] = request_id
__props__.__dict__["requested_features"] = requested_features
__props__.__dict__["requested_link_count"] = requested_link_count
__props__.__dict__["available_features"] = None
__props__.__dict__["circuit_infos"] = None
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["expected_outages"] = None
__props__.__dict__["google_ip_address"] = None
__props__.__dict__["google_reference_id"] = None
__props__.__dict__["interconnect_attachments"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["label_fingerprint"] = None
__props__.__dict__["operational_status"] = None
__props__.__dict__["peer_ip_address"] = None
__props__.__dict__["provisioned_link_count"] = None
__props__.__dict__["satisfies_pzs"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["self_link_with_id"] = None
__props__.__dict__["state"] = None
replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["project"])
opts = pulumi.ResourceOptions.merge(opts, replace_on_changes)
super(Interconnect, __self__).__init__(
'google-native:compute/alpha:Interconnect',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Interconnect':
"""
Get an existing Interconnect resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = InterconnectArgs.__new__(InterconnectArgs)
__props__.__dict__["admin_enabled"] = None
__props__.__dict__["available_features"] = None
__props__.__dict__["circuit_infos"] = None
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["customer_name"] = None
__props__.__dict__["description"] = None
__props__.__dict__["expected_outages"] = None
__props__.__dict__["google_ip_address"] = None
__props__.__dict__["google_reference_id"] = None
__props__.__dict__["interconnect_attachments"] = None
__props__.__dict__["interconnect_type"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["label_fingerprint"] = None
__props__.__dict__["labels"] = None
__props__.__dict__["link_type"] = None
__props__.__dict__["location"] = None
__props__.__dict__["macsec"] = None
__props__.__dict__["macsec_enabled"] = None
__props__.__dict__["name"] = None
__props__.__dict__["noc_contact_email"] = None
__props__.__dict__["operational_status"] = None
__props__.__dict__["peer_ip_address"] = None
__props__.__dict__["project"] = None
__props__.__dict__["provisioned_link_count"] = None
__props__.__dict__["remote_location"] = None
__props__.__dict__["request_id"] = None
__props__.__dict__["requested_features"] = None
__props__.__dict__["requested_link_count"] = None
__props__.__dict__["satisfies_pzs"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["self_link_with_id"] = None
__props__.__dict__["state"] = None
return Interconnect(resource_name, opts=opts, __props__=__props__)
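    # --- Illustrative note (not part of the generated code) ---
    # ``get`` only needs the provider ID of an existing resource, e.g.:
    #
    #     existing = Interconnect.get("imported-interconnect", id=interconnect_id)
    #
    # where ``interconnect_id`` is a placeholder for the resource's provider ID.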
@property
@pulumi.getter(name="adminEnabled")
def admin_enabled(self) -> pulumi.Output[bool]:
"""
Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.
"""
return pulumi.get(self, "admin_enabled")
@property
@pulumi.getter(name="availableFeatures")
def available_features(self) -> pulumi.Output[Sequence[str]]:
"""
[Output only] List of features available for this interconnect, which can take one of the following values: - MACSEC If present then the interconnect was created on MACsec capable hardware ports. If not present then the interconnect is provisioned on non-MACsec capable ports and MACsec enablement will fail.
"""
return pulumi.get(self, "available_features")
@property
@pulumi.getter(name="circuitInfos")
def circuit_infos(self) -> pulumi.Output[Sequence['outputs.InterconnectCircuitInfoResponse']]:
"""
A list of CircuitInfo objects, that describe the individual circuits in this LAG.
"""
return pulumi.get(self, "circuit_infos")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter(name="customerName")
def customer_name(self) -> pulumi.Output[str]:
"""
Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.
"""
return pulumi.get(self, "customer_name")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="expectedOutages")
def expected_outages(self) -> pulumi.Output[Sequence['outputs.InterconnectOutageNotificationResponse']]:
"""
A list of outages expected for this Interconnect.
"""
return pulumi.get(self, "expected_outages")
@property
@pulumi.getter(name="googleIpAddress")
def google_ip_address(self) -> pulumi.Output[str]:
"""
IP address configured on the Google side of the Interconnect link. This can be used only for ping tests.
"""
return pulumi.get(self, "google_ip_address")
@property
@pulumi.getter(name="googleReferenceId")
def google_reference_id(self) -> pulumi.Output[str]:
"""
Google reference ID to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.
"""
return pulumi.get(self, "google_reference_id")
@property
@pulumi.getter(name="interconnectAttachments")
def interconnect_attachments(self) -> pulumi.Output[Sequence[str]]:
"""
A list of the URLs of all InterconnectAttachments configured to use this Interconnect.
"""
return pulumi.get(self, "interconnect_attachments")
@property
@pulumi.getter(name="interconnectType")
def interconnect_type(self) -> pulumi.Output[str]:
"""
        Type of interconnect, which can take one of the following values: - PARTNER: A partner-managed interconnection shared between customers through a partner. - DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.
"""
return pulumi.get(self, "interconnect_type")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Type of the resource. Always compute#interconnect for interconnects.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="labelFingerprint")
def label_fingerprint(self) -> pulumi.Output[str]:
"""
A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Interconnect.
"""
return pulumi.get(self, "label_fingerprint")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Mapping[str, str]]:
"""
Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter(name="linkType")
def link_type(self) -> pulumi.Output[str]:
"""
Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.
"""
return pulumi.get(self, "link_type")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
URL of the InterconnectLocation object that represents where this connection is to be provisioned.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def macsec(self) -> pulumi.Output['outputs.InterconnectMacsecResponse']:
"""
Configuration to enable Media Access Control security (MACsec) on the Interconnect between Google and your on-premises router.
"""
return pulumi.get(self, "macsec")
@property
@pulumi.getter(name="macsecEnabled")
def macsec_enabled(self) -> pulumi.Output[bool]:
"""
Enable or disable MACsec on this Interconnect. MACsec enablement will fail if the macsec object is not specified.
"""
return pulumi.get(self, "macsec_enabled")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nocContactEmail")
def noc_contact_email(self) -> pulumi.Output[str]:
"""
Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. This field is required for users who sign up for Cloud Interconnect using workforce identity federation.
"""
return pulumi.get(self, "noc_contact_email")
@property
@pulumi.getter(name="operationalStatus")
def operational_status(self) -> pulumi.Output[str]:
"""
The current status of this Interconnect's functionality, which can take one of the following values: - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be provisioned on this Interconnect. - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.
"""
return pulumi.get(self, "operational_status")
@property
@pulumi.getter(name="peerIpAddress")
def peer_ip_address(self) -> pulumi.Output[str]:
"""
IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests.
"""
return pulumi.get(self, "peer_ip_address")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter(name="provisionedLinkCount")
def provisioned_link_count(self) -> pulumi.Output[int]:
"""
Number of links actually provisioned in this interconnect.
"""
return pulumi.get(self, "provisioned_link_count")
@property
@pulumi.getter(name="remoteLocation")
def remote_location(self) -> pulumi.Output[str]:
"""
Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.
"""
return pulumi.get(self, "remote_location")
@property
@pulumi.getter(name="requestId")
def request_id(self) -> pulumi.Output[Optional[str]]:
"""
An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).
"""
return pulumi.get(self, "request_id")
@property
@pulumi.getter(name="requestedFeatures")
def requested_features(self) -> pulumi.Output[Sequence[str]]:
"""
Optional. List of features requested for this interconnect, which can take one of the following values: - MACSEC If specified then the interconnect will be created on MACsec capable hardware ports. If not specified, the default value is false, which will allocate non-MACsec capable ports first if available. This parameter can only be provided during interconnect INSERT and cannot be changed using interconnect PATCH. Please review Interconnect Pricing for implications on enabling this flag.
"""
return pulumi.get(self, "requested_features")
@property
@pulumi.getter(name="requestedLinkCount")
def requested_link_count(self) -> pulumi.Output[int]:
"""
Target number of physical links in the link bundle, as requested by the customer.
"""
return pulumi.get(self, "requested_link_count")
@property
@pulumi.getter(name="satisfiesPzs")
def satisfies_pzs(self) -> pulumi.Output[bool]:
"""
Reserved for future use.
"""
return pulumi.get(self, "satisfies_pzs")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="selfLinkWithId")
def self_link_with_id(self) -> pulumi.Output[str]:
"""
Server-defined URL for this resource with the resource id.
"""
return pulumi.get(self, "self_link_with_id")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of Interconnect functionality, which can take one of the following values: - ACTIVE: The Interconnect is valid, turned up and ready to use. Attachments may be provisioned on this Interconnect. - UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be provisioned on this Interconnect. - UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.
"""
return pulumi.get(self, "state")
|
PypiClean
|
/NeurIPS22-CellSeg-0.0.1.tar.gz/NeurIPS22-CellSeg-0.0.1/baseline/data/model/unet_3class/20220710-1947_model_training_3class.py
|
import argparse
import os
join = os.path.join
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import monai
from monai.data import decollate_batch, PILReader
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.transforms import (
Activations,
AsChannelFirstd,
AddChanneld,
AsDiscrete,
Compose,
LoadImaged,
SpatialPadd,
RandSpatialCropd,
# RandCropByPosNegLabeld,
RandRotate90d,
ScaleIntensityd,
RandAxisFlipd,
RandZoomd,
RandGaussianNoised,
# RandShiftIntensityd,
RandAdjustContrastd,
RandGaussianSmoothd,
RandHistogramShiftd,
EnsureTyped,
EnsureType,
)
from baseline.unetr2d import UNETR2D
from monai.visualize import plot_2d_or_3d_image
import matplotlib.pyplot as plt
from datetime import datetime
import shutil
monai.config.print_config()
# logging.basicConfig(stream=sys.stdout, level=logging.INFO)
print('Successfully imported all requirements!')
def main():
parser = argparse.ArgumentParser('Baseline for Microscopy image segmentation', add_help=False)
# Dataset parameters
parser.add_argument('--data_path', default='./data/Train_Pre_3class/', type=str,
help='training data path; subfolders: images, labels')
parser.add_argument('--work_dir', default='./work_dir',
help='path where to save models and logs')
parser.add_argument('--seed', default=2022, type=int)
parser.add_argument('--resume', default=False,
help='resume from checkpoint')
parser.add_argument('--num_workers', default=4, type=int)
# Model parameters
    parser.add_argument('--model_name', default='unet', help='select model: unet, unetr, swinunetr')
    parser.add_argument('--num_class', default=3, type=int, help='segmentation classes')
    parser.add_argument('--input_size', default=256, type=int, help='input patch size')
# Training parameters
parser.add_argument('--batch_size', default=8, type=int, help='Batch size per GPU')
parser.add_argument('--max_epochs', default=2000, type=int)
parser.add_argument('--val_interval', default=2, type=int)
parser.add_argument('--epoch_tolerance', default=100, type=int)
parser.add_argument('--initial_lr', type=float, default=6e-4, help='learning rate')
# def main():
# args = parser.parse_args()
args = parser.parse_args()
#%% set training/validation split
np.random.seed(args.seed)
model_path = join(args.work_dir, args.model_name+'_3class')
os.makedirs(model_path, exist_ok=True)
run_id = datetime.now().strftime("%Y%m%d-%H%M")
shutil.copyfile(__file__, join(model_path, run_id + '_' + os.path.basename(__file__)))
img_path = join(args.data_path, 'images')
gt_path = join(args.data_path, 'labels')
img_names = sorted(os.listdir(img_path))
gt_names = [img_name.split('.')[0]+'_label.png' for img_name in img_names]
img_num = len(img_names)
val_frac = 0.1
indices = np.arange(img_num)
np.random.shuffle(indices)
val_split = int(img_num*val_frac)
train_indices = indices[val_split:]
val_indices = indices[:val_split]
train_files = [{"img": join(img_path, img_names[i]), "label": join(gt_path, gt_names[i])} for i in train_indices]
val_files = [{"img": join(img_path, img_names[i]), "label": join(gt_path, gt_names[i])} for i in val_indices]
print(f"training image num: {len(train_files)}, validation image num: {len(val_files)}")
#%% define transforms for image and segmentation
train_transforms = Compose(
[
LoadImaged(keys=["img", "label"], reader=PILReader, dtype=np.uint8), # image three channels (H, W, 3); label: (H, W)
AddChanneld(keys=["label"], allow_missing_keys=True), # label: (1, H, W)
AsChannelFirstd(keys=['img'], channel_dim=-1, allow_missing_keys=True), # image: (3, H, W)
ScaleIntensityd(keys=["img"], allow_missing_keys=True), # Do not scale label
SpatialPadd(keys=["img","label"], spatial_size=args.input_size),
RandSpatialCropd(keys=["img", "label"], roi_size=args.input_size, random_size=False),
RandAxisFlipd(keys=["img", "label"], prob=0.5),
RandRotate90d(keys=["img", "label"], prob=0.5, spatial_axes=[0, 1]),
# # intensity transform
RandGaussianNoised(keys=['img'], prob=0.25, mean=0, std=0.1),
RandAdjustContrastd(keys=["img"], prob=0.25, gamma=(1,2)),
RandGaussianSmoothd(keys=["img"], prob=0.25, sigma_x=(1,2)),
RandHistogramShiftd(keys=["img"], prob=0.25, num_control_points=3),
RandZoomd(keys=["img", "label"], prob=0.15, min_zoom=0.8, max_zoom=1.5, mode=['area', 'nearest']),
EnsureTyped(keys=["img", "label"]),
]
)
val_transforms = Compose(
[
LoadImaged(keys=["img", "label"], reader=PILReader, dtype=np.uint8),
AddChanneld(keys=["label"], allow_missing_keys=True),
AsChannelFirstd(keys=['img'], channel_dim=-1, allow_missing_keys=True),
ScaleIntensityd(keys=["img"], allow_missing_keys=True),
# AsDiscreted(keys=['label'], to_onehot=3),
EnsureTyped(keys=["img", "label"]),
]
)
#% define dataset, data loader
check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
check_loader = DataLoader(check_ds, batch_size=1, num_workers=4)
check_data = monai.utils.misc.first(check_loader)
print('sanity check:', check_data["img"].shape, torch.max(check_data["img"]), check_data["label"].shape, torch.max(check_data["label"]))
#%% create a training data loader
train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # training data loader; batch size comes from --batch_size (RandCropByPosNegLabeld is not used in this 3-class baseline)
train_loader = DataLoader(
train_ds,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=torch.cuda.is_available()
)
# create a validation data loader
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
val_loader = DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=1)
dice_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
post_pred = Compose([EnsureType(), Activations(softmax=True), AsDiscrete(threshold=0.5)])
post_gt = Compose([EnsureType(), AsDiscrete(to_onehot=None)])
# create UNet, DiceLoss and Adam optimizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if args.model_name.lower() == 'unet':
model = monai.networks.nets.UNet(
spatial_dims=2,
in_channels=3,
out_channels=args.num_class,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
if args.model_name.lower() == 'unetr':
model = UNETR2D(
in_channels=3,
out_channels=args.num_class,
img_size=(args.input_size, args.input_size),
feature_size=16,
hidden_size=768,
mlp_dim=3072,
num_heads=12,
pos_embed="perceptron",
norm_name="instance",
res_block=True,
dropout_rate=0.0,
).to(device)
if args.model_name.lower() == 'swinunetr':
model = monai.networks.nets.SwinUNETR(
img_size=(args.input_size, args.input_size),
in_channels=3,
out_channels=args.num_class,
feature_size=24, # should be divisible by 12
spatial_dims=2
).to(device)
loss_function = monai.losses.DiceCELoss(softmax=True)
initial_lr = args.initial_lr
optimizer = torch.optim.AdamW(model.parameters(), initial_lr)
# start a typical PyTorch training
max_epochs = args.max_epochs
epoch_tolerance = args.epoch_tolerance
val_interval = args.val_interval
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = list()
metric_values = list()
writer = SummaryWriter(model_path)
for epoch in range(1, max_epochs):
model.train()
epoch_loss = 0
for step, batch_data in enumerate(train_loader, 1):
inputs, labels = batch_data["img"].to(device), batch_data["label"].to(device)
optimizer.zero_grad()
outputs = model(inputs)
labels_onehot = monai.networks.one_hot(labels, args.num_class) # (b,cls,256,256)
loss = loss_function(outputs, labels_onehot)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_len = len(train_ds) // train_loader.batch_size
# print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch} average loss: {epoch_loss:.4f}")
checkpoint = {'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': epoch_loss_values,
}
if epoch>20 and epoch % val_interval == 0:
model.eval()
with torch.no_grad():
val_images = None
val_labels = None
val_outputs = None
for val_data in val_loader:
val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device)
val_labels_onehot = monai.networks.one_hot(val_labels, args.num_class)
roi_size = (256, 256)
sw_batch_size = 4
val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
val_outputs = [post_pred(i) for i in decollate_batch(val_outputs)]
val_labels_onehot = [post_gt(i) for i in decollate_batch(val_labels_onehot)]
# compute metric for current iteration
print(os.path.basename(val_data['img_meta_dict']['filename_or_obj'][0]),
dice_metric(y_pred=val_outputs, y=val_labels_onehot))
# aggregate the final mean dice result
metric = dice_metric.aggregate().item()
# reset the status for next validation round
dice_metric.reset()
metric_values.append(metric)
if metric > best_metric:
best_metric = metric
best_metric_epoch = epoch + 1
torch.save(checkpoint, join(model_path, "best_Dice_model.pth"))
print("saved new best metric model")
print(
"current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format(
epoch + 1, metric, best_metric, best_metric_epoch
)
)
writer.add_scalar("val_mean_dice", metric, epoch + 1)
# plot the last model output as GIF image in TensorBoard with the corresponding image and label
plot_2d_or_3d_image(val_images, epoch, writer, index=0, tag="image")
plot_2d_or_3d_image(val_labels, epoch, writer, index=0, tag="label")
plot_2d_or_3d_image(val_outputs, epoch, writer, index=0, tag="output")
if (epoch - best_metric_epoch) > epoch_tolerance:
print(f"validation metric does not improve for {epoch_tolerance} epochs! current {epoch=}, {best_metric_epoch=}")
break
print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
writer.close()
torch.save(checkpoint, join(model_path, 'final_model.pth'))
np.savez_compressed(join(model_path, 'train_log.npz'), val_dice=metric_values, epoch_loss=epoch_loss_values)
if __name__ == "__main__":
main()
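# Example invocation (a sketch, assuming the default Train_Pre_3class layout with images/ and
# labels/ subfolders; adjust paths and hyper-parameters to your own setup):
#
#   python 20220710-1947_model_training_3class.py \
#       --data_path ./data/Train_Pre_3class/ --work_dir ./work_dir \
#       --model_name unet --batch_size 8 --max_epochs 2000 --val_interval 2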
|
PypiClean
|
/pagarme-python-5.7.6.tar.gz/pagarme-python-5.7.6/pagarmecoreapi/controllers/base_controller.py
|
from pagarmecoreapi.api_helper import APIHelper
from pagarmecoreapi.http.http_context import HttpContext
from pagarmecoreapi.http.requests_client import RequestsClient
from pagarmecoreapi.exceptions.api_exception import APIException
class BaseController(object):
"""All controllers inherit from this base class.
Attributes:
http_client (HttpClient): The HttpClient which a specific controller
instance will use. By default all the controller objects share
the same HttpClient. A user can use his own custom HttpClient
as well.
http_call_back (HttpCallBack): An object which holds call back
methods to be called before and after the execution of an HttpRequest.
global_headers (dict): The global headers of the API which are sent with
every request.
"""
http_client = RequestsClient()
http_call_back = None
global_headers = {
'user-agent': 'PagarmeCoreApi - Python 5.7.6'
}
def __init__(self, client=None, call_back=None):
        if client is not None:
            self.http_client = client
        if call_back is not None:
            self.http_call_back = call_back
def validate_parameters(self, **kwargs):
"""Validates required parameters of an endpoint.
Args:
kwargs (dict): A dictionary of the required parameters.
"""
for name, value in kwargs.items():
if value is None:
raise ValueError("Required parameter {} cannot be None.".format(name))
def execute_request(self, request, binary=False):
"""Executes an HttpRequest.
Args:
request (HttpRequest): The HttpRequest to execute.
binary (bool): A flag which should be set to True if
a binary response is expected.
Returns:
HttpContext: The HttpContext of the request. It contains,
both, the request itself and the HttpResponse object.
"""
# Invoke the on before request HttpCallBack if specified
        if self.http_call_back is not None:
self.http_call_back.on_before_request(request)
# Add global headers to request
request.headers = APIHelper.merge_dicts(self.global_headers, request.headers)
# Invoke the API call to fetch the response.
func = self.http_client.execute_as_binary if binary else self.http_client.execute_as_string
response = func(request)
context = HttpContext(request, response)
# Invoke the on after response HttpCallBack if specified
        if self.http_call_back is not None:
self.http_call_back.on_after_response(context)
return context
def validate_response(self, context):
"""Validates an HTTP response by checking for global errors.
Args:
context (HttpContext): The HttpContext of the API call.
"""
if (context.response.status_code < 200) or (context.response.status_code > 208): #[200,208] = HTTP OK
raise APIException('HTTP response not OK.', context)
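# A minimal sketch of how a concrete controller might lean on the helpers above
# (class and method names are illustrative assumptions; the real controllers in this SDK are generated):
class _ExampleController(BaseController):
    def example_call(self, customer_id=None):
        # Reject missing required parameters before doing any HTTP work.
        self.validate_parameters(customer_id=customer_id)
        # A real controller would now build an HttpRequest, pass it to execute_request()
        # (which merges global_headers and runs the optional HttpCallBack hooks) and then
        # call validate_response(), which raises APIException for status codes outside 200-208.
        return customer_id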
|
PypiClean
|
/alpaca_historical_extract-1.25.0-py3-none-any.whl/alpaca_historical_extract/transform_library.py
|
import pandas as pd
import datetime as dt
import numpy as np
# non-standard libraries
# internal libraries
import dbmsIO
import core_library
def batch_barset_to_df(barset, timeFrame, actionsDf, dfCounter, fileName):
# print(barset)
# print('len(barset)', len(barset))
# print(fileName)
barsetKeys = list(barset.keys())
# print(barsetKeys)
totalBarSize = 0
# print (dfCounter)
nowTS = pd.Timestamp(dt.datetime.now().astimezone())
localTZ = nowTS.tzinfo
for symbol in barsetKeys:
if (len(barset[symbol]) > 0):
barsetDf = pd.DataFrame.from_dict(barset[symbol])
barsetDf.columns = [x.upper() for x in barsetDf.columns.to_list()]
barsetDf['T'] = pd.to_datetime(barsetDf['T'])
#barsetDf['T'] = pd.to_datetime(barsetDf['T']).dt.tz_convert(localTZ)
barsetDf.insert(0, 'SYMBOL', symbol)
barsetDf['SOURCE'] = 'IPX'
barsetDf['TIMEFRAME'] = timeFrame
barsetDf = barsetDf.rename(
columns={'T': 'DATETIME', "O": "OPEN", 'H': 'HIGH', 'L': 'LOW', 'C': 'CLOSE', 'V': 'VOLUME'})
barSize = barsetDf.memory_usage(deep=True).sum()
totalBarSize = totalBarSize + barSize
# print(barsetDf)
barsetDf = split_div_correction(df=barsetDf, actionsDf=actionsDf)
# print(barsetDf.head(10).to_string())
# print(barsetDf.tail(10).to_string())
dbmsIO.to_csv(position=dfCounter, data=barsetDf, tableName=fileName)
dfCounter = dfCounter + 1
return totalBarSize, dfCounter
def object_to_df(obj):
#print(obj[0])
outputDF = pd.DataFrame()
if len(obj) > 0:
assetList=[]
for i in obj:
#print()
assetList.append(i.__dict__['_raw'])
outputDF = pd.DataFrame().from_dict(assetList)
outputDF.columns = [x.upper() for x in outputDF.columns]
#print(outputDF.head(10).to_string())
return outputDF
else:
return pd.DataFrame()
def split_div_correction(df, actionsDf):
df['SPLIT CO-EFFICTIENT'] = 1
# print(actionsDf)
if len(actionsDf) > 0:
splits = actionsDf[['SYMBOL', 'DATETIME', 'SPLITS']]
splits = splits[splits['SYMBOL'] == df.iloc[0]['SYMBOL']]
splits['DATETIME'] = pd.to_datetime(splits['DATETIME']).dt.tz_localize('UTC').dt.tz_convert(
pd.Timestamp(dt.datetime.now().astimezone()).tzinfo)
splits = splits.loc[splits['SPLITS'] != 0].reset_index(drop=True)
splitsValue = 1
# print('SPLITS:',splits)
for i in range(len(splits) - 1, -1, -1):
if (splitsValue != splits.iloc[i]['SPLITS']):
splitsValue = splitsValue * splits.iloc[i]['SPLITS']
# print(splitsValue)
df.loc[df['DATETIME'] < splits.iloc[i]['DATETIME'], 'NEW SPLIT CO-EFFICTIENT'] = splits.iloc[i]['SPLITS']
df.loc[df['DATETIME'] < splits.iloc[i]['DATETIME'], 'SPLIT CO-EFFICTIENT'] = splitsValue
for i in df.columns.to_list():
if i in ['OPEN', 'HIGH', 'LOW', 'CLOSE']:
df[i] = df[i] / df['SPLIT CO-EFFICTIENT']
df = df.drop('SPLIT CO-EFFICTIENT', axis=1)
# print(df.head(5).to_string())
if 'NEW SPLIT CO-EFFICTIENT' in df.columns.to_list():
df = df.drop('NEW SPLIT CO-EFFICTIENT', axis=1)
return df
else:
return df
def get_slope(subDF, subset):
# print('calculating Slope')
# print(subDF,subset)
slope = 0
slopeDf = pd.DataFrame()
slopeDf['Y'] = subDF[subset]
slopeDf['X'] = pd.Series(slopeDf.index.to_list()) + 1
slopeDf['XY'] = slopeDf['X'] * slopeDf['Y']
slopeDf['XX'] = slopeDf['X'] * slopeDf['X']
slopeDf['YY'] = slopeDf['Y'] * slopeDf['Y']
# print(slopeDf)
n = len(slopeDf)
sumXY = slopeDf['XY'].sum()
sumXX = slopeDf['XX'].sum()
sumX = slopeDf['X'].sum()
sumY = slopeDf['Y'].sum()
top = ((n * sumXY) - (sumX * sumY))
bottom = ((n * sumXX) - (sumX * sumX))
# print(n,sumY,sumX,sumXY)
# print("top",top,"bottom",bottom)
# print('SLOPE CHECK')
# print(slope)
# print(top/bottom)
# slope = top/bottom
if bottom == 0:
slope = 0
else:
slope = top / bottom
# print(slope)
return slope
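def _get_slope_example():
    """Illustrative sketch (added example, not called anywhere in this module): for a
    perfectly linear series the least-squares slope returned by get_slope() equals the step size."""
    demo = pd.DataFrame({'CLOSE': [10.0, 11.0, 12.0, 13.0]})
    # (n*sum(xy) - sum(x)*sum(y)) / (n*sum(xx) - sum(x)**2) evaluates to 1.0 for this series
    return get_slope(demo, 'CLOSE')  # expected: 1.0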
def df_stat_calcs(subDF, verbose=True):
global counter
global increment
global setPoint
global tStart
global tempDt
subDF = subDF.reset_index(drop=True)
tempDF = {}
tempDF['SYMBOL'] = subDF.iloc[0]['SYMBOL']
if (len(subDF) != 0):
tempDF['START TIMESTAMP'] = subDF.iloc[0]['DATETIME']
tempDF['END TIMESTAMP'] = subDF.iloc[-1]['DATETIME']
tempDF['START PRICE'] = subDF.iloc[0]['OPEN']
tempDF['END PRICE'] = subDF.iloc[-1]['CLOSE']
tempDF['GAIN'] = subDF.iloc[-1]['CLOSE'] - subDF.iloc[0]['OPEN']
if subDF.iloc[0]['OPEN'] == 0:
tempDF['% GAIN'] = 0
else:
tempDF['% GAIN'] = ((subDF.iloc[-1]['CLOSE'] - subDF.iloc[0]['OPEN']) / subDF.iloc[0]['OPEN']) * 100
tempDF['AVG OPEN'] = subDF['OPEN'].mean()
tempDF['AVG LOW'] = subDF['LOW'].mean()
tempDF['AVG HIGH'] = subDF['HIGH'].mean()
tempDF['AVG CLOSE'] = subDF['CLOSE'].mean()
tempDF['MIN'] = subDF['LOW'].min()
tempDF['25%'] = np.percentile(subDF['CLOSE'], 25)
tempDF['MEDIAN'] = subDF['CLOSE'].median()
tempDF['75%'] = np.percentile(subDF['CLOSE'], 75)
tempDF['MAX'] = subDF['HIGH'].max()
if (len(subDF) != 0):
# slope, intercept, r_value, p_value, std_err = stats.linregress(subDF.index, subDF['CLOSE'])
tempDF['SLOPE'] = get_slope(subDF, 'CLOSE')
tempDF['% SLOPE'] = get_slope(subDF, 'CLOSE') / (subDF['CLOSE']).mean() * 100
tempDF['VOLUME TREND'] = get_slope(subDF, 'VOLUME')
# tempDF['% Volume Trend'] = getSlope(subDF, 'VOLUME')/ (subDF['VOLUME']).mean() * 100
dy = subDF['CLOSE'] - subDF['OPEN']
tempDF['AVG INTERVAL SLOPE'] = dy.mean()
tempDF['STD DEV'] = subDF['CLOSE'].std()
tempDF['% STD DEV'] = (subDF['CLOSE'].std()) / subDF['CLOSE'].mean()
tempDF['AVG RANGE'] = (subDF['HIGH'] - subDF['LOW']).mean()
tempDF['% AVG RANGE'] = ((subDF['HIGH'] - subDF['LOW']) / (subDF['HIGH'] - subDF['LOW']).mean() * 100).mean()
tempDF['AVG PRICE'] = ((subDF['CLOSE'] + subDF['OPEN'] + subDF['HIGH'] + subDF['LOW']) / 4).mean()
if (len(subDF) != 0):
# print(subDF.iloc[0]['CLOSE'],subDF.iloc[-1]['CLOSE'],(subDF.iloc[0]['CLOSE'] - subDF.iloc[-1]['CLOSE']),((subDF.iloc[-1]['CLOSE']-subDF.iloc[0]['CLOSE'])/subDF.iloc[0]['CLOSE'])*100)
tempDF['TIME DELTA'] = (subDF.iloc[-1]['DATETIME'] - subDF.iloc[0]['DATETIME'])
# tempDF['FIDELITY'] = round(((len(subDF) / recordLen) * 100), 2)
tempDF['DATA POINTS'] = len(subDF)
tempDF['TIMEFRAME'] = subDF.iloc[0]['TIMEFRAME']
# tempDF['INTERVAL'] = subDF['INTERVAL']
tempDF['AVG VOLUME'] = subDF['VOLUME'].mean()
counter = counter + 1
if (verbose):
percentComplete = counter / len(tckrs)
if percentComplete * 100 > setPoint:
timeThusFar = dt.datetime.now() - tStart
percentLeft = 1 - percentComplete
timeLeft = ((timeThusFar * percentLeft) / (percentComplete))
print(dt.datetime.now(),
"| Completed", round(percentComplete * 100, 2), "% Stat Transformation completed. Time for Calc:",
dt.timedelta(seconds=(dt.datetime.now() - tempDt).seconds),
"| Predicted Time left:", timeLeft,
"| Predicted Total Time for complete calculation:", timeThusFar + timeLeft)
# print(round(percentComplete,2),"% Stat Transformation completed. Time for Calc:",dt.timedelta(seconds=(dt.datetime.now() - tempTimer).seconds),"| Predicted Time left:",(100-percentDone)*dt.timedelta(seconds=(dt.datetime.now() - tempTimer).seconds))
tempDt = dt.datetime.now()
setPoint = setPoint + increment
return pd.Series(tempDF, index=tempDF.keys())
def m_data_to_stats(df, fileName, verbose=True):
global counter
global increment
global setPoint
global tStart
global tempDt
global tckrs
if verbose:
print("Converting Raw", fileName, "to Stats")
tckrs = df['SYMBOL'].unique()
counter = 0
increment = 10
setPoint = increment
tStart = dt.datetime.now()
tempDt = dt.datetime.now()
outputDf = df.groupby('SYMBOL').apply(df_stat_calcs, verbose=verbose)
for col in ['SLOPE', '% SLOPE', 'STD DEV', '% STD DEV']:
outputDf[col] = outputDf[col].fillna(0)
for col in ['% GAIN', '% AVG RANGE', '% SLOPE', '% STD DEV', 'AVG VOLUME']:
percentile = col + " PERCENTILE"
outputDf[percentile] = round(outputDf[col].rank(pct=True) * 100, 2)
outputDf['ABSOLUTE SLOPE PERCENTILE'] = round(outputDf['% SLOPE'].abs().rank(pct=True, ascending=True) * 100, 2)
outputDf['FIDELITY'] = round(((outputDf['DATA POINTS'] / outputDf['DATA POINTS'].max()) * 100), 0)
if (len(outputDf) > 1):
outputDf = outputDf.dropna(how='any').reset_index(drop=True)
else:
outputDf = outputDf.reset_index(drop=True)
if verbose:
print('Data Processing Success rate:', len(outputDf) / len(tckrs) * 100, '% (', len(outputDf), '/', len(tckrs), ')')
print("Total time to run calculations:", dt.datetime.now() - tStart)
core_library.log_entry(logFile="project_log.txt",
logText=("Total time to run calculations: ", str(dt.datetime.now() - tStart)), logMode='a',
gap=False)
# print("input memory usage:", round(df.memory_usage(deep=True).sum() / 1000000, 2), 'MB')
# print("output memory usage:", round(outputDf.memory_usage(deep=True).sum() / 1000000, 2), 'MB')
# print(outputDf.to_string())
return outputDf
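# A minimal usage sketch (assumption: a tiny in-memory bar set with the columns this module
# expects; real callers feed bars pulled from Alpaca and persisted through dbmsIO):
if __name__ == '__main__':
    _demo_bars = pd.DataFrame({
        'SYMBOL': ['TEST'] * 3,
        'DATETIME': pd.date_range('2022-01-03', periods=3, freq='D'),
        'OPEN': [10.0, 10.5, 11.0],
        'HIGH': [10.6, 11.1, 11.4],
        'LOW': [9.9, 10.4, 10.9],
        'CLOSE': [10.5, 11.0, 11.2],
        'VOLUME': [1000, 1200, 900],
        'TIMEFRAME': ['1Day'] * 3,
    })
    print(m_data_to_stats(_demo_bars, 'demo_bars', verbose=False).to_string())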
|
PypiClean
|
/neticspy-0.1.5.tar.gz/neticspy-0.1.5/CONTRIBUTING.rst
|
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
Bug reports
===========
When `reporting a bug <https://github.com/bhi-kimlab/neticspy/issues>`_ please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Documentation improvements
==========================
NetICSpy could always use more documentation, whether as part of the
official NetICSpy docs, in docstrings, or even on the web in blog posts,
articles, and such.
Feature requests and feedback
=============================
The best way to send feedback is to file an issue at https://github.com/bhi-kimlab/neticspy/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that code contributions are welcome :)
Development
===========
To set up `NetICSpy` for local development:
1. Fork `NetICSpy <https://github.com/bhi-kimlab/neticspy>`_
(look for the "Fork" button).
2. Clone your fork locally::
    git clone git@github.com:YOURGITHUBNAME/netics.git
3. Create a branch for local development::
git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
4. When you're done making changes, run all the checks and the docs builder with `tox <https://tox.readthedocs.io/en/latest/install.html>`_ in one command::
tox
5. Commit your changes and push your branch to GitHub::
git add .
git commit -m "Your detailed description of your changes."
git push origin name-of-your-bugfix-or-feature
6. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
If you need some code review or feedback while you're developing the code, just make the pull request.
For merging, you should:
1. Include passing tests (run ``tox``) [1]_.
2. Update documentation when there's new API, functionality etc.
3. Add a note to ``CHANGELOG.rst`` about the changes.
4. Add yourself to ``AUTHORS.rst``.
.. [1] If you don't have all the necessary Python versions available locally, you can rely on Travis - it will
`run the tests <https://travis-ci.org/bhi-kimlab/neticspy/pull_requests>`_ for each change you add in the pull request.
It will be slower though ...
Tips
----
To run a subset of tests::
tox -e envname -- pytest -k test_myfeature
To run all the test environments in *parallel*::
tox -p auto
|
PypiClean
|
/esi_requests-0.0.1.10-py3-none-any.whl/esi_requests/sso/utils.py
|
import pyperclip as pc
import sys
from subprocess import check_call, CalledProcessError, DEVNULL
from esi_requests.log import getLogger
logger = getLogger(__name__)
def to_clipboard(msg: str) -> None:
"""Copies msg to clipboard.
Copies msg to clipboard using Pyperclip package.
By default, copy should work on Windows and MacOS.
Linux needs one of xclip/xsel/gtk/PyQt4 to make it work.
Some Linux distributions might not have any of these,
    so it also tries to install xclip or xsel if possible.
"""
if sys.platform == "linux": # check xclip/xsel
xclip_installed = debian_package_check("xclip")
xsel_installed = debian_package_check("xsel")
dependency_satisfied = xclip_installed or xsel_installed
if not xclip_installed and not dependency_satisfied:
dependency_satisfied = debian_package_install("xclip")
if not xsel_installed and not dependency_satisfied:
dependency_satisfied = debian_package_install("xsel")
try:
pc.copy(msg)
logger.debug("Copy msg to clipboard successful: %s", msg)
except pc.PyperclipException as pc_exc:
if sys.platform == "linux": # linux2 prior to Python 3.3
if not dependency_satisfied:
logger.error(
"Pyperclip NotImplementedError: needs copy/paste mechanism for Linux: xclip or xsel"
)
raise SystemExit(
"With linux, one of xclip, xsel, gtk, PyQt4 is necessary. apt-get install xclip and xsel failed. Try to manually install them using sudo apt-get install, or get gtk or PyQt4 modules installed. See https://pypi.org/project/pyperclip/ for more."
) from pc_exc
# pc.copy() should work in MacOS and Windows by default
raise
def debian_package_check(name: str) -> bool:
"""Checks if a debian package is installed.
Should only be called under Linux system."""
if sys.platform != "linux":
        raise NotImplementedError
try:
import apt
except ImportError as exc:
if not debian_package_install("python3-apt") and not debian_package_install("python-apt"):
raise SystemExit("Missing package apt. Install using sudo apt-get install python3-apt or sudo apt-get install python-apt.") from exc
else:
# python3-apt or python-apt installed
import apt
db_packages = apt.Cache()
package = db_packages.get(name)
return package is not None and package.is_installed
def debian_package_install(name: str) -> bool:
"""Tries to install a debian package using sudo apt-get install.
    For some users, this could succeed because they already run Python with sudo rights.
    In case the user does not grant sudo to Python, a password prompt might appear,
    or the call might fail, depending on the Python interpreter.
    Note:
        Running check_call from IDLE will fail. Run from the command line, or in VS Code.
"""
try:
cmd = "sudo apt-get install -y {}".format(name)
check_call(
cmd.split(" "),
stdout=DEVNULL,
stderr=DEVNULL,
)
logger.info("Installed xclip using: %s", cmd)
return True
except CalledProcessError as grepexc:
logger.error(
f"FAILED Package install: {grepexc.cmd}: {grepexc.returncode} - {grepexc.output}"
)
return False
def read_clipboard() -> str:
"""Reads clipboard using Pyperclip."""
return pc.paste()
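# A minimal usage sketch (added illustration; the message text is arbitrary):
if __name__ == "__main__":
    to_clipboard("hello from esi_requests")  # on a bare Linux setup this may try to install xclip/xsel
    print(read_clipboard())                  # expected to echo the copied message back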
|
PypiClean
|
/eisenmp_examples-0.5.1.tar.gz/eisenmp_examples-0.5.1/eisenmp_examples/worker/eisenmp_exa_wrk_double.py
|
import time
def worker_entrance(toolbox):
"""
- WORKER - Called in a loop.
"""
print('Name |id() |reference ')
print(*toolbox.Q_NAME_ID_LST)
audio_chunk_lst, video_chunk_lst = None, None
    if not toolbox.WORKER_ID % 2:  # even worker id (WORKER_ID % 2 == 0)
audio_chunk_lst = batch_1_audio_get(toolbox)
video_chunk_lst = batch_1_video_get(toolbox) # batch_1_video_get(toolbox) ['head_1', 'foo', 'bar', 'buz']
    if toolbox.WORKER_ID % 2:  # odd worker id (WORKER_ID % 2 == 1)
audio_chunk_lst = batch_7_audio_get(toolbox)
video_chunk_lst = batch_7_video_get(toolbox)
print(f'....{toolbox.WORKER_ID} {audio_chunk_lst} {video_chunk_lst}')
busy = template_worker(toolbox, audio_chunk_lst, video_chunk_lst) # worker function
if not busy:
return False
return True
def batch_1_video_get(toolbox):
""""""
while 1:
if not toolbox.batch_1['video_in'].empty():
lst = toolbox.batch_1['video_in'].get()
toolbox.num_of_lists += 1 # list counter prn screen, ModuleConfiguration self.num_of_lists
return lst
def batch_1_audio_get(toolbox):
""""""
while 1:
if not toolbox.batch_1['audio_lg'].empty():
lst = toolbox.batch_1['audio_lg'].get()
return lst
def batch_7_video_get(toolbox):
""""""
while 1:
if not toolbox.batch_7['video_in'].empty():
lst = toolbox.batch_7['video_in'].get()
toolbox.num_of_lists += 1 # list counter prn screen
return lst
def batch_7_audio_get(toolbox):
""""""
while 1:
if not toolbox.batch_7['audio_lg'].empty():
lst = toolbox.batch_7['audio_lg'].get()
return lst
def remove_header(lst):
"""Transport ticket with consecutive number.
Remove if no recreation of order is necessary.
Can reuse list for result, if rebuild order.
"""
del lst[0] # remove header str
def send_eta_data(toolbox, lst):
"""list of [PERF_HEADER_ETA, PERF_CURRENT_ETA] to ProcInfo, to calc arrival time ETA
pure option, broken in version 0.4
"""
toolbox.PERF_CURRENT_ETA = len(lst)
perf_lst = [toolbox.PERF_HEADER_ETA + toolbox.WORKER_NAME, # binary head
toolbox.PERF_CURRENT_ETA]
# disable info q will block all
toolbox.mp_info_q.put(perf_lst) # ProcInfo calc arrival time and % from info_q, of all proc lists
def send_output(toolbox, row_aud, row_vid):
"""Put your findings in the output list.
Find results in the 'eisenmp_utils.Result.result_dict'
    :param toolbox: worker toolbox with the output queue
    :param row_aud: audio result row
    :param row_vid: video result row
"""
# header for output result list
header = toolbox.OUTPUT_HEADER + toolbox.header_aud # q collector can distinguish qs and store result in dict
result_lst = [header,
row_aud] # your findings here
toolbox.mp_output_q.put(result_lst)
header = toolbox.OUTPUT_HEADER + toolbox.header_vid # q collector can distinguish qs and store result in dict
result_lst = [header,
row_vid] # your findings here
toolbox.mp_output_q.put(result_lst)
def template_worker(toolbox, audio_chunk_lst, video_chunk_lst):
"""
"""
busy = True
toolbox.header_aud = audio_chunk_lst[0]
toolbox.header_vid = video_chunk_lst[0]
remove_header(audio_chunk_lst) # remove list header with serial number if no reassembling
remove_header(video_chunk_lst)
    for idx, row_aud in enumerate(audio_chunk_lst):
        row_vid = video_chunk_lst[idx]
if toolbox.STOP_MSG in str(row_aud) or toolbox.STOP_MSG in str(row_vid): # stop is str
return False
else:
            msg = f'worker: {toolbox.WORKER_ID} cat: {toolbox.header_aud} {toolbox.header_vid} ' \
                  f'audio: {row_aud} vid: {row_vid} list({toolbox.num_of_lists})'
msg_col = None
if not toolbox.WORKER_ID % 2:
msg_col = toolbox.BLUE
if toolbox.WORKER_ID % 2:
msg_col = toolbox.RED
msg_show = msg_col + msg + toolbox.END
toolbox.mp_print_q.put(msg_show)
# output result
send_output(toolbox, row_aud, row_vid)
time.sleep(.2)
send_eta_data(toolbox, audio_chunk_lst)
return busy
|
PypiClean
|
/py-evm-0.7.0a3.tar.gz/py-evm-0.7.0a3/eth/vm/mnemonics.py
|
STOP = "STOP"
ADD = "ADD"
MUL = "MUL"
SUB = "SUB"
DIV = "DIV"
SDIV = "SDIV"
MOD = "MOD"
SMOD = "SMOD"
ADDMOD = "ADDMOD"
MULMOD = "MULMOD"
EXP = "EXP"
SIGNEXTEND = "SIGNEXTEND"
SHL = "SHL"
SHR = "SHR"
SAR = "SAR"
#
# Comparisons
#
LT = "LT"
GT = "GT"
SLT = "SLT"
SGT = "SGT"
EQ = "EQ"
ISZERO = "ISZERO"
AND = "AND"
OR = "OR"
XOR = "XOR"
NOT = "NOT"
BYTE = "BYTE"
#
# Sha3
#
SHA3 = "SHA3"
#
# Environment Information
#
ADDRESS = "ADDRESS"
BALANCE = "BALANCE"
SELFBALANCE = "SELFBALANCE"
ORIGIN = "ORIGIN"
CALLER = "CALLER"
CALLVALUE = "CALLVALUE"
CALLDATALOAD = "CALLDATALOAD"
CALLDATASIZE = "CALLDATASIZE"
CALLDATACOPY = "CALLDATACOPY"
CODESIZE = "CODESIZE"
CODECOPY = "CODECOPY"
GASPRICE = "GASPRICE"
EXTCODESIZE = "EXTCODESIZE"
EXTCODECOPY = "EXTCODECOPY"
EXTCODEHASH = "EXTCODEHASH"
RETURNDATASIZE = "RETURNDATASIZE"
RETURNDATACOPY = "RETURNDATACOPY"
CHAINID = "CHAINID"
#
# Block Information
#
BLOCKHASH = "BLOCKHASH"
COINBASE = "COINBASE"
TIMESTAMP = "TIMESTAMP"
NUMBER = "NUMBER"
DIFFICULTY = "DIFFICULTY"
PREVRANDAO = "PREVRANDAO"
GASLIMIT = "GASLIMIT"
BASEFEE = "BASEFEE"
#
# Stack, Memory, Storage and Flow Operations
#
POP = "POP"
MLOAD = "MLOAD"
MSTORE = "MSTORE"
MSTORE8 = "MSTORE8"
SLOAD = "SLOAD"
SSTORE = "SSTORE"
JUMP = "JUMP"
JUMPI = "JUMPI"
PC = "PC"
MSIZE = "MSIZE"
GAS = "GAS"
JUMPDEST = "JUMPDEST"
REVERT = "REVERT"
#
# Push Operations
#
PUSH0 = "PUSH0"
PUSH1 = "PUSH1"
PUSH2 = "PUSH2"
PUSH3 = "PUSH3"
PUSH4 = "PUSH4"
PUSH5 = "PUSH5"
PUSH6 = "PUSH6"
PUSH7 = "PUSH7"
PUSH8 = "PUSH8"
PUSH9 = "PUSH9"
PUSH10 = "PUSH10"
PUSH11 = "PUSH11"
PUSH12 = "PUSH12"
PUSH13 = "PUSH13"
PUSH14 = "PUSH14"
PUSH15 = "PUSH15"
PUSH16 = "PUSH16"
PUSH17 = "PUSH17"
PUSH18 = "PUSH18"
PUSH19 = "PUSH19"
PUSH20 = "PUSH20"
PUSH21 = "PUSH21"
PUSH22 = "PUSH22"
PUSH23 = "PUSH23"
PUSH24 = "PUSH24"
PUSH25 = "PUSH25"
PUSH26 = "PUSH26"
PUSH27 = "PUSH27"
PUSH28 = "PUSH28"
PUSH29 = "PUSH29"
PUSH30 = "PUSH30"
PUSH31 = "PUSH31"
PUSH32 = "PUSH32"
#
# Duplicate Operations
#
DUP1 = "DUP1"
DUP2 = "DUP2"
DUP3 = "DUP3"
DUP4 = "DUP4"
DUP5 = "DUP5"
DUP6 = "DUP6"
DUP7 = "DUP7"
DUP8 = "DUP8"
DUP9 = "DUP9"
DUP10 = "DUP10"
DUP11 = "DUP11"
DUP12 = "DUP12"
DUP13 = "DUP13"
DUP14 = "DUP14"
DUP15 = "DUP15"
DUP16 = "DUP16"
#
# Exchange Operations
#
SWAP1 = "SWAP1"
SWAP2 = "SWAP2"
SWAP3 = "SWAP3"
SWAP4 = "SWAP4"
SWAP5 = "SWAP5"
SWAP6 = "SWAP6"
SWAP7 = "SWAP7"
SWAP8 = "SWAP8"
SWAP9 = "SWAP9"
SWAP10 = "SWAP10"
SWAP11 = "SWAP11"
SWAP12 = "SWAP12"
SWAP13 = "SWAP13"
SWAP14 = "SWAP14"
SWAP15 = "SWAP15"
SWAP16 = "SWAP16"
#
# Logging
#
LOG0 = "LOG0"
LOG1 = "LOG1"
LOG2 = "LOG2"
LOG3 = "LOG3"
LOG4 = "LOG4"
#
# System
#
CREATE = "CREATE"
CREATE2 = "CREATE2"
CALL = "CALL"
CALLCODE = "CALLCODE"
STATICCALL = "STATICCALL"
RETURN = "RETURN"
DELEGATECALL = "DELEGATECALL"
SELFDESTRUCT = "SELFDESTRUCT"
|
PypiClean
|
/lsv2test-core-2.0.0.tar.gz/lsv2test-core-2.0.0/localstack/aws/api/firehose/__init__.py
|
import sys
from datetime import datetime
from typing import Dict, List, Optional
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
AWSKMSKeyARN = str
AmazonOpenSearchServerlessBufferingIntervalInSeconds = int
AmazonOpenSearchServerlessBufferingSizeInMBs = int
AmazonOpenSearchServerlessCollectionEndpoint = str
AmazonOpenSearchServerlessIndexName = str
AmazonOpenSearchServerlessRetryDurationInSeconds = int
AmazonopensearchserviceBufferingIntervalInSeconds = int
AmazonopensearchserviceBufferingSizeInMBs = int
AmazonopensearchserviceClusterEndpoint = str
AmazonopensearchserviceDomainARN = str
AmazonopensearchserviceIndexName = str
AmazonopensearchserviceRetryDurationInSeconds = int
AmazonopensearchserviceTypeName = str
BlockSizeBytes = int
BooleanObject = bool
BucketARN = str
ClusterJDBCURL = str
CopyOptions = str
DataTableColumns = str
DataTableName = str
DeliveryStreamARN = str
DeliveryStreamName = str
DeliveryStreamVersionId = str
DescribeDeliveryStreamInputLimit = int
DestinationId = str
ElasticsearchBufferingIntervalInSeconds = int
ElasticsearchBufferingSizeInMBs = int
ElasticsearchClusterEndpoint = str
ElasticsearchDomainARN = str
ElasticsearchIndexName = str
ElasticsearchRetryDurationInSeconds = int
ElasticsearchTypeName = str
ErrorCode = str
ErrorMessage = str
ErrorOutputPrefix = str
HECAcknowledgmentTimeoutInSeconds = int
HECEndpoint = str
HECToken = str
HttpEndpointAccessKey = str
HttpEndpointAttributeName = str
HttpEndpointAttributeValue = str
HttpEndpointBufferingIntervalInSeconds = int
HttpEndpointBufferingSizeInMBs = int
HttpEndpointName = str
HttpEndpointRetryDurationInSeconds = int
HttpEndpointUrl = str
IntervalInSeconds = int
KinesisStreamARN = str
ListDeliveryStreamsInputLimit = int
ListTagsForDeliveryStreamInputLimit = int
LogGroupName = str
LogStreamName = str
NonEmptyString = str
NonEmptyStringWithoutWhitespace = str
NonNegativeIntegerObject = int
OrcRowIndexStride = int
OrcStripeSizeBytes = int
ParquetPageSizeBytes = int
Password = str
Prefix = str
ProcessorParameterValue = str
Proportion = float
PutResponseRecordId = str
RedshiftRetryDurationInSeconds = int
RetryDurationInSeconds = int
RoleARN = str
SizeInMBs = int
SplunkRetryDurationInSeconds = int
TagKey = str
TagValue = str
Username = str
class AmazonOpenSearchServerlessS3BackupMode(str):
FailedDocumentsOnly = "FailedDocumentsOnly"
AllDocuments = "AllDocuments"
class AmazonopensearchserviceIndexRotationPeriod(str):
NoRotation = "NoRotation"
OneHour = "OneHour"
OneDay = "OneDay"
OneWeek = "OneWeek"
OneMonth = "OneMonth"
class AmazonopensearchserviceS3BackupMode(str):
FailedDocumentsOnly = "FailedDocumentsOnly"
AllDocuments = "AllDocuments"
class CompressionFormat(str):
UNCOMPRESSED = "UNCOMPRESSED"
GZIP = "GZIP"
ZIP = "ZIP"
Snappy = "Snappy"
HADOOP_SNAPPY = "HADOOP_SNAPPY"
class ContentEncoding(str):
NONE = "NONE"
GZIP = "GZIP"
class DeliveryStreamEncryptionStatus(str):
ENABLED = "ENABLED"
ENABLING = "ENABLING"
ENABLING_FAILED = "ENABLING_FAILED"
DISABLED = "DISABLED"
DISABLING = "DISABLING"
DISABLING_FAILED = "DISABLING_FAILED"
class DeliveryStreamFailureType(str):
RETIRE_KMS_GRANT_FAILED = "RETIRE_KMS_GRANT_FAILED"
CREATE_KMS_GRANT_FAILED = "CREATE_KMS_GRANT_FAILED"
KMS_ACCESS_DENIED = "KMS_ACCESS_DENIED"
DISABLED_KMS_KEY = "DISABLED_KMS_KEY"
INVALID_KMS_KEY = "INVALID_KMS_KEY"
KMS_KEY_NOT_FOUND = "KMS_KEY_NOT_FOUND"
KMS_OPT_IN_REQUIRED = "KMS_OPT_IN_REQUIRED"
CREATE_ENI_FAILED = "CREATE_ENI_FAILED"
DELETE_ENI_FAILED = "DELETE_ENI_FAILED"
SUBNET_NOT_FOUND = "SUBNET_NOT_FOUND"
SECURITY_GROUP_NOT_FOUND = "SECURITY_GROUP_NOT_FOUND"
ENI_ACCESS_DENIED = "ENI_ACCESS_DENIED"
SUBNET_ACCESS_DENIED = "SUBNET_ACCESS_DENIED"
SECURITY_GROUP_ACCESS_DENIED = "SECURITY_GROUP_ACCESS_DENIED"
UNKNOWN_ERROR = "UNKNOWN_ERROR"
class DeliveryStreamStatus(str):
CREATING = "CREATING"
CREATING_FAILED = "CREATING_FAILED"
DELETING = "DELETING"
DELETING_FAILED = "DELETING_FAILED"
ACTIVE = "ACTIVE"
class DeliveryStreamType(str):
DirectPut = "DirectPut"
KinesisStreamAsSource = "KinesisStreamAsSource"
class ElasticsearchIndexRotationPeriod(str):
NoRotation = "NoRotation"
OneHour = "OneHour"
OneDay = "OneDay"
OneWeek = "OneWeek"
OneMonth = "OneMonth"
class ElasticsearchS3BackupMode(str):
FailedDocumentsOnly = "FailedDocumentsOnly"
AllDocuments = "AllDocuments"
class HECEndpointType(str):
Raw = "Raw"
Event = "Event"
class HttpEndpointS3BackupMode(str):
FailedDataOnly = "FailedDataOnly"
AllData = "AllData"
class KeyType(str):
AWS_OWNED_CMK = "AWS_OWNED_CMK"
CUSTOMER_MANAGED_CMK = "CUSTOMER_MANAGED_CMK"
class NoEncryptionConfig(str):
NoEncryption = "NoEncryption"
class OrcCompression(str):
NONE = "NONE"
ZLIB = "ZLIB"
SNAPPY = "SNAPPY"
class OrcFormatVersion(str):
V0_11 = "V0_11"
V0_12 = "V0_12"
class ParquetCompression(str):
UNCOMPRESSED = "UNCOMPRESSED"
GZIP = "GZIP"
SNAPPY = "SNAPPY"
class ParquetWriterVersion(str):
V1 = "V1"
V2 = "V2"
class ProcessorParameterName(str):
LambdaArn = "LambdaArn"
NumberOfRetries = "NumberOfRetries"
MetadataExtractionQuery = "MetadataExtractionQuery"
JsonParsingEngine = "JsonParsingEngine"
RoleArn = "RoleArn"
BufferSizeInMBs = "BufferSizeInMBs"
BufferIntervalInSeconds = "BufferIntervalInSeconds"
SubRecordType = "SubRecordType"
Delimiter = "Delimiter"
class ProcessorType(str):
RecordDeAggregation = "RecordDeAggregation"
Lambda = "Lambda"
MetadataExtraction = "MetadataExtraction"
AppendDelimiterToRecord = "AppendDelimiterToRecord"
class RedshiftS3BackupMode(str):
Disabled = "Disabled"
Enabled = "Enabled"
class S3BackupMode(str):
Disabled = "Disabled"
Enabled = "Enabled"
class SplunkS3BackupMode(str):
FailedEventsOnly = "FailedEventsOnly"
AllEvents = "AllEvents"
class ConcurrentModificationException(ServiceException):
code: str = "ConcurrentModificationException"
sender_fault: bool = False
status_code: int = 400
class InvalidArgumentException(ServiceException):
code: str = "InvalidArgumentException"
sender_fault: bool = False
status_code: int = 400
class InvalidKMSResourceException(ServiceException):
code: str = "InvalidKMSResourceException"
sender_fault: bool = False
status_code: int = 400
class LimitExceededException(ServiceException):
code: str = "LimitExceededException"
sender_fault: bool = False
status_code: int = 400
class ResourceInUseException(ServiceException):
code: str = "ResourceInUseException"
sender_fault: bool = False
status_code: int = 400
class ResourceNotFoundException(ServiceException):
code: str = "ResourceNotFoundException"
sender_fault: bool = False
status_code: int = 400
class ServiceUnavailableException(ServiceException):
code: str = "ServiceUnavailableException"
sender_fault: bool = False
status_code: int = 400
class AmazonOpenSearchServerlessBufferingHints(TypedDict, total=False):
IntervalInSeconds: Optional[AmazonOpenSearchServerlessBufferingIntervalInSeconds]
SizeInMBs: Optional[AmazonOpenSearchServerlessBufferingSizeInMBs]
SecurityGroupIdList = List[NonEmptyStringWithoutWhitespace]
SubnetIdList = List[NonEmptyStringWithoutWhitespace]
class VpcConfiguration(TypedDict, total=False):
SubnetIds: SubnetIdList
RoleARN: RoleARN
SecurityGroupIds: SecurityGroupIdList
class CloudWatchLoggingOptions(TypedDict, total=False):
Enabled: Optional[BooleanObject]
LogGroupName: Optional[LogGroupName]
LogStreamName: Optional[LogStreamName]
class ProcessorParameter(TypedDict, total=False):
ParameterName: ProcessorParameterName
ParameterValue: ProcessorParameterValue
ProcessorParameterList = List[ProcessorParameter]
class Processor(TypedDict, total=False):
Type: ProcessorType
Parameters: Optional[ProcessorParameterList]
ProcessorList = List[Processor]
class ProcessingConfiguration(TypedDict, total=False):
Enabled: Optional[BooleanObject]
Processors: Optional[ProcessorList]
class KMSEncryptionConfig(TypedDict, total=False):
AWSKMSKeyARN: AWSKMSKeyARN
class EncryptionConfiguration(TypedDict, total=False):
NoEncryptionConfig: Optional[NoEncryptionConfig]
KMSEncryptionConfig: Optional[KMSEncryptionConfig]
class BufferingHints(TypedDict, total=False):
SizeInMBs: Optional[SizeInMBs]
IntervalInSeconds: Optional[IntervalInSeconds]
class S3DestinationConfiguration(TypedDict, total=False):
RoleARN: RoleARN
BucketARN: BucketARN
Prefix: Optional[Prefix]
ErrorOutputPrefix: Optional[ErrorOutputPrefix]
BufferingHints: Optional[BufferingHints]
CompressionFormat: Optional[CompressionFormat]
EncryptionConfiguration: Optional[EncryptionConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class AmazonOpenSearchServerlessRetryOptions(TypedDict, total=False):
DurationInSeconds: Optional[AmazonOpenSearchServerlessRetryDurationInSeconds]
class AmazonOpenSearchServerlessDestinationConfiguration(TypedDict, total=False):
RoleARN: RoleARN
CollectionEndpoint: Optional[AmazonOpenSearchServerlessCollectionEndpoint]
IndexName: AmazonOpenSearchServerlessIndexName
BufferingHints: Optional[AmazonOpenSearchServerlessBufferingHints]
RetryOptions: Optional[AmazonOpenSearchServerlessRetryOptions]
S3BackupMode: Optional[AmazonOpenSearchServerlessS3BackupMode]
S3Configuration: S3DestinationConfiguration
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
VpcConfiguration: Optional[VpcConfiguration]
class VpcConfigurationDescription(TypedDict, total=False):
SubnetIds: SubnetIdList
RoleARN: RoleARN
SecurityGroupIds: SecurityGroupIdList
VpcId: NonEmptyStringWithoutWhitespace
class S3DestinationDescription(TypedDict, total=False):
RoleARN: RoleARN
BucketARN: BucketARN
Prefix: Optional[Prefix]
ErrorOutputPrefix: Optional[ErrorOutputPrefix]
BufferingHints: BufferingHints
CompressionFormat: CompressionFormat
EncryptionConfiguration: EncryptionConfiguration
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class AmazonOpenSearchServerlessDestinationDescription(TypedDict, total=False):
RoleARN: Optional[RoleARN]
CollectionEndpoint: Optional[AmazonOpenSearchServerlessCollectionEndpoint]
IndexName: Optional[AmazonOpenSearchServerlessIndexName]
BufferingHints: Optional[AmazonOpenSearchServerlessBufferingHints]
RetryOptions: Optional[AmazonOpenSearchServerlessRetryOptions]
S3BackupMode: Optional[AmazonOpenSearchServerlessS3BackupMode]
S3DestinationDescription: Optional[S3DestinationDescription]
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
VpcConfigurationDescription: Optional[VpcConfigurationDescription]
class S3DestinationUpdate(TypedDict, total=False):
RoleARN: Optional[RoleARN]
BucketARN: Optional[BucketARN]
Prefix: Optional[Prefix]
ErrorOutputPrefix: Optional[ErrorOutputPrefix]
BufferingHints: Optional[BufferingHints]
CompressionFormat: Optional[CompressionFormat]
EncryptionConfiguration: Optional[EncryptionConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class AmazonOpenSearchServerlessDestinationUpdate(TypedDict, total=False):
RoleARN: Optional[RoleARN]
CollectionEndpoint: Optional[AmazonOpenSearchServerlessCollectionEndpoint]
IndexName: Optional[AmazonOpenSearchServerlessIndexName]
BufferingHints: Optional[AmazonOpenSearchServerlessBufferingHints]
RetryOptions: Optional[AmazonOpenSearchServerlessRetryOptions]
S3Update: Optional[S3DestinationUpdate]
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class AmazonopensearchserviceBufferingHints(TypedDict, total=False):
IntervalInSeconds: Optional[AmazonopensearchserviceBufferingIntervalInSeconds]
SizeInMBs: Optional[AmazonopensearchserviceBufferingSizeInMBs]
class AmazonopensearchserviceRetryOptions(TypedDict, total=False):
DurationInSeconds: Optional[AmazonopensearchserviceRetryDurationInSeconds]
class AmazonopensearchserviceDestinationConfiguration(TypedDict, total=False):
RoleARN: RoleARN
DomainARN: Optional[AmazonopensearchserviceDomainARN]
ClusterEndpoint: Optional[AmazonopensearchserviceClusterEndpoint]
IndexName: AmazonopensearchserviceIndexName
TypeName: Optional[AmazonopensearchserviceTypeName]
IndexRotationPeriod: Optional[AmazonopensearchserviceIndexRotationPeriod]
BufferingHints: Optional[AmazonopensearchserviceBufferingHints]
RetryOptions: Optional[AmazonopensearchserviceRetryOptions]
S3BackupMode: Optional[AmazonopensearchserviceS3BackupMode]
S3Configuration: S3DestinationConfiguration
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
VpcConfiguration: Optional[VpcConfiguration]
class AmazonopensearchserviceDestinationDescription(TypedDict, total=False):
RoleARN: Optional[RoleARN]
DomainARN: Optional[AmazonopensearchserviceDomainARN]
ClusterEndpoint: Optional[AmazonopensearchserviceClusterEndpoint]
IndexName: Optional[AmazonopensearchserviceIndexName]
TypeName: Optional[AmazonopensearchserviceTypeName]
IndexRotationPeriod: Optional[AmazonopensearchserviceIndexRotationPeriod]
BufferingHints: Optional[AmazonopensearchserviceBufferingHints]
RetryOptions: Optional[AmazonopensearchserviceRetryOptions]
S3BackupMode: Optional[AmazonopensearchserviceS3BackupMode]
S3DestinationDescription: Optional[S3DestinationDescription]
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
VpcConfigurationDescription: Optional[VpcConfigurationDescription]
class AmazonopensearchserviceDestinationUpdate(TypedDict, total=False):
RoleARN: Optional[RoleARN]
DomainARN: Optional[AmazonopensearchserviceDomainARN]
ClusterEndpoint: Optional[AmazonopensearchserviceClusterEndpoint]
IndexName: Optional[AmazonopensearchserviceIndexName]
TypeName: Optional[AmazonopensearchserviceTypeName]
IndexRotationPeriod: Optional[AmazonopensearchserviceIndexRotationPeriod]
BufferingHints: Optional[AmazonopensearchserviceBufferingHints]
RetryOptions: Optional[AmazonopensearchserviceRetryOptions]
S3Update: Optional[S3DestinationUpdate]
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
ColumnToJsonKeyMappings = Dict[NonEmptyStringWithoutWhitespace, NonEmptyString]
class CopyCommand(TypedDict, total=False):
DataTableName: DataTableName
DataTableColumns: Optional[DataTableColumns]
CopyOptions: Optional[CopyOptions]
class Tag(TypedDict, total=False):
Key: TagKey
Value: Optional[TagValue]
TagDeliveryStreamInputTagList = List[Tag]
class HttpEndpointRetryOptions(TypedDict, total=False):
DurationInSeconds: Optional[HttpEndpointRetryDurationInSeconds]
class HttpEndpointCommonAttribute(TypedDict, total=False):
AttributeName: HttpEndpointAttributeName
AttributeValue: HttpEndpointAttributeValue
HttpEndpointCommonAttributesList = List[HttpEndpointCommonAttribute]
class HttpEndpointRequestConfiguration(TypedDict, total=False):
ContentEncoding: Optional[ContentEncoding]
CommonAttributes: Optional[HttpEndpointCommonAttributesList]
class HttpEndpointBufferingHints(TypedDict, total=False):
SizeInMBs: Optional[HttpEndpointBufferingSizeInMBs]
IntervalInSeconds: Optional[HttpEndpointBufferingIntervalInSeconds]
class HttpEndpointConfiguration(TypedDict, total=False):
Url: HttpEndpointUrl
Name: Optional[HttpEndpointName]
AccessKey: Optional[HttpEndpointAccessKey]
class HttpEndpointDestinationConfiguration(TypedDict, total=False):
EndpointConfiguration: HttpEndpointConfiguration
BufferingHints: Optional[HttpEndpointBufferingHints]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
RequestConfiguration: Optional[HttpEndpointRequestConfiguration]
ProcessingConfiguration: Optional[ProcessingConfiguration]
RoleARN: Optional[RoleARN]
RetryOptions: Optional[HttpEndpointRetryOptions]
S3BackupMode: Optional[HttpEndpointS3BackupMode]
S3Configuration: S3DestinationConfiguration
class SplunkRetryOptions(TypedDict, total=False):
DurationInSeconds: Optional[SplunkRetryDurationInSeconds]
class SplunkDestinationConfiguration(TypedDict, total=False):
HECEndpoint: HECEndpoint
HECEndpointType: HECEndpointType
HECToken: HECToken
HECAcknowledgmentTimeoutInSeconds: Optional[HECAcknowledgmentTimeoutInSeconds]
RetryOptions: Optional[SplunkRetryOptions]
S3BackupMode: Optional[SplunkS3BackupMode]
S3Configuration: S3DestinationConfiguration
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class ElasticsearchRetryOptions(TypedDict, total=False):
DurationInSeconds: Optional[ElasticsearchRetryDurationInSeconds]
class ElasticsearchBufferingHints(TypedDict, total=False):
IntervalInSeconds: Optional[ElasticsearchBufferingIntervalInSeconds]
SizeInMBs: Optional[ElasticsearchBufferingSizeInMBs]
class ElasticsearchDestinationConfiguration(TypedDict, total=False):
RoleARN: RoleARN
DomainARN: Optional[ElasticsearchDomainARN]
ClusterEndpoint: Optional[ElasticsearchClusterEndpoint]
IndexName: ElasticsearchIndexName
TypeName: Optional[ElasticsearchTypeName]
IndexRotationPeriod: Optional[ElasticsearchIndexRotationPeriod]
BufferingHints: Optional[ElasticsearchBufferingHints]
RetryOptions: Optional[ElasticsearchRetryOptions]
S3BackupMode: Optional[ElasticsearchS3BackupMode]
S3Configuration: S3DestinationConfiguration
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
VpcConfiguration: Optional[VpcConfiguration]
class RedshiftRetryOptions(TypedDict, total=False):
DurationInSeconds: Optional[RedshiftRetryDurationInSeconds]
class RedshiftDestinationConfiguration(TypedDict, total=False):
RoleARN: RoleARN
ClusterJDBCURL: ClusterJDBCURL
CopyCommand: CopyCommand
Username: Username
Password: Password
RetryOptions: Optional[RedshiftRetryOptions]
S3Configuration: S3DestinationConfiguration
ProcessingConfiguration: Optional[ProcessingConfiguration]
S3BackupMode: Optional[RedshiftS3BackupMode]
S3BackupConfiguration: Optional[S3DestinationConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class RetryOptions(TypedDict, total=False):
DurationInSeconds: Optional[RetryDurationInSeconds]
class DynamicPartitioningConfiguration(TypedDict, total=False):
RetryOptions: Optional[RetryOptions]
Enabled: Optional[BooleanObject]
ListOfNonEmptyStringsWithoutWhitespace = List[NonEmptyStringWithoutWhitespace]
class OrcSerDe(TypedDict, total=False):
StripeSizeBytes: Optional[OrcStripeSizeBytes]
BlockSizeBytes: Optional[BlockSizeBytes]
RowIndexStride: Optional[OrcRowIndexStride]
EnablePadding: Optional[BooleanObject]
PaddingTolerance: Optional[Proportion]
Compression: Optional[OrcCompression]
BloomFilterColumns: Optional[ListOfNonEmptyStringsWithoutWhitespace]
BloomFilterFalsePositiveProbability: Optional[Proportion]
DictionaryKeyThreshold: Optional[Proportion]
FormatVersion: Optional[OrcFormatVersion]
class ParquetSerDe(TypedDict, total=False):
BlockSizeBytes: Optional[BlockSizeBytes]
PageSizeBytes: Optional[ParquetPageSizeBytes]
Compression: Optional[ParquetCompression]
EnableDictionaryCompression: Optional[BooleanObject]
MaxPaddingBytes: Optional[NonNegativeIntegerObject]
WriterVersion: Optional[ParquetWriterVersion]
class Serializer(TypedDict, total=False):
ParquetSerDe: Optional[ParquetSerDe]
OrcSerDe: Optional[OrcSerDe]
class OutputFormatConfiguration(TypedDict, total=False):
Serializer: Optional[Serializer]
ListOfNonEmptyStrings = List[NonEmptyString]
class HiveJsonSerDe(TypedDict, total=False):
TimestampFormats: Optional[ListOfNonEmptyStrings]
class OpenXJsonSerDe(TypedDict, total=False):
ConvertDotsInJsonKeysToUnderscores: Optional[BooleanObject]
CaseInsensitive: Optional[BooleanObject]
ColumnToJsonKeyMappings: Optional[ColumnToJsonKeyMappings]
class Deserializer(TypedDict, total=False):
OpenXJsonSerDe: Optional[OpenXJsonSerDe]
HiveJsonSerDe: Optional[HiveJsonSerDe]
class InputFormatConfiguration(TypedDict, total=False):
Deserializer: Optional[Deserializer]
class SchemaConfiguration(TypedDict, total=False):
RoleARN: Optional[NonEmptyStringWithoutWhitespace]
CatalogId: Optional[NonEmptyStringWithoutWhitespace]
DatabaseName: Optional[NonEmptyStringWithoutWhitespace]
TableName: Optional[NonEmptyStringWithoutWhitespace]
Region: Optional[NonEmptyStringWithoutWhitespace]
VersionId: Optional[NonEmptyStringWithoutWhitespace]
class DataFormatConversionConfiguration(TypedDict, total=False):
SchemaConfiguration: Optional[SchemaConfiguration]
InputFormatConfiguration: Optional[InputFormatConfiguration]
OutputFormatConfiguration: Optional[OutputFormatConfiguration]
Enabled: Optional[BooleanObject]
class ExtendedS3DestinationConfiguration(TypedDict, total=False):
RoleARN: RoleARN
BucketARN: BucketARN
Prefix: Optional[Prefix]
ErrorOutputPrefix: Optional[ErrorOutputPrefix]
BufferingHints: Optional[BufferingHints]
CompressionFormat: Optional[CompressionFormat]
EncryptionConfiguration: Optional[EncryptionConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
ProcessingConfiguration: Optional[ProcessingConfiguration]
S3BackupMode: Optional[S3BackupMode]
S3BackupConfiguration: Optional[S3DestinationConfiguration]
DataFormatConversionConfiguration: Optional[DataFormatConversionConfiguration]
DynamicPartitioningConfiguration: Optional[DynamicPartitioningConfiguration]
class DeliveryStreamEncryptionConfigurationInput(TypedDict, total=False):
KeyARN: Optional[AWSKMSKeyARN]
KeyType: KeyType
class KinesisStreamSourceConfiguration(TypedDict, total=False):
KinesisStreamARN: KinesisStreamARN
RoleARN: RoleARN
class CreateDeliveryStreamInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
DeliveryStreamType: Optional[DeliveryStreamType]
KinesisStreamSourceConfiguration: Optional[KinesisStreamSourceConfiguration]
DeliveryStreamEncryptionConfigurationInput: Optional[DeliveryStreamEncryptionConfigurationInput]
S3DestinationConfiguration: Optional[S3DestinationConfiguration]
ExtendedS3DestinationConfiguration: Optional[ExtendedS3DestinationConfiguration]
RedshiftDestinationConfiguration: Optional[RedshiftDestinationConfiguration]
ElasticsearchDestinationConfiguration: Optional[ElasticsearchDestinationConfiguration]
AmazonopensearchserviceDestinationConfiguration: Optional[
AmazonopensearchserviceDestinationConfiguration
]
SplunkDestinationConfiguration: Optional[SplunkDestinationConfiguration]
HttpEndpointDestinationConfiguration: Optional[HttpEndpointDestinationConfiguration]
Tags: Optional[TagDeliveryStreamInputTagList]
AmazonOpenSearchServerlessDestinationConfiguration: Optional[
AmazonOpenSearchServerlessDestinationConfiguration
]
class CreateDeliveryStreamOutput(TypedDict, total=False):
DeliveryStreamARN: Optional[DeliveryStreamARN]
Data = bytes
class DeleteDeliveryStreamInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
AllowForceDelete: Optional[BooleanObject]
class DeleteDeliveryStreamOutput(TypedDict, total=False):
pass
DeliveryStartTimestamp = datetime
class HttpEndpointDescription(TypedDict, total=False):
Url: Optional[HttpEndpointUrl]
Name: Optional[HttpEndpointName]
class HttpEndpointDestinationDescription(TypedDict, total=False):
EndpointConfiguration: Optional[HttpEndpointDescription]
BufferingHints: Optional[HttpEndpointBufferingHints]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
RequestConfiguration: Optional[HttpEndpointRequestConfiguration]
ProcessingConfiguration: Optional[ProcessingConfiguration]
RoleARN: Optional[RoleARN]
RetryOptions: Optional[HttpEndpointRetryOptions]
S3BackupMode: Optional[HttpEndpointS3BackupMode]
S3DestinationDescription: Optional[S3DestinationDescription]
class SplunkDestinationDescription(TypedDict, total=False):
HECEndpoint: Optional[HECEndpoint]
HECEndpointType: Optional[HECEndpointType]
HECToken: Optional[HECToken]
HECAcknowledgmentTimeoutInSeconds: Optional[HECAcknowledgmentTimeoutInSeconds]
RetryOptions: Optional[SplunkRetryOptions]
S3BackupMode: Optional[SplunkS3BackupMode]
S3DestinationDescription: Optional[S3DestinationDescription]
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class ElasticsearchDestinationDescription(TypedDict, total=False):
RoleARN: Optional[RoleARN]
DomainARN: Optional[ElasticsearchDomainARN]
ClusterEndpoint: Optional[ElasticsearchClusterEndpoint]
IndexName: Optional[ElasticsearchIndexName]
TypeName: Optional[ElasticsearchTypeName]
IndexRotationPeriod: Optional[ElasticsearchIndexRotationPeriod]
BufferingHints: Optional[ElasticsearchBufferingHints]
RetryOptions: Optional[ElasticsearchRetryOptions]
S3BackupMode: Optional[ElasticsearchS3BackupMode]
S3DestinationDescription: Optional[S3DestinationDescription]
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
VpcConfigurationDescription: Optional[VpcConfigurationDescription]
class RedshiftDestinationDescription(TypedDict, total=False):
RoleARN: RoleARN
ClusterJDBCURL: ClusterJDBCURL
CopyCommand: CopyCommand
Username: Username
RetryOptions: Optional[RedshiftRetryOptions]
S3DestinationDescription: S3DestinationDescription
ProcessingConfiguration: Optional[ProcessingConfiguration]
S3BackupMode: Optional[RedshiftS3BackupMode]
S3BackupDescription: Optional[S3DestinationDescription]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class ExtendedS3DestinationDescription(TypedDict, total=False):
RoleARN: RoleARN
BucketARN: BucketARN
Prefix: Optional[Prefix]
ErrorOutputPrefix: Optional[ErrorOutputPrefix]
BufferingHints: BufferingHints
CompressionFormat: CompressionFormat
EncryptionConfiguration: EncryptionConfiguration
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
ProcessingConfiguration: Optional[ProcessingConfiguration]
S3BackupMode: Optional[S3BackupMode]
S3BackupDescription: Optional[S3DestinationDescription]
DataFormatConversionConfiguration: Optional[DataFormatConversionConfiguration]
DynamicPartitioningConfiguration: Optional[DynamicPartitioningConfiguration]
class DestinationDescription(TypedDict, total=False):
DestinationId: DestinationId
S3DestinationDescription: Optional[S3DestinationDescription]
ExtendedS3DestinationDescription: Optional[ExtendedS3DestinationDescription]
RedshiftDestinationDescription: Optional[RedshiftDestinationDescription]
ElasticsearchDestinationDescription: Optional[ElasticsearchDestinationDescription]
AmazonopensearchserviceDestinationDescription: Optional[
AmazonopensearchserviceDestinationDescription
]
SplunkDestinationDescription: Optional[SplunkDestinationDescription]
HttpEndpointDestinationDescription: Optional[HttpEndpointDestinationDescription]
AmazonOpenSearchServerlessDestinationDescription: Optional[
AmazonOpenSearchServerlessDestinationDescription
]
DestinationDescriptionList = List[DestinationDescription]
class KinesisStreamSourceDescription(TypedDict, total=False):
KinesisStreamARN: Optional[KinesisStreamARN]
RoleARN: Optional[RoleARN]
DeliveryStartTimestamp: Optional[DeliveryStartTimestamp]
class SourceDescription(TypedDict, total=False):
KinesisStreamSourceDescription: Optional[KinesisStreamSourceDescription]
Timestamp = datetime
class FailureDescription(TypedDict, total=False):
Type: DeliveryStreamFailureType
Details: NonEmptyString
class DeliveryStreamEncryptionConfiguration(TypedDict, total=False):
KeyARN: Optional[AWSKMSKeyARN]
KeyType: Optional[KeyType]
Status: Optional[DeliveryStreamEncryptionStatus]
FailureDescription: Optional[FailureDescription]
class DeliveryStreamDescription(TypedDict, total=False):
DeliveryStreamName: DeliveryStreamName
DeliveryStreamARN: DeliveryStreamARN
DeliveryStreamStatus: DeliveryStreamStatus
FailureDescription: Optional[FailureDescription]
DeliveryStreamEncryptionConfiguration: Optional[DeliveryStreamEncryptionConfiguration]
DeliveryStreamType: DeliveryStreamType
VersionId: DeliveryStreamVersionId
CreateTimestamp: Optional[Timestamp]
LastUpdateTimestamp: Optional[Timestamp]
Source: Optional[SourceDescription]
Destinations: DestinationDescriptionList
HasMoreDestinations: BooleanObject
DeliveryStreamNameList = List[DeliveryStreamName]
class DescribeDeliveryStreamInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
Limit: Optional[DescribeDeliveryStreamInputLimit]
ExclusiveStartDestinationId: Optional[DestinationId]
class DescribeDeliveryStreamOutput(TypedDict, total=False):
DeliveryStreamDescription: DeliveryStreamDescription
class ElasticsearchDestinationUpdate(TypedDict, total=False):
RoleARN: Optional[RoleARN]
DomainARN: Optional[ElasticsearchDomainARN]
ClusterEndpoint: Optional[ElasticsearchClusterEndpoint]
IndexName: Optional[ElasticsearchIndexName]
TypeName: Optional[ElasticsearchTypeName]
IndexRotationPeriod: Optional[ElasticsearchIndexRotationPeriod]
BufferingHints: Optional[ElasticsearchBufferingHints]
RetryOptions: Optional[ElasticsearchRetryOptions]
S3Update: Optional[S3DestinationUpdate]
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class ExtendedS3DestinationUpdate(TypedDict, total=False):
RoleARN: Optional[RoleARN]
BucketARN: Optional[BucketARN]
Prefix: Optional[Prefix]
ErrorOutputPrefix: Optional[ErrorOutputPrefix]
BufferingHints: Optional[BufferingHints]
CompressionFormat: Optional[CompressionFormat]
EncryptionConfiguration: Optional[EncryptionConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
ProcessingConfiguration: Optional[ProcessingConfiguration]
S3BackupMode: Optional[S3BackupMode]
S3BackupUpdate: Optional[S3DestinationUpdate]
DataFormatConversionConfiguration: Optional[DataFormatConversionConfiguration]
DynamicPartitioningConfiguration: Optional[DynamicPartitioningConfiguration]
class HttpEndpointDestinationUpdate(TypedDict, total=False):
EndpointConfiguration: Optional[HttpEndpointConfiguration]
BufferingHints: Optional[HttpEndpointBufferingHints]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
RequestConfiguration: Optional[HttpEndpointRequestConfiguration]
ProcessingConfiguration: Optional[ProcessingConfiguration]
RoleARN: Optional[RoleARN]
RetryOptions: Optional[HttpEndpointRetryOptions]
S3BackupMode: Optional[HttpEndpointS3BackupMode]
S3Update: Optional[S3DestinationUpdate]
class ListDeliveryStreamsInput(ServiceRequest):
Limit: Optional[ListDeliveryStreamsInputLimit]
DeliveryStreamType: Optional[DeliveryStreamType]
ExclusiveStartDeliveryStreamName: Optional[DeliveryStreamName]
class ListDeliveryStreamsOutput(TypedDict, total=False):
DeliveryStreamNames: DeliveryStreamNameList
HasMoreDeliveryStreams: BooleanObject
class ListTagsForDeliveryStreamInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
ExclusiveStartTagKey: Optional[TagKey]
Limit: Optional[ListTagsForDeliveryStreamInputLimit]
ListTagsForDeliveryStreamOutputTagList = List[Tag]
class ListTagsForDeliveryStreamOutput(TypedDict, total=False):
Tags: ListTagsForDeliveryStreamOutputTagList
HasMoreTags: BooleanObject
class Record(TypedDict, total=False):
Data: Data
PutRecordBatchRequestEntryList = List[Record]
class PutRecordBatchInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
Records: PutRecordBatchRequestEntryList
class PutRecordBatchResponseEntry(TypedDict, total=False):
RecordId: Optional[PutResponseRecordId]
ErrorCode: Optional[ErrorCode]
ErrorMessage: Optional[ErrorMessage]
PutRecordBatchResponseEntryList = List[PutRecordBatchResponseEntry]
class PutRecordBatchOutput(TypedDict, total=False):
FailedPutCount: NonNegativeIntegerObject
Encrypted: Optional[BooleanObject]
RequestResponses: PutRecordBatchResponseEntryList
class PutRecordInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
Record: Record
class PutRecordOutput(TypedDict, total=False):
RecordId: PutResponseRecordId
Encrypted: Optional[BooleanObject]
class RedshiftDestinationUpdate(TypedDict, total=False):
RoleARN: Optional[RoleARN]
ClusterJDBCURL: Optional[ClusterJDBCURL]
CopyCommand: Optional[CopyCommand]
Username: Optional[Username]
Password: Optional[Password]
RetryOptions: Optional[RedshiftRetryOptions]
S3Update: Optional[S3DestinationUpdate]
ProcessingConfiguration: Optional[ProcessingConfiguration]
S3BackupMode: Optional[RedshiftS3BackupMode]
S3BackupUpdate: Optional[S3DestinationUpdate]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class SplunkDestinationUpdate(TypedDict, total=False):
HECEndpoint: Optional[HECEndpoint]
HECEndpointType: Optional[HECEndpointType]
HECToken: Optional[HECToken]
HECAcknowledgmentTimeoutInSeconds: Optional[HECAcknowledgmentTimeoutInSeconds]
RetryOptions: Optional[SplunkRetryOptions]
S3BackupMode: Optional[SplunkS3BackupMode]
S3Update: Optional[S3DestinationUpdate]
ProcessingConfiguration: Optional[ProcessingConfiguration]
CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions]
class StartDeliveryStreamEncryptionInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
DeliveryStreamEncryptionConfigurationInput: Optional[DeliveryStreamEncryptionConfigurationInput]
class StartDeliveryStreamEncryptionOutput(TypedDict, total=False):
pass
class StopDeliveryStreamEncryptionInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
class StopDeliveryStreamEncryptionOutput(TypedDict, total=False):
pass
class TagDeliveryStreamInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
Tags: TagDeliveryStreamInputTagList
class TagDeliveryStreamOutput(TypedDict, total=False):
pass
TagKeyList = List[TagKey]
class UntagDeliveryStreamInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
TagKeys: TagKeyList
class UntagDeliveryStreamOutput(TypedDict, total=False):
pass
class UpdateDestinationInput(ServiceRequest):
DeliveryStreamName: DeliveryStreamName
CurrentDeliveryStreamVersionId: DeliveryStreamVersionId
DestinationId: DestinationId
S3DestinationUpdate: Optional[S3DestinationUpdate]
ExtendedS3DestinationUpdate: Optional[ExtendedS3DestinationUpdate]
RedshiftDestinationUpdate: Optional[RedshiftDestinationUpdate]
ElasticsearchDestinationUpdate: Optional[ElasticsearchDestinationUpdate]
AmazonopensearchserviceDestinationUpdate: Optional[AmazonopensearchserviceDestinationUpdate]
SplunkDestinationUpdate: Optional[SplunkDestinationUpdate]
HttpEndpointDestinationUpdate: Optional[HttpEndpointDestinationUpdate]
AmazonOpenSearchServerlessDestinationUpdate: Optional[
AmazonOpenSearchServerlessDestinationUpdate
]
class UpdateDestinationOutput(TypedDict, total=False):
pass
class FirehoseApi:
service = "firehose"
version = "2015-08-04"
@handler("CreateDeliveryStream")
def create_delivery_stream(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
delivery_stream_type: DeliveryStreamType = None,
kinesis_stream_source_configuration: KinesisStreamSourceConfiguration = None,
delivery_stream_encryption_configuration_input: DeliveryStreamEncryptionConfigurationInput = None,
s3_destination_configuration: S3DestinationConfiguration = None,
extended_s3_destination_configuration: ExtendedS3DestinationConfiguration = None,
redshift_destination_configuration: RedshiftDestinationConfiguration = None,
elasticsearch_destination_configuration: ElasticsearchDestinationConfiguration = None,
amazonopensearchservice_destination_configuration: AmazonopensearchserviceDestinationConfiguration = None,
splunk_destination_configuration: SplunkDestinationConfiguration = None,
http_endpoint_destination_configuration: HttpEndpointDestinationConfiguration = None,
tags: TagDeliveryStreamInputTagList = None,
amazon_open_search_serverless_destination_configuration: AmazonOpenSearchServerlessDestinationConfiguration = None,
) -> CreateDeliveryStreamOutput:
raise NotImplementedError
@handler("DeleteDeliveryStream")
def delete_delivery_stream(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
allow_force_delete: BooleanObject = None,
) -> DeleteDeliveryStreamOutput:
raise NotImplementedError
@handler("DescribeDeliveryStream")
def describe_delivery_stream(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
limit: DescribeDeliveryStreamInputLimit = None,
exclusive_start_destination_id: DestinationId = None,
) -> DescribeDeliveryStreamOutput:
raise NotImplementedError
@handler("ListDeliveryStreams")
def list_delivery_streams(
self,
context: RequestContext,
limit: ListDeliveryStreamsInputLimit = None,
delivery_stream_type: DeliveryStreamType = None,
exclusive_start_delivery_stream_name: DeliveryStreamName = None,
) -> ListDeliveryStreamsOutput:
raise NotImplementedError
@handler("ListTagsForDeliveryStream")
def list_tags_for_delivery_stream(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
exclusive_start_tag_key: TagKey = None,
limit: ListTagsForDeliveryStreamInputLimit = None,
) -> ListTagsForDeliveryStreamOutput:
raise NotImplementedError
@handler("PutRecord")
def put_record(
self, context: RequestContext, delivery_stream_name: DeliveryStreamName, record: Record
) -> PutRecordOutput:
raise NotImplementedError
@handler("PutRecordBatch")
def put_record_batch(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
records: PutRecordBatchRequestEntryList,
) -> PutRecordBatchOutput:
raise NotImplementedError
@handler("StartDeliveryStreamEncryption")
def start_delivery_stream_encryption(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
delivery_stream_encryption_configuration_input: DeliveryStreamEncryptionConfigurationInput = None,
) -> StartDeliveryStreamEncryptionOutput:
raise NotImplementedError
@handler("StopDeliveryStreamEncryption")
def stop_delivery_stream_encryption(
self, context: RequestContext, delivery_stream_name: DeliveryStreamName
) -> StopDeliveryStreamEncryptionOutput:
raise NotImplementedError
@handler("TagDeliveryStream")
def tag_delivery_stream(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
tags: TagDeliveryStreamInputTagList,
) -> TagDeliveryStreamOutput:
raise NotImplementedError
@handler("UntagDeliveryStream")
def untag_delivery_stream(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
tag_keys: TagKeyList,
) -> UntagDeliveryStreamOutput:
raise NotImplementedError
@handler("UpdateDestination")
def update_destination(
self,
context: RequestContext,
delivery_stream_name: DeliveryStreamName,
current_delivery_stream_version_id: DeliveryStreamVersionId,
destination_id: DestinationId,
s3_destination_update: S3DestinationUpdate = None,
extended_s3_destination_update: ExtendedS3DestinationUpdate = None,
redshift_destination_update: RedshiftDestinationUpdate = None,
elasticsearch_destination_update: ElasticsearchDestinationUpdate = None,
amazonopensearchservice_destination_update: AmazonopensearchserviceDestinationUpdate = None,
splunk_destination_update: SplunkDestinationUpdate = None,
http_endpoint_destination_update: HttpEndpointDestinationUpdate = None,
amazon_open_search_serverless_destination_update: AmazonOpenSearchServerlessDestinationUpdate = None,
) -> UpdateDestinationOutput:
raise NotImplementedError
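# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated stubs above). The TypedDicts
# and handler signatures mirror the public Firehose API, so any concrete
# implementation of FirehoseApi can be exercised with a plain boto3 client.
# The endpoint URL and dummy credentials below are assumptions for a local
# test setup; swap in real values as needed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import boto3  # assumed to be available in the test environment

    firehose = boto3.client(
        "firehose",
        endpoint_url="http://localhost:4566",  # assumed local endpoint
        region_name="us-east-1",
        aws_access_key_id="test",
        aws_secret_access_key="test",
    )
    # Request shapes correspond to CreateDeliveryStreamInput / PutRecordInput.
    firehose.create_delivery_stream(
        DeliveryStreamName="example-stream",
        S3DestinationConfiguration={
            "RoleARN": "arn:aws:iam::000000000000:role/example-role",
            "BucketARN": "arn:aws:s3:::example-bucket",
        },
    )
    firehose.put_record(
        DeliveryStreamName="example-stream",
        Record={"Data": b"hello"},
    )
    print(firehose.describe_delivery_stream(DeliveryStreamName="example-stream"))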
|
PypiClean
|
/pydaal-2019.0.0.20180713-cp35-cp35m-manylinux1_x86_64.whl/pydaal-2019.0.0.20180713.data/data/share/pydaal_examples/examples/python/source/kmeans/kmeans_csr_distr.py
|
## <a name="DAAL-EXAMPLE-PY-KMEANS_CSR_DISTRIBUTED"></a>
## \example kmeans_csr_distr.py
import os
import sys
import daal.algorithms.kmeans as kmeans
import daal.algorithms.kmeans.init as init
from daal import step1Local, step2Master
utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
sys.path.insert(0, utils_folder)
from utils import printNumericTable, createSparseTable
DAAL_PREFIX = os.path.join('..', 'data')
# K-Means algorithm parameters
nClusters = 20
nIterations = 5
nBlocks = 4
nVectorsInBlock = 8000
dataFileNames = [
os.path.join(DAAL_PREFIX, 'batch', 'kmeans_csr.csv'),
os.path.join(DAAL_PREFIX, 'batch', 'kmeans_csr.csv'),
os.path.join(DAAL_PREFIX, 'batch', 'kmeans_csr.csv'),
os.path.join(DAAL_PREFIX, 'batch', 'kmeans_csr.csv')
]
dataTable = [0] * nBlocks
if __name__ == "__main__":
masterAlgorithm = kmeans.Distributed(step2Master, nClusters, method=kmeans.lloydCSR, )
centroids = None
assignments = [0] * nBlocks
masterInitAlgorithm = init.Distributed(step2Master, nClusters, method=init.randomDense)
for i in range(nBlocks):
# Read dataFileNames and create a numeric table to store the input data
dataTable[i] = createSparseTable(dataFileNames[i])
# Create an algorithm object for the K-Means algorithm
localInit = init.Distributed(step1Local, nClusters, nBlocks * nVectorsInBlock, i * nVectorsInBlock, method=init.randomDense)
localInit.input.set(init.data, dataTable[i])
        # Compute a local partial result and add it as input for the master step
masterInitAlgorithm.input.add(init.partialResults, localInit.compute())
masterInitAlgorithm.compute()
res = masterInitAlgorithm.finalizeCompute()
centroids = res.get(init.centroids)
for it in range(nIterations):
for i in range(nBlocks):
# Create an algorithm object for the K-Means algorithm
localAlgorithm = kmeans.Distributed(step1Local, nClusters, it == nIterations, method=kmeans.lloydCSR)
# Set the input data to the algorithm
localAlgorithm.input.set(kmeans.data, dataTable[i])
localAlgorithm.input.set(kmeans.inputCentroids, centroids)
pres = localAlgorithm.compute()
masterAlgorithm.input.add(kmeans.partialResults, pres)
masterAlgorithm.compute()
result = masterAlgorithm.finalizeCompute()
centroids = result.get(kmeans.centroids)
objectiveFunction = result.get(kmeans.objectiveFunction)
for i in range(nBlocks):
# Create an algorithm object for the K-Means algorithm
localAlgorithm = kmeans.Batch(nClusters, 0, method=kmeans.lloydCSR)
# Set the input data to the algorithm
localAlgorithm.input.set(kmeans.data, dataTable[i])
localAlgorithm.input.set(kmeans.inputCentroids, centroids)
res = localAlgorithm.compute()
assignments[i] = res.get(kmeans.assignments)
    # Print the clustering results
printNumericTable(assignments[0], "First 10 cluster assignments from 1st node:", 10)
printNumericTable(centroids, "First 10 dimensions of centroids:", 20, 10)
printNumericTable(objectiveFunction, "Objective function value:")
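# ---------------------------------------------------------------------------
# Illustration only (not part of the DAAL example above, and not DAAL API).
# The distributed flow follows a map/reduce pattern: every block computes
# per-cluster partial sums and counts locally ("step1Local"), and the master
# adds them up and divides to obtain new centroids ("step2Master"). A minimal
# dense NumPy sketch of one such Lloyd iteration over a list of data blocks:
# ---------------------------------------------------------------------------
def _lloyd_iteration_sketch(blocks, centroids):
    import numpy as np
    k, dim = centroids.shape
    sums = np.zeros((k, dim))
    counts = np.zeros(k)
    for block in blocks:
        # Local step: assign every row of the block to its nearest centroid
        dists = ((block[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=-1)
        labels = dists.argmin(axis=1)
        for j in range(k):
            sums[j] += block[labels == j].sum(axis=0)
            counts[j] += (labels == j).sum()
    # Master step: combine the partial results into the updated centroids
    return sums / np.maximum(counts[:, None], 1)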
|
PypiClean
|
/cupy_cuda111-12.2.0-cp39-cp39-manylinux2014_x86_64.whl/cupyx/distributed/_store.py
|
import atexit
from ctypes import sizeof
import multiprocessing
import threading
import socket
import time
from cupyx.distributed import _klv_utils
from cupyx.distributed import _store_actions
_DEFAULT_HOST = '127.0.0.1'
_DEFAULT_PORT = 13333
_exit_mode = False
@atexit.register
def _exit():
global _exit_mode
_exit_mode = True
class ExceptionAwareProcess(multiprocessing.Process):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._exception = None
self._parent_p, self._child_p = multiprocessing.Pipe()
def run(self):
try:
super().run()
self._child_p.send(None)
except Exception as e:
self._child_p.send(e)
def join(self):
super().join()
if self._parent_p.poll():
exception = self._parent_p.recv()
if exception is not None:
raise exception
class TCPStore:
# This is only used for initialization of nccl so we don't care
    # too much about performance
def __init__(self, world_size):
self.storage = {}
self._process = None
self._world_size = world_size
self._run = multiprocessing.Value('b', 1)
# For implementing a barrier
self._lock = threading.Lock()
self._current_barrier = None
def __del__(self):
if not _exit_mode:
self.stop()
def _set_process(self, process):
self._process = process
def _process_request(self, c_socket):
with c_socket:
# Receive in KLV format
action_bytes = c_socket.recv(sizeof(_klv_utils.action_t))
if len(action_bytes) > 0:
action_m = _klv_utils.action_t.from_buffer_copy(action_bytes)
if action_m.length > 256:
raise ValueError('Invalid length for message')
value = bytearray(action_m.value)[:action_m.length]
r = _store_actions.execute_action(action_m.action, value, self)
if r is not None:
c_socket.sendall(r.klv())
def _server_loop(self, host, port):
# This is for minimum info exchange during initialization
        # a single connection makes it easy to implement locking mechanics
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen()
s.settimeout(0.5)
while self._run.value == 1:
try:
c_socket, addr = s.accept()
except socket.timeout:
continue
t = threading.Thread(
target=self._process_request,
args=(c_socket,), daemon=True)
t.start()
def run(self, host=_DEFAULT_HOST, port=_DEFAULT_PORT):
# Run the TCP store in a different process
p = ExceptionAwareProcess(
target=self._server_loop, args=(host, port))
p.start()
self._process = p
def stop(self):
if _exit_mode:
return # Prevent shutdown errors
if self._process is not None:
with self._run.get_lock():
self._run.value = 0
self._process.join()
class TCPStoreProxy:
MAX_NUM_RETRIES = 50
DELAY_FOR_RETRY = 0.5
def __init__(self, host=_DEFAULT_HOST, port=_DEFAULT_PORT):
self.host = host
self.port = port
def _send_recv(self, action):
# Retry several times in case the rank 0 has not established the
# main store yet
for i in range(TCPStoreProxy.MAX_NUM_RETRIES):
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# TODO retry connects
s.connect((self.host, self.port))
s.sendall(action.klv())
result_bytes = s.recv(sizeof(
_klv_utils.result_action_t))
if len(result_bytes) > 0:
result = _klv_utils.result_action_t.from_buffer_copy(
result_bytes)
value = bytearray(result.value)[:result.length]
if result.status == 0:
return action.decode_result(value)
else:
raise RuntimeError(value.decode('utf-8'))
except ConnectionRefusedError:
time.sleep(TCPStoreProxy.DELAY_FOR_RETRY)
raise RuntimeError('TCPStore is not available')
def __getitem__(self, key):
return self._send_recv(_store_actions.Get(key))
def __setitem__(self, key, value):
self._send_recv(_store_actions.Set(key, value))
def barrier(self):
# Barrier has special semantics
self._send_recv(_store_actions.Barrier())
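# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; these classes are cupyx internals
# used during NCCL initialization, so the exact value types accepted by the
# store are an assumption here). Rank 0 runs the store process and every rank
# reaches it through TCPStoreProxy:
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    store = TCPStore(world_size=1)
    store.run(host=_DEFAULT_HOST, port=_DEFAULT_PORT)  # rank 0 only
    proxy = TCPStoreProxy(host=_DEFAULT_HOST, port=_DEFAULT_PORT)
    proxy['nccl_id'] = b'0123456789abcdef'  # any rank can set a value...
    print(proxy['nccl_id'])                 # ...and read it back
    store.stop()                            # rank 0 only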
|
PypiClean
|
/infoblox-netmri-3.8.0.0.tar.gz/infoblox-netmri-3.8.0.0/infoblox_netmri/api/broker/v2_8_0/spm_interfaces_default_grid_broker.py
|
from ..broker import Broker
class SpmInterfacesDefaultGridBroker(Broker):
controller = "spm_interfaces_default_grids"
def index(self, **kwargs):
"""Lists the available spm interfaces default grids. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the spm interfaces default grids with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the spm interfaces default grids with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, VirtualNetworkID, DeviceID, DeviceType, DeviceName, DeviceIPDotted, DeviceIPNumeric, Network, InterfaceID, ifName, VirtualNetworkMemberName, ifIndex, ifDescr, ifAlias, ifType, ifMAC, ifTrunkStatus, ifAdminStatus, ifOperStatus, ifSpeed, ifAdminDuplex, ifDuplex, PoEPower, PoEStatus, VlanIndex, VlanName, VlanID, VTPDomain, EndHostCount, PortStatus, Packets, Errors, ErrorPercentage, FirstSeen, LastSeen, ifPortControlInd, ifSwitchPortMgmtInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each SpmInterfacesDefaultGrid. Valid values are id, VirtualNetworkID, DeviceID, DeviceType, DeviceName, DeviceIPDotted, DeviceIPNumeric, Network, InterfaceID, ifName, VirtualNetworkMemberName, ifIndex, ifDescr, ifAlias, ifType, ifMAC, ifTrunkStatus, ifAdminStatus, ifOperStatus, ifSpeed, ifAdminDuplex, ifDuplex, PoEPower, PoEStatus, VlanIndex, VlanName, VlanID, VTPDomain, EndHostCount, PortStatus, Packets, Errors, ErrorPercentage, FirstSeen, LastSeen, ifPortControlInd, ifSwitchPortMgmtInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param refresh_ind: If true, the grid will be regenerated, rather than using any available cached grid data.
:type refresh_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param async_ind: If true and if grid data is not yet available, it will return immediately with 202 status. User should retry again later.
:type async_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return spm_interfaces_default_grids: An array of the SpmInterfacesDefaultGrid objects that match the specified input criteria.
:rtype spm_interfaces_default_grids: Array of SpmInterfacesDefaultGrid
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return summary: A summary of calculation of selected columns, when applicable.
:rtype summary: Hash
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def data_partitions(self, **kwargs):
"""Returns data partitions with their statuses for specified grid. 0 - data not available for that date, 1 - data available but must be prepared, 2 - data prepared and immediately available
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("data_partitions"), kwargs)
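# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only): how the paginated ``index`` call
# documented above is typically consumed. How ``broker`` is obtained depends
# on the infoblox_netmri client object, so it is left abstract here; the
# parameter values are arbitrary examples of the documented inputs.
#
#     grids = broker.index(
#         starttime="2023-01-01 00:00:00",
#         endtime="2023-01-02 00:00:00",
#         sort=["DeviceName"],
#         dir=["asc"],
#         start=0,
#         limit=1000,
#         select=["DeviceName", "ifName", "ifOperStatus", "EndHostCount"],
#     )
# ---------------------------------------------------------------------------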
|
PypiClean
|
/tensor2tensorM-2.0.11.tar.gz/tensor2tensorM-2.0.11/tensor2tensor/layers/common_hparams.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip # pylint: disable=redefined-builtin
from tensor2tensor.utils import registry
from tensor2tensor.utils.hparam import HParams
import tensorflow as tf
@registry.register_hparams("basic_1")
def basic_params1():
"""A set of basic hyperparameters."""
return HParams(
# If the problem consists of variable-length sequences
# (see problem.batch_size_means_tokens()), then this is the number
# of tokens per batch per GPU or per TPU core. Otherwise, this is
# the number of examples per GPU or per TPU core.
batch_size=4096,
batch_shuffle_size=512,
# If True, then if the features are of variable length, the batch_size is
# used as the actual batch size (and not tokens per batch).
use_fixed_batch_size=False,
num_hidden_layers=4,
kernel_height=3,
kernel_width=1,
hidden_size=64,
compress_steps=0,
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
dropout=0.2,
clip_grad_norm=2.0,
grad_noise_scale=0.0,
summarize_grads=False,
# Flag for whether mlperf mode is on
mlperf_mode=False,
# Whether to log the name and size of every variable
summarize_vars=False,
initializer="orthogonal",
initializer_gain=1.5,
label_smoothing=0.1,
optimizer="adam",
optimizer_adam_epsilon=1e-6,
optimizer_adam_beta1=0.85,
optimizer_adam_beta2=0.997,
optimizer_momentum_momentum=0.9,
optimizer_momentum_nesterov=False,
optimizer_adafactor_beta1=0.0,
optimizer_adafactor_beta2=0.999,
optimizer_adafactor_factored=True,
optimizer_adafactor_decay_type="pow",
optimizer_adafactor_memory_exponent=0.8,
optimizer_adafactor_clipping_threshold=1.0,
optimizer_adafactor_multiply_by_parameter_scale=True,
# Number of accumulating steps for multi step optimizers.
optimizer_multistep_accumulate_steps=None,
# Loss scaling used.
# Generally only necessary with mixed precision training.
# Mixed precision training only supports exponential scaling currently
      # To disable the scaler, set it to 0/False
mixed_precision_optimizer_loss_scaler="exponential",
# Determines the initial loss scaling value for mixed precision
mixed_precision_optimizer_init_loss_scale=2**15,
# Whether to zero gradients that were not computed, so that the
# appropriate slots are created. Useful for sharing checkpoints between
# models with different sets of heads.
optimizer_zero_grads=False,
weight_decay=1e-6,
weight_noise=0.0,
# Defines the learning rate as a product of named functions.
# Available functions are listed in learning_rate._LEARNING_RATE_FUNCTIONS
# e.g. "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size"
learning_rate_schedule="legacy",
learning_rate_constant=1.0,
# If learning_rate_schedule=="legacy",
# then we specify decay scheme here. Warmup is always exponential,
# except with "noam" learning rate decay scheme.
# see optimize.legacy_learning_rate_schedule()
# TODO(noam): migrate everyone away from this.
learning_rate_decay_scheme="none",
# decay_steps and decay_staircase for learning_rate_decay_scheme=="exp"
learning_rate_decay_steps=5000,
learning_rate_decay_staircase=False,
learning_rate_minimum=None,
learning_rate_decay_rate=1.0,
learning_rate_warmup_steps=100,
learning_rate_cosine_cycle_steps=250000,
learning_rate=0.1,
sampling_method="argmax", # "argmax" or "random"
sampling_temp=1.0, # temperature for sampling
# expand the logits a piece at a time - saves memory.
factored_logits=False,
multiply_embedding_mode="sqrt_depth",
# Parameters related to mixtures of experts.
moe_hidden_sizes="2048", # hidden layer sizes (comma-separated)
moe_num_experts=64, # number of experts per layer
moe_k=2, # how many experts to use for each batch element
moe_loss_coef=1e-2,
# Sequences of operations to perform on layer input and layer output.
# Used by common_layers.layer_preprocess, common_layers.layer_postprocess
# Each character represents an operation:
# none: no preprocessing
# d: apply dropout
# n: apply normalization (see norm_type and norm_epsilon)
# a: add layer input (residual connection - only during postprocess)
# The special string "none" is used instead of the empty string
# to indicate no pre/postprocessing, since the empty string causes
# trouble for hyperparameter tuning.
# TODO(noam): The current settings ("", "dan") are the published version
# of the transformer. ("n", "da") seems better for harder-to-learn
# models, so it should probably be the default.
layer_preprocess_sequence="none",
layer_postprocess_sequence="dan",
# dropout rate to use during layer_preprocess and layer_postprocess
layer_prepostprocess_dropout=0.1,
# broadcast dimensions for layer_prepostprocess_dropout
# a comma-separated list of integers.
# see common_layers.dropout_with_broadcast_dims()
# Change this to "1" to save memory.
layer_prepostprocess_dropout_broadcast_dims="",
# dropout some symbols (set them to 0) before embedding.
symbol_dropout=0.0,
# What type of normalization to use
norm_type="layer", # "batch", layer", "noam", "none".
# epsilon parameter to normalization function
norm_epsilon=1e-6,
# pad vocabularies so that this value divides the vocabulary size.
vocab_divisor=1,
# During training, we drop sequences whose inputs and targets are shorter
# than min_length
min_length=0,
# During training, we drop sequences whose inputs or targets are longer
# than max_length.
# If max_length==0, we use hparams.batch_size instead.
max_length=0,
# Pack examples on the fly.
pack_dataset=False,
# Use custom ops not included in standard tensorflow.
use_custom_ops=True,
# Split targets on the first axis into chunks of this length.
split_targets_chunk_length=0,
split_targets_max_chunks=100,
# Maximum length in the smallest length bucket. Setting this
# flag too high will result in wasteful padding of short
# sequences. Due to some (hopefully) temporary hacks in the
# data reading and batching code, setting this flag too low
# results in a very long batch-shuffling queue.
# TODO(noam): change this once the Datasets API changes.
min_length_bucket=8,
# This flag controls the number of length buckets in the data
# reader. The buckets have maximum lengths from
# min_bucket_length to (max_length or batch_size), increasing
# (approximately) by factors of length_bucket_step.
length_bucket_step=1.1,
# If set to True, drop sequences longer than max_length during eval.
# This affects the validity of the evaluation metrics.
eval_drop_long_sequences=False,
# If True, run the model autoregressively instead of teacher-forcing
# during eval
eval_run_autoregressive=False,
# (For features with symbol modality) If True, share all of the
# input embeddings, target embeddings, and softmax weights.
shared_embedding_and_softmax_weights=False,
# (For features with symbol modality) If True, share the input embeddings
# and target embeddings.
shared_embedding=False,
# (For features with symbol modality) Number to shard embeddings by.
symbol_modality_num_shards=1,
# Feature transformations are optional dictionaries comprising key-value
# pairs of a feature name (str) and its transformation (function). If not
# specified, T2TModel applies a default transformation according to the
# feature's modality. Bottom is applicable to all features; loss, top, and
# weights_fn are only applicable to target features.
# TODO(trandustin): `name` is an optional hparam for legacy reasons,
# defining variable scope names. Remove this hparam in the future.
bottom={},
loss={},
name={},
top={},
weights_fn={},
# The maximum length of "input" sequence.
# Sequences longer than this value will be truncated. 0 or negative values
# mean there is no maximum or truncation.
# You can change this behavior by overriding preprocess_example() method
# in your problem class.
max_input_seq_length=0,
# The maximum length of "target" sequence.
# Sequences longer than this value will be truncated. 0 or negative values
# mean there is no maximum or truncation.
# You can change this behavior by overriding preprocess_example() method
# in your problem class.
max_target_seq_length=0,
# if nonzero, we split the target sequences on example read.
# This is for use with language modeling problems with fixed length
# examples. e.g. The examples may be written with length 65536, but we
# want to split each example into 64 examples of length 1024.
split_to_length=0,
# Video settings: how many frames to batch on input and targets.
video_num_input_frames=1,
video_num_target_frames=1,
# This flag allows us to optionally treat a seq-to-seq problem
# as a language model. Legal values are:
#
# "none" - Do not prepend the inputs to the targets.
# "prepend_inputs_masked_attention"
# replace "targets" in preprocessing with
# tf.concat([inputs, [0], targets], axis=1)
# i.e. we prepend the inputs to the targets with a single
# padding token in between. Use masked self-attention on the
# entire resulting sequence. During training, we compute losses on
# the combined sequence. During eval, we compute the metrics
# on only the targets portion.
# "prepend_inputs_full_attention"
# similar to the previous option except that each
# position in the inputs portion can see the
# entire inputs portion. This removes the challenge of
# autoregressively predicting the inputs portion.
prepend_mode="none",
# Scheduled sampling is interesting for auto-regressive models.
# It runs an additional step using the generated output as autoregressive
      # targets, which can improve the model's inference results later. The
# parameter scheduled_sampling_prob determines with what probability
# will such additional step be run. It's turned off (0.0) by default.
# This probability will exponentially warm up for the number of
# steps determined by scheduled_sampling_warmup_steps.
# The tensor used for the second step will consist of outputs from
# the first step mixed with gold truth, with the proportion of gold
# determined by scheduled_sampling_gold_mixin_prob.
scheduled_sampling_prob=0.0,
scheduled_sampling_warmup_steps=50000,
scheduled_sampling_gold_mixin_prob=0.5,
# This setting controls whether to copy variables around in a daisy chain
# (if true) or leave their placement to TensorFlow. It only affects multi
# device training and mostly should be turned on for performance. One
# exception are recurrent models: with dynamic loops it must be off.
daisy_chain_variables=True,
# If True in PREDICT mode, then last-position-only optimizations are not
# used.
force_full_predict=False,
# Set this for pure model parallelism. There is only one data shard.
no_data_parallelism=False,
# dtype used for activations. - "float32" or "bfloat16"
# activation_dtype="bfloat16" currently only works on TPU.
# It lowers activation-memory usage
# and does not appear to affect quality.
# You can train on TPU with activation_dtype="bfloat16" and evaluate
# on CPU/GPU with activation_dtype="float32"
activation_dtype="float32",
# dtype used for parameters: "float32" or "bfloat16"
# bfloat16 currently only works with optimizer="adafactor".
# The savings in memory allow for training larger models.
# Weights are encoded as (w*128)^8, using pseudostochastic
# roundoff. Initial experiments show that model quality is similar
# to baseline for about 3M training steps, but worse thereafter.
weight_dtype="float32",
# Directory containing a checkpoint for a pretrained model. This will only
# be used if a new run is being started. Parameters not found in the
# pretrained model will be randomly initialized. Superfluous parameters in
# the pretrained model will be ignored.
pretrained_model_dir="",
# Threshold used for two cases: the primary task probability for the
# constant mixing schedule, and the exponential schedule limit for when
# mixing should stop (eg: 0.5 means stop at 50-50 mixing, 0.8 means stop
# at 20-80 mixing for the primary-others mixing case.)
multiproblem_schedule_threshold=0.5,
# For more than 2 tasks, we may want to specify per-task thresholds here.
# In that case, this needs to be a string with as many floating point
# numbers as the number of tasks in the multi-problem. These numbers
# are later normalized to add up to 1 and taken as probabilities for
# each task. This enforces a constant mixing schedule and if this is
# empty then the threshold from above is used for the first task and
# the other tasks get the remaining probability split uniformly.
multiproblem_per_task_threshold="",
# The number of examples at which the proportion of the mixed in datasets
# is multiproblem_schedule_threshold
multiproblem_schedule_max_examples=1e7,
# When training multiproblems, we can mix the data according to different
# schedules. Example: a constant schedule mixing 20-80 between the primary
# and other tasks.
# A list of supported schedules can be found in
# `data_generators.multi_problem.py`.
multiproblem_mixing_schedule="constant",
# A boolean that decides whether input sequence losses and target label
# losses in classification problems should be reweighted.
multiproblem_reweight_label_loss=False,
# How much weight the targets in classification problems receive. Inputs
# receive 1 minus this weight.
multiproblem_label_weight=0.5,
# Hyperparameters for relative attention.
# The maximum relative positional distance to learn an embedding for.
max_relative_position=0,
# If heads share the same relative embedding.
heads_share_relative_embedding=False,
# If relative embedding terms are added to values too.
add_relative_to_values=False,
# If enable the host_call which is executed every training step.
# There could be a performance drop if host_call function is slow and
# cannot keep up with the TPU-side computation.
tpu_enable_host_call=False,
# Pad batch dim of inputs to nearest multiple of batch multiple.
pad_batch=False,
# When true, do not evaluate on the language model data when running the
# multiproblem since it can take a while. If False, set eval_steps to
# something large like 6000 or 10000.
multiproblem_target_eval_only=False,
# Max out the vocab size to a power of 2 for efficiency and to reserve
# extra space in the vocabulary for new task ids and label classes.
multiproblem_vocab_size=-1,
# When using multiproblem with generation tasks, need to truncate the
# inputs and targets manually before concatenating them.
multiproblem_max_input_length=-1,
multiproblem_max_target_length=-1,
# If positive, makes training targets fixed-length in MultiProblem.
multiproblem_fixed_train_length=-1,
# Load weights from a second model. For instance, when using
# pre-trained weights, you might want to initialize the encoder
# and decoder by loading different models.
warm_start_from_second=""
)
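# Hedged example (not part of the original file): new hyperparameter sets are
# normally defined by starting from basic_params1() and overriding a few
# fields before registering them under a new name. The name and the override
# values below are illustrative assumptions.
@registry.register_hparams("basic_1_small_sketch")
def basic_params1_small_sketch():
  """basic_params1 with a smaller model, e.g. for quick smoke tests."""
  hparams = basic_params1()
  hparams.hidden_size = 32
  hparams.num_hidden_layers = 2
  hparams.batch_size = 1024
  return hparams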
class RangedHParams(object):
"""Defines parameter ranges for tuning."""
# From ParameterConfig proto
LINEAR_SCALE = 1
LOG_SCALE = 2
REVERSE_LOG_SCALE = 3
SCALES_STR = {
LINEAR_SCALE: "UNIT_LINEAR_SCALE",
LOG_SCALE: "UNIT_LOG_SCALE",
REVERSE_LOG_SCALE: "UNIT_REVERSE_LOG_SCALE",
}
def __init__(self):
self._categorical_params = {}
self._discrete_params = {}
self._float_params = {}
self._int_params = {}
def _check_reset_and_type_change(self, name, orig_ctr):
"""Check if name is in orig_ctr or in one of the other type containers."""
# Resetting a hyperparameter
if name in orig_ctr:
tf.logging.warning("Overwriting hparam %s", name)
ctr_names = [
(self._categorical_params, "categorical"),
(self._discrete_params, "discrete"),
(self._float_params, "float"),
(self._int_params, "int"),
]
ctrs, names = list(zip(*ctr_names))
orig_name = names[ctrs.index(orig_ctr)]
for ctr, ctr_name in ctr_names:
if ctr is orig_ctr:
continue
# Using a different type for the same hyperparameter name
if name in ctr:
raise ValueError("Setting hyperparameter %s as type %s, but a "
"hyperparemeter of the same name was originally "
"registered as type %s" % (name, ctr_name, orig_name))
def set_categorical(self, name, categories, length=None):
self._check_reset_and_type_change(name, self._categorical_params)
self._categorical_params[name] = (name, categories, length)
def set_discrete(self, name, feasible_points, scale=None, length=None):
self._check_reset_and_type_change(name, self._discrete_params)
self._discrete_params[name] = (name, feasible_points, scale, length)
def set_float(self, name, min_val, max_val, scale=None, length=None):
self._check_reset_and_type_change(name, self._float_params)
self._float_params[name] = (name, min_val, max_val, scale, length)
def set_int(self, name, min_val, max_val, scale=None, length=None):
self._check_reset_and_type_change(name, self._int_params)
self._int_params[name] = (name, min_val, max_val, scale, length)
def fix_select_params(self, hp):
ctrs = [
self._categorical_params, self._discrete_params, self._float_params,
self._int_params
]
    for key, val in hp.values().items():
for ctr in ctrs:
if key in ctr:
del ctr[key]
self.set_discrete(key, [val])
def to_parameter_specs(self, name_prefix=""):
"""To list of dicts suitable for Cloud ML Engine hyperparameter tuning."""
specs = []
for name, categories, _ in self._categorical_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "CATEGORICAL",
"categoricalValues": categories,
}
specs.append(spec)
for name, feasible_points, scale, _ in self._discrete_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "DISCRETE",
"discreteValues": feasible_points,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
for name, min_val, max_val, scale, _ in self._float_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "DOUBLE",
"minValue": min_val,
"maxValue": max_val,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
for name, min_val, max_val, scale, _ in self._int_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "INTEGER",
"minValue": min_val,
"maxValue": max_val,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
return specs
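# Hedged example (illustration only): a small hand-built search space and its
# conversion to Cloud ML Engine parameter specs. The dimensions and ranges
# chosen here are arbitrary.
def _ranged_hparams_specs_sketch():
  rhp = RangedHParams()
  rhp.set_discrete("hidden_size", [64, 128, 256], scale=rhp.LOG_SCALE)
  rhp.set_float("learning_rate", 0.01, 0.5, scale=rhp.LOG_SCALE)
  rhp.set_categorical("optimizer", ["adam", "momentum"])
  # Each entry looks like {"parameterName": "hp_hidden_size", "type": ...}
  return rhp.to_parameter_specs(name_prefix="hp_")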
@registry.register_ranged_hparams("basic1")
def basic_range1(ranged_hparams):
"""A basic range of hyperparameters."""
rhp = ranged_hparams
rhp.set_discrete("batch_size", [1024, 2048, 4096])
rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6])
rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE)
rhp.set_discrete("kernel_height", [1, 3, 5, 7])
rhp.set_discrete("kernel_width", [1, 3, 5, 7])
rhp.set_discrete("compress_steps", [0, 1, 2])
rhp.set_float("dropout", 0.0, 0.5)
rhp.set_float("weight_decay", 1e-4, 10.0, scale=rhp.LOG_SCALE)
rhp.set_float("label_smoothing", 0.0, 0.2)
rhp.set_float("clip_grad_norm", 0.01, 50.0, scale=rhp.LOG_SCALE)
rhp.set_float("learning_rate", 0.005, 2.0, scale=rhp.LOG_SCALE)
rhp.set_categorical("initializer",
["uniform", "orthogonal", "uniform_unit_scaling"])
rhp.set_float("initializer_gain", 0.5, 3.5)
rhp.set_categorical("learning_rate_decay_scheme",
["none", "sqrt", "noam", "exp"])
rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE)
rhp.set_float("optimizer_adam_beta1", 0.8, 0.9)
rhp.set_float("optimizer_adam_beta2", 0.995, 0.999)
rhp.set_categorical(
"optimizer",
["adam", "adagrad", "momentum", "rms_prop", "sgd", "yellow_fin"])
@registry.register_ranged_hparams
def basic_moe_range(rhp):
"""Moe range; when this parameter is unused, it allows us to see variance."""
rhp.set_float("moe_loss_coef", 0.01, 0.02)
|
PypiClean
|
/avalanche_lib-0.4.0-py3-none-any.whl/avalanche/benchmarks/datasets/mini_imagenet/mini_imagenet.py
|
# CSVs are taken from the aforementioned repository and were created by
# Ravi and Larochelle. CSVs are distributed under the following license:
################################################################################
# MIT License
#
# Copyright (c) 2016 Twitter, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
# For the Avalanche data loader adaptation:
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 15-02-2020 #
# Author: Lorenzo Pellegrini #
# E-mail: [email protected] #
# Website: www.continualai.org #
################################################################################
import csv
import glob
from pathlib import Path
from typing import Union, List, Tuple, Dict
from torchvision.datasets.folder import default_loader
from typing_extensions import Literal
import PIL
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset
from torchvision.transforms import Resize
from avalanche.benchmarks.datasets.mini_imagenet.mini_imagenet_data import (
MINI_IMAGENET_WNIDS,
MINI_IMAGENET_WNID_TO_IDX,
MINI_IMAGENET_CLASSES,
MINI_IMAGENET_CLASS_TO_IDX,
)
class MiniImageNetDataset(Dataset):
"""
The MiniImageNet dataset.
This implementation is based on the one from
https://github.com/yaoyao-liu/mini-imagenet-tools. Differently from that,
this class doesn't rely on a pre-generated mini imagenet folder. Instead,
this will use the original ImageNet folder by resizing images on-the-fly.
    The list of included files is the one defined in the CSVs taken from the
aforementioned repository. Those CSVs are generated by Ravi and Larochelle.
See the linked repository for more details.
Exactly as happens with the torchvision :class:`ImageNet` class, textual
class labels (wnids) such as "n02119789", "n02102040", etc. are mapped to
numerical labels based on their ascending order.
All the fields found in the torchvision implementation of the ImageNet
dataset (`wnids`, `wnid_to_idx`, `classes`, `class_to_idx`) are available.
"""
def __init__(
self,
imagenet_path: Union[str, Path],
split: Literal["all", "train", "val", "test"] = "all",
resize_to: Union[int, Tuple[int, int]] = 84,
loader=default_loader,
):
"""
Creates an instance of the Mini ImageNet dataset.
        This dataset allows one to obtain the whole dataset or only specific
        splits. Beware that, when using a split different than "all", the
        returned dataset will contain patterns from a subset of the 100 classes.
        This happens because Mini ImageNet was created with the idea of
        training, validating and testing on disjoint sets of classes.
This implementation uses the filelists provided by
https://github.com/yaoyao-liu/mini-imagenet-tools, which are the ones
generated by Ravi and Larochelle (see the linked repo for more details).
:param imagenet_path: The path to the imagenet folder. This has to be
the path to the full imagenet 2012 folder (plain, not resized).
Only the "train" folder will be used. Because of this, passing the
path to the imagenet 2012 "train" folder is also allowed.
:param split: The split to obtain. Defaults to "all". Valid values are
"all", "train", "val" and "test".
:param resize_to: The size of the output images. Can be an `int` value
or a tuple of two ints. When passing a single `int` value, images
            will be resized by forcing a 1:1 aspect ratio. Defaults to 84,
which means that images will have size 84x84.
"""
self.imagenet_path = MiniImageNetDataset.get_train_path(imagenet_path)
"""
The path to the "train" folder of full imagenet 2012 directory.
"""
self.split: Literal["all", "train", "val", "test"] = split
"""
The required split.
"""
if isinstance(resize_to, int):
resize_to = (resize_to, resize_to)
self.resize_to: Tuple[int, int] = resize_to
"""
The size of the output images, as a two ints tuple.
"""
# TODO: the original loader from yaoyao-liu uses cv2.INTER_AREA
self._transform = Resize(self.resize_to, interpolation=PIL.Image.BILINEAR)
# The following fields are filled by self.prepare_dataset()
self.image_paths: List[str] = []
"""
The paths to images.
"""
self.targets: List[int] = []
"""
The class labels for the patterns. Aligned with the image_paths field.
"""
self.wnids: List[str] = []
"""
The list of wnids (the textual class labels, such as "n02119789").
"""
self.wnid_to_idx: Dict[str, int] = dict()
"""
A dictionary mapping wnids to numerical labels in range [0, 100).
"""
self.classes: List[Tuple[str, ...]] = []
"""
A list mapping numerical labels (the element index) to a tuple of human
readable categories. For instance:
('great grey owl', 'great gray owl', 'Strix nebulosa').
"""
self.class_to_idx: Dict[str, int] = dict()
"""
A dictionary mapping each string of the tuples found in the classes
field to their numerical label. That is, this dictionary contains the
inverse mapping of classes field.
"""
self.loader = loader
if not self.imagenet_path.exists():
raise ValueError("The provided directory does not exist.")
if self.split not in ["all", "train", "val", "test"]:
raise ValueError(
                'Invalid split. Valid values are: "all", "train", "val", "test"'
)
self.prepare_dataset()
super().__init__()
@staticmethod
def get_train_path(root_path: Union[str, Path]):
root_path = Path(root_path)
if (root_path / "train").exists():
return root_path / "train"
return root_path
def prepare_dataset(self):
# Read the CSV containing the file list for this split
images: Dict[str, List[str]] = dict()
csv_dir = Path(__file__).resolve().parent / "csv_files"
if self.split == "all":
considered_csvs = ["train.csv", "val.csv", "test.csv"]
else:
considered_csvs = [self.split + ".csv"]
for csv_name in considered_csvs:
csv_path = str(csv_dir / csv_name)
with open(csv_path) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=",")
next(csv_reader, None) # Skip header
for row in csv_reader:
if row[1] in images.keys():
images[row[1]].append(row[0])
else:
images[row[1]] = [row[0]]
# Fill fields like wnids, wnid_to_idx, etc.
# Those fields have the same meaning of the ones found in the
# torchvision implementation of the ImageNet dataset. Of course some
        # work had to be done to keep these fields aligned for mini imagenet,
# which only contains 100 classes of the original 1000.
#
# wnids are 'n01440764', 'n01443537', 'n01484850', etc.
#
# self.wnid_to_idx is a dict mapping wnids to numerical labels
#
# self.classes is a list mapping numerical labels (the element
# index) to a tuple of human readable categories. For instance:
# ('great grey owl', 'great gray owl', 'Strix nebulosa').
#
# self.class_to_idx is a dict mapping each string of the
# aforementioned tuples to its numerical label. That is, it contains
# the inverse mapping of self.classes.
self.wnids = MINI_IMAGENET_WNIDS
self.wnid_to_idx = MINI_IMAGENET_WNID_TO_IDX
self.classes = MINI_IMAGENET_CLASSES
self.class_to_idx = MINI_IMAGENET_CLASS_TO_IDX
for cls in images.keys():
cls_numerical_label = self.wnid_to_idx[cls]
lst_files = []
for file in glob.glob(str(self.imagenet_path / cls / ("*" + cls + "*"))):
lst_files.append(file)
lst_index = [int(i[i.rfind("_") + 1 : i.rfind(".")]) for i in lst_files]
index_sorted = sorted(range(len(lst_index)), key=lst_index.__getitem__)
index_selected = [
int(i[i.index(".") - 4 : i.index(".")]) for i in images[cls]
]
selected_images = np.array(index_sorted)[np.array(index_selected) - 1]
for i in np.arange(len(selected_images)):
self.image_paths.append(lst_files[selected_images[i]])
self.targets.append(cls_numerical_label)
def __len__(self):
return len(self.targets)
def __getitem__(self, item):
img = self.loader(self.image_paths[item])
img = self._transform(img)
return img, self.targets[item]
__all__ = ["MiniImageNetDataset"]
if __name__ == "__main__":
import matplotlib.pyplot as plt
print("Creating training dataset")
train_dataset = MiniImageNetDataset("/ssd2/datasets/imagenet", split="train")
print("Creating validation dataset")
val_dataset = MiniImageNetDataset("/ssd2/datasets/imagenet", split="val")
print("Creating test dataset")
test_dataset = MiniImageNetDataset("/ssd2/datasets/imagenet", split="test")
print("Training patterns:", len(train_dataset))
print("Validation patterns:", len(val_dataset))
print("Test patterns:", len(test_dataset))
for img_idx, (img, label) in enumerate(train_dataset):
plt.title(
"Class {}, {}\n{}".format(
label,
train_dataset.classes[label],
train_dataset.image_paths[0],
)
)
plt.imshow(img)
plt.show()
print(img)
print(label)
class_to_idx = train_dataset.class_to_idx[train_dataset.classes[label][0]]
assert class_to_idx == label
if img_idx == 2:
break
for img_idx, (img, label) in enumerate(val_dataset):
plt.title(
"Class {}, {}\n{}".format(
label, val_dataset.classes[label], val_dataset.image_paths[0]
)
)
plt.imshow(img)
plt.show()
print(img)
print(label)
class_to_idx = val_dataset.class_to_idx[train_dataset.classes[label][0]]
assert class_to_idx == label
if img_idx == 2:
break
for img_idx, (img, label) in enumerate(test_dataset):
plt.title(
"Class {}, {}\n{}".format(
label, test_dataset.classes[label], test_dataset.image_paths[0]
)
)
plt.imshow(img)
plt.show()
print(img)
print(label)
class_to_idx = test_dataset.class_to_idx[train_dataset.classes[label][0]]
assert class_to_idx == label
if img_idx == 2:
break
|
PypiClean
|
/kolla-ansible-16.1.0.tar.gz/kolla-ansible-16.1.0/specs/containerize-openstack.rst
|
..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
======================
Containerize OpenStack
======================
When upgrading or downgrading OpenStack, it is possible to use package based
management or image-based management. Containerizing OpenStack is meant to
optimize image-based management of OpenStack. Containerizing OpenStack
solves a manageability and availability problem with the current state of the
art deployment systems in OpenStack.
Problem description
===================
Current state of the art deployment systems use either image based or package
based upgrade.
Image based upgrades are utilized by TripleO. When TripleO updates a system,
it creates an image of the entire disk and deploys that rather than just the
parts that compose the OpenStack deployment. This results in significant
loss of availability. Further, running VMs are shut down during the imaging
process. However, image based systems offer atomicity, because all related
software for a service is updated in one atomic action by reimaging the system.
Other systems use package based upgrade. Package based upgrades suffer from
a non-atomic nature. An update may modify one or more RPM packages. The update
process could fail for any number of reasons, and there is no way to back
out the existing changes. Typically in an OpenStack deployment it is
desirable to update a service that does one thing, including its dependencies,
as an atomic unit. Package based upgrades do not offer atomicity.
To solve this problem, containers can be used to provide an image-based update
approach which offers atomic upgrade of a running system with minimal
interruption in service. A rough prototype of compute upgrade [1] shows
approximately a 10 second window of unavailability during a software update.
The prototype keeps virtual machines running without interruption.
Use cases
---------
1. Upgrade or roll back OpenStack deployments atomically. End-user wants to
change the running software versions in her system to deploy a new upstream
release without interrupting service for significant periods.
2. Upgrade OpenStack component by component. End-user wants to upgrade her
system in fine-grained chunks to limit damage from a failed upgrade.
3. Roll back OpenStack component by component. End-user experienced a failed
upgrade and wishes to roll back to the last known good working version.
Proposed change
===============
An OpenStack deployment based on containers is represented as a tree structure,
with each node representing a container set and each leaf representing a
container.
The full properties of a container set:
* A container set is composed of one or more container subsets or one or more
individual containers
* A container set provides a single logical service
* A container set is managed as a unit during startup, shutdown, and version changes
* Each container set is launched together as one unit
* A container set with subsets is launched as one unit including all subsets
* A container set is not atomically managed
* A container set provides appropriate hooks for high availability monitoring
The full properties of a container:
* A container is atomically upgraded or rolled back
* A container includes a monotonically increasing generation number to identify
the container's age in comparison with other containers
* A container has a single responsibility
* A container may be super-privileged when it needs significant access to the
host including:
* the network namespace of the host
* The PID namespace of the host
* The IPC namespace of the host
* Filesystem sharing of the host for persistent storage
* A container may lack any privileges when it does not require significant
access to the host.
* A container should include a check function for evaluating its own health.
* A container will include proper PID 1 handling for reaping exited child
processes.
The top level container sets are composed of:
* database control
* messaging control
* high availability control
* OpenStack interface
* OpenStack control
* OpenStack compute operation
* OpenStack network operation
* OpenStack storage operation
The various container sets are composed in more detail as follows:
* Database control
* galera
* mariadb
* mongodb
* Messaging control
* rabbitmq
* High availability control
* HAProxy
* keepalived
* OpenStack interface
* keystone
* glance-api
* nova-api
* ceilometer-api
* heat-api
* OpenStack control
* glance-controller
* glance-registry
* nova-controller
* nova-conductor
* nova-scheduler
* metadata-service
* cinder-controller
* neutron-controller
* neutron-server
* ceilometer-controller
* ceilometer-alarm
* ceilometer-base
* ceilometer-central
* ceilometer-collector
* ceilometer-notification
* heat-controller
* heat-engine
* OpenStack compute operation
* nova-compute
* nova-libvirt
* neutron-agents-linux-bridge
* neutron-agents-ovs
* OpenStack network operation
* dhcp-agent
* l3-agent
* metadata-agent
* lbaas-agent
* fwaas-agent
* OpenStack storage operation
* Cinder
* Swift
* swift-account
* swift-base
* swift-container
* swift-object
* swift-proxy-server
In order to achieve the desired results, we plan to permit super-privileged
containers. A super-privileged container is defined as any container launched
with the --privileged=true flag to docker that:
* bind-mounts specific security-crucial host operating system directories
with -v. This includes nearly all directories in the filesystem except for
leaf directories with no other host operating system use.
* shares any namespace with the --ipc=host, --pid=host, or --net=host flags
We will not use the Docker EXPOSE operation since all containers will use
--net=host. One motive for using --net=host is that it is inherently simpler.
A different motive for not using EXPOSE is the 20 microsecond penalty
applied to every packet forwarded and returned by docker-proxy.
If EXPOSE functionality is desired, it can be added back by adding the
default list of OpenStack ports to each Dockerfile:
`Firewalls and default ports <https://docs.openstack.org/install-guide/firewalls-default-ports.html>`__.
We will use the docker flag --restart=always to provide some measure of
high availability for the individual containers and ensure they operate
correctly as currently designed.
A host tool will run the container's built-in check script via docker exec on
a pre-configured timer to validate that the container is operational. If the
container does not pass its healthcheck, it should be restarted.
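As a rough illustration only, such a host-side monitor could look like the
following Python loop; the container name, check script path and timer value
are assumptions, not part of this specification::

    import subprocess
    import time

    CONTAINERS = ['nova-compute']   # assumed container name
    CHECK_SCRIPT = '/check.sh'      # assumed path of the built-in check script
    INTERVAL = 30                   # assumed pre-configured timer, in seconds

    def is_healthy(name):
        # "docker exec <name> <script>" exits with 0 when the service is healthy
        return subprocess.call(['docker', 'exec', name, CHECK_SCRIPT]) == 0

    while True:
        for name in CONTAINERS:
            if not is_healthy(name):
                subprocess.call(['docker', 'restart', name])
        time.sleep(INTERVAL)
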
Integration of metadata with fig or a similar single-node Docker orchestration
tool will be implemented. Even though fig executes on a single node, the
containers will be designed to run multi-node, and the deploy tool should take
some form of information to allow it to operate multi-node. The deploy tool
should take a set of key/value pairs as inputs and convert them into the
environment passed to Docker. These key/value pairs could come from a file
or from environment variables. We will not offer integration with multi-node
scheduling or orchestration tools, but instead expect our consumers to manage
each bare metal machine using our fig (or similar) tool integration.
Any community contributions of the metadata required to run these containers
with a multi-node orchestration tool will be warmly received, but such metadata
generally won't be maintained by the core team.
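As a sketch of the key/value handling only, the conversion step could look as
follows; the function name, the sample pair and the exact docker invocation are
illustrative assumptions::

    import subprocess

    def run_container(image, env_pairs):
        # Convert {'RABBITMQ_HOST': '10.0.0.5'} into ['-e', 'RABBITMQ_HOST=10.0.0.5', ...]
        env_args = []
        for key, value in env_pairs.items():
            env_args += ['-e', '{}={}'.format(key, value)]
        return subprocess.call(['docker', 'run', '-d', '--net=host',
                                '--restart=always'] + env_args + [image])
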
The technique for launching the deploy script is not handled by Kolla. This
is a problem for a higher level deployment tool such as TripleO or Fuel to
tackle.
Logs from the individual containers will be retrievable in some consistent way.
Security impact
---------------
Container usage with super-privileged mode may impact security. For
example, when using --net=host mode and bind-mounting /run, which is necessary
for a compute node, it is possible that a compute breakout could corrupt the
host operating system.
To mitigate security concerns, solutions such as SELinux and AppArmor should
be used where appropriate to contain the security privileges of the containers.
Performance Impact
------------------
The upgrade or downgrade process changes from a multi-hour outage to a 10
second outage across the system.
Implementation
==============
Assignee(s)
-----------
Primary assignee:
kolla maintainers
Work Items
----------
1. Container Sets
2. Containers
3. A minimal proof of concept single-node fig deployment integration
4. A minimal proof of concept fig healthchecking integration
Testing
=======
Functional tests will be implemented in the OpenStack check/gating system to
automatically verify that each container passes its functional tests stored
in the project's repositories.
Documentation Impact
====================
The documentation impact is unclear as this project is a proof of concept
with no clear delivery consumer.
References
==========
* [1] https://github.com/sdake/compute-upgrade
/odoo14_addon_auth_oidc-14.0.1.0.2-py3-none-any.whl/odoo/addons/auth_oidc/models/res_users.py
import logging
import requests
from odoo import api, models
from odoo.exceptions import AccessDenied
from odoo.http import request
_logger = logging.getLogger(__name__)
class ResUsers(models.Model):
_inherit = "res.users"
def _auth_oauth_get_tokens_implicit_flow(self, oauth_provider, params):
# https://openid.net/specs/openid-connect-core-1_0.html#ImplicitAuthResponse
return params.get("access_token"), params.get("id_token")
def _auth_oauth_get_tokens_auth_code_flow(self, oauth_provider, params):
# https://openid.net/specs/openid-connect-core-1_0.html#AuthResponse
code = params.get("code")
# https://openid.net/specs/openid-connect-core-1_0.html#TokenRequest
auth = None
if oauth_provider.client_secret:
auth = (oauth_provider.client_id, oauth_provider.client_secret)
response = requests.post(
oauth_provider.token_endpoint,
data=dict(
client_id=oauth_provider.client_id,
grant_type="authorization_code",
code=code,
code_verifier=oauth_provider.code_verifier, # PKCE
redirect_uri=request.httprequest.url_root + "auth_oauth/signin",
),
auth=auth,
)
response.raise_for_status()
response_json = response.json()
# https://openid.net/specs/openid-connect-core-1_0.html#TokenResponse
return response_json.get("access_token"), response_json.get("id_token")
@api.model
def auth_oauth(self, provider, params):
oauth_provider = self.env["auth.oauth.provider"].browse(provider)
if oauth_provider.flow == "id_token":
access_token, id_token = self._auth_oauth_get_tokens_implicit_flow(
oauth_provider, params
)
elif oauth_provider.flow == "id_token_code":
access_token, id_token = self._auth_oauth_get_tokens_auth_code_flow(
oauth_provider, params
)
else:
return super(ResUsers, self).auth_oauth(provider, params)
if not access_token:
_logger.error("No access_token in response.")
raise AccessDenied()
if not id_token:
_logger.error("No id_token in response.")
raise AccessDenied()
validation = oauth_provider._parse_id_token(id_token, access_token)
# required check
if not validation.get("user_id"):
_logger.error("user_id claim not found in id_token (after mapping).")
raise AccessDenied()
# retrieve and sign in user
params["access_token"] = access_token
login = self._auth_oauth_signin(provider, validation, params)
if not login:
raise AccessDenied()
# return user credentials
return (self.env.cr.dbname, login, access_token)
/django-geoexplorer-worldmap-4.0.72.tar.gz/django-geoexplorer-worldmap-4.0.72/geoexplorer-worldmap/static/worldmap_client/externals/geoext/examples/wms-tree.js
/**
 * WMS Capabilities Tree
* ---------------------
* Create a tree loader from WMS capabilities documents.
*/
var tree, mapPanel;
Ext.onReady(function() {
var root = new Ext.tree.AsyncTreeNode({
text: 'GeoServer Demo WMS',
loader: new GeoExt.tree.WMSCapabilitiesLoader({
url: 'data/wmscap.xml',
layerOptions: {buffer: 0, singleTile: true, ratio: 1},
layerParams: {'TRANSPARENT': 'TRUE'},
// customize the createNode method to add a checkbox to nodes
createNode: function(attr) {
attr.checked = attr.leaf ? false : undefined;
return GeoExt.tree.WMSCapabilitiesLoader.prototype.createNode.apply(this, [attr]);
}
})
});
tree = new Ext.tree.TreePanel({
root: root,
region: 'west',
width: 250,
listeners: {
// Add layers to the map when checked, remove when unchecked.
// Note that this does not take care of maintaining the layer
// order on the map.
'checkchange': function(node, checked) {
if (checked === true) {
mapPanel.map.addLayer(node.attributes.layer);
} else {
mapPanel.map.removeLayer(node.attributes.layer);
}
}
}
});
mapPanel = new GeoExt.MapPanel({
zoom: 2,
layers: [
new OpenLayers.Layer.WMS("Global Imagery",
"http://maps.opengeo.org/geowebcache/service/wms",
{layers: "bluemarble"},
{buffer: 0}
)
],
region: 'center'
});
new Ext.Viewport({
layout: "fit",
hideBorders: true,
items: {
layout: "border",
deferredRender: false,
items: [mapPanel, tree, {
contentEl: "desc",
region: "east",
bodyStyle: {"padding": "5px"},
collapsible: true,
collapseMode: "mini",
split: true,
width: 200,
title: "Description"
}]
}
});
});
/altgraph-0.17.3.tar.gz/altgraph-0.17.3/doc/graph.rst
:mod:`altgraph.Graph` --- Basic directional graphs
==================================================
.. module:: altgraph.Graph
:synopsis: Basic directional graphs.
The module :mod:`altgraph.Graph` provides a class :class:`Graph` that
represents a directed graph with *N* nodes and *E* edges.
.. class:: Graph([edges])
Constructs a new empty :class:`Graph` object. If the optional
*edges* parameter is supplied, updates the graph by adding the
specified edges.
All of the elements in *edges* should be tuples with two or three
elements. The first two elements of the tuple are the source and
destination node of the edge, the optional third element is the
edge data. The source and destination nodes are added to the graph
when they aren't already present.
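A short usage sketch, based only on the methods documented in this chapter
(the node values are arbitrary examples)::

    from altgraph.Graph import Graph

    graph = Graph([(1, 2), (2, 3, 'some edge data')])
    graph.add_node(4, node_data={'label': 'isolated'})
    graph.add_edge(3, 4)

    print(graph.number_of_nodes())   # 4
    print(graph.node_list())         # all four visible nodes
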
Node related methods
--------------------
.. method:: Graph.add_node(node[, node_data])
Adds a new node to the graph if it is not already present. The new
node must be a hashable object.
Arbitrary data can be attached to the node via the optional *node_data*
argument.
.. note:: the node also won't be added to the graph when it is
already present but currently hidden.
.. method:: Graph.hide_node(node)
Hides a *node* from the graph. The incoming and outgoing edges of
the node will also be hidden.
Raises :class:`altgraph.GraphError` when the node is not a (visible)
node of the graph.
.. method:: Graph.restore_node(node)
Restores a previously hidden *node*. The incoming and outgoing
edges of the node are also restored.
Raises :class:`altgraph.GraphError` when the node is not a hidden
node of the graph.
.. method:: Graph.restore_all_nodes()
Restores all hidden nodes.
.. method:: Graph.number_of_nodes()
Return the number of visible nodes in the graph.
.. method:: Graph.number_of_hidden_nodes()
Return the number of hidden nodes in the graph.
.. method:: Graph.node_list()
Return a list with all visible nodes in the graph.
.. method:: Graph.hidden_node_list()
Return a list with all hidden nodes in the graph.
.. method:: Graph.node_data(node)
Return the data associated with the *node* when it was
added.
.. method:: Graph.describe_node(node)
Returns *node*, the node's data and the lists of outgoing
and incoming edges for the node.
.. note::
the edge lists should not be modified, doing so
can result in unpredictable behavior.
.. method:: Graph.__contains__(node)
Returns True iff *node* is a node in the graph. This
method is accessed through the *in* operator.
.. method:: Graph.__iter__()
Yield all nodes in the graph.
.. method:: Graph.out_edges(node)
Return the list of outgoing edges for *node*
.. method:: Graph.inc_edges(node)
Return the list of incoming edges for *node*
.. method:: Graph.all_edges(node)
Return the list of incoming and outgoing edges for *node*
.. method:: Graph.out_degree(node)
Return the number of outgoing edges for *node*.
.. method:: Graph.inc_degree(node)
Return the number of incoming edges for *node*.
.. method:: Graph.all_degree(node)
Return the number of edges (incoming or outgoing) for *node*.
Edge related methods
--------------------
.. method:: Graph.add_edge(head_id, tail_id [, edge_data [, create_nodes]])
Adds a directed edge from *head_id* to *tail_id*. Arbitrary data can
be added via *edge_data*. When *create_nodes* is *True* (the default),
*head_id* and *tail_id* will be added to the graph when they aren't
already present.
.. method:: Graph.hide_edge(edge)
Hides an edge from the graph. The edge may be unhidden at some later
time.
.. method:: Graph.restore_edge(edge)
Restores a previously hidden *edge*.
.. method:: Graph.restore_all_edges()
Restore all edges that were hidden before, except for edges
referring to hidden nodes.
.. method:: Graph.edge_by_node(head, tail)
Return the edge ID for an edge from *head* to *tail*,
or :data:`None` when no such edge exists.
.. method:: Graph.edge_by_id(edge)
Return the head and tail of the *edge*
.. method:: Graph.edge_data(edge)
Return the data associated with the *edge*.
.. method:: Graph.update_edge_data(edge, data)
Replace the edge data for *edge* by *data*. Raises
:exc:`KeyError` when the edge does not exist.
.. versionadded:: 0.12
.. method:: Graph.head(edge)
Return the head of an *edge*
.. method:: Graph.tail(edge)
Return the tail of an *edge*
.. method:: Graph.describe_edge(edge)
Return the *edge*, the associated data, its head and tail.
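Continuing the usage sketch from the constructor section::

    edge = graph.edge_by_node(1, 2)
    print(graph.edge_data(edge))                 # data stored on the 1 -> 2 edge
    print(graph.head(edge), graph.tail(edge))    # 1 2
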
.. method:: Graph.number_of_edges()
Return the number of visible edges.
.. method:: Graph.number_of_hidden_edges()
Return the number of hidden edges.
.. method:: Graph.edge_list()
Returns a list with all visible edges in the graph.
.. method:: Graph.hidden_edge_list()
Returns a list with all hidden edges in the graph.
Graph traversal
---------------
.. method:: Graph.out_nbrs(node)
Return a list of all nodes connected by outgoing edges.
.. method:: Graph.inc_nbrs(node)
Return a list of all nodes connected by incoming edges.
.. method:: Graph.all_nbrs(node)
Returns a list of nodes connected by an incoming or outgoing edge.
.. method:: Graph.forw_topo_sort()
Return a list of nodes where the successors (based on outgoing
edges) of any given node appear in the sequence after that node.
.. method:: Graph.back_topo_sort()
Return a list of nodes where the successors (based on incoming
edges) of any given node appear in the sequence after that node.
.. method:: Graph.forw_bfs_subgraph(start_id)
Return a subgraph consisting of the breadth first
reachable nodes from *start_id* based on their outgoing edges.
.. method:: Graph.back_bfs_subgraph(start_id)
Return a subgraph consisting of the breadth first
reachable nodes from *start_id* based on their incoming edges.
.. method:: Graph.iterdfs(start[, end[, forward]])
Yield nodes in a depth first traversal starting at the *start*
node.
If *end* is specified traversal stops when reaching that node.
If forward is True (the default) edges are traversed in forward
direction, otherwise they are traversed in reverse direction.
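For example, with the small graph from the constructor sketch above::

    for node in graph.iterdfs(1):
        print(node)   # nodes reachable from node 1, in depth first order
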
.. method:: Graph.iterdata(start[, end[, forward[, condition]]])
Yield the associated data for nodes in a depth first traversal
starting at the *start* node. This method will not yield values for nodes
without associated data.
If *end* is specified traversal stops when reaching that node.
If *condition* is specified and the condition callable returns
False for the associated data this method will not yield the
associated data and will not follow the edges for the node.
If forward is True (the default) edges are traversed in forward
direction, otherwise they are traversed in reverse direction.
.. method:: Graph.forw_bfs(start[, end])
Returns a list of nodes starting at *start* in some breadth first
search order (following outgoing edges).
When *end* is specified iteration stops at that node.
.. method:: Graph.back_bfs(start[, end])
Returns a list of nodes starting at *start* in some breadth first
search order (following incoming edges).
When *end* is specified iteration stops at that node.
.. method:: Graph.get_hops(start[, end[, forward]])
Computes the hop distance to all nodes centered around a specified node.
First order neighbours are at hop 1, their neighbours are at hop 2 etc.
Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of
the forward parameter.
If the distance between all neighbouring nodes is 1 the hop number
corresponds to the shortest distance between the nodes.
Typical usage::
>>> graph.get_hops(1, 8)
[(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
# node 1 is at 0 hops
# node 2 is at 1 hop
# ...
# node 8 is at 5 hops
Graph statistics
----------------
.. method:: Graph.connected()
Returns True iff every node in the graph can be reached from
every other node.
.. method:: Graph.clust_coef(node)
Returns the local clustering coefficient of *node*.
The local clustering coefficient is the ratio of the actual number
of edges between the neighbours of *node* to the maximum possible number of
edges between those neighbours.
/quest_eras-1.6.6-py3-none-any.whl/quest_eras/es_gui/apps/data_manager/home.py
from __future__ import absolute_import
from functools import partial
import os
from kivy.uix.screenmanager import Screen
from quest_eras.es_gui.resources.widgets.common import NavigationButton
from quest_eras.es_gui.proving_grounds.help_carousel import HelpCarouselModalView
class DataManagerHomeScreen(Screen):
def on_enter(self):
ab = self.manager.nav_bar
ab.build_data_manager_nav_bar()
ab.set_title('Data Manager')
help_button = NavigationButton(
text='help',
on_release=self.open_help_carousel,
)
ab.action_view.add_widget(help_button)
def open_help_carousel(self, *args):
"""
"""
help_carousel_view = HelpCarouselModalView()
help_carousel_view.title.text = "QuESt Data Manager"
slide_01_text = "QuESt Data Manager is a collection of tools for acquiring data for use in other QuESt applications. Data acquired here is stored in a data bank accessible throughout the rest of the QuESt suite.\n\nClick on one of the data tools to get started."
slide_02_text = "Some data sources require registration and credentials. Look out for [font=Modern Pictograms][color=00ADD0]?[/color][/font] symbols for additional information.\n\nThe 'settings' button will open the global settings menu from the navigation bar. Make sure your connection settings are appropriately configured when using QuESt Data Manager as internet access is required to download data."
slide_03_text = "You can save some of your login information or API keys by entering in the 'QuESt Data Manager' tab in the global settings menu. These values will auto-populate the appropriate fields the next time you launch QuESt. These values are also stored in the quest.ini file in the QuESt installation folder.\n\nNote that QuESt does not store passwords."
slide_04_text = "Rate structure tables can be modified before saving. You can change the rate for each period. Click on the [font=Modern Pictograms]D[/font] button to copy the value to the next row."
slide_05_text = "The tables on the right describe the rate schedule for weekdays and weekends. Each row corresponds to a month and each column an hour. The value in each cell matches to a rate in the rates table; you can change each of these as needed. Try using the 'Tab' and arrow keys to navigate each table more quickly.\n\nNote that you cannot change the number of periods."
slide_06_text = "National Solar Radiation Database (NSRDB) weather data is available to download for performance applications. With the longitude and latitude of your desired location, a year of data may be obtained."
slide_07_text = "Once a file name has been entered and the save button is clicked, the EnergyPlus weather converter will run (must have EnergyPlus installed and in the QuESt directory; see Performance Tool for more information). Simply select the location data csv file, ensure the selected output format is EnergyPlus weather format (EPW), and enter the file name to save."
slides = [
(os.path.join("es_gui", "resources", "help_views", "data_manager", "updated_home.png"), slide_01_text),
(os.path.join("es_gui", "resources", "help_views", "data_manager", "02.png"), slide_02_text),
(os.path.join("es_gui", "resources", "help_views", "data_manager", "03.png"), slide_03_text),
(os.path.join("es_gui", "resources", "help_views", "data_manager", "04.png"), slide_04_text),
(os.path.join("es_gui", "resources", "help_views", "data_manager", "05.png"), slide_05_text),
(os.path.join("es_gui", "resources", "help_views", "data_manager", "weather.png"), slide_06_text),
(os.path.join("es_gui", "resources", "help_views", "data_manager", "eplus_converter.png"), slide_07_text)
]
help_carousel_view.add_slides(slides)
help_carousel_view.open()
/echarts-china-counties-pypkg-0.0.2.tar.gz/echarts-china-counties-pypkg-0.0.2/echarts_china_counties_pypkg/resources/echarts-china-counties-js/33dded63bb6c1dd29625c0d00c122409.js
(function (root, factory) {if (typeof define === 'function' && define.amd) {define(['exports', 'echarts'], factory);} else if (typeof exports === 'object' && typeof exports.nodeName !== 'string') {factory(exports, require('echarts'));} else {factory({}, root.echarts);}}(this, function (exports, echarts) {var log = function (msg) {if (typeof console !== 'undefined') {console && console.error && console.error(msg);}};if (!echarts) {log('ECharts is not Loaded');return;}if (!echarts.registerMap) {log('ECharts Map is not loaded');return;}echarts.registerMap('贡觉县', {"type":"FeatureCollection","features":[{"type":"Feature","id":"540322","properties":{"name":"贡觉县","cp":[98.27097,30.860099],"childNum":1},"geometry":{"type":"Polygon","coordinates":["@@@@@IDK@A@@BA@AB@@AB@BABCBABABA@A@A@@@ABA@A@E@A@CAC@A@C@A@CAAAAC@AAAAA@@A@ABG@@@A@EAC@A@A@A@ABA@ABABABCB@BA@C@A@A@AAAACCA@CAA@CAA@A@C@CBABADADCBABADABADCBABA@A@AAIAG@A@C@A@AAC@A@A@@@ABC@ABAAA@@IBGBEAC@EACCGEGCICICGEEIGEAACAE@E@KDMFEDE@GBGBGFONGDYDUA[MeI[C]BQB[J]NQHIFKTEVKP@@SHSEOKGIGUAQPSDMAKYKUESKY@UJQHQF[NOPSPYBUFUCMMWMSGMGIaBcCYGKGQE]IGmJADMPS`OfO`KXCHE^IXOXMTUXY\\QZ@@CDCFEDABCAC@A@ABAJCBABCBGHADCJADABCAGAIAC@ABCDEFEDGDADCD@BBJ@DGHEPABCN@DAB@BAB@B@DBDBBDF@BBDBBFBDBPJTLCT@ZDLDLTZPFLFQNKHOBGHMHMLKVkpMNEXNRNRKVKZORSAUASPOJIHYXSPYVA@WJQJURK\\EXG^WXYJWNS^OLYNg@QRQTWT]RCFADLLLFJNERG^QV[JQDEBQFBRHPFHPRPZANAXWTOLCDCDGFABCBABALABCDA@@B@FBD@@@BA@A@ABC@ABED@B@HBD@BA@GBC@A@@BABAFCLABADAD@B@J@D@@ADABBFAJ@BAB@@IDC@ABGH@B@D@BABA@CHAD@DA@AB@@CBC@E@EAE@G@@@e@k@aEWCKDEHAD@DJJN`LfL`LVLRZThLXFRPTHLJB@@BA@A@CFAB@BABAB@BCDADCBABCDABABAD@BABCFB@@BD@DDB@@@BBA@CBADA@CDCBCDA@AB@D@BBHFH@DD@@BB@@@@BBBBB@@@@@B@BA@@B@BA@@B@@@@@@BBDBBAB@BABABADABABADABA@@D@B@B@BBB@B@D@DAH@LCD@R@N@LAJCBAB@BABCBIBCBC@@BAB@@BBBBDDDFD@A@@B@DCHIB@B@J@D@D@BADABABABCBCBAFEBABEBABAFCFADCBC@E@AD@D@HBDBBBNHBBD@BD@BBFBBBBDDBBJBBD@BAB@BCDABGDABAB@DCBQ`QVG^@XBFDFXPHFLFDBB@D@FCDCBAD@FFB@D@HEDCFAFDLD@ABA@U@CBEDCBEBGLQDCDAFAPIHMBGBCB@P[HILUDARGNFLLX@NILMRQTUHOFIDSLQLIJGJILGJDRCTQFMHOLY@@NKZOXONKNITCP@NAVOLSHSJIREFAFCRALBPHX^JDPF\\DHFDFDPDD`SF@VFrLX@VERIdKVMJC\\AbBTAFAFAF@F@HBFBHAHAH@FDDBJBLAL@FELIHCHCFCNGNEHEFEBCBAD@D@ZHPJDDBB@FCFCH@DJ@HAFC@@BEBIDCPGJEHA@CB@@A@@BCB@@CBGBCD@BCFEBC@C@E@EAK@AEICECEAI@IAE@@@CAIAEAA@ACEAECACECEECCEAGACAE@C@A@@AAACAA@C@@@A@@@AB@BABCBA@C@C@C@ABABCDCDCLG@@DCDABCDADADADAD@DAB@BABAF@FADADAB@B@@A@CBA@ABCBADABADAB@DAHCDADCJCFABABABADGDG@CBA@E@EBA@AB@BA@@FAD@B@F@F@B@B@BABAFCBABE@CAE@E@CDC@E@CBEDAFBHCDE@@JGDCDCBEFCJCD@H@H@DAHCLCNCHCJALADCFE@@BO@E@EBE@EBCBEBCBIBCAGAECAGCAAGAI@E@GACA@@AC@CBC@CCEACGCIAC@G@KEA@EA@@E@CEAC@EBG@@@E@ADIDEBE@@DE@C@CBG@EBE@CBE@@DEDCBCDC@ABCBC@C@C@EBE@E@CCCACCC@@BE@CBEDCDADCBABABAF@DAD@BAFE@@@CBC@CACAECCACCEEEGECC@@AADK@@BC@G@EFAFCDCBA@CAE@C@AACAAAC@A@CACAAAEBE@E@@BC@ABADEBGBC@@DADEDCBABE@@@A@@@GAAAC@AAABC@C@ABC@@@CBA@ADCBABABEBADEB@@CBADE@@BABEDADEDE@C@E@C@C@G@E@A@E@A@AA@AC@@AABC@@@E@ABC"],"encodeOffsets":[[101340,31150]]}}],"UTF8Encoding":true});}));
/aliyun-python-sdk-unimkt-2.4.8.tar.gz/aliyun-python-sdk-unimkt-2.4.8/aliyunsdkunimkt/request/v20181212/ListRuleAreaRequest.py
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ListRuleAreaRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ListRuleArea')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AdSlotType(self): # String
return self.get_query_params().get('AdSlotType')
def set_AdSlotType(self, AdSlotType): # String
self.add_query_param('AdSlotType', AdSlotType)
def get_RuleName(self): # String
return self.get_query_params().get('RuleName')
def set_RuleName(self, RuleName): # String
self.add_query_param('RuleName', RuleName)
def get_UserId(self): # String
return self.get_query_params().get('UserId')
def set_UserId(self, UserId): # String
self.add_query_param('UserId', UserId)
def get_OriginSiteUserId(self): # String
return self.get_query_params().get('OriginSiteUserId')
def set_OriginSiteUserId(self, OriginSiteUserId): # String
self.add_query_param('OriginSiteUserId', OriginSiteUserId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_AppName(self): # String
return self.get_query_params().get('AppName')
def set_AppName(self, AppName): # String
self.add_query_param('AppName', AppName)
def get_TenantId(self): # String
return self.get_query_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_query_param('TenantId', TenantId)
def get_AdSlotId(self): # String
return self.get_query_params().get('AdSlotId')
def set_AdSlotId(self, AdSlotId): # String
self.add_query_param('AdSlotId', AdSlotId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_EndCreateTime(self): # Long
return self.get_query_params().get('EndCreateTime')
def set_EndCreateTime(self, EndCreateTime): # Long
self.add_query_param('EndCreateTime', EndCreateTime)
def get_Business(self): # String
return self.get_query_params().get('Business')
def set_Business(self, Business): # String
self.add_query_param('Business', Business)
def get_RuleType(self): # String
return self.get_query_params().get('RuleType')
def set_RuleType(self, RuleType): # String
self.add_query_param('RuleType', RuleType)
def get_MediaId(self): # String
return self.get_query_params().get('MediaId')
def set_MediaId(self, MediaId): # String
self.add_query_param('MediaId', MediaId)
def get_MediaStatus(self): # String
return self.get_query_params().get('MediaStatus')
def set_MediaStatus(self, MediaStatus): # String
self.add_query_param('MediaStatus', MediaStatus)
def get_Environment(self): # String
return self.get_query_params().get('Environment')
def set_Environment(self, Environment): # String
self.add_query_param('Environment', Environment)
def get_StartCreateTime(self): # Long
return self.get_query_params().get('StartCreateTime')
def set_StartCreateTime(self, StartCreateTime): # Long
self.add_query_param('StartCreateTime', StartCreateTime)
def get_UserSite(self): # String
return self.get_query_params().get('UserSite')
def set_UserSite(self, UserSite): # String
self.add_query_param('UserSite', UserSite)
def get_RuleId(self): # String
return self.get_query_params().get('RuleId')
def set_RuleId(self, RuleId): # String
self.add_query_param('RuleId', RuleId)
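# A minimal usage sketch (credentials, region and parameter values below are
# placeholders; AcsClient is the generic client from aliyunsdkcore used with
# all requests in this SDK):
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = ListRuleAreaRequest()
#     request.set_PageNumber(1)
#     request.set_PageSize(20)
#     response = client.do_action_with_exception(request)
#     print(response)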
/onegov.feriennet-1.6.16-py3-none-any.whl/onegov/feriennet/views/notification_template.py
from collections import OrderedDict
from onegov.activity import PeriodCollection
from onegov.core.html import html_to_text
from onegov.core.security import Secret
from onegov.core.templates import render_template
from onegov.feriennet import _, FeriennetApp
from onegov.feriennet.collections import NotificationTemplateCollection
from onegov.feriennet.forms import NotificationTemplateForm
from onegov.feriennet.forms import NotificationTemplateSendForm
from onegov.feriennet.layout import NotificationTemplateCollectionLayout
from onegov.feriennet.layout import NotificationTemplateLayout
from onegov.feriennet.models import NotificationTemplate
from onegov.feriennet.models.notification_template import TemplateVariables
from onegov.org.elements import DeleteLink, Link
from onegov.org.layout import DefaultMailLayout
from sedate import utcnow
def get_variables(request):
period = PeriodCollection(request.session).active()
variables = TemplateVariables(request, period).bound
return OrderedDict(
(token, variables[token].__doc__) for token in sorted(variables)
)
@FeriennetApp.html(
model=NotificationTemplateCollection,
permission=Secret,
template='notification_templates.pt')
def view_notification_templates(self, request):
layout = NotificationTemplateCollectionLayout(self, request)
def get_links(notification):
yield Link(
text=_("Mailing"),
url=request.link(notification, 'send')
)
yield Link(
text=_("Edit"),
url=request.link(notification, 'edit')
)
yield DeleteLink(
text=_("Delete"),
url=layout.csrf_protected_url(request.link(notification)),
confirm=_('Do you really want to delete "${title}"?', mapping={
'title': notification.subject,
}),
target='#{}'.format(notification.id.hex),
yes_button_text=_("Delete Notification Template")
)
return {
'title': _("Notification Templates"),
'layout': layout,
'notifications': self.query(),
'get_links': get_links,
}
@FeriennetApp.form(
model=NotificationTemplateCollection,
permission=Secret,
template='notification_template_form.pt',
name='new',
form=NotificationTemplateForm)
def view_notification_template_form(self, request, form):
title = _("New Notification Template")
if form.submitted(request):
self.add(
subject=form.subject.data,
text=form.text.data
)
request.success(_("Successfully added a new notification template"))
return request.redirect(request.link(self))
return {
'title': title,
'layout': NotificationTemplateCollectionLayout(self, request, title),
'form': form,
'variables': get_variables(request),
}
@FeriennetApp.form(
model=NotificationTemplate,
permission=Secret,
template='notification_template_form.pt',
name='edit',
form=NotificationTemplateForm)
def edit_notification(self, request, form):
if form.submitted(request):
form.populate_obj(self)
request.success(_("Your changes were saved"))
return request.redirect(
request.class_link(NotificationTemplateCollection))
elif not request.POST:
form.process(obj=self)
layout = NotificationTemplateLayout(self, request)
return {
'title': _("Edit"),
'layout': layout,
'form': form,
'variables': get_variables(request)
}
@FeriennetApp.view(
model=NotificationTemplate,
permission=Secret,
request_method='DELETE')
def delete_notification(self, request):
request.assert_valid_csrf_token()
NotificationTemplateCollection(request.session).delete(self)
@request.after
def remove_target(response):
response.headers.add('X-IC-Remove', 'true')
@FeriennetApp.form(
model=NotificationTemplate,
permission=Secret,
template='notification_template_send_form.pt',
name='send',
form=NotificationTemplateSendForm)
def handle_send_notification(self, request, form):
period = PeriodCollection(request.session).active()
variables = TemplateVariables(request, period)
layout = NotificationTemplateLayout(self, request)
if form.submitted(request):
recipients = form.recipients
if not recipients:
request.alert(_("There are no recipients matching the selection"))
else:
current = request.current_username
if current not in recipients:
recipients.add(current)
subject = variables.render(self.subject)
content = render_template('mail_notification.pt', request, {
'layout': DefaultMailLayout(self, request),
'title': subject,
'notification': variables.render(self.text)
})
plaintext = html_to_text(content)
for recipient in recipients:
request.app.send_marketing_email(
receivers=(recipient, ),
subject=subject,
content=content,
plaintext=plaintext,
)
self.last_sent = utcnow()
request.success(_(
"Successfully sent the e-mail to ${count} recipients",
mapping={
'count': len(recipients)
}
))
return request.redirect(
request.class_link(NotificationTemplateCollection))
return {
'title': _("Mailing"),
'layout': layout,
'form': form,
'preview_subject': variables.render(self.subject),
'preview_body': variables.render(self.text),
'edit_link': request.return_here(request.link(self, 'edit')),
'button_text': _("Send E-Mail Now")
}
/cn-zipline-live-async-1.3.2.tar.gz/cn-zipline-live-async-1.3.2/zipline/utils/calendars/exchange_calendar_bmf.py
from datetime import time
from pandas.tseries.holiday import (
Holiday,
Easter,
Day,
GoodFriday,
)
from pytz import timezone
from .trading_calendar import (
TradingCalendar,
FRIDAY,
HolidayCalendar)
# Universal Confraternization (new years day)
ConfUniversal = Holiday(
'Dia da Confraternizacao Universal',
month=1,
day=1,
)
# Sao Paulo city birthday
AniversarioSaoPaulo = Holiday(
'Aniversario de Sao Paulo',
month=1,
day=25,
)
# Carnival Monday
CarnavalSegunda = Holiday(
'Carnaval Segunda',
month=1,
day=1,
offset=[Easter(), Day(-48)]
)
# Carnival Tuesday
CarnavalTerca = Holiday(
'Carnaval Terca',
month=1,
day=1,
offset=[Easter(), Day(-47)]
)
# Ash Wednesday (short day)
QuartaCinzas = Holiday(
'Quarta Cinzas',
month=1,
day=1,
offset=[Easter(), Day(-46)]
)
# Good Friday
SextaPaixao = GoodFriday
# Feast of the Most Holy Body of Christ
CorpusChristi = Holiday(
'Corpus Christi',
month=1,
day=1,
offset=[Easter(), Day(60)]
)
# Tiradentes Memorial
Tiradentes = Holiday(
'Tiradentes',
month=4,
day=21,
)
# Labor Day
DiaTrabalho = Holiday(
'Dia Trabalho',
month=5,
day=1,
)
# Constitutionalist Revolution
Constitucionalista = Holiday(
'Constitucionalista',
month=7,
day=9,
start_date='1997-01-01'
)
# Independence Day
Independencia = Holiday(
'Independencia',
month=9,
day=7,
)
# Our Lady of Aparecida
Aparecida = Holiday(
'Nossa Senhora de Aparecida',
month=10,
day=12,
)
# All Souls' Day
Finados = Holiday(
'Dia dos Finados',
month=11,
day=2,
)
# Proclamation of the Republic
ProclamacaoRepublica = Holiday(
'Proclamacao da Republica',
month=11,
day=15,
)
# Day of Black Awareness
ConscienciaNegra = Holiday(
'Dia da Consciencia Negra',
month=11,
day=20,
start_date='2004-01-01'
)
# Christmas Eve
VesperaNatal = Holiday(
'Vespera Natal',
month=12,
day=24,
)
# Christmas
Natal = Holiday(
'Natal',
month=12,
day=25,
)
# New Year's Eve
AnoNovo = Holiday(
'Ano Novo',
month=12,
day=31,
)
# New Year's Eve falls on Saturday
AnoNovoSabado = Holiday(
'Ano Novo Sabado',
month=12,
day=30,
days_of_week=(FRIDAY,),
)
class BMFExchangeCalendar(TradingCalendar):
"""
Exchange calendar for BM&F BOVESPA
Open Time: 10:00 AM, Brazil/Sao Paulo
Close Time: 4:00 PM, Brazil/Sao Paulo
Regularly-Observed Holidays:
- Universal Confraternization (New year's day, Jan 1)
- Sao Paulo City Anniversary (Jan 25)
- Carnaval Monday (48 days before Easter)
- Carnaval Tuesday (47 days before Easter)
- Passion of the Christ (Good Friday, 2 days before Easter)
- Corpus Christi (60 days after Easter)
- Tiradentes (April 21)
- Labor day (May 1)
- Constitutionalist Revolution (July 9 after 1997)
- Independence Day (September 7)
- Our Lady of Aparecida Feast (October 12)
- All Souls' Day (November 2)
- Proclamation of the Republic (November 15)
- Day of Black Awareness (November 20 after 2004)
- Christmas (December 24 and 25)
- Day before New Year's Eve (December 30 if NYE falls on a Saturday)
- New Year's Eve (December 31)
"""
@property
def name(self):
return "BMF"
@property
def tz(self):
return timezone("America/Sao_Paulo")
@property
def open_time(self):
return time(10, 1)
@property
def close_time(self):
return time(16)
@property
def regular_holidays(self):
return HolidayCalendar([
ConfUniversal,
AniversarioSaoPaulo,
CarnavalSegunda,
CarnavalTerca,
SextaPaixao,
CorpusChristi,
Tiradentes,
DiaTrabalho,
Constitucionalista,
Independencia,
Aparecida,
Finados,
ProclamacaoRepublica,
ConscienciaNegra,
VesperaNatal,
Natal,
AnoNovo,
AnoNovoSabado,
])
@property
def special_opens(self):
return [
(time(13, 1), HolidayCalendar([QuartaCinzas]))
]
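# A minimal usage sketch (the date range below is an arbitrary example):
#
#     import pandas as pd
#
#     cal = BMFExchangeCalendar()
#     holidays = cal.regular_holidays.holidays(
#         pd.Timestamp('2017-01-01'), pd.Timestamp('2017-12-31'))
#     print(cal.name, cal.tz)
#     print(holidays)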
/cpskin.core-0.14.12.tar.gz/cpskin.core-0.14.12/cpskin/core/viewlets/footer.py
from Acquisition import aq_inner
from plone import api
from plone.app.layout.viewlets.common import ViewletBase
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
class CPSkinFooterSitemapViewlet(ViewletBase):
render = ViewPageTemplateFile('footersitemap.pt')
def showSiteMap(self):
return api.portal.get_registry_record(
'cpskin.core.interfaces.ICPSkinSettings.show_footer_sitemap')
def createSiteMap(self):
context = aq_inner(self.context)
# take the 2 first levels of the site that respect the navigation
# strategy
portal_catalog = getToolByName(context, 'portal_catalog')
navtreeProps = getToolByName(
context, 'portal_properties').navtree_properties
queryDict = {}
navigation_root = api.portal.get_navigation_root(context)
queryDict['path'] = {
'query': '/'.join(navigation_root.getPhysicalPath()), 'depth': 1}
if navtreeProps.enable_wf_state_filtering:
queryDict['review_state'] = navtreeProps.wf_states_to_show
queryDict['sort_on'] = 'getObjPositionInParent'
themes = portal_catalog(queryDict)
res = []
metaTypesNotToList = navtreeProps.metaTypesNotToList
idsNotToList = navtreeProps.idsNotToList
for theme in themes:
if theme.meta_type not in metaTypesNotToList and \
theme.id not in idsNotToList and not theme.exclude_from_nav:
themeRes = {'theme': theme, 'children': []}
# do a second catalog_search by theme
queryDict['path'] = {'query': theme.getPath(), 'depth': 1}
children = portal_catalog(queryDict)
for child in children:
if child.meta_type not in metaTypesNotToList and \
child.id not in idsNotToList and not child.exclude_from_nav:
themeRes['children'].append(child)
res.append(themeRes)
return res
def getFooterText(self):
navigation_root = api.portal.get_navigation_root(self.context)
footer_static = getattr(navigation_root, 'footer-static', None)
text = ''
if footer_static is None:
return
if footer_static.Language() == self.context.Language():
if getattr(footer_static, 'text', None):
text = footer_static.text.raw
return text
if getattr(footer_static, 'getTranslation', None):
lang = self.context.REQUEST.get('LANGUAGE', 'fr')
footer_static = footer_static.getTranslation(lang)
if getattr(footer_static, 'text', None):
text = footer_static.text.raw
return text
/spyder-terminal-1.2.2.tar.gz/spyder-terminal-1.2.2/spyder_terminal/server/static/components/@webassemblyjs/helper-wasm-section/lib/resize.js
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.resizeSectionByteSize = resizeSectionByteSize;
exports.resizeSectionVecSize = resizeSectionVecSize;
var _wasmGen = require("@webassemblyjs/wasm-gen");
var _ast = require("@webassemblyjs/ast");
var _helperBuffer = require("@webassemblyjs/helper-buffer");
function resizeSectionByteSize(ast, uint8Buffer, section, deltaBytes) {
var sectionMetadata = (0, _ast.getSectionMetadata)(ast, section);
if (typeof sectionMetadata === "undefined") {
throw new Error("Section metadata not found");
}
if (typeof sectionMetadata.size.loc === "undefined") {
throw new Error("SectionMetadata " + section + " has no loc");
} // keep old node location to be overridden
var start = sectionMetadata.size.loc.start.column;
var end = sectionMetadata.size.loc.end.column;
var newSectionSize = sectionMetadata.size.value + deltaBytes;
var newBytes = (0, _wasmGen.encodeU32)(newSectionSize);
/**
* update AST
*/
sectionMetadata.size.value = newSectionSize;
var oldu32EncodedLen = end - start;
var newu32EncodedLen = newBytes.length; // the new u32 has a different encoded length
if (newu32EncodedLen !== oldu32EncodedLen) {
var deltaInSizeEncoding = newu32EncodedLen - oldu32EncodedLen;
sectionMetadata.size.loc.end.column = start + newu32EncodedLen;
deltaBytes += deltaInSizeEncoding; // move the vec size pointer since the section size is now smaller
sectionMetadata.vectorOfSize.loc.start.column += deltaInSizeEncoding;
sectionMetadata.vectorOfSize.loc.end.column += deltaInSizeEncoding;
} // Once we hit our section, everything that comes after needs to be shifted by the delta
var encounteredSection = false;
(0, _ast.traverse)(ast, {
SectionMetadata: function SectionMetadata(path) {
if (path.node.section === section) {
encounteredSection = true;
return;
}
if (encounteredSection === true) {
(0, _ast.shiftSection)(ast, path.node, deltaBytes);
}
}
});
return (0, _helperBuffer.overrideBytesInBuffer)(uint8Buffer, start, end, newBytes);
}
function resizeSectionVecSize(ast, uint8Buffer, section, deltaElements) {
var sectionMetadata = (0, _ast.getSectionMetadata)(ast, section);
if (typeof sectionMetadata === "undefined") {
throw new Error("Section metadata not found");
}
if (typeof sectionMetadata.vectorOfSize.loc === "undefined") {
throw new Error("SectionMetadata " + section + " has no loc");
} // Section has no vector
if (sectionMetadata.vectorOfSize.value === -1) {
return uint8Buffer;
} // keep old node location to be overridden
var start = sectionMetadata.vectorOfSize.loc.start.column;
var end = sectionMetadata.vectorOfSize.loc.end.column;
var newValue = sectionMetadata.vectorOfSize.value + deltaElements;
var newBytes = (0, _wasmGen.encodeU32)(newValue); // Update AST
sectionMetadata.vectorOfSize.value = newValue;
sectionMetadata.vectorOfSize.loc.end.column = start + newBytes.length;
return (0, _helperBuffer.overrideBytesInBuffer)(uint8Buffer, start, end, newBytes);
}
/galileo_socketio-4.0.1.1-py3-none-any.whl/socketio/asyncio_client.py
import asyncio
import logging
import random
import engineio
import six
from . import client
from . import exceptions
from . import packet
default_logger = logging.getLogger('socketio.client')
class AsyncClient(client.Client):
"""A Socket.IO client for asyncio.
This class implements a fully compliant Socket.IO web client with support
for websocket and long-polling transports.
:param reconnection: ``True`` if the client should automatically attempt to
reconnect to the server after an interruption, or
``False`` to not reconnect. The default is ``True``.
:param reconnection_attempts: How many reconnection attempts to issue
before giving up, or 0 for infinite attempts.
The default is 0.
:param reconnection_delay: How long to wait in seconds before the first
reconnection attempt. Each successive attempt
doubles this delay.
:param reconnection_delay_max: The maximum delay between reconnection
attempts.
:param randomization_factor: Randomization amount for each delay between
reconnection attempts. The default is 0.5,
which means that each delay is randomly
adjusted by +/- 50%.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
The Engine.IO configuration supports the following settings:
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def is_asyncio_based(self):
return True
async def connect(self, url, headers={}, transports=None,
namespaces=None, socketio_path='socket.io'):
"""Connect to a Socket.IO server.
:param url: The URL of the Socket.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param namespaces: The list of custom namespaces to connect, in
addition to the default namespace. If not given,
the namespace list is obtained from the registered
event handlers.
:param socketio_path: The endpoint where the Socket.IO server is
installed. The default value is appropriate for
most cases.
Note: this method is a coroutine.
Example usage::
sio = socketio.Client()
sio.connect('http://localhost:5000')
"""
self.connection_url = url
self.connection_headers = headers
self.connection_transports = transports
self.connection_namespaces = namespaces
self.socketio_path = socketio_path
if namespaces is None:
namespaces = set(self.handlers.keys()).union(
set(self.namespace_handlers.keys()))
elif isinstance(namespaces, six.string_types):
namespaces = [namespaces]
self.connection_namespaces = namespaces
self.namespaces = [n for n in namespaces if n != '/']
try:
await self.eio.connect(url, headers=headers,
transports=transports,
engineio_path=socketio_path)
except engineio.exceptions.ConnectionError as exc:
six.raise_from(exceptions.ConnectionError(exc.args[0]), None)
async def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
Note: this method is a coroutine.
"""
while True:
await self.eio.wait()
await self.sleep(1) # give the reconnect task time to start up
if not self._reconnect_task:
break
await self._reconnect_task
if self.eio.state != 'connected':
break
async def emit(self, event, data=None, namespace=None, callback=None):
"""Emit a custom event to one or more connected clients.
:param event: The event name. It can be any string. The event names
``'connect'``, ``'message'`` and ``'disconnect'`` are
reserved and should not be used.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The arguments
that will be passed to the function are those provided
by the client. Callback functions can only be used
when addressing an individual client.
Note: this method is a coroutine.
"""
namespace = namespace or '/'
self.logger.info('Emitting event "%s" [%s]', event, namespace)
if callback is not None:
id = self._generate_ack_id(namespace, callback)
else:
id = None
if six.PY2 and not self.binary:
binary = False # pragma: nocover
else:
binary = None
# tuples are expanded to multiple arguments, everything else is sent
# as a single argument
if isinstance(data, tuple):
data = list(data)
elif data is not None:
data = [data]
else:
data = []
await self._send_packet(packet.Packet(
packet.EVENT, namespace=namespace, data=[event] + data, id=id,
binary=binary))
async def send(self, data, namespace=None, callback=None):
"""Send a message to one or more connected clients.
This function emits an event with the name ``'message'``. Use
:func:`emit` to issue custom event names.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The arguments
that will be passed to the function are those provided
by the client. Callback functions can only be used
when addressing an individual client.
Note: this method is a coroutine.
"""
await self.emit('message', data=data, namespace=namespace,
callback=callback)
async def call(self, event, data=None, namespace=None, timeout=60):
"""Emit a custom event to a client and wait for the response.
:param event: The event name. It can be any string. The event names
``'connect'``, ``'message'`` and ``'disconnect'`` are
reserved and should not be used.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param timeout: The waiting timeout. If the timeout is reached before
the client acknowledges the event, then a
``TimeoutError`` exception is raised.
Note: this method is a coroutine.
"""
callback_event = self.eio.create_event()
callback_args = []
def event_callback(*args):
callback_args.append(args)
callback_event.set()
await self.emit(event, data=data, namespace=namespace,
callback=event_callback)
try:
await asyncio.wait_for(callback_event.wait(), timeout)
except asyncio.TimeoutError:
six.raise_from(exceptions.TimeoutError(), None)
return callback_args[0] if len(callback_args[0]) > 1 \
else callback_args[0][0] if len(callback_args[0]) == 1 \
else None
async def disconnect(self):
"""Disconnect from the server.
Note: this method is a coroutine.
"""
# here we just request the disconnection
# later in _handle_eio_disconnect we invoke the disconnect handler
for n in self.namespaces:
await self._send_packet(packet.Packet(packet.DISCONNECT,
namespace=n))
await self._send_packet(packet.Packet(
packet.DISCONNECT, namespace='/'))
await self.eio.disconnect(abort=True)
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
return self.eio.start_background_task(target, *args, **kwargs)
async def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
Note: this method is a coroutine.
"""
return await self.eio.sleep(seconds)
async def _send_packet(self, pkt):
"""Send a Socket.IO packet to the server."""
encoded_packet = pkt.encode()
if isinstance(encoded_packet, list):
binary = False
for ep in encoded_packet:
await self.eio.send(ep, binary=binary)
binary = True
else:
await self.eio.send(encoded_packet, binary=False)
async def _handle_connect(self, namespace):
namespace = namespace or '/'
self.logger.info('Namespace {} is connected'.format(namespace))
await self._trigger_event('connect', namespace=namespace)
if namespace == '/':
for n in self.namespaces:
await self._send_packet(packet.Packet(packet.CONNECT,
namespace=n))
elif namespace not in self.namespaces:
self.namespaces.append(namespace)
async def _handle_disconnect(self, namespace):
namespace = namespace or '/'
await self._trigger_event('disconnect', namespace=namespace)
if namespace in self.namespaces:
self.namespaces.remove(namespace)
async def _handle_event(self, namespace, id, data):
namespace = namespace or '/'
self.logger.info('Received event "%s" [%s]', data[0], namespace)
r = await self._trigger_event(data[0], namespace, *data[1:])
if id is not None:
# send ACK packet with the response returned by the handler
# tuples are expanded as multiple arguments
if r is None:
data = []
elif isinstance(r, tuple):
data = list(r)
else:
data = [r]
if six.PY2 and not self.binary:
binary = False # pragma: nocover
else:
binary = None
await self._send_packet(packet.Packet(
packet.ACK, namespace=namespace, id=id, data=data,
binary=binary))
async def _handle_ack(self, namespace, id, data):
namespace = namespace or '/'
self.logger.info('Received ack [%s]', namespace)
callback = None
try:
callback = self.callbacks[namespace][id]
except KeyError:
# if we get an unknown callback we just ignore it
self.logger.warning('Unknown callback received, ignoring.')
else:
del self.callbacks[namespace][id]
if callback is not None:
if asyncio.iscoroutinefunction(callback):
await callback(*data)
else:
callback(*data)
def _handle_error(self, namespace):
namespace = namespace or '/'
self.logger.info('Connection to namespace {} was rejected'.format(
namespace))
if namespace in self.namespaces:
self.namespaces.remove(namespace)
async def _trigger_event(self, event, namespace, *args):
"""Invoke an application event handler."""
# first see if we have an explicit handler for the event
if namespace in self.handlers and event in self.handlers[namespace]:
if asyncio.iscoroutinefunction(self.handlers[namespace][event]):
try:
ret = await self.handlers[namespace][event](*args)
except asyncio.CancelledError: # pragma: no cover
ret = None
else:
ret = self.handlers[namespace][event](*args)
return ret
# or else, forward the event to a namespace handler if one exists
elif namespace in self.namespace_handlers:
return await self.namespace_handlers[namespace].trigger_event(
event, *args)
async def _handle_reconnect(self):
attempt_count = 0
current_delay = self.reconnection_delay
while True:
delay = current_delay
current_delay *= 2
if delay > self.reconnection_delay_max:
delay = self.reconnection_delay_max
delay += self.randomization_factor * (2 * random.random() - 1)
self.logger.info(
'Connection failed, new attempt in {:.02f} seconds'.format(
delay))
await self.sleep(delay)
attempt_count += 1
try:
await self.connect(self.connection_url,
headers=self.connection_headers,
transports=self.connection_transports,
namespaces=self.connection_namespaces,
socketio_path=self.socketio_path)
except (exceptions.ConnectionError, ValueError):
pass
else:
self.logger.info('Reconnection successful')
self._reconnect_task = None
break
if self.reconnection_attempts and \
attempt_count >= self.reconnection_attempts:
self.logger.info(
'Maximum reconnection attempts reached, giving up')
break
def _handle_eio_connect(self): # pragma: no cover
"""Handle the Engine.IO connection event."""
self.logger.info('Engine.IO connection established')
async def _handle_eio_message(self, data):
"""Dispatch Engine.IO messages."""
if self._binary_packet:
pkt = self._binary_packet
if pkt.add_attachment(data):
self._binary_packet = None
if pkt.packet_type == packet.BINARY_EVENT:
await self._handle_event(pkt.namespace, pkt.id, pkt.data)
else:
await self._handle_ack(pkt.namespace, pkt.id, pkt.data)
else:
pkt = packet.Packet(encoded_packet=data)
if pkt.packet_type == packet.CONNECT:
await self._handle_connect(pkt.namespace)
elif pkt.packet_type == packet.DISCONNECT:
await self._handle_disconnect(pkt.namespace)
elif pkt.packet_type == packet.EVENT:
await self._handle_event(pkt.namespace, pkt.id, pkt.data)
elif pkt.packet_type == packet.ACK:
await self._handle_ack(pkt.namespace, pkt.id, pkt.data)
elif pkt.packet_type == packet.BINARY_EVENT or \
pkt.packet_type == packet.BINARY_ACK:
self._binary_packet = pkt
elif pkt.packet_type == packet.ERROR:
self._handle_error(pkt.namespace)
else:
raise ValueError('Unknown packet type.')
async def _handle_eio_disconnect(self):
"""Handle the Engine.IO disconnection event."""
self.logger.info('Engine.IO connection dropped')
for n in self.namespaces:
await self._trigger_event('disconnect', namespace=n)
await self._trigger_event('disconnect', namespace='/')
self.callbacks = {}
self._binary_packet = None
if self.eio.state == 'connected' and self.reconnection:
self._reconnect_task = self.start_background_task(
self._handle_reconnect)
def _engineio_client_class(self):
return engineio.AsyncClient
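# --- Illustrative usage sketch (editor's addition, not part of the module above) ---
# Assuming this class backs socketio.AsyncClient from python-socketio, a minimal
# asyncio client could look like this; the server URL and event name are hypothetical.
if __name__ == '__main__':  # pragma: no cover
    import asyncio
    import socketio

    async def _example_main():
        sio = socketio.AsyncClient()

        @sio.event
        async def connect():
            print('connected to the server')

        await sio.connect('http://localhost:5000')   # hypothetical URL
        await sio.emit('my_event', {'answer': 42})   # hypothetical event
        await sio.wait()

    asyncio.run(_example_main())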
|
PypiClean
|
/pros_cli-3.4.3-py3-none-any.whl/pros/conductor/project/ProjectTransaction.py
|
import itertools as it
import os
import tempfile
import zipfile
from typing import *
import pros.common.ui as ui
import pros.conductor as c
from pros.conductor.project.template_resolution import InvalidTemplateException, TemplateAction
class Action(object):
def execute(self, conductor: c.Conductor, project: c.Project) -> None:
raise NotImplementedError()
def describe(self, conductor: c.Conductor, project: c.Project) -> str:
raise NotImplementedError()
def can_execute(self, conductor: c.Conductor, project: c.Project) -> bool:
raise NotImplementedError()
class ApplyTemplateAction(Action):
def __init__(self, template: c.BaseTemplate, apply_kwargs: Dict[str, Any] = None,
suppress_already_installed: bool = False):
self.template = template
self.apply_kwargs = apply_kwargs or {}
self.suppress_already_installed = suppress_already_installed
def execute(self, conductor: c.Conductor, project: c.Project):
try:
conductor.apply_template(project, self.template, **self.apply_kwargs)
except InvalidTemplateException as e:
if e.reason != TemplateAction.AlreadyInstalled or not self.suppress_already_installed:
raise e
else:
ui.logger(__name__).warning(str(e))
return None
def describe(self, conductor: c.Conductor, project: c.Project):
action = project.get_template_actions(conductor.resolve_template(self.template))
if action == TemplateAction.NotApplicable:
return f'{self.template.identifier} cannot be applied to project!'
if action == TemplateAction.Installable:
return f'{self.template.identifier} will be installed to project.'
if action == TemplateAction.Downgradable:
return f'Project will be downgraded to {self.template.identifier} from' \
f' {project.templates[self.template.name].version}.'
if action == TemplateAction.Upgradable:
return f'Project will be upgraded to {self.template.identifier} from' \
f' {project.templates[self.template.name].version}.'
if action == TemplateAction.AlreadyInstalled:
if self.apply_kwargs.get('force_apply'):
return f'{self.template.identifier} will be re-applied.'
elif self.suppress_already_installed:
return f'{self.template.identifier} will not be re-applied.'
else:
return f'{self.template.identifier} cannot be applied to project because it is already installed.'
def can_execute(self, conductor: c.Conductor, project: c.Project) -> bool:
action = project.get_template_actions(conductor.resolve_template(self.template))
if action == TemplateAction.AlreadyInstalled:
return self.apply_kwargs.get('force_apply') or self.suppress_already_installed
return action in [TemplateAction.Installable, TemplateAction.Downgradable, TemplateAction.Upgradable]
class RemoveTemplateAction(Action):
def __init__(self, template: c.BaseTemplate, remove_kwargs: Dict[str, Any] = None,
suppress_not_removable: bool = False):
self.template = template
self.remove_kwargs = remove_kwargs or {}
self.suppress_not_removable = suppress_not_removable
def execute(self, conductor: c.Conductor, project: c.Project):
try:
conductor.remove_template(project, self.template, **self.remove_kwargs)
except ValueError as e:
if not self.suppress_not_removable:
raise e
else:
ui.logger(__name__).warning(str(e))
def describe(self, conductor: c.Conductor, project: c.Project) -> str:
return f'{self.template.identifier} will be removed'
def can_execute(self, conductor: c.Conductor, project: c.Project):
return True
class ChangeProjectNameAction(Action):
def __init__(self, new_name: str):
self.new_name = new_name
def execute(self, conductor: c.Conductor, project: c.Project):
project.project_name = self.new_name
project.save()
def describe(self, conductor: c.Conductor, project: c.Project):
return f'Project will be renamed to: "{self.new_name}"'
def can_execute(self, conductor: c.Conductor, project: c.Project):
return True
class ProjectTransaction(object):
def __init__(self, project: c.Project, conductor: Optional[c.Conductor] = None):
self.project = project
self.conductor = conductor or c.Conductor()
self.actions: List[Action] = []
def add_action(self, action: Action) -> None:
self.actions.append(action)
def execute(self):
if len(self.actions) == 0:
ui.logger(__name__).warning('No actions necessary.')
return
location = self.project.location
tfd, tfn = tempfile.mkstemp(prefix='pros-project-', suffix=f'-{self.project.name}.zip', text=False)  # binary temp file for the zip backup
with os.fdopen(tfd, 'w+b') as tf:
with zipfile.ZipFile(tf, mode='w') as zf:
files, length = it.tee(location.glob('**/*'), 2)
length = len(list(length))
with ui.progressbar(files, length=length, label=f'Backing up {self.project.name} to {tfn}') as pb:
for file in pb:
zf.write(file, arcname=file.relative_to(location))
try:
with ui.Notification():
for action in self.actions:
ui.logger(__name__).debug(action.describe(self.conductor, self.project))
rv = action.execute(self.conductor, self.project)
ui.logger(__name__).debug(f'{action} returned {rv}')
if rv is not None and not rv:
raise ValueError('Action did not complete successfully')
ui.echo('All actions performed successfully')
except Exception as e:
ui.logger(__name__).warning('Failed to perform transaction, restoring project to previous state')
with zipfile.ZipFile(tfn) as zf:
with ui.progressbar(zf.namelist(), label=f'Restoring {self.project.name} from {tfn}') as pb:
for file in pb:
zf.extract(file, path=location)
ui.logger(__name__).exception(e)
finally:
ui.echo(f'Removing {tfn}')
os.remove(tfn)
def apply_template(self, template: c.BaseTemplate, suppress_already_installed: bool = False, **kwargs):
self.add_action(
ApplyTemplateAction(template, suppress_already_installed=suppress_already_installed, apply_kwargs=kwargs)
)
def rm_template(self, template: c.BaseTemplate, suppress_not_removable: bool = False, **kwargs):
self.add_action(
RemoveTemplateAction(template, suppress_not_removable=suppress_not_removable, remove_kwargs=kwargs)
)
def change_name(self, new_name: str):
self.add_action(ChangeProjectNameAction(new_name))
def describe(self) -> str:
if len(self.actions) > 0:
return '\n'.join(
f'- {a.describe(self.conductor, self.project)}'
for a in self.actions
)
else:
return 'No actions necessary.'
def can_execute(self) -> bool:
return all(a.can_execute(self.conductor, self.project) for a in self.actions)
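# --- Illustrative usage sketch (editor's addition, not part of pros-cli) ---
# Queue actions against the project in the current directory, print the plan, then
# execute it; execute() backs the project up to a zip and restores it if an action
# fails. The project path and new name are hypothetical.
if __name__ == '__main__':  # pragma: no cover
    example_project = c.Project(path='.')
    transaction = ProjectTransaction(example_project)
    transaction.change_name('my-robot')
    ui.echo(transaction.describe())
    if transaction.can_execute():
        transaction.execute()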
|
PypiClean
|
/cubicweb-web-1.2.1.tar.gz/cubicweb-web-1.2.1/cubicweb_web/httpcache.py
|
from calendar import timegm
from datetime import datetime
from cubicweb_web import view as viewmod
class NoHTTPCacheManager:
"""default cache manager: set no-cache cache control policy"""
def __init__(self, view):
self.view = view
self.req = view._cw
self.cw_rset = view.cw_rset
def set_headers(self):
self.req.set_header("Cache-control", "no-cache")
self.req.set_header("Expires", "Sat, 01 Jan 2000 00:00:00 GMT")
class MaxAgeHTTPCacheManager(NoHTTPCacheManager):
"""max-age cache manager: set max-age cache control policy, with max-age
specified with the `cache_max_age` attribute of the view
"""
def set_headers(self):
self.req.set_header("Cache-control", "max-age=%s" % self.view.cache_max_age)
class EtagHTTPCacheManager(NoHTTPCacheManager):
"""etag based cache manager for startup views
* etag is generated using the view name and the user's groups
* set policy to 'must-revalidate' and expires to the current time to force
revalidation on each request
"""
def etag(self):
if not self.req.cnx: # session without established connection to the repo
return self.view.__regid__
return self.view.__regid__ + "/" + ",".join(sorted(self.req.user.groups))
def max_age(self):
# 0 to actually force revalidation
return 0
def last_modified(self):
"""return view's last modified GMT time"""
return self.view.last_modified()
def set_headers(self):
req = self.req
try:
req.set_header("Etag", 'W/"%s"' % self.etag())
except NoEtag:
super().set_headers()
return
req.set_header("Cache-control", "must-revalidate,max-age=%s" % self.max_age())
mdate = self.last_modified()
# use a timestamp, not a formatted raw header, and let
# the front-end correctly generate it
# ("%a, %d %b %Y %H:%M:%S GMT" return localized date that
# twisted don't parse correctly)
req.set_header("Last-modified", timegm(mdate.timetuple()), raw=False)
class EntityHTTPCacheManager(EtagHTTPCacheManager):
"""etag based cache manager for view displaying a single entity
* etag is generated using entity's eid, the view name and the user's groups
* get last modified time from the entity definition (this may not be the
entity's modification time since a view may include some related entities
with a modification time to consider) using the `last_modified` method
"""
def etag(self):
if (
self.cw_rset is None or len(self.cw_rset) == 0
): # entity startup view for instance
return super().etag()
if len(self.cw_rset) > 1:
raise NoEtag()
etag = super().etag()
eid = self.cw_rset[0][0]
if self.req.user.owns(eid):
etag += ",owners"
return str(eid) + "/" + etag
class NoEtag(Exception):
"""an etag can't be generated"""
__all__ = (
"NoHTTPCacheManager",
"MaxAgeHTTPCacheManager",
"EtagHTTPCacheManager",
"EntityHTTPCacheManager",
)
# monkey patching, so view doesn't depends on this module and we have all
# http cache related logic here
def set_http_cache_headers(self):
self.http_cache_manager(self).set_headers()
viewmod.View.set_http_cache_headers = set_http_cache_headers
def last_modified(self):
"""return the date/time where this view should be considered as
modified. Take care of possible related objects modifications.
/!\\ must return GMT time /!\\
"""
# XXX check the view module's file modification time in dev mode?
ctime = datetime.utcnow()
if self.cache_max_age:
mtime = self._cw.header_if_modified_since()
if mtime:
tdelta = ctime - mtime
if tdelta.days * 24 * 60 * 60 + tdelta.seconds <= self.cache_max_age:
return mtime
# mtime = ctime will force page rerendering
return ctime
viewmod.View.last_modified = last_modified
# configure default caching
viewmod.View.http_cache_manager = NoHTTPCacheManager
# max-age=0 to actually force revalidation when needed
viewmod.View.cache_max_age = 0
viewmod.StartupView.http_cache_manager = MaxAgeHTTPCacheManager
viewmod.StartupView.cache_max_age = (
60 * 60 * 2
) # stay in http cache for 2 hours by default
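# --- Illustrative sketch (editor's addition, not part of cubicweb_web) ---
# A hypothetical startup view that opts into etag-based caching instead of the
# max-age policy configured for StartupView above; the class and its __regid__
# are made up for demonstration.
class _ExampleEtagCachedView(viewmod.StartupView):
    __regid__ = 'example-etag-cached'
    http_cache_manager = EtagHTTPCacheManager
    cache_max_age = 60 * 10  # let clients reuse the response for 10 minutes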
# ## HTTP Cache validator ############################################
def get_validators(headers_in):
"""return a list of http condition validator relevant to this request"""
result = []
for header, func in VALIDATORS:
value = headers_in.getHeader(header)
if value is not None:
result.append((func, value))
return result
def if_modified_since(ref_date, headers_out):
last_modified = headers_out.getHeader("last-modified")
if last_modified is None:
return True
return ref_date < last_modified
def if_none_match(tags, headers_out):
etag = headers_out.getHeader("etag")
if etag is None:
return True
return not ((etag in tags) or ("*" in tags))
VALIDATORS = [
("if-modified-since", if_modified_since),
# ('if-unmodified-since', if_unmodified_since),
("if-none-match", if_none_match),
# ('if-modified-since', if_modified_since),
]
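# --- Illustrative sketch (editor's addition) ---
# How the validators above combine: a 304 Not Modified answer is possible only when
# every relevant condition validator returns False. headers_in/headers_out are
# assumed to be twisted.web2-style header objects with a getHeader() method.
def _example_can_send_not_modified(headers_in, headers_out):
    return all(not func(value, headers_out)
               for func, value in get_validators(headers_in))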
|
PypiClean
|
/opensesame_core-4.0.5-py3-none-any.whl/opensesame_extensions/core/opensesame_4_notifications/locale/hi/new-experiment.md
|
# Please note!
This experiment was created with a newer version of OpenSesame.
I don't know whether it will work with this version of OpenSesame. I recommend that you update to the latest version of OpenSesame. If that is not possible, I recommend that you save your experiment under a new name.
What would you like to do?
[Save the experiment now](opensesame://action.save){: .important-button} <br />
Or you can:
[Dismiss this message forever](opensesame:not.event.os4n_dismiss_old_experiment){: .dismiss-button}
|
PypiClean
|
/django_cfdi-2.134-py3-none-any.whl/cfdi/static/cfdi/codemirror/mode/tiki/tiki.js
|
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode('tiki', function(config) {
function inBlock(style, terminator, returnTokenizer) {
return function(stream, state) {
while (!stream.eol()) {
if (stream.match(terminator)) {
state.tokenize = inText;
break;
}
stream.next();
}
if (returnTokenizer) state.tokenize = returnTokenizer;
return style;
};
}
function inLine(style) {
return function(stream, state) {
while(!stream.eol()) {
stream.next();
}
state.tokenize = inText;
return style;
};
}
function inText(stream, state) {
function chain(parser) {
state.tokenize = parser;
return parser(stream, state);
}
var sol = stream.sol();
var ch = stream.next();
//non start of line
switch (ch) { //switch is generally much faster than if, so it is used here
case "{": //plugin
stream.eat("/");
stream.eatSpace();
stream.eatWhile(/[^\s\u00a0=\"\'\/?(}]/);
state.tokenize = inPlugin;
return "tag";
case "_": //bold
if (stream.eat("_"))
return chain(inBlock("strong", "__", inText));
break;
case "'": //italics
if (stream.eat("'"))
return chain(inBlock("em", "''", inText));
break;
case "(":// Wiki Link
if (stream.eat("("))
return chain(inBlock("variable-2", "))", inText));
break;
case "[":// Weblink
return chain(inBlock("variable-3", "]", inText));
break;
case "|": //table
if (stream.eat("|"))
return chain(inBlock("comment", "||"));
break;
case "-":
if (stream.eat("=")) {//titleBar
return chain(inBlock("header string", "=-", inText));
} else if (stream.eat("-")) {//deleted
return chain(inBlock("error tw-deleted", "--", inText));
}
break;
case "=": //underline
if (stream.match("=="))
return chain(inBlock("tw-underline", "===", inText));
break;
case ":":
if (stream.eat(":"))
return chain(inBlock("comment", "::"));
break;
case "^": //box
return chain(inBlock("tw-box", "^"));
break;
case "~": //np
if (stream.match("np~"))
return chain(inBlock("meta", "~/np~"));
break;
}
//start of line types
if (sol) {
switch (ch) {
case "!": //header at start of line
if (stream.match('!!!!!')) {
return chain(inLine("header string"));
} else if (stream.match('!!!!')) {
return chain(inLine("header string"));
} else if (stream.match('!!!')) {
return chain(inLine("header string"));
} else if (stream.match('!!')) {
return chain(inLine("header string"));
} else {
return chain(inLine("header string"));
}
break;
case "*": //unordered list line item, or <li /> at start of line
case "#": //ordered list line item, or <li /> at start of line
case "+": //ordered list line item, or <li /> at start of line
return chain(inLine("tw-listitem bracket"));
break;
}
}
//stream.eatWhile(/[&{]/); was eating up plugins, turned off to act less like html and more like tiki
return null;
}
var indentUnit = config.indentUnit;
// Return variables for tokenizers
var pluginName, type;
function inPlugin(stream, state) {
var ch = stream.next();
var peek = stream.peek();
if (ch == "}") {
state.tokenize = inText;
//type = ch == ")" ? "endPlugin" : "selfclosePlugin"; inPlugin
return "tag";
} else if (ch == "(" || ch == ")") {
return "bracket";
} else if (ch == "=") {
type = "equals";
if (peek == ">") {
stream.next();
peek = stream.peek();
}
//here we detect values directly after equal character with no quotes
if (!/[\'\"]/.test(peek)) {
state.tokenize = inAttributeNoQuote();
}
//end detect values
return "operator";
} else if (/[\'\"]/.test(ch)) {
state.tokenize = inAttribute(ch);
return state.tokenize(stream, state);
} else {
stream.eatWhile(/[^\s\u00a0=\"\'\/?]/);
return "keyword";
}
}
function inAttribute(quote) {
return function(stream, state) {
while (!stream.eol()) {
if (stream.next() == quote) {
state.tokenize = inPlugin;
break;
}
}
return "string";
};
}
function inAttributeNoQuote() {
return function(stream, state) {
while (!stream.eol()) {
var ch = stream.next();
var peek = stream.peek();
if (ch == " " || ch == "," || /[ )}]/.test(peek)) {
state.tokenize = inPlugin;
break;
}
}
return "string";
};
}
var curState, setStyle;
function pass() {
for (var i = arguments.length - 1; i >= 0; i--) curState.cc.push(arguments[i]);
}
function cont() {
pass.apply(null, arguments);
return true;
}
function pushContext(pluginName, startOfLine) {
var noIndent = curState.context && curState.context.noIndent;
curState.context = {
prev: curState.context,
pluginName: pluginName,
indent: curState.indented,
startOfLine: startOfLine,
noIndent: noIndent
};
}
function popContext() {
if (curState.context) curState.context = curState.context.prev;
}
function element(type) {
if (type == "openPlugin") {curState.pluginName = pluginName; return cont(attributes, endplugin(curState.startOfLine));}
else if (type == "closePlugin") {
var err = false;
if (curState.context) {
err = curState.context.pluginName != pluginName;
popContext();
} else {
err = true;
}
if (err) setStyle = "error";
return cont(endcloseplugin(err));
}
else if (type == "string") {
if (!curState.context || curState.context.name != "!cdata") pushContext("!cdata");
if (curState.tokenize == inText) popContext();
return cont();
}
else return cont();
}
function endplugin(startOfLine) {
return function(type) {
if (
type == "selfclosePlugin" ||
type == "endPlugin"
)
return cont();
if (type == "endPlugin") {pushContext(curState.pluginName, startOfLine); return cont();}
return cont();
};
}
function endcloseplugin(err) {
return function(type) {
if (err) setStyle = "error";
if (type == "endPlugin") return cont();
return pass();
};
}
function attributes(type) {
if (type == "keyword") {setStyle = "attribute"; return cont(attributes);}
if (type == "equals") return cont(attvalue, attributes);
return pass();
}
function attvalue(type) {
if (type == "keyword") {setStyle = "string"; return cont();}
if (type == "string") return cont(attvaluemaybe);
return pass();
}
function attvaluemaybe(type) {
if (type == "string") return cont(attvaluemaybe);
else return pass();
}
return {
startState: function() {
return {tokenize: inText, cc: [], indented: 0, startOfLine: true, pluginName: null, context: null};
},
token: function(stream, state) {
if (stream.sol()) {
state.startOfLine = true;
state.indented = stream.indentation();
}
if (stream.eatSpace()) return null;
setStyle = type = pluginName = null;
var style = state.tokenize(stream, state);
if ((style || type) && style != "comment") {
curState = state;
while (true) {
var comb = state.cc.pop() || element;
if (comb(type || style)) break;
}
}
state.startOfLine = false;
return setStyle || style;
},
indent: function(state, textAfter) {
var context = state.context;
if (context && context.noIndent) return 0;
if (context && /^{\//.test(textAfter))
context = context.prev;
while (context && !context.startOfLine)
context = context.prev;
if (context) return context.indent + indentUnit;
else return 0;
},
electricChars: "/"
};
});
CodeMirror.defineMIME("text/tiki", "tiki");
});
|
PypiClean
|
/aldryn_django-4.1.6.0-py3-none-any.whl/aldryn_django/management/commands/aldryn_optimize_static_images.py
|
import mimetypes
import shutil
import subprocess
from django.conf import settings
from django.contrib.staticfiles.finders import get_finders
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Optimize static images prior to collectstatic'
setting_names = [
'STATIC_IMAGES_OPTIMIZE_COMMAND',
'THUMBNAIL_OPTIMIZE_COMMAND',
]
def get_settings(self):
for setting_name in self.setting_names:
try:
return getattr(settings, setting_name)
except AttributeError:
pass
else:
return {}
def handle(self, *args, **options):
ignore_patterns = ['CVS', '.*', '*~']
base_path = settings.BASE_DIR.rstrip('/') + '/'
optimize_commands = self.get_settings()
for finder in get_finders():
for path, storage in finder.list(ignore_patterns):
if not storage.path(path).startswith(base_path):
# Do not process images found in static dirs of third party
# apps.
continue
mimetype = mimetypes.guess_type(path)[0]
if not mimetype:
# Unknown mime type, ignore the file.
continue
generic_type, image_type = mimetype.split('/', 1)
if generic_type != 'image':
# Only process images.
continue
if image_type in optimize_commands:
self.optimize(
storage,
path,
image_type,
optimize_commands[image_type],
)
def optimize(self, storage, path, image_type, command):
with NamedTemporaryFile() as temp_image:
# Copy the image to the temporary file
with storage.open(path, 'rb') as image:  # binary mode: image data is not text
shutil.copyfileobj(image, temp_image)
temp_image.flush()
temp_image.seek(0)
# Optimize the image
optimize_command = command.format(filename=temp_image.name)
self.stdout.write(f'>>> {optimize_command}')
subprocess.check_call(optimize_command, shell=True)
self.stdout.write('')
# Save the image back from the temporary file into the storage
with open(temp_image.name, 'rb') as fh:
storage.delete(path)
storage.save(path, File(fh))
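# --- Illustrative sketch (editor's addition, not part of aldryn-django) ---
# The command reads a mapping like the one below from STATIC_IMAGES_OPTIMIZE_COMMAND
# (or THUMBNAIL_OPTIMIZE_COMMAND): keys are the image subtype taken from the guessed
# mime type, values are shell command templates with a '{filename}' placeholder.
# The optimizer CLIs named here are assumptions; any tool that rewrites the file in
# place would work.
EXAMPLE_OPTIMIZE_COMMANDS = {
    'png': 'optipng -o7 {filename}',
    'jpeg': 'jpegoptim --strip-all {filename}',
    'svg+xml': 'svgo {filename}',
}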
|
PypiClean
|
/mindspore_ascend-1.10.0-cp39-none-any.whl/mindspore/_akg/akg/topi/arm_cpu/conv2d.py
|
"""Conv2D schedule for ARM CPU"""
from __future__ import absolute_import as _abs
import logging
import tvm
from tvm import autotvm
import tvm.contrib.nnpack
from ..generic import schedule_conv2d_nchw, schedule_conv2d_winograd_without_weight_transform, \
schedule_conv2d_winograd_nnpack_without_weight_transform
from ..util import traverse_inline, get_const_tuple
from ..nn import dilate, pad, conv2d, conv2d_alter_layout, \
conv2d_winograd_without_weight_transform, \
conv2d_winograd_nnpack_without_weight_transform, \
depthwise_conv2d_nchw
from ..nn.util import get_const_int, get_pad_tuple
from ..nn.winograd_util import winograd_transform_matrices
from .conv2d_spatial_pack import conv2d_spatial_pack_nchw, \
schedule_conv2d_spatial_pack_nchw
logger = logging.getLogger('topi')
@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['direct'])
def conv2d_arm_cpu(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
"""TOPI compute callback for conv2d
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.Tensor
4-D with shape [num_filter, in_channel, filter_height, filter_width] or
pre-packed 5-D with shape [num_filter_chunk, in_channel, filter_height,
filter_width, num_filter_block]
strides : list of two ints
[stride_height, stride_width]
padding : list of two ints
[pad_height, pad_width]
dilation : list of two ints
[dilation_height, dilation_width]
layout : str
layout of data
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
if layout == 'NCHW':
return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding,
dilation, out_dtype, num_tile=2)
else:
raise ValueError("Unsupported layout {}".format(layout))
@autotvm.register_topi_schedule(
schedule_conv2d_nchw, 'arm_cpu',
['direct', 'winograd', 'winograd_nnpack_fp16', 'winograd_nnpack_fp32'])
def schedule_conv2d_nchw_arm_cpu(cfg, outs):
"""TOPI schedule callback for conv2d
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d.
"""
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
# schedule conv2d
if 'spatial_conv2d_output' in op.tag:
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
s[data_pad].compute_inline()
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == 'kernel_vec':
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec,
conv, output, outs[0])
if 'winograd_conv2d_output' in op.tag:
output = op.output(0)
_schedule_winograd(cfg, s, output, outs[0])
if 'winograd_nnpack_conv2d_output' in op.tag:
output = op.output(0)
_schedule_winograd_nnpack(cfg, s, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd'])
def conv2d_arm_cpu_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
""" TOPI compute callback. Use winograd template """
tile_size = 4
return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout,
out_dtype, tile_size)
def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size):
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
if len(kernel.shape) == 4:
if dilation_h != 1 or dilation_w != 1:
kernel = dilate(kernel, (1, 1, dilation_h, dilation_w))
pre_computed = False
CO, _, KH, KW = get_const_tuple(kernel.shape)
else:
assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
pre_computed = True
H_CAT, W_CAT, CO, CI, VC = get_const_tuple(kernel.shape)
CO *= VC
KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
assert layout == 'NCHW'
assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1
data_pad = pad(data, (0, 0, HPAD, WPAD), name="data_pad")
idxd = tvm.indexdiv
idxm = tvm.indexmod
r = KW
m = tile_size
alpha = m + r - 1
A, B, G = winograd_transform_matrices(m, r, out_dtype)
K = CO
C = CI
H = (IH + 2 * HPAD - 3) // HSTR + 1
W = (IW + 2 * WPAD - 3) // WSTR + 1
nH, nW = (H + m-1) // m, (W + m-1) // m
P = N * nH * nW
cfg.define_split('tile_p', cfg.axis(P), num_outputs=2, filter=lambda x: x.size[-1] <= 16)
cfg.define_split('tile_k', cfg.axis(K), num_outputs=2, filter=lambda x: x.size[-1] <= 16)
VP = cfg['tile_p'].size[-1]
VK = cfg['tile_k'].size[-1]
# pack input tile
input_tile = tvm.compute((C, idxd(P, VP), alpha, alpha, VP),
lambda c, b, eps, nu, bb:
data_pad[idxd(b*VP + bb, nH*nW), c,
idxm(idxd(b*VP + bb, nW), nH) * m + eps,
idxm(b*VP + bb, nW) * m + nu],
name='d')
# transform kernel
if pre_computed:
U = kernel
else:
r_kh = tvm.reduce_axis((0, KH), 'r_kh')
r_kw = tvm.reduce_axis((0, KW), 'r_kw')
U = tvm.compute((alpha, alpha, idxd(K, VK), C, VK), lambda eps, nu, k, c, kk:
tvm.sum(kernel[k * VK + kk][c][r_kh][r_kw].astype(out_dtype) *
G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]), name='U')
# transform image
r_eps = tvm.reduce_axis((0, alpha), 'r_eps')
r_nu = tvm.reduce_axis((0, alpha), 'r_nu')
V = tvm.compute((alpha, alpha, idxd(P, VP), C, VP), lambda eps, nu, b, c, bb:
tvm.sum(input_tile[c][b][r_eps][r_nu][bb].astype(out_dtype) *
B[r_eps][eps] * B[r_nu][nu], axis=[r_eps, r_nu]), name='V')
# batch gemm
c = tvm.reduce_axis((0, C), name='c')
M = tvm.compute((alpha, alpha, K, P), lambda eps, nu, k, b:
tvm.sum(U[eps][nu][idxd(k, VK)][c][idxm(k, VK)] *
V[eps][nu][idxd(b, VP)][c][idxm(b, VP)], axis=c), name='M')
# inverse transform
r_eps = tvm.reduce_axis((0, alpha), 'r_eps')
r_nu = tvm.reduce_axis((0, alpha), 'r_nu')
Y = tvm.compute((K, P, m, m), lambda k, b, vh, vw:
tvm.sum(M[r_eps][r_nu][k][b] * A[r_eps][vh] * A[r_nu][vw],
axis=[r_eps, r_nu]), name='Y')
# unpack output
output = tvm.compute((N, K, H, W), lambda n, k, h, w:
Y[k][n * nH * nW + idxd(h, m) * nW + idxd(w, m),
idxm(h, m), idxm(w, m)],
name='output', tag='winograd_conv2d_output')
# we have to manually assign effective GFLOP for winograd
cfg.add_flop(2 * N * K * H * W * KH * KW * C)
return output
def _schedule_winograd(cfg, s, output, last):
Y = output.op.input_tensors[0]
M, A = Y.op.input_tensors
U, V = M.op.input_tensors
d, B = V.op.input_tensors
data_pad = d.op.input_tensors[0]
# padding
s[data_pad].compute_inline()
# pack input tiles
s[d].compute_inline()
# transform kernel
if isinstance(U.op, tvm.tensor.ComputeOp):
kernel, G = U.op.input_tensors
s[G].compute_inline()
eps, nu, k, c, kk, = s[U].op.axis
if autotvm.GLOBAL_SCOPE.in_tuning:
# kernel transformation will be pre-computed during compilation, so we skip
# this part to make tuning records correct
s[U].pragma(eps, 'debug_skip_region')
else:
r_kh, r_kw = s[U].op.reduce_axis
s[U].reorder(k, c, eps, nu, r_kh, r_kw, kk)
for axis in [eps, nu, r_kh, r_kw]:
s[U].unroll(axis)
s[U].vectorize(kk)
s[U].parallel(k)
if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
# transform image
DD = s.cache_read(d, 'global', [V])
s[B].compute_inline()
eps, nu, b, c, bb = s[V].op.axis
r_eps, r_nu = s[V].op.reduce_axis
s[V].reorder(b, c, eps, nu, r_eps, r_nu, bb)
for axis in [eps, nu, r_eps, r_nu]:
s[V].unroll(axis)
s[DD].compute_at(s[V], c)
s[V].vectorize(bb)
s[V].parallel(b)
# batch gemm
eps, nu, k, b = s[M].op.axis
c = s[M].op.reduce_axis[0]
cfg.define_split('tile_c', c, num_outputs=2, filter=lambda x: x.size[-1] <= 16)
co, ci = cfg['tile_c'].apply(s, M, c)
xo, xi = cfg['tile_p'].apply(s, M, b)
s[M].reorder(eps, nu, xo, co, k, ci, xi)
cfg.define_annotate('ann_reduce', [ci], policy='try_unroll')
cfg.define_annotate('ann_spatial', [k, xi], policy='try_unroll_vec')
cfg['ann_reduce'].apply(s, M, [ci],
axis_lens=[cfg['tile_c'].size[-1]],
max_unroll=16,
cfg=cfg)
cfg['ann_spatial'].apply(s, M, [k, xi])
# inverse transform
s[A].compute_inline()
k, b, vh, vw = s[Y].op.axis
r_eps, r_nu = s[Y].op.reduce_axis
for axis in [vh, vw, r_eps, r_nu]:
s[Y].unroll(axis)
# output
n, co, h, w = s[last].op.axis
co, coi = cfg['tile_k'].apply(s, last, co)
p = s[last].fuse(n, co)
s[M].compute_at(s[last], p)
s[last].parallel(p)
MM = s.cache_read(M, 'global', [Y])
m = get_const_int(V.shape[0]) + 1 - 3
ho, wo, hi, wi = s[last].tile(h, w, m, m)
s[Y].compute_at(s[last], wo)
s[MM].compute_at(s[last], wo)
if output != last:
s[output].compute_inline()
@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd_nnpack_fp16'])
def conv2d_arm_cpu_winograd_nnpack_fp16(
cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
""" TOPI compute callback. Use winograd_nnpack_fp16 template """
return conv2d_arm_cpu_winograd_nnpack(
cfg, data, kernel, strides, padding, dilation, layout, out_dtype,
tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16)
@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd_nnpack_fp32'])
def conv2d_arm_cpu_winograd_nnpack_fp32(
cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
""" TOPI compute callback. Use winograd_nnpack_fp32 template """
return conv2d_arm_cpu_winograd_nnpack(
cfg, data, kernel, strides, padding, dilation, layout, out_dtype,
tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8)
def conv2d_arm_cpu_winograd_nnpack(
cfg, data, kernel, strides, padding, dilation, layout, out_dtype, convolution_algorithm):
""" TOPI compute callback. Use winograd NNPACK template """
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
assert (dilation_h, dilation_w) == (1, 1)
assert len(kernel.shape) == 4
CO, _, KH, KW = get_const_tuple(kernel.shape)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
assert layout == 'NCHW'
assert KH == 3 and KW == 3 and HPAD == 1 and WPAD == 1 and HSTR == 1 and WSTR == 1
H = (IH + 2 * HPAD - 3) // HSTR + 1
W = (IW + 2 * WPAD - 3) // WSTR + 1
cfg.define_knob('winograd_nnpack_algorithm', [convolution_algorithm])
assert N == 1
with tvm.tag_scope("winograd_nnpack_conv2d_weight_transform"):
transformed_kernel = tvm.contrib.nnpack.convolution_inference_weight_transform(
kernel, algorithm=cfg['winograd_nnpack_algorithm'].val)
if autotvm.GLOBAL_SCOPE.in_tuning:
transformed_kernel = tvm.compute(transformed_kernel.shape, lambda *args: 0.0)
with tvm.tag_scope("winograd_nnpack_conv2d_output"):
output = tvm.contrib.nnpack.convolution_inference_without_weight_transform(
data, transformed_kernel,
bias=None,
padding=[HPAD, HPAD, WPAD, WPAD],
stride=[HSTR, WSTR],
algorithm=cfg['winograd_nnpack_algorithm'].val)
# we have to manually assign effective GFLOP for winograd
cfg.add_flop(2 * N * CI * H * W * KH * KW * CO)
return output
def _schedule_winograd_nnpack(cfg, s, output, last):
# Could have bias.
(X, TK) = output.op.input_tensors[:2]
# transform kernel
assert isinstance(TK.op, (tvm.tensor.ComputeOp, tvm.tensor.ExternOp, tvm.tensor.PlaceholderOp))
if autotvm.GLOBAL_SCOPE.in_tuning and isinstance(TK.op, tvm.tensor.ComputeOp):
# kernel transformation will be pre-computed during compilation, so we skip
# this part to make tuning records correct
s[TK].pragma(s[TK].op.axis[0], 'debug_skip_region')
##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD WITH WEIGHT TRANSFORM #####
@autotvm.register_topi_compute(conv2d_winograd_without_weight_transform, 'arm_cpu', ['winograd'])
def conv2d_winograd_ww(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size):
"""TOPI compute callback"""
return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype,\
tile_size)
@autotvm.register_topi_schedule(schedule_conv2d_winograd_without_weight_transform,
'arm_cpu', ['winograd'])
def schedule_conv2d_winograd_without_weight_transform_(cfg, outs):
"""TOPI schedule callback"""
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
if 'winograd_conv2d_output' in op.tag:
output = op.output(0)
_schedule_winograd(cfg, s, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD NNPACK WITHOUT WEIGHT TRANSFORM #####
@autotvm.register_topi_compute(conv2d_winograd_nnpack_without_weight_transform,
'arm_cpu',
['winograd_nnpack_fp16', 'winograd_nnpack_fp32'])
def conv2d_winograd_nnpack_ww(cfg, data, transformed_kernel, bias, strides,
padding, dilation, layout, out_dtype):
""" TOPI compute callback. Use winograd NNPACK template """
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
assert (dilation_h, dilation_w) == (1, 1)
assert len(transformed_kernel.shape) == 4
CO, _, _, _ = get_const_tuple(transformed_kernel.shape)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
HPAD, WPAD, _, _ = get_pad_tuple(padding, (3, 3))
KH, KW = 3, 3
assert layout == 'NCHW'
assert KH == 3 and KW == 3 and HPAD == 1 and WPAD == 1 and HSTR == 1 and WSTR == 1
H = (IH + 2 * HPAD - 3) // HSTR + 1
W = (IW + 2 * WPAD - 3) // WSTR + 1
assert N == 1
with tvm.tag_scope("winograd_nnpack_conv2d_output"):
output = tvm.contrib.nnpack.convolution_inference_without_weight_transform(
data=data,
transformed_kernel=transformed_kernel,
bias=bias,
padding=[HPAD, HPAD, WPAD, WPAD],
stride=[HSTR, WSTR],
algorithm=cfg['winograd_nnpack_algorithm'].val)
# we have to manually assign effective GFLOP for winograd
cfg.add_flop(2 * N * CI * H * W * KH * KW * CO)
return output
@autotvm.register_topi_schedule(schedule_conv2d_winograd_nnpack_without_weight_transform,
'arm_cpu', ['winograd_nnpack_fp16', 'winograd_nnpack_fp32'])
def schedule_conv2d_winograd_nnpack_without_weight_transform_(cfg, outs):
"""TOPI schedule callback"""
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
if 'winograd_nnpack_conv2d_output' in op.tag:
output = op.output(0)
_schedule_winograd_nnpack(cfg, s, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
##### REGISTER ALTER OP LAYOUT #####
@conv2d_alter_layout.register(["arm_cpu"])
def _alter_conv2d_layout_arm(attrs, inputs, tinfos, F):
"""Alter op layout for pre-computing kernel transformation
Parameters
----------
attrs : nnvm.top.AttrDict or tvm.attrs.Attrs
Attributes of current convolution
inputs : nnvm.symbol or tvm.relay.Expr
Grouped input symbols
tinfos : list
Input shape and dtype
F: symbol
The context, can be either nnvm.sym or relay.op
Note
----
Unlike other TOPI functions, this function operates on both graph level and operator level,
so we have to pass 'F' to make it support our two versions of graph IR, NNVM and Relay.
"""
copy_inputs = [s for s in inputs]
new_attrs = {k: attrs[k] for k in attrs.keys()}
if F.__name__ == 'tvm.relay.op':
# Derive channels for frontends (e.g. ONNX) that miss the "channels" field.
new_attrs["channels"] = inputs[1].checked_type.shape[attrs['kernel_layout'].index('O')]
dilation = attrs.get_int_tuple("dilation")
strides = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
groups = attrs.get_int('groups')
data_layout_key = "data_layout" if "data_layout" in new_attrs else "layout"
layout = attrs[data_layout_key]
kernel_layout = attrs['kernel_layout']
out_dtype = attrs["out_dtype"]
if out_dtype in ("same", ""):
out_dtype = tinfos[0].dtype
if dilation != (1, 1):
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None
# query config of this workload
data, kernel = tinfos[0:2]
if groups == 1:
workload = autotvm.task.args_to_workload(
[data, kernel, strides, padding, dilation, layout, out_dtype], conv2d)
else:
workload = autotvm.task.args_to_workload(
[data, kernel, strides, padding, dilation, out_dtype], depthwise_conv2d_nchw)
if layout == 'NCHW' and kernel_layout == 'OIHW':
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
elif layout == 'NHWC' and kernel_layout == 'HWIO':
N, H, W, CI = get_const_tuple(data.shape)
KH, KW, _, CO = get_const_tuple(kernel.shape)
# Also modify the workload to pick up because later we convert to NCHW
# layout.
new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype)
new_kernel = tvm.placeholder((CO, CI, KH, KW), dtype=kernel.dtype)
new_layout = 'NCHW'
workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, new_layout, out_dtype], conv2d)
elif layout == 'NHWC' and kernel_layout == 'HWOI':
# This is the case for depthwise convolution.
N, H, W, CI = get_const_tuple(data.shape)
KH, KW, CO, M = get_const_tuple(kernel.shape)
# Also modify the workload to pick up because later we convert to NCHW
# layout.
new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype)
new_kernel = tvm.placeholder((CO, M, KH, KW), dtype=kernel.dtype)
workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype], depthwise_conv2d_nchw)
else:
return None
idxd = tvm.indexdiv
if groups == 1:
target = tvm.target.current_target()
dispatch_ctx = autotvm.DispatchContext.current
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback: # if is fallback, clear query cache and return None
autotvm.task.clear_fallback_cache(target, workload)
if layout == 'NHWC' and kernel_layout == 'HWIO':
new_attrs['data_layout'] = 'NCHW'
new_attrs['kernel_layout'] = 'OIHW'
return F.nn.conv2d(*copy_inputs, **new_attrs)
return None
if cfg.template_key == 'direct': # pack weight tensor
VC = cfg['tile_co'].size[-1]
new_attrs['kernel_layout'] = 'OIHW%do' % VC
# Store the same config for the altered operator (workload)
new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype)
new_attrs[data_layout_key] = 'NCHW'
new_kernel = tvm.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, 'NCHW', out_dtype], conv2d)
dispatch_ctx.update(target, new_workload, cfg)
return F.nn.conv2d(*copy_inputs, **new_attrs)
elif cfg.template_key == "winograd": # pre-compute weight transformation in winograd
if "-device=arm_cpu" in target.options:
tile_size = 4
VC = cfg['tile_k'].size[-1]
elif "-device=bifrost" in target.options:
tile_size = 2
VC = 0
else:
from ..mali.conv2d import _pick_tile_size
tile_size = _pick_tile_size(tinfos[0], tinfos[1])
VC = cfg['tile_bna'].val
weight = copy_inputs[1]
if kernel_layout != 'OIHW':
weight = F.transpose(weight, axes=(2, 3, 0, 1))
weight = F.nn.contrib_conv2d_winograd_weight_transform(weight,
tile_size=tile_size)
if VC > 0:
weight = F.reshape(weight,
newshape=(KH + tile_size - 1,
KW + tile_size - 1,
idxd(CO, VC), VC, CI))
weight = F.transpose(weight, axes=[0, 1, 2, 4, 3])
new_weight = tvm.placeholder((KH + tile_size - 1,
KW + tile_size -1,
idxd(CO, VC), CI, VC),
kernel.dtype)
else:
weight = F.reshape(weight,
newshape=(KH + tile_size - 1, KW + tile_size - 1, CO, CI))
new_weight = tvm.placeholder(
(KH + tile_size - 1, KW + tile_size -1, CO, CI), kernel.dtype
)
copy_inputs[1] = weight
new_attrs['tile_size'] = tile_size
new_attrs[data_layout_key] = 'NCHW'
# Store the same config for the altered operator (workload)
new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation,
new_attrs[data_layout_key], out_dtype, tile_size],
conv2d_winograd_without_weight_transform)
dispatch_ctx.update(target, new_workload, cfg)
return F.nn.contrib_conv2d_winograd_without_weight_transform(*copy_inputs, **new_attrs)
elif cfg.template_key in ["winograd_nnpack_fp16", "winograd_nnpack_fp32"]:
# pre-compute winograd_nnpack transform
# for winograd_nnpack_fp16, the precompute prune pass must run on a device
# where float16 is supported
weight_dtype = 'float32'
weight = copy_inputs[1]
if kernel_layout != 'OIHW':
weight = F.transpose(weight, axes=(2, 3, 0, 1))
weight = F.nn.contrib_conv2d_winograd_weight_transform(weight,
tile_size=tile_size)
transformed_kernel = F.nn.contrib_conv2d_winograd_nnpack_weight_transform(
weight,
convolution_algorithm=cfg['winograd_nnpack_algorithm'].val,
out_dtype=weight_dtype)
copy_inputs[1] = transformed_kernel
new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype)
new_kernel = tvm.placeholder((CO, CI, 8, 8), "float32")
bias = tvm.placeholder((CO, ), "float32")
new_attrs[data_layout_key] = 'NCHW'
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, bias, strides,
padding, dilation, new_attrs[data_layout_key], out_dtype]
if len(copy_inputs) == 3 else
[new_data, new_kernel, strides,
padding, dilation, new_attrs[data_layout_key], out_dtype],
conv2d_winograd_nnpack_without_weight_transform)
dispatch_ctx.update(target, new_workload, cfg)
return F.nn.contrib_conv2d_winograd_nnpack_without_weight_transform(
*copy_inputs, **new_attrs)
else:
raise RuntimeError("Unsupported template_key '%s'" % cfg.template_key)
else:
target = tvm.target.current_target()
dispatch_ctx = autotvm.DispatchContext.current
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback: # if is fallback, clear query cache and return None
autotvm.task.clear_fallback_cache(tvm.target.current_target(), workload)
if layout == 'NHWC' and kernel_layout == 'HWOI':
new_attrs['data_layout'] = 'NCHW'
new_attrs['kernel_layout'] = 'OIHW'
return F.nn.conv2d(*copy_inputs, **new_attrs)
return None
if cfg.template_key == 'contrib_spatial_pack':
VC = cfg['tile_co'].size[-1]
new_attrs['kernel_layout'] = 'OIHW%do' % (cfg['tile_co'].size[-1])
# Store the same config for the altered operator (workload)
new_data = tvm.placeholder((N, CI, H, W), dtype=data.dtype)
new_attrs[data_layout_key] = 'NCHW'
if attrs['kernel_layout'] == 'OIHW':
CO, M, KH, KW = get_const_tuple(kernel.shape)
elif attrs['kernel_layout'] == 'HWOI':
KH, KW, CO, M = get_const_tuple(kernel.shape)
else:
raise RuntimeError("Depthwise conv should either have OIHW/HWIO kernel layout")
new_kernel = tvm.placeholder((idxd(CO, VC), M, KH, KW, VC), dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
depthwise_conv2d_nchw)
dispatch_ctx.update(target, new_workload, cfg)
return F.nn.conv2d(*copy_inputs, **new_attrs)
else:
# currently we only have the contrib_spatial_pack and direct templates;
# add more schedule templates as needed.
return None
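# --- Illustrative sketch (editor's addition, not part of this TOPI module) ---
# The output spatial-size arithmetic used repeatedly above, written out as a tiny
# helper: for a KxK kernel, out = (in + 2 * pad - K) // stride + 1.
def _example_conv_output_size(in_size, pad, stride, kernel=3):
    """Output size along one spatial dimension (same formula as in the code above)."""
    return (in_size + 2 * pad - kernel) // stride + 1

# e.g. a 224x224 input with pad=1, stride=1 and a 3x3 kernel keeps its spatial size:
assert _example_conv_output_size(224, 1, 1) == 224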
|
PypiClean
|
/music_bg_extra-0.2.6.tar.gz/music_bg_extra-0.2.6/music_bg_extra/processors/pop.py
|
from functools import partial
from operator import mul
from typing import Union
from PIL import Image
def pop_filter( # noqa: WPS210
image: Image.Image,
offset_x: Union[str, int] = 60,
offset_y: Union[str, int] = 60,
increase_factor: Union[str, float] = 1.4,
decrease_factor: Union[str, float] = 0.8,
) -> Image.Image:
"""
Generate pop image.
This filter splits image by color channels
and creates three separate images.
By changing offset you can control relative position
of the channels.
:param image: source image.
:param offset_x: image offset by x axis, defaults to 40
:param offset_y: image offset by y axis, defaults to 40
:param increase_factor: color increase factor, defaults to 1.4
:param decrease_factor: color decrease factor, defaults to 0.8
:raises ValueError: if offset is less than zero or
increase factor is less than 1 or decrease_factor is greater than 1.
:return: new image.
"""
image = image.convert("RGBA")
offset_x = int(offset_x)
offset_y = int(offset_y)
if offset_y < 0 or offset_x < 0:
raise ValueError("Offset can't be less than zero.")
decrease_factor = float(decrease_factor)
increase_factor = float(increase_factor)
if increase_factor <= 1:
raise ValueError("Increase factor must be greater than one.")
if decrease_factor >= 1:
raise ValueError("Decrease factor must be less than one.")
increaser = partial(mul, increase_factor)
decreaser = partial(mul, decrease_factor)
red, green, blue, alpha = image.split()
r_dec, r_inc = red.point(decreaser), red.point(increaser)
g_dec, g_inc = green.point(decreaser), green.point(increaser)
b_dec, b_inc = blue.point(decreaser), blue.point(increaser)
r_img = Image.merge("RGBA", (r_inc, g_dec, b_dec, alpha))
g_img = Image.merge("RGBA", (r_dec, g_inc, b_dec, alpha))
b_img = Image.merge("RGBA", (r_dec, g_dec, b_inc, alpha))
res = Image.new( # noqa;
"RGBA",
(image.width + offset_x * 2, image.height + offset_y * 2),
(0, 0, 0, 0),
)
res.alpha_composite(r_img, (0, 0))
res.alpha_composite(g_img, (offset_x, offset_y))
res.alpha_composite(b_img, (offset_x * 2, offset_y * 2))
return res
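# --- Illustrative usage sketch (editor's addition, not part of music_bg_extra) ---
# File names are hypothetical; any image Pillow can open works. The result canvas
# grows by 2 * offset on each axis so the three shifted color layers fit.
if __name__ == '__main__':  # pragma: no cover
    source = Image.open('cover.png')
    popped = pop_filter(source, offset_x=40, offset_y=40)
    popped.save('cover_pop.png')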
|
PypiClean
|
/upaas-admin-0.3.1.tar.gz/upaas-admin-0.3.1/upaas_admin/var/static/zeroclipboard/ZeroClipboard.min.js
|
!function(){"use strict";var a,b=function(){var a=/\-([a-z])/g,b=function(a,b){return b.toUpperCase()};return function(c){return c.replace(a,b)}}(),c=function(a,c){var d,e,f,g,h,i;if(window.getComputedStyle?d=window.getComputedStyle(a,null).getPropertyValue(c):(e=b(c),d=a.currentStyle?a.currentStyle[e]:a.style[e]),"cursor"===c&&(!d||"auto"===d))for(f=a.tagName.toLowerCase(),g=["a"],h=0,i=g.length;i>h;h++)if(f===g[h])return"pointer";return d},d=function(a){if(p.prototype._singleton){a||(a=window.event);var b;this!==window?b=this:a.target?b=a.target:a.srcElement&&(b=a.srcElement),p.prototype._singleton.setCurrent(b)}},e=function(a,b,c){a.addEventListener?a.addEventListener(b,c,!1):a.attachEvent&&a.attachEvent("on"+b,c)},f=function(a,b,c){a.removeEventListener?a.removeEventListener(b,c,!1):a.detachEvent&&a.detachEvent("on"+b,c)},g=function(a,b){if(a.addClass)return a.addClass(b),a;if(b&&"string"==typeof b){var c=(b||"").split(/\s+/);if(1===a.nodeType)if(a.className){for(var d=" "+a.className+" ",e=a.className,f=0,g=c.length;g>f;f++)d.indexOf(" "+c[f]+" ")<0&&(e+=" "+c[f]);a.className=e.replace(/^\s+|\s+$/g,"")}else a.className=b}return a},h=function(a,b){if(a.removeClass)return a.removeClass(b),a;if(b&&"string"==typeof b||void 0===b){var c=(b||"").split(/\s+/);if(1===a.nodeType&&a.className)if(b){for(var d=(" "+a.className+" ").replace(/[\n\t]/g," "),e=0,f=c.length;f>e;e++)d=d.replace(" "+c[e]+" "," ");a.className=d.replace(/^\s+|\s+$/g,"")}else a.className=""}return a},i=function(){var a,b,c,d=1;return"function"==typeof document.body.getBoundingClientRect&&(a=document.body.getBoundingClientRect(),b=a.right-a.left,c=document.body.offsetWidth,d=Math.round(100*(b/c))/100),d},j=function(a){var b={left:0,top:0,width:0,height:0,zIndex:999999999},d=c(a,"z-index");if(d&&"auto"!==d&&(b.zIndex=parseInt(d,10)),a.getBoundingClientRect){var e,f,g,h=a.getBoundingClientRect();"pageXOffset"in window&&"pageYOffset"in window?(e=window.pageXOffset,f=window.pageYOffset):(g=i(),e=Math.round(document.documentElement.scrollLeft/g),f=Math.round(document.documentElement.scrollTop/g));var j=document.documentElement.clientLeft||0,k=document.documentElement.clientTop||0;b.left=h.left+e-j,b.top=h.top+f-k,b.width="width"in h?h.width:h.right-h.left,b.height="height"in h?h.height:h.bottom-h.top}return b},k=function(a,b){var c=!(b&&b.useNoCache===!1);return c?(-1===a.indexOf("?")?"?":"&")+"nocache="+(new Date).getTime():""},l=function(a){var b=[],c=[];return a.trustedOrigins&&("string"==typeof a.trustedOrigins?c.push(a.trustedOrigins):"object"==typeof a.trustedOrigins&&"length"in a.trustedOrigins&&(c=c.concat(a.trustedOrigins))),a.trustedDomains&&("string"==typeof a.trustedDomains?c.push(a.trustedDomains):"object"==typeof a.trustedDomains&&"length"in a.trustedDomains&&(c=c.concat(a.trustedDomains))),c.length&&b.push("trustedOrigins="+encodeURIComponent(c.join(","))),"string"==typeof a.amdModuleId&&a.amdModuleId&&b.push("amdModuleId="+encodeURIComponent(a.amdModuleId)),"string"==typeof a.cjsModuleId&&a.cjsModuleId&&b.push("cjsModuleId="+encodeURIComponent(a.cjsModuleId)),b.join("&")},m=function(a,b){if(b.indexOf)return b.indexOf(a);for(var c=0,d=b.length;d>c;c++)if(b[c]===a)return c;return-1},n=function(a){if("string"==typeof a)throw new TypeError("ZeroClipboard doesn't accept query strings.");return a.length?a:[a]},o=function(a,b,c,d,e){e?window.setTimeout(function(){a.call(b,c,d)},0):a.call(b,c,d)},p=function(a,b){if(a&&(p.prototype._singleton||this).glue(a),p.prototype._singleton)return 
p.prototype._singleton;p.prototype._singleton=this,this.options={};for(var c in s)this.options[c]=s[c];for(var d in b)this.options[d]=b[d];this.handlers={},p.detectFlashSupport()&&v()},q=[];p.prototype.setCurrent=function(b){a=b,this.reposition();var d=b.getAttribute("title");d&&this.setTitle(d);var e=this.options.forceHandCursor===!0||"pointer"===c(b,"cursor");return r.call(this,e),this},p.prototype.setText=function(a){return a&&""!==a&&(this.options.text=a,this.ready()&&this.flashBridge.setText(a)),this},p.prototype.setTitle=function(a){return a&&""!==a&&this.htmlBridge.setAttribute("title",a),this},p.prototype.setSize=function(a,b){return this.ready()&&this.flashBridge.setSize(a,b),this},p.prototype.setHandCursor=function(a){return a="boolean"==typeof a?a:!!a,r.call(this,a),this.options.forceHandCursor=a,this};var r=function(a){this.ready()&&this.flashBridge.setHandCursor(a)};p.version="1.2.2";var s={moviePath:"ZeroClipboard.swf",trustedOrigins:null,text:null,hoverClass:"zeroclipboard-is-hover",activeClass:"zeroclipboard-is-active",allowScriptAccess:"sameDomain",useNoCache:!0,forceHandCursor:!1};p.setDefaults=function(a){for(var b in a)s[b]=a[b]},p.destroy=function(){p.prototype._singleton.unglue(q);var a=p.prototype._singleton.htmlBridge;a.parentNode.removeChild(a),delete p.prototype._singleton},p.detectFlashSupport=function(){var a=!1;if("function"==typeof ActiveXObject)try{new ActiveXObject("ShockwaveFlash.ShockwaveFlash")&&(a=!0)}catch(b){}return!a&&navigator.mimeTypes["application/x-shockwave-flash"]&&(a=!0),a};var t=null,u=null,v=function(){var a=p.prototype._singleton,b=document.getElementById("global-zeroclipboard-html-bridge");if(!b){var c={};for(var d in a.options)c[d]=a.options[d];c.amdModuleId=t,c.cjsModuleId=u;var e=l(c),f=' <object classid="clsid:d27cdb6e-ae6d-11cf-96b8-444553540000" id="global-zeroclipboard-flash-bridge" width="100%" height="100%"> <param name="movie" value="'+a.options.moviePath+k(a.options.moviePath,a.options)+'"/> <param name="allowScriptAccess" value="'+a.options.allowScriptAccess+'"/> <param name="scale" value="exactfit"/> <param name="loop" value="false"/> <param name="menu" value="false"/> <param name="quality" value="best" /> <param name="bgcolor" value="#ffffff"/> <param name="wmode" value="transparent"/> <param name="flashvars" value="'+e+'"/> <embed src="'+a.options.moviePath+k(a.options.moviePath,a.options)+'" loop="false" menu="false" quality="best" bgcolor="#ffffff" width="100%" height="100%" name="global-zeroclipboard-flash-bridge" allowScriptAccess="always" allowFullScreen="false" type="application/x-shockwave-flash" wmode="transparent" pluginspage="http://www.macromedia.com/go/getflashplayer" flashvars="'+e+'" scale="exactfit"> </embed> </object>';b=document.createElement("div"),b.id="global-zeroclipboard-html-bridge",b.setAttribute("class","global-zeroclipboard-container"),b.setAttribute("data-clipboard-ready",!1),b.style.position="absolute",b.style.left="-9999px",b.style.top="-9999px",b.style.width="15px",b.style.height="15px",b.style.zIndex="9999",b.innerHTML=f,document.body.appendChild(b)}a.htmlBridge=b,a.flashBridge=document["global-zeroclipboard-flash-bridge"]||b.children[0].lastElementChild};p.prototype.resetBridge=function(){return this.htmlBridge.style.left="-9999px",this.htmlBridge.style.top="-9999px",this.htmlBridge.removeAttribute("title"),this.htmlBridge.removeAttribute("data-clipboard-text"),h(a,this.options.activeClass),a=null,this.options.text=null,this},p.prototype.ready=function(){var 
a=this.htmlBridge.getAttribute("data-clipboard-ready");return"true"===a||a===!0},p.prototype.reposition=function(){if(!a)return!1;var b=j(a);return this.htmlBridge.style.top=b.top+"px",this.htmlBridge.style.left=b.left+"px",this.htmlBridge.style.width=b.width+"px",this.htmlBridge.style.height=b.height+"px",this.htmlBridge.style.zIndex=b.zIndex+1,this.setSize(b.width,b.height),this},p.dispatch=function(a,b){p.prototype._singleton.receiveEvent(a,b)},p.prototype.on=function(a,b){for(var c=a.toString().split(/\s/g),d=0;d<c.length;d++)a=c[d].toLowerCase().replace(/^on/,""),this.handlers[a]||(this.handlers[a]=b);return this.handlers.noflash&&!p.detectFlashSupport()&&this.receiveEvent("onNoFlash",null),this},p.prototype.addEventListener=p.prototype.on,p.prototype.off=function(a,b){for(var c=a.toString().split(/\s/g),d=0;d<c.length;d++){a=c[d].toLowerCase().replace(/^on/,"");for(var e in this.handlers)e===a&&this.handlers[e]===b&&delete this.handlers[e]}return this},p.prototype.removeEventListener=p.prototype.off,p.prototype.receiveEvent=function(b,c){b=b.toString().toLowerCase().replace(/^on/,"");var d=a,e=!0;switch(b){case"load":if(c&&parseFloat(c.flashVersion.replace(",",".").replace(/[^0-9\.]/gi,""))<10)return this.receiveEvent("onWrongFlash",{flashVersion:c.flashVersion}),void 0;this.htmlBridge.setAttribute("data-clipboard-ready",!0);break;case"mouseover":g(d,this.options.hoverClass);break;case"mouseout":h(d,this.options.hoverClass),this.resetBridge();break;case"mousedown":g(d,this.options.activeClass);break;case"mouseup":h(d,this.options.activeClass);break;case"datarequested":var f=d.getAttribute("data-clipboard-target"),i=f?document.getElementById(f):null;if(i){var j=i.value||i.textContent||i.innerText;j&&this.setText(j)}else{var k=d.getAttribute("data-clipboard-text");k&&this.setText(k)}e=!1;break;case"complete":this.options.text=null}if(this.handlers[b]){var l=this.handlers[b];"string"==typeof l&&"function"==typeof window[l]&&(l=window[l]),"function"==typeof l&&o(l,d,this,c,e)}},p.prototype.glue=function(a){a=n(a);for(var b=0;b<a.length;b++)-1==m(a[b],q)&&(q.push(a[b]),e(a[b],"mouseover",d));return this},p.prototype.unglue=function(a){a=n(a);for(var b=0;b<a.length;b++){f(a[b],"mouseover",d);var c=m(a[b],q);-1!=c&&q.splice(c,1)}return this},"function"==typeof define&&define.amd?define(["require","exports","module"],function(a,b,c){return t=c&&c.id||null,p}):"object"==typeof module&&module&&"object"==typeof module.exports&&module.exports?(u=module.id||null,module.exports=p):window.ZeroClipboard=p}();
|
PypiClean
|
/dwave_hybrid-0.6.10-py3-none-any.whl/hybrid/traits.py
|
from collections.abc import Sequence, Mapping
from hybrid.exceptions import StateTraitMissingError, StateDimensionalityError
class StateTraits(object):
"""Set of traits imposed on State. By default, **not validated**."""
def __init__(self):
self.inputs = set()
self.outputs = set()
self.multi_input = False
self.multi_output = False
self.validate_input = False
self.validate_output = False
def validate_state_trait(self, state, trait, io):
"""Validate single input/output (`io`) `state` `trait`."""
if trait not in state:
raise StateTraitMissingError(
"{} state is missing {!r} on {!r}".format(io, trait, self))
def validate_input_state_traits(self, inp):
if not self.validate_input:
return
if self.multi_input:
if not isinstance(inp, Sequence):
raise StateDimensionalityError(
"state sequence required on input to {!r}".format(self))
for state in inp:
for trait in self.inputs:
self.validate_state_trait(state, trait, "input")
else:
if not isinstance(inp, Mapping):
raise StateDimensionalityError(
"single state required on input to {!r}".format(self))
for trait in self.inputs:
self.validate_state_trait(inp, trait, "input")
def validate_output_state_traits(self, out):
if not self.validate_output:
return
if self.multi_output:
if not isinstance(out, Sequence):
raise StateDimensionalityError(
"state sequence required on output from {!r}".format(self))
for state in out:
for trait in self.outputs:
self.validate_state_trait(state, trait, "output")
else:
if not isinstance(out, Mapping):
raise StateDimensionalityError(
"single state required on output from {!r}".format(self))
for trait in self.outputs:
self.validate_state_trait(out, trait, "output")
#
# I/O validation mixins
#
class InputValidated(StateTraits):
def __init__(self):
super(InputValidated, self).__init__()
self.validate_input = True
class OutputValidated(StateTraits):
def __init__(self):
super(OutputValidated, self).__init__()
self.validate_output = True
class InputNotValidated(StateTraits):
def __init__(self):
super(InputNotValidated, self).__init__()
self.validate_input = False
class OutputNotValidated(StateTraits):
def __init__(self):
super(OutputNotValidated, self).__init__()
self.validate_output = False
class Validated(InputValidated, OutputValidated):
"""Validated input state(s) and output state(s)."""
class NotValidated(InputNotValidated, OutputNotValidated):
"""Input state(s) and output state(s) are not validated."""
#
# I/O dimensionality mixins. Imply I/O validation.
#
class SingleInputState(InputValidated, StateTraits):
def __init__(self):
super(SingleInputState, self).__init__()
self.multi_input = False
class MultiInputStates(InputValidated, StateTraits):
def __init__(self):
super(MultiInputStates, self).__init__()
self.multi_input = True
class SingleOutputState(OutputValidated, StateTraits):
def __init__(self):
super(SingleOutputState, self).__init__()
self.multi_output = False
class MultiOutputStates(OutputValidated, StateTraits):
def __init__(self):
super(MultiOutputStates, self).__init__()
self.multi_output = True
class SISO(SingleInputState, SingleOutputState):
"""Single Input, Single Output."""
class SIMO(SingleInputState, MultiOutputStates):
"""Single Input, Multiple Outputs."""
class MIMO(MultiInputStates, MultiOutputStates):
"""Multiple Inputs, Multiple Outputs."""
class MISO(MultiInputStates, SingleOutputState):
"""Multiple Inputs, Single Output."""
#
# State structure mixins. Imply I/O validation.
#
class ProblemIntaking(InputValidated, StateTraits):
def __init__(self):
super(ProblemIntaking, self).__init__()
self.inputs.add('problem')
class ProblemProducing(OutputValidated, StateTraits):
def __init__(self):
super(ProblemProducing, self).__init__()
self.outputs.add('problem')
class SamplesIntaking(InputValidated, StateTraits):
def __init__(self):
super(SamplesIntaking, self).__init__()
self.inputs.add('samples')
class SamplesProducing(OutputValidated, StateTraits):
def __init__(self):
super(SamplesProducing, self).__init__()
self.outputs.add('samples')
class SubproblemIntaking(InputValidated, StateTraits):
def __init__(self):
super(SubproblemIntaking, self).__init__()
self.inputs.add('subproblem')
class SubproblemProducing(OutputValidated, StateTraits):
def __init__(self):
super(SubproblemProducing, self).__init__()
self.outputs.add('subproblem')
class SubsamplesIntaking(InputValidated, StateTraits):
def __init__(self):
super(SubsamplesIntaking, self).__init__()
self.inputs.add('subsamples')
class SubsamplesProducing(OutputValidated, StateTraits):
def __init__(self):
super(SubsamplesProducing, self).__init__()
self.outputs.add('subsamples')
class EmbeddingIntaking(InputValidated, StateTraits):
def __init__(self):
super(EmbeddingIntaking, self).__init__()
self.inputs.add('embedding')
class EmbeddingProducing(OutputValidated, StateTraits):
def __init__(self):
super(EmbeddingProducing, self).__init__()
self.outputs.add('embedding')
class ProblemDecomposer(ProblemIntaking, SamplesIntaking, SubproblemProducing):
pass
class SubsamplesComposer(SamplesIntaking, SubsamplesIntaking, ProblemIntaking, SamplesProducing):
pass
class ProblemSampler(ProblemIntaking, SamplesProducing):
pass
class SubproblemSampler(SubproblemIntaking, SubsamplesProducing):
pass
class SamplesProcessor(SamplesIntaking, SamplesProducing):
pass
class SubsamplesProcessor(SubsamplesIntaking, SubsamplesProducing):
pass
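# --- Editor's illustrative sketch (not part of dwave-hybrid) ----------------
# A hedged example of how the mixins above compose; in the package they are
# normally mixed into Runnable subclasses, but the trait machinery can be
# exercised on its own. The class name and state contents below are made up.
class _ExampleProblemSampler(ProblemSampler, SISO):
    """Hypothetical component: consumes a single 'problem' state, emits samples."""
def _demo_trait_validation():
    traits = _ExampleProblemSampler()
    traits.validate_input_state_traits({"problem": "a BQM placeholder"})  # passes
    try:
        traits.validate_input_state_traits({})  # 'problem' trait is missing
    except StateTraitMissingError as exc:
        print("validation failed as expected:", exc)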
|
PypiClean
|
/bottle-0.12.25.tar.gz/bottle-0.12.25/test/tools.py
|
import bottle
import sys
import unittest
import wsgiref
import wsgiref.util
import wsgiref.validate
import mimetypes
import uuid
from bottle import tob, tonat, BytesIO, py3k, unicode
def warn(msg):
sys.stderr.write('WARNING: %s\n' % msg.strip())
def tobs(data):
''' Transforms bytes or unicode into a byte stream. '''
return BytesIO(tob(data))
def api(introduced, deprecated=None, removed=None):
current = tuple(map(int, bottle.__version__.split('-')[0].split('.')))
introduced = tuple(map(int, introduced.split('.')))
deprecated = tuple(map(int, deprecated.split('.'))) if deprecated else (99,99)
removed = tuple(map(int, removed.split('.'))) if removed else (99,100)
assert introduced < deprecated < removed
def decorator(func):
if current < introduced:
return None
elif current < deprecated:
return func
elif current < removed:
func.__doc__ = '(deprecated) ' + (func.__doc__ or '')
return func
else:
return None
return decorator
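# --- Editor's illustrative sketch (not part of bottle's test suite) ---------
# Shows how the `api` decorator above gates a helper by the installed bottle
# version; the version bounds below are made up for illustration.
def _demo_api_decorator():
    @api('0.1', deprecated='99.0')
    def helper():
        return 'available'
    # For 0.1 <= bottle version < 99.0 the function is returned unchanged;
    # outside that range `api` replaces it with None.
    return helper() if helper else 'not available in this bottle version'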
def wsgistr(s):
if py3k:
return s.encode('utf8').decode('latin1')
else:
return s
class ServerTestBase(unittest.TestCase):
def setUp(self):
''' Create a new Bottle app and set it as the default app. '''
self.port = 8080
self.host = 'localhost'
self.app = bottle.app.push()
self.wsgiapp = wsgiref.validate.validator(self.app)
def urlopen(self, path, method='GET', post='', env=None):
result = {'code':0, 'status':'error', 'header':{}, 'body':tob('')}
def start_response(status, header):
result['code'] = int(status.split()[0])
result['status'] = status.split(None, 1)[-1]
for name, value in header:
name = name.title()
if name in result['header']:
result['header'][name] += ', ' + value
else:
result['header'][name] = value
env = env if env else {}
wsgiref.util.setup_testing_defaults(env)
env['REQUEST_METHOD'] = wsgistr(method.upper().strip())
env['PATH_INFO'] = wsgistr(path)
env['QUERY_STRING'] = wsgistr('')
if post:
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_LENGTH'] = str(len(tob(post)))
env['wsgi.input'].write(tob(post))
env['wsgi.input'].seek(0)
response = self.wsgiapp(env, start_response)
for part in response:
try:
result['body'] += part
except TypeError:
raise TypeError('WSGI app yielded non-byte object %s' % type(part))
if hasattr(response, 'close'):
response.close()
del response
return result
def postmultipart(self, path, fields, files):
env = multipart_environ(fields, files)
return self.urlopen(path, method='POST', env=env)
def tearDown(self):
bottle.app.pop()
def assertStatus(self, code, route='/', **kargs):
self.assertEqual(code, self.urlopen(route, **kargs)['code'])
def assertBody(self, body, route='/', **kargs):
self.assertEqual(tob(body), self.urlopen(route, **kargs)['body'])
def assertInBody(self, body, route='/', **kargs):
result = self.urlopen(route, **kargs)['body']
if tob(body) not in result:
self.fail('The search pattern "%s" is not included in body:\n%s' % (body, result))
def assertHeader(self, name, value, route='/', **kargs):
self.assertEqual(value, self.urlopen(route, **kargs)['header'].get(name))
def assertHeaderAny(self, name, route='/', **kargs):
self.assertTrue(self.urlopen(route, **kargs)['header'].get(name, None))
def assertInError(self, search, route='/', **kargs):
bottle.request.environ['wsgi.errors'].errors.seek(0)
err = bottle.request.environ['wsgi.errors'].errors.read()
if search not in err:
self.fail('The search pattern "%s" is not included in wsgi.error: %s' % (search, err))
def multipart_environ(fields, files):
boundary = str(uuid.uuid1())
env = {'REQUEST_METHOD':'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary='+boundary}
wsgiref.util.setup_testing_defaults(env)
boundary = '--' + boundary
body = ''
for name, value in fields:
body += boundary + '\n'
body += 'Content-Disposition: form-data; name="%s"\n\n' % name
body += value + '\n'
for name, filename, content in files:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
body += boundary + '\n'
body += 'Content-Disposition: file; name="%s"; filename="%s"\n' % \
(name, filename)
body += 'Content-Type: %s\n\n' % mimetype
body += content + '\n'
body += boundary + '--\n'
if isinstance(body, unicode):
body = body.encode('utf8')
env['CONTENT_LENGTH'] = str(len(body))
env['wsgi.input'].write(body)
env['wsgi.input'].seek(0)
return env
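# --- Editor's illustrative sketch (not part of bottle's test suite) ---------
# A minimal test case built on ServerTestBase; the route and body are made up.
# setUp() pushes a fresh default app, so bottle.route() registers on self.app.
class _ExampleServerTest(ServerTestBase):
    def test_hello(self):
        @bottle.route('/hello')
        def hello():
            return 'Hello World!'
        self.assertStatus(200, '/hello')
        self.assertBody('Hello World!', '/hello')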
|
PypiClean
|
/onnxoptimizer-0.3.2.tar.gz/onnxoptimizer-0.3.2/third_party/onnx/onnx/backend/test/case/node/roialign.py
|
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
def get_roi_align_input_values(): # type: ignore
X = np.array(
[
[
[
[
0.2764,
0.7150,
0.1958,
0.3416,
0.4638,
0.0259,
0.2963,
0.6518,
0.4856,
0.7250,
],
[
0.9637,
0.0895,
0.2919,
0.6753,
0.0234,
0.6132,
0.8085,
0.5324,
0.8992,
0.4467,
],
[
0.3265,
0.8479,
0.9698,
0.2471,
0.9336,
0.1878,
0.4766,
0.4308,
0.3400,
0.2162,
],
[
0.0206,
0.1720,
0.2155,
0.4394,
0.0653,
0.3406,
0.7724,
0.3921,
0.2541,
0.5799,
],
[
0.4062,
0.2194,
0.4473,
0.4687,
0.7109,
0.9327,
0.9815,
0.6320,
0.1728,
0.6119,
],
[
0.3097,
0.1283,
0.4984,
0.5068,
0.4279,
0.0173,
0.4388,
0.0430,
0.4671,
0.7119,
],
[
0.1011,
0.8477,
0.4726,
0.1777,
0.9923,
0.4042,
0.1869,
0.7795,
0.9946,
0.9689,
],
[
0.1366,
0.3671,
0.7011,
0.6234,
0.9867,
0.5585,
0.6985,
0.5609,
0.8788,
0.9928,
],
[
0.5697,
0.8511,
0.6711,
0.9406,
0.8751,
0.7496,
0.1650,
0.1049,
0.1559,
0.2514,
],
[
0.7012,
0.4056,
0.7879,
0.3461,
0.0415,
0.2998,
0.5094,
0.3727,
0.5482,
0.0502,
],
]
]
],
dtype=np.float32,
)
batch_indices = np.array([0, 0, 0], dtype=np.int64)
rois = np.array([[0, 0, 9, 9], [0, 5, 4, 9], [5, 5, 9, 9]], dtype=np.float32)
return X, batch_indices, rois
class RoiAlign(Base):
@staticmethod
def export_roialign_aligned_false() -> None:
node = onnx.helper.make_node(
"RoiAlign",
inputs=["X", "rois", "batch_indices"],
outputs=["Y"],
spatial_scale=1.0,
output_height=5,
output_width=5,
sampling_ratio=2,
coordinate_transformation_mode="output_half_pixel",
)
X, batch_indices, rois = get_roi_align_input_values()
# (num_rois, C, output_height, output_width)
Y = np.array(
[
[
[
[0.4664, 0.4466, 0.3405, 0.5688, 0.6068],
[0.3714, 0.4296, 0.3835, 0.5562, 0.3510],
[0.2768, 0.4883, 0.5222, 0.5528, 0.4171],
[0.4713, 0.4844, 0.6904, 0.4920, 0.8774],
[0.6239, 0.7125, 0.6289, 0.3355, 0.3495],
]
],
[
[
[0.3022, 0.4305, 0.4696, 0.3978, 0.5423],
[0.3656, 0.7050, 0.5165, 0.3172, 0.7015],
[0.2912, 0.5059, 0.6476, 0.6235, 0.8299],
[0.5916, 0.7389, 0.7048, 0.8372, 0.8893],
[0.6227, 0.6153, 0.7097, 0.6154, 0.4585],
]
],
[
[
[0.2384, 0.3379, 0.3717, 0.6100, 0.7601],
[0.3767, 0.3785, 0.7147, 0.9243, 0.9727],
[0.5749, 0.5826, 0.5709, 0.7619, 0.8770],
[0.5355, 0.2566, 0.2141, 0.2796, 0.3600],
[0.4365, 0.3504, 0.2887, 0.3661, 0.2349],
]
],
],
dtype=np.float32,
)
expect(node, inputs=[X, rois, batch_indices], outputs=[Y], name="test_roialign_aligned_false")
@staticmethod
def export_roialign_aligned_true() -> None:
node = onnx.helper.make_node(
"RoiAlign",
inputs=["X", "rois", "batch_indices"],
outputs=["Y"],
spatial_scale=1.0,
output_height=5,
output_width=5,
sampling_ratio=2,
coordinate_transformation_mode="half_pixel",
)
X, batch_indices, rois = get_roi_align_input_values()
# (num_rois, C, output_height, output_width)
Y = np.array(
[
[
[
[0.5178, 0.3434, 0.3229, 0.4474, 0.6344],
[0.4031, 0.5366, 0.4428, 0.4861, 0.4023],
[0.2512, 0.4002, 0.5155, 0.6954, 0.3465],
[0.3350, 0.4601, 0.5881, 0.3439, 0.6849],
[0.4932, 0.7141, 0.8217, 0.4719, 0.4039],
]
],
[
[
[0.3070, 0.2187, 0.3337, 0.4880, 0.4870],
[0.1871, 0.4914, 0.5561, 0.4192, 0.3686],
[0.1433, 0.4608, 0.5971, 0.5310, 0.4982],
[0.2788, 0.4386, 0.6022, 0.7000, 0.7524],
[0.5774, 0.7024, 0.7251, 0.7338, 0.8163],
]
],
[
[
[0.2393, 0.4075, 0.3379, 0.2525, 0.4743],
[0.3671, 0.2702, 0.4105, 0.6419, 0.8308],
[0.5556, 0.4543, 0.5564, 0.7502, 0.9300],
[0.6626, 0.5617, 0.4813, 0.4954, 0.6663],
[0.6636, 0.3721, 0.2056, 0.1928, 0.2478],
]
],
],
dtype=np.float32,
)
expect(node, inputs=[X, rois, batch_indices], outputs=[Y], name="test_roialign_aligned_true")
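# --- Editor's note: small shape sketch (not an official ONNX test case) -----
# The expected outputs above follow the RoiAlign output layout
# (num_rois, C, output_height, output_width); for these inputs that is (3, 1, 5, 5).
def _expected_roialign_shape(output_height=5, output_width=5):
    X, _batch_indices, rois = get_roi_align_input_values()
    return (rois.shape[0], X.shape[1], output_height, output_width)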
|
PypiClean
|
/zebracorn-0.0.1.tar.gz/zebracorn-0.0.1/src/make.sh
|
# Unicorn Engine
# By Nguyen Anh Quynh <[email protected]>, 2015
usage() {
cat 1>&2 <<EOF
make.sh - The build script for unicorn engine
USAGE:
$ ./make.sh [OPTIONS]
OPTIONS:
(no option) Build the project
asan Build for ASan
install Install the project
uninstall Uninstall the project
macos-universal Build universal binaries on macOS
macos-universal-no Build non-universal binaries that includes only 64-bit code on macOS
cross-win32 Cross-compile Windows 32-bit binary with MinGW
cross-win64 Cross-compile Windows 64-bit binary with MinGW
cross-android_arm Cross-compile for Android Arm
cross-android_arm64 Cross-compile for Android Arm64
linux32 Cross-compile Unicorn on 64-bit Linux to target 32-bit binary
msvc_update_genfiles Generate files for MSVC projects
EOF
}
MAKE_JOBS=$((MAKE_JOBS+0))
[ ${MAKE_JOBS} -lt 1 ] && \
MAKE_JOBS=4
# build for ASAN
asan() {
env UNICORN_DEBUG=yes UNICORN_ASAN=yes "${MAKE}" V=1
}
build_cross() {
[ "$UNAME" = Darwin ] && LIBARCHS="i386 x86_64"
CROSS=$1
CC=$CROSS-gcc \
AR=$CROSS-gcc-ar \
RANLIB=$CROSS-gcc-ranlib \
${MAKE}
}
build_linux32() {
PKG_CONFIG_PATH="/usr/lib/i386-linux-gnu/pkgconfig" \
CFLAGS=-m32 \
LDFLAGS=-m32 \
LDFLAGS_STATIC=-m32 \
LIBRARY_PATH="/usr/lib/i386-linux-gnu" \
UNICORN_QEMU_FLAGS="--cpu=i386 ${UNICORN_QEMU_FLAGS}" \
${MAKE}
}
install() {
# Mac OSX needs to find the right directory for pkgconfig
if [ "$UNAME" = Darwin ]; then
# we are going to install into /usr/local, so remove old installs under /usr
rm -rf /usr/lib/libunicorn*
rm -rf /usr/include/unicorn
# install into /usr/local
PREFIX=${PREFIX:-/usr/local}
${MAKE} install
else # not OSX
test -d /usr/lib64 && LIBDIRARCH=lib64
${MAKE} install
fi
}
uninstall() {
# Mac OSX needs to find the right directory for pkgconfig
if [ "$UNAME" = "Darwin" ]; then
# find the directory automatically, so we can support both Macport & Brew
PKGCFGDIR="$(pkg-config --variable pc_path pkg-config | cut -d ':' -f 1)"
PREFIX=${PREFIX:-/usr/local}
${MAKE} uninstall
else # not OSX
test -d /usr/lib64 && LIBDIRARCH=lib64
${MAKE} uninstall
fi
}
msvc_update_genfiles() {
${MAKE}
cp qemu/qapi-types.h msvc/unicorn/qapi-types.h
cp qemu/qapi-visit.h msvc/unicorn/qapi-visit.h
cp qemu/qapi-types.c msvc/unicorn/qapi-types.c
cp qemu/qapi-visit.c msvc/unicorn/qapi-visit.c
cp qemu/config-host.h msvc/unicorn/config-host.h
cp qemu/aarch64-softmmu/config-target.h msvc/unicorn/aarch64-softmmu/config-target.h
cp qemu/aarch64eb-softmmu/config-target.h msvc/unicorn/aarch64eb-softmmu/config-target.h
cp qemu/arm-softmmu/config-target.h msvc/unicorn/arm-softmmu/config-target.h
cp qemu/armeb-softmmu/config-target.h msvc/unicorn/armeb-softmmu/config-target.h
cp qemu/m68k-softmmu/config-target.h msvc/unicorn/m68k-softmmu/config-target.h
cp qemu/mips64el-softmmu/config-target.h msvc/unicorn/mips64el-softmmu/config-target.h
cp qemu/mips64-softmmu/config-target.h msvc/unicorn/mips64-softmmu/config-target.h
cp qemu/mipsel-softmmu/config-target.h msvc/unicorn/mipsel-softmmu/config-target.h
cp qemu/mips-softmmu/config-target.h msvc/unicorn/mips-softmmu/config-target.h
cp qemu/sparc64-softmmu/config-target.h msvc/unicorn/sparc64-softmmu/config-target.h
cp qemu/sparc-softmmu/config-target.h msvc/unicorn/sparc-softmmu/config-target.h
cp qemu/x86_64-softmmu/config-target.h msvc/unicorn/x86_64-softmmu/config-target.h
}
UNAME=${UNAME:-$(uname)}
MAKE=${MAKE:-make}
#[ -n "${MAKE_JOBS}" ] && MAKE="$MAKE -j${MAKE_JOBS}"
if [ "$UNAME" = SunOS ]; then
MAKE=${MAKE:-gmake}
INSTALL_BIN=ginstall
CC=gcc
fi
if echo "$UNAME" | grep -q BSD; then
MAKE=gmake
PREFIX=${PREFIX:-/usr/local}
fi
export CC INSTALL_BIN PREFIX PKGCFGDIR LIBDIRARCH LIBARCHS CFLAGS LDFLAGS
case "$1" in
"" ) ${MAKE};;
"asan" ) asan;;
"install" ) install;;
"uninstall" ) uninstall;;
"macos-universal" ) MACOS_UNIVERSAL=yes ${MAKE};;
"macos-universal-no" ) MACOS_UNIVERSAL=no ${MAKE};;
"cross-win32" ) build_cross i686-w64-mingw32;;
"cross-win64" ) build_cross x86_64-w64-mingw32;;
"cross-android_arm" ) CROSS=arm-linux-androideabi ${MAKE};;
"cross-android_arm64" ) CROSS=aarch64-linux-android ${MAKE};;
"linux32" ) build_linux32;;
"msvc_update_genfiles" ) msvc_update_genfiles;;
* )
usage;
exit 1;;
esac
|
PypiClean
|
/seamm_util-2023.6.4.tar.gz/seamm_util-2023.6.4/seamm_util/printing.py
|
import inspect
import logging
import os
import sys
import textwrap
try:
import thread
import threading
except ImportError:
thread = None
# ---------------------------------------------------------------------------
# Level related stuff
# ---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, ALL, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with ALL so that they will log all messages, even
# at user-defined levels.
#
JOB = 50
IMPORTANT = 40
TERSE = 30
NORMAL = 20
VERBOSE = 10
ALL = 0
_levelNames = {
JOB: "JOB",
IMPORTANT: "IMPORTANT",
TERSE: "TERSE",
NORMAL: "NORMAL",
VERBOSE: "VERBOSE",
ALL: "ALL",
"JOB": JOB,
"IMPORTANT": IMPORTANT,
"TERSE": TERSE,
"NORMAL": NORMAL,
"VERBOSE": VERBOSE,
"ALL": ALL,
}
def getLevelName(lvl):
"""
Return the textual representation of printing level 'lvl'. If the level is
one of the predefined levels (JOB, IMPORTANT, TERSE, NORMAL, VERBOSE) then
you get the corresponding string. If you have associated levels with names
using addLevelName then the name you have associated with 'lvl' is
returned. Otherwise, the string "Level %s" % lvl is returned.
"""
return _levelNames.get(lvl, "Level {}".format(lvl))
def addLevelName(lvl, levelName):
"""
Associate 'levelName' with 'lvl'. This is used when converting levels
to text during message formatting.
"""
_acquireLock()
try: # unlikely to cause an exception, but you never know...
_levelNames[lvl] = levelName
_levelNames[levelName] = lvl
finally:
_releaseLock()
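# --- Editor's illustrative sketch (not part of seamm_util) ------------------
# Registering a custom level; the value 25 and the name "CHATTY" are made up.
def _demo_custom_level():
    addLevelName(25, "CHATTY")  # hypothetical level between NORMAL and TERSE
    return getLevelName(25)     # -> "CHATTY"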
# ---------------------------------------------------------------------------
# Thread-related stuff
# ---------------------------------------------------------------------------
#
# _lock is used to serialize access to shared data structures in this module.
# This needs to be an RLock because fileConfig() creates Handlers and so
# might arbitrary user threads. Since Handler.__init__() updates the shared
# dictionary _handlers, it needs to acquire the lock. But if configuring,
# the lock would already have been acquired - so we need an RLock.
# The same argument applies to Loggers and Manager.loggerDict.
#
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
global _lock
if (not _lock) and thread:
_lock = threading.RLock()
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
# ---------------------------------------------------------------------------
# Manager classes and functions
# ---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager printer hierarchy to take
the place of nodes for which no printers have been defined [FIXME add
example].
"""
def __init__(self, aprinter):
"""
Initialize with the specified printer being a child of this
placeholder.
"""
self.printers = [aprinter]
def append(self, aprinter):
"""
Add the specified printer as a child of this placeholder.
"""
if aprinter not in self.printers:
self.printers.append(aprinter)
#
# Determine which class to use when instantiating printers.
#
_printerClass = None
def setPrinterClass(klass):
"""
Set the class to be used when instantiating a printer. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Printer.__init__()
"""
if klass != Printer:
if not inspect.isclass(klass):
raise TypeError("setPrinterClass is expecting a class")
if not issubclass(klass, Printer):
raise TypeError(
"printer not derived from printer.Printer: " + klass.__name__
)
global _printerClass
_printerClass = klass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of printers.
"""
def __init__(self, print_root):
"""
Initialize the manager with the root node of the printer hierarchy.
"""
self.print_root = print_root
self.disable = 0
self.emittedNoHandlerWarning = 0
self.printerDict = {}
def getPrinter(self, name):
"""
Get a printer with the specified name (channel name), creating it
if it doesn't yet exist. If a PlaceHolder existed for the specified
name [i.e. the printer didn't exist but a child of it did], replace
it with the created printer and fix up the parent/child references
which pointed to the placeholder to now point to the printer.
"""
rv = None
_acquireLock()
try:
if name in self.printerDict:
rv = self.printerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = _printerClass(name)
rv.manager = self
self.printerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = _printerClass(name)
rv.manager = self
self.printerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def _fixupParents(self, aprinter):
"""
Ensure that there are either printers or placeholders all the way
from the specified printer to the root of the printer hierarchy.
"""
name = aprinter.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.printerDict:
self.printerDict[substr] = PlaceHolder(aprinter)
else:
obj = self.printerDict[substr]
if isinstance(obj, Printer):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(aprinter)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.print_root
aprinter.parent = rv
def _fixupChildren(self, ph, aprinter):
"""
Ensure that children of the placeholder ph are connected to the
specified printer.
"""
for c in ph.printers:
if aprinter.name in c.parent.name:
aprinter.parent = c.parent
c.parent = aprinter
class Printer(logging.Filterer):
"""Provide controlled printing for plugins for the MolSSI
Framework.
"""
def __init__(self, name, level=ALL):
"""Create the Printer object"""
super().__init__()
if name == "print_root":
self.logger = logging.getLogger(name)
else:
self.logger = logging.getLogger("print_root." + name)
self.logger.setLevel(level)
@property
def name(self):
return self.logger.name
@name.setter
def name(self, value):
self.logger.name = value
@property
def level(self):
return self.logger.level
@property
def parent(self):
return self.logger.parent
@parent.setter
def parent(self, value):
self.logger.parent = value
@property
def propagate(self):
return self.logger.propagate
@propagate.setter
def propagate(self, value):
self.logger.propagate = value
@property
def handlers(self):
return self.logger.handlers
@property
def disabled(self):
return self.logger.disabled
@disabled.setter
def disabled(self, value):
self.logger.disabled = value
def setLevel(self, lvl):
"""
Set the printing level of this printer.
"""
self.logger.level = lvl
def verbose(self, msg, *args, **kwargs):
"""Logs a message with level VERBOSE on this printer.
The msg is the message format string, and the args are the arguments
which are merged into msg using the string formatting
operator. (Note that this means that you can use keywords in the
format string, together with a single dictionary argument.)
"""
self.print(VERBOSE, msg, *args, **kwargs)
def normal(self, msg, *args, **kwargs):
"""Logs a message with level NORMAL on this printer.
The msg is the message format string, and the args are the arguments
which are merged into msg using the string formatting
operator. (Note that this means that you can use keywords in the
format string, together with a single dictionary argument.)
"""
self.print(NORMAL, msg, *args, **kwargs)
def terse(self, msg, *args, **kwargs):
"""Logs a message with level TERSE on this printer.
The msg is the message format string, and the args are the arguments
which are merged into msg using the string formatting
operator. (Note that this means that you can use keywords in the
format string, together with a single dictionary argument.)
"""
self.print(TERSE, msg, *args, **kwargs)
def important(self, msg, *args, **kwargs):
"""Logs a message with level IMPORTANT on this printer.
The msg is the message format string, and the args are the arguments
which are merged into msg using the string formatting
operator. (Note that this means that you can use keywords in the
format string, together with a single dictionary argument.)
"""
self.print(IMPORTANT, msg, *args, **kwargs)
def job(self, msg, *args, **kwargs):
"""Logs a message with level JOB on this printer.
The msg is the message format string, and the args are the arguments
which are merged into msg using the string formatting
operator. (Note that this means that you can use keywords in the
format string, together with a single dictionary argument.)
"""
self.print(JOB, msg, *args, **kwargs)
def print(self, lvl, msg, *args, **kwargs):
"""Prints a message with integer level lvl on this logger.
The other arguments are interpreted as for verbose().
"""
# print('print: lvl={}, msg={}, args={}, kwargs={}'.format(
# lvl, msg, args, kwargs
# ))
self.logger.log(lvl, msg, *args, **kwargs)
def findCaller(self):
"""
Find the stack frame of the caller so that we can note the source
file name and line number.
"""
rv = (None, None)
frame = inspect.currentframe().f_back
while frame:
sfn = inspect.getsourcefile(frame)
if sfn:
sfn = os.path.normcase(sfn)
if sfn != _srcfile: # noqa: F821
# print frame.f_code.co_code
lineno = inspect.getlineno(frame)
rv = (sfn, lineno)
break
frame = frame.f_back
return rv
def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
return self.logger.makeRecord(name, lvl, fn, lno, msg, args, exc_info)
def _log(self, lvl, msg, args, exc_info=None):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
self.logger._log(lvl, msg, args, exc_info)
def handle(self, record):
"""
Call the handlers for the specified record. This method is used for
unpickled records received from a socket, as well as those created
locally. Logger-level filtering is applied.
"""
self.logger.handle(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this printer.
"""
self.logger.addHandler(hdlr)
def removeHandler(self, hdlr):
"""
Remove the specified handler from this printer.
"""
self.logger.removeHandler(hdlr)
def callHandlers(self, record):
"""
Loop through all handlers for this printer and its parents in the
printer hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
printer with the "propagate" attribute set to zero is found - that
will be the last printer whose handlers are called.
"""
self.logger.callHandlers(record)
def getEffectiveLevel(self):
"""
Loop through this printer and its parents in the printer hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
return self.logger.getEffectiveLevel()
def isEnabledFor(self, lvl):
"""
Is this printer enabled for level lvl?
"""
return self.logger.isEnabledFor(lvl)
class RootPrinter(Printer):
"""
A root printer is not that different to any other printer, except that
it must have a printing level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, lvl):
"""
Initialize the printer with the name "print_root".
"""
super().__init__("print_root", lvl)
# Turn off propagation and set up a handler
# so that we are separate from the loggers
self.logger.propagate = False
_printerClass = Printer
print_root = RootPrinter(NORMAL)
Printer.print_root = print_root
Printer.manager = Manager(Printer.print_root)
# ---------------------------------------------------------------------------
# Configuration classes and functions
# ---------------------------------------------------------------------------
BASIC_FORMAT = "{message:s}"
def basicConfig():
"""
Do basic configuration for the printing system by creating a
StreamHandler with a default Formatter and adding it to the
root logger.
"""
if len(print_root.handlers) == 0:
hdlr = logging.StreamHandler()
fmt = logging.Formatter(BASIC_FORMAT, style="{")
hdlr.setFormatter(fmt)
print_root.addHandler(hdlr)
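# --- Editor's illustrative sketch (not part of seamm_util) ------------------
# Minimal printer setup; the channel name is made up. getPrinter is defined
# further down in this module, which is fine because the name is only resolved
# when the demo is called.
def _demo_basic_printing():
    basicConfig()                        # plain StreamHandler on the root printer
    printer = getPrinter("demo.plugin")  # hypothetical plugin channel
    printer.setLevel(NORMAL)
    printer.normal("printed: NORMAL is at or above the printer's level")
    printer.verbose("suppressed: VERBOSE is below the printer's level")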
_handlers = {} # repository of handlers (for flushing when shutdown called)
def fileConfig(fname):
"""
Read the printing configuration from a ConfigParser-format file. This can
be called several times from an application, allowing an end user the
ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
In versions of ConfigParser which have the readfp method [typically
shipped in 2.x versions of Python], you can pass in a file-like object
rather than a filename, in which case the file-like object will be read
using readfp.
"""
import configparser as ConfigParser
cp = ConfigParser.ConfigParser()
if hasattr(cp, "readfp") and hasattr(fname, "read"):
cp.readfp(fname)
else:
cp.read(fname)
# first, do the formatters...
flist = cp.get("formatters", "keys")
if len(flist):
flist = flist.split(",")
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
opts = cp.options(sectname)
if "format" in opts:
fs = cp.get(sectname, "format", raw=True)
else:
fs = None
if "datefmt" in opts:
dfs = cp.get(sectname, "datefmt", raw=True)
else:
dfs = None
f = logging.Formatter(fs, dfs)
formatters[form] = f
# next, do the handlers...
# critical section...
_acquireLock()
try:
try:
# first, lose the existing handlers...
_handlers.clear()
# now set up the new ones...
hlist = cp.get("handlers", "keys")
if len(hlist):
hlist = hlist.split(",")
handlers = {}
fixups = [] # for inter-handler references
for hand in hlist:
sectname = "handler_%s" % hand
klass = cp.get(sectname, "class")
opts = cp.options(sectname)
if "formatter" in opts:
fmt = cp.get(sectname, "formatter")
else:
fmt = ""
klass = eval(klass)
args = cp.get(sectname, "args")
args = eval(args)
h = klass(args)
if "level" in opts:
lvl = cp.get(sectname, "level")
h.setLevel(_levelNames[lvl])
if len(fmt):
h.setFormatter(formatters[fmt])
# temporary hack for FileHandler and MemoryHandler.
if klass == logging.FileHandler:
maxsize = 0
if "maxsize" in opts:
ms = cp.getint(sectname, "maxsize")
if ms > 0:
maxsize = ms
if maxsize:
backcount = 0
if "backcount" in opts:
bc = cp.getint(sectname, "backcount")
if bc > 0:
backcount = bc
h.setRollover(maxsize, backcount)
elif klass == logging.MemoryHandler:
if "target" in opts:
target = cp.get(sectname, "target")
else:
target = ""
if len(target): # the target handler may not be loaded yet,
# so keep for later...
fixups.append((h, target))
handlers[hand] = h
# now all handlers are loaded, fixup inter-handler references..
for fixup in fixups:
h = fixup[0]
t = fixup[1]
h.setTarget(handlers[t])
# at last, the loggers...first the root...
llist = cp.get("loggers", "keys")
llist = llist.split(",")
llist.remove("root")
sectname = "printer_root"
log = print_root
opts = cp.options(sectname)
if "level" in opts:
lvl = cp.get(sectname, "level")
log.setLevel(_levelNames[lvl])
for h in print_root.handlers:
print_root.removeHandler(h)
hlist = cp.get(sectname, "handlers")
if len(hlist):
hlist = hlist.split(",")
for hand in hlist:
log.addHandler(handlers[hand])
# and now the others...
# we don't want to lose the existing loggers,
# since other threads may have pointers to them.
# existing is set to contain all existing loggers,
# and as we go through the new configuration we
# remove any which are configured. At the end,
# what's left in existing is the set of loggers
# which were in the previous configuration but
# which are not in the new configuration.
existing = list(print_root.manager.printerDict.keys())
# now set up the new ones...
for log in llist:
sectname = "printer_%s" % log
qn = cp.get(sectname, "qualname")
opts = cp.options(sectname)
if "propagate" in opts:
propagate = cp.getint(sectname, "propagate")
else:
propagate = 1
printer = getPrinter(qn)
if qn in existing:
existing.remove(qn)
if "level" in opts:
lvl = cp.get(sectname, "level")
printer.setLevel(_levelNames[lvl])
for h in printer.handlers:
printer.removeHandler(h)
printer.propagate = propagate
printer.disabled = 0
hlist = cp.get(sectname, "handlers")
if len(hlist):
hlist = hlist.split(",")
for hand in hlist:
printer.addHandler(handlers[hand])
# Disable any old loggers. There's no point deleting
# them as other threads may continue to hold references
# and by disabling them, you stop them doing any printing.
for log in existing:
print_root.manager.printerDict[log].disabled = 1
except Exception: # noqa: E722
import traceback
ei = sys.exc_info()
traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
del ei
finally:
_releaseLock()
# ---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root printer.
# ---------------------------------------------------------------------------
def getPrinter(name=None):
"""
Return a printer with the specified name, creating it if necessary.
If no name is specified, return the root printer.
"""
if name:
return Printer.manager.getPrinter(name)
else:
return print_root
def job(msg, *args, **kwargs):
"""
Log a message with severity 'JOB' on the root printer.
"""
if len(print_root.handlers) == 0:
basicConfig()
print_root.job(msg, *args, **kwargs)
def important(msg, *args, **kwargs):
"""
Log a message with severity 'IMPORTANT' on the root printer.
"""
if len(print_root.handlers) == 0:
basicConfig()
print_root.important(msg, *args, **kwargs)
def terse(msg, *args, **kwargs):
"""
Log a message with severity 'TERSE' on the root printer.
"""
if len(print_root.handlers) == 0:
basicConfig()
print_root.terse(msg, *args, **kwargs)
def normal(msg, *args, **kwargs):
"""
Log a message with severity 'NORMAL' on the root printer.
"""
if len(print_root.handlers) == 0:
basicConfig()
print_root.normal(msg, *args, **kwargs)
def verbose(msg, *args, **kwargs):
"""
Log a message with severity 'VERBOSE' on the root printer.
"""
if len(print_root.handlers) == 0:
basicConfig()
print_root.verbose(msg, *args, **kwargs)
def disable(level):
"""
Disable all printing calls less severe than 'level'.
"""
print_root.manager.disable = level
def shutdown():
"""
Perform any cleanup actions in the printing system (e.g. flushing
buffers). Should be called at application exit.
"""
for h in _handlers.keys():
h.flush()
h.close()
#
# Did not bring the socket listener code over yet
#
#
class FormattedText(object):
"""
Class for wrapping, formatting and indenting text to make textual output
from the MolSSI Framework and its plugins easier
The __str__ method returns the formatted text, so e.g. print(<object>)
formats the text and prints it as expected.
Example:
-----------------------------------------------------------------------
ft = FormattedText('''\
This is the first line.
And the second.
And a very, very, very, very, very, very, very, very, very, very, very, very long line.
And i = '{i}' and j = '{j}' followed by a line break
Ending with the last line. Which can of course also be wrapped
if we want it to be.
What about a list: 1
With a second item: 2
Does that work?''', i=1, j=2, indent='--->', line_length=70)
print(ft)
-----------------------------------------------------------------------
Will print the following:
--->This is the first line. And the second. And a very, very, very,
--->very, very, very, very, very, very, very, very, very long line.
--->And i = '1' and j = '2' followed by a line break
--->Ending with the last line. Which can of course also be wrapped if
--->we want it to be.
---> What about a list: 1
---> With a second item: 2
--->Does that work?
where the start of each arrow is aligned to the left (not indented as in
this string).
""" # noqa: E501
def __init__(
self,
text,
dedent=True,
indent="",
indent_initial=True,
indent_all=False,
wrap=True,
line_length=80,
*args,
**kwargs
):
"""
Handle the text <text>, which usually from either a simple string
or a triple-quoted string.
The <dedent> option uses textwrap.dedent to remove consistent initial
spaces so that the string can be nicely lined up in the source code.
<indent> provides an optional string to prefix each line of output
<indent_all> indicates that empty lines or those containing only white
space should also be indented. By default they are not.
<indent_initial> indicates that the first line should be indented.
Defaults to True.
<wrap> asks that lines of text not starting with a blank be wrapped
at <line_length>.
<*args> and <**kwargs> are additional arguments for {}-type formatting
in the text. This is done after dedenting but before wrapping the text.
Empty lines separate 'paragraphs'. Each paragraph is wrapped by itself,
which is the behavior that most people expect.
Since lines starting with blanks (after dedent'ing) are not wrapped,
this can be used for e.g. tables and lists.
"""
self.text = text
self.args = args
self.kwargs = kwargs
self.dedent = dedent
self.indent = indent
self.indent_initial = indent_initial
self.indent_all = indent_all
self.wrap = wrap
self.line_length = line_length
def __str__(self):
"""Turn into a formatted, wrapped string as requested.
NB. If there are no extra args or kwargs, don't format the
string. This avoids problems with strings that have, e.g.
braces internally.
"""
if self.dedent:
if len(self.args) == 0 and len(self.kwargs) == 0:
text = textwrap.dedent(self.text)
else:
text = textwrap.dedent(self.text.format(*self.args, **self.kwargs))
else:
if len(self.args) == 0 and len(self.kwargs) == 0:
text = self.text
else:
text = self.text.format(*self.args, **self.kwargs)
if self.wrap:
wrapped_text = ""
buffer = []
if not self.indent_initial:
initial_indent = ""
else:
initial_indent = self.indent
for line in text.splitlines():
if line.strip() == "" or line[0] == " ":
if len(buffer) > 0:
wrapped_text += textwrap.fill(
"\n".join(buffer),
self.line_length,
initial_indent=initial_indent,
subsequent_indent=self.indent,
)
wrapped_text += "\n"
buffer = []
if line.strip() == "":
if self.indent_all:
wrapped_text += self.indent + "\n"
else:
wrapped_text += "\n"
elif line.strip() != "" and line[0] != " ":
buffer.append(line)
if line.strip() != "" and line[0] == " ":
wrapped_text += self.indent + line + "\n"
if len(buffer) > 0:
wrapped_text += textwrap.fill(
"\n".join(buffer),
self.line_length,
initial_indent=initial_indent,
subsequent_indent=self.indent,
)
return wrapped_text
else:
if self.indent_all:
return textwrap.indent(text, self.indent, lambda line: True)
else:
return textwrap.indent(text, self.indent)
if __name__ == "__main__":
print(__doc__)
|
PypiClean
|
/nlp_uncertainty_zoo-1.0.3-py3-none-any.whl/nlp_uncertainty_zoo/models/bert.py
|
from typing import Type, Optional, Dict, Any
# EXT
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as scheduler
from torch.utils.data import DataLoader
from transformers import BertModel as HFBertModel # Rename to avoid collision
# PROJECT
from nlp_uncertainty_zoo.models.model import Module, Model
from nlp_uncertainty_zoo.utils.custom_types import Device, WandBRun
class BertModule(Module):
"""
Define a BERT module that implements all the functions implemented in Module.
"""
def __init__(
self,
bert_name: str,
output_size: int,
is_sequence_classifier: bool,
bert_class: Type[HFBertModel],
device: Device,
**build_params,
):
"""
Initialize a BERT module.
Parameters
----------
bert_name: str
Name of the underlying BERT, as specified in HuggingFace transformers.
output_size: int
Number of classes.
is_sequence_classifier: bool
Indicate whether the model is going to be used as a sequence classifier. Otherwise, predictions are going to
be made at every time step.
bert_class: Type[HFBertModel]
Type of BERT to be used. Default is BertModel from the Huggingface transformers package.
device: Device
Device the model should be moved to.
"""
bert = bert_class.from_pretrained(bert_name).to(device)
hidden_size = bert.config.hidden_size
super().__init__(
num_layers=bert.config.num_hidden_layers,
vocab_size=bert.config.vocab_size,
input_size=hidden_size,
hidden_size=hidden_size,
output_size=hidden_size,
is_sequence_classifier=is_sequence_classifier,
device=device,
)
self.bert = bert
self.output_size = output_size
self.sequence_length = bert.config.max_length
self.layer_norm = nn.LayerNorm([hidden_size])
self.output = nn.Linear(hidden_size, output_size)
def forward(self, input_: torch.LongTensor, *args, **kwargs) -> torch.FloatTensor:
"""
Forward pass of the model.
Parameters
----------
input_: torch.LongTensor
(Batch of) Indexed input sequences.
Returns
-------
torch.FloatTensor
Output predictions for input.
"""
attention_mask = kwargs["attention_mask"]
return_dict = self.bert.forward(input_, attention_mask, return_dict=True)
if self.is_sequence_classifier:
cls_activations = return_dict["last_hidden_state"][:, 0, :]
out = torch.tanh(self.bert.pooler.dense(cls_activations))
out = self.layer_norm(out)
out = out.unsqueeze(1)
else:
activations = return_dict["last_hidden_state"]
out = self.layer_norm(activations)
out = self.output(out)
return out
def get_logits(
self, input_: torch.LongTensor, *args, **kwargs
) -> torch.FloatTensor:
"""
Get the logits for an input. Results in a tensor of size batch_size x seq_len x output_size or batch_size x
num_predictions x seq_len x output_size depending on the model type. Used to create inputs for the uncertainty
metrics defined in nlp_uncertainty_zoo.metrics.
Parameters
----------
input_: torch.LongTensor
(Batch of) Indexed input sequences.
Returns
-------
torch.FloatTensor
Logits for current input.
"""
return self.forward(input_, *args, **kwargs)
def predict(self, input_: torch.LongTensor, *args, **kwargs) -> torch.FloatTensor:
"""
Output a probability distribution over classes given an input. Results in a tensor of size batch_size x seq_len
x output_size or batch_size x num_predictions x seq_len x output_size depending on the model type.
Parameters
----------
input_: torch.LongTensor
(Batch of) Indexed input sequences.
Returns
-------
torch.FloatTensor
Probability distribution over classes for the current input.
"""
logits = self.get_logits(input_, *args, **kwargs)
probabilities = F.softmax(logits, dim=-1)
return probabilities
def get_sequence_representation_from_hidden(
self, hidden: torch.FloatTensor
) -> torch.FloatTensor:
"""
Define how the representation for an entire sequence is extracted from a number of hidden states. This is
relevant in sequence classification. For example, this could be the last hidden state for a unidirectional LSTM
or the first hidden state for a transformer, adding a pooler layer.
Parameters
----------
hidden: torch.FloatTensor
Hidden states of a model for a sequence.
Returns
-------
torch.FloatTensor
Representation for the current sequence.
"""
hidden = hidden[:, 0, :].unsqueeze(1)
hidden = torch.tanh(self.bert.pooler.dense(hidden))
return hidden
def get_hidden_representation(
self, input_: torch.LongTensor, *args, **kwargs
) -> torch.FloatTensor:
attention_mask = kwargs["attention_mask"]
return_dict = self.bert.forward(input_, attention_mask, return_dict=True)
if self.is_sequence_classifier:
activations = return_dict["last_hidden_state"][:, 0, :].unsqueeze(1)
else:
activations = return_dict["last_hidden_state"]
return activations
class BertModel(Model):
"""
Define a BERT model. The only purpose this serves is to provide a warmup_proportion for fit(). Since the number of
training steps is only defined in fit(), it means we can only define the scheduler in that method.
"""
def __init__(
self,
model_name: str,
module_class: type,
bert_name: str,
output_size: int,
is_sequence_classifier: bool,
lr: float,
weight_decay: float,
optimizer_class: Type[optim.Optimizer] = optim.Adam,
scheduler_class: Optional[Type[scheduler._LRScheduler]] = None,
scheduler_kwargs: Optional[Dict[str, Any]] = None,
bert_class: Type[HFBertModel] = HFBertModel,
model_dir: Optional[str] = None,
device: Device = "cpu",
**model_params,
):
"""
Initialize a BERT model.
Parameters
----------
model_name: str
Name of the model.
module_class: type
Class of the model that is being wrapped.
bert_name: str
Name of the underlying BERT, as specified in HuggingFace transformers.
output_size: int
Number of classes.
is_sequence_classifier: bool
Indicate whether the model is going to be used as a sequence classifier. Otherwise, predictions are going to
be made at every time step.
lr: float
Learning rate. Default is 0.4931.
weight_decay: float
Weight decay term for optimizer. Default is 0.001357.
optimizer_class: Type[optim.Optimizer]
Optimizer class. Default is Adam.
scheduler_class: Optional[Type[scheduler._LRScheduler]]
Learning rate scheduler class. Default is None.
scheduler_kwargs: Optional[Dict[str, Any]]
Keyword arguments for learning rate scheduler. Default is None.
bert_class: Type[HFBertModel]
Type of BERT to be used. Default is BertModel from the Huggingface transformers package.
model_dir: Optional[str]
Directory that model should be saved to.
device: Device
Device the model is located on.
"""
super().__init__(
model_name=model_name,
module_class=module_class,
bert_name=bert_name,
output_size=output_size,
is_sequence_classifier=is_sequence_classifier,
lr=lr,
weight_decay=weight_decay,
optimizer_class=optimizer_class,
scheduler_class=scheduler_class,
scheduler_kwargs=scheduler_kwargs,
model_dir=model_dir,
bert_class=bert_class,
device=device,
**model_params,
)
def fit(
self,
train_split: DataLoader,
num_training_steps: int,
warmup_proportion: float = 0.1,
valid_split: Optional[DataLoader] = None,
weight_loss: bool = False,
grad_clip: float = 10,
validation_interval: Optional[int] = None,
early_stopping_pat: int = np.inf,
early_stopping: bool = False,
verbose: bool = True,
wandb_run: Optional[WandBRun] = None,
**training_kwargs
):
"""
Fit the model to training data.
Parameters
----------
train_split: DataLoader
Dataset the model is being trained on.
num_training_steps: int
Number of training steps until completion.
warmup_proportion: float
Percentage of warmup steps for triangular learning rate schedule. Default is 0.1.
valid_split: Optional[DataLoader]
Validation set the model is being evaluated on if given.
verbose: bool
Whether to display information about current loss.
weight_loss: bool
Weight classes in loss function. Default is False.
grad_clip: float
Parameter grad norm value before it will be clipped. Default is 10.
validation_interval: Optional[int]
Interval of training steps between validations on the validation set. If None, the model is evaluated after
each pass through the training data.
early_stopping_pat: int
Patience in number of training steps before early stopping kicks in. Default is np.inf.
early_stopping: bool
Whether early stopping should be used. Default is False.
wandb_run: Optional[WandBRun]
Weights and Biases run to track training statistics. Training and validation loss (if applicable) are
tracked by default, everything else is defined in _epoch_iter() and _finetune() depending on the model.
"""
assert 0 <= warmup_proportion <= 1, f"warmup_proportion should be in [0, 1], {warmup_proportion} found."
if self.model_params.get("scheduler_class", None) is not None:
scheduler_class = self.model_params["scheduler_class"]
scheduler_kwargs = self.model_params.get("scheduler_kwargs", {})
scheduler_kwargs = {
# Warmup prob: 0.1
"num_warmup_steps": int(num_training_steps * warmup_proportion),
"num_training_steps": num_training_steps,
**scheduler_kwargs
}
self.scheduler = scheduler_class(
self.optimizer, **scheduler_kwargs
)
# Now call rest of function
super().fit(
train_split=train_split,
num_training_steps=num_training_steps,
valid_split=valid_split,
weight_loss=weight_loss,
grad_clip=grad_clip,
validation_interval=validation_interval,
early_stopping_pat=early_stopping_pat,
early_stopping=early_stopping,
verbose=verbose,
wandb_run=wandb_run,
**training_kwargs
)
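# --- Editor's illustrative sketch (not part of nlp-uncertainty-zoo) ---------
# Builds a BertModule for binary sequence classification and runs one forward
# pass on random token ids. The checkpoint name and batch shape are made up,
# and instantiating the module downloads pretrained BERT weights.
def _demo_bert_module(device: Device = "cpu"):
    module = BertModule(
        bert_name="bert-base-uncased",
        output_size=2,
        is_sequence_classifier=True,
        bert_class=HFBertModel,
        device=device,
    )
    vocab_size = module.bert.config.vocab_size
    input_ids = torch.randint(0, vocab_size, (4, 16), device=device)
    attention_mask = torch.ones_like(input_ids)
    probabilities = module.predict(input_ids, attention_mask=attention_mask)
    return probabilities.shape  # (4, 1, 2): one prediction per sequence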
|
PypiClean
|
/Mantissa-0.9.0.tar.gz/Mantissa-0.9.0/xmantissa/js/MochiKit/DateTime.js
|
if (typeof(dojo) != 'undefined') {
dojo.provide('MochiKit.DateTime');
}
if (typeof(MochiKit) == 'undefined') {
MochiKit = {};
}
if (typeof(MochiKit.DateTime) == 'undefined') {
MochiKit.DateTime = {};
}
MochiKit.DateTime.NAME = "MochiKit.DateTime";
MochiKit.DateTime.VERSION = "1.2";
MochiKit.DateTime.__repr__ = function () {
return "[" + this.NAME + " " + this.VERSION + "]";
};
MochiKit.DateTime.toString = function () {
return this.__repr__();
};
MochiKit.DateTime.isoDate = function (str) {
/***
Convert an ISO 8601 date (YYYY-MM-DD) to a Date object.
***/
str = str + "";
if (typeof(str) != "string" || str.length == 0) {
return null;
}
var iso = str.split('-');
if (iso.length == 0) {
return null;
}
return new Date(iso[0], iso[1] - 1, iso[2]);
};
MochiKit.DateTime._isoRegexp = /(\d{4,})(?:-(\d{1,2})(?:-(\d{1,2})(?:[T ](\d{1,2}):(\d{1,2})(?::(\d{1,2})(?:\.(\d+))?)?(?:(Z)|([+-])(\d{1,2})(?::(\d{1,2}))?)?)?)?)?/;
MochiKit.DateTime.isoTimestamp = function (str) {
/***
Convert an ISO 8601 timestamp (or something close to it) to
a Date object. Will accept the "de facto" form:
YYYY-MM-DD hh:mm:ss
or (the proper form):
YYYY-MM-DDThh:mm:ss
***/
str = str + "";
if (typeof(str) != "string" || str.length == 0) {
return null;
}
var res = str.match(MochiKit.DateTime._isoRegexp);
if (typeof(res) == "undefined" || res == null) {
return null;
}
var year, month, day, hour, min, sec, msec;
year = parseInt(res[1], 10);
if (typeof(res[2]) == "undefined" || res[2] == "") {
return new Date(year);
}
month = parseInt(res[2], 10) - 1;
day = parseInt(res[3], 10);
if (typeof(res[4]) == "undefined" || res[4] == "") {
return new Date(year, month, day);
}
hour = parseInt(res[4], 10);
min = parseInt(res[5], 10);
sec = (typeof(res[6]) != "undefined" && res[6] != "") ? parseInt(res[6], 10) : 0;
if (typeof(res[7]) != "undefined" && res[7] != "") {
msec = Math.round(1000.0 * parseFloat("0." + res[7]));
} else {
msec = 0;
}
if ((typeof(res[8]) == "undefined" || res[8] == "") && (typeof(res[9]) == "undefined" || res[9] == "")) {
return new Date(year, month, day, hour, min, sec, msec);
}
var ofs;
if (typeof(res[9]) != "undefined" && res[9] != "") {
ofs = parseInt(res[10], 10) * 3600000;
if (typeof(res[11]) != "undefined" && res[11] != "") {
ofs += parseInt(res[11], 10) * 60000;
}
if (res[9] == "-") {
ofs = -ofs;
}
} else {
ofs = 0;
}
return new Date(Date.UTC(year, month, day, hour, min, sec, msec) - ofs);
};
MochiKit.DateTime.toISOTime = function (date, realISO/* = false */) {
/***
Get the hh:mm:ss from the given Date object.
***/
if (typeof(date) == "undefined" || date == null) {
return null;
}
var hh = date.getHours();
var mm = date.getMinutes();
var ss = date.getSeconds();
var lst = [
((realISO && (hh < 10)) ? "0" + hh : hh),
((mm < 10) ? "0" + mm : mm),
((ss < 10) ? "0" + ss : ss)
];
return lst.join(":");
};
MochiKit.DateTime.toISOTimestamp = function (date, realISO/* = false*/) {
/***
Convert a Date object to something that's ALMOST but not quite an
ISO 8601 timestamp. If it was a proper ISO timestamp it would be:
YYYY-MM-DDThh:mm:ssZ
However, we see junk in SQL and other places that looks like this:
YYYY-MM-DD hh:mm:ss
So, this function returns the latter form, despite its name, unless
you pass true for realISO.
***/
if (typeof(date) == "undefined" || date == null) {
return null;
}
var sep = realISO ? "T" : " ";
var foot = realISO ? "Z" : "";
if (realISO) {
date = new Date(date.getTime() + (date.getTimezoneOffset() * 60000));
}
return MochiKit.DateTime.toISODate(date) + sep + MochiKit.DateTime.toISOTime(date, realISO) + foot;
};
MochiKit.DateTime.toISODate = function (date) {
/***
Convert a Date object to an ISO 8601 date string (YYYY-MM-DD)
***/
if (typeof(date) == "undefined" || date == null) {
return null;
}
var _padTwo = MochiKit.DateTime._padTwo;
return [
date.getFullYear(),
_padTwo(date.getMonth() + 1),
_padTwo(date.getDate())
].join("-");
};
MochiKit.DateTime.americanDate = function (d) {
/***
Converts a MM/DD/YYYY date to a Date object
***/
d = d + "";
if (typeof(d) != "string" || d.length == 0) {
return null;
}
var a = d.split('/');
return new Date(a[2], a[0] - 1, a[1]);
};
MochiKit.DateTime._padTwo = function (n) {
return (n > 9) ? n : "0" + n;
};
MochiKit.DateTime.toPaddedAmericanDate = function (d) {
/***
Converts a Date object to an MM/DD/YYYY date, e.g. 01/01/2001
***/
if (typeof(d) == "undefined" || d == null) {
return null;
}
var _padTwo = MochiKit.DateTime._padTwo;
return [
_padTwo(d.getMonth() + 1),
_padTwo(d.getDate()),
d.getFullYear()
].join('/');
};
MochiKit.DateTime.toAmericanDate = function (d) {
/***
Converts a Date object to an M/D/YYYY date, e.g. 1/1/2001
***/
if (typeof(d) == "undefined" || d == null) {
return null;
}
return [d.getMonth() + 1, d.getDate(), d.getFullYear()].join('/');
};
MochiKit.DateTime.EXPORT = [
"isoDate",
"isoTimestamp",
"toISOTime",
"toISOTimestamp",
"toISODate",
"americanDate",
"toPaddedAmericanDate",
"toAmericanDate"
];
MochiKit.DateTime.EXPORT_OK = [];
MochiKit.DateTime.EXPORT_TAGS = {
":common": MochiKit.DateTime.EXPORT,
":all": MochiKit.DateTime.EXPORT
};
MochiKit.DateTime.__new__ = function () {
// MochiKit.Base.nameFunctions(this);
var base = this.NAME + ".";
for (var k in this) {
var o = this[k];
if (typeof(o) == 'function' && typeof(o.NAME) == 'undefined') {
try {
o.NAME = base + k;
} catch (e) {
// pass
}
}
}
};
MochiKit.DateTime.__new__();
if (typeof(MochiKit.Base) != "undefined") {
MochiKit.Base._exportSymbols(this, MochiKit.DateTime);
} else {
(function (globals, module) {
if ((typeof(JSAN) == 'undefined' && typeof(dojo) == 'undefined')
|| (typeof(MochiKit.__compat__) == 'boolean' && MochiKit.__compat__)) {
var all = module.EXPORT_TAGS[":all"];
for (var i = 0; i < all.length; i++) {
globals[all[i]] = module[all[i]];
}
}
})(this, MochiKit.DateTime);
}
|
PypiClean
|
/trixie-0.1.2.tar.gz/trixie-0.1.2/homeassistant/components/media_player/russound_rnet.py
|
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_PORT, STATE_OFF, STATE_ON, CONF_NAME)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['russound==0.1.9']
_LOGGER = logging.getLogger(__name__)
CONF_ZONES = 'zones'
CONF_SOURCES = 'sources'
SUPPORT_RUSSOUND = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
})
SOURCE_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_ZONES): vol.Schema({cv.positive_int: ZONE_SCHEMA}),
vol.Required(CONF_SOURCES): vol.All(cv.ensure_list, [SOURCE_SCHEMA]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Russound RNET platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
if host is None or port is None:
_LOGGER.error("Invalid config. Expected %s and %s",
CONF_HOST, CONF_PORT)
return False
from russound import russound
russ = russound.Russound(host, port)
russ.connect()
sources = []
for source in config[CONF_SOURCES]:
sources.append(source['name'])
if russ.is_connected():
for zone_id, extra in config[CONF_ZONES].items():
add_devices([RussoundRNETDevice(
hass, russ, sources, zone_id, extra)], True)
else:
_LOGGER.error('Not connected to %s:%s', host, port)
class RussoundRNETDevice(MediaPlayerDevice):
"""Representation of a Russound RNET device."""
def __init__(self, hass, russ, sources, zone_id, extra):
"""Initialise the Russound RNET device."""
self._name = extra['name']
self._russ = russ
self._sources = sources
self._zone_id = zone_id
self._state = None
self._volume = None
self._source = None
def update(self):
"""Retrieve latest state."""
        # A single get_zone_info call returns the power state, volume and source,
        # which keeps serial traffic down and speeds up the update.
ret = self._russ.get_zone_info('1', self._zone_id, 4)
_LOGGER.debug("ret= %s", ret)
if ret is not None:
_LOGGER.debug("Updating status for zone %s", self._zone_id)
if ret[0] == 0:
self._state = STATE_OFF
else:
self._state = STATE_ON
self._volume = ret[2] * 2 / 100.0
# Returns 0 based index for source.
index = ret[1]
            # The configured source list may not cover every source. If a source
            # selected externally falls outside that list, indexing raises an
            # IndexError; report an unknown source (None) in that case.
try:
self._source = self._sources[index]
except IndexError:
self._source = None
else:
_LOGGER.error("Could not update status for zone %s", self._zone_id)
@property
def name(self):
"""Return the name of the zone."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_RUSSOUND
@property
def source(self):
"""Get the currently selected source."""
return self._source
@property
def volume_level(self):
"""Volume level of the media player (0..1).
        The hardware reports volume on a 0..100 scale, so the value is divided
        by 100.0 to fit the required 0..1 range.
"""
return self._volume
def set_volume_level(self, volume):
"""Set volume level. Volume has a range (0..1).
Translate this to a range of (0..100) as expected
by _russ.set_volume()
"""
self._russ.set_volume('1', self._zone_id, volume * 100)
def turn_on(self):
"""Turn the media player on."""
self._russ.set_power('1', self._zone_id, '1')
def turn_off(self):
"""Turn off media player."""
self._russ.set_power('1', self._zone_id, '0')
def mute_volume(self, mute):
"""Send mute command."""
self._russ.toggle_mute('1', self._zone_id)
def select_source(self, source):
"""Set the input source."""
if source in self._sources:
index = self._sources.index(source)
# 0 based value for source
self._russ.set_source('1', self._zone_id, index)
@property
def source_list(self):
"""Return a list of available input sources."""
return self._sources
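# Illustrative sketch only: a configuration.yaml entry that would satisfy
# PLATFORM_SCHEMA above. Host, port, zone and source names are hypothetical
# placeholders rather than values from a real installation.
#
#   media_player:
#     - platform: russound_rnet
#       host: 192.168.1.10
#       port: 1337
#       name: Russound
#       zones:
#         1:
#           name: Living room
#       sources:
#         - name: Radio
#         - name: Streamer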
|
PypiClean
|
/morph-tool-2.9.1.tar.gz/morph-tool-2.9.1/morph_tool/transform.py
|
import logging
from enum import Enum
import numpy as np
from scipy.spatial.transform import Rotation
from morphio import SectionType, IterType
from neurom.morphmath import angle_between_vectors
from morph_tool.spatial import point_to_section_segment
from morph_tool.apical_point import apical_point_section_segment
L = logging.getLogger(__name__)
def _apply_recursively(func, obj, origin=(0, 0, 0)):
origin = np.array(origin)
if hasattr(obj, 'soma'):
obj.soma.points = origin + func(obj.soma.points - origin)
for s in obj.iter():
s.points = origin + func(s.points - origin)
def transform(obj, A):
"""Apply transformation matrix `A` to a given morphology object.
Args:
obj: Morphology / Section
        A: transformation matrix (4 x 4 NumPy array)
"""
if A is None:
return
A = np.asarray(A)
if A.shape != (4, 4):
raise ValueError(f"`A` should be 4 x 4 matrix (got: {A.shape})")
A = A.transpose()
def func(p):
return np.dot(np.column_stack((p, np.ones(len(p)))), A)[:, :3]
_apply_recursively(func, obj)
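# Usage sketch (illustrative): `transform` expects the usual homogeneous-coordinate
# convention, with the translation in the last column. For example, a pure
# translation by (1, 2, 3), where `morph` stands for a morphology loaded elsewhere:
#   A = np.array([[1.0, 0.0, 0.0, 1.0],
#                 [0.0, 1.0, 0.0, 2.0],
#                 [0.0, 0.0, 1.0, 3.0],
#                 [0.0, 0.0, 0.0, 1.0]])
#   transform(morph, A)  # same effect as translate(morph, [1.0, 2.0, 3.0])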
def rotate(obj, A, origin=(0, 0, 0)):
"""Apply rotation matrix `A` to a given morphology object.
Args:
obj: Morphology / Section
A: rotation matrix (3 x 3 NumPy array)
origin (3D point): the origin of the rotation
"""
if A is None:
return
A = np.asarray(A)
if A.shape != (3, 3):
raise ValueError(f"`A` should be 3 x 3 matrix (got: {A.shape})")
A = A.transpose()
def func(p):
return np.dot(p, A)
_apply_recursively(func, obj, origin)
def translate(obj, shift):
"""Apply translation to a given morphology object.
Args:
obj: Morphology / Section
shift: shift vector ((x, y, z) NumPy array)
"""
if shift is None:
return
shift = np.asarray(shift)
if shift.shape != (3,):
raise ValueError(
f"`shift` should be vector of shape (3,) (got: {shift.shape})"
)
def func(p):
return p + shift
_apply_recursively(func, obj)
def align(section, direction):
"""Rotate a section (and all its descendents) so its initial segment is along ``direction``."""
section_dir = section.points[1] - section.points[0]
alpha = angle_between_vectors(section_dir, direction)
if alpha < 1e-8:
return
if abs(alpha - np.pi) < 1e-8:
axis = np.cross(section_dir, [1, 0, 0])
# Case where X axis and section_dir are colinear
if np.linalg.norm(axis) < 1e-8:
axis = np.cross(section_dir, [0, 1, 0])
else:
axis = np.cross(section_dir, direction)
axis /= np.linalg.norm(axis)
matrix = Rotation.from_rotvec(alpha * axis).as_matrix()
rotate(section, matrix, origin=section.points[0])
class AlignMethod(Enum):
"""Contains possible align methods for align_morphology."""
WHOLE = 'whole'
TRUNK = 'trunk'
FIRST_SECTION = 'first_section'
FIRST_SEGMENT = 'first_segment'
@classmethod
def values(cls):
"""Get all possible values."""
return list(map(lambda c: c.value, cls))
def rotation_matrix_from_vectors(vec1, vec2):
"""Find the rotation matrix that aligns vec1 to vec2.
Picked from: https://stackoverflow.com/a/59204638/3868743
Args:
vec1: A 3d "source" vector
vec2: A 3d "destination" vector
Returns:
A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
vec1, vec2 = vec1 / np.linalg.norm(vec1), vec2 / np.linalg.norm(vec2)
v_cross = np.cross(vec1, vec2)
v_cross_norm = np.linalg.norm(v_cross)
if v_cross_norm == 0:
return np.eye(3)
kmat = np.array([[0.0, -v_cross[2], v_cross[1]],
[v_cross[2], 0.0, -v_cross[0]],
[-v_cross[1], v_cross[0], 0.0]])
return np.eye(3) + kmat + kmat.dot(kmat) * ((1 - np.dot(vec1, vec2)) / (v_cross_norm ** 2))
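# Quick sanity-check sketch (illustrative): rotating the x axis onto the y axis.
#   R = rotation_matrix_from_vectors([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
#   np.allclose(R.dot([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])  # -> True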
# pylint: disable=inconsistent-return-statements
def _get_points(morph, method, neurite_type, target_point):
"""Extract relevant points of dendrite to align the morphology, see align_morphology."""
_to_type = {'apical': SectionType.apical_dendrite, 'axon': SectionType.axon}
for root_section in morph.root_sections:
if root_section.type == _to_type[neurite_type]:
if method == AlignMethod.TRUNK.value:
                if target_point is not None:
                    target_section = point_to_section_segment(morph, target_point)[0]
                    if target_section is None:
                        return None
                    target_secid = target_section - 1
elif neurite_type == 'apical':
target_secid = apical_point_section_segment(morph)[0]
else:
raise Exception(f"We don't know how to get target point for {neurite_type}.")
return np.vstack(
[section.points
for section in morph.sections[target_secid].iter(IterType.upstream)]
)
if method == AlignMethod.FIRST_SECTION.value:
return root_section.points
if method == AlignMethod.FIRST_SEGMENT.value:
return root_section.points[:2]
return np.vstack([section.points for section in root_section.iter()])
def _get_principal_direction(points):
"""Return the principal direction of a point cloud.
It is the eigen vector of the covariance matrix with the highest eigen value. Taken from
:mod:`neuror.unravel`.
"""
X = np.copy(np.asarray(points))
C = np.dot(X.T, X)
w, v = np.linalg.eig(C)
return v[:, w.argmax()]
def align_morphology(
morph, direction=None, method='whole', neurite_type='apical', target_point=None
):
"""In-place alignment of a morphology towards a 'direction'.
The base algorithm is based on eigenvalue decomposition of the correlation matrix obtained
from points in specified neurites, giving the principal axis of the neurite, centered at soma.
Currently, five algorithms are implemented, differing in the choice of points:
    1) with method='whole': all the points in the selected neurite are used.
    2) with method='trunk': points in the sections up to the target point are used.
    3) with method='first_section': only the points in the first section are used.
    4) with method='first_segment': only the points in the first segment are used.
    5) with method given as an ndarray or list: it is used directly as the direction.
If no neurite is present, no rotation is applied, and the identity rotation is returned.
    If two neurites of the same type are present, the first accessed by Morphio will be used.
Args:
morph (morphio.Morphology): morphology to align
direction (ndarray): 3-vector for final direction, if None, [0, 1, 0] will be used
method (str|ndarray): method for alignment.
neurite_type (str): neurite to consider, can only be apical or axon
target_point (ndarray): position of target point for method='trunk',
if None and neurite_type='apical', it will be estimated
Returns:
        3x3 array with the applied rotation matrix
"""
if isinstance(method, str) and method not in AlignMethod.values():
raise NotImplementedError(f"Method {method} is not implemented")
if direction is None:
direction = [0.0, 1.0, 0.0]
else:
direction /= np.linalg.norm(direction)
if isinstance(method, (np.ndarray, list)):
points = np.array([[0.0, 0.0, 0.0], method])
else:
points = _get_points(morph, method, neurite_type, target_point)
if points is None:
L.info('We did not find an apical point to align the morphology')
return np.eye(3)
center = np.mean(morph.soma.points, axis=0)
principal_direction = _get_principal_direction(points - center)
principal_direction *= np.sign(points.dot(principal_direction).sum())
rotation_matrix = rotation_matrix_from_vectors(principal_direction, direction)
rotate(morph, rotation_matrix, origin=center)
return rotation_matrix
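# Minimal usage sketch (illustrative): `morph` is assumed to be a mutable morphio
# morphology loaded elsewhere. The call rotates it in place so that the principal
# axis of its apical dendrite points along +Y and returns the 3x3 rotation applied.
#   rotation = align_morphology(morph, direction=[0.0, 1.0, 0.0], method='whole')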
|
PypiClean
|
/docstring_parser-0.15.tar.gz/docstring_parser-0.15/docstring_parser/attrdoc.py
|
import ast
import inspect
import textwrap
import typing as T
from types import ModuleType
from .common import Docstring, DocstringParam
ast_constant_attr = {
ast.Constant: "value",
# python <= 3.7:
ast.NameConstant: "value",
ast.Num: "n",
ast.Str: "s",
}
def ast_get_constant_value(node: ast.AST) -> T.Any:
"""Return the constant's value if the given node is a constant."""
return getattr(node, ast_constant_attr[node.__class__])
def ast_unparse(node: ast.AST) -> T.Optional[str]:
"""Convert the AST node to source code as a string."""
if hasattr(ast, "unparse"):
return ast.unparse(node)
# Support simple cases in Python < 3.9
if isinstance(node, (ast.Str, ast.Num, ast.NameConstant, ast.Constant)):
return str(ast_get_constant_value(node))
if isinstance(node, ast.Name):
return node.id
return None
def ast_is_literal_str(node: ast.AST) -> bool:
"""Return True if the given node is a literal string."""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.Constant, ast.Str))
and isinstance(ast_get_constant_value(node.value), str)
)
def ast_get_attribute(
node: ast.AST,
) -> T.Optional[T.Tuple[str, T.Optional[str], T.Optional[str]]]:
"""Return name, type and default if the given node is an attribute."""
if isinstance(node, (ast.Assign, ast.AnnAssign)):
target = (
node.targets[0] if isinstance(node, ast.Assign) else node.target
)
if isinstance(target, ast.Name):
type_str = None
if isinstance(node, ast.AnnAssign):
type_str = ast_unparse(node.annotation)
default = None
if node.value:
default = ast_unparse(node.value)
return target.id, type_str, default
return None
class AttributeDocstrings(ast.NodeVisitor):
"""An ast.NodeVisitor that collects attribute docstrings."""
attr_docs = None
prev_attr = None
def visit(self, node):
if self.prev_attr and ast_is_literal_str(node):
attr_name, attr_type, attr_default = self.prev_attr
self.attr_docs[attr_name] = (
ast_get_constant_value(node.value),
attr_type,
attr_default,
)
self.prev_attr = ast_get_attribute(node)
if isinstance(node, (ast.ClassDef, ast.Module)):
self.generic_visit(node)
def get_attr_docs(
self, component: T.Any
) -> T.Dict[str, T.Tuple[str, T.Optional[str], T.Optional[str]]]:
"""Get attribute docstrings from the given component.
:param component: component to process (class or module)
:returns: for each attribute docstring, a tuple with (description,
type, default)
"""
self.attr_docs = {}
self.prev_attr = None
try:
source = textwrap.dedent(inspect.getsource(component))
except OSError:
pass
else:
tree = ast.parse(source)
if inspect.ismodule(component):
self.visit(tree)
elif isinstance(tree, ast.Module) and isinstance(
tree.body[0], ast.ClassDef
):
self.visit(tree.body[0])
return self.attr_docs
def add_attribute_docstrings(
obj: T.Union[type, ModuleType], docstring: Docstring
) -> None:
"""Add attribute docstrings found in the object's source code.
:param obj: object from which to parse attribute docstrings
:param docstring: Docstring object where found attributes are added
    :returns: None; the found attributes are appended to ``docstring.meta`` in place
"""
params = set(p.arg_name for p in docstring.params)
for arg_name, (description, type_name, default) in (
AttributeDocstrings().get_attr_docs(obj).items()
):
if arg_name not in params:
param = DocstringParam(
args=["attribute", arg_name],
description=description,
arg_name=arg_name,
type_name=type_name,
is_optional=default is not None,
default=default,
)
docstring.meta.append(param)
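if __name__ == "__main__":  # pragma: no cover
    # Illustrative sketch only: attribute docstrings are the bare string literals
    # that directly follow an assignment in a class or module body. The class and
    # attribute names below are made up; `parse` is the package's top-level parser.
    from docstring_parser import parse
    class Point:
        """A 2D point."""
        x: int = 0
        """Horizontal coordinate."""
    doc = parse(Point.__doc__)
    add_attribute_docstrings(Point, doc)
    added = doc.meta[-1]
    print(added.arg_name, added.type_name, added.default, added.description)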
|
PypiClean
|
/server_project-0.0.1.tar.gz/server_project-0.0.1/server/add_user.py
|
from PyQt5.QtWidgets import QDialog, QPushButton, QLineEdit, QApplication, QLabel, QMessageBox
from PyQt5.QtCore import Qt
import hashlib
import binascii
class RegisterUser(QDialog):
    '''Dialog class for registering a new user on the server.'''
def __init__(self, database, server):
super().__init__()
self.database = database
self.server = server
self.setWindowTitle('Регистрация')
self.setFixedSize(175, 183)
self.setModal(True)
self.setAttribute(Qt.WA_DeleteOnClose)
self.label_username = QLabel('Введите имя пользователя:', self)
self.label_username.move(10, 10)
self.label_username.setFixedSize(150, 15)
self.client_name = QLineEdit(self)
self.client_name.setFixedSize(154, 20)
self.client_name.move(10, 30)
self.label_passwd = QLabel('Введите пароль:', self)
self.label_passwd.move(10, 55)
self.label_passwd.setFixedSize(150, 15)
self.client_passwd = QLineEdit(self)
self.client_passwd.setFixedSize(154, 20)
self.client_passwd.move(10, 75)
self.client_passwd.setEchoMode(QLineEdit.Password)
self.label_conf = QLabel('Введите подтверждение:', self)
self.label_conf.move(10, 100)
self.label_conf.setFixedSize(150, 15)
self.client_conf = QLineEdit(self)
self.client_conf.setFixedSize(154, 20)
self.client_conf.move(10, 120)
self.client_conf.setEchoMode(QLineEdit.Password)
self.btn_ok = QPushButton('Сохранить', self)
self.btn_ok.move(10, 150)
self.btn_ok.clicked.connect(self.save_data)
self.btn_cancel = QPushButton('Выход', self)
self.btn_cancel.move(90, 150)
self.btn_cancel.clicked.connect(self.close)
self.messages = QMessageBox()
self.show()
def save_data(self):
'''
        Validate the input and save the new user to the database.
'''
print('SAVE_DATA', self.client_name.text(), self.client_passwd.text(), self.client_conf.text())
if not self.client_name.text():
self.messages.critical(
self, 'Ошибка', 'Не указано имя пользователя.')
return
elif self.client_passwd.text() != self.client_conf.text():
self.messages.critical(
self, 'Ошибка', 'Введённые пароли не совпадают.')
return
elif self.database.check_user(self.client_name.text()):
self.messages.critical(
self, 'Ошибка', 'Пользователь уже существует.')
return
else:
            # Generate the password hash, using the lower-cased login as the salt.
passwd_bytes = self.client_passwd.text().encode('utf-8')
salt = self.client_name.text().lower().encode('utf-8')
passwd_hash = hashlib.pbkdf2_hmac(
'sha512', passwd_bytes, salt, 10000)
print('SALT!!!! ', self.client_name.text(), binascii.hexlify(passwd_hash))
self.database.add_user(
self.client_name.text(),
binascii.hexlify(passwd_hash))
self.messages.information(
self, 'Успех', 'Пользователь успешно зарегистрирован.')
            # Notify the clients that they need to refresh their reference lists.
self.server.service_update_lists()
self.close()
if __name__ == '__main__':
app = QApplication([])
app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
dial = RegisterUser(None, None)
app.exec_()
|
PypiClean
|
/repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/cryptocurrency/due_diligence/ccxt_model.py
|
__docformat__ = "numpy"
from typing import Any, Dict, List
import ccxt
import pandas as pd
from openbb_terminal.cryptocurrency.dataframe_helpers import prettify_column_names
def get_exchanges() -> List[str]:
"""Helper method to get all the exchanges supported by ccxt
[Source: https://docs.ccxt.com/en/latest/manual.html]
Parameters
----------
Returns
-------
List[str]
list of all the exchanges supported by ccxt
"""
return ccxt.exchanges
def get_binance_currencies() -> List[str]:
"""Helper method to get all the currenices supported by ccxt
[Source: https://docs.ccxt.com/en/latest/manual.html]
Parameters
----------
Returns
-------
List[str]
        list of quote currencies used for Binance pairs
"""
# Refactor this eventually to allow for any entered exchange -
    # right now only works on default binance for "ob" and "trades"
# Commented out for now, since binance started blocking requests
# exchange = ccxt.binance({"fetchCurrencies": True})
# exchange.load_markets()
# currencies = exchange.quoteCurrencies
# return [c["code"] for c in currencies.values()]
return [
"AUD",
"BIDR",
"BKRW",
"BNB",
"BRL",
"BTC",
"BUSD",
"BVND",
"DAI",
"DOGE",
"DOT",
"ETH",
"EUR",
"GBP",
"IDRT",
"NGN",
"PAX",
"PLN",
"RUB",
"TRX",
"TRY",
"TUSD",
"UAH",
"USDC",
"USDP",
"USDS",
"USDT",
"UST",
"VAI",
"XRP",
"ZAR",
]
def get_orderbook(exchange: str, symbol: str, to_symbol: str) -> Dict[str, Any]:
"""Returns orderbook for a coin in a given exchange
[Source: https://docs.ccxt.com/en/latest/manual.html]
Parameters
----------
exchange : str
exchange id
symbol : str
coin symbol
to_symbol : str
currency to compare coin against
Returns
-------
Dict[str, Any]
With bids and asks
"""
exchange_class = getattr(ccxt, exchange)
exchange_cls = exchange_class()
ob = exchange_cls.fetch_order_book(f"{symbol.upper()}/{to_symbol.upper()}")
return ob
def get_trades(exchange_id: str, symbol: str, to_symbol: str) -> pd.DataFrame:
"""Returns trades for a coin in a given exchange
[Source: https://docs.ccxt.com/en/latest/manual.html]
Parameters
----------
exchange_id : str
exchange id
symbol : str
coin symbol
to_symbol : str
currency to compare coin against
Returns
-------
pd.DataFrame
trades for a coin in a given exchange
"""
exchange_class = getattr(ccxt, exchange_id)
exchange = exchange_class()
trades = exchange.fetch_trades(f"{symbol.upper()}/{to_symbol.upper()}")
df = pd.DataFrame(trades, columns=["datetime", "price", "amount", "cost", "side"])
df["datetime"] = pd.to_datetime(df["datetime"])
df.rename(columns={"datetime": "date"}, inplace=True)
df.columns = prettify_column_names(df.columns)
return df
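if __name__ == "__main__":  # pragma: no cover
    # Illustrative sketch only: exchange id, coin and quote currency are arbitrary
    # examples, and both calls hit live public endpoints when executed.
    print(get_exchanges()[:5])
    print(get_orderbook("kraken", "BTC", "USD")["asks"][:3])
    print(get_trades("kraken", "BTC", "USD").head())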
|
PypiClean
|
/pygame_ce-2.3.1-cp39-cp39-macosx_11_0_arm64.whl/pygame/freetype.py
|
from pygame._freetype import (
Font,
STYLE_NORMAL,
STYLE_OBLIQUE,
STYLE_STRONG,
STYLE_UNDERLINE,
STYLE_WIDE,
STYLE_DEFAULT,
init,
quit,
get_init,
was_init,
get_cache_size,
get_default_font,
get_default_resolution,
get_error,
get_version,
set_default_resolution,
)
from pygame.sysfont import match_font, get_fonts, SysFont as _SysFont
__all__ = [
"Font",
"STYLE_NORMAL",
"STYLE_OBLIQUE",
"STYLE_STRONG",
"STYLE_UNDERLINE",
"STYLE_WIDE",
"STYLE_DEFAULT",
"init",
"quit",
"get_init",
"was_init",
"get_cache_size",
"get_default_font",
"get_default_resolution",
"get_error",
"get_version",
"set_default_resolution",
"match_font",
"get_fonts",
]
def SysFont(name, size, bold=False, italic=False, constructor=None):
"""pygame.ftfont.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font
Create a pygame Font from system font resources.
This will search the system fonts for the given font
name. You can also enable bold or italic styles, and
the appropriate system font will be selected if available.
This will always return a valid Font object, and will
fall back on the builtin pygame font if the given font
is not found.
Name can also be an iterable of font names, a string of
comma-separated font names, or a bytes of comma-separated
font names, in which case the set of names will be searched
in order. Pygame uses a small set of common font aliases. If the
specific font you ask for is not available, a reasonable
alternative may be used.
If optional constructor is provided, it must be a function with
signature constructor(fontpath, size, bold, italic) which returns
a Font instance. If None, a pygame.freetype.Font object is created.
"""
if constructor is None:
def constructor(fontpath, size, bold, italic):
font = Font(fontpath, size)
font.strong = bold
font.oblique = italic
return font
return _SysFont(name, size, bold, italic, constructor)
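if __name__ == "__main__":  # pragma: no cover
    # Illustrative sketch only: the font name and size are arbitrary, and SysFont
    # falls back to the bundled pygame font if "arial" is not available.
    init()
    font = SysFont("arial", 24, bold=True)
    surface, rect = font.render("Hello", fgcolor=(0, 0, 0))
    print(rect.size)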
|
PypiClean
|