| id | text | dataset_id |
|---|---|---|
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uuid/lib/md5-browser.js
|
'use strict';
function md5(bytes) {
if (typeof(bytes) == 'string') {
var msg = unescape(encodeURIComponent(bytes)); // UTF8 escape
bytes = new Array(msg.length);
for (var i = 0; i < msg.length; i++) bytes[i] = msg.charCodeAt(i);
}
return md5ToHexEncodedArray(
wordsToMd5(
bytesToWords(bytes)
, bytes.length * 8)
);
}
/*
* Convert an array of little-endian words to an array of bytes
*/
function md5ToHexEncodedArray(input) {
var i;
var x;
var output = [];
var length32 = input.length * 32;
var hexTab = '0123456789abcdef';
var hex;
for (i = 0; i < length32; i += 8) {
x = (input[i >> 5] >>> (i % 32)) & 0xFF;
hex = parseInt(hexTab.charAt((x >>> 4) & 0x0F) + hexTab.charAt(x & 0x0F), 16);
output.push(hex);
}
return output;
}
/*
* Calculate the MD5 of an array of little-endian words, and a bit length.
*/
function wordsToMd5(x, len) {
/* append padding */
x[len >> 5] |= 0x80 << (len % 32);
x[(((len + 64) >>> 9) << 4) + 14] = len;
var i;
var olda;
var oldb;
var oldc;
var oldd;
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
for (i = 0; i < x.length; i += 16) {
olda = a;
oldb = b;
oldc = c;
oldd = d;
a = md5ff(a, b, c, d, x[i], 7, -680876936);
d = md5ff(d, a, b, c, x[i + 1], 12, -389564586);
c = md5ff(c, d, a, b, x[i + 2], 17, 606105819);
b = md5ff(b, c, d, a, x[i + 3], 22, -1044525330);
a = md5ff(a, b, c, d, x[i + 4], 7, -176418897);
d = md5ff(d, a, b, c, x[i + 5], 12, 1200080426);
c = md5ff(c, d, a, b, x[i + 6], 17, -1473231341);
b = md5ff(b, c, d, a, x[i + 7], 22, -45705983);
a = md5ff(a, b, c, d, x[i + 8], 7, 1770035416);
d = md5ff(d, a, b, c, x[i + 9], 12, -1958414417);
c = md5ff(c, d, a, b, x[i + 10], 17, -42063);
b = md5ff(b, c, d, a, x[i + 11], 22, -1990404162);
a = md5ff(a, b, c, d, x[i + 12], 7, 1804603682);
d = md5ff(d, a, b, c, x[i + 13], 12, -40341101);
c = md5ff(c, d, a, b, x[i + 14], 17, -1502002290);
b = md5ff(b, c, d, a, x[i + 15], 22, 1236535329);
a = md5gg(a, b, c, d, x[i + 1], 5, -165796510);
d = md5gg(d, a, b, c, x[i + 6], 9, -1069501632);
c = md5gg(c, d, a, b, x[i + 11], 14, 643717713);
b = md5gg(b, c, d, a, x[i], 20, -373897302);
a = md5gg(a, b, c, d, x[i + 5], 5, -701558691);
d = md5gg(d, a, b, c, x[i + 10], 9, 38016083);
c = md5gg(c, d, a, b, x[i + 15], 14, -660478335);
b = md5gg(b, c, d, a, x[i + 4], 20, -405537848);
a = md5gg(a, b, c, d, x[i + 9], 5, 568446438);
d = md5gg(d, a, b, c, x[i + 14], 9, -1019803690);
c = md5gg(c, d, a, b, x[i + 3], 14, -187363961);
b = md5gg(b, c, d, a, x[i + 8], 20, 1163531501);
a = md5gg(a, b, c, d, x[i + 13], 5, -1444681467);
d = md5gg(d, a, b, c, x[i + 2], 9, -51403784);
c = md5gg(c, d, a, b, x[i + 7], 14, 1735328473);
b = md5gg(b, c, d, a, x[i + 12], 20, -1926607734);
a = md5hh(a, b, c, d, x[i + 5], 4, -378558);
d = md5hh(d, a, b, c, x[i + 8], 11, -2022574463);
c = md5hh(c, d, a, b, x[i + 11], 16, 1839030562);
b = md5hh(b, c, d, a, x[i + 14], 23, -35309556);
a = md5hh(a, b, c, d, x[i + 1], 4, -1530992060);
d = md5hh(d, a, b, c, x[i + 4], 11, 1272893353);
c = md5hh(c, d, a, b, x[i + 7], 16, -155497632);
b = md5hh(b, c, d, a, x[i + 10], 23, -1094730640);
a = md5hh(a, b, c, d, x[i + 13], 4, 681279174);
d = md5hh(d, a, b, c, x[i], 11, -358537222);
c = md5hh(c, d, a, b, x[i + 3], 16, -722521979);
b = md5hh(b, c, d, a, x[i + 6], 23, 76029189);
a = md5hh(a, b, c, d, x[i + 9], 4, -640364487);
d = md5hh(d, a, b, c, x[i + 12], 11, -421815835);
c = md5hh(c, d, a, b, x[i + 15], 16, 530742520);
b = md5hh(b, c, d, a, x[i + 2], 23, -995338651);
a = md5ii(a, b, c, d, x[i], 6, -198630844);
d = md5ii(d, a, b, c, x[i + 7], 10, 1126891415);
c = md5ii(c, d, a, b, x[i + 14], 15, -1416354905);
b = md5ii(b, c, d, a, x[i + 5], 21, -57434055);
a = md5ii(a, b, c, d, x[i + 12], 6, 1700485571);
d = md5ii(d, a, b, c, x[i + 3], 10, -1894986606);
c = md5ii(c, d, a, b, x[i + 10], 15, -1051523);
b = md5ii(b, c, d, a, x[i + 1], 21, -2054922799);
a = md5ii(a, b, c, d, x[i + 8], 6, 1873313359);
d = md5ii(d, a, b, c, x[i + 15], 10, -30611744);
c = md5ii(c, d, a, b, x[i + 6], 15, -1560198380);
b = md5ii(b, c, d, a, x[i + 13], 21, 1309151649);
a = md5ii(a, b, c, d, x[i + 4], 6, -145523070);
d = md5ii(d, a, b, c, x[i + 11], 10, -1120210379);
c = md5ii(c, d, a, b, x[i + 2], 15, 718787259);
b = md5ii(b, c, d, a, x[i + 9], 21, -343485551);
a = safeAdd(a, olda);
b = safeAdd(b, oldb);
c = safeAdd(c, oldc);
d = safeAdd(d, oldd);
}
return [a, b, c, d];
}
/*
* Convert an array of bytes to an array of little-endian words
* Characters >255 have their high-byte silently ignored.
*/
function bytesToWords(input) {
var i;
var output = [];
output[(input.length >> 2) - 1] = undefined;
for (i = 0; i < output.length; i += 1) {
output[i] = 0;
}
var length8 = input.length * 8;
for (i = 0; i < length8; i += 8) {
output[i >> 5] |= (input[(i / 8)] & 0xFF) << (i % 32);
}
return output;
}
/*
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
* to work around bugs in some JS interpreters.
*/
function safeAdd(x, y) {
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
}
/*
* Bitwise rotate a 32-bit number to the left.
*/
function bitRotateLeft(num, cnt) {
return (num << cnt) | (num >>> (32 - cnt));
}
/*
* These functions implement the four basic operations the algorithm uses.
*/
function md5cmn(q, a, b, x, s, t) {
return safeAdd(bitRotateLeft(safeAdd(safeAdd(a, q), safeAdd(x, t)), s), b);
}
function md5ff(a, b, c, d, x, s, t) {
return md5cmn((b & c) | ((~b) & d), a, b, x, s, t);
}
function md5gg(a, b, c, d, x, s, t) {
return md5cmn((b & d) | (c & (~d)), a, b, x, s, t);
}
function md5hh(a, b, c, d, x, s, t) {
return md5cmn(b ^ c ^ d, a, b, x, s, t);
}
function md5ii(a, b, c, d, x, s, t) {
return md5cmn(c ^ (b | (~d)), a, b, x, s, t);
}
module.exports = md5;
|
PypiClean
|
/ifly_ailab-1.0.0-py3-none-any.whl/ailab/inference_wrapper/yolo/wrapper/yolov5/utils/segment/metrics.py
|
import numpy as np
from ..metrics import ap_per_class
def fitness(x):
# Model fitness as a weighted combination of metrics
w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
return (x[:, :8] * w).sum(1)
def ap_per_class_box_and_mask(
tp_m,
tp_b,
conf,
pred_cls,
target_cls,
plot=False,
save_dir='.',
names=(),
):
"""
Args:
tp_b: tp of boxes.
tp_m: tp of masks.
For the other arguments, see `func: ap_per_class`.
"""
results_boxes = ap_per_class(tp_b,
conf,
pred_cls,
target_cls,
plot=plot,
save_dir=save_dir,
names=names,
prefix='Box')[2:]
results_masks = ap_per_class(tp_m,
conf,
pred_cls,
target_cls,
plot=plot,
save_dir=save_dir,
names=names,
prefix='Mask')[2:]
results = {
'boxes': {
'p': results_boxes[0],
'r': results_boxes[1],
'ap': results_boxes[3],
'f1': results_boxes[2],
'ap_class': results_boxes[4]},
'masks': {
'p': results_masks[0],
'r': results_masks[1],
'ap': results_masks[3],
'f1': results_masks[2],
'ap_class': results_masks[4]}}
return results
class Metric:
def __init__(self) -> None:
self.p = [] # (nc, )
self.r = [] # (nc, )
self.f1 = [] # (nc, )
self.all_ap = [] # (nc, 10)
self.ap_class_index = [] # (nc, )
@property
def ap50(self):
"""[email protected] of all classes.
Return:
(nc, ) or [].
"""
return self.all_ap[:, 0] if len(self.all_ap) else []
@property
def ap(self):
"""[email protected]:0.95
Return:
(nc, ) or [].
"""
return self.all_ap.mean(1) if len(self.all_ap) else []
@property
def mp(self):
"""mean precision of all classes.
Return:
float.
"""
return self.p.mean() if len(self.p) else 0.0
@property
def mr(self):
"""mean recall of all classes.
Return:
float.
"""
return self.r.mean() if len(self.r) else 0.0
@property
def map50(self):
"""Mean [email protected] of all classes.
Return:
float.
"""
return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0
@property
def map(self):
"""Mean [email protected]:0.95 of all classes.
Return:
float.
"""
return self.all_ap.mean() if len(self.all_ap) else 0.0
def mean_results(self):
"""Mean of results, return mp, mr, map50, map"""
return (self.mp, self.mr, self.map50, self.map)
def class_result(self, i):
"""class-aware result, return p[i], r[i], ap50[i], ap[i]"""
return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
def get_maps(self, nc):
maps = np.zeros(nc) + self.map
for i, c in enumerate(self.ap_class_index):
maps[c] = self.ap[i]
return maps
def update(self, results):
"""
Args:
results: tuple(p, r, ap, f1, ap_class)
"""
p, r, all_ap, f1, ap_class_index = results
self.p = p
self.r = r
self.all_ap = all_ap
self.f1 = f1
self.ap_class_index = ap_class_index
class Metrics:
"""Metric for boxes and masks."""
def __init__(self) -> None:
self.metric_box = Metric()
self.metric_mask = Metric()
def update(self, results):
"""
Args:
results: Dict{'boxes': Dict{}, 'masks': Dict{}}
"""
self.metric_box.update(list(results['boxes'].values()))
self.metric_mask.update(list(results['masks'].values()))
def mean_results(self):
return self.metric_box.mean_results() + self.metric_mask.mean_results()
def class_result(self, i):
return self.metric_box.class_result(i) + self.metric_mask.class_result(i)
def get_maps(self, nc):
return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)
@property
def ap_class_index(self):
# boxes and masks have the same ap_class_index
return self.metric_box.ap_class_index
KEYS = [
'train/box_loss',
'train/seg_loss', # train loss
'train/obj_loss',
'train/cls_loss',
'metrics/precision(B)',
'metrics/recall(B)',
'metrics/mAP_0.5(B)',
'metrics/mAP_0.5:0.95(B)', # metrics
'metrics/precision(M)',
'metrics/recall(M)',
'metrics/mAP_0.5(M)',
'metrics/mAP_0.5:0.95(M)', # metrics
'val/box_loss',
'val/seg_loss', # val loss
'val/obj_loss',
'val/cls_loss',
'x/lr0',
'x/lr1',
'x/lr2',]
BEST_KEYS = [
'best/epoch',
'best/precision(B)',
'best/recall(B)',
'best/mAP_0.5(B)',
'best/mAP_0.5:0.95(B)',
'best/precision(M)',
'best/recall(M)',
'best/mAP_0.5(M)',
'best/mAP_0.5:0.95(M)',]
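# --- Example (not part of the original file): a minimal sketch of how `Metric`
# is populated and queried, using synthetic per-class results with the shapes
# documented in `Metric.__init__` above.
def _metric_usage_demo():
    nc = 3
    rng = np.random.default_rng(0)
    p, r, f1 = rng.random(nc), rng.random(nc), rng.random(nc)
    all_ap = rng.random((nc, 10))      # AP at 10 IoU thresholds per class
    ap_class_index = np.arange(nc)
    m = Metric()
    m.update((p, r, all_ap, f1, ap_class_index))
    return m.mean_results()            # (mp, mr, map50, map)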
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/models/item_category_collection_response.py
|
from __future__ import annotations
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import base_collection_pagination_count_response, item_category
from . import base_collection_pagination_count_response
class ItemCategoryCollectionResponse(base_collection_pagination_count_response.BaseCollectionPaginationCountResponse):
def __init__(self,) -> None:
"""
Instantiates a new ItemCategoryCollectionResponse and sets the default values.
"""
super().__init__()
# The value property
self._value: Optional[List[item_category.ItemCategory]] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> ItemCategoryCollectionResponse:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: ItemCategoryCollectionResponse
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return ItemCategoryCollectionResponse()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from . import base_collection_pagination_count_response, item_category
fields: Dict[str, Callable[[Any], None]] = {
"value": lambda n : setattr(self, 'value', n.get_collection_of_object_values(item_category.ItemCategory)),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
super().serialize(writer)
writer.write_collection_of_object_values("value", self.value)
@property
def value(self,) -> Optional[List[item_category.ItemCategory]]:
"""
Gets the value property value. The value property
Returns: Optional[List[item_category.ItemCategory]]
"""
return self._value
@value.setter
def value(self,value: Optional[List[item_category.ItemCategory]] = None) -> None:
"""
Sets the value property value. The value property
Args:
value: Value to set for the value property.
"""
self._value = value
|
PypiClean
|
/torch_salad-0.2.1a0-py3-none-any.whl/salad/solver/da/dirtt.py
|
__author__ = "Steffen Schneider"
__email__ = "[email protected]"
import os, time
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
import torch.utils.data
import torch.nn as nn
from ... import layers, optim
from .. import Solver, BaseClassSolver
from .base import DABaseSolver
from .dann import AdversarialLoss, DANNSolver
import itertools
class VADA(AdversarialLoss):
def __call__(self, batch):
# TODO improve instead of overriding
(src_x, src_y), (trg_x, trg_y___) = batch
src_e, src_p = self.G(src_x)
trg_e, trg_p = self.G(trg_x)
# Compute outputs
real_logit = self.D(src_e)
fake_logit = self.D(trg_e)
if self.train_G:
return {
'ce' : (src_p, src_y),
'CL_src' : (real_logit, torch.zeros_like(real_logit)),
'CL_tgt' : (fake_logit, torch.ones_like(fake_logit)),
'VAT_src' : (src_x, src_p),
'VAT_tgt' : (trg_x, trg_p),
'H_tgt' : (trg_p,),
'acc_s' : (src_p, src_y),
'acc_t' : (trg_p, trg_y___)
}
else:
return {
'D_src' : (real_logit, torch.ones_like(real_logit)),
'D_tgt' : (fake_logit, torch.zeros_like(fake_logit))
}
class DIRTT():
def __init__(self, model, teacher):
self.model = model
self.teacher = teacher
def __call__(self, batch):
(trg_x, trg_y___) = batch
losses_student = {}
_, trg_y = self.teacher(trg_x)
_, trg_p = self.model(trg_x)
losses_student.update({
'DIRT_tgt' : (trg_p, trg_y),
'VAT_tgt' : (trg_x, trg_p),
'H_tgt' : (trg_p,),
'acc_t' : (trg_p, trg_y___)
})
return losses_student
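# --- Example (not part of the original module): a minimal sketch showing how the
# DIRTT loss dictionary is produced. `TinyNet` is a hypothetical stand-in that
# follows the (features, logits) output convention assumed by VADA/DIRTT above.
def _dirtt_loss_demo():
    class TinyNet(nn.Module):
        def __init__(self, d_in=8, d_feat=4, n_classes=3):
            super().__init__()
            self.enc = nn.Linear(d_in, d_feat)
            self.cls = nn.Linear(d_feat, n_classes)
        def forward(self, x):
            e = self.enc(x)
            return e, self.cls(e)
    student, teacher = TinyNet(), TinyNet()
    batch = (torch.randn(16, 8), torch.randint(0, 3, (16,)))
    losses = DIRTT(student, teacher)(batch)
    return losses  # keys: 'DIRT_tgt', 'VAT_tgt', 'H_tgt', 'acc_t'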
class VADASolver(DANNSolver):
""" Virtual Adversarial Domain Adaptation
"""
def __init__(self, model, discriminator, dataset, *args, **kwargs):
super(VADASolver, self).__init__(model, discriminator, dataset, *args, **kwargs)
def _init_optims(self):
# override original call, but call init of higher class
DABaseSolver._init_optims(self)
opt_stud_src = torch.optim.Adam(self.model.parameters(0), lr=3e-4)
opt = optim.JointOptimizer(opt_stud_src)
loss_model = VADA(self.model, self.discriminator, train_G = True)
loss_disc = VADA(self.model, self.discriminator, train_G = False)
self.register_optimizer(opt, loss_model)
self.register_optimizer(torch.optim.Adam(
self.discriminator.parameters(),
lr=3e-4),
loss_disc)
def _init_losses(self):
super()._init_losses(cl_weight=1e-2)
self.register_loss(layers.VATLoss(self.model), 1, "VAT_src")
self.register_loss(layers.VATLoss(self.model), 1e-2, "VAT_tgt")
self.register_loss(layers.ConditionalEntropy(), 1e-2, "H_tgt")
class DIRTTSolver(Solver):
""" Virtual Adversarial Domain Adaptation
"""
def __init__(self, model, teacher, dataset, *args, **kwargs):
super().__init__(model, dataset, *args, **kwargs)
self.model = model
self.teacher = teacher
def _init_models(self):
""" Register student, teacher and discriminator model
"""
self.register_model(self.model, 'Target model')
self.register_model(self.teacher, 'Teacher')
def _init_optims(self):
    opt_stud = torch.optim.Adam(self.model.parameters(0), lr=3e-4)
    opt = optim.JointOptimizer(opt_stud)
    # DIRT-T has no discriminator: the student is refined against the
    # (gradient-free) teacher using the DIRTT loss defined above.
    loss_model = DIRTT(self.model, self.teacher)
    self.register_optimizer(opt, loss_model)
def _init_losses(self):
    super()._init_losses(cl_weight=1e-2)
    # consistency loss against the teacher (cf. the reference implementation below)
    self.register_loss(layers.KLDivWithLogits(), 1e-2, "DIRT_tgt")
    self.register_loss(layers.VATLoss(self.model), 1e-2, "VAT_tgt")
    self.register_loss(layers.ConditionalEntropy(), 1e-2, "H_tgt")
# class DIRTTSolver(Solver):
# """ Train a Model using DIRT-T
# Reference:
# Shu et al (ICLR 2018).
# A DIRT-T approach to unsupervised domain adaptation.
# """
# def __init__(self, model, teacher, dataset,
# learning_rate = 3e-4, teacher_alpha = .1,
# *args, **kwargs):
# super(DIRTTSolver, self).__init__(dataset, *args, **kwargs)
# # Add the teacher model with Weight EMA training
# # Teacher uses gradient-free optimization
# self.model = model
# self.teacher = teacher
# student_params = list(self.model.parameters())
# teacher_params = list(self.teacher.parameters())
# for param in teacher_params:
# param.requires_grad = False
# self.register_model(self.teacher,
# optim.DelayedWeight(teacher_params, student_params)
# )
# self.register_model(self.model,
# torch.optim.Adam(self.model.parameters(), 3e-4)
# )
# self.register_loss(layers.VATLoss(self.model), 1, "VAT_tgt")
# self.register_loss(layers.ConditionalEntropy(), 1, "H_tgt")
# self.register_loss(layers.KLDivWithLogits(), 1, "DIRT_tgt")
|
PypiClean
|
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/kitchen_sink/static/ks/ckeditor/plugins/pastefromword/filter/default.js
|
/*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
(function(){function y(a){for(var a=a.toUpperCase(),c=z.length,b=0,f=0;f<c;++f)for(var d=z[f],e=d[1].length;a.substr(0,e)==d[1];a=a.substr(e))b+=d[0];return b}function A(a){for(var a=a.toUpperCase(),c=B.length,b=1,f=1;0<a.length;f*=c)b+=B.indexOf(a.charAt(a.length-1))*f,a=a.substr(0,a.length-1);return b}var C=CKEDITOR.htmlParser.fragment.prototype,o=CKEDITOR.htmlParser.element.prototype;C.onlyChild=o.onlyChild=function(){var a=this.children;return 1==a.length&&a[0]||null};o.removeAnyChildWithName=
function(a){for(var c=this.children,b=[],f,d=0;d<c.length;d++)f=c[d],f.name&&(f.name==a&&(b.push(f),c.splice(d--,1)),b=b.concat(f.removeAnyChildWithName(a)));return b};o.getAncestor=function(a){for(var c=this.parent;c&&(!c.name||!c.name.match(a));)c=c.parent;return c};C.firstChild=o.firstChild=function(a){for(var c,b=0;b<this.children.length;b++)if(c=this.children[b],a(c)||c.name&&(c=c.firstChild(a)))return c;return null};o.addStyle=function(a,c,b){var f="";if("string"==typeof c)f+=a+":"+c+";";else{if("object"==
typeof a)for(var d in a)a.hasOwnProperty(d)&&(f+=d+":"+a[d]+";");else f+=a;b=c}this.attributes||(this.attributes={});a=this.attributes.style||"";a=(b?[f,a]:[a,f]).join(";");this.attributes.style=a.replace(/^;|;(?=;)/,"")};o.getStyle=function(a){var c=this.attributes.style;if(c)return c=CKEDITOR.tools.parseCssText(c,1),c[a]};CKEDITOR.dtd.parentOf=function(a){var c={},b;for(b in this)-1==b.indexOf("$")&&this[b][a]&&(c[b]=1);return c};var H=/^([.\d]*)+(em|ex|px|gd|rem|vw|vh|vm|ch|mm|cm|in|pt|pc|deg|rad|ms|s|hz|khz){1}?/i,
D=/^(?:\b0[^\s]*\s*){1,4}$/,x={ol:{decimal:/\d+/,"lower-roman":/^m{0,4}(cm|cd|d?c{0,3})(xc|xl|l?x{0,3})(ix|iv|v?i{0,3})$/,"upper-roman":/^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$/,"lower-alpha":/^[a-z]+$/,"upper-alpha":/^[A-Z]+$/},ul:{disc:/[l\u00B7\u2002]/,circle:/[\u006F\u00D8]/,square:/[\u006E\u25C6]/}},z=[[1E3,"M"],[900,"CM"],[500,"D"],[400,"CD"],[100,"C"],[90,"XC"],[50,"L"],[40,"XL"],[10,"X"],[9,"IX"],[5,"V"],[4,"IV"],[1,"I"]],B="ABCDEFGHIJKLMNOPQRSTUVWXYZ",s=0,t=null,w,E=CKEDITOR.plugins.pastefromword=
{utils:{createListBulletMarker:function(a,c){var b=new CKEDITOR.htmlParser.element("cke:listbullet");b.attributes={"cke:listsymbol":a[0]};b.add(new CKEDITOR.htmlParser.text(c));return b},isListBulletIndicator:function(a){if(/mso-list\s*:\s*Ignore/i.test(a.attributes&&a.attributes.style))return!0},isContainingOnlySpaces:function(a){var c;return(c=a.onlyChild())&&/^(:?\s|&nbsp;)+$/.test(c.value)},resolveList:function(a){var c=a.attributes,b;if((b=a.removeAnyChildWithName("cke:listbullet"))&&b.length&&
(b=b[0]))return a.name="cke:li",c.style&&(c.style=E.filters.stylesFilter([["text-indent"],["line-height"],[/^margin(:?-left)?$/,null,function(a){a=a.split(" ");a=CKEDITOR.tools.convertToPx(a[3]||a[1]||a[0]);!s&&(null!==t&&a>t)&&(s=a-t);t=a;c["cke:indent"]=s&&Math.ceil(a/s)+1||1}],[/^mso-list$/,null,function(a){var a=a.split(" "),b=Number(a[0].match(/\d+/)),a=Number(a[1].match(/\d+/));1==a&&(b!==w&&(c["cke:reset"]=1),w=b);c["cke:indent"]=a}]])(c.style,a)||""),c["cke:indent"]||(t=0,c["cke:indent"]=
1),CKEDITOR.tools.extend(c,b.attributes),!0;w=t=s=null;return!1},getStyleComponents:function(){var a=CKEDITOR.dom.element.createFromHtml('<div style="position:absolute;left:-9999px;top:-9999px;"></div>',CKEDITOR.document);CKEDITOR.document.getBody().append(a);return function(c,b,f){a.setStyle(c,b);for(var c={},b=f.length,d=0;d<b;d++)c[f[d]]=a.getStyle(f[d]);return c}}(),listDtdParents:CKEDITOR.dtd.parentOf("ol")},filters:{flattenList:function(a,c){var c="number"==typeof c?c:1,b=a.attributes,f;switch(b.type){case "a":f=
"lower-alpha";break;case "1":f="decimal"}for(var d=a.children,e,h=0;h<d.length;h++)if(e=d[h],e.name in CKEDITOR.dtd.$listItem){var j=e.attributes,g=e.children,m=g[g.length-1];m.name in CKEDITOR.dtd.$list&&(a.add(m,h+1),--g.length||d.splice(h--,1));e.name="cke:li";b.start&&!h&&(j.value=b.start);E.filters.stylesFilter([["tab-stops",null,function(a){(a=a.split(" ")[1].match(H))&&(t=CKEDITOR.tools.convertToPx(a[0]))}],1==c?["mso-list",null,function(a){a=a.split(" ");a=Number(a[0].match(/\d+/));a!==w&&
(j["cke:reset"]=1);w=a}]:null])(j.style);j["cke:indent"]=c;j["cke:listtype"]=a.name;j["cke:list-style-type"]=f}else if(e.name in CKEDITOR.dtd.$list){arguments.callee.apply(this,[e,c+1]);d=d.slice(0,h).concat(e.children).concat(d.slice(h+1));a.children=[];e=0;for(g=d.length;e<g;e++)a.add(d[e]);d=a.children}delete a.name;b["cke:list"]=1},assembleList:function(a){for(var c=a.children,b,f,d,e,h,j,a=[],g,m,i,l,k,p,n=0;n<c.length;n++)if(b=c[n],"cke:li"==b.name)if(b.name="li",f=b.attributes,i=(i=f["cke:listsymbol"])&&
i.match(/^(?:[(]?)([^\s]+?)([.)]?)$/),l=k=p=null,f["cke:ignored"])c.splice(n--,1);else{f["cke:reset"]&&(j=e=h=null);d=Number(f["cke:indent"]);d!=e&&(m=g=null);if(i){if(m&&x[m][g].test(i[1]))l=m,k=g;else for(var q in x)for(var u in x[q])if(x[q][u].test(i[1]))if("ol"==q&&/alpha|roman/.test(u)){if(g=/roman/.test(u)?y(i[1]):A(i[1]),!p||g<p)p=g,l=q,k=u}else{l=q;k=u;break}!l&&(l=i[2]?"ol":"ul")}else l=f["cke:listtype"]||"ol",k=f["cke:list-style-type"];m=l;g=k||("ol"==l?"decimal":"disc");k&&k!=("ol"==l?
"decimal":"disc")&&b.addStyle("list-style-type",k);if("ol"==l&&i){switch(k){case "decimal":p=Number(i[1]);break;case "lower-roman":case "upper-roman":p=y(i[1]);break;case "lower-alpha":case "upper-alpha":p=A(i[1])}b.attributes.value=p}if(j){if(d>e)a.push(j=new CKEDITOR.htmlParser.element(l)),j.add(b),h.add(j);else{if(d<e){e-=d;for(var r;e--&&(r=j.parent);)j=r.parent}j.add(b)}c.splice(n--,1)}else a.push(j=new CKEDITOR.htmlParser.element(l)),j.add(b),c[n]=j;h=b;e=d}else j&&(j=e=h=null);for(n=0;n<a.length;n++)if(j=
a[n],q=j.children,g=g=void 0,u=j.children.length,r=g=void 0,c=/list-style-type:(.*?)(?:;|$)/,e=CKEDITOR.plugins.pastefromword.filters.stylesFilter,g=j.attributes,!c.exec(g.style)){for(h=0;h<u;h++)if(g=q[h],g.attributes.value&&Number(g.attributes.value)==h+1&&delete g.attributes.value,g=c.exec(g.attributes.style))if(g[1]==r||!r)r=g[1];else{r=null;break}if(r){for(h=0;h<u;h++)g=q[h].attributes,g.style&&(g.style=e([["list-style-type"]])(g.style)||"");j.addStyle("list-style-type",r)}}w=t=s=null},falsyFilter:function(){return!1},
stylesFilter:function(a,c){return function(b,f){var d=[];(b||"").replace(/&quot;/g,'"').replace(/\s*([^ :;]+)\s*:\s*([^;]+)\s*(?=;|$)/g,function(b,e,g){e=e.toLowerCase();"font-family"==e&&(g=g.replace(/["']/g,""));for(var m,i,l,k=0;k<a.length;k++)if(a[k]&&(b=a[k][0],m=a[k][1],i=a[k][2],l=a[k][3],e.match(b)&&(!m||g.match(m)))){e=l||e;c&&(i=i||g);"function"==typeof i&&(i=i(g,f,e));i&&i.push&&(e=i[0],i=i[1]);"string"==typeof i&&d.push([e,i]);return}!c&&d.push([e,g])});for(var e=0;e<d.length;e++)d[e]=
d[e].join(":");return d.length?d.join(";")+";":!1}},elementMigrateFilter:function(a,c){return a?function(b){var f=c?(new CKEDITOR.style(a,c))._.definition:a;b.name=f.element;CKEDITOR.tools.extend(b.attributes,CKEDITOR.tools.clone(f.attributes));b.addStyle(CKEDITOR.style.getStyleText(f))}:function(){}},styleMigrateFilter:function(a,c){var b=this.elementMigrateFilter;return a?function(f,d){var e=new CKEDITOR.htmlParser.element(null),h={};h[c]=f;b(a,h)(e);e.children=d.children;d.children=[e];e.filter=
function(){};e.parent=d}:function(){}},bogusAttrFilter:function(a,c){if(-1==c.name.indexOf("cke:"))return!1},applyStyleFilter:null},getRules:function(a,c){var b=CKEDITOR.dtd,f=CKEDITOR.tools.extend({},b.$block,b.$listItem,b.$tableContent),d=a.config,e=this.filters,h=e.falsyFilter,j=e.stylesFilter,g=e.elementMigrateFilter,m=CKEDITOR.tools.bind(this.filters.styleMigrateFilter,this.filters),i=this.utils.createListBulletMarker,l=e.flattenList,k=e.assembleList,p=this.utils.isListBulletIndicator,n=this.utils.isContainingOnlySpaces,
q=this.utils.resolveList,u=function(a){a=CKEDITOR.tools.convertToPx(a);return isNaN(a)?a:a+"px"},r=this.utils.getStyleComponents,t=this.utils.listDtdParents,o=!1!==d.pasteFromWordRemoveFontStyles,s=!1!==d.pasteFromWordRemoveStyles;return{elementNames:[[/meta|link|script/,""]],root:function(a){a.filterChildren(c);k(a)},elements:{"^":function(a){var c;CKEDITOR.env.gecko&&(c=e.applyStyleFilter)&&c(a)},$:function(a){var v=a.name||"",e=a.attributes;v in f&&e.style&&(e.style=j([[/^(:?width|height)$/,null,
u]])(e.style)||"");if(v.match(/h\d/)){a.filterChildren(c);if(q(a))return;g(d["format_"+v])(a)}else if(v in b.$inline)a.filterChildren(c),n(a)&&delete a.name;else if(-1!=v.indexOf(":")&&-1==v.indexOf("cke")){a.filterChildren(c);if("v:imagedata"==v){if(v=a.attributes["o:href"])a.attributes.src=v;a.name="img";return}delete a.name}v in t&&(a.filterChildren(c),k(a))},style:function(a){if(CKEDITOR.env.gecko){var a=(a=a.onlyChild().value.match(/\/\* Style Definitions \*\/([\s\S]*?)\/\*/))&&a[1],c={};a&&
(a.replace(/[\n\r]/g,"").replace(/(.+?)\{(.+?)\}/g,function(a,b,F){for(var b=b.split(","),a=b.length,d=0;d<a;d++)CKEDITOR.tools.trim(b[d]).replace(/^(\w+)(\.[\w-]+)?$/g,function(a,b,d){b=b||"*";d=d.substring(1,d.length);d.match(/MsoNormal/)||(c[b]||(c[b]={}),d?c[b][d]=F:c[b]=F)})}),e.applyStyleFilter=function(a){var b=c["*"]?"*":a.name,d=a.attributes&&a.attributes["class"];b in c&&(b=c[b],"object"==typeof b&&(b=b[d]),b&&a.addStyle(b,!0))})}return!1},p:function(a){if(/MsoListParagraph/i.exec(a.attributes["class"])||
a.getStyle("mso-list")){var b=a.firstChild(function(a){return a.type==CKEDITOR.NODE_TEXT&&!n(a.parent)});(b=b&&b.parent)&&b.addStyle("mso-list","Ignore")}a.filterChildren(c);q(a)||(d.enterMode==CKEDITOR.ENTER_BR?(delete a.name,a.add(new CKEDITOR.htmlParser.element("br"))):g(d["format_"+(d.enterMode==CKEDITOR.ENTER_P?"p":"div")])(a))},div:function(a){var c=a.onlyChild();if(c&&"table"==c.name){var b=a.attributes;c.attributes=CKEDITOR.tools.extend(c.attributes,b);b.style&&c.addStyle(b.style);c=new CKEDITOR.htmlParser.element("div");
c.addStyle("clear","both");a.add(c);delete a.name}},td:function(a){a.getAncestor("thead")&&(a.name="th")},ol:l,ul:l,dl:l,font:function(a){if(p(a.parent))delete a.name;else{a.filterChildren(c);var b=a.attributes,d=b.style,e=a.parent;"font"==e.name?(CKEDITOR.tools.extend(e.attributes,a.attributes),d&&e.addStyle(d),delete a.name):(d=d||"",b.color&&("#000000"!=b.color&&(d+="color:"+b.color+";"),delete b.color),b.face&&(d+="font-family:"+b.face+";",delete b.face),b.size&&(d+="font-size:"+(3<b.size?"large":
3>b.size?"small":"medium")+";",delete b.size),a.name="span",a.addStyle(d))}},span:function(a){if(p(a.parent))return!1;a.filterChildren(c);if(n(a))return delete a.name,null;if(p(a)){var b=a.firstChild(function(a){return a.value||"img"==a.name}),e=(b=b&&(b.value||"l."))&&b.match(/^(?:[(]?)([^\s]+?)([.)]?)$/);if(e)return b=i(e,b),(a=a.getAncestor("span"))&&/ mso-hide:\s*all|display:\s*none /.test(a.attributes.style)&&(b.attributes["cke:ignored"]=1),b}if(e=(b=a.attributes)&&b.style)b.style=j([["line-height"],
[/^font-family$/,null,!o?m(d.font_style,"family"):null],[/^font-size$/,null,!o?m(d.fontSize_style,"size"):null],[/^color$/,null,!o?m(d.colorButton_foreStyle,"color"):null],[/^background-color$/,null,!o?m(d.colorButton_backStyle,"color"):null]])(e,a)||"";b.style||delete b.style;CKEDITOR.tools.isEmpty(b)&&delete a.name;return null},b:g(d.coreStyles_bold),i:g(d.coreStyles_italic),u:g(d.coreStyles_underline),s:g(d.coreStyles_strike),sup:g(d.coreStyles_superscript),sub:g(d.coreStyles_subscript),a:function(a){a=
a.attributes;a.href&&a.href.match(/^file:\/\/\/[\S]+#/i)&&(a.href=a.href.replace(/^file:\/\/\/[^#]+/i,""))},"cke:listbullet":function(a){a.getAncestor(/h\d/)&&!d.pasteFromWordNumberedHeadingToList&&delete a.name}},attributeNames:[[/^onmouse(:?out|over)/,""],[/^onload$/,""],[/(?:v|o):\w+/,""],[/^lang/,""]],attributes:{style:j(s?[[/^list-style-type$/,null],[/^margin$|^margin-(?!bottom|top)/,null,function(a,b,c){if(b.name in{p:1,div:1}){b="ltr"==d.contentsLangDirection?"margin-left":"margin-right";if("margin"==
c)a=r(c,a,[b])[b];else if(c!=b)return null;if(a&&!D.test(a))return[b,a]}return null}],[/^clear$/],[/^border.*|margin.*|vertical-align|float$/,null,function(a,b){if("img"==b.name)return a}],[/^width|height$/,null,function(a,b){if(b.name in{table:1,td:1,th:1,img:1})return a}]]:[[/^mso-/],[/-color$/,null,function(a){if("transparent"==a)return!1;if(CKEDITOR.env.gecko)return a.replace(/-moz-use-text-color/g,"transparent")}],[/^margin$/,D],["text-indent","0cm"],["page-break-before"],["tab-stops"],["display",
"none"],o?[/font-?/]:null],s),width:function(a,c){if(c.name in b.$tableContent)return!1},border:function(a,c){if(c.name in b.$tableContent)return!1},"class":h,bgcolor:h,valign:s?h:function(a,b){b.addStyle("vertical-align",a);return!1}},comment:!CKEDITOR.env.ie?function(a,b){var c=a.match(/<img.*?>/),d=a.match(/^\[if !supportLists\]([\s\S]*?)\[endif\]$/);return d?(d=(c=d[1]||c&&"l.")&&c.match(/>(?:[(]?)([^\s]+?)([.)]?)</),i(d,c)):CKEDITOR.env.gecko&&c?(c=CKEDITOR.htmlParser.fragment.fromHtml(c[0]).children[0],
(d=(d=(d=b.previous)&&d.value.match(/<v:imagedata[^>]*o:href=['"](.*?)['"]/))&&d[1])&&(c.attributes.src=d),c):!1}:h}}},G=function(){this.dataFilter=new CKEDITOR.htmlParser.filter};G.prototype={toHtml:function(a){var a=CKEDITOR.htmlParser.fragment.fromHtml(a),c=new CKEDITOR.htmlParser.basicWriter;a.writeHtml(c,this.dataFilter);return c.getHtml(!0)}};CKEDITOR.cleanWord=function(a,c){CKEDITOR.env.gecko&&(a=a.replace(/(<\!--\[if[^<]*?\])--\>([\S\s]*?)<\!--(\[endif\]--\>)/gi,"$1$2$3"));CKEDITOR.env.webkit&&
(a=a.replace(/(class="MsoListParagraph[^>]+><\!--\[if !supportLists\]--\>)([^<]+<span[^<]+<\/span>)(<\!--\[endif\]--\>)/gi,"$1<span>$2</span>$3"));var b=new G,f=b.dataFilter;f.addRules(CKEDITOR.plugins.pastefromword.getRules(c,f));c.fire("beforeCleanWord",{filter:f});try{a=b.toHtml(a)}catch(d){alert(c.lang.pastefromword.error)}a=a.replace(/cke:.*?".*?"/g,"");a=a.replace(/style=""/g,"");return a=a.replace(/<span>/g,"")}})();
|
PypiClean
|
/isedit-0.3.0.tar.gz/isedit-0.3.0/js/node_modules/moment/src/locale/gl.js
|
import moment from '../moment';
export default moment.defineLocale('gl', {
months: 'xaneiro_febreiro_marzo_abril_maio_xuño_xullo_agosto_setembro_outubro_novembro_decembro'.split(
'_'
),
monthsShort:
'xan._feb._mar._abr._mai._xuñ._xul._ago._set._out._nov._dec.'.split(
'_'
),
monthsParseExact: true,
weekdays: 'domingo_luns_martes_mércores_xoves_venres_sábado'.split('_'),
weekdaysShort: 'dom._lun._mar._mér._xov._ven._sáb.'.split('_'),
weekdaysMin: 'do_lu_ma_mé_xo_ve_sá'.split('_'),
weekdaysParseExact: true,
longDateFormat: {
LT: 'H:mm',
LTS: 'H:mm:ss',
L: 'DD/MM/YYYY',
LL: 'D [de] MMMM [de] YYYY',
LLL: 'D [de] MMMM [de] YYYY H:mm',
LLLL: 'dddd, D [de] MMMM [de] YYYY H:mm',
},
calendar: {
sameDay: function () {
return '[hoxe ' + (this.hours() !== 1 ? 'ás' : 'á') + '] LT';
},
nextDay: function () {
return '[mañá ' + (this.hours() !== 1 ? 'ás' : 'á') + '] LT';
},
nextWeek: function () {
return 'dddd [' + (this.hours() !== 1 ? 'ás' : 'a') + '] LT';
},
lastDay: function () {
return '[onte ' + (this.hours() !== 1 ? 'á' : 'a') + '] LT';
},
lastWeek: function () {
return (
'[o] dddd [pasado ' + (this.hours() !== 1 ? 'ás' : 'a') + '] LT'
);
},
sameElse: 'L',
},
relativeTime: {
future: function (str) {
if (str.indexOf('un') === 0) {
return 'n' + str;
}
return 'en ' + str;
},
past: 'hai %s',
s: 'uns segundos',
ss: '%d segundos',
m: 'un minuto',
mm: '%d minutos',
h: 'unha hora',
hh: '%d horas',
d: 'un día',
dd: '%d días',
M: 'un mes',
MM: '%d meses',
y: 'un ano',
yy: '%d anos',
},
dayOfMonthOrdinalParse: /\d{1,2}º/,
ordinal: '%dº',
week: {
dow: 1, // Monday is the first day of the week.
doy: 4, // The week that contains Jan 4th is the first week of the year.
},
});
|
PypiClean
|
/velovae-0.1.2-py3-none-any.whl/build/lib/build/lib/build/lib/build/lib/model/model_util.py
|
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.special import loggamma
from .scvelo_util import mRNA, vectorize, tau_inv, R_squared, test_bimodality, leastsq_NxN
from sklearn.neighbors import NearestNeighbors
import pynndescent
from tqdm.autonotebook import trange
from sklearn.cluster import SpectralClustering, KMeans
from scipy.stats import dirichlet, bernoulli, kstest, linregress
from scipy.linalg import svdvals
###################################################################################
# Dynamical Model
# Reference:
# Bergen, V., Lange, M., Peidli, S., Wolf, F. A., & Theis, F. J. (2020).
# Generalizing RNA velocity to transient cell states through dynamical modeling.
# Nature biotechnology, 38(12), 1408-1414.
###################################################################################
def scv_pred_single(t, alpha, beta, gamma, ts, scaling=1.0, uinit=0, sinit=0):
# Predicts u and s using the dynamical model.
beta = beta*scaling
tau, alpha, u0, s0 = vectorize(t, ts, alpha, beta, gamma, u0=uinit, s0=sinit)
tau = np.clip(tau, a_min=0, a_max=None)
ut, st = mRNA(tau, u0, s0, alpha, beta, gamma)
ut = ut*scaling
return ut.squeeze(), st.squeeze()
def scv_pred(adata, key, glist=None):
# Reproduce the full prediction of scvelo dynamical model
n_gene = len(glist) if glist is not None else adata.n_vars
n_cell = adata.n_obs
ut, st = np.ones((n_cell, n_gene))*np.nan, np.ones((n_cell, n_gene))*np.nan
if glist is None:
glist = adata.var_names.to_numpy()
for i in range(n_gene):
idx = np.where(adata.var_names == glist[i])[0][0]
item = adata.var.loc[glist[i]]
if len(item) == 0:
print('Gene '+glist[i]+' not found!')
continue
alpha, beta, gamma = item[f'{key}_alpha'], item[f'{key}_beta'], item[f'{key}_gamma']
scaling = item[f'{key}_scaling']
ts = item[f'{key}_t_']
t = adata.layers[f'{key}_t'][:, idx]
if np.isnan(alpha):
continue
u_g, s_g = scv_pred_single(t, alpha, beta, gamma, ts, scaling)
ut[:, i] = u_g
st[:, i] = s_g
return ut, st
#End of Reference
############################################################
# Shared among all VAEs
############################################################
def hist_equal(t, tmax, perc=0.95, n_bin=101):
# Perform histogram equalization across all local times.
t_ub = np.quantile(t, perc)
t_lb = t.min()
delta_t = (t_ub - t_lb)/(n_bin-1)
bins = [t_lb+i*delta_t for i in range(n_bin)]+[t.max()]
pdf_t, edges = np.histogram(t, bins, density=True)
pt, edges = np.histogram(t, bins, density=False)
# Perform histogram equalization
cdf_t = np.concatenate(([0], np.cumsum(pt)))
cdf_t = cdf_t/cdf_t[-1]
t_out = np.zeros((len(t)))
for i in range(n_bin):
mask = (t >= bins[i]) & (t < bins[i+1])
t_out[mask] = (cdf_t[i] + (t[mask]-bins[i])*pdf_t[i])*tmax
return t_out
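# --- Example (not part of the original module): a minimal sketch of histogram
# equalization on a synthetic, skewed set of local times.
def _hist_equal_demo():
    np.random.seed(0)
    t = np.random.exponential(scale=1.0, size=500)
    t_eq = hist_equal(t, tmax=20.0)
    return t_eq  # approximately uniformly distributed times scaled by tmax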
############################################################
# Basic utility function to compute ODE solutions for all models
############################################################
def pred_su_numpy(tau, u0, s0, alpha, beta, gamma):
############################################################
# (Numpy Version)
# Analytical solution of the ODE
# tau: [B x 1] or [B x 1 x 1] time duration starting from the switch-on time of each gene.
# u0, s0: [G] or [N type x G] initial conditions
# alpha, beta, gamma: [G] or [N type x G] generation, splicing and degradation rates
############################################################
unstability = (np.abs(beta-gamma) < 1e-6)
expb, expg = np.exp(-beta*tau), np.exp(-gamma*tau)
upred = u0*expb + alpha/beta*(1-expb)
spred = s0*expg + alpha/gamma*(1-expg) \
+ (alpha-beta*u0)/(gamma-beta+1e-6)*(expg-expb)*(1-unstability) \
- (alpha-beta*u0)*tau*expg*unstability
return np.clip(upred, a_min=0, a_max=None), np.clip(spred, a_min=0, a_max=None)
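# --- Example (not part of the original module): a minimal sketch evaluating the
# analytical ODE solution for one toy gene starting from u0 = s0 = 0.
def _pred_su_numpy_demo():
    tau = np.linspace(0, 5, 6).reshape(-1, 1)   # [B x 1] time since switch-on
    alpha, beta, gamma = np.array([2.0]), np.array([1.0]), np.array([0.5])
    u0, s0 = np.zeros(1), np.zeros(1)
    upred, spred = pred_su_numpy(tau, u0, s0, alpha, beta, gamma)
    return upred, spred  # approach alpha/beta = 2 and alpha/gamma = 4 as tau grows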
def pred_su(tau, u0, s0, alpha, beta, gamma):
############################################################
# (PyTorch Version)
# Analytical solution of the ODE
# tau: [B x 1] or [B x 1 x 1] time duration starting from the switch-on time of each gene.
# u0, s0: [G] or [N type x G] initial conditions
# alpha, beta, gamma: [G] or [N type x G] generation, splicing and degradation rates
############################################################
expb, expg = torch.exp(-beta*tau), torch.exp(-gamma*tau)
eps = 1e-6
unstability = (torch.abs(beta-gamma) < eps).long()
upred = u0*expb + alpha/beta*(1-expb)
spred = s0*expg + alpha/gamma*(1-expg) \
+ (alpha-beta*u0)/(gamma-beta+eps)*(expg-expb)*(1-unstability) \
- (alpha-beta*u0)*tau*expg*unstability
return nn.functional.relu(upred), nn.functional.relu(spred)
def pred_su_back(tau, u1, s1, alpha, beta, gamma):
############################################################
# (PyTorch Version)
# Analytical solution of the ODE
# tau: [B x 1] or [B x 1 x 1] time duration starting from the switch-on time of each gene.
# u1, s1: [G] or [N type x G] values at the end of the interval (the ODE is integrated backward in time)
# alpha, beta, gamma: [G] or [N type x G] generation, splicing and degradation rates
############################################################
expb, expg = torch.exp(beta*tau), torch.exp(gamma*tau)
eps = 1e-6
unstability = (torch.abs(beta-gamma) < eps).long()
upred = u1*expb - alpha/beta*(expb-1)
spred = s1*expg - alpha/gamma*(expg-1) \
- (alpha-beta*u1)/(gamma-beta+eps)*(expb-expg)*(1-unstability) \
+ (alpha-beta*u1)*expb*tau*unstability
return nn.functional.relu(upred), nn.functional.relu(spred)
###################################################################################
# Initialization Methods
# Reference:
# Bergen, V., Lange, M., Peidli, S., Wolf, F. A., & Theis, F. J. (2020).
# Generalizing RNA velocity to transient cell states through dynamical modeling.
# Nature biotechnology, 38(12), 1408-1414.
###################################################################################
def scale_by_gene(U, S, train_idx=None, mode='scale_u'):
# mode
# 'auto' means to scale the one with a smaller range
# 'scale_u' means to match std(u) with std(s)
# 'scale_s' means to match std(s) with std(u)
G = U.shape[1]
scaling_u = np.ones((G))
scaling_s = np.ones((G))
std_u, std_s = np.ones((G)), np.ones((G))
for i in range(G):
if train_idx is None:
si, ui = S[:, i], U[:, i]
else:
si, ui = S[train_idx, i], U[train_idx, i]
sfilt, ufilt = si[(si > 0) & (ui > 0)], ui[(si > 0) & (ui > 0)] # Use only nonzero data points
if len(sfilt) > 3 and len(ufilt) > 3:
std_u[i] = np.std(ufilt)
std_s[i] = np.std(sfilt)
mask_u, mask_s = (std_u == 0), (std_s == 0)
std_u = std_u + (mask_u & (~mask_s))*std_s + (mask_u & mask_s)*1
std_s = std_s + ((~mask_u) & mask_s)*std_u + (mask_u & mask_s)*1
if mode == 'auto':
scaling_u = np.max(np.stack([scaling_u, (std_u/std_s)]), 0)
scaling_s = np.max(np.stack([scaling_s, (std_s/std_u)]), 0)
elif mode == 'scale_u':
scaling_u = std_u/std_s
elif mode == 'scale_s':
scaling_s = std_s/std_u
return U/scaling_u, S/scaling_s, scaling_u, scaling_s
def get_gene_scale(U, S, train_idx=None, mode='scale_u'):
# mode
# 'auto' means to scale the one with a smaller range
# 'scale_u' means to match std(u) with std(s)
# 'scale_s' means to match std(s) with std(u)
G = U.shape[1]
scaling_u = np.ones((G))
scaling_s = np.ones((G))
std_u, std_s = np.ones((G)), np.ones((G))
for i in range(G):
if train_idx is None:
si, ui = S[:, i], U[:, i]
else:
si, ui = S[train_idx, i], U[train_idx, i]
sfilt, ufilt = si[(si > 0) & (ui > 0)], ui[(si > 0) & (ui > 0)] # Use only nonzero data points
if len(sfilt) > 3 and len(ufilt) > 3:
std_u[i] = np.std(ufilt)
std_s[i] = np.std(sfilt)
mask_u, mask_s = (std_u == 0), (std_s == 0)
std_u = std_u + (mask_u & (~mask_s))*std_s + (mask_u & mask_s)*1
std_s = std_s + ((~mask_u) & mask_s)*std_u + (mask_u & mask_s)*1
if mode == 'auto':
scaling_u = np.max(np.stack([scaling_u, (std_u/std_s)]), 0)
scaling_s = np.max(np.stack([scaling_s, (std_s/std_u)]), 0)
elif mode == 'scale_u':
scaling_u = std_u/std_s
elif mode == 'scale_s':
scaling_s = std_s/std_u
return scaling_u, scaling_s
def compute_scaling_bound(cell_scale):
# Compute the upper and lower bound for scaling factor thresholding
log_scale = np.log(cell_scale)
q3, q1 = np.quantile(log_scale, 0.75), np.quantile(log_scale, 0.25)
iqr = q3 - q1
ub, lb = q3 + 1.5*iqr, q1 - 1.5*iqr
return np.exp(ub), np.exp(lb)
def clip_cell_scale(lu, ls):
# Remove extreme values
lu_max, lu_min = compute_scaling_bound(lu)
ls_max, ls_min = compute_scaling_bound(ls)
lu = np.clip(lu, a_min=lu_min, a_max=lu_max)
ls = np.clip(ls, a_min=ls_min, a_max=ls_max)
return lu, ls
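# --- Example (not part of the original module): a minimal sketch of clipping
# extreme cell scaling factors with the IQR rule implemented above, on synthetic data.
def _clip_cell_scale_demo():
    np.random.seed(0)
    lu = np.exp(np.random.randn(1000, 1) * 0.3)
    ls = np.exp(np.random.randn(1000, 1) * 0.3)
    lu[0], ls[0] = 100.0, 1e-4  # inject outliers
    return clip_cell_scale(lu, ls)  # outliers pulled back inside the IQR bounds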
def scale_by_cell(U, S, train_idx=None, separate_us_scale=True, q=50):
nu, ns = U.sum(1, keepdims=True), S.sum(1, keepdims=True)
if separate_us_scale:
norm_count = ((np.percentile(nu, q), np.percentile(ns, q)) if train_idx is None else
(np.percentile(nu[train_idx], q), np.percentile(ns[train_idx], q)))
lu = nu/norm_count[0]
ls = ns/norm_count[1]
else:
norm_count = np.percentile(nu+ns, q) if train_idx is None else np.percentile(nu[train_idx]+ns[train_idx], q)
lu = (nu+ns)/norm_count
ls = lu
# Remove extreme values
print(f"Detecting zero scaling factors: {np.sum(lu==0)}, {np.sum(ls==0)}")
lu[lu == 0] = np.min(lu[lu > 0])
ls[ls == 0] = np.min(ls[ls > 0])
return U/lu, S/ls, lu, ls
def get_cell_scale(U, S, train_idx=None, separate_us_scale=True, q=0.5):
nu, ns = U.sum(1, keepdims=True), S.sum(1, keepdims=True)
if separate_us_scale:
norm_count = ((np.percentile(nu, q), np.percentile(ns, q)) if train_idx is None else
(np.percentile(nu[train_idx], q), np.percentile(ns[train_idx], q)))
lu = nu/norm_count[0]
ls = ns/norm_count[1]
else:
norm_count = np.percentile(nu+ns, q) if train_idx is None else np.percentile(nu[train_idx]+ns[train_idx], q)
lu = (nu+ns)/norm_count
ls = lu
# Remove extreme values
print(f"Detecting zero scaling factors: {np.sum(lu==0)}, {np.sum(ls==0)}")
lu[lu == 0] = np.min(lu[lu > 0])
ls[ls == 0] = np.min(ls[ls > 0])
return lu, ls
def get_dispersion(U, S, clip_min=1e-3, clip_max=1000):
mean_u, mean_s = np.clip(U.mean(0), 1e-6, None), np.clip(S.mean(0), 1e-6, None)
var_u, var_s = U.var(0), S.var(0)
dispersion_u, dispersion_s = var_u/mean_u, var_s/mean_s
dispersion_u = np.clip(dispersion_u, a_min=clip_min, a_max=clip_max)
dispersion_s = np.clip(dispersion_s, a_min=clip_min, a_max=clip_max)
return mean_u, mean_s, dispersion_u, dispersion_s
def linreg(u, s):
# Linear regression (helper function)
q = np.sum(s*s)
r = np.sum(u*s)
k = r/q
if np.isinf(k) or np.isnan(k):
k = 1.0+np.random.rand()
return k
def init_gene(s, u, percent, fit_scaling=False, Ntype=None):
# Adapted from scVelo
std_u, std_s = np.std(u), np.std(s)
scaling = std_u / std_s if fit_scaling else 1.0
u = u/scaling
# Pick Quantiles
# initialize beta and gamma from extreme quantiles of s
mask_s = s >= np.percentile(s, percent, axis=0)
mask_u = u >= np.percentile(u, percent, axis=0)
mask = mask_s & mask_u
if not np.any(mask):
mask = mask_s
# Initialize alpha, beta and gamma
beta = 1
gamma = linreg(u[mask], s[mask]) + 1e-6
if gamma < 0.05 / scaling:
gamma *= 1.2
elif gamma > 1.5 / scaling:
gamma /= 1.2
u_inf, s_inf = u[mask].mean(), s[mask].mean()
u0_, s0_ = u_inf, s_inf
alpha = u_inf*beta
# initialize switching from u quantiles and alpha from s quantiles
tstat_u, pval_u, means_u = test_bimodality(u, kde=True)
tstat_s, pval_s, means_s = test_bimodality(s, kde=True)
pval_steady = max(pval_u, pval_s)
steady_u = means_u[1]
if pval_steady < 1e-3:
u_inf = np.mean([u_inf, steady_u])
alpha = gamma * s_inf
beta = alpha / u_inf
u0_, s0_ = u_inf, s_inf
t_ = tau_inv(u0_, s0_, 0, 0, alpha, beta, gamma) # time to reach steady state
tau = tau_inv(u, s, 0, 0, alpha, beta, gamma) # induction
tau = np.clip(tau, 0, t_)
tau_ = tau_inv(u, s, u0_, s0_, 0, beta, gamma) # repression
tau_ = np.clip(tau_, 0, np.max(tau_[s > 0]))
ut, st = mRNA(tau, 0, 0, alpha, beta, gamma)
ut_, st_ = mRNA(tau_, u0_, s0_, 0, beta, gamma)
distu, distu_ = (u - ut), (u - ut_)
dists, dists_ = (s - st), (s - st_)
res = np.array([distu ** 2 + dists ** 2, distu_ ** 2 + dists_ ** 2])
t = np.array([tau, tau_+np.ones((len(tau_)))*t_])
o = np.argmin(res, axis=0)
t_latent = np.array([t[o[i], i] for i in range(len(tau))])
return alpha, beta, gamma, t_latent, u0_, s0_, t_, scaling
def init_params(data, percent, fit_offset=False, fit_scaling=True, eps=1e-3):
# Adapted from scVelo
# Use the steady-state model to estimate alpha, beta,
# gamma and the latent time
# data: ncell x (2*ngene) tensor
# percent: percentage limit to pick the data
# Output: a ncellx4 2D array of parameters
ngene = data.shape[1]//2
u = data[:, :ngene]
s = data[:, ngene:]
params = np.ones((ngene, 4)) # four parameters: alpha, beta, gamma, scaling
params[:, 0] = np.random.rand((ngene))*np.max(u, 0)
params[:, 2] = np.clip(np.random.rand((ngene))*np.max(u, 0)/(np.max(s, 0)+1e-10), eps, None)
T = np.zeros((ngene, len(s)))
Ts = np.zeros((ngene))
U0, S0 = np.zeros((ngene)), np.zeros((ngene)) # Steady-1 State
print('Estimating ODE parameters...')
for i in trange(ngene):
si, ui = s[:, i], u[:, i]
sfilt, ufilt = si[(si > 0) & (ui > 0)], ui[(si > 0) & (ui > 0)] # Use only nonzero data points
if len(sfilt) > 3 and len(ufilt) > 3:
alpha, beta, gamma, t, u0_, s0_, ts, scaling = init_gene(sfilt, ufilt, percent, fit_scaling)
params[i, :] = np.array([alpha, beta, np.clip(gamma, eps, None), scaling])
T[i, (si > 0) & (ui > 0)] = t
U0[i] = u0_
S0[i] = s0_
Ts[i] = ts
else:
U0[i] = np.max(u)
S0[i] = np.max(s)
# Filter out genes
min_r2 = 0.01
offset, gamma = leastsq_NxN(s, u, fit_offset, perc=[100-percent, percent])
gamma = np.clip(gamma, eps, None)
residual = u-gamma*s
if fit_offset:
residual -= offset
r2 = R_squared(residual, total=u-u.mean(0))
velocity_genes = (r2 > min_r2) & (r2 < 0.95) & (gamma > 0.01) & (np.max(s > 0, 0) > 0) & (np.max(u > 0, 0) > 0)
print(f'Detected {np.sum(velocity_genes)} velocity genes.')
dist_u, dist_s = np.zeros(u.shape), np.zeros(s.shape)
print('Estimating the variance...')
assert np.all(params[:, 2] > 0)
for i in trange(ngene):
upred, spred = scv_pred_single(T[i],
params[i, 0],
params[i, 1],
params[i, 2],
Ts[i],
params[i, 3]) # upred has the original scale
dist_u[:, i] = u[:, i] - upred
dist_s[:, i] = s[:, i] - spred
sigma_u = np.clip(np.std(dist_u, 0), 0.1, None)
sigma_s = np.clip(np.std(dist_s, 0), 0.1, None)
sigma_u[np.isnan(sigma_u)] = 0.1
sigma_s[np.isnan(sigma_s)] = 0.1
# Make sure all genes get the same total relevance score
gene_score = velocity_genes * 1.0 + (1 - velocity_genes) * 0.25
return params[:, 0], params[:, 1], params[:, 2], params[:, 3], Ts, U0, S0, sigma_u, sigma_s, T.T, gene_score
###################################################################################
# Reinitialization based on the global time
###################################################################################
def get_ts_global(tgl, U, S, perc):
# Initialize the transition time in the original ODE model.
tsgl = np.zeros((U.shape[1]))
for i in range(U.shape[1]):
u, s = U[:, i], S[:, i]
zero_mask = (u > 0) & (s > 0)
mask_u, mask_s = u >= np.percentile(u, perc), s >= np.percentile(s, perc)
mask = mask_u & mask_s & zero_mask
if not np.any(mask):
mask = (mask_u | mask_s) & zero_mask
# edge case: all u or all s are zero
if not np.any(mask):
mask = (mask_u | mask_s) & ((u > 0) | (s > 0))
# edge case: all u and all s are zero
if not np.any(mask):
mask = np.ones((len(u))).astype(bool)
tsgl[i] = np.median(tgl[mask])
if np.isnan(tsgl[i]):
tsgl[i] = np.median(tgl)
assert not np.any(np.isnan(tsgl))
return tsgl
def reinit_gene(u, s, t, ts, eps=1e-6, max_val=1e4):
# Applied to the regular ODE
# Initialize the ODE parameters (alpha,beta,gamma,t_on) from
# input data and estimated global cell time.
# u1, u2: picked from induction
q = 0.95
mask1_u = u > np.quantile(u, q)
mask1_s = s > np.quantile(s, q)
# edge case handling
while not np.any(mask1_u | mask1_s) and q > 0.05:
q = q - 0.05
mask1_u = u > np.quantile(u, q)
mask1_s = s > np.quantile(s, q)
if not np.any(mask1_u | mask1_s):
mask1_u = u >= np.min(u)
mask1_s = s >= np.min(s)
assert np.any(mask1_u | mask1_s)
u1, s1 = np.median(u[mask1_u | mask1_s]), np.median(s[mask1_s | mask1_u])
if u1 == 0 or np.isnan(u1):
u1 = np.max(u)
if s1 == 0 or np.isnan(s1):
s1 = np.max(s)
t1 = np.median(t[mask1_u | mask1_s])
if t1 <= 0:
tm = np.max(t[mask1_u | mask1_s])
t1 = tm if tm > 0 else 1.0
mask2_u = (u >= u1*0.49) & (u <= u1*0.51) & (t <= ts)
mask2_s = (s >= s1*0.49) & (s <= s1*0.51) & (t <= ts)
if np.any(mask2_u):
t2 = np.median(t[mask2_u | mask2_s])
u2 = np.median(u[mask2_u])
t0 = np.log(np.clip((u1-u2)/(u1*np.exp(-t2)-u2*np.exp(-t1)+eps), a_min=1.0, a_max=None))
else:
t0 = 0
beta = 1
alpha = u1/(1-np.exp(t0-t1)) if u1 > 0 else 0.1*np.random.rand()
alpha = np.clip(alpha, None, max_val)
if alpha <= 0 or np.isnan(alpha) or np.isinf(alpha):
alpha = u1
p = 0.95
s_inf = np.quantile(s, p)
while s_inf == 0 and p < 1.0:
p = p + 0.01
s_inf = np.quantile(s, p)
gamma = alpha/np.clip(s_inf, a_min=eps, a_max=None)
if gamma <= 0 or np.isnan(gamma) or np.isinf(gamma):
gamma = 2.0
gamma = np.clip(gamma, None, max_val)
return alpha, beta, gamma, t0
def reinit_params(U, S, t, ts):
print('Reinitialize the regular ODE parameters based on estimated global latent time.')
G = U.shape[1]
alpha, beta, gamma, ton = np.zeros((G)), np.zeros((G)), np.zeros((G)), np.zeros((G))
for i in trange(G):
alpha_g, beta_g, gamma_g, ton_g = reinit_gene(U[:, i], S[:, i], t, ts[i])
alpha[i] = alpha_g
beta[i] = beta_g
gamma[i] = gamma_g
ton[i] = ton_g
assert not np.any(np.isnan(alpha))
assert not np.any(np.isnan(gamma))
return alpha, beta, gamma, ton
def find_dirichlet_param(mu, std):
alpha_1 = ((mu/std)*((1-mu)/std) - 1) * mu
return np.array([alpha_1, (1-mu)/mu*alpha_1])
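# --- Example (not part of the original module): a minimal sketch checking that
# find_dirichlet_param recovers concentration parameters matching the requested
# mean and standard deviation of the first component.
def _find_dirichlet_param_demo():
    alpha = find_dirichlet_param(0.6, 0.1)            # -> approx. [13.8, 9.2]
    w = dirichlet.rvs(alpha, size=10000, random_state=0)[:, 0]
    return w.mean(), w.std()                          # close to 0.6 and 0.1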
def assign_gene_mode_binary(adata, w_noisy, thred=0.05):
Cs = np.corrcoef(adata.layers['Ms'].T)
Cu = np.corrcoef(adata.layers['Mu'].T)
C = 1+Cs*0.5+Cu*0.5
C[np.isnan(C)] = 0.0
spc = SpectralClustering(2, affinity='precomputed', assign_labels='discretize')
y = spc.fit_predict(C)
adata.var['init_mode'] = y
alpha_1, alpha_2 = find_dirichlet_param(0.6, 0.2), find_dirichlet_param(0.4, 0.2)
w = dirichlet(alpha_1).rvs(adata.n_vars)
# Perform Kolmogorov-Smirnov Test
w1, w2 = w_noisy[y == 0], w_noisy[y == 1]
w_neutral = dirichlet.rvs([12, 12], size=adata.n_vars, random_state=42)
res, pval = kstest(w1, w2)
if pval > 0.05:
print('Two modes are indistinguishable.')
return w_neutral
res_1, pval_1 = kstest(w1, w2, alternative='greater', method='asymp')
res_2, pval_2 = kstest(w1, w2, alternative='less', method='asymp')
if pval_1 >= thred: # Take the null hypothesis that values of w1 are greater
adata.varm['alpha_w'] = (y.reshape(-1, 1) == 0) * alpha_1 + (y.reshape(-1, 1) == 1) * alpha_2
return (y == 0) * w[:, 0] + (y == 1) * w[:, 1]
elif pval_2 >= thred:
adata.varm['alpha_w'] = (y.reshape(-1, 1) == 1) * alpha_1 + (y.reshape(-1, 1) == 0) * alpha_2
return (y == 0) * w[:, 1] + (y == 1) * w[:, 0]
return w_neutral
def get_nclusters(C, noise=1.0, n_cluster_thred=3):
# determine the number of clusters based an affinity matrix
# thred: minimal number of clusters
v = svdvals(C)
n_clusters = 0
while n_clusters < n_cluster_thred and noise > 0:
thred = 4 / np.sqrt(3) * np.sqrt(C.shape[0]) * noise
n_clusters = np.sum((v > thred))
noise = noise - 0.005
print(f'{n_clusters} clusters detected based on gene co-expression.')
return n_clusters
def sample_dir_mix(w, yw, std_prior):
# Sample from a mixture of dirichlet distributions
mu_0, mu_1 = np.mean(w[yw == 0]), np.mean(w[yw == 1])
alpha_w_0 = find_dirichlet_param(mu_0, std_prior)
alpha_w_1 = find_dirichlet_param(mu_1, std_prior)
np.random.seed(42)
q1 = dirichlet.rvs(alpha_w_0, size=len(w))[:, 0]
np.random.seed(42)
q2 = dirichlet.rvs(alpha_w_1, size=len(w))[:, 0]
wq = np.sum(yw == 1)/len(yw)
np.random.seed(42)
b = bernoulli.rvs(wq, size=len(w))
q = (b == 0)*q1 + (b == 1)*q2
print(f'({1-wq:.2f}, {mu_0}), ({wq:.2f}, {mu_1})')
return q
def assign_gene_mode_auto(adata,
w_noisy,
thred=0.05,
std_prior=0.1,
n_cluster_thred=3):
# Compute gene correlation matrix
Cs = np.corrcoef(adata.layers['Ms'].T)
Cu = np.corrcoef(adata.layers['Mu'].T)
C = 1+Cs*0.5+Cu*0.5
C[np.isnan(C)] = 0.0
# Spectral clustering
spc = SpectralClustering(get_nclusters(C, n_cluster_thred=n_cluster_thred),
affinity='precomputed',
assign_labels='discretize',
random_state=42)
y = spc.fit_predict(C)
adata.var['init_mode'] = y
# Sample weights from Dirichlet(mu=0.5, std=std_prior)
alpha_neutral = find_dirichlet_param(0.5, std_prior)
q_neutral = dirichlet.rvs(alpha_neutral, size=adata.n_vars)[:, 0]
w = np.empty((adata.n_vars))
pval_ind = []
pval_rep = []
cluster_type = np.zeros((y.max()+1))
alpha_ind, alpha_rep = find_dirichlet_param(0.6, std_prior), find_dirichlet_param(0.4, std_prior)
# Perform Kolmogorov-Smirnov Test
for i in range(y.max()+1):
n = np.sum(y == i)
res_1, pval_1 = kstest(w_noisy[y == i], q_neutral, alternative='greater', method='asymp')
res_2, pval_2 = kstest(w_noisy[y == i], q_neutral, alternative='less', method='asymp')
pval_ind.append(pval_1)
pval_rep.append(pval_2)
if pval_1 < thred and pval_2 < thred: # uni/bi-modal dirichlet
cluster_type[i] = 0
res_3, pval_3 = kstest(w_noisy[y == i], q_neutral)
if pval_3 < thred:
km = KMeans(2, n_init='auto')
yw = km.fit_predict(w_noisy[y == i].reshape(-1, 1))
w[y == i] = sample_dir_mix(w_noisy[y == i], yw, std_prior)
else:
np.random.seed(42)
w[y == i] = dirichlet.rvs(alpha_neutral, size=n)[:, 0]
elif pval_1 >= 0.05: # induction
cluster_type[i] = 1
np.random.seed(42)
w[y == i] = dirichlet.rvs(alpha_ind, size=n)[:, 0]
elif pval_2 >= 0.05: # repression
cluster_type[i] = 2
np.random.seed(42)
w[y == i] = dirichlet.rvs(alpha_rep, size=n)[:, 0]
pval_ind = np.array(pval_ind)
pval_rep = np.array(pval_rep)
print(f'KS-test result: {cluster_type}')
# If no repressive cluster is found, pick the one with the highest p value
if np.all(cluster_type == 1):
ymax = np.argmax(pval_rep)
print(f'Assign cluster {ymax} to repressive')
np.random.seed(42)
w[y == ymax] = dirichlet.rvs(alpha_rep, size=np.sum(y == ymax))[:, 0]
# If no inductive cluster is found, pick the one with the highest p value
if np.all(cluster_type == 2):
ymax = np.argmax(pval_ind)
print(f'Assign cluster {ymax} to inductive')
np.random.seed(42)
w[y == ymax] = dirichlet.rvs(alpha_ind, size=np.sum(y == ymax))[:, 0]
return w
def assign_gene_mode(adata,
w_noisy,
assign_type='binary',
thred=0.05,
std_prior=0.1,
n_cluster_thred=3):
# Assign one of ('inductive', 'repressive', 'mixture') to gene clusters
# `assign_type` specifies which strategy to use
if assign_type == 'binary':
return assign_gene_mode_binary(adata, w_noisy, thred)
elif assign_type == 'auto':
return assign_gene_mode_auto(adata, w_noisy, thred, std_prior, n_cluster_thred)
elif assign_type == 'inductive':
alpha_ind = find_dirichlet_param(0.8, std_prior)
np.random.seed(42)
return dirichlet.rvs(alpha_ind, size=adata.n_vars)[:, 0]
elif assign_type == 'repressive':
alpha_rep = find_dirichlet_param(0.2, std_prior)
np.random.seed(42)
return dirichlet.rvs(alpha_rep, size=adata.n_vars)[:, 0]
def assign_gene_mode_tprior(adata, tkey, train_idx, std_prior=0.05):
# Same as assign_gene_mode, but uses the informative time prior
# to determine inductive and repressive genes
tprior = adata.obs[tkey].to_numpy()[train_idx]
alpha_ind, alpha_rep = find_dirichlet_param(0.75, std_prior), find_dirichlet_param(0.25, std_prior)
w = np.empty((adata.n_vars))
slope = np.empty((adata.n_vars))
for i in range(adata.n_vars):
slope_u, intercept_u, r_u, p_u, se = linregress(tprior, adata.layers['Mu'][train_idx, i])
slope_s, intercept_s, r_s, p_s, se = linregress(tprior, adata.layers['Ms'][train_idx, i])
slope[i] = (slope_u*0.5+slope_s*0.5)
np.random.seed(42)
w[slope >= 0] = dirichlet.rvs(alpha_ind, size=np.sum(slope >= 0))[:, 0]
np.random.seed(42)
w[slope < 0] = dirichlet.rvs(alpha_rep, size=np.sum(slope < 0))[:, 0]
# return 1/(1+np.exp(-slope))
return w
############################################################
# Vanilla VAE
############################################################
"""
ODE Solution, with both numpy (for post-training analysis or plotting) and pytorch versions (for training)
"""
def pred_steady_numpy(ts, alpha, beta, gamma):
############################################################
# (Numpy Version)
# Predict the steady states.
# ts: [G] switching time, when the kinetics enters the repression phase
# alpha, beta, gamma: [G] generation, splicing and degradation rates
############################################################
eps = 1e-6
unstability = np.abs(beta-gamma) < eps
ts_ = ts.squeeze()
expb, expg = np.exp(-beta*ts_), np.exp(-gamma*ts_)
u0 = alpha/(beta+eps)*(1.0-expb)
s0 = alpha/(gamma+eps)*(1.0-expg)+alpha/(gamma-beta+eps)*(expg-expb)*(1-unstability)-alpha*ts_*expg*unstability
return u0, s0
def pred_steady(tau_s, alpha, beta, gamma):
############################################################
# (PyTorch Version)
# Predict the steady states.
# tau_s: [G] time duration from ton to toff
# alpha, beta, gamma: [G] generation, splicing and degradation rates
############################################################
eps = 1e-6
unstability = (torch.abs(beta - gamma) < eps).long()
expb, expg = torch.exp(-beta*tau_s), torch.exp(-gamma*tau_s)
u0 = alpha/(beta+eps)*(torch.tensor([1.0]).to(alpha.device)-expb)
s0 = alpha/(gamma+eps)*(torch.tensor([1.0]).to(alpha.device)-expg) \
+ alpha/(gamma-beta+eps)*(expg-expb)*(1-unstability)-alpha*tau_s*expg*unstability
return u0, s0
def ode_numpy(t, alpha, beta, gamma, to, ts, scaling=None, k=10.0):
"""(Numpy Version) ODE solution with fixed rates
Args:
t (:class:`numpy.ndarray`): Cell time, (N,1)
alpha (:class:`numpy.ndarray`): Transcription rates
beta (:class:`numpy.ndarray`): Splicing rates
gamma (:class:`numpy.ndarray`): Degradation rates
to (:class:`numpy.ndarray`): switch-on time
ts (:class:`numpy.ndarray`): switch-off time (induction to repression)
scaling (:class:`numpy.ndarray`, optional): Scaling factor (u / s). Defaults to None.
k (float, optional): Parameter for a smooth clip of tau. Defaults to 10.0.
Returns:
tuple:
returns the unspliced and spliced counts predicted by the ODE
"""
eps = 1e-6
unstability = (np.abs(beta - gamma) < eps)
o = (t <= ts).astype(int)
# Induction
tau_on = F.softplus(torch.tensor(t-to), beta=k).numpy()
assert np.all(~np.isnan(tau_on))
expb, expg = np.exp(-beta*tau_on), np.exp(-gamma*tau_on)
uhat_on = alpha/(beta+eps)*(1.0-expb)
shat_on = alpha/(gamma+eps)*(1.0-expg) \
+ alpha/(gamma-beta+eps)*(expg-expb)*(1-unstability) - alpha*tau_on*expg*unstability
# Repression
u0_, s0_ = pred_steady_numpy(np.clip(ts-to, 0, None), alpha, beta, gamma) # tensor shape: (G)
if ts.ndim == 2 and to.ndim == 2:
u0_ = u0_.reshape(-1, 1)
s0_ = s0_.reshape(-1, 1)
# tau_off = np.clip(t-ts,a_min=0,a_max=None)
tau_off = F.softplus(torch.tensor(t-ts), beta=k).numpy()
assert np.all(~np.isnan(tau_off))
expb, expg = np.exp(-beta*tau_off), np.exp(-gamma*tau_off)
uhat_off = u0_*expb
shat_off = s0_*expg+(-beta*u0_)/(gamma-beta+eps)*(expg-expb)*(1-unstability)
uhat, shat = (uhat_on*o + uhat_off*(1-o)), (shat_on*o + shat_off*(1-o))
if scaling is not None:
uhat *= scaling
return uhat, shat
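# Usage sketch (argument names and shapes are illustrative): with cell times t of
# shape (N, 1) and per-gene rate parameters of shape (G,), broadcasting yields
# (N, G) predictions:
#   uhat, shat = ode_numpy(t, alpha, beta, gamma, ton, toff, scaling=scaling)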
def ode(t, alpha, beta, gamma, to, ts, neg_slope=0.0):
"""(PyTorch Version) ODE Solution
Parameters are the same as the numpy version, with arrays replaced with
tensors. Additionally, neg_slope is used for time clipping.
"""
eps = 1e-6
unstability = (torch.abs(beta - gamma) < eps).long()
o = (t <= ts).int()
# Induction
tau_on = F.leaky_relu(t-to, negative_slope=neg_slope)
expb, expg = torch.exp(-beta*tau_on), torch.exp(-gamma*tau_on)
uhat_on = alpha/(beta+eps)*(torch.tensor([1.0]).to(alpha.device)-expb)
shat_on = alpha/(gamma+eps)*(torch.tensor([1.0]).to(alpha.device)-expg) \
+ (alpha/(gamma-beta+eps)*(expg-expb)*(1-unstability) - alpha*tau_on*expg * unstability)
# Repression
u0_, s0_ = pred_steady(F.relu(ts-to), alpha, beta, gamma)
tau_off = F.leaky_relu(t-ts, negative_slope=neg_slope)
expb, expg = torch.exp(-beta*tau_off), torch.exp(-gamma*tau_off)
uhat_off = u0_*expb
shat_off = s0_*expg+(-beta*u0_)/(gamma-beta+eps)*(expg-expb) * (1-unstability)
return (uhat_on*o + uhat_off*(1-o)), (shat_on*o + shat_off*(1-o))
############################################################
# Branching ODE
############################################################
def encode_type(cell_types_raw):
"""Use integer to encode the cell types
Args:
cell_types_raw (array like):
unique cell types in a dataset
Returns:
tuple containing:
- label_dic (dict): mapping from cell types to integers
- label_dic_rev (dict): inverse mapping from integers to cell types
"""
# Map cell types to integers
label_dic = {}
label_dic_rev = {}
for i, type_ in enumerate(cell_types_raw):
label_dic[type_] = i
label_dic_rev[i] = type_
return label_dic, label_dic_rev
def str2int(cell_labels_raw, label_dic):
"""Convert cell type annotations to integers
Args:
cell_labels_raw (array like):
Cell type annotations
label_dic (dict):
mapping from cell types to integers
Returns:
:class:`numpy.ndarray`:
Integer encodings of cell type annotations.
"""
return np.array([label_dic[cell_labels_raw[i]] for i in range(len(cell_labels_raw))])
def int2str(cell_labels, label_dic_rev):
"""Convert integer encodings to original cell type annotations
Args:
cell_labels (array like):
Integer encodings of cell type annotations
label_dic_rev (dict):
mapping from integers to cell types
Returns:
:class:`numpy.ndarray`:
Original cell type annotations.
"""
return np.array([label_dic_rev[cell_labels[i]] for i in range(len(cell_labels))])
def linreg_mtx(u, s):
############################################################
# Performs linear regression ||U-kS||_2 while
# U and S are matrices and k is a vector.
# Handles divide by zero by returning some default value.
############################################################
Q = np.sum(s*s, axis=0)
R = np.sum(u*s, axis=0)
k = R/Q
if np.isinf(k) or np.isnan(k):
k = 1.5
return k
def reinit_type_params(U, S, t, ts, cell_labels, cell_types, init_types):
############################################################
# Applied under branching ODE
# Use the steady-state model and estimated cell time to initialize
# branching ODE parameters.
############################################################
Ntype = len(cell_types)
G = U.shape[1]
alpha, beta, gamma = np.ones((Ntype, G)), np.ones((Ntype, G)), np.ones((Ntype, G))
u0, s0 = np.zeros((len(init_types), G)), np.zeros((len(init_types), G))
for i, type_ in enumerate(cell_types):
mask_type = cell_labels == type_
# Determine induction or repression
t_head = np.quantile(t[mask_type], 0.05)
t_mid = (t_head+np.quantile(t[mask_type], 0.95))*0.5
u_head = np.mean(U[(t >= t[mask_type].min()) & (t < t_head), :], axis=0)
u_mid = np.mean(U[(t >= t_mid*0.98) & (t <= t_mid*1.02), :], axis=0)
s_head = np.mean(S[(t >= t[mask_type].min()) & (t < t_head), :], axis=0)
s_mid = np.mean(S[(t >= t_mid*0.98) & (t <= t_mid*1.02), :], axis=0)
o = u_head + s_head < u_mid + s_mid
# Determine ODE parameters
U_type, S_type = U[cell_labels == type_], S[cell_labels == type_]
for g in range(G):
p_low, p_high = 0.05, 0.95
u_low = np.quantile(U_type[:, g], p_low)
s_low = np.quantile(S_type[:, g], p_low)
u_high = np.quantile(U_type[:, g], p_high)
s_high = np.quantile(S_type[:, g], p_high)
# edge cases
while (u_high == 0 or s_high == 0) and p_high < 1.0:
p_high += 0.01
u_high = np.quantile(U_type[:, g], p_high)
s_high = np.quantile(S_type[:, g], p_high)
if u_high == 0:
gamma[type_, g] = 0.01
continue
elif s_high == 0:
gamma[type_, g] = 1.0
continue
mask_high = (U_type[:, g] >= u_high) | (S_type[:, g] >= s_high)
mask_low = (U_type[:, g] <= u_low) | (S_type[:, g] <= s_low)
mask_q = mask_high | mask_low
u_q = U_type[mask_q, g]
s_q = S_type[mask_q, g]
slope = linreg_mtx(u_q-U_type[:, g].min(), s_q-S_type[:, g].min())
if slope == 1:
slope = 1 + 0.1*np.random.rand()
gamma[type_, g] = np.clip(slope, 0.01, None)
alpha[type_] = (np.quantile(U_type, 0.95, axis=0) - np.quantile(U_type, 0.05, axis=0)) * o \
+ (np.quantile(U_type, 0.95, axis=0) - np.quantile(U_type, 0.05, axis=0)) \
* (1-o) * np.random.rand(G) * 0.001 + 1e-10
for i, type_ in enumerate(init_types):
mask_type = cell_labels == type_
t_head = np.quantile(t[mask_type], 0.03)
u0[i] = np.mean(U[(t >= t[mask_type].min()) & (t <= t_head)], axis=0)+1e-10
s0[i] = np.mean(S[(t >= t[mask_type].min()) & (t <= t_head)], axis=0)+1e-10
return alpha, beta, gamma, u0, s0
def get_x0_tree(par, neg_slope=0.0, eps=1e-6, **kwargs):
# Compute initial conditions by sequentially traversing the tree
# Returns scaled u0
alpha, beta, gamma = kwargs['alpha'], kwargs['beta'], kwargs['gamma'] # tensor shape: (N type, G)
t_trans = kwargs['t_trans']
scaling = kwargs["scaling"]
n_type, G = alpha.shape
u0 = torch.empty(n_type, G, dtype=torch.float32, device=alpha.device)
s0 = torch.empty(n_type, G, dtype=torch.float32, device=alpha.device)
self_idx = torch.tensor(range(n_type), dtype=par.dtype, device=alpha.device)
roots = torch.where(par == self_idx)[0] # the parent of any root is itself
u0_root, s0_root = kwargs['u0_root'], kwargs['s0_root'] # tensor shape: (n roots, G), u0 unscaled
u0[roots] = u0_root/scaling
s0[roots] = s0_root
par[roots] = -1 # avoid revisiting the root in the while loop
count = len(roots)
progenitors = roots
while count < n_type:
cur_level = torch.cat([torch.where(par == x)[0] for x in progenitors])
tau0 = F.leaky_relu(t_trans[cur_level] - t_trans[par[cur_level]], neg_slope).view(-1, 1)
u0_hat, s0_hat = pred_su(tau0,
u0[par[cur_level]],
s0[par[cur_level]],
alpha[par[cur_level]],
beta[par[cur_level]],
gamma[par[cur_level]])
u0[cur_level] = u0_hat
s0[cur_level] = s0_hat
progenitors = cur_level
count += len(cur_level)
par[roots] = roots
return u0, s0
def ode_br(t, y, par, neg_slope=0.0, eps=1e-6, **kwargs):
"""(PyTorch Version) Branching ODE solution.
See the documentation of ode_br_numpy for details.
"""
alpha, beta, gamma = kwargs['alpha'], kwargs['beta'], kwargs['gamma'] # tensor shape: (N type, G)
t_trans = kwargs['t_trans']
scaling = kwargs["scaling"]
u0, s0 = get_x0_tree(par, neg_slope, **kwargs)
# For cells with a time violation, fall back to the parent cell type
par_batch = par[y]
mask = (t >= t_trans[y].view(-1, 1)).float()
tau = F.leaky_relu(t - t_trans[y].view(-1, 1), neg_slope) * mask \
+ F.leaky_relu(t - t_trans[par_batch].view(-1, 1), neg_slope) * (1-mask)
u0_batch = u0[y] * mask + u0[par_batch] * (1-mask)
s0_batch = s0[y] * mask + s0[par_batch] * (1-mask) # tensor shape: (N type, G)
uhat, shat = pred_su(tau,
u0_batch,
s0_batch,
alpha[y] * mask + alpha[par_batch] * (1-mask),
beta[y] * mask + beta[par_batch] * (1-mask),
gamma[y] * mask + gamma[par_batch] * (1-mask))
return uhat * scaling, shat
def get_x0_tree_numpy(par, eps=1e-6, **kwargs):
# Compute initial conditions by sequentially traversing the tree
# Returns scaled u0
alpha, beta, gamma = kwargs['alpha'], kwargs['beta'], kwargs['gamma'] # tensor shape: (N type, G)
t_trans = kwargs['t_trans']
scaling = kwargs["scaling"]
n_type, G = alpha.shape
u0 = np.empty((n_type, G))
s0 = np.empty((n_type, G))
self_idx = np.array(range(n_type))
roots = np.where(par == self_idx)[0] # the parent of any root is itself
u0_root, s0_root = kwargs['u0_root'], kwargs['s0_root'] # tensor shape: (n roots, G), u0 unscaled
u0[roots] = u0_root/scaling
s0[roots] = s0_root
par[roots] = -1
count = len(roots)
progenitors = roots
while count < n_type:
cur_level = np.concatenate([np.where(par == x)[0] for x in progenitors])
tau0 = np.clip(t_trans[cur_level] - t_trans[par[cur_level]], 0, None).reshape(-1, 1)
u0_hat, s0_hat = pred_su_numpy(tau0,
u0[par[cur_level]],
s0[par[cur_level]],
alpha[par[cur_level]],
beta[par[cur_level]],
gamma[par[cur_level]])
u0[cur_level] = u0_hat
s0[cur_level] = s0_hat
progenitors = cur_level
count += len(cur_level)
par[roots] = roots
return u0, s0
def ode_br_numpy(t, y, par, eps=1e-6, **kwargs):
"""
(Numpy Version)
Branching ODE solution.
Args:
t (:class:`numpy.ndarray`):
Cell time, (N,1)
y (:class:`numpy.ndarray`):
Cell type, encoded in integer, (N,)
par (:class:`numpy.ndarray`):
Parent cell type in the transition graph, (N_type,)
kwargs:
alpha (:class:`numpy.ndarray`):
Transcription rates, (cell type by gene ).
beta (:class:`numpy.ndarray`):
Splicing rates, (cell type by gene ).
gamma (:class:`numpy.ndarray`):
Degradation rates, (cell type by gene ).
t_trans (:class:`numpy.ndarray`):
Start time of splicing dynamics of each cell type.
scaling (:class:`numpy.ndarray`):
Genewise scaling factor between unspliced and spliced counts.
Returns:
tuple containing:
- :class:`numpy.ndarray`: Predicted u values, (N, G)
- :class:`numpy.ndarray`: Predicted s values, (N, G)
"""
alpha, beta, gamma = kwargs['alpha'], kwargs['beta'], kwargs['gamma'] # array shape: (N type, G)
t_trans = kwargs['t_trans']
scaling = kwargs["scaling"]
u0, s0 = get_x0_tree_numpy(par, **kwargs)
n_type, G = alpha.shape
uhat, shat = np.zeros((len(y), G)), np.zeros((len(y), G))
for i in range(n_type):
mask = (t[y == i] >= t_trans[i])
tau = np.clip(t[y == i].reshape(-1, 1) - t_trans[i], 0, None) * mask \
+ np.clip(t[y == i].reshape(-1, 1) - t_trans[par[i]], 0, None) * (1-mask)
uhat_i, shat_i = pred_su_numpy(tau,
u0[i]*mask+u0[par[i]]*(1-mask),
s0[i]*mask+s0[par[i]]*(1-mask),
alpha[i]*mask+alpha[[par[i]]]*(1-mask),
beta[i]*mask+beta[[par[i]]]*(1-mask),
gamma[i]*mask+gamma[[par[i]]]*(1-mask))
uhat[y == i] = uhat_i
shat[y == i] = shat_i
return uhat*scaling, shat
############################################################
# KNN-Related Functions
############################################################
def _hist_equal(t, t_query, perc=0.95, n_bin=51):
# Perform histogram equalization across all local times.
tmax = t.max() - t.min()
t_ub = np.quantile(t, perc)
t_lb = t.min()
delta_t = (t_ub - t_lb)/(n_bin-1)
bins = [t_lb+i*delta_t for i in range(n_bin)]+[t.max()+0.01]
pdf_t, edges = np.histogram(t, bins, density=True)
pt, edges = np.histogram(t, bins, density=False)
# Perform histogram equalization
cdf_t = np.concatenate(([0], np.cumsum(pt)))
cdf_t = cdf_t/cdf_t[-1]
t_out = np.zeros((len(t)))
t_out_query = np.zeros((len(t_query)))
for i in range(n_bin):
mask = (t >= bins[i]) & (t < bins[i+1])
t_out[mask] = (cdf_t[i] + (t[mask]-bins[i])*pdf_t[i])*tmax
mask_q = (t_query >= bins[i]) & (t_query < bins[i+1])
t_out_query[mask_q] = (cdf_t[i] + (t_query[mask_q]-bins[i])*pdf_t[i])*tmax
return t_out, t_out_query
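# Sketch of the mapping above: within bin i, a time value is pushed through a
# piecewise-linear approximation of the empirical CDF,
#   t_out = (CDF(bins[i]) + (t - bins[i]) * pdf[i]) * (t.max() - t.min()),
# so densely populated time intervals retain more resolution after equalization.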
def knnx0(U, S,
t,
z,
t_query,
z_query,
dt,
k,
u0_init=None,
s0_init=None,
adaptive=0.0,
std_t=None,
forward=False,
hist_eq=False):
############################################################
# Given cell time and state, find KNN for each cell in a time window ahead of
# it. The KNNs are used to compute the initial condition for the ODE of
# the cell.
# 1-2. U,S [2D array (N,G)]
# Unspliced and Spliced count matrix
# 3-4. t [1D array (N)], z [2D array (N, dim_z)]
#        Latent cell time and state used to build the KNN graph
# 5-6. t_query, z_query
#        Query cell time and state
# 7. dt [float tuple]
# Time window coefficient
# 8. k [int]
# Number of neighbors
# 9-10. u0_init, s0_init [1D array]
# Default initial condition when a cell has very few neighbors
# in its time window
# 11. adaptive [float]
#        When set to a positive value, neighbors will be chosen from
#        [t-adaptive*std_t, t-adaptive*std_t+delta_t]
# 12. std_t [1D array (N)]
#        Posterior standard deviation of cell time
# 13. forward [bool]
#        Whether to look for ancestors or descendants
# 14. hist_eq [bool]
# Whether to perform histogram equalization to time.
# The purpose is to preserve more resolution in
# densely populated time intervals.
############################################################
Nq = len(t_query)
u0 = (np.zeros((Nq, U.shape[1])) if u0_init is None
else np.tile(u0_init, (Nq, 1)))
s0 = (np.zeros((Nq, S.shape[1])) if s0_init is None
else np.tile(s0_init, (Nq, 1)))
t0 = np.ones((Nq))*(t.min() - dt[0])
t_knn = t
n1 = 0
len_avg = 0
if hist_eq: # time histogram equalization
t, t_query = _hist_equal(t, t_query)
# Used as the default u/s counts at the final time point
t_98 = np.quantile(t, 0.98)
p = 0.98
while not np.any(t >= t_98) and p > 0.01:
p = p - 0.01
t_98 = np.quantile(t, p)
u_end, s_end = U[t >= t_98].mean(0), S[t >= t_98].mean(0)
for i in trange(Nq): # iterate through every cell
if adaptive > 0:
dt_r, dt_l = adaptive*std_t[i], adaptive*std_t[i] + (dt[1]-dt[0])
else:
dt_r, dt_l = dt[0], dt[1]
if forward:
t_ub, t_lb = t_query[i] + dt_l, t_query[i] + dt_r
else:
t_ub, t_lb = t_query[i] - dt_r, t_query[i] - dt_l
indices = np.where((t >= t_lb) & (t < t_ub))[0] # filter out cells in the bin
k_ = len(indices)
delta_t = dt[1] - dt[0] # increment / decrement of the time window boundary
while k_ < k and t_lb > t.min() - (dt[1] - dt[0]) and t_ub < t.max() + (dt[1] - dt[0]):
if forward:
t_lb = t_query[i]
t_ub = t_ub + delta_t
else:
t_lb = t_lb - delta_t
t_ub = t_query[i]
indices = np.where((t >= t_lb) & (t < t_ub))[0] # filter out cells in the bin
k_ = len(indices)
len_avg = len_avg + k_
if k_ > 0:
k_neighbor = k if k_ > k else max(1, k_//2)
knn_model = NearestNeighbors(n_neighbors=k_neighbor)
knn_model.fit(z[indices])
dist, ind = knn_model.kneighbors(z_query[i:i+1])
u0[i] = np.mean(U[indices[ind.squeeze()].astype(int)], 0)
s0[i] = np.mean(S[indices[ind.squeeze()].astype(int)], 0)
t0[i] = np.mean(t_knn[indices[ind.squeeze()].astype(int)])
else:
if forward:
u0[i] = u_end
s0[i] = s_end
t0[i] = t_98 + (t_98-t.min()) * 0.01
n1 = n1+1
print(f"Percentage of Invalid Sets: {n1/Nq:.3f}")
print(f"Average Set Size: {len_avg//Nq}")
return u0, s0, t0
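# Usage sketch (argument values are hypothetical): querying the training cells
# against themselves,
#   u0, s0, t0 = knnx0(U, S, t, z, t, z, dt=(0.01, 0.05), k=30)
# returns, for each cell, the average unspliced/spliced counts and time of its
# nearest neighbors inside the preceding time window; these serve as the per-cell
# ODE initial conditions (u0, s0) at time t0.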
def knnx0_index(t,
z,
t_query,
z_query,
dt,
k,
adaptive=0.0,
std_t=None,
forward=False,
hist_eq=False):
############################################################
# Same functionality as knnx0, but returns the neighbor index
############################################################
Nq = len(t_query)
n1 = 0
len_avg = 0
if hist_eq:
t, t_query = _hist_equal(t, t_query)
neighbor_index = []
for i in trange(Nq):
if adaptive > 0:
dt_r, dt_l = adaptive*std_t[i], adaptive*std_t[i] + (dt[1]-dt[0])
else:
dt_r, dt_l = dt[0], dt[1]
if forward:
t_ub, t_lb = t_query[i] + dt_l, t_query[i] + dt_r
else:
t_ub, t_lb = t_query[i] - dt_r, t_query[i] - dt_l
indices = np.where((t >= t_lb) & (t < t_ub))[0]
k_ = len(indices)
delta_t = dt[1] - dt[0] # increment / decrement of the time window boundary
while k_ < k and t_lb > t.min() - (dt[1] - dt[0]) and t_ub < t.max() + (dt[1] - dt[0]):
if forward:
t_lb = t_query[i]
t_ub = t_ub + delta_t
else:
t_lb = t_lb - delta_t
t_ub = t_query[i]
indices = np.where((t >= t_lb) & (t < t_ub))[0] # filter out cells in the bin
k_ = len(indices)
len_avg = len_avg + k_
if k_ > 1:
k_neighbor = k if k_ > k else max(1, k_//2)
knn_model = NearestNeighbors(n_neighbors=k_neighbor)
knn_model.fit(z[indices])
dist, ind = knn_model.kneighbors(z_query[i:i+1])
if isinstance(ind, int):
ind = np.array([ind])
neighbor_index.append(indices[ind.flatten()].astype(int))
elif k_ == 1:
neighbor_index.append(indices)
else:
neighbor_index.append([])
n1 = n1+1
print(f"Percentage of Invalid Sets: {n1/Nq:.3f}")
print(f"Average Set Size: {len_avg//Nq}")
return neighbor_index
def get_x0(U,
S,
t,
dt,
neighbor_index,
u0_init=None,
s0_init=None,
forward=False):
N = len(neighbor_index) # training + validation
u0 = (np.zeros((N, U.shape[1])) if u0_init is None
else np.tile(u0_init, (N, 1)))
s0 = (np.zeros((N, S.shape[1])) if s0_init is None
else np.tile(s0_init, (N, 1)))
t0 = np.ones((N))*(t.min() - dt[0])
# Used as the default u/s counts at the final time point
t_98 = np.quantile(t, 0.98)
p = 0.98
while not np.any(t >= t_98) and p > 0.01:
p = p - 0.01
t_98 = np.quantile(t, p)
u_end, s_end = U[t >= t_98].mean(0), S[t >= t_98].mean(0)
for i in range(N):
if len(neighbor_index[i]) > 0:
u0[i] = U[neighbor_index[i]].mean(0)
s0[i] = S[neighbor_index[i]].mean(0)
t0[i] = t[neighbor_index[i]].mean()
elif forward:
u0[i] = u_end
s0[i] = s_end
t0[i] = t_98 + (t_98-t.min()) * 0.01
return u0, s0, t0
def knn_transition_prob(t,
z,
t_query,
z_query,
cell_labels,
n_type,
dt,
k,
soft_assign=True):
############################################################
# Compute the frequency of cell type transition based on windowed KNN.
# Used in transition graph construction.
############################################################
N, Nq = len(t), len(t_query)
P = np.zeros((n_type, n_type))
t0 = np.zeros((n_type))
sigma_t = np.zeros((n_type))
for i in range(n_type):
t0[i] = np.quantile(t[cell_labels == i], 0.01)
sigma_t[i] = t[cell_labels == i].std()
if soft_assign:
A = np.zeros((N, N))
for i in range(Nq):
t_ub, t_lb = t_query[i] - dt[0], t_query[i] - dt[1]
indices = np.where((t >= t_lb) & (t < t_ub))[0]
k_ = len(indices)
if k_ > 0:
if k_ <= k:
A[i, indices] = 1
else:
knn_model = NearestNeighbors(n_neighbors=k)
knn_model.fit(z[indices])
dist, ind = knn_model.kneighbors(z_query[i:i+1])
A[i, indices[ind.squeeze()]] = 1
for i in range(n_type):
for j in range(n_type):
P[i, j] = A[cell_labels == i][:, cell_labels == j].sum()
else:
A = np.zeros((N, n_type))
for i in range(Nq):
t_ub, t_lb = t_query[i] - dt[0], t_query[i] - dt[1]
indices = np.where((t >= t_lb) & (t < t_ub))[0]
k_ = len(indices)
if k_ > 0:
if k_ <= k:
knn_model = NearestNeighbors(n_neighbors=min(k, k_))
knn_model.fit(z[indices])
dist, ind = knn_model.kneighbors(z_query[i:i+1])
knn_label = cell_labels[indices][ind.squeeze()]
else:
knn_label = cell_labels[indices]
n_par = np.array([np.sum(knn_label == i) for i in range(n_type)])
A[i, np.argmax(n_par)] = 1
for i in range(n_type):
P[i] = A[cell_labels == i].sum(0)
psum = P.sum(1)
psum[psum == 0] = 1
return P/(psum.reshape(-1, 1))
############################################################
# ELBO term related to categorical variables in BasisVAE
# Reference:
# Märtens, K. & Yau, C. (2020). BasisVAE: Translation-
# invariant feature-level clustering with Variational Autoencoders.
# Proceedings of the Twenty Third International Conference on
# Artificial Intelligence and Statistics, in Proceedings of
# Machine Learning Research 108:2928-2937.
# Available from https://proceedings.mlr.press/v108/martens20b.html.
############################################################
def elbo_collapsed_categorical(logits_phi, alpha, K, N):
phi = torch.softmax(logits_phi, dim=1)
if alpha.ndim == 1:
sum_alpha = alpha.sum()
pseudocounts = phi.sum(dim=0) # n basis
term1 = torch.lgamma(sum_alpha) - torch.lgamma(sum_alpha + N)
term2 = (torch.lgamma(alpha + pseudocounts) - torch.lgamma(alpha)).sum()
else:
sum_alpha = alpha.sum(axis=-1) # n vars
pseudocounts = phi.sum(dim=0)
term1 = (torch.lgamma(sum_alpha) - torch.lgamma(sum_alpha + N)).mean(0)
term2 = (torch.lgamma(alpha + pseudocounts) - torch.lgamma(alpha)).mean(0).sum()
E_q_logq = (phi * torch.log(phi + 1e-16)).sum()
return -term1 - term2 + E_q_logq
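# Sketch of the quantity above (following the BasisVAE reference cited in this
# section): with q(c_n) = Categorical(phi_n) and the Dirichlet(alpha) prior over
# the mixture weights collapsed out, the relevant ELBO contribution is
#   log Gamma(sum_k alpha_k) - log Gamma(sum_k alpha_k + N)
#   + sum_k [log Gamma(alpha_k + n_k) - log Gamma(alpha_k)] + H(q),
# where n_k = sum_n phi_{nk} are the soft pseudo-counts and H(q) is the entropy
# of q. The function returns the negative of this expression so that it can be
# added directly to a loss that is minimized.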
def entropy(logits_phi):
phi = torch.softmax(logits_phi, dim=1)
return (phi * torch.log(phi + 1e-16)).sum()
############################################################
# Other Auxiliary Functions
############################################################
def get_gene_index(genes_all, gene_list):
gind = []
gremove = []
for gene in gene_list:
matches = np.where(genes_all == gene)[0]
if len(matches) == 1:
gind.append(matches[0])
elif len(matches) == 0:
print(f'Warning: Gene {gene} not found! Ignored.')
gremove.append(gene)
else:
gind.append(matches[0])
print(f'Warning: Gene {gene} has multiple matches. Picking the first one.')
gene_list = list(gene_list)
for gene in gremove:
gene_list.remove(gene)
return gind, gene_list
def convert_time(t):
"""Convert the time in sec into the format: hour:minute:second
"""
hour = int(t//3600)
minute = int((t - hour*3600)//60)
second = int(t - hour*3600 - minute*60)
return f"{hour:3d} h : {minute:2d} m : {second:2d} s"
def sample_genes(adata, n, key, mode='top', q=0.5):
if mode == 'random':
return np.random.choice(adata.var_names, n, replace=False)
val_sorted = adata.var[key].sort_values(ascending=False)
genes_sorted = val_sorted.index.to_numpy()
if mode == 'quantile':
N = np.sum(val_sorted.to_numpy() >= q)
return np.random.choice(genes_sorted[:N], min(n, N), replace=False)
return genes_sorted[:n]
def add_capture_time(adata, tkey, save_key="tprior"):
capture_time = adata.obs[tkey].to_numpy()
if isinstance(capture_time[0], str):
j = 0
while not ('0' <= capture_time[0][j] <= '9'):
j = j+1
tprior = np.array([float(x[j:]) for x in capture_time])
else:
tprior = capture_time
tprior = tprior - tprior.min() + 0.01
adata.obs["tprior"] = tprior
def add_cell_cluster(adata, cluster_key, save_key="clusters"):
cell_labels = adata.obs[cluster_key].to_numpy()
adata.obs["clusters"] = np.array([str(x) for x in cell_labels])
|
PypiClean
|
/outlookcalendarsyncer-0.1.0.tar.gz/outlookcalendarsyncer-0.1.0/README.md
|
# OutlookCalendarSyncer
## How to setup dev environment
### Prerequisites
The following software is required to run the application:
* Python 3
* [Poetry](https://python-poetry.org)
* [Pyenv](https://github.com/pyenv/pyenv)
### Run
```shell
# Install required python version
$ pyenv install 3.9.6
# Bind the required python version to this directory
$ pyenv local 3.9.6
# Install poetry
$ pip3 install poetry
# Resolve dependencies
$ poetry install
# Check - all tests should pass
$ poetry run pytest
# Sync the configured calendars
$ poetry run python -m outlookcalendarsyncer
```
## How to build package
```shell
$ poetry build
```
## How to test
```shell
$ poetry run pytest
```
|
PypiClean
|
/stix-ramrod-1.2.0.zip/stix-ramrod-1.2.0/ramrod/stix/base.py
|
# stdlib
import itertools
# internal
from ramrod import base, errors, utils
# external
from six import iteritems
class BaseSTIXUpdater(base.BaseUpdater):
"""Base class for STIX updating code. Sets default values for
STIX-specific xpaths and namespaces.
"""
DEFAULT_VOCAB_NAMESPACE = 'http://stix.mitre.org/default_vocabularies-1'
XPATH_VERSIONED_NODES = (
"//stix:STIX_Package | "
"//indicator:Indicator[@version] | "
"//stix:Indicator[@version] | "
"//stixCommon:Indicator[@version] | "
"//incident:Incident[@version] | "
"//stix:Incident[@version] | "
"//stixCommon:Incident[@version] | "
"//ttp:TTP[@version] | "
"//stix:TTP[@version] | "
"//stixCommon:TTP[@version] | "
"//coa:Course_Of_Action[@version] | "
"//stix:Course_Of_Action[@version] | "
"//stixCommon:Course_Of_Action[@version] |"
"//ta:Threat_Actor[@version]| "
"//stix:Threat_Actor[@version] | "
"//stixCommon:Threat_Actor[@version] | "
"//campaign:Campaign[@version] | "
"//stix:Campaign[@version] | "
"//stixCommon:Campaign[@version] | "
"//et:Exploit_Target[@version] | "
"//stix:Exploit_Target[@version] | "
"//stixCommon:Exploit_Target[@version]"
)
XPATH_ROOT_NODES = "//stix:STIX_Package"
CYBOX_UPDATER = None
def __init__(self):
super(BaseSTIXUpdater, self).__init__()
self._init_cybox_updater()
def _init_cybox_updater(self):
"""Returns an initialized instance of a _CyboxUpdater implementation.
Note:
This needs to be implemented by derived classes.
"""
if not self.CYBOX_UPDATER:
self._cybox_updater = None
return
updater = self.CYBOX_UPDATER() # noqa
updater.NSMAP = dict(
itertools.chain(
iteritems(self.NSMAP),
iteritems(self.CYBOX_UPDATER.NSMAP)
)
)
self._cybox_updater = updater
@classmethod
def get_version(cls, package):
"""Returns the version of the `package` ``STIX_Package`` element by
inspecting its ``version`` attribute.
"""
return package.attrib.get('version')
def _check_version(self, root):
"""Checks that the version of the document `root` is valid for an
implementation of ``_BaseUpdater``.
Note:
The ``version`` attribute of `root` is compared against the
``VERSION`` class-level attribute.
Raises:
.UnknownVersionError: If `root` does not contain a ``version``
attribute.
.InvalidVersionError: If the ``version`` attribute value for
`root` does not match the value of ``VERSION``.
"""
roots = self._get_root_nodes(root)
expected = self.VERSION
for node in roots:
found = self.get_version(node)
if not found:
error = "Unable to determine the version of the STIX document."
raise errors.UnknownVersionError(error)
if utils.is_version_equal(found, expected):
return
error = "Document version does not match the expected version."
raise errors.InvalidVersionError(
message=error,
node=node,
expected=expected,
found=found
)
class STIXVocab(base.Vocab):
VOCAB_NAMESPACE = BaseSTIXUpdater.DEFAULT_VOCAB_NAMESPACE
|
PypiClean
|
/diode-1.0.2.tar.gz/diode-1.0.2/ext/pybind11/docs/advanced/pycpp/numpy.rst
|
.. _numpy:
NumPy
#####
Buffer protocol
===============
Python supports an extremely general and convenient approach for exchanging
data between plugin libraries. Types can expose a buffer view [#f2]_, which
provides fast direct access to the raw internal data representation. Suppose we
want to bind the following simplistic Matrix class:
.. code-block:: cpp
class Matrix {
public:
Matrix(size_t rows, size_t cols) : m_rows(rows), m_cols(cols) {
m_data = new float[rows*cols];
}
float *data() { return m_data; }
size_t rows() const { return m_rows; }
size_t cols() const { return m_cols; }
private:
size_t m_rows, m_cols;
float *m_data;
};
The following binding code exposes the ``Matrix`` contents as a buffer object,
making it possible to cast Matrices into NumPy arrays. It is even possible to
completely avoid copy operations with Python expressions like
``np.array(matrix_instance, copy = False)``.
.. code-block:: cpp
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
sizeof(float), /* Size of one scalar */
py::format_descriptor<float>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.cols() }, /* Buffer dimensions */
{ sizeof(float) * m.cols(), /* Strides (in bytes) for each index */
sizeof(float) }
);
});
Supporting the buffer protocol in a new type involves specifying the special
``py::buffer_protocol()`` tag in the ``py::class_`` constructor and calling the
``def_buffer()`` method with a lambda function that creates a
``py::buffer_info`` description record on demand describing a given matrix
instance. The contents of ``py::buffer_info`` mirror the Python buffer protocol
specification.
.. code-block:: cpp
struct buffer_info {
void *ptr;
py::ssize_t itemsize;
std::string format;
py::ssize_t ndim;
std::vector<py::ssize_t> shape;
std::vector<py::ssize_t> strides;
};
To create a C++ function that can take a Python buffer object as an argument,
simply use the type ``py::buffer`` as one of its arguments. Buffers can exist
in a great variety of configurations, hence some safety checks are usually
necessary in the function body. Below, you can see a basic example on how to
define a custom constructor for the Eigen double precision matrix
(``Eigen::MatrixXd``) type, which supports initialization from compatible
buffer objects (e.g. a NumPy matrix).
.. code-block:: cpp
/* Bind MatrixXd (or some other Eigen type) to Python */
typedef Eigen::MatrixXd Matrix;
typedef Matrix::Scalar Scalar;
constexpr bool rowMajor = Matrix::Flags & Eigen::RowMajorBit;
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def(py::init([](py::buffer b) {
typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> Strides;
/* Request a buffer descriptor from Python */
py::buffer_info info = b.request();
/* Some basic validation checks ... */
if (info.format != py::format_descriptor<Scalar>::format())
throw std::runtime_error("Incompatible format: expected a double array!");
if (info.ndim != 2)
throw std::runtime_error("Incompatible buffer dimension!");
auto strides = Strides(
info.strides[rowMajor ? 0 : 1] / (py::ssize_t)sizeof(Scalar),
info.strides[rowMajor ? 1 : 0] / (py::ssize_t)sizeof(Scalar));
auto map = Eigen::Map<Matrix, 0, Strides>(
static_cast<Scalar *>(info.ptr), info.shape[0], info.shape[1], strides);
return Matrix(map);
}));
For reference, the ``def_buffer()`` call for this Eigen data type should look
as follows:
.. code-block:: cpp
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
sizeof(Scalar), /* Size of one scalar */
py::format_descriptor<Scalar>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.cols() }, /* Buffer dimensions */
{ sizeof(Scalar) * (rowMajor ? m.cols() : 1),
sizeof(Scalar) * (rowMajor ? 1 : m.rows()) }
/* Strides (in bytes) for each index */
);
})
For a much easier approach of binding Eigen types (although with some
limitations), refer to the section on :doc:`/advanced/cast/eigen`.
.. seealso::
The file :file:`tests/test_buffers.cpp` contains a complete example
that demonstrates using the buffer protocol with pybind11 in more detail.
.. [#f2] http://docs.python.org/3/c-api/buffer.html
Arrays
======
By exchanging ``py::buffer`` with ``py::array`` in the above snippet, we can
restrict the function so that it only accepts NumPy arrays (rather than any
type of Python object satisfying the buffer protocol).
In many situations, we want to define a function which only accepts a NumPy
array of a certain data type. This is possible via the ``py::array_t<T>``
template. For instance, the following function requires the argument to be a
NumPy array containing double precision values.
.. code-block:: cpp
void f(py::array_t<double> array);
When it is invoked with a different type (e.g. an integer or a list of
integers), the binding code will attempt to cast the input into a NumPy array
of the requested type. This feature requires the :file:`pybind11/numpy.h`
header to be included. Note that :file:`pybind11/numpy.h` does not depend on
the NumPy headers, and thus can be used without declaring a build-time
dependency on NumPy; NumPy>=1.7.0 is a runtime dependency.
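As an illustrative sketch (assuming ``f`` above has been bound with ``m.def("f", &f)`` and the
extension compiled into a module here called ``example``), the automatic conversion behaves as
follows:
.. code-block:: python
import numpy as np
import example  # hypothetical name of the compiled module exposing f
example.f(np.array([1.0, 2.0, 3.0]))  # float64 array, accepted as-is
example.f([1, 2, 3])                  # list of ints is converted to a float64 array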
Data in NumPy arrays is not guaranteed to be packed in a dense manner;
furthermore, entries can be separated by arbitrary column and row strides.
Sometimes, it can be useful to require a function to only accept dense arrays
using either the C (row-major) or Fortran (column-major) ordering. This can be
accomplished via a second template argument with values ``py::array::c_style``
or ``py::array::f_style``.
.. code-block:: cpp
void f(py::array_t<double, py::array::c_style | py::array::forcecast> array);
The ``py::array::forcecast`` argument is the default value of the second
template parameter, and it ensures that non-conforming arguments are converted
into an array satisfying the specified requirements instead of trying the next
function overload.
There are several methods on arrays; the methods listed below under references
work, as well as the following functions based on the NumPy API:
- ``.dtype()`` returns the type of the contained values.
- ``.strides()`` returns a pointer to the strides of the array (optionally pass
an integer axis to get a number).
- ``.flags()`` returns the flag settings. ``.writable()`` and ``.owndata()``
are directly available.
- ``.offset_at()`` returns the offset (optionally pass indices).
- ``.squeeze()`` returns a view with length-1 axes removed.
- ``.view(dtype)`` returns a view of the array with a different dtype.
- ``.reshape({i, j, ...})`` returns a view of the array with a different shape.
``.resize({...})`` is also available.
- ``.index_at(i, j, ...)`` gets the count from the beginning to a given index.
There are also several methods for getting references (described below).
Structured types
================
In order for ``py::array_t`` to work with structured (record) types, we first
need to register the memory layout of the type. This can be done via
``PYBIND11_NUMPY_DTYPE`` macro, called in the plugin definition code, which
expects the type followed by field names:
.. code-block:: cpp
struct A {
int x;
double y;
};
struct B {
int z;
A a;
};
// ...
PYBIND11_MODULE(test, m) {
// ...
PYBIND11_NUMPY_DTYPE(A, x, y);
PYBIND11_NUMPY_DTYPE(B, z, a);
/* now both A and B can be used as template arguments to py::array_t */
}
The structure should consist of fundamental arithmetic types, ``std::complex``,
previously registered substructures, and arrays of any of the above. Both C++
arrays and ``std::array`` are supported. While there is a static assertion to
prevent many types of unsupported structures, it is still the user's
responsibility to use only "plain" structures that can be safely manipulated as
raw memory without violating invariants.
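On the Python side, a matching structured array can be built with NumPy and passed to any
function bound with ``py::array_t<A>``. The snippet below is a sketch rather than part of the
upstream documentation: ``align=True`` is used so that field offsets follow typical C struct
padding, and ``example.process_a`` stands in for whatever function was actually bound:
.. code-block:: python
import numpy as np
# dtype mirroring struct A { int x; double y; }; align=True reproduces the usual C padding
A_dtype = np.dtype([("x", np.int32), ("y", np.float64)], align=True)
arr = np.zeros(3, dtype=A_dtype)
arr["x"] = [1, 2, 3]
arr["y"] = [0.5, 1.5, 2.5]
# example.process_a(arr)  # hypothetical function bound with py::array_t<A>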
Vectorizing functions
=====================
Suppose we want to bind a function with the following signature to Python so
that it can process arbitrary NumPy array arguments (vectors, matrices, general
N-D arrays) in addition to its normal arguments:
.. code-block:: cpp
double my_func(int x, float y, double z);
After including the ``pybind11/numpy.h`` header, this is extremely simple:
.. code-block:: cpp
m.def("vectorized_func", py::vectorize(my_func));
Invoking the function like below causes 4 calls to be made to ``my_func`` with
each of the array elements. The significant advantage of this compared to
solutions like ``numpy.vectorize()`` is that the loop over the elements runs
entirely on the C++ side and can be crunched down into a tight, optimized loop
by the compiler. The result is returned as a NumPy array of type
``numpy.dtype.float64``.
.. code-block:: pycon
>>> x = np.array([[1, 3], [5, 7]])
>>> y = np.array([[2, 4], [6, 8]])
>>> z = 3
>>> result = vectorized_func(x, y, z)
The scalar argument ``z`` is transparently replicated 4 times. The input
arrays ``x`` and ``y`` are automatically converted into the right types (they
are of type ``numpy.dtype.int64`` but need to be ``numpy.dtype.int32`` and
``numpy.dtype.float32``, respectively).
.. note::
Only arithmetic, complex, and POD types passed by value or by ``const &``
reference are vectorized; all other arguments are passed through as-is.
Functions taking rvalue reference arguments cannot be vectorized.
In cases where the computation is too complicated to be reduced to
``vectorize``, it will be necessary to create and access the buffer contents
manually. The following snippet contains a complete example that shows how this
works (the code is somewhat contrived, since it could have been done more
simply using ``vectorize``).
.. code-block:: cpp
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;
py::array_t<double> add_arrays(py::array_t<double> input1, py::array_t<double> input2) {
py::buffer_info buf1 = input1.request(), buf2 = input2.request();
if (buf1.ndim != 1 || buf2.ndim != 1)
throw std::runtime_error("Number of dimensions must be one");
if (buf1.size != buf2.size)
throw std::runtime_error("Input shapes must match");
/* No pointer is passed, so NumPy will allocate the buffer */
auto result = py::array_t<double>(buf1.size);
py::buffer_info buf3 = result.request();
double *ptr1 = static_cast<double *>(buf1.ptr);
double *ptr2 = static_cast<double *>(buf2.ptr);
double *ptr3 = static_cast<double *>(buf3.ptr);
for (size_t idx = 0; idx < buf1.shape[0]; idx++)
ptr3[idx] = ptr1[idx] + ptr2[idx];
return result;
}
PYBIND11_MODULE(test, m) {
m.def("add_arrays", &add_arrays, "Add two NumPy arrays");
}
.. seealso::
The file :file:`tests/test_numpy_vectorize.cpp` contains a complete
example that demonstrates using :func:`vectorize` in more detail.
Direct access
=============
For performance reasons, particularly when dealing with very large arrays, it
is often desirable to directly access array elements without internal checking
of dimensions and bounds on every access when indices are known to be already
valid. To avoid such checks, the ``array`` class and ``array_t<T>`` template
class offer an unchecked proxy object that can be used for this unchecked
access through the ``unchecked<N>`` and ``mutable_unchecked<N>`` methods,
where ``N`` gives the required dimensionality of the array:
.. code-block:: cpp
m.def("sum_3d", [](py::array_t<double> x) {
auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable
double sum = 0;
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
for (py::ssize_t k = 0; k < r.shape(2); k++)
sum += r(i, j, k);
return sum;
});
m.def("increment_3d", [](py::array_t<double> x) {
auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
for (py::ssize_t k = 0; k < r.shape(2); k++)
r(i, j, k) += 1.0;
}, py::arg().noconvert());
To obtain the proxy from an ``array`` object, you must specify both the data
type and number of dimensions as template arguments, such as ``auto r =
myarray.mutable_unchecked<float, 2>()``.
If the number of dimensions is not known at compile time, you can omit the
dimensions template parameter (i.e. calling ``arr_t.unchecked()`` or
``arr.unchecked<T>()``). This will give you a proxy object that works in the
same way, but results in less optimizable code and thus a small efficiency
loss in tight loops.
Note that the returned proxy object directly references the array's data, and
only reads its shape, strides, and writeable flag when constructed. You must
take care to ensure that the referenced array is not destroyed or reshaped for
the duration of the returned object, typically by limiting the scope of the
returned instance.
The returned proxy object supports some of the same methods as ``py::array`` so
that it can be used as a drop-in replacement for some existing, index-checked
uses of ``py::array``:
- ``.ndim()`` returns the number of dimensions
- ``.data(1, 2, ...)`` and ``.mutable_data(1, 2, ...)`` return a pointer to
the ``const T`` or ``T`` data, respectively, at the given indices. The
latter is only available to proxies obtained via ``a.mutable_unchecked()``.
- ``.itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``.
- ``.ndim()`` returns the number of dimensions.
- ``.shape(n)`` returns the size of dimension ``n``
- ``.size()`` returns the total number of elements (i.e. the product of the shapes).
- ``.nbytes()`` returns the number of bytes used by the referenced elements
(i.e. ``itemsize()`` times ``size()``).
.. seealso::
The file :file:`tests/test_numpy_array.cpp` contains additional examples
demonstrating the use of this feature.
Ellipsis
========
Python provides a convenient ``...`` ellipsis notation that is often used to
slice multidimensional arrays. For instance, the following snippet extracts the
middle dimensions of a tensor with the first and last index set to zero.
.. code-block:: python
a = ... # a NumPy array
b = a[0, ..., 0]
The ``py::ellipsis()`` function can be used to perform the same
operation on the C++ side:
.. code-block:: cpp
py::array a = /* A NumPy array */;
py::array b = a[py::make_tuple(0, py::ellipsis(), 0)];
Memory view
===========
For a case when we simply want to provide a direct accessor to C/C++ buffer
without a concrete class object, we can return a ``memoryview`` object. Suppose
we wish to expose a ``memoryview`` for a 2x4 ``uint8_t`` array, we can do the
following:
.. code-block:: cpp
const uint8_t buffer[] = {
0, 1, 2, 3,
4, 5, 6, 7
};
m.def("get_memoryview2d", []() {
return py::memoryview::from_buffer(
buffer, // buffer pointer
{ 2, 4 }, // shape (rows, cols)
{ sizeof(uint8_t) * 4, sizeof(uint8_t) } // strides in bytes
);
});
This approach is meant for providing a ``memoryview`` for a C/C++ buffer not
managed by Python. The user is responsible for managing the lifetime of the
buffer. Using a ``memoryview`` created in this way after deleting the buffer in
C++ side results in undefined behavior.
We can also use ``memoryview::from_memory`` for a simple 1D contiguous buffer:
.. code-block:: cpp
m.def("get_memoryview1d", []() {
return py::memoryview::from_memory(
buffer, // buffer pointer
sizeof(uint8_t) * 8 // buffer size
);
});
.. versionchanged:: 2.6
``memoryview::from_memory`` added.
|
PypiClean
|
/bce-sam-translator-0.0.1.tar.gz/bce-sam-translator-0.0.1/samtranslator/intrinsics/actions.py
|
import re
from six import string_types
class Action(object):
"""
Base class for intrinsic function actions. Each intrinsic function must subclass this,
override the intrinsic_name, and provide a resolve() method
Subclasses would be working on the JSON representation of an intrinsic function like {Ref: foo} than the YAML
version !Ref foo because the input is already in JSON.
"""
_resource_ref_separator = "."
intrinsic_name = None
def __init__(self):
if not self.intrinsic_name:
raise TypeError("Subclass must provide a intrinsic_name")
def resolve_parameter_refs(self, input_dict, parameters):
"""
Subclass must implement this method to resolve the intrinsic function
"""
raise NotImplementedError("Subclass must implement this method")
def resolve_resource_refs(self, input_dict, supported_resource_refs):
"""
Subclass must implement this method to resolve resource references
"""
raise NotImplementedError("Subclass must implement this method")
def can_handle(self, input_dict):
"""
Validates that the input dictionary contains only one key and is of the given intrinsic_name
:param input_dict: Input dictionary representing the intrinsic function
:return: True if it matches expected structure, False otherwise
"""
return input_dict is not None \
and isinstance(input_dict, dict) \
and len(input_dict) == 1 \
and self.intrinsic_name in input_dict
@classmethod
def _parse_resource_reference(cls, ref_value):
"""
Splits a resource reference of structure "LogicalId.Property" and returns the "LogicalId" and "Property" separately.
:param string ref_value: Input reference value which *may* contain the structure "LogicalId.Property"
:return string, string: Returns two values - logical_id, property. If the input does not contain the structure,
then both `logical_id` and property will be None
"""
no_result = (None, None)
if not isinstance(ref_value, string_types):
return no_result
splits = ref_value.split(cls._resource_ref_separator, 1)
# Either there is no 'dot' (or) one of the values is empty string (Ex: when you split "LogicalId.")
if len(splits) != 2 or not all(splits):
return no_result
return splits[0], splits[1]
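# Illustrative doctest-style sketch (not part of the original module):
# >>> Action._parse_resource_reference("MyFunction.Alias")
# ('MyFunction', 'Alias')
# >>> Action._parse_resource_reference("MyFunction")
# (None, None)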
class RefAction(Action):
intrinsic_name = "Ref"
def resolve_parameter_refs(self, input_dict, parameters):
"""
Resolves references that are present in the parameters and returns the value. If it is not in parameters,
this method simply returns the input unchanged.
:param input_dict: Dictionary representing the Ref function. Must contain only one key and it should be "Ref".
Ex: {Ref: "foo"}
:param parameters: Dictionary of parameter values for resolution
:return:
"""
if not self.can_handle(input_dict):
return input_dict
param_name = input_dict[self.intrinsic_name]
if not isinstance(param_name, string_types):
return input_dict
if param_name in parameters:
return parameters[param_name]
else:
return input_dict
def resolve_resource_refs(self, input_dict, supported_resource_refs):
"""
Resolves references to some property of a resource. These are runtime properties which can't be converted
to a value here. Instead we output another reference that will more actually resolve to the value when
executed via CloudFormation
Example:
{"Ref": "LogicalId.Property"} => {"Ref": "SomeOtherLogicalId"}
:param dict input_dict: Dictionary representing the Ref function to be resolved.
:param samtranslator.intrinsics.resource_refs.SupportedResourceReferences supported_resource_refs: Instance of
an `SupportedResourceReferences` object that contain value of the property.
:return dict: Dictionary with resource references resolved.
"""
if not self.can_handle(input_dict):
return input_dict
ref_value = input_dict[self.intrinsic_name]
logical_id, property = self._parse_resource_reference(ref_value)
# ref_value could not be parsed
if not logical_id:
return input_dict
resolved_value = supported_resource_refs.get(logical_id, property)
if not resolved_value:
return input_dict
return {
self.intrinsic_name: resolved_value
}
class SubAction(Action):
intrinsic_name = "Fn::Sub"
def resolve_parameter_refs(self, input_dict, parameters):
"""
Substitute references found within the string of `Fn::Sub` intrinsic function
:param input_dict: Dictionary representing the Fn::Sub function. Must contain only one key and it should be
`Fn::Sub`. Ex: {"Fn::Sub": ...}
:param parameters: Dictionary of parameter values for substitution
:return: Resolved value of the intrinsic function
"""
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
value = input_dict[key]
def do_replacement(full_ref, prop_name):
"""
Replace parameter references with their actual values. The return value of this method directly replaces the
reference structure
:param full_ref: => ${logicalId.property}
:param prop_name: => logicalId.property
:return: Either the value it resolves to, or the original reference if it cannot be resolved
"""
return parameters.get(prop_name, full_ref)
input_dict[key] = self._handle_sub_value(value, do_replacement)
return input_dict
def resolve_resource_refs(self, input_dict, supported_resource_refs):
"""
Resolves reference to some property of a resource. Inside string to be substituted, there could be either a
"Ref" or a "GetAtt" usage of this property. They have to be handled differently.
Ref usages are directly converted to a Ref on the resolved value. GetAtt usages are split under the assumption
that there can be only one property of resource referenced here. Everything else is an attribute reference.
Example:
Let's say `LogicalId.Property` will be resolved to `ResolvedValue`
Ref usage:
${LogicalId.Property} => ${ResolvedValue}
GetAtt usage:
${LogicalId.Property.Arn} => ${ResolvedValue.Arn}
${LogicalId.Property.Attr1.Attr2} => ${ResolvedValue.Attr1.Attr2}
:param input_dict: Dictionary to be resolved
:param samtranslator.intrinsics.resource_refs.SupportedResourceReferences supported_resource_refs: Instance of
an `SupportedResourceReferences` object that contain value of the property.
:return: Resolved dictionary
"""
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
sub_value = input_dict[key]
def do_replacement(full_ref, ref_value):
"""
Perform the appropriate replacement to handle ${LogicalId.Property} type references inside a Sub.
This method is called to get the replacement string for each reference within Sub's value
:param full_ref: Entire reference string such as "${LogicalId.Property}"
:param ref_value: Just the value of the reference such as "LogicalId.Property"
:return: Resolved reference of the structure "${SomeOtherLogicalId}". Result should always include the
${} structure since we are not resolving to final value, but just converting one reference to another
"""
# Split the value by separator, expecting to separate out LogicalId.Property
splits = ref_value.split(self._resource_ref_separator)
# If we don't find at least two parts, there is nothing to resolve
if len(splits) < 2:
return full_ref
logical_id = splits[0]
property = splits[1]
resolved_value = supported_resource_refs.get(logical_id, property)
if not resolved_value:
# This ID/property combination is not in the supported references
return full_ref
# We found a LogicalId.Property combination that can be resolved. Construct the output by replacing
# the part of the reference string and not constructing a new ref. This allows us to support GetAtt-like
# syntax and retain other attributes. Ex: ${LogicalId.Property.Arn} => ${SomeOtherLogicalId.Arn}
replacement = self._resource_ref_separator.join([logical_id, property])
return full_ref.replace(replacement, resolved_value)
input_dict[key] = self._handle_sub_value(sub_value, do_replacement)
return input_dict
def _handle_sub_value(self, sub_value, handler_method):
"""
Generic method to handle value to Fn::Sub key. We are interested in parsing the ${} syntaxes inside
the string portion of the value.
:param sub_value: Value of the Sub function
:param handler_method: Method to be called on every occurrence of `${LogicalId}` structure within the string.
Implementation could resolve and replace this structure with whatever they seem fit
:return: Resolved value of the Sub dictionary
"""
# Just handle known references within the string to be substituted and return the whole dictionary
# because that's the best we can do here.
if isinstance(sub_value, string_types):
# Ex: {Fn::Sub: "some string"}
sub_value = self._sub_all_refs(sub_value, handler_method)
elif isinstance(sub_value, list) and len(sub_value) > 0 and isinstance(sub_value[0], string_types):
# Ex: {Fn::Sub: ["some string", {a:b}] }
sub_value[0] = self._sub_all_refs(sub_value[0], handler_method)
return sub_value
def _sub_all_refs(self, text, handler_method):
"""
Substitute references within a string that is using ${key} syntax by calling the `handler_method` on every
occurrence of this structure. The value returned by this method directly replaces the reference structure.
Ex:
text = "${key1}-hello-${key2}
def handler_method(full_ref, ref_value):
return "foo"
_sub_all_refs(text, handler_method) will output "foo-hello-foo"
:param string text: Input text
:param handler_method: Method to be called to handle each occurrence of ${blah} reference structure.
First parameter to this method is the full reference structure Ex: ${LogicalId.Property}. Second parameter is just the
value of the reference such as "LogicalId.Property"
:return string: Text with all reference structures replaced as necessary
"""
# RegExp to find pattern "${logicalId.property}" and return the word inside bracket
logical_id_regex = r'[A-Za-z0-9\.]+'
ref_pattern = re.compile(r'\$\{('+logical_id_regex+')\}')
# Find all the pattern, and call the handler to decide how to substitute them.
# Do the substitution and return the final text
return re.sub(ref_pattern,
# Pass the handler entire string ${logicalId.property} as first parameter and "logicalId.property"
# as second parameter. Return value will be substituted
lambda match: handler_method(match.group(0), match.group(1)),
text)
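# Illustrative doctest-style sketch of the method above (not part of the original module):
# >>> SubAction()._sub_all_refs("${A.Prop}-and-${B}", lambda full, val: val.upper())
# 'A.PROP-and-B'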
class GetAttAction(Action):
intrinsic_name = "Fn::GetAtt"
def resolve_parameter_refs(self, input_dict, parameters):
# Parameters can never be referenced within GetAtt value
return input_dict
def resolve_resource_refs(self, input_dict, supported_resource_refs):
"""
Resolve resource references within a GetAtt dict.
Example:
{ "Fn::GetAtt": ["LogicalId.Property", "Arn"] } => {"Fn::GetAtt": ["ResolvedLogicalId", "Arn"]}
Theoretically, only the first element of the array can contain reference to SAM resources. The second element
is name of an attribute (like Arn) of the resource.
However tools like AWS CLI apply the assumption that first element of the array is a LogicalId and cannot
contain a 'dot'. So they break at the first dot to convert YAML tag to JSON map like this:
`!GetAtt LogicalId.Property.Arn` => {"Fn::GetAtt": [ "LogicalId", "Property.Arn" ] }
Therefore to resolve the reference, we join the array into a string, break it back up to check if it contains
a known reference, and resolve it if we can.
:param input_dict: Dictionary to be resolved
:param samtranslator.intrinsics.resource_refs.SupportedResourceReferences supported_resource_refs: Instance of
an `SupportedResourceReferences` object that contain value of the property.
:return: Resolved dictionary
"""
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
value = input_dict[key]
# Value must be an array with *at least* two elements. If not, this is invalid GetAtt syntax. We just pass along
# the input to CFN for it to do the "official" validation.
if not isinstance(value, list) or len(value) < 2:
return input_dict
# Value of GetAtt is an array. It can contain any number of elements, with first being the LogicalId of
# resource and rest being the attributes. In a SAM template, a reference to a resource can be used in the
# first parameter. However tools like AWS CLI might break them down as well. So let's just concatenate
# all elements, and break them into separate parts in a more standard way.
#
# Example:
# { Fn::GetAtt: ["LogicalId.Property", "Arn"] } is equivalent to { Fn::GetAtt: ["LogicalId", "Property.Arn"] }
# Former is the correct notation. However tools like AWS CLI can construct the later style.
# Let's normalize the value into "LogicalId.Property.Arn" to handle both scenarios
value_str = self._resource_ref_separator.join(value)
splits = value_str.split(self._resource_ref_separator)
logical_id = splits[0]
property = splits[1]
remaining = splits[2:] # if any
resolved_value = supported_resource_refs.get(logical_id, property)
if resolved_value:
# We resolved to a new resource logicalId. Use this as the first element and keep remaining elements intact
# This is the new value of Fn::GetAtt
input_dict[key] = [resolved_value] + remaining
return input_dict
|
PypiClean
|
/sage_engine-2.3.0-py3-none-any.whl/sage/cli/postgres.py
|
import coloredlogs
import logging
import time
import click
import psycopg2
import pylru
import sage.cli.postgres_utils as psql_utils
from psycopg2.extras import execute_values
from sage.cli.utils import load_graph, get_nb_triples
from sage.cli.parsers import ParserFactory
coloredlogs.install(level='INFO', fmt='%(asctime)s - %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
def connect_postgres(graph):
"""Try to connect to a PostgreSQL server"""
if 'dbname' not in graph or 'user' not in graph or 'password' not in graph:
logger.error("Error: a valid PostgreSQL graph must be declared with fields 'dbname', 'user' and 'password'")
return None
dbname = graph['dbname']
user = graph['user']
password = graph['password']
host = graph['host'] if 'host' in graph else ''
port = int(graph['port']) if 'port' in graph else 5432
return psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)
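# Minimal sketch (hypothetical values) of the `graph` dictionary expected by connect_postgres();
# 'host' and 'port' are optional and default to '' and 5432 respectively:
#
#   graph = {
#       'dbname': 'sage',
#       'user': 'sage_user',
#       'password': 'secret',
#       'host': 'localhost',
#       'port': 5432,
#   }
#   connection = connect_postgres(graph)  # psycopg2 connection, or None if required fields are missing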
@click.command()
@click.argument("config")
@click.argument("graph_name")
@click.option('--index/--no-index', default=True,
help="Enable/disable indexing of SQL tables. The indexes can be created separately using the command sage-postgres-index")
def init_postgres(config, graph_name, index):
"""Initialize the RDF graph GRAPH_NAME with a PostgreSQL backend, described in the configuration file CONFIG."""
# load graph from config file
graph, backend = load_graph(config, graph_name, logger, backends=['postgres', 'postgres-mvcc', 'postgres-catalog'])
# init postgre connection
logger.info("Connecting to the PostgreSQL server...")
connection = connect_postgres(graph)
if connection is None:
logger.error('Failed to establish a connection with PostgreSQL')
exit(1)
logger.info("Connected to the PostgreSQL server")
# turn off autocommit
connection.autocommit = False
# create a cursor to interact with the database
cursor = connection.cursor()
# create the main SQL tables
logger.info("Creating PostgreSQL tables...")
create_table_queries = psql_utils.get_create_tables_queries(graph_name, backend)
for query in create_table_queries:
cursor.execute(query)
logger.info("PostgreSQL tables successfully created")
# create the additional indexes on OSP and POS
if index:
logger.info("Creating additional B-tree indexes...")
create_indexes_queries = psql_utils.get_create_indexes_queries(graph_name, backend)
for query in create_indexes_queries:
cursor.execute(query)
logger.info("Additional B-tree indexes successfully created")
else:
logger.info("Skipping additional indexes creation on user-demand")
# commit and cleanup connection
logger.info("Committing and cleaning up...")
connection.commit()
cursor.close()
connection.close()
logger.info(f"Sage PostgreSQL model for graph '{graph_name}' successfully initialized")
@click.command()
@click.argument("config")
@click.argument("graph_name")
def index_postgres(config, graph_name):
"""Create the additional B-tree indexes on the RDF graph GRAPH_NAME, described in the configuration file CONFIG."""
# load graph from config file
graph, backend = load_graph(config, graph_name, logger, backends=['postgres', 'postgres-mvcc', 'postgres-catalog'])
# init PostgreSQL connection
logger.info("Connecting to the PostgreSQL server...")
connection = connect_postgres(graph)
if connection is None:
logger.error('Failed to establish a connection with PostgreSQL')
exit(1)
logger.info("Connected to the PostgreSQL server")
# turn off autocommit
connection.autocommit = False
# create a cursor to interact with the database
cursor = connection.cursor()
# create indexes
start = time.time()
logger.info("Creating additional B-tree indexes...")
create_indexes_queries = psql_utils.get_create_indexes_queries(graph_name, backend)
for query in create_indexes_queries:
cursor.execute(query)
stop = time.time()
logger.info(f"Additional B-tree indexes successfully created in {stop - start}s")
# rebuild table statistics
logger.info("Rebuilding table statistics...")
start = time.time()
cursor.execute(psql_utils.get_analyze_query(graph_name))
logger.info(f"Table statistics successfully rebuilt in {time.time() - start}s")
# commit and cleanup connection
logger.info("Committing and cleaning up...")
connection.commit()
cursor.close()
connection.close()
logger.info(f"Sage PostgreSQL model for graph '{graph_name}' successfully initialized")
def insert_bucket(cursor, bucket, graph_name, backend, block_size, cache):
if backend == 'postgres' or backend == 'postgres-mvcc':
insert_query = psql_utils.get_insert_into_query(graph_name)
execute_values(cursor, insert_query, bucket, page_size=block_size)
elif backend == 'postgres-catalog':
# Insert terms into the catalog
insert_query = psql_utils.get_insert_into_catalog_query()
values = list()
terms_index = dict()
cached_identifiers = dict()
for (s, p, o) in bucket:
if s in cache:
cached_identifiers[s] = cache[s]
elif s not in terms_index:
terms_index[s] = len(values)
values.append([s])
if p in cache:
cached_identifiers[p] = cache[p]
elif p not in terms_index:
terms_index[p] = len(values)
values.append([p])
if o in cache:
cached_identifiers[o] = cache[o]
elif o not in terms_index:
terms_index[o] = len(values)
values.append([o])
terms_identifier = execute_values(cursor, insert_query, values, page_size=block_size, fetch=True)
# Insert triples where terms are replaced by their identifier
insert_query = psql_utils.get_insert_into_query(graph_name)
values = list()
for (s, p, o) in bucket:
if s in cached_identifiers:
subject_id = cached_identifiers[s]
else:
subject_id = terms_identifier[terms_index[s]]
if p in cached_identifiers:
predicate_id = cached_identifiers[p]
else:
predicate_id = terms_identifier[terms_index[p]]
if o in cached_identifiers:
object_id = cached_identifiers[o]
else:
object_id = terms_identifier[terms_index[o]]
cache[s] = subject_id
cache[p] = predicate_id
cache[o] = object_id
values.append((subject_id, predicate_id, object_id))
execute_values(cursor, insert_query, values, page_size=block_size)
else:
raise Exception(f'Unknown backend for PostgreSQL: {backend}')
@click.command()
@click.argument("rdf_file")
@click.argument("config")
@click.argument("graph_name")
@click.option("-f", "--format", type=click.Choice(["nt", "hdt"]),
default="nt", show_default=True,
help="Format of the input file. Supported: nt (N-triples) and hdt (HDT).")
@click.option("--block-size", type=int,
default=100, show_default=True,
help="Block size used for the bulk loading")
@click.option("--commit-threshold", type=int,
default=500000, show_default=True,
help="Commit after sending this number of RDF triples")
@click.option("--cache-size", type=int,
default=300, show_default=True,
help="Store terms identifier when using the catalog schema to improve loading performance")
def put_postgres(config, graph_name, rdf_file, format, block_size, commit_threshold, cache_size):
"""Insert RDF triples from file RDF_FILE into the RDF graph GRAPH_NAME, described in the configuration file CONFIG. The graph must use the PostgreSQL or PostgreSQL-MVCC backend."""
# load graph from config file
graph, backend = load_graph(config, graph_name, logger, backends=['postgres', 'postgres-mvcc', 'postgres-catalog'])
# init PostgreSQL connection
logger.info("Connecting to the PostgreSQL server...")
connection = connect_postgres(graph)
if connection is None:
logger.error('Failed to establish a connection with PostgreSQL')
exit(1)
logger.info("Connected to the PostgreSQL server")
# turn off autocommit
connection.autocommit = False
# create a cursor to interact with the database
cursor = connection.cursor()
logger.info("Reading RDF source file...")
nb_triples = get_nb_triples(rdf_file, format)
logger.info(f"Found ~{nb_triples} RDF triples to ingest.")
start = time.time()
to_commit = 0
inserted = 0
dropped = 0
cache = pylru.lrucache(cache_size)
with click.progressbar(length=nb_triples, label=f"Inserting RDF triples 0/{nb_triples} - {dropped} triples dropped.") as bar:
def on_bucket(bucket):
nonlocal to_commit, inserted, dropped
insert_bucket(cursor, bucket, graph_name, backend, block_size, cache)
to_commit = to_commit + len(bucket)
if to_commit >= commit_threshold:
connection.commit()
to_commit = 0
inserted = inserted + len(bucket)
bar.label = f"Inserting RDF triples {inserted}/{nb_triples} - {dropped} triples dropped."
bar.update(len(bucket))
def on_error(error):
nonlocal dropped, inserted
dropped = dropped + 1
bar.label = f"Inserting RDF triples {inserted}/{nb_triples} - {dropped} triples dropped."
bar.update(0)
def on_complete():
nonlocal start
logger.info(f"Triples ingestion successfully completed in {time.time() - start}s")
logger.info("Rebuilding table statistics...")
start = time.time()
cursor.execute(psql_utils.get_analyze_query(graph_name))
logger.info(f"Table statistics successfully rebuilt in {time.time() - start}s")
logger.info("Committing and cleaning up...")
connection.commit()
cursor.close()
connection.close()
logger.info(f"RDF data from file '{rdf_file}' successfully inserted into RDF graph '{graph_name}'")
logger.info("Starting RDF triples ingestion...")
parser = ParserFactory.create_parser(format, block_size)
parser.on_bucket = on_bucket
parser.on_error = on_error
parser.on_complete = on_complete
parser.parsefile(rdf_file)
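# Hypothetical command-line usage, assuming these click commands are exposed as console
# scripts (only `sage-postgres-index` is named explicitly above; the other two names are
# assumptions following the same pattern):
#
#   sage-postgres-init  config.yaml my_graph --no-index
#   sage-postgres-index config.yaml my_graph
#   sage-postgres-put   dataset.nt config.yaml my_graph --format nt --commit-threshold 500000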
|
PypiClean
|
/fedml-afaf-0.7.327.tar.gz/fedml-afaf-0.7.327/README.md
|
# FedML - The community building and connecting AI anywhere at any scale
https://doc.fedml.ai
## Mission
FedML builds simple and versatile APIs for machine learning running anywhere at any scale.
In other words, FedML supports both federated learning for data silos and distributed training for acceleration, with MLOps and open-source support, covering industrial-grade use cases and cutting-edge academic research.
- Distributed Training: Accelerate Model Training with Lightweight Cheetah
- Simulator: (1) simulate FL using a single process (2) MPI-based FL Simulator (3) NCCL-based FL Simulator (fastest)
- Cross-silo Federated Learning for cross-organization/account training, including Python-based edge SDK
- Cross-device Federated Learning for Smartphones and IoTs, including edge SDK for Android/iOS and embedded Linux.
- Model Serving: we focus on providing a better user experience for edge AI.
- MLOps: FedML's machine learning operation pipeline for AI running anywhere at any scale.
## Source Code Structure
The functionality of each package is as follows:
**core**: The FedML low-level API package. This package implements distributed computing via communication backends such as MPI, NCCL, MQTT, gRPC, and PyTorch RPC, and also supports topology management.
Other low-level APIs related to security and privacy are also supported. All algorithms and scenarios are built on top of the "core" package.
**data**: FedML will provide some default datasets for users to get started. Customization templates are also provided.
**model**: FedML model zoo.
**device**: FedML computing resource management.
**simulation**: FedML parrot can support: (1) simulate FL using a single process (2) MPI-based FL Simulator (3) NCCL-based FL Simulator (fastest)
**cross-silo**: Cross-silo Federated Learning for cross-organization/account training
**cross-device**: Cross-device Federated Learning for Smartphones and IoTs
**distributed**: Distributed Training: Accelerate Model Training with Lightweight Cheetah
**serve**: Model serving, tailored for edge inference
**mlops**: APIs related to machine learning operation platform (open.fedml.ai)
**centralized**: Some centralized trainer code examples for benchmarking purposes.
**utils**: Common utilities shared by other modules.
## About FedML, Inc.
https://FedML.ai
|
PypiClean
|
/cupy_cuda113-10.6.0-cp37-cp37m-win_amd64.whl/cupyx/tools/install_library.py
|
# This script will also be used as a standalone script when building wheels.
# Keep the script runnable without CuPy dependency.
import argparse
import json
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import urllib.request
_cudnn_records = []
_cutensor_records = []
_nccl_records = []
library_records = {}
def _make_cudnn_url(public_version, cuda_version, filename):
# https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz
return (
'https://developer.download.nvidia.com/compute/redist/cudnn' +
'/v{}/local_installers/{}/{}'.format(
public_version, cuda_version, filename))
def __make_cudnn_record(
cuda_version, public_version, archive_cuda_version, filename_linux,
filename_windows):
major_version = public_version.split('.')[0]
# Dependency order is documented at:
# https://docs.nvidia.com/deeplearning/cudnn/api/index.html
suffix_list = ['', '_ops_infer', '_ops_train',
'_cnn_infer', '_cnn_train',
'_adv_infer', '_adv_train']
return {
'cuda': cuda_version,
'cudnn': public_version,
'assets': {
'Linux': {
'url': _make_cudnn_url(
public_version, archive_cuda_version, filename_linux),
'filenames': [f'libcudnn{suffix}.so.{public_version}'
for suffix in suffix_list]
},
'Windows': {
'url': _make_cudnn_url(
public_version, archive_cuda_version, filename_windows),
'filenames': [f'cudnn{suffix}64_{major_version}.dll'
for suffix in suffix_list]
},
}
}
def _make_cudnn_record(cuda_version):
return __make_cudnn_record(
cuda_version, '8.4.0', '11.6',
'cudnn-linux-x86_64-8.4.0.27_cuda11.6-archive.tar.xz',
'cudnn-windows-x86_64-8.4.0.27_cuda11.6-archive.zip')
# Latest cuDNN versions: https://developer.nvidia.com/rdp/cudnn-download
_cudnn_records.append(_make_cudnn_record('11.7'))
_cudnn_records.append(_make_cudnn_record('11.6'))
_cudnn_records.append(_make_cudnn_record('11.5'))
_cudnn_records.append(_make_cudnn_record('11.4'))
_cudnn_records.append(_make_cudnn_record('11.3'))
_cudnn_records.append(_make_cudnn_record('11.2'))
_cudnn_records.append(_make_cudnn_record('11.1'))
_cudnn_records.append(_make_cudnn_record('11.0'))
_cudnn_records.append(__make_cudnn_record(
'10.2', '8.4.0', '10.2',
'cudnn-linux-x86_64-8.4.0.27_cuda10.2-archive.tar.xz',
'cudnn-windows-x86_64-8.4.0.27_cuda10.2-archive.zip'))
library_records['cudnn'] = _cudnn_records
def _make_cutensor_url(platform, filename):
# https://developer.download.nvidia.com/compute/cutensor/redist/libcutensor/linux-x86_64/libcutensor-linux-x86_64-1.5.0.3-archive.tar.xz
return (
'https://developer.download.nvidia.com/compute/cutensor/' +
f'redist/libcutensor/{platform}-x86_64/{filename}')
def __make_cutensor_record(
cuda_version, public_version, filename_linux, filename_windows):
return {
'cuda': cuda_version,
'cutensor': public_version,
'assets': {
'Linux': {
'url': _make_cutensor_url('linux', filename_linux),
'filenames': ['libcutensor.so.{}'.format(public_version)],
},
'Windows': {
'url': _make_cutensor_url('windows', filename_windows),
'filenames': ['cutensor.dll'],
},
}
}
def _make_cutensor_record(cuda_version):
return __make_cutensor_record(
cuda_version, '1.5.0',
'libcutensor-linux-x86_64-1.5.0.3-archive.tar.xz',
'libcutensor-windows-x86_64-1.5.0.3-archive.zip')
_cutensor_records.append(_make_cutensor_record('11.7'))
_cutensor_records.append(_make_cutensor_record('11.6'))
_cutensor_records.append(_make_cutensor_record('11.5'))
_cutensor_records.append(_make_cutensor_record('11.4'))
_cutensor_records.append(_make_cutensor_record('11.3'))
_cutensor_records.append(_make_cutensor_record('11.2'))
_cutensor_records.append(_make_cutensor_record('11.1'))
_cutensor_records.append(_make_cutensor_record('11.0'))
_cutensor_records.append(_make_cutensor_record('10.2'))
library_records['cutensor'] = _cutensor_records
def _make_nccl_url(public_version, filename):
# https://developer.download.nvidia.com/compute/redist/nccl/v2.8/nccl_2.8.4-1+cuda11.2_x86_64.txz
return (
'https://developer.download.nvidia.com/compute/redist/nccl/' +
'v{}/{}'.format(public_version, filename))
def _make_nccl_record(
cuda_version, full_version, public_version, filename_linux):
return {
'cuda': cuda_version,
'nccl': full_version,
'assets': {
'Linux': {
'url': _make_nccl_url(public_version, filename_linux),
'filenames': ['libnccl.so.{}'.format(full_version)],
},
},
}
_nccl_records.append(_make_nccl_record(
'11.7', '2.11.4', '2.11',
'nccl_2.11.4-1+cuda11.4_x86_64.txz'))
_nccl_records.append(_make_nccl_record(
'11.6', '2.11.4', '2.11',
'nccl_2.11.4-1+cuda11.4_x86_64.txz'))
_nccl_records.append(_make_nccl_record(
'11.5', '2.11.4', '2.11',
'nccl_2.11.4-1+cuda11.4_x86_64.txz'))
_nccl_records.append(_make_nccl_record(
'11.4', '2.11.4', '2.11',
'nccl_2.11.4-1+cuda11.4_x86_64.txz'))
_nccl_records.append(_make_nccl_record(
'11.3', '2.9.9', '2.9',
'nccl_2.9.9-1+cuda11.3_x86_64.txz'))
_nccl_records.append(_make_nccl_record(
'11.2', '2.8.4', '2.8',
'nccl_2.8.4-1+cuda11.2_x86_64.txz'))
_nccl_records.append(_make_nccl_record(
'11.1', '2.8.4', '2.8',
'nccl_2.8.4-1+cuda11.1_x86_64.txz'))
_nccl_records.append(_make_nccl_record(
'11.0', '2.11.4', '2.11',
'nccl_2.11.4-1+cuda11.0_x86_64.txz'))
_nccl_records.append(_make_nccl_record(
'10.2', '2.11.4', '2.11',
'nccl_2.11.4-1+cuda10.2_x86_64.txz'))
library_records['nccl'] = _nccl_records
def _unpack_archive(filename, extract_dir):
try:
shutil.unpack_archive(filename, extract_dir)
except shutil.ReadError:
print('The archive format is not supported in your Python '
'environment. Falling back to "tar" command...')
try:
os.makedirs(extract_dir, exist_ok=True)
subprocess.run(
['tar', 'xf', filename, '-C', extract_dir], check=True)
except subprocess.CalledProcessError:
msg = 'Failed to extract the archive using "tar" command.'
raise RuntimeError(msg)
def install_lib(cuda, prefix, library):
if platform.uname().machine.lower() not in ('x86_64', 'amd64'):
raise RuntimeError('''
Currently this tool only supports x86_64 architecture.''')
record = None
lib_records = library_records
for record in lib_records[library]:
if record['cuda'] == cuda:
break
else:
raise RuntimeError('''
The CUDA version specified is not supported.
Should be one of {}.'''.format(str([x['cuda'] for x in lib_records[library]])))
if prefix is None:
prefix = os.path.expanduser('~/.cupy/cuda_lib')
destination = calculate_destination(prefix, cuda, library, record[library])
if os.path.exists(destination):
raise RuntimeError('''
The destination directory {} already exists.
Remove the directory first if you want to reinstall.'''.format(destination))
target_platform = platform.system()
asset = record['assets'].get(target_platform, None)
if asset is None:
raise RuntimeError('''
The current platform ({}) is not supported.'''.format(target_platform))
if library == 'cudnn':
print('By downloading and using cuDNN, you accept the terms and'
' conditions of the NVIDIA cuDNN Software License Agreement:')
print(' https://docs.nvidia.com/deeplearning/cudnn/sla/index.html')
print()
elif library == 'cutensor':
print('By downloading and using cuTENSOR, you accept the terms and'
' conditions of the NVIDIA cuTENSOR Software License Agreement:')
print(' https://docs.nvidia.com/cuda/cutensor/license.html')
print()
elif library == 'nccl':
pass # BSD
else:
assert False
print('Installing {} {} for CUDA {} to: {}'.format(
library, record[library], record['cuda'], destination))
url = asset['url']
print('Downloading {}...'.format(url))
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, os.path.basename(url)), 'wb') as f:
with urllib.request.urlopen(url) as response:
f.write(response.read())
print('Extracting...')
outdir = os.path.join(tmpdir, 'extract')
_unpack_archive(f.name, outdir)
subdir = os.listdir(outdir)
assert len(subdir) == 1
dir_name = subdir[0]
print('Installing...')
if library == 'cudnn':
libdirs = ['bin', 'lib'] if sys.platform == 'win32' else ['lib']
for item in libdirs + ['include', 'LICENSE']:
shutil.move(
os.path.join(outdir, dir_name, item),
os.path.join(destination, item))
elif library == 'cutensor':
if cuda.startswith('11.') and cuda != '11.0':
cuda = '11'
license = 'LICENSE'
shutil.move(
os.path.join(outdir, dir_name, 'include'),
os.path.join(destination, 'include'))
shutil.move(
os.path.join(outdir, dir_name, 'lib', cuda),
os.path.join(destination, 'lib'))
shutil.move(
os.path.join(outdir, dir_name, license), destination)
elif library == 'nccl':
shutil.move(os.path.join(outdir, dir_name), destination)
else:
assert False
print('Cleaning up...')
print('Done!')
def calculate_destination(prefix, cuda, lib, lib_ver):
"""Calculates the installation directory.
~/.cupy/cuda_lib/{cuda_version}/{library_name}/{library_version}
"""
return os.path.join(prefix, cuda, lib, lib_ver)
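# Worked example: with the default prefix (~/.cupy/cuda_lib), installing cuDNN 8.4.0 for
# CUDA 11.6 resolves to:
#   calculate_destination(os.path.expanduser('~/.cupy/cuda_lib'), '11.6', 'cudnn', '8.4.0')
#   -> '<home>/.cupy/cuda_lib/11.6/cudnn/8.4.0'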
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--library',
choices=['cudnn', 'cutensor', 'nccl'],
required=True,
help='Library to install')
parser.add_argument('--cuda', type=str, required=True,
help='CUDA version')
parser.add_argument('--prefix', type=str, default=None,
help='Install destination')
parser.add_argument('--action', choices=['install', 'dump'],
default='install',
help='Action to perform')
params = parser.parse_args(args)
if params.prefix is not None:
params.prefix = os.path.abspath(params.prefix)
if params.action == 'install':
install_lib(params.cuda, params.prefix, params.library)
elif params.action == 'dump':
print(json.dumps(library_records[params.library], indent=4))
else:
assert False
if __name__ == '__main__':
main(sys.argv[1:])
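# Example invocations (assuming this file is importable as `cupyx.tools.install_library`,
# as its path suggests):
#
#   python -m cupyx.tools.install_library --library cudnn --cuda 11.6
#   python -m cupyx.tools.install_library --library nccl --cuda 11.7 --prefix /opt/cuda_libs
#   python -m cupyx.tools.install_library --library cutensor --cuda 11.6 --action dump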
|
PypiClean
|
/OpenSRANE-0.0.3.tar.gz/OpenSRANE-0.0.3/opensrane/Recorders/ObjManager.py
|
import opensrane as _opr
import os as _os
class _Classitterator(type):
#This metaclass makes the class itself iterable over its registered objects
def __iter__(self):
for param in (self.Objlst):
yield param
class ObjManager(metaclass=_Classitterator): #the metaclass _Classitterator is used to make ObjManager iterable
Taglst=[]
Objlst=[]
TagObjDict={}
@staticmethod
def Check(tag):
'''
A boolean function.
This function checks whether this tag has already been created; it returns True if the tag is new and False otherwise.
'''
if tag in ObjManager.Taglst:
return False
else:
return True
@staticmethod
def Add(tag,obj):
'''
This function adds the tag of the new object and its corresponding object to the list of tags,
the list of objects and the dictionary.
If it encounters a duplicate tag, an error will be raised.
'''
if tag in ObjManager.Taglst:
raise Exception('OpenSRANE Err: This is a duplicate tag and it should be changed')
else:
ObjManager.Taglst.append(tag)
ObjManager.Objlst.append(obj)
ObjManager.TagObjDict[tag]=obj
@staticmethod
def clearall(): #to clear all objects from this module
'''
This function clears all objects and tags created from the classes of the module in which this
ObjManager is located.
'''
directory_path = _os.path.dirname(__file__) #Find the current Directory path
modulename = _os.path.basename(directory_path) #Current directory name, which is the name of the module or subpackage
moduleObj=[x[1] for x in _opr.GetModules() if x[0]==modulename][0] #Get the object of the module or subpackage
classlist=set() #Find the class objects that the objects were created from
for i in moduleObj.ObjManager.Objlst: #Find all the class objects in the current module or subpackage
classlist.add(i.__class__)
for i in classlist: #For each class empty the Taglst and Objlst
i.Taglst=[]
i.Objlst=[]
ObjManager.Objlst=[] #Also here remove all objects that are stored in ObjManager
ObjManager.Taglst=[]
ObjManager.TagObjDict={}
def __class_getitem__(cls,tag):
if tag in cls.Taglst:
return cls.TagObjDict[tag]
else:
return "This tag has not been defined Yet"
|
PypiClean
|
/PyNFe-0.4.3-py3-none-any.whl/pynfe/entidades/notafiscal.py
|
import random
from .base import Entidade
from pynfe import get_version
from pynfe.utils.flags import NF_STATUS, CODIGO_BRASIL, CODIGOS_ESTADOS
# from pynfe.utils import so_numeros, memoize
from pynfe.utils import so_numeros
from decimal import Decimal
class NotaFiscal(Entidade):
status = NF_STATUS[0]
# Código numérico aleatório que compõe a chave de acesso
codigo_numerico_aleatorio = str()
# Digito verificador do codigo numerico aleatorio
dv_codigo_numerico_aleatorio = str()
# Nota Fisca eletronica
# - Modelo (formato: NN)
modelo = int()
# - Serie (obrigatorio - formato: NNN)
serie = str()
# - Numero NF (obrigatorio)
numero_nf = str()
# - Data da Emissao (obrigatorio)
data_emissao = None
# - Natureza da Operacao (obrigatorio)
natureza_operacao = str()
# - Tipo do Documento (obrigatorio - seleciona de lista) - NF_TIPOS_DOCUMENTO
tipo_documento = int()
# - Processo de emissão da NF-e (obrigatorio - seleciona de lista) - NF_PROCESSOS_EMISSAO
processo_emissao = 0
# - Versao do processo de emissão da NF-e
versao_processo_emissao = get_version()
# - Tipo impressao DANFE (obrigatorio - seleciona de lista) - NF_TIPOS_IMPRESSAO_DANFE
tipo_impressao_danfe = int()
# - Data de saida/entrada
data_saida_entrada = None
# - Forma de pagamento (obrigatorio - seleciona de lista) - NF_FORMAS_PAGAMENTO
# Removido na NF-e 4.00
# forma_pagamento = int()
# - Tipo de pagamento
"""
Filling in the Payment Information group is mandatory for NF-e and NFC-e.
For notes issued with the purpose of Adjustment or Return, the Payment Method field must be filled with 90=No payment.
01=Cash (Dinheiro)
02=Check (Cheque)
03=Credit card (Cartão de Crédito)
04=Debit card (Cartão de Débito)
05=Store credit (Crédito Loja)
10=Food voucher (Vale Alimentação)
11=Meal voucher (Vale Refeição)
12=Gift voucher (Vale Presente)
13=Fuel voucher (Vale Combustível)
14=Trade note (Duplicata Mercantil)
90=No payment (Sem pagamento)
99=Others
"""
tipo_pagamento = int()
# - Forma de emissao (obrigatorio - seleciona de lista) - NF_FORMAS_EMISSAO
forma_emissao = str()
# - Finalidade de emissao (obrigatorio - seleciona de lista) - NF_FINALIDADES_EMISSAO
finalidade_emissao = int()
# - Indica se a nota e para consumidor final
cliente_final = int()
# - Indica se a compra foi feita presencialmente, telefone, internet, etc
"""
0=Not applicable (for example, a complementary or adjustment invoice);
1=In-person operation;
2=Remote operation, over the Internet;
3=Remote operation, by telephone (teleatendimento);
4=NFC-e with home delivery;
5=In-person operation, outside the establishment;
9=Remote operation, others.
"""
indicador_presencial = int()
# - Indicador de intermediador/marketplace
"""
0=Operation without an intermediary (own site or platform)
1=Operation on a third-party site or platform (intermediaries/marketplace)
"""
indicador_intermediador = int()
""" nfce suporta apenas operação interna
Identificador de local de destino da operação 1=Operação interna;2=Operação interestadual;3=Operação com exterior.
"""
indicador_destino = int()
# - UF - converter para codigos em CODIGOS_ESTADOS
uf = str()
# - Municipio de ocorrencia
municipio = str()
# - Digest value da NF-e (somente leitura)
digest_value = None
# - Valor total da nota (somente leitura)
valor_total_nota = Decimal()
# - Valor ICMS da nota (somente leitura)
valor_icms_nota = Decimal()
# - Valor ICMS ST da nota (somente leitura)
valor_icms_st_nota = Decimal()
# - Protocolo (somente leitura)
protocolo = str()
# - Data (somente leitura)
data = None
# - Notas Fiscais Referenciadas (lista 1 para * / ManyToManyField)
notas_fiscais_referenciadas = None
# - Emitente (CNPJ ???)
emitente = None
# - Destinatario/Remetente
# - Identificacao (seleciona de Clientes)
destinatario_remetente = None
# - Entrega (XXX sera possivel ter entrega e retirada ao mesmo tempo na NF?)
entrega = None
# - Retirada
retirada = None
# - Local Retirada/Entrega
# - Local de retirada diferente do emitente (Sim/Nao)
local_retirada_diferente_emitente = False
# - Local de entrega diferente do destinatario (Sim/Nao)
local_entrega_diferente_destinatario = False
# - Autorizados a baixar XML (lista 1 para * / ManyToManyField)
autorizados_baixar_xml = None
# - Produtos e Servicos (lista 1 para * / ManyToManyField)
produtos_e_servicos = None
# Totais
# - ICMS
# - Base de calculo (somente leitura)
totais_icms_base_calculo = Decimal()
# - Total do ICMS (somente leitura)
totais_icms_total = Decimal()
# - Total do ICMS Desonerado (somente leitura)
totais_icms_desonerado = Decimal()
# - Base de calculo do ICMS ST (somente leitura)
totais_icms_st_base_calculo = Decimal()
# - Total do ICMS ST (somente leitura)
totais_icms_st_total = Decimal()
# - Total dos produtos e servicos (somente leitura)
totais_icms_total_produtos_e_servicos = Decimal()
# - Total do frete (somente leitura)
totais_icms_total_frete = Decimal()
# - Total do seguro (somente leitura)
totais_icms_total_seguro = Decimal()
# - Total do desconto (somente leitura)
totais_icms_total_desconto = Decimal()
# - Total do II (somente leitura)
totais_icms_total_ii = Decimal()
# - Total do IPI (somente leitura)
totais_icms_total_ipi = Decimal()
# - Valor Total do IPI devolvido
# Deve ser informado quando preenchido o Grupo Tributos Devolvidos na emissão de nota finNFe=4 (devolução) nas operações com não contribuintes do IPI.
# Corresponde ao total da soma dos campos id:UA04.
totais_icms_total_ipi_dev = Decimal()
# - PIS (somente leitura)
totais_icms_pis = Decimal()
# - COFINS (somente leitura)
totais_icms_cofins = Decimal()
# - Outras despesas acessorias
totais_icms_outras_despesas_acessorias = Decimal()
# - Total da nota
totais_icms_total_nota = Decimal()
# - ISSQN
# - Base de calculo do ISS
totais_issqn_base_calculo_iss = Decimal()
# - Total do ISS
totais_issqn_total_iss = Decimal()
# - PIS sobre servicos
totais_issqn_pis = Decimal()
# - COFINS sobre servicos
totais_issqn_cofins = Decimal()
# - Total dos servicos sob nao-incidencia ou nao tributados pelo ICMS
totais_issqn_total = Decimal()
# - Retencao de Tributos
# - Valor retido de PIS
totais_retencao_valor_retido_pis = Decimal()
# - Valor retido de COFINS
totais_retencao_valor_retido_cofins = Decimal()
# - Valor retido de CSLL
totais_retencao_valor_retido_csll = Decimal()
# - Base de calculo do IRRF
totais_retencao_base_calculo_irrf = Decimal()
# - Valor retido do IRRF
totais_retencao_valor_retido_irrf = Decimal()
# - BC da ret. da Prev. Social
totais_retencao_bc_retencao_previdencia_social = Decimal()
# - Retencao da Prev. Social
totais_retencao_retencao_previdencia_social = Decimal()
# - Valor aproximado total de tributos federais, estaduais e municipais.
totais_tributos_aproximado = Decimal()
# - Valor Total do FCP (Fundo de Combate à Pobreza)
totais_fcp = Decimal()
# - Valor total do ICMS relativo Fundo de Combate à Pobreza (FCP) da UF de destino
totais_fcp_destino = Decimal()
# - Valor Total do FCP (Fundo de Combate à Pobreza) retido por substituição tributária
totais_fcp_st = Decimal()
# - Valor Total do FCP retido anteriormente por Substituição Tributária
totais_fcp_st_ret = Decimal()
# - Valor total do ICMS Interestadual para a UF de destino
totais_icms_inter_destino = Decimal()
# - Valor total do ICMS Interestadual para a UF do remetente
totais_icms_inter_remetente = Decimal()
# Transporte
# - Modalidade do Frete (obrigatorio - seleciona de lista) - MODALIDADES_FRETE
# 0=Freight contracted by the sender (CIF);
# 1=Freight contracted by the recipient (FOB);
# 2=Freight contracted by a third party;
# 3=Transport by the sender's own means;
# 4=Transport by the recipient's own means;
# 9=No transport involved.
transporte_modalidade_frete = int()
# - Transportador (seleciona de Transportadoras)
transporte_transportadora = None
# - Retencao do ICMS
# - Base de calculo
transporte_retencao_icms_base_calculo = Decimal()
# - Aliquota
transporte_retencao_icms_aliquota = Decimal()
# - Valor do servico
transporte_retencao_icms_valor_servico = Decimal()
# - UF
transporte_retencao_icms_uf = str()
# - Municipio
transporte_retencao_icms_municipio = Decimal()
# - CFOP
transporte_retencao_icms_cfop = str()
# - ICMS retido
transporte_retencao_icms_retido = Decimal()
# - Veiculo
# - Placa
transporte_veiculo_placa = str()
# - RNTC
transporte_veiculo_rntc = str()
# - UF
transporte_veiculo_uf = str()
# - Reboque
# - Placa
transporte_reboque_placa = str()
# - RNTC
transporte_reboque_rntc = str()
# - UF
transporte_reboque_uf = str()
# - Volumes (lista 1 para * / ManyToManyField)
transporte_volumes = None
# Cobranca
# - Fatura
# - Numero
fatura_numero = str()
# - Valor original
fatura_valor_original = Decimal()
# - Valor do desconto
fatura_valor_desconto = Decimal()
# - Valor liquido
fatura_valor_liquido = Decimal()
# - Duplicatas (lista 1 para * / ManyToManyField)
duplicatas = None
# Informacoes Adicionais
# - Informacoes Adicionais
# - Informacoes adicionais de interesse do fisco
informacoes_adicionais_interesse_fisco = str()
# - Informacoes complementares de interesse do contribuinte
informacoes_complementares_interesse_contribuinte = str()
# - Observacoes do Contribuinte (lista 1 para * / ManyToManyField)
observacoes_contribuinte = None
# - Processo Referenciado (lista 1 para * / ManyToManyField)
processos_referenciados = None
def __init__(self, *args, **kwargs):
self.autorizados_baixar_xml = []
self.notas_fiscais_referenciadas = []
self.produtos_e_servicos = []
self.transporte_volumes = []
self.duplicatas = []
self.observacoes_contribuinte = []
self.processos_referenciados = []
self.responsavel_tecnico = []
super(NotaFiscal, self).__init__(*args, **kwargs)
def __str__(self):
return ' '.join([str(self.modelo), self.serie, self.numero_nf])
def adicionar_autorizados_baixar_xml(self, **kwargs):
obj = AutorizadosBaixarXML(**kwargs)
self.autorizados_baixar_xml.append(obj)
return obj
def adicionar_nota_fiscal_referenciada(self, **kwargs):
u"""Adiciona uma instancia de Nota Fisca referenciada"""
obj = NotaFiscalReferenciada(**kwargs)
self.notas_fiscais_referenciadas.append(obj)
return obj
def adicionar_produto_servico(self, **kwargs):
u"""Adiciona uma instancia de Produto"""
obj = NotaFiscalProduto(**kwargs)
self.produtos_e_servicos.append(obj)
self.totais_icms_base_calculo += obj.icms_valor_base_calculo
self.totais_icms_total += obj.icms_valor
self.totais_icms_desonerado += obj.icms_desonerado
self.totais_icms_st_base_calculo += obj.icms_st_valor_base_calculo
self.totais_icms_st_total += obj.icms_st_valor
self.totais_icms_total_produtos_e_servicos += obj.valor_total_bruto
self.totais_icms_total_frete += obj.total_frete
self.totais_icms_total_seguro += obj.total_seguro
self.totais_icms_total_desconto += obj.desconto
self.totais_icms_total_ii += obj.imposto_importacao_valor
self.totais_icms_total_ipi += obj.ipi_valor_ipi
self.totais_icms_total_ipi_dev += obj.ipi_valor_ipi_dev
self.totais_icms_pis += obj.pis_valor
self.totais_icms_cofins += obj.cofins_valor
self.totais_icms_outras_despesas_acessorias += obj.outras_despesas_acessorias
# - Valor Total do FCP (Fundo de Combate à Pobreza)
self.totais_fcp += obj.fcp_valor
self.totais_fcp_destino += obj.fcp_destino_valor
self.totais_fcp_st += obj.fcp_st_valor
self.totais_fcp_st_ret += obj.fcp_st_ret_valor
self.totais_icms_inter_destino += obj.icms_inter_destino_valor
self.totais_icms_inter_remetente += obj.icms_inter_remetente_valor
## TODO calcular impostos aproximados
#self.totais_tributos_aproximado += obj.tributos
self.totais_icms_total_nota += (
obj.valor_total_bruto
+ obj.icms_st_valor
+ obj.fcp_st_valor
+ obj.total_frete
+ obj.total_seguro
+ obj.outras_despesas_acessorias
+ obj.imposto_importacao_valor
+ obj.ipi_valor_ipi
+ obj.ipi_valor_ipi_dev
- obj.desconto
- obj.icms_desonerado
)
return obj
def adicionar_transporte_volume(self, **kwargs):
u"""Adiciona uma instancia de Volume de Transporte"""
obj = NotaFiscalTransporteVolume(**kwargs)
self.transporte_volumes.append(obj)
return obj
def adicionar_duplicata(self, **kwargs):
u"""Adiciona uma instancia de Duplicata"""
obj = NotaFiscalCobrancaDuplicata(**kwargs)
self.duplicatas.append(obj)
return obj
def adicionar_observacao_contribuinte(self, **kwargs):
u"""Adiciona uma instancia de Observacao do Contribuinte"""
obj = NotaFiscalObservacaoContribuinte(**kwargs)
self.observacoes_contribuinte.append(obj)
return obj
def adicionar_processo_referenciado(self, **kwargs):
"""Adiciona uma instancia de Processo Referenciado"""
obj = NotaFiscalProcessoReferenciado(**kwargs)
self.processos_referenciados.append(obj)
return obj
def adicionar_responsavel_tecnico(self, **kwargs):
""" Adiciona uma instancia de Responsavel Tecnico """
obj = NotaFiscalResponsavelTecnico(**kwargs)
self.responsavel_tecnico.append(obj)
return obj
def _codigo_numerico_aleatorio(self):
self.codigo_numerico_aleatorio = str(random.randint(0, 99999999)).zfill(8)
return self.codigo_numerico_aleatorio
def _dv_codigo_numerico(self, key):
assert len(key) == 43
weights = [2, 3, 4, 5, 6, 7, 8, 9]
weights_size = len(weights)
key_numbers = [int(k) for k in key]
key_numbers.reverse()
key_sum = 0
for i, key_number in enumerate(key_numbers):
# cycle though weights
i = i % weights_size
key_sum += key_number * weights[i]
remainder = key_sum % 11
if remainder == 0 or remainder == 1:
self.dv_codigo_numerico_aleatorio = '0'
return '0'
self.dv_codigo_numerico_aleatorio = str(11 - remainder)
return str(self.dv_codigo_numerico_aleatorio)
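# Worked example (hypothetical 43-digit key): the digits are read right to left, each one is
# multiplied by the cyclic weights 2..9, the products are summed, and the check digit is
# 11 - (key_sum % 11), with remainders 0 and 1 mapped to '0'. For instance, if
# key_sum % 11 == 4, the DV is str(11 - 4) == '7'.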
@property
# @memoize
def identificador_unico(self):
# Monta 'Id' da tag raiz <infNFe>
# Ex.: NFe35080599999090910270550010000000011518005123
key = "%(uf)s%(ano)s%(mes)s%(cnpj)s%(mod)s%(serie)s%(nNF)s%(tpEmis)s%(cNF)s"%{
'uf': CODIGOS_ESTADOS[self.uf],
'ano': self.data_emissao.strftime('%y'),
'mes': self.data_emissao.strftime('%m'),
'cnpj': so_numeros(self.emitente.cnpj).zfill(14),
'mod': self.modelo,
'serie': str(self.serie).zfill(3),
'nNF': str(self.numero_nf).zfill(9),
'tpEmis': str(self.forma_emissao),
'cNF': self._codigo_numerico_aleatorio(),
}
return "NFe%(uf)s%(ano)s%(mes)s%(cnpj)s%(mod)s%(serie)s%(nNF)s%(tpEmis)s%(cNF)s%(cDV)s"%{
'uf': CODIGOS_ESTADOS[self.uf],
'ano': self.data_emissao.strftime('%y'),
'mes': self.data_emissao.strftime('%m'),
'cnpj': so_numeros(self.emitente.cnpj).zfill(14),
'mod': self.modelo,
'serie': str(self.serie).zfill(3),
'nNF': str(self.numero_nf).zfill(9),
'tpEmis': str(self.forma_emissao),
'cNF': str(self.codigo_numerico_aleatorio),
'cDV': self._dv_codigo_numerico(key),
}
class NotaFiscalReferenciada(Entidade):
# - Tipo (seleciona de lista) - NF_REFERENCIADA_TIPOS
tipo = str()
# - Nota Fiscal eletronica
# - Chave de Acesso
chave_acesso = str()
# - Nota Fiscal
# - UF
uf = str()
# - Mes e ano de emissao
mes_ano_emissao = str()
# - CNPJ
cnpj = str()
# - IE
ie = str()
# - Serie (XXX)
serie = str()
# - Numero
numero = str()
# - Modelo
modelo = str()
class NotaFiscalProduto(Entidade):
# - Dados
# - Codigo (obrigatorio)
codigo = str()
# - Descricao (obrigatorio)
descricao = str()
# - EAN
ean = str()
# - NCM
ncm = str()
# - EX TIPI
ex_tipi = str()
# - CFOP (obrigatorio)
cfop = str()
# - Genero
genero = str()
# Número de controle da FCI (nFCI) - Ficha de Conteúdo de Importação.
nfci = str()
# - Unidade Comercial (obrigatorio)
unidade_comercial = str()
# - Quantidade Comercial (obrigatorio)
quantidade_comercial = Decimal()
# - Valor Unitario Comercial (obrigatorio)
valor_unitario_comercial = Decimal()
# - Unidade Tributavel (obrigatorio)
unidade_tributavel = str()
# - cBenef
cbenef = str()
# - Quantidade Tributavel (obrigatorio)
quantidade_tributavel = Decimal()
# - Valor Unitario Tributavel (obrigatorio)
valor_unitario_tributavel = Decimal()
# - EAN Tributavel
ean_tributavel = str()
# - Total Frete
total_frete = Decimal()
# - Total Seguro
total_seguro = Decimal()
# - Desconto
desconto = Decimal()
# - Outras despesas acessórias
outras_despesas_acessorias = Decimal()
# - Indica se valor do Item (vProd) entra no valor total da NF-e
compoe_valor_total = 1
# - Valor total bruto (obrigatorio)
valor_total_bruto = Decimal()
# - Número do Pedido de Compra
numero_pedido = str()
# - Item do Pedido de Compra
numero_item = str()
# - Produto especifico (seleciona de lista) - NF_PRODUTOS_ESPECIFICOS
produto_especifico = str()
# Grupo de informações de Combustível
# Código de produto da ANP
cProdANP = str()
# Descrição do produto conforme ANP
descANP = str()
# Percentual de Gás derivado do Petróleo
pGLP = Decimal()
# Percentual de gás natural nacional
pGNn = Decimal()
# Percentual do gás natural importado
pGNi = Decimal()
# Valor de Partida (apenas para GLP)
vPart = Decimal()
# Sigla da UF de consumo – (OBS: Deve ser a Sigla e não o Código da UF)
UFCons = str()
# - Tributos
# - ICMS
# - Situacao tributaria (obrigatorio - seleciona de lista) - ICMS_TIPOS_TRIBUTACAO
icms_modalidade = str()
# - Origem (obrigatorio - seleciona de lista) - ICMS_ORIGENS
icms_origem = int()
# - ICMS
# - Modalidade de determinacao da BC ICMS (seleciona de lista) - ICMS_MODALIDADES
icms_modalidade_determinacao_bc = int()
# - Percentual reducao da BC ICMS
icms_percentual_reducao_bc = Decimal()
# - Valor da base de calculo ICMS
icms_valor_base_calculo = Decimal()
# - Aliquota ICMS
icms_aliquota = Decimal()
# - Valor do ICMS
icms_valor = Decimal()
# - ICMS Desonerado
icms_desonerado = Decimal()
# - Motivo da desoneração do ICMS (seleciona de lista) - ICMS_MOTIVO_DESONERACAO
icms_motivo_desoneracao = int()
# - ICMS ST
# - Modalidade de determinacao da BC ICMS ST - ICMS_ST_MODALIDADES
icms_st_modalidade_determinacao_bc = int()
# - Percentual da margem de valor Adicionado do ICMS ST
icms_st_percentual_adicional = Decimal()
# - Percentual reducao da BC ICMS ST
icms_st_percentual_reducao_bc = Decimal()
# - Valor da base de calculo ICMS ST
icms_st_valor_base_calculo = Decimal()
# - Aliquota ICMS ST
icms_st_aliquota = Decimal()
# - Valor do ICMS ST
icms_st_valor = Decimal()
# - Fundo de Combate a Pobreza
fcp_base_calculo = Decimal()
fcp_percentual = Decimal()
fcp_valor = Decimal()
fcp_st_base_calculo = Decimal()
fcp_st_percentual = Decimal()
fcp_st_valor = Decimal()
fcp_destino_valor = Decimal()
fcp_st_valor = Decimal()
fcp_st_ret_valor = Decimal()
icms_inter_destino_valor = Decimal()
icms_inter_remetente_valor = Decimal()
# - IPI
# - Situacao tributaria (seleciona de lista) - IPI_TIPOS_TRIBUTACAO
ipi_situacao_tributaria = str()
# - Classe de enquadramento
# - A informacao para classe de enquadramento do IPI para Cigarros e Bebidas,
# quando aplicavel, deve ser informada utilizando a codificacao prevista nos
# Atos Normativos editados pela Receita Federal
ipi_classe_enquadramento = str()
# - Codigo do enquadramento
ipi_codigo_enquadramento = str()
# - CNPJ do Produtor
ipi_cnpj_produtor = str()
# - Codigo do selo de controle
# - A informacao do codigo de selo, quando aplicavel, deve ser informada
# utilizando a codificacao prevista nos Atos Normativos editados pela Receita
# Federal
ipi_codigo_selo_controle = str()
# - Quantidade do selo de controle
ipi_quantidade_selo_controle = Decimal()
# - Tipo de calculo (seleciona de lista) - IPI_TIPOS_CALCULO
ipi_tipo_calculo = str()
# - Percentual
# - Valor da base de calculo
ipi_valor_base_calculo = Decimal()
# - Aliquota
ipi_aliquota = Decimal()
# - Em valor
# - Quantidade total unidade padrao
ipi_quantidade_total_unidade_padrao = Decimal()
# - Valor por unidade
ipi_valor_unidade = Decimal()
# - Valor do IPI
ipi_valor_ipi = Decimal()
# - Percentual Devolucao Produto
pdevol = Decimal()
# - Valor do IPI Devolvido
ipi_valor_ipi_dev = Decimal()
# - PIS
# - PIS
# - Situacao tributaria (obrigatorio - seleciona de lista) - PIS_TIPOS_TRIBUTACAO
pis_situacao_tributaria = str()
# - Tipo de calculo (seleciona de lista) - PIS_TIPOS_CALCULO
pis_tipo_calculo = str()
# - Percentual
# - Valor da base de calculo
pis_valor_base_calculo = Decimal()
# - Aliquota (percentual)
pis_aliquota_percentual = Decimal()
# - Em valor
# - Aliquota (em reais)
pis_aliquota_reais = Decimal()
# - Quantidade vendida
pis_quantidade_vendida = Decimal()
# - Valor do PIS
pis_valor = Decimal()
# - PIS ST
# - Tipo de calculo (seleciona de lista) - PIS_TIPOS_CALCULO
pis_st_tipo_calculo = str()
# - Percentual
# - Valor da base de calculo
pis_st_valor_base_calculo = Decimal()
# - Aliquota (percentual)
pis_st_aliquota_percentual = Decimal()
# - Em valor
# - Aliquota (em reais)
pis_st_aliquota_reais = Decimal()
# - Quantidade vendida
pis_st_quantidade_vendida = Decimal()
# - Valor do PIS ST
pis_st_valor = Decimal()
# - COFINS
# - COFINS
# - Situacao tributaria (obrigatorio - seleciona de lista) - COFINS_TIPOS_TRIBUTACAO
cofins_situacao_tributaria = str()
# - Tipo de calculo (seleciona de lista) - COFINS_TIPOS_CALCULO
cofins_tipo_calculo = str()
# - Percentual
# - Valor da base de calculo
cofins_valor_base_calculo = Decimal()
# - Aliquota (percentual)
cofins_aliquota_percentual = Decimal()
# - Em Valor
# - Aliquota (em reais)
cofins_aliquota_reais = Decimal()
# - Quantidade vendida
cofins_quantidade_vendida = Decimal()
# - Valor do COFINS
cofins_valor = Decimal()
# - COFINS ST
# - Tipo de calculo (seleciona de lista) - COFINS_TIPOS_CALCULO
cofins_st_tipo_calculo = str()
# - Percentual
# - Valor da base de calculo
cofins_st_valor_base_calculo = Decimal()
# - Aliquota (percentual)
cofins_st_aliquota_percentual = Decimal()
# - Em Valor
# - Aliquota (em reais)
cofins_st_aliquota_reais = Decimal()
# - Quantidade vendida
cofins_st_quantidade_vendida = Decimal()
# - Valor do COFINS ST
cofins_st_valor = Decimal()
# - ISSQN
# - Valor da base de calculo
issqn_valor_base_calculo = Decimal()
# - Aliquota
issqn_aliquota = Decimal()
# - Lista de servico (seleciona de lista)
# - Aceita somente valores maiores que 100, disponiveis no arquivo data/ISSQN/Lista-Servicos.txt
issqn_lista_servico = str()
# - UF
issqn_uf = str()
# - Municipio de ocorrencia
issqn_municipio = str()
# - Valor do ISSQN
issqn_valor = Decimal()
# - Imposto de Importacao
# - Valor base de calculo
imposto_importacao_valor_base_calculo = Decimal()
# - Valor despesas aduaneiras
imposto_importacao_valor_despesas_aduaneiras = Decimal()
# - Valor do IOF
imposto_importacao_valor_iof = Decimal()
# - Valor imposto de importacao
imposto_importacao_valor = Decimal()
# - Informacoes Adicionais
# - Texto livre de informacoes adicionais
informacoes_adicionais = str()
# - Declaracao de Importacao (lista 1 para * / ManyToManyField)
declaracoes_importacao = None
def __init__(self, *args, **kwargs):
self.declaracoes_importacao = []
super(NotaFiscalProduto, self).__init__(*args, **kwargs)
def adicionar_declaracao_importacao(self, **kwargs):
u"""Adiciona uma instancia de Declaracao de Importacao"""
self.declaracoes_importacao.append(NotaFiscalDeclaracaoImportacao(**kwargs))
class NotaFiscalDeclaracaoImportacao(Entidade):
# - Numero DI/DSI/DA
numero_di_dsi_da = str()
# - Data de registro
data_registro = None
# - Codigo exportador
codigo_exportador = str()
# - Desembaraco aduaneiro
# - UF
desembaraco_aduaneiro_uf = str()
# - Local
desembaraco_aduaneiro_local = str()
# - Data
desembaraco_aduaneiro_data = str()
# - Adicoes (lista 1 para * / ManyToManyField)
adicoes = None
def __init__(self, *args, **kwargs):
self.adicoes = []
super(NotaFiscalDeclaracaoImportacao, self).__init__(*args, **kwargs)
def adicionar_adicao(self, **kwargs):
u"""Adiciona uma instancia de Adicao de Declaracao de Importacao"""
self.adicoes.append(NotaFiscalDeclaracaoImportacaoAdicao(**kwargs))
class NotaFiscalDeclaracaoImportacaoAdicao(Entidade):
# - Numero
numero = str()
# - Desconto
desconto = str()
# - Codigo fabricante
codigo_fabricante = str()
class NotaFiscalTransporteVolume(Entidade):
# - Quantidade
quantidade = Decimal()
# - Especie
especie = str()
# - Marca
marca = str()
# - Numeracao
numeracao = str()
# - Peso Liquido (kg)
peso_liquido = Decimal()
# - Peso Bruto (kg)
peso_bruto = Decimal()
# - Lacres (lista 1 para * / ManyToManyField)
lacres = None
def __init__(self, *args, **kwargs):
self.lacres = []
super(NotaFiscalTransporteVolume, self).__init__(*args, **kwargs)
def adicionar_lacre(self, **kwargs):
u"""Adiciona uma instancia de Lacre de Volume de Transporte"""
self.lacres.append(NotaFiscalTransporteVolumeLacre(**kwargs))
class NotaFiscalTransporteVolumeLacre(Entidade):
# - Numero de lacres
numero_lacre = str()
class NotaFiscalCobrancaDuplicata(Entidade):
# - Numero
numero = str()
# - Data de vencimento
data_vencimento = None
# - Valor
valor = Decimal()
class NotaFiscalObservacaoContribuinte(Entidade):
# - Nome do campo
nome_campo = str()
# - Observacao
observacao = str()
class NotaFiscalProcessoReferenciado(Entidade):
# - Identificador do processo
identificador_processo = str()
# - Origem (seleciona de lista) - ORIGENS_PROCESSO
# - SEFAZ
# - Justica federal
# - Justica estadual
# - Secex/RFB
# - Outros
origem = str()
class NotaFiscalEntregaRetirada(Entidade):
# - Tipo de Documento (obrigatorio) - default CNPJ
tipo_documento = 'CNPJ'
# - Numero do Documento (obrigatorio)
numero_documento = str()
# - Endereco
# - Logradouro (obrigatorio)
endereco_logradouro = str()
# - Numero (obrigatorio)
endereco_numero = str()
# - Complemento
endereco_complemento = str()
# - Bairro (obrigatorio)
endereco_bairro = str()
# - CEP
endereco_cep = str()
# - Pais (seleciona de lista)
endereco_pais = CODIGO_BRASIL
# - UF (obrigatorio)
endereco_uf = str()
# - Municipio (obrigatorio)
endereco_municipio = str()
# - Código Município (opt)
endereco_cod_municipio = str()
# - Telefone
endereco_telefone = str()
class NotaFiscalServico(Entidade):
# id do rps
identificador = str()
# tag competencia
data_emissao = None
# Serviço executado pelo prestador
servico = None
# Emitente da NFS-e
emitente = None
# Cliente para quem a NFS-e será emitida
cliente = None
# Optante Simples Nacional
simples = int() # 1-Sim; 2-Não
# Incentivo Fiscal
incentivo = int() # 1-Sim; 2-Não
# Serie
serie = str()
# Tipo
tipo = str()
# Natureza de operação
natureza_operacao = int()
# Regime especial de tributação
regime_especial = int()
def __init__(self, *args, **kwargs):
super(NotaFiscalServico, self).__init__(*args, **kwargs)
def __str__(self):
return ' '.join([str(self.identificador)])
class NotaFiscalResponsavelTecnico(Entidade):
# NT 2018/003
cnpj = str()
contato = str()
email = str()
fone = str()
csrt = str()
class AutorizadosBaixarXML(Entidade):
CPFCNPJ = str()
|
PypiClean
|
/magic_folder-23.6.0-py3-none-any.whl/magic_folder/test/fixtures.py
|
from errno import (
ENOENT,
)
from allmydata.util.base32 import (
b2a,
)
from wormhole.wormhole import (
IDeferredWormhole,
)
from zope.interface import (
implementer,
)
from twisted.internet.defer import (
succeed,
)
from ..util.encoding import (
load_yaml,
dump_yaml,
)
from ..util.capabilities import (
Capability,
random_dircap,
)
from ..util.wrap import (
delayed_wrap_frozen,
)
import attr
from fixtures import (
Fixture,
)
from hyperlink import (
DecodedURL,
)
from treq.client import HTTPClient
from twisted.internet.task import (
Clock,
Cooperator,
)
from twisted.internet.defer import (
CancelledError,
DeferredList,
)
from twisted.python.filepath import FilePath
from ..client import create_testing_http_client
from ..status import FolderStatus
from ..uploader import (
LocalSnapshotService,
LocalSnapshotCreator,
UploaderService,
)
from ..downloader import (
InMemoryMagicFolderFilesystem,
RemoteSnapshotCacheService,
)
from ..magic_file import (
MagicFileFactory,
)
from ..testing.web import (
create_fake_tahoe_root,
create_tahoe_treq_client,
)
from ..tahoe_client import (
create_tahoe_client,
)
from ..participants import participants_from_collective
from ..snapshot import create_local_author
from ..status import (
EventsWebSocketStatusService,
)
from ..service import MagicFolderService
from ..config import (
GlobalConfigDatabase,
SQLite3DatabaseLocation,
MagicFolderConfig,
create_testing_configuration,
)
from .common import success_result_of
@attr.s
class NodeDirectory(Fixture):
"""
Provide just enough filesystem state to appear to be a Tahoe-LAFS node
directory.
"""
path = attr.ib()
token = attr.ib(default=b"123")
@property
def tahoe_cfg(self):
return self.path.child(u"tahoe.cfg")
@property
def node_url(self):
return self.path.child(u"node.url")
@property
def magic_folder_url(self):
return self.path.child(u"magic-folder.url")
@property
def private(self):
return self.path.child(u"private")
@property
def api_auth_token(self):
return self.private.child(u"api_auth_token")
@property
def magic_folder_yaml(self):
return self.private.child(u"magic_folders.yaml")
def create_magic_folder(
self,
folder_name,
collective_dircap,
upload_dircap,
directory,
poll_interval,
):
try:
magic_folder_config_bytes = self.magic_folder_yaml.getContent()
except IOError as e:
if e.errno == ENOENT:
magic_folder_config = {}
else:
raise
else:
magic_folder_config = load_yaml(magic_folder_config_bytes)
magic_folder_config.setdefault(
u"magic-folders",
{},
)[folder_name] = {
u"collective_dircap": collective_dircap,
u"upload_dircap": upload_dircap,
u"directory": directory.path,
u"poll_interval": u"{}".format(poll_interval),
}
self.magic_folder_yaml.setContent(dump_yaml(magic_folder_config).encode("utf8"))
def _setUp(self):
self.path.makedirs()
self.tahoe_cfg.touch()
self.node_url.setContent(b"http://127.0.0.1:9876/")
self.magic_folder_url.setContent(b"http://127.0.0.1:5432/")
self.private.makedirs()
self.api_auth_token.setContent(self.token)
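# Minimal usage sketch (hypothetical paths and dircaps), assuming a testtools-style test
# case that supports useFixture():
#
#   node = self.useFixture(NodeDirectory(FilePath(self.mktemp())))
#   node.create_magic_folder(
#       u"documents", collective_dircap, upload_dircap, FilePath(u"/tmp/docs"), 60,
#   )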
class MagicFileFactoryFixture(Fixture):
"""
A fixture which provides a ``MagicFileFactory`` connected to a
``MagicFolderConfig``.
"""
def __init__(self, temp, author, upload_dircap, root=None):
"""
:param FilePath temp: A path where the fixture may write whatever it
likes.
:param LocalAuthor author: The author which will be used to sign
created snapshots.
:param bytes upload_dircap: The Tahoe-LAFS capability for a writeable
directory into which new snapshots will be linked.
:param IResource root: The root resource for the fake Tahoe-LAFS HTTP
API hierarchy. The default is one created by
``create_fake_tahoe_root``.
"""
if root is None:
root = create_fake_tahoe_root()
self.temp = temp
self.author = author
self.upload_dircap = upload_dircap
self.root = root
self.http_client = create_tahoe_treq_client(self.root)
self.tahoe_client = create_tahoe_client(
DecodedURL.from_text(u"http://example.com"),
self.http_client,
)
def _setUp(self):
self.magic_path = self.temp.child("magic")
self.magic_path.makedirs()
self.stash_path = self.temp.child("stash")
self.stash_path.makedirs()
self.poll_interval = 1
self.scan_interval = None
collective_dircap = random_dircap(readonly=True)
participants = participants_from_collective(
collective_dircap, self.upload_dircap, self.tahoe_client
)
self.config = MagicFolderConfig.initialize(
u"some-folder",
SQLite3DatabaseLocation.memory(),
self.author,
self.stash_path,
random_dircap(readonly=True),
self.upload_dircap,
self.magic_path,
self.poll_interval,
self.scan_interval,
)
self.filesystem = InMemoryMagicFolderFilesystem()
self._global_config = create_testing_configuration(
self.temp.child("config"),
self.temp.child("tahoe-node"),
)
self.status = EventsWebSocketStatusService(Clock(), self._global_config)
folder_status = FolderStatus(self.config.name, self.status)
uncooperator = Cooperator(
terminationPredicateFactory=lambda: lambda: False,
scheduler=lambda f: f(),
)
self.addCleanup(uncooperator.stop)
local_snapshot_service = LocalSnapshotService(
self.config,
LocalSnapshotCreator(
self.config,
self.config.author,
self.config.stash_path,
self.config.magic_path,
self.tahoe_client,
cooperator=uncooperator,
),
status=folder_status,
)
local_snapshot_service.startService()
self.addCleanup(local_snapshot_service.stopService)
uploader = UploaderService(
self.config,
folder_status,
self.tahoe_client,
)
uploader.startService()
self.addCleanup(uploader.stopService)
self.magic_file_factory = MagicFileFactory(
config=self.config,
tahoe_client=self.tahoe_client,
folder_status=folder_status,
local_snapshot_service=local_snapshot_service,
uploader=uploader,
write_participant=participants.writer,
remote_cache=RemoteSnapshotCacheService.from_config(
self.config,
self.tahoe_client,
),
magic_fs=self.filesystem,
synchronous=True,
)
self.addCleanup(self.magic_file_factory.finish)
class TahoeClientWrapper(object):
"""
A sentinel passed to MagicFolderNode asking it to apply some
wrapping functions to the TahoeClient that is created.
This saves all kwargs for use with delayed_wrap_frozen(), which will be
used to transform the TahoeClient -- that is, to override any of
its methods or attributes.
"""
def __init__(self, **kwargs):
self.wrappers = kwargs
@attr.s
class MagicFolderNode(object):
# FIXME docstring
tahoe_root = attr.ib()
http_client = attr.ib(validator=attr.validators.instance_of(HTTPClient))
tahoe_client = attr.ib()
global_service = attr.ib(validator=attr.validators.instance_of(MagicFolderService))
global_config = attr.ib(validator=attr.validators.instance_of(GlobalConfigDatabase))
_cooperator = attr.ib()
@classmethod
def create(
cls,
reactor,
basedir,
auth_token=None,
folders=None,
start_folder_services=False,
tahoe_client=None,
wormhole_factory=None,
):
"""
Create a :py:`MagicFolderService` and a treq client which is hooked up to it.
:param reactor: A reactor to give to the ``MagicFolderService`` which will
back the HTTP interface.
:param FilePath basedir: A non-existent directory to create and populate
with a new Magic Folder service configuration.
:param bytes auth_token: The authorization token accepted by the
service.
:param folders: A mapping from Magic Folder names to their configurations.
These are the folders which will appear to exist.
:param bool start_folder_services: If ``True``, start the Magic Folder
service objects. Otherwise, don't.
:param TahoeClient tahoe_client: if provided, used as the
tahoe-client. If it is not provided, an in-memory Tahoe
instance will be used, and populated with empty folders
corresponding to the requested folders. (You may also pass
a TahoeClientWrapper to get the default instance, but with
some overridden methods -- the overrides only take place
after setup).
:return MagicFolderNode:
"""
global_config = create_testing_configuration(
basedir,
FilePath(u"/non-tahoe-directory"),
)
if auth_token is None:
auth_token = global_config.api_token
maybe_wrapper = None
assert isinstance(auth_token, bytes), "auth_token must be bytes"
uncooperator = Cooperator(
terminationPredicateFactory=lambda: lambda: False,
scheduler=lambda f: f(),
)
if tahoe_client is None or isinstance(tahoe_client, TahoeClientWrapper):
# Set up a Tahoe client backed by a fake Tahoe instance. Since we
# know it is a working instance, we can delegate to
# py:`MagicFolderService.create_folder` below to create folders.
maybe_wrapper = tahoe_client
tahoe_root = create_fake_tahoe_root()
tahoe_client = create_tahoe_client(
DecodedURL.from_text(u"http://invalid./"),
create_tahoe_treq_client(tahoe_root),
)
if isinstance(maybe_wrapper, TahoeClientWrapper):
# the "delayed" means these overrides won't take
# effect until we call .enable_wrapper() below just
# before returning .. that is, after setup
tahoe_client = delayed_wrap_frozen(
tahoe_client,
**maybe_wrapper.wrappers
)
else:
tahoe_root = None
# If we've been supplied a custom Tahoe client, we can't assume
# anything about it, so we create the requested folders in the
# database and that is it. The folders created may not have
# corresponding Tahoe data in them.
if folders:
for name, config in folders.items():
global_config.create_magic_folder(
name,
config[u"magic-path"],
create_local_author(config[u"author-name"]),
# collective DMD
Capability.from_string(u"URI:DIR2{}:{}:{}".format(
"" if config["admin"] else "-RO",
b2a(("\0" * 16).encode("ascii")).decode("ascii"),
b2a(("\1" * 32).encode("ascii")).decode("ascii"),
)),
# personal DMD
Capability.from_string(u"URI:DIR2:{}:{}".format(b2a(("\2" * 16).encode("ascii")).decode("ascii"), b2a(("\3" * 32).encode("ascii")).decode("ascii"))),
config[u"poll-interval"],
config[u"scan-interval"],
)
status_service = EventsWebSocketStatusService(
reactor,
global_config,
)
global_service = MagicFolderService(
reactor,
global_config,
status_service,
# Provide a TahoeClient so MagicFolderService doesn't try to look up a
# Tahoe-LAFS node URL in the non-existent directory we supplied above
# in its efforts to create one itself.
tahoe_client,
cooperator=uncooperator,
skip_check_state=True,
wormhole_factory=wormhole_factory,
)
if folders and tahoe_root:
# Since we created a Tahoe node above, we delegate to
# py:`MagicFolderService.create_folder` to create folders, which
# creates the appropriate DMDs such that the folders are usable.
for name, config in folders.items():
success_result_of(
global_service.create_folder(
name,
config[u"author-name"],
config[u"magic-path"],
config[u"poll-interval"],
config[u"scan-interval"],
)
)
if not config[u"admin"]:
folder_config = global_config.get_magic_folder(name)
folder_config.collective_dircap = folder_config.collective_dircap.to_readonly()
# TODO: This should be in Fixture._setUp, along with a .addCleanup(stopService)
# See https://github.com/LeastAuthority/magic-folder/issues/334
if start_folder_services:
# Reach in and start the individual service for the folder we're going
# to interact with. This is required for certain functionality, eg
# snapshot creation. We avoid starting the whole global_service
# because it wants to do error-prone things like bind ports.
for name in folders:
global_service.get_folder_service(name).startService()
http_client = create_testing_http_client(
reactor,
global_config,
global_service,
lambda: auth_token,
status_service,
)
# if we wrapped our client, enable that now (after setup)
if isinstance(maybe_wrapper, TahoeClientWrapper):
tahoe_client.enable_wrapper()
return cls(
tahoe_root=tahoe_root,
http_client=http_client,
tahoe_client=tahoe_client,
global_service=global_service,
global_config=global_config,
cooperator=uncooperator,
)
def cleanup(self):
"""
Stop the (selected) services we started
"""
self._cooperator.stop()
return DeferredList([
magic_folder.stopService()
for magic_folder in self.global_service._iter_magic_folder_services()
])
@implementer(IDeferredWormhole)
class FakeWormhole:
"""
Enough of a DeferredWormhole fake to run the unit tests.
"""
def __init__(self, code="1-foo-bar", messages=None, on_closed=None):
self._code = code
self._on_closed = on_closed
self._outgoing_messages = [] if messages is None else messages
self.sent_messages = []
self._cancelled = False
def add_message(self, msg):
self._outgoing_messages.append(msg)
# the IDeferredWormhole API methods
def get_welcome(self):
return succeed({})
def allocate_code(self, size):
return succeed(self._code)
def get_code(self):
return succeed(self._code)
def get_versions(self):
return succeed({
"magic-folder": {
"supported-messages": ["invite-v1"]
}
})
def get_message(self):
if len(self._outgoing_messages):
msg = self._outgoing_messages.pop(0)
return msg
raise CancelledError()
def send_message(self, msg):
self.sent_messages.append(msg)
def close(self):
self._on_closed()
return succeed(None)
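# Example sketch: a FakeWormhole pre-loaded with one queued message and a
# no-op close callback (the message payload is made up for illustration):
#
#   wormhole = FakeWormhole(
#       code="2-example-code",
#       messages=[b'{"kind": "join-folder"}'],
#       on_closed=lambda: None,
#   )
#   msg = wormhole.get_message()  # returns the queued message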
|
PypiClean
|
/armstrong.hatband-1.4.0.tar.gz/armstrong.hatband-1.4.0/armstrong/hatband/static/ckeditor/plugins/scayt/dialogs/options.js
|
/*
Copyright (c) 2003-2011, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.dialog.add('scaytcheck',function(a){var b=true,c,d=CKEDITOR.document,e=a.name,f=CKEDITOR.plugins.scayt.getUiTabs(a),g,h=[],i=0,j=['dic_create_'+e+',dic_restore_'+e,'dic_rename_'+e+',dic_delete_'+e],k=['mixedCase','mixedWithDigits','allCaps','ignoreDomainNames'];function l(){if(typeof document.forms['optionsbar_'+e]!='undefined')return document.forms['optionsbar_'+e].options;return[];};function m(){if(typeof document.forms['languagesbar_'+e]!='undefined')return document.forms['languagesbar_'+e].scayt_lang;return[];};function n(z,A){if(!z)return;var B=z.length;if(B==undefined){z.checked=z.value==A.toString();return;}for(var C=0;C<B;C++){z[C].checked=false;if(z[C].value==A.toString())z[C].checked=true;}};var o=a.lang.scayt,p=[{id:'options',label:o.optionsTab,elements:[{type:'html',id:'options',html:'<form name="optionsbar_'+e+'"><div class="inner_options">'+'\t<div class="messagebox"></div>'+'\t<div style="display:none;">'+'\t\t<input type="checkbox" name="options" id="allCaps_'+e+'" />'+'\t\t<label for="allCaps" id="label_allCaps_'+e+'"></label>'+'\t</div>'+'\t<div style="display:none;">'+'\t\t<input name="options" type="checkbox" id="ignoreDomainNames_'+e+'" />'+'\t\t<label for="ignoreDomainNames" id="label_ignoreDomainNames_'+e+'"></label>'+'\t</div>'+'\t<div style="display:none;">'+'\t<input name="options" type="checkbox" id="mixedCase_'+e+'" />'+'\t\t<label for="mixedCase" id="label_mixedCase_'+e+'"></label>'+'\t</div>'+'\t<div style="display:none;">'+'\t\t<input name="options" type="checkbox" id="mixedWithDigits_'+e+'" />'+'\t\t<label for="mixedWithDigits" id="label_mixedWithDigits_'+e+'"></label>'+'\t</div>'+'</div></form>'}]},{id:'langs',label:o.languagesTab,elements:[{type:'html',id:'langs',html:'<form name="languagesbar_'+e+'"><div class="inner_langs">'+'\t<div class="messagebox"></div>\t'+' <div style="float:left;width:45%;margin-left:5px;" id="scayt_lcol_'+e+'" ></div>'+' <div style="float:left;width:45%;margin-left:15px;" id="scayt_rcol_'+e+'"></div>'+'</div></form>'}]},{id:'dictionaries',label:o.dictionariesTab,elements:[{type:'html',style:'',id:'dictionaries',html:'<form name="dictionarybar_'+e+'"><div class="inner_dictionary" style="text-align:left; white-space:normal; width:320px; overflow: hidden;">'+'\t<div style="margin:5px auto; width:80%;white-space:normal; overflow:hidden;" id="dic_message_'+e+'"> </div>'+'\t<div style="margin:5px auto; width:80%;white-space:normal;"> '+' <span class="cke_dialog_ui_labeled_label" >Dictionary name</span><br>'+'\t\t<span class="cke_dialog_ui_labeled_content" >'+'\t\t\t<div class="cke_dialog_ui_input_text">'+'\t\t\t\t<input id="dic_name_'+e+'" type="text" class="cke_dialog_ui_input_text"/>'+'\t\t</div></span></div>'+'\t\t<div style="margin:5px auto; width:80%;white-space:normal;">'+'\t\t\t<a style="display:none;" class="cke_dialog_ui_button" href="javascript:void(0)" id="dic_create_'+e+'">'+'\t\t\t\t</a>'+'\t\t\t<a style="display:none;" class="cke_dialog_ui_button" href="javascript:void(0)" id="dic_delete_'+e+'">'+'\t\t\t\t</a>'+'\t\t\t<a style="display:none;" class="cke_dialog_ui_button" href="javascript:void(0)" id="dic_rename_'+e+'">'+'\t\t\t\t</a>'+'\t\t\t<a style="display:none;" class="cke_dialog_ui_button" href="javascript:void(0)" id="dic_restore_'+e+'">'+'\t\t\t\t</a>'+'\t\t</div>'+'\t<div style="margin:5px auto; width:95%;white-space:normal;" id="dic_info_'+e+'"></div>'+'</div></form>'}]},{id:'about',label:o.aboutTab,elements:[{type:'html',id:'about',style:'margin: 5px 5px;',html:'<div 
id="scayt_about_'+e+'"></div>'}]}],q={title:o.title,minWidth:360,minHeight:220,onShow:function(){var z=this;
z.data=a.fire('scaytDialog',{});z.options=z.data.scayt_control.option();z.chosed_lang=z.sLang=z.data.scayt_control.sLang;if(!z.data||!z.data.scayt||!z.data.scayt_control){alert('Error loading application service');z.hide();return;}var A=0;if(b)z.data.scayt.getCaption(a.langCode||'en',function(B){if(A++>0)return;c=B;s.apply(z);t.apply(z);b=false;});else t.apply(z);z.selectPage(z.data.tab);},onOk:function(){var z=this.data.scayt_control;z.option(this.options);var A=this.chosed_lang;z.setLang(A);z.refresh();},onCancel:function(){var z=l();for(var A in z)z[A].checked=false;n(m(),'');},contents:h},r=CKEDITOR.plugins.scayt.getScayt(a);for(g=0;g<f.length;g++){if(f[g]==1)h[h.length]=p[g];}if(f[2]==1)i=1;var s=function(){var z=this,A=z.data.scayt.getLangList(),B=['dic_create','dic_delete','dic_rename','dic_restore'],C=[],D=[],E=k,F;if(i){for(F=0;F<B.length;F++){C[F]=B[F]+'_'+e;d.getById(C[F]).setHtml('<span class="cke_dialog_ui_button">'+c['button_'+B[F]]+'</span>');}d.getById('dic_info_'+e).setHtml(c.dic_info);}if(f[0]==1)for(F in E){var G='label_'+E[F],H=G+'_'+e,I=d.getById(H);if('undefined'!=typeof I&&'undefined'!=typeof c[G]&&'undefined'!=typeof z.options[E[F]]){I.setHtml(c[G]);var J=I.getParent();J.$.style.display='block';}}var K='<p><img src="'+window.scayt.getAboutInfo().logoURL+'" /></p>'+'<p>'+c.version+window.scayt.getAboutInfo().version.toString()+'</p>'+'<p>'+c.about_throwt_copy+'</p>';d.getById('scayt_about_'+e).setHtml(K);var L=function(U,V){var W=d.createElement('label');W.setAttribute('for','cke_option'+U);W.setHtml(V[U]);if(z.sLang==U)z.chosed_lang=U;var X=d.createElement('div'),Y=CKEDITOR.dom.element.createFromHtml('<input id="cke_option'+U+'" type="radio" '+(z.sLang==U?'checked="checked"':'')+' value="'+U+'" name="scayt_lang" />');Y.on('click',function(){this.$.checked=true;z.chosed_lang=U;});X.append(Y);X.append(W);return{lang:V[U],code:U,radio:X};};if(f[1]==1){for(F in A.rtl)D[D.length]=L(F,A.ltr);for(F in A.ltr)D[D.length]=L(F,A.ltr);D.sort(function(U,V){return V.lang>U.lang?-1:1;});var M=d.getById('scayt_lcol_'+e),N=d.getById('scayt_rcol_'+e);for(F=0;F<D.length;F++){var O=F<D.length/2?M:N;O.append(D[F].radio);}}var P={};P.dic_create=function(U,V,W){var X=W[0]+','+W[1],Y=c.err_dic_create,Z=c.succ_dic_create;window.scayt.createUserDictionary(V,function(aa){x(X);w(W[1]);Z=Z.replace('%s',aa.dname);v(Z);},function(aa){Y=Y.replace('%s',aa.dname);u(Y+'( '+(aa.message||'')+')');});};P.dic_rename=function(U,V){var W=c.err_dic_rename||'',X=c.succ_dic_rename||'';
window.scayt.renameUserDictionary(V,function(Y){X=X.replace('%s',Y.dname);y(V);v(X);},function(Y){W=W.replace('%s',Y.dname);y(V);u(W+'( '+(Y.message||'')+' )');});};P.dic_delete=function(U,V,W){var X=W[0]+','+W[1],Y=c.err_dic_delete,Z=c.succ_dic_delete;window.scayt.deleteUserDictionary(function(aa){Z=Z.replace('%s',aa.dname);x(X);w(W[0]);y('');v(Z);},function(aa){Y=Y.replace('%s',aa.dname);u(Y);});};P.dic_restore=z.dic_restore||(function(U,V,W){var X=W[0]+','+W[1],Y=c.err_dic_restore,Z=c.succ_dic_restore;window.scayt.restoreUserDictionary(V,function(aa){Z=Z.replace('%s',aa.dname);x(X);w(W[1]);v(Z);},function(aa){Y=Y.replace('%s',aa.dname);u(Y);});});function Q(U){var V=d.getById('dic_name_'+e).getValue();if(!V){u(' Dictionary name should not be empty. ');return false;}try{var W=U.data.getTarget().getParent(),X=/(dic_\w+)_[\w\d]+/.exec(W.getId())[1];P[X].apply(null,[W,V,j]);}catch(Y){u(' Dictionary error. ');}return true;};var R=(j[0]+','+j[1]).split(','),S;for(F=0,S=R.length;F<S;F+=1){var T=d.getById(R[F]);if(T)T.on('click',Q,this);}},t=function(){var z=this;if(f[0]==1){var A=l();for(var B=0,C=A.length;B<C;B++){var D=A[B].id,E=d.getById(D);if(E){A[B].checked=false;if(z.options[D.split('_')[0]]==1)A[B].checked=true;if(b)E.on('click',function(){z.options[this.getId().split('_')[0]]=this.$.checked?1:0;});}}}if(f[1]==1){var F=d.getById('cke_option'+z.sLang);n(F.$,z.sLang);}if(i){window.scayt.getNameUserDictionary(function(G){var H=G.dname;x(j[0]+','+j[1]);if(H){d.getById('dic_name_'+e).setValue(H);w(j[1]);}else w(j[0]);},function(){d.getById('dic_name_'+e).setValue('');});v('');}};function u(z){d.getById('dic_message_'+e).setHtml('<span style="color:red;">'+z+'</span>');};function v(z){d.getById('dic_message_'+e).setHtml('<span style="color:blue;">'+z+'</span>');};function w(z){z=String(z);var A=z.split(',');for(var B=0,C=A.length;B<C;B+=1)d.getById(A[B]).$.style.display='inline';};function x(z){z=String(z);var A=z.split(',');for(var B=0,C=A.length;B<C;B+=1)d.getById(A[B]).$.style.display='none';};function y(z){d.getById('dic_name_'+e).$.value=z;};return q;});
|
PypiClean
|
/signal_lab-0.1.tar.gz/signal_lab-0.1/components/api/src/signal_lab.py
|
__synopsis__ = "Signal and image processing utilities for the RSF file format"
__author__ = 'Sean Ross-Ross <[email protected]>'
import optparse
import os
import sys
import numpy as np
import subprocess
import stat
import warnings
import cPickle
import string
from time import ctime, time
import xml.dom.minidom as minidom
import c_signal_lab as cSlab #@UnresolvedImport
from numpy import little_endian #@UnresolvedImport
warnings.simplefilter('ignore', RuntimeWarning )
dp = os.environ.get( 'DATAPATH','.' )
dplog = os.environ.get( 'DATAPATHLOGFILE', True )
opt_datapath = optparse.Option( '--datapath',
default=dp, # default value
help="path to put binary data to" )
opt_dryrun = optparse.Option( '-n','--dryrun',
default=False, # default value.
action='store_true',
help="do not output data, only headers" )
pack_opt = optparse.Option( '-p','--pack',
default=False,
action='store_true',
help="pack header with binary data" )
accept_tty_opt = optparse.Option( '--accept-tty',
dest ='accept_tty',
default=False,
action='store_true',
help="accept a tty device from stdin/out" )
stdinopt = optparse.Option( '--stdin',
default="file:sys.stdin",
help='redirect input of stdin to file'
)
stdoutopt = optparse.Option( '--stdout',
default="file:sys.stdout",
help='redirect output of stdout to file'
)
fifoopt = optparse.Option( '--fifo',
default=False,
action='store_true',
help="create and output to a fifo device" )
# defines the ratio of bytes to kilobytes
bmap = {'B':1,'KB':2**10,'MB':2**20,'GB':2**30}
def btype( option, opt, value ):
if value.isdigit( ):
val = value
vtype = 'b'
elif value[-2:].isalpha( ):
val = value[:-2]
vtype = value[-2:]
elif value[-1:].isalpha( ):
val = value[:-1]
vtype = value[-1:]
nbytes = int(val) * bmap[vtype.upper()]
# if value.endswith( 'b' ):
# value = int( value[:-1] )
# elif value.endswith( 'b' )
print >> sys.stderr, "nbtypes", nbytes
return nbytes
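# For example (sketch): btype(None, '--buffer-size', '4KB') yields
# 4 * 2**10 == 4096, while a bare digit string such as '512' is taken as bytes.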
from copy import copy
class ByteSizeOption(optparse.Option):
TYPES = optparse.Option.TYPES + ("bytes",)
TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER["bytes"] = btype
buffer_sizeopt = ByteSizeOption( '-b', "--buffer-size",
default=None,
type='bytes',
help='buffer size for reading chunks of data',
)
local_bin = optparse.Option( '-l', "--local-binary",
dest='local_binary',
default=False,
action='store_true',
help='look up local binary files'
)
orderF = optparse.Option( "--order-fortran",
action='store_const',
const='F',
default='F',
dest='order',
help='process in Fortran ordering'
)
orderC = optparse.Option( "--order-C",
action='store_const',
const='C',
default='F',
dest='order',
help='process in C ordering'
)
replace_bin = optparse.Option( '-o',"--overwrite-binaries",
action='store_true',
default=True,
dest='replace_binaries',
help='overwrite rsf binary file if it already exists'
)
no_replace_bin = optparse.Option( '-k',"--no-overwrite-binaries",
action='store_false',
dest='replace_binaries',
help=('keep binary file if it already exists,\n'
'create a new temporary file instead\n'
'if the file "file.rsf@" exists, slab will create the \n'
'file file1.rsf@' )
)
verbose = optparse.Option( '-v',"--verbose",
action='store_true',
default=False,
dest='verbose',
help=( 'print verbose diagnostic of internal commands' )
)
datapath_logfile = optparse.Option( "--datapath-log",
#action='store_true',
default=dplog,
dest='datapath_log',
help=( 'data path log file name' )
)
no_datapath_logfile = optparse.Option( "--no-datapath-log",
action='store_false',
dest='datapath_log',
help=( "don't use data path logfile" )
)
permissive = optparse.Option( "--permissive",
action='store_true',
default=False,
dest='permissive',
help=( "SLAB will try its best to avoid throwing exceptions" )
)
werror = optparse.Option( "--werror",
action='store_true',
default=False,
help=( "SLAB will raise exceptions instead of warnings" )
)
slab_options = [opt_datapath,opt_dryrun,pack_opt,accept_tty_opt,stdinopt,stdoutopt,
fifoopt,buffer_sizeopt,local_bin,orderF,orderC,
verbose,replace_bin,no_replace_bin,
datapath_logfile,no_datapath_logfile,
permissive,werror
]
def values_to_dict( values ):
d1 = set(dir(values))
d2 = set(dir(optparse.Values()))
res = {}
for key in d1.difference(d2):
res[key] = getattr(values, key)
return res
class error(Exception):pass
class NoDefault( object ):pass
class Parameter( object ):
def __init__( self, name, type=str, default=NoDefault, help=None ):
self.name = name
if isinstance(type, tuple ):
self.type = type[0]
self.type_name = type[1]
elif isinstance(type,str):
self.type = type
self.type_name = type
else:
self.type = type
self.type_name = getattr(type, '__name__', str(type) )
self.default = default
self.help = help
def get_default(self):
if self.default is NoDefault:
if self.type_name in [ 'rsf input','rsf output' ]:
return self.name.upper( ) + ".rsf"
else:
return self.name.upper( )
else:
return self.default
class Environment( object ):
"""
A signal_lab.Environment object stores the command-line arguments and
options for signal_lab programs and other objects.
"""
@classmethod
def option_help(cls,option, opt, value, parser):
"""
Print the optparse standard help message and exit
"""
print parser.format_help( )
raise SystemExit
def user_help(self, option, opt, value, parser ):
'''
print help for main program
'''
prog = os.path.basename(self.prog)
print 'Name:'
print
print ' ',prog
print
if self.help:
print 'Description:'
print
print "\n".join( [ " " + line.strip() for line in self.help.splitlines() if line.strip()] )
print
print 'Synopsis:'
print
print ' ', prog,
if self.use_stdin:
print "< in.rsf",
print " ".join( [ str(i) for i in self.inputs ] ),
print " ".join( [ "%s=%s" %( arg.name, arg.get_default() ) for arg in self.user_arguments] ),
for opt in self.user_options:
if opt.action == 'store':
key = opt.get_opt_string( )
if opt.default != ('NO', 'DEFAULT'):
value = opt.default
else:
value = str(opt.dest).upper()
if opt._long_opts:
print "%s=%s" %(key,value),
else:
print "%s %s" %(key,value),
else:
print opt.get_opt_string(),
if self.use_stdout:
print "> out.rsf"
else:
print
if self.user_arguments:
user_arguments = [ ( par.type_name, par.name, par.help ) for par in self.user_arguments ]
max_type_name_size = max([ len(p) for (p,_,_) in user_arguments])
max_name_size = max([ len(p) for (_,p,_) in user_arguments])
spacer = '\n %%-%is | %%-%is | ' %(max_type_name_size,max_name_size) %('','')
#spacer = "\n"+" "*(max_type_name_size+max_name_size+10)
def format_help( text):
if not text:
return "No Documentation"
if len(text.splitlines( )) ==1:
return text
text = " ".join( text.splitlines( ) ).split()
new_text = []
last_line = []
linelen = 0
numlines = 0
while text:
word = text.pop(0)
if linelen+len(word) > 60:
new_text.append(" ".join(last_line))
last_line=[ ]
linelen=0
linelen += len(word)+1
last_line.append(word)
numlines+=1
if numlines>1:
new_text.append("")
return spacer.join(new_text)
user_arguments = [ ( type_name, name, format_help(help) ) for ( type_name, name, help ) in user_arguments ]
string_form_a = ' %%-%is | %%-%is | %%s' %(max_type_name_size,max_name_size)
string_form_b = ' %%-%is + %%-%is + %%s' %(max_type_name_size,max_name_size)
print
print 'Parameters:'
print
print string_form_a% ( 'type','name' ,'help' )
print string_form_b% ( '-'*max_type_name_size,'-'*max_name_size ,'-'*60 )
for par in user_arguments:
print string_form_a % par
print
print 'Use "%s -H" for help about command-line options' %prog
print
raise SystemExit
@classmethod
def user_manpage(cls,option, opt, value, parser):
print "User Man"
def __init__( self, args , help=None, inputs=None, user_arguments=None, user_options=None, use_stdin=False,use_stdout=False, **options ):
'''
*args*: list of string arguments
*help*: help text to print when '-h' option is in *args*
*inputs*: list of input names for help
*user_arguments*: list of *signal_lab.Parameter* objects
*user_options*: list of optparse.Option instances
*use_stdin*: for help output
*use_stdout*: for help output
*options*: overwrite options from *args*
'''
if not args:
args = [ os.path.basename(sys.argv[0]) ]
self.prog = args[0]
args = args[1:]
parser = optparse.OptionParser( epilog=__doc__, add_help_option=False )
if user_options is None:
user_options = [ ]
else:
user_options = list( user_options )
if user_arguments is None:
user_arguments = [ ]
else:
user_arguments = list( user_arguments )
if inputs is None:
self.inputs = []
else:
self.inputs = list( inputs )
self.user_options = user_options
self.user_arguments = user_arguments
self.help = help
self.use_stdin = use_stdin
self.use_stdout = use_stdout
syshelp = optparse.Option( '-H','--Help',
action='callback',
callback=self.option_help
)
userhelp = optparse.Option( '-h','--help',
action='callback',
callback=self.user_help
)
userman = optparse.Option( '--manpage',
action='callback',
callback=self.user_manpage
)
slab_help_options = [ syshelp, userhelp,userman ]
helpgroup = optparse.OptionGroup( parser, 'SLAB help' )
helpgroup.add_options( slab_help_options )
sysgroup = optparse.OptionGroup( parser, 'SLAB built-in options' )
sysgroup.add_options(slab_options )
usergroup = optparse.OptionGroup( parser, 'Program specific options' )
usergroup.add_options( user_options )
parser.add_option_group( sysgroup )
parser.add_option_group( usergroup )
parser.add_option_group( helpgroup )
values, newargs = parser.parse_args(list(args))
kw = {}
for argdef in self.user_arguments:
key = argdef.name
value = argdef.default
if value is NoDefault:
continue
else:
kw[key] = value
xargs = []
for arg in newargs:
if '=' in arg:
key,val = arg.split( '=', 1 )
kw[key] = val
if len(val) == 0:
raise Exception( "Expected a value afer '%s=' on command line, got ''"%key )
else:
xargs.append( arg )
for argdef in self.user_arguments:
key = argdef.name
etype = argdef.type
if etype is not None and key in kw:
value = kw[key]
try:
value = etype(value)
except:
kname = getattr(etype, '__name__', str(etype) )
raise KeyError( "could not read argument '%s=' from command line with expected type '%s got: %s ' " %(key,kname,value) )
self.args = xargs
self.kw = kw
self.options = values_to_dict( values )
self.options.update( options )
if self.verbose:
print >> sys.stderr, "SLAB: Creating Verbose Environment:"
for key,val in self.options.items():
print >> sys.stderr, "%20s = %r" %(key,val)
datapath_log = self.options['datapath_log']
datapath = self.options['datapath']
if datapath_log is True:
self.options['datapath_log'] = os.path.join(datapath,'.dp_log')
if self.user_arguments:
uargs = set( [u.name for u in self.user_arguments] )
gargs = set( kw.keys( ) )
diff = gargs.difference( uargs )
if diff:
print >> sys.stderr, ("signal_lab warning: Got unexpected user arguments ( '%s=' )" %("=', '".join(diff)))
return
def copy(self):
"""
returns a copy of this environment
"""
env = Environment(None)
env.kw = self.kw.copy()
env.options = self.options.copy()
env.args = list(self.args)
return env
def _sl_getint(self,value):
return int(self.kw[value])
def _sl_getfloat(self,value):
return float(self.kw[value])
def _sl_getstring(self,value):
return str(self.kw[value])
def get_bool(self,value, kw=None ):
warnings.warn( 'method env.get_bool is deprecated, please use get_eval with etype=env._bool', DeprecationWarning )
if kw:
return self._bool(kw[value])
return self.get_eval( value, etype=self._bool )
@classmethod
def _bool(cls,value):
'''
If value is a string, 'N', 'NO', 'NONE' and 'FALSE' evaluate to False,
and 'YES', 'Y' and 'TRUE' evaluate to True; otherwise the string is
evaluated as a Python expression.
'''
if isinstance( value, str ):
if value.upper() in ['N','NO','NONE','FALSE']:
return False
elif value.upper() in ['YES','Y','TRUE']:
return True
else:
return bool( eval(value) )
else:
return bool(value)
def get_eval( self, *args, **kwargs ):
'''
env.get_eval( key [, default] [, etype=eval ] )
'''
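# Sketch: with "n=10" on the command line, env.get_eval('n', 1, etype=int)
# returns 10; when no "n=" is given, the default 1 is returned instead.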
if len(args) not in [1,2]:
raise TypeError( 'Environment.get_eval takes 1 or 2 arguments (%i given)' %len(args) )
key = args[0]
if isinstance(key, str ):
edefault = eval
else:
edefault = lambda args : args
etype = kwargs.get( 'etype' , edefault )
if key in self.kw:
value = self.kw[key]
try:
result = etype( value )
except:
kname = getattr(etype, '__name__', str(etype) )
raise KeyError( "could not read key '%s=' from command line arguments with constructor '%s( %s )' " %(key,kname,value) )
if self.verbose: print >> sys.stderr, "Eval Command line: %s=%r type '%s' " %( value, result, type(result) )
return result
else:
if len(args) == 2:
return args[1]
else:
raise KeyError( "no key '%s=' in command line arguments" %key )
def get_sequence( self, id , omit=None, type=eval ):
"""
get a sequence of parameters from the command line.
eg. for min1=2 min2=3
>>> env.get_sequence( 'min%i' , type=int )
[2,3]
"""
x = []
ndim = 0
while id%(ndim+1) in self.kw.keys():
key = id%(ndim+1)
if omit is not None and self.kw[key] == omit:
pass
else:
x.append( type( self.kw[key] ) )
ndim+=1
if self.options['order'] == 'C':
x.reverse( )
return tuple(x)
def _get_verbose( self ):
return self.options.get('verbose',False)
def _set_verbose( self, value ):
self.options['verbose'] = value
verbose = property( _get_verbose, _set_verbose, doc="print verbose output if true" )
def subst(self, strexpr ):
'''
Substitute names of the form '$name' with values from the keyword arguments and global options.
'''
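# Sketch: with "out=result.rsf" on the command line, env.subst('file:$out')
# returns 'file:result.rsf'; an unknown tag raises a signal_lab.error.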
template = string.Template( strexpr )
template_dict = {}
template_dict.update( self.kw )
template_dict.update( self.options )
try:
result = template.substitute( **template_dict )
except KeyError, ke:
key = ke.args[0]
raise error( "while substituting tag '%s', no Key '%s=' in command line arguments or signal_lab options" %(strexpr,key) )
return result
def warn( self, msg ):
'''
Print msg as a warning when running with --permissive set, otherwise
raise a signal_lab.error.
'''
if self.options['permissive']:
print >> sys.stderr, "SLAB WARNING:",msg
sys.stderr.flush( )
else:
raise error( msg )
return
def __getitem__(self,item):
"""
Get an argument from the command line.
If item is an int, return the non-keyword argument at that index.
Otherwise, return the keyword argument with the key item.
"""
if isinstance(item, int):
result = self.args[item]
if self.verbose: print >> sys.stderr, "getting argument at idx %s, '%s' from command line" %(item,result)
else:
if item not in self.kw:
msg = "No '%s=' was specified on the command line" %item
raise KeyError( msg )
result = self.kw[item]
if self.verbose: print >> sys.stderr, "getting key-word argument %s=%s from command line" %(item,result)
if isinstance(result, str):
try:
result = eval(result)
except:
pass
return result
@classmethod
def _check_input_(cls,fname):
fname = str(fname)
if not os.path.isfile( fname ):
raise error( "file %s does not exist" %fname )
return fname
@classmethod
def _check_rsf_input_(cls,fname):
fname = str(fname)
if not os.path.isfile( fname ):
raise error( "file %s does not exist" %fname )
_,suffix = os.path.splitext(fname)
if suffix.lower() not in['.rsf','.h','.hh']:
print >> sys.stderr(" warning RSF input does not conform to standard extension '.rsf' " )
return fname
_check_input = (_check_input_,'input')
_check_output = (str,'output')
_check_rsf_input = (_check_rsf_input_,'rsf input')
_check_rsf_output = (str,'rsf output')
def index_to_pos( shape, indexing, order='F' ):
index = np.array( indexing, dtype=np.int64 )
cp = np.cumprod( shape, dtype=np.int64 ) #@UndefinedVariable
cps = np.sum( index[1:]*cp[:-1],dtype=np.int64) #@UndefinedVariable
return long(cps+index[0]) #@UndefinedVariable
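# Sketch: index_to_pos((10, 5), (3, 2)) == 2 * 10 + 3 == 23, i.e. the
# Fortran-order linear offset of index (3, 2) in a 10-by-5 grid.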
class File( object ):
"""
A slab.File object represents the RSF file format and deals with
the separate header and binary files.
"""
__OPENED_HEADER_FILES__ = 0
__FINALIZED_FILES__ = 0
terminate = u'\x0c\x0c\x04'
_type_map = {
"shortint": { 2:'int16'},
"int": { 4:'int32'},
"longint": { 8: 'int64'},
"float": { 4:'float32'},
"double": { 8:'float64'},
"longdouble": { 16: 'float128'},
"complex": { 8:'complex64'},
"complexdouble": { 16:'complex128'},
"complexlongdouble": { 32: 'complex256'},
}
_dtype_map = {
'int16' :("shortint", 2),
'int32' :("int", 4),
'int64' :("longint", 8),
'float32' :("float", 4),
'float64' :("double", 8),
'float128' :("longdouble", 16),
'complex64' :("complex", 8),
'complex128' :("complexdouble", 16),
'complex256' :("complexlongdouble", 32),
}
def __init__( self, tag, input=True, env=None, header_only=False, **options ):
self._changed_header_keys = {}
self._kw_comments = {}
self._comments = []
self._to_delete =[]
self.nb_read = 0
self.nb_written = 0
self.tag = tag
self.header_only = header_only
if input is True:
if env and env.verbose: print >> sys.stderr, "Initializing input: '%s'" %(tag)
self.init_input( tag, env=env, **options )
else:
self.init_output( tag, input=input, env=env, **options )
if self.env.verbose: print >> sys.stderr, "Creating output: '%s' from='%s'" %(tag, getattr(input, 'tag', 'new' ) )
return
def init_input(self, tag, env=None, **options ):
if env is None:
self.env = Environment( [] )
else:
self.env = env.copy( )
self.env.options.update( options )
self.header = self.env.subst( tag )
#if tag == 'in':
# tag = env.options.get( 'stdin', 'stdin' )
if self.header == 'file:sys.stdin':
#header = 'stdin'
header_file = sys.stdin
self.packed = True
isatty = header_file.isatty( )
accept_tty = self.env.options.get( 'accept_tty', False )
if isatty and not accept_tty:
raise Exception("can not accept tty device '%s' as an rsf file" %self.header)
elif self.header == 'file:sys.stdout':
raise error( 'can not initialize header input file with stdout' )
else:
read_permissions = self.env.options.setdefault( 'header_in_mode' , 'r' )
header_file = open( self.header, read_permissions )
self.packed = False
self.header_file = header_file
header_keys,history = File.read_header(self.header_file,self.env)
self.history = history
self._header_keys = header_keys
self.is_input = True
self.finalized = True
self.binary = self['in']
if self.header_only:
binary_file = None
else:
if self.binary == 'stdin':
binary_file = self.header_file
self.packed = True
else:
read_permissions = self.env.options.setdefault( 'binary_in_mode' , 'r' )
########################################################
## RSF local path name in binary file
########################################################
if not os.path.isfile(self.binary):
if (os.path.abspath( self.binary ) != self.binary) and self.env.options['local_binary']:
self.binary = os.path.join( os.path.dirname(self.header), self.binary )
########################################################
########################################################
binary_file = open(self.binary, read_permissions)
self.binary_file = binary_file
if not self.packed:
self.header_file.close( )
File.__OPENED_HEADER_FILES__+=1
File.__FINALIZED_FILES__+=1
dstat = os.fstat(self.binary_file.fileno())
stat_fifo = stat.S_ISFIFO( dstat.st_mode )
stat_size = dstat.st_size
#print "ISREG",stat.S_ISREG( dstat.st_mode )
if stat_fifo:
self.original_tell_pos = -1
else:
if stat_size != self.nbytes:
self.env.warn( "size of binary '%s' is not as expected (%i != %i)" %(self.binary,stat_size,self.nbytes) )
self.original_tell_pos = self.binary_file.tell()
def _get_is_fifo(self):
dstat = os.fstat( self.binary_file.fileno() )
return stat.S_ISFIFO( dstat.st_mode )
is_fifo = property( _get_is_fifo )
def reset_binary(self):
if self.is_fifo:
raise Exception( "'%s' is a fifo device, can not reset" %self.header )
self.binary_file.seek( self.original_tell_pos )
return self.original_tell_pos
def init_output(self,tag, input=False, env=None, **options):
if isinstance(input, File ):
if env is None:
self.env = input.env.copy()
else:
self.env= env.copy()
self._header_keys = input._header_keys.copy( )
self.history = list(input.history)
else:
if env is None:
self.env = Environment( [] )
else:
self.env= env.copy( )
self._header_keys = { }
self.history = [ ]
self.env.options.update( options )
pack = self.env.options.get( 'pack', False )
self.packed = pack
self.header_file = None
self.binary_file = None
self.finalized = False
self.is_input = False
self.binary_file = None
#if tag == 'out':
# tag = self.env.options.get( 'stdout','out' )
#if tag == 'out':
# header = 'stdin'
#else:
# header = tag
self.tag = tag
#self.header = header
File.__OPENED_HEADER_FILES__+=1
return
def header_is_reg(self):
return stat.S_ISREG( os.fstat(self.header_file.fileno())[0] )
def _open_output(self):
header = self.env.subst( self.tag )
if header == 'file:sys.stdout':
self.header = 'stdout'
header_file = sys.stdout
isatty = header_file.isatty()
accept_tty = self.env.options.get( 'accept_tty', False )
if isatty and not accept_tty:
raise Exception("can not accept tty device '%s' as an rsf file" %self.header )
elif header == 'file:sys.stdin':
raise error( 'can not initialize header output file with stdin file descriptor' )
else:
if self.env.options.get( 'fifo', False ):
os.mkfifo(self.header)
self.header = header
mode = self.env.options.setdefault( 'header_out_mode' , 'w' )
header_file = open( header, mode )
fd = header_file.fileno()
if stat.S_ISREG( os.fstat(fd)[0] ) and not self.packed:
if header == 'file:sys.stdout':
self.header = self.get_open_file_name( header_file )
else:
self.header = header
self.packed = False
header_name = os.path.split( self.header )[-1]
datapath = self.env.options.get('datapath', '.' )
binary = os.path.join( datapath, header_name ) + '@'
bin_exists = os.path.isfile(binary)
if not self.env.options['replace_binaries']:
binary_pt = os.path.splitext(binary)[0]
count = 0
if bin_exists and self.env.verbose: print >> sys.stderr,"File '%s' exists. " %(binary),
while bin_exists:
count+=1
binary = binary_pt + '%i.rsf@' % count
bin_exists = os.path.isfile(binary)
if count and self.env.verbose: print >> sys.stderr, "Choosing '%s' as new binary file name" %(binary)
elif bin_exists and self.env.verbose: print >> sys.stderr, "Overwriting existing binary file '%s'" %(binary)
else:
self.packed = True
binary = 'stdin'
#self.header = header
self.header_file = header_file
self.binary = binary
self._header_keys['in'] = binary
self._changed_header_keys['in'] = binary
def comment(self, *args, **kw):
"""
Add comments to the header file. If a keyword argument is given,
the comment will be placed above that keyword in the header file.
"""
self._comments.extend(args)
self._kw_comments.update(kw)
def close(self, doraise=True ):
"""
close both the header and binary files.
"""
if self.original_tell_pos != -1:
self.last_tell_pos = self.binary_file.tell()
self.header_file.close()
self.binary_file.close()
if self.original_tell_pos != -1:
nbytes = self.nbytes
nreadwrite = self.last_tell_pos-self.original_tell_pos
if (doraise and nreadwrite!=nbytes):
msg = "number of bytes %s is not equal to the number of bytes in the binary file (%i != %i)" %(self.is_input and "read" or "written", nreadwrite,nbytes )
msg+="\n : for header '%s' binary '%s'" %(self.header, self.binary)
self.env.warn( msg )
#
# def __del__( self ):
#
# self.close( )
_trace_header = None
def _get_trace_header( self ):
if self._trace_header is None:
self._trace_header = File( self['head'], env=self.env )
return self._trace_header
def _set_trace_header( self, value ):
self._trace_header = value
if isinstance(value, str):
self['head'] = value
else:
self['head'] = str( value.header )
trace_header = property( _get_trace_header, _set_trace_header,
doc="Return the trace header given by head= in the header file as a slab.File object" )
def _get_h_abspath(self):
return os.path.abspath(self.header)
header_abspath = property( _get_h_abspath, doc='absolute file path to the header' )
def _get_h_dirname(self):
return os.path.normpath(os.path.dirname(self.header))
header_dir = property( _get_h_dirname, doc='directory in which the header resides' )
def _get_h_basename(self):
return os.path.basename(self.header)
header_base = property( _get_h_basename, doc='name of the header, minus any directory portion' )
def _get_h_filebase(self):
return os.path.splitext(self.header_base)[0]
header_filebase = property( _get_h_filebase,
doc='Just the basename of the header file, minus any suffix and minus the directory.' )
def _get_b_dirname(self):
return os.path.dirname(self.binary)
binary_dir = property( _get_b_dirname ,doc='directory in which the binary resides')
def _get_b_basename(self):
return os.path.basename(self.binary)
binary_base = property( _get_b_basename,doc='name of the binary, minus any directory portion' )
def _get_b_filebase(self):
return os.path.splitext(self.binary)[0]
binary_filebase = property( _get_b_filebase,
doc='Just the basename of the binary file, minus any suffix and minus the directory.' )
def _get_b_abspath(self):
return os.path.abspath(self.binary)
binary_abspath = property( _get_b_abspath, doc='absolute file path to the header' )
def _log_output(self):
dp_log = self.env.options['datapath_log']
if (not dp_log) or self.packed:
return
impl = minidom.getDOMImplementation()
if not os.path.isfile( dp_log ):
try:
doc = impl.createDocument(None,"SLAB_DATA_LOG",None)
flog = open(dp_log, 'w' )
flog.write( doc.toprettyxml() )
flog.close()
except Exception,e:
warnings.warn( "Trouble initializing xml datapath logfile, reason: '%s'" %e , Warning, 3 )
return
st_mtime = os.stat(dp_log).st_mtime
while 1:
try:
flog = open( dp_log, 'r' )
doc = minidom.parse( flog )
flog.close()
except Exception,e:
warnings.warn( "Trouble reading xml datapath logfile, reason: '%s'" %e , Warning, 3 )
return
root = doc.firstChild
header_abspath = self.header_abspath
binary_abspath = self.binary_abspath
for element in root.getElementsByTagName('binary_header_pair'):
if header_abspath == element.getAttribute('header'):
if binary_abspath == element.getAttribute('binary'):
return # Already have this element
for element in root.childNodes:
if element.nodeType == doc.TEXT_NODE:
root.removeChild( element )
element = doc.createElement( 'binary_header_pair' )
element.setAttribute( 'header', header_abspath )
element.setAttribute( 'binary', binary_abspath )
root.appendChild( element )
next_st_mtime = os.stat(dp_log).st_mtime
if st_mtime != next_st_mtime:
continue
flog = open(dp_log, 'w+' )
doc.writexml( flog, indent=' ' , addindent='', newl='\n' )
flog.close( )
break
def finalize(self):
"""
For outputs only.
Header file is created and written to. Binary file is created.
"""
if self.is_input:
raise Exception( 'can not finalize input')
if self.finalized:
raise Exception( 'header output is already written' )
self._open_output( )
self._log_output( )
self.finalized = True
File.__FINALIZED_FILES__+=1
self.header_file.writelines(self.history)
try:
user = os.getlogin()
except:
user = os.environ.get( 'USER', 'unknown' )
if self.env.verbose: print >> sys.stderr, "writing header: '%s'" %(self.header)
loging = "%s@%s"%( user, os.uname()[1] )
print >> self.header_file, "#",self.env.prog, loging, ctime(time())
print >> self.header_file, "#"
print >> self.header_file, "# Created with the command: '"," ".join( sys.argv ),"'"
print >> self.header_file, "#"
for comment in self._comments:
print >> self.header_file, "#",comment.replace("\n", "\n# ")
for item in self._changed_header_keys.items():
toprint = ("\t%s=%r" %item).replace("'",'"')
if item[0] in self._kw_comments:
print >> self.header_file,"\t# '%s':"%item[0], self._kw_comments[item[0]].replace("\n", "\n\t# ")
print >> self.header_file, toprint
for item in self._to_delete:
print >> self.header_file, "@del", item
if self.binary == 'stdin':
self.header_file.write(self.terminate)
self.header_file.flush( )
if not self.packed:
self.header_file.close( )
if self.env.options.get('dryrun'):
do_exit = File.__OPENED_HEADER_FILES__ == File.__FINALIZED_FILES__
if do_exit:
print >> sys.stderr, "Dryrun exit"
raise SystemExit( 0 )
if self.binary == 'stdin':
binary_file = self.header_file
else:
mode = self.env.options.setdefault( 'binary_out_mode' , 'w' )
binary_file = open( self.binary, mode )
self.binary_file = binary_file
dstat = os.fstat(self.binary_file.fileno())
stat_fifo = stat.S_ISFIFO( dstat.st_mode )
if stat_fifo:
self.original_tell_pos = -1
else:
self.original_tell_pos = self.binary_file.tell()
return self
def __delitem__(self, item ):
self._to_delete.append(item)
if item in self._header_keys:
self._header_keys.__delitem__(item)
if item in self._changed_header_keys:
self._changed_header_keys.__delitem__(item)
return
def items(self):
return self._header_keys.items( )
def keys(self):
return self._header_keys.keys( )
def values(self):
return self._header_keys.values( )
def __getitem__(self,item):
return self._header_keys[item]
def __setitem__(self,item,value):
if self.is_input:
raise Exception( "could not set header value of input file" )
if self.finalized:
raise Exception( "could not set header value, header is already finalized" )
self._header_keys[item] = value
self._changed_header_keys[item] = value
def __repr__(self):
try:
tag = self.tag
except:
tag ='error'
try:
shape = self.shape
except:
shape ='[error]'
try:
type = self.type
except:
type ='error'
return "<signal_lab.File tag=%r shape=%r type=%s>" %(tag,shape,type)
def get( self, item, failobj=None ):
return self._header_keys.get(item,failobj)
def _get_ndim(self):
ndim = 0
while "n%s"%(ndim+1) in self.keys():
ndim+=1
return ndim
ndim = property( _get_ndim )
def get_sequence(self, id , omit=None ):
x = []
ndim = 0
doomit = False
while id%(ndim+1) in self.keys():
key = id%(ndim+1)
if doomit and omit is not None and self[key] == omit:
pass
else:
doomit=True
x.append( self[key] )
ndim+=1
if self.env.options['order'] == 'C':
x.reverse( )
return tuple(x)
def set_sequence(self, id , sequence ):
if self.env.options['order'] == 'C':
sequence = list(sequence)
sequence.reverse( )
for i,item in enumerate(sequence):
key = id %(i+1)
self[key] = item
return
def _get_size(self):
return int(np.prod( self.shape, dtype=np.int64 ))
size = property(_get_size)
def _get_nbytes(self):
return long(self.esize)*long(self.size)
nbytes = property( _get_nbytes )
def leftsize(self,left):
return np.prod( self.shape[left:], dtype=np.int64 )
def _get_shape(self):
return self.get_sequence( 'n%s' , omit=1 )
def _set_shape(self, value ):
return self.set_sequence( 'n%s' , value, )
shape = property( _get_shape ,_set_shape )
def _get_origin(self):
return self.get_sequence( 'o%s' )
def _set_origin( self, value ):
return self.set_sequence( 'o%s', value )
origin = property( _get_origin, _set_origin )
def _get_step(self ):
return self.get_sequence( 'd%s' )
def _set_step(self , value ):
return self.set_sequence( 'd%s' , value )
step = property( _get_step, _set_step )
def _get_labels(self ):
return self.get_sequence( 'label%s' )
def _set_labels(self, value ):
return self.set_sequence( 'label%s' ,value)
labels = property( _get_labels, _set_labels )
def _get_units(self ):
return self.get_sequence( 'unit%s' )
units = property( _get_units )
def _get_order(self):
return self.env.options['order']
order = property( _get_order )
def _get_type(self):
_, type = self['data_format'].split('_',1)
return type
def _set_type(self,value):
form , _ = self.get('data_format','native_NONE').split('_',1)
self['data_format'] = "_".join([form , value])
return type
type = property( _get_type, _set_type )
def _get_form( self ):
form , _ = self['data_format'].split('_',1)
return form
form = property( _get_form )
def _get_esize(self):
return self['esize']
def _set_esize(self,value):
self['esize'] = value
esize = property( _get_esize,_set_esize )
@classmethod
def get_open_file_name( cls, open_file ):
command = 'lsof -a +p %i -Fin'
pid = os.getpid()
st_inode = os.fstat( open_file.fileno() )[1]
p0 = subprocess.Popen( command %(pid) , shell=True, stdout=subprocess.PIPE ,stderr=subprocess.PIPE )
err= p0.wait()
if err:
return os.tmpnam()+'.rsf'
res = p0.stdout.read( ).split( '\n' )
inode = "i%s" %st_inode
if inode in res:
of = res[ res.index( "i%s" %st_inode )+1]
return of[1:]
else:
return os.tmpnam()+'.rsf'
@classmethod
def read_header( cls, open_file,env ):
header_keys = {}
history = []
pop = ''
lineno = 0
while 1:
lineno+=1
pop = open_file.read( 1 )
if pop == '':
break
if pop == '\n':
continue
if pop == cls.terminate[0]:
term = open_file.read( 2 )
if term != cls.terminate[1:]:
raise Exception( 'Header not properly formatted: first byte of termination sequence found but not the next two' )
break
line = open_file.readline( )
line = pop + line
line = line.split("#",1)[0]
history.append( line )
if '=' in line:
line = line.strip( )
key, value = line.split( '=', 1 )
#workaround for non-standard headers like sep *.H files
# sets next: in= "*.H"
key = key.split( )[-1]
try:
value_new = eval(value)
value = value_new
except:
msg = "SLAB WARNING: could not guess type of header key %s=%s, defaults to string: file '%s': line %i" %( key, value, open_file.name, lineno )
print >> sys.stderr, msg
header_keys[key] = value
elif "@del" in line:
words = [word.strip("\t ,") for word in line.split("@del",1)[-1].strip(" ,()\n").split(" ")]
for word in words:
if word in header_keys:
del header_keys[word]
# print "words", words
return header_keys,history
def _get_dtype(self):
typename = self._type_map[self.type][self.esize]
dt = np.dtype(typename)
if self.byteswap:
dt = dt.newbyteorder('S')
return dt
def _set_dtype( self, dtype ):
type,esize = File._dtype_map[str(dtype)]
self.esize = esize
self['data_format'] = "native_%s" %(type)
def _get_basename(self):
bname = os.path.basename( self.header )
return os.path.splitext(bname)[0]
basename = property( _get_basename )
dtype = property( _get_dtype, _set_dtype )
def __array__( self , dtype=None ):
if dtype and dtype != self.dtype:
raise Exception("can not cast memory map to new dtype got %s, expected 'dtype=' None or %s" %(dtype,self.dtype) )
if self.packed:
raise Exception( 'Can not create numpy.memmap from fifo/packed header and binary' )
if self.is_input:
mode = self.env.options.get( 'binary_in_mode', 'r' )
else:
if not self.finalized:
raise Exception("File.finalize must be called before calling File.__array__ method for output arrays")
mode = self.env.options.get( 'binary_out_mode', 'write' )
if mode=='w': mode = 'write'
array = np.memmap( self.binary, mode=mode, shape=self.shape, dtype=self.dtype, order=self.order )
return array
def to_array( self, error=True ):
array = np.zeros( shape=self.shape, dtype=self.dtype, order=self.order )
self.into_array(array, error)
return array
def into_array( self, array, error=True ):
buffer = array.data
nbread = self.binary_file.readinto( buffer )
if error and ( nbread != len(buffer) ):
raise IOError( "number of bytes read does not equal the number requested ( %s != %s)" %(nbread, len(buffer)) )
return
def from_array(self,array):
self.binary_file.write( array.data )
def from_coords(self, value , error=True ):
N = self.shape
O = self.origin
D = self.step
result = np.divide(np.subtract(value,O), D).astype(int)
if error:
greater_zero = np.all( np.greater_equal( result, 0 ) )
less_N = np.all( np.less( result, N ) )
if not (greater_zero and less_N):
raise IndexError("coordinate outside range of data %s" %(result) )
return result.astype(int)
def to_coords(self, value ):
N = self.shape
O = self.origin
D = self.step
result = np.zeros( len(value) )
np.multiply( value, D , result )
result = np.add( result, O , result)
return result
def readinto( self, buffer, error=True ):
if hasattr( buffer, 'data' ):
buffer = buffer.data
count = len(buffer)
nr = self.binary_file.readinto( buffer )
self.nb_read += nr
if nr != count:
msg = "binary file '%s' from header '%s': could not read past %i bytes, expected %i" %(self.binary_abspath,self.header_base,self.nb_read,self.nbytes)
if error == 'warn':
warnings.warn( msg , UserWarning )
if bool(error):
raise Exception( msg )
else:
pass
return nr
def _get_endian(self):
if self.form == 'native':
return little_endian
elif self.form == 'xdr':
return False
elif self.form == 'big':
return False
else:
return True
endian = property( _get_endian )
def do_byteswap(self):
return not little_endian == self.endian
def _get_byteswap(self):
if hasattr(self, '_byteswap_data' ):
return self._byteswap_data
else:
return self.do_byteswap( )
def _set_byteswap(self,value):
self._byteswap_data = bool(value)
byteswap = property( _get_byteswap,_set_byteswap )
def read(self, buffer=None, count=-1, doraise=True ):
return cSlab.read( self.binary_file, buffer=buffer, count=count, esize=self.esize, byteswap=self.byteswap )
## Functions for c api
def _sl_getint(self,value):
return int(self[value])
def _sl_getfloat(self,value):
return float(self[value])
def _sl_getstring(self,value):
return str(self[value])
trace_header_keys = ['tracl', 'tracr', 'fldr', 'tracf', 'ep', 'cdp', 'cdpt',
'trid', 'nvs', 'nhs', 'duse', 'offset', 'gelev', 'selev',
'sdepth', 'gdel', 'sdel', 'swdep', 'gwdep', 'scalel',
'scalco', 'sx', 'sy', 'gx', 'gy', 'counit', 'wevel', 'swevel',
'sut', 'gut', 'sstat', 'gstat', 'tstat', 'laga', 'lagb', 'delrt',
'muts', 'mute', 'ns', 'dt', 'gain', 'igc', 'igi', 'corr', 'sfs',
'sfe', 'slen', 'styp', 'stas', 'stae', 'tatyp', 'afilf', 'afils',
'nofilf', 'nofils', 'lcf', 'hcf', 'lcs', 'hcs', 'year', 'day',
'hour', 'minute', 'sec', 'timbas', 'trwf', 'grnors', 'grnofr',
'grnlof', 'gaps', 'otrav']
@classmethod
def get_number_keys(cls):
return len(cls.trace_header_keys)
@classmethod
def get_key_index(cls, *p ):
if len(p) < 1 or len(p) > 2:
raise TypeError("File.get_key_index takes 1 or 2 arguments (got %i)" %len(p) )
key_name = p[0]
try:
idx = cls.trace_header_keys.index( key_name )
except ValueError:
if len(p) ==2:
return p[1]
else:
raise ValueError( "Unknown trace header key name '%s'" %(key_name,) )
return idx
def serialize(self):
newd = dict(self.__dict__)
newd['binary_file'] = None
newd['header_file'] = None
result = cPickle.dumps( newd )
return result
def unserialize( self, obj ):
newd = cPickle.loads( obj )
self.__dict__.update( newd )
def seek_to_trace( self, index ):
bin = self.binary_file
pos = index_to_pos( self.shape, [0]+list(index) )
bin.seek( pos, 0 )
return pos
|
PypiClean
|
/ixnetwork_restpy-1.1.10.tar.gz/ixnetwork_restpy-1.1.10/ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/customthroughput_43392709d9d2c48c1b386a084571fdf5.py
|
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class CustomThroughput(Base):
"""Signifies the custom throughput for quick test.
The CustomThroughput class encapsulates a list of customThroughput resources that are managed by the user.
A list of resources can be retrieved from the server using the CustomThroughput.find() method.
The list can be managed by using the CustomThroughput.add() and CustomThroughput.remove() methods.
"""
__slots__ = ()
_SDM_NAME = "customThroughput"
_SDM_ATT_MAP = {
"ForceApplyQTConfig": "forceApplyQTConfig",
"InputParameters": "inputParameters",
"Mode": "mode",
"Name": "name",
}
_SDM_ENUM_MAP = {
"mode": ["existingMode", "newMode"],
}
def __init__(self, parent, list_op=False):
super(CustomThroughput, self).__init__(parent, list_op)
@property
def LearnFrames(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_e1fcc4f238fe74bdb100136e7501dc3b.LearnFrames): An instance of the LearnFrames class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_e1fcc4f238fe74bdb100136e7501dc3b import (
LearnFrames,
)
if len(self._object_properties) > 0:
if self._properties.get("LearnFrames", None) is not None:
return self._properties.get("LearnFrames")
return LearnFrames(self)._select()
@property
def PassCriteria(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_e1de2400895bf20f9cfb51f57a7273ff.PassCriteria): An instance of the PassCriteria class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_e1de2400895bf20f9cfb51f57a7273ff import (
PassCriteria,
)
if len(self._object_properties) > 0:
if self._properties.get("PassCriteria", None) is not None:
return self._properties.get("PassCriteria")
return PassCriteria(self)._select()
@property
def Results(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_8ddcc8214bf629f8c6d75cf917d2de74.Results): An instance of the Results class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_8ddcc8214bf629f8c6d75cf917d2de74 import (
Results,
)
if len(self._object_properties) > 0:
if self._properties.get("Results", None) is not None:
return self._properties.get("Results")
return Results(self)._select()
@property
def TestConfig(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_a98533092976c225fb8d5c32173490df.TestConfig): An instance of the TestConfig class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_a98533092976c225fb8d5c32173490df import (
TestConfig,
)
if len(self._object_properties) > 0:
if self._properties.get("TestConfig", None) is not None:
return self._properties.get("TestConfig")
return TestConfig(self)._select()
@property
def TrafficSelection(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_3a7f5efc76c1307b025f9039773d3585.TrafficSelection): An instance of the TrafficSelection class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_3a7f5efc76c1307b025f9039773d3585 import (
TrafficSelection,
)
if len(self._object_properties) > 0:
if self._properties.get("TrafficSelection", None) is not None:
return self._properties.get("TrafficSelection")
return TrafficSelection(self)
@property
def ForceApplyQTConfig(self):
# type: () -> bool
"""
Returns
-------
- bool: Apply QT config
"""
return self._get_attribute(self._SDM_ATT_MAP["ForceApplyQTConfig"])
@ForceApplyQTConfig.setter
def ForceApplyQTConfig(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP["ForceApplyQTConfig"], value)
@property
def InputParameters(self):
# type: () -> str
"""
Returns
-------
- str: Input Parameters
"""
return self._get_attribute(self._SDM_ATT_MAP["InputParameters"])
@InputParameters.setter
def InputParameters(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["InputParameters"], value)
@property
def Mode(self):
# type: () -> str
"""
Returns
-------
- str(existingMode | newMode): Test mode
"""
return self._get_attribute(self._SDM_ATT_MAP["Mode"])
@Mode.setter
def Mode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Mode"], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Test name
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
def update(
self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None
):
# type: (bool, str, str, str) -> CustomThroughput
"""Updates customThroughput resource on the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> CustomThroughput
"""Adds a new customThroughput resource on the server and adds it to the container.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with all currently retrieved customThroughput resources using find and the newly added customThroughput resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained customThroughput resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> CustomThroughput
"""Finds and retrieves customThroughput resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve customThroughput resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all customThroughput resources from the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with matching customThroughput resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of customThroughput data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the customThroughput resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Applies the specified Quick Test.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("apply", payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyAsync operation on the server.
applyAsync(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("applyAsync", payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the applyAsyncResult operation on the server.
applyAsyncResult(async_operation=bool)bool
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool:
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("applyAsyncResult", payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
applyITWizardConfiguration(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute(
"applyITWizardConfiguration", payload=payload, response_object=None
)
def GenerateReport(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the generateReport operation on the server.
        Generate a PDF report for the last successful test run.
generateReport(async_operation=bool)string
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: This method is asynchronous and has no return value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("generateReport", payload=payload, response_object=None)
def Run(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
run(async_operation=bool)list
-----------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
run(InputParameters=string, async_operation=bool)list
-----------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("run", payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Starts the specified Quick Test.
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(InputParameters=string, async_operation=bool)
---------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("start", payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("stop", payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
waitForTest(async_operation=bool)list
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("waitForTest", payload=payload, response_object=None)
|
PypiClean
|
/ssf-0.2.2.tar.gz/ssf-0.2.2/docs/installation.rst
|
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install ssf, run this command in your terminal:
.. code-block:: console
$ pip install ssf
This is the preferred method to install ssf, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for ssf can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/snoopyjc/ssf
Or download the `tarball`_:
.. code-block:: console
$ curl -OJL https://github.com/snoopyjc/ssf/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/snoopyjc/ssf
.. _tarball: https://github.com/snoopyjc/ssf/tarball/master
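Verifying the install
---------------------
As a quick sanity check, try importing the package from Python (this assumes the
distribution installs a top-level ``ssf`` module, which is how it is published on PyPI):
.. code-block:: python
    import ssf
    print(ssf.__name__)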
|
PypiClean
|
/AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/base.py
|
import locale
from amqpstorm.compatibility import is_string
from amqpstorm.exception import AMQPChannelError
AUTH_MECHANISM = 'PLAIN'
IDLE_WAIT = 0.01
LOCALE = locale.getlocale()[0] or 'en_US'
MAX_FRAME_SIZE = 131072
MAX_CHANNELS = 65535
class Stateful(object):
"""Stateful implementation."""
CLOSED = 0
CLOSING = 1
OPENING = 2
OPEN = 3
def __init__(self):
self._state = self.CLOSED
self._exceptions = []
def set_state(self, state):
"""Set State.
:param int state:
:return:
"""
self._state = state
@property
def current_state(self):
"""Get the State.
:rtype: int
"""
return self._state
@property
def is_closed(self):
"""Is Closed?
:rtype: bool
"""
return self._state == self.CLOSED
@property
def is_closing(self):
"""Is Closing?
:rtype: bool
"""
return self._state == self.CLOSING
@property
def is_opening(self):
"""Is Opening?
:rtype: bool
"""
return self._state == self.OPENING
@property
def is_open(self):
"""Is Open?
:rtype: bool
"""
return self._state == self.OPEN
@property
def exceptions(self):
"""Stores all exceptions thrown by this instance.
This is useful for troubleshooting, and is used internally
to check the health of the connection.
:rtype: list
"""
return self._exceptions
class BaseChannel(Stateful):
"""Channel base class."""
__slots__ = [
'_channel_id', '_consumer_tags'
]
def __init__(self, channel_id):
super(BaseChannel, self).__init__()
self._consumer_tags = []
self._channel_id = channel_id
@property
def channel_id(self):
"""Get Channel id.
:rtype: int
"""
return self._channel_id
@property
def consumer_tags(self):
"""Get a list of consumer tags.
:rtype: list
"""
return self._consumer_tags
def add_consumer_tag(self, tag):
"""Add a Consumer tag.
:param str tag: Consumer tag.
:return:
"""
if not is_string(tag):
raise AMQPChannelError('consumer tag needs to be a string')
if tag not in self._consumer_tags:
self._consumer_tags.append(tag)
def remove_consumer_tag(self, tag=None):
"""Remove a Consumer tag.
        If no tag is specified, all tags will be removed.
:param str,None tag: Consumer tag.
:return:
"""
if tag is not None:
if tag in self._consumer_tags:
self._consumer_tags.remove(tag)
else:
self._consumer_tags = []
class BaseMessage(object):
"""Message base class.
:param Channel channel: AMQPStorm Channel
:param str,unicode body: Message body
:param dict method: Message method
:param dict properties: Message properties
:param bool auto_decode: This is not implemented in the base message class.
"""
__slots__ = [
'_auto_decode', '_body', '_channel', '_method', '_properties'
]
def __init__(self, channel, body=None, method=None, properties=None,
auto_decode=None):
self._auto_decode = auto_decode
self._channel = channel
self._body = body
self._method = method
self._properties = properties or {}
def __iter__(self):
for attribute in ['_body', '_channel', '_method', '_properties']:
yield attribute[1::], getattr(self, attribute)
def to_dict(self):
"""Message to Dictionary.
:rtype: dict
"""
return {
'body': self._body,
'method': self._method,
'properties': self._properties,
'channel': self._channel
}
def to_tuple(self):
"""Message to Tuple.
:rtype: tuple
"""
return self._body, self._channel, self._method, self._properties
class Handler(object):
"""Operations Handler (e.g. Queue, Exchange)"""
__slots__ = [
'_channel'
]
def __init__(self, channel):
self._channel = channel
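# ---------------------------------------------------------------------------
# Small self-contained sketch of the base classes above: the Stateful state
# machine and BaseChannel's consumer-tag bookkeeping. No broker connection is
# needed; everything here exercises only the code defined in this module.
if __name__ == '__main__':
    channel = BaseChannel(channel_id=1)
    print(channel.is_closed)            # True: instances start in the CLOSED state
    channel.set_state(Stateful.OPEN)
    print(channel.is_open)              # True
    channel.add_consumer_tag('ctag-1')
    channel.add_consumer_tag('ctag-1')  # duplicates are ignored
    print(channel.consumer_tags)        # ['ctag-1']
    channel.remove_consumer_tag()       # no tag given: clears all tags
    print(channel.consumer_tags)        # []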
|
PypiClean
|
/ooouno-2.1.2-py3-none-any.whl/ooo/dyn/chart/data_label_placement.py
|
import uno
from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
# document generators will most likely not see this.
from ooo.helper.enum_helper import UnoConstMeta, ConstEnumMeta
class DataLabelPlacement(metaclass=UnoConstMeta, type_name="com.sun.star.chart.DataLabelPlacement", name_space="com.sun.star.chart"):
"""Dynamic Class. Contains all the constant values of ``com.sun.star.chart.DataLabelPlacement``"""
pass
class DataLabelPlacementEnum(IntEnum, metaclass=ConstEnumMeta, type_name="com.sun.star.chart.DataLabelPlacement", name_space="com.sun.star.chart"):
"""Dynamic Enum. Contains all the constant values of ``com.sun.star.chart.DataLabelPlacement`` as Enum values"""
pass
else:
if TYPE_CHECKING:
from com.sun.star.chart import DataLabelPlacement as DataLabelPlacement
else:
# keep document generators happy
from ...lo.chart.data_label_placement import DataLabelPlacement as DataLabelPlacement
class DataLabelPlacementEnum(IntEnum):
"""
Enum of Const Class DataLabelPlacement
These values specify where the captions/labels of data points are displayed.
**since**
LibreOffice 7.0
"""
AVOID_OVERLAP = DataLabelPlacement.AVOID_OVERLAP
CENTER = DataLabelPlacement.CENTER
TOP = DataLabelPlacement.TOP
TOP_LEFT = DataLabelPlacement.TOP_LEFT
LEFT = DataLabelPlacement.LEFT
BOTTOM_LEFT = DataLabelPlacement.BOTTOM_LEFT
BOTTOM = DataLabelPlacement.BOTTOM
BOTTOM_RIGHT = DataLabelPlacement.BOTTOM_RIGHT
RIGHT = DataLabelPlacement.RIGHT
TOP_RIGHT = DataLabelPlacement.TOP_RIGHT
INSIDE = DataLabelPlacement.INSIDE
OUTSIDE = DataLabelPlacement.OUTSIDE
NEAR_ORIGIN = DataLabelPlacement.NEAR_ORIGIN
CUSTOM = DataLabelPlacement.CUSTOM
__all__ = ['DataLabelPlacement', 'DataLabelPlacementEnum']
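# ---------------------------------------------------------------------------
# Hedged usage sketch: outside a LibreOffice process the branch above falls back
# to the static constant class, so the enum members should still be readable.
# Running this assumes the `uno` bindings imported at the top of this module are
# available (they ship with LibreOffice's Python), which is an assumption.
if __name__ == '__main__':
    print(DataLabelPlacementEnum.CENTER)       # DataLabelPlacementEnum.CENTER
    print(int(DataLabelPlacementEnum.CENTER))  # the underlying UNO constant value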
|
PypiClean
|
/DebugHeaders-0.1.tar.gz/DebugHeaders-0.1/debugheaders/__init__.py
|
from cStringIO import StringIO
import sys
from paste.request import construct_url
class DebugHeaders(object):
"""Middleware that shows all headers.
"""
translate_keys = {
'CONTENT_LENGTH': 'HTTP_CONTENT_LENGTH',
'CONTENT_TYPE': 'HTTP_CONTENT_TYPE',
}
def __init__(self, app, show_body=False, show_response_body=False,
output=sys.stdout):
self.app = app
self.show_body = show_body
self.show_response_body = show_response_body
self.output = output or sys.stdout
def __call__(self, environ, start_response):
output = self.output
if output == 'wsgi.errors':
output = environ['wsgi.errors']
output.write(
'Incoming headers: (%s %s SCRIPT_NAME=%r)\n' %
(environ['REQUEST_METHOD'], construct_url(environ), environ.get('SCRIPT_NAME')))
for name, value in sorted(environ.items()):
name = self.translate_keys.get(name, name)
if not name.startswith('HTTP_'):
continue
name = name[5:].replace('_', '-').title()
output.write(' %s: %s\n' % (name, value))
if self.show_body:
self.show_request_body(environ, output)
def repl_start_response(status, headers, exc_info=None):
output.write('Outgoing headers: (%s)\n' % status)
for name, value in headers:
output.write(' %s: %s\n' % (name.title(), value))
return start_response(status, headers, exc_info)
if self.show_response_body:
out = []
def capture_start_response(status, headers, exc_info=None):
repl_start_response(status, headers, exc_info)
return out.append
for chunk in self.app(environ, capture_start_response):
out.append(chunk)
output.write('\nResponse body:\n')
self.show_output(''.join(out), output)
return out
else:
return self.app(environ, repl_start_response)
def show_request_body(self, environ, output):
length = int(environ.get('CONTENT_LENGTH') or '0')
body = environ['wsgi.input'].read(length)
environ['wsgi.input'] = StringIO(body)
self.show_output(body, output)
def show_output(self, data, output):
if data:
for line in data.splitlines():
# This way we won't print out control characters:
output.write(line.encode('string_escape')+'\n')
output.write('-'*70+'\n')
def make_debug_headers(app, global_conf, show_body=False,
stderr=False):
"""
Show all the headers that come to the application.
These are printed to sys.stdout, or sys.stderr if stderr=True. If
show_body is true, then the body of all requests is also
displayed.
"""
from paste.deploy.converters import asbool
if asbool(stderr):
output = sys.stderr
else:
output = sys.stdout
return DebugHeaders(app, show_body=asbool(show_body),
output=output)
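# ---------------------------------------------------------------------------
# Wiring sketch (Python 2, matching the module above): wrap any WSGI app so its
# request and response headers are dumped. `demo_app` and the port are made-up
# stand-ins for illustration only.
if __name__ == '__main__':
    def demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']
    wrapped = DebugHeaders(demo_app, show_body=True, output=sys.stderr)
    from wsgiref.simple_server import make_server
    make_server('localhost', 8080, wrapped).handle_request()  # serve one request, then exit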
|
PypiClean
|
/django-suit-ckeditor-0.0.2.tar.gz/django-suit-ckeditor-0.0.2/suit_ckeditor/static/suit-ckeditor/ckeditor/plugins/a11yhelp/dialogs/lang/lt.js
|
/*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang("a11yhelp","lt",{title:"Accessibility Instructions",contents:"Help Contents. To close this dialog press ESC.",legend:[{name:"Bendros savybės",items:[{name:"Editor Toolbar",legend:"Press ${toolbarFocus} to navigate to the toolbar. Move to the next and previous toolbar group with TAB and SHIFT-TAB. Move to the next and previous toolbar button with RIGHT ARROW or LEFT ARROW. Press SPACE or ENTER to activate the toolbar button."},{name:"Editor Dialog",legend:"Inside a dialog, press TAB to navigate to next dialog field, press SHIFT + TAB to move to previous field, press ENTER to submit dialog, press ESC to cancel dialog. For dialogs that have multiple tab pages, press ALT + F10 to navigate to tab-list. Then move to next tab with TAB OR RIGTH ARROW. Move to previous tab with SHIFT + TAB or LEFT ARROW. Press SPACE or ENTER to select the tab page."},
{name:"Editor Context Menu",legend:"Press ${contextMenu} or APPLICATION KEY to open context-menu. Then move to next menu option with TAB or DOWN ARROW. Move to previous option with SHIFT+TAB or UP ARROW. Press SPACE or ENTER to select the menu option. Open sub-menu of current option with SPACE or ENTER or RIGHT ARROW. Go back to parent menu item with ESC or LEFT ARROW. Close context menu with ESC."},{name:"Editor List Box",legend:"Inside a list-box, move to next list item with TAB OR DOWN ARROW. Move to previous list item with SHIFT + TAB or UP ARROW. Press SPACE or ENTER to select the list option. Press ESC to close the list-box."},
{name:"Editor Element Path Bar",legend:"Press ${elementsPathFocus} to navigate to the elements path bar. Move to next element button with TAB or RIGHT ARROW. Move to previous button with SHIFT+TAB or LEFT ARROW. Press SPACE or ENTER to select the element in editor."}]},{name:"Commands",items:[{name:" Undo command",legend:"Press ${undo}"},{name:" Redo command",legend:"Press ${redo}"},{name:" Bold command",legend:"Press ${bold}"},{name:" Italic command",legend:"Press ${italic}"},{name:" Underline command",
legend:"Press ${underline}"},{name:" Link command",legend:"Press ${link}"},{name:" Toolbar Collapse command",legend:"Press ${toolbarCollapse}"},{name:" Access previous focus space command",legend:"Press ${accessPreviousSpace} to access the closest unreachable focus space before the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},{name:" Access next focus space command",legend:"Press ${accessNextSpace} to access the closest unreachable focus space after the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},
{name:" Accessibility Help",legend:"Press ${a11yHelp}"}]}]});
|
PypiClean
|
/ipyleaflet-0.17.3.tar.gz/ipyleaflet-0.17.3/js/src/layers/GeoJSON.js
|
const L = require('../leaflet.js');
const featuregroup = require('./FeatureGroup.js');
export class LeafletGeoJSONModel extends featuregroup.LeafletFeatureGroupModel {
defaults() {
return {
...super.defaults(),
_view_name: 'LeafletGeoJSONView',
_model_name: 'LeafletGeoJSONModel',
data: {},
style: {},
visible: true,
hover_style: {},
point_style: {},
};
}
}
export class LeafletGeoJSONView extends featuregroup.LeafletFeatureGroupView {
create_obj() {
var style = (feature) => {
const model_style = this.model.get('style');
const feature_style = feature.properties.style || {};
return {
...feature_style,
...model_style,
};
};
var options = {
style: style,
onEachFeature: (feature, layer) => {
var mouseevent = (e) => {
if (e.type == 'mouseover') {
layer.setStyle(this.model.get('hover_style'));
layer.once('mouseout', () => {
this.obj.resetStyle(layer);
});
}
this.send({
event: e.type,
feature: feature,
properties: feature.properties,
id: feature.id,
});
};
layer.on({
mouseover: mouseevent,
click: mouseevent,
});
},
};
var point_style = this.model.get('point_style');
if (Object.keys(point_style).length !== 0) {
options.pointToLayer = function (feature, latlng) {
return new L.CircleMarker(latlng, point_style);
};
}
this.obj = L.geoJson(this.model.get('data'), options);
}
model_events() {
this.listenTo(
this.model,
'change:style',
function () {
this.obj.setStyle(this.model.get('style'));
},
this
);
this.listenTo(
this.model,
'change:data',
function () {
this.obj.clearLayers();
this.obj.addData(this.model.get('data'));
},
this
);
this.listenTo(
this.model,
'change:visible',
function () {
if (this.model.get('visible')) {
this.obj.addData(this.model.get('data'));
} else {
this.obj.clearLayers();
}
},
this
);
}
}
|
PypiClean
|
/django_handyhelpers-0.3.9-py3-none-any.whl/handyhelpers/static/node_modules/chart.js/dist/docs/assets/js/128.87d67ad7.js
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[128],{457:function(t,e,o){"use strict";o.r(e);var d=o(6),v=Object(d.a)({},(function(){var t=this,e=t.$createElement,o=t._self._c||e;return o("ContentSlotsDistributor",{attrs:{"slot-key":t.$parent.slotKey}},[o("h3",{attrs:{id:"common-tick-options-to-all-axes"}},[o("a",{staticClass:"header-anchor",attrs:{href:"#common-tick-options-to-all-axes"}},[t._v("#")]),t._v(" Common tick options to all axes")]),t._v(" "),o("p",[t._v("Namespace: "),o("code",[t._v("options.scales[scaleId].ticks")])]),t._v(" "),o("table",[o("thead",[o("tr",[o("th",[t._v("Name")]),t._v(" "),o("th",[t._v("Type")]),t._v(" "),o("th",{staticStyle:{"text-align":"center"}},[t._v("Scriptable")]),t._v(" "),o("th",[t._v("Default")]),t._v(" "),o("th",[t._v("Description")])])]),t._v(" "),o("tbody",[o("tr",[o("td",[o("code",[t._v("backdropColor")])]),t._v(" "),o("td",[o("RouterLink",{attrs:{to:"/general/colors.html"}},[o("code",[t._v("Color")])])],1),t._v(" "),o("td",{staticStyle:{"text-align":"center"}},[t._v("Yes")]),t._v(" "),o("td",[o("code",[t._v("'rgba(255, 255, 255, 0.75)'")])]),t._v(" "),o("td",[t._v("Color of label backdrops.")])]),t._v(" "),o("tr",[o("td",[o("code",[t._v("backdropPadding")])]),t._v(" "),o("td",[o("RouterLink",{attrs:{to:"/general/padding.html"}},[o("code",[t._v("Padding")])])],1),t._v(" "),o("td",{staticStyle:{"text-align":"center"}}),t._v(" "),o("td",[o("code",[t._v("2")])]),t._v(" "),o("td",[t._v("Padding of label backdrop.")])]),t._v(" "),o("tr",[o("td",[o("code",[t._v("callback")])]),t._v(" "),o("td",[o("code",[t._v("function")])]),t._v(" "),o("td",{staticStyle:{"text-align":"center"}}),t._v(" "),o("td"),t._v(" "),o("td",[t._v("Returns the string representation of the tick value as it should be displayed on the chart. 
See "),o("RouterLink",{attrs:{to:"/axes/labelling.html#creating-custom-tick-formats"}},[t._v("callback")]),t._v(".")],1)]),t._v(" "),o("tr",[o("td",[o("code",[t._v("display")])]),t._v(" "),o("td",[o("code",[t._v("boolean")])]),t._v(" "),o("td",{staticStyle:{"text-align":"center"}}),t._v(" "),o("td",[o("code",[t._v("true")])]),t._v(" "),o("td",[t._v("If true, show tick labels.")])]),t._v(" "),o("tr",[o("td",[o("code",[t._v("color")])]),t._v(" "),o("td",[o("RouterLink",{attrs:{to:"/general/colors.html"}},[o("code",[t._v("Color")])])],1),t._v(" "),o("td",{staticStyle:{"text-align":"center"}},[t._v("Yes")]),t._v(" "),o("td",[o("code",[t._v("Chart.defaults.color")])]),t._v(" "),o("td",[t._v("Color of ticks.")])]),t._v(" "),o("tr",[o("td",[o("code",[t._v("font")])]),t._v(" "),o("td",[o("code",[t._v("Font")])]),t._v(" "),o("td",{staticStyle:{"text-align":"center"}},[t._v("Yes")]),t._v(" "),o("td",[o("code",[t._v("Chart.defaults.font")])]),t._v(" "),o("td",[t._v("See "),o("RouterLink",{attrs:{to:"/general/fonts.html"}},[t._v("Fonts")])],1)]),t._v(" "),o("tr",[o("td",[o("code",[t._v("major")])]),t._v(" "),o("td",[o("code",[t._v("object")])]),t._v(" "),o("td",{staticStyle:{"text-align":"center"}}),t._v(" "),o("td",[o("code",[t._v("{}")])]),t._v(" "),o("td",[o("RouterLink",{attrs:{to:"/axes/styling.html#major-tick-configuration"}},[t._v("Major ticks configuration")]),t._v(".")],1)]),t._v(" "),o("tr",[o("td",[o("code",[t._v("padding")])]),t._v(" "),o("td",[o("code",[t._v("number")])]),t._v(" "),o("td",{staticStyle:{"text-align":"center"}}),t._v(" "),o("td",[o("code",[t._v("3")])]),t._v(" "),o("td",[t._v("Sets the offset of the tick labels from the axis")])]),t._v(" "),o("tr",[o("td",[o("code",[t._v("showLabelBackdrop")])]),t._v(" "),o("td",[o("code",[t._v("boolean")])]),t._v(" "),o("td",{staticStyle:{"text-align":"center"}},[t._v("Yes")]),t._v(" "),o("td",[o("code",[t._v("true")]),t._v(" for radial scale, "),o("code",[t._v("false")]),t._v(" otherwise")]),t._v(" "),o("td",[t._v("If true, draw a background behind the tick labels.")])]),t._v(" "),o("tr",[o("td",[o("code",[t._v("textStrokeColor")])]),t._v(" "),o("td",[o("RouterLink",{attrs:{to:"/general/colors.html"}},[o("code",[t._v("Color")])])],1),t._v(" "),o("td",{staticStyle:{"text-align":"center"}},[t._v("Yes")]),t._v(" "),o("td",[t._v("``")]),t._v(" "),o("td",[t._v("The color of the stroke around the text.")])]),t._v(" "),o("tr",[o("td",[o("code",[t._v("textStrokeWidth")])]),t._v(" "),o("td",[o("code",[t._v("number")])]),t._v(" "),o("td",{staticStyle:{"text-align":"center"}},[t._v("Yes")]),t._v(" "),o("td",[o("code",[t._v("0")])]),t._v(" "),o("td",[t._v("Stroke width around the text.")])]),t._v(" "),o("tr",[o("td",[o("code",[t._v("z")])]),t._v(" "),o("td",[o("code",[t._v("number")])]),t._v(" "),o("td",{staticStyle:{"text-align":"center"}}),t._v(" "),o("td",[o("code",[t._v("0")])]),t._v(" "),o("td",[t._v("z-index of tick layer. Useful when ticks are drawn on chart area. Values <= 0 are drawn under datasets, > 0 on top.")])])])])])}),[],!1,null,null,null);e.default=v.exports}}]);
|
PypiClean
|
/pyontio-0.0.7-py3-none-any.whl/ontology/core/transaction.py
|
from enum import Enum
from typing import List, Union
from Cryptodome.Random.random import randint
from ontology.core.sig import Sig
from ontology.crypto.digest import Digest
from ontology.common.address import Address
from ontology.account.account import Account
from ontology.core.program import ProgramBuilder
from ontology.io.binary_writer import BinaryWriter
from ontology.io.binary_reader import BinaryReader
from ontology.io.memory_stream import StreamManager
from ontology.exception.error_code import ErrorCode
from ontology.exception.exception import SDKException
class TransactionType(Enum):
Bookkeeping = 0x00
Bookkeeper = 0x02
Claim = 0x03
Enrollment = 0x04
Vote = 0x05
DeployCode = 0xd0
InvokeCode = 0xd1
TransferTransaction = 0x80
TX_MAX_SIG_SIZE = 16
class Transaction(object):
def __init__(self, version=0, tx_type: TransactionType or int = None, gas_price: int = 0, gas_limit: int = 0,
payer: Union[str, bytes, Address, None] = b'', payload: bytearray = bytearray(), nonce: int = None,
attributes: bytearray = bytearray(), sig_list: List[Sig] = None):
if gas_price < 0:
raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
if gas_limit < 0:
raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.'))
self.version = version
if isinstance(tx_type, TransactionType):
tx_type = tx_type.value
self.tx_type = tx_type
if not nonce:
nonce = randint(0, 0xFFFFFFFF)
self.nonce = nonce
self.gas_price = gas_price
self.gas_limit = gas_limit
if not payer:
payer = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if isinstance(payer, str):
payer = Address.b58decode(payer).to_bytes()
if isinstance(payer, Address):
payer = payer.to_bytes()
self.payer = payer
self.payload = payload
self.attributes = attributes
if not sig_list:
sig_list = list()
self.sig_list = sig_list
def __iter__(self):
data = dict()
data['version'] = self.version
data['txType'] = self.tx_type
data['nonce'] = self.nonce
data['gasPrice'] = self.gas_price
data['gasLimit'] = self.gas_limit
data['payer'] = Address(self.payer).b58encode()
data['payload'] = bytes.hex(self.payload)
data['attributes'] = bytearray.hex(self.attributes)
data['sigs'] = list()
for sig in self.sig_list:
data['sigs'].append(dict(sig))
for key, value in data.items():
yield (key, value)
def serialize_unsigned(self, is_str: bool = False) -> bytes or str:
ms = StreamManager.get_stream()
writer = BinaryWriter(ms)
writer.write_uint8(self.version)
writer.write_uint8(self.tx_type)
writer.write_uint32(self.nonce)
writer.write_uint64(self.gas_price)
writer.write_uint64(self.gas_limit)
writer.write_bytes(self.payer)
self.serialize_exclusive_data(writer)
if self.payload is not None:
writer.write_var_bytes(bytes(self.payload))
writer.write_var_int(len(self.attributes))
ms.flush()
hex_bytes = ms.to_bytes()
StreamManager.release_stream(ms)
if is_str:
return bytes.hex(hex_bytes)
else:
return hex_bytes
def serialize_exclusive_data(self, writer):
pass
def hash256_explorer(self) -> str:
tx_serial = self.serialize_unsigned()
digest = Digest.hash256(tx_serial)
if not isinstance(digest, bytes):
raise SDKException(ErrorCode.require_bytes_params)
return bytes.hex(digest[::-1])
def hash256(self, is_hex: bool = False) -> bytes or str:
tx_serial = self.serialize_unsigned()
digest = Digest.hash256(tx_serial, is_hex)
return digest
def serialize(self, is_hex: bool = False) -> bytes or str:
ms = StreamManager.get_stream()
writer = BinaryWriter(ms)
writer.write_bytes(self.serialize_unsigned(is_str=False))
writer.write_var_int(len(self.sig_list))
for sig in self.sig_list:
writer.write_bytes(sig.serialize())
ms.flush()
bytes_tx = ms.to_bytes()
StreamManager.release_stream(ms)
if is_hex:
return bytes_tx.hex()
else:
return bytes_tx
@staticmethod
def deserialize_from(bytes_tx: bytes):
ms = StreamManager.get_stream(bytes_tx)
reader = BinaryReader(ms)
tx = Transaction()
tx.version = reader.read_uint8()
tx.tx_type = reader.read_uint8()
tx.nonce = reader.read_uint32()
tx.gas_price = reader.read_uint64()
tx.gas_limit = reader.read_uint64()
tx.payer = reader.read_bytes(20)
tx.payload = reader.read_var_bytes()
attribute_len = reader.read_var_int()
        if attribute_len == 0:
tx.attributes = bytearray()
sig_len = reader.read_var_int()
tx.sig_list = list()
for _ in range(0, sig_len):
tx.sig_list.append(Sig.deserialize(reader))
return tx
def sign_transaction(self, signer: Account):
"""
This interface is used to sign the transaction.
"""
tx_hash = self.hash256()
sig_data = signer.generate_signature(tx_hash)
sig = [Sig([signer.get_public_key_bytes()], 1, [sig_data])]
self.sig_list = sig
def add_sign_transaction(self, signer: Account):
"""
This interface is used to add signature into the transaction.
"""
if self.sig_list is None or len(self.sig_list) == 0:
self.sig_list = []
elif len(self.sig_list) >= TX_MAX_SIG_SIZE:
raise SDKException(ErrorCode.param_err('the number of transaction signatures should not be over 16'))
tx_hash = self.hash256()
sig_data = signer.generate_signature(tx_hash)
sig = Sig([signer.get_public_key_bytes()], 1, [sig_data])
self.sig_list.append(sig)
def add_multi_sign_transaction(self, m: int, pub_keys: List[bytes] or List[str], signer: Account):
"""
This interface is used to generate an Transaction object which has multi signature.
"""
for index, pk in enumerate(pub_keys):
if isinstance(pk, str):
pub_keys[index] = pk.encode('ascii')
pub_keys = ProgramBuilder.sort_public_keys(pub_keys)
tx_hash = self.hash256()
sig_data = signer.generate_signature(tx_hash)
if self.sig_list is None or len(self.sig_list) == 0:
self.sig_list = []
elif len(self.sig_list) >= TX_MAX_SIG_SIZE:
raise SDKException(ErrorCode.param_err('the number of transaction signatures should not be over 16'))
else:
for i in range(len(self.sig_list)):
if self.sig_list[i].public_keys == pub_keys:
if len(self.sig_list[i].sig_data) + 1 > len(pub_keys):
raise SDKException(ErrorCode.param_err('too more sigData'))
if self.sig_list[i].m != m:
raise SDKException(ErrorCode.param_err('M error'))
self.sig_list[i].sig_data.append(sig_data)
return
sig = Sig(pub_keys, m, [sig_data])
self.sig_list.append(sig)
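# ---------------------------------------------------------------------------
# Minimal sketch: build an unsigned, payload-less transaction and inspect its
# serialized form and hash. Field values below are arbitrary placeholders and
# no network access is involved; only classes from this package are used.
if __name__ == '__main__':
    tx = Transaction(tx_type=TransactionType.InvokeCode, gas_price=500, gas_limit=20000)
    print(tx.serialize_unsigned(is_str=True))  # hex of the unsigned body
    print(tx.hash256_explorer())               # tx hash as block explorers display it
    print(tx.serialize(is_hex=True))           # full serialization (no signatures yet)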
|
PypiClean
|
/django_bootstrap4_datetimepicker-4.2-py3-none-any.whl/bootstrap4_datetime/static/bootstrap4_datetime/js/locales/bootstrap-datetimepicker.ne.js
|
(function (factory) {
if (typeof define === 'function' && define.amd) {
define(['moment'], factory); // AMD
} else if (typeof exports === 'object') {
module.exports = factory(require('../moment')); // Node
} else {
factory(window.moment); // Browser global
}
}(function (moment) {
var symbolMap = {
'1': '१',
'2': '२',
'3': '३',
'4': '४',
'5': '५',
'6': '६',
'7': '७',
'8': '८',
'9': '९',
'0': '०'
},
numberMap = {
'१': '1',
'२': '2',
'३': '3',
'४': '4',
'५': '5',
'६': '6',
'७': '7',
'८': '8',
'९': '9',
'०': '0'
};
return moment.lang('ne', {
months : 'जनवरी_फेब्रुवरी_मार्च_अप्रिल_मई_जुन_जुलाई_अगष्ट_सेप्टेम्बर_अक्टोबर_नोभेम्बर_डिसेम्बर'.split("_"),
monthsShort : 'जन._फेब्रु._मार्च_अप्रि._मई_जुन_जुलाई._अग._सेप्ट._अक्टो._नोभे._डिसे.'.split("_"),
weekdays : 'आइतबार_सोमबार_मङ्गलबार_बुधबार_बिहिबार_शुक्रबार_शनिबार'.split("_"),
weekdaysShort : 'आइत._सोम._मङ्गल._बुध._बिहि._शुक्र._शनि.'.split("_"),
weekdaysMin : 'आइ._सो._मङ्_बु._बि._शु._श.'.split("_"),
longDateFormat : {
LT : "Aको h:mm बजे",
L : "DD/MM/YYYY",
LL : "D MMMM YYYY",
LLL : "D MMMM YYYY, LT",
LLLL : "dddd, D MMMM YYYY, LT"
},
preparse: function (string) {
return string.replace(/[१२३४५६७८९०]/g, function (match) {
return numberMap[match];
});
},
postformat: function (string) {
return string.replace(/\d/g, function (match) {
return symbolMap[match];
});
},
meridiem : function (hour, minute, isLower) {
if (hour < 3) {
return "राती";
} else if (hour < 10) {
return "बिहान";
} else if (hour < 15) {
return "दिउँसो";
} else if (hour < 18) {
return "बेलुका";
} else if (hour < 20) {
return "साँझ";
} else {
return "राती";
}
},
calendar : {
sameDay : '[आज] LT',
nextDay : '[भोली] LT',
nextWeek : '[आउँदो] dddd[,] LT',
lastDay : '[हिजो] LT',
lastWeek : '[गएको] dddd[,] LT',
sameElse : 'L'
},
relativeTime : {
future : "%sमा",
past : "%s अगाडी",
s : "केही समय",
m : "एक मिनेट",
mm : "%d मिनेट",
h : "एक घण्टा",
hh : "%d घण्टा",
d : "एक दिन",
dd : "%d दिन",
M : "एक महिना",
MM : "%d महिना",
y : "एक बर्ष",
yy : "%d बर्ष"
},
week : {
dow : 1, // Monday is the first day of the week.
doy : 7 // The week that contains Jan 1st is the first week of the year.
}
});
}));
|
PypiClean
|
/ansible-solace-0.7.8.tar.gz/ansible-solace-0.7.8/lib/ansible/modules/network/solace/solace_dmr_cluster_link_trusted_cn.py
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
import ansible.module_utils.network.solace.solace_utils as su
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: solace_dmr_cluster_link_trusted_cn
short_description: Configure a trusted common name object on a DMR cluster link.
description:
- "Allows addition, removal and configuration of trusted common name objects on DMR cluster links."
- "Reference: https://docs.solace.com/API-Developer-Online-Ref-Documentation/swagger-ui/config/index.html#/dmrCluster/createDmrClusterLinkTlsTrustedCommonName."
options:
name:
description: The expected trusted common name of the remote certificate. Maps to 'tlsTrustedCommonName' in the API.
required: true
dmr:
description: The name of the DMR cluster. Maps to 'dmrClusterName' in the API.
required: true
remote_node_name:
description: The name of the remote node. Maps to 'remoteNodeName' in the API.
required: true
settings:
description: JSON dictionary of additional configuration, see Reference documentation.
required: false
state:
description: Target state. [present|absent].
required: false
default: present
host:
description: Hostname of Solace Broker.
required: false
default: "localhost"
port:
description: Management port of Solace Broker.
required: false
default: 8080
secure_connection:
description: If true, use https rather than http for querying.
required: false
default: false
username:
description: Administrator username for Solace Broker.
required: false
default: "admin"
password:
description: Administrator password for Solace Broker.
required: false
default: "admin"
timeout:
description: Connection timeout in seconds for the http request.
required: false
default: 1
x_broker:
description: Custom HTTP header with the broker virtual router id, if using a SEMPv2 Proxy/agent infrastructure.
required: false
author:
- Mark Street ([email protected])
- Swen-Helge Huber ([email protected])
- Ricardo Gomez-Ulmke ([email protected])
'''
EXAMPLES = '''
- name: Remove 'remoteNode' DMR Link Trusted CN
solace_dmr_cluster_link_trusted_cn:
name: "*.messaging.solace.cloud"
remote_node_name: remoteNode
dmr: foo
state: absent
- name: Add 'remoteNode' DMR Link Trusted CN
solace_dmr_cluster_link_trusted_cn:
name: "*.messaging.solace.cloud"
remote_node_name: remoteNode
dmr: foo
state: present
'''
RETURN = '''
response:
description: The response from the Solace Sempv2 request.
type: dict
'''
class SolaceLinkTrustedCNTask(su.SolaceTask):
LOOKUP_ITEM_KEY = 'tlsTrustedCommonName'
def __init__(self, module):
su.SolaceTask.__init__(self, module)
def lookup_item(self):
return self.module.params['name']
def get_args(self):
return [self.module.params['dmr'], self.module.params['remote_node_name']]
def get_func(self, solace_config, dmr, link, lookup_item_value):
path_array = [su.SEMP_V2_CONFIG, su.DMR_CLUSTERS, dmr, su.LINKS, link, su.TLS_TRUSTED_COMMON_NAMES, lookup_item_value]
return su.get_configuration(solace_config, path_array, self.LOOKUP_ITEM_KEY)
def create_func(self, solace_config, dmr, link, trusted_cn, settings=None):
"""Create a DMR Cluster"""
defaults = {
'dmrClusterName': dmr,
'remoteNodeName': link
}
mandatory = {
'tlsTrustedCommonName': trusted_cn
}
data = su.merge_dicts(defaults, mandatory, settings)
path_array = [su.SEMP_V2_CONFIG, su.DMR_CLUSTERS, dmr, su.LINKS, link, su.TLS_TRUSTED_COMMON_NAMES]
return su.make_post_request(solace_config, path_array, data)
def delete_func(self, solace_config, dmr, link, lookup_item_value):
"""Delete a VPN"""
path_array = [su.SEMP_V2_CONFIG, su.DMR_CLUSTERS, dmr, su.LINKS, link, su.TLS_TRUSTED_COMMON_NAMES, lookup_item_value]
return su.make_delete_request(solace_config, path_array)
def run_module():
"""Entrypoint to module"""
module_args = dict(
name=dict(type='str', required=True),
dmr=dict(type='str', required=True),
remote_node_name=dict(type='str', required=True),
host=dict(type='str', default='localhost'),
port=dict(type='int', default=8080),
secure_connection=dict(type='bool', default=False),
username=dict(type='str', default='admin'),
password=dict(type='str', default='admin', no_log=True),
settings=dict(type='dict', required=False),
state=dict(default='present', choices=['absent', 'present']),
timeout=dict(default='1', required=False),
x_broker=dict(type='str', default='')
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
solace_task = SolaceLinkTrustedCNTask(module)
result = solace_task.do_task()
module.exit_json(**result)
def main():
"""Standard boilerplate"""
run_module()
if __name__ == '__main__':
main()
###
# The End.
|
PypiClean
|
/biolinkml-1.7.6.tar.gz/biolinkml-1.7.6/docs/code_set_version.md
|
# Slot: code_set_version
the version identifier of the enumeration code set
URI: [meta:code_set_version](https://w3id.org/biolink/biolinkml/meta/code_set_version)
## Domain and Range
[EnumDefinition](EnumDefinition.md) -> <sub>OPT</sub> [String](types/String.md)
## Parents
## Children
## Used by
* [EnumDefinition](EnumDefinition.md)
## Other properties
| | | |
| --- | --- | --- |
| **Comments:** | | we assume that version identifiers lexically sort in temporal order. Recommend semver when possible |
|
PypiClean
|
/dna.node-2.1.1-py3-none-any.whl/dna/track/matcher/cost_matrices.py
|
from __future__ import absolute_import
from typing import Set
import numpy as np
import numpy.typing as npt
import dna
from dna.detect import Detection
from ..types import ObjectTrack
from dna.track import utils
from ...track.kalman_filter import KalmanFilter
from .base import INVALID_DIST_DISTANCE, INVALID_IOU_DISTANCE, INVALID_METRIC_DISTANCE
def build_dist_cost(kf:KalmanFilter, tracks:list[ObjectTrack], detections:list[Detection]) -> np.ndarray:
dist_matrix = np.ones((len(tracks), len(detections)))
if tracks and detections:
measurements = np.asarray([det.bbox.xyah for det in detections])
for t_idx, track in enumerate(tracks):
            mahalanobis_dist = kf.gating_distance(track.mean, track.covariance, measurements)
            # For a track that has not been associated with a detection for several frames, its
            # distances to detections end up smaller than those of other tracks, so compensate here.
            # A later revision will probably drop the Mahalanobis distance altogether.
            dist_matrix[t_idx, :] = mahalanobis_dist * (1 + 0.75*(track.time_since_update-1))
return dist_matrix
def build_iou_cost(tracks:list[ObjectTrack], detections:list[Detection]) -> np.ndarray:
matrix = np.ones((len(tracks), len(detections)))
if tracks and detections:
for t_idx, track in enumerate(tracks):
t_box = track.location
for d_idx, det in enumerate(detections):
matrix[t_idx,d_idx] = 1 - t_box.iou(det.bbox)
return matrix
def gate_dist_iou_cost(dist_cost:np.ndarray, iou_cost:np.ndarray, \
tracks:list[ObjectTrack], detections:list[Detection]) -> tuple[np.ndarray, np.ndarray]:
    # When a track and a detection differ greatly in size, set the corresponding
    # cost-matrix cell to the maximum value so that the pair cannot be matched.
    # Since the introduction of the 'iou' zone this check is less useful and may be removed later.
validity_mask = build_task_det_ratio_mask(tracks, detections)
gated_dist_cost = np.where(validity_mask, dist_cost, INVALID_DIST_DISTANCE)
gated_iou_cost = np.where(validity_mask, iou_cost, INVALID_IOU_DISTANCE)
return gated_dist_cost, gated_iou_cost
_AREA_RATIO_LIMITS = (0.3, 2.8) # allowed detection-to-track area ratio for tracks of ordinary size
_LARGE_AREA_RATIO_LIMITS = (0.5, 2) # allowed detection-to-track area ratio for tracks above a certain size
def build_task_det_ratio_mask(tracks:list[ObjectTrack], detections:list[Detection],
area_ratio_limits:npt.ArrayLike=_AREA_RATIO_LIMITS):
det_areas = np.array([det.bbox.area() for det in detections])
area_ratio_limits = np.array(area_ratio_limits)
    large_area_ratio_limits = np.array(_LARGE_AREA_RATIO_LIMITS)
mask = np.zeros((len(tracks), len(detections)), dtype=bool)
for t_idx, track in enumerate(tracks):
t_area = track.location.area()
ratio_limits = area_ratio_limits if t_area < 100000 else large_area_ratio_limits
limits = ratio_limits * t_area
mask[t_idx,:] = (det_areas >= limits[0]) & (det_areas <= limits[1])
return mask
def build_metric_cost(tracks:list[ObjectTrack], detections:list[Detection],
track_idxes:list[int], det_idxes:list[int]) -> np.ndarray:
def build_matrix(tracks:list[ObjectTrack], detections:list[Detection]) -> np.ndarray:
cost_matrix = np.ones((len(tracks), len(detections)))
if tracks and detections:
det_features = [det.feature for det in detections]
for i, track in enumerate(tracks):
if track.features:
distances = utils.cosine_distance(track.features, det_features)
cost_matrix[i, :] = distances.min(axis=0)
return cost_matrix
reduced_track_idxes = [i for i, track in utils.get_indexed_items(tracks, track_idxes) if track.features]
reduced_det_idxes = [i for i, det in utils.get_indexed_items(detections, det_idxes) if det.feature is not None]
reduced_matrix = build_matrix(utils.get_items(tracks, reduced_track_idxes), utils.get_items(detections, reduced_det_idxes))
cost_matrix = np.ones((len(tracks), len(detections)))
for row_idx, t_idx in enumerate(reduced_track_idxes):
for col_idx, d_idx in enumerate(reduced_det_idxes):
cost_matrix[t_idx, d_idx] = reduced_matrix[row_idx, col_idx]
return cost_matrix
def gate_metric_cost(metric_costs:np.ndarray, dist_costs:np.ndarray,
gate_threshold:float) -> None:
return np.where(dist_costs > gate_threshold, INVALID_METRIC_DISTANCE, metric_costs)
def print_cost_matrix(tracks:list[ObjectTrack], cost, trim_overflow=None):
if trim_overflow:
cost = cost.copy()
cost[cost > trim_overflow] = trim_overflow
for tidx, track in enumerate(tracks):
dists = [int(round(v)) for v in cost[tidx]]
track_str = f" {tidx:02d}: {track.id:03d}({track.state},{track.time_since_update:02d})"
dist_str = ', '.join([f"{v:4d}" if v != trim_overflow else " " for v in dists])
print(f"{track_str}: {dist_str}")
|
PypiClean
|
/focal_loss-0.0.7-py3-none-any.whl/focal_loss/utils/validation.py
|
import numbers
def check_type(obj, base, *, name=None, func=None, allow_none=False,
default=None, error_message=None):
"""Check whether an object is an instance of a base type.
Parameters
----------
obj : object
The object to be validated.
name : str
The name of `obj` in the calling function.
base : type or tuple of type
The base type that `obj` should be an instance of.
func: callable, optional
A function to be applied to `obj` if it is of type `base`. If None, no
function will be applied and `obj` will be returned as-is.
allow_none : bool, optional
Indicates whether the value None should be allowed to pass through.
default : object, optional
The default value to return if `obj` is None and `allow_none` is True.
If `default` is not None, it must be of type `base`, and it will have
`func` applied to it if `func` is not None.
error_message : str or None, optional
Custom error message to display if the type is incorrect.
Returns
-------
base type or None
The validated object.
Raises
------
TypeError
If `obj` is not an instance of `base`.
Examples
--------
>>> check_type(1, int)
1
>>> check_type(1, (int, str))
1
>>> check_type(1, str)
Traceback (most recent call last):
...
TypeError: Invalid type. Expected: str. Actual: int.
>>> check_type(1, (str, bool))
Traceback (most recent call last):
...
TypeError: Invalid type. Expected: (str, bool). Actual: int.
>>> print(check_type(None, str, allow_none=True))
None
>>> check_type(1, str, name='num')
Traceback (most recent call last):
...
TypeError: Invalid type for parameter 'num'. Expected: str. Actual: int.
>>> check_type(1, int, func=str)
'1'
>>> check_type(1, int, func='not callable')
Traceback (most recent call last):
...
ValueError: Parameter 'func' must be callable or None.
>>> check_type(2.0, str, error_message='Not a string!')
Traceback (most recent call last):
...
TypeError: Not a string!
>>> check_type(None, int, allow_none=True, default=0)
0
"""
if allow_none and obj is None:
if default is not None:
return check_type(default, base=base, name=name, func=func,
allow_none=False)
return None
if isinstance(obj, base):
if func is None:
return obj
elif callable(func):
return func(obj)
else:
raise ValueError('Parameter \'func\' must be callable or None.')
# Handle wrong type
if isinstance(base, tuple):
expect = '(' + ', '.join(cls.__name__ for cls in base) + ')'
else:
expect = base.__name__
actual = type(obj).__name__
if error_message is None:
error_message = 'Invalid type'
if name is not None:
error_message += f' for parameter \'{name}\''
error_message += f'. Expected: {expect}. Actual: {actual}.'
raise TypeError(error_message)
def check_bool(obj, *, name=None, allow_none=False, default=None):
"""Validate boolean function arguments.
Parameters
----------
obj : object
The object to be validated.
name : str, optional
The name of `obj` in the calling function.
allow_none : bool, optional
Indicates whether the value None should be allowed.
default : object, optional
The default value to return if `obj` is None and `allow_none` is True.
Returns
-------
bool or None
The validated bool.
Raises
------
TypeError
If `obj` is not an instance of bool.
Examples
--------
>>> check_bool(True)
True
>>> check_bool(1.0)
Traceback (most recent call last):
...
TypeError: Invalid type. Expected: bool. Actual: float.
>>> a = (1 < 2)
>>> check_bool(a, name='a')
True
>>> b = 'not a bool'
>>> check_bool(b, name='b')
Traceback (most recent call last):
...
TypeError: Invalid type for parameter 'b'. Expected: bool. Actual: str.
"""
return check_type(obj, name=name, base=bool, func=bool,
allow_none=allow_none, default=default)
def _check_numeric(*, check_func, obj, name, base, func, positive, minimum,
maximum, allow_none, default):
"""Helper function for check_float and check_int."""
obj = check_type(obj, name=name, base=base, func=func,
allow_none=allow_none, default=default)
if obj is None:
return None
positive = check_bool(positive, name='positive')
if positive and obj <= 0:
if name is None:
message = 'Parameter must be positive.'
else:
message = f'Parameter \'{name}\' must be positive.'
raise ValueError(message)
if minimum is not None:
minimum = check_func(minimum, name='minimum')
if obj < minimum:
if name is None:
message = f'Parameter must be at least {minimum}.'
else:
message = f'Parameter \'{name}\' must be at least {minimum}.'
raise ValueError(message)
if maximum is not None:
        maximum = check_func(maximum, name='maximum')
if obj > maximum:
if name is None:
message = f'Parameter must be at most {maximum}.'
else:
message = f'Parameter \'{name}\' must be at most {maximum}.'
raise ValueError(message)
return obj
def check_int(obj, *, name=None, positive=False, minimum=None, maximum=None,
allow_none=False, default=None):
"""Validate integer function arguments.
Parameters
----------
obj : object
The object to be validated.
name : str, optional
The name of `obj` in the calling function.
positive : bool, optional
Whether `obj` must be a positive integer (1 or greater).
minimum : int, optional
The minimum value that `obj` can take (inclusive).
maximum : int, optional
The maximum value that `obj` can take (inclusive).
allow_none : bool, optional
Indicates whether the value None should be allowed.
default : object, optional
The default value to return if `obj` is None and `allow_none` is True.
Returns
-------
int or None
The validated integer.
Raises
------
TypeError
If `obj` is not an integer.
ValueError
If any of the optional positivity or minimum and maximum value
constraints are violated.
Examples
--------
>>> check_int(0)
0
>>> check_int(1, positive=True)
1
>>> check_int(1.0)
Traceback (most recent call last):
...
TypeError: Invalid type. Expected: Integral. Actual: float.
>>> check_int(-1, positive=True)
Traceback (most recent call last):
...
ValueError: Parameter must be positive.
>>> check_int(1, name='a', minimum=10)
Traceback (most recent call last):
...
ValueError: Parameter 'a' must be at least 10.
"""
return _check_numeric(check_func=check_int, obj=obj, name=name,
base=numbers.Integral, func=int, positive=positive,
minimum=minimum, maximum=maximum,
allow_none=allow_none, default=default)
def check_float(obj, *, name=None, positive=False, minimum=None, maximum=None,
allow_none=False, default=None):
"""Validate float function arguments.
Parameters
----------
obj : object
The object to be validated.
name : str, optional
The name of `obj` in the calling function.
positive : bool, optional
Whether `obj` must be a positive float.
minimum : float, optional
The minimum value that `obj` can take (inclusive).
maximum : float, optional
The maximum value that `obj` can take (inclusive).
allow_none : bool, optional
Indicates whether the value None should be allowed.
default : object, optional
The default value to return if `obj` is None and `allow_none` is True.
Returns
-------
float or None
The validated float.
Raises
------
TypeError
If `obj` is not a float.
ValueError
If any of the optional positivity or minimum and maximum value
constraints are violated.
Examples
--------
>>> check_float(0)
0.0
>>> check_float(1.0, positive=True)
1.0
>>> check_float(1.0 + 1.0j)
Traceback (most recent call last):
...
TypeError: Invalid type. Expected: Real. Actual: complex.
>>> check_float(-1, positive=True)
Traceback (most recent call last):
...
ValueError: Parameter must be positive.
>>> check_float(1.2, name='a', minimum=10)
Traceback (most recent call last):
...
ValueError: Parameter 'a' must be at least 10.0.
"""
return _check_numeric(check_func=check_float, obj=obj, name=name,
base=numbers.Real, func=float, positive=positive,
minimum=minimum, maximum=maximum,
allow_none=allow_none, default=default)
|
PypiClean
|
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/canvas/server.py
|
import graphlab.canvas.handlers
import graphlab.canvas.state
import datetime
import time
import threading
import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.web
import uuid
class Server:
#
# private variables
#
# instance variables
__thread = None # bg thread that runs Tornado
__port = None # port Tornado is running on (determined automatically at thread launch)
__port_lock = threading.RLock()
__last_ping = None # last ping time from the browser (data check)
__last_ping_lock = threading.RLock()
__loop = None
__application = None
__server = None
#
# public API
#
# instance variables
state = None
def __init__(self, state, port=None):
self.state = state
self.__initialize_server()
        # Will raise an exception if the port cannot be bound
self.__bind_socket(port)
# methods
def ping(self):
"""
Updates the timestamp for the alive() calculation.
"""
with self.__last_ping_lock:
self.__last_ping = datetime.datetime.now()
def alive(self):
"""
        Returns True if the browser has communicated with the server recently, False otherwise.
"""
with self.__last_ping_lock:
if self.__last_ping is None:
return False
# if the browser hasn't made a request for data in the last 3 seconds, consider it dead
return (datetime.datetime.now() - self.__last_ping) < datetime.timedelta(0, 3)
def get_port(self):
with self.__port_lock:
return self.__port
def start(self):
"""
Starts the canvas server if it is not already active
"""
if self.__thread is None:
self.__thread = threading.Thread(target=self.__run, name='canvas_server')
# TODO -- if we want to keep the Python process running until the
# visualization is closed (a la Matplotlib) we can take this out of daemon
# mode and do our own thread management
self.__thread.daemon = True
self.__thread.start()
        # block this thread until the Tornado bg thread has initialized and set the port
port = None
while port is None:
port = self.get_port()
if port is None:
time.sleep(0.05)
#
# private API
#
    # Raises an exception if the socket cannot be bound on the requested port;
    # when `port` is None, the OS assigns a free port
def __bind_socket(self, port=None):
sockets = tornado.netutil.bind_sockets(port,"localhost")
self.__server.add_sockets(sockets)
with self.__port_lock:
self.__port = sockets[0].getsockname()[:2][1]
def __initialize_server(self):
# use our own IOLoop instead of singleton to avoid clashing with IPython/Jupyter Notebook
self.__loop = tornado.ioloop.IOLoop()
self.__application = tornado.web.Application(
graphlab.canvas.handlers.get_handlers(self, self.state),
io_loop=self.__loop
)
self.__server = tornado.httpserver.HTTPServer(self.__application, io_loop=self.__loop)
def __run(self):
self.__loop.start()
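    # Usage sketch (illustrative only, not part of the original file), assuming
    # a `state` object compatible with graphlab.canvas.state:
    #
    #     server = Server(state)      # port=None lets the OS pick a free port
    #     server.start()              # launches Tornado on a daemon thread
    #     print(server.get_port())    # port is known once start() returns
    #     server.ping()               # normally called by the browser
    #     assert server.alive()       # True within ~3 seconds of the last ping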
|
PypiClean
|
/flowest-1.0.0.tar.gz/flowest-1.0.0/flower/static/js/bootstrap-carousel.js
|
!function ($) {
"use strict"; // jshint ;_;
/* CAROUSEL CLASS DEFINITION
* ========================= */
var Carousel = function (element, options) {
this.$element = $(element)
this.options = options
this.options.slide && this.slide(this.options.slide)
this.options.pause == 'hover' && this.$element
.on('mouseenter', $.proxy(this.pause, this))
.on('mouseleave', $.proxy(this.cycle, this))
}
Carousel.prototype = {
cycle: function (e) {
if (!e) this.paused = false
this.options.interval
&& !this.paused
&& (this.interval = setInterval($.proxy(this.next, this), this.options.interval))
return this
}
, to: function (pos) {
var $active = this.$element.find('.active')
, children = $active.parent().children()
, activePos = children.index($active)
, that = this
if (pos > (children.length - 1) || pos < 0) return
if (this.sliding) {
return this.$element.one('slid', function () {
that.to(pos)
})
}
if (activePos == pos) {
return this.pause().cycle()
}
return this.slide(pos > activePos ? 'next' : 'prev', $(children[pos]))
}
, pause: function (e) {
if (!e) this.paused = true
clearInterval(this.interval)
this.interval = null
return this
}
, next: function () {
if (this.sliding) return
return this.slide('next')
}
, prev: function () {
if (this.sliding) return
return this.slide('prev')
}
, slide: function (type, next) {
var $active = this.$element.find('.active')
, $next = next || $active[type]()
, isCycling = this.interval
, direction = type == 'next' ? 'left' : 'right'
, fallback = type == 'next' ? 'first' : 'last'
, that = this
, e = $.Event('slide')
this.sliding = true
isCycling && this.pause()
$next = $next.length ? $next : this.$element.find('.item')[fallback]()
if ($next.hasClass('active')) return
if ($.support.transition && this.$element.hasClass('slide')) {
this.$element.trigger(e)
if (e.isDefaultPrevented()) return
$next.addClass(type)
$next[0].offsetWidth // force reflow
$active.addClass(direction)
$next.addClass(direction)
this.$element.one($.support.transition.end, function () {
$next.removeClass([type, direction].join(' ')).addClass('active')
$active.removeClass(['active', direction].join(' '))
that.sliding = false
setTimeout(function () { that.$element.trigger('slid') }, 0)
})
} else {
this.$element.trigger(e)
if (e.isDefaultPrevented()) return
$active.removeClass('active')
$next.addClass('active')
this.sliding = false
this.$element.trigger('slid')
}
isCycling && this.cycle()
return this
}
}
/* CAROUSEL PLUGIN DEFINITION
* ========================== */
$.fn.carousel = function (option) {
return this.each(function () {
var $this = $(this)
, data = $this.data('carousel')
, options = $.extend({}, $.fn.carousel.defaults, typeof option == 'object' && option)
if (!data) $this.data('carousel', (data = new Carousel(this, options)))
if (typeof option == 'number') data.to(option)
else if (typeof option == 'string' || (option = options.slide)) data[option]()
else if (options.interval) data.cycle()
})
}
$.fn.carousel.defaults = {
interval: 5000
, pause: 'hover'
}
$.fn.carousel.Constructor = Carousel
/* CAROUSEL DATA-API
* ================= */
$(function () {
$('body').on('click.carousel.data-api', '[data-slide]', function ( e ) {
var $this = $(this), href
, $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
, options = !$target.data('modal') && $.extend({}, $target.data(), $this.data())
$target.carousel(options)
e.preventDefault()
})
})
}(window.jQuery);
|
PypiClean
|
/hu-neuro-pipeline-0.7.0.tar.gz/hu-neuro-pipeline-0.7.0/pipeline/averaging.py
|
import warnings
import numpy as np
import pandas as pd
from mne import Epochs, Evoked, grand_average
from mne.time_frequency import AverageTFR, EpochsTFR
def compute_evokeds(epochs, average_by=None, bad_ixs=[], participant_id=None):
"""Computes condition averages (evokeds) based on triggers or columns."""
# Average by triggers in case no log file columns were provided
if average_by is None:
all_evokeds, all_evokeds_df = compute_evokeds_triggers(
epochs, bad_ixs, participant_id)
elif isinstance(average_by, dict):
all_evokeds, all_evokeds_df = compute_evokeds_queries(
epochs, average_by, bad_ixs, participant_id)
else:
warnings.warn(
'Passing a list of column names to `average_by` will ' +
'be deprecated in a future version of the pipeline. ' +
'Please use a dict of labels and log file queries ' +
'instead (see https://github.com/alexenge/hu-neuro-pipeline/blob/main/docs/inputs.md#average_by-recommended-default-none)')
all_evokeds, all_evokeds_df = compute_evokeds_cols(
epochs, average_by, bad_ixs, participant_id)
return all_evokeds, all_evokeds_df
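# Illustrative example (not part of the original module) of the recommended
# dict form of `average_by`, mapping evoked labels to pandas-style metadata
# queries; the column and condition names below are made up:
#
#     average_by = {
#         'related': 'semantic_condition == "related"',
#         'unrelated': 'semantic_condition == "unrelated"',
#     }
#     evokeds, evokeds_df = compute_evokeds(epochs, average_by=average_by)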
def compute_evokeds_triggers(epochs, bad_ixs=[], participant_id=None):
"""Computes condition averages (evokeds) based on triggers."""
# Get indices of good epochs
good_ixs = [ix for ix in range(len(epochs)) if ix not in bad_ixs]
    # Prepare empty lists
all_evokeds = []
all_evokeds_dfs = []
# Compute evokeds
epochs_good = epochs.copy()[good_ixs]
evokeds = average_by_events(epochs_good)
all_evokeds = all_evokeds + evokeds
# Convert to DataFrame
evokeds_df = create_evokeds_df(evokeds, participant_id=participant_id)
all_evokeds_dfs.append(evokeds_df)
# Combine DataFrames
all_evokeds_df = pd.concat(all_evokeds_dfs, ignore_index=True)
return all_evokeds, all_evokeds_df
def compute_evokeds_queries(epochs, queries, bad_ixs=[], participant_id=None):
"""Computes condition averages (evokeds) based on log file queries."""
# Get indices of good epochs
good_ixs = [ix for ix in range(len(epochs)) if ix not in bad_ixs]
# Reset index so that trials start at 0
epochs.metadata.reset_index(drop=True, inplace=True)
# Create evokeds for each query
evokeds = []
evoked_dfs = []
for label, query in queries.items():
# Compute evokeds for trials that match the current query
evoked = compute_evoked_query(epochs[good_ixs], query, label)
evokeds.append(evoked)
# Convert to data frame
extra_cols = \
{'participant_id': participant_id, 'label': label, 'query': query}
evoked_df = evoked_to_df(evoked, extra_cols)
evoked_dfs.append(evoked_df)
# Combine data frames
evokeds_df = pd.concat(evoked_dfs, ignore_index=True)
return evokeds, evokeds_df
def compute_evoked_query(epochs, query, label):
"""Computes one condition average (evoked) based on a log file query."""
# Compute evokeds based on ERP or TFR epochs
if isinstance(epochs, EpochsTFR):
evoked = epochs[query].average()
else: # `EpochsTFR.average()` has no `picks` argument
evoked = epochs[query].average(picks=['eeg', 'misc'])
evoked.comment = label
return evoked
def evoked_to_df(evoked, extra_cols={}):
"""Converts MNE's Evoked or AverageTFR to a pandas data frame."""
# Convert to data frame
if isinstance(evoked, AverageTFR):
evoked_df = evoked.to_data_frame()
else: # `AverageTFR.to_data_frame()` has no `scalings` argument
evoked_df = \
evoked.to_data_frame(scalings={'eeg': 1e6, 'misc': 1e6})
# Optionally add extra columns
for column, value in reversed(extra_cols.items()):
evoked_df.insert(0, column, value)
return evoked_df
def compute_evokeds_cols(
epochs, average_by=None, bad_ixs=[], participant_id=None):
"""Computes condition averages (evokeds) based on log file columns."""
# Make sure that provided values are stored in a list
if isinstance(average_by, str):
average_by = [average_by]
# Get indices of good epochs
good_ixs = [ix for ix in range(len(epochs)) if ix not in bad_ixs]
    # Prepare empty lists
all_evokeds = []
all_evokeds_dfs = []
# Iterate over the provided main effects and interactions
for cols in average_by:
# Parse interaction effects into a list
cols = cols.split('/')
# Compute evokeds
epochs_update = update_events(epochs, cols)[good_ixs]
evokeds = average_by_events(epochs_update)
all_evokeds = all_evokeds + evokeds
# Convert to DataFrame
trials = epochs_update.metadata
evokeds_df = create_evokeds_df(
evokeds, cols, trials, participant_id)
# Append info about averaging
value = '/'.join(cols)
evokeds_df.insert(loc=1, column='average_by', value=value)
all_evokeds_dfs.append(evokeds_df)
# Combine DataFrames
all_evokeds_df = pd.concat(all_evokeds_dfs, ignore_index=True)
# Move condition columns back to the front
# They might have been moved to the end while concatenating
if average_by is not None:
time_ix = all_evokeds_df.columns.get_loc('time')
for cols in reversed(average_by):
if not '/' in cols:
all_evokeds_df.insert(
time_ix - 1, column=cols, value=all_evokeds_df.pop(cols))
# Convert NaNs to empty strings so that R can represent them
all_evokeds_df[cols] = all_evokeds_df[cols].fillna('')
return all_evokeds, all_evokeds_df
def average_by_events(epochs, method='mean'):
"""Create a list of evokeds from epochs, one per event type."""
# Pick channel types for ERPs
# The `average` method for `EpochsTFR` doesn't support `picks`
picks_dict = {'picks': ['eeg', 'misc']} \
if isinstance(epochs, Epochs) else {}
# Loop over event types and average
# TODO: Use MNE built-in argument `by_event_type` once it's in `EpochsTFR`
evokeds = []
for event_type in epochs.event_id.keys():
evoked = epochs[event_type].average(**picks_dict, method=method)
evoked.comment = event_type
evokeds.append(evoked)
return evokeds
def update_events(epochs, cols):
"""Updates the events/event_id structures using cols from the metadata."""
# Generate event codes for the relevant columns
cols_df = pd.DataFrame(epochs.metadata[cols])
cols_df = cols_df.astype('str')
ids = cols_df.agg('/'.join, axis=1)
codes = ids.astype('category').cat.codes
# Create copy of the data with the new event codes
epochs_update = epochs.copy()
epochs_update.events[:, 2] = codes
epochs_update.event_id = dict(zip(ids, codes))
return epochs_update
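# Sketch of the recoding performed by `update_events` (illustrative only): for
# `cols = ['condition', 'block']`, each trial gets an id such as 'related/1',
# and the new integer event codes are the category codes of those ids, e.g.
#
#     ids = epochs.metadata[cols].astype('str').agg('/'.join, axis=1)
#     codes = ids.astype('category').cat.codes  # one integer per unique id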
def create_evokeds_df(evokeds, cols=None, trials=None, participant_id=None):
"""Converts mne.Evoked into a pd.DataFrame with metadata."""
# Convert ERP amplitudes from volts to microvolts
# The `to_data_frame` method for `AverageTFR` doesn't support `scalings`
scalings_dict = {'scalings': {'eeg': 1e6, 'misc': 1e6}} \
if isinstance(evokeds[0], Evoked) else {}
# Convert all evokeds to a single DataFrame
evokeds_dfs = [evoked.to_data_frame(**scalings_dict, time_format=None)
for evoked in evokeds]
evokeds_df = pd.concat(evokeds_dfs, ignore_index=True)
# Optionally add columns from the metadata
repeats = len(evokeds_df)
if cols is not None:
assert trials is not None, 'Must provide trials (metadata) with cols'
cols_df = pd.DataFrame(trials[cols])
cols_df = cols_df.astype('str')
cols_df = cols_df.drop_duplicates()
repeats = len(evokeds_df) / len(cols_df)
cols_df = cols_df.loc[cols_df.index.repeat(repeats)]
cols_df = cols_df.reset_index(drop=True)
evokeds_df = pd.concat([cols_df, evokeds_df], axis=1)
# Otherwise add comments from evokeds (assumed to contain event IDs)
else:
comments = [evoked.comment for evoked in evokeds]
repeats = len(evokeds_df) / len(comments)
comments = np.repeat(comments, repeats)
evokeds_df.insert(loc=0, column='event_id', value=comments)
# Optionally add participant_id
if participant_id is not None:
evokeds_df.insert(loc=0, column='participant_id', value=participant_id)
return evokeds_df
def compute_grands(evokeds_per_participant):
"""Averages evokeds of all participants into grand averages."""
# Average across participants for each condition
evokeds_per_condition = list(map(list, zip(*evokeds_per_participant)))
grands = [grand_average(x) for x in evokeds_per_condition]
# Add meaningful comments
comments = [x[0].comment for x in evokeds_per_condition]
for grand, comment in zip(grands, comments):
grand.comment = comment
return grands
def compute_grands_df(evokeds_df):
"""Averages evoked DataFrames of all participants into grand averages."""
# Get indices of columns to group by (conditions, times, frequencies)
first_grouping_ix = 1 # Column 0 is participant_id (to average over)
last_grouping_col = 'freq' if 'freq' in evokeds_df.columns else 'time'
last_grouping_ix = evokeds_df.columns.get_loc(last_grouping_col)
grouping_ixs = range(first_grouping_ix, last_grouping_ix + 1)
# Average by grouping columns
group_cols = list(evokeds_df.columns[grouping_ixs])
grands_df = evokeds_df.groupby(group_cols, dropna=False).mean(numeric_only=True)
# Convert conditions from index back to columns
grands_df = grands_df.reset_index()
return grands_df
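# End-to-end sketch (illustrative only; `all_epochs` is an assumed dict of
# per-participant Epochs and the query label is made up):
#
#     evokeds_per_participant, dfs = [], []
#     for participant_id, epochs in all_epochs.items():
#         evokeds, evokeds_df = compute_evokeds(
#             epochs, average_by={'target': 'condition == "target"'},
#             participant_id=participant_id)
#         evokeds_per_participant.append(evokeds)
#         dfs.append(evokeds_df)
#     grands = compute_grands(evokeds_per_participant)
#     grands_df = compute_grands_df(pd.concat(dfs, ignore_index=True))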
|
PypiClean
|
/idds-monitor-0.11.10.tar.gz/idds-monitor-0.11.10/data/plugins/bower_components/gmaps/lib/gmaps.core.js
|
if (!(typeof window.google === 'object' && window.google.maps)) {
throw 'Google Maps API is required. Please register the following JavaScript library http://maps.google.com/maps/api/js?sensor=true.'
}
var extend_object = function(obj, new_obj) {
var name;
if (obj === new_obj) {
return obj;
}
for (name in new_obj) {
obj[name] = new_obj[name];
}
return obj;
};
var replace_object = function(obj, replace) {
var name;
if (obj === replace) {
return obj;
}
for (name in replace) {
if (obj[name] != undefined) {
obj[name] = replace[name];
}
}
return obj;
};
var array_map = function(array, callback) {
var original_callback_params = Array.prototype.slice.call(arguments, 2),
array_return = [],
array_length = array.length,
i;
if (Array.prototype.map && array.map === Array.prototype.map) {
array_return = Array.prototype.map.call(array, function(item) {
var callback_params = original_callback_params.slice(0);
callback_params.splice(0, 0, item);
return callback.apply(this, callback_params);
});
}
else {
for (i = 0; i < array_length; i++) {
      var callback_params = original_callback_params.slice(0);
callback_params.splice(0, 0, array[i]);
array_return.push(callback.apply(this, callback_params));
}
}
return array_return;
};
var array_flat = function(array) {
var new_array = [],
i;
for (i = 0; i < array.length; i++) {
new_array = new_array.concat(array[i]);
}
return new_array;
};
var coordsToLatLngs = function(coords, useGeoJSON) {
var first_coord = coords[0],
second_coord = coords[1];
if (useGeoJSON) {
first_coord = coords[1];
second_coord = coords[0];
}
return new google.maps.LatLng(first_coord, second_coord);
};
var arrayToLatLng = function(coords, useGeoJSON) {
var i;
for (i = 0; i < coords.length; i++) {
if (!(coords[i] instanceof google.maps.LatLng)) {
if (coords[i].length > 0 && typeof(coords[i][0]) === "object") {
coords[i] = arrayToLatLng(coords[i], useGeoJSON);
}
else {
coords[i] = coordsToLatLngs(coords[i], useGeoJSON);
}
}
}
return coords;
};
var getElementsByClassName = function (class_name, context) {
var element,
_class = class_name.replace('.', '');
if ('jQuery' in this && context) {
element = $("." + _class, context)[0];
} else {
element = document.getElementsByClassName(_class)[0];
}
return element;
};
var getElementById = function(id, context) {
var element,
id = id.replace('#', '');
if ('jQuery' in window && context) {
element = $('#' + id, context)[0];
} else {
element = document.getElementById(id);
};
return element;
};
var findAbsolutePosition = function(obj) {
var curleft = 0,
curtop = 0;
if (obj.offsetParent) {
do {
curleft += obj.offsetLeft;
curtop += obj.offsetTop;
} while (obj = obj.offsetParent);
}
return [curleft, curtop];
};
var GMaps = (function(global) {
"use strict";
var doc = document;
var GMaps = function(options) {
if (!this) return new GMaps(options);
options.zoom = options.zoom || 15;
options.mapType = options.mapType || 'roadmap';
var self = this,
i,
events_that_hide_context_menu = [
'bounds_changed', 'center_changed', 'click', 'dblclick', 'drag',
'dragend', 'dragstart', 'idle', 'maptypeid_changed', 'projection_changed',
'resize', 'tilesloaded', 'zoom_changed'
],
events_that_doesnt_hide_context_menu = ['mousemove', 'mouseout', 'mouseover'],
options_to_be_deleted = ['el', 'lat', 'lng', 'mapType', 'width', 'height', 'markerClusterer', 'enableNewStyle'],
identifier = options.el || options.div,
markerClustererFunction = options.markerClusterer,
mapType = google.maps.MapTypeId[options.mapType.toUpperCase()],
map_center = new google.maps.LatLng(options.lat, options.lng),
zoomControl = options.zoomControl || true,
zoomControlOpt = options.zoomControlOpt || {
style: 'DEFAULT',
position: 'TOP_LEFT'
},
zoomControlStyle = zoomControlOpt.style || 'DEFAULT',
zoomControlPosition = zoomControlOpt.position || 'TOP_LEFT',
panControl = options.panControl || true,
mapTypeControl = options.mapTypeControl || true,
scaleControl = options.scaleControl || true,
streetViewControl = options.streetViewControl || true,
        overviewMapControl = options.overviewMapControl || true,
map_options = {},
map_base_options = {
zoom: this.zoom,
center: map_center,
mapTypeId: mapType
},
map_controls_options = {
panControl: panControl,
zoomControl: zoomControl,
zoomControlOptions: {
style: google.maps.ZoomControlStyle[zoomControlStyle],
position: google.maps.ControlPosition[zoomControlPosition]
},
mapTypeControl: mapTypeControl,
scaleControl: scaleControl,
streetViewControl: streetViewControl,
overviewMapControl: overviewMapControl
};
if (typeof(options.el) === 'string' || typeof(options.div) === 'string') {
if (identifier.indexOf("#") > -1) {
this.el = getElementById(identifier, options.context);
} else {
this.el = getElementsByClassName.apply(this, [identifier, options.context]);
}
} else {
this.el = identifier;
}
if (typeof(this.el) === 'undefined' || this.el === null) {
throw 'No element defined.';
}
window.context_menu = window.context_menu || {};
window.context_menu[self.el.id] = {};
this.controls = [];
this.overlays = [];
this.layers = []; // array with kml/georss and fusiontables layers, can be as many
this.singleLayers = {}; // object with the other layers, only one per layer
this.markers = [];
this.polylines = [];
this.routes = [];
this.polygons = [];
this.infoWindow = null;
this.overlay_el = null;
this.zoom = options.zoom;
this.registered_events = {};
this.el.style.width = options.width || this.el.scrollWidth || this.el.offsetWidth;
this.el.style.height = options.height || this.el.scrollHeight || this.el.offsetHeight;
google.maps.visualRefresh = options.enableNewStyle;
for (i = 0; i < options_to_be_deleted.length; i++) {
delete options[options_to_be_deleted[i]];
}
if(options.disableDefaultUI != true) {
map_base_options = extend_object(map_base_options, map_controls_options);
}
map_options = extend_object(map_base_options, options);
for (i = 0; i < events_that_hide_context_menu.length; i++) {
delete map_options[events_that_hide_context_menu[i]];
}
for (i = 0; i < events_that_doesnt_hide_context_menu.length; i++) {
delete map_options[events_that_doesnt_hide_context_menu[i]];
}
this.map = new google.maps.Map(this.el, map_options);
if (markerClustererFunction) {
this.markerClusterer = markerClustererFunction.apply(this, [this.map]);
}
var buildContextMenuHTML = function(control, e) {
var html = '',
options = window.context_menu[self.el.id][control];
for (var i in options){
if (options.hasOwnProperty(i)) {
var option = options[i];
html += '<li><a id="' + control + '_' + i + '" href="#">' + option.title + '</a></li>';
}
}
if (!getElementById('gmaps_context_menu')) return;
var context_menu_element = getElementById('gmaps_context_menu');
context_menu_element.innerHTML = html;
var context_menu_items = context_menu_element.getElementsByTagName('a'),
context_menu_items_count = context_menu_items.length,
i;
for (i = 0; i < context_menu_items_count; i++) {
var context_menu_item = context_menu_items[i];
var assign_menu_item_action = function(ev){
ev.preventDefault();
options[this.id.replace(control + '_', '')].action.apply(self, [e]);
self.hideContextMenu();
};
google.maps.event.clearListeners(context_menu_item, 'click');
google.maps.event.addDomListenerOnce(context_menu_item, 'click', assign_menu_item_action, false);
}
var position = findAbsolutePosition.apply(this, [self.el]),
left = position[0] + e.pixel.x - 15,
top = position[1] + e.pixel.y- 15;
context_menu_element.style.left = left + "px";
context_menu_element.style.top = top + "px";
context_menu_element.style.display = 'block';
};
this.buildContextMenu = function(control, e) {
if (control === 'marker') {
e.pixel = {};
var overlay = new google.maps.OverlayView();
overlay.setMap(self.map);
overlay.draw = function() {
var projection = overlay.getProjection(),
position = e.marker.getPosition();
e.pixel = projection.fromLatLngToContainerPixel(position);
buildContextMenuHTML(control, e);
};
}
else {
buildContextMenuHTML(control, e);
}
};
this.setContextMenu = function(options) {
window.context_menu[self.el.id][options.control] = {};
var i,
ul = doc.createElement('ul');
for (i in options.options) {
if (options.options.hasOwnProperty(i)) {
var option = options.options[i];
window.context_menu[self.el.id][options.control][option.name] = {
title: option.title,
action: option.action
};
}
}
ul.id = 'gmaps_context_menu';
ul.style.display = 'none';
ul.style.position = 'absolute';
ul.style.minWidth = '100px';
ul.style.background = 'white';
ul.style.listStyle = 'none';
ul.style.padding = '8px';
ul.style.boxShadow = '2px 2px 6px #ccc';
doc.body.appendChild(ul);
var context_menu_element = getElementById('gmaps_context_menu')
google.maps.event.addDomListener(context_menu_element, 'mouseout', function(ev) {
if (!ev.relatedTarget || !this.contains(ev.relatedTarget)) {
window.setTimeout(function(){
context_menu_element.style.display = 'none';
}, 400);
}
}, false);
};
this.hideContextMenu = function() {
var context_menu_element = getElementById('gmaps_context_menu');
if (context_menu_element) {
context_menu_element.style.display = 'none';
}
};
var setupListener = function(object, name) {
google.maps.event.addListener(object, name, function(e){
if (e == undefined) {
e = this;
}
options[name].apply(this, [e]);
self.hideContextMenu();
});
};
//google.maps.event.addListener(this.map, 'idle', this.hideContextMenu);
google.maps.event.addListener(this.map, 'zoom_changed', this.hideContextMenu);
for (var ev = 0; ev < events_that_hide_context_menu.length; ev++) {
var name = events_that_hide_context_menu[ev];
if (name in options) {
setupListener(this.map, name);
}
}
for (var ev = 0; ev < events_that_doesnt_hide_context_menu.length; ev++) {
var name = events_that_doesnt_hide_context_menu[ev];
if (name in options) {
setupListener(this.map, name);
}
}
google.maps.event.addListener(this.map, 'rightclick', function(e) {
if (options.rightclick) {
options.rightclick.apply(this, [e]);
}
if(window.context_menu[self.el.id]['map'] != undefined) {
self.buildContextMenu('map', e);
}
});
this.refresh = function() {
google.maps.event.trigger(this.map, 'resize');
};
this.fitZoom = function() {
var latLngs = [],
markers_length = this.markers.length,
i;
for (i = 0; i < markers_length; i++) {
if(typeof(this.markers[i].visible) === 'boolean' && this.markers[i].visible) {
latLngs.push(this.markers[i].getPosition());
}
}
this.fitLatLngBounds(latLngs);
};
this.fitLatLngBounds = function(latLngs) {
var total = latLngs.length,
bounds = new google.maps.LatLngBounds(),
i;
for(i = 0; i < total; i++) {
bounds.extend(latLngs[i]);
}
this.map.fitBounds(bounds);
};
this.setCenter = function(lat, lng, callback) {
this.map.panTo(new google.maps.LatLng(lat, lng));
if (callback) {
callback();
}
};
this.getElement = function() {
return this.el;
};
this.zoomIn = function(value) {
value = value || 1;
this.zoom = this.map.getZoom() + value;
this.map.setZoom(this.zoom);
};
this.zoomOut = function(value) {
value = value || 1;
this.zoom = this.map.getZoom() - value;
this.map.setZoom(this.zoom);
};
var native_methods = [],
method;
for (method in this.map) {
if (typeof(this.map[method]) == 'function' && !this[method]) {
native_methods.push(method);
}
}
for (i = 0; i < native_methods.length; i++) {
(function(gmaps, scope, method_name) {
gmaps[method_name] = function(){
return scope[method_name].apply(scope, arguments);
};
})(this, this.map, native_methods[i]);
}
};
return GMaps;
})(this);
|
PypiClean
|
/odoo14-addon-odoo_instance-14.0.1.2.1.tar.gz/odoo14-addon-odoo_instance-14.0.1.2.1/odoo/addons/odoo_instance/models/odoo_instance.py
|
import logging
import re
from datetime import datetime, time, timedelta
from urllib.parse import urlparse
import pytz
import requests
import yaml
from odoo import api, fields, models
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class OdooInstance(models.Model):
_name = 'odoo.instance'
_description = 'Odoo Instance'
_inherit = ['mail.thread', 'mail.activity.mixin']
# General Information
name = fields.Char(string='Instance Name', required=True)
description = fields.Text(string='Instance Description')
start_date = fields.Date(string='Start Date')
project_id = fields.Many2one(
'project.project', string='Project', ondelete='set null')
helpdesk_team_id = fields.Many2one(
'helpdesk.team', string='Helpdesk Team', ondelete='set null')
instance_url = fields.Char(string='Instance URL')
partner_id = fields.Many2one('res.partner', string='Client', required=True)
client_contact_id = fields.Many2one(
'res.partner', string='Client Contact', domain=[('type', '=', 'contact')])
technician_ids = fields.Many2many(
'res.users', string='Technicians', relation='odoo_instance_technician_rel')
functional_ids = fields.Many2many(
'res.users', string='Functional Experts', relation='odoo_instance_functional_rel')
odoo_version_id = fields.Many2one(
'odoo.version', string="Odoo Version", required=True)
state = fields.Selection([
('in_progress', 'In Progress'),
('running', 'Running'),
('paused', 'Paused'),
('cancelled', 'Terminated')
], string='State', default='in_progress')
# Technical Information
inventory_url = fields.Char(string='Inventory URL')
branch = fields.Char(string='Branch', required=True)
odoo_release_id = fields.Many2one('odoo.release', string='Odoo Release')
instance_type = fields.Selection([
('test', 'Test'),
('production', 'Production'),
], string="Instance Type", required=True)
deploy_schedule = fields.One2many(
'odoo.instance.schedule', 'instance_id', string="Deploy Schedule")
unavailable_windows = fields.One2many(
'odoo.instance.unavailable_schedule', 'instance_id', string='Unavailable Windows')
deploy_event_ids = fields.One2many(
'calendar.event', 'instance_id', string='Deploy Events')
server_information = fields.Text(string='Server Information')
requirements_url = fields.Char(string='Requirements URL')
module_ids = fields.Many2many('odoo.instance.module', string='Modules')
server_type = fields.Selection([
('cx11', 'cx11'),
('cpx11', 'cpx11'),
('cx21', 'cx21'),
('cpx21', 'cpx21'),
('cx31', 'cx31'),
('cpx31', 'cpx31'),
('cx41', 'cx41'),
('cpx41', 'cpx41'),
('cx51', 'cx51'),
('cpx51', 'cpx51'),
], string="Server Type")
troubleshooting_ids = fields.One2many(
'odoo.instance.troubleshooting', 'instance_id', string='Troubleshooting')
deploy_duration = fields.Float(
string='Deploy Duration', default=1.0, help="Duration in hours")
database_ids = fields.One2many(
'odoo.instance.database', 'instance_id', string='Databases')
# Functional Information
process_ids = fields.Many2many(
'odoo.instance.process',
'odoo_instance_process_rel',
'instance_id',
'process_id',
string='Processes'
)
available_windows = fields.One2many(
'odoo.instance.window', 'instance_id', string='Available Windows')
functional_requirement_ids = fields.One2many(
'odoo.instance.functional_requirement', 'odoo_instance_id', string='Functional Requirements')
functional_configuration_ids = fields.Many2many(
'odoo.instance.functional_configuration',
string='Functional Configurations',
relation='odoo_instance_functional_configuration_rel',
column1='odoo_instance_id',
column2='functional_configuration_id',
track_visibility='onchange',
help='List of functional configurations related to the current instance.'
)
# Commercial Information
contact_role_id = fields.Many2one('res.partner', string='Rol de Contacto')
maintenance_contract_ids = fields.Char(string='Contratos de Mantenimiento')
support_contract_ids = fields.Char(string='Contratos de Soporte')
implementation_contract_id = fields.Char(
string='Contrato de Implementación')
# maintenance_contract_ids = fields.Many2many('contract.contract', string='Contratos de Mantenimiento', relation='odoo_instance_maintenance_contract_rel')
# support_contract_ids = fields.Many2many('contract.contract', string='Contratos de Soporte', relation='odoo_instance_support_contract_rel')
# implementation_contract_id = fields.Many2one('contract.contract', string='Contrato de Implementación', relation='odoo_instance_implementation_contract_rel')
def _strip_protocol_and_port(self):
parsed_url = urlparse(self.instance_url)
return parsed_url.hostname
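    # Illustrative example (not part of the original file): with
    # instance_url = 'https://erp.example.org:8069/web', urlparse().hostname
    # yields 'erp.example.org', which is the key used below to locate the
    # instance's host_vars config in the inventory repository.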
def _get_instance_config_yaml(self, url):
# Get the YAML content
response = requests.get(url)
if response.status_code != 200:
return {}
response.raise_for_status()
return yaml.safe_load(response.text)
def _update_instance_dbs(self):
base_url = self.inventory_url
branch = self.branch
instance_url = self._strip_protocol_and_port()
urls = {
'host_vars': f"{base_url}/-/raw/{branch}/inventory/host_vars/{instance_url}/config.yml",
'group_vars': f"{base_url}/-/raw/{branch}/inventory/group_vars/all.yml",
}
for url in urls.values():
yaml_content = self._get_instance_config_yaml(url)
odoo_dbs = yaml_content.get("odoo_role_odoo_dbs", [])
test_dbs = yaml_content.get("odoo_role_test_dbs", [])
instance_dbs_info = []
self.database_ids.unlink()
for db in odoo_dbs:
self.database_ids.create({
'name': db,
'is_test': False,
'instance_id': self.id,
})
for test_db in test_dbs:
self.database_ids.create({
'name': test_db,
'is_test': True,
'instance_id': self.id,
})
def update_odoo_release(self):
inventory_url = self.inventory_url
branch = self.branch
raw_inventory_url = f"{inventory_url}/-/raw/{branch}/inventory/group_vars/all.yml"
response = requests.get(raw_inventory_url)
if response.status_code == 200:
inventory_data = yaml.safe_load(response.text)
odoo_release_str = inventory_data.get('odoo_role_odoo_release')
if odoo_release_str:
odoo_version, release_date_str = odoo_release_str.split('_')
release_date = fields.Date.from_string(release_date_str)
odoo_version_id = self.env['odoo.version'].search(
[('name', '=', odoo_version)], limit=1)
if odoo_version_id:
odoo_release = self.env['odoo.release'].search([
('name', '=', odoo_release_str),
('odoo_version_id', '=', odoo_version_id.id)
], limit=1)
if not odoo_release:
odoo_release = self.env['odoo.release'].create({
'name': odoo_release_str,
'release_date': release_date,
'odoo_version_id': odoo_version_id.id,
})
self.odoo_release_id = odoo_release
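    # Illustrative note (assumed inventory value format): a setting such as
    # odoo_role_odoo_release: "14.0_2023-04-15" is split on '_' into the Odoo
    # version ("14.0") and a release date that fields.Date.from_string can
    # parse ("2023-04-15").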
def update_instance_info(self):
for instance in self:
# Update Odoo Release
instance.update_odoo_release()
# Update Instance DBs
instance._update_instance_dbs()
@api.model
def enqueue_update_instance_info(self):
instances = self.search([])
for instance in instances:
if instance.inventory_url:
instance.with_delay().update_instance_info()
if instance.requirements_url:
instance.with_delay().download_and_process_requirements_txt()
def toggle_state(self):
state_order = ['in_progress', 'running', 'paused', 'cancelled']
next_state_index = (state_order.index(
self.state) + 1) % len(state_order)
self.state = state_order[next_state_index]
def download_and_process_requirements_txt(self):
if not self.requirements_url:
return
response = requests.get(self.requirements_url)
if response.status_code == 200:
requirements_txt = response.text
self.import_requirements_txt(requirements_txt)
else:
raise UserError(
('Error downloading requirements.txt file. Status code: %s') % response.status_code)
def open_calendar_event_wizard(self):
self.ensure_one()
wizard = self.env['odoo.instance.calendar.event.wizard'].create({
'instance_id': self.id,
'date_start': self.next_window,
'duration': self.deploy_duration,
})
return {
'name': ('Create Calendar Event'),
'type': 'ir.actions.act_window',
'res_model': 'odoo.instance.calendar.event.wizard',
'res_id': wizard.id,
'view_mode': 'form',
'target': 'new',
}
def import_requirements_txt(self, file_content):
"""Parse a requirements.txt file and update the modules field."""
Module = self.env['odoo.instance.module']
ModuleVersion = self.env['odoo.instance.module.version']
modules_to_link = []
current_versions = ModuleVersion.search([
('instance_ids', 'in', [self.id]),
], limit=1)
for version in current_versions:
# delete the instance in the version
version.instance_ids = [(3, self.id)]
# if the version is not used by any instance, delete it
if not version.instance_ids:
version.unlink()
for line in file_content.splitlines():
line = line.strip()
if not line or '==' not in line:
continue
module_name, version = line.split('==')
module = Module.search(
[('technical_name', '=', module_name)], limit=1)
if not module:
module_data = self.get_module_name_from_pypi(module_name)
module = Module.create({
'technical_name': module_data['technical_name'],
'name': module_data['name'],
'module_type': module_data['module_type'],
'odoo_version_id': module_data['odoo_version_id'],
'pypi_url': f'https://pypi.org/project/{module_name}/',
'is_odoo_module': module_data['is_odoo_module'],
})
version_record = ModuleVersion.search([
('name', '=', version),
('module_id', '=', module.id),
], limit=1)
if version_record:
# Update the existing version record by adding the instance
_logger.critical("Version found %s",
version_record.instance_ids)
version_record.instance_ids |= self
else:
# Create a new version record
_logger.critical("Version not found, creating it")
version_record = ModuleVersion.create({
'name': version,
'module_id': module.id,
'instance_ids': [(4, self.id)],
})
modules_to_link.append(module.id)
self.module_ids = [(6, 0, modules_to_link)]
ModuleVersion.archive_or_delete_unassociated_module_versions()
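    # Parsing sketch (illustrative only): every non-empty requirements.txt line
    # of the form "name==version" is split into a module name and a pinned
    # version, e.g.
    #
    #     line = "odoo14-addon-mail-tracking==14.0.2.1.0"
    #     module_name, version = line.split('==')
    #     # -> ('odoo14-addon-mail-tracking', '14.0.2.1.0')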
def get_module_name_from_pypi(self, technical_name):
url = f'https://pypi.org/pypi/{technical_name}/json'
response = requests.get(url)
module_data = {
'technical_name': technical_name,
'name': technical_name,
'module_type': 'dep',
'odoo_version_id': self.odoo_version_id.id,
}
if response.status_code == 200:
data = response.json()
module_data['technical_name'] = data['info']['name']
module_data['name'] = data['info']['summary']
module_data['home_page'] = data['info']['home_page']
module_data['is_odoo_module'] = False
_logger.debug('Importing technical_name: %s', technical_name)
if 'odoo' in module_data['technical_name'].lower():
module_data['is_odoo_module'] = True
if module_data['is_odoo_module']:
# Check if the module is Odoo core or OCA
if 'oca' in module_data['home_page'].lower():
module_data['module_type'] = 'OCA'
# extract odoo version from version key. Example: version "14.0.1.0.3"
odoo_version_pattern = r"(\d{1,2}\.\d{1,2})"
match = re.search(odoo_version_pattern,
data['info']['version'])
if match:
module_data['odoo_version'] = match.group(1)
module_data['odoo_version_id'] = self.env['odoo.version'].search([('name', '=', match.group(1))],
limit=1).id
else:
_logger.warning(
f'Error fetching module name from pypi.org for {technical_name}')
return module_data
def compute_available_windows(self):
for instance in self:
# Clear existing available windows
instance.available_windows.unlink()
# Calculate the available windows in the next 7 days
now_utc = datetime.utcnow()
user_tz = pytz.timezone(self.env.user.tz or 'UTC')
now_local = now_utc.astimezone(user_tz)
for i in range(7):
current_date = now_local.date() + timedelta(days=i)
current_weekday = str(current_date.weekday())
unavailable_periods = []
for schedule in instance.unavailable_windows:
if schedule.day_of_week == current_weekday:
unavailable_periods.append(
schedule.get_unavailable_periods(current_date, user_tz))
unavailable_periods.sort()
available_start_time = user_tz.localize(
datetime.combine(current_date, time(0, 0)))
for period_start, period_end in unavailable_periods:
if period_start > available_start_time:
deploy_end_time = period_start - \
timedelta(hours=instance.deploy_duration)
if deploy_end_time > available_start_time:
instance.available_windows.create({
'instance_id': instance.id,
'start_time': available_start_time.astimezone(pytz.utc).replace(tzinfo=None),
'end_time': deploy_end_time.astimezone(pytz.utc).replace(tzinfo=None),
})
available_start_time = period_end
available_end_time = user_tz.localize(
datetime.combine(current_date, time.max))
if available_end_time > available_start_time:
deploy_end_time = available_end_time - \
timedelta(hours=instance.deploy_duration)
if deploy_end_time > available_start_time:
instance.available_windows.create({
'instance_id': instance.id,
'start_time': available_start_time.astimezone(pytz.utc).replace(tzinfo=None),
'end_time': deploy_end_time.astimezone(pytz.utc).replace(tzinfo=None),
})
instance.action_delete_past_or_orphan_windows()
@api.onchange('odoo_version_id')
def _onchange_odoo_version_id(self):
if self.odoo_version_id:
self.module_ids = False
return {
'domain': {
'module_ids': [('odoo_version_id', '=', self.odoo_version_id.id)]
}
}
else:
return {
'domain': {
'module_ids': []
}
}
def open_instance_url(self):
self.ensure_one()
if not self.instance_url:
raise UserError(("No hay URL para esta instancia."))
return {
'type': 'ir.actions.act_url',
'url': self.instance_url,
'target': 'new',
}
def action_delete_past_or_orphan_windows(self):
self.env["odoo.instance.window"].delete_past_or_orphan_windows()
def action_clear_windows(self):
self.env['odoo.instance.window'].search(
[('instance_id', '=', self.id)]).unlink()
def write(self, vals):
        # If 'functional_configuration_ids' changed
if 'functional_configuration_ids' in vals:
            # Get the new configuration ids
new_config_ids = set(vals['functional_configuration_ids'][0][2])
for record in self:
                # Get the old configuration ids
old_config_ids = set(record.functional_configuration_ids.ids)
                # Find which configurations were added and which were removed
added_config_ids = new_config_ids - old_config_ids
removed_config_ids = old_config_ids - new_config_ids
                # Get the names of the added and removed configurations
added_config_names = self.env['odoo.instance.functional_configuration'].browse(added_config_ids).mapped('name')
removed_config_names = self.env['odoo.instance.functional_configuration'].browse(removed_config_ids).mapped('name')
                # Post messages in the chatter
for config_name in added_config_names:
record.message_post(body="Añadida configuración funcional: %s" % config_name)
for config_name in removed_config_names:
record.message_post(body="Eliminada configuración funcional: %s" % config_name)
return super().write(vals)
class OdooInstanceSchedule(models.Model):
_name = 'odoo.instance.schedule'
_description = 'Deploy Schedule'
instance_id = fields.Many2one('odoo.instance', string="Instance")
day_of_week = fields.Selection([
('0', 'Monday'),
('1', 'Tuesday'),
('2', 'Wednesday'),
('3', 'Thursday'),
('4', 'Friday'),
('5', 'Saturday'),
('6', 'Sunday'),
], string="Day of the Week", required=True)
start_time = fields.Float(string="Start Time", required=True)
end_time = fields.Float(string="End Time", required=True)
duration = fields.Float(
string='Duration', default=1.0, help="Duration in hours")
class FunctionalRequirement(models.Model):
_name = 'odoo.instance.functional_requirement'
_description = 'Functional Requirement'
name = fields.Char('Requirement', required=True)
status = fields.Selection([
('not_started', 'Not Started'),
('in_progress', 'In Progress'),
('completed', 'Completed'),
('waiting_validation', 'Waiting for Validation')
], string='Status', default='not_started', required=True)
odoo_instance_id = fields.Many2one(
'odoo.instance', string='Odoo Instance')
class OdooInstanceFunctionalConfiguration(models.Model):
_name = 'odoo.instance.functional_configuration'
_description = 'Odoo Instance Functional Configuration'
name = fields.Char('Configuration', required=True)
odoo_version_id = fields.Many2one(
'odoo.version', string="Odoo Version", required=True)
description = fields.Text('Description')
required_modules= fields.Many2many(
'odoo.instance.module', string='Required Modules')
handbook_url = fields.Text(string='Handbook URL')
class OdooInstanceTroubleshooting(models.Model):
_name = 'odoo.instance.troubleshooting'
_description = 'Odoo Instance Troubleshooting'
date = fields.Date(string='Date', default=fields.Date.context_today)
title = fields.Char(string='Title', required=True)
url = fields.Char(string='URL')
type = fields.Selection([
('postmortem', 'Post Mortem'),
('config', 'Configuration'),
('other', 'Other')
], string='Type', default='config', required=True)
instance_id = fields.Many2one(
'odoo.instance', string='Instance', ondelete='cascade')
class OdooInstanceUnavailableSchedule(models.Model):
_name = 'odoo.instance.unavailable_schedule'
_description = 'Unavailable Deploy Schedule'
instance_id = fields.Many2one('odoo.instance', string="Instance")
day_of_week = fields.Selection([
('0', 'Monday'),
('1', 'Tuesday'),
('2', 'Wednesday'),
('3', 'Thursday'),
('4', 'Friday'),
('5', 'Saturday'),
('6', 'Sunday'),
], string="Day of the Week", required=True)
start_time = fields.Float(string="Start Time", required=True)
end_time = fields.Float(string="End Time", required=True)
def get_unavailable_periods(self, current_date, user_tz):
schedule_hour = int(self.start_time)
schedule_minute = int((self.start_time % 1) * 60)
schedule_time = time(schedule_hour, schedule_minute, 0)
schedule_end_hour = int(self.end_time)
schedule_end_minute = int((self.end_time % 1) * 60)
schedule_end_time = time(schedule_end_hour, schedule_end_minute, 0)
start_time = user_tz.localize(
datetime.combine(current_date, schedule_time))
end_time = user_tz.localize(
datetime.combine(current_date, schedule_end_time))
return (start_time, end_time)
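    # Illustrative example (not part of the original file): float times are
    # converted to clock times, so start_time=9.5 and end_time=13.25 on a
    # Monday yield the localized unavailable window 09:30-13:15 for that date.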
|
PypiClean
|
/fastapi-session-0.2.8.tar.gz/fastapi-session-0.2.8/fastapi_session/session_manager.py
|
import inspect
import random
import string
from typing import Callable, Awaitable, Union
from fastapi import Request, Cookie, Response
from fastapi import BackgroundTasks, FastAPI
from .session_interface import BackendInterface
from .tasks import repeat_every
class SessionManager:
def __init__(self, backend: BackendInterface = None, app: FastAPI = None):
self._backend = backend
self._session_id_callback = None
self._cookie_name = None
        # Periodic backend cleanup is scheduled in `init_app`; wire it up if an
        # app instance was provided.
        if app is not None:
            self.init_app(app)
def init_app(self, app: FastAPI) -> None:
@app.on_event("startup")
@repeat_every(seconds=60 * 60)
def remove_expired_sessions_task() -> None:
self._backend.cleanup()
def use_cookie(self, cookie_name: str = "session"):
"""
        This is just a basic session id mechanism that uses a cookie.
"""
self._cookie_name = cookie_name
self.session_id(self._cookie)
def init_cookie(self, response: Response, id: str = None):
if id is None:
id = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(16))
response.set_cookie(self._cookie_name, id)
def _cookie(self, request: Request) -> str:
return request.cookies.get(self._cookie_name, None)
def session_id(self, callback: Union[Callable, Awaitable]) -> Union[Callable, Awaitable]:
"""
This sets the callback to retrieve the session id.
        The callback receives the current request and should
        return the session id as a string.
Basic usage::
>>> from fastapi import FastAPI
>>> from fastapi_session import SessionManager
>>> app = FastAPI()
>>> manager = SessionManager(backend)
>>> manager.session_id(get_id)
>>> # this is the preferred way
>>> @manager.session_id
>>> def get_id(request: Request):
... # get session id logic here
        :param Callable or Awaitable callback: The callback which returns the session id
:return: The callback
"""
self._session_id_callback = callback
return callback
async def __call__(self, request: Request):
if self._session_id_callback is None:
raise Exception(
"Missing session_id_callback callback"
)
if inspect.iscoroutinefunction(self._session_id_callback):
_session_id = await self._session_id_callback(request)
else:
_session_id = self._session_id_callback(request)
return self._backend.get_session(_session_id)
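    # Usage sketch (illustrative only; `backend` is any BackendInterface
    # implementation, `Depends` is fastapi.Depends, and the route is made up):
    #
    #     manager = SessionManager(backend)
    #     manager.init_app(app)          # schedules periodic backend cleanup
    #     manager.use_cookie("session")
    #
    #     @app.get("/items")
    #     async def read_items(session=Depends(manager)):
    #         ...  # `session` is whatever backend.get_session(session_id) returns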
|
PypiClean
|
/stickytape-0.2.0.tar.gz/stickytape-0.2.0/README.rst
|
stickytape: Convert Python packages into a single script
========================================================
Stickytape can be used to convert a Python script and any Python modules
it depends on into a single-file Python script.
Since this relies on correctly analysing both your script and any dependent modules,
this may not work correctly in all circumstances.
I bodged together the code a long time ago for a specific use case I had,
so many normal uses of Python imports are not properly supported.
If you need to create a standalone executable from your Python script,
I recommend using an alternative such as `PyInstaller <http://www.pyinstaller.org/>`_.
Installation
------------
::
pip install stickytape
Usage
-----
You can tell stickytape which directories to search using the ``--add-python-path`` argument.
For instance:
.. code:: sh
stickytape scripts/blah --add-python-path . > /tmp/blah-standalone
Or to output directly to a file:
.. code:: sh
stickytape scripts/blah --add-python-path . --output-file /tmp/blah-standalone
You can also point stickytape towards a Python binary that it should use
sys.path from, for instance the Python binary inside a virtualenv:
.. code:: sh
stickytape scripts/blah --python-binary _virtualenv/bin/python --output-file /tmp/blah-standalone
Stickytape cannot automatically detect dynamic imports,
but you can use ``--add-python-module`` to explicitly include modules:
.. code:: sh
stickytape scripts/blah --add-python-module blah.util
By default, stickytape will ignore the shebang in the script
and use ``"#!/usr/bin/env python"`` in the output file.
To copy the shebang from the original script,
use ``--copy-shebang``:
.. code:: sh
stickytape scripts/blah --copy-shebang --output-file /tmp/blah-standalone
As you might expect with a program that munges source files, there are a
few caveats:
- Anything that relies on the specific location of files will probably
no longer work. In other words, ``__file__`` probably isn't all that
useful.
- Any files that aren't imported won't be included. Static data that
might be part of your project, such as other text files or images,
won't be included.
|
PypiClean
|
/morphine-maker-2.0.2.tar.gz/morphine-maker-2.0.2/morphine/minimal_exec.py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from morphine.globals import __intended_audience__, __enviroments__, __license__, __programming_lang__, find_packages
from yapf.yapflib.yapf_api import FormatCode
import os
from morphine.template import __minimal_exec__ as template
import re
class Ui_minimal_exec(object):
place = """Set 'Console Script' Entry Points Line By Line
[executable_name]=[module_name]:[function_name]
[executable_name]=[module_name].[main module]
[executable_name]=[module_name].[main module]:[function_name]
For Example:
morphine-maker=morphine.__main__:executor"""
def __init__(self, data):
self.data = data
try:
self.data.dir
except KeyError:
self.data.dir = "."
pass
def setupUi(self, minimal_exec):
minimal_exec.setObjectName("minimal_exec")
minimal_exec.resize(866, 488)
minimal_exec.setMinimumSize(QtCore.QSize(866, 488))
minimal_exec.setMaximumSize(QtCore.QSize(866, 488))
resolution = QtWidgets.QDesktopWidget().screenGeometry()
minimal_exec.move(
(resolution.width() / 2) - (minimal_exec.frameSize().width() / 2),
(resolution.height() / 2) -
(minimal_exec.frameSize().height() / 2),
)
self.centralwidget = QtWidgets.QWidget(minimal_exec)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 10, 111, 30))
self.label.setObjectName("label")
self.shortDesc = QtWidgets.QLineEdit(self.centralwidget)
self.shortDesc.setGeometry(QtCore.QRect(120, 10, 731, 30))
self.shortDesc.setObjectName("shortDesc")
self.longDesc = QtWidgets.QLineEdit(self.centralwidget)
self.longDesc.setGeometry(QtCore.QRect(120, 50, 690, 30))
self.longDesc.setText("")
self.longDesc.setObjectName("longDesc")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(10, 50, 101, 30))
self.label_2.setObjectName("label_2")
self.browse_long_desc = QtWidgets.QPushButton(self.centralwidget)
self.browse_long_desc.setGeometry(QtCore.QRect(820, 50, 31, 30))
self.browse_long_desc.setObjectName("browse_long_desc")
self.packages = QtWidgets.QLineEdit(self.centralwidget)
self.packages.setGeometry(QtCore.QRect(120, 90, 651, 30))
self.packages.setText("")
self.packages.setObjectName("packages")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(10, 90, 101, 30))
self.label_3.setObjectName("label_3")
self.find_packages = QtWidgets.QPushButton(self.centralwidget)
self.find_packages.setGeometry(QtCore.QRect(780, 90, 71, 30))
self.find_packages.setObjectName("find_packages")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(10, 130, 101, 30))
self.label_4.setObjectName("label_4")
self.entrypoints = QtWidgets.QPlainTextEdit(self.centralwidget)
self.entrypoints.setGeometry(QtCore.QRect(120, 130, 731, 91))
self.entrypoints.setReadOnly(False)
self.entrypoints.setObjectName("entrypoints")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(10, 230, 101, 30))
self.label_5.setObjectName("label_5")
self.keywords = QtWidgets.QLineEdit(self.centralwidget)
self.keywords.setGeometry(QtCore.QRect(120, 230, 320, 30))
self.keywords.setObjectName("keywords")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(450, 230, 81, 30))
self.label_6.setObjectName("label_6")
self.pyreq = QtWidgets.QLineEdit(self.centralwidget)
self.pyreq.setGeometry(QtCore.QRect(530, 230, 320, 30))
self.pyreq.setObjectName("pyreq")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(10, 270, 101, 30))
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setGeometry(QtCore.QRect(450, 270, 81, 30))
self.label_8.setObjectName("label_8")
self.envList = QtWidgets.QComboBox(self.centralwidget)
self.envList.setGeometry(QtCore.QRect(120, 270, 241, 30))
self.envList.setObjectName("envList")
self.envRem = QtWidgets.QPushButton(self.centralwidget)
self.envRem.setGeometry(QtCore.QRect(410, 270, 31, 30))
self.envRem.setObjectName("envRem")
self.envAdd = QtWidgets.QPushButton(self.centralwidget)
self.envAdd.setGeometry(QtCore.QRect(370, 270, 31, 30))
self.envAdd.setObjectName("envAdd")
self.audList = QtWidgets.QComboBox(self.centralwidget)
self.audList.setGeometry(QtCore.QRect(530, 270, 241, 30))
self.audList.setObjectName("audList")
self.audAdd = QtWidgets.QPushButton(self.centralwidget)
self.audAdd.setGeometry(QtCore.QRect(780, 270, 31, 30))
self.audAdd.setObjectName("audAdd")
self.audRem = QtWidgets.QPushButton(self.centralwidget)
self.audRem.setGeometry(QtCore.QRect(820, 270, 31, 30))
self.audRem.setObjectName("audRem")
self.licList = QtWidgets.QComboBox(self.centralwidget)
self.licList.setGeometry(QtCore.QRect(530, 310, 241, 30))
self.licList.setObjectName("licList")
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setGeometry(QtCore.QRect(450, 310, 81, 30))
self.label_9.setObjectName("label_9")
self.langList = QtWidgets.QComboBox(self.centralwidget)
self.langList.setGeometry(QtCore.QRect(120, 310, 241, 30))
self.langList.setObjectName("langList")
self.langAdd = QtWidgets.QPushButton(self.centralwidget)
self.langAdd.setGeometry(QtCore.QRect(370, 310, 31, 30))
self.langAdd.setObjectName("langAdd")
self.licRem = QtWidgets.QPushButton(self.centralwidget)
self.licRem.setGeometry(QtCore.QRect(820, 310, 31, 30))
self.licRem.setObjectName("licRem")
self.langRem = QtWidgets.QPushButton(self.centralwidget)
self.langRem.setGeometry(QtCore.QRect(410, 310, 31, 30))
self.langRem.setObjectName("langRem")
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setGeometry(QtCore.QRect(10, 310, 101, 30))
self.label_10.setObjectName("label_10")
self.licAdd = QtWidgets.QPushButton(self.centralwidget)
self.licAdd.setGeometry(QtCore.QRect(780, 310, 31, 30))
self.licAdd.setObjectName("licAdd")
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(10, 350, 101, 30))
self.label_11.setObjectName("label_11")
self.classifiers = QtWidgets.QPlainTextEdit(self.centralwidget)
self.classifiers.setGeometry(QtCore.QRect(120, 350, 731, 91))
self.classifiers.setReadOnly(True)
self.classifiers.setObjectName("classifiers")
self.build = QtWidgets.QPushButton(self.centralwidget)
self.build.setGeometry(QtCore.QRect(770, 450, 81, 30))
self.build.setObjectName("build")
minimal_exec.setCentralWidget(self.centralwidget)
self.retranslateUi(minimal_exec)
QtCore.QMetaObject.connectSlotsByName(minimal_exec)
minimal_exec.setTabOrder(self.shortDesc, self.longDesc)
minimal_exec.setTabOrder(self.longDesc, self.browse_long_desc)
minimal_exec.setTabOrder(self.browse_long_desc, self.packages)
minimal_exec.setTabOrder(self.packages, self.find_packages)
minimal_exec.setTabOrder(self.find_packages, self.entrypoints)
minimal_exec.setTabOrder(self.entrypoints, self.keywords)
minimal_exec.setTabOrder(self.keywords, self.pyreq)
minimal_exec.setTabOrder(self.pyreq, self.envList)
minimal_exec.setTabOrder(self.envList, self.envAdd)
minimal_exec.setTabOrder(self.envAdd, self.envRem)
minimal_exec.setTabOrder(self.envRem, self.audList)
minimal_exec.setTabOrder(self.audList, self.audAdd)
minimal_exec.setTabOrder(self.audAdd, self.audRem)
minimal_exec.setTabOrder(self.audRem, self.langList)
minimal_exec.setTabOrder(self.langList, self.langAdd)
minimal_exec.setTabOrder(self.langAdd, self.langRem)
minimal_exec.setTabOrder(self.langRem, self.licList)
minimal_exec.setTabOrder(self.licList, self.licAdd)
minimal_exec.setTabOrder(self.licAdd, self.licRem)
minimal_exec.setTabOrder(self.licRem, self.classifiers)
minimal_exec.setTabOrder(self.classifiers, self.build)
self.entrypoints.setPlaceholderText(self.place)
self.obj = minimal_exec
# adding data
self.audList.addItems(__intended_audience__)
self.envList.addItems(__enviroments__)
self.langList.addItems(__programming_lang__)
self.licList.addItems(__license__)
# binds
self.find_packages.clicked.connect(self.pac)
self.audAdd.clicked.connect(self.addAud)
self.audRem.clicked.connect(self.remAud)
self.envAdd.clicked.connect(self.addEnv)
self.envRem.clicked.connect(self.remEnv)
self.langAdd.clicked.connect(self.addLang)
self.langRem.clicked.connect(self.remLang)
self.licAdd.clicked.connect(self.addLic)
self.licRem.clicked.connect(self.remLic)
self.browse_long_desc.clicked.connect(self.mark)
self.build.clicked.connect(self.builder)
pass
def builder(self):
# brainfuck validation
# short desc
shortDesc = self.shortDesc.text()
if shortDesc == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Field",
"Short description is empty")
self.shortDesc.setFocus()
return None
# long desc
longDesc = self.longDesc.text()
if longDesc == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Long description is empty")
self.longDesc.setFocus()
return None
elif not os.path.exists(longDesc):
QtWidgets.QMessageBox.warning(self.obj, "Not Exitsts",
"Long description file not exists")
self.longDesc.setFocus()
return None
# packages
packages = self.packages.text()
if packages == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Packages are empty")
self.packages.setFocus()
return None
else:
packages = [x.strip() for x in packages.split(",")]
# keywords
keywords = self.keywords.text()
if keywords == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Keywords are empty")
self.keywords.setFocus()
return None
# python version
pyreq = self.pyreq.text()
if pyreq == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Python version required is empty")
self.pyreq.setFocus()
return None
classifiers = self.classifiers.toPlainText().split("\n")
execu = self.entrypoints.toPlainText().split("\n")
# str.split always returns at least one element, so test for blank content
if not any(line.strip() for line in execu):
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Entry point is empty")
self.entrypoints.setFocus()
return None
elif not self.validateEntry():
QtWidgets.QMessageBox.information(
self.obj, "Invalid Input",
"Entry points will not be accepted by pypi")
self.entrypoints.setFocus()
else:
execu = {"console_scripts": execu}
pass
setup = FormatCode(
template.format(
name=self.data.name,
packages=packages,
version=self.data.version,
auth_name=self.data.authorname,
auth_email=self.data.authoremail,
home_url=self.data.home_url,
down_url=self.data.down_url,
short_desc=shortDesc,
long_desc=longDesc,
license=self.data.license,
keywords=keywords,
classifiers=classifiers,
python_required=pyreq,
entry_points=execu),
style_config="pep8")[0]
with open(os.path.join(self.data.dir, "setup.py"), "w") as file:
file.write(setup)
file.close()
QtWidgets.QMessageBox.information(
self.obj, "Done", "Hurry ^_^\nSetup file has been created")
pass
def mark(self):
file = str(
QtWidgets.QFileDialog.getOpenFileName(
self.obj, "Select Long Description", ".",
"Markdown Files (*MD *md)")[0])
self.longDesc.setText(file)
pass
def pac(self):
self.packages.setText(", ".join(find_packages()))
pass
def retranslateUi(self, minimal_exec):
_translate = QtCore.QCoreApplication.translate
minimal_exec.setWindowTitle(
_translate("minimal_exec",
"Morphine :: Minimal + Executable :: Builder"))
self.label.setText(_translate("minimal_exec", "Short Description"))
self.shortDesc.setPlaceholderText(
_translate("minimal_exec", "Enter short description", "sss"))
self.longDesc.setPlaceholderText(
_translate(
"minimal_exec",
"Enter long description file path or browse. (Markdown file required)"
))
self.label_2.setText(_translate("minimal_exec", "Long Description"))
self.browse_long_desc.setText(_translate("minimal_exec", "..."))
self.packages.setPlaceholderText(
_translate("minimal_exec", "Enter packges to include or find it"))
self.label_3.setText(_translate("minimal_exec", "Packages"))
self.find_packages.setText(_translate("minimal_exec", "&Find"))
self.label_4.setText(_translate("minimal_exec", "Entry Points"))
self.label_5.setText(_translate("minimal_exec", "Keywords"))
self.keywords.setPlaceholderText(
_translate("minimal_exec", "Enter keywords for SEO"))
self.label_6.setText(_translate("minimal_exec", "Python Req."))
self.pyreq.setPlaceholderText(
_translate("minimal_exec", "Enter python version(s) required"))
self.label_7.setText(_translate("minimal_exec", "Environment"))
self.label_8.setText(_translate("minimal_exec", "Audience"))
self.envRem.setText(_translate("minimal_exec", "-"))
self.envAdd.setText(_translate("minimal_exec", "+"))
self.audAdd.setText(_translate("minimal_exec", "+"))
self.audRem.setText(_translate("minimal_exec", "-"))
self.label_9.setText(_translate("minimal_exec", "Audience"))
self.langAdd.setText(_translate("minimal_exec", "+"))
self.licRem.setText(_translate("minimal_exec", "-"))
self.langRem.setText(_translate("minimal_exec", "-"))
self.label_10.setText(_translate("minimal_exec", "Environment"))
self.licAdd.setText(_translate("minimal_exec", "+"))
self.label_11.setText(_translate("minimal_exec", "Classifiers"))
self.build.setText(_translate("minimal_exec", "&Build"))
self.classifiers.appendPlainText(
_translate("minimal_exec", self.data.dev_status))
pass
def addAud(self):
current = "Intended Audience :: " + self.audList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
if current in classifier:
return None
classifier.append(current)
self.classifiers.setPlainText("\n".join(classifier))
pass
def remAud(self):
current = "Intended Audience :: " + self.audList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
try:
classifier.remove(current)
except ValueError:
pass
self.classifiers.setPlainText("\n".join(classifier))
pass
def addLang(self):
current = "Programming Language :: " + self.langList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
if current in classifier:
return None
classifier.append(current)
self.classifiers.setPlainText("\n".join(classifier))
pass
def remLang(self):
current = "Programming Language :: " + self.langList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
try:
classifier.remove(current)
except ValueError:
pass
self.classifiers.setPlainText("\n".join(classifier))
pass
def addEnv(self):
current = "Environment :: " + self.envList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
if current in classifier:
return None
classifier.append(current)
self.classifiers.setPlainText("\n".join(classifier))
pass
def remEnv(self):
current = "Environment :: " + self.envList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
try:
classifier.remove(current)
except ValueError:
pass
self.classifiers.setPlainText("\n".join(classifier))
pass
def addLic(self):
current = "License :: " + self.licList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
if current in classifier:
return None
classifier.append(current)
self.classifiers.setPlainText("\n".join(classifier))
pass
def remLic(self):
current = "License :: " + self.licList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
try:
classifier.remove(current)
except ValueError:
pass
self.classifiers.setPlainText("\n".join(classifier))
pass
def validateEntry(self):
entrypoints = self.entrypoints.toPlainText().split("\n")
for entrypoint in entrypoints:
if re.match(r"^.+=.+\.?.+(:.+)?$", entrypoint) is None:
return False
pass
return True
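# Illustrative examples of entry point strings that validateEntry() above accepts
# (the names below are placeholders, not part of the original project):
#
#   mytool = mypackage.cli:main
#   mytool-serve = mypackage.server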
|
PypiClean
|
/pcl_pangu-1.2.6.2.tar.gz/pcl_pangu-1.2.6.2/pcl_pangu/model/panguAlpha_pytorch/tools/preprocess_data_pangu.py
|
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import argparse
import json
import multiprocessing
import glob
import numpy as np
from megatron.tokenizer.tokenization_jieba import JIEBATokenizer
import time
import torch
try:
import nltk
nltk_available = True
except ImportError:
nltk_available = False
from megatron.data import indexed_dataset
# https://stackoverflow.com/questions/33139531/preserve-empty-lines-with-nltks-punkt-tokenizer
class CustomLanguageVars(nltk.tokenize.punkt.PunktLanguageVars):
_period_context_fmt = r"""
\S* # some word material
%(SentEndChars)s # a potential sentence ending
\s* # <-- THIS is what I changed
(?=(?P<after_tok>
%(NonWord)s # either other punctuation
|
(?P<next_tok>\S+) # <-- Normally you would have \s+ here
))"""
class IdentitySplitter(object):
def tokenize(self, *text):
return text
class Encoder(object):
def __init__(self, args):
self.args = args
self.tokenizer = JIEBATokenizer(self.args.vocab_file)
def encode(self, iterator):
key = self.args.json_keys[0]
len_paras = 0
ids = {}
doc_ids = []
encode_start_time = time.time()
file_num = 0
for file_path in iterator:
print(file_path)
each_start_time = time.time()
json_line = open(file_path, 'r', encoding='utf-8')
strr = json_line.read()
lista = strr.split('\n\n')
len_paras += len(lista)
for para in lista:
if para:
contenta = self.tokenizer.tokenize(para)
para_ids = self.tokenizer.convert_tokens_to_ids(contenta)
if len(para_ids) > 0:
doc_ids.append(para_ids)
if self.args.append_eod:
for i in range(self.args.eod_num):
doc_ids[-1].append(self.tokenizer.eod_id)
# print(doc_ids)
each_end_time = time.time()
print("encode this file using {}s".format(each_end_time - each_start_time))
ids[key] = doc_ids
encode_end_time = time.time()
print("FINISHING ENCODING, USING {}s".format(encode_end_time - encode_start_time))
return ids, len_paras
# print('len_paras',len_paras)
def package_file(it, n):
""" package multiple files"""
stop = False
while not stop:
batch = []
for _ in range(n):
try:
batch.append(next(it))
except StopIteration:
stop = True
if not batch:
break
yield batch
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument('--input', type=str, default='/raid/gpt3-train-data/data-v1/new2016zh/txt-data/train/0000*.txt',
help='Path to input txt')
group.add_argument('--json-keys', nargs='+', default=['text'],
help='space separated list of keys to extract from json')
group.add_argument('--split-sentences', action='store_true',
help='Split documents into sentences.')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--vocab-file', type=str, default='megatron/tokenizer/bpe_4w_pcl/vocab',
help='Path to the vocab file')
group.add_argument('--append-eod', action='store_true',
help='Append an <eod> token to the end of a document.')
group.add_argument('--eod-num', type=int, default=1,
help='Number of <eod> tokens to append to each document.')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True,
help='Path to binary output file without suffix')
group.add_argument('--dataset-impl', type=str, default='mmap',
choices=['lazy', 'cached', 'mmap'])
group = parser.add_argument_group(title='runtime')
group.add_argument('--workers', type=int, default=200,
help='Number of worker processes to launch')
group.add_argument('--log-interval', type=int, default=1,
help='Interval between progress updates')
args = parser.parse_args()
args.keep_empty = False
return args
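# Example invocation (illustrative paths; all flags are defined in get_args() above):
#
#   python preprocess_data_pangu.py --input '/data/txt/*.txt' \
#       --vocab-file megatron/tokenizer/bpe_4w_pcl/vocab \
#       --output-prefix /data/pangu_ --append-eod --workers 16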
def main():
args = get_args()
startup_start = time.time()
print("Opening", args.input)
file_iter = glob.iglob(args.input)
if nltk_available and args.split_sentences:
nltk.download("punkt", quiet=True)
encoder = Encoder(args)
# tokenizer = JIEBATokenizer(vocab_path, tokenizer_path)
pool = multiprocessing.Pool(args.workers)
encoded_docs = pool.imap(encoder.encode, package_file(file_iter, 128))#, all_lens))
#encoded_docs = map(encoder.encode, fin)
print('encoded_docs',encoded_docs)
level = "document"
if args.split_sentences:
level = "sentence"
# print(f"Vocab size: {tokenizer.vocab_size}")
print(f"Output prefix: {args.output_prefix}")
output_bin_files = {}
output_idx_files = {}
builders = {}
for key in args.json_keys:
output_bin_files[key] = "{}{}_{}.bin".format(args.output_prefix,
key, level)
output_idx_files[key] = "{}{}_{}.idx".format(args.output_prefix,
key, level)
builders[key] = indexed_dataset.make_builder(output_bin_files[key],
impl=args.dataset_impl,
vocab_size=encoder.tokenizer.vocab_size)
startup_end = time.time()
proc_start = time.time()
total_bytes_processed = 0
print("Time to startup:", startup_end - startup_start)
for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1):
total_bytes_processed += bytes_processed
for key, sentences in doc.items():
for sentence in sentences:
builders[key].add_item(torch.IntTensor(sentence))
builders[key].end_document()
if i % args.log_interval == 0:
current = time.time()
elapsed = current - proc_start
mbs = total_bytes_processed/elapsed/1024/1024
print(f"Processed {i} documents",
f"({i/elapsed} docs/s, {mbs} MB/s).",
file=sys.stderr)
for key in args.json_keys:
builders[key].finalize(output_idx_files[key])
end_time = time.time()
print('Preprocess data using {}s'.format(end_time - startup_end))
if __name__ == '__main__':
main()
|
PypiClean
|
/chop3-0.1.0.tar.gz/chop3-0.1.0/CONTRIBUTING.rst
|
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/Fahima-Islam/chop/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
chop could always use more documentation, whether as part of the
official chop docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/Fahima-Islam/chop/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `chop` for local development.
1. Fork the `chop` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/chop.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv chop
$ cd chop/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 chop tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
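For example::
$ pip install flake8 tox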
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.7, 3.4, 3.5 and 3.6, and for PyPy. Check
https://travis-ci.org/Fahima-Islam/chop/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ py.test tests.test_chop
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bumpversion patch # possible: major / minor / patch
$ git push
$ git push --tags
Travis will then deploy to PyPI if tests pass.
|
PypiClean
|
/gryphon-pusherclient-0.2.1.tar.gz/gryphon-pusherclient-0.2.1/pusherclient/__init__.py
|
from .channel import Channel
from .connection import Connection
import hashlib
import hmac
import logging
try:
import thread
except ImportError:
import _thread as thread
try:
import simplejson as json
except ImportError:
import json
VERSION = "0.2.0"
class Pusher(object):
host = "ws.pusherapp.com"
client_id = 'PythonPusherClient'
protocol = 6
def __init__(self, key, secure=True, secret=None, user_data=None, log_level=logging.INFO):
self.key = key
self.secret = secret
self.user_data = user_data or {}
self.channels = {}
self.connection = Connection(self._connection_handler, self._build_url(key, secure), log_level=log_level)
def connect(self):
"""Connect to Pusher"""
thread.start_new_thread(self.connection.run, ())
def disconnect(self):
"""Disconnect from Pusher"""
self.connection.disconnect()
self.channels = {}
def subscribe(self, channel_name):
"""Subscribe to a channel
:param channel_name: The name of the channel to subscribe to.
:type channel_name: str
:rtype : Channel
"""
data = {'channel': channel_name}
if channel_name.startswith('presence-'):
data['auth'] = self._generate_presence_key(
self.connection.socket_id,
self.key,
channel_name,
self.secret,
self.user_data
)
data['channel_data'] = json.dumps(self.user_data)
elif channel_name.startswith('private-'):
data['auth'] = self._generate_private_key(
self.connection.socket_id,
self.key,
channel_name,
self.secret
)
self.connection.send_event('pusher:subscribe', data)
self.channels[channel_name] = Channel(channel_name, self.connection)
return self.channels[channel_name]
def unsubscribe(self, channel_name):
"""Unsubscribe from a channel
:param channel_name: The name of the channel to unsubscribe from.
:type channel_name: str
"""
if channel_name in self.channels:
self.connection.send_event(
'pusher:unsubscribe', {
'channel': channel_name,
}
)
del self.channels[channel_name]
def channel(self, channel_name):
"""Get an existing channel object by name
:param channel_name: The name of the channel you want to retrieve
:type channel_name: str
:rtype: Channel or None
"""
return self.channels.get(channel_name)
def _connection_handler(self, event_name, data, channel_name):
if channel_name in self.channels:
self.channels[channel_name]._handle_event(event_name, data)
@staticmethod
def _generate_private_key(socket_id, key, channel_name, secret):
auth_key = ""
if socket_id and key and channel_name and secret:
subject = "%s:%s" % (socket_id, channel_name)
h = hmac.new(secret, subject, hashlib.sha256)
auth_key = "%s:%s" % (key, h.hexdigest())
return auth_key
@staticmethod
def _generate_presence_key(socket_id, key, channel_name, secret, user_data):
auth_key = ""
if socket_id and key and channel_name and secret and user_data:
subject = "%s:%s:%s" % (socket_id, channel_name, json.dumps(user_data))
h = hmac.new(secret, subject, hashlib.sha256)
auth_key = "%s:%s" % (key, h.hexdigest())
return auth_key
@classmethod
def _build_url(cls, key, secure):
path = "/app/%s?client=%s&version=%s&protocol=%s" % (
key,
cls.client_id,
VERSION,
cls.protocol
)
return "%s://%s:%s%s" % (
"wss" if secure else "ws",
cls.host,
443 if secure else 80,
path
)
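# Example usage (illustrative sketch; the key/secret values are placeholders and
# Channel.bind()/Connection.bind() are assumed to exist as in upstream pusherclient):
#
#   def on_connect(data):
#       channel = pusher.subscribe('private-orders')
#       channel.bind('new-order', lambda msg: print(msg))
#
#   pusher = Pusher('app-key', secret='app-secret')
#   pusher.connection.bind('pusher:connection_established', on_connect)
#   pusher.connect()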
|
PypiClean
|
/RsCMPX_NrFr1Meas-4.0.185.tar.gz/RsCMPX_NrFr1Meas-4.0.185/RsCMPX_NrFr1Meas/Implementations/NrSubMeas/MultiEval/Cc/Layer/Iemission/Margin/Extreme/__init__.py
|
from .........Internal.Core import Core
from .........Internal.CommandsGroup import CommandsGroup
from .........Internal.StructBase import StructBase
from .........Internal.ArgStruct import ArgStruct
from ......... import repcap
# noinspection PyPep8Naming,PyAttributeOutsideInit,SpellCheckingInspection
class ExtremeCls:
"""Extreme commands group definition. 2 total commands, 1 Subgroups, 1 group commands"""
def __init__(self, core: Core, parent):
self._core = core
self._cmd_group = CommandsGroup("extreme", core, parent)
@property
def rbIndex(self):
"""rbIndex commands group. 0 Sub-classes, 1 commands."""
if not hasattr(self, '_rbIndex'):
from .RbIndex import RbIndexCls
self._rbIndex = RbIndexCls(self._core, self._cmd_group)
return self._rbIndex
# noinspection PyTypeChecker
class FetchStruct(StructBase):
"""Response structure. Fields: \n
- Reliability: int: 'Reliability indicator'
- Out_Of_Tolerance: int: Out of tolerance result, i.e. percentage of measurement intervals of the statistic count for modulation measurements exceeding the specified modulation limits.
- Margin: float: Margin over all non-allocated RBs (scope of general limit component)
- Iq_Image: float: Margin at image frequencies of allocated RBs (scope of I/Q image limit component)
- Carr_Leakage: float: Margin at the carrier frequency (scope of I/Q offset limit component)"""
__meta_args_list = [
ArgStruct.scalar_int('Reliability', 'Reliability'),
ArgStruct.scalar_int('Out_Of_Tolerance'),
ArgStruct.scalar_float('Margin'),
ArgStruct.scalar_float('Iq_Image'),
ArgStruct.scalar_float('Carr_Leakage')]
def __init__(self):
StructBase.__init__(self, self)
self.Reliability: int = None
self.Out_Of_Tolerance: int = None
self.Margin: float = None
self.Iq_Image: float = None
self.Carr_Leakage: float = None
def fetch(self, carrierComponent=repcap.CarrierComponent.Default, layer=repcap.Layer.Default) -> FetchStruct:
"""SCPI: FETCh:NRSub:MEASurement<Instance>:MEValuation[:CC<no>][:LAYer<layer>]:IEMission:MARGin:EXTReme \n
Snippet: value: FetchStruct = driver.nrSubMeas.multiEval.cc.layer.iemission.margin.extreme.fetch(carrierComponent = repcap.CarrierComponent.Default, layer = repcap.Layer.Default) \n
Return the limit line margin results for carrier <no>, layer/antenna <l>. The CURRent margin indicates the minimum
(vertical) distance between the in-band emissions limit line and the current trace. A negative result indicates that the
limit is exceeded. The AVERage, EXTReme and SDEViation values are calculated from the current margins. The margin results
cannot be displayed at the GUI. \n
:param carrierComponent: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Cc')
:param layer: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Layer')
:return: structure: for return value, see the help for FetchStruct structure arguments."""
carrierComponent_cmd_val = self._cmd_group.get_repcap_cmd_value(carrierComponent, repcap.CarrierComponent)
layer_cmd_val = self._cmd_group.get_repcap_cmd_value(layer, repcap.Layer)
return self._core.io.query_struct(f'FETCh:NRSub:MEASurement<Instance>:MEValuation:CC{carrierComponent_cmd_val}:LAYer{layer_cmd_val}:IEMission:MARGin:EXTReme?', self.__class__.FetchStruct())
def clone(self) -> 'ExtremeCls':
"""Clones the group by creating new object from it and its whole existing subgroups
Also copies all the existing default Repeated Capabilities setting,
which you can change independently without affecting the original group"""
new_group = ExtremeCls(self._core, self._cmd_group.parent)
self._cmd_group.synchronize_repcaps(new_group)
return new_group
|
PypiClean
|
/winevtrc-20220106.tar.gz/winevtrc-20220106/docs/sources/eventlog-providers/Provider-Microsoft-Windows-StorageManagement-WSP-Health.md
|
## Microsoft-Windows-StorageManagement-WSP-Health
Seen on:
* Windows 10 (1703, 1709, 1803, 1809, 1903, 1909, 2004, 20H2)
* Windows 11 (21H2)
<table border="1" class="docutils">
<tbody>
<tr>
<td><b>Log source(s):</b></td>
<td>Microsoft-Windows-StorageManagement-WSP-Health</td>
</tr>
<tr>
<td><b>Identifier:</b></td>
<td>{b1f01d1a-ae3a-4940-81ee-ddccbad380ef}</td>
</tr>
<tr>
<td><b>Event message file(s):</b></td>
<td>%systemroot%\system32\wsp_health.dll</td>
</tr>
</tbody>
</table>
|
PypiClean
|
/onshape-test-client-1.0.0.tar.gz/onshape-test-client-1.0.0/onshape_client/oas/model/bt_one_configuration_part_properties1661.py
|
import re # noqa: F401
import sys # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from onshape_client.oas.exceptions import ApiAttributeError
def lazy_import():
from onshape_client.oas.model.bt_one_part_properties230 import BTOnePartProperties230
from onshape_client.oas.model.btfs_value1888 import BTFSValue1888
globals()['BTFSValue1888'] = BTFSValue1888
globals()['BTOnePartProperties230'] = BTOnePartProperties230
class BTOneConfigurationPartProperties1661(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute;
for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute;
for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('property_ids',): {
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'bt_type': (str,), # noqa: E501
'configuration': ({str: (BTFSValue1888,)},), # noqa: E501
'merged': (BTOnePartProperties230,), # noqa: E501
'node_id': (str,), # noqa: E501
'properties': ([BTOnePartProperties230],), # noqa: E501
'property_ids': ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'bt_type': 'btType', # noqa: E501
'configuration': 'configuration', # noqa: E501
'merged': 'merged', # noqa: E501
'node_id': 'nodeId', # noqa: E501
'properties': 'properties', # noqa: E501
'property_ids': 'propertyIds', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BTOneConfigurationPartProperties1661 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bt_type (str): [optional] # noqa: E501
configuration ({str: (BTFSValue1888,)}): [optional] # noqa: E501
merged (BTOnePartProperties230): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
properties ([BTOnePartProperties230]): [optional] # noqa: E501
property_ids ([str]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BTOneConfigurationPartProperties1661 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bt_type (str): [optional] # noqa: E501
configuration ({str: (BTFSValue1888,)}): [optional] # noqa: E501
merged (BTOnePartProperties230): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
properties ([BTOnePartProperties230]): [optional] # noqa: E501
property_ids ([str]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/nni_daily-1.5.2005180104-py3-none-manylinux1_x86_64.whl/nni_daily-1.5.2005180104.data/data/nni/node_modules/moment/dist/locale/ar.js
|
import moment from '../moment';
var symbolMap = {
'1': '١',
'2': '٢',
'3': '٣',
'4': '٤',
'5': '٥',
'6': '٦',
'7': '٧',
'8': '٨',
'9': '٩',
'0': '٠',
},
numberMap = {
'١': '1',
'٢': '2',
'٣': '3',
'٤': '4',
'٥': '5',
'٦': '6',
'٧': '7',
'٨': '8',
'٩': '9',
'٠': '0',
},
pluralForm = function (n) {
return n === 0
? 0
: n === 1
? 1
: n === 2
? 2
: n % 100 >= 3 && n % 100 <= 10
? 3
: n % 100 >= 11
? 4
: 5;
},
plurals = {
s: [
'أقل من ثانية',
'ثانية واحدة',
['ثانيتان', 'ثانيتين'],
'%d ثوان',
'%d ثانية',
'%d ثانية',
],
m: [
'أقل من دقيقة',
'دقيقة واحدة',
['دقيقتان', 'دقيقتين'],
'%d دقائق',
'%d دقيقة',
'%d دقيقة',
],
h: [
'أقل من ساعة',
'ساعة واحدة',
['ساعتان', 'ساعتين'],
'%d ساعات',
'%d ساعة',
'%d ساعة',
],
d: [
'أقل من يوم',
'يوم واحد',
['يومان', 'يومين'],
'%d أيام',
'%d يومًا',
'%d يوم',
],
M: [
'أقل من شهر',
'شهر واحد',
['شهران', 'شهرين'],
'%d أشهر',
'%d شهرا',
'%d شهر',
],
y: [
'أقل من عام',
'عام واحد',
['عامان', 'عامين'],
'%d أعوام',
'%d عامًا',
'%d عام',
],
},
pluralize = function (u) {
return function (number, withoutSuffix, string, isFuture) {
var f = pluralForm(number),
str = plurals[u][pluralForm(number)];
if (f === 2) {
str = str[withoutSuffix ? 0 : 1];
}
return str.replace(/%d/i, number);
};
},
months = [
'يناير',
'فبراير',
'مارس',
'أبريل',
'مايو',
'يونيو',
'يوليو',
'أغسطس',
'سبتمبر',
'أكتوبر',
'نوفمبر',
'ديسمبر',
];
export default moment.defineLocale('ar', {
months: months,
monthsShort: months,
weekdays: 'الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت'.split('_'),
weekdaysShort: 'أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت'.split('_'),
weekdaysMin: 'ح_ن_ث_ر_خ_ج_س'.split('_'),
weekdaysParseExact: true,
longDateFormat: {
LT: 'HH:mm',
LTS: 'HH:mm:ss',
L: 'D/\u200FM/\u200FYYYY',
LL: 'D MMMM YYYY',
LLL: 'D MMMM YYYY HH:mm',
LLLL: 'dddd D MMMM YYYY HH:mm',
},
meridiemParse: /ص|م/,
isPM: function (input) {
return 'م' === input;
},
meridiem: function (hour, minute, isLower) {
if (hour < 12) {
return 'ص';
} else {
return 'م';
}
},
calendar: {
sameDay: '[اليوم عند الساعة] LT',
nextDay: '[غدًا عند الساعة] LT',
nextWeek: 'dddd [عند الساعة] LT',
lastDay: '[أمس عند الساعة] LT',
lastWeek: 'dddd [عند الساعة] LT',
sameElse: 'L',
},
relativeTime: {
future: 'بعد %s',
past: 'منذ %s',
s: pluralize('s'),
ss: pluralize('s'),
m: pluralize('m'),
mm: pluralize('m'),
h: pluralize('h'),
hh: pluralize('h'),
d: pluralize('d'),
dd: pluralize('d'),
M: pluralize('M'),
MM: pluralize('M'),
y: pluralize('y'),
yy: pluralize('y'),
},
preparse: function (string) {
return string
.replace(/[١٢٣٤٥٦٧٨٩٠]/g, function (match) {
return numberMap[match];
})
.replace(/،/g, ',');
},
postformat: function (string) {
return string
.replace(/\d/g, function (match) {
return symbolMap[match];
})
.replace(/,/g, '،');
},
week: {
dow: 6, // Saturday is the first day of the week.
doy: 12, // The week that contains Jan 12th is the first week of the year.
},
});
|
PypiClean
|
/sfa-3.1-12.tar.gz/sfa-3.1-12/clientbin/getNodes.py
|
import sys
import os
from optparse import OptionParser
from pprint import pprint
from types import StringTypes
def create_parser():
command = sys.argv[0]
argv = sys.argv[1:]
usage = "%(command)s [options]" % locals()
description = """getNodes will open a rspec file and print all key/values, or filter results based on a given key or set of keys."""
parser = OptionParser(usage=usage,description=description)
parser.add_option("-i", "--infile", dest="infile", default=None, help = "input rspec file")
parser.add_option("-t", "--tag", dest="tag", default=None, help = "filter rspec for this tag")
parser.add_option("-a", "--attribute", dest="attribute", default=None, help = "comma separated list of attributes to display")
parser.add_option("-r", "--recursive", dest="print_children", default=False, action="store_true", help = "print the tag's child nodes")
return parser
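# Example invocation (illustrative file name; the flags are the options defined
# in create_parser() above):
#
#   python getNodes.py -i nodes.rspec -t node -a hostname,arch -r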
def print_dict(rdict, options, counter=1):
print_children = options.print_children
attributes = []
if options.attribute:
attributes = options.attribute.split(',')
lists = []
tab = " "
if not isinstance(rdict, dict):
raise TypeError("%s not a dict" % rdict)
for (key, value) in rdict.iteritems():
if isinstance(value, StringTypes):
if (attributes and key in attributes) or not attributes:
print tab * counter + "%s: %s" % (key, value)
elif isinstance(value, list):
for listitem in value:
if isinstance(listitem, dict):
lists.append((key, listitem))
elif isinstance(value, dict):
lists.append((key, value))
if counter == 1 or print_children:
for (key, listitem) in lists:
if isinstance(listitem, dict):
print tab * (counter - 1) + key
print_dict(listitem, options, counter+1)
elif not attributes or (attributes and 'children' in attributes):
keys = set([key for (key, listitem) in lists])
if keys: print tab * (counter) + "(children: %s)" % (",".join(keys))
def main():
parser = create_parser();
(options, args) = parser.parse_args()
if not options.infile:
print "RSpec file not specified"
return
rspec = RSpec()
try:
rspec.parseFile(options.infile)
except Exception:
print "Error reading rspec file"
return
if options.tag:
tag_name = options.tag
rspec_dicts = rspec.getDictsByTagName(tag_name)
rspec_dict = {tag_name: rspec_dicts}
else:
rspec_dict = rspec.toDict()
print_dict(rspec_dict, options)
return
if __name__ == '__main__':
try: main()
except Exception, e:
print e
raise
|
PypiClean
|
/PyIGA-0.0.10.tar.gz/PyIGA-0.0.10/external_libraries/zlib/contrib/ada/readme.txt
|
ZLib for Ada thick binding (ZLib.Ada)
Release 1.3
ZLib.Ada is a thick binding interface to the popular ZLib data
compression library, available at http://www.gzip.org/zlib/.
It provides Ada-style access to the ZLib C library.
Here are the main changes since ZLib.Ada 1.2:
- Attention: the ZLib.Read generic routine now has an initialization requirement
for the Read_Last parameter. This is slightly incompatible with the previous version,
but extends functionality: the new parameters Allow_Read_Some and
Flush can now be used.
- Added Is_Open routines to ZLib and ZLib.Streams packages.
- Add pragma Assert to check Stream_Element is 8 bit.
- Fix extraction to buffer with exact known decompressed size. Error reported by
Steve Sangwine.
- Fix definition of ULong (changed to unsigned_long), fix regression on 64-bit
computers. Patch provided by Pascal Obry.
- Add Status_Error exception definition.
- Add pragma Assertion that Ada.Streams.Stream_Element size is 8 bit.
How to build ZLib.Ada under GNAT
You should have the ZLib library already build on your computer, before
building ZLib.Ada. Make the directory of ZLib.Ada sources current and
issue the command:
gnatmake test -largs -L<directory where libz.a is> -lz
Or use the GNAT project file build for GNAT 3.15 or later:
gnatmake -Pzlib.gpr -L<directory where libz.a is>
How to build ZLib.Ada under Aonix ObjectAda for Win32 7.2.2
1. Make a project with all *.ads and *.adb files from the distribution.
2. Build the libz.a library from the ZLib C sources.
3. Rename libz.a to z.lib.
4. Add the library z.lib to the project.
5. Add the libc.lib library from the ObjectAda distribution to the project.
6. Build the executable using test.adb as a main procedure.
How to use ZLib.Ada
The source files test.adb and read.adb are small demo programs that show
the main functionality of ZLib.Ada.
The routines from the package specifications are commented.
Homepage: http://zlib-ada.sourceforge.net/
Author: Dmitriy Anisimkov <[email protected]>
Contributors: Pascal Obry <[email protected]>, Steve Sangwine <[email protected]>
|
PypiClean
|
/fs-watcher-1.0.11.tar.gz/fs-watcher-1.0.11/fs_watcher/daemonrunner.py
|
from __future__ import print_function, division, unicode_literals, absolute_import
import os
import logging
import time
import signal, errno
import daemon
from past.builtins import basestring # pip install future
try:
from daemon.pidlockfile import PIDLockFile
from daemon.pidlockfile import AlreadyLocked
except (NameError, ImportError):
from lockfile.pidlockfile import PIDLockFile
from lockfile import AlreadyLocked
class DaemonRunnerError(Exception):
""" Abstract base class for errors from DaemonRunner. """
class DaemonRunnerInvalidActionError(ValueError, DaemonRunnerError):
""" Raised when specified action for DaemonRunner is invalid. """
class DaemonRunnerStartFailureError(RuntimeError, DaemonRunnerError):
""" Raised when failure starting DaemonRunner. """
class DaemonRunnerStopFailureError(RuntimeError, DaemonRunnerError):
""" Raised when failure stopping DaemonRunner. """
class DaemonRunner(object):
""" Controller for a callable running in a separate background process.
* 'start': Become a daemon and call `run()`.
* 'stop': Exit the daemon process specified in the PID file.
* 'restart': Call `stop()`, then `start()`.
* 'run': Run `func(func_arg)`
"""
def __init__(self, func, func_arg=None,
pidfile=None,
stdin=None, stdout=None, stderr=None,
uid=None, gid=None, umask=None,
working_directory=None,
signal_map=None,
files_preserve=None):
""" Set up the parameters of a new runner.
The `func` argument is the function, with single argument `func_arg`, to daemonize.
"""
self.func = func
self.func_arg = func_arg
self.daemon_context = daemon.DaemonContext(umask=umask or 0,
working_directory=working_directory or '/',
uid=uid, gid=gid)
# python-daemon>=2.1 has initgroups=True by default but it requires root privs;
# older versions don't support initgroups as constructor parameter so we set it manually instead:
self.daemon_context.initgroups = False
self.daemon_context.stdin = open(stdin or '/dev/null', 'rb')
self.daemon_context.stdout = open(stdout or '/dev/null', 'w+b')
self.daemon_context.stderr = open(stderr or '/dev/null', 'w+b', buffering=0)
self.pidfile = None
if pidfile is not None:
self.pidfile = make_pidlockfile(pidfile)
self.daemon_context.pidfile = self.pidfile
## TO BE IMPLEMENTED
if signal_map is not None:
self.daemon_context.signal_map = signal_map
self.daemon_context.files_preserve = files_preserve
def restart(self):
""" Stop, then start.
"""
self.stop()
self.start()
def run(self):
""" Run the application.
"""
return self.func(self.func_arg)
def start(self):
""" Open the daemon context and run the application.
"""
status = is_pidfile_stale(self.pidfile)
if status == True:
self.pidfile.break_lock()
elif status == False:
## Allow only one instance of the daemon
logging.info("Daemon already running with PID %r", self.pidfile.read_pid())
return
try:
self.daemon_context.open()
except AlreadyLocked:
logging.info("PID file %r already locked", self.pidfile.path)
return
logging.info('Daemon started with pid %d', os.getpid())
self.run()
def stop(self):
""" Exit the daemon process specified in the current PID file.
"""
if not self.pidfile.is_locked():
logging.info("PID file %r not locked", self.pidfile.path)
return
if is_pidfile_stale(self.pidfile):
self.pidfile.break_lock()
else:
self._terminate_daemon_process()
self.pidfile.break_lock()
logging.info("Daemon stopped")
def _terminate_daemon_process(self, sig=signal.SIGTERM):
""" Terminate the daemon process specified in the current PID file.
"""
pid = self.pidfile.read_pid()
try:
os.kill(pid, sig)
except OSError as exc:
raise DaemonRunnerStopFailureError(
"Failed to terminate %(pid)d: %(exc)s" % vars())
time.sleep(0.2)
try:
os.kill(pid, 0)
except OSError as exc:
if exc.errno == errno.ESRCH:
# The specified PID does not exist
logging.info("Pid %(pid)d terminated.", vars())
return
raise DaemonRunnerStopFailureError(
"Failed to terminate %(pid)d" % vars())
def make_pidlockfile(path):
""" Make a LockFile instance with the given filesystem path. """
if not isinstance(path, basestring):
raise ValueError("Not a filesystem path: %(path)r" % vars())
if not os.path.isabs(path):
raise ValueError("Not an absolute path: %(path)r" % vars())
return PIDLockFile(path)
def is_pidfile_stale(pidfile):
""" Determine whether a PID file is stale.
Return ``True`` (“stale”) if the contents of the PID file are
valid but do not match the PID of a currently-running process;
otherwise return ``False``.
"""
result = False
if not os.path.isfile(pidfile.path):
return None
pidfile_pid = pidfile.read_pid()
if pidfile_pid is not None:
try:
os.kill(pidfile_pid, signal.SIG_DFL)
except OSError as exc:
if exc.errno == errno.ESRCH:
# The specified PID does not exist
result = True
return result
|
PypiClean
|
/newreqtool-0.1.tar.gz/newreqtool-0.1/doorstopnew/common.py
|
import argparse
import glob
import logging
import os
import shutil
import yamldown
import yaml
verbosity = 0 # global verbosity setting for controlling string formatting
PRINT_VERBOSITY = 0 # minimum verbosity to using `print`
STR_VERBOSITY = 3 # minimum verbosity to use verbose `__str__`
MAX_VERBOSITY = 4 # maximum verbosity level implemented
def _trace(self, message, *args, **kws):
if self.isEnabledFor(logging.DEBUG - 1):
self._log(logging.DEBUG - 1, message, args, **kws) # pylint: disable=W0212
logging.addLevelName(logging.DEBUG - 1, "TRACE")
logging.Logger.trace = _trace # type: ignore
logger = logging.getLogger
log = logger(__name__)
# exception classes ##########################################################
class DoorstopError(Exception):
"""Generic Doorstop error."""
class DoorstopFileError(DoorstopError, IOError):
"""Raised on IO errors."""
class DoorstopWarning(DoorstopError, Warning):
"""Generic Doorstop warning."""
class DoorstopInfo(DoorstopWarning, Warning):
"""Generic Doorstop info."""
# logging classes ############################################################
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
"""Command-line help text formatter with wider help text."""
def __init__(self, *args, **kwargs):
kwargs['max_help_position'] = 40
super().__init__(*args, **kwargs)
class WarningFormatter(logging.Formatter):
"""Logging formatter that displays verbose formatting for WARNING+."""
def __init__(self, default_format, verbose_format, *args, **kwargs):
super().__init__(*args, **kwargs)
self.default_format = default_format
self.verbose_format = verbose_format
def format(self, record):
"""Python 3 hack to change the formatting style dynamically."""
if record.levelno > logging.INFO:
self._style._fmt = self.verbose_format # pylint: disable=W0212
else:
self._style._fmt = self.default_format # pylint: disable=W0212
return super().format(record)
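# Example (illustrative): attach the formatter so that WARNING and above use the
# verbose format while INFO and below keep the plain one:
#
#   handler = logging.StreamHandler()
#   handler.setFormatter(WarningFormatter('%(message)s',
#                                         '%(levelname)s: %(name)s: %(message)s'))
#   logging.getLogger().addHandler(handler)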
# disk helper functions ######################################################
def create_dirname(path):
"""Ensure a parent directory exists for a path."""
dirpath = os.path.dirname(path)
if dirpath and not os.path.isdir(dirpath):
log.info("creating directory {}...".format(dirpath))
os.makedirs(dirpath)
def read_lines(path, encoding='utf-8'):
"""Read lines of text from a file.
:param path: file path to read from
:param encoding: input file encoding
:return: iterator of lines
"""
log.trace("reading lines from '{}'...".format(path)) # type: ignore
with open(path, 'r', encoding=encoding) as stream:
for line in stream:
yield line
def read_text(path, encoding='utf-8'):
"""Read text from a file.
:param path: file path to read from
:param encoding: input file encoding
:return: string
"""
log.trace("reading text from '{}'...".format(path)) # type: ignore
try:
with open(path, 'r', encoding=encoding) as stream:
return stream.read()
except Exception as ex:
msg = "reading '{}' failed: {}".format(path, ex)
raise DoorstopError(msg)
def load_yaml(text, path, loader=yaml.SafeLoader):
"""Parse a dictionary from YAML text.
:param text: string containing dumped YAML data
:param path: file path for error messages
:return: dictionary
"""
# Load the YAML data
try:
data = yaml.load(text, Loader=loader) or {}
except yaml.error.YAMLError as exc:
msg = "invalid contents: {}:\n{}".format(path, exc)
raise DoorstopError(msg) from None
# Ensure data is a dictionary
if not isinstance(data, dict):
msg = "invalid contents: {}".format(path)
raise DoorstopError(msg)
# Return the parsed data
return data
def load_markdown(text, path, loader=yaml.SafeLoader):
"""Parse a dictionary from Markdown text.
:param text: string containing dumped markdown data
:param path: file path for error messages
:return: markdown data
"""
# Load the MARKDOWN data
data = yamldown.load(text)
return data
def write_lines(lines, path, end='\n', encoding='utf-8'):
"""Write lines of text to a file.
:param lines: iterator of strings
:param path: file to write lines
:param end: string to end lines
:param encoding: output file encoding
:return: path of new file
"""
log.trace("writing lines to '{}'...".format(path)) # type: ignore
with open(path, 'wb') as stream:
for line in lines:
data = (line + end).encode(encoding)
stream.write(data)
return path
def write_text(text, path, encoding='utf-8'):
"""Write text to a file.
:param text: string
:param path: file to write text
:param encoding: output file encoding
:return: path of new file
"""
if text:
log.trace("writing text to '{}'...".format(path)) # type: ignore
with open(path, 'wb') as stream:
data = text.encode(encoding)
stream.write(data)
return path
def touch(path):
"""Ensure a file exists."""
if not os.path.exists(path):
log.trace("creating empty '{}'...".format(path)) # type: ignore
write_text('', path)
def copy_dir_contents(src, dst):
"""Copy the contents of a directory."""
for fpath in glob.glob('{}/*'.format(src)):
dest_path = os.path.join(dst, os.path.split(fpath)[-1])
if os.path.exists(dest_path):
if os.path.basename(fpath) == "doorstopnew":
msg = "Skipping '{}' as this directory name is required by doorstop".format(
fpath
)
else:
msg = "Skipping '{}' as a file or directory with this name already exists".format(
fpath
)
log.warning(msg)
else:
if os.path.isdir(fpath):
shutil.copytree(fpath, dest_path)
else:
shutil.copyfile(fpath, dest_path)
def delete(path):
"""Delete a file or directory with error handling."""
if os.path.isdir(path):
try:
log.trace("deleting '{}'...".format(path)) # type: ignore
shutil.rmtree(path)
except IOError:
# bug: http://code.activestate.com/lists/python-list/159050
msg = "unable to delete: {}".format(path)
log.warning(msg)
elif os.path.isfile(path):
log.trace("deleting '{}'...".format(path)) # type: ignore
os.remove(path)
def delete_contents(dirname):
"""Delete the contents of a directory."""
for file in glob.glob('{}/*'.format(dirname)):
if os.path.isdir(file):
shutil.rmtree(os.path.join(dirname, file))
else:
try:
os.remove(os.path.join(dirname, file))
except FileExistsError:
log.warning(
"Two assets folders have files or directories " "with the same name"
)
raise
|
PypiClean
|
/adapya-base-1.0.6.tar.gz/adapya-base-1.0.6/adapya/base/stck.py
|
__date__='$Date: 2017-11-03 17:52:37 +0100 (Fri, 03 Nov 2017) $'
__revision__='$Rev: 779 $'
from datetime import datetime, timedelta, tzinfo
import time
sec1970=2208988800 # 2208988800L (w/o leap secs)
secyear=int(365*24*3600)
# needs review after June 28, 2017
leapnow=27 # last updated: Oct 2017
leapseconds = ( # last updated: Oct 2017
(datetime(1972, 7, 1), 1),
(datetime(1973, 1, 1), 2),
(datetime(1974, 1, 1), 3),
(datetime(1975, 1, 1), 4),
(datetime(1976, 1, 1), 5),
(datetime(1977, 1, 1), 6),
(datetime(1978, 1, 1), 7),
(datetime(1979, 1, 1), 8),
(datetime(1980, 1, 1), 9),
(datetime(1981, 7, 1), 10),
(datetime(1982, 7, 1), 11),
(datetime(1983, 7, 1), 12),
(datetime(1985, 7, 1), 13),
(datetime(1988, 1, 1), 14),
(datetime(1990, 1, 1), 15),
(datetime(1991, 1, 1), 16),
(datetime(1992, 7, 1), 17),
(datetime(1993, 7, 1), 18),
(datetime(1994, 7, 1), 19),
(datetime(1996, 1, 1), 20),
(datetime(1997, 7, 1), 21),
(datetime(1999, 1, 1), 22),
(datetime(2006, 1, 1), 23),
(datetime(2009, 1, 1), 24),
(datetime(2012, 7, 1), 25),
(datetime(2015, 7, 1), 26),
(datetime(2017, 1, 1), 27),
)
def leap4dt(dt):
"""Determine the leap seconds for a given datetime.
:param dt: datetime value
:returns: leap seconds
>>> leap4dt(datetime(1972, 6,30, 23,59,59))
0
>>> leap4dt(datetime(2017, 1,1))
27
"""
leapsec=0
for leapdate, leaps in leapseconds:
if dt >= leapdate:
leapsec = leaps
else:
break
return leapsec
def csec(stcksec):
"""returns seconds converted from stck seconds
>>> csec(0xd69)
3599.761408
"""
return stcksec * 1.048576
def cstck(stck):
''' returns seconds_since_1970'''
e=stck * 1.048576
return e-sec1970
def cstckd(stckd):
"""converts long STCK time into local time and microseconds"""
a = stckd>>12
b=a//1000000 # seconds
c=int(a%1000000) # micro sec
d=b-sec1970+0.0 # seconds since the epoch 1970
return (d, c)
def sstck(stck,gmt=0):
''' returns ISO date time string from local stck
if gmt !=0: GMT STCK is assumed
'''
if stck==0:
return ''
e=stck * 1.048576
if e >= sec1970:
if gmt==0:
return time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(e-sec1970))
else:
return time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(e-sec1970))
elif e <= secyear:
return str(timedelta(seconds=int(e)))
else:
return 'stck=%s' % hex(stck)
def sstckgmt(stck):
''' returns ISO date time string assuming gmt stck value'''
return sstck(stck,gmt=1)
def sstckd(stckd,gmt=0):
"""converts long STCK time into string
of local time and microseconds
if gmt !=0: GMT STCK is assumed
"""
if stckd == 0:
return ''
a = stckd>>12
b=a//1000000 # seconds
c=int(a%1000000) # micro sec
ns=int((stckd&0xfff)*1000//4096) # nsec
d=b-sec1970+0.0 # seconds since the epoch 1970
if d >= 0:
if gmt==0:
return time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(d))+'.%6.6d.%3.3d'%(c,ns)
else:
return time.strftime('%Y-%m-%d %H:%M:%S',time.gmtime(d))+'.%6.6d.%3.3d'%(c,ns)
elif b <= secyear:
if b < 1000:
return '%d.%6.6d.%3.3d' % (b,c,ns)
return str(timedelta(microseconds=a))+'.%3.3d'%ns
else:
return 'stckd=%s' % hex(stckd)
# some helpers for stimet() function
class Utc(tzinfo): # helper class for simple UTC timezone display
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
UTC = Utc()
dt1970 = datetime.fromtimestamp(0,UTC)
fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
def stimet(timet):
"""Convert time_t value into datetime string, allows negative values.
Supports datetimes between 1900-01-01 and 9999-12-31
>>> stimet(-(70*365+17)*24*3600)
'1900-01-01 00:00:00 UTC (+0000)'
>>> stimet((8035*365+121)*24*3600+23*3600+3599)
'9999-12-31 23:59:59 UTC (+0000)'
"""
dt=dt1970+timedelta(seconds=timet)
return dt.strftime(fmt)
def stckdnow(leapsec=False):
return utc2stckd(leapsec=leapsec)
def utc2stckd(dt=None,leapsec=False):
""" convert a datetime to STCK format
:param dt: datetime value to convert to STCK (default: current UTC time)
:param leapsec: if True add in leap seconds relevant for
the datetime dt
"""
from .dtconv import utc2micro,UTC1900
if dt is None:
# evaluate "now" at call time, not at import time
dt = datetime.utcnow()
leap = 10**6 * leap4dt(dt) if leapsec else 0
microsecs = utc2micro(dt.year,dt.month,dt.day,dt.hour,dt.minute,
dt.second,dt.microsecond) - utc2micro(*UTC1900) \
+ leap
return microsecs*2**12
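# Illustrative round trip (assuming adapya.base.dtconv.utc2micro counts plain
# calendar microseconds from UTC1900, as used above):
#
#   >>> s = utc2stckd(datetime(2017, 1, 1))
#   >>> sstckd(s, gmt=1)
#   '2017-01-01 00:00:00.000000.000'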
# Copyright 2004-2019 Software AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
PypiClean
|
/tensorflow-gpu-macosx-1.8.1.tar.gz/tensorflow/docs_src/programmers_guide/embedding.md
|
# Embeddings
This document introduces the concept of embeddings, gives a simple example of
how to train an embedding in TensorFlow, and explains how to view embeddings
with the TensorBoard Embedding Projector
([live example](http://projector.tensorflow.org)). The first two parts target
newcomers to machine learning or TensorFlow, and the Embedding Projector how-to
is for users at all levels.
An alternative tutorial on these concepts is available in the
[Embeddings section of Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/embeddings/video-lecture).
[TOC]
An **embedding** is a mapping from discrete objects, such as words, to vectors
of real numbers. For example, a 300-dimensional embedding for English words
could include:
```
blue: (0.01359, 0.00075997, 0.24608, ..., -0.2524, 1.0048, 0.06259)
blues: (0.01396, 0.11887, -0.48963, ..., 0.033483, -0.10007, 0.1158)
orange: (-0.24776, -0.12359, 0.20986, ..., 0.079717, 0.23865, -0.014213)
oranges: (-0.35609, 0.21854, 0.080944, ..., -0.35413, 0.38511, -0.070976)
```
The individual dimensions in these vectors typically have no inherent meaning.
Instead, it's the overall patterns of location and distance between vectors
that machine learning takes advantage of.
Embeddings are important for input to machine learning. Classifiers, and neural
networks more generally, work on vectors of real numbers. They train best on
dense vectors, where all values contribute to define an object. However, many
important inputs to machine learning, such as words of text, do not have a
natural vector representation. Embedding functions are the standard and
effective way to transform such discrete input objects into useful
continuous vectors.
Embeddings are also valuable as outputs of machine learning. Because embeddings
map objects to vectors, applications can use similarity in vector space (for
instance, Euclidean distance or the angle between vectors) as a robust and
flexible measure of object similarity. One common use is to find nearest
neighbors. Using the same word embeddings as above, for instance, here are the
three nearest neighbors for each word and the corresponding angles:
```
blue: (red, 47.6°), (yellow, 51.9°), (purple, 52.4°)
blues: (jazz, 53.3°), (folk, 59.1°), (bluegrass, 60.6°)
orange: (yellow, 53.5°), (colored, 58.0°), (bright, 59.9°)
oranges: (apples, 45.3°), (lemons, 48.3°), (mangoes, 50.4°)
```
This would tell an application that apples and oranges are in some way more
similar (45.3° apart) than lemons and oranges (48.3° apart).
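For concreteness, here is a small NumPy sketch (not part of the original guide; the four-dimensional vectors are made up for illustration) of how such angles between embedding vectors can be computed:
```
import numpy as np

def angle_degrees(u, v):
    # angle between two embedding vectors, in degrees
    cos = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return np.degrees(np.arccos(np.clip(cos, -1.0, 1.0)))

blue = np.array([0.0136, 0.0008, 0.2461, -0.2524])
red = np.array([0.0210, 0.0011, 0.2300, -0.2400])
print(angle_degrees(blue, red))  # a small angle indicates similar words
```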
## Embeddings in TensorFlow
To create word embeddings in TensorFlow, we first split the text into words
and then assign an integer to every word in the vocabulary. Let us assume that
this has already been done, and that `word_ids` is a vector of these integers.
For example, the sentence “I have a cat.” could be split into
`["I", "have", "a", "cat", "."]` and then the corresponding `word_ids` tensor
would have shape `[5]` and consist of 5 integers. To map these word ids
to vectors, we need to create the embedding variable and use the
`tf.nn.embedding_lookup` function as follows:
```
word_embeddings = tf.get_variable("word_embeddings",
[vocabulary_size, embedding_size])
embedded_word_ids = tf.nn.embedding_lookup(word_embeddings, word_ids)
```
After this, the tensor `embedded_word_ids` will have shape `[5, embedding_size]`
in our example and contain the embeddings (dense vectors) for each of the 5
words. At the end of training, `word_embeddings` will contain the embeddings
for all words in the vocabulary.
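The same lookup can also be seen in a self-contained sketch (written here in TF 2 eager style with `tf.Variable` rather than the `tf.get_variable` API used above; the word ids are arbitrary placeholders):
```
import tensorflow as tf

vocabulary_size, embedding_size = 10, 4
word_embeddings = tf.Variable(tf.random.uniform([vocabulary_size, embedding_size]))
word_ids = tf.constant([2, 5, 7, 0, 3])  # e.g. ids for ["I", "have", "a", "cat", "."]
embedded_word_ids = tf.nn.embedding_lookup(word_embeddings, word_ids)
print(embedded_word_ids.shape)  # (5, 4) -- one dense vector per word id
```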
Embeddings can be trained in many network types, and with various loss
functions and data sets. For example, one could use a recurrent neural network
to predict the next word from the previous one given a large corpus of
sentences, or one could train two networks to do multi-lingual translation.
These methods are described in the @{$word2vec$Vector Representations of Words}
tutorial.
## Visualizing Embeddings
TensorBoard includes the **Embedding Projector**, a tool that lets you
interactively visualize embeddings. This tool can read embeddings from your
model and render them in two or three dimensions.
The Embedding Projector has three panels:
- *Data panel* on the top left, where you can choose the run, the embedding
variable and data columns to color and label points by.
- *Projections panel* on the bottom left, where you can choose the type of
projection.
- *Inspector panel* on the right side, where you can search for particular
points and see a list of nearest neighbors.
### Projections
The Embedding Projector provides three ways to reduce the dimensionality of a
data set.
- *[t-SNE](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding)*:
a nonlinear nondeterministic algorithm (t-distributed stochastic neighbor
embedding) that tries to preserve local neighborhoods in the data, often at
the expense of distorting global structure. You can choose whether to compute
two- or three-dimensional projections.
- *[PCA](https://en.wikipedia.org/wiki/Principal_component_analysis)*:
a linear deterministic algorithm (principal component analysis) that tries to
capture as much of the data variability in as few dimensions as possible. PCA
tends to highlight large-scale structure in the data, but can distort local
neighborhoods. The Embedding Projector computes the top 10 principal
components, from which you can choose two or three to view.
- *Custom*: a linear projection onto horizontal and vertical axes that you
specify using labels in the data. You define the horizontal axis, for
instance, by giving text patterns for "Left" and "Right". The Embedding
Projector finds all points whose label matches the "Left" pattern and
computes the centroid of that set; similarly for "Right". The line passing
through these two centroids defines the horizontal axis. The vertical axis is
likewise computed from the centroids for points matching the "Up" and "Down"
text patterns.
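To make the centroid construction concrete, here is a rough NumPy sketch (an illustration only, not the Projector's actual implementation) of deriving such a label-defined axis:
```
import numpy as np

def custom_axis(points, labels, left_pattern, right_pattern):
    # unit vector pointing from the "left" centroid to the "right" centroid
    left = np.array([p for p, lab in zip(points, labels) if left_pattern in lab])
    right = np.array([p for p, lab in zip(points, labels) if right_pattern in lab])
    axis = right.mean(axis=0) - left.mean(axis=0)
    return axis / np.linalg.norm(axis)

# the horizontal coordinate of every point is its projection onto that axis:
# x_coords = np.asarray(points) @ custom_axis(points, labels, "worst", "best")
```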
Further useful articles are
[How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/) and
[Principal Component Analysis Explained Visually](http://setosa.io/ev/principal-component-analysis/).
### Exploration
You can explore visually by zooming, rotating, and panning using natural
click-and-drag gestures. Hovering your mouse over a point will show any
[metadata](#metadata) for that point. You can also inspect nearest-neighbor
subsets. Clicking on a point causes the right pane to list the nearest
neighbors, along with distances to the current point. The nearest-neighbor
points are also highlighted in the projection.
It is sometimes useful to restrict the view to a subset of points and perform
projections only on those points. To do so, you can select points in multiple
ways:
- After clicking on a point, its nearest neighbors are also selected.
- After a search, the points matching the query are selected.
- Enabling selection, clicking on a point and dragging defines a selection
sphere.
Then click the "Isolate *nnn* points" button at the top of the Inspector pane
on the right hand side. The following image shows 101 points selected and ready
for the user to click "Isolate 101 points":

*Selection of the nearest neighbors of “important” in a word embedding dataset.*
Advanced tip: filtering with custom projection can be powerful. Below, we
filtered the 100 nearest neighbors of “politics” and projected them onto the
“worst” - “best” vector as an x axis. The y axis is random. As a result, one
finds on the right side “ideas”, “science”, “perspective”, “journalism” but on
the left “crisis”, “violence” and “conflict”.
<table width="100%;">
<tr>
<td style="width: 30%;">
<img src="https://www.tensorflow.org/images/embedding-custom-controls.png" alt="Custom controls panel" title="Custom controls panel" />
</td>
<td style="width: 70%;">
<img src="https://www.tensorflow.org/images/embedding-custom-projection.png" alt="Custom projection" title="Custom projection" />
</td>
</tr>
<tr>
<td style="width: 30%;">
Custom projection controls.
</td>
<td style="width: 70%;">
Custom projection of neighbors of "politics" onto "best" - "worst" vector.
</td>
</tr>
</table>
To share your findings, you can use the bookmark panel in the bottom right
corner and save the current state (including computed coordinates of any
projection) as a small file. The Projector can then be pointed to a set of one
or more of these files, producing the panel below. Other users can then walk
through a sequence of bookmarks.
<img src="https://www.tensorflow.org/images/embedding-bookmark.png" alt="Bookmark panel" style="width:300px;">
### Metadata
If you are working with an embedding, you'll probably want to attach
labels/images to the data points. You can do this by generating a metadata file
containing the labels for each point and clicking "Load data" in the data panel
of the Embedding Projector.
The metadata can be either labels or images, which are
stored in a separate file. For labels, the format should
be a [TSV file](https://en.wikipedia.org/wiki/Tab-separated_values)
(tab characters shown in red) whose first line contains column headers
(shown in bold) and subsequent lines contain the metadata values. For example:
<code>
<b>Word<span style="color:#800;">\t</span>Frequency</b><br/>
Airplane<span style="color:#800;">\t</span>345<br/>
Car<span style="color:#800;">\t</span>241<br/>
...
</code>
The order of lines in the metadata file is assumed to match the order of
vectors in the embedding variable, except for the header. Consequently, the
(i+1)-th line in the metadata file corresponds to the i-th row of the embedding
variable. If the TSV metadata file has only a single column, then we don’t
expect a header row, and assume each row is the label of the embedding. We
include this exception because it matches the commonly-used "vocab file"
format.
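As a minimal sketch, such a metadata file can be written with a few lines of Python (the rows echo the example above; add one entry per embedding vector, in the same order as the embedding variable):
```
rows = [("Airplane", 345), ("Car", 241)]  # ... one entry per embedding vector
with open("metadata.tsv", "w", encoding="utf-8") as f:
    f.write("Word\tFrequency\n")            # header line
    for word, frequency in rows:
        f.write(f"{word}\t{frequency}\n")
```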
To use images as metadata, you must produce a single
[sprite image](https://www.google.com/webhp#q=what+is+a+sprite+image),
consisting of small thumbnails, one for each vector in the embedding. The
sprite should store thumbnails in row-first order: the first data point placed
in the top left and the last data point in the bottom right, though the last
row doesn't have to be filled, as shown below.
<table style="border: none;">
<tr style="background-color: transparent;">
<td style="border: 1px solid black">0</td>
<td style="border: 1px solid black">1</td>
<td style="border: 1px solid black">2</td>
</tr>
<tr style="background-color: transparent;">
<td style="border: 1px solid black">3</td>
<td style="border: 1px solid black">4</td>
<td style="border: 1px solid black">5</td>
</tr>
<tr style="background-color: transparent;">
<td style="border: 1px solid black">6</td>
<td style="border: 1px solid black">7</td>
<td style="border: 1px solid black"></td>
</tr>
</table>
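As a rough sketch (assuming the thumbnails are already available as equally sized PIL images), a sprite can be assembled in exactly this row-first order:
```
import math
from PIL import Image

def make_sprite(thumbnails, thumb_size=(32, 32)):
    # paste thumbnails into a single square-ish grid, row by row, top-left first
    cols = int(math.ceil(math.sqrt(len(thumbnails))))
    rows = int(math.ceil(len(thumbnails) / cols))
    sprite = Image.new("RGB", (cols * thumb_size[0], rows * thumb_size[1]))
    for i, thumb in enumerate(thumbnails):
        r, c = divmod(i, cols)
        sprite.paste(thumb.resize(thumb_size), (c * thumb_size[0], r * thumb_size[1]))
    return sprite

# make_sprite(list_of_pil_images).save("sprite.png")
```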
Follow [this link](https://www.tensorflow.org/images/embedding-mnist.mp4)
to see a fun example of thumbnail images in the Embedding Projector.
## Mini-FAQ
**Is "embedding" an action or a thing?**
Both. People talk about embedding words in a vector space (action) and about
producing word embeddings (things). Common to both is the notion of embedding
as a mapping from discrete objects to vectors. Creating or applying that
mapping is an action, but the mapping itself is a thing.
**Are embeddings high-dimensional or low-dimensional?**
It depends. A 300-dimensional vector space of words and phrases, for instance,
is often called low-dimensional (and dense) when compared to the millions of
words and phrases it can contain. But mathematically it is high-dimensional,
displaying many properties that are dramatically different from what our human
intuition has learned about 2- and 3-dimensional spaces.
**Is an embedding the same as an embedding layer?**
No. An *embedding layer* is part of a neural network, but an *embedding* is a more
general concept.
|
PypiClean
|
/llm_toys-0.1.1-py3-none-any.whl/llm_toys/hf/transformers/models/vilt/modeling_vilt.py
|
""" PyTorch ViLT model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
ModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import (
find_pruneable_heads_and_indices,
meshgrid,
prune_linear_layer,
)
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_vilt import ViltConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "ViltConfig"
_CHECKPOINT_FOR_DOC = "dandelin/vilt-b32-mlm"
VILT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"dandelin/vilt-b32-mlm",
# See all ViLT models at https://huggingface.co/models?filter=vilt
]
@dataclass
class ViltForImagesAndTextClassificationOutput(ModelOutput):
"""
Class for outputs of [`ViltForImagesAndTextClassification`].
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (`List[tuple(torch.FloatTensor)]`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
List of tuples of `torch.FloatTensor` (one for each image-text pair, each tuple containing the output of
the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`List[tuple(torch.FloatTensor)]`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
List of tuples of `torch.FloatTensor` (one for each image-text pair, each tuple containing the attention
weights of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the
attention softmax, used to compute the weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[List[Tuple[torch.FloatTensor]]] = None
attentions: Optional[List[Tuple[torch.FloatTensor]]] = None
class ViltEmbeddings(nn.Module):
"""
Construct the text and patch embeddings.
Text embeddings are equivalent to BERT embeddings.
Patch embeddings are equivalent to ViT embeddings.
"""
def __init__(self, config):
super().__init__()
# text embeddings
self.text_embeddings = TextEmbeddings(config)
# patch embeddings
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.patch_embeddings = ViltPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
# modality type (text/patch) embeddings
self.token_type_embeddings = nn.Embedding(config.modality_type_vocab_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def visual_embed(self, pixel_values, pixel_mask, max_image_length=200):
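# Overview: patch-embed the pixel values, resize the position embeddings to each image's true
# (unpadded) resolution, flatten everything to a patch sequence, then sample or pad each image to
# exactly `max_image_length` patch positions before prepending the CLS token.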
_, _, ph, pw = self.patch_embeddings.projection.weight.shape
x = self.patch_embeddings(pixel_values)
x_mask = pixel_mask[:, None, :, :].float()
x_mask = nn.functional.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
x_h = x_mask[:, 0].sum(dim=1)[:, 0]
x_w = x_mask[:, 0].sum(dim=2)[:, 0]
batch_size, num_channels, height, width = x.shape
patch_dim = self.config.image_size // self.config.patch_size
spatial_pos = self.position_embeddings[:, 1:, :].transpose(1, 2).view(1, num_channels, patch_dim, patch_dim)
pos_embed = torch.cat(
[
nn.functional.pad(
nn.functional.interpolate(
spatial_pos,
size=(h, w),
mode="bilinear",
align_corners=True,
),
(0, width - w, 0, height - h),
)
for h, w in zip(x_h, x_w)
],
dim=0,
)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
x = x.flatten(2).transpose(1, 2)
# Set `device` here, otherwise `patch_index` will always be on `CPU` and will fail near the end for torch>=1.13
patch_index = torch.stack(
meshgrid(torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1]), indexing="ij"), dim=-1
).to(device=x_mask.device)
patch_index = patch_index[None, None, :, :, :]
patch_index = patch_index.expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
patch_index = patch_index.flatten(1, 3)
x_mask = x_mask.flatten(1)
if max_image_length is None or not isinstance(max_image_length, int) or max_image_length < 0:
# suppose aug is 800 x 1333, then, maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and be shrinked)
# (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
# if self.patch_size = 32, 25 * 41 = 1025
# if res is 384 x 640, 12 * 20 = 240
effective_resolution = x_h * x_w
max_image_length = effective_resolution.max()
else:
effective_resolution = x_h * x_w
max_image_length = min(effective_resolution.max(), max_image_length)
valid_idx = x_mask.nonzero(as_tuple=False)
non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
unique_rows = valid_idx[:, 0].unique()
valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
non_valid_row_idx = [non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows]
valid_nums = [v.size(0) for v in valid_row_idx]
non_valid_nums = [v.size(0) for v in non_valid_row_idx]
pad_nums = [max_image_length - v for v in valid_nums]
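# For every image keep exactly `max_image_length` patch positions: if there are too many valid
# (unmasked) patches, subsample them uniformly; otherwise keep all valid patches and pad by
# re-sampling (with replacement) from the masked positions.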
select = []
for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
if p <= 0:
valid_choice = torch.multinomial(torch.ones(v).float(), max_image_length)
select.append(valid_row_idx[i][valid_choice])
else:
pad_choice = torch.multinomial(torch.ones(nv).float(), p, replacement=True)
select.append(torch.cat([valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0))
select = torch.cat(select, dim=0)
x = x[select[:, 0], select[:, 1]].view(batch_size, -1, num_channels)
x_mask = x_mask[select[:, 0], select[:, 1]].view(batch_size, -1)
# `patch_index` should be on the same device as `select` (for torch>=1.13), which is ensured at definition time.
patch_index = patch_index[select[:, 0], select[:, 1]].view(batch_size, -1, 2)
pos_embed = pos_embed[select[:, 0], select[:, 1]].view(batch_size, -1, num_channels)
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
pos_embed = torch.cat(
(self.position_embeddings[:, 0, :][:, None, :].expand(batch_size, -1, -1), pos_embed), dim=1
)
x = x + pos_embed
x = self.dropout(x)
x_mask = torch.cat([torch.ones(x_mask.shape[0], 1).to(x_mask), x_mask], dim=1)
return x, x_mask, (patch_index, (height, width))
def forward(
self,
input_ids,
attention_mask,
token_type_ids,
pixel_values,
pixel_mask,
inputs_embeds,
image_embeds,
image_token_type_idx=1,
):
# PART 1: text embeddings
text_embeds = self.text_embeddings(
input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
# PART 2: patch embeddings (with interpolated position encodings)
if image_embeds is None:
image_embeds, image_masks, patch_index = self.visual_embed(
pixel_values, pixel_mask, max_image_length=self.config.max_image_length
)
else:
image_masks = pixel_mask.flatten(1)
# PART 3: add modality type embeddings
# 0 indicates text, 1 indicates image, 2 is optionally used when a second image is provided (NLVR2)
if image_token_type_idx is None:
image_token_type_idx = 1
text_embeds = text_embeds + self.token_type_embeddings(
torch.zeros_like(attention_mask, dtype=torch.long, device=text_embeds.device)
)
image_embeds = image_embeds + self.token_type_embeddings(
torch.full_like(image_masks, image_token_type_idx, dtype=torch.long, device=text_embeds.device)
)
# PART 4: concatenate
embeddings = torch.cat([text_embeds, image_embeds], dim=1)
masks = torch.cat([attention_mask, image_masks], dim=1)
return embeddings, masks
class TextEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
# Set token_type_ids to the buffer registered in the constructor, where it is all zeros. This usually
# happens when token_type_ids is auto-generated; the registered buffer helps users trace the model
# without passing token_type_ids, and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class ViltPatchEmbeddings(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values):
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
x = self.projection(pixel_values)
return x
class ViltSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Vilt
class ViltSelfOutput(nn.Module):
"""
The residual connection is defined in ViltLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: ViltConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class ViltAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = ViltSelfAttention(config)
self.output = ViltSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
self_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->Vilt
class ViltIntermediate(nn.Module):
def __init__(self, config: ViltConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->Vilt
class ViltOutput(nn.Module):
def __init__(self, config: ViltConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
class ViltLayer(nn.Module):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = ViltAttention(config)
self.intermediate = ViltIntermediate(config)
self.output = ViltOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
self_attention_outputs = self.attention(
self.layernorm_before(hidden_states), # in ViLT, layernorm is applied before self-attention
attention_mask,
head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# first residual connection
hidden_states = attention_output + hidden_states.to(attention_output.device)
# in ViLT, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
# second residual connection is done here
layer_output = self.output(layer_output, hidden_states)
outputs = (layer_output,) + outputs
return outputs
class ViltEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([ViltLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
)
else:
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class ViltPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ViltConfig
base_model_prefix = "vilt"
supports_gradient_checkpointing = True
_no_split_modules = ["ViltSelfAttention"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, ViltEncoder):
module.gradient_checkpointing = value
VILT_START_DOCSTRING = r"""
This model is a PyTorch [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
Parameters:
config ([`ViltConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
VILT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ViltImageProcessor.__call__`] for details.
pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ViltImageProcessor.__call__`] for details.
pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*):
Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_images, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.",
VILT_START_DOCSTRING,
)
class ViltModel(ViltPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = ViltEmbeddings(config)
self.encoder = ViltEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = ViltPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.text_embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.text_embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
image_token_type_idx: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[BaseModelOutputWithPooling, Tuple[torch.FloatTensor]]:
r"""
Returns:
Examples:
```python
>>> from transformers import ViltProcessor, ViltModel
>>> from PIL import Image
>>> import requests
>>> # prepare image and text
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "hello world"
>>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
>>> model = ViltModel.from_pretrained("dandelin/vilt-b32-mlm")
>>> inputs = processor(image, text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
text_batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((text_batch_size, seq_length), device=device)
if pixel_values is not None and image_embeds is not None:
raise ValueError("You cannot specify both pixel_values and image_embeds at the same time")
elif pixel_values is None and image_embeds is None:
raise ValueError("You have to specify either pixel_values or image_embeds")
image_batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeds.shape[0]
if image_batch_size != text_batch_size:
raise ValueError("The text inputs and image inputs need to have the same batch size")
if pixel_mask is None:
pixel_mask = torch.ones((image_batch_size, self.config.image_size, self.config.image_size), device=device)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output, attention_mask = self.embeddings(
input_ids,
attention_mask,
token_type_ids,
pixel_values,
pixel_mask,
inputs_embeds,
image_embeds,
image_token_type_idx=image_token_type_idx,
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class ViltPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
@add_start_docstrings(
"""
ViLT Model with a language modeling head on top as done during pretraining.
""",
VILT_START_DOCSTRING,
)
class ViltForMaskedLM(ViltPreTrainedModel):
_tied_weights_keys = ["mlm_score.decoder.weight", "mlm_score.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.vilt = ViltModel(config)
self.mlm_score = ViltMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.mlm_score.decoder
def set_output_embeddings(self, new_embeddings):
self.mlm_score.decoder = new_embeddings
@add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]:
r"""
labels (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
Labels for computing the masked language modeling loss. Indices should be in *[-100, 0, ...,
config.vocab_size]* (see *input_ids* docstring) Tokens with indices set to *-100* are ignored (masked), the
loss is only computed for the tokens with labels in *[0, ..., config.vocab_size]*
Returns:
Examples:
```python
>>> from transformers import ViltProcessor, ViltForMaskedLM
>>> import requests
>>> from PIL import Image
>>> import re
>>> import torch
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "a bunch of [MASK] laying on a [MASK]."
>>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
>>> model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm")
>>> # prepare inputs
>>> encoding = processor(image, text, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**encoding)
>>> tl = len(re.findall(r"\[MASK\]", text))
>>> inferred_token = [text]
>>> # gradually fill in the MASK tokens, one by one
>>> with torch.no_grad():
... for i in range(tl):
... encoded = processor.tokenizer(inferred_token)
... input_ids = torch.tensor(encoded.input_ids)
... encoded = encoded["input_ids"][0][1:-1]
... outputs = model(input_ids=input_ids, pixel_values=encoding.pixel_values)
... mlm_logits = outputs.logits[0] # shape (seq_len, vocab_size)
... # only take into account text features (minus CLS and SEP token)
... mlm_logits = mlm_logits[1 : input_ids.shape[1] - 1, :]
... mlm_values, mlm_ids = mlm_logits.softmax(dim=-1).max(dim=-1)
... # only take into account text
... mlm_values[torch.tensor(encoded) != 103] = 0
... select = mlm_values.argmax().item()
... encoded[select] = mlm_ids[select].item()
... inferred_token = [processor.decode(encoded)]
>>> selected_token = ""
>>> encoded = processor.tokenizer(inferred_token)
>>> output = processor.decode(encoded.input_ids[0], skip_special_tokens=True)
>>> print(output)
a bunch of cats laying on a couch.
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.vilt(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
# split up final hidden states into text and image features
text_seq_len = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
text_features, _ = (sequence_output[:, :text_seq_len], sequence_output[:, text_seq_len:])
mlm_logits = self.mlm_score(text_features)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
# move labels to correct device to enable PP
labels = labels.to(mlm_logits.device)
masked_lm_loss = loss_fct(mlm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (mlm_logits,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=mlm_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class ViltPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class ViltMLMHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.config = config
self.transform = ViltPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
if weight is not None:
self.decoder.weight = weight
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, x):
x = self.transform(x)
x = self.decoder(x)
return x
@add_start_docstrings(
"""
Vilt Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the [CLS]
token) for visual question answering, e.g. for VQAv2.
""",
VILT_START_DOCSTRING,
)
class ViltForQuestionAnswering(ViltPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.vilt = ViltModel(config)
# Classifier head
self.classifier = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size * 2),
nn.LayerNorm(config.hidden_size * 2),
nn.GELU(),
nn.Linear(config.hidden_size * 2, config.num_labels),
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
r"""
labels (`torch.FloatTensor` of shape `(batch_size, num_labels)`, *optional*):
Labels for computing the visual question answering loss. This tensor must be either a one-hot encoding of
all answers that are applicable for a given example in the batch, or a soft encoding indicating which
answers are applicable, where 1.0 is the highest score.
Returns:
Examples:
```python
>>> from transformers import ViltProcessor, ViltForQuestionAnswering
>>> import requests
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "How many cats are there?"
>>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
>>> model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
>>> # prepare inputs
>>> encoding = processor(image, text, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**encoding)
>>> logits = outputs.logits
>>> idx = logits.argmax(-1).item()
>>> print("Predicted answer:", model.config.id2label[idx])
Predicted answer: 2
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.vilt(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooler_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooler_output)
loss = None
if labels is not None:
# move labels to correct device to enable PP
labels = labels.to(logits.device)
loss = nn.functional.binary_cross_entropy_with_logits(logits, labels) * labels.shape[1]
# see https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Vilt Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the [CLS]
token) for image-to-text or text-to-image retrieval, e.g. MSCOCO and F30K.
""",
VILT_START_DOCSTRING,
)
class ViltForImageAndTextRetrieval(ViltPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.vilt = ViltModel(config)
# Classifier head
self.rank_output = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels are currently not supported.
Returns:
Examples:
```python
>>> from transformers import ViltProcessor, ViltForImageAndTextRetrieval
>>> import requests
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"]
>>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-coco")
>>> model = ViltForImageAndTextRetrieval.from_pretrained("dandelin/vilt-b32-finetuned-coco")
>>> # forward pass
>>> scores = dict()
>>> for text in texts:
... # prepare inputs
... encoding = processor(image, text, return_tensors="pt")
... outputs = model(**encoding)
... scores[text] = outputs.logits[0, :].item()
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.vilt(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooler_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.rank_output(pooler_output)
loss = None
if labels is not None:
# move labels to correct device to enable PP
labels = labels.to(logits.device)
raise NotImplementedError("Training is not yet supported.")
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Vilt Model transformer with a classifier head on top for natural language visual reasoning, e.g. NLVR2.
""",
VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING,
)
class ViltForImagesAndTextClassification(ViltPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.vilt = ViltModel(config)
# Classifier head
num_images = config.num_images
self.classifier = nn.Sequential(
nn.Linear(config.hidden_size * num_images, config.hidden_size * num_images),
nn.LayerNorm(config.hidden_size * num_images),
nn.GELU(),
nn.Linear(config.hidden_size * num_images, config.num_labels),
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ViltForImagesAndTextClassificationOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[ViltForImagesAndTextClassificationOutput, Tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Binary classification labels.
Returns:
Examples:
```python
>>> from transformers import ViltProcessor, ViltForImagesAndTextClassification
>>> import requests
>>> from PIL import Image
>>> image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
>>> image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_1.jpg", stream=True).raw)
>>> text = "The left image contains twice the number of dogs as the right image."
>>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2")
>>> model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2")
>>> # prepare inputs
>>> encoding = processor([image1, image2], text, return_tensors="pt")
>>> # forward pass
>>> outputs = model(input_ids=encoding.input_ids, pixel_values=encoding.pixel_values.unsqueeze(0))
>>> logits = outputs.logits
>>> idx = logits.argmax(-1).item()
>>> print("Predicted answer:", model.config.id2label[idx])
Predicted answer: True
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is not None and pixel_values.ndim == 4:
# add dummy num_images dimension
pixel_values = pixel_values.unsqueeze(1)
if image_embeds is not None and image_embeds.ndim == 3:
# add dummy num_images dimension
image_embeds = image_embeds.unsqueeze(1)
num_images = pixel_values.shape[1] if pixel_values is not None else None
if num_images is None:
num_images = image_embeds.shape[1] if image_embeds is not None else None
if num_images != self.config.num_images:
raise ValueError(
"Make sure to match the number of images in the model with the number of images in the input."
)
pooler_outputs = []
hidden_states = [] if output_hidden_states else None
attentions = [] if output_attentions else None
for i in range(num_images):
# forward every image through the model
outputs = self.vilt(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values[:, i, :, :, :] if pixel_values is not None else None,
pixel_mask=pixel_mask[:, i, :, :] if pixel_mask is not None else None,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds[:, i, :, :] if image_embeds is not None else None,
image_token_type_idx=i + 1,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooler_output = outputs.pooler_output if return_dict else outputs[1]
pooler_outputs.append(pooler_output)
if output_hidden_states:
hidden_states.append(outputs.hidden_states)
if output_attentions:
attentions.append(outputs.attentions)
pooled_output = torch.cat(pooler_outputs, dim=-1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# move labels to correct device to enable PP
labels = labels.to(logits.device)
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits, hidden_states, attentions)
return ((loss,) + output) if loss is not None else output
return ViltForImagesAndTextClassificationOutput(
loss=loss,
logits=logits,
hidden_states=hidden_states,
attentions=attentions,
)
@add_start_docstrings(
"""
ViLT Model with a token classification head on top (a linear layer on top of the final hidden-states of the text
tokens) e.g. for Named-Entity-Recognition (NER) tasks.
""",
VILT_START_DOCSTRING,
)
class ViltForTokenClassification(ViltPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.vilt = ViltModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.vilt(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
text_input_size = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output[:, :text_input_size])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# move labels to correct device to enable PP
labels = labels.to(logits.device)
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
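# --- Illustrative usage sketch (editorial addition, not part of the original file) ---
# Unlike the retrieval and NLVR2 heads above, ViltForTokenClassification has no docstring
# example here. The guarded snippet below is a minimal sketch only: the
# "dandelin/vilt-b32-mlm" checkpoint is an assumption, and its token-classification head is
# randomly initialized, so predictions are meaningless until the model is fine-tuned.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import ViltProcessor, ViltForTokenClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    text = "Two cats lying on a couch"

    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
    model = ViltForTokenClassification.from_pretrained("dandelin/vilt-b32-mlm", num_labels=2)

    encoding = processor(image, text, return_tensors="pt")
    outputs = model(**encoding)
    # logits cover only the text tokens: (batch_size, text_sequence_length, num_labels)
    print(outputs.logits.argmax(-1))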
|
PypiClean
|
/fds.sdk.FundsAPIforDigitalPortals-0.10.0-py3-none-any.whl/fds/sdk/FundsAPIforDigitalPortals/model/inline_response2006_data_legal_entities_fund_domicile.py
|
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FundsAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FundsAPIforDigitalPortals.exceptions import ApiAttributeError
class InlineResponse2006DataLegalEntitiesFundDomicile(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'id': (float,), # noqa: E501
'name': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'name': 'name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse2006DataLegalEntitiesFundDomicile - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (float): Identifier of the country.. [optional] # noqa: E501
name (str): Name of the country.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse2006DataLegalEntitiesFundDomicile - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (float): Identifier of the country.. [optional] # noqa: E501
name (str): Name of the country.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/django_handyhelpers-0.3.9-py3-none-any.whl/handyhelpers/static/node_modules/bootstrap-table/src/extensions/print/bootstrap-table-print.js
|
const Utils = $.fn.bootstrapTable.utils
function printPageBuilderDefault (table) {
return `
<html>
<head>
<style type="text/css" media="print">
@page {
size: auto;
margin: 25px 0 25px 0;
}
</style>
<style type="text/css" media="all">
table {
border-collapse: collapse;
font-size: 12px;
}
table, th, td {
border: 1px solid grey;
}
th, td {
text-align: center;
vertical-align: middle;
}
p {
font-weight: bold;
margin-left:20px;
}
table {
width:94%;
margin-left:3%;
margin-right:3%;
}
div.bs-table-print {
text-align:center;
}
</style>
<title>Print Table</title>
</head>
<body>
<p>Printed on: ${new Date} </p>
<div class="bs-table-print">${table}</div>
</body>
</html>`
}
Object.assign($.fn.bootstrapTable.locales, {
formatPrint () {
return 'Print'
}
})
Object.assign($.fn.bootstrapTable.defaults, $.fn.bootstrapTable.locales)
Object.assign($.fn.bootstrapTable.defaults, {
showPrint: false,
printAsFilteredAndSortedOnUI: true,
printSortColumn: undefined,
printSortOrder: 'asc',
printPageBuilder (table) {
return printPageBuilderDefault(table)
}
})
Object.assign($.fn.bootstrapTable.columnDefaults, {
printFilter: undefined,
printIgnore: false,
printFormatter: undefined
})
Object.assign($.fn.bootstrapTable.defaults.icons, {
print: {
bootstrap3: 'glyphicon-print icon-share',
bootstrap5: 'bi-printer',
'bootstrap-table': 'icon-printer'
}[$.fn.bootstrapTable.theme] || 'fa-print'
})
$.BootstrapTable = class extends $.BootstrapTable {
init (...args) {
super.init(...args)
if (!this.options.showPrint) {
return
}
this.mergedCells = []
}
initToolbar (...args) {
this.showToolbar = this.showToolbar || this.options.showPrint
if (this.options.showPrint) {
this.buttons = Object.assign(this.buttons, {
print: {
text: this.options.formatPrint(),
icon: this.options.icons.print,
event: () => {
this.doPrint(this.options.printAsFilteredAndSortedOnUI ? this.getData() : this.options.data.slice(0))
},
attributes: {
'aria-label': this.options.formatPrint(),
title: this.options.formatPrint()
}
}
})
}
super.initToolbar(...args)
}
mergeCells (options) {
super.mergeCells(options)
if (!this.options.showPrint) {
return
}
let col = this.getVisibleFields().indexOf(options.field)
if (Utils.hasDetailViewIcon(this.options)) {
col += 1
}
this.mergedCells.push({
row: options.index,
col,
rowspan: options.rowspan || 1,
colspan: options.colspan || 1
})
}
doPrint (data) {
const _this2 = this
const formatValue = (row, i, column) => {
const value_ = Utils.getItemField(row, column.field, _this2.options.escape, column.escape)
const value = Utils.calculateObjectValue(column,
column.printFormatter || column.formatter,
[value_, row, i], value_)
return typeof value === 'undefined' || value === null ?
this.options.undefinedText : value
}
const buildTable = (data, columnsArray) => {
const dir = this.$el.attr('dir') || 'ltr'
const html = [`<table dir="${dir}"><thead>`]
for (const columns of columnsArray) {
html.push('<tr>')
for (let h = 0; h < columns.length; h++) {
if (!columns[h].printIgnore && columns[h].visible) {
html.push(
`<th
${Utils.sprintf(' rowspan="%s"', columns[h].rowspan)}
${Utils.sprintf(' colspan="%s"', columns[h].colspan)}
>${columns[h].title}</th>`)
}
}
html.push('</tr>')
}
html.push('</thead><tbody>')
const dontRender = []
if (this.mergedCells) {
for (let mc = 0; mc < this.mergedCells.length; mc++) {
const currentMergedCell = this.mergedCells[mc]
for (let rs = 0; rs < currentMergedCell.rowspan; rs++) {
const row = currentMergedCell.row + rs
for (let cs = 0; cs < currentMergedCell.colspan; cs++) {
const col = currentMergedCell.col + cs
dontRender.push(`${row},${col}`)
}
}
}
}
for (let i = 0; i < data.length; i++) {
html.push('<tr>')
const columns = columnsArray.flat(1)
columns.sort((c1, c2) => {
return c1.colspanIndex - c2.colspanIndex
})
for (let j = 0; j < columns.length; j++) {
if (columns[j].colspanGroup > 0) continue
let rowspan = 0
let colspan = 0
if (this.mergedCells) {
for (let mc = 0; mc < this.mergedCells.length; mc++) {
const currentMergedCell = this.mergedCells[mc]
if (currentMergedCell.col === j && currentMergedCell.row === i) {
rowspan = currentMergedCell.rowspan
colspan = currentMergedCell.colspan
}
}
}
if (
!columns[j].printIgnore && columns[j].visible && columns[j].field &&
(
!dontRender.includes(`${i},${j}`) ||
rowspan > 0 && colspan > 0
)
) {
if (rowspan > 0 && colspan > 0) {
html.push(`<td ${Utils.sprintf(' rowspan="%s"', rowspan)} ${Utils.sprintf(' colspan="%s"', colspan)}>`, formatValue(data[i], i, columns[j]), '</td>')
} else {
html.push('<td>', formatValue(data[i], i, columns[j]), '</td>')
}
}
}
html.push('</tr>')
}
html.push('</tbody>')
if (this.options.showFooter) {
html.push('<footer><tr>')
for (const columns of columnsArray) {
for (let h = 0; h < columns.length; h++) {
if (!columns[h].printIgnore && columns[h].visible) {
const footerData = Utils.trToData(columns, this.$el.find('>tfoot>tr'))
const footerValue = Utils.calculateObjectValue(columns[h], columns[h].footerFormatter, [data], footerData[0] && footerData[0][columns[h].field] || '')
html.push(`<th>${footerValue}</th>`)
}
}
}
html.push('</tr></footer>')
}
html.push('</table>')
return html.join('')
}
const sortRows = (data, colName, sortOrder) => {
if (!colName) {
return data
}
let reverse = sortOrder !== 'asc'
reverse = -(+reverse || -1)
return data.sort((a, b) => reverse * a[colName].localeCompare(b[colName]))
}
const filterRow = (row, filters) => {
for (let index = 0; index < filters.length; ++index) {
if (row[filters[index].colName] !== filters[index].value) {
return false
}
}
return true
}
const filterRows = (data, filters) => data.filter(row => filterRow(row, filters))
const getColumnFilters = columns => !columns || !columns[0] ? [] : columns[0].filter(col => col.printFilter).map(col => ({
colName: col.field,
value: col.printFilter
}))
data = filterRows(data, getColumnFilters(this.options.columns))
data = sortRows(data, this.options.printSortColumn, this.options.printSortOrder)
const table = buildTable(data, this.options.columns)
const newWin = window.open('')
const calculatedPrintPage = Utils.calculateObjectValue(this, this.options.printPageBuilder, [table], printPageBuilderDefault(table))
newWin.document.write(calculatedPrintPage)
newWin.document.close()
newWin.focus()
newWin.print()
newWin.close()
}
}
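// --- Illustrative usage sketch (editorial addition, not part of the extension source) ---
// Minimal page-initialization code exercising the print options defined above. The table
// selector, fields and data are assumptions; the call is a no-op if no such element exists.
$('#print-demo-table').bootstrapTable({
  showPrint: true,
  printSortColumn: 'name',
  printSortOrder: 'desc',
  columns: [
    { field: 'name', title: 'Name' },
    { field: 'internalId', title: 'Internal ID', printIgnore: true }
  ],
  data: [
    { name: 'Alpha', internalId: 1 },
    { name: 'Beta', internalId: 2 }
  ]
})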
|
PypiClean
|
/lhub_integ-1.0.12.tar.gz/lhub_integ-1.0.12/lhub_integ/params.py
|
import inspect
import json
import re
from abc import ABCMeta
from collections import defaultdict
from typing import NamedTuple, Dict, Callable, Any
from lhub_integ import util
from lhub_integ.decorators import action as action_decorator
from lhub_integ.env import __EnvVar
from jinja2 import Template
from enum import Enum
ENV_VAR_NAME_REGEX = r"^[a-zA-Z_]\w*$"
TRUTHY = {"y", "true", "yes", "1"}
FALSY = {"n", "false", "no", "0"}
# pulled from forms.model.DataType
class DataType(Enum):
STRING = "string"
COLUMN = "column"
NUMBER = "number"
# no node datatype because integrations can only pull from one node
# Unsupported by UI but validated by custom integrations
BOOL = "bool"
INT = "int"
JSON = "json"
JINJA = "jinja"
def coerce(self, inp):
if self == self.NUMBER:
return float(inp)
elif self == self.INT:
return int(float(inp))
elif self == self.BOOL:
if isinstance(inp, bool):
return inp
if inp.lower() in TRUTHY:
return True
if inp.lower() in FALSY:
return False
raise ValueError(
f"Expected boolean input but {inp} could not be coerced into a boolean value"
)
elif self == self.JSON:
if isinstance(inp, dict):
return inp
return json.loads(inp)
else:
return inp
# pulled from forms.model.InputType
class InputType(Enum):
TEXT = "text"
TEXT_AREA = "textarea"
EMAIL = "email"
PASSWORD = "password"
SELECT = "select"
COLUMN_SELECT = "columnSelect"
FILE = "file"
ENCRYPTED_FILE = "encryptedFile"
class __Param(__EnvVar, metaclass=ABCMeta):
def __init__(
self,
id,
description=None,
label=None,
default=None,
optional=False,
options=None,
data_type=DataType.STRING,
input_type=InputType.TEXT,
):
self.validate_id(id)
super().__init__(id, default, optional)
if label:
self.label = label
else:
self.label = id
self.description = description
self.default = default
self.data_type = data_type
self.options = options
if data_type == DataType.COLUMN:
self.input_type = InputType.COLUMN_SELECT
elif options is not None and len(options) > 1:
self.input_type = InputType.SELECT
else:
self.input_type = input_type
@staticmethod
def validate_id(param_id: str):
if re.match(ENV_VAR_NAME_REGEX, param_id) is None:
util.invalid_integration(
"invalid_parameter_name",
f'"{param_id}" is not a valid id for a parameter. '
f'Parameters must match `{ENV_VAR_NAME_REGEX}`. '
'Use label to specify a custom display name',
)
def read(self):
raw = super().read()
return self.data_type.coerce(raw)
class JinjaTemplatedStr(str):
@classmethod
def input_type(cls) -> InputType:
return InputType.TEXT_AREA
@classmethod
def data_type(cls) -> DataType:
return DataType.JINJA
"""
We take most of the param information from our Form.Input case class.
We don't enable a dependsOn field because if the dataType is a column, it automatically depends on its parent.
"""
class ConnectionParam(__Param, metaclass=ABCMeta):
"""
ConnectionParam provides a parameter specified by the connection
Example usage:
API_KEY = ConnectionParam('api_key')
def process_row(url):
requests.get(url, params={api_key: API_KEY.get()})
"""
# LHUB-7385: Using simply a list here to preserve insertion order
# list will suffice as we do check for duplicate connection-param id
_all = []
def __init__(self, *args, **kwargs):
from lhub_integ import util
super().__init__(*args, **kwargs)
for conn_param in self._all:
if conn_param.id == self.id:
util.invalid_integration(
"duplication_connection_param",
f"You can't have 2 connection parameters with the same id ({self.id})",
)
self._all.append(self)
@classmethod
def all(cls):
return cls._all
class ActionParam(__Param, metaclass=ABCMeta):
"""
ActionParam provides a parameter specified by the action
Example usage:
API_KEY = ActionParam('api_key', action='process_row')
import requests
def process_row(url):
requests.get(url, params={api_key: API_KEY.get()})
"""
# LHUB-7385: defaultdict uses dict, so insertion order will be preserved. Also, changed from set to list to
# preserve order of parameters. Since, we do have a duplicate check for action_parameters, this should be a safe
# change.
action_map = defaultdict(list)
def __init__(self, *args, action, **kwargs):
super().__init__(*args, **kwargs)
from lhub_integ import util
caller = inspect.currentframe().f_back
if type(action) in (list, tuple):
actions = action
elif type(action) is str:
actions = [action_str.strip() for action_str in action.split(',')]
else:
raise TypeError("'action' argument can be one of 'list', 'tuple' or 'str' only.")
for action_str in actions:
entrypoint = f"{caller.f_globals['__name__']}.{action_str}"
if entrypoint in self.action_map:
for action_param in self.action_map[entrypoint]:
if action_param.id == self.id:
util.invalid_integration(
"duplicate_action_param",
f"You can't have 2 action parameters with the same id ({action_str})",
)
self.action_map[entrypoint].append(self)
@classmethod
def for_action(cls, action: action_decorator):
return cls.action_map[action.entrypoint]
class ValidationError(NamedTuple):
message: str
param: "__EnvVar"
def to_json(self):
return {"message": self.message, "inputId": self.param.id}
CONVERTIBLE_TYPES = [int, str, float, bool, JinjaTemplatedStr]
def convert(c):
def do_convert(raw: Dict[str, Any], column):
if c == JinjaTemplatedStr:
template = Template(column)
return template.render(**raw)
else:
value = raw[column]
if c in [str, float]:
return c(value)
if c == int:
return int(float(value))
elif c == bool:
return DataType.BOOL.coerce(value)
return do_convert
def get_input_converter(entrypoint_fn) -> Dict[str, Callable[[str], Any]]:
"""
Build the input_conversion map to allow promotion from String to int, float, and bool
:param entrypoint_fn:
:return: Dict from the name of the function arguments to a converter function.
"""
sig = inspect.signature(entrypoint_fn)
converter = {}
from lhub_integ.util import exit_with_instantiation_errors
for param in sig.parameters:
annot = sig.parameters[param].annotation
# The annotation is the Python class -- in these simple cases we can just call
# the class constructor
if annot in CONVERTIBLE_TYPES:
converter[param] = convert(annot)
elif annot == inspect.Parameter.empty:
converter[param] = lambda raw, column: raw[column]
else:
exit_with_instantiation_errors(
1,
[
f"Unsupported type annotation: {annot}. Valid annotations are: {CONVERTIBLE_TYPES}"
],
)
return converter
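# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# Shows how type annotations on an entrypoint drive the converter map built by
# get_input_converter() above. The entrypoint and raw row values are hypothetical.
if __name__ == "__main__":
    def process_row(url: str, retries: int, threshold: float, dry_run: bool):
        return url, retries, threshold, dry_run

    converters = get_input_converter(process_row)
    raw_row = {"url": "https://example.com", "retries": "3", "threshold": "0.75", "dry_run": "yes"}
    coerced = {name: conv(raw_row, name) for name, conv in converters.items()}
    # {'url': 'https://example.com', 'retries': 3, 'threshold': 0.75, 'dry_run': True}
    print(coerced)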
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/MaintainOrderStatusExtParams.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
class MaintainOrderStatusExtParams(object):
def __init__(self):
self._logistics_code = None
self._logistics_company = None
self._logistics_no = None
self._order_status_txt = None
self._receiver_addr = None
self._sender_addr = None
@property
def logistics_code(self):
return self._logistics_code
@logistics_code.setter
def logistics_code(self, value):
self._logistics_code = value
@property
def logistics_company(self):
return self._logistics_company
@logistics_company.setter
def logistics_company(self, value):
self._logistics_company = value
@property
def logistics_no(self):
return self._logistics_no
@logistics_no.setter
def logistics_no(self, value):
self._logistics_no = value
@property
def order_status_txt(self):
return self._order_status_txt
@order_status_txt.setter
def order_status_txt(self, value):
self._order_status_txt = value
@property
def receiver_addr(self):
return self._receiver_addr
@receiver_addr.setter
def receiver_addr(self, value):
self._receiver_addr = value
@property
def sender_addr(self):
return self._sender_addr
@sender_addr.setter
def sender_addr(self, value):
self._sender_addr = value
def to_alipay_dict(self):
params = dict()
if self.logistics_code:
if hasattr(self.logistics_code, 'to_alipay_dict'):
params['logistics_code'] = self.logistics_code.to_alipay_dict()
else:
params['logistics_code'] = self.logistics_code
if self.logistics_company:
if hasattr(self.logistics_company, 'to_alipay_dict'):
params['logistics_company'] = self.logistics_company.to_alipay_dict()
else:
params['logistics_company'] = self.logistics_company
if self.logistics_no:
if hasattr(self.logistics_no, 'to_alipay_dict'):
params['logistics_no'] = self.logistics_no.to_alipay_dict()
else:
params['logistics_no'] = self.logistics_no
if self.order_status_txt:
if hasattr(self.order_status_txt, 'to_alipay_dict'):
params['order_status_txt'] = self.order_status_txt.to_alipay_dict()
else:
params['order_status_txt'] = self.order_status_txt
if self.receiver_addr:
if hasattr(self.receiver_addr, 'to_alipay_dict'):
params['receiver_addr'] = self.receiver_addr.to_alipay_dict()
else:
params['receiver_addr'] = self.receiver_addr
if self.sender_addr:
if hasattr(self.sender_addr, 'to_alipay_dict'):
params['sender_addr'] = self.sender_addr.to_alipay_dict()
else:
params['sender_addr'] = self.sender_addr
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MaintainOrderStatusExtParams()
if 'logistics_code' in d:
o.logistics_code = d['logistics_code']
if 'logistics_company' in d:
o.logistics_company = d['logistics_company']
if 'logistics_no' in d:
o.logistics_no = d['logistics_no']
if 'order_status_txt' in d:
o.order_status_txt = d['order_status_txt']
if 'receiver_addr' in d:
o.receiver_addr = d['receiver_addr']
if 'sender_addr' in d:
o.sender_addr = d['sender_addr']
return o
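# --- Illustrative usage sketch (editorial addition, not part of the generated file) ---
# Round trip between a plain dict and the generated model. Field values are made up.
if __name__ == "__main__":
    payload = {
        "logistics_company": "SF Express",
        "logistics_no": "SF1234567890",
        "order_status_txt": "SHIPPED",
    }
    obj = MaintainOrderStatusExtParams.from_alipay_dict(payload)
    print(obj.logistics_no)
    # to_alipay_dict() emits only the fields that were actually set
    print(obj.to_alipay_dict())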
|
PypiClean
|
/ansible-kkvesper-2.3.2.0.tar.gz/ansible-kkvesper-2.3.2.0/lib/ansible/modules/packaging/os/pkg5_publisher.py
|
# Copyright 2014 Peter Oliver <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5_publisher
author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
- This module configures which publishers a client will download IPS
packages from.
options:
name:
description:
- The publisher's name.
required: true
aliases: [ publisher ]
state:
description:
- Whether to ensure that a publisher is present or absent.
required: false
default: present
choices: [ present, absent ]
sticky:
description:
- Packages installed from a sticky repository can only receive updates
from that repository.
required: false
default: null
choices: [ true, false ]
enabled:
description:
- Is the repository enabled or disabled?
required: false
default: null
choices: [ true, false ]
origin:
description:
- A path or URL to the repository.
- Multiple values may be provided.
required: false
default: null
mirror:
description:
- A path or URL to the repository mirror.
- Multiple values may be provided.
required: false
default: null
'''
EXAMPLES = '''
# Fetch packages for the solaris publisher direct from Oracle:
- pkg5_publisher:
name: solaris
sticky: true
origin: https://pkg.oracle.com/solaris/support/
# Configure a publisher for locally-produced packages:
- pkg5_publisher:
name: site
origin: 'https://pkg.example.com/site/'
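# Editorial addition (hedged): the module's documented state=absent removes a publisher.
- pkg5_publisher:
    name: site
    state: absent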
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['publisher']),
state=dict(default='present', choices=['present', 'absent']),
sticky=dict(type='bool'),
enabled=dict(type='bool'),
# search_after=dict(),
# search_before=dict(),
origin=dict(type='list'),
mirror=dict(type='list'),
)
)
for option in ['origin', 'mirror']:
if module.params[option] == ['']:
module.params[option] = []
if module.params['state'] == 'present':
modify_publisher(module, module.params)
else:
unset_publisher(module, module.params['name'])
def modify_publisher(module, params):
name = params['name']
existing = get_publishers(module)
if name in existing:
for option in ['origin', 'mirror', 'sticky', 'enabled']:
if params[option] is not None:
if params[option] != existing[name][option]:
return set_publisher(module, params)
else:
return set_publisher(module, params)
module.exit_json()
def set_publisher(module, params):
name = params['name']
args = []
if params['origin'] is not None:
args.append('--remove-origin=*')
args.extend(['--add-origin=' + u for u in params['origin']])
if params['mirror'] is not None:
args.append('--remove-mirror=*')
args.extend(['--add-mirror=' + u for u in params['mirror']])
if params['sticky'] is not None and params['sticky']:
args.append('--sticky')
elif params['sticky'] is not None:
args.append('--non-sticky')
if params['enabled'] is not None and params['enabled']:
args.append('--enable')
elif params['enabled'] is not None:
args.append('--disable')
rc, out, err = module.run_command(
["pkg", "set-publisher"] + args + [name],
check_rc=True
)
response = {
'rc': rc,
'results': [out],
'msg': err,
'changed': True,
}
module.exit_json(**response)
def unset_publisher(module, publisher):
if publisher not in get_publishers(module):
module.exit_json()
rc, out, err = module.run_command(
["pkg", "unset-publisher", publisher],
check_rc=True
)
response = {
'rc': rc,
'results': [out],
'msg': err,
'changed': True,
}
module.exit_json(**response)
def get_publishers(module):
rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
lines = out.splitlines()
keys = lines.pop(0).lower().split("\t")
publishers = {}
for line in lines:
values = dict(zip(keys, map(unstringify, line.split("\t"))))
name = values['publisher']
if name not in publishers:
publishers[name] = dict(
(k, values[k]) for k in ['sticky', 'enabled']
)
publishers[name]['origin'] = []
publishers[name]['mirror'] = []
if values['type'] is not None:
publishers[name][values['type']].append(values['uri'])
return publishers
def unstringify(val):
if val == "-" or val == '':
return None
elif val == "true":
return True
elif val == "false":
return False
else:
return val
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
PypiClean
|
/flask-arch-0.1.1.tar.gz/flask-arch-0.1.1/flask_arch/cms/blocks.py
|
import os
from flask import request, send_file
from flask_login import current_user
from .base import ContentManager
from .. import tags
from ..utils import ensure_type, ensure_callable, RequestParser
from ..blocks import RouteBlock
class ManageBlock(RouteBlock):
def __init__(self, keyword, content_manager, **kwargs):
super().__init__(keyword, **kwargs)
ensure_type(content_manager, ContentManager, 'content_manager')
self.content_manager = content_manager
class LstBlock(ManageBlock):
def route(self):
try:
c = self.content_manager.select_all()
return self.render(data=c)
except Exception as e:
# likely a server error
return self.server_error(e)
class ViewBlock(ManageBlock):
def prepare_target(self, rp):
c = self.content_manager.query(rp)
cv = c.view(rp, current_user)
return cv
def route(self):
try:
rp = RequestParser(request)
cv = self.prepare_target(rp)
return self.render(target=cv)
except Exception as e:
# likely a client error
e.reroute = True # specify that the fallback will be a reroute
# this is because the render has failed
return self.client_error(e)
class FileBlock(ViewBlock):
def route(self):
if not hasattr(self.content_manager.Content, 'read_file'):
self.abort(404)
try:
rp = RequestParser(request)
target = self.prepare_target(rp)
filename = self.content_manager.Content.parse_filename(rp)
fp = target.read_file(filename)
return send_file(fp, download_name=filename)
except Exception as e:
return self.client_error(e)
class PrepExecBlock(ManageBlock):
def __init__(self, keyword, content_manager, **kwargs):
super().__init__(keyword, content_manager, **kwargs)
ensure_callable(self.prepare, f'{self.__class__.__name__}.prepare')
ensure_callable(self.execute, f'{self.__class__.__name__}.execute')
def initial(self):
return self.render()
@property
def default_methods(self):
return ['GET', 'POST']
def route(self):
if request.method == 'POST':
rp = RequestParser(request)
try:
aargs = self.prepare(rp)
except Exception as e:
# client error
return self.client_error(e)
try:
return self.execute(*aargs)
except Exception as e:
# server error: unexpected exception
self.content_manager.rollback() # rollback
return self.server_error(e)
try:
return self.initial()
except Exception as e:
# client error
e.reroute = True # the render/initial request likely failed. reroute is necessary
return self.client_error(e)
class AddBlock(PrepExecBlock):
def initial(self):
c = self.content_manager.Content
return self.render(Content=c)
def prepare(self, rp):
c = self.content_manager.Content(rp, current_user)
c.before_insert(rp, current_user) # before committing the insert
return (rp, c)
def execute(self, rp, c):
# insert the new content
self.content_manager.insert(c)
self.content_manager.commit() # commit insertion
c.after_insert(rp, current_user)
self.callback(tags.SUCCESS, c.id)
return self.reroute(id=c.id)
class ModBlock(PrepExecBlock):
def initial(self):
rp = RequestParser(request)
c = self.content_manager.query(rp)
cv = c.view(rp, current_user)
return self.render(target=cv)
def prepare(self, rp):
c = self.content_manager.query(rp)
c.modify(rp, current_user)
c.before_update(rp, current_user)
return (rp, c)
def execute(self, rp, c):
# update the existing content
self.content_manager.update(c)
self.content_manager.commit() # commit the update
c.after_update(rp, current_user)
self.callback(tags.SUCCESS, c.id)
return self.reroute(id=c.id)
class DelBlock(PrepExecBlock):
def initial(self):
rp = RequestParser(request)
c = self.content_manager.query(rp)
cv = c.view(rp, current_user)
return self.render(target=cv)
def prepare(self, rp):
c = self.content_manager.query(rp)
c.deinit(rp, current_user)
c.before_delete(rp, current_user)
return (rp, c)
def execute(self, rp, c):
# delete the content
self.content_manager.delete(c)
self.content_manager.commit() # commit the deletion
c.after_delete(rp, current_user)
self.callback(tags.SUCCESS, c.id)
return self.reroute(id=c.id)
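# --- Illustrative sketch (editorial addition, not part of the original module) ---
# PrepExecBlock.route() above splits a POST into prepare() (validation, exceptions are
# treated as client errors) and execute() (commits, rolled back on unexpected exceptions).
# The hypothetical block below follows that contract, reusing the before/after_update
# hooks already used by ModBlock; it is an assumption for illustration only.
class TouchBlock(PrepExecBlock):
    def prepare(self, rp):
        c = self.content_manager.query(rp)  # may raise -> handled as a client error
        c.before_update(rp, current_user)
        return (rp, c)

    def execute(self, rp, c):
        self.content_manager.update(c)      # exceptions here trigger rollback()
        self.content_manager.commit()
        c.after_update(rp, current_user)
        self.callback(tags.SUCCESS, c.id)
        return self.reroute(id=c.id)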
|
PypiClean
|
/custom-awscli-1.27.51.tar.gz/custom-awscli-1.27.51/awscli/examples/kms/encrypt.rst
|
**Example 1: To encrypt the contents of a file on Linux or MacOS**
The following ``encrypt`` command demonstrates the recommended way to encrypt data with the AWS CLI. ::
aws kms encrypt \
--key-id 1234abcd-12ab-34cd-56ef-1234567890ab \
--plaintext fileb://ExamplePlaintextFile \
--output text \
--query CiphertextBlob | base64 \
--decode > ExampleEncryptedFile
The command does several things:
#. Uses the ``fileb://`` prefix to specify the ``--plaintext`` parameter.
The ``fileb://`` prefix instructs the CLI to read the data to encrypt, called the *plaintext*, from a file and pass the file's contents to the command's ``--plaintext`` parameter. If the file is not in the current directory, type the full path to file. For example: ``fileb:///var/tmp/ExamplePlaintextFile`` or ``fileb://C:\Temp\ExamplePlaintextFile``.
For more information about reading AWS CLI parameter values from a file, see `Loading Parameters from a File <https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-file>`_ in the *AWS Command Line Interface User Guide* and `Best Practices for Local File Parameters <https://blogs.aws.amazon.com/cli/post/TxLWWN1O25V1HE/Best-Practices-for-Local-File-Parameters>`_ on the AWS Command Line Tool Blog.
#. Uses the ``--output`` and ``--query`` parameters to control the command's output.
These parameters extract the encrypted data, called the *ciphertext*, from the command's output.
For more information about controlling output, see `Controlling Command Output <https://docs.aws.amazon.com/cli/latest/userguide/controlling-output.html>`_ in the *AWS Command Line Interface User Guide*.
#. Uses the ``base64`` utility to decode the extracted output.
This utility decodes the extracted ciphertext to binary data. The ciphertext that is returned by a successful ``encrypt`` command is base64-encoded text. You must decode this text before you can use the AWS CLI to decrypt it.
#. Saves the binary ciphertext to a file.
The final part of the command (``> ExampleEncryptedFile``) saves the binary ciphertext to a file to make decryption easier. For an example command that uses the AWS CLI to decrypt data, see the `decrypt examples <decrypt.html#examples>`_.
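As a quick companion illustration only (the canonical version lives in the linked ``decrypt`` examples), the saved binary ciphertext can later be decrypted and the plaintext recovered in the same way::
    aws kms decrypt \
        --ciphertext-blob fileb://ExampleEncryptedFile \
        --output text \
        --query Plaintext | base64 \
        --decode > ExampleDecryptedFile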
**Example 2: Using the AWS CLI to encrypt data on Windows**
The preceding example assumes the ``base64`` utility is available, which is commonly the case on Linux and MacOS. For the Windows command prompt, use ``certutil`` instead of ``base64``. This requires two commands, as shown in the following examples. ::
aws kms encrypt \
--key-id 1234abcd-12ab-34cd-56ef-1234567890ab \
--plaintext fileb://ExamplePlaintextFile \
--output text \
--query CiphertextBlob > C:\Temp\ExampleEncryptedFile.base64
certutil -decode C:\Temp\ExampleEncryptedFile.base64 C:\Temp\ExampleEncryptedFile
**Example 3: Encrypting with an asymmetric KMS key**
The following ``encrypt`` command shows how to encrypt plaintext with an asymmetric KMS key. The ``--encryption-algorithm`` parameter is required. ::
aws kms encrypt \
--key-id 1234abcd-12ab-34cd-56ef-1234567890ab \
--encryption-algorithm RSAES_OAEP_SHA_256 \
--plaintext fileb://ExamplePlaintextFile \
--output text \
--query CiphertextBlob | base64 \
--decode > ExampleEncryptedFile
This command produces no output. The output from the ``encrypt`` command is base64-decoded and saved in a file.
|
PypiClean
|
/mlflow_by_johnsnowlabs-2.20.0-py3-none-any.whl/mlflow/tensorflow/__init__.py
|
import os
import shutil
import logging
import concurrent.futures
import warnings
import atexit
import tempfile
from collections import namedtuple
import pandas
from packaging.version import Version
from threading import RLock
import numpy as np
import importlib
import yaml
import re
import mlflow
from mlflow import pyfunc
from mlflow.data.code_dataset_source import CodeDatasetSource
from mlflow.data.numpy_dataset import from_numpy
from mlflow.data.tensorflow_dataset import from_tensorflow
from mlflow.types.schema import TensorSpec
from mlflow.tracking.client import MlflowClient
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils import is_iterator
from mlflow.utils.environment import (
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_PythonEnv,
_mlflow_conda_env,
)
from mlflow.utils.file_utils import write_to
from mlflow.utils.requirements_utils import _get_pinned_requirement
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_add_code_from_conf_to_system_path,
_validate_and_copy_code_paths,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.autologging_utils import (
autologging_integration,
safe_patch,
resolve_input_example_and_signature,
picklable_exception_safe_function,
PatchFunction,
log_fn_args_as_params,
batch_metrics_logger,
get_autologging_config,
)
from mlflow.utils.time_utils import get_current_time_millis
from mlflow.entities import Metric
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.tracking.context import registry as context_registry
from mlflow.models import infer_signature
from mlflow.exceptions import INVALID_PARAMETER_VALUE
FLAVOR_NAME = "tensorflow"
_logger = logging.getLogger(__name__)
_MAX_METRIC_QUEUE_SIZE = 500
_LOG_EVERY_N_STEPS = 1
_metric_queue_lock = RLock()
_metric_queue = []
_thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
# For tracking if the run was started by autologging.
_AUTOLOG_RUN_ID = None
# File name to which custom objects cloudpickle is saved - used during save and load
_CUSTOM_OBJECTS_SAVE_PATH = "custom_objects.cloudpickle"
_KERAS_MODULE_SPEC_PATH = "keras_module.txt"
_KERAS_SAVE_FORMAT_PATH = "save_format.txt"
# File name to which keras model is saved
_MODEL_SAVE_PATH = "model"
_MODEL_TYPE_KERAS = "keras"
_MODEL_TYPE_TF1_ESTIMATOR = "tf1-estimator"
_MODEL_TYPE_TF2_MODULE = "tf2-module"
def get_default_pip_requirements(include_cloudpickle=False):
"""
:return: A list of default pip requirements for MLflow Models produced by this flavor.
Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
that, at minimum, contains these requirements.
"""
pip_deps = [_get_pinned_requirement("tensorflow")]
if include_cloudpickle:
pip_deps.append(_get_pinned_requirement("cloudpickle"))
return pip_deps
def get_default_conda_env():
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def log_model(
model,
artifact_path,
custom_objects=None,
conda_env=None,
code_paths=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
registered_model_name=None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
saved_model_kwargs=None,
keras_model_kwargs=None,
metadata=None,
):
"""
Log a TF2 core model (inheriting tf.Module) or a Keras model in MLflow Model format.
.. note::
If you log a Keras or TensorFlow model without a signature, inference with
:py:func:`mlflow.pyfunc.spark_udf()` will not work unless the model's pyfunc
representation accepts pandas DataFrames as inference inputs.
You can infer a model's signature by calling the :py:func:`mlflow.models.infer_signature()`
API on features from the model's test dataset. You can also manually create a model
signature, for example:
.. code-block:: python
:caption: Example of creating signature for saving TensorFlow and `tf.Keras` models
from mlflow.types.schema import Schema, TensorSpec
from mlflow.models.signature import ModelSignature
import numpy as np
input_schema = Schema(
[
TensorSpec(np.dtype(np.uint64), (-1, 5), "field1"),
TensorSpec(np.dtype(np.float32), (-1, 3, 2), "field2"),
]
)
# Create the signature for a model that requires 2 inputs:
# - Input with name "field1", shape (-1, 5), type "np.uint64"
# - Input with name "field2", shape (-1, 3, 2), type "np.float32"
signature = ModelSignature(inputs=input_schema)
:param model: The TF2 core model (inheriting tf.Module) or Keras model to be saved.
:param artifact_path: The run-relative path to which to log model artifacts.
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.tensorflow.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param registered_model_name: If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame where the given
example will be serialized to json using the Pandas split-oriented
format, or a numpy array where the example will be serialized to json
by converting it to a list. Bytes are base64-encoded.
:param await_registration_for: Number of seconds to wait for the model version to finish
being created and is in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param saved_model_kwargs: a dict of kwargs to pass to ``tensorflow.saved_model.save`` method.
:param keras_model_kwargs: a dict of kwargs to pass to ``keras_model.save`` method.
:param metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
"""
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.tensorflow,
model=model,
conda_env=conda_env,
code_paths=code_paths,
custom_objects=custom_objects,
registered_model_name=registered_model_name,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
saved_model_kwargs=saved_model_kwargs,
keras_model_kwargs=keras_model_kwargs,
metadata=metadata,
)
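# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# Minimal example of logging a Keras model with an explicit signature, as the log_model
# docstring above recommends. The toy model, data, and run are assumptions.
if __name__ == "__main__":
    import numpy as np
    import tensorflow as tf

    toy_model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    toy_model.compile(optimizer="adam", loss="mse")

    x = np.random.rand(8, 4).astype(np.float32)
    toy_signature = infer_signature(x, toy_model.predict(x))

    with mlflow.start_run():
        model_info = log_model(toy_model, artifact_path="model", signature=toy_signature)
        print(model_info.model_uri)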
def _save_keras_custom_objects(path, custom_objects):
"""
Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.
:param path: An absolute path that points to the data directory within /path/to/model.
:param custom_objects: Keras ``custom_objects`` is a dictionary mapping
names (strings) to custom classes or functions to be considered
during deserialization. MLflow saves these custom layers using
CloudPickle and restores them automatically when the model is
loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
"""
import cloudpickle
custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
with open(custom_objects_path, "wb") as out_f:
cloudpickle.dump(custom_objects, out_f)
_NO_MODEL_SIGNATURE_WARNING = (
"You are saving a TensorFlow Core model or Keras model "
"without a signature. Inference with mlflow.pyfunc.spark_udf() will not work "
"unless the model's pyfunc representation accepts pandas DataFrames as "
"inference inputs."
)
def _get_keras_version(keras_module):
import tensorflow
if Version(tensorflow.__version__) >= Version("2.6.0"):
import keras
return keras.__version__
else:
return keras_module.__version__
def save_model(
model,
path,
conda_env=None,
code_paths=None,
mlflow_model=None,
custom_objects=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
saved_model_kwargs=None,
keras_model_kwargs=None,
metadata=None,
):
"""
Save a TF2 core model (inheriting tf.Module) or Keras model in MLflow Model format to a path on
the local file system.
.. note::
If you save a Keras or TensorFlow model without a signature, inference with
:py:func:`mlflow.pyfunc.spark_udf()` will not work unless the model's pyfunc
representation accepts pandas DataFrames as inference inputs.
You can infer a model's signature by calling the :py:func:`mlflow.models.infer_signature()`
API on features from the model's test dataset. You can also manually create a model
signature, for example:
.. code-block:: python
:caption: Example of creating signature for saving TensorFlow and `tf.Keras` models
from mlflow.types.schema import Schema, TensorSpec
from mlflow.models.signature import ModelSignature
import numpy as np
input_schema = Schema(
[
TensorSpec(np.dtype(np.uint64), (-1, 5), "field1"),
TensorSpec(np.dtype(np.float32), (-1, 3, 2), "field2"),
]
)
# Create the signature for a model that requires 2 inputs:
# - Input with name "field1", shape (-1, 5), type "np.uint64"
# - Input with name "field2", shape (-1, 3, 2), type "np.float32"
signature = ModelSignature(inputs=input_schema)
:param model: The Keras model or Tensorflow module to be saved.
:param path: Local path where the MLflow model is to be saved.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param mlflow_model: MLflow model configuration to which to add the ``tensorflow`` flavor.
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.tensorflow.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame where the given
example will be serialized to json using the Pandas split-oriented
format, or a numpy array where the example will be serialized to json
by converting it to a list. Bytes are base64-encoded.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param saved_model_kwargs: a dict of kwargs to pass to ``tensorflow.saved_model.save`` method
if the model to be saved is a Tensorflow module.
:param keras_model_kwargs: a dict of kwargs to pass to ``model.save`` method if the model
to be saved is a keras model.
:param metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
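    A minimal end-to-end usage sketch is shown below (the model definition and the save
    path are placeholders, not part of the API):
    .. code-block:: python
        :caption: Minimal usage sketch for saving a ``tf.keras`` model
        import mlflow
        import tensorflow as tf
        model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(5,))])
        mlflow.tensorflow.save_model(model, path="/tmp/my_tf_model")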
"""
import tensorflow
from tensorflow.keras.models import Model as KerasModel
if signature is None:
_logger.warning(_NO_MODEL_SIGNATURE_WARNING)
else:
num_inputs = len(signature.inputs.inputs)
if num_inputs == 0:
raise MlflowException(
"The model signature's input schema must contain at least one field.",
error_code=INVALID_PARAMETER_VALUE,
)
for field in signature.inputs.inputs:
if not isinstance(field, TensorSpec):
raise MlflowException(
"All fields in the model signature's input schema must be of type TensorSpec.",
error_code=INVALID_PARAMETER_VALUE,
)
if field.shape[0] != -1:
raise MlflowException(
"All fields in the model signature's input schema must have a shape "
"in which the first dimension is a variable dimension.",
error_code=INVALID_PARAMETER_VALUE,
)
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
# check if path exists
path = os.path.abspath(path)
_validate_and_prepare_target_save_path(path)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
if metadata is not None:
mlflow_model.metadata = metadata
if isinstance(model, KerasModel):
keras_model_kwargs = keras_model_kwargs or {}
data_subpath = "data"
# construct new data folder in existing path
data_path = os.path.join(path, data_subpath)
os.makedirs(data_path)
model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH)
keras_module = importlib.import_module("tensorflow.keras")
# save custom objects if there are custom objects
if custom_objects is not None:
_save_keras_custom_objects(data_path, custom_objects)
# save keras module spec to path/data/keras_module.txt
with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), "w") as f:
f.write(keras_module.__name__)
# Use the SavedModel format if `save_format` is unspecified
save_format = keras_model_kwargs.get("save_format", "tf")
# save keras save_format to path/data/save_format.txt
with open(os.path.join(data_path, _KERAS_SAVE_FORMAT_PATH), "w") as f:
f.write(save_format)
# save keras model
# To maintain prior behavior, when the format is HDF5, we save
# with the h5 file extension. Otherwise, model_path is a directory
# where the saved_model.pb will be stored (for SavedModel format)
file_extension = ".h5" if save_format == "h5" else ""
model_path = os.path.join(path, model_subpath) + file_extension
if path.startswith("/dbfs/"):
# The Databricks Filesystem uses a FUSE implementation that does not support
# random writes. It causes an error.
with tempfile.NamedTemporaryFile(suffix=".h5") as f:
model.save(f.name, **keras_model_kwargs)
f.flush() # force flush the data
shutil.copyfile(src=f.name, dst=model_path)
else:
model.save(model_path, **keras_model_kwargs)
pyfunc_options = {
"data": data_subpath,
}
flavor_options = {
**pyfunc_options,
"model_type": _MODEL_TYPE_KERAS,
"keras_version": _get_keras_version(keras_module),
"save_format": save_format,
}
elif isinstance(model, tensorflow.Module):
saved_model_kwargs = saved_model_kwargs or {}
model_dir_subpath = "tf2model"
model_path = os.path.join(path, model_dir_subpath)
tensorflow.saved_model.save(model, model_path, **saved_model_kwargs)
pyfunc_options = {}
flavor_options = {
"saved_model_dir": model_dir_subpath,
"model_type": _MODEL_TYPE_TF2_MODULE,
}
else:
raise MlflowException(f"Unknown model type: {type(model)}")
# update flavor info to mlflow_model
mlflow_model.add_flavor(FLAVOR_NAME, code=code_dir_subpath, **flavor_options)
# append loader_module, data and env data to mlflow_model
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.tensorflow",
conda_env=_CONDA_ENV_FILE_NAME,
python_env=_PYTHON_ENV_FILE_NAME,
code=code_dir_subpath,
**pyfunc_options,
)
# save mlflow_model to path/MLmodel
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
include_cloudpickle = custom_objects is not None
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements(include_cloudpickle)
# To ensure `_load_pyfunc` can successfully load the model during the dependency
# inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
inferred_reqs = mlflow.models.infer_pip_requirements(
path, FLAVOR_NAME, fallback=default_reqs
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Save `constraints.txt` if necessary
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
# Save `requirements.txt`
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
def _load_keras_model(model_path, keras_module, save_format, **kwargs):
keras_models = importlib.import_module(keras_module.__name__ + ".models")
custom_objects = kwargs.pop("custom_objects", {})
custom_objects_path = None
if os.path.isdir(model_path):
if os.path.isfile(os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)):
custom_objects_path = os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)
model_path = os.path.join(model_path, _MODEL_SAVE_PATH)
if custom_objects_path is not None:
import cloudpickle
with open(custom_objects_path, "rb") as in_f:
pickled_custom_objects = cloudpickle.load(in_f)
pickled_custom_objects.update(custom_objects)
custom_objects = pickled_custom_objects
# If the save_format is HDF5, then we save with h5 file
# extension to align with prior behavior of mlflow logging
if save_format == "h5":
model_path = model_path + ".h5"
# keras in tensorflow used to have a '-tf' suffix in the version:
# https://github.com/tensorflow/tensorflow/blob/v2.2.1/tensorflow/python/keras/__init__.py#L36
unsuffixed_version = re.sub(r"-tf$", "", _get_keras_version(keras_module))
if save_format == "h5" and Version(unsuffixed_version) >= Version("2.2.3"):
# NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead
# of string to avoid issues.
import h5py
with h5py.File(os.path.abspath(model_path), "r") as model_path:
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
else:
# NOTE: Older versions of Keras only handle filepath.
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
def _get_flavor_conf(model_conf):
if "keras" in model_conf.flavors:
return model_conf.flavors["keras"]
return model_conf.flavors[FLAVOR_NAME]
def _infer_model_type(model_conf):
model_type = _get_flavor_conf(model_conf).get("model_type")
if model_type is not None:
return model_type
    # Loading a model logged by an old version of MLflow, which does not record model_type.
# Inferring model type by checking whether model_conf contains "keras" flavor.
if "keras" in model_conf.flavors:
return _MODEL_TYPE_KERAS
return _MODEL_TYPE_TF1_ESTIMATOR
def load_model(model_uri, dst_path=None, saved_model_kwargs=None, keras_model_kwargs=None):
"""
Load an MLflow model that contains the TensorFlow flavor from the specified path.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
:param saved_model_kwargs: kwargs to pass to ``tensorflow.saved_model.load`` method.
Only available when you are loading a tensorflow2 core model.
:param keras_model_kwargs: kwargs to pass to ``keras.models.load_model`` method.
Only available when you are loading a Keras model.
    :return: A Keras model instance (for models saved as Keras models), or a callable graph
             (``tf.function``) that takes inputs and returns inferences (for TF2 core modules
             and TF1 estimator SavedModels).
    .. code-block:: python
        :caption: Example
        import mlflow
        # ``model_uri`` and ``x_test`` are placeholders for your model URI and test data
        model = mlflow.tensorflow.load_model(model_uri="model_uri")
        predictions = model.predict(x_test)  # Keras models; TF2 core modules are called directly
"""
import tensorflow
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
model_configuration_path = os.path.join(local_model_path, MLMODEL_FILE_NAME)
model_conf = Model.load(model_configuration_path)
flavor_conf = _get_flavor_conf(model_conf)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
model_type = _infer_model_type(model_conf)
if model_type == _MODEL_TYPE_KERAS:
keras_model_kwargs = keras_model_kwargs or {}
keras_module = importlib.import_module(flavor_conf.get("keras_module", "tensorflow.keras"))
# For backwards compatibility, we assume h5 when the save_format is absent
save_format = flavor_conf.get("save_format", "h5")
model_path = os.path.join(local_model_path, flavor_conf.get("data", _MODEL_SAVE_PATH))
return _load_keras_model(
model_path=model_path,
keras_module=keras_module,
save_format=save_format,
**keras_model_kwargs,
)
if model_type == _MODEL_TYPE_TF1_ESTIMATOR:
tf_saved_model_dir = os.path.join(local_model_path, flavor_conf["saved_model_dir"])
tf_meta_graph_tags = flavor_conf["meta_graph_tags"]
tf_signature_def_key = flavor_conf["signature_def_key"]
return _load_tf1_estimator_saved_model(
tf_saved_model_dir=tf_saved_model_dir,
tf_meta_graph_tags=tf_meta_graph_tags,
tf_signature_def_key=tf_signature_def_key,
)
if model_type == _MODEL_TYPE_TF2_MODULE:
saved_model_kwargs = saved_model_kwargs or {}
tf_saved_model_dir = os.path.join(local_model_path, flavor_conf["saved_model_dir"])
return tensorflow.saved_model.load(tf_saved_model_dir, **saved_model_kwargs)
raise MlflowException(f"Unknown model_type: {model_type}")
def _load_tf1_estimator_saved_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key):
"""
Load a specified TensorFlow model consisting of a TensorFlow metagraph and signature definition
from a serialized TensorFlow ``SavedModel`` collection.
:param tf_saved_model_dir: The local filesystem path or run-relative artifact path to the model.
:param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the
serialized ``SavedModel`` object. For more information, see the
``tags`` parameter of the `tf.saved_model.builder.SavedModelBuilder
method <https://www.tensorflow.org/api_docs/python/tf/saved_model/
builder/SavedModelBuilder#add_meta_graph>`_.
:param tf_signature_def_key: A string identifying the input/output signature associated with the
model. This is a key within the serialized ``SavedModel``'s
signature definition mapping. For more information, see the
``signature_def_map`` parameter of the
``tf.saved_model.builder.SavedModelBuilder`` method.
:return: A callable graph (tensorflow.function) that takes inputs and returns inferences.
"""
import tensorflow
loaded = tensorflow.saved_model.load( # pylint: disable=no-value-for-parameter
tags=tf_meta_graph_tags, export_dir=tf_saved_model_dir
)
loaded_sig = loaded.signatures
if tf_signature_def_key not in loaded_sig:
raise MlflowException(
"Could not find signature def key %s. Available keys are: %s"
% (tf_signature_def_key, list(loaded_sig.keys()))
)
return loaded_sig[tf_signature_def_key]
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_model``. This function loads an MLflow
model with the TensorFlow flavor into a new TensorFlow graph and exposes it behind the
``pyfunc.predict`` interface.
:param path: Local filesystem path to the MLflow Model with the ``tensorflow`` flavor.
"""
import tensorflow
model_meta_path1 = os.path.join(path, MLMODEL_FILE_NAME)
model_meta_path2 = os.path.join(os.path.dirname(path), MLMODEL_FILE_NAME)
if os.path.isfile(model_meta_path1):
model_meta = Model.load(model_meta_path1)
elif os.path.isfile(model_meta_path2):
model_meta = Model.load(model_meta_path2)
else:
raise MlflowException(f"Cannot find file {MLMODEL_FILE_NAME} for the logged model.")
model_type = _infer_model_type(model_meta)
if model_type == _MODEL_TYPE_KERAS:
if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)):
with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH)) as f:
keras_module = importlib.import_module(f.read())
else:
import tensorflow.keras
keras_module = tensorflow.keras
# By default, we assume the save_format is h5 for backwards compatibility
save_format = "h5"
save_format_path = os.path.join(path, _KERAS_SAVE_FORMAT_PATH)
if os.path.isfile(save_format_path):
with open(save_format_path) as f:
save_format = f.read()
        # Compile the model only when it was saved in the SavedModel ("tf") format;
        # H5-format models are loaded without compiling to preserve prior behavior.
should_compile = save_format == "tf"
K = importlib.import_module(keras_module.__name__ + ".backend")
if K.backend() == "tensorflow":
K.set_learning_phase(0)
m = _load_keras_model(
path, keras_module=keras_module, save_format=save_format, compile=should_compile
)
return _KerasModelWrapper(m, model_meta.signature)
else:
raise MlflowException("Unsupported backend '%s'" % K._BACKEND)
if model_type == _MODEL_TYPE_TF1_ESTIMATOR:
flavor_conf = _get_flavor_configuration(path, FLAVOR_NAME)
tf_saved_model_dir = os.path.join(path, flavor_conf["saved_model_dir"])
tf_meta_graph_tags = flavor_conf["meta_graph_tags"]
tf_signature_def_key = flavor_conf["signature_def_key"]
loaded_model = tensorflow.saved_model.load( # pylint: disable=no-value-for-parameter
export_dir=tf_saved_model_dir, tags=tf_meta_graph_tags
)
return _TF2Wrapper(model=loaded_model, infer=loaded_model.signatures[tf_signature_def_key])
if model_type == _MODEL_TYPE_TF2_MODULE:
flavor_conf = _get_flavor_configuration(path, FLAVOR_NAME)
tf_saved_model_dir = os.path.join(path, flavor_conf["saved_model_dir"])
loaded_model = tensorflow.saved_model.load(tf_saved_model_dir)
return _TF2ModuleWrapper(model=loaded_model, signature=model_meta.signature)
raise MlflowException("Unknown model_type.")
class _TF2Wrapper:
"""
Wrapper class that exposes a TensorFlow model for inference via a ``predict`` function such that
``predict(data: pandas.DataFrame) -> pandas.DataFrame``. For TensorFlow versions >= 2.0.0.
"""
def __init__(self, model, infer):
"""
:param model: A Tensorflow SavedModel.
:param infer: Tensorflow function returned by a saved model that is used for inference.
"""
# Note: we need to retain the model reference in TF2Wrapper object, because the infer
# function in tensorflow will be `ConcreteFunction` which only retains WeakRefs to the
# variables they close over.
# See https://www.tensorflow.org/guide/function#deleting_tfvariables_between_function_calls
self.model = model
self.infer = infer
def predict(self, data):
import tensorflow
feed_dict = {}
if isinstance(data, dict):
feed_dict = {k: tensorflow.constant(v) for k, v in data.items()}
elif isinstance(data, pandas.DataFrame):
for df_col_name in list(data):
# If there are multiple columns with the same name, selecting the shared name
# from the DataFrame will result in another DataFrame containing the columns
# with the shared name. TensorFlow cannot make eager tensors out of pandas
# DataFrames, so we convert the DataFrame to a numpy array here.
val = data[df_col_name]
if isinstance(val, pandas.DataFrame):
val = val.values
else:
val = np.array(val.to_list())
feed_dict[df_col_name] = tensorflow.constant(val)
else:
raise TypeError("Only dict and DataFrame input types are supported")
raw_preds = self.infer(**feed_dict)
pred_dict = {col_name: raw_preds[col_name].numpy() for col_name in raw_preds.keys()}
for col in pred_dict.keys():
# If the output tensor is not 1-dimensional
# AND all elements have length of 1, flatten the array with `ravel()`
if len(pred_dict[col].shape) != 1 and all(
len(element) == 1 for element in pred_dict[col]
):
pred_dict[col] = pred_dict[col].ravel()
else:
pred_dict[col] = pred_dict[col].tolist()
if isinstance(data, dict):
return pred_dict
else:
return pandas.DataFrame.from_dict(data=pred_dict)
class _TF2ModuleWrapper:
def __init__(self, model, signature):
self.model = model
self.signature = signature
def predict(self, data):
import tensorflow
if isinstance(data, (np.ndarray, list)):
data = tensorflow.convert_to_tensor(data)
else:
raise MlflowException(
f"Unsupported input data type: {type(data)}, the input data must be "
"numpy array or a list."
)
result = self.model(data)
if isinstance(result, tensorflow.Tensor):
return result.numpy()
return result
class _KerasModelWrapper:
def __init__(self, keras_model, signature):
self.keras_model = keras_model
self.signature = signature
def predict(self, data):
if isinstance(data, pandas.DataFrame):
# This line is for backwards compatibility:
# If model signature is not None, when calling
# `keras_pyfunc_model.predict(pandas_dataframe)`, `_enforce_schema` will convert
# dataframe input into dict input, so in the case `_KerasModelWrapper.predict`
# will receive a dict type input.
# If model signature is None, `_enforce_schema` can do nothing, and if the input
# is dataframe, `_KerasModelWrapper.predict` will receive a dataframe input,
# we need to handle this case, to keep backwards compatibility.
return pandas.DataFrame(self.keras_model.predict(data.values), index=data.index)
supported_input_types = (np.ndarray, list, tuple, dict)
if not isinstance(data, supported_input_types):
raise MlflowException(
f"Unsupported input data type: {type(data)}. "
f"Must be one of: {[x.__name__ for x in supported_input_types]}",
INVALID_PARAMETER_VALUE,
)
return self.keras_model.predict(data)
def _assoc_list_to_map(lst):
"""
Convert an association list to a dictionary.
"""
d = {}
for run_id, metric in lst:
d[run_id] = d[run_id] + [metric] if run_id in d else [metric]
return d
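# For illustration (the run ids and Metric objects below are hypothetical):
#   _assoc_list_to_map([("run_a", m1), ("run_b", m2), ("run_a", m3)])
#   returns {"run_a": [m1, m3], "run_b": [m2]}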
def _flush_queue():
"""
Flush the metric queue and log contents in batches to MLflow.
Queue is divided into batches according to run id.
"""
try:
# Multiple queue flushes may be scheduled simultaneously on different threads
# (e.g., if the queue is at its flush threshold and several more items
# are added before a flush occurs). For correctness and efficiency, only one such
# flush operation should proceed; all others are redundant and should be dropped
acquired_lock = _metric_queue_lock.acquire(blocking=False)
if acquired_lock:
client = MlflowClient()
# For thread safety and to avoid modifying a list while iterating over it, we record a
# separate list of the items being flushed and remove each one from the metric queue,
# rather than clearing the metric queue or reassigning it (clearing / reassigning is
# dangerous because we don't block threads from adding to the queue while a flush is
# in progress)
snapshot = _metric_queue[:]
for item in snapshot:
_metric_queue.remove(item)
metrics_by_run = _assoc_list_to_map(snapshot)
for run_id, metrics in metrics_by_run.items():
client.log_batch(run_id, metrics=metrics, params=[], tags=[])
finally:
if acquired_lock:
_metric_queue_lock.release()
def _add_to_queue(key, value, step, time, run_id):
"""
Add a metric to the metric queue. Flush the queue if it exceeds
max size.
"""
met = Metric(key=key, value=value, timestamp=time, step=step)
_metric_queue.append((run_id, met))
if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:
_thread_pool.submit(_flush_queue)
def _log_event(event):
"""
Extracts metric information from the event protobuf
"""
if event.WhichOneof("what") == "summary":
summary = event.summary
for v in summary.value:
if v.HasField("simple_value"):
# NB: Most TensorFlow APIs use one-indexing for epochs, while tf.Keras
# uses zero-indexing. Accordingly, the modular arithmetic used here is slightly
# different from the arithmetic used in `__MLflowTfKeras2Callback.on_epoch_end`,
# which provides metric logging hooks for tf.Keras
if (event.step - 1) % _LOG_EVERY_N_STEPS == 0:
_add_to_queue(
key=v.tag,
value=v.simple_value,
step=event.step,
time=get_current_time_millis(),
run_id=mlflow.active_run().info.run_id,
)
@picklable_exception_safe_function
def _get_tensorboard_callback(lst):
import tensorflow
for x in lst:
if isinstance(x, tensorflow.keras.callbacks.TensorBoard):
return x
return None
# A representation of a TensorBoard event logging directory with two attributes:
# :location - string: The filesystem location of the logging directory
# :is_temp - boolean: `True` if the logging directory was created for temporary use by MLflow,
# `False` otherwise
_TensorBoardLogDir = namedtuple("_TensorBoardLogDir", ["location", "is_temp"])
def _setup_callbacks(lst, metrics_logger):
"""
    Adds TensorBoard and MLflow ``tf.keras`` callbacks to the
input list, and returns the new list and appropriate log directory.
"""
# pylint: disable=no-name-in-module
from mlflow.tensorflow._autolog import _TensorBoard, __MLflowTfKeras2Callback
tb = _get_tensorboard_callback(lst)
if tb is None:
log_dir = _TensorBoardLogDir(location=tempfile.mkdtemp(), is_temp=True)
out_list = lst + [_TensorBoard(log_dir.location)]
else:
log_dir = _TensorBoardLogDir(location=tb.log_dir, is_temp=False)
out_list = lst
out_list += [__MLflowTfKeras2Callback(metrics_logger, _LOG_EVERY_N_STEPS)]
return out_list, log_dir
@autologging_integration(FLAVOR_NAME)
def autolog(
every_n_iter=1,
log_models=True,
log_datasets=True,
disable=False,
exclusive=False,
disable_for_unsupported_versions=False,
silent=False,
registered_model_name=None,
log_input_examples=False,
log_model_signatures=True,
saved_model_kwargs=None,
keras_model_kwargs=None,
): # pylint: disable=unused-argument
# pylint: disable=no-name-in-module
"""
Enables autologging for ``tf.keras`` and ``keras``.
    Note that only ``tensorflow>=2.3`` is supported.
As an example, try running the
`Keras/TensorFlow example <https://github.com/mlflow/mlflow/blob/master/examples/keras/train.py>`_.
For each TensorFlow module, autologging captures the following information:
**tf.keras**
- **Metrics** and **Parameters**
- Training loss; validation loss; user-specified metrics
- ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon
- **Artifacts**
- Model summary on training start
- `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model)
- TensorBoard logs on training end
**tf.keras.callbacks.EarlyStopping**
- **Metrics** and **Parameters**
- Metrics from the ``EarlyStopping`` callbacks: ``stopped_epoch``, ``restored_epoch``,
    ``restore_best_weights``, etc
- ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``:
``min_delta``, ``patience``, ``baseline``, ``restore_best_weights``, etc
Refer to the autologging tracking documentation for more
information on `TensorFlow workflows
<https://www.mlflow.org/docs/latest/tracking.html#tensorflow-and-keras-experimental>`_.
:param every_n_iter: The frequency with which metrics should be logged. For example, a value of
100 will log metrics at step 0, 100, 200, etc.
:param log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
:param log_datasets: If ``True``, dataset information is logged to MLflow Tracking.
If ``False``, dataset information is not logged.
:param disable: If ``True``, disables the TensorFlow autologging integration. If ``False``,
                    enables the TensorFlow autologging integration.
:param exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
If ``False``, autologged content is logged to the active fluent run,
which may be user-created.
:param disable_for_unsupported_versions: If ``True``, disable autologging for versions of
tensorflow that have not been tested against this version of the MLflow
client or are incompatible.
:param silent: If ``True``, suppress all event logs and warnings from MLflow during TensorFlow
autologging. If ``False``, show all events and warnings during TensorFlow
autologging.
:param registered_model_name: If given, each time a model is trained, it is registered as a
new model version of the registered model with this name.
The registered model is created if it does not already exist.
:param log_input_examples: If ``True``, input examples from training datasets are collected and
logged along with tf/keras model artifacts during training. If
``False``, input examples are not logged.
:param log_model_signatures: If ``True``,
:py:class:`ModelSignatures <mlflow.models.ModelSignature>`
describing model inputs and outputs are collected and logged along
with tf/keras model artifacts during training. If ``False``,
signatures are not logged. Note that logging TensorFlow models
with signatures changes their pyfunc inference behavior when
Pandas DataFrames are passed to ``predict()``.
When a signature is present, an ``np.ndarray``
(for single-output models) or a mapping from
``str`` -> ``np.ndarray`` (for multi-output models) is returned;
when a signature is not present, a Pandas DataFrame is returned.
:param saved_model_kwargs: a dict of kwargs to pass to ``tensorflow.saved_model.save`` method.
:param keras_model_kwargs: a dict of kwargs to pass to ``keras_model.save`` method.
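    A minimal usage sketch (the model definition and ``x_train`` / ``y_train`` are
    placeholders for your own data):
    .. code-block:: python
        :caption: Minimal autologging sketch
        import mlflow
        import tensorflow as tf
        mlflow.tensorflow.autolog()
        model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
        model.compile(optimizer="adam", loss="mse")
        model.fit(x_train, y_train, epochs=2)  # x_train / y_train are placeholder training data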
"""
import tensorflow
global _LOG_EVERY_N_STEPS
_LOG_EVERY_N_STEPS = every_n_iter
atexit.register(_flush_queue)
if Version(tensorflow.__version__) < Version("2.3"):
warnings.warn("Could not log to MLflow. TensorFlow versions below 2.3 are not supported.")
return
@picklable_exception_safe_function
def _get_early_stop_callback(callbacks):
for callback in callbacks:
if isinstance(callback, tensorflow.keras.callbacks.EarlyStopping):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {
"monitor": callback.monitor,
"min_delta": callback.min_delta,
"patience": callback.patience,
"baseline": callback.baseline,
"restore_best_weights": callback.restore_best_weights,
}
mlflow.log_params(earlystopping_params)
except Exception:
return
def _get_early_stop_callback_attrs(callback):
try:
return callback.stopped_epoch, callback.restore_best_weights, callback.patience
except Exception:
return None
def _log_early_stop_callback_metrics(callback, history, metrics_logger):
if callback is None or not callback.model.stop_training:
return
callback_attrs = _get_early_stop_callback_attrs(callback)
if callback_attrs is None:
return
stopped_epoch, restore_best_weights, _ = callback_attrs
metrics_logger.record_metrics({"stopped_epoch": stopped_epoch})
if not restore_best_weights or callback.best_weights is None:
return
monitored_metric = history.history.get(callback.monitor)
if not monitored_metric:
return
initial_epoch = history.epoch[0]
# If `monitored_metric` contains multiple best values (e.g. [0.1, 0.1, 0.2] where 0.1 is
# the minimum loss), the epoch corresponding to the first occurrence of the best value is
# the best epoch. In keras > 2.6.0, the best epoch can be obtained via the `best_epoch`
# attribute of an `EarlyStopping` instance: https://github.com/keras-team/keras/pull/15197
restored_epoch = initial_epoch + monitored_metric.index(callback.best)
metrics_logger.record_metrics({"restored_epoch": restored_epoch})
restored_index = history.epoch.index(restored_epoch)
restored_metrics = {
key: metrics[restored_index] for key, metrics in history.history.items()
}
# Checking that a metric history exists
metric_key = next(iter(history.history), None)
if metric_key is not None:
metrics_logger.record_metrics(restored_metrics, stopped_epoch + 1)
def _log_keras_model(history, args):
def _infer_model_signature(input_data_slice):
# In certain TensorFlow versions, calling `predict()` on model may modify
# the `stop_training` attribute, so we save and restore it accordingly
original_stop_training = history.model.stop_training
model_output = history.model.predict(input_data_slice)
history.model.stop_training = original_stop_training
return infer_signature(input_data_slice, model_output)
from mlflow.tensorflow._autolog import extract_tf_keras_input_example
def _get_tf_keras_input_example_slice():
input_training_data = args[0]
keras_input_example_slice = extract_tf_keras_input_example(input_training_data)
if keras_input_example_slice is None:
raise MlflowException(
"Cannot log input example or model signature for input with type"
f" {type(input_training_data)}. TensorFlow Keras autologging can"
" only log input examples and model signatures for the following"
" input types: numpy.ndarray, dict[string -> numpy.ndarray],"
" tensorflow.keras.utils.Sequence, and"
" tensorflow.data.Dataset (TensorFlow >= 2.1.0 required)",
INVALID_PARAMETER_VALUE,
)
return keras_input_example_slice
input_example, signature = resolve_input_example_and_signature(
_get_tf_keras_input_example_slice,
_infer_model_signature,
log_input_examples,
log_model_signatures,
_logger,
)
log_model(
model=history.model,
artifact_path="model",
input_example=input_example,
signature=signature,
registered_model_name=get_autologging_config(
FLAVOR_NAME, "registered_model_name", None
),
saved_model_kwargs=saved_model_kwargs,
keras_model_kwargs=keras_model_kwargs,
)
class FitPatch(PatchFunction):
def __init__(self):
self.log_dir = None
def _patch_implementation(
self, original, inst, *args, **kwargs
): # pylint: disable=arguments-differ
unlogged_params = ["self", "x", "y", "callbacks", "validation_data", "verbose"]
batch_size = None
try:
is_single_input_model = isinstance(inst.input_shape, tuple)
training_data = kwargs["x"] if "x" in kwargs else args[0]
if isinstance(training_data, tensorflow.data.Dataset) and hasattr(
training_data, "_batch_size"
):
batch_size = training_data._batch_size.numpy()
elif isinstance(training_data, tensorflow.keras.utils.Sequence):
first_batch_inputs, _ = training_data[0]
if is_single_input_model:
batch_size = len(first_batch_inputs)
else:
batch_size = len(first_batch_inputs[0])
elif is_iterator(training_data):
peek = next(training_data)
if is_single_input_model:
batch_size = len(peek[0])
else:
batch_size = len(peek[0][0])
def __restore_generator(prev_generator):
yield peek
yield from prev_generator
restored_generator = __restore_generator(training_data)
if "x" in kwargs:
kwargs["x"] = restored_generator
else:
args = (restored_generator,) + args[1:]
except Exception as e:
_logger.warning(
"Encountered unexpected error while inferring batch size from training"
" dataset: %s",
e,
)
if batch_size is not None:
mlflow.log_param("batch_size", batch_size)
unlogged_params.append("batch_size")
log_fn_args_as_params(original, args, kwargs, unlogged_params)
run_id = mlflow.active_run().info.run_id
with batch_metrics_logger(run_id) as metrics_logger:
# Check if the 'callback' argument of fit() is set positionally
if len(args) >= 6:
# Convert the positional training function arguments to a list in order to
# mutate the contents
args = list(args)
# Make a shallow copy of the preexisting callbacks to avoid permanently
# modifying their contents for future training invocations. Introduce
# TensorBoard & tf.keras callbacks if necessary
callbacks = list(args[5])
callbacks, self.log_dir = _setup_callbacks(callbacks, metrics_logger)
# Replace the callbacks positional entry in the copied arguments and convert
# the arguments back to tuple form for usage in the training function
args[5] = callbacks
args = tuple(args)
else:
# Make a shallow copy of the preexisting callbacks and introduce TensorBoard
# & tf.keras callbacks if necessary
callbacks = list(kwargs.get("callbacks") or [])
kwargs["callbacks"], self.log_dir = _setup_callbacks(callbacks, metrics_logger)
early_stop_callback = _get_early_stop_callback(callbacks)
_log_early_stop_callback_params(early_stop_callback)
if log_datasets:
try:
context_tags = context_registry.resolve_tags()
source = CodeDatasetSource(tags=context_tags)
x = kwargs["x"] if "x" in kwargs else args[0]
if "y" in kwargs:
y = kwargs["y"]
elif len(args) >= 2:
y = args[1]
else:
y = None
if "validation_data" in kwargs:
validation_data = kwargs["validation_data"]
elif len(args) >= 8:
validation_data = args[7]
else:
validation_data = None
_log_tensorflow_dataset(x, source, "train", targets=y)
if validation_data is not None:
_log_tensorflow_dataset(validation_data, source, "eval")
except Exception as e:
_logger.warning(
"Failed to log training dataset information to "
"MLflow Tracking. Reason: %s",
e,
)
history = original(inst, *args, **kwargs)
if log_models:
_log_keras_model(history, args)
_log_early_stop_callback_metrics(
callback=early_stop_callback,
history=history,
metrics_logger=metrics_logger,
)
_flush_queue()
mlflow.log_artifacts(
local_dir=self.log_dir.location,
artifact_path="tensorboard_logs",
)
if self.log_dir.is_temp:
shutil.rmtree(self.log_dir.location)
return history
def _on_exception(self, exception):
if (
self.log_dir is not None
and self.log_dir.is_temp
and os.path.exists(self.log_dir.location)
):
shutil.rmtree(self.log_dir.location)
managed = [
(tensorflow.keras.Model, "fit", FitPatch),
]
for p in managed:
safe_patch(FLAVOR_NAME, *p, manage_run=True)
def _log_tensorflow_dataset(tensorflow_dataset, source, context, name=None, targets=None):
import tensorflow
# create a dataset
if isinstance(tensorflow_dataset, np.ndarray):
dataset = from_numpy(features=tensorflow_dataset, targets=targets, source=source, name=name)
elif isinstance(tensorflow_dataset, tensorflow.Tensor):
dataset = from_tensorflow(
features=tensorflow_dataset, targets=targets, source=source, name=name
)
elif isinstance(tensorflow_dataset, tensorflow.data.Dataset):
dataset = from_tensorflow(features=tensorflow_dataset, source=source, name=name)
elif isinstance(tensorflow_dataset, tuple):
x = tensorflow_dataset[0]
y = tensorflow_dataset[1]
# check if x and y are tensors
if isinstance(x, tensorflow.Tensor) and isinstance(y, tensorflow.Tensor):
dataset = from_tensorflow(features=x, source=source, targets=y, name=name)
else:
dataset = from_numpy(features=x, targets=y, source=source, name=name)
else:
_logger.warning(
"Unrecognized dataset type %s. Dataset logging skipped.", type(tensorflow_dataset)
)
return
mlflow.log_input(dataset, context)
|
PypiClean
|
/desc-opt-0.10.0.tar.gz/desc-opt-0.10.0/desc/backend.py
|
import os
import warnings
import numpy as np
from termcolor import colored
import desc
from desc import config as desc_config
from desc import set_device
if os.environ.get("DESC_BACKEND") == "numpy":
jnp = np
use_jax = False
set_device(kind="cpu")
print(
"DESC version {}, using numpy backend, version={}, dtype={}".format(
desc.__version__, np.__version__, np.linspace(0, 1).dtype
)
)
else:
if desc_config.get("device") is None:
set_device("cpu")
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import jax
import jax.numpy as jnp
import jaxlib
from jax.config import config as jax_config
jax_config.update("jax_enable_x64", True)
if desc_config.get("kind") == "gpu" and len(jax.devices("gpu")) == 0:
warnings.warn(
"JAX failed to detect GPU, are you sure you "
+ "installed JAX with GPU support?"
)
set_device("cpu")
x = jnp.linspace(0, 5)
y = jnp.exp(x)
use_jax = True
print(
f"DESC version {desc.__version__},"
+ f"using JAX backend, jax version={jax.__version__}, "
+ f"jaxlib version={jaxlib.__version__}, dtype={y.dtype}"
)
del x, y
except ModuleNotFoundError:
jnp = np
x = jnp.linspace(0, 5)
y = jnp.exp(x)
use_jax = False
set_device(kind="cpu")
warnings.warn(colored("Failed to load JAX", "red"))
print(
"DESC version {}, using NumPy backend, version={}, dtype={}".format(
desc.__version__, np.__version__, y.dtype
)
)
print(
"Using device: {}, with {:.2f} GB available memory".format(
desc_config.get("device"), desc_config.get("avail_mem")
)
)
if use_jax: # noqa: C901 - FIXME: simplify this, define globally and then assign?
jit = jax.jit
fori_loop = jax.lax.fori_loop
cond = jax.lax.cond
switch = jax.lax.switch
while_loop = jax.lax.while_loop
vmap = jax.vmap
bincount = jnp.bincount
from jax.experimental.ode import odeint
from jax.scipy.linalg import block_diag, cho_factor, cho_solve, qr, solve_triangular
from jax.scipy.special import gammaln, logsumexp
from jax.tree_util import register_pytree_node
def put(arr, inds, vals):
"""Functional interface for array "fancy indexing".
Provides a way to do arr[inds] = vals in a way that works with JAX.
Parameters
----------
arr : array-like
Array to populate
inds : array-like of int
Indices to populate
vals : array-like
Values to insert
Returns
-------
arr : array-like
Input array with vals inserted at inds.
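    Examples
    --------
    A small illustration (values chosen arbitrarily)::
        put(jnp.zeros(3), jnp.array([0, 2]), jnp.array([1.0, 2.0]))  # -> [1., 0., 2.]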
"""
return jnp.asarray(arr).at[inds].set(vals)
def sign(x):
"""Sign function, but returns 1 for x==0.
Parameters
----------
x : array-like
array of input values
Returns
-------
y : array-like
1 where x>=0, -1 where x<0
"""
x = jnp.atleast_1d(x)
y = jnp.where(x == 0, 1, jnp.sign(x))
return y
else:
jit = lambda func, *args, **kwargs: func
from scipy.integrate import odeint # noqa: F401
from scipy.linalg import ( # noqa: F401
block_diag,
cho_factor,
cho_solve,
qr,
solve_triangular,
)
from scipy.special import gammaln, logsumexp # noqa: F401
def register_pytree_node(foo, *args):
"""Dummy decorator for non-jax pytrees."""
return foo
def put(arr, inds, vals):
"""Functional interface for array "fancy indexing".
Provides a way to do arr[inds] = vals in a way that works with JAX.
Parameters
----------
arr : array-like
Array to populate
inds : array-like of int
Indices to populate
vals : array-like
Values to insert
Returns
-------
arr : array-like
Input array with vals inserted at inds.
"""
arr[inds] = vals
return arr
def sign(x):
"""Sign function, but returns 1 for x==0.
Parameters
----------
x : array-like
array of input values
Returns
-------
y : array-like
1 where x>=0, -1 where x<0
"""
x = np.atleast_1d(x)
y = np.where(x == 0, 1, np.sign(x))
return y
def fori_loop(lower, upper, body_fun, init_val):
"""Loop from lower to upper, applying body_fun to init_val.
This version is for the numpy backend, for jax backend see jax.lax.fori_loop
Parameters
----------
lower : int
an integer representing the loop index lower bound (inclusive)
upper : int
an integer representing the loop index upper bound (exclusive)
body_fun : callable
function of type ``(int, a) -> a``.
init_val : array-like or container
initial loop carry value of type ``a``
Returns
-------
final_val: array-like or container
Loop value from the final iteration, of type ``a``.
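    Examples
    --------
    A small illustration (loop body chosen arbitrarily)::
        fori_loop(0, 5, lambda i, s: s + i, 0)  # 0 + 1 + 2 + 3 + 4 -> 10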
"""
val = init_val
for i in np.arange(lower, upper):
val = body_fun(i, val)
return val
def cond(pred, true_fun, false_fun, *operand):
"""Conditionally apply true_fun or false_fun.
This version is for the numpy backend, for jax backend see jax.lax.cond
Parameters
----------
pred: bool
which branch function to apply.
true_fun: callable
Function (A -> B), to be applied if pred is True.
false_fun: callable
Function (A -> B), to be applied if pred is False.
operand: any
input to either branch depending on pred. The type can be a scalar, array,
or any pytree (nested Python tuple/list/dict) thereof.
Returns
-------
value: any
value of either true_fun(operand) or false_fun(operand), depending on the
value of pred. The type can be a scalar, array, or any pytree (nested
Python tuple/list/dict) thereof.
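    Examples
    --------
    A small illustration (branch functions chosen arbitrarily)::
        cond(True, lambda x: x + 1, lambda x: x - 1, 10)  # -> 11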
"""
if pred:
return true_fun(*operand)
else:
return false_fun(*operand)
def switch(index, branches, operand):
"""Apply exactly one of branches given by index.
If index is out of bounds, it is clamped to within bounds.
Parameters
----------
index: int
which branch function to apply.
branches: Sequence[Callable]
sequence of functions (A -> B) to be applied based on index.
operand: any
input to whichever branch is applied.
Returns
-------
value: any
output of branches[index](operand)
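    Examples
    --------
    A small illustration (branch functions chosen arbitrarily)::
        switch(1, [lambda x: x + 1, lambda x: x * 2], 10)  # -> 20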
"""
index = np.clip(index, 0, len(branches) - 1)
return branches[index](operand)
def while_loop(cond_fun, body_fun, init_val):
"""Call body_fun repeatedly in a loop while cond_fun is True.
Parameters
----------
cond_fun: callable
function of type a -> bool.
body_fun: callable
function of type a -> a.
init_val: any
value of type a, a type that can be a scalar, array, or any pytree (nested
Python tuple/list/dict) thereof, representing the initial loop carry value.
Returns
-------
value: any
The output from the final iteration of body_fun, of type a.
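    Examples
    --------
    A small illustration (functions chosen arbitrarily)::
        while_loop(lambda x: x < 10, lambda x: x * 2, 1)  # 1 -> 2 -> 4 -> 8 -> 16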
"""
val = init_val
while cond_fun(val):
val = body_fun(val)
return val
def vmap(fun, out_axes=0):
"""A numpy implementation of jax.lax.map whose API is a subset of jax.vmap.
Like Python's builtin map,
except inputs and outputs are in the form of stacked arrays,
and the returned object is a vectorized version of the input function.
Parameters
----------
fun: callable
Function (A -> B)
out_axes: int
An integer indicating where the mapped axis should appear in the output.
Returns
-------
fun_vmap: callable
Vectorized version of fun.
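    Examples
    --------
    A small illustration (function chosen arbitrarily)::
        vmap(lambda x: x * 2)(np.array([1, 2, 3]))  # -> array([2, 4, 6])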
"""
def fun_vmap(fun_inputs):
return np.stack([fun(fun_input) for fun_input in fun_inputs], axis=out_axes)
return fun_vmap
def bincount(x, weights=None, minlength=None, length=None):
"""Same as np.bincount but with a dummy parameter to match jnp.bincount API."""
return np.bincount(x, weights, minlength)
|
PypiClean
|
/CaMo-0.0.5-py3-none-any.whl/camo/estimate/methods.py
|
from inspect import getmembers, ismethod
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from ..utils import _as_set, _try_get
estimators = dict(getmembers(smf, ismethod))
def g_formula(
data: pd.DataFrame,
X: str,
Y: str,
Z: str = None,
estimator: str = "ols"
) -> float:
# Try get value from estimators
estimator = _try_get(estimator, estimators)
# Build the formula
Z = _as_set(Z)
formula = f"{Y} ~ {X}"
if Z:
formula += " + " + " + ".join(Z)
# Fit the estimator
estimator = estimator(formula, data)
estimator = estimator.fit()
# Helper function
def _fill_copy(data, x):
data = data[[X, *Z]].copy()
data[X] = x
return data
# Estimate E[Y|do(X=0),Z] and E[Y|do(X=1),Z]
estimates = (
_fill_copy(data, x)
for x in (0, 1)
)
estimates = [
estimator.predict(x)
for x in estimates
]
return estimates
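# Illustration only (the column names "t", "y" and "age" below are hypothetical): the two
# returned prediction series can be averaged to estimate the average treatment effect.
#   y0, y1 = g_formula(df, X="t", Y="y", Z={"age"})
#   ate = (y1 - y0).mean()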
def propensity_score(
data: pd.DataFrame,
X: str,
Y: str,
Z: str = None,
estimator: str = "logit"
) -> float:
# Try get value from estimators
estimator = _try_get(estimator, estimators)
Z = _as_set(Z)
if Z:
# Build the formula
formula = f"{X} ~ " + " + ".join(Z)
# Fit the estimator
estimator = estimator(formula, data)
estimator = estimator.fit()
# Compute the propensity given Z
propensity = estimator.predict(data)
else:
# Compute the propensity without Z
propensity = np.mean(data[X])
        propensity = np.full((len(data), ), propensity)
return propensity
def ipw(
data: pd.DataFrame,
X: str,
Y: str,
Z: str = None,
estimator: str = "logit"
) -> float:
# Compute the propensity score
propensity = propensity_score(data, X, Y, Z, estimator)
# Compute the complement propensity
complement = data.index[data[X] == 0]
propensity[complement] = 1 - propensity[complement]
# Estimate E[Y|do(X=0),Z] and E[Y|do(X=1),Z]
estimates = [
# Reweight data to get pseudo-population
(data[X] == x) / propensity * data[Y]
for x in (0, 1)
]
return estimates
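# Illustration only (column names are hypothetical): as with g_formula, the two
# reweighted outcome series yield an inverse-probability-weighting ATE estimate.
#   w0, w1 = ipw(df, X="t", Y="y", Z={"age"})
#   ate = w1.mean() - w0.mean()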
|
PypiClean
|
/nexuscloud-client-1.0.9.tar.gz/nexuscloud-client-1.0.9/nexuscloud_client/model/nexus_insights_api_v1_vcenter_summary_get200_response.py
|
import re # noqa: F401
import sys # noqa: F401
from nexuscloud_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from nexuscloud_client.exceptions import ApiAttributeError
def lazy_import():
from nexuscloud_client.model.nexus_insights_api_v1_vcenter_summary_get200_response_entries_inner import NexusInsightsApiV1VcenterSummaryGet200ResponseEntriesInner
globals()['NexusInsightsApiV1VcenterSummaryGet200ResponseEntriesInner'] = NexusInsightsApiV1VcenterSummaryGet200ResponseEntriesInner
class NexusInsightsApiV1VcenterSummaryGet200Response(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for a top-level attribute var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and, for a top-level attribute var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'entries': ([NexusInsightsApiV1VcenterSummaryGet200ResponseEntriesInner],), # noqa: E501
'total_object_count': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'entries': 'entries', # noqa: E501
'total_object_count': 'totalObjectCount', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""NexusInsightsApiV1VcenterSummaryGet200Response - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
entries ([NexusInsightsApiV1VcenterSummaryGet200ResponseEntriesInner]): [optional] # noqa: E501
total_object_count (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NexusInsightsApiV1VcenterSummaryGet200Response - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
entries ([NexusInsightsApiV1VcenterSummaryGet200ResponseEntriesInner]): [optional] # noqa: E501
total_object_count (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/azure_eventhub-5.11.4-py3-none-any.whl/azure/eventhub/_pyamqp/aio/_management_operation_async.py
|
import logging
import uuid
import time
from functools import partial
from ._management_link_async import ManagementLink
from ..error import (
AMQPLinkError,
ErrorCondition
)
from ..constants import (
ManagementOpenResult,
ManagementExecuteOperationResult
)
_LOGGER = logging.getLogger(__name__)
class ManagementOperation(object):
def __init__(self, session, endpoint='$management', **kwargs):
self._mgmt_link_open_status = None
self._session = session
self._connection = self._session._connection
self._network_trace_params = {
"amqpConnection": self._session._connection._container_id,
"amqpSession": self._session.name,
"amqpLink": None
}
self._mgmt_link = self._session.create_request_response_link_pair(
endpoint=endpoint,
on_amqp_management_open_complete=self._on_amqp_management_open_complete,
on_amqp_management_error=self._on_amqp_management_error,
**kwargs
) # type: ManagementLink
self._responses = {}
self._mgmt_error = None
async def _on_amqp_management_open_complete(self, result):
"""Callback run when the send/receive links are open and ready
to process messages.
:param result: Whether the link opening was successful.
:type result: int
"""
self._mgmt_link_open_status = result
async def _on_amqp_management_error(self):
"""Callback run if an error occurs in the send/receive links."""
# TODO: This probably shouldn't be ValueError
self._mgmt_error = ValueError("Management Operation error occurred.")
async def _on_execute_operation_complete(
self,
operation_id,
operation_result,
status_code,
status_description,
raw_message,
error=None
):
_LOGGER.debug(
"Management operation completed, id: %r; result: %r; code: %r; description: %r, error: %r",
operation_id,
operation_result,
status_code,
status_description,
error,
extra=self._network_trace_params
)
if operation_result in\
(ManagementExecuteOperationResult.ERROR, ManagementExecuteOperationResult.LINK_CLOSED):
self._mgmt_error = error
_LOGGER.error(
"Failed to complete management operation due to error: %r.",
error,
extra=self._network_trace_params
)
else:
self._responses[operation_id] = (status_code, status_description, raw_message)
async def execute(self, message, operation=None, operation_type=None, timeout=0):
start_time = time.time()
operation_id = str(uuid.uuid4())
self._responses[operation_id] = None
self._mgmt_error = None
await self._mgmt_link.execute_operation(
message,
partial(self._on_execute_operation_complete, operation_id),
timeout=timeout,
operation=operation,
type=operation_type
)
while not self._responses[operation_id] and not self._mgmt_error:
if timeout and timeout > 0:
now = time.time()
if (now - start_time) >= timeout:
raise TimeoutError("Failed to receive mgmt response in {}ms".format(timeout))
await self._connection.listen()
if self._mgmt_error:
self._responses.pop(operation_id)
raise self._mgmt_error # pylint: disable=raising-bad-type
response = self._responses.pop(operation_id)
return response
async def open(self):
self._mgmt_link_open_status = ManagementOpenResult.OPENING
await self._mgmt_link.open()
async def ready(self):
try:
raise self._mgmt_error # pylint: disable=raising-bad-type
except TypeError:
pass
if self._mgmt_link_open_status == ManagementOpenResult.OPENING:
return False
if self._mgmt_link_open_status == ManagementOpenResult.OK:
return True
# ManagementOpenResult.ERROR or CANCELLED
# TODO: update below with correct status code + info
raise AMQPLinkError(
condition=ErrorCondition.ClientError,
description="Failed to open mgmt link, management link status: {}".format(self._mgmt_link_open_status),
info=None
)
async def close(self):
await self._mgmt_link.close()
|
PypiClean
|
/Pootle-2.9.0b3.tar.bz2/Pootle-2.9.0b3/pootle/static/translations/pt/djangojs.js
|
(function(globals) {
var django = globals.django || (globals.django = {});
django.pluralidx = function(n) {
var v=(n != 1);
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
/* gettext library */
django.catalog = django.catalog || {};
var newcatalog = {
"#%(position)s": "#%(position)s",
"%(count)s language matches your query.": [
"%(count)s idioma coincidente com a sua consulta.",
"%(count)s idiomas coincidentes com a sua consulta."
],
"%(count)s project matches your query.": [
"%(count)s projeto corresponde com a sua consulta.",
"%(count)s projetos correspondem com a sua consulta."
],
"%(count)s user matches your query.": [
"%(count)s utilizador coincidente com a sua consulta.",
"%(count)s utilizadores coincidentes com a sua consulta."
],
"%(timeSince)s via file upload": "%(timeSince)s atrav\u00e9s de envio de ficheiro",
"%s word": [
"%s palavra",
"%s palavras"
],
"%s's accepted suggestions": "Sugest\u00f5es aceites para %s",
"%s's overwritten submissions": "Submiss\u00f5es substitu\u00eddas para %s",
"%s's pending suggestions": "Sugest\u00f5es pendentes para %s",
"%s's rejected suggestions": "Sugest\u00f5es rejeitadas para %s",
"%s's submissions": "Submiss\u00f5es de %s",
"Accept": "Aceitar",
"Account Activation": "Ativa\u00e7\u00e3o de conta",
"Account Inactive": "Conta inativa",
"Active": "Ativo",
"Add Language": "Adicionar idioma",
"Add Project": "Adicionar projeto",
"Add User": "Adicionar utilizador",
"Administrator": "Administrador",
"After changing your password you will sign in automatically.": "Depois de alterar a sua palavra-passe, a sua sess\u00e3o ser\u00e1 iniciada automaticamente.",
"All Languages": "Todos os idiomas",
"All Projects": "Todos os projetos",
"An error occurred while attempting to sign in via %s.": "Ocorreu um erro ao tentar iniciar sess\u00e3o via %s.",
"An error occurred while attempting to sign in via your social account.": "Ocorreu um erro ao tentar iniciar a sess\u00e3o atrav\u00e9s da sua conta social.",
"Avatar": "Avatar",
"Cancel": "Cancelar",
"Clear all": "Limpar tudo",
"Clear value": "Limpar valor",
"Close": "Fechar",
"Code": "C\u00f3digo",
"Collapse details": "Ocultar detalhes",
"Congratulations! You have completed this task!": "Parab\u00e9ns! Concluiu esta tarefa!",
"Contact Us": "Contacte-nos",
"Contributors, 30 Days": "Colaboradores, 30 dias",
"Creating new user accounts is prohibited.": "\u00c9 proibido criar novas contas de utilizador.",
"Delete": "Apagar",
"Deleted successfully.": "Apagada com sucesso.",
"Didn't receive an email? Check if it was accidentally filtered out as spam, or try requesting another copy of the email.": "N\u00e3o recebeu o e-mail? Verifique se esta est\u00e1 na pasta de spam ou tente solicitar uma outra c\u00f3pia do e-mail.",
"Disabled": "Desativado",
"Discard changes.": "Rejeitar altera\u00e7\u00f5es.",
"Edit Language": "Editar idioma",
"Edit My Public Profile": "Editar o meu perfil p\u00fablico",
"Edit Project": "Editar projeto",
"Edit User": "Editar utilizador",
"Edit the suggestion before accepting, if necessary": "Se necess\u00e1rio, editar sugest\u00e3o antes de aceitar",
"Email": "E-mail",
"Email Address": "Endere\u00e7o de e-mail",
"Email Confirmation": "Mensagem de confirma\u00e7\u00e3o",
"Enter your email address, and we will send you a message with the special link to reset your password.": "Insira o seu endere\u00e7o de e-mail, e n\u00f3s iremos enviar-lhe uma mensagem com uma liga\u00e7\u00e3o especial para redefinir a sua palavra-passe.",
"Error while connecting to the server": "Ocorreu um erro ao ligar ao servidor.",
"Expand details": "Expandir detalhes",
"File types": "Tipos de ficheiro",
"Filesystems": "Sistema de ficheiros",
"Find language by name, code": "Encontrar idiomas por nome, c\u00f3digo",
"Find project by name, code": "Encontrar projeto pelo nome, c\u00f3digo",
"Find user by name, email, properties": "Encontrar utilizador por nome, e-mail, propriedades",
"Full Name": "Nome completo",
"Go back to browsing": "Voltar para a navega\u00e7\u00e3o",
"Go to the next string (Ctrl+.)<br/><br/>Also:<br/>Next page: Ctrl+Shift+.<br/>Last page: Ctrl+Shift+End": "Ir para a linha seguinte (Ctrl+.)<br/><br/>Tamb\u00e9m:<br/>P\u00e1gina seguinte: Ctrl+Shift+.<br/>\u00daltima p\u00e1gina: Ctrl+Shift+End",
"Go to the previous string (Ctrl+,)<br/><br/>Also:<br/>Previous page: Ctrl+Shift+,<br/>First page: Ctrl+Shift+Home": "Ir para a linha anterior (Ctrl+,)<br/><br/>Tamb\u00e9m:<br/>P\u00e1gina anterior: Ctrl+Shift+,<br/>Primeira p\u00e1gina: Ctrl+Shift+Home",
"Hide": "Ocultar",
"Hide disabled": "Ocultar desativados",
"I forgot my password": "Eu esqueci-me da minha palavra-passe",
"Ignore Files": "Ignorar ficheiros",
"Languages": "Idiomas",
"Less": "Menos",
"LinkedIn": "LinkedIn",
"LinkedIn profile URL": "URL do perfil LinkedIn",
"Load More": "Carregar mais",
"Loading...": "A carregar...",
"Login / Password": "Sess\u00e3o/Palavra-passe",
"More": "Mais",
"More...": "Mais...",
"My Public Profile": "O meu perfil p\u00fablico",
"No": "N\u00e3o",
"No activity recorded in a given period": "N\u00e3o existe atividade no per\u00edodo indicado.",
"No results found": "N\u00e3o foram encontrados resultados",
"No results.": "Nenhum resultado.",
"No, thanks": "N\u00e3o, obrigado",
"Not found": "N\u00e3o encontrado",
"Note: when deleting a user their contributions to the site, e.g. comments, suggestions and translations, are attributed to the anonymous user (nobody).": "Nota: se eliminar um utilizador, todos os contributos desse utilizador, por exemplo coment\u00e1rios, sugest\u00f5es e tradu\u00e7\u00f5es ser\u00e3o atribu\u00eddos ao utilizador an\u00f3nimo (ningu\u00e9m).",
"Number of Plurals": "N\u00famero de plurais",
"Oops...": "Ups...",
"Overview": "Sinopse",
"Password": "Palavra-passe",
"Password changed, signing in...": "Palavra-passe alterada, a iniciar sess\u00e3o...",
"Permissions": "Permiss\u00f5es",
"Personal description": "Descri\u00e7\u00e3o pessoal",
"Personal website URL": "URL do site pessoal",
"Please follow that link to continue the account creation.": "Por favor, siga esta liga\u00e7\u00e3o para continuar com a cria\u00e7\u00e3o da conta.",
"Please follow that link to continue the password reset procedure.": "Por favor, siga essa liga\u00e7\u00e3o para continuar com a reposi\u00e7\u00e3o da palavra-passe.",
"Please select a valid user.": "Por favor, selecione um utilizador v\u00e1lido.",
"Plural Equation": "Equa\u00e7\u00e3o plural",
"Plural form %(index)s": "Forma plural %(index)s",
"Preview will be displayed here.": "A pr\u00e9-visualiza\u00e7\u00e3o ser\u00e1 mostrada aqui.",
"Project / Language": "Projeto/Idioma",
"Project Tree Style": "Estilo de \u00e1rvore do projeto",
"Provide optional comment (will be publicly visible)": "Disponibilizar coment\u00e1rio opcional (vis\u00edvel ao p\u00fablico)",
"Public Profile": "Perfil p\u00fablico",
"Quality Checks": "Verifica\u00e7\u00f5es de qualidade",
"Reject": "Rejeitar",
"Reload page": "Recarregar p\u00e1gina",
"Repeat Password": "Repetir palavra-passe",
"Resend Email": "Reenviar mensagem",
"Reset Password": "Repor palavra-passe",
"Reset Your Password": "Repor a sua palavra-passe",
"Reviewed": "Revista",
"Save": "Guardar",
"Saved successfully.": "Guardada com sucesso.",
"Score Change": "Altera\u00e7\u00e3o de pontua\u00e7\u00e3o",
"Screenshot Search Prefix": "Prefixo para a captura de ecr\u00e3 da procura",
"Search Languages": "Procurar idiomas",
"Search Projects": "Procurar projetos",
"Search Users": "Procurar utilizadores",
"Select...": "Selecionar...",
"Send Email": "Enviar e-mail",
"Sending email to %s...": "A enviar e-mail para %s...",
"Server error": "Erro de servidor",
"Set New Password": "Definir nova palavra-passe",
"Set a new password": "Defina uma nova palavra-passe",
"Settings": "Defini\u00e7\u00f5es",
"Short Bio": "Biografia",
"Show": "Mostrar",
"Show disabled": "Mostrar desativados",
"Sign In": "Iniciar sess\u00e3o",
"Sign In With %s": "Iniciar sess\u00e3o com %s",
"Sign In With...": "Iniciar sess\u00e3o com...",
"Sign Up": "Registar",
"Sign in as an existing user": "Iniciar sess\u00e3o como utilizador existente",
"Sign up as a new user": "Registar como um novo utilizador",
"Signed in. Redirecting...": "Sess\u00e3o iniciada. A redirecionar...",
"Signing in with an external service for the first time will automatically create an account for you.": "Ao iniciar a sess\u00e3o com um servi\u00e7o externo pela primeira vez, ir\u00e1 criar automaticamente uma nova conta.",
"Similar translations": "Tradu\u00e7\u00f5es similares",
"Social Services": "Servi\u00e7os sociais",
"Social Verification": "Verifica\u00e7\u00e3o social",
"Source Language": "Idioma original",
"Special Characters": "Carateres especiais",
"String Errors Contact": "Contacto para erros nas cadeias",
"Suggested": "Sugerida",
"Team": "Equipa",
"The password reset link was invalid, possibly because it has already been used. Please request a new password reset.": "A liga\u00e7\u00e3o de reposi\u00e7\u00e3o de palavra-passe era inv\u00e1lida, possivelmente porque j\u00e1 foi utilizada. Por favor, solicite uma nova reposi\u00e7\u00e3o de palavra-passe.",
"The server seems down. Try again later.": "Parece que o servidor est\u00e1 desligado. Tente mais tarde.",
"There are unsaved changes. Do you want to discard them?": "Existem altera\u00e7\u00f5es n\u00e3o guardadas. Pretende rejeit\u00e1-las?",
"There is %(count)s language.": [
"Existe %(count)s idioma.",
"Existem %(count)s idiomas. Em baixo est\u00e3o as adi\u00e7\u00f5es mais recentes."
],
"There is %(count)s project.": [
"Existe %(count)s projeto.",
"Existem %(count)s projetos. Em baixo est\u00e3o as adi\u00e7\u00f5es mais recentes."
],
"There is %(count)s user.": [
"Existe %(count)s utilizador.",
"Existem %(count)s utilizadores. Em baixo est\u00e3o os adicionados mais recentemente."
],
"This email confirmation link expired or is invalid.": "Esta liga\u00e7\u00e3o de confirma\u00e7\u00e3o de e-mail expirou ou \u00e9 inv\u00e1lida.",
"This string no longer exists.": "Esta linha j\u00e1 n\u00e3o existe.",
"To set or change your avatar for your email address (%(email)s), please go to gravatar.com.": "Para definir ou alterar o seu avatar do seu endere\u00e7o de e-mail (%(email)s), por favor, v\u00e1 para gravatar.com.",
"Translated": "Traduzida",
"Translated by %(fullname)s in \u201c<span title=\"%(path)s\">%(project)s</span>\u201d project": "Traduzido por %(fullname)s no projeto \u201c<span title=\"%(path)s\">%(project)s</span>\u201d",
"Translated by %(fullname)s in \u201c<span title=\"%(path)s\">%(project)s</span>\u201d project %(time_ago)s": "Traduzido por %(fullname)s no projeto \u201c<span title=\"%(path)s\">%(project)s</span>\u201d a %(time_ago)s",
"Try again": "Tentar novamente",
"Twitter": "Twitter",
"Twitter username": "Nome de utilizador do Twitter",
"Type to search": "Digite para procurar",
"Updating data": "A atualizar dados",
"Use the search form to find the language, then click on a language to edit.": "Utilize o campo de procura para encontrar o idioma e clique no idioma para o editar.",
"Use the search form to find the project, then click on a project to edit.": "Utilize o campo de procura para encontrar o projeto e clique no projeto para o editar.",
"Use the search form to find the user, then click on a user to edit.": "Utilize o campo de procura para encontrar o utilizador e clique no utilizador para o editar.",
"Username": "Nome de utilizador",
"We found a user with <span>%(email)s</span> email in our system. Please provide the password to finish the sign in procedure. This is a one-off procedure, which will establish a link between your Pootle and %(provider)s accounts.": "Encontr\u00e1mos, no nosso sistema, um utilizador com o e-mail <span>%(email)s</span>. Por favor, introduza a palavra-passe para terminar o in\u00edcio da sess\u00e3o. Este \u00e9 um procedimento \u00fanico, que ir\u00e1 estabelecer uma associa\u00e7\u00e3o entre o seu Pootle e as contas %(provider)s.",
"We have sent an email containing the special link to <span>%s</span>": "Envi\u00e1mos um e-mail com uma liga\u00e7\u00e3o especial para <span>%s</span>",
"We have sent an email containing the special link to <span>%s</span>. Please check your spam folder if you do not see the email.": "Envi\u00e1mos uma mensagem com uma hiperliga\u00e7\u00e3o especial para <span>%s</span>. Verifique a sua pasta de Spam caso n\u00e3o veja a mensagem.",
"We have sent an email containing the special link to the address used to register this account. Please check your spam folder if you do not see the email.": "Envi\u00e1mos uma mensagem com uma hiperliga\u00e7\u00e3o especial para o endere\u00e7o utilizado no registo desta conta. Verifique a sua pasta de Spam caso n\u00e3o veja a mensagem. ",
"Website": "Site",
"Why are you part of our translation project? Describe yourself, inspire others!": "Porque \u00e9 que faz parte do nosso projeto de tradu\u00e7\u00e3o? Descreva-se, e inspire as outras pessoas!",
"Yes": "Sim",
"You have unsaved changes in this string. Navigating away will discard those changes.": "Tem altera\u00e7\u00f5es n\u00e3o guardadas nesta linha. Se sair da mesma, estas ser\u00e3o ignoradas.",
"Your Full Name": "O seu nome completo",
"Your LinkedIn profile URL": "O URL do seu perfil LinkedIn",
"Your Personal website/blog URL": "O URL do seu site da Web/blogue Pessoal",
"Your Twitter username": "O seu nome de utilizador do Twitter",
"Your account is inactive because an administrator deactivated it.": "A sua conta est\u00e1 inativa porque foi desativada por um administrador.",
"Your account needs activation.": "A sua conta precisa de ser ativada.",
"disabled": "desativado",
"some anonymous user": "algum utilizador an\u00f3nimo",
"someone": "algu\u00e9m"
};
for (var key in newcatalog) {
django.catalog[key] = newcatalog[key];
}
if (!django.jsi18n_initialized) {
django.gettext = function(msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function(singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function(msgid) { return msgid; };
django.pgettext = function(context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function(context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
django.interpolate = function(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {
"DATETIME_FORMAT": "N j, Y, P",
"DATETIME_INPUT_FORMATS": [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M:%S.%f",
"%Y-%m-%d %H:%M",
"%Y-%m-%d",
"%m/%d/%Y %H:%M:%S",
"%m/%d/%Y %H:%M:%S.%f",
"%m/%d/%Y %H:%M",
"%m/%d/%Y",
"%m/%d/%y %H:%M:%S",
"%m/%d/%y %H:%M:%S.%f",
"%m/%d/%y %H:%M",
"%m/%d/%y"
],
"DATE_FORMAT": "N j, Y",
"DATE_INPUT_FORMATS": [
"%Y-%m-%d",
"%m/%d/%Y",
"%m/%d/%y",
"%b %d %Y",
"%b %d, %Y",
"%d %b %Y",
"%d %b, %Y",
"%B %d %Y",
"%B %d, %Y",
"%d %B %Y",
"%d %B, %Y"
],
"DECIMAL_SEPARATOR": ".",
"FIRST_DAY_OF_WEEK": "0",
"MONTH_DAY_FORMAT": "F j",
"NUMBER_GROUPING": "0",
"SHORT_DATETIME_FORMAT": "m/d/Y P",
"SHORT_DATE_FORMAT": "m/d/Y",
"THOUSAND_SEPARATOR": ",",
"TIME_FORMAT": "P",
"TIME_INPUT_FORMATS": [
"%H:%M:%S",
"%H:%M:%S.%f",
"%H:%M"
],
"YEAR_MONTH_FORMAT": "F Y"
};
django.get_format = function(format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
django.jsi18n_initialized = true;
}
}(this));
/django-admin-berry-1.0.10.tar.gz/django-admin-berry-1.0.10/admin_berry/static/assets/js/plugins/simplebar.min.js
!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t=t||self).SimpleBar=e()}(this,(function(){"use strict";var t="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{};function e(t,e){return t(e={exports:{}},e.exports),e.exports}var r,n,i=function(t){return t&&t.Math==Math&&t},o=i("object"==typeof globalThis&&globalThis)||i("object"==typeof window&&window)||i("object"==typeof self&&self)||i("object"==typeof t&&t)||function(){return this}()||Function("return this")(),s=Object.defineProperty,a=function(t,e){try{s(o,t,{value:e,configurable:!0,writable:!0})}catch(r){o[t]=e}return e},c=o["__core-js_shared__"]||a("__core-js_shared__",{}),l=e((function(t){(t.exports=function(t,e){return c[t]||(c[t]=void 0!==e?e:{})})("versions",[]).push({version:"3.22.6",mode:"global",copyright:"© 2014-2022 Denis Pushkarev (zloirock.ru)",license:"https://github.com/zloirock/core-js/blob/v3.22.6/LICENSE",source:"https://github.com/zloirock/core-js"})})),u=function(t){try{return!!t()}catch(t){return!0}},f=!u((function(){var t=function(){}.bind();return"function"!=typeof t||t.hasOwnProperty("prototype")})),h=Function.prototype,d=h.bind,p=h.call,v=f&&d.bind(p,p),g=f?function(t){return t&&v(t)}:function(t){return t&&function(){return p.apply(t,arguments)}},b=o.TypeError,y=function(t){if(null==t)throw b("Can't call method on "+t);return t},m=o.Object,x=function(t){return m(y(t))},E=g({}.hasOwnProperty),w=Object.hasOwn||function(t,e){return E(x(t),e)},O=0,S=Math.random(),A=g(1..toString),k=function(t){return"Symbol("+(void 0===t?"":t)+")_"+A(++O+S,36)},T=function(t){return"function"==typeof t},L=function(t){return T(t)?t:void 0},R=function(t,e){return arguments.length<2?L(o[t]):o[t]&&o[t][e]},_=R("navigator","userAgent")||"",j=o.process,z=o.Deno,M=j&&j.versions||z&&z.version,C=M&&M.v8;C&&(n=(r=C.split("."))[0]>0&&r[0]<4?1:+(r[0]+r[1])),!n&&_&&(!(r=_.match(/Edge\/(\d+)/))||r[1]>=74)&&(r=_.match(/Chrome\/(\d+)/))&&(n=+r[1]);var N=n,W=!!Object.getOwnPropertySymbols&&!u((function(){var t=Symbol();return!String(t)||!(Object(t)instanceof Symbol)||!Symbol.sham&&N&&N<41})),I=W&&!Symbol.sham&&"symbol"==typeof Symbol.iterator,P=l("wks"),B=o.Symbol,D=B&&B.for,F=I?B:B&&B.withoutSetter||k,V=function(t){if(!w(P,t)||!W&&"string"!=typeof P[t]){var e="Symbol."+t;W&&w(B,t)?P[t]=B[t]:P[t]=I&&D?D(e):F(e)}return P[t]},$={};$[V("toStringTag")]="z";var X="[object z]"===String($),H=!u((function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]})),q=function(t){return"object"==typeof t?null!==t:T(t)},Y=o.document,G=q(Y)&&q(Y.createElement),U=function(t){return G?Y.createElement(t):{}},K=!H&&!u((function(){return 7!=Object.defineProperty(U("div"),"a",{get:function(){return 7}}).a})),J=H&&u((function(){return 42!=Object.defineProperty((function(){}),"prototype",{value:42,writable:!1}).prototype})),Q=o.String,Z=o.TypeError,tt=function(t){if(q(t))return t;throw Z(Q(t)+" is not an object")},et=Function.prototype.call,rt=f?et.bind(et):function(){return et.apply(et,arguments)},nt=g({}.isPrototypeOf),it=o.Object,ot=I?function(t){return"symbol"==typeof t}:function(t){var e=R("Symbol");return T(e)&&nt(e.prototype,it(t))},st=o.String,at=function(t){try{return st(t)}catch(t){return"Object"}},ct=o.TypeError,lt=function(t){if(T(t))return t;throw ct(at(t)+" is not a function")},ut=function(t,e){var r=t[e];return null==r?void 
0:lt(r)},ft=o.TypeError,ht=o.TypeError,dt=V("toPrimitive"),pt=function(t,e){if(!q(t)||ot(t))return t;var r,n=ut(t,dt);if(n){if(void 0===e&&(e="default"),r=rt(n,t,e),!q(r)||ot(r))return r;throw ht("Can't convert object to primitive value")}return void 0===e&&(e="number"),function(t,e){var r,n;if("string"===e&&T(r=t.toString)&&!q(n=rt(r,t)))return n;if(T(r=t.valueOf)&&!q(n=rt(r,t)))return n;if("string"!==e&&T(r=t.toString)&&!q(n=rt(r,t)))return n;throw ft("Can't convert object to primitive value")}(t,e)},vt=function(t){var e=pt(t,"string");return ot(e)?e:e+""},gt=o.TypeError,bt=Object.defineProperty,yt=Object.getOwnPropertyDescriptor,mt={f:H?J?function(t,e,r){if(tt(t),e=vt(e),tt(r),"function"==typeof t&&"prototype"===e&&"value"in r&&"writable"in r&&!r.writable){var n=yt(t,e);n&&n.writable&&(t[e]=r.value,r={configurable:"configurable"in r?r.configurable:n.configurable,enumerable:"enumerable"in r?r.enumerable:n.enumerable,writable:!1})}return bt(t,e,r)}:bt:function(t,e,r){if(tt(t),e=vt(e),tt(r),K)try{return bt(t,e,r)}catch(t){}if("get"in r||"set"in r)throw gt("Accessors not supported");return"value"in r&&(t[e]=r.value),t}},xt=function(t,e){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:e}},Et=H?function(t,e,r){return mt.f(t,e,xt(1,r))}:function(t,e,r){return t[e]=r,t},wt=Function.prototype,Ot=H&&Object.getOwnPropertyDescriptor,St=w(wt,"name"),At={EXISTS:St,PROPER:St&&"something"===function(){}.name,CONFIGURABLE:St&&(!H||H&&Ot(wt,"name").configurable)},kt=g(Function.toString);T(c.inspectSource)||(c.inspectSource=function(t){return kt(t)});var Tt,Lt,Rt,_t=c.inspectSource,jt=o.WeakMap,zt=T(jt)&&/native code/.test(_t(jt)),Mt=l("keys"),Ct=function(t){return Mt[t]||(Mt[t]=k(t))},Nt={},Wt=o.TypeError,It=o.WeakMap;if(zt||c.state){var Pt=c.state||(c.state=new It),Bt=g(Pt.get),Dt=g(Pt.has),Ft=g(Pt.set);Tt=function(t,e){if(Dt(Pt,t))throw new Wt("Object already initialized");return e.facade=t,Ft(Pt,t,e),e},Lt=function(t){return Bt(Pt,t)||{}},Rt=function(t){return Dt(Pt,t)}}else{var Vt=Ct("state");Nt[Vt]=!0,Tt=function(t,e){if(w(t,Vt))throw new Wt("Object already initialized");return e.facade=t,Et(t,Vt,e),e},Lt=function(t){return w(t,Vt)?t[Vt]:{}},Rt=function(t){return w(t,Vt)}}var $t={set:Tt,get:Lt,has:Rt,enforce:function(t){return Rt(t)?Lt(t):Tt(t,{})},getterFor:function(t){return function(e){var r;if(!q(e)||(r=Lt(e)).type!==t)throw Wt("Incompatible receiver, "+t+" required");return r}}},Xt=e((function(t){var e=At.CONFIGURABLE,r=$t.enforce,n=$t.get,i=Object.defineProperty,o=H&&!u((function(){return 8!==i((function(){}),"length",{value:8}).length})),s=String(String).split("String"),a=t.exports=function(t,n,a){if("Symbol("===String(n).slice(0,7)&&(n="["+String(n).replace(/^Symbol\(([^)]*)\)/,"$1")+"]"),a&&a.getter&&(n="get "+n),a&&a.setter&&(n="set "+n),(!w(t,"name")||e&&t.name!==n)&&i(t,"name",{value:n,configurable:!0}),o&&a&&w(a,"arity")&&t.length!==a.arity&&i(t,"length",{value:a.arity}),a&&w(a,"constructor")&&a.constructor){if(H)try{i(t,"prototype",{writable:!1})}catch(t){}}else t.prototype=void 0;var c=r(t);return w(c,"source")||(c.source=s.join("string"==typeof n?n:"")),t};Function.prototype.toString=a((function(){return T(this)&&n(this).source||_t(this)}),"toString")})),Ht=function(t,e,r,n){n||(n={});var i=n.enumerable,o=void 0!==n.name?n.name:e;return T(r)&&Xt(r,o,n),n.global?i?t[e]=r:a(e,r):(n.unsafe?t[e]&&(i=!0):delete t[e],i?t[e]=r:Et(t,e,r)),t},qt=g({}.toString),Yt=g("".slice),Gt=function(t){return 
Yt(qt(t),8,-1)},Ut=V("toStringTag"),Kt=o.Object,Jt="Arguments"==Gt(function(){return arguments}()),Qt=X?Gt:function(t){var e,r,n;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(r=function(t,e){try{return t[e]}catch(t){}}(e=Kt(t),Ut))?r:Jt?Gt(e):"Object"==(n=Gt(e))&&T(e.callee)?"Arguments":n},Zt=X?{}.toString:function(){return"[object "+Qt(this)+"]"};X||Ht(Object.prototype,"toString",Zt,{unsafe:!0});var te={CSSRuleList:0,CSSStyleDeclaration:0,CSSValueList:0,ClientRectList:0,DOMRectList:0,DOMStringList:0,DOMTokenList:1,DataTransferItemList:0,FileList:0,HTMLAllCollection:0,HTMLCollection:0,HTMLFormElement:0,HTMLSelectElement:0,MediaList:0,MimeTypeArray:0,NamedNodeMap:0,NodeList:1,PaintRequestList:0,Plugin:0,PluginArray:0,SVGLengthList:0,SVGNumberList:0,SVGPathSegList:0,SVGPointList:0,SVGStringList:0,SVGTransformList:0,SourceBufferList:0,StyleSheetList:0,TextTrackCueList:0,TextTrackList:0,TouchList:0},ee=U("span").classList,re=ee&&ee.constructor&&ee.constructor.prototype,ne=re===Object.prototype?void 0:re,ie=g(g.bind),oe=function(t,e){return lt(t),void 0===e?t:f?ie(t,e):function(){return t.apply(e,arguments)}},se=o.Object,ae=g("".split),ce=u((function(){return!se("z").propertyIsEnumerable(0)}))?function(t){return"String"==Gt(t)?ae(t,""):se(t)}:se,le=Math.ceil,ue=Math.floor,fe=Math.trunc||function(t){var e=+t;return(e>0?ue:le)(e)},he=function(t){var e=+t;return e!=e||0===e?0:fe(e)},de=Math.min,pe=function(t){return t>0?de(he(t),9007199254740991):0},ve=function(t){return pe(t.length)},ge=Array.isArray||function(t){return"Array"==Gt(t)},be=function(){},ye=[],me=R("Reflect","construct"),xe=/^\s*(?:class|function)\b/,Ee=g(xe.exec),we=!xe.exec(be),Oe=function(t){if(!T(t))return!1;try{return me(be,ye,t),!0}catch(t){return!1}},Se=function(t){if(!T(t))return!1;switch(Qt(t)){case"AsyncFunction":case"GeneratorFunction":case"AsyncGeneratorFunction":return!1}try{return we||!!Ee(xe,_t(t))}catch(t){return!0}};Se.sham=!0;var Ae=!me||u((function(){var t;return Oe(Oe.call)||!Oe(Object)||!Oe((function(){t=!0}))||t}))?Se:Oe,ke=V("species"),Te=o.Array,Le=function(t,e){return new(function(t){var e;return ge(t)&&(e=t.constructor,(Ae(e)&&(e===Te||ge(e.prototype))||q(e)&&null===(e=e[ke]))&&(e=void 0)),void 0===e?Te:e}(t))(0===e?0:e)},Re=g([].push),_e=function(t){var e=1==t,r=2==t,n=3==t,i=4==t,o=6==t,s=7==t,a=5==t||o;return function(c,l,u,f){for(var h,d,p=x(c),v=ce(p),g=oe(l,u),b=ve(v),y=0,m=f||Le,E=e?m(c,b):r||s?m(c,0):void 0;b>y;y++)if((a||y in v)&&(d=g(h=v[y],y,p),t))if(e)E[y]=d;else if(d)switch(t){case 3:return!0;case 5:return h;case 6:return y;case 2:Re(E,h)}else switch(t){case 4:return!1;case 7:Re(E,h)}return o?-1:n||i?i:E}},je={forEach:_e(0),map:_e(1),filter:_e(2),some:_e(3),every:_e(4),find:_e(5),findIndex:_e(6),filterReject:_e(7)},ze=function(t,e){var r=[][t];return!!r&&u((function(){r.call(null,e||function(){return 1},1)}))},Me=je.forEach,Ce=ze("forEach")?[].forEach:function(t){return Me(this,t,arguments.length>1?arguments[1]:void 0)},Ne=function(t){if(t&&t.forEach!==Ce)try{Et(t,"forEach",Ce)}catch(e){t.forEach=Ce}};for(var We in te)te[We]&&Ne(o[We]&&o[We].prototype);Ne(ne);var Ie=!("undefined"==typeof window||!window.document||!window.document.createElement),Pe={}.propertyIsEnumerable,Be=Object.getOwnPropertyDescriptor,De={f:Be&&!Pe.call({1:2},1)?function(t){var e=Be(this,t);return!!e&&e.enumerable}:Pe},Fe=function(t){return ce(y(t))},Ve=Object.getOwnPropertyDescriptor,$e={f:H?Ve:function(t,e){if(t=Fe(t),e=vt(e),K)try{return Ve(t,e)}catch(t){}if(w(t,e))return 
xt(!rt(De.f,t,e),t[e])}},Xe=Math.max,He=Math.min,qe=function(t,e){var r=he(t);return r<0?Xe(r+e,0):He(r,e)},Ye=function(t){return function(e,r,n){var i,o=Fe(e),s=ve(o),a=qe(n,s);if(t&&r!=r){for(;s>a;)if((i=o[a++])!=i)return!0}else for(;s>a;a++)if((t||a in o)&&o[a]===r)return t||a||0;return!t&&-1}},Ge={includes:Ye(!0),indexOf:Ye(!1)}.indexOf,Ue=g([].push),Ke=function(t,e){var r,n=Fe(t),i=0,o=[];for(r in n)!w(Nt,r)&&w(n,r)&&Ue(o,r);for(;e.length>i;)w(n,r=e[i++])&&(~Ge(o,r)||Ue(o,r));return o},Je=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"],Qe=Je.concat("length","prototype"),Ze={f:Object.getOwnPropertyNames||function(t){return Ke(t,Qe)}},tr={f:Object.getOwnPropertySymbols},er=g([].concat),rr=R("Reflect","ownKeys")||function(t){var e=Ze.f(tt(t)),r=tr.f;return r?er(e,r(t)):e},nr=function(t,e,r){for(var n=rr(e),i=mt.f,o=$e.f,s=0;s<n.length;s++){var a=n[s];w(t,a)||r&&w(r,a)||i(t,a,o(e,a))}},ir=/#|\.prototype\./,or=function(t,e){var r=ar[sr(t)];return r==lr||r!=cr&&(T(e)?u(e):!!e)},sr=or.normalize=function(t){return String(t).replace(ir,".").toLowerCase()},ar=or.data={},cr=or.NATIVE="N",lr=or.POLYFILL="P",ur=or,fr=$e.f,hr=function(t,e){var r,n,i,s,c,l=t.target,u=t.global,f=t.stat;if(r=u?o:f?o[l]||a(l,{}):(o[l]||{}).prototype)for(n in e){if(s=e[n],i=t.dontCallGetSet?(c=fr(r,n))&&c.value:r[n],!ur(u?n:l+(f?".":"#")+n,t.forced)&&void 0!==i){if(typeof s==typeof i)continue;nr(s,i)}(t.sham||i&&i.sham)&&Et(s,"sham",!0),Ht(r,n,s,t)}},dr=o.String,pr=function(t){if("Symbol"===Qt(t))throw TypeError("Cannot convert a Symbol value to a string");return dr(t)},vr="\t\n\v\f\r \u2028\u2029\ufeff",gr=g("".replace),br="["+vr+"]",yr=RegExp("^"+br+br+"*"),mr=RegExp(br+br+"*$"),xr=function(t){return function(e){var r=pr(y(e));return 1&t&&(r=gr(r,yr,"")),2&t&&(r=gr(r,mr,"")),r}},Er={start:xr(1),end:xr(2),trim:xr(3)}.trim,wr=o.parseInt,Or=o.Symbol,Sr=Or&&Or.iterator,Ar=/^[+-]?0x/i,kr=g(Ar.exec),Tr=8!==wr(vr+"08")||22!==wr(vr+"0x16")||Sr&&!u((function(){wr(Object(Sr))}))?function(t,e){var r=Er(pr(t));return wr(r,e>>>0||(kr(Ar,r)?16:10))}:wr;hr({global:!0,forced:parseInt!=Tr},{parseInt:Tr});var Lr=Object.keys||function(t){return Ke(t,Je)},Rr=Object.assign,_r=Object.defineProperty,jr=g([].concat),zr=!Rr||u((function(){if(H&&1!==Rr({b:1},Rr(_r({},"a",{enumerable:!0,get:function(){_r(this,"b",{value:3,enumerable:!1})}}),{b:2})).b)return!0;var t={},e={},r=Symbol();return t[r]=7,"abcdefghijklmnopqrst".split("").forEach((function(t){e[t]=t})),7!=Rr({},t)[r]||"abcdefghijklmnopqrst"!=Lr(Rr({},e)).join("")}))?function(t,e){for(var r=x(t),n=arguments.length,i=1,o=tr.f,s=De.f;n>i;)for(var a,c=ce(arguments[i++]),l=o?jr(Lr(c),o(c)):Lr(c),u=l.length,f=0;u>f;)a=l[f++],H&&!rt(s,c,a)||(r[a]=c[a]);return r}:Rr;hr({target:"Object",stat:!0,arity:2,forced:Object.assign!==zr},{assign:zr});var Mr,Cr=V("species"),Nr=je.filter,Wr=(Mr="filter",N>=51||!u((function(){var t=[];return(t.constructor={})[Cr]=function(){return{foo:1}},1!==t[Mr](Boolean).foo})));hr({target:"Array",proto:!0,forced:!Wr},{filter:function(t){return Nr(this,t,arguments.length>1?arguments[1]:void 0)}});var Ir,Pr={f:H&&!J?Object.defineProperties:function(t,e){tt(t);for(var r,n=Fe(e),i=Lr(e),o=i.length,s=0;o>s;)mt.f(t,r=i[s++],n[r]);return t}},Br=R("document","documentElement"),Dr=Ct("IE_PROTO"),Fr=function(){},Vr=function(t){return"<script>"+t+"<\/script>"},$r=function(t){t.write(Vr("")),t.close();var e=t.parentWindow.Object;return t=null,e},Xr=function(){try{Ir=new ActiveXObject("htmlfile")}catch(t){}var 
t,e;Xr="undefined"!=typeof document?document.domain&&Ir?$r(Ir):((e=U("iframe")).style.display="none",Br.appendChild(e),e.src=String("javascript:"),(t=e.contentWindow.document).open(),t.write(Vr("document.F=Object")),t.close(),t.F):$r(Ir);for(var r=Je.length;r--;)delete Xr.prototype[Je[r]];return Xr()};Nt[Dr]=!0;var Hr=Object.create||function(t,e){var r;return null!==t?(Fr.prototype=tt(t),r=new Fr,Fr.prototype=null,r[Dr]=t):r=Xr(),void 0===e?r:Pr.f(r,e)},qr=mt.f,Yr=V("unscopables"),Gr=Array.prototype;null==Gr[Yr]&&qr(Gr,Yr,{configurable:!0,value:Hr(null)});var Ur,Kr,Jr,Qr=function(t){Gr[Yr][t]=!0},Zr={},tn=!u((function(){function t(){}return t.prototype.constructor=null,Object.getPrototypeOf(new t)!==t.prototype})),en=Ct("IE_PROTO"),rn=o.Object,nn=rn.prototype,on=tn?rn.getPrototypeOf:function(t){var e=x(t);if(w(e,en))return e[en];var r=e.constructor;return T(r)&&e instanceof r?r.prototype:e instanceof rn?nn:null},sn=V("iterator"),an=!1;[].keys&&("next"in(Jr=[].keys())?(Kr=on(on(Jr)))!==Object.prototype&&(Ur=Kr):an=!0),(null==Ur||u((function(){var t={};return Ur[sn].call(t)!==t})))&&(Ur={}),T(Ur[sn])||Ht(Ur,sn,(function(){return this}));var cn={IteratorPrototype:Ur,BUGGY_SAFARI_ITERATORS:an},ln=mt.f,un=V("toStringTag"),fn=function(t,e,r){t&&!r&&(t=t.prototype),t&&!w(t,un)&&ln(t,un,{configurable:!0,value:e})},hn=cn.IteratorPrototype,dn=function(){return this},pn=o.String,vn=o.TypeError,gn=Object.setPrototypeOf||("__proto__"in{}?function(){var t,e=!1,r={};try{(t=g(Object.getOwnPropertyDescriptor(Object.prototype,"__proto__").set))(r,[]),e=r instanceof Array}catch(t){}return function(r,n){return tt(r),function(t){if("object"==typeof t||T(t))return t;throw vn("Can't set "+pn(t)+" as a prototype")}(n),e?t(r,n):r.__proto__=n,r}}():void 0),bn=At.PROPER,yn=At.CONFIGURABLE,mn=cn.IteratorPrototype,xn=cn.BUGGY_SAFARI_ITERATORS,En=V("iterator"),wn=function(){return this},On=function(t,e,r,n,i,o,s){!function(t,e,r,n){var i=e+" Iterator";t.prototype=Hr(hn,{next:xt(+!n,r)}),fn(t,i,!1),Zr[i]=dn}(r,e,n);var a,c,l,u=function(t){if(t===i&&v)return v;if(!xn&&t in d)return d[t];switch(t){case"keys":case"values":case"entries":return function(){return new r(this,t)}}return function(){return new r(this)}},f=e+" Iterator",h=!1,d=t.prototype,p=d[En]||d["@@iterator"]||i&&d[i],v=!xn&&p||u(i),g="Array"==e&&d.entries||p;if(g&&(a=on(g.call(new t)))!==Object.prototype&&a.next&&(on(a)!==mn&&(gn?gn(a,mn):T(a[En])||Ht(a,En,wn)),fn(a,f,!0)),bn&&"values"==i&&p&&"values"!==p.name&&(yn?Et(d,"name","values"):(h=!0,v=function(){return rt(p,this)})),i)if(c={values:u("values"),keys:o?v:u("keys"),entries:u("entries")},s)for(l in c)(xn||h||!(l in d))&&Ht(d,l,c[l]);else hr({target:e,proto:!0,forced:xn||h},c);return d[En]!==v&&Ht(d,En,v,{name:i}),Zr[e]=v,c},Sn=mt.f,An=$t.set,kn=$t.getterFor("Array Iterator"),Tn=On(Array,"Array",(function(t,e){An(this,{type:"Array Iterator",target:Fe(t),index:0,kind:e})}),(function(){var t=kn(this),e=t.target,r=t.kind,n=t.index++;return!e||n>=e.length?(t.target=void 0,{value:void 0,done:!0}):"keys"==r?{value:n,done:!1}:"values"==r?{value:e[n],done:!1}:{value:[n,e[n]],done:!1}}),"values"),Ln=Zr.Arguments=Zr.Array;if(Qr("keys"),Qr("values"),Qr("entries"),H&&"values"!==Ln.name)try{Sn(Ln,"name",{value:"values"})}catch(t){}var Rn=g("".charAt),_n=g("".charCodeAt),jn=g("".slice),zn=function(t){return function(e,r){var n,i,o=pr(y(e)),s=he(r),a=o.length;return s<0||s>=a?t?"":void 
0:(n=_n(o,s))<55296||n>56319||s+1===a||(i=_n(o,s+1))<56320||i>57343?t?Rn(o,s):n:t?jn(o,s,s+2):i-56320+(n-55296<<10)+65536}},Mn={codeAt:zn(!1),charAt:zn(!0)},Cn=Mn.charAt,Nn=$t.set,Wn=$t.getterFor("String Iterator");On(String,"String",(function(t){Nn(this,{type:"String Iterator",string:pr(t),index:0})}),(function(){var t,e=Wn(this),r=e.string,n=e.index;return n>=r.length?{value:void 0,done:!0}:(t=Cn(r,n),e.index+=t.length,{value:t,done:!1})}));var In=function(t,e,r){for(var n in e)Ht(t,n,e[n],r);return t},Pn=o.Array,Bn=Math.max,Dn=Ze.f,Fn="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[],Vn=function(t){try{return Dn(t)}catch(t){return function(t,e,r){for(var n,i,o,s,a=ve(t),c=qe(e,a),l=qe(void 0===r?a:r,a),u=Pn(Bn(l-c,0)),f=0;c<l;c++,f++)n=u,i=f,o=t[c],s=void 0,(s=vt(i))in n?mt.f(n,s,xt(0,o)):n[s]=o;return u.length=f,u}(Fn)}},$n={f:function(t){return Fn&&"Window"==Gt(t)?Vn(t):Dn(Fe(t))}},Xn=u((function(){if("function"==typeof ArrayBuffer){var t=new ArrayBuffer(8);Object.isExtensible(t)&&Object.defineProperty(t,"a",{value:8})}})),Hn=Object.isExtensible,qn=u((function(){Hn(1)}))||Xn?function(t){return!!q(t)&&((!Xn||"ArrayBuffer"!=Gt(t))&&(!Hn||Hn(t)))}:Hn,Yn=!u((function(){return Object.isExtensible(Object.preventExtensions({}))})),Gn=e((function(t){var e=mt.f,r=!1,n=k("meta"),i=0,o=function(t){e(t,n,{value:{objectID:"O"+i++,weakData:{}}})},s=t.exports={enable:function(){s.enable=function(){},r=!0;var t=Ze.f,e=g([].splice),i={};i[n]=1,t(i).length&&(Ze.f=function(r){for(var i=t(r),o=0,s=i.length;o<s;o++)if(i[o]===n){e(i,o,1);break}return i},hr({target:"Object",stat:!0,forced:!0},{getOwnPropertyNames:$n.f}))},fastKey:function(t,e){if(!q(t))return"symbol"==typeof t?t:("string"==typeof t?"S":"P")+t;if(!w(t,n)){if(!qn(t))return"F";if(!e)return"E";o(t)}return t[n].objectID},getWeakData:function(t,e){if(!w(t,n)){if(!qn(t))return!0;if(!e)return!1;o(t)}return t[n].weakData},onFreeze:function(t){return Yn&&r&&qn(t)&&!w(t,n)&&o(t),t}};Nt[n]=!0})),Un=(Gn.enable,Gn.fastKey,Gn.getWeakData,Gn.onFreeze,V("iterator")),Kn=Array.prototype,Jn=V("iterator"),Qn=function(t){if(null!=t)return ut(t,Jn)||ut(t,"@@iterator")||Zr[Qt(t)]},Zn=o.TypeError,ti=function(t,e,r){var n,i;tt(t);try{if(!(n=ut(t,"return"))){if("throw"===e)throw r;return r}n=rt(n,t)}catch(t){i=!0,n=t}if("throw"===e)throw r;if(i)throw n;return tt(n),r},ei=o.TypeError,ri=function(t,e){this.stopped=t,this.result=e},ni=ri.prototype,ii=function(t,e,r){var n,i,o,s,a,c,l,u,f=r&&r.that,h=!(!r||!r.AS_ENTRIES),d=!(!r||!r.IS_ITERATOR),p=!(!r||!r.INTERRUPTED),v=oe(e,f),g=function(t){return n&&ti(n,"normal",t),new ri(!0,t)},b=function(t){return h?(tt(t),p?v(t[0],t[1],g):v(t[0],t[1])):p?v(t,g):v(t)};if(d)n=t;else{if(!(i=Qn(t)))throw ei(at(t)+" is not iterable");if(void 0!==(u=i)&&(Zr.Array===u||Kn[Un]===u)){for(o=0,s=ve(t);s>o;o++)if((a=b(t[o]))&&nt(ni,a))return a;return new ri(!1)}n=function(t,e){var r=arguments.length<2?Qn(t):e;if(lt(r))return tt(rt(r,t));throw Zn(at(t)+" is not iterable")}(t,i)}for(c=n.next;!(l=rt(c,n)).done;){try{a=b(l.value)}catch(t){ti(n,"throw",t)}if("object"==typeof a&&a&&nt(ni,a))return a}return new ri(!1)},oi=o.TypeError,si=function(t,e){if(nt(e,t))return t;throw oi("Incorrect invocation")},ai=V("iterator"),ci=!1;try{var li=0,ui={next:function(){return{done:!!li++}},return:function(){ci=!0}};ui[ai]=function(){return this},Array.from(ui,(function(){throw 2}))}catch(t){}var fi=Gn.getWeakData,hi=$t.set,di=$t.getterFor,pi=je.find,vi=je.findIndex,gi=g([].splice),bi=0,yi=function(t){return 
t.frozen||(t.frozen=new mi)},mi=function(){this.entries=[]},xi=function(t,e){return pi(t.entries,(function(t){return t[0]===e}))};mi.prototype={get:function(t){var e=xi(this,t);if(e)return e[1]},has:function(t){return!!xi(this,t)},set:function(t,e){var r=xi(this,t);r?r[1]=e:this.entries.push([t,e])},delete:function(t){var e=vi(this.entries,(function(e){return e[0]===t}));return~e&&gi(this.entries,e,1),!!~e}};var Ei,wi={getConstructor:function(t,e,r,n){var i=t((function(t,i){si(t,o),hi(t,{type:e,id:bi++,frozen:void 0}),null!=i&&ii(i,t[n],{that:t,AS_ENTRIES:r})})),o=i.prototype,s=di(e),a=function(t,e,r){var n=s(t),i=fi(tt(e),!0);return!0===i?yi(n).set(e,r):i[n.id]=r,t};return In(o,{delete:function(t){var e=s(this);if(!q(t))return!1;var r=fi(t);return!0===r?yi(e).delete(t):r&&w(r,e.id)&&delete r[e.id]},has:function(t){var e=s(this);if(!q(t))return!1;var r=fi(t);return!0===r?yi(e).has(t):r&&w(r,e.id)}}),In(o,r?{get:function(t){var e=s(this);if(q(t)){var r=fi(t);return!0===r?yi(e).get(t):r?r[e.id]:void 0}},set:function(t,e){return a(this,t,e)}}:{add:function(t){return a(this,t,!0)}}),i}},Oi=$t.enforce,Si=!o.ActiveXObject&&"ActiveXObject"in o,Ai=function(t){return function(){return t(this,arguments.length?arguments[0]:void 0)}},ki=function(t,e,r){var n=-1!==t.indexOf("Map"),i=-1!==t.indexOf("Weak"),s=n?"set":"add",a=o[t],c=a&&a.prototype,l=a,f={},h=function(t){var e=g(c[t]);Ht(c,t,"add"==t?function(t){return e(this,0===t?0:t),this}:"delete"==t?function(t){return!(i&&!q(t))&&e(this,0===t?0:t)}:"get"==t?function(t){return i&&!q(t)?void 0:e(this,0===t?0:t)}:"has"==t?function(t){return!(i&&!q(t))&&e(this,0===t?0:t)}:function(t,r){return e(this,0===t?0:t,r),this})};if(ur(t,!T(a)||!(i||c.forEach&&!u((function(){(new a).entries().next()})))))l=r.getConstructor(e,t,n,s),Gn.enable();else if(ur(t,!0)){var d=new l,p=d[s](i?{}:-0,1)!=d,v=u((function(){d.has(1)})),b=function(t,e){if(!e&&!ci)return!1;var r=!1;try{var n={};n[ai]=function(){return{next:function(){return{done:r=!0}}}},t(n)}catch(t){}return r}((function(t){new a(t)})),y=!i&&u((function(){for(var t=new a,e=5;e--;)t[s](e,e);return!t.has(-0)}));b||((l=e((function(t,e){si(t,c);var r=function(t,e,r){var n,i;return gn&&T(n=e.constructor)&&n!==r&&q(i=n.prototype)&&i!==r.prototype&&gn(t,i),t}(new a,t,l);return null!=e&&ii(e,r[s],{that:r,AS_ENTRIES:n}),r}))).prototype=c,c.constructor=l),(v||y)&&(h("delete"),h("has"),n&&h("get")),(y||p)&&h(s),i&&c.clear&&delete c.clear}return f[t]=l,hr({global:!0,constructor:!0,forced:l!=a},f),fn(l,t),i||r.setStrong(l,t,n),l}("WeakMap",Ai,wi);if(zt&&Si){Ei=wi.getConstructor(Ai,"WeakMap",!0),Gn.enable();var Ti=ki.prototype,Li=g(Ti.delete),Ri=g(Ti.has),_i=g(Ti.get),ji=g(Ti.set);In(Ti,{delete:function(t){if(q(t)&&!qn(t)){var e=Oi(this);return e.frozen||(e.frozen=new Ei),Li(this,t)||e.frozen.delete(t)}return Li(this,t)},has:function(t){if(q(t)&&!qn(t)){var e=Oi(this);return e.frozen||(e.frozen=new Ei),Ri(this,t)||e.frozen.has(t)}return Ri(this,t)},get:function(t){if(q(t)&&!qn(t)){var e=Oi(this);return e.frozen||(e.frozen=new Ei),Ri(this,t)?_i(this,t):e.frozen.get(t)}return _i(this,t)},set:function(t,e){if(q(t)&&!qn(t)){var r=Oi(this);r.frozen||(r.frozen=new Ei),Ri(this,t)?ji(this,t,e):r.frozen.set(t,e)}else ji(this,t,e);return this}})}var zi=V("iterator"),Mi=V("toStringTag"),Ci=Tn.values,Ni=function(t,e){if(t){if(t[zi]!==Ci)try{Et(t,zi,Ci)}catch(e){t[zi]=Ci}if(t[Mi]||Et(t,Mi,e),te[e])for(var r in Tn)if(t[r]!==Tn[r])try{Et(t,r,Tn[r])}catch(e){t[r]=Tn[r]}}};for(var Wi in te)Ni(o[Wi]&&o[Wi].prototype,Wi);Ni(ne,"DOMTokenList");var 
Ii=/^\s+|\s+$/g,Pi=/^[-+]0x[0-9a-f]+$/i,Bi=/^0b[01]+$/i,Di=/^0o[0-7]+$/i,Fi=parseInt,Vi="object"==typeof t&&t&&t.Object===Object&&t,$i="object"==typeof self&&self&&self.Object===Object&&self,Xi=Vi||$i||Function("return this")(),Hi=Object.prototype.toString,qi=Math.max,Yi=Math.min,Gi=function(){return Xi.Date.now()};function Ui(t,e,r){var n,i,o,s,a,c,l=0,u=!1,f=!1,h=!0;if("function"!=typeof t)throw new TypeError("Expected a function");function d(e){var r=n,o=i;return n=i=void 0,l=e,s=t.apply(o,r)}function p(t){return l=t,a=setTimeout(g,e),u?d(t):s}function v(t){var r=t-c;return void 0===c||r>=e||r<0||f&&t-l>=o}function g(){var t=Gi();if(v(t))return b(t);a=setTimeout(g,function(t){var r=e-(t-c);return f?Yi(r,o-(t-l)):r}(t))}function b(t){return a=void 0,h&&n?d(t):(n=i=void 0,s)}function y(){var t=Gi(),r=v(t);if(n=arguments,i=this,c=t,r){if(void 0===a)return p(c);if(f)return a=setTimeout(g,e),d(c)}return void 0===a&&(a=setTimeout(g,e)),s}return e=Ji(e)||0,Ki(r)&&(u=!!r.leading,o=(f="maxWait"in r)?qi(Ji(r.maxWait)||0,e):o,h="trailing"in r?!!r.trailing:h),y.cancel=function(){void 0!==a&&clearTimeout(a),l=0,n=c=i=a=void 0},y.flush=function(){return void 0===a?s:b(Gi())},y}function Ki(t){var e=typeof t;return!!t&&("object"==e||"function"==e)}function Ji(t){if("number"==typeof t)return t;if(function(t){return"symbol"==typeof t||function(t){return!!t&&"object"==typeof t}(t)&&"[object Symbol]"==Hi.call(t)}(t))return NaN;if(Ki(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=Ki(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(Ii,"");var r=Bi.test(t);return r||Di.test(t)?Fi(t.slice(2),r?2:8):Pi.test(t)?NaN:+t}var Qi=function(t,e,r){var n=!0,i=!0;if("function"!=typeof t)throw new TypeError("Expected a function");return Ki(r)&&(n="leading"in r?!!r.leading:n,i="trailing"in r?!!r.trailing:i),Ui(t,e,{leading:n,maxWait:e,trailing:i})},Zi=/^\s+|\s+$/g,to=/^[-+]0x[0-9a-f]+$/i,eo=/^0b[01]+$/i,ro=/^0o[0-7]+$/i,no=parseInt,io="object"==typeof t&&t&&t.Object===Object&&t,oo="object"==typeof self&&self&&self.Object===Object&&self,so=io||oo||Function("return this")(),ao=Object.prototype.toString,co=Math.max,lo=Math.min,uo=function(){return so.Date.now()};function fo(t){var e=typeof t;return!!t&&("object"==e||"function"==e)}function ho(t){if("number"==typeof t)return t;if(function(t){return"symbol"==typeof t||function(t){return!!t&&"object"==typeof t}(t)&&"[object Symbol]"==ao.call(t)}(t))return NaN;if(fo(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=fo(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(Zi,"");var r=eo.test(t);return r||ro.test(t)?no(t.slice(2),r?2:8):to.test(t)?NaN:+t}var po=function(t,e,r){var n,i,o,s,a,c,l=0,u=!1,f=!1,h=!0;if("function"!=typeof t)throw new TypeError("Expected a function");function d(e){var r=n,o=i;return n=i=void 0,l=e,s=t.apply(o,r)}function p(t){return l=t,a=setTimeout(g,e),u?d(t):s}function v(t){var r=t-c;return void 0===c||r>=e||r<0||f&&t-l>=o}function g(){var t=uo();if(v(t))return b(t);a=setTimeout(g,function(t){var r=e-(t-c);return f?lo(r,o-(t-l)):r}(t))}function b(t){return a=void 0,h&&n?d(t):(n=i=void 0,s)}function y(){var t=uo(),r=v(t);if(n=arguments,i=this,c=t,r){if(void 0===a)return p(c);if(f)return a=setTimeout(g,e),d(c)}return void 0===a&&(a=setTimeout(g,e)),s}return e=ho(e)||0,fo(r)&&(u=!!r.leading,o=(f="maxWait"in r)?co(ho(r.maxWait)||0,e):o,h="trailing"in r?!!r.trailing:h),y.cancel=function(){void 0!==a&&clearTimeout(a),l=0,n=c=i=a=void 0},y.flush=function(){return void 0===a?s:b(uo())},y},vo=/^\[object 
.+?Constructor\]$/,go="object"==typeof t&&t&&t.Object===Object&&t,bo="object"==typeof self&&self&&self.Object===Object&&self,yo=go||bo||Function("return this")();var mo=Array.prototype,xo=Function.prototype,Eo=Object.prototype,wo=yo["__core-js_shared__"],Oo=function(){var t=/[^.]+$/.exec(wo&&wo.keys&&wo.keys.IE_PROTO||"");return t?"Symbol(src)_1."+t:""}(),So=xo.toString,Ao=Eo.hasOwnProperty,ko=Eo.toString,To=RegExp("^"+So.call(Ao).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$"),Lo=mo.splice,Ro=Io(yo,"Map"),_o=Io(Object,"create");function jo(t){var e=-1,r=t?t.length:0;for(this.clear();++e<r;){var n=t[e];this.set(n[0],n[1])}}function zo(t){var e=-1,r=t?t.length:0;for(this.clear();++e<r;){var n=t[e];this.set(n[0],n[1])}}function Mo(t){var e=-1,r=t?t.length:0;for(this.clear();++e<r;){var n=t[e];this.set(n[0],n[1])}}function Co(t,e){for(var r,n,i=t.length;i--;)if((r=t[i][0])===(n=e)||r!=r&&n!=n)return i;return-1}function No(t){return!(!Bo(t)||(e=t,Oo&&Oo in e))&&(function(t){var e=Bo(t)?ko.call(t):"";return"[object Function]"==e||"[object GeneratorFunction]"==e}(t)||function(t){var e=!1;if(null!=t&&"function"!=typeof t.toString)try{e=!!(t+"")}catch(t){}return e}(t)?To:vo).test(function(t){if(null!=t){try{return So.call(t)}catch(t){}try{return t+""}catch(t){}}return""}(t));var e}function Wo(t,e){var r,n,i=t.__data__;return("string"==(n=typeof(r=e))||"number"==n||"symbol"==n||"boolean"==n?"__proto__"!==r:null===r)?i["string"==typeof e?"string":"hash"]:i.map}function Io(t,e){var r=function(t,e){return null==t?void 0:t[e]}(t,e);return No(r)?r:void 0}function Po(t,e){if("function"!=typeof t||e&&"function"!=typeof e)throw new TypeError("Expected a function");var r=function(){var n=arguments,i=e?e.apply(this,n):n[0],o=r.cache;if(o.has(i))return o.get(i);var s=t.apply(this,n);return r.cache=o.set(i,s),s};return r.cache=new(Po.Cache||Mo),r}function Bo(t){var e=typeof t;return!!t&&("object"==e||"function"==e)}jo.prototype.clear=function(){this.__data__=_o?_o(null):{}},jo.prototype.delete=function(t){return this.has(t)&&delete this.__data__[t]},jo.prototype.get=function(t){var e=this.__data__;if(_o){var r=e[t];return"__lodash_hash_undefined__"===r?void 0:r}return Ao.call(e,t)?e[t]:void 0},jo.prototype.has=function(t){var e=this.__data__;return _o?void 0!==e[t]:Ao.call(e,t)},jo.prototype.set=function(t,e){return this.__data__[t]=_o&&void 0===e?"__lodash_hash_undefined__":e,this},zo.prototype.clear=function(){this.__data__=[]},zo.prototype.delete=function(t){var e=this.__data__,r=Co(e,t);return!(r<0)&&(r==e.length-1?e.pop():Lo.call(e,r,1),!0)},zo.prototype.get=function(t){var e=this.__data__,r=Co(e,t);return r<0?void 0:e[r][1]},zo.prototype.has=function(t){return Co(this.__data__,t)>-1},zo.prototype.set=function(t,e){var r=this.__data__,n=Co(r,t);return n<0?r.push([t,e]):r[n][1]=e,this},Mo.prototype.clear=function(){this.__data__={hash:new jo,map:new(Ro||zo),string:new jo}},Mo.prototype.delete=function(t){return Wo(this,t).delete(t)},Mo.prototype.get=function(t){return Wo(this,t).get(t)},Mo.prototype.has=function(t){return Wo(this,t).has(t)},Mo.prototype.set=function(t,e){return Wo(this,t).set(t,e),this},Po.Cache=Mo;var Do,Fo=Po,Vo=[],$o="ResizeObserver loop completed with undelivered notifications.";!function(t){t.BORDER_BOX="border-box",t.CONTENT_BOX="content-box",t.DEVICE_PIXEL_CONTENT_BOX="device-pixel-content-box"}(Do||(Do={}));var Xo,Ho=function(t){return 
Object.freeze(t)},qo=function(t,e){this.inlineSize=t,this.blockSize=e,Ho(this)},Yo=function(){function t(t,e,r,n){return this.x=t,this.y=e,this.width=r,this.height=n,this.top=this.y,this.left=this.x,this.bottom=this.top+this.height,this.right=this.left+this.width,Ho(this)}return t.prototype.toJSON=function(){var t=this;return{x:t.x,y:t.y,top:t.top,right:t.right,bottom:t.bottom,left:t.left,width:t.width,height:t.height}},t.fromRect=function(e){return new t(e.x,e.y,e.width,e.height)},t}(),Go=function(t){return t instanceof SVGElement&&"getBBox"in t},Uo=function(t){if(Go(t)){var e=t.getBBox(),r=e.width,n=e.height;return!r&&!n}var i=t,o=i.offsetWidth,s=i.offsetHeight;return!(o||s||t.getClientRects().length)},Ko=function(t){var e,r;if(t instanceof Element)return!0;var n=null===(r=null===(e=t)||void 0===e?void 0:e.ownerDocument)||void 0===r?void 0:r.defaultView;return!!(n&&t instanceof n.Element)},Jo="undefined"!=typeof window?window:{},Qo=new WeakMap,Zo=/auto|scroll/,ts=/^tb|vertical/,es=/msie|trident/i.test(Jo.navigator&&Jo.navigator.userAgent),rs=function(t){return parseFloat(t||"0")},ns=function(t,e,r){return void 0===t&&(t=0),void 0===e&&(e=0),void 0===r&&(r=!1),new qo((r?e:t)||0,(r?t:e)||0)},is=Ho({devicePixelContentBoxSize:ns(),borderBoxSize:ns(),contentBoxSize:ns(),contentRect:new Yo(0,0,0,0)}),os=function(t,e){if(void 0===e&&(e=!1),Qo.has(t)&&!e)return Qo.get(t);if(Uo(t))return Qo.set(t,is),is;var r=getComputedStyle(t),n=Go(t)&&t.ownerSVGElement&&t.getBBox(),i=!es&&"border-box"===r.boxSizing,o=ts.test(r.writingMode||""),s=!n&&Zo.test(r.overflowY||""),a=!n&&Zo.test(r.overflowX||""),c=n?0:rs(r.paddingTop),l=n?0:rs(r.paddingRight),u=n?0:rs(r.paddingBottom),f=n?0:rs(r.paddingLeft),h=n?0:rs(r.borderTopWidth),d=n?0:rs(r.borderRightWidth),p=n?0:rs(r.borderBottomWidth),v=f+l,g=c+u,b=(n?0:rs(r.borderLeftWidth))+d,y=h+p,m=a?t.offsetHeight-y-t.clientHeight:0,x=s?t.offsetWidth-b-t.clientWidth:0,E=i?v+b:0,w=i?g+y:0,O=n?n.width:rs(r.width)-E-x,S=n?n.height:rs(r.height)-w-m,A=O+v+x+b,k=S+g+m+y,T=Ho({devicePixelContentBoxSize:ns(Math.round(O*devicePixelRatio),Math.round(S*devicePixelRatio),o),borderBoxSize:ns(A,k,o),contentBoxSize:ns(O,S,o),contentRect:new Yo(f,c,O,S)});return Qo.set(t,T),T},ss=function(t,e,r){var n=os(t,r),i=n.borderBoxSize,o=n.contentBoxSize,s=n.devicePixelContentBoxSize;switch(e){case Do.DEVICE_PIXEL_CONTENT_BOX:return s;case Do.BORDER_BOX:return i;default:return o}},as=function(t){var e=os(t);this.target=t,this.contentRect=e.contentRect,this.borderBoxSize=Ho([e.borderBoxSize]),this.contentBoxSize=Ho([e.contentBoxSize]),this.devicePixelContentBoxSize=Ho([e.devicePixelContentBoxSize])},cs=function(t){if(Uo(t))return 1/0;for(var e=0,r=t.parentNode;r;)e+=1,r=r.parentNode;return e},ls=function(){var t=1/0,e=[];Vo.forEach((function(r){if(0!==r.activeTargets.length){var n=[];r.activeTargets.forEach((function(e){var r=new as(e.target),i=cs(e.target);n.push(r),e.lastReportedSize=ss(e.target,e.observedBox),i<t&&(t=i)})),e.push((function(){r.callback.call(r.observer,n,r.observer)})),r.activeTargets.splice(0,r.activeTargets.length)}}));for(var r=0,n=e;r<n.length;r++){(0,n[r])()}return t},us=function(t){Vo.forEach((function(e){e.activeTargets.splice(0,e.activeTargets.length),e.skippedTargets.splice(0,e.skippedTargets.length),e.observationTargets.forEach((function(r){r.isActive()&&(cs(r.target)>t?e.activeTargets.push(r):e.skippedTargets.push(r))}))}))},fs=function(){var t,e=0;for(us(e);Vo.some((function(t){return t.activeTargets.length>0}));)e=ls(),us(e);return Vo.some((function(t){return 
t.skippedTargets.length>0}))&&("function"==typeof ErrorEvent?t=new ErrorEvent("error",{message:$o}):((t=document.createEvent("Event")).initEvent("error",!1,!1),t.message=$o),window.dispatchEvent(t)),e>0},hs=[],ds=function(t){if(!Xo){var e=0,r=document.createTextNode("");new MutationObserver((function(){return hs.splice(0).forEach((function(t){return t()}))})).observe(r,{characterData:!0}),Xo=function(){r.textContent=""+(e?e--:e++)}}hs.push(t),Xo()},ps=0,vs={attributes:!0,characterData:!0,childList:!0,subtree:!0},gs=["resize","load","transitionend","animationend","animationstart","animationiteration","keyup","keydown","mouseup","mousedown","mouseover","mouseout","blur","focus"],bs=function(t){return void 0===t&&(t=0),Date.now()+t},ys=!1,ms=new(function(){function t(){var t=this;this.stopped=!0,this.listener=function(){return t.schedule()}}return t.prototype.run=function(t){var e=this;if(void 0===t&&(t=250),!ys){ys=!0;var r,n=bs(t);r=function(){var r=!1;try{r=fs()}finally{if(ys=!1,t=n-bs(),!ps)return;r?e.run(1e3):t>0?e.run(t):e.start()}},ds((function(){requestAnimationFrame(r)}))}},t.prototype.schedule=function(){this.stop(),this.run()},t.prototype.observe=function(){var t=this,e=function(){return t.observer&&t.observer.observe(document.body,vs)};document.body?e():Jo.addEventListener("DOMContentLoaded",e)},t.prototype.start=function(){var t=this;this.stopped&&(this.stopped=!1,this.observer=new MutationObserver(this.listener),this.observe(),gs.forEach((function(e){return Jo.addEventListener(e,t.listener,!0)})))},t.prototype.stop=function(){var t=this;this.stopped||(this.observer&&this.observer.disconnect(),gs.forEach((function(e){return Jo.removeEventListener(e,t.listener,!0)})),this.stopped=!0)},t}()),xs=function(t){!ps&&t>0&&ms.start(),!(ps+=t)&&ms.stop()},Es=function(){function t(t,e){this.target=t,this.observedBox=e||Do.CONTENT_BOX,this.lastReportedSize={inlineSize:0,blockSize:0}}return t.prototype.isActive=function(){var t,e=ss(this.target,this.observedBox,!0);return t=this.target,Go(t)||function(t){switch(t.tagName){case"INPUT":if("image"!==t.type)break;case"VIDEO":case"AUDIO":case"EMBED":case"OBJECT":case"CANVAS":case"IFRAME":case"IMG":return!0}return!1}(t)||"inline"!==getComputedStyle(t).display||(this.lastReportedSize=e),this.lastReportedSize.inlineSize!==e.inlineSize||this.lastReportedSize.blockSize!==e.blockSize},t}(),ws=function(t,e){this.activeTargets=[],this.skippedTargets=[],this.observationTargets=[],this.observer=t,this.callback=e},Os=new WeakMap,Ss=function(t,e){for(var r=0;r<t.length;r+=1)if(t[r].target===e)return r;return-1},As=function(){function t(){}return t.connect=function(t,e){var r=new ws(t,e);Os.set(t,r)},t.observe=function(t,e,r){var n=Os.get(t),i=0===n.observationTargets.length;Ss(n.observationTargets,e)<0&&(i&&Vo.push(n),n.observationTargets.push(new Es(e,r&&r.box)),xs(1),ms.schedule())},t.unobserve=function(t,e){var r=Os.get(t),n=Ss(r.observationTargets,e),i=1===r.observationTargets.length;n>=0&&(i&&Vo.splice(Vo.indexOf(r),1),r.observationTargets.splice(n,1),xs(-1))},t.disconnect=function(t){var e=this,r=Os.get(t);r.observationTargets.slice().forEach((function(r){return e.unobserve(t,r.target)})),r.activeTargets.splice(0,r.activeTargets.length)},t}(),ks=function(){function t(t){if(0===arguments.length)throw new TypeError("Failed to construct 'ResizeObserver': 1 argument required, but only 0 present.");if("function"!=typeof t)throw new TypeError("Failed to construct 'ResizeObserver': The callback provided as parameter 1 is not a 
function.");As.connect(this,t)}return t.prototype.observe=function(t,e){if(0===arguments.length)throw new TypeError("Failed to execute 'observe' on 'ResizeObserver': 1 argument required, but only 0 present.");if(!Ko(t))throw new TypeError("Failed to execute 'observe' on 'ResizeObserver': parameter 1 is not of type 'Element");As.observe(this,t,e)},t.prototype.unobserve=function(t){if(0===arguments.length)throw new TypeError("Failed to execute 'unobserve' on 'ResizeObserver': 1 argument required, but only 0 present.");if(!Ko(t))throw new TypeError("Failed to execute 'unobserve' on 'ResizeObserver': parameter 1 is not of type 'Element");As.unobserve(this,t)},t.prototype.disconnect=function(){As.disconnect(this)},t.toString=function(){return"function ResizeObserver () { [polyfill code] }"},t}(),Ts=o.TypeError,Ls=function(t){return function(e,r,n,i){lt(r);var o=x(e),s=ce(o),a=ve(o),c=t?a-1:0,l=t?-1:1;if(n<2)for(;;){if(c in s){i=s[c],c+=l;break}if(c+=l,t?c<0:a<=c)throw Ts("Reduce of empty array with no initial value")}for(;t?c>=0:a>c;c+=l)c in s&&(i=r(i,s[c],c,o));return i}},Rs={left:Ls(!1),right:Ls(!0)},_s="process"==Gt(o.process),js=Rs.left,zs=ze("reduce");hr({target:"Array",proto:!0,forced:!zs||!_s&&N>79&&N<83},{reduce:function(t){var e=arguments.length;return js(this,t,e,e>1?arguments[1]:void 0)}});var Ms,Cs,Ns=function(){var t=tt(this),e="";return t.hasIndices&&(e+="d"),t.global&&(e+="g"),t.ignoreCase&&(e+="i"),t.multiline&&(e+="m"),t.dotAll&&(e+="s"),t.unicode&&(e+="u"),t.sticky&&(e+="y"),e},Ws=o.RegExp,Is=u((function(){var t=Ws("a","y");return t.lastIndex=2,null!=t.exec("abcd")})),Ps=Is||u((function(){return!Ws("a","y").sticky})),Bs={BROKEN_CARET:Is||u((function(){var t=Ws("^r","gy");return t.lastIndex=2,null!=t.exec("str")})),MISSED_STICKY:Ps,UNSUPPORTED_Y:Is},Ds=o.RegExp,Fs=u((function(){var t=Ds(".","s");return!(t.dotAll&&t.exec("\n")&&"s"===t.flags)})),Vs=o.RegExp,$s=u((function(){var t=Vs("(?<a>b)","g");return"b"!==t.exec("b").groups.a||"bc"!=="b".replace(t,"$<a>c")})),Xs=$t.get,Hs=l("native-string-replace",String.prototype.replace),qs=RegExp.prototype.exec,Ys=qs,Gs=g("".charAt),Us=g("".indexOf),Ks=g("".replace),Js=g("".slice),Qs=(Cs=/b*/g,rt(qs,Ms=/a/,"a"),rt(qs,Cs,"a"),0!==Ms.lastIndex||0!==Cs.lastIndex),Zs=Bs.BROKEN_CARET,ta=void 0!==/()??/.exec("")[1];(Qs||ta||Zs||Fs||$s)&&(Ys=function(t){var e,r,n,i,o,s,a,c=this,l=Xs(c),u=pr(t),f=l.raw;if(f)return f.lastIndex=c.lastIndex,e=rt(Ys,f,u),c.lastIndex=f.lastIndex,e;var h=l.groups,d=Zs&&c.sticky,p=rt(Ns,c),v=c.source,g=0,b=u;if(d&&(p=Ks(p,"y",""),-1===Us(p,"g")&&(p+="g"),b=Js(u,c.lastIndex),c.lastIndex>0&&(!c.multiline||c.multiline&&"\n"!==Gs(u,c.lastIndex-1))&&(v="(?: "+v+")",b=" "+b,g++),r=new RegExp("^(?:"+v+")",p)),ta&&(r=new RegExp("^"+v+"$(?!\\s)",p)),Qs&&(n=c.lastIndex),i=rt(qs,d?r:c,b),d?i?(i.input=Js(i.input,g),i[0]=Js(i[0],g),i.index=c.lastIndex,c.lastIndex+=i[0].length):c.lastIndex=0:Qs&&i&&(c.lastIndex=c.global?i.index+i[0].length:n),ta&&i&&i.length>1&&rt(Hs,i[0],r,(function(){for(o=1;o<arguments.length-2;o++)void 0===arguments[o]&&(i[o]=void 0)})),i&&h)for(i.groups=s=Hr(null),o=0;o<h.length;o++)s[(a=h[o])[0]]=i[a[1]];return i});var ea=Ys;hr({target:"RegExp",proto:!0,forced:/./.exec!==ea},{exec:ea});var ra=V("species"),na=RegExp.prototype,ia=function(t,e,r,n){var i=V(t),o=!u((function(){var e={};return e[i]=function(){return 7},7!=""[t](e)})),s=o&&!u((function(){var e=!1,r=/a/;return"split"===t&&((r={}).constructor={},r.constructor[ra]=function(){return r},r.flags="",r[i]=/./[i]),r.exec=function(){return 
e=!0,null},r[i](""),!e}));if(!o||!s||r){var a=g(/./[i]),c=e(i,""[t],(function(t,e,r,n,i){var s=g(t),c=e.exec;return c===ea||c===na.exec?o&&!i?{done:!0,value:a(e,r,n)}:{done:!0,value:s(r,e,n)}:{done:!1}}));Ht(String.prototype,t,c[0]),Ht(na,i,c[1])}n&&Et(na[i],"sham",!0)},oa=Mn.charAt,sa=function(t,e,r){return e+(r?oa(t,e).length:1)},aa=o.TypeError,ca=function(t,e){var r=t.exec;if(T(r)){var n=rt(r,t,e);return null!==n&&tt(n),n}if("RegExp"===Gt(t))return rt(ea,t,e);throw aa("RegExp#exec called on incompatible receiver")};ia("match",(function(t,e,r){return[function(e){var r=y(this),n=null==e?void 0:ut(e,t);return n?rt(n,e,r):new RegExp(e)[t](pr(r))},function(t){var n=tt(this),i=pr(t),o=r(e,n,i);if(o.done)return o.value;if(!n.global)return ca(n,i);var s=n.unicode;n.lastIndex=0;for(var a,c=[],l=0;null!==(a=ca(n,i));){var u=pr(a[0]);c[l]=u,""===u&&(n.lastIndex=sa(i,pe(n.lastIndex),s)),l++}return 0===l?null:c}]}));var la=At.EXISTS,ua=mt.f,fa=Function.prototype,ha=g(fa.toString),da=/function\b(?:\s|\/\*[\S\s]*?\*\/|\/\/[^\n\r]*[\n\r]+)*([^\s(/]*)/,pa=g(da.exec);H&&!la&&ua(fa,"name",{configurable:!0,get:function(){try{return pa(da,ha(this))[1]}catch(t){return""}}});var va=Function.prototype,ga=va.apply,ba=va.call,ya="object"==typeof Reflect&&Reflect.apply||(f?ba.bind(ga):function(){return ba.apply(ga,arguments)}),ma=Math.floor,xa=g("".charAt),Ea=g("".replace),wa=g("".slice),Oa=/\$([$&'`]|\d{1,2}|<[^>]*>)/g,Sa=/\$([$&'`]|\d{1,2})/g,Aa=function(t,e,r,n,i,o){var s=r+t.length,a=n.length,c=Sa;return void 0!==i&&(i=x(i),c=Oa),Ea(o,c,(function(o,c){var l;switch(xa(c,0)){case"$":return"$";case"&":return t;case"`":return wa(e,0,r);case"'":return wa(e,s);case"<":l=i[wa(c,1,-1)];break;default:var u=+c;if(0===u)return o;if(u>a){var f=ma(u/10);return 0===f?o:f<=a?void 0===n[f-1]?xa(c,1):n[f-1]+xa(c,1):o}l=n[u-1]}return void 0===l?"":l}))},ka=V("replace"),Ta=Math.max,La=Math.min,Ra=g([].concat),_a=g([].push),ja=g("".indexOf),za=g("".slice),Ma="$0"==="a".replace(/./,"$0"),Ca=!!/./[ka]&&""===/./[ka]("a","$0");ia("replace",(function(t,e,r){var n=Ca?"$":"$0";return[function(t,r){var n=y(this),i=null==t?void 0:ut(t,ka);return i?rt(i,t,n,r):rt(e,pr(n),t,r)},function(t,i){var o=tt(this),s=pr(t);if("string"==typeof i&&-1===ja(i,n)&&-1===ja(i,"$<")){var a=r(e,o,s,i);if(a.done)return a.value}var c=T(i);c||(i=pr(i));var l=o.global;if(l){var u=o.unicode;o.lastIndex=0}for(var f=[];;){var h=ca(o,s);if(null===h)break;if(_a(f,h),!l)break;""===pr(h[0])&&(o.lastIndex=sa(s,pe(o.lastIndex),u))}for(var d,p="",v=0,g=0;g<f.length;g++){for(var b=pr((h=f[g])[0]),y=Ta(La(he(h.index),s.length),0),m=[],x=1;x<h.length;x++)_a(m,void 0===(d=h[x])?d:String(d));var E=h.groups;if(c){var w=Ra([b],m,y,s);void 0!==E&&_a(w,E);var O=pr(ya(i,void 0,w))}else O=Aa(b,s,y,m,E,i);y>=v&&(p+=za(s,v,y)+O,v=y+b.length)}return p+za(s,v)}]}),!!u((function(){var t=/./;return t.exec=function(){var t=[];return t.groups={a:"7"},t},"7"!=="".replace(t,"$<a>")}))||!Ma||Ca);var Na=function(t){return Array.prototype.reduce.call(t,(function(t,e){var r=e.name.match(/data-simplebar-(.+)/);if(r){var n=r[1].replace(/\W+(.)/g,(function(t,e){return e.toUpperCase()}));switch(e.value){case"true":t[n]=!0;break;case"false":t[n]=!1;break;case void 0:t[n]=!0;break;default:t[n]=e.value}}return t}),{})};function Wa(t){return t&&t.ownerDocument&&t.ownerDocument.defaultView?t.ownerDocument.defaultView:window}function Ia(t){return t&&t.ownerDocument?t.ownerDocument:document}var Pa=null,Ba=null;function Da(t){if(null===Pa){var e=Ia(t);if(void 0===e)return Pa=0;var 
r=e.body,n=e.createElement("div");n.classList.add("simplebar-hide-scrollbar"),r.appendChild(n);var i=n.getBoundingClientRect().right;r.removeChild(n),Pa=i}return Pa}Ie&&window.addEventListener("resize",(function(){Ba!==window.devicePixelRatio&&(Ba=window.devicePixelRatio,Pa=null)}));var Fa=function(){function t(e,r){var n=this;this.onScroll=function(){var t=Wa(n.el);n.scrollXTicking||(t.requestAnimationFrame(n.scrollX),n.scrollXTicking=!0),n.scrollYTicking||(t.requestAnimationFrame(n.scrollY),n.scrollYTicking=!0)},this.scrollX=function(){n.axis.x.isOverflowing&&(n.showScrollbar("x"),n.positionScrollbar("x")),n.scrollXTicking=!1},this.scrollY=function(){n.axis.y.isOverflowing&&(n.showScrollbar("y"),n.positionScrollbar("y")),n.scrollYTicking=!1},this.onMouseEnter=function(){n.showScrollbar("x"),n.showScrollbar("y")},this.onMouseMove=function(t){n.mouseX=t.clientX,n.mouseY=t.clientY,(n.axis.x.isOverflowing||n.axis.x.forceVisible)&&n.onMouseMoveForAxis("x"),(n.axis.y.isOverflowing||n.axis.y.forceVisible)&&n.onMouseMoveForAxis("y")},this.onMouseLeave=function(){n.onMouseMove.cancel(),(n.axis.x.isOverflowing||n.axis.x.forceVisible)&&n.onMouseLeaveForAxis("x"),(n.axis.y.isOverflowing||n.axis.y.forceVisible)&&n.onMouseLeaveForAxis("y"),n.mouseX=-1,n.mouseY=-1},this.onWindowResize=function(){n.scrollbarWidth=n.getScrollbarWidth(),n.hideNativeScrollbar()},this.hideScrollbars=function(){n.axis.x.track.rect=n.axis.x.track.el.getBoundingClientRect(),n.axis.y.track.rect=n.axis.y.track.el.getBoundingClientRect(),n.isWithinBounds(n.axis.y.track.rect)||(n.axis.y.scrollbar.el.classList.remove(n.classNames.visible),n.axis.y.isVisible=!1),n.isWithinBounds(n.axis.x.track.rect)||(n.axis.x.scrollbar.el.classList.remove(n.classNames.visible),n.axis.x.isVisible=!1)},this.onPointerEvent=function(t){var e,r;n.axis.x.track.rect=n.axis.x.track.el.getBoundingClientRect(),n.axis.y.track.rect=n.axis.y.track.el.getBoundingClientRect(),(n.axis.x.isOverflowing||n.axis.x.forceVisible)&&(e=n.isWithinBounds(n.axis.x.track.rect)),(n.axis.y.isOverflowing||n.axis.y.forceVisible)&&(r=n.isWithinBounds(n.axis.y.track.rect)),(e||r)&&(t.preventDefault(),t.stopPropagation(),"mousedown"===t.type&&(e&&(n.axis.x.scrollbar.rect=n.axis.x.scrollbar.el.getBoundingClientRect(),n.isWithinBounds(n.axis.x.scrollbar.rect)?n.onDragStart(t,"x"):n.onTrackClick(t,"x")),r&&(n.axis.y.scrollbar.rect=n.axis.y.scrollbar.el.getBoundingClientRect(),n.isWithinBounds(n.axis.y.scrollbar.rect)?n.onDragStart(t,"y"):n.onTrackClick(t,"y"))))},this.drag=function(e){var r=n.axis[n.draggedAxis].track,i=r.rect[n.axis[n.draggedAxis].sizeAttr],o=n.axis[n.draggedAxis].scrollbar,s=n.contentWrapperEl[n.axis[n.draggedAxis].scrollSizeAttr],a=parseInt(n.elStyles[n.axis[n.draggedAxis].sizeAttr],10);e.preventDefault(),e.stopPropagation();var c=(("y"===n.draggedAxis?e.pageY:e.pageX)-r.rect[n.axis[n.draggedAxis].offsetAttr]-n.axis[n.draggedAxis].dragOffset)/(i-o.size)*(s-a);"x"===n.draggedAxis&&(c=n.isRtl&&t.getRtlHelpers().isRtlScrollbarInverted?c-(i+o.size):c,c=n.isRtl&&t.getRtlHelpers().isRtlScrollingInverted?-c:c),n.contentWrapperEl[n.axis[n.draggedAxis].scrollOffsetAttr]=c},this.onEndDrag=function(t){var 
e=Ia(n.el),r=Wa(n.el);t.preventDefault(),t.stopPropagation(),n.el.classList.remove(n.classNames.dragging),e.removeEventListener("mousemove",n.drag,!0),e.removeEventListener("mouseup",n.onEndDrag,!0),n.removePreventClickId=r.setTimeout((function(){e.removeEventListener("click",n.preventClick,!0),e.removeEventListener("dblclick",n.preventClick,!0),n.removePreventClickId=null}))},this.preventClick=function(t){t.preventDefault(),t.stopPropagation()},this.el=e,this.minScrollbarWidth=20,this.options=Object.assign({},t.defaultOptions,r),this.classNames=Object.assign({},t.defaultOptions.classNames,this.options.classNames),this.axis={x:{scrollOffsetAttr:"scrollLeft",sizeAttr:"width",scrollSizeAttr:"scrollWidth",offsetSizeAttr:"offsetWidth",offsetAttr:"left",overflowAttr:"overflowX",dragOffset:0,isOverflowing:!0,isVisible:!1,forceVisible:!1,track:{},scrollbar:{}},y:{scrollOffsetAttr:"scrollTop",sizeAttr:"height",scrollSizeAttr:"scrollHeight",offsetSizeAttr:"offsetHeight",offsetAttr:"top",overflowAttr:"overflowY",dragOffset:0,isOverflowing:!0,isVisible:!1,forceVisible:!1,track:{},scrollbar:{}}},this.removePreventClickId=null,t.instances.has(this.el)||(this.recalculate=Qi(this.recalculate.bind(this),64),this.onMouseMove=Qi(this.onMouseMove.bind(this),64),this.hideScrollbars=po(this.hideScrollbars.bind(this),this.options.timeout),this.onWindowResize=po(this.onWindowResize.bind(this),64,{leading:!0}),t.getRtlHelpers=Fo(t.getRtlHelpers),this.init())}t.getRtlHelpers=function(){var e=document.createElement("div");e.innerHTML='<div class="hs-dummy-scrollbar-size"><div style="height: 200%; width: 200%; margin: 10px 0;"></div></div>';var r=e.firstElementChild;document.body.appendChild(r);var n=r.firstElementChild;r.scrollLeft=0;var i=t.getOffset(r),o=t.getOffset(n);r.scrollLeft=999;var s=t.getOffset(n);return{isRtlScrollingInverted:i.left!==o.left&&o.left-s.left!=0,isRtlScrollbarInverted:i.left!==o.left}},t.getOffset=function(t){var e=t.getBoundingClientRect(),r=Ia(t),n=Wa(t);return{top:e.top+(n.pageYOffset||r.documentElement.scrollTop),left:e.left+(n.pageXOffset||r.documentElement.scrollLeft)}};var e=t.prototype;return e.init=function(){t.instances.set(this.el,this),Ie&&(this.initDOM(),this.setAccessibilityAttributes(),this.scrollbarWidth=this.getScrollbarWidth(),this.recalculate(),this.initListeners())},e.initDOM=function(){var t=this;if(Array.prototype.filter.call(this.el.children,(function(e){return 
e.classList.contains(t.classNames.wrapper)})).length)this.wrapperEl=this.el.querySelector("."+this.classNames.wrapper),this.contentWrapperEl=this.options.scrollableNode||this.el.querySelector("."+this.classNames.contentWrapper),this.contentEl=this.options.contentNode||this.el.querySelector("."+this.classNames.contentEl),this.offsetEl=this.el.querySelector("."+this.classNames.offset),this.maskEl=this.el.querySelector("."+this.classNames.mask),this.placeholderEl=this.findChild(this.wrapperEl,"."+this.classNames.placeholder),this.heightAutoObserverWrapperEl=this.el.querySelector("."+this.classNames.heightAutoObserverWrapperEl),this.heightAutoObserverEl=this.el.querySelector("."+this.classNames.heightAutoObserverEl),this.axis.x.track.el=this.findChild(this.el,"."+this.classNames.track+"."+this.classNames.horizontal),this.axis.y.track.el=this.findChild(this.el,"."+this.classNames.track+"."+this.classNames.vertical);else{for(this.wrapperEl=document.createElement("div"),this.contentWrapperEl=document.createElement("div"),this.offsetEl=document.createElement("div"),this.maskEl=document.createElement("div"),this.contentEl=document.createElement("div"),this.placeholderEl=document.createElement("div"),this.heightAutoObserverWrapperEl=document.createElement("div"),this.heightAutoObserverEl=document.createElement("div"),this.wrapperEl.classList.add(this.classNames.wrapper),this.contentWrapperEl.classList.add(this.classNames.contentWrapper),this.offsetEl.classList.add(this.classNames.offset),this.maskEl.classList.add(this.classNames.mask),this.contentEl.classList.add(this.classNames.contentEl),this.placeholderEl.classList.add(this.classNames.placeholder),this.heightAutoObserverWrapperEl.classList.add(this.classNames.heightAutoObserverWrapperEl),this.heightAutoObserverEl.classList.add(this.classNames.heightAutoObserverEl);this.el.firstChild;)this.contentEl.appendChild(this.el.firstChild);this.contentWrapperEl.appendChild(this.contentEl),this.offsetEl.appendChild(this.contentWrapperEl),this.maskEl.appendChild(this.offsetEl),this.heightAutoObserverWrapperEl.appendChild(this.heightAutoObserverEl),this.wrapperEl.appendChild(this.heightAutoObserverWrapperEl),this.wrapperEl.appendChild(this.maskEl),this.wrapperEl.appendChild(this.placeholderEl),this.el.appendChild(this.wrapperEl)}if(!this.axis.x.track.el||!this.axis.y.track.el){var e=document.createElement("div"),r=document.createElement("div");e.classList.add(this.classNames.track),r.classList.add(this.classNames.scrollbar),e.appendChild(r),this.axis.x.track.el=e.cloneNode(!0),this.axis.x.track.el.classList.add(this.classNames.horizontal),this.axis.y.track.el=e.cloneNode(!0),this.axis.y.track.el.classList.add(this.classNames.vertical),this.el.appendChild(this.axis.x.track.el),this.el.appendChild(this.axis.y.track.el)}this.axis.x.scrollbar.el=this.axis.x.track.el.querySelector("."+this.classNames.scrollbar),this.axis.y.scrollbar.el=this.axis.y.track.el.querySelector("."+this.classNames.scrollbar),this.options.autoHide||(this.axis.x.scrollbar.el.classList.add(this.classNames.visible),this.axis.y.scrollbar.el.classList.add(this.classNames.visible)),this.el.setAttribute("data-simplebar","init")},e.setAccessibilityAttributes=function(){var t=this.options.ariaLabel||"scrollable content";this.contentWrapperEl.setAttribute("tabindex","0"),this.contentWrapperEl.setAttribute("role","region"),this.contentWrapperEl.setAttribute("aria-label",t)},e.initListeners=function(){var 
t=this,e=Wa(this.el);this.options.autoHide&&this.el.addEventListener("mouseenter",this.onMouseEnter),["mousedown","click","dblclick"].forEach((function(e){t.el.addEventListener(e,t.onPointerEvent,!0)})),["touchstart","touchend","touchmove"].forEach((function(e){t.el.addEventListener(e,t.onPointerEvent,{capture:!0,passive:!0})})),this.el.addEventListener("mousemove",this.onMouseMove),this.el.addEventListener("mouseleave",this.onMouseLeave),this.contentWrapperEl.addEventListener("scroll",this.onScroll),e.addEventListener("resize",this.onWindowResize);var r=!1,n=e.ResizeObserver||ks;this.resizeObserver=new n((function(){r&&t.recalculate()})),this.resizeObserver.observe(this.el),this.resizeObserver.observe(this.contentEl),e.requestAnimationFrame((function(){r=!0})),this.mutationObserver=new e.MutationObserver(this.recalculate),this.mutationObserver.observe(this.contentEl,{childList:!0,subtree:!0,characterData:!0})},e.recalculate=function(){var t=Wa(this.el);this.elStyles=t.getComputedStyle(this.el),this.isRtl="rtl"===this.elStyles.direction;var e=this.heightAutoObserverEl.offsetHeight<=1,r=this.heightAutoObserverEl.offsetWidth<=1,n=this.contentEl.offsetWidth,i=this.contentWrapperEl.offsetWidth,o=this.elStyles.overflowX,s=this.elStyles.overflowY;this.contentEl.style.padding=this.elStyles.paddingTop+" "+this.elStyles.paddingRight+" "+this.elStyles.paddingBottom+" "+this.elStyles.paddingLeft,this.wrapperEl.style.margin="-"+this.elStyles.paddingTop+" -"+this.elStyles.paddingRight+" -"+this.elStyles.paddingBottom+" -"+this.elStyles.paddingLeft;var a=this.contentEl.scrollHeight,c=this.contentEl.scrollWidth;this.contentWrapperEl.style.height=e?"auto":"100%",this.placeholderEl.style.width=r?n+"px":"auto",this.placeholderEl.style.height=a+"px";var l=this.contentWrapperEl.offsetHeight;this.axis.x.isOverflowing=c>n,this.axis.y.isOverflowing=a>l,this.axis.x.isOverflowing="hidden"!==o&&this.axis.x.isOverflowing,this.axis.y.isOverflowing="hidden"!==s&&this.axis.y.isOverflowing,this.axis.x.forceVisible="x"===this.options.forceVisible||!0===this.options.forceVisible,this.axis.y.forceVisible="y"===this.options.forceVisible||!0===this.options.forceVisible,this.hideNativeScrollbar();var u=this.axis.x.isOverflowing?this.scrollbarWidth:0,f=this.axis.y.isOverflowing?this.scrollbarWidth:0;this.axis.x.isOverflowing=this.axis.x.isOverflowing&&c>i-f,this.axis.y.isOverflowing=this.axis.y.isOverflowing&&a>l-u,this.axis.x.scrollbar.size=this.getScrollbarSize("x"),this.axis.y.scrollbar.size=this.getScrollbarSize("y"),this.axis.x.scrollbar.el.style.width=this.axis.x.scrollbar.size+"px",this.axis.y.scrollbar.el.style.height=this.axis.y.scrollbar.size+"px",this.positionScrollbar("x"),this.positionScrollbar("y"),this.toggleTrackVisibility("x"),this.toggleTrackVisibility("y")},e.getScrollbarSize=function(t){if(void 0===t&&(t="y"),!this.axis[t].isOverflowing)return 0;var e,r=this.contentEl[this.axis[t].scrollSizeAttr],n=this.axis[t].track.el[this.axis[t].offsetSizeAttr],i=n/r;return e=Math.max(~~(i*n),this.options.scrollbarMinSize),this.options.scrollbarMaxSize&&(e=Math.min(e,this.options.scrollbarMaxSize)),e},e.positionScrollbar=function(e){if(void 0===e&&(e="y"),this.axis[e].isOverflowing){var 
r=this.contentWrapperEl[this.axis[e].scrollSizeAttr],n=this.axis[e].track.el[this.axis[e].offsetSizeAttr],i=parseInt(this.elStyles[this.axis[e].sizeAttr],10),o=this.axis[e].scrollbar,s=this.contentWrapperEl[this.axis[e].scrollOffsetAttr],a=(s="x"===e&&this.isRtl&&t.getRtlHelpers().isRtlScrollingInverted?-s:s)/(r-i),c=~~((n-o.size)*a);c="x"===e&&this.isRtl&&t.getRtlHelpers().isRtlScrollbarInverted?c+(n-o.size):c,o.el.style.transform="x"===e?"translate3d("+c+"px, 0, 0)":"translate3d(0, "+c+"px, 0)"}},e.toggleTrackVisibility=function(t){void 0===t&&(t="y");var e=this.axis[t].track.el,r=this.axis[t].scrollbar.el;this.axis[t].isOverflowing||this.axis[t].forceVisible?(e.style.visibility="visible",this.contentWrapperEl.style[this.axis[t].overflowAttr]="scroll"):(e.style.visibility="hidden",this.contentWrapperEl.style[this.axis[t].overflowAttr]="hidden"),this.axis[t].isOverflowing?r.style.display="block":r.style.display="none"},e.hideNativeScrollbar=function(){this.offsetEl.style[this.isRtl?"left":"right"]=this.axis.y.isOverflowing||this.axis.y.forceVisible?"-"+this.scrollbarWidth+"px":0,this.offsetEl.style.bottom=this.axis.x.isOverflowing||this.axis.x.forceVisible?"-"+this.scrollbarWidth+"px":0},e.onMouseMoveForAxis=function(t){void 0===t&&(t="y"),this.axis[t].track.rect=this.axis[t].track.el.getBoundingClientRect(),this.axis[t].scrollbar.rect=this.axis[t].scrollbar.el.getBoundingClientRect(),this.isWithinBounds(this.axis[t].scrollbar.rect)?this.axis[t].scrollbar.el.classList.add(this.classNames.hover):this.axis[t].scrollbar.el.classList.remove(this.classNames.hover),this.isWithinBounds(this.axis[t].track.rect)?(this.showScrollbar(t),this.axis[t].track.el.classList.add(this.classNames.hover)):this.axis[t].track.el.classList.remove(this.classNames.hover)},e.onMouseLeaveForAxis=function(t){void 0===t&&(t="y"),this.axis[t].track.el.classList.remove(this.classNames.hover),this.axis[t].scrollbar.el.classList.remove(this.classNames.hover)},e.showScrollbar=function(t){void 0===t&&(t="y");var e=this.axis[t].scrollbar.el;this.axis[t].isVisible||(e.classList.add(this.classNames.visible),this.axis[t].isVisible=!0),this.options.autoHide&&this.hideScrollbars()},e.onDragStart=function(t,e){void 0===e&&(e="y");var r=Ia(this.el),n=Wa(this.el),i=this.axis[e].scrollbar,o="y"===e?t.pageY:t.pageX;this.axis[e].dragOffset=o-i.rect[this.axis[e].offsetAttr],this.draggedAxis=e,this.el.classList.add(this.classNames.dragging),r.addEventListener("mousemove",this.drag,!0),r.addEventListener("mouseup",this.onEndDrag,!0),null===this.removePreventClickId?(r.addEventListener("click",this.preventClick,!0),r.addEventListener("dblclick",this.preventClick,!0)):(n.clearTimeout(this.removePreventClickId),this.removePreventClickId=null)},e.onTrackClick=function(t,e){var r=this;if(void 0===e&&(e="y"),this.options.clickOnTrack){var n=Wa(this.el);this.axis[e].scrollbar.rect=this.axis[e].scrollbar.el.getBoundingClientRect();var i=this.axis[e].scrollbar.rect[this.axis[e].offsetAttr],o=parseInt(this.elStyles[this.axis[e].sizeAttr],10),s=this.contentWrapperEl[this.axis[e].scrollOffsetAttr],a=("y"===e?this.mouseY-i:this.mouseX-i)<0?-1:1,c=-1===a?s-o:s+o;!function t(){var i,o;-1===a?s>c&&(s-=r.options.clickOnTrackSpeed,r.contentWrapperEl.scrollTo(((i={})[r.axis[e].offsetAttr]=s,i)),n.requestAnimationFrame(t)):s<c&&(s+=r.options.clickOnTrackSpeed,r.contentWrapperEl.scrollTo(((o={})[r.axis[e].offsetAttr]=s,o)),n.requestAnimationFrame(t))}()}},e.getContentElement=function(){return this.contentEl},e.getScrollElement=function(){return 
this.contentWrapperEl},e.getScrollbarWidth=function(){try{return"none"===getComputedStyle(this.contentWrapperEl,"::-webkit-scrollbar").display||"scrollbarWidth"in document.documentElement.style||"-ms-overflow-style"in document.documentElement.style?0:Da(this.el)}catch(t){return Da(this.el)}},e.removeListeners=function(){var t=this,e=Wa(this.el);this.options.autoHide&&this.el.removeEventListener("mouseenter",this.onMouseEnter),["mousedown","click","dblclick"].forEach((function(e){t.el.removeEventListener(e,t.onPointerEvent,!0)})),["touchstart","touchend","touchmove"].forEach((function(e){t.el.removeEventListener(e,t.onPointerEvent,{capture:!0,passive:!0})})),this.el.removeEventListener("mousemove",this.onMouseMove),this.el.removeEventListener("mouseleave",this.onMouseLeave),this.contentWrapperEl&&this.contentWrapperEl.removeEventListener("scroll",this.onScroll),e.removeEventListener("resize",this.onWindowResize),this.mutationObserver&&this.mutationObserver.disconnect(),this.resizeObserver&&this.resizeObserver.disconnect(),this.recalculate.cancel(),this.onMouseMove.cancel(),this.hideScrollbars.cancel(),this.onWindowResize.cancel()},e.unMount=function(){this.removeListeners(),t.instances.delete(this.el)},e.isWithinBounds=function(t){return this.mouseX>=t.left&&this.mouseX<=t.left+t.width&&this.mouseY>=t.top&&this.mouseY<=t.top+t.height},e.findChild=function(t,e){var r=t.matches||t.webkitMatchesSelector||t.mozMatchesSelector||t.msMatchesSelector;return Array.prototype.filter.call(t.children,(function(t){return r.call(t,e)}))[0]},t}();return Fa.defaultOptions={autoHide:!0,forceVisible:!1,clickOnTrack:!0,clickOnTrackSpeed:40,classNames:{contentEl:"simplebar-content",contentWrapper:"simplebar-content-wrapper",offset:"simplebar-offset",mask:"simplebar-mask",wrapper:"simplebar-wrapper",placeholder:"simplebar-placeholder",scrollbar:"simplebar-scrollbar",track:"simplebar-track",heightAutoObserverWrapperEl:"simplebar-height-auto-observer-wrapper",heightAutoObserverEl:"simplebar-height-auto-observer",visible:"simplebar-visible",horizontal:"simplebar-horizontal",vertical:"simplebar-vertical",hover:"simplebar-hover",dragging:"simplebar-dragging"},scrollbarMinSize:25,scrollbarMaxSize:0,timeout:1e3},Fa.instances=new WeakMap,Fa.initDOMLoadedElements=function(){document.removeEventListener("DOMContentLoaded",this.initDOMLoadedElements),window.removeEventListener("load",this.initDOMLoadedElements),Array.prototype.forEach.call(document.querySelectorAll("[data-simplebar]"),(function(t){"init"===t.getAttribute("data-simplebar")||Fa.instances.has(t)||new Fa(t,Na(t.attributes))}))},Fa.removeObserver=function(){this.globalObserver.disconnect()},Fa.initHtmlApi=function(){this.initDOMLoadedElements=this.initDOMLoadedElements.bind(this),"undefined"!=typeof MutationObserver&&(this.globalObserver=new MutationObserver(Fa.handleMutations),this.globalObserver.observe(document,{childList:!0,subtree:!0})),"complete"===document.readyState||"loading"!==document.readyState&&!document.documentElement.doScroll?window.setTimeout(this.initDOMLoadedElements):(document.addEventListener("DOMContentLoaded",this.initDOMLoadedElements),window.addEventListener("load",this.initDOMLoadedElements))},Fa.handleMutations=function(t){t.forEach((function(t){Array.prototype.forEach.call(t.addedNodes,(function(t){1===t.nodeType&&(t.hasAttribute("data-simplebar")?!Fa.instances.has(t)&&document.documentElement.contains(t)&&new 
Fa(t,Na(t.attributes)):Array.prototype.forEach.call(t.querySelectorAll("[data-simplebar]"),(function(t){"init"!==t.getAttribute("data-simplebar")&&!Fa.instances.has(t)&&document.documentElement.contains(t)&&new Fa(t,Na(t.attributes))})))})),Array.prototype.forEach.call(t.removedNodes,(function(t){1===t.nodeType&&("init"===t.getAttribute("data-simplebar")?Fa.instances.has(t)&&!document.documentElement.contains(t)&&Fa.instances.get(t).unMount():Array.prototype.forEach.call(t.querySelectorAll('[data-simplebar="init"]'),(function(t){Fa.instances.has(t)&&!document.documentElement.contains(t)&&Fa.instances.get(t).unMount()})))}))}))},Fa.getOptions=Na,Ie&&Fa.initHtmlApi(),Fa}));
|
PypiClean
|
/isittor-1.2.tar.gz/isittor-1.2/README.md
|
# IsItTor
IsItTor is a script to check if an IP address is a Tor exit node.
Install:

    # sudo pip install requests[security] isittor

Usage:

    # isittor.py 99.245.160.4
    02/10/2015 14:22:00 INFO - Starting new HTTPS connection (1): www.dan.me.uk
    02/10/2015 14:22:01 INFO - Load TOR exit node list from https://www.dan.me.uk/torlist/
    02/10/2015 14:22:01 INFO - 99.245.160.4 is a TOR exit node

    # isittor.py 99.245.160.4 10.10.10.10
    02/10/2015 14:22:00 INFO - Starting new HTTPS connection (1): www.dan.me.uk
    02/10/2015 14:22:01 INFO - Load TOR exit node list from https://www.dan.me.uk/torlist/
    02/10/2015 14:22:01 INFO - 99.245.160.4 is a TOR exit node
    02/10/2015 14:22:01 INFO - 10.10.10.10 is NOT a TOR exit node
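The same check is easy to reproduce in Python with `requests` (which the install line above pulls in). The sketch below is illustrative only and is not the package's internal implementation; the helper name and the timeout value are made up:

```python
import requests

TORLIST_URL = "https://www.dan.me.uk/torlist/"  # exit-node list used by isittor, one IP per line


def is_tor_exit_node(ip, torlist_url=TORLIST_URL):
    """Return True if `ip` appears in the published Tor exit-node list."""
    response = requests.get(torlist_url, timeout=10)
    response.raise_for_status()
    exit_nodes = set(response.text.split())
    return ip in exit_nodes


if __name__ == "__main__":
    for ip in ("99.245.160.4", "10.10.10.10"):
        verdict = "is" if is_tor_exit_node(ip) else "is NOT"
        print(f"{ip} {verdict} a TOR exit node")
```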
|
PypiClean
|
/ixnetwork_restpy-1.1.10.tar.gz/ixnetwork_restpy-1.1.10/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/vicclientrange_5a725992fa0bed642bb51f91f1943859.py
|
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class VicClientRange(Base):
"""Range settings for VIC protocol
The VicClientRange class encapsulates a list of vicClientRange resources that are managed by the user.
A list of resources can be retrieved from the server using the VicClientRange.find() method.
The list can be managed by using the VicClientRange.add() and VicClientRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = "vicClientRange"
_SDM_ATT_MAP = {
"ChannelIdIncrement": "channelIdIncrement",
"ChannelIdStart": "channelIdStart",
"Enabled": "enabled",
"MacsPerVif": "macsPerVif",
"Name": "name",
"ObjectId": "objectId",
"ProvInfoOui": "provInfoOui",
"ProvInfoTlvs": "provInfoTlvs",
"ProvInfoType": "provInfoType",
"TlvOffset": "tlvOffset",
"VifActive": "vifActive",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(VicClientRange, self).__init__(parent, list_op)
@property
def ChannelIdIncrement(self):
# type: () -> int
"""
Returns
-------
- number: The increment step for channel ID.
"""
return self._get_attribute(self._SDM_ATT_MAP["ChannelIdIncrement"])
@ChannelIdIncrement.setter
def ChannelIdIncrement(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["ChannelIdIncrement"], value)
@property
def ChannelIdStart(self):
# type: () -> int
"""
Returns
-------
- number: The channel ID within VIC session.
"""
return self._get_attribute(self._SDM_ATT_MAP["ChannelIdStart"])
@ChannelIdStart.setter
def ChannelIdStart(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["ChannelIdStart"], value)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP["Enabled"])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP["Enabled"], value)
@property
def MacsPerVif(self):
# type: () -> int
"""
Returns
-------
- number: Number of MAC interfaces managed by a single VIF object.
"""
return self._get_attribute(self._SDM_ATT_MAP["MacsPerVif"])
@MacsPerVif.setter
def MacsPerVif(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["MacsPerVif"], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP["Name"])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["Name"], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP["ObjectId"])
@property
def ProvInfoOui(self):
# type: () -> str
"""
Returns
-------
- str: IEEE OUI owning the provisioning information type space.
"""
return self._get_attribute(self._SDM_ATT_MAP["ProvInfoOui"])
@ProvInfoOui.setter
def ProvInfoOui(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["ProvInfoOui"], value)
@property
def ProvInfoTlvs(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/globals/protocolStack/vicClientGlobals/vicOptionSet): The provisioning TLVs associated with this range.
"""
return self._get_attribute(self._SDM_ATT_MAP["ProvInfoTlvs"])
@ProvInfoTlvs.setter
def ProvInfoTlvs(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP["ProvInfoTlvs"], value)
@property
def ProvInfoType(self):
# type: () -> int
"""
Returns
-------
- number: The type of the provisioning information (defined in each namespace).
"""
return self._get_attribute(self._SDM_ATT_MAP["ProvInfoType"])
@ProvInfoType.setter
def ProvInfoType(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["ProvInfoType"], value)
@property
def TlvOffset(self):
# type: () -> int
"""
Returns
-------
- number: The number of TLV increments to apply before using the TLV values for this range.
"""
return self._get_attribute(self._SDM_ATT_MAP["TlvOffset"])
@TlvOffset.setter
def TlvOffset(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP["TlvOffset"], value)
@property
def VifActive(self):
# type: () -> bool
"""
Returns
-------
- bool: The initial state of this interface set: true for Active, false for Standby.
"""
return self._get_attribute(self._SDM_ATT_MAP["VifActive"])
@VifActive.setter
def VifActive(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP["VifActive"], value)
def update(
self,
ChannelIdIncrement=None,
ChannelIdStart=None,
Enabled=None,
MacsPerVif=None,
Name=None,
ProvInfoOui=None,
ProvInfoTlvs=None,
ProvInfoType=None,
TlvOffset=None,
VifActive=None,
):
# type: (int, int, bool, int, str, str, str, int, int, bool) -> VicClientRange
"""Updates vicClientRange resource on the server.
Args
----
- ChannelIdIncrement (number): The increment step for channel ID.
- ChannelIdStart (number): The channel ID within VIC session.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- MacsPerVif (number): Number of MAC interfaces managed by a single VIF object.
- Name (str): Name of range
- ProvInfoOui (str): IEEE OUI owning the provisioning information type space.
- ProvInfoTlvs (str(None | /api/v1/sessions/1/ixnetwork/globals/protocolStack/vicClientGlobals/vicOptionSet)): The provisioning TLVs associated with this range.
- ProvInfoType (number): The type of the provisioning information (defined in each namespace).
- TlvOffset (number): The number of TLV increments to apply before using the TLV values for this range.
- VifActive (bool): The initial state of this interface set: true for Active, false for Standby.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(
self,
ChannelIdIncrement=None,
ChannelIdStart=None,
Enabled=None,
MacsPerVif=None,
Name=None,
ProvInfoOui=None,
ProvInfoTlvs=None,
ProvInfoType=None,
TlvOffset=None,
VifActive=None,
):
# type: (int, int, bool, int, str, str, str, int, int, bool) -> VicClientRange
"""Adds a new vicClientRange resource on the server and adds it to the container.
Args
----
- ChannelIdIncrement (number): The increment step for channel ID.
- ChannelIdStart (number): The channel ID within VIC session.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- MacsPerVif (number): Number of MAC interfaces managed by a single VIF object.
- Name (str): Name of range
- ProvInfoOui (str): IEEE OUI owning the provisioning information type space.
- ProvInfoTlvs (str(None | /api/v1/sessions/1/ixnetwork/globals/protocolStack/vicClientGlobals/vicOptionSet)): The provisioning TLVs associated with this range.
- ProvInfoType (number): The type of the provisioning information (defined in each namespace).
- TlvOffset (number): The number of TLV increments to apply before using the TLV values for this range.
- VifActive (bool): The initial state of this interface set: true for Active, false for Standby.
Returns
-------
- self: This instance with all currently retrieved vicClientRange resources using find and the newly added vicClientRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained vicClientRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(
self,
ChannelIdIncrement=None,
ChannelIdStart=None,
Enabled=None,
MacsPerVif=None,
Name=None,
ObjectId=None,
ProvInfoOui=None,
ProvInfoTlvs=None,
ProvInfoType=None,
TlvOffset=None,
VifActive=None,
):
# type: (int, int, bool, int, str, str, str, str, int, int, bool) -> VicClientRange
"""Finds and retrieves vicClientRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve vicClientRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all vicClientRange resources from the server.
Args
----
- ChannelIdIncrement (number): The increment step for channel ID.
- ChannelIdStart (number): The channel ID within VIC session.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- MacsPerVif (number): Number of MAC interfaces managed by a single VIF object.
- Name (str): Name of range
- ObjectId (str): Unique identifier for this object
- ProvInfoOui (str): IEEE OUI owning the provisioning information type space.
- ProvInfoTlvs (str(None | /api/v1/sessions/1/ixnetwork/globals/protocolStack/vicClientGlobals/vicOptionSet)): The provisioning TLVs associated with this range.
- ProvInfoType (number): The type of the provisioning information (defined in each namespace).
- TlvOffset (number): The number of TLV increments to apply before using the TLV values for this range.
- VifActive (bool): The initial state of this interface set: true for Active, false for Standby.
Returns
-------
- self: This instance with matching vicClientRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of vicClientRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the vicClientRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute(
"customProtocolStack", payload=payload, response_object=None
)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute(
"disableProtocolStack", payload=payload, response_object=None
)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute(
"enableProtocolStack", payload=payload, response_object=None
)
def VicClientActivate(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the vicClientActivate operation on the server.
Send VIF-ACTIVATE for selected ranges
vicClientActivate(async_operation=bool)
---------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("vicClientActivate", payload=payload, response_object=None)
def VicClientCreate(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the vicClientCreate operation on the server.
Send VIF-CREATE for selected ranges
vicClientCreate(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("vicClientCreate", payload=payload, response_object=None)
def VicClientDeactivate(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the vicClientDeactivate operation on the server.
Send VIF-DEACTIVATE for selected ranges
vicClientDeactivate(async_operation=bool)
-----------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute(
"vicClientDeactivate", payload=payload, response_object=None
)
def VicClientDelete(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the vicClientDelete operation on the server.
Send VIF-DELETE for selected ranges
vicClientDelete(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("vicClientDelete", payload=payload, response_object=None)
def VicClientDisable(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the vicClientDisable operation on the server.
Send VIF-DISABLE for selected ranges
vicClientDisable(async_operation=bool)
--------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("vicClientDisable", payload=payload, response_object=None)
def VicClientEnable(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the vicClientEnable operation on the server.
Send VIF-ENABLE for selected ranges
vicClientEnable(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("vicClientEnable", payload=payload, response_object=None)
def VicClientStart(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the vicClientStart operation on the server.
Negotiate VIC sessions for selected ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
vicClientStart(async_operation=bool)
------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
vicClientStart(Arg2=enum, async_operation=bool)
-----------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/ethernet/dcbxEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/dhcpEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/dhcpServerEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ipEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/vepaEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/vicClient,/vport/protocolStack/ethernetEndpoint/range/vicClientRange,/vport/protocolStack/ethernetEndpoint/vicClient]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("vicClientStart", payload=payload, response_object=None)
def VicClientStop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the vicClientStop operation on the server.
Teardown VIC sessions for selected ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
vicClientStop(async_operation=bool)
-----------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
vicClientStop(Arg2=enum, async_operation=bool)
----------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/ethernet/dcbxEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/dhcpEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/dhcpServerEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/ipEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/vepaEndpoint/range/vicClientRange,/vport/protocolStack/ethernet/vicClient,/vport/protocolStack/ethernetEndpoint/range/vicClientRange,/vport/protocolStack/ethernetEndpoint/vicClient]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute("vicClientStop", payload=payload, response_object=None)
|
PypiClean
|
/gbptestheat-2014.2.tar.gz/gbptestheat-2014.2.dev114.gc2f22b1/doc/source/man/heat-engine.rst
|
===========
heat-engine
===========
.. program:: heat-engine
SYNOPSIS
========
``heat-engine [options]``
DESCRIPTION
===========
heat-engine is the Heat project server with an internal API that is called by heat-api.
INVENTORY
=========
The heat engine does all the orchestration work and is the layer in which
the resource integration is implemented.
OPTIONS
=======
.. cmdoption:: --config-file
Path to a config file to use. Multiple config files can be specified, with
values in later files taking precedence.
.. cmdoption:: --config-dir
Path to a config directory to pull .conf files from. This file set is
sorted so as to provide a predictable parse order if individual options are
overridden. The set is parsed after the file(s), if any, specified via
--config-file, so options overridden in the directory take precedence.
FILES
========
* /etc/heat/heat.conf
|
PypiClean
|
/onnxruntime_training-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/onnxruntime/training/optim/_modifier.py
|
import warnings
import torch
from numpy import inf
from ._multi_tensor_apply import MultiTensorApply
multi_tensor_applier = MultiTensorApply(2048 * 32)
class FP16OptimizerModifier:
def __init__(self, optimizer) -> None:
super().__init__()
self._optimizer = optimizer
def apply(self):
if self.can_be_modified():
self.override_function()
def check_requirements(self, required_funcs, require_apex=False, require_torch_non_finite_check=False):
try:
if require_apex is True:
import amp_C # noqa: F401
from apex import amp # noqa: F401
if require_torch_non_finite_check is True:
_ = torch._amp_foreach_non_finite_check_and_unscale_
except Exception:
warnings.warn("Skip modifying optimizer because of Apex or torch_non_finite_check not found.", UserWarning)
return False
if required_funcs:
for func_name in required_funcs:
func = getattr(self._optimizer, func_name, None)
if not func or not callable(func):
warnings.warn(
"Skip modifying optimizer because of specific function not found in optimizer.", UserWarning
)
return False
return True
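# Illustrative sketch (hypothetical subclass, not part of this module): concrete
# modifiers are expected to provide can_be_modified() and override_function(),
# which apply() invokes in that order, e.g.:
#
#     class MyOptimizerModifier(FP16OptimizerModifier):
#         def can_be_modified(self):
#             return self.check_requirements(
#                 ["step"], require_apex=False, require_torch_non_finite_check=True
#             )
#
#         def override_function(self):
#             pass  # e.g. wrap self._optimizer.step with an overflow-aware version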
def check_overflow(params):
grad_data = [p.grad.data for p in params if p.grad is not None]
return check_overflow_for_grads(grad_data)
def check_overflow_for_grads(grad_data):
found_inf = torch.cuda.FloatTensor([0.0])
scaler = torch.cuda.FloatTensor([1.0])
# Unscale and set found inf/nan
torch._amp_foreach_non_finite_check_and_unscale_(grad_data, found_inf, scaler)
# Check for nan.
overflow = found_inf.item() > 0
return overflow
def clip_grad_norm_fp32(
parameters, max_norm, norm_type, get_horizontal_model_parallel_rank=None, get_horizontal_model_parallel_group=None
):
from onnxruntime.training.ortmodule.torch_cpp_extensions import fused_ops
horizontal_model_parallel_grad_norm_aggregation = False
if get_horizontal_model_parallel_rank and get_horizontal_model_parallel_group:
horizontal_model_parallel_grad_norm_aggregation = True
def param_is_not_tensor_parallel_duplicate(param):
is_mp_tensor = hasattr(param, "model_parallel") and param.model_parallel
return is_mp_tensor or (get_horizontal_model_parallel_rank() == 0)
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
# Filter parameters based on:
# - grad should not be none
# - should not be a replica due to tensor model parallelism
grads_for_norm = []
for param in parameters:
grad_not_none = param.grad is not None
# Only detach when a gradient actually exists; otherwise leave it unset.
grad = param.grad.detach() if grad_not_none else None
if grad_not_none:
# Make sure the grads are in fp32
assert param.grad.type() == "torch.cuda.FloatTensor"
if horizontal_model_parallel_grad_norm_aggregation:
is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param)
if grad_not_none and is_not_tp_duplicate:
grads_for_norm.append(grad)
else:
grads_for_norm.append(grad)
# Norm parameters.
max_norm = float(max_norm)
norm_type = float(norm_type)
total_norm = 0.0
dummy_overflow_buf = torch.cuda.IntTensor([0])
# Calculate norm.
if norm_type == inf:
total_norm = max(grad.abs().max() for grad in grads_for_norm)
if horizontal_model_parallel_grad_norm_aggregation:
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
# Take max across all model-parallel GPUs.
torch.distributed.all_reduce(
total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=get_horizontal_model_parallel_group()
)
total_norm = total_norm_cuda[0].item()
else:
if norm_type == 2.0:
# Use apex's multi-tensor applier for efficiency reasons.
# Multi-tensor applier takes a function and a list of list
# and performs the operation on that list all in one kernel.
grad_norm, _ = multi_tensor_applier(
fused_ops.multi_tensor_l2norm,
dummy_overflow_buf,
[fused_ops.TorchTensorVector(grads_for_norm)],
False, # no per-parameter norm
)
if not horizontal_model_parallel_grad_norm_aggregation:
return grad_norm.item()
# Since we will be summing across data parallel groups,
# we need the pow(norm-type).
total_norm = grad_norm**norm_type
else:
for grad in grads_for_norm:
grad_norm = torch.norm(grad, norm_type)
total_norm += grad_norm**norm_type
if horizontal_model_parallel_grad_norm_aggregation:
# Sum across all model-parallel GPUs.
torch.distributed.all_reduce(
total_norm, op=torch.distributed.ReduceOp.SUM, group=get_horizontal_model_parallel_group()
)
total_norm = total_norm.item() ** (1.0 / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
# Filter parameters with gradients.
grads = [p.grad for p in parameters if p.grad is not None]
if clip_coef < 1.0:
grads_vec = fused_ops.TorchTensorVector(grads)
multi_tensor_applier(fused_ops.multi_tensor_scale, dummy_overflow_buf, [grads_vec, grads_vec], clip_coef)
return total_norm
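# Illustrative usage sketch (not part of this module), assuming a torch model whose
# gradients are fp32 CUDA tensors after backward():
#
#     params = [p for p in model.parameters() if p.grad is not None]
#     if not check_overflow(params):
#         total_norm = clip_grad_norm_fp32(params, max_norm=1.0, norm_type=2.0)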
|
PypiClean
|
/brock-1.0.2-py3-none-any.whl/bedrock/manifest.py
|
import argparse
import os
from os.path import expanduser
import docker
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
def init_path(path):
os.makedirs(expanduser(f'~/.bedrock/{path}'), exist_ok=True)
def parse_manifest(file):
return yaml.load(file, Loader=Loader)
def resolve_key(parts, varlist, default_key):
varmap = dict(map(lambda var: var.split('='), varlist))
keyparts = [varmap[part] for part in (parts or []) if part in varmap]
return f'{"-".join(keyparts)}' if keyparts else default_key
def append_env(environment, env_var):
if env_var in os.environ:
environment.append(f'{env_var}={os.environ[env_var]}')
def apply_blueprint(name, key, config, action, extra_volumes, extra_config):
print(f'Apply blueprint: {name}/{key} [{action}]')
init_path(f'{name}/{key}')
client = docker.from_env()
environment = [
f'TF_BACKEND_KEY={name}/{key}',
f'AWS_ACCESS_KEY_ID={os.environ["AWS_ACCESS_KEY_ID"]}',
f'AWS_SECRET_ACCESS_KEY={os.environ["AWS_SECRET_ACCESS_KEY"]}',
f'AWS_DEFAULT_REGION={os.environ["AWS_DEFAULT_REGION"]}',
]
# Append optional environment variables..
for env_var in ['AWS_SESSION_TOKEN', 'TF_INIT_ARGS', 'TF_APPLY_ARGS', 'TF_PLAN_ARGS', 'TF_DESTROY_ARGS',
'http_proxy', 'https_proxy', 'no_proxy']:
append_env(environment, env_var)
if config:
for item in config:
if isinstance(config[item], list):
config_string = '["%s"]' % '","'.join(config[item])
environment.append(f'TF_VAR_{item}={config_string}')
else:
environment.append(f'TF_VAR_{item}={config[item]}')
if extra_config:
for cnf in extra_config:
            cargs = cnf.split('=', 1)  # split only on the first '=' so values may contain '='
            environment.append(f'TF_VAR_{cargs[0]}={cargs[1]}')
volumes = {
expanduser(f'~/.bedrock/{name}/{key}'): {
'bind': '/work',
'mode': 'rw'
}
}
if extra_volumes:
for volume in extra_volumes:
vargs = volume.split(':')
volumes[vargs[0]] = {
'bind': vargs[1],
'mode': 'ro'
}
container = client.containers.run(f"bedrock/{name}", action, privileged=True, network_mode='host',
remove=True, environment=environment, volumes=volumes, tty=True, detach=True)
logs = container.logs(stream=True)
for log in logs:
print(log.decode('utf-8'), end='')
def apply_blueprints(tf_key, blueprints, action, volumes, config):
for blueprint in blueprints:
apply_blueprint(blueprint, tf_key, blueprints[blueprint], action, volumes, config)
def main():
parser = argparse.ArgumentParser(description='Bedrock Manifest Tool.')
parser.add_argument('-m', '--manifest', metavar='<manifest_path>', default='manifest.yml', type=argparse.FileType('r'),
help='location of manifest file (default: %(default)s)')
parser.add_argument('-v', '--volumes', metavar='<path:volume>', nargs='+',
help='additional volumes mounted to support blueprints')
parser.add_argument('-c', '--config', metavar='<key=value>', nargs='+',
help='additional configuration to support blueprints')
parser.add_argument('action', metavar='<command>', choices=['init', 'apply', 'plan', 'destroy'],
help='manifest action (possible values: %(choices)s)', nargs='?', default='init')
args = parser.parse_args()
manifest = parse_manifest(args.manifest)
constellations = manifest['constellations']
    if args.action == 'destroy':
        # Destroy in reverse order; constellations is a mapping, so reverse its keys.
        constellations = list(reversed(list(constellations)))
for constellation in constellations:
constellation_key = None
blueprints = None
if 'keyvars' in manifest['constellations'][constellation]:
constellation_key = resolve_key(manifest['constellations'][constellation]['keyvars'],
args.config, constellation)
# blueprints = {k:v for (k,v) in manifest['constellations'][constellation].items() if k != 'keyvars'}
blueprints = manifest['constellations'][constellation]['blueprints']
else:
constellation_key = constellation
blueprints = manifest['constellations'][constellation]
        if args.action == 'destroy':
            # Destroy in reverse order; blueprints is a mapping, so rebuild it with the keys reversed.
            blueprints = dict(reversed(list(blueprints.items())))
apply_blueprints(constellation_key, blueprints, args.action, args.volumes, args.config)
if __name__ == "__main__":
main()
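# A minimal sketch (an assumption, not taken from the package docs) of the manifest.yml
# layout that main() expects: a `constellations` mapping whose values are either a plain
# mapping of blueprint-name -> config, or a mapping with `keyvars` and `blueprints` keys.
_EXAMPLE_MANIFEST = """
constellations:
  core:
    keyvars: [team, env]
    blueprints:
      network:
        cidr_block: 10.0.0.0/16
  apps:
    service:
      instance_count: 2
"""
# yaml.load(_EXAMPLE_MANIFEST, Loader=Loader) yields the nested dicts iterated above.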
|
PypiClean
|
/pythong-0.7.1.tar.gz/pythong-0.7.1/README.rst
|
pythong
=======
:Author: David Gay <[email protected]>
Set up a minimal, yet comfortable structure for a Python project.
Features
--------
- Create a standard Python project directory structure
- Get help creating your setup.py file, or
choose to write it yourself by passing the ``--snap``
command
- Clear your project of messy build files (build/dist/egg/pyc)
with the ``--wash`` command
- Generate a distribute_setup.py file to use a setup.py file
with distribute
- Use a tree-style menu to set your PyPI classifiers in your
setup.py file
Example Usage
-------------
Create a new project like so::
$ pythong mynewproject
Or, for a quicker setup... ::
$ pythong --snap
Including the project name in the command is optional.
You can **wash** your pythong of messy build files::
$ pythong --wash
Pythong will help you add classifiers to your setup.py
during project creation, or after the fact with the **label** command::
$ pythong --label
Files and directories can be added to the manifest file with **pin**::
$ pythong --pin [FILE_OR_DIRECTORY]
A full list of options can be seen with::
$ pythong --help
Get Pythong
-----------
You can install the latest release of Pythong from `PyPI
<https://pypi.python.org/pypi/pythong>`_ with pip::
$ pip install pythong
You can also get the source from PyPI or `GitHub
<https://github.com/oddshocks/pythong>`_.
Contributions are welcome! Yay, software freedom!
License
-------
pythong is released under the GNU GPLv3+.
Contributors
------------
Feel free to add your name.
- David Gay <[email protected]>
- Ryan Scott Brown <[email protected]>
- Ralph Bean <[email protected]>
|
PypiClean
|
/ansible-8.3.0-py3-none-any.whl/ansible_collections/netbox/netbox/plugins/modules/netbox_location.py
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: netbox_location
short_description: Create, update or delete locations within NetBox
description:
- Creates, updates or removes locations from NetBox
notes:
- Tags should be defined as a YAML list
- This should be run with connection C(local) and hosts C(localhost)
author:
- Andrew Simmons (@andybro19)
requirements:
- pynetbox
version_added: '3.3.0'
extends_documentation_fragment:
- netbox.netbox.common
options:
data:
type: dict
description:
- Defines the location configuration
suboptions:
name:
description:
- The name of the location
required: true
type: str
slug:
description:
- The slugified version of the name or custom slug.
- This is auto-generated following NetBox rules if not provided
required: false
type: str
site:
description:
- Required if I(state=present) and the location does not exist yet
required: false
type: raw
parent_location:
description:
- The parent location the location will be associated with
required: false
type: raw
tenant:
description:
- The tenant that the location will be associated with
required: false
type: raw
version_added: "3.8.0"
description:
description:
- The description of the location
required: false
type: str
tags:
description:
- The tags to add/update
required: false
type: list
elements: raw
version_added: "3.6.0"
custom_fields:
description:
- Must exist in NetBox
required: false
type: dict
version_added: "3.6.0"
required: true
"""
EXAMPLES = r"""
- name: "Test NetBox modules"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create location within NetBox with only required information
netbox.netbox.netbox_location:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test location
site: Test Site
state: present
- name: Create location within NetBox with a parent location
netbox.netbox.netbox_location:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Child location
site: Test Site
parent_location: Test location
state: present
- name: Delete location within NetBox
netbox.netbox.netbox_location:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test location
state: absent
"""
RETURN = r"""
location:
description: Serialized object as created or already existent within NetBox
returned: success (when I(state=present))
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_dcim import (
NetboxDcimModule,
NB_LOCATIONS,
)
from copy import deepcopy
def main():
"""
Main entry point for module execution
"""
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
slug=dict(required=False, type="str"),
site=dict(required=False, type="raw"),
parent_location=dict(required=False, type="raw"),
tenant=dict(required=False, type="raw"),
description=dict(required=False, type="str"),
tags=dict(required=False, type="list", elements="raw"),
custom_fields=dict(required=False, type="dict"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_location = NetboxDcimModule(module, NB_LOCATIONS)
netbox_location.run()
if __name__ == "__main__": # pragma: no cover
main()
|
PypiClean
|
/adapter_transformers-3.2.1-py3-none-any.whl/transformers/trainer.py
|
import contextlib
import functools
import glob
import inspect
import math
import os
import random
import re
import shutil
import sys
import time
import warnings
from collections.abc import Mapping
from distutils.util import strtobool
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
import numpy as np
import torch
import torch.distributed as dist
from packaging import version
from torch import nn
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from huggingface_hub import Repository, create_repo
from . import __version__
from .configuration_utils import PretrainedConfig
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .debug_utils import DebugOption, DebugUnderflowOverflow
from .deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
from .dependency_versions_check import dep_version_check
from .modelcard import TrainingSummary
from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model
from .models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES
from .optimization import Adafactor, get_scheduler
from .pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_11
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_module_class_from_name,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
FSDPOption,
HPSearchBackend,
HubStrategy,
IntervalStrategy,
PredictionOutput,
RemoveColumnsCollator,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
enable_full_determinism,
find_executable_batch_size,
get_last_checkpoint,
has_length,
number_of_arguments,
seed_worker,
set_seed,
speed_metrics,
)
from .training_args import OptimizerNames, ParallelMode, TrainingArguments
from .utils import (
CONFIG_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
can_return_loss,
find_labels,
get_full_repo_name,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_ipex_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_compile_available,
is_torch_tpu_available,
logging,
)
from .utils.generic import ContextManagers
_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if is_datasets_available():
import datasets
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
dep_version_check("fairscale")
import fairscale
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.nn.wrap import auto_wrap
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from smdistributed.modelparallel import __version__ as SMP_VERSION
IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10")
from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
else:
IS_SAGEMAKER_MP_POST_1_10 = False
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
# Name of the files used for checkpointing
TRAINING_ARGS_NAME = "training_args.bin"
TRAINER_STATE_NAME = "trainer_state.json"
OPTIMIZER_NAME = "optimizer.pt"
SCHEDULER_NAME = "scheduler.pt"
SCALER_NAME = "scaler.pt"
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.
<Tip>
[`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
models.
</Tip>
args ([`TrainingArguments`], *optional*):
The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
`output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
data_collator (`DataCollator`, *optional*):
The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
default to [`default_data_collator`] if no `tokenizer` is provided, an instance of
[`DataCollatorWithPadding`] otherwise.
train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*):
The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed.
Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
`torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
sets the seed of the RNGs used.
eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]]), *optional*):
The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
dataset prepending the dictionary key to the metric name.
tokenizer ([`PreTrainedTokenizerBase`], *optional*):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (`Callable[[], PreTrainedModel]`, *optional*):
A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
from a new instance of the model as given by this function.
The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to
be able to choose different architectures according to hyper parameters (such as layer count, sizes of
inner layers, dropout probabilities etc).
compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
The function that will be used to compute metrics at evaluation. Must take an [`EvalPrediction`] and return
a dictionary mapping metric names (strings) to metric values.
callbacks (List of [`TrainerCallback`], *optional*):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in [here](callback).
If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model
and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
A function that preprocesses the logits right before caching them at each evaluation step. Must take two
tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
by this function will be reflected in the predictions received by `compute_metrics`.
Note that the labels (second parameter) will be `None` if the dataset does not have them.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to `False` if model parallel or deepspeed is used, or if the default
`TrainingArguments.place_model_on_device` is overridden to return `False` .
- **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
in `train`)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
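    # A minimal usage sketch (illustrative only; `my_model`, `my_train_dataset` and
    # `my_eval_dataset` are placeholders, not objects defined in this file):
    #
    #   args = TrainingArguments(output_dir="out", num_train_epochs=1)
    #   trainer = Trainer(model=my_model, args=args, train_dataset=my_train_dataset)
    #   trainer.train()
    #   metrics = trainer.evaluate(eval_dataset=my_eval_dataset)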
def __init__(
self,
model: Union[PreTrainedModel, nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using `model_init`, so initialization is reproducible
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# set the correct log level depending on the node
log_level = args.get_process_log_level()
logging.set_verbosity(log_level)
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will"
" overwrite your model when calling the `train` method. This will become a fatal error in the next"
" release.",
FutureWarning,
)
self.model_init = model_init
if model.__class__.__name__ in MODEL_MAPPING_NAMES:
raise ValueError(
f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
"computes hidden states and does not accept any labels. You should choose a model with a head "
"suitable for your task like any of the `AutoModelForXxx` listed at "
"https://huggingface.co/docs/transformers/model_doc/auto."
)
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# At this stage the model is already loaded
if getattr(model, "is_loaded_in_8bit", False):
raise ValueError(
"The model you want to train is loaded in 8-bit precision. "
"Training an 8-bit model is not supported yet. "
)
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if len(args.fsdp) > 0:
raise ValueError(
"Using --sharded_ddp xxx together with --fsdp is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
self.fsdp = None
if len(args.fsdp) > 0:
if args.deepspeed:
raise ValueError(
"Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using fsdp only works in distributed training.")
# dep_version_check("torch>=1.12.0")
# Would have to update setup.py with torch>=1.12.0
# which isn't ideally given that it will force people not using FSDP to also use torch>=1.12.0
# below is the current alternative.
if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"):
raise ValueError("FSDP requires PyTorch >= 1.12.0")
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
if FSDPOption.FULL_SHARD in args.fsdp:
self.fsdp = ShardingStrategy.FULL_SHARD
elif FSDPOption.SHARD_GRAD_OP in args.fsdp:
self.fsdp = ShardingStrategy.SHARD_GRAD_OP
elif FSDPOption.NO_SHARD in args.fsdp:
self.fsdp = ShardingStrategy.NO_SHARD
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
# 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first
# 4. Sharded DDP - same as MP
# 5. FSDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or args.deepspeed
or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
or (self.fsdp is not None)
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device:
self._move_model_to_device(model, args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.preprocess_logits_for_metrics = preprocess_logits_for_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument. "
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
if is_torch_tpu_available() and self.optimizer is not None:
for param in self.model.parameters():
model_device = param.device
break
for param_group in self.optimizer.param_groups:
if len(param_group["params"]) > 0:
optimizer_device = param_group["params"][0].device
break
if model_device != optimizer_device:
                    raise ValueError(
                        "The model and the optimizer parameters are not on the same device, which probably means you"
                        " created an optimizer around your model **before** putting on the device and passing it to the"
                        " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and"
                        " `model.to(xm.xla_device())` are performed before the optimizer creation in your script."
                    )
if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and (
self.optimizer is not None or self.lr_scheduler is not None
):
            raise RuntimeError(
                "Passing `optimizers` is not allowed if Fairscale, Deepspeed or PyTorch FSDP is enabled. "
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create clone of distant repo and output directory if needed
if self.args.push_to_hub:
self.init_git_repo(at_init=True)
# In case of pull, we need to make sure every process has the latest.
if is_torch_tpu_available():
xm.rendezvous("init git repo")
elif args.local_rank != -1:
dist.barrier()
if self.args.should_save:
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if (
train_dataset is not None
and isinstance(train_dataset, torch.utils.data.IterableDataset)
and args.group_by_length
):
raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset`")
self._signature_columns = None
# Mixed precision setup
self.use_apex = False
self.use_cuda_amp = False
self.use_cpu_amp = False
# Mixed precision setup for SageMaker Model Parallel
if is_sagemaker_mp_enabled():
# BF16 + model parallelism in SageMaker: currently not supported, raise an error
if args.bf16:
raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead.")
if IS_SAGEMAKER_MP_POST_1_10:
# When there's mismatch between SMP config and trainer argument, use SMP config as truth
if args.fp16 != smp.state.cfg.fp16:
                    logger.warning(
                        f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
                        f"but FP16 provided in trainer argument is {args.fp16}, "
                        f"setting to {smp.state.cfg.fp16}"
                    )
args.fp16 = smp.state.cfg.fp16
else:
# smp < 1.10 does not support fp16 in trainer.
if hasattr(smp.state.cfg, "fp16"):
logger.warning(
f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
"but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer."
)
if args.fp16 or args.bf16:
if args.half_precision_backend == "auto":
if args.device == torch.device("cpu"):
if args.fp16:
raise ValueError("Tried to use `fp16` but it is not supported on cpu")
elif _is_native_cpu_amp_available:
args.half_precision_backend = "cpu_amp"
else:
raise ValueError("Tried to use cpu amp but native cpu amp is not available")
else:
args.half_precision_backend = "cuda_amp"
logger.info(f"Using {args.half_precision_backend} half precision backend")
self.do_grad_scaling = False
if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled() or is_torch_tpu_available()):
# deepspeed and SageMaker Model Parallel manage their own half precision
if args.half_precision_backend == "cuda_amp":
self.use_cuda_amp = True
self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16
self.do_grad_scaling = True
if self.sharded_ddp is not None:
self.scaler = ShardedGradScaler()
elif self.fsdp is not None:
if self.amp_dtype == torch.float16:
from torch.distributed.fsdp.sharded_grad_scaler import (
ShardedGradScaler as FSDPShardedGradScaler,
)
self.scaler = FSDPShardedGradScaler()
else:
self.do_grad_scaling = False
self.use_cuda_amp = False
self.amp_dtype = None
elif is_torch_tpu_available():
from torch_xla.amp import GradScaler
self.scaler = GradScaler()
else:
self.scaler = torch.cuda.amp.GradScaler()
elif args.half_precision_backend == "cpu_amp":
self.use_cpu_amp = True
self.amp_dtype = torch.bfloat16
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to"
" https://www.github.com/nvidia/apex."
)
self.use_apex = True
# FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
if (
is_sagemaker_mp_enabled()
and self.use_cuda_amp
and args.max_grad_norm is not None
and args.max_grad_norm > 0
):
raise ValueError(
"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
"along 'max_grad_norm': 0 in your hyperparameters."
)
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState(
is_local_process_zero=self.is_local_process_zero(),
is_world_process_zero=self.is_world_process_zero(),
)
self.control = TrainerControl()
# Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
# returned to 0 every time flos need to be logged
self.current_flos = 0
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = find_labels(self.model.__class__)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.can_return_loss = can_return_loss(self.model.__class__)
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# Internal variables to keep track of the original batch size
self._train_batch_size = args.train_batch_size
# very last
self._memory_tracker.stop_and_update_metrics()
# torch.compile
if args.torch_compile and not is_torch_compile_available():
raise RuntimeError("Using torch.compile requires a nightly install of PyTorch.")
def add_callback(self, callback):
"""
Add a callback to the current list of [`~transformer.TrainerCallback`].
Args:
callback (`type` or [`~transformer.TrainerCallback`]):
A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of [`~transformer.TrainerCallback`] and returns it.
If the callback is not found, returns `None` (and no error is raised).
Args:
callback (`type` or [`~transformer.TrainerCallback`]):
A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
first case, will pop the first member of that class found in the list of callbacks.
Returns:
[`~transformer.TrainerCallback`]: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of [`~transformer.TrainerCallback`].
Args:
callback (`type` or [`~transformer.TrainerCallback`]):
A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _move_model_to_device(self, model, device):
model = model.to(device)
# Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
model.tie_weights()
def _set_signature_columns_if_needed(self):
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += list(set(["label", "label_ids"] + self.label_names))
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return dataset
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set"
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, "
" you can safely ignore this message."
)
columns = [k for k in signature_columns if k in dataset.column_names]
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def _get_collator_with_removed_columns(
self, data_collator: Callable, description: Optional[str] = None
) -> Callable:
"""Wrap the data collator in a callable removing unused columns."""
if not self.args.remove_unused_columns:
return data_collator
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
remove_columns_collator = RemoveColumnsCollator(
data_collator=data_collator,
signature_columns=signature_columns,
logger=logger,
description=description,
model_name=self.model.__class__.__name__,
)
return remove_columns_collator
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if self.train_dataset is None or not has_length(self.train_dataset):
return None
generator = None
if self.args.world_size <= 1:
generator = torch.Generator()
# for backwards compatibility, we generate a seed here (which is sampled from a generator seeded with
# `args.seed`) if data_seed isn't provided.
# Further on in this method, we default to `args.seed` instead.
if self.args.data_seed is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
else:
seed = self.args.data_seed
generator.manual_seed(seed)
seed = self.args.data_seed if self.args.data_seed is not None else self.args.seed
# Build the sampler.
if self.args.group_by_length:
if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
lengths = (
self.train_dataset[self.args.length_column_name]
if self.args.length_column_name in self.train_dataset.column_names
else None
)
else:
lengths = None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.args.train_batch_size * self.args.gradient_accumulation_steps,
dataset=self.train_dataset,
lengths=lengths,
model_input_name=model_input_name,
generator=generator,
)
else:
return DistributedLengthGroupedSampler(
self.args.train_batch_size * self.args.gradient_accumulation_steps,
dataset=self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
seed=seed,
)
else:
if self.args.world_size <= 1:
return RandomSampler(self.train_dataset, generator=generator)
elif (
self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
and not self.args.dataloader_drop_last
):
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=seed,
)
else:
return DistributedSampler(
self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=seed,
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training [`~torch.utils.data.DataLoader`].
Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_dataset = self.train_dataset
data_collator = self.data_collator
if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description="training")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="training")
if isinstance(train_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
train_dataset = IterableDatasetShard(
train_dataset,
batch_size=self._train_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
train_dataset,
batch_size=self.args.per_device_train_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
train_sampler = self._get_train_sampler()
return DataLoader(
train_dataset,
batch_size=self._train_batch_size,
sampler=train_sampler,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
worker_init_fn=seed_worker,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:
# Deprecated code
if self.args.use_legacy_prediction_loop:
if is_torch_tpu_available():
return SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
if self.args.world_size <= 1:
return SequentialSampler(eval_dataset)
else:
return ShardSampler(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (`torch.utils.data.Dataset`, *optional*):
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
by the `model.forward()` method are automatically removed. It must implement `__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
data_collator = self.data_collator
if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation")
if isinstance(eval_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
eval_dataset = IterableDatasetShard(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
eval_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (`torch.utils.data.Dataset`, *optional*):
The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. It must implement `__len__`.
"""
data_collator = self.data_collator
if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description="test")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="test")
if isinstance(test_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
test_dataset = IterableDatasetShard(
test_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
`create_scheduler`) in a subclass.
"""
self.create_optimizer()
if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16:
# If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer
optimizer = self.optimizer.optimizer
else:
optimizer = self.optimizer
self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through `optimizers`, or subclass and override this method in a subclass.
"""
opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
if self.optimizer is None:
decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if optimizer_cls.__name__ == "Adam8bit":
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
skipped = 0
for module in opt_model.modules():
if isinstance(module, nn.Embedding):
skipped += sum(dict((p.data_ptr(), p.numel()) for p in module.parameters()).values())
print(f"skipped {module}: {skipped/2**20}M params")
manager.register_module_override(module, "weight", {"optim_bits": 32})
logger.debug(f"bitsandbytes: will optimize {module} in fp32")
print(f"skipped: {skipped/2**20}M params")
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
return self.optimizer
@staticmethod
def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:
"""
Returns the optimizer class and optimizer parameters based on the training arguments.
Args:
args (`transformers.training_args.TrainingArguments`):
The training arguments for the training session.
"""
# parse args.optim_args
optim_args = {}
if args.optim_args:
for mapping in args.optim_args.replace(" ", "").split(","):
key, value = mapping.split("=")
optim_args[key] = value
optimizer_kwargs = {"lr": args.learning_rate}
adam_kwargs = {
"betas": (args.adam_beta1, args.adam_beta2),
"eps": args.adam_epsilon,
}
if args.optim == OptimizerNames.ADAFACTOR:
optimizer_cls = Adafactor
optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
elif args.optim == OptimizerNames.ADAMW_HF:
from .optimization import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
elif args.optim == OptimizerNames.ADAMW_TORCH:
from torch.optim import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:
try:
from torch_xla.amp.syncfree import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:
try:
from apex.optimizers import FusedAdam
optimizer_cls = FusedAdam
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
elif args.optim == OptimizerNames.ADAMW_BNB:
try:
from bitsandbytes.optim import Adam8bit
optimizer_cls = Adam8bit
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer tried to instantiate bnb Adam8bit but bnb is not installed!")
elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:
try:
from torchdistx.optimizers import AnyPrecisionAdamW
optimizer_cls = AnyPrecisionAdamW
optimizer_kwargs.update(adam_kwargs)
# TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx.
optimizer_kwargs.update(
{
"use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
"momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
"variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
"compensation_buffer_dtype": getattr(
torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
),
}
)
except ImportError:
raise ValueError("Please install https://github.com/pytorch/torchdistx")
elif args.optim == OptimizerNames.SGD:
optimizer_cls = torch.optim.SGD
elif args.optim == OptimizerNames.ADAGRAD:
optimizer_cls = torch.optim.Adagrad
else:
raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
return optimizer_cls, optimizer_kwargs
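    # Illustrative note: `args.optim_args` is parsed above as a comma-separated list of
    # `key=value` pairs with spaces stripped, e.g. "use_kahan_summation=False,momentum_dtype=float32";
    # in this version only the anyprecision branch reads those entries.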
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
"""
Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
passed as an argument.
Args:
num_training_steps (int): The number of training steps to do.
"""
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
return self.lr_scheduler
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
dataloader.dataset does not exist or has no length, estimates as best it can
"""
try:
dataset = dataloader.dataset
# Special case for IterableDatasetShard, we need to dig deeper
if isinstance(dataset, IterableDatasetShard):
return len(dataloader.dataset.dataset)
return len(dataloader.dataset)
except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader
return len(dataloader) * self.args.per_device_train_batch_size
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
"""HP search setup code"""
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
elif self.hp_search_backend == HPSearchBackend.SIGOPT:
params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}
elif self.hp_search_backend == HPSearchBackend.WANDB:
params = trial
for key, value in params.items():
if not hasattr(self.args, key):
logger.warning(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in"
" `TrainingArguments`."
)
continue
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info(f"Trial: {trial.params}")
if self.hp_search_backend == HPSearchBackend.SIGOPT:
logger.info(f"SigOpt Assignments: {trial.assignments}")
if self.hp_search_backend == HPSearchBackend.WANDB:
logger.info(f"W&B Sweep parameters: {trial}")
if self.args.deepspeed:
# Rebuild the deepspeed config to reflect the updated training parameters
from transformers.deepspeed import HfTrainerDeepSpeedConfig
self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed)
self.args.hf_deepspeed_config.trainer_config_process(self.args)
def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, step)
if trial.should_prune():
self.callback_handler.on_train_end(self.args, self.state, self.control)
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir, _internal_call=True)
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
def call_model_init(self, trial=None):
model_init_argcount = number_of_arguments(self.model_init)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def torch_jit_model_eval(self, model, dataloader, training=False):
if not training:
if dataloader is None:
logger.warning("Failed to use PyTorch JIT mode because the current dataloader is None.")
return model
example_batch = next(iter(dataloader))
example_batch = self._prepare_inputs(example_batch)
try:
jit_model = model.eval()
with ContextManagers([self.autocast_smart_context_manager(cache_enabled=False), torch.no_grad()]):
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.14.0"):
if isinstance(example_batch, dict):
jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False)
else:
jit_model = torch.jit.trace(
jit_model,
example_kwarg_inputs={key: example_batch[key] for key in example_batch},
strict=False,
)
else:
jit_inputs = []
for key in example_batch:
example_tensor = torch.ones_like(example_batch[key])
jit_inputs.append(example_tensor)
jit_inputs = tuple(jit_inputs)
jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False)
jit_model = torch.jit.freeze(jit_model)
with torch.no_grad():
jit_model(**example_batch)
jit_model(**example_batch)
model = jit_model
self.use_cpu_amp = False
self.use_cuda_amp = False
except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e:
logger.warning(f"failed to use PyTorch jit mode due to: {e}.")
return model
def ipex_optimize_model(self, model, training=False, dtype=torch.float32):
if not is_ipex_available():
raise ImportError(
"Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer"
" to https://github.com/intel/intel-extension-for-pytorch."
)
import intel_extension_for_pytorch as ipex
if not training:
model.eval()
dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype
# conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings
model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train)
else:
if not model.training:
model.train()
model, self.optimizer = ipex.optimize(
model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1"
)
return model
def _wrap_model(self, model, training=True, dataloader=None):
if self.args.torch_compile:
model = torch.compile(model, backend=self.args.torch_compile_backend, mode=self.args.torch_compile_mode)
if self.args.use_ipex:
dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32
model = self.ipex_optimize_model(model, training, dtype=dtype)
if is_sagemaker_mp_enabled():
# Wrapping the base model twice in a DistributedModel will raise an error.
if isinstance(self.model_wrapped, smp.model.DistributedModel):
return self.model_wrapped
return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = nn.DataParallel(model)
if self.args.jit_mode_eval:
start_time = time.time()
model = self.torch_jit_model_eval(model, dataloader, training)
self.jit_compilation_time = round(time.time() - start_time, 4)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyway.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16 or self.args.bf16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
# Distributed training using PyTorch FSDP
elif self.fsdp is not None:
# PyTorch FSDP!
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
if FSDPOption.OFFLOAD in self.args.fsdp:
cpu_offload = CPUOffload(offload_params=True)
else:
cpu_offload = CPUOffload(offload_params=False)
auto_wrap_policy = None
if FSDPOption.AUTO_WRAP in self.args.fsdp:
if self.args.fsdp_min_num_params > 0:
auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=self.args.fsdp_min_num_params
)
elif self.args.fsdp_transformer_layer_cls_to_wrap is not None:
transformer_cls_to_wrap = get_module_class_from_name(
model, self.args.fsdp_transformer_layer_cls_to_wrap
)
if transformer_cls_to_wrap is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
# Transformer layer class to wrap
transformer_layer_cls={transformer_cls_to_wrap},
)
mixed_precision_policy = None
dtype = None
if self.args.fp16:
dtype = torch.float16
elif self.args.bf16:
dtype = torch.bfloat16
if dtype is not None:
mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
if type(model) != FSDP:
# XXX: Breaking the self.model convention but I see no way around it for now.
self.model = model = FSDP(
model,
sharding_strategy=self.fsdp,
cpu_offload=cpu_offload,
auto_wrap_policy=auto_wrap_policy,
mixed_precision=mixed_precision_policy,
device_id=self.args.device,
)
elif is_sagemaker_dp_enabled():
model = nn.parallel.DistributedDataParallel(
model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
)
elif self.args.local_rank != -1:
kwargs = {}
if self.args.ddp_find_unused_parameters is not None:
kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
else:
kwargs["find_unused_parameters"] = True
if self.args.ddp_bucket_cap_mb is not None:
kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None,
output_device=self.args.local_rank if self.args._n_gpu != 0 else None,
**kwargs,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
ignore_keys_for_eval: Optional[List[str]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (`str` or `bool`, *optional*):
If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
`bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
The trial run or the hyperparameter dictionary for hyperparameter search.
ignore_keys_for_eval (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions for evaluation during the training.
kwargs:
Additional keyword arguments used to hide deprecated arguments
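Example (a minimal usage sketch, not taken from this module's tests; `my_model`, `my_training_args` and
`my_train_dataset` are placeholder objects the caller is assumed to have built already):
```python
>>> trainer = Trainer(model=my_model, args=my_training_args, train_dataset=my_train_dataset)
>>> train_output = trainer.train()  # or trainer.train(resume_from_checkpoint=True) to resume
>>> train_output.metrics["train_loss"]  # doctest: +SKIP
```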
"""
if resume_from_checkpoint is False:
resume_from_checkpoint = None
# memory metrics - must set up as early as possible
self._memory_tracker.start()
args = self.args
self.is_in_train = True
# do_train is not a reliable argument, as it might not be set and .train() still called, so
# the following is a workaround:
if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
self._move_model_to_device(self.model, args.device)
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
self._train_batch_size = self.args.train_batch_size
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled():
self._load_from_checkpoint(resume_from_checkpoint)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self._move_model_to_device(self.model, args.device)
self.model_wrapped = self.model
inner_training_loop = find_executable_batch_size(
self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
)
return inner_training_loop(
args=args,
resume_from_checkpoint=resume_from_checkpoint,
trial=trial,
ignore_keys_for_eval=ignore_keys_for_eval,
)
def _inner_training_loop(
self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
):
self._train_batch_size = batch_size
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
len_dataloader = None
if has_length(train_dataloader):
len_dataloader = len(train_dataloader)
num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
num_examples = self.num_examples(train_dataloader)
if args.max_steps > 0:
max_steps = args.max_steps
num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
args.max_steps % num_update_steps_per_epoch > 0
)
# May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
# the best we can do.
num_train_samples = args.max_steps * total_train_batch_size
else:
max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(args.num_train_epochs)
num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs
elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size
max_steps = args.max_steps
# Setting a very large number of epochs so we go as many times as necessary over the iterator.
num_train_epochs = sys.maxsize
num_update_steps_per_epoch = max_steps
num_examples = total_train_batch_size * args.max_steps
num_train_samples = args.max_steps * total_train_batch_size
else:
raise ValueError(
"args.max_steps must be set to a positive value if dataloader does not have a length, was"
f" {args.max_steps}"
)
if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
if self.args.n_gpu > 1:
# nn.DataParallel(model) replicates the model, creating new variables, so the module
# references registered here no longer work on the other GPUs, breaking the debug feature.
raise ValueError(
"Currently --debug underflow_overflow is not supported under DP. Please use DDP"
" (torch.distributed.launch)."
)
else:
debug_overflow = DebugUnderflowOverflow(self.model) # noqa
delay_optimizer_creation = (
self.sharded_ddp is not None
and self.sharded_ddp != ShardedDDPOption.SIMPLE
or is_sagemaker_mp_enabled()
or self.fsdp is not None
)
if args.deepspeed:
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Activate gradient checkpointing if needed
if args.gradient_checkpointing:
self.model.gradient_checkpointing_enable()
model = self._wrap_model(self.model_wrapped)
if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:
self._load_from_checkpoint(resume_from_checkpoint, model)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
logger.info(
f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
)
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
steps_trained_progress_bar = None
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
"flag to your launch command, but you will resume the training on data already seen by your model."
)
if self.is_local_process_zero() and not args.disable_tqdm:
steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
steps_trained_progress_bar.set_description("Skipping the first batches")
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
if self.hp_name is not None and self._trial is not None:
# use self._trial because the SigOpt/Optuna HPO only calls `_hp_search_setup(trial)` instead of passing the
# trial parameter to `train` when using DDP.
self.state.trial_name = self.hp_name(self._trial)
if trial is not None:
assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
self.state.trial_params = hp_params(assignments)
else:
self.state.trial_params = None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(args.device)
# _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
model.zero_grad()
self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not args.ignore_data_skip:
for epoch in range(epochs_trained):
is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance(
train_dataloader.sampler, RandomSampler
)
if is_torch_less_than_1_11 or not is_random_sampler:
# We just need to begin an iteration to create the randomization of the sampler.
# That was before PyTorch 1.11 however...
for _ in train_dataloader:
break
else:
# Otherwise we need to call the whooooole sampler cause there is some random operation added
# AT THE VERY END!
_ = list(train_dataloader.sampler)
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
elif hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDatasetShard):
train_dataloader.dataset.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if len_dataloader is not None
else args.max_steps * args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
step = -1
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
if steps_trained_progress_bar is not None:
steps_trained_progress_bar.update(1)
if steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
continue
elif steps_trained_progress_bar is not None:
steps_trained_progress_bar.close()
steps_trained_progress_bar = None
if step % args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
if (
((step + 1) % args.gradient_accumulation_steps != 0)
and args.local_rank != -1
and args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss_step = self.training_step(model, inputs)
else:
tr_loss_step = self.training_step(model, inputs)
if (
args.logging_nan_inf_filter
and not is_torch_tpu_available()
and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
):
# if loss is nan or inf simply add the average of previous logged losses
tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
else:
tr_loss += tr_loss_step
self.current_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.do_grad_scaling:
# Reduce gradients first for XLA
if is_torch_tpu_available():
gradients = xm._fetch_gradients(self.optimizer)
xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if is_sagemaker_mp_enabled() and args.fp16:
self.optimizer.clip_master_grads(args.max_grad_norm)
elif hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
args.max_grad_norm,
)
# Optimizer step
optimizer_was_run = True
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
if self.do_grad_scaling:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
xm.optimizer_step(self.optimizer)
elif self.do_grad_scaling:
scale_before = self.scaler.get_scale()
self.scaler.step(self.optimizer)
self.scaler.update()
scale_after = self.scaler.get_scale()
optimizer_was_run = scale_before <= scale_after
else:
self.optimizer.step()
if optimizer_was_run and not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
else:
self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
if step < 0:
logger.warning(
"There seems to be not a single sample in your epoch_iterator, stopping training at step"
f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
f" num_steps ({max_steps}) higher than the number of available samples."
)
self.control.should_training_stop = True
self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
# Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif args.local_rank != -1:
dist.barrier()
elif is_sagemaker_mp_enabled():
smp.barrier()
self._load_best_model()
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
train_loss = self._total_loss_scalar / self.state.global_step
metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
self.store_flos()
metrics["total_flos"] = self.state.total_flos
metrics["train_loss"] = train_loss
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
self.log(metrics)
run_dir = self._get_output_dir(trial)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir)
# Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save.
if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1:
for checkpoint in checkpoints_sorted:
if checkpoint != self.state.best_model_checkpoint:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
self.control = self.callback_handler.on_train_end(args, self.state, self.control)
return TrainOutput(self.state.global_step, train_loss, metrics)
def _get_output_dir(self, trial):
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
run_id = tune.get_trial_id()
elif self.hp_search_backend == HPSearchBackend.SIGOPT:
run_id = trial.id
elif self.hp_search_backend == HPSearchBackend.WANDB:
import wandb
run_id = wandb.run.id
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
return run_dir
def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
if model is None:
model = self.model
if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) and not os.path.isfile(
os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME)
):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
logger.info(f"Loading model from {resume_from_checkpoint}.")
if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
checkpoint_version = config.transformers_version
if checkpoint_version is not None and checkpoint_version != __version__:
logger.warning(
f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
f"Transformers but your current version is {__version__}. This is not recommended and could "
"yield to errors or unwanted behaviors."
)
if self.args.deepspeed:
# will be resumed in deepspeed_init
pass
elif os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
# If the model is on the GPU, it still works!
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")):
# If the 'user_content.pt' file exists, load with the new smp api.
# Checkpoint must have been saved with the new smp api.
smp.resume_from_checkpoint(
path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False
)
else:
# If the 'user_content.pt' file does NOT exist, load with the old smp api.
# Checkpoint must have been saved with the old smp api.
if hasattr(self.args, "fp16") and self.args.fp16 is True:
logger.warning(
"Enabling FP16 and loading from smp < 1.10 checkpoint together is not suppported."
)
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# Required for smp to not auto-translate state_dict from hf to smp (is already smp).
state_dict["_smp_is_partial"] = False
load_result = model.load_state_dict(state_dict, strict=True)
# release memory
del state_dict
else:
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
# which takes *args instead of **kwargs
load_result = model.load_state_dict(state_dict, False)
# release memory
del state_dict
self._issue_warnings_after_load(load_result)
else:
# We load the sharded checkpoint
load_result = load_sharded_checkpoint(model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled())
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
def _load_best_model(self):
logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
if os.path.exists(best_model_path):
if self.deepspeed:
if self.model_wrapped is not None:
# this removes the pre-hooks from the previous engine
self.model_wrapped.destroy()
self.model_wrapped = None
# temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self,
num_training_steps=self.args.max_steps,
resume_from_checkpoint=self.state.best_model_checkpoint,
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")):
# If the 'user_content.pt' file exists, load with the new smp api.
# Checkpoint must have been saved with the new smp api.
smp.resume_from_checkpoint(
path=self.state.best_model_checkpoint,
tag=WEIGHTS_NAME,
partial=False,
load_optimizer=False,
)
else:
# If the 'user_content.pt' file does NOT exist, load with the old smp api.
# Checkpoint must have been saved with the old smp api.
state_dict = torch.load(best_model_path, map_location="cpu")
state_dict["_smp_is_partial"] = False
load_result = model.load_state_dict(state_dict, strict=True)
else:
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(best_model_path, map_location="cpu")
# If the model is on the GPU, it still works!
# workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
# which takes *args instead of **kwargs
load_result = model.load_state_dict(state_dict, False)
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)):
load_result = load_sharded_checkpoint(
model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled()
)
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
else:
logger.warning(
f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
"on multiple nodes, you should activate `--save_on_each_node`."
)
def _issue_warnings_after_load(self, load_result):
if len(load_result.missing_keys) != 0:
if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(
self.model._keys_to_ignore_on_save
):
self.model.tie_weights()
else:
logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warning(
f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
if self.control.should_log:
if is_torch_tpu_available():
xm.mark_step()
logs: Dict[str, float] = {}
# all_gather + mean() to get average loss over all processes
tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.store_flos()
self.log(logs)
metrics = None
if self.control.should_evaluate:
if isinstance(self.eval_dataset, dict):
for eval_dataset_name, eval_dataset in self.eval_dataset.items():
metrics = self.evaluate(
eval_dataset=eval_dataset,
ignore_keys=ignore_keys_for_eval,
metric_key_prefix=f"eval_{eval_dataset_name}",
)
else:
metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
self._report_to_hp_search(trial, self.state.global_step, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _load_rng_state(self, checkpoint):
# Load RNG states from `checkpoint`
if checkpoint is None:
return
if self.args.world_size > 1:
process_index = self.args.process_index
rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth")
if not os.path.isfile(rng_file):
logger.info(
f"Didn't find an RNG file for process {process_index}, if you are resuming a training that "
"wasn't launched in a distributed fashion, reproducibility is not guaranteed."
)
return
else:
rng_file = os.path.join(checkpoint, "rng_state.pth")
if not os.path.isfile(rng_file):
logger.info(
"Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
"fashion, reproducibility is not guaranteed."
)
return
checkpoint_rng_state = torch.load(rng_file)
random.setstate(checkpoint_rng_state["python"])
np.random.set_state(checkpoint_rng_state["numpy"])
torch.random.set_rng_state(checkpoint_rng_state["cpu"])
if torch.cuda.is_available():
if self.args.local_rank != -1:
torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
else:
try:
torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
except Exception as e:
logger.info(
f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}"
"\nThis won't yield the same results as if the training had not been interrupted."
)
if is_torch_tpu_available():
xm.set_rng_state(checkpoint_rng_state["xla"])
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is None and trial is None:
self.store_flos()
run_dir = self._get_output_dir(trial=trial)
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir, _internal_call=True)
if self.deepspeed:
# under zero3 the model file itself doesn't get saved since it's bogus, unless the deepspeed
# config `stage3_gather_16bit_weights_on_model_save` is True
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
smp.barrier()
if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
smp.save(
opt_state_dict,
os.path.join(output_dir, OPTIMIZER_NAME),
partial=True,
v3=smp.state.cfg.shard_optimizer_state,
)
if self.args.should_save:
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
elif self.args.should_save and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
# Save RNG state in non-distributed training
rng_states = {
"python": random.getstate(),
"numpy": np.random.get_state(),
"cpu": torch.random.get_rng_state(),
}
if torch.cuda.is_available():
if self.args.local_rank == -1:
# In non-distributed training, we save the global CUDA RNG state (will take care of DataParallel)
rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
else:
rng_states["cuda"] = torch.cuda.random.get_rng_state()
if is_torch_tpu_available():
rng_states["xla"] = xm.get_rng_state()
# A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
# not yet exist.
os.makedirs(output_dir, exist_ok=True)
if self.args.world_size <= 1:
torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
else:
torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"))
if self.args.push_to_hub:
self._push_from_checkpoint(output_dir)
# Maybe delete some older checkpoints.
if self.args.should_save:
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if self.deepspeed:
# deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
return
checkpoint_file_exists = (
glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*")
if is_sagemaker_mp_enabled()
else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME))
)
if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(checkpoint, "user_content.pt")):
# Optimizer checkpoint was saved with smp >= 1.10
def opt_load_hook(mod, opt):
opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
else:
# Optimizer checkpoint was saved with smp < 1.10
def opt_load_hook(mod, opt):
if IS_SAGEMAKER_MP_POST_1_10:
opt.load_state_dict(
smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True)
)
else:
opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
self.model_wrapped.register_post_step_hook(opt_load_hook)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)):
self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
Launch a hyperparameter search using `optuna`, `Ray Tune`, `SigOpt`, or `W&B`. The optimized quantity is
determined by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, and the sum of all metrics otherwise.
<Tip warning={true}>
To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
optimizer/scheduler.
</Tip>
Args:
hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
A function that defines the hyperparameter search space. Will default to
[`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
[`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
method. Will default to [`~trainer_utils.default_compute_objective`].
n_trials (`int`, *optional*, defaults to 20):
The number of trial runs to test.
direction (`str`, *optional*, defaults to `"minimize"`):
Whether to optimize for a greater or lower objective. Can be `"minimize"` or `"maximize"`; pick
`"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics.
backend (`str` or [`~trainer_utils.HPSearchBackend`], *optional*):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
on which one is installed. If all are installed, will default to optuna.
hp_name (`Callable[["optuna.Trial"], str]]`, *optional*):
A function that defines the trial/run name. Will default to None.
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
information see:
- the documentation of
[optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
- the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
- the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)
Returns:
[`trainer_utils.BestRun`]: All the information about the best run.
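Example (a hedged sketch assuming the optuna backend is installed; `model_init`, `my_training_args` and
`my_train_dataset` are placeholders the caller is assumed to provide, and the search space shown is arbitrary):
```python
>>> def my_hp_space(trial):
...     # the key must be the name of a `TrainingArguments` field
...     return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)}
>>> trainer = Trainer(model_init=model_init, args=my_training_args, train_dataset=my_train_dataset)
>>> best_run = trainer.hyperparameter_search(hp_space=my_hp_space, n_trials=10, direction="minimize")
>>> best_run.hyperparameters  # doctest: +SKIP
```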
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`. "
"To install ray run `pip install ray[tune]`. "
"To install sigopt run `pip install sigopt`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
if backend == HPSearchBackend.SIGOPT and not is_sigopt_available():
raise RuntimeError("You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.")
if backend == HPSearchBackend.WANDB and not is_wandb_available():
raise RuntimeError("You picked the wandb backend, but it is not installed. Use `pip install wandb`.")
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
backend_dict = {
HPSearchBackend.OPTUNA: run_hp_search_optuna,
HPSearchBackend.RAY: run_hp_search_ray,
HPSearchBackend.SIGOPT: run_hp_search_sigopt,
HPSearchBackend.WANDB: run_hp_search_wandb,
}
best_run = backend_dict[backend](self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
def log(self, logs: Dict[str, float]) -> None:
"""
Log `logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (`Dict[str, float]`):
The values to log.
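Example (an illustrative override sketch only; the `my_` key prefix is an arbitrary choice, not a convention
of this class):
```python
>>> class PrefixedTrainer(Trainer):
...     def log(self, logs):
...         # rename every key before delegating to the default logging behavior
...         super().log({f"my_{k}": v for k, v in logs.items()})
```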
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]:
"""
Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
"""
if isinstance(data, Mapping):
return type(data)({k: self._prepare_input(v) for k, v in data.items()})
elif isinstance(data, (tuple, list)):
return type(data)(self._prepare_input(v) for v in data)
elif isinstance(data, torch.Tensor):
kwargs = dict(device=self.args.device)
if self.deepspeed and data.dtype != torch.int64:
# NLP model inputs are int64 and get adjusted to the right dtype of the
# embedding. Other models, such as wav2vec2, have float inputs and thus
# may need special handling to match the dtype of the model.
kwargs.update(dict(dtype=self.args.hf_deepspeed_config.dtype()))
return data.to(**kwargs)
return data
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
inputs = self._prepare_input(inputs)
if len(inputs) == 0:
raise ValueError(
"The batch received was empty, your model won't be able to train on it. Double-check that your "
f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}."
)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def compute_loss_context_manager(self):
"""
A helper wrapper to group together context managers.
"""
return self.autocast_smart_context_manager()
def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True):
"""
A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
arguments, depending on the situation.
"""
if self.use_cuda_amp or self.use_cpu_amp:
if is_torch_greater_or_equal_than_1_10:
ctx_manager = (
torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
if self.use_cpu_amp
else torch.cuda.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
)
else:
ctx_manager = torch.cuda.amp.autocast()
else:
ctx_manager = contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress()
return ctx_manager
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to train.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
Return:
`torch.Tensor`: The tensor with training loss on this batch.
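Example (a hedged sketch of overriding this hook in a subclass; the extra print is purely illustrative and
not something this class does by default):
```python
>>> class VerboseTrainer(Trainer):
...     def training_step(self, model, inputs):
...         loss = super().training_step(model, inputs)
...         print(f"step {self.state.global_step}: loss={loss.item():.4f}")
...         return loss
```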
"""
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
with self.compute_loss_context_manager():
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.do_grad_scaling:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
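Example (a hedged sketch of a custom weighted loss for a hypothetical 2-label classification model; the class
weights and the assumption that the model returns `logits` are illustrative, not part of this implementation):
```python
>>> import torch
>>> from torch import nn
>>> class WeightedLossTrainer(Trainer):
...     def compute_loss(self, model, inputs, return_outputs=False):
...         # pop the labels so the model forward does not compute its own loss
...         labels = inputs.pop("labels")
...         outputs = model(**inputs)
...         logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
...         # hypothetical per-class weights for a 2-label task
...         loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0], device=logits.device))
...         loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
...         return (loss, outputs) if return_outputs else loss
```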
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
loss = self.label_smoother(outputs, labels, shift_labels=True)
else:
loss = self.label_smoother(outputs, labels)
else:
if isinstance(outputs, dict) and "loss" not in outputs:
raise ValueError(
"The model did not return a loss from the inputs, only the following keys: "
f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
)
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be `True` for one process).
"""
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
"""
Will save the model, so you can reload it using `from_pretrained()`.
Will only save from the main process.
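Example (a minimal sketch; `"my_output_dir"` is a placeholder path, and reloading via `AutoModel` assumes the
underlying model is a standard `PreTrainedModel`):
```python
>>> trainer.save_model("my_output_dir")
>>> from transformers import AutoModel
>>> reloaded = AutoModel.from_pretrained("my_output_dir")  # doctest: +SKIP
```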
"""
if output_dir is None:
output_dir = self.args.output_dir
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif is_sagemaker_mp_enabled():
# Calling the state_dict needs to be done on the wrapped model and on all processes.
os.makedirs(output_dir, exist_ok=True)
state_dict = self.model_wrapped.state_dict()
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
if IS_SAGEMAKER_MP_POST_1_10:
# 'user_content.pt' indicates model state_dict saved with smp >= 1.10
Path(os.path.join(output_dir, "user_content.pt")).touch()
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp
or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
or self.fsdp is not None
):
state_dict = self.model.state_dict()
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
elif self.deepspeed:
# this takes care of everything as long as we aren't under zero3
if self.args.should_save:
self._save(output_dir)
if is_deepspeed_zero3_enabled():
# It's too complicated to try to override different places where the weights dump gets
# saved, so since under zero3 the file is bogus, simply delete it. The user should
# either use the deepspeed checkpoint to resume, or recover the full weights with
# zero_to_fp32.py stored in the checkpoint.
if self.args.should_save:
file = os.path.join(output_dir, WEIGHTS_NAME)
if os.path.isfile(file):
# logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
os.remove(file)
# now save the real model if stage3_gather_16bit_weights_on_model_save=True
# if false it will not be saved.
# This must be called on all ranks
if not self.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME):
logger.warning(
"deepspeed.save_16bit_model didn't save the model, since"
" stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
" zero_to_fp32.py to recover weights"
)
self.deepspeed.save_checkpoint(output_dir)
elif self.args.should_save:
self._save(output_dir)
# Push to the Hub when `save_model` is called by the user.
if self.args.push_to_hub and not _internal_call:
self.push_to_hub(commit_message="Model save")
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info(f"Saving model checkpoint to {output_dir}")
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
is_main_process=self.args.should_save,
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save)
if self.tokenizer is not None and self.args.should_save:
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += (
distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item()
)
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
for i in range(best_model_index, len(checkpoints_sorted) - 2):
checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint, ignore_errors=True)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init `compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (`Dataset`, *optional*):
Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
method.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.log(output.metrics)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
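# Hypothetical usage sketch (the names `trainer` and `my_eval_ds` are placeholders):
#
#   metrics = trainer.evaluate(eval_dataset=my_eval_ds, metric_key_prefix="val")
#   # Keys are prefixed with metric_key_prefix, e.g. the loss and speed metrics:
#   print(metrics["val_loss"], metrics.get("val_runtime"))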
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in `evaluate()`.
Args:
test_dataset (`Dataset`):
Dataset to run the predictions on. If it is a `datasets.Dataset`, columns not accepted by the
`model.forward()` method are automatically removed. It must implement the `__len__` method.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"test"`):
An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
"test_bleu" if the prefix is "test" (default).
<Tip>
If your predictions or labels have different sequence lengths (for instance because you're doing dynamic padding
in a token classification task) the predictions will be padded (on the right) to allow for concatenation into
one array. The padding index is -100.
</Tip>
Returns: *NamedTuple* A namedtuple with the following keys:
- predictions (`np.ndarray`): The predictions on `test_dataset`.
- label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
- metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
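# Hypothetical usage sketch (`trainer` and `test_ds` are placeholders):
#
#   output = trainer.predict(test_ds, metric_key_prefix="test")
#   preds = output.predictions.argmax(axis=-1)   # e.g. for a single-logits classification head
#   if output.metrics is not None:
#       print(output.metrics.get("test_loss"))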
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
"""
args = self.args
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
# if eval is called w/o train init deepspeed here
if args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(
self, num_training_steps=0, resume_from_checkpoint=None, inference=True
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
model = self._wrap_model(self.model, training=False, dataloader=dataloader)
# if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
# while ``train`` is running, cast it to the right dtype first and then put on device
if not self.is_in_train:
if args.fp16_full_eval:
model = model.to(dtype=torch.float16, device=args.device)
elif args.bf16_full_eval:
model = model.to(dtype=torch.bfloat16, device=args.device)
batch_size = self.args.eval_batch_size
logger.info(f"***** Running {description} *****")
if has_length(dataloader):
logger.info(f" Num examples = {self.num_examples(dataloader)}")
else:
logger.info(" Num examples: Unknown")
logger.info(f" Batch size = {batch_size}")
model.eval()
self.callback_handler.eval_dataloader = dataloader
# Do this before wrapping.
eval_dataset = getattr(dataloader, "dataset", None)
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
if args.past_index >= 0:
self._past = None
# Initialize containers
# losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
losses_host = None
preds_host = None
labels_host = None
inputs_host = None
# losses/preds/labels on CPU (final containers)
all_losses = None
all_preds = None
all_labels = None
all_inputs = None
# Will be useful when we have an iterable dataset so don't know its length.
observed_num_examples = 0
# Main evaluation loop
for step, inputs in enumerate(dataloader):
# Update the observed num examples
observed_batch_size = find_batch_size(inputs)
if observed_batch_size is not None:
observed_num_examples += observed_batch_size
# For batch samplers, batch_size is not known by the dataloader in advance.
if batch_size is None:
batch_size = observed_batch_size
# Prediction step
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
if is_torch_tpu_available():
xm.mark_step()
# Update containers on host
if loss is not None:
losses = self._nested_gather(loss.repeat(batch_size))
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if labels is not None:
labels = self._pad_across_processes(labels)
labels = self._nested_gather(labels)
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
if inputs_decode is not None:
inputs_decode = self._pad_across_processes(inputs_decode)
inputs_decode = self._nested_gather(inputs_decode)
inputs_host = (
inputs_decode
if inputs_host is None
else nested_concat(inputs_host, inputs_decode, padding_index=-100)
)
if logits is not None:
logits = self._pad_across_processes(logits)
logits = self._nested_gather(logits)
if self.preprocess_logits_for_metrics is not None:
logits = self.preprocess_logits_for_metrics(logits, labels)
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if inputs_host is not None:
inputs_decode = nested_numpify(inputs_host)
all_inputs = (
inputs_decode
if all_inputs is None
else nested_concat(all_inputs, inputs_decode, padding_index=-100)
)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = (
labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
)
# Set back to None to begin a new accumulation
losses_host, preds_host, inputs_host, labels_host = None, None, None, None
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if inputs_host is not None:
inputs_decode = nested_numpify(inputs_host)
all_inputs = (
inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100)
)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
# Number of samples
if has_length(eval_dataset):
num_samples = len(eval_dataset)
# The instance check is weird and does not actually check for the type, but whether the dataset has the right
# methods. Therefore we need to make sure it also has the attribute.
elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0:
num_samples = eval_dataset.num_examples
else:
if has_length(dataloader):
num_samples = self.num_examples(dataloader)
else: # both len(dataloader.dataset) and len(dataloader) fail
num_samples = observed_num_examples
if num_samples == 0 and observed_num_examples > 0:
num_samples = observed_num_examples
# Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
# samples has been rounded to a multiple of batch_size, so we truncate.
if all_losses is not None:
all_losses = all_losses[:num_samples]
if all_preds is not None:
all_preds = nested_truncate(all_preds, num_samples)
if all_labels is not None:
all_labels = nested_truncate(all_labels, num_samples)
if all_inputs is not None:
all_inputs = nested_truncate(all_inputs, num_samples)
# Metrics!
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
if args.include_inputs_for_metrics:
metrics = self.compute_metrics(
EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs)
)
else:
metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if all_losses is not None:
metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
if hasattr(self, "jit_compilation_time"):
metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
# When extracting XLA graphs for compilation, max_size is 0,
# so use inequality to avoid errors.
if tensor.shape[1] >= max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
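# Minimal illustration of the padding behavior above (values are made up):
# ragged per-process tensors are padded along dim 1 with `pad_index` so they
# can be safely gathered.
#
#   t = torch.ones(2, 3)
#   # If another process holds a (2, 5) tensor, this process pads its own
#   # tensor to (2, 5) before the gather:
#   # tensor([[1., 1., 1., -100., -100.],
#   #         [1., 1., 1., -100., -100.]])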
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on `model` using `inputs`.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to evaluate.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (`bool`):
Whether or not to return the loss only.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
logits and labels (each being optional).
"""
has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names)
# For CLIP-like models capable of returning loss values.
# If `return_loss` is not specified or is `None` in `inputs`, we check if the default value of `return_loss`
# is `True` in `model.forward`.
return_loss = inputs.get("return_loss", None)
if return_loss is None:
return_loss = self.can_return_loss
loss_without_labels = True if len(self.label_names) == 0 and return_loss else False
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels or loss_without_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels or loss_without_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels or loss_without_labels:
with self.compute_loss_context_manager():
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
with self.compute_loss_context_manager():
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
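# Sketch of a subclass override (the class name and the extra key below are
# hypothetical), useful when additional output tensors should be dropped
# before gathering:
#
#   class MyTrainer(Trainer):
#       def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
#           ignore_keys = (ignore_keys or []) + ["past_key_values"]
#           return super().prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)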
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
operations for every backward + forward pass. If using another model, either implement such a method in the
model or subclass and override this method.
Args:
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def init_git_repo(self, at_init: bool = False):
"""
Initializes a git repo in `self.args.hub_model_id`.
Args:
at_init (`bool`, *optional*, defaults to `False`):
Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is
`True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped
out.
"""
if not self.is_world_process_zero():
return
if self.args.hub_model_id is None:
repo_name = Path(self.args.output_dir).absolute().name
else:
repo_name = self.args.hub_model_id
if "/" not in repo_name:
repo_name = get_full_repo_name(repo_name, token=self.args.hub_token)
# Make sure the repo exists.
create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True)
try:
self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
except EnvironmentError:
if self.args.overwrite_output_dir and at_init:
# Try again after wiping output_dir
shutil.rmtree(self.args.output_dir)
self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
else:
raise
self.repo.git_pull()
# By default, ignore the checkpoint folders
if (
not os.path.exists(os.path.join(self.args.output_dir, ".gitignore"))
and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS
):
with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
writer.writelines(["checkpoint-*/"])
self.push_in_progress = None
def create_model_card(
self,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Union[str, List[str], None] = None,
model_name: Optional[str] = None,
finetuned_from: Optional[str] = None,
tasks: Union[str, List[str], None] = None,
dataset_tags: Union[str, List[str], None] = None,
dataset: Union[str, List[str], None] = None,
dataset_args: Union[str, List[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
language (`str`, *optional*):
The language of the model (if applicable)
license (`str`, *optional*):
The license of the model. Will default to the license of the pretrained model used, if the original
model given to the `Trainer` comes from a repo on the Hub.
tags (`str` or `List[str]`, *optional*):
Some tags to be included in the metadata of the model card.
model_name (`str`, *optional*):
The name of the model.
finetuned_from (`str`, *optional*):
The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
of the original model given to the `Trainer` (if it comes from the Hub).
tasks (`str` or `List[str]`, *optional*):
One or several task identifiers, to be included in the metadata of the model card.
dataset_tags (`str` or `List[str]`, *optional*):
One or several dataset tags, to be included in the metadata of the model card.
dataset (`str` or `List[str]`, *optional*):
One or several dataset identifiers, to be included in the metadata of the model card.
dataset_args (`str` or `List[str]`, *optional*):
One or several dataset arguments, to be included in the metadata of the model card.
"""
if not self.is_world_process_zero():
return
training_summary = TrainingSummary.from_trainer(
self,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
f.write(model_card)
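# Hypothetical usage sketch (argument values are placeholders):
#
#   trainer.create_model_card(
#       language="en",
#       tags=["text-classification"],
#       finetuned_from="bert-base-uncased",
#       dataset="imdb",
#   )
#   # Writes a draft README.md into `self.args.output_dir`.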
def _push_from_checkpoint(self, checkpoint_folder):
# Only push from one node.
if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END:
return
# If we haven't finished the last push, we don't do this one.
if self.push_in_progress is not None and not self.push_in_progress.is_done:
return
output_dir = self.args.output_dir
# To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder
modeling_files = [CONFIG_NAME, WEIGHTS_NAME]
for modeling_file in modeling_files:
if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)):
shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file))
# Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure.
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Same for the training arguments
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
try:
if self.args.hub_strategy == HubStrategy.CHECKPOINT:
# Temporarily move the checkpoint just saved for the push
tmp_checkpoint = os.path.join(output_dir, "last-checkpoint")
# We have to remove the "last-checkpoint" dir if it exists, otherwise the checkpoint is moved as a
# subfolder.
if os.path.isdir(tmp_checkpoint):
shutil.rmtree(tmp_checkpoint)
shutil.move(checkpoint_folder, tmp_checkpoint)
if self.args.save_strategy == IntervalStrategy.STEPS:
commit_message = f"Training in progress, step {self.state.global_step}"
else:
commit_message = f"Training in progress, epoch {int(self.state.epoch)}"
_, self.push_in_progress = self.repo.push_to_hub(
commit_message=commit_message, blocking=False, auto_lfs_prune=True
)
finally:
if self.args.hub_strategy == HubStrategy.CHECKPOINT:
# Move back the checkpoint to its place
shutil.move(tmp_checkpoint, checkpoint_folder)
def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
"""
Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*.
Parameters:
commit_message (`str`, *optional*, defaults to `"End of training"`):
Message to commit while pushing.
blocking (`bool`, *optional*, defaults to `True`):
Whether the function should return only when the `git push` has finished.
kwargs:
Additional keyword arguments passed along to [`~Trainer.create_model_card`].
Returns:
The URL of the commit of your model in the given repository if `blocking=False`, or a tuple with the URL of
the commit and an object to track the progress of the commit if `blocking=True`.
"""
# If a user calls manually `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but
# it might fail.
if not hasattr(self, "repo"):
self.init_git_repo()
model_name = kwargs.pop("model_name", None)
if model_name is None and self.args.should_save:
if self.args.hub_model_id is None:
model_name = Path(self.args.output_dir).name
else:
model_name = self.args.hub_model_id.split("/")[-1]
# Needs to be executed on all processes for TPU training, but will only save on the process determined by
# self.args.should_save.
self.save_model(_internal_call=True)
# Only push from one node.
if not self.is_world_process_zero():
return
# Cancel any async push in progress if blocking=True. The commits will all be pushed together.
if blocking and self.push_in_progress is not None and not self.push_in_progress.is_done:
self.push_in_progress._process.kill()
self.push_in_progress = None
git_head_commit_url = self.repo.push_to_hub(
commit_message=commit_message, blocking=blocking, auto_lfs_prune=True
)
# Push the model card separately so that it is independent from the rest of the model
if self.args.should_save:
self.create_model_card(model_name=model_name, **kwargs)
try:
self.repo.push_to_hub(
commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True
)
except EnvironmentError as exc:
logger.error(f"Error pushing update to the model card. Please read logs and retry.\n${exc}")
return git_head_commit_url
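# Hypothetical usage sketch (`trainer` is a placeholder; this assumes the hub
# settings were configured via TrainingArguments, e.g. hub_model_id/hub_token):
#
#   url = trainer.push_to_hub(commit_message="End of training", blocking=True)
#   print(url)  # commit URL returned by repo.push_to_hub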
#
# Deprecated code
#
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
"""
args = self.args
if not has_length(dataloader):
raise ValueError("dataloader must implement a working __len__")
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
# if eval is called w/o train init deepspeed here
if args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False, dataloader=dataloader)
# if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
# while ``train`` is running, cast it to the right dtype first and then put on device
if not self.is_in_train:
if args.fp16_full_eval:
model = model.to(dtype=torch.float16, device=args.device)
elif args.bf16_full_eval:
model = model.to(dtype=torch.bfloat16, device=args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info(f"***** Running {description} *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Batch size = {batch_size}")
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
# The actual number of eval_samples can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
if args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
if inputs_decode is not None:
inputs_host = (
inputs_decode
if inputs_host is None
else nested_concat(inputs_host, inputs_decode, padding_index=-100)
)
self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host, inputs_host = None, None, None, None
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
if args.include_inputs_for_metrics:
metrics = self.compute_metrics(
EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids)
)
else:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
/arm-avhclient-0.1.10.tar.gz/arm/avhclient/aws_backend.py
import logging
import os
import time
import subprocess
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Union
import boto3
from botocore.exceptions import ClientError
from botocore.exceptions import WaiterError
from semantic_version import Version, SimpleSpec
from .avh_backend import AvhBackend, AvhBackendState
class AwsBackend(AvhBackend):
"""
AVH AWS Backend
This backend runs in your Amazon account:
* Creates/starts/sets up a [new] AVH EC2 instance.
* Runs AVH-related commands.
* Gets the outputs.
* Terminates/stops the AVH EC2 instance.
The AWS credentials are expected as environment variables. See the _is_aws_credentials_present method.
Some AWS-related settings are also expected as environment variables. See _setup.
"""
AMI_WORKDIR = '/home/ubuntu'
@staticmethod
def name() -> str:
return "aws"
@staticmethod
def priority() -> int:
return 10
@property
def ami_id(self) -> str:
"""Amazon Machine Image ID (AWS_AMI_ID)."""
return self._ami_id or os.environ.get('AWS_AMI_ID', '')
@ami_id.setter
def ami_id(self, value: str):
self._ami_id = value
@property
def ami_version(self) -> str:
"""Amazon Machine Image version (AWS_AMI_VERSION). Must be a valid PEP-440 version specifier."""
return self._ami_version or os.environ.get('AWS_AMI_VERSION', '==*')
@ami_version.setter
def ami_version(self, value: str):
self._ami_version = value
@property
def default_region(self) -> str:
"AWS Default Region (AWS_DEFAULT_REGION)"
return self._default_region or os.environ.get('AWS_DEFAULT_REGION', 'eu-west-1')
@default_region.setter
def default_region(self, value: str):
self._default_region = value
@property
def efs_dns_name(self) -> str:
"""AWS EFS DNS Name e.g. fs-086c927c9d324a69f.efs.eu-west-1.amazonaws.com"""
return self._efs_dns_name or os.environ.get('AWS_EFS_DNS_NAME', '')
@efs_dns_name.setter
def efs_dns_name(self, value: str):
self._efs_dns_name = value
@property
def efs_packs_dir(self) -> str:
"""AWS EFS Packs Folder e.g. packs. Default: packs"""
return self._efs_packs_dir or os.environ.get('AWS_EFS_PACK_DIR', 'packs')
@efs_packs_dir.setter
def efs_packs_dir(self, value: str):
self._efs_packs_dir = value
@property
def iam_profile(self) -> str:
"""Amazon IAM profile (AWS_IAM_PROFILE)."""
return self._iam_profile or os.environ.get('AWS_IAM_PROFILE', '')
@iam_profile.setter
def iam_profile(self, value: str):
self._iam_profile = value
@property
def instance_name(self) -> str:
"""Amazon EC2 instance name (AWS_INSTANCE_NAME)."""
return self._instance_name or os.environ.get('AWS_INSTANCE_NAME', '')
@instance_name.setter
def instance_name(self, value: str):
self._instance_name = value
@property
def instance_id(self) -> str:
"""Amazon EC2 instance id (AWS_INSTANCE_ID)."""
return self._instance_id or os.environ.get('AWS_INSTANCE_ID', '')
@instance_id.setter
def instance_id(self, value: str):
self._instance_id = value
@property
def instance_type(self) -> str:
"""Amazon EC2 instance type (AWS_INSTANCE_TYPE)."""
return self._instance_type or os.environ.get('AWS_INSTANCE_TYPE', 'c5.large')
@instance_type.setter
def instance_type(self, value: str):
self._instance_type = value
@property
def key_name(self) -> str:
"""Amazon EC2 SSH key name (AWS_KEY_NAME)."""
return self._key_name or os.environ.get('AWS_KEY_NAME', '')
@key_name.setter
def key_name(self, value: str):
self._key_name = value
@property
def s3_bucket_name(self) -> str:
"""Amazon S3 bucket name (AWS_S3_BUCKET_NAME)."""
return self._s3_bucket_name or os.environ.get('AWS_S3_BUCKET_NAME', '')
@s3_bucket_name.setter
def s3_bucket_name(self, value: str):
self._s3_bucket_name = value
@property
def security_group_id(self) -> str:
"""Amazon EC2 security group id (AWS_SECURITY_GROUP_ID)."""
return self._security_group_id or os.environ.get('AWS_SECURITY_GROUP_ID', '')
@security_group_id.setter
def security_group_id(self, value: str):
self._security_group_id = value
@property
def subnet_id(self) -> str:
"""Amazon EC2 subnet id (AWS_SUBNET_ID)."""
return self._subnet_id or os.environ.get('AWS_SUBNET_ID', '')
@subnet_id.setter
def subnet_id(self, value: str):
self._subnet_id = value
@property
def keep_ec2_instance(self) -> bool:
"""Keep the EC2 instance running or terminate? (AWS_KEEP_EC2_INSTANCES)."""
return self._keep_ec2_instance or (os.environ.get('AWS_KEEP_EC2_INSTANCES', 'false').lower() == 'true')
@keep_ec2_instance.setter
def keep_ec2_instance(self, value: bool):
self._keep_ec2_instance = value
@property
def s3_keyprefix(self) -> str:
"""Amazon S3 storage key prefix (AWS_S3_KEYPREFIX)."""
return self._s3_keyprefix or os.environ.get('AWS_S3_KEYPREFIX', 'ssm')
@s3_keyprefix.setter
def s3_keyprefix(self, value: str):
self._s3_keyprefix = value
def __init__(self):
self._ami_id = None
self._ami_version = None
self._default_region = None
self._efs_dns_name = None
self._efs_packs_dir = None
self._iam_profile = None
self._instance_name = None
self._instance_id = None
self._instance_type = None
self._key_name = None
self._s3_bucket_name = None
self._security_group_id = None
self._subnet_id = None
self._keep_ec2_instance = None
self._s3_keyprefix = None
def __repr__(self):
return (
f"ami_id={self.ami_id},"
f"ami_version={self.ami_version},"
f"default_region={self.default_region},"
f"efs_dns_name={self.efs_dns_name},"
f"efs_packs_dir={self.efs_packs_dir},"
f"iam_profile={self.iam_profile},"
f"instance_name={self.instance_name},"
f"instance_id={self.instance_id},"
f"instance_type={self.instance_type},"
f"key_name={self.key_name},"
f"s3_bucket_name={self.s3_bucket_name},"
f"security_group_id={self.security_group_id},"
f"subnet_id={self.subnet_id},"
f"keep_ec2_instance={self.keep_ec2_instance}"
)
def _init(self):
self._init = lambda: None
self._is_aws_credentials_present()
logging.debug('aws:Creating EC2 client...')
self._ec2_client = boto3.client('ec2')
logging.debug('aws:Creating SSM client...')
self._ssm_client = boto3.client('ssm')
logging.debug('aws:Creating S3 client...')
self._s3_client = boto3.client('s3')
logging.debug('aws:Creating S3 resource...')
self._s3_resource = boto3.resource('s3')
self._setup()
@staticmethod
def _check_env(key) -> bool:
if key in os.environ:
logging.debug("aws:%s present!", key)
return True
logging.warning("aws:%s environment variable not present!", key)
return False
def _is_aws_credentials_present(self):
"""
Verifies the presence of AWS credentials as environment variables.
AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are mandatory.
AWS_SESSION_TOKEN is optional for IAM user credentials.
"""
self._check_env('AWS_ACCESS_KEY_ID')
self._check_env('AWS_SECRET_ACCESS_KEY')
self._check_env('AWS_DEFAULT_REGION')
if not self._check_env('AWS_SESSION_TOKEN'):
logging.debug('aws:It is expected for an IAM User')
def _get_efs_packs_user_data(self) -> str:
"""
Return the user data to mount the EFS packs in the EC2 instance.
This is run in the EC2 cloud-init phase.
"""
return ("#cloud-config\n"
"package_update: false\n"
"package_upgrade: false\n"
"runcmd:\n"
"- ubuntu_folder=/home/ubuntu\n"
"- efs_mount_point_1=/mnt/efs/fs1\n"
f"- file_system_id_1={self.efs_dns_name.split('.')[0]}\n"
f"- efs_dns_name={self.efs_dns_name}\n"
f"- pack_folder={self.efs_packs_dir}\n"
"- yum install -y amazon-efs-utils\n"
"- apt-get -y install amazon-efs-utils\n"
"- yum install -y nfs-utils\n"
"- apt-get -y install nfs-common\n"
"- mkdir -p \"${efs_mount_point_1}\"\n"
"- test -f \"/sbin/mount.efs\" && printf \"\\n${file_system_id_1}:/ ${efs_mount_point_1} efs tls,_netdev\\n\" >> /etc/fstab || printf \"\\n${efs_dns_name}:/ ${efs_mount_point_1} nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev 0 0\\n\" >> /etc/fstab\n"
"- test -f \"/sbin/mount.efs\" && grep -ozP 'client-info]\\nsource' '/etc/amazon/efs/efs-utils.conf'; if [[ $? == 1 ]]; then printf \"\\n[client-info]\\nsource=liw\\n\" >> /etc/amazon/efs/efs-utils.conf; fi;\n"
"- retryCnt=15; waitTime=30; while true; do mount -a -t efs,nfs4 defaults; if [ $? = 0 ] || [ $retryCnt -lt 1 ]; then echo File system mounted successfully; break; fi; echo File system not available, retrying to mount.; ((retryCnt--)); sleep $waitTime; done;\n"
"- rm -rf \"${ubuntu_folder}/${pack_folder}\"\n"
"- mkdir -p \"${ubuntu_folder}/${pack_folder}\"\n"
"- chown -R ubuntu:ubuntu \"${ubuntu_folder}/${pack_folder}\"\n"
"- mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport ${efs_dns_name}:/${pack_folder} ${ubuntu_folder}/${pack_folder}\n"
"- printf \"\\n${efs_dns_name}:/${pack_folder} ${ubuntu_folder}/${pack_folder} nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev 0 0\\n\" >> /etc/fstab\n"
)
def _get_git_repo_origin_url(self, remote='origin'):
"""
Collect the Git repo remote URL.
Returns an empty string if the command failed.
"""
cmd_as_list = f"git config --get remote.{remote}.url".split()
try:
ret = subprocess.run(
cmd_as_list,
capture_output=True,
check=True,
text=True, # return str instead of byte type
timeout=60)
except subprocess.CalledProcessError as e:
logging.warning("aws:_get_git_repo_origin_url: failed")
logging.warning("aws:_get_git_repo_origin_url: '%s'", str(e))
return ''
return ret.stdout
def _setup(self):
"""
Setup AWS object by collecting env vars & preparing AWS instance
"""
# Collect AVH-related settings from the environment and validate the mandatory ones
logging.debug("aws:setting up aws backend")
# EC2-related info is not needed if an instance is already created
if self.instance_name and not self.instance_id:
self.instance_id = self.find_instance_by_name(self.instance_name)
if not self.instance_id:
if not self.instance_name:
user = os.environ.get('USER', os.environ.get('USERNAME', "unknown"))
host = os.environ.get('HOSTNAME', "unknown")
self.instance_name = f"{user}@{host}"
if not self.ami_id:
self.ami_id = self.get_image_id()
if not self.ami_id:
logging.error('AWS_AMI_ID must not be blank. Provide either AWS_AMI_ID or a valid AWS_AMI_VERSION')
raise RuntimeError('AWS_AMI_ID must not be blank. Provide either AWS_AMI_ID or a valid AWS_AMI_VERSION')
if not self.iam_profile:
logging.error("aws:environment variable `AWS_IAM_PROFILE` needs to be present!")
raise RuntimeError("aws:environment variable `AWS_IAM_PROFILE` needs to be present!")
if not self.security_group_id:
logging.error("aws:environment variable `AWS_SECURITY_GROUP_ID` needs to be present!")
raise RuntimeError("aws:environment variable `AWS_SECURITY_GROUP_ID` needs to be present!")
if not self.subnet_id:
logging.error("aws:environment variable `AWS_SUBNET_ID` needs to be present!")
raise RuntimeError("aws:environment variable `AWS_SUBNET_ID` needs to be present!")
if not self.s3_bucket_name:
logging.error("aws:environment variable `AWS_S3_BUCKET_NAME` needs to be present!")
raise RuntimeError("aws:environment variable `AWS_S3_BUCKET_NAME` needs to be present!")
if self.efs_dns_name:
logging.info("aws:EFS DNS %s is going to be mounted", self.efs_dns_name)
logging.info("aws:Local packs directory will be replace by EFS packs folder named %s", self.efs_packs_dir)
logging.debug("aws:aws__repr__:%s", self.__repr__())
logging.info("aws:Backend successfully initialized!")
def find_instance_by_name(self, name: str) -> Union[str, None]:
"""Find an instance by name attribute.
The result is None if more than one instance with the given name exists.
Params:
name - The name of a machine instance to lookup.
Returns:
The machine id or None
"""
instance_id = None
name_filter = [
{'Name': 'tag:Name', 'Values': [name]},
{'Name': 'instance-state-name', 'Values': ['running', 'stopped']}
]
response = self._ec2_client.describe_instances(Filters=name_filter)
if 'Reservations' not in response:
logging.debug("Response doesn't contain element 'Reservations'")
elif len(response['Reservations']) == 0:
logging.debug("Response doesn't contain elements in 'Reservations'")
elif len(response['Reservations']) > 1:
logging.warning("Cannot identify EC2 instance by name '%s' due to ambiguity!", self.instance_name)
elif 'Instances' not in response['Reservations'][0]:
logging.debug("Response doesn't contain element 'Instances' in 'Reservations'")
elif len(response['Reservations'][0]['Instances']) != 1:
logging.debug("Response doesn't contain single instance in 'Reservations'")
elif 'InstanceId' not in response['Reservations'][0]['Instances'][0]:
logging.debug("Response doesn't contain element 'InstanceId' in 'Instances'")
else:
instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
logging.info("aws:Resolved EC2 instance name %s as instance ID %s", name, instance_id)
return instance_id
def create_instance(self):
"""
Create an EC2 Instance. It is a wrapper for create_ec2_instance.
If key_name is present, it creates an instance with the selected private key.
This is a mandatory AVH backend method.
"""
self._init()
self.instance_id = self.create_ec2_instance(
BlockDeviceMappings=[{
'DeviceName': '/dev/sda1',
'Ebs': {
'DeleteOnTermination': True,
'Iops': 3000,
'VolumeType': 'gp3',
'Throughput': 150,
'Encrypted': False
}
}],
ImageId=self.ami_id,
InstanceType=self.instance_type,
MaxCount=1,
MinCount=1,
NetworkInterfaces=[{
'AssociatePublicIpAddress': True,
'DeleteOnTermination': True,
'Description':'AVH Network Instance',
'DeviceIndex': 0,
'Groups': [self.security_group_id],
'SubnetId': self.subnet_id
}],
KeyName=self.key_name,
TagSpecifications=[{'ResourceType': 'instance', 'Tags': [
{'Key': 'Name', 'Value': self.instance_name},
{'Key': 'AVH_CLI', 'Value': 'true'},
{'Key': 'Repo', 'Value': self._get_git_repo_origin_url()}
]}],
UserData=self._get_efs_packs_user_data() if self.efs_dns_name != '' else '',
IamInstanceProfile={'Name': self.iam_profile}
)
return self.instance_id
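# Hypothetical usage sketch. This assumes the AWS_* environment variables
# checked in `_setup` (AWS_IAM_PROFILE, AWS_SECURITY_GROUP_ID, AWS_SUBNET_ID,
# AWS_S3_BUCKET_NAME, and AWS_AMI_ID or AWS_AMI_VERSION) plus the boto3
# credentials are already exported:
#
#   backend = AwsBackend()
#   instance_id = backend.create_instance()
#   print(f"created EC2 instance {instance_id}")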
def create_ec2_instance(self, **kwargs):
"""
Create a new EC2 Instance
Parameters
----------
**kwargs: Keyword args associated with run-instances API doc e.g.:
--create-ec2-instance
ImageId=ami-0c5eeabe11f3a2685 \
InstanceType=t2.micro \
MaxCount=1 \
MinCount=1 \
SecurityGroupIds=['sg-04022e04e91197ce3'] \
SubnetId=subnet-00455495b268076f0 \
IamInstanceProfile="{'Name': 'Proj-s3-orta-vht-role'}"
Returns
-------
string
Instance ID
More
----
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.run_instances
"""
kwargs = {k: v for k, v in kwargs.items() if v}
logging.debug('aws:DryRun=True to test for permission check')
logging.debug("aws:create_ec2_instance:kwargs:%s", kwargs)
try:
self._ec2_client.run_instances(**kwargs, DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise RuntimeError from e
logging.info('aws:Creating EC2 instance...')
try:
response = self._ec2_client.run_instances(**kwargs)
except ClientError as e:
raise RuntimeError from e
logging.debug(response)
self.instance_id = response['Instances'][0]['InstanceId']
assert isinstance(self.instance_id, str)
self.wait_ec2_running()
self.wait_ec2_status_ok()
logging.info("aws:EC2 instance %s created!", self.instance_id)
return self.instance_id
def delete_file_from_cloud(self, key):
"""
Delete S3 Object
Parameters
----------
String
key (s3 path)
More
----
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.delete_object
This is a mandatory AVH backend method.
"""
self._init()
logging.debug("aws:Delete S3 Object from S3 Bucket %s, Key %s", self.s3_bucket_name, key)
try:
response = self._s3_client.delete_object(
Bucket=self.s3_bucket_name,
Key=key
)
except ClientError as e:
raise RuntimeError from e
logging.debug(response)
def download_file_from_cloud(self, filename, key):
"""
Download S3 File
Parameters
----------
String
filename (destination local path)
key (s3 path)
More
----
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.download_file
This is a mandatory AVH backend method.
"""
self._init()
try:
logging.debug("aws:Downloading S3 file from bucket %s , key %s, filename %s", self.s3_bucket_name, key, filename)
self._s3_client.download_file(self.s3_bucket_name, key, filename)
except ClientError as e:
if 'HeadObject operation: Not Found' in str(e):
logging.error("Key '%s' not found on S3 Bucket Name = '%s'", key, self.s3_bucket_name)
raise RuntimeError from e
def get_image_id(self):
"""
Get the AVH AMI ID for the region.
The AVH AMI ID differs for each AWS region.
Return
----------
String
AVH AMI ID
More
----
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_images
This is a mandatory AVH backend method.
"""
assert self.ami_version is not None, \
"The variable `ami_version` is not present"
try:
response = self._ec2_client.describe_images(
Filters=[
{
'Name': 'name',
'Values': ["ArmVirtualHardware-*"]
},
]
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:get_vht_ami_id_by_version:%s", response)
version_spec = SimpleSpec(self.ami_version)
images = {}
for image in response['Images']:
ver = image['Name'].split('-')[1]
try:
images[Version(ver)] = image['ImageId']
except ValueError:
logging.debug("aws:get_vht_ami_id_by_version:Invalid version identifier found: %s", ver)
versions = sorted(version_spec.filter(images.keys()), reverse=True)
if not versions:
logging.error("aws:get_vht_ami_id_by_version:No AMI found matching version spec %s", self.ami_version)
logging.error("aws:get_vht_ami_id_by_version:Available AMI versions %s",
sorted([str(k) for k, v in images.items()], reverse=True))
raise RuntimeError()
self.ami_id = images[versions[0]]
logging.info("aws:Selecting AMI version %s, AMI ID %s", versions[0], self.ami_id)
return self.ami_id
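# Minimal illustration of the version filtering above, using the same
# `semantic_version` helpers (the versions and AMI IDs are made up):
#
#   from semantic_version import Version, SimpleSpec
#   images = {Version("1.2.0"): "ami-aaa", Version("1.3.1"): "ami-bbb"}
#   spec = SimpleSpec(">=1.3.0")
#   newest = sorted(spec.filter(images.keys()), reverse=True)[0]
#   # -> Version("1.3.1"), so "ami-bbb" would be selected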
def get_instance_state(self):
"""
Get EC2 Instance State
Return
----------
String
EC2 Instance State ('pending'|'running'|'shutting-down'|'terminated'|'stopping'|'stopped')
More
----
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_instances
"""
self._init()
try:
response = self._ec2_client.describe_instances(
InstanceIds=[
self.instance_id,
],
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:get_instance_state: %s", response)
instance_state = response['Reservations'][0]['Instances'][0]['State']['Name']
logging.debug("aws:The EC2 instance state is %s...", instance_state)
return instance_state
def get_s3_file_content(self, key):
"""
Get S3 File Content
Parameters
----------
String
key (s3 path)
Return
----------
String
S3 File Content
More
----
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#object
"""
self._init()
content = ''
try:
content = self._s3_resource.Object(self.s3_bucket_name, key).get()['Body'].read().decode('utf-8')
except self._s3_client.exceptions.NoSuchKey:
logging.debug("aws:Key '%s' not found on S3 bucket '%s'", key, self.s3_bucket_name)
return content
def get_s3_ssm_command_id_key(self, command_id, output_type):
"""
Get calculated S3 SSM Command ID Output Key
Parameters
----------
String
command_id (Command ID)
output_type (`stderr` or `stdout`)
Return
----------
String
S3 SSM Command ID Key
"""
return f"{self.s3_keyprefix}/{command_id}/{self.instance_id}/awsrunShellScript/0.awsrunShellScript/{output_type}"
def get_ssm_command_id_status(self, command_id):
"""
Get the Status for a specific command ID and Instance ID.
Parameters
----------
String
command_id (Command ID)
Return
----------
String
Command ID Status
More
----------
API Definition:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.list_commands
"""
try:
response = self._ssm_client.list_commands(
CommandId=command_id
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:get_ssm_command_id_status:%s", response)
command_id_status = response['Commands'][0]['Status']
logging.debug("aws:The command_id %s status is %s...", command_id, command_id_status)
return command_id_status
def get_ssm_command_id_status_details(self, command_id):
"""
Get the Status details for a specific command ID and Instance ID.
Parameters
----------
String
command_id (Command ID)
Return
----------
String
Command ID Status Details
More
----------
API Definition:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.get_command_invocation
"""
try:
response = self._ssm_client.get_command_invocation(
CommandId=command_id,
InstanceId=self.instance_id
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:get_ssm_command_id_status_details:%s", response)
logging.info("aws:The command_id %s status details is %s ...", command_id, response['StatusDetails'])
return response['StatusDetails']
def get_ssm_command_id_stdout_url(self, command_id):
"""
Get the stdout output URL for a specific command ID and Instance ID.
Parameters
----------
String
command_id (Command ID)
Return
----------
String
Command ID Stdout URL
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.list_command_invocations
"""
try:
response = self._ssm_client.list_command_invocations(
CommandId=command_id,
InstanceId=self.instance_id
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:get_ssm_command_id_stdout_url:%s", response)
return response['CommandInvocations'][0]['StandardOutputUrl']
def get_ssm_command_id_stderr_url(self, command_id):
"""
Get the stderr output URL for a specific command ID and Instance ID.
Parameters
----------
String
command_id (Command ID)
Return
----------
String
Command ID Stderr URL
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.list_command_invocations
"""
try:
response = self._ssm_client.list_command_invocations(
CommandId=command_id,
InstanceId=self.instance_id
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:get_ssm_command_id_stderr_url:%s", response)
return response['CommandInvocations'][0]['StandardErrorUrl']
def create_or_start_instance(self) -> AvhBackendState:
"""Create a new or start an existing machine instance
Returns:
The machine instance state.
"""
self._init()
if self.instance_id:
state = self.get_instance_state()
if state == "running":
logging.debug("aws:EC2 Instance %s already running!", self.instance_id)
return AvhBackendState.RUNNING
if state == "stopped":
logging.debug("aws:EC2 Instance %s provided!", self.instance_id)
self.start_instance()
return AvhBackendState.STARTED
logging.warning("aws:EC2 Instance %s cannot be reused from state %s!", self.instance_id, state)
self.create_instance()
return AvhBackendState.CREATED
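# Hypothetical usage sketch showing how the returned state is typically used
# (`backend` is an AwsBackend; AvhBackendState comes from .avh_backend):
#
#   state = backend.create_or_start_instance()
#   if state == AvhBackendState.CREATED:
#       pass  # fresh instance: full setup is performed by prepare()
#   elif state == AvhBackendState.STARTED:
#       pass  # an existing stopped instance was started
#   else:
#       pass  # AvhBackendState.RUNNING: the instance was already up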
def prepare(self, force: bool = False) -> AvhBackendState:
self._init()
state = self.create_or_start_instance()
if state == AvhBackendState.CREATED or force:
logging.info("aws:Setting up the instance...")
commands = [
f"runuser -l ubuntu -c 'cat ~/.bashrc | grep export > {self.AMI_WORKDIR}/vars'",
f"runuser -l ubuntu -c 'mkdir -p {self.AMI_WORKDIR}/packs/.Web'",
f"runuser -l ubuntu -c 'wget -N https://www.keil.com/pack/index.pidx -O {self.AMI_WORKDIR}/packs/.Web/index.pidx'",
"apt -o DPkg::Lock::Timeout=600 update",
"apt -o DPkg::Lock::Timeout=600 install awscli -y"
]
self.send_remote_command_batch(
commands,
working_dir=self.AMI_WORKDIR,
fail_if_unsuccess=True,
enable_logging_info=False)
logging.info("aws:Setting up instance workspace...")
commands = [
f"runuser -l ubuntu -c 'rm -rf {self.AMI_WORKDIR}/workspace'",
f"runuser -l ubuntu -c 'mkdir -p {self.AMI_WORKDIR}/workspace'"
]
self.send_remote_command_batch(
commands,
working_dir=self.AMI_WORKDIR,
fail_if_unsuccess=True,
enable_logging_info=False)
return state
def run_commands(self, cmds: List[str]):
self._init()
shfile = Path(NamedTemporaryFile(prefix="script-", suffix=".sh", delete=False).name)
try:
with open(shfile, mode="w", encoding='UTF-8', newline='\n') as file:
file.write("#!/bin/bash\n")
file.write("set +x\n")
file.write("\n".join(cmds))
file.write("\n")
self.upload_file_to_cloud(str(shfile), shfile.name)
# commands which do not need to go to INFO
commands = [
f"runuser -l ubuntu -c 'aws s3 cp s3://{self.s3_bucket_name}/{shfile.name} "
f"{self.AMI_WORKDIR}/{shfile.name} --region {self.default_region} && "
f"chmod +x {self.AMI_WORKDIR}/{shfile.name}'"
]
self.send_remote_command_batch(
commands,
working_dir=self.AMI_WORKDIR,
fail_if_unsuccess=True,
enable_logging_info=False)
# commands which need to go to INFO
commands = [
f"runuser -l ubuntu -c 'source {self.AMI_WORKDIR}/vars "
f"&& pushd {self.AMI_WORKDIR}/workspace && {self.AMI_WORKDIR}/{shfile.name}'"
]
self.send_remote_command_batch(
commands,
working_dir=self.AMI_WORKDIR,
fail_if_unsuccess=True,
enable_logging_info=True)
finally:
os.unlink(shfile)
self.delete_file_from_cloud(shfile.name)
def upload_workspace(self, filename: Union[str, Path]):
self._init()
if isinstance(filename, str):
filename = Path(filename)
try:
self.upload_file_to_cloud(str(filename), filename.name)
commands = [
f"runuser -l ubuntu -c 'aws s3 cp s3://{self.s3_bucket_name}/{filename.name} {self.AMI_WORKDIR}/{filename.name} --region {self.default_region}'",
f"runuser -l ubuntu -c 'cd {self.AMI_WORKDIR}/workspace; tar xf {self.AMI_WORKDIR}/{filename.name}'",
f"runuser -l ubuntu -c 'rm -f {self.AMI_WORKDIR}/{filename.name}'"
]
self.send_remote_command_batch(
commands,
working_dir=self.AMI_WORKDIR,
fail_if_unsuccess=True,
enable_logging_info=False)
finally:
self.delete_file_from_cloud(filename.name)
def download_workspace(self, filename: Union[str, Path], globs: List[str] = None):
if not globs:
globs = ['**/*']
self._init()
if isinstance(filename, str):
filename = Path(filename)
try:
tarbz2 = [f"rm -f {self.AMI_WORKDIR}/{filename.stem}.tar"]
for pattern in globs:
if pattern.startswith("-:"):
tarbz2.append(f"tar df {self.AMI_WORKDIR}/{filename.stem}.tar $(find {pattern[2:]} -type f)")
else:
tarbz2.append(f"tar uf {self.AMI_WORKDIR}/{filename.stem}.tar $(find {pattern} -type f)")
tarbz2.append(f"bzip2 {self.AMI_WORKDIR}/{filename.stem}.tar")
commands = [
f"runuser -l ubuntu -c 'cd {self.AMI_WORKDIR}/workspace; {'; '.join(tarbz2)}'",
f"runuser -l ubuntu -c 'aws s3 cp {self.AMI_WORKDIR}/{filename.stem}.tar.bz2 s3://{self.s3_bucket_name}/{filename.name} --region {self.default_region}'",
f"runuser -l ubuntu -c 'rm -f {self.AMI_WORKDIR}/{filename.stem}.tar.bz2'",
]
self.send_remote_command_batch(
commands,
working_dir=self.AMI_WORKDIR,
fail_if_unsuccess=True,
enable_logging_info=False)
self.download_file_from_cloud(str(filename), filename.name)
finally:
self.delete_file_from_cloud(filename.name)
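# Hedged usage sketch (not part of the original module; `backend` names a hypothetical
# instance of this class): download_workspace packs the remote workspace into a tar.bz2
# archive and fetches it through the S3 bucket. Glob patterns select files to include;
# a "-:" prefix removes matching files from the archive again.
#
#   backend.download_workspace(
#       "results.tbz2",
#       globs=["**/*.log", "-:**/tmp/**"],
#   )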
def upload_file_to_cloud(self, filename, key):
"""
Upload a file to an S3 bucket.
Parameters
----------
filename: Local Filename Path
key: Filepath to be stored on S3 Bucket
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.upload_file
"""
self._init()
logging.debug("aws:Upload File %s to S3 Bucket %s, Key %s", filename, self.s3_bucket_name, key)
self._s3_resource.meta.client.upload_file(filename, self.s3_bucket_name, key)
def send_remote_command(
self,
command_list,
working_dir,
fail_if_unsuccess=True,
enable_logging_info=True):
"""
Send a remote command to an EC2 Instance.
Parameters
----------
List
command_list (List of commands)
String
working_dir (Directory where the remote command will be executed)
Boolean
fail_if_unsuccess (Fail the method in case the command failed)
enable_logging_info (Enable or disable command logging)
Return
------
JSON data from send_ssm_shell_command method.
This is a mandatory AVH backend method.
"""
self._init()
logging.debug("aws:command_list = %s", command_list)
response = self.send_ssm_shell_command(
command_list=command_list,
working_dir=working_dir
)
logging.log(logging.INFO if enable_logging_info else logging.DEBUG, '='*80)
for i in response.keys():
logging.log(logging.INFO if enable_logging_info else logging.DEBUG,
"aws:send_remote_command:%s = %s", i, response[i].strip())
if response['CommandIdStatus'] != 'Success' and fail_if_unsuccess:
logging.error("aws:send_remote_command:Command %s failed!", command_list)
logging.error("aws:send_remote_command:response\n%s", response)
raise RuntimeError()
return response
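# Hedged usage sketch (not part of the original module; `backend` names a hypothetical
# instance of this class): the returned dict carries the keys assembled by
# send_ssm_shell_command, so callers can inspect status and output directly.
#
#   response = backend.send_remote_command(
#       command_list="runuser -l ubuntu -c 'uname -a'",
#       working_dir="/home/ubuntu",
#       fail_if_unsuccess=False,
#   )
#   if response["CommandIdStatus"] != "Success":
#       print(response["StdErr"])
#   print(response["StdOut"])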
def send_remote_command_batch(
self,
command_list,
working_dir,
fail_if_unsuccess=True,
enable_logging_info=True):
"""
Send a batch of remote commands to an EC2 instance.
Parameters
----------
List
command_list (List of List of commands)
String
working_dir (Directory where the remote command will be executed)
Boolean
fail_if_unsuccess (Fail the method in case the command failed - Default: True)
enable_logging_info (Enable or disable command logging)
Return
------
JSON data from send_ssm_shell_command method.
This is a mandatory AVH backend method.
"""
self._init()
logging.debug("aws: command_list = %s", command_list)
all_responses = []
for command in command_list:
all_responses.append(
self.send_remote_command(
command_list=command,
working_dir=working_dir,
fail_if_unsuccess=fail_if_unsuccess,
enable_logging_info=enable_logging_info))
logging.debug("aws: all_responses = %s", all_responses)
return all_responses
def send_ssm_shell_command(
self,
command_list,
working_dir='/',
return_type='all',
timeout_seconds=10800):
"""
Send SSM shell commands to an EC2 instance.
Parameters
----------
String
command_list (List of commands to be executed on the instance_id)
working_dir (Working directory - Default: '/')
return_type (
Method return types:
`all`: Return as a dict: 'CommandId', 'CommandIdStatus', 'CommandList', 'StdOut', 'StdErr' - Default
`command_id`: Return only the `command_id` as a String
)
timeout_seconds (Command timeout in seconds - Default: 10800)
Return
----------
Dict
if return_type == `all` (Default):
'CommandId', 'CommandIdStatus', 'CommandList', 'StdOut', 'StdErr'
String
if return_type == `command_id`:
command_id
More
----------
TODO: Use **kwargs
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.send_command
https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-plugins.html#aws-runShellScript
"""
logging.debug("aws:send_ssm_shell_command:%s:%s", working_dir, command_list)
try:
response = self._ssm_client.send_command(
InstanceIds=[
self.instance_id
],
DocumentName='AWS-RunShellScript',
Parameters={
'workingDirectory': [
working_dir,
],
'commands': [
command_list,
]
},
OutputS3BucketName=self.s3_bucket_name,
OutputS3KeyPrefix=self.s3_keyprefix,
TimeoutSeconds=timeout_seconds,
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:send_ssm_shell_command:%s", response)
command_id = response['Command']['CommandId']
logging.debug("aws:command_id = %s", command_id)
# Give SSM a short moment to register the command before polling for it
time.sleep(2)
logging.debug("aws:Waiting command id %s to finish", command_id)
self.wait_ssm_command_finished(command_id)
logging.debug("aws:Get command id %s status", command_id)
command_id_status = self.get_ssm_command_id_status(command_id)
logging.debug("aws:Command status = %s", command_id_status)
stdout_key = self.get_s3_ssm_command_id_key(command_id, 'stdout')
stdout_str = self.get_s3_file_content(stdout_key)
stderr_str = ''
if command_id_status != 'Success':
stderr_key = self.get_s3_ssm_command_id_key(command_id, 'stderr')
stderr_str = self.get_s3_file_content(stderr_key)
if return_type == 'all':
return {
'CommandId': command_id,
'CommandIdStatus': command_id_status,
'CommandList': command_list,
'StdOut': stdout_str,
'StdErr': stderr_str
}
if return_type == 'command_id':
return command_id
raise AttributeError(f"Output type '{return_type}' invalid. See docs.")
def start_instance(self):
"""
Start an instance and wait for it to reach the running state with status OK.
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.start_instances
This is a mandatory AVH backend method.
"""
self._init()
logging.info("aws:Starting EC2 instance %s", self.instance_id)
try:
response = self._ec2_client.start_instances(
InstanceIds=[
self.instance_id,
]
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:start_ec2_instance:%s", response)
self.wait_ec2_running()
self.wait_ec2_status_ok()
return self.instance_id
def stop_instance(self):
"""
Stop an instance and wait for it to reach the stopped state.
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.stop_instances
This is a mandatory AVH backend method.
"""
self._init()
logging.info("aws:Stopping EC2 instance %s", self.instance_id)
try:
response = self._ec2_client.stop_instances(
InstanceIds=[
self.instance_id
]
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:stop_instance:%s", response)
self.wait_ec2_stopped()
return self.instance_id
def wait_ec2_status_ok(self):
"""
Wait for an EC2 instance to report status OK.
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Waiter.InstanceStatusOk
"""
logging.debug("aws:Waiting until EC2 instance id %s Status Ok...", self.instance_id)
try:
waiter = self._ec2_client.get_waiter('instance_status_ok')
waiter.wait(
InstanceIds=[
self.instance_id
]
)
except WaiterError as e:
raise RuntimeError from e
def wait_ec2_running(self):
"""
Wait for an EC2 instance to be running.
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Waiter.InstanceRunning
"""
logging.debug("aws:Waiting until EC2 instance id %s is running...", self.instance_id)
try:
waiter = self._ec2_client.get_waiter('instance_running')
waiter.wait(
InstanceIds=[
self.instance_id
]
)
except WaiterError as e:
raise RuntimeError from e
def wait_ec2_stopped(self):
"""
Wait for an EC2 instance to stop.
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Instance.wait_until_stopped
"""
logging.debug("aws:Waiting until EC2 instance id %s is stopped...", self.instance_id)
instance = boto3.resource('ec2').Instance(self.instance_id)
instance.wait_until_stopped()
def wait_ec2_terminated(self):
"""
Wait for an EC2 instance to terminate.
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Instance.wait_until_terminated
"""
logging.debug("aws:Waiting until EC2 instance id %s is terminated...", self.instance_id)
instance = boto3.resource('ec2').Instance(self.instance_id)
instance.wait_until_terminated()
def wait_s3_object_exists(self, key, delay=5, max_attempts=2160):
"""
Wait for an S3 object to exist.
Parameters
----------
String
key (S3 Keypath)
delay (Retry delay in seconds - Default: 5)
max_attempts (Max retries - Default: 2160)
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectExists
"""
try:
waiter = self._s3_client.get_waiter('object_exists')
waiter.wait(
Bucket=self.s3_bucket_name,
Key=key,
WaiterConfig={
'Delay': delay,
'MaxAttempts': max_attempts
}
)
except WaiterError as e:
raise RuntimeError from e
def cleanup(self, state):
self._init()
if state in (AvhBackendState.RUNNING, AvhBackendState.INVALID):
pass
elif (state == AvhBackendState.STARTED) or self.keep_ec2_instance:
self.stop_instance()
else:
self.terminate_instance()
def wait_ssm_command_finished(self, command_id, delay=5, max_attempts=2160):
"""
Wait for the SSM command to reach a terminal status.
Maximum wait time is delay * max_attempts = 10800s (matching the SSM shell timeout).
Parameters
----------
String
command_id (Command ID)
delay (Retry delay in seconds - Default: 5)
max_attempts (Max retries - Default: 2160)
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Waiter.CommandExecuted
"""
try:
waiter = self._ssm_client.get_waiter('command_executed')
waiter.wait(
CommandId=command_id,
InstanceId=self.instance_id,
WaiterConfig={
'Delay': delay,
'MaxAttempts': max_attempts
}
)
except WaiterError as e:
# Inspect the exception instance (not the WaiterError class) for a terminal "Failed" status
if "Failed" in str(e):
logging.error("aws:Failed status found while waiting for command id %s", command_id)
def terminate_instance(self):
"""
Terminate an instance and wait for it to be terminated.
More
----------
API Definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.terminate_instances
This is a mandatory AVH backend method.
"""
self._init()
logging.debug('aws:terminate_instance: DryRun=True to test for permission check')
try:
self._ec2_client.terminate_instances(
InstanceIds=[
self.instance_id
],
DryRun=True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise RuntimeError from e
logging.info('aws:Terminating EC2 instance...')
try:
response = self._ec2_client.terminate_instances(
InstanceIds=[
self.instance_id
]
)
except ClientError as e:
raise RuntimeError from e
logging.debug("aws:terminate_instance:%s", response)
self.wait_ec2_terminated()
return response
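# Hedged end-to-end sketch (not part of the original module). The class name `AwsBackend`
# and its constructor arguments are assumptions; only the method calls below are defined
# in this file.
#
#   backend = AwsBackend()
#   state = backend.prepare()
#   try:
#       backend.upload_workspace("workspace.tbz2")
#       backend.run_commands(["cbuild project.cprj", "./run_tests.sh"])
#       backend.download_workspace("results.tbz2", globs=["**/*.log"])
#   finally:
#       backend.cleanup(state)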
|
PypiClean
|
/yadage-httpctrl-server-0.0.7.tar.gz/yadage-httpctrl-server-0.0.7/yadagehttpctrl/static/bower_components/jquery/external/sizzle/dist/sizzle.min.js
|
!function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C={}.hasOwnProperty,D=[],E=D.pop,F=D.push,G=D.push,H=D.slice,I=function(a,b){for(var c=0,d=a.length;c<d;c++)if(a[c]===b)return c;return-1},J="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",K="[\\x20\\t\\r\\n\\f]",L="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",M="\\["+K+"*("+L+")(?:"+K+"*([*^$|!~]?=)"+K+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+L+"))|)"+K+"*\\]",N=":("+L+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+M+")*)|.*)\\)|)",O=new RegExp(K+"+","g"),P=new RegExp("^"+K+"+|((?:^|[^\\\\])(?:\\\\.)*)"+K+"+$","g"),Q=new RegExp("^"+K+"*,"+K+"*"),R=new RegExp("^"+K+"*([>+~]|"+K+")"+K+"*"),S=new RegExp("="+K+"*([^\\]'\"]*?)"+K+"*\\]","g"),T=new RegExp(N),U=new RegExp("^"+L+"$"),V={ID:new RegExp("^#("+L+")"),CLASS:new RegExp("^\\.("+L+")"),TAG:new RegExp("^("+L+"|[*])"),ATTR:new RegExp("^"+M),PSEUDO:new RegExp("^"+N),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+K+"*(even|odd|(([+-]|)(\\d*)n|)"+K+"*(?:([+-]|)"+K+"*(\\d+)|))"+K+"*\\)|)","i"),bool:new RegExp("^(?:"+J+")$","i"),needsContext:new RegExp("^"+K+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+K+"*((?:-\\d)?\\d*)"+K+"*\\)|)(?=[^-]|$)","i")},W=/^(?:input|select|textarea|button)$/i,X=/^h\d$/i,Y=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,$=/[+~]/,_=new RegExp("\\\\([\\da-f]{1,6}"+K+"?|("+K+")|.)","ig"),aa=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:d<0?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ba=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ca=function(a,b){return b?"\0"===a?"\ufffd":a.slice(0,-1)+"\\"+a.charCodeAt(a.length-1).toString(16)+" ":"\\"+a},da=function(){m()},ea=ta(function(a){return a.disabled===!0&&("form"in a||"label"in a)},{dir:"parentNode",next:"legend"});try{G.apply(D=H.call(v.childNodes),v.childNodes),D[v.childNodes.length].nodeType}catch(fa){G={apply:D.length?function(a,b){F.apply(a,H.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var f,h,j,k,l,o,r,s=b&&b.ownerDocument,w=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==w&&9!==w&&11!==w)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==w&&(l=Z.exec(a)))if(f=l[1]){if(9===w){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(s&&(j=s.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(l[2])return G.apply(d,b.getElementsByTagName(a)),d;if((f=l[3])&&c.getElementsByClassName&&b.getElementsByClassName)return G.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==w)s=b,r=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(ba,ca):b.setAttribute("id",k=u),o=g(a),h=o.length;while(h--)o[h]="#"+k+" "+sa(o[h]);r=o.join(","),s=$.test(a)&&qa(b.parentNode)||b}if(r)try{return G.apply(d,s.querySelectorAll(r)),d}catch(x){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(P,"$1"),b,d,e)}function ha(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ia(a){return a[u]=!0,a}function ja(a){var b=n.createElement("fieldset");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ka(a,b){var 
c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function la(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&a.sourceIndex-b.sourceIndex;if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function na(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function oa(a){return function(b){return"form"in b?b.parentNode&&b.disabled===!1?"label"in b?"label"in b.parentNode?b.parentNode.disabled===a:b.disabled===a:b.isDisabled===a||b.isDisabled!==!a&&ea(b)===a:b.disabled===a:"label"in b&&b.disabled===a}}function pa(a){return ia(function(b){return b=+b,ia(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function qa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=ga.support={},f=ga.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return!!b&&"HTML"!==b.nodeName},m=ga.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),v!==n&&(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ja(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ja(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Y.test(n.getElementsByClassName),c.getById=ja(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.filter.ID=function(a){var b=a.replace(_,aa);return function(a){return a.getAttribute("id")===b}},d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}}):(d.filter.ID=function(a){var b=a.replace(_,aa);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}},d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c,d,e,f=b.getElementById(a);if(f){if(c=f.getAttributeNode("id"),c&&c.value===a)return[f];e=b.getElementsByName(a),d=0;while(f=e[d++])if(c=f.getAttributeNode("id"),c&&c.value===a)return[f]}return[]}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){if("undefined"!=typeof b.getElementsByClassName&&p)return b.getElementsByClassName(a)},r=[],q=[],(c.qsa=Y.test(n.querySelectorAll))&&(ja(function(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\r\\' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+K+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+K+"*(?:value|"+J+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ja(function(a){a.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var 
b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+K+"*[*^$|!~]?="),2!==a.querySelectorAll(":enabled").length&&q.push(":enabled",":disabled"),o.appendChild(a).disabled=!0,2!==a.querySelectorAll(":disabled").length&&q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Y.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ja(function(a){c.disconnectedMatch=s.call(a,"*"),s.call(a,"[s!='']:x"),r.push("!=",N)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Y.test(o.compareDocumentPosition),t=b||Y.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?I(k,a)-I(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?I(k,a)-I(k,b):0;if(e===f)return la(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?la(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},ga.matches=function(a,b){return ga(a,null,null,b)},ga.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(S,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return ga(b,n,null,[a]).length>0},ga.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},ga.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&C.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},ga.escape=function(a){return(a+"").replace(ba,ca)},ga.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},ga.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=ga.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=ga.selectors={cacheLength:50,createPseudo:ia,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(_,aa),a[3]=(a[3]||a[4]||a[5]||"").replace(_,aa),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||ga.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&ga.error(a[0]),a},PSEUDO:function(a){var 
b,c=!a[6]&&a[2];return V.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&T.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(_,aa).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+K+")"+a+"("+K+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=ga.attr(d,a);return null==e?"!="===b:!b||(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(O," ")+" ").indexOf(c)>-1:"|="===b&&(e===c||e.slice(0,c.length+1)===c+"-"))}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||ga.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ia(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=I(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ia(function(a){var b=[],c=[],d=h(a.replace(P,"$1"));return d[u]?ia(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ia(function(a){return function(b){return ga(a,b).length>0}}),contains:ia(function(a){return a=a.replace(_,aa),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ia(function(a){return U.test(a||"")||ga.error("unsupported lang: "+a),a=a.replace(_,aa).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:oa(!1),disabled:oa(!0),checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return 
X.test(a.nodeName)},input:function(a){return W.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:pa(function(){return[0]}),last:pa(function(a,b){return[b-1]}),eq:pa(function(a,b,c){return[c<0?c+b:c]}),even:pa(function(a,b){for(var c=0;c<b;c+=2)a.push(c);return a}),odd:pa(function(a,b){for(var c=1;c<b;c+=2)a.push(c);return a}),lt:pa(function(a,b,c){for(var d=c<0?c+b:c;--d>=0;)a.push(d);return a}),gt:pa(function(a,b,c){for(var d=c<0?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=ma(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=na(b);function ra(){}ra.prototype=d.filters=d.pseudos,d.setFilters=new ra,g=ga.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){c&&!(e=Q.exec(h))||(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=R.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(P," ")}),h=h.slice(c.length));for(g in d.filter)!(e=V[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?ga.error(a):z(a,i).slice(0)};function sa(a){for(var b=0,c=a.length,d="";b<c;b++)d+=a[b].value;return d}function ta(a,b,c){var d=b.dir,e=b.next,f=e||d,g=c&&"parentNode"===f,h=x++;return b.first?function(b,c,e){while(b=b[d])if(1===b.nodeType||g)return a(b,c,e);return!1}:function(b,c,i){var j,k,l,m=[w,h];if(i){while(b=b[d])if((1===b.nodeType||g)&&a(b,c,i))return!0}else while(b=b[d])if(1===b.nodeType||g)if(l=b[u]||(b[u]={}),k=l[b.uniqueID]||(l[b.uniqueID]={}),e&&e===b.nodeName.toLowerCase())b=b[d]||b;else{if((j=k[f])&&j[0]===w&&j[1]===h)return m[2]=j[2];if(k[f]=m,m[2]=a(b,c,i))return!0}return!1}}function ua(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function va(a,b,c){for(var d=0,e=b.length;d<e;d++)ga(a,b[d],c);return c}function wa(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;h<i;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function xa(a,b,c,d,e,f){return d&&!d[u]&&(d=xa(d)),e&&!e[u]&&(e=xa(e,f)),ia(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||va(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:wa(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=wa(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?I(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=wa(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):G.apply(g,r)})}function ya(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ta(function(a){return a===b},h,!0),l=ta(function(a){return I(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];i<f;i++)if(c=d.relative[a[i].type])m=[ta(ua(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;e<f;e++)if(d.relative[a[e].type])break;return xa(i>1&&ua(m),i>1&&sa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(P,"$1"),c,i<e&&ya(a.slice(i,e)),e<f&&ya(a=a.slice(e)),e<f&&sa(a))}m.push(c)}return ua(m)}function za(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=E.call(i));u=wa(u)}G.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&ga.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ia(f):f}h=ga.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=ya(b[c]),f[u]?d.push(f):e.push(f);f=A(a,za(e,d)),f.selector=a}return f},i=ga.select=function(a,b,c,e){var f,i,j,k,l,m="function"==typeof a&&a,n=!e&&g(a=m.selector||a);if(c=c||[],1===n.length){if(i=n[0]=n[0].slice(0),i.length>2&&"ID"===(j=i[0]).type&&9===b.nodeType&&p&&d.relative[i[1].type]){if(b=(d.find.ID(j.matches[0].replace(_,aa),b)||[])[0],!b)return c;m&&(b=b.parentNode),a=a.slice(i.shift().value.length)}f=V.needsContext.test(a)?0:i.length;while(f--){if(j=i[f],d.relative[k=j.type])break;if((l=d.find[k])&&(e=l(j.matches[0].replace(_,aa),$.test(i[0].type)&&qa(b.parentNode)||b))){if(i.splice(f,1),a=e.length&&sa(i),!a)return G.apply(c,e),c;break}}}return(m||h(a,n))(e,b,!p,c,!b||$.test(a)&&qa(b.parentNode)||b),c},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ja(function(a){return 1&a.compareDocumentPosition(n.createElement("fieldset"))}),ja(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||ka("type|href|height|width",function(a,b,c){if(!c)return a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ja(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ka("value",function(a,b,c){if(!c&&"input"===a.nodeName.toLowerCase())return a.defaultValue}),ja(function(a){return null==a.getAttribute("disabled")})||ka(J,function(a,b,c){var d;if(!c)return a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null});var Aa=a.Sizzle;ga.noConflict=function(){return a.Sizzle===ga&&(a.Sizzle=Aa),ga},"function"==typeof define&&define.amd?define(function(){return ga}):"undefined"!=typeof module&&module.exports?module.exports=ga:a.Sizzle=ga}(window);
//# sourceMappingURL=sizzle.min.map
|
PypiClean
|
/django_htmx_ui_adminlte-0.1.13-py3-none-any.whl/django_htmx_ui_adminlte/static/adminlte/plugins/codemirror/mode/q/q.js
|
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("q",function(config){
var indentUnit=config.indentUnit,
curPunc,
keywords=buildRE(["abs","acos","aj","aj0","all","and","any","asc","asin","asof","atan","attr","avg","avgs","bin","by","ceiling","cols","cor","cos","count","cov","cross","csv","cut","delete","deltas","desc","dev","differ","distinct","div","do","each","ej","enlist","eval","except","exec","exit","exp","fby","fills","first","fkeys","flip","floor","from","get","getenv","group","gtime","hclose","hcount","hdel","hopen","hsym","iasc","idesc","if","ij","in","insert","inter","inv","key","keys","last","like","list","lj","load","log","lower","lsq","ltime","ltrim","mavg","max","maxs","mcount","md5","mdev","med","meta","min","mins","mmax","mmin","mmu","mod","msum","neg","next","not","null","or","over","parse","peach","pj","plist","prd","prds","prev","prior","rand","rank","ratios","raze","read0","read1","reciprocal","reverse","rload","rotate","rsave","rtrim","save","scan","select","set","setenv","show","signum","sin","sqrt","ss","ssr","string","sublist","sum","sums","sv","system","tables","tan","til","trim","txf","type","uj","ungroup","union","update","upper","upsert","value","var","view","views","vs","wavg","where","where","while","within","wj","wj1","wsum","xasc","xbar","xcol","xcols","xdesc","xexp","xgroup","xkey","xlog","xprev","xrank"]),
E=/[|/&^!+:\\\-*%$=~#;@><,?_\'\"\[\(\]\)\s{}]/;
function buildRE(w){return new RegExp("^("+w.join("|")+")$");}
function tokenBase(stream,state){
var sol=stream.sol(),c=stream.next();
curPunc=null;
if(sol)
if(c=="/")
return(state.tokenize=tokenLineComment)(stream,state);
else if(c=="\\"){
if(stream.eol()||/\s/.test(stream.peek()))
return stream.skipToEnd(),/^\\\s*$/.test(stream.current())?(state.tokenize=tokenCommentToEOF)(stream):state.tokenize=tokenBase,"comment";
else
return state.tokenize=tokenBase,"builtin";
}
if(/\s/.test(c))
return stream.peek()=="/"?(stream.skipToEnd(),"comment"):"whitespace";
if(c=='"')
return(state.tokenize=tokenString)(stream,state);
if(c=='`')
return stream.eatWhile(/[A-Za-z\d_:\/.]/),"symbol";
if(("."==c&&/\d/.test(stream.peek()))||/\d/.test(c)){
var t=null;
stream.backUp(1);
if(stream.match(/^\d{4}\.\d{2}(m|\.\d{2}([DT](\d{2}(:\d{2}(:\d{2}(\.\d{1,9})?)?)?)?)?)/)
|| stream.match(/^\d+D(\d{2}(:\d{2}(:\d{2}(\.\d{1,9})?)?)?)/)
|| stream.match(/^\d{2}:\d{2}(:\d{2}(\.\d{1,9})?)?/)
|| stream.match(/^\d+[ptuv]{1}/))
t="temporal";
else if(stream.match(/^0[NwW]{1}/)
|| stream.match(/^0x[\da-fA-F]*/)
|| stream.match(/^[01]+[b]{1}/)
|| stream.match(/^\d+[chijn]{1}/)
|| stream.match(/-?\d*(\.\d*)?(e[+\-]?\d+)?(e|f)?/))
t="number";
return(t&&(!(c=stream.peek())||E.test(c)))?t:(stream.next(),"error");
}
if(/[A-Za-z]|\./.test(c))
return stream.eatWhile(/[A-Za-z._\d]/),keywords.test(stream.current())?"keyword":"variable";
if(/[|/&^!+:\\\-*%$=~#;@><\.,?_\']/.test(c))
return null;
if(/[{}\(\[\]\)]/.test(c))
return null;
return"error";
}
function tokenLineComment(stream,state){
return stream.skipToEnd(),/\/\s*$/.test(stream.current())?(state.tokenize=tokenBlockComment)(stream,state):(state.tokenize=tokenBase),"comment";
}
function tokenBlockComment(stream,state){
var f=stream.sol()&&stream.peek()=="\\";
stream.skipToEnd();
if(f&&/^\\\s*$/.test(stream.current()))
state.tokenize=tokenBase;
return"comment";
}
function tokenCommentToEOF(stream){return stream.skipToEnd(),"comment";}
function tokenString(stream,state){
var escaped=false,next,end=false;
while((next=stream.next())){
if(next=="\""&&!escaped){end=true;break;}
escaped=!escaped&&next=="\\";
}
if(end)state.tokenize=tokenBase;
return"string";
}
function pushContext(state,type,col){state.context={prev:state.context,indent:state.indent,col:col,type:type};}
function popContext(state){state.indent=state.context.indent;state.context=state.context.prev;}
return{
startState:function(){
return{tokenize:tokenBase,
context:null,
indent:0,
col:0};
},
token:function(stream,state){
if(stream.sol()){
if(state.context&&state.context.align==null)
state.context.align=false;
state.indent=stream.indentation();
}
//if (stream.eatSpace()) return null;
var style=state.tokenize(stream,state);
if(style!="comment"&&state.context&&state.context.align==null&&state.context.type!="pattern"){
state.context.align=true;
}
if(curPunc=="(")pushContext(state,")",stream.column());
else if(curPunc=="[")pushContext(state,"]",stream.column());
else if(curPunc=="{")pushContext(state,"}",stream.column());
else if(/[\]\}\)]/.test(curPunc)){
while(state.context&&state.context.type=="pattern")popContext(state);
if(state.context&&curPunc==state.context.type)popContext(state);
}
else if(curPunc=="."&&state.context&&state.context.type=="pattern")popContext(state);
else if(/atom|string|variable/.test(style)&&state.context){
if(/[\}\]]/.test(state.context.type))
pushContext(state,"pattern",stream.column());
else if(state.context.type=="pattern"&&!state.context.align){
state.context.align=true;
state.context.col=stream.column();
}
}
return style;
},
indent:function(state,textAfter){
var firstChar=textAfter&&textAfter.charAt(0);
var context=state.context;
if(/[\]\}]/.test(firstChar))
while (context&&context.type=="pattern")context=context.prev;
var closing=context&&firstChar==context.type;
if(!context)
return 0;
else if(context.type=="pattern")
return context.col;
else if(context.align)
return context.col+(closing?0:1);
else
return context.indent+(closing?0:indentUnit);
}
};
});
CodeMirror.defineMIME("text/x-q","q");
});
|
PypiClean
|
/BiblioPixel-3.4.46.tar.gz/BiblioPixel-3.4.46/bibliopixel/animation/game.py
|
from . matrix import Matrix
class Game(Matrix):
def __init__(self, layout, inputDev):
super().__init__(layout)
self._input_dev = inputDev
self._keys = None
self._lastKeys = None
self._speedStep = 0
self._speeds = {}
self._keyfuncs = {}
def _exit(self, type, value, traceback):
if hasattr(self._input_dev, 'setLightsOff'):
self._input_dev.setLightsOff(5)
self._input_dev.close()
def setSpeed(self, name, speed):
self._speeds[name] = speed
def getSpeed(self, name):
return self._speeds.get(name)
def _checkSpeed(self, speed):
return not (self._speedStep % speed)
def checkSpeed(self, name):
return name in self._speeds and self._checkSpeed(self._speeds[name])
def addKeyFunc(self, key, func, speed=1, hold=True):
if not isinstance(key, list):
key = [key]
for k in key:
self._keyfuncs[k] = {
"func": func,
"speed": speed,
"hold": hold,
"last": False,
"inter": False
}
def handleKeys(self):
for key in self._keys:
val = self._keys[key]
if key in self._keyfuncs:
cfg = self._keyfuncs[key]
# _keyfuncs entries are plain dicts (see addKeyFunc), so use key access
speed_pass = self._checkSpeed(cfg["speed"])
if cfg["hold"]:
if speed_pass:
if (val or cfg["inter"]):
cfg["func"]()
else:
cfg["inter"] = cfg["last"] = val
elif speed_pass:
if (val or cfg["inter"]) and not cfg["last"]:
cfg["func"]()
cfg["inter"] = cfg["last"] = val
else:
cfg["inter"] |= val
self._lastKeys = self._keys
def step(self, amt):
self._keys = self._input_dev.getKeys()
self._speedStep += 1
from .. util import deprecated
if deprecated.allowed():
BaseGameAnim = Game
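# Hedged usage sketch (not part of the original module): a subclass registers key handlers
# with addKeyFunc and drives them from step(). The key name "UP" and the helper methods
# move_up/update_paddles are assumptions; only the Game API defined above is used.
#
#   class Pong(Game):
#       def __init__(self, layout, input_dev):
#           super().__init__(layout, input_dev)
#           self.setSpeed("paddle", 2)
#           self.addKeyFunc("UP", self.move_up, speed=1, hold=True)
#
#       def step(self, amt):
#           super().step(amt)   # refresh key state and advance the speed counter
#           self.handleKeys()
#           if self.checkSpeed("paddle"):
#               self.update_paddles()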
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/drives/item/items/item/workbook/tables/tables_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .......models import workbook_table, workbook_table_collection_response
from .......models.o_data_errors import o_data_error
from .add import add_request_builder
from .count import count_request_builder
from .item import workbook_table_item_request_builder
from .item_at_with_index import item_at_with_index_request_builder
class TablesRequestBuilder():
"""
Provides operations to manage the tables property of the microsoft.graph.workbook entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new TablesRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/drives/{drive%2Did}/items/{driveItem%2Did}/workbook/tables{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select,%24expand}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def by_workbook_table_id(self,workbook_table_id: str) -> workbook_table_item_request_builder.WorkbookTableItemRequestBuilder:
"""
Provides operations to manage the tables property of the microsoft.graph.workbook entity.
Args:
workbook_table_id: Unique identifier of the item
Returns: workbook_table_item_request_builder.WorkbookTableItemRequestBuilder
"""
if workbook_table_id is None:
raise Exception("workbook_table_id cannot be undefined")
from .item import workbook_table_item_request_builder
url_tpl_params = get_path_parameters(self.path_parameters)
url_tpl_params["workbookTable%2Did"] = workbook_table_id
return workbook_table_item_request_builder.WorkbookTableItemRequestBuilder(self.request_adapter, url_tpl_params)
async def get(self,request_configuration: Optional[TablesRequestBuilderGetRequestConfiguration] = None) -> Optional[workbook_table_collection_response.WorkbookTableCollectionResponse]:
"""
Represents a collection of tables associated with the workbook. Read-only.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[workbook_table_collection_response.WorkbookTableCollectionResponse]
"""
request_info = self.to_get_request_information(
request_configuration
)
from .......models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from .......models import workbook_table_collection_response
return await self.request_adapter.send_async(request_info, workbook_table_collection_response.WorkbookTableCollectionResponse, error_mapping)
def item_at_with_index(self,index: Optional[int] = None) -> item_at_with_index_request_builder.ItemAtWithIndexRequestBuilder:
"""
Provides operations to call the itemAt method.
Args:
index: Usage: index={index}
Returns: item_at_with_index_request_builder.ItemAtWithIndexRequestBuilder
"""
if index is None:
raise Exception("index cannot be undefined")
from .item_at_with_index import item_at_with_index_request_builder
return item_at_with_index_request_builder.ItemAtWithIndexRequestBuilder(self.request_adapter, self.path_parameters, index)
async def post(self,body: Optional[workbook_table.WorkbookTable] = None, request_configuration: Optional[TablesRequestBuilderPostRequestConfiguration] = None) -> Optional[workbook_table.WorkbookTable]:
"""
Create new navigation property to tables for drives
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[workbook_table.WorkbookTable]
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.to_post_request_information(
body, request_configuration
)
from .......models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from .......models import workbook_table
return await self.request_adapter.send_async(request_info, workbook_table.WorkbookTable, error_mapping)
def to_get_request_information(self,request_configuration: Optional[TablesRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
Represents a collection of tables associated with the workbook. Read-only.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
def to_post_request_information(self,body: Optional[workbook_table.WorkbookTable] = None, request_configuration: Optional[TablesRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Create new navigation property to tables for drives
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
@property
def add(self) -> add_request_builder.AddRequestBuilder:
"""
Provides operations to call the add method.
"""
from .add import add_request_builder
return add_request_builder.AddRequestBuilder(self.request_adapter, self.path_parameters)
@property
def count(self) -> count_request_builder.CountRequestBuilder:
"""
Provides operations to call the count method.
"""
from .count import count_request_builder
return count_request_builder.CountRequestBuilder(self.request_adapter, self.path_parameters)
@dataclass
class TablesRequestBuilderGetQueryParameters():
"""
Represents a collection of tables associated with the workbook. Read-only.
"""
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "count":
return "%24count"
if original_name == "expand":
return "%24expand"
if original_name == "filter":
return "%24filter"
if original_name == "orderby":
return "%24orderby"
if original_name == "search":
return "%24search"
if original_name == "select":
return "%24select"
if original_name == "skip":
return "%24skip"
if original_name == "top":
return "%24top"
return original_name
# Include count of items
count: Optional[bool] = None
# Expand related entities
expand: Optional[List[str]] = None
# Filter items by property values
filter: Optional[str] = None
# Order items by property values
orderby: Optional[List[str]] = None
# Search items by search phrases
search: Optional[str] = None
# Select properties to be returned
select: Optional[List[str]] = None
# Skip the first n items
skip: Optional[int] = None
# Show only the first n items
top: Optional[int] = None
@dataclass
class TablesRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[TablesRequestBuilder.TablesRequestBuilderGetQueryParameters] = None
@dataclass
class TablesRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
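# Hedged usage sketch (not part of the generated module): this builder is normally reached
# through the GraphServiceClient fluent API, but it can also be constructed directly from a
# RequestAdapter plus the raw path parameters expected by url_template. The IDs below are
# placeholders.
#
#   builder = TablesRequestBuilder(request_adapter, {
#       "drive%2Did": "<drive-id>",
#       "driveItem%2Did": "<drive-item-id>",
#   })
#   tables = await builder.get()
#   for table in (tables.value or []):
#       print(table.name)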
|
PypiClean
|
/chalkpy-2.16.10.tar.gz/chalkpy-2.16.10/chalk/feature_n/feature_117/feature.py
|
from typing import TypeVar, Generic, Optional, Dict
from chalk.features.dataframe import DataFrameMeta
T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
T5 = TypeVar("T5")
T6 = TypeVar("T6")
T7 = TypeVar("T7")
T8 = TypeVar("T8")
T9 = TypeVar("T9")
T10 = TypeVar("T10")
T11 = TypeVar("T11")
T12 = TypeVar("T12")
T13 = TypeVar("T13")
T14 = TypeVar("T14")
T15 = TypeVar("T15")
T16 = TypeVar("T16")
T17 = TypeVar("T17")
T18 = TypeVar("T18")
T19 = TypeVar("T19")
T20 = TypeVar("T20")
T21 = TypeVar("T21")
T22 = TypeVar("T22")
T23 = TypeVar("T23")
T24 = TypeVar("T24")
T25 = TypeVar("T25")
T26 = TypeVar("T26")
T27 = TypeVar("T27")
T28 = TypeVar("T28")
T29 = TypeVar("T29")
T30 = TypeVar("T30")
T31 = TypeVar("T31")
T32 = TypeVar("T32")
T33 = TypeVar("T33")
T34 = TypeVar("T34")
T35 = TypeVar("T35")
T36 = TypeVar("T36")
T37 = TypeVar("T37")
T38 = TypeVar("T38")
T39 = TypeVar("T39")
T40 = TypeVar("T40")
T41 = TypeVar("T41")
T42 = TypeVar("T42")
T43 = TypeVar("T43")
T44 = TypeVar("T44")
T45 = TypeVar("T45")
T46 = TypeVar("T46")
T47 = TypeVar("T47")
T48 = TypeVar("T48")
T49 = TypeVar("T49")
T50 = TypeVar("T50")
T51 = TypeVar("T51")
T52 = TypeVar("T52")
T53 = TypeVar("T53")
T54 = TypeVar("T54")
T55 = TypeVar("T55")
T56 = TypeVar("T56")
T57 = TypeVar("T57")
T58 = TypeVar("T58")
T59 = TypeVar("T59")
T60 = TypeVar("T60")
T61 = TypeVar("T61")
T62 = TypeVar("T62")
T63 = TypeVar("T63")
T64 = TypeVar("T64")
T65 = TypeVar("T65")
T66 = TypeVar("T66")
T67 = TypeVar("T67")
T68 = TypeVar("T68")
T69 = TypeVar("T69")
T70 = TypeVar("T70")
T71 = TypeVar("T71")
T72 = TypeVar("T72")
T73 = TypeVar("T73")
T74 = TypeVar("T74")
T75 = TypeVar("T75")
T76 = TypeVar("T76")
T77 = TypeVar("T77")
T78 = TypeVar("T78")
T79 = TypeVar("T79")
T80 = TypeVar("T80")
T81 = TypeVar("T81")
T82 = TypeVar("T82")
T83 = TypeVar("T83")
T84 = TypeVar("T84")
T85 = TypeVar("T85")
T86 = TypeVar("T86")
T87 = TypeVar("T87")
T88 = TypeVar("T88")
T89 = TypeVar("T89")
T90 = TypeVar("T90")
T91 = TypeVar("T91")
T92 = TypeVar("T92")
T93 = TypeVar("T93")
T94 = TypeVar("T94")
T95 = TypeVar("T95")
T96 = TypeVar("T96")
T97 = TypeVar("T97")
T98 = TypeVar("T98")
T99 = TypeVar("T99")
T100 = TypeVar("T100")
T101 = TypeVar("T101")
T102 = TypeVar("T102")
T103 = TypeVar("T103")
T104 = TypeVar("T104")
T105 = TypeVar("T105")
T106 = TypeVar("T106")
T107 = TypeVar("T107")
T108 = TypeVar("T108")
T109 = TypeVar("T109")
T110 = TypeVar("T110")
T111 = TypeVar("T111")
T112 = TypeVar("T112")
T113 = TypeVar("T113")
T114 = TypeVar("T114")
T115 = TypeVar("T115")
T116 = TypeVar("T116")
T117 = TypeVar("T117")
class Features(
Generic[
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24,
T25,
T26,
T27,
T28,
T29,
T30,
T31,
T32,
T33,
T34,
T35,
T36,
T37,
T38,
T39,
T40,
T41,
T42,
T43,
T44,
T45,
T46,
T47,
T48,
T49,
T50,
T51,
T52,
T53,
T54,
T55,
T56,
T57,
T58,
T59,
T60,
T61,
T62,
T63,
T64,
T65,
T66,
T67,
T68,
T69,
T70,
T71,
T72,
T73,
T74,
T75,
T76,
T77,
T78,
T79,
T80,
T81,
T82,
T83,
T84,
T85,
T86,
T87,
T88,
T89,
T90,
T91,
T92,
T93,
T94,
T95,
T96,
T97,
T98,
T99,
T100,
T101,
T102,
T103,
T104,
T105,
T106,
T107,
T108,
T109,
T110,
T111,
T112,
T113,
T114,
T115,
T116,
T117,
]
):
pass
class DataFrame(
Generic[
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24,
T25,
T26,
T27,
T28,
T29,
T30,
T31,
T32,
T33,
T34,
T35,
T36,
T37,
T38,
T39,
T40,
T41,
T42,
T43,
T44,
T45,
T46,
T47,
T48,
T49,
T50,
T51,
T52,
T53,
T54,
T55,
T56,
T57,
T58,
T59,
T60,
T61,
T62,
T63,
T64,
T65,
T66,
T67,
T68,
T69,
T70,
T71,
T72,
T73,
T74,
T75,
T76,
T77,
T78,
T79,
T80,
T81,
T82,
T83,
T84,
T85,
T86,
T87,
T88,
T89,
T90,
T91,
T92,
T93,
T94,
T95,
T96,
T97,
T98,
T99,
T100,
T101,
T102,
T103,
T104,
T105,
T106,
T107,
T108,
T109,
T110,
T111,
T112,
T113,
T114,
T115,
T116,
T117,
],
metaclass=DataFrameMeta,
):
def __getitem__(self, item):
pass
|
PypiClean
|
/dan-build-0.2.7.tar.gz/dan-build-0.2.7/dan/cli/io.py
|
import asyncio
import fnmatch
import os
import contextlib
from dan.cli import click
from dan.core.requirements import parse_package
from dan.core.cache import Cache
from dan.io.repositories import RepositoriesSettings, _get_settings
from dan.make import Make
def get_source_path():
from dan.cxx.detect import get_dan_path
source_path = get_dan_path() / 'deps'
source_path.mkdir(exist_ok=True, parents=True)
return source_path
_make : Make = None
async def get_make(toolchain='default', quiet=True):
global _make
if _make is None:
source_path = get_source_path()
os.chdir(source_path)
(source_path / 'dan-build.py').touch()
make = Make(source_path / 'build', quiet=quiet)
make.config.source_path = str(source_path)
make.config.build_path = str(source_path / 'build')
make.config.toolchain = toolchain
await make._config.save()
await make.initialize()
_make = make
return _make
_repositories = None
async def get_repositories():
global _repositories
if _repositories is None:
from dan.io.repositories import get_all_repo_instances
await get_make()
_repositories = get_all_repo_instances()
async with asyncio.TaskGroup() as g:
for repo in _repositories:
g.create_task(repo.build())
return _repositories
async def get_repository(name = None):
from dan.io.repositories import get_repo_instance
await get_make()
repo = get_repo_instance(name)
await repo.build()
return repo
@contextlib.asynccontextmanager
async def make_context(toolchain='default', quiet=True):
make = await get_make(toolchain, quiet=quiet)
with make.context:
yield make
@click.group()
def cli():
pass
@cli.command()
@click.option('--setting', '-s', 'settings', type=click.SettingsParamType(RepositoriesSettings), multiple=True)
async def configure(settings):
io_settings = _get_settings()
from dan.core.settings import apply_settings
apply_settings(io_settings, *settings, logger=click.logger)
await Cache.save_all()
@cli.group()
def ls():
"""Inspect stuff"""
pass
@ls.command()
async def repositories():
"""List available repositories"""
async with make_context():
repos = await get_repositories()
for repo in repos:
click.echo(repo.name)
@ls.command()
async def libraries():
"""List available libraries"""
async with make_context():
repos = await get_repositories()
for repo in repos:
for name, lib in repo.installed.items():
click.echo(f'{name} = {lib.version}')
async def get_library(library_spec):
package, library, repository = parse_package(library_spec)
repo = await get_repository(repository)
if repo is None:
raise RuntimeError(f'cannot find repository {repository}')
lib = repo.find(library, package)
if lib is None:
if repository is None:
repository = repo.name
if package is None:
package = library
raise RuntimeError(f'cannot find {package}:{library}@{repository}')
return lib
@ls.command()
@click.argument('LIBRARY')
async def versions(library: str):
"""Get LIBRARY's available versions"""
async with make_context():
lib = await get_library(library)
from dan.src.github import GitHubReleaseSources
sources: GitHubReleaseSources = lib.get_dependency(GitHubReleaseSources)
available_versions = await sources.available_versions()
available_versions = sorted(available_versions.keys())
for v in available_versions:
if v == lib.version:
click.echo(f' - {v} (default)')
else:
click.echo(f' - {v}')
@ls.command()
@click.argument('LIBRARY')
async def options(library: str):
"""Get LIBRARY's available options"""
async with make_context():
lib = await get_library(library)
await lib.initialize()
for o in lib.options:
current = ''
if o.value != o.default:
current = f', current: {o.value}'
click.echo(f'{o.name}: {o.help} (type: {o.type.__name__}, default: {o.default}{current})')
@cli.command()
@click.argument('NAME')
async def search(name):
"""Search for NAME in repositories"""
async with make_context():
name = f'*{name}*'
repos = await get_repositories()
for repo in repos:
installed = repo.installed
for libname, lib in installed.items():
if fnmatch.fnmatch(libname, name):
click.echo(f'{libname} = {lib.version}')
@cli.command()
@click.option('--toolchain', '-t', type=click.ToolchainParamType(), default='default')
@click.argument('PACKAGE_SPEC')
@click.argument('VERSION', required=False)
async def install(toolchain, package_spec, version):
"""Intall given PACKAGE_SPEC"""
from dan.io.package import PackageBuild
async with make_context(toolchain, quiet=False) as make:
package, name, repository = parse_package(package_spec)
pkg = PackageBuild(name, version, package, repository, makefile=make.root)
await pkg.initialize()
if pkg.up_to_date:
click.echo(f'Package {package_spec} already installed at version {pkg.version}')
else:
await pkg.build()
click.echo(f'Package {package_spec} installed successfully at version {pkg.version}')
def main():
import sys
try:
cli(auto_envvar_prefix='DAN')
except Exception as err:
click.logger.error(str(err))
_ex_type, _ex, tb = sys.exc_info()
import traceback
click.logger.debug(' '.join(traceback.format_tb(tb)))
try:
# wait for the asyncio loop to terminate
asyncio.get_running_loop().run_until_complete()
except Exception:
pass
return -1
if __name__ == '__main__':
main()
|
PypiClean
|
/pycp-8.0.8.tar.gz/pycp-8.0.8/Changelog.rst
|
8.0.8
-----
* Fix long_description metadata
8.0.7
-----
* Remove dependency on `python-cli-ui`
8.0.6
-----
* Fix packaging issue: ``pycp`` wheels are no longer universal.
8.0.5
-----
* Fix crash in ``pycp -g``. Reported by @z1lt0id
8.0.4
-----
* Partial revert of 8.0.3: ``pycp`` is now still fast even with just one CPU
8.0.3
-----
* Performance improvements (see #20). We are now faster than v7 :)
Note that ``pycp`` will still be slow if only one CPU is available.
8.0.2
-----
* Packaging fixes
8.0.1
-----
* Fix calling ``--version`` in some corner cases.
8.0
---
* New feature: colors by default.
I'd like to thank @schvabodka-man for giving me the opportunity to
refactor code that was more than 7 years old :)
* Breaking change: remove ``--all`` see `#19 <https://github.com/dmerejkowsky/pycp/issues/19>`_
for details.
* Drop Windows support
* Drop Python2 support
* Massive refactoring
* Stricter CI
7.3
---
* Try to preserve user and group when used with ``-p,--preserve``
* Optimization: read source file size only once
* Fix crash when file size increases while it's being copied
7.2.2
-----
* Include test/test_dir/ in source package. This
makes it possible for pycp packages to run the tests
7.2.1
-----
* Fix README. (version bump required for updating
pypi page)
7.2
---
* Bring back Python2.7 compatibility. Why not?
* Display a file count even when not using ``-g``
7.1
---
* Fix classifiers
7.0
---
* port to Python3
* switch to setuptools for the packaging
6.1
---
* improve symlink support
6.0
---
* massive refactoring
* pycp no longer depends on progressbar
* add pycp -g option to display a global progress bar on
several lines
5.0
---
* massive refactoring
* pycp no longer uses threading code.
copying small files should now be painless
(no more time.sleep)
* pycp learned --all and --preserve options
* change license from GPL to BSD
4.3.3
-----
* pycp no longer hangs when copy fails.
* error code is non-zero when serious problems occur.
4.3.2
-----
Bug fixes concerning small and empty files
4.3.1
-----
Bug fix: ``pymv a_dir b_dir`` left an empty ``a_dir`` behind
4.3
----
Nicer print of what is being transferred::
/path/to/{foo => bar}/a/b
instead of::
/path/to/foo/a/b -> /path/to/bar/a/b
4.2
---
Pycp is now available on PyPI:
http://pypi.python.org/pypi/pycp/
4.1
---
You can now use --safe to never overwrite files.
4.0.2
-----
Lots of bug fixes, introducing automatic tests
4.0.1
------
Fix bug for Python2.5: threading module still has
only camelCase functions.
4.0
----
Now using ``shutil`` and ``thread`` modules instead of ``subprocess``.
(Replacing ``subprocess.Popen("/bin/cp")`` by calling a thread
running ``shutil.copy``)
Bonus: pycp might become cross-platform
3.2
----
Switch from ``getopt`` to ``OptionParser`` (much better)
3.1
---
* Now using ``/bin/cp`` instead of ``cp`` (thanks, Chris Gilles)
* No more ``-o`` option. Files are now overwritten by default.
Pass a ``-i,--interactive`` option if you want to be asked
for confirmation before overwriting files
* Mimic ``cp`` behaviour. (thanks, ctaf)
3.0
---
Little trick to have a ``pymv``
2.2
---
* Skips existing files instead of canceling whole operation
* Implementing ``-o,--overwrite`` option.
2.1
---
Able to copy multiple files::
pycp bar foo /path/to/baz
2.0
----
Now able to copy files recursively!
1.3
----
Add an ETA and file speed estimation
1.2
---
* Fix possible division by zero
* Fix possible race condition
1.1
---
Add a proper license
1.0
---
Initial commit
|
PypiClean
|
/sym_cli-0.6.1.tar.gz/sym_cli-0.6.1/sym/cli/saml_clients/saml2aws.py
|
import shlex
import subprocess
from configparser import ConfigParser, NoOptionError
from functools import cached_property
from pathlib import Path
from typing import Final, Iterator, Optional, Tuple
import click
from semver import VersionInfo
from sym.shared.cli.helpers.contexts import push_envs
from sym.shared.cli.helpers.keywords_to_options import (
Argument,
Options,
keywords_to_options,
)
from ..decorators import command_require_bins, intercept_errors, run_subprocess
from ..errors import (
ExpiredCredentials,
FailedSubprocessError,
SamlClientNotSetup,
UnavailableResourceError,
)
from ..helpers.config import Config, SymConfigFile
from ..helpers.constants import Saml2AwsCredsExpired, Saml2AwsNoCreds, Saml2AwsNoRoles
from ..helpers.params import Profile, get_saml2aws_params
from .saml_client import SAMLClient
MIN_VERSION = VersionInfo.parse("2.26.2")
MIN_DOUBLE_OKTA_VERSION = VersionInfo.parse("2.28.3")
MIN_CACHE_VERSION = VersionInfo.parse("2.29.0")
PREFERRED_VERSION = VersionInfo.parse("2.30.0")
ErrorPatterns = {
Saml2AwsNoRoles: UnavailableResourceError,
Saml2AwsNoCreds: SamlClientNotSetup,
Saml2AwsCredsExpired: ExpiredCredentials,
}
class Saml2Aws(SAMLClient):
__slots__ = ["config_file", "resource", "options", "_config"]
binary = "saml2aws"
option_value = "saml2aws"
priority = 10
setup_help = (
f"Upgrade to the latest version (>= {MIN_VERSION}), then run `saml2aws login`."
)
resource: str
options: "GlobalOptions"
config_file: Final[SymConfigFile]
_config: Optional[ConfigParser]
_s2a_options: Final[Options]
def __init__(self, resource: str, *, options: "GlobalOptions") -> None:
super().__init__(resource, options=options)
self.config_file = SymConfigFile(resource=resource, file_name="saml2aws.cfg")
self._s2a_options = {
"verbose": self.debug,
"config": str(self.config_file),
"idp_account": self._section_name,
"skip_prompt": True,
}
@cached_property
def _version(self) -> VersionInfo:
# Can't use run_subprocess because saml2aws annoyingly outputs the version on stderr
version = subprocess.run(
["saml2aws", "--version"],
text=True,
capture_output=True,
).stderr
if not version:
return VersionInfo.parse("0.0.0")
return VersionInfo.parse(version.strip())
def _check_version(self) -> bool:
if self._version < MIN_VERSION:
return False
if self._version < MIN_DOUBLE_OKTA_VERSION:
click.secho(
f"Hint: Your version of saml2aws has a bug that will cause double Okta prompts. Try upgrading to at least {PREFERRED_VERSION}.\n",
err=True,
fg="cyan",
)
elif self._version < MIN_CACHE_VERSION:
click.secho(
f"Hint: Your version of saml2aws has a bug related to caching. Try upgrading to at least {PREFERRED_VERSION}.\n",
err=True,
fg="cyan",
)
elif self._version < PREFERRED_VERSION:
click.secho(
f"Hint: Your version of saml2aws is out of date. Try upgrading to at least {PREFERRED_VERSION}.\n",
err=True,
fg="cyan",
)
return True
def is_setup(self) -> bool:
path = Path.home() / ".saml2aws"
if not path.exists():
return False
if not self._check_version():
return False
config = ConfigParser(strict=False)
config.read(path)
for section in config.sections():
try:
if config.get(section, "username"):
return True
except NoOptionError:
continue
return False
def _saml2aws_envs(self) -> dict:
return {
"AWS_REGION": self.ensure_config()[self._section_name]["region"],
}
@intercept_errors(ErrorPatterns)
@run_subprocess
@command_require_bins(binary)
def _exec(self, *args: str, **opts: str) -> Iterator[Tuple[Argument, ...]]:
# saml2aws exec actually joins all the arguments into a single string and
# runs it with the shell. So we have to use shlex.join to get around that!
reparseable = shlex.join(keywords_to_options([*args, opts]))
with push_envs(self._saml2aws_envs()):
yield (
"saml2aws",
self._s2a_options,
"exec",
"--",
reparseable,
)
@intercept_errors(ErrorPatterns, suppress=True)
@run_subprocess
@command_require_bins(binary)
def _login(self, show_prompt=False, force=False):
if show_prompt:
options = {**self._s2a_options, "skip_prompt": False}
args = {"username": Config.get_email()}
else:
options = {**self._s2a_options}
args = {}
if (
self._version >= MIN_CACHE_VERSION
and not force
and not self.options.disable_caches
and self.has_creds()
):
args["cache_saml"] = True
with push_envs(self._saml2aws_envs()):
# no-op if session active when force=False
yield "saml2aws", options, "login", args, {"force": force}
def _ensure_session(self, *, force: bool):
try:
self._login(silence_stderr_=not self.debug, force=force)
except FailedSubprocessError:
self._login(silence_stderr_=not self.debug, force=force, show_prompt=True)
@property
def _aws_session_duration(self) -> Optional[str]:
if self.session_length:
return str(self.session_length * 60)
return get_saml2aws_params().get("aws_session_duration")
def _ensure_config(self, profile: Profile) -> ConfigParser:
saml2aws_params = get_saml2aws_params()
config = ConfigParser(strict=False)
config.read_dict(
{
self._section_name: {
"aws_profile": self._section_name,
"url": self.get_aws_saml_url(),
"provider": "Okta",
"skip_verify": "false",
"timeout": "0",
"aws_urn": "urn:amazon:webservices",
**saml2aws_params,
"aws_session_duration": self._aws_session_duration,
"role_arn": profile.arn,
"region": profile.region,
}
}
)
return config
|
PypiClean
|
/xfacereclib.paper.IET2014-1.0.0.zip/xfacereclib.paper.IET2014-1.0.0/xfacereclib/paper/IET2014/categorical.py
|
import bob
import numpy
import facereclib
import argparse
import os
from .utils import split_score_file, evaluate_scores
def command_line_options(command_line_parameters = None):
# set up command line parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-R', '--result-directory', default = 'results',
help = "The output directory for result files.")
parser.add_argument('-w', '--latex-directory', default = 'latex',
help = "Change the directory into which the exported LaTeX file is written")
facereclib.utils.add_logger_command_line_option(parser)
# parse command line options
args = parser.parse_args(command_line_parameters)
facereclib.utils.set_verbosity_level(args.verbose)
# return command line options
return args
def apply_on_score_file(machine, infile, outfile):
"""Applies the given linear machine to the scores in the given input file and generate the given output file."""
# read the input score files
columns = bob.measure.load.four_column(infile)
facereclib.utils.debug("Calibrating scores from file %s to file %s" % (infile, outfile))
with open(outfile, 'w') as w:
# iterate over the lines of the input score file
for line in columns:
# extract the distance from the probe file name
distance = int(line[2].split('/')[0][-1]) - 1
# arrange score to be used in categorical calibration
score = numpy.array([line[3], distance==0, distance==1, distance==2], numpy.float64)
if machine.weights.shape[0] == 1:
# only a single weight => linear calibration
calibrated = machine(score[:1])
else:
# more than one weight (should be 4 here) => categorical calibration
calibrated = machine(score)
# write the calibrated score
w.write("%s %s %s %s\n" % (line[0], line[1], line[2], str(calibrated[0])))
def main():
args = command_line_options()
# read the raw score files
score_dir = os.path.join(args.result_directory, 'scface', 'combined', 'scores', 'combined', 'ztnorm')
facereclib.utils.info("Reading score files %s and %s" % (os.path.join(score_dir, 'scores-dev'), os.path.join(score_dir, 'scores-eval')))
dev_scores = split_score_file(os.path.join(score_dir, 'scores-dev'))
eval_scores = split_score_file(os.path.join(score_dir, 'scores-eval'))
# arrange development set scores to perform categorical calibration
sorted_scores = []
for scores in dev_scores:
data = numpy.hstack([scores[0], scores[1], scores[2]])
line1 = numpy.hstack([numpy.ones(len(scores[0])), numpy.zeros(len(scores[1])), numpy.zeros(len(scores[2]))])
line2 = numpy.hstack([numpy.zeros(len(scores[0])), numpy.ones(len(scores[1])), numpy.zeros(len(scores[2]))])
line3 = numpy.hstack([numpy.zeros(len(scores[0])), numpy.zeros(len(scores[1])), numpy.ones(len(scores[2]))])
sorted_scores.append(numpy.vstack([data, line1, line2, line3]))
facereclib.utils.info("Calibrating scores")
# create the Linear Logistic Regressor from Bob
llr_trainer = bob.trainer.CGLogRegTrainer(0.5, 1e-16, 100000)
# perform linear calibration using only the score data
facereclib.utils.debug("Linear calibration")
linear_machine = llr_trainer.train(sorted_scores[0][0:1].T, sorted_scores[1][0:1].T)
# perform categorical calibration using the arranged score data
facereclib.utils.debug("Categorical calibration")
categorical_machine = llr_trainer.train(sorted_scores[0].T, sorted_scores[1].T)
# Write the calibrated score files for development and evaluation set since we'll need them for plotting
apply_on_score_file(linear_machine, os.path.join(score_dir, 'scores-dev'), os.path.join(score_dir, 'scores-dev-linear'))
apply_on_score_file(linear_machine, os.path.join(score_dir, 'scores-eval'), os.path.join(score_dir, 'scores-eval-linear'))
apply_on_score_file(categorical_machine, os.path.join(score_dir, 'scores-dev'), os.path.join(score_dir, 'scores-dev-categorical'))
apply_on_score_file(categorical_machine, os.path.join(score_dir, 'scores-eval'), os.path.join(score_dir, 'scores-eval-categorical'))
# compute raw, linear and categorical calibrated scores
all_scores = {'none':{}, 'linear':{}, 'categorical':{}}
for group, group_scores in (('dev', dev_scores), ('eval', eval_scores)):
for t in all_scores: all_scores[t][group] = {}
for type, scores in (('neg', group_scores[0]), ('pos', group_scores[1])):
# raw scores
all_scores['none'][group][type] = [s for d in scores for s in d]
# linear calibrated scores
all_scores['linear'][group][type] = numpy.array([s[0] for d in scores for s in linear_machine(d.reshape((d.shape[0],1)))])
# categorically calibrated scores
all_scores['categorical'][group][type] = numpy.array([categorical_machine(numpy.array([s, i==0, i==1, i==2], numpy.float64))[0] for i,d in enumerate(scores) for s in d])
# compute performance for the three different types of calibrations
for cal in all_scores:
facereclib.utils.debug("Evaluating calibrated scores with type %s" % cal)
# scores
neg_dev = all_scores[cal]['dev']['neg']
pos_dev = all_scores[cal]['dev']['pos']
neg_eval = all_scores[cal]['eval']['neg']
pos_eval = all_scores[cal]['eval']['pos']
# compute performances for the current calibration type
Cver_min_dev, Cver_min_eval, Cver_eval, Cver_0_dev, Cver_0_eval, Pfr_dev, Pfr_eval, Cllr_dev, Cllr_eval, Cllr_min_dev, Cllr_min_eval = evaluate_scores(neg_dev, pos_dev, neg_eval, pos_eval)
# create output directory if needed
facereclib.utils.ensure_dir(args.latex_directory)
# write results in LaTeX-compatible format
latex_file = os.path.join(args.latex_directory, "calibration-%s.tex" % cal)
with open(latex_file, 'w') as f:
# write header
f.write("% Cver_min-dev Cver_min-eval Cver-eval Cllr_min-dev Cllr-dev Cmc-dev Cllr_min-eval Cllr-eval Cmc-eval\n")
# write \Result macro
f.write("\\Result{%3.2f}{%3.2f}{%3.2f} {%1.3f}{%1.3f}{%1.3f} {%1.3f}{%1.3f}{%1.3f}\n" % (
Cver_min_dev * 100.,
Cver_min_eval * 100.,
Cver_eval * 100.,
Cllr_min_dev,
Cllr_dev,
Cllr_dev - Cllr_min_dev,
Cllr_min_eval,
Cllr_eval,
Cllr_eval - Cllr_min_eval,
)
)
# write second macro with results threshold 0 and at FAR 1 %
f.write("% Cver-dev Cver-eval at threshold 0; Pfr-dev and Pfr-eval at FAR 1%\n")
f.write("\\ResultAt{%3.2f}{%3.2f} {%3.2f}{%3.2f}\n" % (
Cver_0_dev * 100.,
Cver_0_eval * 100.,
Pfr_dev * 100.,
Pfr_eval * 100.
)
)
facereclib.utils.info("Wrote LaTeX-compatible file %s\n" % latex_file)
|
PypiClean
|
/orchestrator_core-1.2.3rc3.tar.gz/orchestrator_core-1.2.3rc3/docs/architecture/product_modelling/node.md
|
# Node
The administration handoff in IMS will be different for every organisation. For
this example, it is assumed that all administration that comes with the physical
installation and first-time configuration of the network node in IMS is done
manually by a NOC engineer. This makes the node product rather simple. The only
product block that is defined holds pointers to all related information that is
stored in the operations support systems (OSS). This includes of course a
pointer to the information in IMS, and after the service has been deployed on
the network, another pointer to the related information in the NRM. To keep
track of all IP addresses and prefixes used across the network service product,
the pointers to the IPv4 and IPv6 loopback addresses on the node are also
stored.
<img height="75%" src="../node.png" title="Node Product Model" width="75%"/>
* **ims_id**: ID of the node in the inventory management system
* **nrm_id**: ID of the node in the network resource manager
* **ipv4_ipam_id**: ID of the node’s IPv4 loopback address in IPAM
* **ipv6_ipam_id**: ID of the node’s IPv6 loopback address in IPAM
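
The attributes above map naturally onto a product block definition in code. The following is a minimal sketch only, assuming orchestrator-core's `ProductBlockModel` base class and `SubscriptionLifecycle` lifecycles; the class names and the use of plain `int` identifiers are illustrative and not taken from a reference implementation.

```python
from typing import Optional

from orchestrator.domain.base import ProductBlockModel
from orchestrator.types import SubscriptionLifecycle


class NodeBlockInactive(ProductBlockModel, product_block_name="Node"):
    # While the node is still being provisioned, none of the OSS references
    # exist yet, so every pointer is optional.
    ims_id: Optional[int] = None
    nrm_id: Optional[int] = None
    ipv4_ipam_id: Optional[int] = None
    ipv6_ipam_id: Optional[int] = None


class NodeBlock(NodeBlockInactive, lifecycle=[SubscriptionLifecycle.ACTIVE]):
    # Once the service is active, the pointers into IMS, the NRM and IPAM
    # must all be filled in.
    ims_id: int
    nrm_id: int
    ipv4_ipam_id: int
    ipv6_ipam_id: int
```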
|
PypiClean
|
/xformers-0.0.21.tar.gz/xformers-0.0.21/third_party/flash-attention/csrc/cutlass/examples/44_multi_gemm_ir_and_codegen/README.md
|
This example provides utilities for generating back-to-back (B2B) GEMMs using CUTLASS.
## Quick start
A configuration file containing the GEMMs to be fused together is located in [config.json](config.json). Edit
this to change the configuration that you would like to run.
```shell
cd ir_gen
# Set up basic variables
out_dir=directory_to_emit_files
cutlass_dir=$(pwd)/../../..
config_file=$(pwd)/../config.json
# Generate code for GEMMs described in `config_file`
./generate.sh $config_file $out_dir $cutlass_dir
# Build the generated code
cd $out_dir
mkdir build && cd build
cmake .. -DGPU_ARCHS="75;80"
make -j
# Run the generated code with M=1024 K0=32 and Batch=1
./sample 1024 32 1
```
## Current restrictions
This experimental example has the following restrictions:
1. N tile should not exceed 256, or register spilling will occur.
2. Only FP16 is currently supported.
3. Matrix A must be row major, matrix B must be column major, matrices C and D must be row major.
## Copyright
Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
|
PypiClean
|
/tangelo_gc-0.4.0-py3-none-any.whl/tangelo/linq/qpu_connection/ibm_connection.py
|
import os
from tangelo.linq.translator import translate_operator, translate_circuit
from tangelo.linq.qpu_connection.qpu_connection import QpuConnection
try:
from qiskit.providers.jobstatus import JobStatus
from qiskit.primitives import SamplerResult, EstimatorResult
from qiskit_ibm_runtime import QiskitRuntimeService, Sampler, Estimator, Session, Options
is_qiskit_installed = True
except ModuleNotFoundError:
is_qiskit_installed = False
class IBMConnection(QpuConnection):
""" Wrapper around IBM Qiskit runtime API to facilitate job submission from Tangelo """
def __init__(self, ibm_quantum_token=None):
if not is_qiskit_installed:
raise ModuleNotFoundError("Both qiskit and qiskit_ibm_runtime need to be installed.")
self.api_key = ibm_quantum_token if ibm_quantum_token else os.getenv("IBM_TOKEN", None)
self.service = self._login()
self.jobs = dict()
self.jobs_results = dict()
def _login(self):
""" Attempt to connect to the service. Fails if environment variable IBM_TOKEN
has not been set to a correct value.
"""
if not self.api_key:
raise RuntimeError(f"Please provide IBM_TOKEN (as environment variable or at instantiation of connection.")
try:
return QiskitRuntimeService(channel="ibm_quantum", token=self.api_key)
except Exception as err:
raise RuntimeError(f"{err}")
def get_backend_info(self):
""" Return configuration information for each device found on the service """
return {b.name: b.configuration() for b in self.service.backends()}
def job_submit(self, program, backend_name, n_shots, circuits, operators=None, runtime_options=None):
""" Submit job, return job ID.
Args:
program (str): name of available qiskit-runtime program (e.g sampler, estimator currently)
backend_name (str): name of a qiskit backend
n_shots (int): Number of shots to use on the target backend
circuits (Circuit | List[Circuit]): Tangelo circuit(s)
operators (QubitOperator | List[QubitOperator]) : Optional, qubit operators for computing expectation values
runtime_options (dict): Optional, extra keyword arguments for options supported in qiskit-runtime.
Returns:
str: string representing the job id
"""
# Set up options and intermediary Qiskit runtime objects
backend = self.service.backend(backend_name)
session = Session(service=self.service, backend=backend)
if runtime_options is None:
runtime_options = dict()
options = Options(optimization_level=runtime_options.get('optimization_level', 1),
resilience_level=runtime_options.get('resilience_level', 0))
# Translate circuits in qiskit format, add final measurements
if not isinstance(circuits, list):
circuits = [circuits]
qiskit_cs = list()
for c in circuits:
qiskit_c = translate_circuit(c, target="qiskit")
qiskit_c.remove_final_measurements()
qiskit_c.measure_all(add_bits=False)
qiskit_cs.append(qiskit_c)
# If needed, translate qubit operators in qiskit format
if operators:
if not isinstance(operators, list):
operators = [operators]
qiskit_ops = [translate_operator(op, source="tangelo", target="qiskit") for op in operators]
# Execute qiskit-runtime program, retrieve job ID
if program == 'sampler':
job = self._submit_sampler(qiskit_c, n_shots, session, options)
elif program == 'estimator':
estimator = Estimator(session=session, options=options)
job = estimator.run(circuits=qiskit_cs, observables=qiskit_ops, shots=n_shots)
else:
raise NotImplementedError("Only Sampler and Estimator programs currently available.")
# Store job object, return job ID.
self.jobs[job.job_id] = job
return job.job_id
def job_status(self, job_id):
""" Return information about the job corresponding to the input job ID
Args:
job_id (str): string representing the job id
Returns:
enum value: status response from the native API
"""
return self.jobs[job_id].status()
def job_results(self, job_id):
""" Blocking call requesting job results.
Args:
job_id (str): string representing the job id
Returns:
dict: histogram of measurements
"""
# Retrieve job object, check job has not been cancelled, retrieve results if not
job = self.jobs[job_id]
result = job.result()
if job.status() == JobStatus.CANCELLED:
print(f"Job {job_id} was cancelled and no results can be retrieved.")
return None
self.jobs_results[job_id] = job._results
# Sampler: return histogram for user in standard Tangelo format
if isinstance(result, SamplerResult):
hist = result.quasi_dists[0]
freqs = dict()
for i, freq in hist.items():
bs = bin(i).split('b')[-1]
n_qubits = job.inputs['circuits'].num_qubits
state_binstr = "0" * (n_qubits - len(bs)) + bs
freqs[state_binstr[::-1]] = freq
return freqs
# Estimator: return the array of expectation values
elif isinstance(result, EstimatorResult):
return list(result.values)
def job_cancel(self, job_id):
""" Attempt to cancel an existing job. May fail depending on job status (e.g too late)
Args:
job_id (str): string representing the job id
Returns:
bool: whether the job was successfully cancelled.
"""
job = self.jobs[job_id]
is_cancelled = True
try:
job.cancel()
except Exception as err:
is_cancelled = False
message = "successful" if is_cancelled else "failed"
print(f"Job {job_id} :: cancellation {message}.")
return is_cancelled
def _submit_sampler(self, qiskit_c, n_shots, session, options):
""" Submit job using Sampler primitive, return job ID.
Args:
qiskit_c (Qiskit.QuantumCircuit): Circuit in Qiskit format
n_shots (int): Number of shots
session (qiskit_ibm_runtime.Session): Qiskit runtime Session object
options (qiskit_ibm_runtime.Options): Qiskit runtime Options object
Returns:
str: string representing the job id
"""
# Set up program inputs
run_options = {"shots": n_shots}
resilience_settings = {"level": options.resilience_level}
program_inputs = {"circuits": qiskit_c, "circuit_indices": [0],
"run_options": run_options,
"resilience_settings": resilience_settings}
# Set backend
more_options = {"backend_name": session.backend()}
job = self.service.run(program_id="sampler", options=more_options, inputs=program_inputs)
return job
|
PypiClean
|
/certora_cli_alpha_jtoman_cert_1920-20230505.6.20.438554-py3-none-any.whl/certora_cli/Shared/certoraUtils.py
|
import csv
import json
import os
import subprocess
from abc import ABCMeta
from enum import Enum, unique
import sys
import platform
import shlex
import shutil
import re
import queue
import math
from typing import Any, Callable, Dict, List, Optional, Set, Union, Generator, Tuple
from pathlib import Path
from contextlib import contextmanager
from Shared.certoraTester import compareResultsWithExpected, get_errors, has_violations, get_violations
import logging
import random
import time
from datetime import datetime
io_logger = logging.getLogger("file")
# logger for issues calling/shelling out to external functions
process_logger = logging.getLogger("rpc")
# messages from the verification results
verification_logger = logging.getLogger("verification")
# errors handling csvs (???)
csv_logger = logging.getLogger("csv")
# logger for issues regarding type checking
typecheck_logger = logging.getLogger("type_check")
context_logger = logging.getLogger("context")
LEGAL_CERTORA_KEY_LENGTHS = [32, 40]
# bash colors
BASH_ORANGE_COLOR = "\033[33m"
BASH_END_COLOR = "\033[0m"
BASH_GREEN_COLOR = "\033[32m"
BASH_RED_COLOR = "\033[31m"
BASH_PURPLE_COLOR = "\033[35m"
VERIFICATION_ERR_MSG_PREFIX = "Prover found violations:"
VERIFICATION_SUCCESS_MSG = "No errors found by Prover!"
DEFAULT_SOLC = "solc"
ENVVAR_CERTORA = "CERTORA"
PUBLIC_KEY = "795ebbac71ae5fd6a19e7a214a524b064e33ff05"
CERTORA_INTERNAL_ROOT = ".certora_internal"
PRODUCTION_PACKAGE_NAME = "certora-cli"
BETA_PACKAGE_NAME = "certora-cli-beta"
DEV_PACKAGE_NAME_PREFIX = f"{PRODUCTION_PACKAGE_NAME}-"
CERTORA_BUILD_DIRECTORY = Path("")
CERTORA_JARS = Path("certora_jars")
CERTORA_CLI_VERSION_METADATA = Path("CERTORA-CLI-VERSION-METADATA.json")
PRE_AUTOFINDER_BACKUP_DIR = Path(".pre_autofinders")
POST_AUTOFINDER_BACKUP_DIR = Path(".post_autofinders")
PACKAGE_FILE = Path("package.json")
REMAPPINGS_FILE = Path("remappings.txt")
RECENT_JOBS_FILE = Path(".certora_recent_jobs.json")
@unique
class SupportedServers(Enum):
"""
mapping between servers and their url
"""
STAGING = 'https://vaas-stg.certora.com'
PRODUCTION = 'https://prover.certora.com'
def get_certora_internal_dir() -> Path:
return CERTORA_BUILD_DIRECTORY
def get_random_build_dir() -> Path:
for tries in range(3):
build_uuid = f"{datetime.now().strftime('%y_%m_%d_%H_%M_%S')}_{random.randint(0, 999):03d}"
build_dir = CERTORA_INTERNAL_ROOT / Path(build_uuid)
if not build_dir.exists():
return build_dir
time.sleep(0.5)
raise Exception('Unable to generate random build directory')
def reset_certora_internal_dir(build_dir_str: Optional[str] = None) -> None:
"""
build_dir_str constraints are defined in type_build_dir (basically not an existing file/dir and open for creating
a new directory)
"""
global CERTORA_BUILD_DIRECTORY
if build_dir_str is None:
build_dir = get_random_build_dir()
safe_create_dir(Path(
CERTORA_INTERNAL_ROOT)) # create it, also so that the 'latest' symlink can be generated when the directory is empty
if is_windows():
build_dir = Path(".")
else:
build_dir = Path(build_dir_str)
CERTORA_BUILD_DIRECTORY = Path(build_dir)
if build_dir_str is None:
# We are using the default dir, with the BUILD_UUID. Add a symlink to the last one to run, for ease of use.
# Note that when running concurrently 'latest' may not be well defined, but for local usage it could be useful.
last_build = build_dir.parent / 'latest'
try:
last_build.unlink(missing_ok=True)
last_build.symlink_to(build_dir.relative_to(build_dir.parent), target_is_directory=True)
except Exception as e:
# This is a nice-to-have thing, so if we fail for some reason (e.g. permission error)
# we'll just continue without it.
io_logger.warning(f"Failed to create the '{last_build}' symlink. {e}")
def path_in_certora_internal(path: Path) -> Path:
return path if (path.parent == CERTORA_BUILD_DIRECTORY) else CERTORA_BUILD_DIRECTORY / path
def get_certora_config_dir() -> Path:
return path_in_certora_internal(Path(".certora_config"))
def get_certora_sources_dir() -> Path:
return path_in_certora_internal(Path(".certora_sources"))
def get_certora_build_file() -> Path:
return path_in_certora_internal(Path(".certora_build.json"))
def get_certora_verify_file() -> Path:
return path_in_certora_internal(Path(".certora_verify.json"))
def get_certora_verify_file_cvl1() -> Path:
return path_in_certora_internal(Path(".certora_verify.cvl1.json"))
def get_certora_metadata_file() -> Path:
return path_in_certora_internal(Path(".certora_metadata.json"))
def get_resource_errors_file() -> Path:
return path_in_certora_internal(Path("resource_errors.json"))
def get_last_confs_directory() -> Path:
return path_in_certora_internal(Path(".last_confs"))
def get_debug_log_file() -> Path:
return path_in_certora_internal(Path("certora_debug_log.txt"))
class SolcCompilationException(Exception):
pass
class CertoraUserInputError(ValueError):
pass
class DeprecatedFeature(CertoraUserInputError):
pass
MIN_JAVA_VERSION = 11 # minimal java version to run the local type checker jar
def __colored_text(txt: str, color: str) -> str:
return color + txt + BASH_END_COLOR
def orange_text(txt: str) -> str:
return __colored_text(txt, BASH_ORANGE_COLOR)
def purple_text(txt: str) -> str:
return __colored_text(txt, BASH_PURPLE_COLOR)
def red_text(txt: str) -> str:
return __colored_text(txt, BASH_RED_COLOR)
def green_text(txt: str) -> str:
return __colored_text(txt, BASH_GREEN_COLOR)
def print_completion_message(txt: str, flush: bool = False) -> None:
print(green_text(txt), flush=flush)
def print_progress_message(txt: str, flush: bool = False) -> None:
if not is_ci_or_git_action():
print(txt, flush=flush)
def is_ci_or_git_action() -> bool:
if os.environ.get("GITHUB_ACTIONS", False) or os.environ.get("CI", False):
return True
return False
def remove_file(file_path: Union[str, Path]) -> None: # TODO - accept only Path
if isinstance(file_path, str):
try:
os.remove(file_path)
except OSError:
pass
else:
try:
# When we upgrade to Python 3.8, we can use unlink(missing_ok=True) and remove the try/except clauses
file_path.unlink()
except FileNotFoundError:
pass
def get_package_and_version() -> Tuple[bool, str, str]:
"""
@return: A tuple (is installed package, package name, version)
is installed package - True if we run an installed package, false if we run as a local script
package name - either certora-cli / certora-cli-beta, or certora-cli-alpha-master and others
version - the python package version in format X.Y.Z if found
"""
# Note: the most common reason not to have an installed package is in circleci
version_metadata_file = get_package_resource(CERTORA_JARS / CERTORA_CLI_VERSION_METADATA)
if not version_metadata_file.exists():
return False, "", ""
try:
with open(version_metadata_file) as version_metadata_handle:
version_metadata = json.load(version_metadata_handle)
if "name" in version_metadata and "version" in version_metadata:
return True, version_metadata["name"], version_metadata["version"]
else:
raise Exception(f"Invalid format for {version_metadata_file}, got {version_metadata}")
except OSError as e: # json errors - better to just propagate up
raise Exception(f"Failed to open {version_metadata_file}: {e.strerror}")
def check_results_from_file(output_path: str, expected_filename: str) -> bool:
with open(output_path) as output_file:
actual = json.load(output_file)
return check_results(actual, expected_filename)
def check_results(actual: Dict[str, Any], expected_filename: str) -> bool:
actual_results = actual
based_on_expected = os.path.exists(expected_filename)
if based_on_expected: # compare actual results with expected
with open(expected_filename) as expectedFile:
expected = json.load(expectedFile)
if "rules" in actual_results and "rules" in expected:
is_equal = compareResultsWithExpected("test", actual_results["rules"], expected["rules"], {}, {})
elif "rules" not in actual_results and "rules" not in expected:
is_equal = True
else:
is_equal = False
if is_equal:
print_completion_message(f"{VERIFICATION_SUCCESS_MSG} (based on {expected_filename})")
return True
# not is_equal:
error_str = get_errors()
if error_str:
verification_logger.error(f"{VERIFICATION_ERR_MSG_PREFIX} {error_str}")
if has_violations():
verification_logger.error(VERIFICATION_ERR_MSG_PREFIX)
get_violations()
return False
# if expected results are not defined
# traverse results and look for violation
errors = []
result = True
if "rules" not in actual_results:
errors.append("No rules in results")
result = False
elif len(actual_results["rules"]) == 0:
errors.append("No rule results found. Please make sure you wrote the rule and method names correctly.")
result = False
else:
for rule in actual_results["rules"].keys():
rule_result = actual_results["rules"][rule]
if isinstance(rule_result, str) and rule_result != 'SUCCESS':
errors.append("[rule] " + rule)
result = False
elif isinstance(rule_result, dict):
# nested rule - ruleName: {result1: [functions list], result2: [functions list] }
nesting = rule_result
violating_functions = ""
for method in nesting.keys():
if method != 'SUCCESS' and len(nesting[method]) > 0:
violating_functions += '\n [func] ' + '\n [func] '.join(nesting[method])
result = False
if violating_functions:
errors.append("[rule] " + rule + ":" + violating_functions)
if not result:
verification_logger.error(VERIFICATION_ERR_MSG_PREFIX)
verification_logger.error('\n'.join(errors))
return False
print_completion_message(VERIFICATION_SUCCESS_MSG)
return True
def is_windows() -> bool:
return platform.system() == 'Windows'
def replace_file_name(old_file: str, new_file_name: str) -> str:
"""
:param old_file: the full original path
:param new_file_name: the new base name of the file
:return: file_with_path with the base name of the file replaced with new_file_name,
preserving the file extension and the base path
"""
old_file_path = Path(old_file)
return str(old_file_path.parent / f'{new_file_name}')
def safe_create_dir(path: Path, revert: bool = True) -> None:
if path.is_dir():
io_logger.debug(f"directory {path} already exists")
return
try:
path.mkdir(parents=True)
except OSError as e:
msg = f"Failed to create directory {path.resolve()}"
if revert:
io_logger.error(msg, exc_info=e)
raise e
else:
io_logger.debug(msg, exc_info=e)
def as_posix(path: str) -> str:
"""
Converts path from windows to unix
:param path: Path to translate
:return: A unix path
"""
return path.replace("\\", "/")
def normalize_double_paths(path: str) -> str:
"""
Handles an oddity of paths from absolutePath nodes in solc AST,
specifically "//" instead of just "/"
"""
return path.replace("//", "/")
def abs_posix_path(path: Union[str, Path]) -> str:
"""
Returns the absolute path, unix style
:param path: Path to change
:return: A posix style absolute path string
"""
return as_posix(str(abs_posix_path_obj(path)))
def abs_posix_path_obj(path: Union[str, Path]) -> Path:
"""
Returns the absolute path, unix style
:param path: Path to change
:return: A posix style absolute Path, resolving symlinks
"""
sanitized_path = as_posix(str(path)) # Windows works with / as file separator, so we always convert
abs_path = Path(sanitized_path).expanduser().resolve()
return abs_path
def abs_posix_path_relative_to_root_file(rel_path: Path, root_file: Path) -> Path:
"""
Returns the absolute path, unix style
:param rel_path: Relative path to change.
:param root_file: rel_path is assumed to be relative to the directory of the file root_file.
:return: A posix style absolute path
"""
root_dir = root_file.parent
file_path = root_dir / rel_path
return Path(abs_posix_path(file_path))
def convert_path_for_solc_import(path: Union[Path, str]) -> str:
"""
Converts a path to a solc-compatible import.
Solc paths only accept / as a file separator, and do not accept drives in path
:param path: A path to convert
:return: the converted path
"""
unix_file_sep_path = abs_posix_path(path)
driveless_path = re.sub("^[a-zA-Z]:", "", unix_file_sep_path)
return as_posix(os.path.abspath(driveless_path))
def remove_and_recreate_dir(path: Path) -> None:
if path.is_dir():
shutil.rmtree(path)
safe_create_dir(path)
def prepare_call_args(cmd: str) -> List[str]:
"""
Takes a command line as a string and returns a list of strings that consist that line.
Importantly, does not interpret forward slashes used for newline continuation as a word.
We replace a call to a Python script with a call to the Python executable first.
We also fix the path to the certora root directory
:param cmd - the command line we split. We assume it contains no comments!
:return - a list of words that make up the command line given
"""
if is_windows():
"""
There is no good shlex alternative to Windows, but quoting works well, and spaces should work too
see https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex
"""
split = cmd.split()
else:
# Using shlex here is necessary, as otherwise quotes are not handled well, especially in lists like "a/path".
split = shlex.split(cmd)
if split[0].endswith('.py'):
# sys.executable returns a full path to the current running python, so it's good for running our own scripts
certora_root = get_certora_root_directory()
args = [sys.executable, (certora_root / split[0]).as_posix()] + split[1:]
else:
args = split
return args
def get_certora_root_directory() -> Path:
return Path(os.getenv(ENVVAR_CERTORA, os.getcwd()))
def get_certora_envvar() -> str:
return os.getenv(ENVVAR_CERTORA, "")
def get_certora_dump_config() -> str:
return os.getenv("CERTORA_DUMP_CONFIG", "")
def which(filename: str) -> Optional[str]:
if is_windows() and not filename.endswith(".exe"):
filename += ".exe"
# TODO: find a better way to iterate over all directories in $Path
for dirname in os.environ['PATH'].split(os.pathsep) + [os.getcwd()]:
candidate = os.path.join(dirname, filename)
if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
return filename
return None
def read_json_file(file_name: Path) -> Dict[str, Any]:
with file_name.open() as json_file:
json_obj = json.load(json_file)
return json_obj
def write_json_file(data: Union[Dict[str, Any], List[Dict[str, Any]]], file_name: Path) -> None:
with file_name.open("w+") as json_file:
json.dump(data, json_file, indent=4)
def output_to_csv(filename: str, fieldnames: List[str], row: Dict[str, Any]) -> bool:
"""
Creates and appends the row to csv file
@param filename: csv filename without the extension
@param fieldnames: headers of the csv file
@param row: data to append (as a row) to the csv file
@return: true if completed successfully
"""
try:
csv_path = Path(f'{filename}.csv')
if csv_path.exists():
with csv_path.open("a") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(row)
else:
with csv_path.open('a+') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
writer.writerow(row)
return True
except ValueError as e: # when the row contains fields not in fieldnames (file header)
csv_logger.error("value conversion failed", exc_info=e)
return False
class NoValEnum(Enum):
"""
A class for an enum where the numerical value has no meaning.
"""
def __repr__(self) -> str:
"""
Do not print the value of this enum, it is meaningless
"""
return f'<{self.__class__.__name__}.{self.name}>'
class Mode(NoValEnum):
"""
Mode of operation - the modes are mutually exclusive:
1. CLI parameters consist of a single .tac file.
We check the verification condition given by that file.
2. CLI parameters consist of a single .conf file.
A .conf file is created on each tool run inside the .certora_config directory. It contains the command line
options that were used for the run (in a parsed format).
We take the options given from that file as a basis for this run; additionally given command line options
override options given in the .conf file.
3. CLI parameters consist of one or more Solidity (.sol) files and the `--assert` option is set.
We create and check verification conditions based on the `assert` statements in the given Solidity contracts.
4. CLI parameters consist of one or more Solidity (.sol) files and the `--verify` option is set (the option takes
an additional .spec/.cvl file).
We use the given .spec/.cvl file to create and check verification conditions for the given Solidity contracts.
5. CLI parameters consist of 0 files but all are provided in --bytecode.
The bytecode files are in JSON, and adhere to a format given by blockchain scrapers.
--bytecode_spec must be specified as well if this mode is used.
The spec will be checked against the first bytecode provided, with the other bytecodes serving as auxiliary.
"""
TAC = "a single .tac file"
CONF = "a single .conf file"
VERIFY = "using --verify"
ASSERT = "using --assert"
BYTECODE = "using --bytecode"
def mode_has_spec_file(mode: Mode) -> bool:
return mode not in [Mode.ASSERT, Mode.TAC]
def is_hex_or_dec(s: str) -> bool:
"""
@param s: A string
@return: True if it is a decimal or hexadecimal number
"""
try:
int(s, 16)
return True
except ValueError:
return False
def is_hex(number: str) -> bool:
"""
@param number: A string
@return: True if the number is a hexadecimal number:
- Starts with 0
- Second character is either x or X
- All other characters are digits 0-9, or letters a-f or A-F
"""
match = re.search(r'^0[xX][0-9a-fA-F]+$', number)
return match is not None
def hex_str_to_cvt_compatible(s: str) -> str:
"""
@param s: A string representing a number in base 16 with '0x' prefix
@return: A string representing the number in base 16 but without the '0x' prefix
"""
assert is_hex(s)
return re.sub(r'^0[xX]', '', s)
def decimal_str_to_cvt_compatible(s: str) -> str:
"""
@param s: A string representing a number in base 10
@return: A string representing the hexadecimal representation of the number, without the '0x' prefix
"""
assert s.isnumeric()
return re.sub(r'^0[xX]', '', hex(int(s)))
def split_by_delimiter_and_ignore_character(input_str: str, delimiter: str, ignore_splitting_char: str,
last_delimiter_chars_to_include: int = 0) -> List[str]:
"""
Splits a string by a given delimiter, ignoring anything between a special pair of characters.
For example, if the delimiter is a comma, and the ignore splitting character is an asterisk, then the input:
hello,we,dislike*splitting,if,*it,is,complex
Will return:
['hello', 'we', 'dislike*splitting,if,*it', 'is', 'complex']
If we want to include some of the last characters of the delimiter in the preceding substring, we should specify a
positive number for the parameter last_delimiter_chars_to_include. A negative number will not include that amount
of characters after the delimiter in the substrings.
A more complex example, for delimiter ", -", ignore character ", the input string:
"-b=2, -assumeUnwindCond, -rule=bounded_supply, -m=withdrawCollateral(uint256, (bool, bool)), -regressionTest,
-solvers=bitwuzla, yices"
will return:
['-b=2',
'-assumeUnwindCond',
'-rule=bounded_supply',
'-m=withdrawCollateral(uint256, (bool, bool))',
'-regressionTest',
'-solvers=bitwuzla, yices']
Assumptions:
- We do not check for the validity of the last_delimiter_chars_to_include parameter. If it is too large or too
small, we will get an out-of-bounds error.
Notes:
- We currently do not support a different character to start and end an ignored section, like an opening and
closing parenthesis.
@param input_str a string we want to split to substrings
@param delimiter a sequence of characters by which we split
@param ignore_splitting_char a character that must appear an even amount of times in the string. Between every
pair of appearances, we skip splitting
@param last_delimiter_chars_to_include a number of characters from the end of the delimiter to include in the
following substring. See above.
@returns a list of strings that represents individual settings to pass to the jar. They might have illegal syntax.
"""
if input_str.count(ignore_splitting_char) % 2 != 0:
raise ValueError(f'Uneven number of {ignore_splitting_char} in {input_str}')
substrings = [] # type: List[str]
i = 0
substring_start_index = 0
ignore_splitting = False # if we are between the two ignore characters, we skip splitting
while i < len(input_str):
if input_str[i] == ignore_splitting_char:
ignore_splitting = not ignore_splitting
elif not ignore_splitting:
if i + len(delimiter) < len(input_str):
if input_str[i:i + len(delimiter)] == delimiter:
substrings.append(input_str[substring_start_index:i])
i += len(delimiter)
substring_start_index = i - last_delimiter_chars_to_include
continue
i += 1
if substring_start_index < len(input_str):
substrings.append(input_str[substring_start_index:])
return substrings
def string_distance_function(input_str: str, dictionary_str: str) -> float:
"""
Calculates a modified levenshtein distance between two strings. The distance function is modified to penalize less
for more common user mistakes.
Each subtraction, insertion or replacement of a character adds 1 to the distance of the two strings, unless:
1. The input string is a prefix of the dictionary string or vice versa - the distance is 0.1 per extra letter.
2. The replacement is between two equal letter except casing - adds nothing to the distance
3. The subtraction/addition is of an underscore, adds 0.1 to the distance
4. Repeated characters cost nothing, e.g. 'balloon', 'baloon' and 'balllllloooonn' have distance 0 from each other
:param input_str: the string the user gave as input, error-prone
:param dictionary_str: a legal string we compare the wrong input to
:return a distance measure between the two strings. A low number indicates a high probability that the user meant to give the
dictionary string as input
"""
# treat special cases first:
input_str = input_str.lower()
dictionary_str = dictionary_str.lower()
if input_str == dictionary_str:
return 0
if dictionary_str.startswith(input_str) or input_str.startswith(dictionary_str):
diff = abs(len(input_str) - len(dictionary_str))
return 0.1 * diff
"""
We are calculating the Levenshtein distance with a dynamic programming algorithm based on
https://en.wikipedia.org/wiki/Levenshtein_distance
Each matrix value distance_matrix[row][col] we calculate represent the distance between the two prefix substrings
input_str[0..row-1] and dictionary_str[0..col-1]
NOTE: our implementation differs from the classic implementation in that the cost of deletions/insertions is not
constant
"""
# Initialize matrix of zeros
rows = len(input_str) + 1
cols = len(dictionary_str) + 1
distance_matrix = []
for row in range(rows):
column = []
for j in range(cols):
column.append(0.0)
distance_matrix.append(column)
# Populate matrix of zeros with the indices of each character of both strings
for i in range(1, rows):
distance_matrix[i][0] = i
for k in range(1, cols):
distance_matrix[0][k] = k
# Calculate modified Levenshtein distance
for col in range(1, cols):
for row in range(1, rows):
if input_str[row - 1] == dictionary_str[col - 1]:
# No cost if the characters are the same up to casing in the two strings
cost: float = 0
elif input_str[row - 1] == '_' or dictionary_str[col - 1] == '_':
# common mistake
cost = 0.1
else:
# full cost
cost = 1
distance_matrix[row][col] = min(distance_matrix[row - 1][col] + cost, # Cost of deletions
distance_matrix[row][col - 1] + cost, # Cost of insertions
distance_matrix[row - 1][col - 1] + cost) # Cost of substitutions
return distance_matrix[rows - 1][cols - 1]
def get_closest_strings(input_word: str, word_dictionary: List[str],
distance: Callable[[str, str], float] = string_distance_function,
max_dist: float = 4, max_dist_ratio: float = 0.5, max_suggestions: int = 2,
max_delta: float = 0.2) -> List[str]:
"""
Gets an input word, which doesn't belong to a dictionary of predefined words, and returns a list of the closest
words from the dictionary, with respect to a distance function.
:param input_word: The word we look for closest matches of.
:param word_dictionary: A collection of words to suggest matches from.
:param distance: The distance function we use to measure proximity of words.
:param max_dist: The maximal distance between words, over which no suggestions will be made.
:param max_dist_ratio: A maximal ratio between the distance and the input word's length. No suggestions will be made
over this ratio.
:param max_suggestions: The maximal number of suggestions to return.
:param max_delta: We stop giving suggestions if the next best suggestion is worse than the one before it by more
than the maximal delta.
:return: A list of suggested words, ordered from the best match to the worst.
"""
distance_queue: queue.PriorityQueue = queue.PriorityQueue() # Ordered in a distance ascending order
for candidate_word in word_dictionary:
dist = distance(input_word, candidate_word)
distance_queue.put((dist, candidate_word))
all_suggestions: List[str] = []
last_dist = None
while not distance_queue.empty() and len(all_suggestions) <= max_suggestions:
suggested_dist, suggested_rule = distance_queue.get()
if suggested_dist > max_dist or suggested_dist / len(input_word) > max_dist_ratio:
break # The distances are monotonically increasing
if (last_dist is None) or (suggested_dist - last_dist <= max_delta):
all_suggestions.append(suggested_rule)
last_dist = suggested_dist
return all_suggestions
def get_readable_time(milliseconds: int) -> str:
# calculate (and subtract) whole hours
milliseconds_in_hour = 3600000 # 1000 * 60 * 60
hours = math.floor(milliseconds / milliseconds_in_hour)
milliseconds -= hours * milliseconds_in_hour
# calculate (and subtract) whole minutes
milliseconds_in_minute = 60000 # 1000 * 60
minutes = math.floor(milliseconds / milliseconds_in_minute)
milliseconds -= minutes * milliseconds_in_minute
# seconds
seconds = math.floor(milliseconds / 1000)
milliseconds -= seconds * 1000
duration = ""
if hours > 0:
duration += f"{hours}h "
duration += f"{minutes}m {seconds}s {milliseconds}ms"
return duration
def flush_stdout() -> None:
print("", flush=True)
def flatten_nested_list(nested_list: List[list]) -> list:
"""
@param nested_list: A list of lists: [[a], [b, c], []]
@return: a flat list, in our example [a, b, c]. If None was entered, returns None
"""
return [item for sublist in nested_list for item in sublist]
def flatten_set_list(set_list: List[Set[Any]]) -> List[Any]:
"""
Gets a list of sets, returns a list that contains all members of all sets without duplicates
:param set_list: A list containing sets of elements
:return: A list containing all members of all sets. There are no guarantees on the order of elements.
"""
ret_set = set()
for _set in set_list:
for member in _set:
ret_set.add(member)
return list(ret_set)
def is_relative_to(path1: Path, path2: Path) -> bool:
"""certora-cli currently requires python3.8 and it's the last version without support for is_relative_to.
Shamelessly copying.
"""
# return path1.is_relative_to(path2)
try:
path1.relative_to(path2)
return True
except ValueError:
return False
def find_jar(jar_name: str) -> Path:
# if we are a dev running certoraRun.py (local version), we want to get the local jar
# if we are a dev running an installed version of certoraRun, we want to get the installed jar
# how would we know? if $CERTORA is not empty, __file__ is relative to $CERTORA,
# and we have a local jar, then we need the local jar. Otherwise we take the installed one.
# A regular user should not have $CERTORA enabled, or the local jar doesn't exist.
# if $CERTORA is set to site-packages, it should be fine too. (but let's hope nobody does that.)
certora_home = get_certora_envvar()
if certora_home != "":
local_certora_path = Path(certora_home) / CERTORA_JARS / jar_name
if is_relative_to(Path(__file__), Path(certora_home)) and local_certora_path.is_file():
return local_certora_path
return get_package_resource(CERTORA_JARS / jar_name)
def get_package_resource(resource: Path) -> Path:
"""
Returns a resource installed in the package. Since we are in
`site-packages/certora_cli/Shared/certoraUtils.py`, we go 3 parents up, and then can access, e.g.,
- certora_jars (sibling to certora_cli)
"""
return Path(__file__).parents[2] / resource
def run_typechecker(typechecker_name: str, with_typechecking: bool, suppress_output: bool = False,
cvl1: bool = False) -> int:
"""
Runs a spec typechecker or syntax checker and returns an integer for the success/failure as detailed below:
@param typechecker_name - the name of the jar that we should run for running typechecking
@param with_typechecking - True if we want full typechecking including build (Solidity outputs) file,
False if we run only the leaner syntax checking.
@param suppress_output - True if we do not wish to redirect the typechecker's output to the screen.
@param cvl1 - True if we run a cvl1 typechecker - this is only temporary in the transition period!
@returns int - -1 if could not find the typechecker jar,
otherwise returns the exit code returned by the typechecker.
"""
# Find path to typechecker jar
path_to_typechecker = find_jar(typechecker_name)
typecheck_logger.info(f"Path to typechecker is {path_to_typechecker}")
# if typechecker jar does not exist, we just skip this step
if not path_to_typechecker.is_file():
typecheck_logger.error(f"Could not run type checker locally: file not found {path_to_typechecker}")
return -1
# args to typechecker
base_cmd = f"java -jar {path_to_typechecker} -v " \
f"{get_certora_verify_file_cvl1() if cvl1 else get_certora_verify_file()} " \
f"-m {get_certora_metadata_file()} -d {get_certora_internal_dir()}"
if with_typechecking:
typecheck_cmd = f"{base_cmd} -b {get_certora_build_file()}"
else:
typecheck_cmd = base_cmd
# run it - exit with code 1 if failed
if not suppress_output:
exit_code = run_jar_cmd(typecheck_cmd, False, custom_error_message="Failed to compile spec file",
logger_topic="type_check")
else:
exit_code = run_jar_cmd(typecheck_cmd, False, print_err=False)
return exit_code
def run_local_spec_check(with_typechecking: bool) -> int:
"""
Runs the local type checker in one of two modes: (1) syntax only,
and (2) including full typechecking after building the contracts
:param with_typechecking: True if we want the full check, false for a quick CVL syntax check
@return 0 on success or if the type checking was skipped, an error exit code otherwise
"""
# Check if java exists on the machine
java = which("java")
if java is None:
print(
f"`java` is not installed. Installing Java version {MIN_JAVA_VERSION} or later will enable faster "
f"CVL specification syntax checking before uploading to the cloud.")
return 0 # if user doesn't have java installed, user will have to wait for remote type checking
try:
java_version_str = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT).decode()
major_java_version = re.search(r'version \"(\d+).*', java_version_str).groups()[0] # type: ignore[union-attr]
if int(major_java_version) < MIN_JAVA_VERSION:
print(f"Installed Java version is too old to check CVL specification files locally. Installing Java version"
f" {MIN_JAVA_VERSION} or later will enable faster CVL syntax checking before uploading to the cloud.")
# if user doesn't have a valid version of java installed, user will have to wait for remote CVL syntax
# checking
return 0
except (subprocess.CalledProcessError, AttributeError):
print("Couldn't find the installed Java version. Skipping local CVL specification checking")
# if user doesn't have a valid version of java installed, user will have to wait for remote CVL syntax
# checking
return 0
cvl2_exit_code = run_typechecker("Typechecker.jar", with_typechecking)
# remove when CVL1 becomes fully obsolete: if the CVL2 typechecker failed, also run the old typechecker
if cvl2_exit_code == 1:
try:
cvl1_exit_code = run_typechecker("Typechecker.3.6.5.jar", with_typechecking, suppress_output=True,
cvl1=True)
if cvl1_exit_code == 0: # succeeded
print(orange_text("This verification task is not compatible with CVL2."))
print(orange_text("Please refer to the CVL2 documentation for "
"a migration guide https://docs.certora.com/en/cvl_rewrite-main/"))
print(orange_text("or downgrade to certora-cli version 3.6.5"))
except Exception:
pass
# in any case, today we return the CVL2 typechecker's exit code, so a CVL2 failure still fails
return cvl2_exit_code
def run_jar_cmd(jar_cmd: str, override_exit_code: bool, custom_error_message: Optional[str] = None,
logger_topic: Optional[str] = "run", print_output: bool = False, print_err: bool = True) -> int:
"""
@return: 0 on success, an error exit code otherwise
@param override_exit_code if true, always returns 0 (ignores/overrides non-zero exit codes of the jar subprocess)
@param custom_error_message if specified, determines the header of the error message printed for non-zero exit codes
@param logger_topic the topic of the logger being used
@param print_output If True, the process' standard output will be printed on the screen
@param print_err If True, the process' standard error will be printed on the screen
@param jar_cmd a command line that runs a jar file (EVMVerifier, Typechecker or MutationTest)
One may wonder why we need both override_exit_code and print_err, which have a similar effect:
logs are not printed if either override_exit_code is enabled or print_err is disabled.
The difference is that override_exit_code also controls the return value of this function, while print_err
only affects the logging.
The use case for setting override_exit_code is comparing expected-results files instead of relying on the Prover's
default exit code, which indicates failure whenever not all rules succeed.
The use case for print_err is suppressing CVL1 messages when checking whether the spec was written for CVL1 rather than CVL2.
"""
logger = logging.getLogger(logger_topic)
try:
args = prepare_call_args(jar_cmd)
logger.info(f"running {args}")
if print_output:
stdout_stream = None
else:
stdout_stream = subprocess.DEVNULL
run_result = \
subprocess.run(args, shell=False, universal_newlines=True, stderr=subprocess.PIPE, stdout=stdout_stream)
return_code = run_result.returncode
if return_code:
default_msg = f"Execution of command \"{' '.join(args)}\" terminated with exitcode {return_code}."
err_msg_header = custom_error_message if custom_error_message is not None else default_msg
if print_err:
logger.error(err_msg_header)
else:
logger.info(err_msg_header)
# We log all lines in stderr, as they contain useful information we do not want the
# Python loggers to miss
# specifically, the errors go only to the log file if printing of errors is disabled or the exit-code override is on
log_level = logging.INFO if (override_exit_code or not print_err) else logging.CRITICAL
stderr_lines = run_result.stderr.splitlines()
for line in stderr_lines:
logger.log(log_level, line)
if not override_exit_code: # else, we return 0
return 1
return 0
except Exception as e:
logger.error(e)
return 1
def print_failed_to_run(cmd: str) -> None:
print()
print(f"Failed to run {cmd}")
if is_windows() and cmd.find('solc') != -1 and cmd.find('exe') == -1:
print("did you forget the .exe extension for solcXX.exe??")
print()
# TODO move to CompilerCollectorFactory.py
def run_solc_cmd(solc_cmd: str, output_file_name: str, config_path: Path, solc_input: Optional[bytes] = None) -> None:
"""
@param solc_cmd The command that runs solc
@param output_file_name the base name of the .stdout and .stderr files
@param config_path the directory of the generated files
@param solc_input input to the solc subprocess
"""
process_logger.debug(f"Running cmd {solc_cmd}")
build_start = time.perf_counter()
stdout_name = config_path / f'{output_file_name}.stdout'
stderr_name = config_path / f'{output_file_name}.stderr'
process_logger.debug(f"stdout, stderr = {stdout_name}, {stderr_name}")
with stdout_name.open('w+') as stdout:
with stderr_name.open('w+') as stderr:
try:
args = prepare_call_args(solc_cmd)
exitcode = subprocess.run(args, stdout=stdout, stderr=stderr, input=solc_input).returncode
if exitcode:
msg = f"Failed to run {solc_cmd}, exit code {exitcode}"
with open(stderr_name, 'r') as stderr_read:
for line in stderr_read:
print(line)
raise Exception(msg)
else:
process_logger.debug(f"Exitcode {exitcode}")
except Exception as e:
print(f"Error: {e}")
print_failed_to_run(solc_cmd)
raise
build_end = time.perf_counter()
time_run = round(build_end - build_start, 4)
process_logger.debug(f"Solc run {solc_cmd} time: {time_run}")
@contextmanager
def change_working_directory(path: Union[str, os.PathLike]) -> Generator[None, None, None]:
"""
Changes working directory and returns to previous on exit.
Note: the working directory is restored to the previous one even if an exception is thrown, for example if path does not exist
"""
prev_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
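# Usage sketch (illustrative only; the directory name is a hypothetical example):
#   with change_working_directory("build/artifacts"):
#       ...  # cwd is build/artifacts here
#   # cwd is restored here, even if the body raised an exception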
def file_is_not_empty(file_path: Path) -> bool:
return file_path.exists() and file_path.stat().st_size != 0
class Singleton(type):
"""
This is intended to be used as a metaclass to enforce that only a single instance of a class can be created
"""
_instances: Dict[Any, Any] = {} # Mapping from a class type to its instance
def __call__(cls, *args: Any, **kwargs: Any) -> Any:
"""
Returns the instance of the class if it exists, otherwise constructs it
"""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class AbstractAndSingleton(Singleton, ABCMeta):
pass
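# Usage sketch for the Singleton metaclass above (illustrative only; the class below is hypothetical
# and not part of this module):
#   class GlobalConfig(metaclass=Singleton):
#       def __init__(self) -> None:
#           self.values: Dict[str, Any] = {}
#
#   assert GlobalConfig() is GlobalConfig()  # the second call returns the cached instance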
def match_path_to_mapping_key(path: Path, m: Dict[str, str]) -> Optional[str]:
"""
Matches the path to the best match in the dictionary's keys.
For example, given an absolute path `/Users/JohnDoe/Path/ToSolc/a.sol`, if the map contains
`b/a.sol` and `ToSolc/a.sol`, it will match on `ToSolc/a.sol`.
It is assumed the map does not contain any ambiguities, e.g. both `a.sol` and `ToSolc/a.sol`.
@param path: the path to match against
@param m: the map whose keys we're searching
@return: the value from the map that best matches the path, None if not found.
"""
resolved_abs_path = path.resolve()
for k, v in m.items():
if Path(k).resolve() == resolved_abs_path:
return v
return None
def find_in(dir_path: Path, file_to_find: Path) -> Optional[Path]:
"""
Given a directory dir_path and a file we wish to find within that directory,
we iterate by trimming the prefix of file_to_find.
Use case: since .certora_sources is a common root of paths we copy, we wish to resolve
the original files inside .certora_sources.
Note that file_to_find should not have directory traversals.
Also, the result must not be an absolute path.
@param dir_path: A path to root the new file in
@param file_to_find: The file to re-root
@return The path of file_to_find rooted in dir_path, or None if it is not there
"""
num_parts = len(file_to_find.parts)
if file_to_find.is_absolute():
start = 1 # we must trim the `/` so that we do not return absolute paths
else:
start = 0
for i in range(start, num_parts):
candidate_path = Path(*file_to_find.parts[i:])
if (dir_path / candidate_path).is_file():
return candidate_path
return None
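# Illustrative sketch (the paths are hypothetical examples): if .certora_sources/contracts/a.sol exists, then
#   find_in(Path(".certora_sources"), Path("/home/user/project/contracts/a.sol"))
# tries "home/user/project/contracts/a.sol", "user/project/contracts/a.sol", ... and returns
# Path("contracts/a.sol"); it returns None if no suffix of the path exists under the directory.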
def find_filename_in(dir_path: Path, filename_to_find: str) -> Optional[str]:
res = find_in(dir_path, Path(filename_to_find))
if res is not None:
return str(res)
else:
return None
def get_trivial_contract_name(contract: str) -> str:
"""
Gets a path to a .sol file and returns its trivial contract name. The trivial contract name is the basename of the
path of the file, without file type suffix.
For example: for 'file/Test/opyn/vault.sol', the trivial contract name is 'vault'.
@param contract: A path to a .sol file
@return: The trivial contract name of a file
"""
return abs_posix_path_obj(contract).stem
def is_new_api() -> bool:
return 'CERTORA_NEW_API' in os.environ and os.environ['CERTORA_NEW_API'] == '1'
|
PypiClean
|
/aa-gdpr-0.3.3.tar.gz/aa-gdpr-0.3.3/aagdpr/static/aagdpr/ajax/libs/moment.js/2.27.0/locale/fi.js
|
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
//! moment.js locale configuration
var numbersPast = 'nolla yksi kaksi kolme neljä viisi kuusi seitsemän kahdeksan yhdeksän'.split(
' '
),
numbersFuture = [
'nolla',
'yhden',
'kahden',
'kolmen',
'neljän',
'viiden',
'kuuden',
numbersPast[7],
numbersPast[8],
numbersPast[9],
];
function translate(number, withoutSuffix, key, isFuture) {
var result = '';
switch (key) {
case 's':
return isFuture ? 'muutaman sekunnin' : 'muutama sekunti';
case 'ss':
result = isFuture ? 'sekunnin' : 'sekuntia';
break;
case 'm':
return isFuture ? 'minuutin' : 'minuutti';
case 'mm':
result = isFuture ? 'minuutin' : 'minuuttia';
break;
case 'h':
return isFuture ? 'tunnin' : 'tunti';
case 'hh':
result = isFuture ? 'tunnin' : 'tuntia';
break;
case 'd':
return isFuture ? 'päivän' : 'päivä';
case 'dd':
result = isFuture ? 'päivän' : 'päivää';
break;
case 'M':
return isFuture ? 'kuukauden' : 'kuukausi';
case 'MM':
result = isFuture ? 'kuukauden' : 'kuukautta';
break;
case 'y':
return isFuture ? 'vuoden' : 'vuosi';
case 'yy':
result = isFuture ? 'vuoden' : 'vuotta';
break;
}
result = verbalNumber(number, isFuture) + ' ' + result;
return result;
}
function verbalNumber(number, isFuture) {
return number < 10
? isFuture
? numbersFuture[number]
: numbersPast[number]
: number;
}
var fi = moment.defineLocale('fi', {
months: 'tammikuu_helmikuu_maaliskuu_huhtikuu_toukokuu_kesäkuu_heinäkuu_elokuu_syyskuu_lokakuu_marraskuu_joulukuu'.split(
'_'
),
monthsShort: 'tammi_helmi_maalis_huhti_touko_kesä_heinä_elo_syys_loka_marras_joulu'.split(
'_'
),
weekdays: 'sunnuntai_maanantai_tiistai_keskiviikko_torstai_perjantai_lauantai'.split(
'_'
),
weekdaysShort: 'su_ma_ti_ke_to_pe_la'.split('_'),
weekdaysMin: 'su_ma_ti_ke_to_pe_la'.split('_'),
longDateFormat: {
LT: 'HH.mm',
LTS: 'HH.mm.ss',
L: 'DD.MM.YYYY',
LL: 'Do MMMM[ta] YYYY',
LLL: 'Do MMMM[ta] YYYY, [klo] HH.mm',
LLLL: 'dddd, Do MMMM[ta] YYYY, [klo] HH.mm',
l: 'D.M.YYYY',
ll: 'Do MMM YYYY',
lll: 'Do MMM YYYY, [klo] HH.mm',
llll: 'ddd, Do MMM YYYY, [klo] HH.mm',
},
calendar: {
sameDay: '[tänään] [klo] LT',
nextDay: '[huomenna] [klo] LT',
nextWeek: 'dddd [klo] LT',
lastDay: '[eilen] [klo] LT',
lastWeek: '[viime] dddd[na] [klo] LT',
sameElse: 'L',
},
relativeTime: {
future: '%s päästä',
past: '%s sitten',
s: translate,
ss: translate,
m: translate,
mm: translate,
h: translate,
hh: translate,
d: translate,
dd: translate,
M: translate,
MM: translate,
y: translate,
yy: translate,
},
dayOfMonthOrdinalParse: /\d{1,2}\./,
ordinal: '%d.',
week: {
dow: 1, // Monday is the first day of the week.
doy: 4, // The week that contains Jan 4th is the first week of the year.
},
});
return fi;
})));
|
PypiClean
|
/matminer-0.8.0.tar.gz/matminer-0.8.0/docs_rst/contributors.rst
|
.. title:: MatMiner Contributors
.. _contributors:
=====================
matminer Contributors
=====================
.. caution:: From v0.6.6 onwards, the contributors list is no longer maintained. Please check the GitHub contributors list instead.
Matminer is led by `Anubhav Jain <http://www.anubhavjain.net/>`_ and the HackingMaterials research group at Lawrence Berkeley National Lab.
`LBNL - Hacking Materials <https://hackingmaterials.lbl.gov/>`_
----------------------------------------------------------------
* Anubhav Jain
* Saurabh Bajaj (further contributions through Citrine Informatics)
* Alireza Faghaninia
* Nils Zimmermann
* Qi Wang
* Sayan Rowchowdhury
* Alex Dunn
* Jason Frost
* Julien Brenneck
* Alex Ganose
* Daniel Dopp
* Samy Cherfaoui
University of Chicago
----------------------
* Logan Ward
* Jiming Chen
* Ashwin Aggarwal
* Aik Rui
`LBNL - Persson Group <http://perssongroup.lbl.gov/>`_
------------------------------------------------------
* Kiran Mathew
* Matt Horton
* Donny Winston
* Joseph Montoya
* Koki Muraoka
* Christian Legaspi
Other
-----
* Kyle Bystrom (Asta Research Group, UC Berkeley)
* Shyue Ping Ong (UC San Diego)
* Evgeny Blokhin (Tilde Lab)
* Max Dylla (Snyder Research Group, Northwestern University)
* David Waroquiers (UC Louvain, Belgium)
* Logan Williams (U Michigan Ann Arbor)
* Aidan Thompson (Sandia National Lab)
* Kamal Choudhary (NIST)
* Tian Xie (MIT)
* Daiki Nishikawa
* Nicholas Wagner (Northwestern University)
* Amalie Trewartha (LBNL)
* Janosh Riebesell
* Brandon Krull
|
PypiClean
|
/selenium_chrome-0.0.29.tar.gz/selenium_chrome-0.0.29/selenium_chrome/chrome_addons/builtin_addon_install_settings/resources/ublock_origin/js/traffic.js
|
'use strict';
/******************************************************************************/
// Start isolation from global scope
µBlock.webRequest = (( ) => {
/******************************************************************************/
// Platform-specific behavior.
// https://github.com/uBlockOrigin/uBlock-issues/issues/42
// https://bugzilla.mozilla.org/show_bug.cgi?id=1376932
// Add proper version number detection once issue is fixed in Firefox.
let dontCacheResponseHeaders =
vAPI.webextFlavor.soup.has('firefox');
// https://github.com/gorhill/uMatrix/issues/967#issuecomment-373002011
// This can be removed once Firefox 60 ESR is released.
let cantMergeCSPHeaders =
vAPI.webextFlavor.soup.has('firefox') && vAPI.webextFlavor.major < 59;
// The real actual webextFlavor value may not be set in stone, so listen
// for possible future changes.
window.addEventListener('webextFlavor', function() {
dontCacheResponseHeaders =
vAPI.webextFlavor.soup.has('firefox');
cantMergeCSPHeaders =
vAPI.webextFlavor.soup.has('firefox') &&
vAPI.webextFlavor.major < 59;
}, { once: true });
// https://github.com/uBlockOrigin/uBlock-issues/issues/1553
const supportsFloc = document.interestCohort instanceof Function;
/******************************************************************************/
// Intercept and filter web requests.
const onBeforeRequest = function(details) {
const fctxt = µBlock.filteringContext.fromWebrequestDetails(details);
// Special handling for root document.
// https://github.com/chrisaljoudi/uBlock/issues/1001
// This must be executed regardless of whether the request is
// behind-the-scene
if ( fctxt.itype === fctxt.MAIN_FRAME ) {
return onBeforeRootFrameRequest(fctxt);
}
// Special treatment: behind-the-scene requests
const tabId = details.tabId;
if ( tabId < 0 ) {
return onBeforeBehindTheSceneRequest(fctxt);
}
// Lookup the page store associated with this tab id.
const µb = µBlock;
let pageStore = µb.pageStoreFromTabId(tabId);
if ( pageStore === null ) {
const tabContext = µb.tabContextManager.mustLookup(tabId);
if ( tabContext.tabId < 0 ) {
return onBeforeBehindTheSceneRequest(fctxt);
}
vAPI.tabs.onNavigation({ tabId, frameId: 0, url: tabContext.rawURL });
pageStore = µb.pageStoreFromTabId(tabId);
}
const result = pageStore.filterRequest(fctxt);
pageStore.journalAddRequest(fctxt, result);
if ( µb.logger.enabled ) {
fctxt.setRealm('network').toLogger();
}
// Redirected
if ( fctxt.redirectURL !== undefined ) {
return { redirectUrl: fctxt.redirectURL };
}
// Not redirected
// Blocked
if ( result === 1 ) {
return { cancel: true };
}
// Not blocked
if (
fctxt.itype === fctxt.SUB_FRAME &&
details.parentFrameId !== -1 &&
details.aliasURL === undefined
) {
pageStore.setFrameURL(details);
}
if ( result === 2 ) {
return { cancel: false };
}
};
/******************************************************************************/
const onBeforeRootFrameRequest = function(fctxt) {
const µb = µBlock;
const requestURL = fctxt.url;
// Special handling for root document.
// https://github.com/chrisaljoudi/uBlock/issues/1001
// This must be executed regardless of whether the request is
// behind-the-scene
const requestHostname = fctxt.getHostname();
const loggerEnabled = µb.logger.enabled;
let result = 0;
let logData;
// If the site is whitelisted, disregard strict blocking
const trusted = µb.getNetFilteringSwitch(requestURL) === false;
if ( trusted ) {
result = 2;
if ( loggerEnabled ) {
logData = { engine: 'u', result: 2, raw: 'whitelisted' };
}
}
// Permanently unrestricted?
if (
result === 0 &&
µb.sessionSwitches.evaluateZ('no-strict-blocking', requestHostname)
) {
result = 2;
if ( loggerEnabled ) {
logData = {
engine: 'u',
result: 2,
raw: `no-strict-blocking: ${µb.sessionSwitches.z} true`
};
}
}
// Temporarily whitelisted?
if ( result === 0 && strictBlockBypasser.isBypassed(requestHostname) ) {
result = 2;
if ( loggerEnabled ) {
logData = {
engine: 'u',
result: 2,
raw: 'no-strict-blocking: true (temporary)'
};
}
}
// Static filtering
if ( result === 0 ) {
({ result, logData } = shouldStrictBlock(fctxt, loggerEnabled));
}
const pageStore = µb.bindTabToPageStore(fctxt.tabId, 'beforeRequest');
if ( pageStore !== null ) {
pageStore.journalAddRootFrame('uncommitted', requestURL);
pageStore.journalAddRequest(fctxt, result);
}
if ( loggerEnabled ) {
fctxt.setFilter(logData);
}
// https://github.com/uBlockOrigin/uBlock-issues/issues/760
// Redirect non-blocked request?
if (
result !== 1 &&
trusted === false &&
pageStore !== null &&
µb.staticNetFilteringEngine.hasQuery(fctxt)
) {
pageStore.redirectNonBlockedRequest(fctxt);
}
if ( loggerEnabled ) {
fctxt.setRealm('network').toLogger();
}
// Redirected
if ( fctxt.redirectURL !== undefined ) {
return { redirectUrl: fctxt.redirectURL };
}
// Not blocked
if ( result !== 1 ) { return; }
// No log data means no strict blocking (because we need to report why
// the blocking occurs).
if ( logData === undefined ) { return; }
// Blocked
const query = encodeURIComponent(JSON.stringify({
url: requestURL,
hn: requestHostname,
dn: fctxt.getDomain() || requestHostname,
fs: logData.raw
}));
vAPI.tabs.replace(
fctxt.tabId,
vAPI.getURL('document-blocked.html?details=') + query
);
return { cancel: true };
};
/******************************************************************************/
// Strict blocking through static filtering
//
// https://github.com/chrisaljoudi/uBlock/issues/1128
// Do not block if the match begins after the hostname,
// except when the filter is specifically of type `other`.
// https://github.com/gorhill/uBlock/issues/490
// Removing this for the time being, will need a new, dedicated type.
// https://github.com/uBlockOrigin/uBlock-issues/issues/1501
// Support explicit exception filters.
//
// Let result of match for specific `document` type be `rs`
// Let result of match for no specific type be `rg` *after* going through
// confirmation necessary for implicit matches
// Let `important` be `i`
// Let final result be logical combination of `rs` and `rg` as follow:
//
// | rs |
// +--------+--------+--------+--------|
// | 0 | 1 | 1i | 2 |
// --------+--------+--------+--------+--------+--------|
// | 0 | rg | rs | rs | rs |
// rg | 1 | rg | rs | rs | rs |
// | 1i | rg | rg | rs | rg |
// | 2 | rg | rg | rs | rs |
// --------+--------+--------+--------+--------+--------+
const shouldStrictBlock = function(fctxt, loggerEnabled) {
const µb = µBlock;
const snfe = µb.staticNetFilteringEngine;
// Explicit filtering: `document` option
const rs = snfe.matchString(fctxt, 0b0011);
const is = rs === 1 && snfe.isBlockImportant();
let lds;
if ( rs !== 0 || loggerEnabled ) {
lds = snfe.toLogData();
}
// | rs |
// +--------+--------+--------+--------|
// | 0 | 1 | 1i | 2 |
// --------+--------+--------+--------+--------+--------|
// | 0 | rg | rs | x | rs |
// rg | 1 | rg | rs | x | rs |
// | 1i | rg | rg | x | rg |
// | 2 | rg | rg | x | rs |
// --------+--------+--------+--------+--------+--------+
if ( rs === 1 && is ) {
return { result: rs, logData: lds };
}
// Implicit filtering: no `document` option
fctxt.type = 'no_type';
let rg = snfe.matchString(fctxt, 0b0011);
fctxt.type = 'main_frame';
const ig = rg === 1 && snfe.isBlockImportant();
let ldg;
if ( rg !== 0 || loggerEnabled ) {
ldg = snfe.toLogData();
if ( rg === 1 && validateStrictBlock(fctxt, ldg) === false ) {
rg = 0; ldg = undefined;
}
}
// | rs |
// +--------+--------+--------+--------|
// | 0 | 1 | 1i | 2 |
// --------+--------+--------+--------+--------+--------|
// | 0 | x | rs | - | rs |
// rg | 1 | x | rs | - | rs |
// | 1i | x | x | - | x |
// | 2 | x | x | - | rs |
// --------+--------+--------+--------+--------+--------+
if ( rs === 0 || rg === 1 && ig || rg === 2 && rs !== 2 ) {
return { result: rg, logData: ldg };
}
// | rs |
// +--------+--------+--------+--------|
// | 0 | 1 | 1i | 2 |
// --------+--------+--------+--------+--------+--------|
// | 0 | - | x | - | x |
// rg | 1 | - | x | - | x |
// | 1i | - | - | - | - |
// | 2 | - | - | - | x |
// --------+--------+--------+--------+--------+--------+
return { result: rs, logData: lds };
};
/******************************************************************************/
// https://github.com/gorhill/uBlock/issues/3208
// Mind case insensitivity.
// https://github.com/uBlockOrigin/uBlock-issues/issues/1147
// Do not strict-block if the filter pattern does not contain at least one
// token character.
const validateStrictBlock = function(fctxt, logData) {
if ( typeof logData.regex !== 'string' ) { return false; }
if ( typeof logData.raw === 'string' && /\w/.test(logData.raw) === false ) {
return false;
}
const url = fctxt.url;
const re = new RegExp(logData.regex, 'i');
const match = re.exec(url.toLowerCase());
if ( match === null ) { return false; }
// https://github.com/chrisaljoudi/uBlock/issues/1128
// https://github.com/chrisaljoudi/uBlock/issues/1212
// Verify that the end of the match is anchored to the end of the
// hostname.
// https://github.com/uBlockOrigin/uAssets/issues/7619#issuecomment-653010310
// Also match FQDN.
const hostname = fctxt.getHostname();
const hnpos = url.indexOf(hostname);
const hnlen = hostname.length;
const end = match.index + match[0].length - hnpos - hnlen;
return end === 0 || end === 1 ||
end === 2 && url.charCodeAt(hnpos + hnlen) === 0x2E /* '.' */;
};
/******************************************************************************/
// Intercept and filter behind-the-scene requests.
const onBeforeBehindTheSceneRequest = function(fctxt) {
const µb = µBlock;
const pageStore = µb.pageStoreFromTabId(fctxt.tabId);
if ( pageStore === null ) { return; }
// https://github.com/gorhill/uBlock/issues/3150
// Ability to globally block CSP reports MUST also apply to
// behind-the-scene network requests.
let result = 0;
// https://github.com/uBlockOrigin/uBlock-issues/issues/339
// Need to also test against `-scheme` since tabOrigin is normalized.
// Not especially elegant but for now this accomplishes the purpose of
// not dealing with network requests fired from a synthetic scope,
// that is unless advanced user mode is enabled.
if (
fctxt.tabOrigin.endsWith('-scheme') === false &&
µb.URI.isNetworkURI(fctxt.tabOrigin) ||
µb.userSettings.advancedUserEnabled ||
fctxt.itype === fctxt.CSP_REPORT
) {
result = pageStore.filterRequest(fctxt);
// The "any-tab" scope is not whitelist-able, and in such case we must
// use the origin URL as the scope. Most such requests aren't going to
// be blocked, so we test for whitelisting and modify the result only
// when the request is being blocked.
//
// https://github.com/uBlockOrigin/uBlock-issues/issues/1478
// Also remove potential redirection when request is to be
// whitelisted.
if (
result === 1 &&
µb.getNetFilteringSwitch(fctxt.tabOrigin) === false
) {
result = 2;
fctxt.redirectURL = undefined;
fctxt.filter = { engine: 'u', result: 2, raw: 'whitelisted' };
}
}
// https://github.com/uBlockOrigin/uBlock-issues/issues/1204
onBeforeBehindTheSceneRequest.journalAddRequest(fctxt, result);
if ( µb.logger.enabled ) {
fctxt.setRealm('network').toLogger();
}
// Redirected
if ( fctxt.redirectURL !== undefined ) {
return { redirectUrl: fctxt.redirectURL };
}
// Blocked?
if ( result === 1 ) {
return { cancel: true };
}
};
// https://github.com/uBlockOrigin/uBlock-issues/issues/1204
// Report the tabless network requests to all page stores matching the
// document origin. This is an approximation, there is unfortunately no
// way to know for sure which exact page triggered a tabless network
// request.
{
let hostname = '';
let pageStores = new Set();
let pageStoresToken = 0;
let gcTimer;
const reset = function() {
hostname = '';
pageStores = new Set();
pageStoresToken = 0;
};
const gc = ( ) => {
gcTimer = undefined;
if ( pageStoresToken !== µBlock.pageStoresToken ) { return reset(); }
gcTimer = vAPI.setTimeout(gc, 30011);
};
onBeforeBehindTheSceneRequest.journalAddRequest = (fctxt, result) => {
const docHostname = fctxt.getDocHostname();
if (
docHostname !== hostname ||
pageStoresToken !== µBlock.pageStoresToken
) {
hostname = docHostname;
pageStores = new Set();
for ( const pageStore of µBlock.pageStores.values() ) {
if ( pageStore.tabHostname !== docHostname ) { continue; }
pageStores.add(pageStore);
}
pageStoresToken = µBlock.pageStoresToken;
if ( gcTimer !== undefined ) {
clearTimeout(gcTimer);
}
gcTimer = vAPI.setTimeout(gc, 30011);
}
for ( const pageStore of pageStores ) {
pageStore.journalAddRequest(fctxt, result);
}
};
}
/******************************************************************************/
// To handle:
// - Media elements larger than n kB
// - Scriptlet injection (requires ability to modify response body)
// - HTML filtering (requires ability to modify response body)
// - CSP injection
const onHeadersReceived = function(details) {
// https://github.com/uBlockOrigin/uBlock-issues/issues/610
// Process behind-the-scene requests in a special way.
if (
details.tabId < 0 &&
normalizeBehindTheSceneResponseHeaders(details) === false
) {
return;
}
const µb = µBlock;
const fctxt = µb.filteringContext.fromWebrequestDetails(details);
const isRootDoc = fctxt.itype === fctxt.MAIN_FRAME;
let pageStore = µb.pageStoreFromTabId(fctxt.tabId);
if ( pageStore === null ) {
if ( isRootDoc === false ) { return; }
pageStore = µb.bindTabToPageStore(fctxt.tabId, 'beforeRequest');
}
if ( pageStore.getNetFilteringSwitch(fctxt) === false ) { return; }
if ( fctxt.itype === fctxt.IMAGE || fctxt.itype === fctxt.MEDIA ) {
const result = foilLargeMediaElement(details, fctxt, pageStore);
if ( result !== undefined ) { return result; }
}
// Keep in mind response headers will be modified in-place if needed, so
// `details.responseHeaders` will always point to the modified response
// headers.
const { responseHeaders } = details;
if ( Array.isArray(responseHeaders) === false ) { return; }
if ( isRootDoc === false && µb.hiddenSettings.filterOnHeaders === true ) {
const result = pageStore.filterOnHeaders(fctxt, responseHeaders);
if ( result !== 0 ) {
if ( µb.logger.enabled ) {
fctxt.setRealm('network').toLogger();
}
if ( result === 1 ) {
pageStore.journalAddRequest(fctxt, 1);
return { cancel: true };
}
}
}
if ( isRootDoc === false && fctxt.itype !== fctxt.SUB_FRAME ) { return; }
// https://github.com/gorhill/uBlock/issues/2813
// Disable the blocking of large media elements if the document is itself
// a media element: the resource was not prevented from loading so no
// point to further block large media elements for the current document.
if ( isRootDoc ) {
const contentType = headerValueFromName('content-type', responseHeaders);
if ( reMediaContentTypes.test(contentType) ) {
pageStore.allowLargeMediaElementsUntil = 0;
// Fall-through: this could be an SVG document, which supports
// script tags.
}
}
// At this point we have an HTML document.
const filteredHTML =
µb.canFilterResponseData && filterDocument(fctxt, details) === true;
let modifiedHeaders = false;
if ( µb.httpheaderFilteringEngine.apply(fctxt, responseHeaders) === true ) {
modifiedHeaders = true;
}
if ( injectCSP(fctxt, pageStore, responseHeaders) === true ) {
modifiedHeaders = true;
}
if ( supportsFloc && foilFloc(fctxt, responseHeaders) ) {
modifiedHeaders = true;
}
// https://bugzilla.mozilla.org/show_bug.cgi?id=1376932
// Prevent document from being cached by the browser if we modified it,
// either through HTML filtering and/or modified response headers.
// https://github.com/uBlockOrigin/uBlock-issues/issues/229
// Use `no-cache` instead of `no-cache, no-store, must-revalidate`, this
// allows Firefox's offline mode to work as expected.
if ( (filteredHTML || modifiedHeaders) && dontCacheResponseHeaders ) {
const cacheControl = µb.hiddenSettings.cacheControlForFirefox1376932;
if ( cacheControl !== 'unset' ) {
let i = headerIndexFromName('cache-control', responseHeaders);
if ( i !== -1 ) {
responseHeaders[i].value = cacheControl;
} else {
responseHeaders.push({ name: 'Cache-Control', value: cacheControl });
}
modifiedHeaders = true;
}
}
if ( modifiedHeaders ) {
return { responseHeaders };
}
};
const reMediaContentTypes = /^(?:audio|image|video)\//;
/******************************************************************************/
// https://github.com/uBlockOrigin/uBlock-issues/issues/610
const normalizeBehindTheSceneResponseHeaders = function(details) {
if ( details.type !== 'xmlhttprequest' ) { return false; }
const headers = details.responseHeaders;
if ( Array.isArray(headers) === false ) { return false; }
const contentType = headerValueFromName('content-type', headers);
if ( contentType === '' ) { return false; }
if ( reMediaContentTypes.test(contentType) === false ) { return false; }
if ( contentType.startsWith('image') ) {
details.type = 'image';
} else {
details.type = 'media';
}
return true;
};
/*******************************************************************************
The response body filterer is responsible for:
- HTML filtering
In the spirit of efficiency, the response body filterer works this way:
If:
- HTML filtering: no.
Then:
No response body filtering is initiated.
If:
- HTML filtering: yes.
Then:
Assemble all response body data into a single buffer. Once all the
response data has been received, create a document from it. Then:
- Remove all DOM elements matching HTML filters.
Then serialize the resulting modified document as the new response
body.
**/
const filterDocument = (( ) => {
const µb = µBlock;
const filterers = new Map();
let domParser, xmlSerializer,
utf8TextDecoder, textDecoder, textEncoder;
const textDecode = function(encoding, buffer) {
if (
textDecoder !== undefined &&
textDecoder.encoding !== encoding
) {
textDecoder = undefined;
}
if ( textDecoder === undefined ) {
textDecoder = new TextDecoder(encoding);
}
return textDecoder.decode(buffer);
};
const reContentTypeDocument = /^(?:text\/html|application\/xhtml\+xml)/i;
const reContentTypeCharset = /charset=['"]?([^'" ]+)/i;
const mimeFromContentType = function(contentType) {
const match = reContentTypeDocument.exec(contentType);
if ( match !== null ) {
return match[0].toLowerCase();
}
};
const charsetFromContentType = function(contentType) {
const match = reContentTypeCharset.exec(contentType);
if ( match !== null ) {
return match[1].toLowerCase();
}
};
const charsetFromDoc = function(doc) {
let meta = doc.querySelector('meta[charset]');
if ( meta !== null ) {
return meta.getAttribute('charset').toLowerCase();
}
meta = doc.querySelector(
'meta[http-equiv="content-type" i][content]'
);
if ( meta !== null ) {
return charsetFromContentType(meta.getAttribute('content'));
}
};
const streamClose = function(filterer, buffer) {
if ( buffer !== undefined ) {
filterer.stream.write(buffer);
} else if ( filterer.buffer !== undefined ) {
filterer.stream.write(filterer.buffer);
}
filterer.stream.close();
};
const onStreamData = function(ev) {
const filterer = filterers.get(this);
if ( filterer === undefined ) {
this.write(ev.data);
this.disconnect();
return;
}
if (
this.status !== 'transferringdata' &&
this.status !== 'finishedtransferringdata'
) {
filterers.delete(this);
this.disconnect();
return;
}
// TODO:
// - Possibly improve buffer growth, if benchmarking shows it's worth
// it.
// - Also evaluate whether keeping a list of buffers and then decoding
// them in sequence using TextDecoder's "stream" option is more
// efficient. Can the data buffers be safely kept around for later
// use?
// - Informal, quick benchmarks seem to show most of the overhead is
// from calling TextDecoder.decode() and TextEncoder.encode(), and if
// confirmed, there is nothing which can be done uBO-side to reduce
// overhead.
if ( filterer.buffer === null ) {
filterer.buffer = new Uint8Array(ev.data);
return;
}
const buffer = new Uint8Array(
filterer.buffer.byteLength +
ev.data.byteLength
);
buffer.set(filterer.buffer);
buffer.set(new Uint8Array(ev.data), filterer.buffer.byteLength);
filterer.buffer = buffer;
};
const onStreamStop = function() {
const filterer = filterers.get(this);
filterers.delete(this);
if ( filterer === undefined || filterer.buffer === null ) {
this.close();
return;
}
if ( this.status !== 'finishedtransferringdata' ) { return; }
if ( domParser === undefined ) {
domParser = new DOMParser();
xmlSerializer = new XMLSerializer();
}
if ( textEncoder === undefined ) {
textEncoder = new TextEncoder();
}
let doc;
// If the stream encoding is still unknown, try to extract it from the document.
let charsetFound = filterer.charset,
charsetUsed = charsetFound;
if ( charsetFound === undefined ) {
if ( utf8TextDecoder === undefined ) {
utf8TextDecoder = new TextDecoder();
}
doc = domParser.parseFromString(
utf8TextDecoder.decode(filterer.buffer.slice(0, 1024)),
filterer.mime
);
charsetFound = charsetFromDoc(doc);
charsetUsed = µb.textEncode.normalizeCharset(charsetFound);
if ( charsetUsed === undefined ) {
return streamClose(filterer);
}
}
doc = domParser.parseFromString(
textDecode(charsetUsed, filterer.buffer),
filterer.mime
);
// https://github.com/gorhill/uBlock/issues/3507
// In case of no explicit charset found, try to find one again, but
// this time with the whole document parsed.
if ( charsetFound === undefined ) {
charsetFound = µb.textEncode.normalizeCharset(charsetFromDoc(doc));
if ( charsetFound !== charsetUsed ) {
if ( charsetFound === undefined ) {
return streamClose(filterer);
}
charsetUsed = charsetFound;
doc = domParser.parseFromString(
textDecode(charsetFound, filterer.buffer),
filterer.mime
);
}
}
let modified = false;
if ( filterer.selectors !== undefined ) {
if ( µb.htmlFilteringEngine.apply(doc, filterer) ) {
modified = true;
}
}
if ( modified === false ) {
return streamClose(filterer);
}
// https://stackoverflow.com/questions/6088972/get-doctype-of-an-html-as-string-with-javascript/10162353#10162353
const doctypeStr = doc.doctype instanceof Object ?
xmlSerializer.serializeToString(doc.doctype) + '\n' :
'';
// https://github.com/gorhill/uBlock/issues/3391
let encodedStream = textEncoder.encode(
doctypeStr +
doc.documentElement.outerHTML
);
if ( charsetUsed !== 'utf-8' ) {
encodedStream = µb.textEncode.encode(
charsetUsed,
encodedStream
);
}
streamClose(filterer, encodedStream);
};
const onStreamError = function() {
filterers.delete(this);
};
return function(fctxt, extras) {
// https://github.com/gorhill/uBlock/issues/3478
const statusCode = extras.statusCode || 0;
if ( statusCode !== 0 && (statusCode < 200 || statusCode >= 300) ) {
return;
}
const hostname = fctxt.getHostname();
if ( hostname === '' ) { return; }
const domain = fctxt.getDomain();
const request = {
stream: undefined,
tabId: fctxt.tabId,
url: fctxt.url,
hostname: hostname,
domain: domain,
entity: µb.URI.entityFromDomain(domain),
selectors: undefined,
buffer: null,
mime: 'text/html',
charset: undefined
};
request.selectors = µb.htmlFilteringEngine.retrieve(request);
if ( request.selectors === undefined ) { return; }
const headers = extras.responseHeaders;
const contentType = headerValueFromName('content-type', headers);
if ( contentType !== '' ) {
request.mime = mimeFromContentType(contentType);
if ( request.mime === undefined ) { return; }
let charset = charsetFromContentType(contentType);
if ( charset !== undefined ) {
charset = µb.textEncode.normalizeCharset(charset);
if ( charset === undefined ) { return; }
request.charset = charset;
}
}
// https://bugzilla.mozilla.org/show_bug.cgi?id=1426789
if ( headerValueFromName('content-disposition', headers) ) { return; }
const stream = request.stream =
browser.webRequest.filterResponseData(extras.requestId);
stream.ondata = onStreamData;
stream.onstop = onStreamStop;
stream.onerror = onStreamError;
filterers.set(stream, request);
return true;
};
})();
/******************************************************************************/
const injectCSP = function(fctxt, pageStore, responseHeaders) {
const µb = µBlock;
const loggerEnabled = µb.logger.enabled;
const cspSubsets = [];
const requestType = fctxt.type;
// Start collecting policies >>>>>>>>
// ======== built-in policies
const builtinDirectives = [];
if ( pageStore.filterScripting(fctxt, true) === 1 ) {
builtinDirectives.push(µBlock.cspNoScripting);
if ( loggerEnabled ) {
fctxt.setRealm('network').setType('scripting').toLogger();
}
}
// https://github.com/uBlockOrigin/uBlock-issues/issues/422
// We need to derive a special context for filtering `inline-script`,
// as the embedding document for this "resource" will always be the
// frame itself, not that of the parent of the frame.
else {
const fctxt2 = fctxt.duplicate();
fctxt2.type = 'inline-script';
fctxt2.setDocOriginFromURL(fctxt.url);
const result = pageStore.filterRequest(fctxt2);
if ( result === 1 ) {
builtinDirectives.push(µBlock.cspNoInlineScript);
}
if ( result === 2 && loggerEnabled ) {
fctxt2.setRealm('network').toLogger();
}
}
// https://github.com/gorhill/uBlock/issues/1539
// - Use a CSP to also forbid inline fonts if remote fonts are blocked.
fctxt.type = 'inline-font';
if ( pageStore.filterRequest(fctxt) === 1 ) {
builtinDirectives.push(µBlock.cspNoInlineFont);
if ( loggerEnabled ) {
fctxt.setRealm('network').toLogger();
}
}
if ( builtinDirectives.length !== 0 ) {
cspSubsets[0] = builtinDirectives.join(', ');
}
// ======== filter-based policies
// Static filtering.
fctxt.type = requestType;
const staticDirectives =
µb.staticNetFilteringEngine.matchAndFetchModifiers(fctxt, 'csp');
if ( staticDirectives !== undefined ) {
for ( const directive of staticDirectives ) {
if ( directive.result !== 1 ) { continue; }
cspSubsets.push(directive.modifier.value);
}
}
// URL filtering `allow` rules override static filtering.
if (
cspSubsets.length !== 0 &&
µb.sessionURLFiltering.evaluateZ(
fctxt.getTabHostname(),
fctxt.url,
'csp'
) === 2
) {
if ( loggerEnabled ) {
fctxt.setRealm('network')
.setType('csp')
.setFilter(µb.sessionURLFiltering.toLogData())
.toLogger();
}
return;
}
// Dynamic filtering `allow` rules override static filtering.
if (
cspSubsets.length !== 0 &&
µb.userSettings.advancedUserEnabled &&
µb.sessionFirewall.evaluateCellZY(
fctxt.getTabHostname(),
fctxt.getTabHostname(),
'*'
) === 2
) {
if ( loggerEnabled ) {
fctxt.setRealm('network')
.setType('csp')
.setFilter(µb.sessionFirewall.toLogData())
.toLogger();
}
return;
}
// <<<<<<<< All policies have been collected
// Static CSP policies will be applied.
if ( loggerEnabled && staticDirectives !== undefined ) {
fctxt.setRealm('network')
.pushFilters(staticDirectives.map(a => a.logData()))
.toLogger();
}
if ( cspSubsets.length === 0 ) { return; }
µb.updateToolbarIcon(fctxt.tabId, 0x02);
// Use comma to merge CSP directives.
// Ref.: https://www.w3.org/TR/CSP2/#implementation-considerations
//
// https://github.com/gorhill/uMatrix/issues/967
// Inject a new CSP header rather than modify an existing one, except
// if the current environment does not support merging headers:
// Firefox 58/webext and earlier can't merge CSP headers, so we will merge
// them here.
if ( cantMergeCSPHeaders ) {
const i = headerIndexFromName(
'content-security-policy',
responseHeaders
);
if ( i !== -1 ) {
cspSubsets.unshift(responseHeaders[i].value.trim());
responseHeaders.splice(i, 1);
}
}
responseHeaders.push({
name: 'Content-Security-Policy',
value: cspSubsets.join(', ')
});
return true;
};
/******************************************************************************/
// https://github.com/uBlockOrigin/uBlock-issues/issues/1553
// https://github.com/WICG/floc#opting-out-of-computation
const foilFloc = function(fctxt, responseHeaders) {
const hn = fctxt.getHostname();
if ( µBlock.scriptletFilteringEngine.hasScriptlet(hn, 1, 'no-floc') === false ) {
return false;
}
responseHeaders.push({
name: 'Permissions-Policy',
value: 'interest-cohort=()' }
);
return true;
};
/******************************************************************************/
// https://github.com/gorhill/uBlock/issues/1163
// "Block elements by size".
// https://github.com/gorhill/uBlock/issues/1390#issuecomment-187310719
// Do not foil when the media element is fetched from the browser
// cache. This works only when the webext API supports the `fromCache`
// property (Firefox).
const foilLargeMediaElement = function(details, fctxt, pageStore) {
if ( details.fromCache === true ) { return; }
let size = 0;
if ( µBlock.userSettings.largeMediaSize !== 0 ) {
const headers = details.responseHeaders;
const i = headerIndexFromName('content-length', headers);
if ( i === -1 ) { return; }
size = parseInt(headers[i].value, 10) || 0;
}
const result = pageStore.filterLargeMediaElement(fctxt, size);
if ( result === 0 ) { return; }
if ( µBlock.logger.enabled ) {
fctxt.setRealm('network').toLogger();
}
return { cancel: true };
};
/******************************************************************************/
// Caller must ensure headerName is normalized to lower case.
const headerIndexFromName = function(headerName, headers) {
let i = headers.length;
while ( i-- ) {
if ( headers[i].name.toLowerCase() === headerName ) {
return i;
}
}
return -1;
};
const headerValueFromName = function(headerName, headers) {
const i = headerIndexFromName(headerName, headers);
return i !== -1 ? headers[i].value : '';
};
/******************************************************************************/
const strictBlockBypasser = {
hostnameToDeadlineMap: new Map(),
cleanupTimer: undefined,
cleanup: function() {
for ( const [ hostname, deadline ] of this.hostnameToDeadlineMap ) {
if ( deadline <= Date.now() ) {
this.hostnameToDeadlineMap.delete(hostname);
}
}
},
bypass: function(hostname) {
if ( typeof hostname !== 'string' || hostname === '' ) { return; }
this.hostnameToDeadlineMap.set(
hostname,
Date.now() + µBlock.hiddenSettings.strictBlockingBypassDuration * 1000
);
},
isBypassed: function(hostname) {
if ( this.hostnameToDeadlineMap.size === 0 ) { return false; }
let bypassDuration =
µBlock.hiddenSettings.strictBlockingBypassDuration * 1000;
if ( this.cleanupTimer === undefined ) {
this.cleanupTimer = vAPI.setTimeout(
( ) => {
this.cleanupTimer = undefined;
this.cleanup();
},
bypassDuration + 10000
);
}
for (;;) {
const deadline = this.hostnameToDeadlineMap.get(hostname);
if ( deadline !== undefined ) {
if ( deadline > Date.now() ) {
this.hostnameToDeadlineMap.set(
hostname,
Date.now() + bypassDuration
);
return true;
}
this.hostnameToDeadlineMap.delete(hostname);
}
const pos = hostname.indexOf('.');
if ( pos === -1 ) { break; }
hostname = hostname.slice(pos + 1);
}
return false;
}
};
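// Illustrative note (not part of the original file): isBypassed() walks up the hostname,
// so after strictBlockBypasser.bypass('example.com'), a later call to
// strictBlockBypasser.isBypassed('sub.example.com') also returns true (and refreshes the
// deadline), because 'sub.example.com' is trimmed label by label down to 'example.com'.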
/******************************************************************************/
return {
start: (( ) => {
vAPI.net = new vAPI.Net();
vAPI.net.suspend();
return ( ) => {
vAPI.net.setSuspendableListener(onBeforeRequest);
vAPI.net.addListener(
'onHeadersReceived',
onHeadersReceived,
{ urls: [ 'http://*/*', 'https://*/*' ] },
[ 'blocking', 'responseHeaders' ]
);
vAPI.net.unsuspend(true);
};
})(),
strictBlockBypass: hostname => {
strictBlockBypasser.bypass(hostname);
},
};
/******************************************************************************/
})();
/******************************************************************************/
|
PypiClean
|
/docproduct-0.2.0-py3-none-any.whl/keras_bert/tokenizer.py
|
import unicodedata
from .bert import TOKEN_CLS, TOKEN_SEP, TOKEN_UNK
class Tokenizer(object):
def __init__(self,
token_dict,
token_cls=TOKEN_CLS,
token_sep=TOKEN_SEP,
token_unk=TOKEN_UNK,
pad_index=0,
cased=False):
"""Initialize tokenizer.
:param token_dict: A dict that maps tokens to indices.
:param token_cls: The token that represents classification.
:param token_sep: The token that represents the separator.
:param token_unk: The token that represents an unknown token.
:param pad_index: The index used for padding.
:param cased: Whether to keep the case.
"""
self._token_dict = token_dict
self._token_cls = token_cls
self._token_sep = token_sep
self._token_unk = token_unk
self._pad_index = pad_index
self._cased = cased
@staticmethod
def _truncate(first_tokens, second_tokens=None, max_len=None):
if max_len is None:
return
if second_tokens is not None:
while True:
total_len = len(first_tokens) + len(second_tokens)
if total_len <= max_len - 3: # 3 for [CLS] .. tokens_a .. [SEP] .. tokens_b [SEP]
break
if len(first_tokens) > len(second_tokens):
first_tokens.pop()
else:
second_tokens.pop()
else:
del first_tokens[max_len - 2:] # 2 for [CLS] .. tokens .. [SEP]
def _pack(self, first_tokens, second_tokens=None):
first_packed_tokens = [self._token_cls] + first_tokens + [self._token_sep]
if second_tokens is not None:
second_packed_tokens = second_tokens + [self._token_sep]
return first_packed_tokens + second_packed_tokens, len(first_packed_tokens), len(second_packed_tokens)
else:
return first_packed_tokens, len(first_packed_tokens), 0
def _convert_tokens_to_ids(self, tokens):
unk_id = self._token_dict.get(self._token_unk)
return [self._token_dict.get(token, unk_id) for token in tokens]
def tokenize(self, first, second=None):
first_tokens = self._tokenize(first)
second_tokens = self._tokenize(second) if second is not None else None
tokens, _, _ = self._pack(first_tokens, second_tokens)
return tokens
def encode(self, first, second=None, max_len=None):
first_tokens = self._tokenize(first)
second_tokens = self._tokenize(second) if second is not None else None
self._truncate(first_tokens, second_tokens, max_len)
tokens, first_len, second_len = self._pack(first_tokens, second_tokens)
token_ids = self._convert_tokens_to_ids(tokens)
segment_ids = [0] * first_len + [1] * second_len
if max_len is not None:
pad_len = max_len - first_len - second_len
token_ids += [self._pad_index] * pad_len
segment_ids += [0] * pad_len
return token_ids, segment_ids
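# Usage sketch (illustrative only; the resulting ids depend entirely on the supplied token_dict):
#   tokenizer = Tokenizer(token_dict)
#   token_ids, segment_ids = tokenizer.encode('unaffable', max_len=8)
# The packed tokens are [token_cls] + word pieces + [token_sep], padded with pad_index up to max_len;
# segment_ids holds 0 for the first segment, 1 for the second segment (if any), and 0 for padding.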
def _tokenize(self, text):
if not self._cased:
text = unicodedata.normalize('NFD', text)
text = ''.join([ch for ch in text if unicodedata.category(ch) != 'Mn'])
text = text.lower()
spaced = ''
for ch in text:
if self._is_punctuation(ch) or self._is_cjk_character(ch):
spaced += ' ' + ch + ' '
elif self._is_space(ch):
spaced += ' '
elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):
continue
else:
spaced += ch
tokens = []
for word in spaced.strip().split():
tokens += self._word_piece_tokenize(word)
return tokens
def _word_piece_tokenize(self, word):
if word in self._token_dict:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in self._token_dict:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
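# Illustrative example (assuming a vocabulary that contains the pieces 'un', '##aff' and '##able'
# but no longer matching substrings of the word): _word_piece_tokenize('unaffable') greedily
# matches the longest known piece at each step and returns ['un', '##aff', '##able'].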
@staticmethod
def _is_punctuation(ch):
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
@staticmethod
def _is_cjk_character(ch):
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
@staticmethod
def _is_space(ch):
return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or \
unicodedata.category(ch) == 'Zs'
@staticmethod
def _is_control(ch):
return unicodedata.category(ch).startswith('C')
|
PypiClean
|
/plaid-python-15.5.0.tar.gz/plaid-python-15.5.0/plaid/model/initial_update_webhook.py
|
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from plaid.exceptions import ApiAttributeError
def lazy_import():
from plaid.model.webhook_environment_values import WebhookEnvironmentValues
globals()['WebhookEnvironmentValues'] = WebhookEnvironmentValues
class InitialUpdateWebhook(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'webhook_type': (str,), # noqa: E501
'webhook_code': (str,), # noqa: E501
'new_transactions': (float,), # noqa: E501
'item_id': (str,), # noqa: E501
'environment': (WebhookEnvironmentValues,), # noqa: E501
'error': (str, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'webhook_type': 'webhook_type', # noqa: E501
'webhook_code': 'webhook_code', # noqa: E501
'new_transactions': 'new_transactions', # noqa: E501
'item_id': 'item_id', # noqa: E501
'environment': 'environment', # noqa: E501
'error': 'error', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, webhook_type, webhook_code, new_transactions, item_id, environment, *args, **kwargs): # noqa: E501
"""InitialUpdateWebhook - a model defined in OpenAPI
Args:
webhook_type (str): `TRANSACTIONS`
webhook_code (str): `INITIAL_UPDATE`
new_transactions (float): The number of new, unfetched transactions available.
item_id (str): The `item_id` of the Item associated with this webhook, warning, or error
environment (WebhookEnvironmentValues):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
error (str, none_type): The error code associated with the webhook. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.webhook_type = webhook_type
self.webhook_code = webhook_code
self.new_transactions = new_transactions
self.item_id = item_id
self.environment = environment
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, webhook_type, webhook_code, new_transactions, item_id, environment, *args, **kwargs): # noqa: E501
"""InitialUpdateWebhook - a model defined in OpenAPI
Args:
webhook_type (str): `TRANSACTIONS`
webhook_code (str): `INITIAL_UPDATE`
new_transactions (float): The number of new, unfetched transactions available.
item_id (str): The `item_id` of the Item associated with this webhook, warning, or error
environment (WebhookEnvironmentValues):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
error (str, none_type): The error code associated with the webhook. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.webhook_type = webhook_type
self.webhook_code = webhook_code
self.new_transactions = new_transactions
self.item_id = item_id
self.environment = environment
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
/google-cloud-retail-1.16.2.tar.gz/google-cloud-retail-1.16.2/google/cloud/retail_v2/types/product.py
from __future__ import annotations
from typing import MutableMapping, MutableSequence
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
import proto # type: ignore
from google.cloud.retail_v2.types import common, promotion
__protobuf__ = proto.module(
package="google.cloud.retail.v2",
manifest={
"Product",
},
)
class Product(proto.Message):
r"""Product captures all metadata information of items to be
recommended or searched.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
expire_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp when this product becomes unavailable for
[SearchService.Search][google.cloud.retail.v2.SearchService.Search].
Note that this is only applicable to
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
and
[Type.COLLECTION][google.cloud.retail.v2.Product.Type.COLLECTION],
and ignored for
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT].
In general, we suggest that users delete stale
products explicitly, instead of using this field to
determine staleness.
If it is set, the [Product][google.cloud.retail.v2.Product]
is not available for
[SearchService.Search][google.cloud.retail.v2.SearchService.Search]
after
[expire_time][google.cloud.retail.v2.Product.expire_time].
However, the product can still be retrieved by
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct]
and
[ProductService.ListProducts][google.cloud.retail.v2.ProductService.ListProducts].
[expire_time][google.cloud.retail.v2.Product.expire_time]
must be later than
[available_time][google.cloud.retail.v2.Product.available_time]
and
[publish_time][google.cloud.retail.v2.Product.publish_time],
otherwise an INVALID_ARGUMENT error is thrown.
Corresponding properties: Google Merchant Center property
`expiration_date <https://support.google.com/merchants/answer/6324499>`__.
This field is a member of `oneof`_ ``expiration``.
ttl (google.protobuf.duration_pb2.Duration):
Input only. The TTL (time to live) of the product. Note that
this is only applicable to
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
and
[Type.COLLECTION][google.cloud.retail.v2.Product.Type.COLLECTION],
and ignored for
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT].
In general, we suggest that users delete stale
products explicitly, instead of using this field to
determine staleness.
If it is set, it must be a non-negative value, and
[expire_time][google.cloud.retail.v2.Product.expire_time] is
set as current timestamp plus
[ttl][google.cloud.retail.v2.Product.ttl]. The derived
[expire_time][google.cloud.retail.v2.Product.expire_time] is
returned in the output and
[ttl][google.cloud.retail.v2.Product.ttl] is left blank when
retrieving the [Product][google.cloud.retail.v2.Product].
If it is set, the product is not available for
[SearchService.Search][google.cloud.retail.v2.SearchService.Search]
after current timestamp plus
[ttl][google.cloud.retail.v2.Product.ttl]. However, the
product can still be retrieved by
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct]
and
[ProductService.ListProducts][google.cloud.retail.v2.ProductService.ListProducts].
This field is a member of `oneof`_ ``expiration``.
name (str):
Immutable. Full resource name of the product, such as
``projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id``.
id (str):
Immutable. [Product][google.cloud.retail.v2.Product]
identifier, which is the final component of
[name][google.cloud.retail.v2.Product.name]. For example,
this field is "id_1", if
[name][google.cloud.retail.v2.Product.name] is
``projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/id_1``.
This field must be a UTF-8 encoded string with a length
limit of 128 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Corresponding properties: Google Merchant Center property
`id <https://support.google.com/merchants/answer/6324405>`__.
Schema.org property
`Product.sku <https://schema.org/sku>`__.
type_ (google.cloud.retail_v2.types.Product.Type):
Immutable. The type of the product. Default to
[Catalog.product_level_config.ingestion_product_type][google.cloud.retail.v2.ProductLevelConfig.ingestion_product_type]
if unset.
primary_product_id (str):
Variant group identifier. Must be an
[id][google.cloud.retail.v2.Product.id], with the same
parent branch with this product. Otherwise, an error is
thrown.
For
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product]s, this field can
only be empty or set to the same value as
[id][google.cloud.retail.v2.Product.id].
For VARIANT [Product][google.cloud.retail.v2.Product]s, this
field cannot be empty. A maximum of 2,000 products are
allowed to share the same
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product]. Otherwise, an
INVALID_ARGUMENT error is returned.
Corresponding properties: Google Merchant Center property
`item_group_id <https://support.google.com/merchants/answer/6324507>`__.
Schema.org property
`Product.inProductGroupWithID <https://schema.org/inProductGroupWithID>`__.
collection_member_ids (MutableSequence[str]):
The [id][google.cloud.retail.v2.Product.id] of the
collection members when
[type][google.cloud.retail.v2.Product.type] is
[Type.COLLECTION][google.cloud.retail.v2.Product.Type.COLLECTION].
Non-existent product ids are allowed. The
[type][google.cloud.retail.v2.Product.type] of the members
must be either
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
or
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT]
otherwise an INVALID_ARGUMENT error is thrown. Do not
set it for other types. A maximum of 1000 values are
allowed. Otherwise, an INVALID_ARGUMENT error is returned.
gtin (str):
The Global Trade Item Number (GTIN) of the product.
This field must be a UTF-8 encoded string with a length
limit of 128 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
This field must be a Unigram. Otherwise, an INVALID_ARGUMENT
error is returned.
Corresponding properties: Google Merchant Center property
`gtin <https://support.google.com/merchants/answer/6324461>`__.
Schema.org property
`Product.isbn <https://schema.org/isbn>`__,
`Product.gtin8 <https://schema.org/gtin8>`__,
`Product.gtin12 <https://schema.org/gtin12>`__,
`Product.gtin13 <https://schema.org/gtin13>`__, or
`Product.gtin14 <https://schema.org/gtin14>`__.
If the value is not a valid GTIN, an INVALID_ARGUMENT error
is returned.
categories (MutableSequence[str]):
Product categories. This field is repeated for supporting
one product belonging to several parallel categories.
Using the full path is strongly recommended for better
search / recommendation quality.
To represent the full path of a category, use the '>' sign to
separate different hierarchies. If '>' is part of the category
name, replace it with other character(s).
For example, if a shoes product belongs to both ["Shoes &
Accessories" -> "Shoes"] and ["Sports & Fitness" ->
"Athletic Clothing" -> "Shoes"], it could be represented as:
::
"categories": [
"Shoes & Accessories > Shoes",
"Sports & Fitness > Athletic Clothing > Shoes"
]
Must be set for
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product] otherwise an
INVALID_ARGUMENT error is returned.
At most 250 values are allowed per
[Product][google.cloud.retail.v2.Product]. Empty values are
not allowed. Each value must be a UTF-8 encoded string with
a length limit of 5,000 characters. Otherwise, an
INVALID_ARGUMENT error is returned.
Corresponding properties: Google Merchant Center property
`google_product_category <https://support.google.com/merchants/answer/6324436>`__.
Schema.org property [Product.category]
(https://schema.org/category).
title (str):
Required. Product title.
This field must be a UTF-8 encoded string with a length
limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Corresponding properties: Google Merchant Center property
`title <https://support.google.com/merchants/answer/6324415>`__.
Schema.org property
`Product.name <https://schema.org/name>`__.
brands (MutableSequence[str]):
The brands of the product.
A maximum of 30 brands are allowed. Each brand must be a
UTF-8 encoded string with a length limit of 1,000
characters. Otherwise, an INVALID_ARGUMENT error is
returned.
Corresponding properties: Google Merchant Center property
`brand <https://support.google.com/merchants/answer/6324351>`__.
Schema.org property
`Product.brand <https://schema.org/brand>`__.
description (str):
Product description.
This field must be a UTF-8 encoded string with a length
limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Corresponding properties: Google Merchant Center property
`description <https://support.google.com/merchants/answer/6324468>`__.
Schema.org property
`Product.description <https://schema.org/description>`__.
language_code (str):
Language of the title/description and other string
attributes. Use language tags defined by `BCP
47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__.
For product prediction, this field is ignored and the model
automatically detects the text language. The
[Product][google.cloud.retail.v2.Product] can include text
in different languages, but duplicating
[Product][google.cloud.retail.v2.Product]s to provide text
in multiple languages can result in degraded model
performance.
For product search this field is in use. It defaults to
"en-US" if unset.
attributes (MutableMapping[str, google.cloud.retail_v2.types.CustomAttribute]):
Highly encouraged. Extra product attributes to be included.
For example, for products, this could include the store
name, vendor, style, color, etc. These are very strong
signals for the recommendation model, so we highly recommend
providing the attributes here.
These are features that can take on one of a limited number
of possible values. Two types of features can be set:
textual features, such as the brand or maker of a product, or
the country of a customer; and numerical features, such as
the height or weight of a product, or the age of a customer.
For example:
``{ "vendor": {"text": ["vendor123", "vendor456"]}, "lengths_cm": {"numbers":[2.3, 15.4]}, "heights_cm": {"numbers":[8.1, 6.4]} }``.
This field needs to pass all below criteria, otherwise an
INVALID_ARGUMENT error is returned:
- Max entries count: 200.
- The key must be a UTF-8 encoded string with a length
limit of 128 characters.
- For indexable attribute, the key must match the pattern:
``[a-zA-Z0-9][a-zA-Z0-9_]*``. For example,
``key0LikeThis`` or ``KEY_1_LIKE_THIS``.
- For text attributes, at most 400 values are allowed.
Empty values are not allowed. Each value must be a
non-empty UTF-8 encoded string with a length limit of 256
characters.
- For number attributes, at most 400 values are allowed.
tags (MutableSequence[str]):
Custom tags associated with the product.
At most 250 values are allowed per
[Product][google.cloud.retail.v2.Product]. This value must
be a UTF-8 encoded string with a length limit of 1,000
characters. Otherwise, an INVALID_ARGUMENT error is
returned.
This tag can be used for filtering recommendation results by
passing the tag as part of the
[PredictRequest.filter][google.cloud.retail.v2.PredictRequest.filter].
Corresponding properties: Google Merchant Center property
`custom_label_0–4 <https://support.google.com/merchants/answer/6324473>`__.
price_info (google.cloud.retail_v2.types.PriceInfo):
Product price and cost information.
Corresponding properties: Google Merchant Center property
`price <https://support.google.com/merchants/answer/6324371>`__.
rating (google.cloud.retail_v2.types.Rating):
The rating of this product.
available_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp when this
[Product][google.cloud.retail.v2.Product] becomes available
for
[SearchService.Search][google.cloud.retail.v2.SearchService.Search].
Note that this is only applicable to
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
and
[Type.COLLECTION][google.cloud.retail.v2.Product.Type.COLLECTION],
and ignored for
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT].
availability (google.cloud.retail_v2.types.Product.Availability):
The online availability of the
[Product][google.cloud.retail.v2.Product]. Default to
[Availability.IN_STOCK][google.cloud.retail.v2.Product.Availability.IN_STOCK].
Corresponding properties: Google Merchant Center property
`availability <https://support.google.com/merchants/answer/6324448>`__.
Schema.org property
`Offer.availability <https://schema.org/availability>`__.
available_quantity (google.protobuf.wrappers_pb2.Int32Value):
The available quantity of the item.
fulfillment_info (MutableSequence[google.cloud.retail_v2.types.FulfillmentInfo]):
Fulfillment information, such as the store IDs for in-store
pickup or region IDs for different shipping methods.
All the elements must have distinct
[FulfillmentInfo.type][google.cloud.retail.v2.FulfillmentInfo.type].
Otherwise, an INVALID_ARGUMENT error is returned.
uri (str):
Canonical URL directly linking to the product detail page.
It is strongly recommended to provide a valid uri for the
product, otherwise the service performance could be
significantly degraded.
This field must be a UTF-8 encoded string with a length
limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Corresponding properties: Google Merchant Center property
`link <https://support.google.com/merchants/answer/6324416>`__.
Schema.org property `Offer.url <https://schema.org/url>`__.
images (MutableSequence[google.cloud.retail_v2.types.Image]):
Product images for the product. We highly recommend putting
the main image first.
A maximum of 300 images are allowed.
Corresponding properties: Google Merchant Center property
`image_link <https://support.google.com/merchants/answer/6324350>`__.
Schema.org property
`Product.image <https://schema.org/image>`__.
audience (google.cloud.retail_v2.types.Audience):
The target group associated with a given
audience (e.g. male, veterans, car owners,
musicians, etc.) of the product.
color_info (google.cloud.retail_v2.types.ColorInfo):
The color of the product.
Corresponding properties: Google Merchant Center property
`color <https://support.google.com/merchants/answer/6324487>`__.
Schema.org property
`Product.color <https://schema.org/color>`__.
sizes (MutableSequence[str]):
The size of the product. To represent different size systems
or size types, consider using this format:
[[[size_system:]size_type:]size_value].
For example, in "US:MENS:M", "US" represents size system;
"MENS" represents size type; "M" represents size value. In
"GIRLS:27", size system is empty; "GIRLS" represents size
type; "27" represents size value. In "32 inches", both size
system and size type are empty, while size value is "32
inches".
A maximum of 20 values are allowed per
[Product][google.cloud.retail.v2.Product]. Each value must
be a UTF-8 encoded string with a length limit of 128
characters. Otherwise, an INVALID_ARGUMENT error is
returned.
Corresponding properties: Google Merchant Center property
`size <https://support.google.com/merchants/answer/6324492>`__,
`size_type <https://support.google.com/merchants/answer/6324497>`__,
and
`size_system <https://support.google.com/merchants/answer/6324502>`__.
Schema.org property
`Product.size <https://schema.org/size>`__.
materials (MutableSequence[str]):
The material of the product. For example, "leather",
"wooden".
A maximum of 20 values are allowed. Each value must be a
UTF-8 encoded string with a length limit of 200 characters.
Otherwise, an INVALID_ARGUMENT error is returned.
Corresponding properties: Google Merchant Center property
`material <https://support.google.com/merchants/answer/6324410>`__.
Schema.org property
`Product.material <https://schema.org/material>`__.
patterns (MutableSequence[str]):
The pattern or graphic print of the product. For example,
"striped", "polka dot", "paisley".
A maximum of 20 values are allowed per
[Product][google.cloud.retail.v2.Product]. Each value must
be a UTF-8 encoded string with a length limit of 128
characters. Otherwise, an INVALID_ARGUMENT error is
returned.
Corresponding properties: Google Merchant Center property
`pattern <https://support.google.com/merchants/answer/6324483>`__.
Schema.org property
`Product.pattern <https://schema.org/pattern>`__.
conditions (MutableSequence[str]):
The condition of the product. Strongly encouraged to use the
standard values: "new", "refurbished", "used".
A maximum of 1 value is allowed per
[Product][google.cloud.retail.v2.Product]. Each value must
be a UTF-8 encoded string with a length limit of 128
characters. Otherwise, an INVALID_ARGUMENT error is
returned.
Corresponding properties: Google Merchant Center property
`condition <https://support.google.com/merchants/answer/6324469>`__.
Schema.org property
`Offer.itemCondition <https://schema.org/itemCondition>`__.
promotions (MutableSequence[google.cloud.retail_v2.types.Promotion]):
The promotions applied to the product. A maximum of 10
values are allowed per
[Product][google.cloud.retail.v2.Product]. Only
[Promotion.promotion_id][google.cloud.retail.v2.Promotion.promotion_id]
will be used, other fields will be ignored if set.
publish_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp when the product is published by the retailer
for the first time, which indicates the freshness of the
products. Note that this field is different from
[available_time][google.cloud.retail.v2.Product.available_time],
given it purely describes product freshness regardless of
when it is available on search and recommendation.
retrievable_fields (google.protobuf.field_mask_pb2.FieldMask):
Indicates which fields in the
[Product][google.cloud.retail.v2.Product]s are returned in
[SearchResponse][google.cloud.retail.v2.SearchResponse].
Supported fields for all
[type][google.cloud.retail.v2.Product.type]s:
- [audience][google.cloud.retail.v2.Product.audience]
- [availability][google.cloud.retail.v2.Product.availability]
- [brands][google.cloud.retail.v2.Product.brands]
- [color_info][google.cloud.retail.v2.Product.color_info]
- [conditions][google.cloud.retail.v2.Product.conditions]
- [gtin][google.cloud.retail.v2.Product.gtin]
- [materials][google.cloud.retail.v2.Product.materials]
- [name][google.cloud.retail.v2.Product.name]
- [patterns][google.cloud.retail.v2.Product.patterns]
- [price_info][google.cloud.retail.v2.Product.price_info]
- [rating][google.cloud.retail.v2.Product.rating]
- [sizes][google.cloud.retail.v2.Product.sizes]
- [title][google.cloud.retail.v2.Product.title]
- [uri][google.cloud.retail.v2.Product.uri]
Supported fields only for
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
and
[Type.COLLECTION][google.cloud.retail.v2.Product.Type.COLLECTION]:
- [categories][google.cloud.retail.v2.Product.categories]
- [description][google.cloud.retail.v2.Product.description]
- [images][google.cloud.retail.v2.Product.images]
Supported fields only for
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT]:
- Only the first image in
[images][google.cloud.retail.v2.Product.images]
To mark
[attributes][google.cloud.retail.v2.Product.attributes] as
retrievable, include paths of the form "attributes.key"
where "key" is the key of a custom attribute, as specified
in [attributes][google.cloud.retail.v2.Product.attributes].
For
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
and
[Type.COLLECTION][google.cloud.retail.v2.Product.Type.COLLECTION],
the following fields are always returned in
[SearchResponse][google.cloud.retail.v2.SearchResponse] by
default:
- [name][google.cloud.retail.v2.Product.name]
For
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT],
the following fields are always returned by default:
- [name][google.cloud.retail.v2.Product.name]
- [color_info][google.cloud.retail.v2.Product.color_info]
The maximum number of paths is 30. Otherwise, an
INVALID_ARGUMENT error is returned.
Note: Returning more fields in
[SearchResponse][google.cloud.retail.v2.SearchResponse] can
increase response payload size and serving latency.
This field is deprecated. Use the retrievable site-wide
control instead.
variants (MutableSequence[google.cloud.retail_v2.types.Product]):
Output only. Product variants grouped together on primary
product which share similar product attributes. It's
automatically grouped by
[primary_product_id][google.cloud.retail.v2.Product.primary_product_id]
for all the product variants. Only populated for
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product]s.
Note: This field is OUTPUT_ONLY for
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct].
Do not set this field in API requests.
local_inventories (MutableSequence[google.cloud.retail_v2.types.LocalInventory]):
Output only. A list of local inventories specific to
different places.
This field can be managed by
[ProductService.AddLocalInventories][google.cloud.retail.v2.ProductService.AddLocalInventories]
and
[ProductService.RemoveLocalInventories][google.cloud.retail.v2.ProductService.RemoveLocalInventories]
APIs if fine-grained, high-volume updates are necessary.
"""
class Type(proto.Enum):
r"""The type of this product.
Values:
TYPE_UNSPECIFIED (0):
Default value. Default to
[Catalog.product_level_config.ingestion_product_type][google.cloud.retail.v2.ProductLevelConfig.ingestion_product_type]
if unset.
PRIMARY (1):
The primary type.
As the primary unit for predicting, indexing and search
serving, a
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product] is grouped with
multiple
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT]
[Product][google.cloud.retail.v2.Product]s.
VARIANT (2):
The variant type.
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT]
[Product][google.cloud.retail.v2.Product]s usually share
some common attributes on the same
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product]s, but they have
variant attributes like different colors, sizes and prices,
etc.
COLLECTION (3):
The collection type. Collection products are bundled
[Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product]s or
[Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT]
[Product][google.cloud.retail.v2.Product]s that are sold
together, such as a jewelry set with necklaces, earrings and
rings, etc.
"""
TYPE_UNSPECIFIED = 0
PRIMARY = 1
VARIANT = 2
COLLECTION = 3
class Availability(proto.Enum):
r"""Product availability. If this field is unspecified, the
product is assumed to be in stock.
Values:
AVAILABILITY_UNSPECIFIED (0):
Default product availability. Default to
[Availability.IN_STOCK][google.cloud.retail.v2.Product.Availability.IN_STOCK]
if unset.
IN_STOCK (1):
Product in stock.
OUT_OF_STOCK (2):
Product out of stock.
PREORDER (3):
Product that is in pre-order state.
BACKORDER (4):
Product that is back-ordered (i.e.
temporarily out of stock).
"""
AVAILABILITY_UNSPECIFIED = 0
IN_STOCK = 1
OUT_OF_STOCK = 2
PREORDER = 3
BACKORDER = 4
expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=16,
oneof="expiration",
message=timestamp_pb2.Timestamp,
)
ttl: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=17,
oneof="expiration",
message=duration_pb2.Duration,
)
name: str = proto.Field(
proto.STRING,
number=1,
)
id: str = proto.Field(
proto.STRING,
number=2,
)
type_: Type = proto.Field(
proto.ENUM,
number=3,
enum=Type,
)
primary_product_id: str = proto.Field(
proto.STRING,
number=4,
)
collection_member_ids: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=5,
)
gtin: str = proto.Field(
proto.STRING,
number=6,
)
categories: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=7,
)
title: str = proto.Field(
proto.STRING,
number=8,
)
brands: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=9,
)
description: str = proto.Field(
proto.STRING,
number=10,
)
language_code: str = proto.Field(
proto.STRING,
number=11,
)
attributes: MutableMapping[str, common.CustomAttribute] = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=12,
message=common.CustomAttribute,
)
tags: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=13,
)
price_info: common.PriceInfo = proto.Field(
proto.MESSAGE,
number=14,
message=common.PriceInfo,
)
rating: common.Rating = proto.Field(
proto.MESSAGE,
number=15,
message=common.Rating,
)
available_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=18,
message=timestamp_pb2.Timestamp,
)
availability: Availability = proto.Field(
proto.ENUM,
number=19,
enum=Availability,
)
available_quantity: wrappers_pb2.Int32Value = proto.Field(
proto.MESSAGE,
number=20,
message=wrappers_pb2.Int32Value,
)
fulfillment_info: MutableSequence[common.FulfillmentInfo] = proto.RepeatedField(
proto.MESSAGE,
number=21,
message=common.FulfillmentInfo,
)
uri: str = proto.Field(
proto.STRING,
number=22,
)
images: MutableSequence[common.Image] = proto.RepeatedField(
proto.MESSAGE,
number=23,
message=common.Image,
)
audience: common.Audience = proto.Field(
proto.MESSAGE,
number=24,
message=common.Audience,
)
color_info: common.ColorInfo = proto.Field(
proto.MESSAGE,
number=25,
message=common.ColorInfo,
)
sizes: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=26,
)
materials: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=27,
)
patterns: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=28,
)
conditions: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=29,
)
promotions: MutableSequence[promotion.Promotion] = proto.RepeatedField(
proto.MESSAGE,
number=34,
message=promotion.Promotion,
)
publish_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=33,
message=timestamp_pb2.Timestamp,
)
retrievable_fields: field_mask_pb2.FieldMask = proto.Field(
proto.MESSAGE,
number=30,
message=field_mask_pb2.FieldMask,
)
variants: MutableSequence["Product"] = proto.RepeatedField(
proto.MESSAGE,
number=31,
message="Product",
)
local_inventories: MutableSequence[common.LocalInventory] = proto.RepeatedField(
proto.MESSAGE,
number=35,
message=common.LocalInventory,
)
__all__ = tuple(sorted(__protobuf__.manifest))
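# --- Illustrative usage sketch (not part of the generated module) --------
# A hedged example of constructing a Product with the category-path and
# custom-attribute formats described in the docstring above. Resource names,
# attribute keys and field values are hypothetical placeholders, and keyword
# construction follows the usual proto-plus pattern.
if __name__ == "__main__":
    example_product = Product(
        name=(
            "projects/example-project/locations/global/catalogs/"
            "default_catalog/branches/default_branch/products/id_1"
        ),
        id="id_1",
        type_=Product.Type.PRIMARY,
        title="Trail Running Shoe",
        categories=[
            "Shoes & Accessories > Shoes",
            "Sports & Fitness > Athletic Clothing > Shoes",
        ],
        attributes={
            "vendor": common.CustomAttribute(text=["vendor123", "vendor456"]),
            "lengths_cm": common.CustomAttribute(numbers=[2.3, 15.4]),
        },
        availability=Product.Availability.IN_STOCK,
    )
    print(example_product.title)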
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/storagemover/v20230701preview/job_definition.py
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['JobDefinitionArgs', 'JobDefinition']
@pulumi.input_type
class JobDefinitionArgs:
def __init__(__self__, *,
copy_mode: pulumi.Input[Union[str, 'CopyMode']],
project_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
source_name: pulumi.Input[str],
storage_mover_name: pulumi.Input[str],
target_name: pulumi.Input[str],
agent_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
job_definition_name: Optional[pulumi.Input[str]] = None,
source_subpath: Optional[pulumi.Input[str]] = None,
target_subpath: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a JobDefinition resource.
:param pulumi.Input[Union[str, 'CopyMode']] copy_mode: Strategy to use for copy.
:param pulumi.Input[str] project_name: The name of the Project resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] source_name: The name of the source Endpoint.
:param pulumi.Input[str] storage_mover_name: The name of the Storage Mover resource.
:param pulumi.Input[str] target_name: The name of the target Endpoint.
:param pulumi.Input[str] agent_name: Name of the Agent to assign for new Job Runs of this Job Definition.
:param pulumi.Input[str] description: A description for the Job Definition.
:param pulumi.Input[str] job_definition_name: The name of the Job Definition resource.
:param pulumi.Input[str] source_subpath: The subpath to use when reading from the source Endpoint.
:param pulumi.Input[str] target_subpath: The subpath to use when writing to the target Endpoint.
"""
pulumi.set(__self__, "copy_mode", copy_mode)
pulumi.set(__self__, "project_name", project_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "source_name", source_name)
pulumi.set(__self__, "storage_mover_name", storage_mover_name)
pulumi.set(__self__, "target_name", target_name)
if agent_name is not None:
pulumi.set(__self__, "agent_name", agent_name)
if description is not None:
pulumi.set(__self__, "description", description)
if job_definition_name is not None:
pulumi.set(__self__, "job_definition_name", job_definition_name)
if source_subpath is not None:
pulumi.set(__self__, "source_subpath", source_subpath)
if target_subpath is not None:
pulumi.set(__self__, "target_subpath", target_subpath)
@property
@pulumi.getter(name="copyMode")
def copy_mode(self) -> pulumi.Input[Union[str, 'CopyMode']]:
"""
Strategy to use for copy.
"""
return pulumi.get(self, "copy_mode")
@copy_mode.setter
def copy_mode(self, value: pulumi.Input[Union[str, 'CopyMode']]):
pulumi.set(self, "copy_mode", value)
@property
@pulumi.getter(name="projectName")
def project_name(self) -> pulumi.Input[str]:
"""
The name of the Project resource.
"""
return pulumi.get(self, "project_name")
@project_name.setter
def project_name(self, value: pulumi.Input[str]):
pulumi.set(self, "project_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="sourceName")
def source_name(self) -> pulumi.Input[str]:
"""
The name of the source Endpoint.
"""
return pulumi.get(self, "source_name")
@source_name.setter
def source_name(self, value: pulumi.Input[str]):
pulumi.set(self, "source_name", value)
@property
@pulumi.getter(name="storageMoverName")
def storage_mover_name(self) -> pulumi.Input[str]:
"""
The name of the Storage Mover resource.
"""
return pulumi.get(self, "storage_mover_name")
@storage_mover_name.setter
def storage_mover_name(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_mover_name", value)
@property
@pulumi.getter(name="targetName")
def target_name(self) -> pulumi.Input[str]:
"""
The name of the target Endpoint.
"""
return pulumi.get(self, "target_name")
@target_name.setter
def target_name(self, value: pulumi.Input[str]):
pulumi.set(self, "target_name", value)
@property
@pulumi.getter(name="agentName")
def agent_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Agent to assign for new Job Runs of this Job Definition.
"""
return pulumi.get(self, "agent_name")
@agent_name.setter
def agent_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for the Job Definition.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="jobDefinitionName")
def job_definition_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Job Definition resource.
"""
return pulumi.get(self, "job_definition_name")
@job_definition_name.setter
def job_definition_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_definition_name", value)
@property
@pulumi.getter(name="sourceSubpath")
def source_subpath(self) -> Optional[pulumi.Input[str]]:
"""
The subpath to use when reading from the source Endpoint.
"""
return pulumi.get(self, "source_subpath")
@source_subpath.setter
def source_subpath(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_subpath", value)
@property
@pulumi.getter(name="targetSubpath")
def target_subpath(self) -> Optional[pulumi.Input[str]]:
"""
The subpath to use when writing to the target Endpoint.
"""
return pulumi.get(self, "target_subpath")
@target_subpath.setter
def target_subpath(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_subpath", value)
class JobDefinition(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_name: Optional[pulumi.Input[str]] = None,
copy_mode: Optional[pulumi.Input[Union[str, 'CopyMode']]] = None,
description: Optional[pulumi.Input[str]] = None,
job_definition_name: Optional[pulumi.Input[str]] = None,
project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source_name: Optional[pulumi.Input[str]] = None,
source_subpath: Optional[pulumi.Input[str]] = None,
storage_mover_name: Optional[pulumi.Input[str]] = None,
target_name: Optional[pulumi.Input[str]] = None,
target_subpath: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The Job Definition resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] agent_name: Name of the Agent to assign for new Job Runs of this Job Definition.
:param pulumi.Input[Union[str, 'CopyMode']] copy_mode: Strategy to use for copy.
:param pulumi.Input[str] description: A description for the Job Definition.
:param pulumi.Input[str] job_definition_name: The name of the Job Definition resource.
:param pulumi.Input[str] project_name: The name of the Project resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] source_name: The name of the source Endpoint.
:param pulumi.Input[str] source_subpath: The subpath to use when reading from the source Endpoint.
:param pulumi.Input[str] storage_mover_name: The name of the Storage Mover resource.
:param pulumi.Input[str] target_name: The name of the target Endpoint.
:param pulumi.Input[str] target_subpath: The subpath to use when writing to the target Endpoint.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: JobDefinitionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The Job Definition resource.
:param str resource_name: The name of the resource.
:param JobDefinitionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(JobDefinitionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_name: Optional[pulumi.Input[str]] = None,
copy_mode: Optional[pulumi.Input[Union[str, 'CopyMode']]] = None,
description: Optional[pulumi.Input[str]] = None,
job_definition_name: Optional[pulumi.Input[str]] = None,
project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source_name: Optional[pulumi.Input[str]] = None,
source_subpath: Optional[pulumi.Input[str]] = None,
storage_mover_name: Optional[pulumi.Input[str]] = None,
target_name: Optional[pulumi.Input[str]] = None,
target_subpath: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = JobDefinitionArgs.__new__(JobDefinitionArgs)
__props__.__dict__["agent_name"] = agent_name
if copy_mode is None and not opts.urn:
raise TypeError("Missing required property 'copy_mode'")
__props__.__dict__["copy_mode"] = copy_mode
__props__.__dict__["description"] = description
__props__.__dict__["job_definition_name"] = job_definition_name
if project_name is None and not opts.urn:
raise TypeError("Missing required property 'project_name'")
__props__.__dict__["project_name"] = project_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if source_name is None and not opts.urn:
raise TypeError("Missing required property 'source_name'")
__props__.__dict__["source_name"] = source_name
__props__.__dict__["source_subpath"] = source_subpath
if storage_mover_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_mover_name'")
__props__.__dict__["storage_mover_name"] = storage_mover_name
if target_name is None and not opts.urn:
raise TypeError("Missing required property 'target_name'")
__props__.__dict__["target_name"] = target_name
__props__.__dict__["target_subpath"] = target_subpath
__props__.__dict__["agent_resource_id"] = None
__props__.__dict__["latest_job_run_name"] = None
__props__.__dict__["latest_job_run_resource_id"] = None
__props__.__dict__["latest_job_run_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["source_resource_id"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["target_resource_id"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:storagemover:JobDefinition"), pulumi.Alias(type_="azure-native:storagemover/v20220701preview:JobDefinition"), pulumi.Alias(type_="azure-native:storagemover/v20230301:JobDefinition")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(JobDefinition, __self__).__init__(
'azure-native:storagemover/v20230701preview:JobDefinition',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'JobDefinition':
"""
Get an existing JobDefinition resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = JobDefinitionArgs.__new__(JobDefinitionArgs)
__props__.__dict__["agent_name"] = None
__props__.__dict__["agent_resource_id"] = None
__props__.__dict__["copy_mode"] = None
__props__.__dict__["description"] = None
__props__.__dict__["latest_job_run_name"] = None
__props__.__dict__["latest_job_run_resource_id"] = None
__props__.__dict__["latest_job_run_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["source_name"] = None
__props__.__dict__["source_resource_id"] = None
__props__.__dict__["source_subpath"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["target_name"] = None
__props__.__dict__["target_resource_id"] = None
__props__.__dict__["target_subpath"] = None
__props__.__dict__["type"] = None
return JobDefinition(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="agentName")
def agent_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the Agent to assign for new Job Runs of this Job Definition.
"""
return pulumi.get(self, "agent_name")
@property
@pulumi.getter(name="agentResourceId")
def agent_resource_id(self) -> pulumi.Output[str]:
"""
Fully qualified resource id of the Agent to assign for new Job Runs of this Job Definition.
"""
return pulumi.get(self, "agent_resource_id")
@property
@pulumi.getter(name="copyMode")
def copy_mode(self) -> pulumi.Output[str]:
"""
Strategy to use for copy.
"""
return pulumi.get(self, "copy_mode")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for the Job Definition.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="latestJobRunName")
def latest_job_run_name(self) -> pulumi.Output[str]:
"""
The name of the Job Run in a non-terminal state, if one exists.
"""
return pulumi.get(self, "latest_job_run_name")
@property
@pulumi.getter(name="latestJobRunResourceId")
def latest_job_run_resource_id(self) -> pulumi.Output[str]:
"""
The fully qualified resource ID of the Job Run in a non-terminal state, if one exists.
"""
return pulumi.get(self, "latest_job_run_resource_id")
@property
@pulumi.getter(name="latestJobRunStatus")
def latest_job_run_status(self) -> pulumi.Output[str]:
"""
The current status of the Job Run in a non-terminal state, if one exists.
"""
return pulumi.get(self, "latest_job_run_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of this resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceName")
def source_name(self) -> pulumi.Output[str]:
"""
The name of the source Endpoint.
"""
return pulumi.get(self, "source_name")
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> pulumi.Output[str]:
"""
Fully qualified resource ID of the source Endpoint.
"""
return pulumi.get(self, "source_resource_id")
@property
@pulumi.getter(name="sourceSubpath")
def source_subpath(self) -> pulumi.Output[Optional[str]]:
"""
The subpath to use when reading from the source Endpoint.
"""
return pulumi.get(self, "source_subpath")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="targetName")
def target_name(self) -> pulumi.Output[str]:
"""
The name of the target Endpoint.
"""
return pulumi.get(self, "target_name")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> pulumi.Output[str]:
"""
Fully qualified resource ID of the target Endpoint.
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter(name="targetSubpath")
def target_subpath(self) -> pulumi.Output[Optional[str]]:
"""
The subpath to use when writing to the target Endpoint.
"""
return pulumi.get(self, "target_subpath")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
/wemake_python_styleguide-0.18.0-py3-none-any.whl/wemake_python_styleguide/logic/tokens/comprehensions.py
import tokenize
from typing import List, Optional
import attr
from typing_extensions import final
@final
@attr.dataclass(slots=True)
class Compehension(object):
"""
Represents a syntax for Python comprehension.
The optimal way of using this class is
by just creating it with the first opening ``left_bracket``
and then assigning values you need when you meet them.
"""
left_bracket: tokenize.TokenInfo
expr: Optional[tokenize.TokenInfo] = None
# `for` keywords
fors: List[tokenize.TokenInfo] = attr.ib(factory=list)
# `in` part, keywords and expressions
ins: List[tokenize.TokenInfo] = attr.ib(factory=list)
in_exprs: List[tokenize.TokenInfo] = attr.ib(factory=list)
# Condition part:
_ifs: List[tokenize.TokenInfo] = attr.ib(factory=list)
async_broken: bool = False
_checked: bool = False
def append_if(self, token: tokenize.TokenInfo) -> None:
"""
Conditionally appends ``if`` token, if there's at least one ``for``.
Why? Because you might have ``if`` before ``for``.
In this case it is just a ternary inside ``expr``.
In real comprehensions ``if`` are always after ``for``.
"""
if self.fors:
self._ifs.append(token)
def is_ready(self) -> bool:
"""
Checks that comprehension is built correctly with all required parts.
We also check that each comprehension is analyzed only once.
"""
return (
self.expr is not None and
bool(self.fors) and
len(self.fors) == len(self.ins) == len(self.in_exprs) and
not self._checked
)
def is_valid(self) -> bool:
"""Checks that compehension definition is valid."""
if self.async_broken:
return False
for_in = self._check_for_in()
# mypy requires this `assert`, always true if `is_ready()`
assert self.expr # noqa: S101
is_multiline = self.expr.start[0] != self._first_for_line
fors = self._check_fors(is_multiline=is_multiline)
for_if = self._check_for_if(is_multiline=is_multiline)
self._checked = True # noqa: WPS601
return for_in and fors and for_if
@property
def _first_for_line(self) -> int:
"""Returns the line number of the first ``for`` token."""
return self.fors[0].start[0]
def _check_for_in(self) -> bool:
"""Checks that all ``for`` and ``in`` tokens are aligned together."""
return all(
for_.start[0] == in_.start[0] == in_expr.start[0]
for for_, in_, in_expr in zip(self.fors, self.ins, self.in_exprs)
)
def _check_fors(self, *, is_multiline: bool) -> bool:
"""Checks that all ``for`` tokens are aligned."""
if len(self.fors) == 1:
return True # one `for` is always correct
if is_multiline:
return all(
for_.start[0] == self._first_for_line + index
for index, for_ in enumerate(self.fors)
if index > 0
)
return all(
for_.start[0] == self._first_for_line
for for_ in self.fors
)
def _check_for_if(self, *, is_multiline: bool) -> bool:
"""Checks that all ``for`` and ``if`` tokens are aligned."""
if is_multiline:
last_for_line = self.fors[-1].start[0]
return all(
if_.start[0] == last_for_line + index + 1
for index, if_ in enumerate(self._ifs)
)
return all(
if_.start[0] == self._first_for_line
for if_ in self._ifs
)
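# --- Illustrative usage sketch (not part of the library) -----------------
# A hedged example of how this class might be filled from a token stream.
# The token matching below is deliberately simplified and only handles this
# particular single-line comprehension; the real checker builds these
# objects from its own token visitors instead.
if __name__ == "__main__":
    import io

    source = '[x for x in items if x]\n'
    tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))

    comp = None
    for index, token in enumerate(tokens):
        if comp is None and token.exact_type == tokenize.LSQB:
            comp = Compehension(left_bracket=token)
        elif comp is not None and token.type == tokenize.NAME:
            if token.string == 'for':
                comp.fors.append(token)
            elif token.string == 'in':
                comp.ins.append(token)
                comp.in_exprs.append(tokens[index + 1])
            elif token.string == 'if':
                comp.append_if(token)
            elif comp.expr is None:
                comp.expr = token  # first name after `[` is the expression

    assert comp is not None and comp.is_ready()
    print(comp.is_valid())  # True: everything sits on a single line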
/fiduswriter-github-export-3.10.5.tar.gz/fiduswriter-github-export-3.10.5/fiduswriter/github_export/proxy_views.py
import re
import json
from tornado.web import RequestHandler
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
from base.django_handler_mixin import DjangoHandlerMixin
from allauth.socialaccount.models import SocialToken
from . import models
ALLOWED_PATHS = [
re.compile(r"^repos/([\w\.\-@_]+)/([\w\.\-@_]+)/contents/"),
re.compile(r"^user/repos$"),
re.compile(r"^user/repos/reload$"),
re.compile(r"^repos/([\w\.\-@_]+)/([\w\.\-@_]+)/git/blobs/([\w\d]+)$"),
re.compile(
r"^repos/([\w\.\-@_]+)/([\w\.\-@_]+)/git/refs/heads/([\w\d]+)$"
),
re.compile(r"^repos/([\w\.\-@_]+)/([\w\.\-@_]+)/git/blobs$"),
re.compile(r"^repos/([\w\.\-@_]+)/([\w\.\-@_]+)$"),
re.compile(r"^repos/([\w\.\-@_]+)/([\w\.\-@_]+)/git/commits$"),
re.compile(r"^repos/([\w\.\-@_]+)/([\w\.\-@_]+)/git/trees$"),
]
class Proxy(DjangoHandlerMixin, RequestHandler):
async def get(self, path):
user = self.get_current_user()
social_token = SocialToken.objects.filter(
account__user=user, account__provider="github"
).first()
if (
not any(regex.match(path) for regex in ALLOWED_PATHS)
or not social_token
or not user.is_authenticated
):
self.set_status(401)
self.finish()
return
headers = {
"Authorization": "token {}".format(social_token.token),
"User-Agent": "Fidus Writer",
"Accept": "application/vnd.github.v3+json",
}
if path == "user/repos":
await self.get_repos(path, user, social_token, headers)
self.finish()
return
elif path == "user/repos/reload":
await self.get_repos(
path, user, social_token, headers, reload=True
)
self.finish()
return
query = self.request.query
url = "https://api.github.com/{}".format(path)
if query:
url += "?" + query
request = HTTPRequest(url, "GET", headers, request_timeout=120)
http = AsyncHTTPClient()
try:
response = await http.fetch(request)
except HTTPError as e:
if e.response.code == 404:
# We remove the 404 response so it will not show up as an
# error in the browser
self.write("[]")
else:
self.set_status(e.response.code)
self.write(e.response.body)
except Exception as e:
self.set_status(500)
self.write("Error: %s" % e)
else:
self.set_status(response.code)
self.write(response.body)
self.finish()
async def get_repos(self, path, user, social_token, headers, reload=False):
repo_info = models.RepoInfo.objects.filter(user=user).first()
if repo_info:
if reload:
repo_info.delete()
else:
self.set_status(200)
self.write(json.dumps(repo_info.content))
return
repos = []
page = 1
last_page = False
while not last_page:
url = f"https://api.github.com/user/repos?page={page}&per_page=100"
request = HTTPRequest(url, "GET", headers, request_timeout=120)
http = AsyncHTTPClient()
try:
response = await http.fetch(request)
except HTTPError as e:
if e.response.code == 404:
# We remove the 404 response so it will not show up as an
# error in the browser
self.write("[]")
else:
self.set_status(e.response.code)
self.write(e.response.body)
return
except Exception as e:
self.set_status(500)
self.write("Error: %s" % e)
return
else:
content = json.loads(response.body)
repos += content
if len(content) == 100:
page += 1
else:
last_page = True
repo_info, created = models.RepoInfo.objects.get_or_create(user=user)
repo_info.content = repos
repo_info.save()
self.set_status(200)
self.write(json.dumps(repo_info.content))
async def post(self, path):
user = self.get_current_user()
social_token = SocialToken.objects.filter(
account__user=user, account__provider="github"
).first()
if (
not any(regex.match(path) for regex in ALLOWED_PATHS)
or not social_token
or not user.is_authenticated
):
self.set_status(401)
self.finish()
return
headers = {
"Authorization": "token {}".format(social_token.token),
"User-Agent": "Fidus Writer",
"Accept": "application/vnd.github.v3+json",
}
query = self.request.query
url = "https://api.github.com/{}".format(path)
if query:
url += "?" + query
request = HTTPRequest(
url, "POST", headers, body=self.request.body, request_timeout=120
)
http = AsyncHTTPClient()
try:
response = await http.fetch(request)
except Exception as e:
self.set_status(500)
self.write("Error: %s" % e)
else:
self.set_status(response.code)
self.write(response.body)
self.finish()
async def patch(self, path):
user = self.get_current_user()
social_token = SocialToken.objects.filter(
account__user=user, account__provider="github"
).first()
if (
not any(regex.match(path) for regex in ALLOWED_PATHS)
or not social_token
or not user.is_authenticated
):
self.set_status(401)
self.finish()
return
headers = {
"Authorization": "token {}".format(social_token.token),
"User-Agent": "Fidus Writer",
"Accept": "application/vnd.github.v3+json",
}
query = self.request.query
url = "https://api.github.com/{}".format(path)
if query:
url += "?" + query
request = HTTPRequest(
url, "PATCH", headers, body=self.request.body, request_timeout=120
)
http = AsyncHTTPClient()
try:
response = await http.fetch(request)
except Exception as e:
self.set_status(500)
self.write("Error: %s" % e)
else:
self.set_status(response.code)
self.write(response.body)
self.finish()
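# --- Illustrative check (not part of the module) --------------------------
# A hedged sketch showing which request paths the allow-list above accepts.
# Importing this module requires the surrounding Django/Tornado environment,
# so this block only runs when the file is executed in that context; the
# repository path below is a placeholder.
if __name__ == "__main__":
    allowed = any(
        regex.match("repos/example-user/example-repo/contents/README.md")
        for regex in ALLOWED_PATHS
    )
    blocked = any(
        regex.match("user/emails") for regex in ALLOWED_PATHS
    )
    print(allowed, blocked)  # True False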