id | text | dataset_id
---|---|---
/pulumi_aws-6.1.0a1693529760.tar.gz/pulumi_aws-6.1.0a1693529760/pulumi_aws/ec2transitgateway/route_table_propagation.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['RouteTablePropagationArgs', 'RouteTablePropagation']
@pulumi.input_type
class RouteTablePropagationArgs:
def __init__(__self__, *,
transit_gateway_attachment_id: pulumi.Input[str],
transit_gateway_route_table_id: pulumi.Input[str]):
"""
The set of arguments for constructing a RouteTablePropagation resource.
:param pulumi.Input[str] transit_gateway_attachment_id: Identifier of EC2 Transit Gateway Attachment.
:param pulumi.Input[str] transit_gateway_route_table_id: Identifier of EC2 Transit Gateway Route Table.
"""
pulumi.set(__self__, "transit_gateway_attachment_id", transit_gateway_attachment_id)
pulumi.set(__self__, "transit_gateway_route_table_id", transit_gateway_route_table_id)
@property
@pulumi.getter(name="transitGatewayAttachmentId")
def transit_gateway_attachment_id(self) -> pulumi.Input[str]:
"""
Identifier of EC2 Transit Gateway Attachment.
"""
return pulumi.get(self, "transit_gateway_attachment_id")
@transit_gateway_attachment_id.setter
def transit_gateway_attachment_id(self, value: pulumi.Input[str]):
pulumi.set(self, "transit_gateway_attachment_id", value)
@property
@pulumi.getter(name="transitGatewayRouteTableId")
def transit_gateway_route_table_id(self) -> pulumi.Input[str]:
"""
Identifier of EC2 Transit Gateway Route Table.
"""
return pulumi.get(self, "transit_gateway_route_table_id")
@transit_gateway_route_table_id.setter
def transit_gateway_route_table_id(self, value: pulumi.Input[str]):
pulumi.set(self, "transit_gateway_route_table_id", value)
@pulumi.input_type
class _RouteTablePropagationState:
def __init__(__self__, *,
resource_id: Optional[pulumi.Input[str]] = None,
resource_type: Optional[pulumi.Input[str]] = None,
transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,
transit_gateway_route_table_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering RouteTablePropagation resources.
:param pulumi.Input[str] resource_id: Identifier of the resource
:param pulumi.Input[str] resource_type: Type of the resource
:param pulumi.Input[str] transit_gateway_attachment_id: Identifier of EC2 Transit Gateway Attachment.
:param pulumi.Input[str] transit_gateway_route_table_id: Identifier of EC2 Transit Gateway Route Table.
"""
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if resource_type is not None:
pulumi.set(__self__, "resource_type", resource_type)
if transit_gateway_attachment_id is not None:
pulumi.set(__self__, "transit_gateway_attachment_id", transit_gateway_attachment_id)
if transit_gateway_route_table_id is not None:
pulumi.set(__self__, "transit_gateway_route_table_id", transit_gateway_route_table_id)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifier of the resource
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter(name="resourceType")
def resource_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of the resource
"""
return pulumi.get(self, "resource_type")
@resource_type.setter
def resource_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_type", value)
@property
@pulumi.getter(name="transitGatewayAttachmentId")
def transit_gateway_attachment_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifier of EC2 Transit Gateway Attachment.
"""
return pulumi.get(self, "transit_gateway_attachment_id")
@transit_gateway_attachment_id.setter
def transit_gateway_attachment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "transit_gateway_attachment_id", value)
@property
@pulumi.getter(name="transitGatewayRouteTableId")
def transit_gateway_route_table_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifier of EC2 Transit Gateway Route Table.
"""
return pulumi.get(self, "transit_gateway_route_table_id")
@transit_gateway_route_table_id.setter
def transit_gateway_route_table_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "transit_gateway_route_table_id", value)
class RouteTablePropagation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,
transit_gateway_route_table_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an EC2 Transit Gateway Route Table propagation.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2transitgateway.RouteTablePropagation("example",
transit_gateway_attachment_id=aws_ec2_transit_gateway_vpc_attachment["example"]["id"],
transit_gateway_route_table_id=aws_ec2_transit_gateway_route_table["example"]["id"])
```
## Import
Using `pulumi import`, import `aws_ec2_transit_gateway_route_table_propagation` using the EC2 Transit Gateway Route Table identifier, an underscore, and the EC2 Transit Gateway Attachment identifier. For example:
```sh
$ pulumi import aws:ec2transitgateway/routeTablePropagation:RouteTablePropagation example tgw-rtb-12345678_tgw-attach-87654321
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] transit_gateway_attachment_id: Identifier of EC2 Transit Gateway Attachment.
:param pulumi.Input[str] transit_gateway_route_table_id: Identifier of EC2 Transit Gateway Route Table.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteTablePropagationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an EC2 Transit Gateway Route Table propagation.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2transitgateway.RouteTablePropagation("example",
transit_gateway_attachment_id=aws_ec2_transit_gateway_vpc_attachment["example"]["id"],
transit_gateway_route_table_id=aws_ec2_transit_gateway_route_table["example"]["id"])
```
## Import
Using `pulumi import`, import `aws_ec2_transit_gateway_route_table_propagation` using the EC2 Transit Gateway Route Table identifier, an underscore, and the EC2 Transit Gateway Attachment identifier. For example:
```sh
$ pulumi import aws:ec2transitgateway/routeTablePropagation:RouteTablePropagation example tgw-rtb-12345678_tgw-attach-87654321
```
:param str resource_name: The name of the resource.
:param RouteTablePropagationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteTablePropagationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,
transit_gateway_route_table_id: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteTablePropagationArgs.__new__(RouteTablePropagationArgs)
if transit_gateway_attachment_id is None and not opts.urn:
raise TypeError("Missing required property 'transit_gateway_attachment_id'")
__props__.__dict__["transit_gateway_attachment_id"] = transit_gateway_attachment_id
if transit_gateway_route_table_id is None and not opts.urn:
raise TypeError("Missing required property 'transit_gateway_route_table_id'")
__props__.__dict__["transit_gateway_route_table_id"] = transit_gateway_route_table_id
__props__.__dict__["resource_id"] = None
__props__.__dict__["resource_type"] = None
super(RouteTablePropagation, __self__).__init__(
'aws:ec2transitgateway/routeTablePropagation:RouteTablePropagation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
resource_id: Optional[pulumi.Input[str]] = None,
resource_type: Optional[pulumi.Input[str]] = None,
transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,
transit_gateway_route_table_id: Optional[pulumi.Input[str]] = None) -> 'RouteTablePropagation':
"""
Get an existing RouteTablePropagation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] resource_id: Identifier of the resource
:param pulumi.Input[str] resource_type: Type of the resource
:param pulumi.Input[str] transit_gateway_attachment_id: Identifier of EC2 Transit Gateway Attachment.
:param pulumi.Input[str] transit_gateway_route_table_id: Identifier of EC2 Transit Gateway Route Table.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RouteTablePropagationState.__new__(_RouteTablePropagationState)
__props__.__dict__["resource_id"] = resource_id
__props__.__dict__["resource_type"] = resource_type
__props__.__dict__["transit_gateway_attachment_id"] = transit_gateway_attachment_id
__props__.__dict__["transit_gateway_route_table_id"] = transit_gateway_route_table_id
return RouteTablePropagation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[str]:
"""
Identifier of the resource
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="resourceType")
def resource_type(self) -> pulumi.Output[str]:
"""
Type of the resource
"""
return pulumi.get(self, "resource_type")
@property
@pulumi.getter(name="transitGatewayAttachmentId")
def transit_gateway_attachment_id(self) -> pulumi.Output[str]:
"""
Identifier of EC2 Transit Gateway Attachment.
"""
return pulumi.get(self, "transit_gateway_attachment_id")
@property
@pulumi.getter(name="transitGatewayRouteTableId")
def transit_gateway_route_table_id(self) -> pulumi.Output[str]:
"""
Identifier of EC2 Transit Gateway Route Table.
"""
return pulumi.get(self, "transit_gateway_route_table_id")
|
PypiClean
|
/reg-bench-0.0.3.tar.gz/reg-bench-0.0.3/reg_bench/symbolic_regression/util.py
|
import collections
import inspect
import sys
from functools import partial
from inspect import getframeinfo
from inspect import getmodulename
from inspect import stack
from itertools import repeat
import numpy as np
import toolz
def poly(x, i):
    # np.sum over a generator is deprecated in NumPy; the builtin sum suffices here.
    return sum(x ** j for j in range(1, i + 1))
test_data = collections.namedtuple("TestData", "data target")
def generate_data_set(testfunction, num_points, dist, params):
dim = len(inspect.getfullargspec(testfunction).args)
if isinstance(params, dict):
dist_ = lambda size, params: dist(size=size, **params)
else:
dist_ = nd_dist_factory(dist)
data = dist_(size=(dim, num_points), params=params)
target = testfunction(*data)
return test_data(data=data, target=target)
def nd_dist_factory(dist):
return lambda size, params: np.array(
[dist(size=s, **p) for s, p in zip(repeat(size[1], times=size[0]), params)]
)
def generate_uniform_data_set(testfunction, num_points, ranges, rng=np.random):
to_dict = lambda range_: dict(low=range_[0], high=range_[1])
params = (to_dict(range_) for range_ in ranges) if toolz.isiterable(ranges[0]) else to_dict(ranges)
return generate_data_set(testfunction, num_points, rng.uniform, params)
def isiterable(x):
try:
iter(x)
return True
except TypeError:
return False
def generate_evenly_spaced_data_set(testfunction, step_sizes, ranges):
dim = len(inspect.getfullargspec(testfunction).args)
if len(ranges) == 2 and not isiterable(ranges[0]):
ranges = repeat(ranges, times=dim)
else:
if dim != len(ranges):
raise ValueError
if isinstance(step_sizes, float):
step_sizes = repeat(step_sizes, times=dim)
else:
if dim != len(step_sizes):
raise ValueError
grid = np.meshgrid(
*[
            np.linspace(l, u, int(round((u - l) / step_size)) + 1, endpoint=True)
for (l, u), step_size in zip(ranges, step_sizes)
]
)
data = np.array([g.flatten() for g in grid])
return test_data(data=data, target=testfunction(*data))
def generator_from_helper(helper, shift=0, i=()):
caller = getframeinfo(stack()[1][0]) # find current_module by looking up caller in stack
name = getmodulename(caller.filename)
current_module = [mod for mname, mod in sys.modules.items() if name == mname.split(".")[-1]][0]
context = dir(current_module)
for f, fname in (
(getattr(current_module, func), func) for func in context if "{}_func".format(name) in func
):
f_ = partial(helper, func=f)
n = int(fname.split("_func")[-1])
if n in i or not i:
generator_name = "generate_{}{}".format(name, n + shift)
if generator_name not in context:
setattr(current_module, generator_name, f_) # register generator function in current_module
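# Illustrative sketch of the naming convention generator_from_helper relies on
# (the module name "poly" and the helper below are hypothetical examples):
# a module poly.py defining
#     def poly_func1(x): ...
#     def poly_func2(x, y): ...
# that calls generator_from_helper(some_helper) gets attributes
# generate_poly1 and generate_poly2 registered on it, each being
# functools.partial(some_helper, func=poly_funcN).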
|
PypiClean
|
/Cherrydoor-0.8.1-py3-none-any.whl/cherrydoor/static/components/ManageUsers.js
|
const ManageUsers = {
data() {
return {
users: [],
available_permissions: this.user.permissions,
new_user: {
username: "",
password: "",
permissions: Object.keys(this.user.permissions).reduce(
(obj, permission) => {
obj[permission] = false;
if (permission == "enter") obj["enter"] = true;
return obj;
},
{}
),
cards: [],
password: "",
},
original_users: [],
};
},
inject: ["user", "socket"],
mounted() {
this.socket.on("users", (data) => {
if (data != null) {
this.$data.original_users = JSON.parse(JSON.stringify(data.users));
data.users.forEach((user) => {
user.edit = {
permissions: false,
cards: user.cards.map((x) => false),
username: false,
};
});
this.$data.users = data.users;
}
});
this.socket.emit("enter_room", { room: "users" });
},
methods: {
addUser() {
this.$data.users.push({
username: this.$data.new_user.username,
permissions: JSON.parse(
JSON.stringify(this.$data.new_user.permissions)
),
cards: this.$data.new_user.cards.slice(),
password: this.$data.new_user.password,
edit: { permissions: false, cards: [false], username: false },
});
this.$data.new_user = {
username: "",
permissions: [],
cards: [],
password: "",
};
},
submitUsers() {
const original_users = this.$data.original_users;
function difference(a, b) {
a = new Set(a);
b = new Set(b);
return new Set([...a].filter((x) => !b.has(x)));
}
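      // Existing users (the first original_users.length entries) are diffed
      // against the snapshot taken in mounted() and only the changed ones are
      // sent via "modify_users"; entries appended after that are treated as
      // new accounts and sent via "create_users" below.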
const changed_users = this.$data.users
.slice(0, original_users.length)
.map((user, i) => {
if (user.username !== original_users[i].username) {
user.current_username = original_users[i].username;
}
return user;
})
.filter((user, i) => {
if (
user.username === original_users[i].username &&
difference(
Object.values(user.permissions),
Object.values(original_users[i].permissions)
).size === 0 &&
difference(user.cards, original_users[i].cards).size === 0
) {
return false;
}
return true;
});
this.socket.emit("modify_users", { users: changed_users });
const new_users = this.$data.users.slice(original_users.length);
this.socket.emit("create_users", { users: new_users });
},
getCard(userIndex, cardIndex) {
this.socket.emit("get_card", (data) => {
if (userIndex === null) {
this.$data.new_user.cards[cardIndex] = data.uid;
} else {
this.$data.users[userIndex].cards[cardIndex] = data.uid;
}
});
},
deleteUser(username) {
if (confirm(`czy na pewno chcesz usunąć użytkownika ${username}?`)) {
this.socket.emit("delete_user", { username: username });
}
},
addCard(index = null) {
if (index === null) {
this.$data.new_user.cards.push("");
} else {
this.$data.users[index].cards.push("");
this.$data.users[index].edit.cards.push(true);
}
},
},
template: `
<ul class="manage-users">
<li class="user" v-for="(usr, i) in users" :key="i">
<div class="input-editable">
<input
class="username existing-user"
v-model="usr.username"
:disabled="!usr.edit.username"
/>
<button
@click="usr.edit.username = true"
class="edit-button edit-username btn waves-effect waves-light right"
v-if="!usr.edit.username"
>
Edit
</button>
</div>
<div class="user-permissions existing-user" v-if="usr.permissions!=undefined">
<div
class="page__checkbox input-field"
v-for="(has_permission, permission, index) in usr.permissions"
>
<label class="checkbox">
<input
type="checkbox"
:name="permission"
:id="permission+i"
:aria-label="permission"
class="checkbox-input"
v-model="has_permission"
:disabled="!usr.edit.permissions"
/>
<span class="checkbox-label">
<span class="checkbox-text">{{permission}}</span>
</span>
</label>
</div>
<button
class="edit-button edit-permissions btn waves-effect waves-light right"
@click="usr.edit.permissions = true"
v-if="!usr.edit.permissions"
>
Edit permissions
</button>
</div>
<ul class="user-cards existing-user">
<li v-for="(card, index) in usr.cards">
<div class="input-editable">
<input
class="card-uid existing-user"
v-model="usr.cards[index]"
:disabled="!usr.edit.cards[index]"
/>
<button
class="edit-button edit-card btn waves-effect waves-light right"
@click="usr.edit.cards[index]=true"
aria-label="Edit card"
v-if="!usr.edit.cards[index]"
>
Edit</button>
<button
class="edit-button reader-input btn waves-effect waves-light"
@click="getCard(i, index)"
aria-label="Get card from reader"
>
Get card from reader
</button>
</div>
</li>
<li>
<button class="new-card plus-button" @click="addCard(i)">Add card</button>
</li>
</ul>
<button
class="delete-button btn waves-effect waves-light"
@click="deleteUser(usr.username)"
aria-label="Delete user"
>
Delete user
</button>
</li>
<li class="user new-user">
<h3>Nowy użytkownik</h3>
<label for="new-user-username">Nazwa użytkownika:</label>
<input class="username new-user" v-model="new_user.username" id="new-user-username" />
<label for="new-user-password">Hasło: <br/> <small>opcjonalne - konieczne tylko do logowania do panelu</small></label>
<input type="password" class="password new-user" v-model="new_user.password" id="new-user-password" />
<div class="user-permissions new-user">
<div
class="page__checkbox input-field"
v-for="(has_permission, permission) in available_permissions"
>
<label class="checkbox" v-if="has_permission">
<input
type="checkbox"
:name="permission"
:aria-label="permission"
class="checkbox-input"
v-model="new_user.permissions[permission]"
/>
<span class="checkbox-label">
<span class="checkbox-text">{{permission}}</span>
</span>
</label>
</div>
</div>
<ul class="user-cards new-user">
<li v-for="(card, index) in new_user.cards">
<div class="input-editable">
<input
class="card-uid new-user"
v-model="new_user.cards[index]"
/>
<button
class="edit-button reader-input btn waves-effect waves-light"
@click="getCard(null, index)"
aria-label="Get card from reader"
>
Get card from reader
</button>
</div>
</li>
<li>
<button class="new-card plus-button" @click="addCard()">Add card</button>
</li>
</ul>
<button class="new-user plus-button" @click="addUser()">Add user</button>
</li>
<li>
<button class="submit-user btn" @click="submitUsers()">Save</button>
</li>
</ul>
`,
};
export default ManageUsers;
|
PypiClean
|
/EDDIE-Tool-1.0.0.tar.gz/EDDIE-Tool-1.0.0/eddietool/common/Directives/icmp.py
|
import inet
import array
import struct
ICMP_MINLEN = 8
ICMP_MASKLEN = 12
ICMP_ECHOREPLY = 0
ICMP_UNREACH = 3
ICMP_UNREACH_NET = 0
ICMP_UNREACH_HOST = 1
ICMP_UNREACH_PROTOCOL = 2
ICMP_UNREACH_PORT = 3
ICMP_UNREACH_NEEDFRAG = 4
ICMP_UNREACH_SRCFAIL = 5
ICMP_SOURCEQUENCH = 4
ICMP_REDIRECT = 5
ICMP_REDIRECT_NET = 0
ICMP_REDIRECT_HOST = 1
ICMP_REDIRECT_TOSNET = 2
ICMP_REDIRECT_TOSHOST = 3
ICMP_ECHO = 8
ICMP_TIMXCEED = 11
ICMP_TIMXCEED_INTRANS = 0
ICMP_TIMXCEED_REASS = 1
ICMP_PARAMPROB = 12
ICMP_TSTAMP = 13
ICMP_TSTAMPREPLY = 14
ICMP_IREQ = 15
ICMP_IREQREPLY = 16
ICMP_MASKREQ = 17
ICMP_MASKREPLY = 18
class Packet:
"""Basic ICMP packet definition.
Equivalent to ICMP_ECHO_REQUEST and ICMP_REPLY packets.
Other packets are defined as subclasses probably.
"""
def __init__(self, packet=None, cksum=1):
if packet:
self.__disassemble(packet, cksum)
else:
self.type = 0
self.code = 0
self.cksum = 0
self.id = 0
self.seq = 0
self.data = ''
def __repr__(self):
return "<ICMP packet %d %d %d %d>" % (self.type, self.code,
self.id, self.seq)
def assemble(self, cksum=1):
idseq = struct.pack('hh', self.id, self.seq)
packet = chr(self.type) + chr(self.code) + '\000\000' + idseq \
+ self.data
if cksum:
self.cksum = inet.cksum(packet)
packet = chr(self.type) + chr(self.code) \
+ struct.pack('H', self.cksum) + idseq + self.data
# Don't need to do any byte-swapping, because idseq is
        # application defined and others are single byte values.
self.__packet = packet
return self.__packet
def __disassemble(self, packet, cksum=1):
if cksum:
our_cksum = inet.cksum(packet)
if our_cksum != 0:
raise ValueError, packet
self.type = ord(packet[0])
self.code = ord(packet[1])
elts = struct.unpack('hhh', packet[2:8])
[self.cksum, self.id, self.seq] = map(lambda x:x & 0xffff, elts)
self.data = packet[8:]
def __compute_cksum(self):
"Use inet.cksum instead"
packet = self.packet
if len(packet) & 1:
packet = packet + '\0'
words = array.array('h', packet)
sum = 0
for word in words:
sum = sum + (word & 0xffff)
hi = sum >> 16
lo = sum & 0xffff
sum = hi + lo
sum = sum + (sum >> 16)
return (~sum) & 0xffff
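# Illustrative usage sketch for the Packet class above (the field values are
# arbitrary examples): build an ICMP echo request and serialize it.
#     pkt = Packet()
#     pkt.type = ICMP_ECHO
#     pkt.id = 1
#     pkt.seq = 1
#     pkt.data = 'ping'
#     raw = pkt.assemble()    # checksummed string ready for a raw socket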
class TimeExceeded(Packet):
def __init__(self, packet=None, cksum=1):
Packet.__init__(self, packet, cksum)
if packet:
if self.type != ICMP_TIMXCEED:
raise ValueError, "supplied packet of wrong type"
else:
self.type = ICMP_TIMXCEED
self.id = self.seq = 0
class Unreachable(Packet):
def __init__(self, packet=None, cksum=1):
Packet.__init__(self, packet, cksum)
if packet:
if self.type != ICMP_UNREACH:
raise ValueError, "supplied packet of wrong type"
else:
self.type = ICMP_UNREACH
self.id = self.seq = 0
|
PypiClean
|
/HtmlList-2.2.2.zip/HtmlList-2.2.2/htmllist/break_html_page.py
|
# TODO: Add optional already built DOM object input.
"""
The module defines BreakHtmlPage class that works with the DOM model.
It is almost twice as slow as break_html_page2 !!! But it uses DOM.
This module uses html5lib to parse the HTML page
http://code.google.com/p/html5lib/
There is an option to use this class without html5lib (so the library does not
have to be present): use the "set_dom" method to set an external DOM object
instead of the "feed" and "close" methods.
I added the hash code of the parent Tag in the HTML tree to the identity of each
Tag object. That makes two tags equal only if they are under the exact same path
in the HTML tree.
"""
from xml.dom import Node
from StringIO import StringIO
# NOTE: I import html5lib inside the "close" method, so the lib doesn't have to
# be present if we are working with a prepared DOM object.
from break_page_seq import BreakPageSeq
from utills import quote_html
ENCODING = "utf-8" # TODO: Do I always need utf-8?
NOT_SELF_CLOSE = "div", "button"
## Private functions that work with the traverse_element function ##
def _write_elm(buff):
""" Return a function (closure) that renders element into buff """
def __write_elm(elm):
""" Render an HTML element """
if elm.nodeType == Node.ELEMENT_NODE:
buff.write('<' + elm.nodeName.encode(ENCODING))
if elm.attributes:
for name,value in elm.attributes.items():
value = quote_html(value)
buff.write(' %s="%s"' % (
name.encode(ENCODING), value.encode(ENCODING)))
if elm.hasChildNodes() or elm.nodeName in NOT_SELF_CLOSE:
buff.write('>')
else:
buff.write(' />')
elif elm.nodeType == Node.COMMENT_NODE:
buff.write("<!-- %s -->" % elm.nodeValue.encode(ENCODING))
else:
buff.write(elm.nodeValue.encode(ENCODING))
return __write_elm
def _write_close_elm(buff):
""" Same for close element (tag) """
def __write_close_elm(elm):
if elm.nodeType == Node.ELEMENT_NODE and ( \
elm.hasChildNodes() or elm.nodeName in NOT_SELF_CLOSE):
buff.write('</%s>' % elm.nodeName.encode(ENCODING))
return __write_close_elm
def traverse_tree(root, stop_elm=None):
""" Experimental function, I don't use it as it is way too slow """
    # The stop flag lives on the outer function so every recursive call sees updates.
    traverse_tree._stop = False
    def _traverse_tree(root, stop_elm):
        if not root or root is stop_elm:
            traverse_tree._stop = True
            return
        yield root
        if not traverse_tree._stop and root.firstChild:
            for itm in _traverse_tree(root.firstChild, stop_elm):
                yield itm
        if not traverse_tree._stop and root.nextSibling:
            for itm in _traverse_tree(root.nextSibling, stop_elm):
                yield itm
return _traverse_tree(root, stop_elm)
class StopTraversing(object): pass
class BreakHtmlPage(BreakPageSeq):
""" Implements the BreakHtmlPage class using DOM and html5lib.
    This class will not return overlapping HTML sections.
"""
def close(self):
""" Parse the HTML buffer """
# I import html5lib only when I am going to use it
from html5lib import HTMLParser
from html5lib import treebuilders
parser = HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
doc = parser.parse(self._html)
self._html.close()
self.set_dom(doc)
def set_dom(self, dom):
""" This public method is unique for this implementation of
        BreakHtmlPage. It lets the system set an already prepared DOM object,
so we don't need to parse the document again.
"""
dom.normalize()
self._orig_lst = tuple(self._traverse_tree(dom.documentElement))
## Class Methods ##
@classmethod
def _traverse_tree(cls, root_elm, stop_elm=None, stop_at_root=True):
""" An help method that creates an iterator that traverses a DOM tree.
It stops in the end of the tree or when elm equal to stop_elm.
If stop_at_root is False it will continue to the sibling of the given
root, if exists.
"""
elm = root_elm
back_up = False
while not elm is None and not elm is stop_elm:
if not back_up:
yield elm
if not back_up and elm.hasChildNodes():
back_up = False
elm = elm.firstChild
elif elm.nextSibling:
back_up = False
elm = elm.nextSibling
else:
back_up = True
elm = elm.parentNode
if stop_at_root and elm is root_elm:
break
@classmethod
def traverse_list(cls, elm_lst, elm_func, elm_close_func=None, stop_elm=None,
parents=None):
""" See base class documentation
parents is for internal use only
"""
if not elm_lst: return None
if parents is None:
parents = set()
for elm in elm_lst:
if elm is stop_elm: return StopTraversing
if elm.parentNode in parents: continue
res = elm_func(elm)
if not res is None:
return res
res = cls.traverse_list(
elm.childNodes, elm_func, elm_close_func, stop_elm, parents)
if not res is None:
return res
if elm_close_func:
res = elm_close_func(elm)
if not res is None:
return res
parents.add(elm)
return None
@classmethod
def list2text(cls, lst, stop_elm=None):
""" See base class documentation """
buff = StringIO()
cls.traverse_list(
lst, _write_elm(buff), _write_close_elm(buff), stop_elm)
return buff.getvalue()
@classmethod
def words_between_elements(cls, start, end):
""" See base class documentation """
lst = [len(elm.nodeValue.split()) for elm in \
cls._traverse_tree(start, end, stop_at_root=False) \
if elm.nodeType == Node.TEXT_NODE and elm.nodeValue.strip()]
return sum(lst)
@classmethod
def get_element_name(cls, elm):
""" Return the element (tag) name """
return elm.nodeName
@classmethod
def get_element_attrs(cls, elm):
""" Return the element (tag) attributes dictionary or None """
if not elm.attributes:
return None
return elm.attributes
    @classmethod
    def get_all_element_data(cls, elm):
        """ Return a tuple of the element name, attributes and the hash value
        of the Tag of the parent element. This makes two tags equal only if
        they have the same name and attributes AND have the exact same path
        from the document root.
        """
return cls.get_element_name(elm), cls.get_element_attrs(elm), \
elm.parentNode._tag._hash if cls.is_tag_element(elm.parentNode) \
else None
@classmethod
def is_tag_element(cls, elm):
""" See base class documentation """
return elm and elm.nodeType == Node.ELEMENT_NODE
@classmethod
def is_text_elm(cls, elm):
""" See base class documentation """
if elm.nodeType == Node.TEXT_NODE and elm.data.strip():
return True
return None
@classmethod
def test2(cls, verbose):
""" Add a test for the tree like functionality to the standard tests
I make it happen mainly in the get_all_element_data method
"""
cls.test(verbose)
from repeat_pattern import RepeatPattern
if verbose:
print "Testing tree attributes..."
html = """<body>
<div>
<span><a>AAA</a></span>
<span><a>BBB</a></span>
</div>
<span><a>CCC</a></span> <!-- Should not take this occurrence -->
</body>"""
bhp = cls()
bhp.feed(html)
bhp.close()
rp = RepeatPattern()
#print ">>>", tuple(bhp.get_tag_list())
rp.process(tuple(bhp.get_tag_list()))
assert rp.repeats == 2, "Found %s occurrences" % rp.repeats
if __name__ == '__main__':
BreakHtmlPage.test2(verbose=True)
print "Test Passed"
|
PypiClean
|
/python-aliyun-sdk-core-2.13.37.tar.gz/python-aliyun-sdk-core-2.13.37/aliyunsdkcore/vendored/requests/packages/chardet/codingstatemachine.py
|
import logging
from .enums import MachineState
class CodingStateMachine(object):
"""
A state machine to verify a byte sequence for a particular encoding. For
each byte the detector receives, it will feed that byte to every active
state machine available, one byte at a time. The state machine changes its
state based on its previous state and the byte it receives. There are 3
states in a state machine that are of interest to an auto-detector:
     START state: This is the state to start with, or a legal byte sequence
                  (i.e. a valid code point) for a character has been identified.
     ME state: This indicates that the state machine identified a byte sequence
               that is specific to the charset it is designed for and that
               there is no other possible encoding which can contain this byte
               sequence. This will lead to an immediate positive answer for
               the detector.
ERROR state: This indicates the state machine identified an illegal byte
sequence for that encoding. This will lead to an immediate
negative answer for this encoding. Detector will exclude this
encoding from consideration from here on.
"""
def __init__(self, sm):
self._model = sm
self._curr_byte_pos = 0
self._curr_char_len = 0
self._curr_state = None
self.logger = logging.getLogger(__name__)
self.reset()
def reset(self):
self._curr_state = MachineState.START
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
byte_class = self._model['class_table'][c]
if self._curr_state == MachineState.START:
self._curr_byte_pos = 0
self._curr_char_len = self._model['char_len_table'][byte_class]
# from byte's class and state_table, we get its next state
curr_state = (self._curr_state * self._model['class_factor']
+ byte_class)
self._curr_state = self._model['state_table'][curr_state]
self._curr_byte_pos += 1
return self._curr_state
def get_current_charlen(self):
return self._curr_char_len
def get_coding_state_machine(self):
return self._model['name']
@property
def language(self):
return self._model['language']
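# The sketch below illustrates how a prober typically drives this state machine,
# as described in the class docstring; the helper name and the model argument
# are assumptions (any model dict such as the ones in mbcssm would do).
def _example_probe(sm_model, data):
    """Feed bytes one at a time; stop on ERROR (illegal) or ITS_ME (certain)."""
    sm = CodingStateMachine(sm_model)
    for byte in bytearray(data):
        state = sm.next_state(byte)
        if state == MachineState.ERROR:   # illegal byte sequence for this encoding
            return False
        if state == MachineState.ITS_ME:  # byte sequence unique to this encoding
            return True
    return None  # still undecided after consuming all bytes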
|
PypiClean
|
/cohesity-sdk-1.1.0.tar.gz/cohesity-sdk-1.1.0/cohesity_sdk/cluster/model/bond_member.py
|
import re # noqa: F401
import sys # noqa: F401
from cohesity_sdk.cluster.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from cohesity_sdk.cluster.model.interface_stats import InterfaceStats
from cohesity_sdk.cluster.model.uplink_switch import UplinkSwitch
globals()['InterfaceStats'] = InterfaceStats
globals()['UplinkSwitch'] = UplinkSwitch
class BondMember(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
          allowed_values (dict): The key is the tuple path to the attribute
              (for var_name this is (var_name,)). The value is a dict
              with a capitalized key describing the allowed value and an allowed
              value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
          validations (dict): The key is the tuple path to the attribute
              (for var_name this is (var_name,)). The value is a dict
              that stores validations for max_length, min_length, max_items,
              min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
              inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('speed',): {
'None': None,
'1GBIT/S': "1Gbit/s",
'10GBIT/S': "10Gbit/s",
'25GBIT/S': "25Gbit/s",
'40GBIT/S': "40Gbit/s",
'100GBIT/S': "100Gbit/s",
'UNKNOWN': "Unknown",
},
('link_state',): {
'None': None,
'UP': "Up",
'DOWN': "Down",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'name': (str, none_type,), # noqa: E501
'mac_address': (str, none_type,), # noqa: E501
'speed': (str, none_type,), # noqa: E501
'link_state': (str, none_type,), # noqa: E501
'active_secondary': (bool, none_type,), # noqa: E501
'slot': (str, none_type,), # noqa: E501
'stats': (InterfaceStats,), # noqa: E501
'uplink_switch': (UplinkSwitch,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'mac_address': 'macAddress', # noqa: E501
'speed': 'speed', # noqa: E501
'link_state': 'linkState', # noqa: E501
'active_secondary': 'activeSecondary', # noqa: E501
'slot': 'slot', # noqa: E501
'stats': 'stats', # noqa: E501
'uplink_switch': 'uplinkSwitch', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BondMember - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                  composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str, none_type): Name of the bond secondary.. [optional] # noqa: E501
mac_address (str, none_type): MAC address of the bond secondary.. [optional] # noqa: E501
speed (str, none_type): Speed of the bond secondary.. [optional] # noqa: E501
link_state (str, none_type): Bond secondary link state.. [optional] # noqa: E501
active_secondary (bool, none_type): Specifies whether or not this is a active secondary. This is only valid in ActiveBackup bonding mode.. [optional] # noqa: E501
slot (str, none_type): Slot information of the bond secondary.. [optional] # noqa: E501
stats (InterfaceStats): [optional] # noqa: E501
uplink_switch (UplinkSwitch): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
PypiClean
|
/pytest_hypo_25-2.3.tar.gz/pytest_hypo_25-2.3/pytest_hypo_25/internal/escalation.py
|
import os
import sys
import traceback
from inspect import getframeinfo
from pathlib import Path
from typing import Dict
import hypothesis
from hypothesis.errors import (
DeadlineExceeded,
HypothesisException,
MultipleFailures,
StopTest,
UnsatisfiedAssumption,
)
def belongs_to(package):
if not hasattr(package, "__file__"): # pragma: no cover
return lambda filepath: False
root = Path(package.__file__).resolve().parent
cache = {str: {}, bytes: {}}
def accept(filepath):
ftype = type(filepath)
try:
return cache[ftype][filepath]
except KeyError:
pass
abspath = Path(filepath).resolve()
try:
abspath.relative_to(root)
result = True
except ValueError:
result = False
cache[ftype][filepath] = result
return result
accept.__name__ = "is_%s_file" % (package.__name__,)
return accept
PREVENT_ESCALATION = os.getenv("HYPOTHESIS_DO_NOT_ESCALATE") == "true"
FILE_CACHE = {} # type: Dict[bytes, bool]
is_hypothesis_file = belongs_to(hypothesis)
HYPOTHESIS_CONTROL_EXCEPTIONS = (DeadlineExceeded, StopTest, UnsatisfiedAssumption)
def mark_for_escalation(e):
if not isinstance(e, HYPOTHESIS_CONTROL_EXCEPTIONS):
e.hypothesis_internal_always_escalate = True
def escalate_hypothesis_internal_error():
if PREVENT_ESCALATION:
return
error_type, e, tb = sys.exc_info()
if getattr(e, "hypothesis_internal_never_escalate", False):
return
if getattr(e, "hypothesis_internal_always_escalate", False):
raise
filepath = traceback.extract_tb(tb)[-1][0]
if is_hypothesis_file(filepath) and not isinstance(
e, (HypothesisException,) + HYPOTHESIS_CONTROL_EXCEPTIONS
):
raise
def get_trimmed_traceback():
"""Return the current traceback, minus any frames added by Hypothesis."""
error_type, _, tb = sys.exc_info()
# Avoid trimming the traceback if we're in verbose mode, or the error
# was raised inside Hypothesis (and is not a MultipleFailures)
if hypothesis.settings.default.verbosity >= hypothesis.Verbosity.debug or (
is_hypothesis_file(traceback.extract_tb(tb)[-1][0])
and not isinstance(error_type, MultipleFailures)
):
return tb
while tb is not None and (
# If the frame is from one of our files, it's been added by Hypothesis.
is_hypothesis_file(getframeinfo(tb.tb_frame)[0])
# But our `@proxies` decorator overrides the source location,
# so we check for an attribute it injects into the frame too.
or tb.tb_frame.f_globals.get("__hypothesistracebackhide__") is True
):
tb = tb.tb_next
return tb
|
PypiClean
|
/shells_cae-0.0.19.tar.gz/shells_cae-0.0.19/shells_cae/eb_solver.py
|
import cffi
import os
import numpy as np
import sys
from typing import Sequence, Union
__all__ = ['PointMassTrajectorySolver', 'PointMassTrajectoryHSolver']
def load_lib():
HERE = os.path.dirname(__file__)
if sys.platform.startswith('linux'):
LIB_FILE_NAME = os.path.abspath(os.path.join(HERE, ".", "compiled", "build", "bin", "lib", "libsextbal.so"))
elif sys.platform.startswith('win32'):
LIB_FILE_NAME = os.path.abspath(os.path.join(HERE, ".", "compiled", "build", "bin", "libsextbal.dll"))
else:
raise Exception('Неподдерживаемая платформа')
ffi = cffi.FFI()
ffi.cdef(
'''
void count_eb(double *y0, double d, double q, double *cx_list, double *mach_list, int n_mach,\n
double max_distance, double tstep, double tmax);
'''
)
ffi.cdef(
'''
void dense_count_eb(double *y_array, double d, double q, double *cx_list, double *mach_list, int n_mach,\n
double max_distance, double tstep, double tmax, int *n_tsteps);
'''
)
ffi.cdef(
'''
typedef struct shell{
double d;
double L;
double q;
double A;
double B;
double mu;
double c_q;
double h;
} shell;
'''
)
ffi.cdef(
'''
void dense_count_eb_h(
double *y_array,
double *cx_list, double *mach_list, int n_mach,
shell *ashell,
double *diag_vals,
double eta,
double sigma_dop,
double delta_dop,
double *sigma_array,
double *delta_array,
double max_distance,
double tstep,
double tmax,
int *n_tsteps
);
'''
)
bal_lib = ffi.dlopen(LIB_FILE_NAME)
return ffi, bal_lib
FFI, EBAL_LIB = load_lib()
class PointMassTrajectorySolver:
mah_list = np.array([0.4, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7,
2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6])
cx_list = np.array([0.157, 0.158, 0.158, 0.160, 0.190, 0.325, 0.378, 0.385, 0.381, 0.371,
0.361, 0.351, 0.342, 0.332, 0.324, 0.316, 0.309, 0.303, 0.297,
0.292, 0.287, 0.283, 0.279, 0.277, 0.273, 0.270, 0.267, 0.265,
0.263, 0.263, 0.261, 0.260])
name = 'point_mass_eb_solver'
preprocess_data = dict(
d=None,
q=None,
theta_angle=None,
v0=None,
cx_list=None,
mach_list=None
)
def preprocessor(self, data: dict, global_state: dict):
self.preprocess_data['d'] = data['gun_char']['d']
self.preprocess_data['q'] = global_state['mcc_solver']['shell']['q']
self.preprocess_data['theta_angle'] = data['initial_cond']['theta0']
self.preprocess_data['v0'] = data['initial_cond']['V']
self.preprocess_data['cx_list'] = global_state['mcdrag_solver']['cx_list']
self.preprocess_data['mach_list'] = global_state['mcdrag_solver']['mach_list']
def run(self, data: dict, global_state: dict):
eb_data = self.preprocess_data
eb_settings = data['point_mass_eb_settings']
v0 = eb_data['v0']
theta_angle = eb_data['theta_angle']
q = eb_data['q']
d = eb_data['d']
cx_list = eb_data['cx_list']
mach_list = eb_data['mach_list']
max_distance = eb_settings['max_distance']
tstep = eb_settings['tstep']
tmax = eb_settings['tmax']
y0 = np.array([0., 0., v0, np.deg2rad(theta_angle)], dtype=np.float64, order='F')
cx_list = np.asfortranarray(cx_list, dtype=np.float64)
mach_list = np.asfortranarray(mach_list, dtype=np.float64)
y0_ptr = FFI.cast("double*", y0.__array_interface__['data'][0])
cx_list_ptr = FFI.cast("double*", cx_list.__array_interface__['data'][0])
mach_list_ptr = FFI.cast("double*", mach_list.__array_interface__['data'][0])
EBAL_LIB.count_eb(
y0_ptr, d, q, cx_list_ptr, mach_list_ptr, len(cx_list),
max_distance, tstep, tmax
)
global_state[PointMassTrajectorySolver.name] = dict(L_max=y0[0],
vc=y0[2])
class PointMassTrajectoryHSolver:
name = 'point_mass_ebh_solver'
preprocess_data = dict(
d=None,
L=None,
q=None,
A=None,
B=None,
h=None,
mu=None,
c_q=None,
sigma_dop=0.6,
delta_dop=2.,
theta_angle=None,
v0=None,
cx_list=None,
mach_list=None
)
def preprocessor(self, data: dict, global_state: dict):
self.preprocess_data['d'] = data['gun_char']['d']
self.preprocess_data['eta_k'] = data['gun_char']['eta_k']
self.preprocess_data['q'] = global_state['mcc_solver']['shell']['q']
self.preprocess_data['L'] = global_state['geometry_solver']['L_all']
self.preprocess_data['A'] = global_state['mcc_solver']['shell']['A']
self.preprocess_data['B'] = global_state['mcc_solver']['shell']['B']
self.preprocess_data['h'] = global_state['mcc_solver']['shell']['h']
self.preprocess_data['mu'] = global_state['mcc_solver']['shell']['mu']
self.preprocess_data['c_q'] = global_state['mcc_solver']['shell']['c_q']
self.preprocess_data['theta_angle'] = data['initial_cond']['theta0']
self.preprocess_data['v0'] = data['initial_cond']['V']
self.preprocess_data['cx_list'] = global_state['kontur_solver']['cx_list']
self.preprocess_data['mach_list'] = global_state['kontur_solver']['mach_list']
def _get_fortran_shell(self):
ashell = FFI.new('shell *ashell')
ashell[0].d = self.preprocess_data['d']
ashell[0].q = self.preprocess_data['q']
ashell[0].A = self.preprocess_data['A']
ashell[0].B = self.preprocess_data['B']
ashell[0].mu = self.preprocess_data['mu']
ashell[0].c_q = self.preprocess_data['c_q']
ashell[0].L = self.preprocess_data['L']
ashell[0].h = self.preprocess_data['h']
return ashell
    # Use the stability diagram to determine whether the flight is stable
    def stability_define(self, m, n, eta_k, h, d, hd_kr, eta_kr):
        # Gyroscopic stability
        h_d = h / d
        eta_list = [i / 1000 for i in range(100)]
        h_d_sigma_list = [(eta ** 2) * m for eta in eta_list]
        # Directional stability of the flight
        h_d_stab_list = [eta * n for eta in eta_list]
        # Determine whether the projectile is stable
        # TODO: finish this
def run(self, data: dict, global_state: dict):
ebh_settings = data['settings']['point_mass_eb']
tstep = ebh_settings['tstep']
tmax = ebh_settings['tmax']
eta_k = self.preprocess_data['eta_k']
ashell = self._get_fortran_shell()
n_tsteps = FFI.new('int *')
n_tsteps[0] = int(tmax / tstep)
cx_list = self.preprocess_data['cx_list']
mach_list = self.preprocess_data['mach_list']
cx_list = np.asfortranarray(cx_list, dtype=np.float64)
mach_list = np.asfortranarray(mach_list, dtype=np.float64)
y_array = np.zeros((5, n_tsteps[0]), dtype=np.float64, order='F')
y_array[:, 0] = [0., 0., self.preprocess_data['v0'], np.deg2rad(self.preprocess_data['theta_angle']), 0.0]
sigma_array = np.zeros(n_tsteps[0], dtype=np.float64, order='F')
delta_array = np.zeros(n_tsteps[0], dtype=np.float64, order='F')
diag_vals_array = np.empty(4, dtype=np.float64, order='F')
y_array_ptr = FFI.cast("double*", y_array.__array_interface__['data'][0])
cx_list_ptr = FFI.cast("double*", cx_list.__array_interface__['data'][0])
mach_list_ptr = FFI.cast("double*", mach_list.__array_interface__['data'][0])
sigma_array_ptr = FFI.cast("double*", sigma_array.__array_interface__['data'][0])
delta_array_ptr = FFI.cast("double*", delta_array.__array_interface__['data'][0])
diag_vals_array_ptr = FFI.cast("double*", diag_vals_array.__array_interface__['data'][0])
EBAL_LIB.dense_count_eb_h(
y_array_ptr,
cx_list_ptr, mach_list_ptr, len(cx_list),
ashell,
diag_vals_array_ptr,
eta_k,
self.preprocess_data['sigma_dop'],
np.deg2rad(self.preprocess_data['delta_dop']),
sigma_array_ptr,
delta_array_ptr,
ebh_settings['max_distance'],
tstep,
tmax,
n_tsteps
)
        # Stability diagram
self.stability_define(m=diag_vals_array[0], n=diag_vals_array[1], eta_k=eta_k,
h=global_state['mcc_solver']['shell']['h'], d=data['shell_size']['d'],
hd_kr=diag_vals_array[2], eta_kr=diag_vals_array[3])
t_s = np.linspace(0., tstep * n_tsteps[0], n_tsteps[0])
y_array = y_array[:, :n_tsteps[0]]
sigma_array = sigma_array[:n_tsteps[0]]
delta_array = delta_array[:n_tsteps[0]]
y_array[3] = np.rad2deg(y_array[3])
global_state[PointMassTrajectoryHSolver.name] = dict(
m=diag_vals_array[0],
n=diag_vals_array[1],
hd_kr=diag_vals_array[2],
eta_kr=diag_vals_array[3],
L_max=y_array[0, -1],
vc=y_array[2, -1],
t_array=t_s,
x_array=y_array[0],
y_array=y_array[1],
v_array=y_array[2],
theta_array=y_array[3],
omega_array=y_array[4],
sigma_array=sigma_array,
delta_array=delta_array
)
|
PypiClean
|
/certora-cli-alpha-jtoman-try-catch-20230515.16.47.942035.tar.gz/certora-cli-alpha-jtoman-try-catch-20230515.16.47.942035/certora_cli/Shared/certoraLogging.py
|
import logging
import sys
from typing import Dict, Any, Set, Optional, Iterable, List
from Shared.certoraUtils import write_json_file, red_text, orange_text, get_debug_log_file
from Shared.certoraUtils import get_resource_errors_file
class ColoredString(logging.Formatter):
def __init__(self, msg_fmt: str = "%(name)s - %(message)s") -> None:
super().__init__(msg_fmt)
def format(self, record: logging.LogRecord) -> str:
to_ret = super().format(record)
if record.levelno == logging.WARN:
return orange_text("WARNING") + ": " + to_ret
elif record.levelno >= logging.ERROR: # aka ERROR, FATAL, and CRITICAL
return red_text(record.levelname) + ": " + to_ret
else: # aka WARNING, INFO, and DEBUG
return record.levelname + ": " + to_ret
class TopicFilter(logging.Filter):
def __init__(self, names: Iterable[str]) -> None:
super().__init__()
self.logged_names = set(names)
def filter(self, record: logging.LogRecord) -> bool:
return (record.name in self.logged_names) or record.levelno >= logging.WARN
class ResourceErrorHandler(logging.NullHandler):
"""
A handler that creates a JSON error report for all problems concerning resources, like Solidity and spec files.
    The handler gathers log messages, filters them, and maintains a local data state.
To generate the report, dump_to_log must be called. It should be called in the shutdown code of certoraRun.py
This class is a Singleton, which should prevent most concurrency issues.
~~~~
Filter logic:
We only care about messages with a topic in resource topics, that are of logging level CRITICAL.
We prettify the message string itself to be concise and less verbose.
TODO - fetch typechecking errors from a file. The typechecking jar should generate an errors file, giving us more
control.
Note - we have no choice but to filter SOLC errors, for example.
"""
resource_topics = ["solc", "type_check"]
"""
errors_info is a JSON object that should look like this:
{
"topics": [
{
"name": "",
"messages": [
{
"message": "",
"location": []
}
]
}
]
}
"""
errors_info: Dict[str, Any] = {
"topics": []
}
# ~~~ Message editing constants
"""
    If one of these identifiers is present in the message, we log it. This is to avoid superfluous messages like
"typechecking failed".
"Severe compiler warning:" is there to handle errors originating from certoraBuild.check_for_errors_and_warnings()
"""
error_identifiers = ["Syntax error", "Severe compiler warning:", "error:\n"]
# We delete these prefixes and everything that came before them
prefix_delimiters = ["ERROR ALWAYS - ", "ParserError:\n", "error:\n"]
def __init__(self) -> None:
super(ResourceErrorHandler, self).__init__()
def handle(self, record: logging.LogRecord) -> bool:
if (record.name in self.resource_topics) and record.levelno >= logging.CRITICAL:
message = record.getMessage()
for identifier in self.error_identifiers:
if identifier in message:
for delimiter in self.prefix_delimiters:
if delimiter in message:
message = message.split(delimiter)[1].strip()
message = message.splitlines()[0] # Removing all but the first remaining line
# Adding the message to errors_info
error_dict = {
"message": message,
"location": [] # TODO - add location in the future, at least for Syntax errors
}
topic_found = False
for topic in self.errors_info["topics"]:
if topic["name"] == record.name:
topic["messages"].append(error_dict)
topic_found = True
break
if not topic_found:
topic_dict = {
"name": record.name,
"messages": [
error_dict
]
}
self.errors_info["topics"].append(topic_dict)
break # Do not log the same error twice, even if it has more than one identifier
return True
def dump_to_log(self) -> None:
write_json_file(self.errors_info, get_resource_errors_file())
def close(self) -> None:
self.dump_to_log()
super().close()
class DebugLogHandler(logging.FileHandler):
"""
    A handler that writes all records, at every level from DEBUG through CRITICAL, to the debug log file that is
    sent to the cloud.
    The records written concern the topics listed in the filter below.
"""
def __init__(self) -> None:
super().__init__(get_debug_log_file())
self.set_name("debug_log")
self.level = logging.DEBUG # Always set this handler's log-level to debug
self.addFilter(TopicFilter(["arguments",
"build_conf",
"finder_instrumentaton",
"rpc",
"run",
"solc",
"type_check",
"verification"
]))
class LoggingManager():
"""
A class that manages logs, be they file logs or stdout logs. Used for:
* Adding or removing logs
* Setting log levels and outputs
* Checking whether we are in debug mode via LoggingManager().is_debugging
"""
def __init__(self, quiet: bool = False, debug: bool = False,
debug_topics: Optional[List[str]] = None,
show_debug_topics: bool = False) -> None:
"""
        @param quiet: if True, we show minimal log messages, and the logging level is WARNING.
        @param debug:
            Ignored if quiet is True.
            If False, we are not in DEBUG mode, and the logging level is WARNING.
            If True, the logging level is DEBUG.
        @param debug_topics:
            Ignored unless debug is True. A list of debug topic names; only debug
            messages related to loggers of those topics are recorded.
            If it is None or an empty list, we record ALL topics.
        @param show_debug_topics: Ignored if either quiet is True or debug is False. If True, sets the logging
            message format to show the topic of the logger that sent them.
"""
self.debug_log_handler = DebugLogHandler()
self.resource_error_handler = ResourceErrorHandler()
self.stdout_handler = logging.StreamHandler(stream=sys.stdout)
self.handlers: Set[logging.Handler] = set()
root_logger = logging.root
self.orig_root_log_level = root_logger.level # used to restore the root logger's level after exit
root_logger.setLevel(logging.NOTSET) # We record all logs in the debug log file
handler_list: List[logging.Handler] = [self.debug_log_handler, self.stdout_handler, self.resource_error_handler]
for handler in handler_list:
self.__add_handler(handler)
self.set_log_level_and_format(quiet, debug, debug_topics, show_debug_topics)
def __tear_down(self) -> None:
"""
A destructor - releases all resources and restores the root logger to the state it was in before this class
was constructed
"""
root_logger = logging.root
root_logger.setLevel(self.orig_root_log_level)
while self.handlers:
_handler = next(iter(self.handlers))
self.__remove_handler(_handler)
def __add_handler(self, handler: logging.Handler) -> None:
"""
Adds a new handler to the root logger
"""
if handler not in self.handlers:
self.handlers.add(handler)
logging.root.addHandler(handler)
else:
logging.warning(f"Tried to add a handler that was already active: {handler}")
def __remove_handler(self, handler: logging.Handler) -> None:
"""
Closes and removes a handler from the root logger
"""
if handler in self.handlers:
try:
handler.close()
except Exception as e:
logging.warning(f"Failed to close {handler}: {repr(e)}")
self.handlers.remove(handler)
logging.root.removeHandler(handler)
else:
logging.warning(f"Tried to remove a handler that is not active: {handler}")
def set_log_level_and_format(
self,
is_quiet: bool = False,
debug: bool = False,
debug_topics: Optional[List[str]] = None,
show_debug_topics: bool = False) -> None:
"""
Sets the logging level and log message format.
        @param is_quiet: if True, we show minimal log messages, the logging level is WARNING, and no debug topics
            can be enabled.
        @param debug: if True, the logging level is DEBUG and debug information is shown.
        @param debug_topics:
            Ignored if is_quiet is True or debug is False.
            A list of debug topic names; only debug messages related to loggers
            of those topics are recorded.
            If it is None or an empty list, we record ALL topics.
        @param show_debug_topics: Ignored if either is_quiet is True or debug is False. If True, sets the logging
            message format to show the topic of the logger that sent them.
"""
self.__format_stdout_log_messages(show_debug_topics)
self.__set_logging_level(is_quiet, debug, debug_topics)
def remove_debug_logger(self) -> None:
self.__remove_handler(self.debug_log_handler)
def __format_stdout_log_messages(self, show_debug_topics: bool) -> None:
"""
Sets the log message format.
@param show_debug_topics:
If True, sets the logging message format to show the topic of the logger that sent them.
"""
if show_debug_topics:
base_message = "%(name)s - %(message)s"
else:
base_message = "%(message)s"
if sys.stdout.isatty():
self.stdout_handler.setFormatter(ColoredString(base_message))
else:
self.stdout_handler.setFormatter(logging.Formatter(f'%(levelname)s: {base_message}'))
def __set_logging_level(self, is_quiet: bool, debug: bool = False, debug_topics: Optional[List[str]] = None) \
-> None:
"""
Sets the logging level.
        @param is_quiet: if True, we show minimal log messages, the logging level is WARNING, and no debug topics
            can be enabled
        @param debug: whether debug mode is enabled; if True, the logging level is DEBUG
        @param debug_topics:
            Ignored if is_quiet is True or debug is False.
            A list of debug topic names; only debug messages related to loggers
            of those topics are recorded.
            If it is None or an empty list, we record ALL topics.
"""
if is_quiet or not debug:
self.__set_handlers_level(logging.WARNING)
self.is_debugging = False
else:
self.__set_handlers_level(logging.DEBUG)
self.is_debugging = True
self.__set_topics_filter(debug_topics)
def __set_handlers_level(self, level: int) -> None:
"""
Sets the level of all handlers to the given logging level, except the debug log handler
@param level - A logging level such as logging.DEBUG
We assume the level is a number that represents a valid logging level!
"""
for handler in self.handlers:
if handler != self.debug_log_handler:
handler.setLevel(level)
def __set_topics_filter(self, debug_topics: Optional[List[str]] = None) -> None:
"""
Adds a filter to the stdout logger to ignore logging topics not provided.
@param debug_topics -
If it is None, we are not in DEBUG mode, and logging level is WARNING.
Otherwise, logging level is DEBUG, and it is a list of debug topic names.
Only debug messages related to loggers of those topics are recorded.
If it is an empty list, we record ALL topics.
"""
for _filter in self.stdout_handler.filters:
self.stdout_handler.removeFilter(_filter)
if self.is_debugging and debug_topics is not None and len(debug_topics) > 0:
topics = [n.strip() for n in debug_topics]
self.stdout_handler.addFilter(TopicFilter(topics))
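# Illustrative note (inferred from the code above, not from the original
# docstrings): with debugging enabled and e.g. debug_topics=["network", "io"],
# the TopicFilter attached here only lets records from loggers belonging to
# those topics reach stdout; with debug_topics=[] no filter is added, so every
# topic is shown.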
def __del__(self) -> None:
self.__tear_down()
|
PypiClean
|
/pyGSTi-0.9.11.2-cp37-cp37m-win_amd64.whl/pygsti/baseobjs/opcalc/slowopcalc.py
|
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
from functools import partial as _partial
import numpy as _np
# Has an optimized cython implementation
def _typed_bulk_eval_compact_polynomials(vtape, ctape, paramvec, dest_shape, dtype="auto"):
"""
Evaluate many compact polynomial forms at a given set of variable values.
Parameters
----------
vtape, ctape : numpy.ndarray
Specifies "variable" and "coefficient" 1D numpy arrays to evaluate.
These "tapes" can be generated by concatenating the tapes of individual
compact-polynomial tuples returned by :meth:`Polynomial.compact`.
paramvec : array-like
An object that can be indexed so that `paramvec[i]` gives the
numerical value to substitute for i-th polynomial variable (x_i).
dest_shape : tuple
The shape of the final array of evaluated polynomials. The resulting
1D array of evaluated polynomials is reshaped accordingly.
dtype : {"auto", "real", "complex}
The type of the coefficient array that is returned.
Returns
-------
numpy.ndarray
An array of the same type as the coefficient tape or with the type
given by `dtype`, and with shape given by `dest_shape`.
"""
if dtype == "auto":
result = _np.empty(dest_shape, ctape.dtype) # auto-determine type?
elif dtype == "complex":
result = _np.empty(dest_shape, complex)
elif dtype == "real":
result = _np.empty(dest_shape, 'd')
else:
raise ValueError("Invalid dtype: %s" % dtype)
res = result.flat # for 1D access
c = 0; i = 0; r = 0
while i < vtape.size:
poly_val = 0
nTerms = vtape[i]; i += 1
#print("POLY w/%d terms (i=%d)" % (nTerms,i))
for m in range(nTerms):
nVars = vtape[i]; i += 1 # number of variable indices in this term
a = ctape[c]; c += 1
#print(" TERM%d: %d vars, coeff=%s" % (m,nVars,str(a)))
for k in range(nVars):
a *= paramvec[vtape[i]]; i += 1
poly_val += a
#print(" -> added %s to poly_val = %s" % (str(a),str(poly_val))," i=%d, vsize=%d" % (i,vtape.size))
res[r] = poly_val; r += 1
assert(c == ctape.size), "Coeff Tape length error: %d != %d !" % (c, ctape.size)
assert(r == result.size), "Result/Tape size mismatch: only %d result entries filled!" % r
return result
# These have separate cython implementations but the same python implementation, so we'll simply alias the names
bulk_eval_compact_polynomials_real = _partial(_typed_bulk_eval_compact_polynomials, dtype='real')
bulk_eval_compact_polynomials_complex = _partial(_typed_bulk_eval_compact_polynomials, dtype='complex')
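# Illustrative sketch (not part of pyGSTi's API; the helper name is hypothetical):
# a compact "tape" stores [n_terms, then for each term: n_vars followed by that
# many variable indices] in `vtape`, with one coefficient per term in `ctape`.
def _example_bulk_eval():
    # 3*x0*x1 + 2*x2**2 -> two terms: (x0, x1) with coeff 3 and (x2, x2) with coeff 2
    vtape = _np.array([2, 2, 0, 1, 2, 2, 2], _np.int64)
    ctape = _np.array([3.0, 2.0], 'd')
    paramvec = _np.array([1.0, 2.0, 3.0], 'd')  # (x0, x1, x2) = (1, 2, 3)
    result = bulk_eval_compact_polynomials_real(vtape, ctape, paramvec, (1,))
    assert abs(result[0] - 24.0) < 1e-12  # 3*1*2 + 2*3**2 = 24
    return result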
def _typed_bulk_eval_compact_polynomials_derivs(vtape, ctape, wrt_params, paramvec, dest_shape, dtype="auto"):
#Note: assumes wrt_params is SORTED but doesn't assert it like Python version does
vtape_sz = vtape.size
wrt_sz = wrt_params.size
assert(len(dest_shape) == 2)
assert(len(wrt_params) == dest_shape[1])
if dtype == "auto":
result = _np.zeros(dest_shape, ctape.dtype) # auto-determine type?
elif dtype == "complex":
result = _np.zeros(dest_shape, complex) # indices [iPoly, iParam]
elif dtype == "real":
result = _np.zeros(dest_shape, 'd') # indices [iPoly, iParam]
else:
raise ValueError("Invalid dtype: %s" % dtype)
c = 0; i = 0; iPoly = 0
while i < vtape_sz:
j = i # increment j instead of i for this poly
nTerms = vtape[j]; j += 1
#print "POLY w/%d terms (i=%d)" % (nTerms,i)
for m in range(nTerms):
coeff = ctape[c]; c += 1
nVars = vtape[j]; j += 1 # number of variable indices in this term
#print " TERM%d: %d vars, coeff=%s" % (m,nVars,str(coeff))
cur_iWrt = 0
j0 = j # the vtape index where the current term starts
j1 = j + nVars # the ending index
#Loop to get counts of each variable index that is also in `wrt`.
# Once we've passed an element of `wrt`, process it, since we can't
# see it any more (the var indices are sorted).
while j < j1: # loop over variable indices for this term
# can't be while True above in case nVars == 0 (then vtape[j] isn't valid)
#find an iVar that is also in wrt.
# - increment the cur_iWrt or j as needed
while cur_iWrt < wrt_sz and vtape[j] > wrt_params[cur_iWrt]: # condition to increment cur_iWrt
cur_iWrt += 1 # so wrt_params[cur_iWrt] >= vtape[j]
if cur_iWrt == wrt_sz: break # no more possible iVars we're interested in;
# we're done with all wrt elements
# - at this point we know wrt[cur_iWrt] is valid and wrt[cur_iWrt] >= vtape[j]
cur_wrt = wrt_params[cur_iWrt]
while j < j1 and vtape[j] < cur_wrt:
j += 1 # so vtape[j] >= wrt[cur_iWrt]
if j == j1: break # no more iVars - we're done
#print " check j=%d, val=%d, wrt=%d, cur_iWrt=%d" % (j,vtape[j],cur_wrt,cur_iWrt)
if vtape[j] == cur_wrt:
#Yay! a value we're looking for is present in the vtape.
# Figure out how many there are (easy since vtape is sorted
# and we'll always stop on the first one)
cnt = 0
while j < j1 and vtape[j] == cur_wrt:
cnt += 1; j += 1
#Process cur_iWrt: add a term to evaluated poly for derivative w.r.t. wrt_params[cur_iWrt]
a = coeff * cnt
for k in range(j0, j1):
if k == j - 1: continue # remove this index
a *= paramvec[vtape[k]]
result[iPoly, cur_iWrt] += a
cur_iWrt += 1 # processed this wrt param - move to next one
j = j1 # move to next term; j may not have been incremented if we exited b/c of cur_iWrt reaching end
i = j # update location in vtape after processing poly - actually could just use i instead of j it seems??
iPoly += 1
return result
# These have separate cython implementations but the same python implementation, so we'll simply alias the names
bulk_eval_compact_polynomials_derivs_real = _partial(_typed_bulk_eval_compact_polynomials_derivs, dtype='real')
bulk_eval_compact_polynomials_derivs_complex = _partial(_typed_bulk_eval_compact_polynomials_derivs, dtype='complex')
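# Illustrative note (not part of pyGSTi's API): for the tape of 3*x0*x1 + 2*x2**2
# from the example above, evaluating derivatives with respect to the sorted
# parameter indices [0, 2] at (x0, x1, x2) = (1, 2, 3) would look like
#
#     bulk_eval_compact_polynomials_derivs_real(
#         vtape, ctape, _np.array([0, 2], _np.int64), paramvec, (1, 2))
#     # -> array([[ 6., 12.]])   i.e. d/dx0 = 3*x1 = 6 and d/dx2 = 4*x2 = 12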
def abs_sum_bulk_eval_compact_polynomials_complex(vtape, ctape, paramvec, dest_size, **kwargs):
"""Equivalent to np.sum(np.abs(bulk_eval_compact_polynomials_complex(.)))"""
return _np.sum(_np.abs(bulk_eval_compact_polynomials_complex(vtape, ctape, paramvec, (dest_size,), **kwargs)))
def compact_deriv(vtape, ctape, wrt_params):
"""
Take the derivative of one or more compact Polynomials with respect
to one or more variables/parameters.
Parameters
----------
vtape, ctape : numpy.ndarray
Specifies "variable" and "coefficient" 1D numpy arrays to differentiate.
These "tapes" can be generated by concatenating the tapes of individual
compact-polynomial tuples returned by :meth:`Polynomial.compact`.
wrt_params : list
The variable indices to differentiate with respect to. They
must be sorted in ascending order. E.g. "[0,3]" means separately
differentiate w.r.t. x_0 and x_3 (the resulting tapes are concatenated
first by wrt_param and then by poly).
Returns
-------
vtape, ctape : numpy.ndarray
"""
result_vtape = []
result_ctape = []
wrt = sorted(wrt_params)
assert(wrt == list(wrt_params)), "`wrt_params` (%s) must be in ascending order!" % wrt_params
#print("TAPE SIZE = ",vtape.size)
c = 0; i = 0
while i < vtape.size:
j = i # increment j instead of i for this poly
nTerms = vtape[j]; j += 1
dctapes = [list() for x in range(len(wrt))]
dvtapes = [list() for x in range(len(wrt))]
dnterms = [0] * len(wrt)
#print("POLY w/%d terms (i=%d)" % (nTerms,i))
for m in range(nTerms):
coeff = ctape[c]; c += 1
nVars = vtape[j]; j += 1 # number of variable indices in this term
#print(" TERM%d: %d vars, coeff=%s" % (m,nVars,str(coeff)))
cur_iWrt = 0
j0 = j # the vtape index where the current term starts
#Loop to get counts of each variable index that is also in `wrt`.
# Once we've passed an element of `wrt`, process it, since we can't
# see it any more (the var indices are sorted).
while j < j0 + nVars: # loop over variable indices for this term
# can't be while True above in case nVars == 0 (then vtape[j] isn't valid)
#find an iVar that is also in wrt.
# - increment the cur_iWrt or j as needed
while cur_iWrt < len(wrt) and vtape[j] > wrt[cur_iWrt]: # condition to increment cur_iWrt
cur_iWrt += 1 # so wrt[cur_iWrt] >= vtape[j]
if cur_iWrt == len(wrt): break # no more possible iVars we're interested in;
# we're done with all wrt elements
# - at this point we know wrt[cur_iWrt] is valid and wrt[cur_iWrt] >= vtape[j]
while j < j0 + nVars and vtape[j] < wrt[cur_iWrt]:
j += 1 # so vtape[j] >= wrt[cur_iWrt]
if j == j0 + nVars: break # no more iVars - we're done
#print(" check j=%d, val=%d, wrt=%d, cur_iWrt=%d" % (j,vtape[j],wrt[cur_iWrt],cur_iWrt))
if vtape[j] == wrt[cur_iWrt]:
#Yay! a value we're looking for is present in the vtape.
# Figure out how many there are (easy since vtape is sorted
# and we'll always stop on the first one)
cnt = 0
while j < j0 + nVars and vtape[j] == wrt[cur_iWrt]:
cnt += 1; j += 1
#Process cur_iWrt: add a term to tape for cur_iWrt
dvars = list(vtape[j0:j - 1]) + list(vtape[j:j0 + nVars]) # removes last wrt[cur_iWrt] var
dctapes[cur_iWrt].append(coeff * cnt)
dvtapes[cur_iWrt].extend([nVars - 1] + dvars)
dnterms[cur_iWrt] += 1
# print(" wrt=%d found cnt=%d: adding deriv term coeff=%f vars=%s" \
# % (wrt[cur_iWrt], cnt, coeff*cnt, [nVars-1] + dvars))
cur_iWrt += 1 # processed this wrt param - move to next one
#Now term has been processed, adding derivative terms to the dctapes and dvtapes "tape-lists"
# We continue processing terms, adding to these tape lists, until all the terms of the
# current poly are processed. Then we can concatenate the tapes for each wrt_params element.
# Move to next term; j may not have been incremented if we exited b/c of cur_iWrt reaching end
j = j0 + nVars
#Now all terms are processed - concatenate tapes for wrt_params and add to resulting tape.
for nTerms, dvtape, dctape in zip(dnterms, dvtapes, dctapes):
result_vtape.extend([nTerms] + dvtape)
result_ctape.extend(dctape)
i = j # update location in vtape after processing poly - actually could just use i instead of j it seems??
return _np.array(result_vtape, _np.int64), _np.array(result_ctape, complex)
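# Illustrative sketch (not part of pyGSTi's API; the helper name is hypothetical):
# differentiating the compact tape of 3*x0*x1 + 2*x2**2 with respect to x0 yields
# the tape of 3*x1, which can be fed back into the bulk evaluators above.
def _example_compact_deriv():
    vtape = _np.array([2, 2, 0, 1, 2, 2, 2], _np.int64)
    ctape = _np.array([3.0, 2.0], complex)
    dvtape, dctape = compact_deriv(vtape, ctape, [0])
    # dvtape == [1, 1, 1] (one term containing the single variable x1), dctape == [3+0j]
    paramvec = _np.array([1.0, 2.0, 3.0], 'd')
    deriv_val = bulk_eval_compact_polynomials_complex(dvtape, dctape, paramvec, (1,))
    assert abs(deriv_val[0] - 6.0) < 1e-12  # d/dx0(3*x0*x1 + 2*x2**2) = 3*x1 = 6
    return dvtape, dctape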
def float_product(ar):
return _np.prod(ar)
|
PypiClean
|
/pysnmp-bw-5.0.3.tar.gz/pysnmp-bw-5.0.3/pysnmp/hlapi/v3arch/lcd.py
|
from pyasn1.compat.octets import null
from pysnmp import error
from pysnmp import nextid
from pysnmp.entity import config
from pysnmp.hlapi.v3arch.auth import *
__all__ = ['CommandGeneratorLcdConfigurator',
'NotificationOriginatorLcdConfigurator']
class AbstractLcdConfigurator(object):
nextID = nextid.Integer(0xffffffff)
cacheKeys = []
def _getCache(self, snmpEngine):
cacheId = self.__class__.__name__
cache = snmpEngine.getUserContext(cacheId)
if cache is None:
cache = dict([(x, {}) for x in self.cacheKeys])
snmpEngine.setUserContext(**{cacheId: cache})
return cache
def configure(self, snmpEngine, *args, **kwargs):
pass
def unconfigure(self, snmpEngine, *args, **kwargs):
pass
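# Illustrative note (inferred from the code, not from pysnmp documentation): each
# configurator keeps a per-engine dict in the SNMP engine's user context, keyed
# by its cacheKeys, e.g. for the command generator below:
#
#     {'auth': {...}, 'parm': {...}, 'tran': {...}, 'addr': {...}}
#
# configure() reference-counts the entries it creates so that unconfigure() only
# tears down an LCD row once its last user is gone.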
class CommandGeneratorLcdConfigurator(AbstractLcdConfigurator):
cacheKeys = ['auth', 'parm', 'tran', 'addr']
def configure(self, snmpEngine, authData, transportTarget,
contextName, **options):
cache = self._getCache(snmpEngine)
if isinstance(authData, CommunityData):
if authData.communityIndex not in cache['auth']:
config.addV1System(
snmpEngine,
authData.communityIndex,
authData.communityName,
authData.contextEngineId,
authData.contextName,
authData.tag,
authData.securityName
)
cache['auth'][authData.communityIndex] = authData
elif isinstance(authData, UsmUserData):
authDataKey = authData.userName, authData.securityEngineId
if authDataKey not in cache['auth']:
config.addV3User(
snmpEngine,
authData.userName,
authData.authProtocol, authData.authKey,
authData.privProtocol, authData.privKey,
securityEngineId=authData.securityEngineId,
securityName=authData.securityName,
authKeyType=authData.authKeyType,
privKeyType=authData.privKeyType
)
cache['auth'][authDataKey] = authData
else:
raise error.PySnmpError('Unsupported authentication object')
paramsKey = (authData.securityName,
authData.securityLevel,
authData.mpModel)
if paramsKey in cache['parm']:
paramsName, useCount = cache['parm'][paramsKey]
cache['parm'][paramsKey] = paramsName, useCount + 1
else:
paramsName = 'p%s' % self.nextID()
config.addTargetParams(
snmpEngine, paramsName,
authData.securityName, authData.securityLevel, authData.mpModel
)
cache['parm'][paramsKey] = paramsName, 1
if transportTarget.TRANSPORT_DOMAIN in cache['tran']:
transport, useCount = cache['tran'][transportTarget.TRANSPORT_DOMAIN]
transportTarget.verifyDispatcherCompatibility(snmpEngine)
cache['tran'][transportTarget.TRANSPORT_DOMAIN] = transport, useCount + 1
elif config.getTransport(snmpEngine, transportTarget.TRANSPORT_DOMAIN):
transportTarget.verifyDispatcherCompatibility(snmpEngine)
else:
transport = transportTarget.openClientMode()
config.addTransport(
snmpEngine,
transportTarget.TRANSPORT_DOMAIN,
transport
)
cache['tran'][transportTarget.TRANSPORT_DOMAIN] = transport, 1
transportKey = (paramsName, transportTarget.TRANSPORT_DOMAIN,
transportTarget.transportAddr,
transportTarget.timeout,
transportTarget.retries,
transportTarget.tagList,
transportTarget.iface)
if transportKey in cache['addr']:
addrName, useCount = cache['addr'][transportKey]
cache['addr'][transportKey] = addrName, useCount + 1
else:
addrName = 'a%s' % self.nextID()
config.addTargetAddr(
snmpEngine, addrName,
transportTarget.TRANSPORT_DOMAIN,
transportTarget.transportAddr,
paramsName,
transportTarget.timeout * 100,
transportTarget.retries,
transportTarget.tagList
)
cache['addr'][transportKey] = addrName, 1
return addrName, paramsName
def unconfigure(self, snmpEngine, authData=None, contextName=null, **options):
cache = self._getCache(snmpEngine)
if authData:
if isinstance(authData, CommunityData):
authDataKey = authData.communityIndex
elif isinstance(authData, UsmUserData):
authDataKey = authData.userName, authData.securityEngineId
else:
raise error.PySnmpError('Unsupported authentication object')
if authDataKey in cache['auth']:
authDataKeys = (authDataKey,)
else:
raise error.PySnmpError('Unknown authData %s' % (authData,))
else:
authDataKeys = list(cache['auth'].keys())
addrNames, paramsNames = set(), set()
for authDataKey in authDataKeys:
authDataX = cache['auth'][authDataKey]
del cache['auth'][authDataKey]
if isinstance(authDataX, CommunityData):
config.delV1System(
snmpEngine,
authDataX.communityIndex
)
elif isinstance(authDataX, UsmUserData):
config.delV3User(
snmpEngine,
authDataX.userName,
authDataX.securityEngineId
)
else:
raise error.PySnmpError('Unsupported authentication object')
paramsKey = (authDataX.securityName,
authDataX.securityLevel,
authDataX.mpModel)
if paramsKey in cache['parm']:
paramsName, useCount = cache['parm'][paramsKey]
useCount -= 1
if useCount:
cache['parm'][paramsKey] = paramsName, useCount
else:
del cache['parm'][paramsKey]
config.delTargetParams(snmpEngine, paramsName)
paramsNames.add(paramsName)
else:
raise error.PySnmpError('Unknown target %s' % (paramsKey,))
addrKeys = [x for x in cache['addr'] if x[0] == paramsName]
for addrKey in addrKeys:
addrName, useCount = cache['addr'][addrKey]
useCount -= 1
if useCount:
cache['addr'][addrKey] = addrName, useCount
else:
config.delTargetAddr(snmpEngine, addrName)
del cache['addr'][addrKey]
addrNames.add(addrKey)
if addrKey[1] in cache['tran']:
transport, useCount = cache['tran'][addrKey[1]]
if useCount > 1:
useCount -= 1
cache['tran'][addrKey[1]] = transport, useCount
else:
config.delTransport(snmpEngine, addrKey[1])
transport.closeTransport()
del cache['tran'][addrKey[1]]
return addrNames, paramsNames
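# Illustrative sketch (an assumption, not taken from pysnmp documentation): a
# typical caller passes an auth object from the star import above (CommunityData
# or UsmUserData) plus a transport target and gets back the generated
# target-address and target-params names, e.g.
#
#     addrName, paramsName = CommandGeneratorLcdConfigurator().configure(
#         snmpEngine, CommunityData('public'), transportTarget, '')
#
# where snmpEngine and transportTarget are assumed to be an already constructed
# SnmpEngine and UDP transport target.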
class NotificationOriginatorLcdConfigurator(AbstractLcdConfigurator):
cacheKeys = ['auth', 'name']
_cmdGenLcdCfg = CommandGeneratorLcdConfigurator()
def configure(self, snmpEngine, authData, transportTarget, notifyType,
contextName, **options):
cache = self._getCache(snmpEngine)
notifyName = None
# Create matching transport tags if not given by user. Not good!
if not transportTarget.tagList:
transportTarget.tagList = str(
hash((authData.securityName, transportTarget.transportAddr))
)
if isinstance(authData, CommunityData) and not authData.tag:
authData.tag = transportTarget.tagList.split()[0]
addrName, paramsName = self._cmdGenLcdCfg.configure(
snmpEngine, authData, transportTarget, contextName, **options)
tagList = transportTarget.tagList.split()
if not tagList:
tagList = ['']
for tag in tagList:
notifyNameKey = paramsName, tag, notifyType
if notifyNameKey in cache['name']:
notifyName, paramsName, useCount = cache['name'][notifyNameKey]
cache['name'][notifyNameKey] = notifyName, paramsName, useCount + 1
else:
notifyName = 'n%s' % self.nextID()
config.addNotificationTarget(
snmpEngine, notifyName, paramsName, tag, notifyType)
cache['name'][notifyNameKey] = notifyName, paramsName, 1
authDataKey = authData.securityName, authData.securityModel, authData.securityLevel, contextName
if authDataKey in cache['auth']:
authDataX, subTree, useCount = cache['auth'][authDataKey]
cache['auth'][authDataKey] = authDataX, subTree, useCount + 1
else:
subTree = (1, 3, 6)
config.addVacmUser(
snmpEngine, authData.securityModel, authData.securityName,
authData.securityLevel, (), (), subTree,
contextName=contextName)
cache['auth'][authDataKey] = authData, subTree, 1
return notifyName
def unconfigure(self, snmpEngine, authData=None, contextName=null, **options):
cache = self._getCache(snmpEngine)
if authData:
authDataKey = authData.securityName, authData.securityModel, authData.securityLevel, contextName
if authDataKey in cache['auth']:
authDataKeys = (authDataKey,)
else:
raise error.PySnmpError('Unknown authData %s' % (authData,))
else:
authDataKeys = tuple(cache['auth'])
addrNames, paramsNames = self._cmdGenLcdCfg.unconfigure(
snmpEngine, authData, contextName, **options)
notifyAndParamsNames = [
(cache['name'][x], x) for x in cache['name'].keys()
if x[0] in paramsNames
]
for (notifyName, paramsName, useCount), notifyNameKey in notifyAndParamsNames:
useCount -= 1
if useCount:
cache['name'][notifyNameKey] = notifyName, paramsName, useCount
else:
config.delNotificationTarget(snmpEngine, notifyName, paramsName)
del cache['name'][notifyNameKey]
for authDataKey in authDataKeys:
authDataX, subTree, useCount = cache['auth'][authDataKey]
useCount -= 1
if useCount:
cache['auth'][authDataKey] = authDataX, subTree, useCount
else:
config.delTrapUser(
snmpEngine, authDataX.securityModel, authDataX.securityName,
authDataX.securityLevel, subTree)
del cache['auth'][authDataKey]
|
PypiClean
|
/sym_api_client_python-2.0b5.tar.gz/sym_api_client_python-2.0b5/symphony/bdk/gen/login_api/authentication_api.py
|
import re # noqa: F401
import sys # noqa: F401
from symphony.bdk.gen.api_client import ApiClient, Endpoint as _Endpoint
from symphony.bdk.gen.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from symphony.bdk.gen.login_model.authenticate_extension_app_request import AuthenticateExtensionAppRequest
from symphony.bdk.gen.login_model.authenticate_request import AuthenticateRequest
from symphony.bdk.gen.login_model.error import Error
from symphony.bdk.gen.login_model.extension_app_tokens import ExtensionAppTokens
from symphony.bdk.gen.login_model.token import Token
class AuthenticationApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __pubkey_app_authenticate_post(
self,
authenticate_request,
**kwargs
):
"""Authenticate an App with public key # noqa: E501
Based on an authentication request token signed by the application's RSA private key, authenticate the API caller and return a session token. An HTTP 401 Unauthorized error is returned on errors during authentication (e.g. invalid app, malformed authentication token, app's public key not imported in the pod, invalid token signature etc.). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = login_api.pubkey_app_authenticate_post(authenticate_request, async_req=True)
>>> result = thread.get()
Args:
authenticate_request (AuthenticateRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Token
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['authenticate_request'] = \
authenticate_request
return self.call_with_http_info(**kwargs)
self.pubkey_app_authenticate_post = _Endpoint(
settings={
'response_type': (Token,),
'auth': [],
'endpoint_path': '/pubkey/app/authenticate',
'operation_id': 'pubkey_app_authenticate_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'authenticate_request',
],
'required': [
'authenticate_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'authenticate_request':
(AuthenticateRequest,),
},
'attribute_map': {
},
'location_map': {
'authenticate_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__pubkey_app_authenticate_post
)
def __pubkey_app_user_user_id_authenticate_post(
self,
session_token,
user_id,
**kwargs
):
"""Authenticate an application in a delegated context to act on behalf of a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = login_api.pubkey_app_user_user_id_authenticate_post(session_token, user_id, async_req=True)
>>> result = thread.get()
Args:
session_token (str): App Session authentication token.
user_id (int): the user ID
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Token
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['session_token'] = \
session_token
kwargs['user_id'] = \
user_id
return self.call_with_http_info(**kwargs)
self.pubkey_app_user_user_id_authenticate_post = _Endpoint(
settings={
'response_type': (Token,),
'auth': [],
'endpoint_path': '/pubkey/app/user/{userId}/authenticate',
'operation_id': 'pubkey_app_user_user_id_authenticate_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'session_token',
'user_id',
],
'required': [
'session_token',
'user_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'user_id':
(int,),
},
'attribute_map': {
'session_token': 'sessionToken',
'user_id': 'userId',
},
'location_map': {
'session_token': 'header',
'user_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__pubkey_app_user_user_id_authenticate_post
)
def __pubkey_app_username_username_authenticate_post(
self,
session_token,
username,
**kwargs
):
"""Authenticate an application in a delegated context to act on behalf of a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = login_api.pubkey_app_username_username_authenticate_post(session_token, username, async_req=True)
>>> result = thread.get()
Args:
session_token (str): App Session authentication token.
username (str): the username
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Token
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['session_token'] = \
session_token
kwargs['username'] = \
username
return self.call_with_http_info(**kwargs)
self.pubkey_app_username_username_authenticate_post = _Endpoint(
settings={
'response_type': (Token,),
'auth': [],
'endpoint_path': '/pubkey/app/username/{username}/authenticate',
'operation_id': 'pubkey_app_username_username_authenticate_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'session_token',
'username',
],
'required': [
'session_token',
'username',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'username':
(str,),
},
'attribute_map': {
'session_token': 'sessionToken',
'username': 'username',
},
'location_map': {
'session_token': 'header',
'username': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__pubkey_app_username_username_authenticate_post
)
def __pubkey_authenticate_post(
self,
authenticate_request,
**kwargs
):
"""Authenticate with public key # noqa: E501
Based on an authentication request token signed by the caller's RSA private key, authenticate the API caller and return a session token. An HTTP 401 Unauthorized error is returned on errors during authentication (e.g. invalid user, malformed authentication token, user's public key not imported in the pod, invalid token signature etc.). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = login_api.pubkey_authenticate_post(authenticate_request, async_req=True)
>>> result = thread.get()
Args:
authenticate_request (AuthenticateRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Token
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['authenticate_request'] = \
authenticate_request
return self.call_with_http_info(**kwargs)
self.pubkey_authenticate_post = _Endpoint(
settings={
'response_type': (Token,),
'auth': [],
'endpoint_path': '/pubkey/authenticate',
'operation_id': 'pubkey_authenticate_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'authenticate_request',
],
'required': [
'authenticate_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'authenticate_request':
(AuthenticateRequest,),
},
'attribute_map': {
},
'location_map': {
'authenticate_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__pubkey_authenticate_post
)
def __v1_pubkey_app_authenticate_extension_app_post(
self,
authenticate_request,
**kwargs
):
"""Authenticate extension app with public key # noqa: E501
Based on an authentication request token signed by the caller's RSA private key, authenticate the API caller and return a session token. An HTTP 401 Unauthorized error is returned on errors during authentication (e.g. invalid user, malformed authentication token, user's public key not imported in the pod, invalid token signature etc.). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = login_api.v1_pubkey_app_authenticate_extension_app_post(authenticate_request, async_req=True)
>>> result = thread.get()
Args:
authenticate_request (AuthenticateExtensionAppRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ExtensionAppTokens
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['authenticate_request'] = \
authenticate_request
return self.call_with_http_info(**kwargs)
self.v1_pubkey_app_authenticate_extension_app_post = _Endpoint(
settings={
'response_type': (ExtensionAppTokens,),
'auth': [],
'endpoint_path': '/v1/pubkey/app/authenticate/extensionApp',
'operation_id': 'v1_pubkey_app_authenticate_extension_app_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'authenticate_request',
],
'required': [
'authenticate_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'authenticate_request':
(AuthenticateExtensionAppRequest,),
},
'attribute_map': {
},
'location_map': {
'authenticate_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__v1_pubkey_app_authenticate_extension_app_post
)
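# Illustrative note (an assumption based on the docstrings above, not on Symphony
# documentation): each attribute assigned in __init__ is an _Endpoint callable,
# so synchronous usage looks like
#
#     login_api = AuthenticationApi(api_client)
#     token = login_api.pubkey_authenticate_post(authenticate_request)
#
# while passing async_req=True returns a thread whose .get() yields the Token,
# as shown in the docstring examples.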
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/AlipayFundAllocSignQueryModel.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundAllocSignQueryModel(object):
def __init__(self):
self._agreement_no = None
self._biz_scene = None
self._out_biz_no = None
self._product_code = None
@property
def agreement_no(self):
return self._agreement_no
@agreement_no.setter
def agreement_no(self, value):
self._agreement_no = value
@property
def biz_scene(self):
return self._biz_scene
@biz_scene.setter
def biz_scene(self, value):
self._biz_scene = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
def to_alipay_dict(self):
params = dict()
if self.agreement_no:
if hasattr(self.agreement_no, 'to_alipay_dict'):
params['agreement_no'] = self.agreement_no.to_alipay_dict()
else:
params['agreement_no'] = self.agreement_no
if self.biz_scene:
if hasattr(self.biz_scene, 'to_alipay_dict'):
params['biz_scene'] = self.biz_scene.to_alipay_dict()
else:
params['biz_scene'] = self.biz_scene
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFundAllocSignQueryModel()
if 'agreement_no' in d:
o.agreement_no = d['agreement_no']
if 'biz_scene' in d:
o.biz_scene = d['biz_scene']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'product_code' in d:
o.product_code = d['product_code']
return o
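# Illustrative sketch (not part of the Alipay SDK; the helper name and all field
# values below are hypothetical): the model is a plain value object whose
# to_alipay_dict()/from_alipay_dict() methods round-trip the request parameters.
def _example_alloc_sign_query_round_trip():
    model = AlipayFundAllocSignQueryModel()
    model.agreement_no = "20230800001234567"   # hypothetical agreement number
    model.biz_scene = "DEFAULT"                # hypothetical business scene
    model.out_biz_no = "OUT-2023-0001"         # hypothetical merchant request id
    model.product_code = "FUND_ALLOC"          # hypothetical product code
    params = model.to_alipay_dict()            # plain dict, ready to serialize
    restored = AlipayFundAllocSignQueryModel.from_alipay_dict(params)
    assert restored.product_code == model.product_code
    return json.dumps(params)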
|
PypiClean
|
/Orange3-3.35.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/Orange/base.py
|
import inspect
import itertools
from collections.abc import Iterable
import re
import warnings
from typing import Callable, Dict, Optional
import numpy as np
import scipy
from Orange.data import Table, Storage, Instance, Value, Domain
from Orange.data.filter import HasClass
from Orange.data.table import DomainTransformationError
from Orange.data.util import one_hot
from Orange.misc.environ import cache_dir
from Orange.misc.wrapper_meta import WrapperMeta
from Orange.preprocess import Continuize, RemoveNaNColumns, SklImpute, Normalize
from Orange.statistics.util import all_nan
from Orange.util import Reprable, OrangeDeprecationWarning, wrap_callback, \
dummy_callback
__all__ = ["Learner", "Model", "SklLearner", "SklModel",
"ReprableWithPreprocessors"]
class ReprableWithPreprocessors(Reprable):
def _reprable_omit_param(self, name, default, value):
if name == "preprocessors":
default_cls = type(self).preprocessors
if value is default or value is default_cls:
return True
else:
try:
return all(p1 is p2 for p1, p2 in
itertools.zip_longest(value, default_cls))
except (ValueError, TypeError):
return False
else:
return super()._reprable_omit_param(name, default, value)
class Learner(ReprableWithPreprocessors):
"""The base learner class.
Preprocessors can behave in a number of different ways, all of which are
described here.
If the user does not pass a preprocessor argument into the Learner
constructor, the default learner preprocessors are used. We assume the user
would simply like to get things done without having to worry about
preprocessors.
If the user chooses to pass in their own preprocessors, we assume they know
what they are doing. In this case, only the user preprocessors are used and
the default preprocessors are ignored.
In case the user would like to use the default preprocessors as well as
their own ones, the `use_default_preprocessors` flag should be set.
Parameters
----------
preprocessors : Preprocessor or tuple[Preprocessor], optional
User defined preprocessors. If the user specifies their own
preprocessors, the default ones will not be used, unless the
`use_default_preprocessors` flag is set.
Attributes
----------
preprocessors : tuple[Preprocessor] (default None)
The used defined preprocessors that will be used on any data.
use_default_preprocessors : bool (default False)
This flag indicates whether to use the default preprocessors that are
defined on the Learner class. Since preprocessors can be applied in a
number of ways, the set that actually gets applied is resolved by the
`active_preprocessors` property described below.
active_preprocessors : tuple[Preprocessor]
The processors that will be used when data is passed to the learner.
This depends on whether the user has passed in their own preprocessors
and whether the `use_default_preprocessors` flag is set.
This property is needed mainly because of the `Fitter` class, which can
not know in advance, which preprocessors it will need to use. Therefore
this resolves the active preprocessors using a lazy approach.
params : dict
The params that the learner is constructed with.
"""
supports_multiclass = False
supports_weights = False
#: A sequence of data preprocessors to apply on data prior to
#: fitting the model
preprocessors = ()
# Note: Do not use this class attribute.
# It remains here for compatibility reasons.
learner_adequacy_err_msg = ''
def __init__(self, preprocessors=None):
self.use_default_preprocessors = False
if isinstance(preprocessors, Iterable):
self.preprocessors = tuple(preprocessors)
elif preprocessors:
self.preprocessors = (preprocessors,)
# pylint: disable=R0201
def fit(self, X, Y, W=None):
raise RuntimeError(
"Descendants of Learner must overload method fit or fit_storage")
def fit_storage(self, data):
"""Default implementation of fit_storage defaults to calling fit.
Derived classes must define fit_storage or fit"""
X, Y, W = data.X, data.Y, data.W if data.has_weights() else None
return self.fit(X, Y, W)
def __call__(self, data, progress_callback=None):
reason = self.incompatibility_reason(data.domain)
if reason is not None:
raise ValueError(reason)
origdomain = data.domain
if isinstance(data, Instance):
data = Table(data.domain, [data])
origdata = data
if progress_callback is None:
progress_callback = dummy_callback
progress_callback(0, "Preprocessing...")
try:
cb = wrap_callback(progress_callback, end=0.1)
data = self.preprocess(data, progress_callback=cb)
except TypeError:
data = self.preprocess(data)
warnings.warn("A keyword argument 'progress_callback' has been "
"added to the preprocess() signature. Implementing "
"the method without the argument is deprecated and "
"will result in an error in the future.",
OrangeDeprecationWarning)
if len(data.domain.class_vars) > 1 and not self.supports_multiclass:
raise TypeError("%s doesn't support multiple class variables" %
self.__class__.__name__)
progress_callback(0.1, "Fitting...")
model = self._fit_model(data)
model.used_vals = [np.unique(y).astype(int) for y in data.Y[:, None].T]
if not hasattr(model, "domain") or model.domain is None:
# some models set domain themselves and it should be respected
# e.g. calibration learners set the base_learner's domain which
# would be wrongly overwritten if we set it here for any model
model.domain = data.domain
model.supports_multiclass = self.supports_multiclass
model.name = self.name
model.original_domain = origdomain
model.original_data = origdata
progress_callback(1)
return model
def _fit_model(self, data):
if type(self).fit is Learner.fit:
return self.fit_storage(data)
else:
X, Y, W = data.X, data.Y, data.W if data.has_weights() else None
return self.fit(X, Y, W)
def preprocess(self, data, progress_callback=None):
"""Apply the `preprocessors` to the data"""
if progress_callback is None:
progress_callback = dummy_callback
n_pps = len(list(self.active_preprocessors))
for i, pp in enumerate(self.active_preprocessors):
progress_callback(i / n_pps)
data = pp(data)
progress_callback(1)
return data
@property
def active_preprocessors(self):
yield from self.preprocessors
if (self.use_default_preprocessors and
self.preprocessors is not type(self).preprocessors):
yield from type(self).preprocessors
# pylint: disable=no-self-use
def incompatibility_reason(self, _: Domain) -> Optional[str]:
"""Return None if a learner can fit domain or string explaining why it can not."""
return None
@property
def name(self):
"""Return a short name derived from Learner type name"""
try:
return self.__name
except AttributeError:
name = self.__class__.__name__
if name.endswith('Learner'):
name = name[:-len('Learner')]
if name.endswith('Fitter'):
name = name[:-len('Fitter')]
if isinstance(self, SklLearner) and name.startswith('Skl'):
name = name[len('Skl'):]
name = name or 'learner'
# From http://stackoverflow.com/a/1176023/1090455 <3
self.name = re.sub(r'([a-z0-9])([A-Z])', r'\1 \2',
re.sub(r'(.)([A-Z][a-z]+)', r'\1 \2', name)).lower()
return self.name
@name.setter
def name(self, value):
self.__name = value
def __str__(self):
return self.name
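# Illustrative sketch (not part of Orange; the class and function names are
# hypothetical): how `active_preprocessors` resolves user-supplied versus
# class-level preprocessors.  Plain object() instances stand in for real
# Preprocessor objects, since the property only iterates over them.
def _example_active_preprocessors():
    user_pp, default_pp = object(), object()
    class _ToyLearner(Learner):
        preprocessors = (default_pp,)
    learner = _ToyLearner(preprocessors=[user_pp])
    assert list(learner.active_preprocessors) == [user_pp]
    learner.use_default_preprocessors = True
    assert list(learner.active_preprocessors) == [user_pp, default_pp]
    return learner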
class Model(Reprable):
supports_multiclass = False
supports_weights = False
Value = 0
Probs = 1
ValueProbs = 2
def __init__(self, domain=None, original_domain=None):
self.domain = domain
if original_domain is not None:
self.original_domain = original_domain
else:
self.original_domain = domain
self.used_vals = None
def predict(self, X):
if type(self).predict_storage is Model.predict_storage:
raise TypeError("Descendants of Model must overload method predict")
else:
Y = np.zeros((len(X), len(self.domain.class_vars)))
Y[:] = np.nan
table = Table(self.domain, X, Y)
return self.predict_storage(table)
def predict_storage(self, data):
if isinstance(data, Storage):
return self.predict(data.X)
elif isinstance(data, Instance):
return self.predict(np.atleast_2d(data.x))
raise TypeError("Unrecognized argument (instance of '{}')"
.format(type(data).__name__))
def get_backmappers(self, data):
backmappers = []
n_values = []
dataclasses = data.domain.class_vars
modelclasses = self.domain.class_vars
if not (modelclasses and dataclasses):
return None, [] # classless model or data; don't touch
if len(dataclasses) != len(modelclasses):
raise DomainTransformationError(
"Mismatching number of model's classes and data classes")
for dataclass, modelclass in zip(dataclasses, modelclasses):
if dataclass != modelclass:
if dataclass.name != modelclass.name:
raise DomainTransformationError(
f"Model for '{modelclass.name}' "
f"cannot predict '{dataclass.name}'")
else:
raise DomainTransformationError(
f"Variables '{modelclass.name}' in the model is "
"incompatible with the variable of the same name "
"in the data.")
n_values.append(dataclass.is_discrete and len(dataclass.values))
if dataclass is not modelclass and dataclass.is_discrete:
backmappers.append(dataclass.get_mapper_from(modelclass))
else:
backmappers.append(None)
if all(x is None for x in backmappers):
backmappers = None
return backmappers, n_values
def backmap_value(self, value, mapped_probs, n_values, backmappers):
if backmappers is None:
return value
if value.ndim == 2: # For multitarget, recursive call by columns
new_value = np.zeros(value.shape)
for i, n_value, backmapper in zip(
itertools.count(), n_values, backmappers):
new_value[:, i] = self.backmap_value(
value[:, i], mapped_probs[:, i, :], [n_value], [backmapper])
return new_value
backmapper = backmappers[0]
if backmapper is None:
return value
value = backmapper(value)
nans = np.isnan(value)
if not np.any(nans) or n_values[0] < 2:
return value
if mapped_probs is not None:
value[nans] = np.argmax(mapped_probs[nans], axis=1)
else:
value[nans] = np.random.RandomState(0).choice(
backmapper(np.arange(0, n_values[0] - 1)),
(np.sum(nans), ))
return value
def backmap_probs(self, probs, n_values, backmappers):
if backmappers is None:
return probs
if probs.ndim == 3:
new_probs = np.zeros((len(probs), len(n_values), max(n_values)),
dtype=probs.dtype)
for i, n_value, backmapper in zip(
itertools.count(), n_values, backmappers):
new_probs[:, i, :n_value] = self.backmap_probs(
probs[:, i, :], [n_value], [backmapper])
return new_probs
backmapper = backmappers[0]
if backmapper is None:
return probs
n_value = n_values[0]
new_probs = np.zeros((len(probs), n_value), dtype=probs.dtype)
for col in range(probs.shape[1]):
target = backmapper(col)
if not np.isnan(target):
new_probs[:, int(target)] = probs[:, col]
tots = np.sum(new_probs, axis=1)
zero_sum = tots == 0
new_probs[zero_sum] = 1
tots[zero_sum] = n_value
new_probs = new_probs / tots[:, None]
return new_probs
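# Illustrative note (an inference from backmap_probs above, not Orange
# documentation): the backmapper maps a model-domain class index to the
# corresponding data-domain index, so each model probability column is copied
# into its target column, columns mapping to NaN are dropped, and every row
# is renormalized (rows left with no mass become uniform).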
def data_to_model_domain(
self, data: Table, progress_callback: Callable = dummy_callback
) -> Table:
"""
Transforms data to the model domain if possible.
Parameters
----------
data
Data to be transformed to the model domain
progress_callback
Callback - callable - to report the progress
Returns
-------
Transformed data table
Raises
------
DomainTransformationError
Error indicates that transformation is not possible since domains
are not compatible
"""
if data.domain == self.domain:
return data
progress_callback(0)
if self.original_domain.attributes != data.domain.attributes \
and data.X.size \
and not all_nan(data.X):
progress_callback(0.5)
new_data = data.transform(self.original_domain)
if all_nan(new_data.X):
raise DomainTransformationError(
"domain transformation produced no defined values")
progress_callback(0.75)
data = new_data.transform(self.domain)
progress_callback(1)
return data
progress_callback(0.5)
data = data.transform(self.domain)
progress_callback(1)
return data
def __call__(self, data, ret=Value):
multitarget = len(self.domain.class_vars) > 1
def one_hot_probs(value):
if not multitarget:
return one_hot(
value,
dim=len(self.domain.class_var.values)
if self.domain is not None else None
)
max_card = max(len(c.values) for c in self.domain.class_vars)
probs = np.zeros(value.shape + (max_card,), float)
for i in range(len(self.domain.class_vars)):
probs[:, i, :] = one_hot(value[:, i])
return probs
def extend_probabilities(probs):
"""
Since SklModels and models implementing `fit` and not `fit_storage`
do not guarantee correct prediction dimensionality, extend
dimensionality of probabilities when it does not match the number
of values in the domain.
"""
class_vars = self.domain.class_vars
max_values = max(len(cv.values) for cv in class_vars)
if max_values == probs.shape[-1]:
return probs
if not self.supports_multiclass:
probs = probs[:, np.newaxis, :]
probs_ext = np.zeros((len(probs), len(class_vars), max_values))
for c, used_vals in enumerate(self.used_vals):
for i, cv in enumerate(used_vals):
probs_ext[:, c, cv] = probs[:, c, i]
if not self.supports_multiclass:
probs_ext = probs_ext[:, 0, :]
return probs_ext
def fix_dim(x):
return x[0] if one_d else x
if not 0 <= ret <= 2:
raise ValueError("invalid value of argument 'ret'")
if ret > 0 and any(v.is_continuous for v in self.domain.class_vars):
raise ValueError("cannot predict continuous distributions")
# Convert 1d structures to 2d and remember doing it
one_d = True
if isinstance(data, Instance):
data = Table.from_list(data.domain, [data])
elif isinstance(data, (list, tuple)) \
and not isinstance(data[0], (list, tuple)):
data = [data]
elif isinstance(data, np.ndarray) and data.ndim == 1:
data = np.atleast_2d(data)
else:
one_d = False
# if sparse convert to csr_matrix
if scipy.sparse.issparse(data):
data = data.tocsr()
# Call the predictor
backmappers = None
n_values = []
if isinstance(data, (np.ndarray, scipy.sparse.csr_matrix)):
prediction = self.predict(data)
elif isinstance(data, Table):
backmappers, n_values = self.get_backmappers(data)
data = self.data_to_model_domain(data)
prediction = self.predict_storage(data)
elif isinstance(data, (list, tuple)):
data = Table.from_list(self.original_domain, data)
data = data.transform(self.domain)
prediction = self.predict_storage(data)
else:
raise TypeError("Unrecognized argument (instance of '{}')"
.format(type(data).__name__))
# Parse the result into value and probs
if isinstance(prediction, tuple):
value, probs = prediction
elif prediction.ndim == 1 + multitarget:
value, probs = prediction, None
elif prediction.ndim == 2 + multitarget:
value, probs = None, prediction
else:
raise TypeError("model returned a %i-dimensional array",
prediction.ndim)
# Ensure that we have what we need to return; backmap everything
if probs is None and (ret != Model.Value or backmappers is not None):
probs = one_hot_probs(value)
if probs is not None:
probs = extend_probabilities(probs)
probs = self.backmap_probs(probs, n_values, backmappers)
if ret != Model.Probs:
if value is None:
value = np.argmax(probs, axis=-1)
# probs are already backmapped
else:
value = self.backmap_value(value, probs, n_values, backmappers)
# Return what we need to
if ret == Model.Probs:
return fix_dim(probs)
if isinstance(data, Instance) and not multitarget:
value = [Value(self.domain.class_var, value[0])]
if ret == Model.Value:
return fix_dim(value)
else: # ret == Model.ValueProbs
return fix_dim(value), fix_dim(probs)
def __getstate__(self):
"""Skip (possibly large) data when pickling models"""
state = self.__dict__
if 'original_data' in state:
state = state.copy()
state['original_data'] = None
return state
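# Illustrative note (an inference from Model.__call__ above, not Orange
# documentation): a fitted model is called directly on data, and `ret` selects
# the output -- Model.Value gives predicted values, Model.Probs gives class
# probabilities, and Model.ValueProbs gives the (value, probs) pair; 1-D inputs
# (a single Instance, list or row) get the extra dimension stripped again.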
class SklModel(Model, metaclass=WrapperMeta):
used_vals = None
def __init__(self, skl_model):
self.skl_model = skl_model
def predict(self, X):
value = self.skl_model.predict(X)
# SVM has a "probability" attribute which defines whether the method computes probs
has_prob_attr = hasattr(self.skl_model, "probability")
if (has_prob_attr and self.skl_model.probability
or not has_prob_attr
and hasattr(self.skl_model, "predict_proba")):
probs = self.skl_model.predict_proba(X)
return value, probs
return value
def __repr__(self):
# Params represented as a comment because not passed into constructor
return super().__repr__() + ' # params=' + repr(self.params)
class SklLearner(Learner, metaclass=WrapperMeta):
"""
${skldoc}
Additional Orange parameters
preprocessors : list, optional
An ordered list of preprocessors applied to data before
training or testing.
Defaults to
`[RemoveNaNClasses(), Continuize(), SklImpute(), RemoveNaNColumns()]`
"""
__wraps__ = None
__returns__ = SklModel
_params = {}
preprocessors = default_preprocessors = [
HasClass(),
Continuize(),
RemoveNaNColumns(),
SklImpute()]
@property
def params(self):
return self._params
@params.setter
def params(self, value):
self._params = self._get_sklparams(value)
def _get_sklparams(self, values):
skllearner = self.__wraps__
if skllearner is not None:
spec = list(
inspect.signature(skllearner.__init__).parameters.keys()
)
# first argument is 'self'
assert spec[0] == "self"
params = {
name: values[name] for name in spec[1:] if name in values
}
else:
raise TypeError("Wrapper does not define '__wraps__'")
return params
def preprocess(self, data, progress_callback=None):
data = super().preprocess(data, progress_callback)
if any(v.is_discrete and len(v.values) > 2
for v in data.domain.attributes):
raise ValueError("Wrapped scikit-learn methods do not support " +
"multinomial variables.")
return data
def __call__(self, data, progress_callback=None):
m = super().__call__(data, progress_callback)
m.params = self.params
return m
def _initialize_wrapped(self):
# pylint: disable=not-callable
return self.__wraps__(**self.params)
def fit(self, X, Y, W=None):
clf = self._initialize_wrapped()
Y = Y.reshape(-1)
if W is None or not self.supports_weights:
return self.__returns__(clf.fit(X, Y))
return self.__returns__(clf.fit(X, Y, sample_weight=W.reshape(-1)))
@property
def supports_weights(self):
"""Indicates whether this learner supports weighted instances.
"""
return 'sample_weight' in self.__wraps__.fit.__code__.co_varnames
def __getattr__(self, item):
try:
return self.params[item]
except (KeyError, AttributeError):
raise AttributeError(item) from None
# TODO: Disallow (or mirror) __setattr__ for keys in params?
def __dir__(self):
dd = super().__dir__()
return list(sorted(set(dd) | set(self.params.keys())))
class TreeModel(Model):
pass
class RandomForestModel(Model):
"""Interface for random forest models
"""
@property
def trees(self):
"""Return a list of Trees in the forest
Returns
-------
List[Tree]
"""
class KNNBase:
"""Base class for KNN (classification and regression) learners
"""
# pylint: disable=unused-argument
def __init__(self, n_neighbors=5, metric="euclidean", weights="uniform",
algorithm='auto', metric_params=None,
preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
def fit(self, X, Y, W=None):
if self.params["metric_params"] is None and \
self.params.get("metric") == "mahalanobis":
self.params["metric_params"] = {"V": np.cov(X.T)}
return super().fit(X, Y, W)
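# Sketch (not from the original module): the branch above injects the feature
# covariance matrix when the Mahalanobis metric is requested without explicit
# metric_params. The helper below just mirrors that computation for clarity.
def _sketch_mahalanobis_metric_params(X):
    # X is an (nInstances, nFeatures) array; the Mahalanobis metric expects
    # the covariance matrix under the key "V"
    return {"V": np.cov(X.T)}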
class NNBase:
"""Base class for neural network (classification and regression) learners
"""
preprocessors = SklLearner.preprocessors + [Normalize()]
# pylint: disable=unused-argument,too-many-arguments
def __init__(self, hidden_layer_sizes=(100,), activation='relu',
solver='adam', alpha=0.0001, batch_size='auto',
learning_rate='constant', learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True, random_state=None,
tol=0.0001, verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-08, preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class CatGBModel(Model, metaclass=WrapperMeta):
def __init__(self, cat_model, cat_features, domain):
super().__init__(domain)
self.cat_model = cat_model
self.cat_features = cat_features
def __call__(self, data, ret=Model.Value):
if isinstance(data, Table):
with data.force_unlocked(data.X):
return super().__call__(data, ret)
else:
return super().__call__(data, ret)
def predict(self, X):
if self.cat_features:
X = X.astype(str)
value = self.cat_model.predict(X).flatten()
if hasattr(self.cat_model, "predict_proba"):
probs = self.cat_model.predict_proba(X)
return value, probs
return value
def __repr__(self):
# Params are shown as a comment because they are not passed into the constructor
return super().__repr__() + ' # params=' + repr(self.params)
class CatGBBaseLearner(Learner, metaclass=WrapperMeta):
"""
${skldoc}
Additional Orange parameters
preprocessors : list, optional
An ordered list of preprocessors applied to data before
training or testing.
Defaults to
`[HasClass(), RemoveNaNColumns()]`
"""
supports_weights = True
__wraps__ = None
__returns__ = CatGBModel
_params = {}
preprocessors = default_preprocessors = [
HasClass(),
RemoveNaNColumns(),
]
# pylint: disable=unused-argument,too-many-arguments,too-many-locals
def __init__(self,
iterations=None,
learning_rate=None,
depth=None,
l2_leaf_reg=None,
model_size_reg=None,
rsm=None,
loss_function=None,
border_count=None,
feature_border_type=None,
per_float_feature_quantization=None,
input_borders=None,
output_borders=None,
fold_permutation_block=None,
od_pval=None,
od_wait=None,
od_type=None,
nan_mode=None,
counter_calc_method=None,
leaf_estimation_iterations=None,
leaf_estimation_method=None,
thread_count=None,
random_seed=None,
use_best_model=None,
verbose=False,
logging_level=None,
metric_period=None,
ctr_leaf_count_limit=None,
store_all_simple_ctr=None,
max_ctr_complexity=None,
has_time=None,
allow_const_label=None,
classes_count=None,
class_weights=None,
one_hot_max_size=None,
random_strength=None,
name=None,
ignored_features=None,
train_dir=cache_dir(),
custom_loss=None,
custom_metric=None,
eval_metric=None,
bagging_temperature=None,
save_snapshot=None,
snapshot_file=None,
snapshot_interval=None,
fold_len_multiplier=None,
used_ram_limit=None,
gpu_ram_part=None,
allow_writing_files=False,
final_ctr_computation_mode=None,
approx_on_full_history=None,
boosting_type=None,
simple_ctr=None,
combinations_ctr=None,
per_feature_ctr=None,
task_type=None,
device_config=None,
devices=None,
bootstrap_type=None,
subsample=None,
sampling_unit=None,
dev_score_calc_obj_block_size=None,
max_depth=None,
n_estimators=None,
num_boost_round=None,
num_trees=None,
colsample_bylevel=None,
random_state=None,
reg_lambda=None,
objective=None,
eta=None,
max_bin=None,
scale_pos_weight=None,
gpu_cat_features_storage=None,
data_partition=None,
metadata=None,
early_stopping_rounds=None,
cat_features=None,
grow_policy=None,
min_data_in_leaf=None,
min_child_samples=None,
max_leaves=None,
num_leaves=None,
score_function=None,
leaf_estimation_backtracking=None,
ctr_history_unit=None,
monotone_constraints=None,
feature_weights=None,
penalties_coefficient=None,
first_feature_use_penalties=None,
model_shrink_rate=None,
model_shrink_mode=None,
langevin=None,
diffusion_temperature=None,
posterior_sampling=None,
boost_from_average=None,
text_features=None,
tokenizers=None,
dictionaries=None,
feature_calcers=None,
text_processing=None,
preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
@property
def params(self):
return self._params
@params.setter
def params(self, value):
self._params = self._get_wrapper_params(value)
def _get_wrapper_params(self, values):
spec = list(inspect.signature(
self.__wraps__.__init__).parameters.keys())
return {name: values[name] for name in spec[1:] if name in values}
def __call__(self, data, progress_callback=None):
m = super().__call__(data, progress_callback)
m.params = self.params
return m
def fit_storage(self, data: Table):
with data.force_unlocked(data.X):
domain, X, Y, W = data.domain, data.X, data.Y.reshape(-1), None
if self.supports_weights and data.has_weights():
W = data.W.reshape(-1)
# pylint: disable=not-callable
clf = self.__wraps__(**self.params)
cat_features = [i for i, attr in enumerate(domain.attributes)
if attr.is_discrete]
if cat_features:
X = X.astype(str)
cat_model = clf.fit(X, Y, cat_features=cat_features, sample_weight=W)
return self.__returns__(cat_model, cat_features, domain)
def __getattr__(self, item):
try:
return self.params[item]
except (KeyError, AttributeError):
raise AttributeError(item) from None
def __dir__(self):
dd = super().__dir__()
return list(sorted(set(dd) | set(self.params.keys())))
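# --- Illustrative sketch (not part of the original module) ------------------
# How CatGBBaseLearner is typically specialised: point `__wraps__` at a
# CatBoost estimator class. Assumes the `catboost` package is available;
# CatBoostClassifier and the chosen parameter values are illustrative only.
def _sketch_catgb_classifier_learner():
    from catboost import CatBoostClassifier

    class _SketchCatGBLearner(CatGBBaseLearner):
        __wraps__ = CatBoostClassifier

    # keyword arguments are filtered against CatBoostClassifier's signature
    # by the `params` setter above
    return _SketchCatGBLearner(iterations=100, learning_rate=0.1)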
class XGBBase(SklLearner):
"""Base class for xgboost (classification and regression) learners """
preprocessors = default_preprocessors = [
HasClass(),
Continuize(),
RemoveNaNColumns(),
]
def __init__(self, preprocessors=None, **kwargs):
super().__init__(preprocessors=preprocessors)
self.params = kwargs
@SklLearner.params.setter
def params(self, values: Dict):
self._params = values
|
PypiClean
|
/gu-django-tinymce-2.7.2.tar.gz/gu-django-tinymce-2.7.2/tinymce/media/tiny_mce/themes/advanced/langs/cy_dlg.js
|
tinyMCE.addI18n('cy.advanced_dlg',{"link_list":"Rhestr cysylltau","link_is_external":"Mae\'r URL a rydych wedi rhoi yn edrych fel cyswllt allannol, ydych chi eisiau ychwanegu\'r rhagddodiad http:// sydd angen?","link_is_email":"Mae\'r URL a rydych wedi rhoi yn edrych fel cyferiad e-bost, ydych chi eisiau ychwanegu\'r rhagddodiad mailto: sydd angen?","link_titlefield":"Teitl","link_target_blank":"Agor cyswllt mewn ffenst newydd","link_target_same":"Agor cyswllt yn yr un ffenst","link_target":"Targed","link_url":"URL cyswllt","link_title":"Mewnosod/golygu cyswllt","image_align_right":"De","image_align_left":"Chwith","image_align_textbottom":"Gwaelod testun","image_align_texttop":"Pen testun","image_align_bottom":"Gwaelod","image_align_middle":"Canol","image_align_top":"Pen","image_align_baseline":"Gwaelodlin","image_align":"Aliniad","image_hspace":"Gofod llorweddol","image_vspace":"Gofod fertigol","image_dimensions":"Dimensiynau","image_alt":"disgrifiad delwedd","image_list":"Rhestr delweddau","image_border":"Border","image_src":"URL delwedd","image_title":"Mewnosod/golygu delwedd","charmap_title":"Dewis n\u00f4d addasiedig","colorpicker_name":"Enw:","colorpicker_color":"Lliw:","colorpicker_named_title":"Lliwiau wedi\'u enwi","colorpicker_named_tab":"Wedi\'u enwi","colorpicker_palette_title":"Lliwiau palet","colorpicker_palette_tab":"Palet","colorpicker_picker_title":"Dewisydd lliw","colorpicker_picker_tab":"Dweisydd","colorpicker_title":"Dewis lliw","code_wordwrap":"Amlapio geiriau","code_title":"Golygydd Ffynhonell HTML","anchor_name":"Enw angor","anchor_title":"Mewnosod/golygu angor","about_loaded":"Ategion wedi llwytho","about_version":"Fersion","about_author":"Awdur","about_plugin":"Ategyn","about_plugins":"Ategion","about_license":"Twyddedd","about_help":"Cymorth","about_general":"Ynglyn","about_title":"Ynglyn TinyMCE","charmap_usage":"Defnyddiwch saethau dde a chwith i fforio.","anchor_invalid":"Penodwch enw angor dilys.","accessibility_help":"Cymorth Hygyrchedd","accessibility_usage_title":"Defnydd Cyffredin","invalid_color_value":"Gwerth lliw annilys"});
|
PypiClean
|
/esi_syncopy-2023.7.tar.gz/esi_syncopy-2023.7/syncopy/synthdata/analog.py
|
# Builtin/3rd party package imports
import numpy as np
# syncopy imports
from .utils import collect_trials
_2pi = np.pi * 2
# ---- Synthetic AnalogData ----
@collect_trials
def white_noise(nSamples=1000, nChannels=2, seed=None):
"""
Plain white noise with unity standard deviation.
Parameters
----------
nSamples : int
Number of samples per trial
nChannels : int
Number of channels
seed : int or None
Set to a number to get reproducible random numbers
Returns
--------
wn : :class:`syncopy.AnalogData` or numpy.ndarray
"""
rng = np.random.default_rng(seed)
signal = rng.normal(size=(nSamples, nChannels)).astype("f4")
return signal
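# Illustrative sketch (not part of the original module): basic usage of the
# generator above. Assumes that, as in the internal call inside phase_diffusion
# further below, passing ``nTrials=None`` to the @collect_trials wrapper
# returns the bare (nSamples, nChannels) numpy array.
def _demo_white_noise():
    wn = white_noise(nSamples=500, nChannels=3, seed=42, nTrials=None)
    assert wn.shape == (500, 3)
    return wn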
@collect_trials
def linear_trend(y_max, nSamples=1000, nChannels=2):
"""
A linear trend on all channels from 0 to `y_max` in `nSamples`.
Parameters
----------
y_max : float
Ordinate value at the last sample; the slope is then given by
samplerate * y_max / nSamples
nSamples : int
Number of samples per trial
nChannels : int
Number of channels
Returns
--------
trend : :class:`syncopy.AnalogData` or numpy.ndarray
"""
trend = np.linspace(0, y_max, nSamples, dtype="f4")
return np.column_stack([trend for _ in range(nChannels)])
@collect_trials
def harmonic(freq, samplerate, nSamples=1000, nChannels=2):
"""
A harmonic with frequency `freq`.
Parameters
----------
freq : float
Frequency of the harmonic in Hz
samplerate : float
Sampling rate in Hz
nSamples : int
Number of samples per trial
nChannels : int
Number of channels
Returns
--------
harm : :class:`syncopy.AnalogData` or numpy.ndarray
"""
# the sampling times vector needed for construction
tvec = np.arange(nSamples) * 1 / samplerate
# the harmonic
harm = np.cos(2 * np.pi * freq * tvec, dtype="f4")
return np.column_stack([harm for _ in range(nChannels)])
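# Illustrative sketch (not part of the original module): the generators above
# all return (nSamples, nChannels) arrays and can therefore be combined
# additively into a composite test signal. Frequencies, rates and weights are
# arbitrary example values; ``nTrials=None`` is assumed to return plain arrays
# (see the internal white_noise call in phase_diffusion below).
def _demo_composite_signal():
    fs, n = 500.0, 1000
    sig = (harmonic(freq=30, samplerate=fs, nSamples=n, nChannels=2, nTrials=None)
           + 0.5 * white_noise(nSamples=n, nChannels=2, seed=0, nTrials=None)
           + linear_trend(y_max=2.0, nSamples=n, nChannels=2, nTrials=None))
    return sig  # shape (1000, 2)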
# noisy phase evolution <-> phase diffusion
@collect_trials
def phase_diffusion(
freq,
eps=0.1,
samplerate=1000,
nChannels=2,
nSamples=1000,
rand_ini=False,
return_phase=False,
seed=None,
):
r"""
Linear (harmonic) phase evolution plus a Brownian noise term
inducing phase diffusion around the deterministic phase velocity (angular frequency).
The linear phase increments are given by
.. math::
\Delta \phi = 2\pi \frac{freq}{samplerate}
The Brownian increments are scaled with `eps` relative to these
phase increments, meaning the relative phase diffusion is frequency
independent.
Parameters
----------
freq : float
Harmonic frequency in Hz
eps : float
Scaling of the Brownian increments relative to the harmonic phase
increments: a value of `1` means a single Wiener step has, on
average, the same size as the harmonic increments
samplerate : float
Sampling rate in Hz
nChannels : int
Number of channels
nSamples : int
Number of samples in time
rand_ini : bool, optional
If set to ``True``, initial phases are randomized
return_phase : bool, optional
If set to ``True``, the phases are returned in radians
seed: None or int
Set to an `int` to get reproducible results, or `None` for random ones.
Returns
-------
phases : :class:`syncopy.AnalogData` or numpy.ndarray
Synthetic `nSamples` x `nChannels` data array simulating noisy phase
evolution/diffusion
Examples
--------
Weak phase diffusion around the 60Hz harmonic:
>>> signals = spy.synthdata.phase_diffusion(freq=60, eps=0.01)
Return the unwrapped phase directly:
>>> phases = spy.synthdata.phase_diffusion(freq=60, eps=0.01, return_phase=True)
"""
# white noise
wn = white_noise(nSamples=nSamples, nChannels=nChannels, seed=seed, nTrials=None)
tvec = np.linspace(0, nSamples / samplerate, nSamples, dtype="f4")
omega0 = 2 * np.pi * freq
lin_phase = np.tile(omega0 * tvec, (nChannels, 1)).T
# randomize initial phase
if rand_ini:
rng = np.random.default_rng(seed)
ps0 = 2 * np.pi * rng.uniform(size=nChannels).astype("f4")
lin_phase += ps0
# relative Brownian increments
rel_eps = np.sqrt(omega0 / samplerate * eps)
brown_incr = rel_eps * wn
# combine harmonic and diffusive dynamics
phases = lin_phase + np.cumsum(brown_incr, axis=0)
if not return_phase:
return np.cos(phases)
else:
return phases
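# Illustrative sketch (not part of the original module): numerical check of
# the scaling described in the docstring above. The stochastic part of the
# phase increments has standard deviation sqrt(eps * delta_phi) with
# delta_phi = 2*pi*freq/samplerate, which is exactly how `rel_eps` is built.
# Assumes ``nTrials=None`` returns the bare numpy array.
def _demo_phase_diffusion_scaling():
    freq, fs, eps = 40.0, 1000.0, 0.1
    phases = phase_diffusion(freq, eps=eps, samplerate=fs, nChannels=1,
                             nSamples=100_000, return_phase=True, nTrials=None)
    delta_phi = 2 * np.pi * freq / fs
    empirical = np.std(np.diff(phases[:, 0]))
    theoretical = np.sqrt(eps * delta_phi)
    return empirical, theoretical  # should agree closely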
@collect_trials
def ar2_network(AdjMat=None, nSamples=1000, alphas=(0.55, -0.8), seed=None):
"""
Simulation of a network of coupled AR(2) processes
With the default parameters the individual processes
(as in Dhamala 2008) have a spectral peak at 40Hz
with a sampling frequency of 200Hz.
NOTE: There is no check for stability: setting the
`alphas` ad libitum and/or defining large
and dense (many connections) systems will
almost surely lead to an unstable system
NOTE: One can set the number of channels via the shape
of the supplied `AdjMat`. Defaults to 2.
Parameters
----------
AdjMat : np.ndarray or None
`nChannel` x `nChannel` adjacency matrix where
entry ``(i,j)`` is the coupling strength
from channel ``i -> j``.
If left at `None`, the default 2 Channel system
with unidirectional ``2 -> 1`` coupling is generated.
See also `mk_RandomAdjMat`.
nSamples : int, optional
Number of samples in time
alphas : 2-element sequence, optional
The AR(2) parameters for lag1 and lag2
seed : None or int
Random seed to initialize the random number generator, passed on to
`np.random.default_rng`.
If you use this function with an `nTrials` argument (via the
`@collect_trials` wrapper) and you *do* want the data of all trials
to be identical (and reproducible), pass a single scalar seed and set
``seed_per_trial=False``.
Returns
-------
signal : numpy.ndarray
The `nSamples` x `nChannel`
solution of the network dynamics
"""
# default system layout as in Dhamala 2008:
# unidirectional (2->1) coupling
if AdjMat is None:
AdjMat = np.zeros((2, 2), dtype=np.float32)
AdjMat[1, 0] = 0.25
else:
# cast to our standard type
AdjMat = AdjMat.astype(np.float32)
nChannels = AdjMat.shape[0]
alpha1, alpha2 = alphas
# diagonal 'self-interaction' with lag 1
DiagMat = np.diag(nChannels * [alpha1])
signal = np.zeros((nSamples, nChannels), dtype=np.float32)
# pick the 1st values at random
rng = np.random.default_rng(seed)
signal[:2, :] = rng.normal(size=(2, nChannels))
for i in range(2, nSamples):
signal[i, :] = (DiagMat + AdjMat.T) @ signal[i - 1, :] + alpha2 * signal[i - 2, :]
signal[i, :] += rng.normal(size=(nChannels))
return signal
@collect_trials
def red_noise(alpha, nSamples=1000, nChannels=2, seed=None):
"""
Uncoupled multi-channel AR(1) process realizations.
For `alpha` close to 1 this can be used as a
surrogate 1/f background.
Parameters
----------
alpha : float
Must lie within the [0, 1) interval
nSamples : int
Number of samples per trial
nChannels : int
Number of channels
seed : int or None
Set to a number to get reproducible random numbers
Returns
--------
signal : :class:`syncopy.AnalogData` or numpy.ndarray
"""
# configure the AR(2) network so that it yields uncoupled
# AR(1) processes
alphas = [alpha, 0]
AdjMat = np.diag(np.zeros(nChannels))
signal = ar2_network(AdjMat=AdjMat, nSamples=nSamples, alphas=alphas, seed=seed, nTrials=None)
return signal
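# Illustrative sketch (not part of the original module): AR(1) realizations
# with `alpha` close to 1 as a 1/f-like surrogate background, as suggested in
# the docstring above. Assumes ``nTrials=None`` returns a plain numpy array.
def _demo_red_noise_surrogate():
    bg = red_noise(alpha=0.95, nSamples=5000, nChannels=4, seed=7, nTrials=None)
    return bg  # shape (5000, 4)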
def ar2_peak_freq(a1, a2, samplerate=1):
"""
Helper function to tune spectral peak of AR(2) process
"""
if np.any((a1**2 + 4 * a2) > 0):
raise ValueError("No complex roots!")
return np.arccos(a1 * (a2 - 1) / (4 * a2)) * 1 / _2pi * samplerate
def mk_RandomAdjMat(nChannels=3, conn_thresh=0.25, max_coupling=0.25, seed=None):
"""
Create a random adjacency matrix
for the network of AR(2) processes
where entry ``(i,j)`` is the coupling
strength from channel ``i -> j``
Parameters
----------
nChannels : int
Number of channels (network nodes)
conn_thresh : float
Connectivity threshold for the Bernoulli
sampling of the network connections. Setting
``conn_thresh = 1`` yields a fully connected network
(not recommended).
max_coupling : float < 0.5, optional
Total input into a single channel,
normalized by the number of couplings
(for stability).
seed : None or int
Passed on to `np.random.default_rng`; set to an int to get
reproducible results.
Returns
-------
AdjMat : numpy.ndarray
`nChannels` x `nChannels` adjacency matrix where
"""
# random numbers in [0,1)
rng = np.random.default_rng(seed)
AdjMat = rng.random((nChannels, nChannels))
# all elements smaller than the threshold get set to 1 (coupled)
AdjMat = (AdjMat < conn_thresh).astype(float)
# set the diagonal to 0 to make couplings easier to identify
np.fill_diagonal(AdjMat, 0)
# normalize such that total input
# does not exceed max. coupling
norm = AdjMat.sum(axis=0)
norm[norm == 0] = 1
AdjMat = AdjMat / norm[None, :] * max_coupling
return AdjMat
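# Illustrative sketch (not part of the original module): tying the helpers
# above together. With the default `alphas` of ar2_network and a 200 Hz
# sampling rate, ar2_peak_freq recovers the ~40 Hz spectral peak quoted in the
# ar2_network docstring. Parameter values are examples only.
def _demo_ar2_network_with_random_coupling():
    AdjMat = mk_RandomAdjMat(nChannels=5, conn_thresh=0.25, seed=42)
    sig = ar2_network(AdjMat=AdjMat, nSamples=2000, seed=42, nTrials=None)
    f_peak = ar2_peak_freq(0.55, -0.8, samplerate=200)  # ~40.0 Hz
    return sig.shape, f_peak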
|
PypiClean
|
/jupyros-0.7.0a0.tar.gz/jupyros-0.7.0a0/js/node_modules/webpack/lib/wasm/EnableWasmLoadingPlugin.js
|
"use strict";
/** @typedef {import("../../declarations/WebpackOptions").LibraryOptions} LibraryOptions */
/** @typedef {import("../../declarations/WebpackOptions").WasmLoadingType} WasmLoadingType */
/** @typedef {import("../Compiler")} Compiler */
/** @type {WeakMap<Compiler, Set<WasmLoadingType>>} */
const enabledTypes = new WeakMap();
const getEnabledTypes = compiler => {
let set = enabledTypes.get(compiler);
if (set === undefined) {
set = new Set();
enabledTypes.set(compiler, set);
}
return set;
};
class EnableWasmLoadingPlugin {
/**
* @param {WasmLoadingType} type library type that should be available
*/
constructor(type) {
this.type = type;
}
/**
* @param {Compiler} compiler the compiler instance
* @param {WasmLoadingType} type type of library
* @returns {void}
*/
static setEnabled(compiler, type) {
getEnabledTypes(compiler).add(type);
}
/**
* @param {Compiler} compiler the compiler instance
* @param {WasmLoadingType} type type of library
* @returns {void}
*/
static checkEnabled(compiler, type) {
if (!getEnabledTypes(compiler).has(type)) {
throw new Error(
`Library type "${type}" is not enabled. ` +
"EnableWasmLoadingPlugin need to be used to enable this type of wasm loading. " +
'This usually happens through the "output.enabledWasmLoadingTypes" option. ' +
'If you are using a function as entry which sets "wasmLoading", you need to add all potential library types to "output.enabledWasmLoadingTypes". ' +
"These types are enabled: " +
Array.from(getEnabledTypes(compiler)).join(", ")
);
}
}
/**
* Apply the plugin
* @param {Compiler} compiler the compiler instance
* @returns {void}
*/
apply(compiler) {
const { type } = this;
// Only enable once
const enabled = getEnabledTypes(compiler);
if (enabled.has(type)) return;
enabled.add(type);
if (typeof type === "string") {
switch (type) {
case "fetch": {
// TODO webpack 6 remove FetchCompileWasmPlugin
const FetchCompileWasmPlugin = require("../web/FetchCompileWasmPlugin");
const FetchCompileAsyncWasmPlugin = require("../web/FetchCompileAsyncWasmPlugin");
new FetchCompileWasmPlugin({
mangleImports: compiler.options.optimization.mangleWasmImports
}).apply(compiler);
new FetchCompileAsyncWasmPlugin().apply(compiler);
break;
}
case "async-node": {
// TODO webpack 6 remove ReadFileCompileWasmPlugin
const ReadFileCompileWasmPlugin = require("../node/ReadFileCompileWasmPlugin");
// @ts-expect-error typescript bug for duplicate require
const ReadFileCompileAsyncWasmPlugin = require("../node/ReadFileCompileAsyncWasmPlugin");
new ReadFileCompileWasmPlugin({
mangleImports: compiler.options.optimization.mangleWasmImports
}).apply(compiler);
new ReadFileCompileAsyncWasmPlugin({ type }).apply(compiler);
break;
}
case "async-node-module": {
// @ts-expect-error typescript bug for duplicate require
const ReadFileCompileAsyncWasmPlugin = require("../node/ReadFileCompileAsyncWasmPlugin");
new ReadFileCompileAsyncWasmPlugin({ type, import: true }).apply(
compiler
);
break;
}
case "universal":
throw new Error(
"Universal WebAssembly Loading is not implemented yet"
);
default:
throw new Error(`Unsupported wasm loading type ${type}.
Plugins which provide custom wasm loading types must call EnableWasmLoadingPlugin.setEnabled(compiler, type) to disable this error.`);
}
} else {
// TODO support plugin instances here
// apply them to the compiler
}
}
}
module.exports = EnableWasmLoadingPlugin;
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_22/models/host_performance_by_array.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_22 import models
class HostPerformanceByArray(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_per_mirrored_write': 'int',
'bytes_per_op': 'int',
'bytes_per_read': 'int',
'bytes_per_write': 'int',
'mirrored_write_bytes_per_sec': 'int',
'mirrored_writes_per_sec': 'int',
'qos_rate_limit_usec_per_mirrored_write_op': 'int',
'qos_rate_limit_usec_per_read_op': 'int',
'qos_rate_limit_usec_per_write_op': 'int',
'queue_usec_per_mirrored_write_op': 'int',
'queue_usec_per_read_op': 'int',
'queue_usec_per_write_op': 'int',
'read_bytes_per_sec': 'int',
'reads_per_sec': 'int',
'san_usec_per_mirrored_write_op': 'int',
'san_usec_per_read_op': 'int',
'san_usec_per_write_op': 'int',
'service_usec_per_mirrored_write_op': 'int',
'service_usec_per_read_op': 'int',
'service_usec_per_write_op': 'int',
'time': 'int',
'usec_per_mirrored_write_op': 'int',
'usec_per_read_op': 'int',
'usec_per_write_op': 'int',
'write_bytes_per_sec': 'int',
'writes_per_sec': 'int',
'service_usec_per_read_op_cache_reduction': 'float',
'id': 'str',
'name': 'str',
'array': 'Resource'
}
attribute_map = {
'bytes_per_mirrored_write': 'bytes_per_mirrored_write',
'bytes_per_op': 'bytes_per_op',
'bytes_per_read': 'bytes_per_read',
'bytes_per_write': 'bytes_per_write',
'mirrored_write_bytes_per_sec': 'mirrored_write_bytes_per_sec',
'mirrored_writes_per_sec': 'mirrored_writes_per_sec',
'qos_rate_limit_usec_per_mirrored_write_op': 'qos_rate_limit_usec_per_mirrored_write_op',
'qos_rate_limit_usec_per_read_op': 'qos_rate_limit_usec_per_read_op',
'qos_rate_limit_usec_per_write_op': 'qos_rate_limit_usec_per_write_op',
'queue_usec_per_mirrored_write_op': 'queue_usec_per_mirrored_write_op',
'queue_usec_per_read_op': 'queue_usec_per_read_op',
'queue_usec_per_write_op': 'queue_usec_per_write_op',
'read_bytes_per_sec': 'read_bytes_per_sec',
'reads_per_sec': 'reads_per_sec',
'san_usec_per_mirrored_write_op': 'san_usec_per_mirrored_write_op',
'san_usec_per_read_op': 'san_usec_per_read_op',
'san_usec_per_write_op': 'san_usec_per_write_op',
'service_usec_per_mirrored_write_op': 'service_usec_per_mirrored_write_op',
'service_usec_per_read_op': 'service_usec_per_read_op',
'service_usec_per_write_op': 'service_usec_per_write_op',
'time': 'time',
'usec_per_mirrored_write_op': 'usec_per_mirrored_write_op',
'usec_per_read_op': 'usec_per_read_op',
'usec_per_write_op': 'usec_per_write_op',
'write_bytes_per_sec': 'write_bytes_per_sec',
'writes_per_sec': 'writes_per_sec',
'service_usec_per_read_op_cache_reduction': 'service_usec_per_read_op_cache_reduction',
'id': 'id',
'name': 'name',
'array': 'array'
}
required_args = {
}
def __init__(
self,
bytes_per_mirrored_write=None, # type: int
bytes_per_op=None, # type: int
bytes_per_read=None, # type: int
bytes_per_write=None, # type: int
mirrored_write_bytes_per_sec=None, # type: int
mirrored_writes_per_sec=None, # type: int
qos_rate_limit_usec_per_mirrored_write_op=None, # type: int
qos_rate_limit_usec_per_read_op=None, # type: int
qos_rate_limit_usec_per_write_op=None, # type: int
queue_usec_per_mirrored_write_op=None, # type: int
queue_usec_per_read_op=None, # type: int
queue_usec_per_write_op=None, # type: int
read_bytes_per_sec=None, # type: int
reads_per_sec=None, # type: int
san_usec_per_mirrored_write_op=None, # type: int
san_usec_per_read_op=None, # type: int
san_usec_per_write_op=None, # type: int
service_usec_per_mirrored_write_op=None, # type: int
service_usec_per_read_op=None, # type: int
service_usec_per_write_op=None, # type: int
time=None, # type: int
usec_per_mirrored_write_op=None, # type: int
usec_per_read_op=None, # type: int
usec_per_write_op=None, # type: int
write_bytes_per_sec=None, # type: int
writes_per_sec=None, # type: int
service_usec_per_read_op_cache_reduction=None, # type: float
id=None, # type: str
name=None, # type: str
array=None, # type: models.Resource
):
"""
Keyword args:
bytes_per_mirrored_write (int): The average I/O size per mirrored write. Measured in bytes.
bytes_per_op (int): The average I/O size for both read and write (all) operations.
bytes_per_read (int): The average I/O size per read. Measured in bytes.
bytes_per_write (int): The average I/O size per write. Measured in bytes.
mirrored_write_bytes_per_sec (int): The number of mirrored bytes written per second.
mirrored_writes_per_sec (int): The number of mirrored writes per second.
qos_rate_limit_usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds.
qos_rate_limit_usec_per_read_op (int): The average time spent waiting due to QoS rate limiting for a read request. Measured in microseconds.
qos_rate_limit_usec_per_write_op (int): The average time that a write I/O request spends waiting as a result of the volume reaching its QoS bandwidth limit. Measured in microseconds.
queue_usec_per_mirrored_write_op (int): The average time that a mirrored write I/O request spends in the array waiting to be served. Measured in microseconds.
queue_usec_per_read_op (int): The average time that a read I/O request spends in the array waiting to be served. Measured in microseconds.
queue_usec_per_write_op (int): The average time that a write I/O request spends in the array waiting to be served. Measured in microseconds.
read_bytes_per_sec (int): The number of bytes read per second.
reads_per_sec (int): The number of read requests processed per second.
san_usec_per_mirrored_write_op (int): The average time required to transfer data from the initiator to the array for a mirrored write request. Measured in microseconds.
san_usec_per_read_op (int): The average time required to transfer data from the array to the initiator for a read request. Measured in microseconds.
san_usec_per_write_op (int): The average time required to transfer data from the initiator to the array for a write request. Measured in microseconds.
service_usec_per_mirrored_write_op (int): The average time required for the array to service a mirrored write request. Measured in microseconds.
service_usec_per_read_op (int): The average time required for the array to service a read request. Measured in microseconds.
service_usec_per_write_op (int): The average time required for the array to service a write request. Measured in microseconds.
time (int): The time when the sample performance data was taken. Measured in milliseconds since the UNIX epoch.
usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
usec_per_read_op (int): The average time it takes the array to process an I/O read request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
usec_per_write_op (int): The average time it takes the array to process an I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
write_bytes_per_sec (int): The number of bytes written per second.
writes_per_sec (int): The number of write requests processed per second.
service_usec_per_read_op_cache_reduction (float): The percentage reduction in `service_usec_per_read_op` due to data cache hits. For example, a value of 0.25 indicates that the value of `service_usec_per_read_op` is 25% lower than it would have been without any data cache hits.
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A user-specified name. The name must be locally unique and can be changed.
array (Resource): The array on which the performance metrics were recorded.
"""
if bytes_per_mirrored_write is not None:
self.bytes_per_mirrored_write = bytes_per_mirrored_write
if bytes_per_op is not None:
self.bytes_per_op = bytes_per_op
if bytes_per_read is not None:
self.bytes_per_read = bytes_per_read
if bytes_per_write is not None:
self.bytes_per_write = bytes_per_write
if mirrored_write_bytes_per_sec is not None:
self.mirrored_write_bytes_per_sec = mirrored_write_bytes_per_sec
if mirrored_writes_per_sec is not None:
self.mirrored_writes_per_sec = mirrored_writes_per_sec
if qos_rate_limit_usec_per_mirrored_write_op is not None:
self.qos_rate_limit_usec_per_mirrored_write_op = qos_rate_limit_usec_per_mirrored_write_op
if qos_rate_limit_usec_per_read_op is not None:
self.qos_rate_limit_usec_per_read_op = qos_rate_limit_usec_per_read_op
if qos_rate_limit_usec_per_write_op is not None:
self.qos_rate_limit_usec_per_write_op = qos_rate_limit_usec_per_write_op
if queue_usec_per_mirrored_write_op is not None:
self.queue_usec_per_mirrored_write_op = queue_usec_per_mirrored_write_op
if queue_usec_per_read_op is not None:
self.queue_usec_per_read_op = queue_usec_per_read_op
if queue_usec_per_write_op is not None:
self.queue_usec_per_write_op = queue_usec_per_write_op
if read_bytes_per_sec is not None:
self.read_bytes_per_sec = read_bytes_per_sec
if reads_per_sec is not None:
self.reads_per_sec = reads_per_sec
if san_usec_per_mirrored_write_op is not None:
self.san_usec_per_mirrored_write_op = san_usec_per_mirrored_write_op
if san_usec_per_read_op is not None:
self.san_usec_per_read_op = san_usec_per_read_op
if san_usec_per_write_op is not None:
self.san_usec_per_write_op = san_usec_per_write_op
if service_usec_per_mirrored_write_op is not None:
self.service_usec_per_mirrored_write_op = service_usec_per_mirrored_write_op
if service_usec_per_read_op is not None:
self.service_usec_per_read_op = service_usec_per_read_op
if service_usec_per_write_op is not None:
self.service_usec_per_write_op = service_usec_per_write_op
if time is not None:
self.time = time
if usec_per_mirrored_write_op is not None:
self.usec_per_mirrored_write_op = usec_per_mirrored_write_op
if usec_per_read_op is not None:
self.usec_per_read_op = usec_per_read_op
if usec_per_write_op is not None:
self.usec_per_write_op = usec_per_write_op
if write_bytes_per_sec is not None:
self.write_bytes_per_sec = write_bytes_per_sec
if writes_per_sec is not None:
self.writes_per_sec = writes_per_sec
if service_usec_per_read_op_cache_reduction is not None:
self.service_usec_per_read_op_cache_reduction = service_usec_per_read_op_cache_reduction
if id is not None:
self.id = id
if name is not None:
self.name = name
if array is not None:
self.array = array
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `HostPerformanceByArray`".format(key))
if key == "bytes_per_mirrored_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_mirrored_write`, must be a value greater than or equal to `0`")
if key == "bytes_per_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_op`, must be a value greater than or equal to `0`")
if key == "bytes_per_read" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_read`, must be a value greater than or equal to `0`")
if key == "bytes_per_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_write`, must be a value greater than or equal to `0`")
if key == "mirrored_write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "mirrored_writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_writes_per_sec`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "read_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `read_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "reads_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `reads_per_sec`, must be a value greater than or equal to `0`")
if key == "san_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `writes_per_sec`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op_cache_reduction" and value is not None:
if value > 1.0:
raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, value must be less than or equal to `1.0`")
if value < 0.0:
raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, must be a value greater than or equal to `0.0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `HostPerformanceByArray`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `HostPerformanceByArray`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `HostPerformanceByArray`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(HostPerformanceByArray, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HostPerformanceByArray):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
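# --- Illustrative sketch (not part of the generated client code) ------------
# Basic usage of the model above: construct it with a few metrics, serialise
# it with to_dict(), and note that the per-attribute validation in __setattr__
# rejects out-of-range values. The metric values are arbitrary examples.
def _demo_host_performance_by_array():
    perf = HostPerformanceByArray(reads_per_sec=1200, writes_per_sec=300,
                                  usec_per_read_op=450)
    as_dict = perf.to_dict()
    try:
        perf.reads_per_sec = -1
    except ValueError:
        pass  # negative rates are rejected, as documented above
    return as_dict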
|
PypiClean
|
/kiwitcms-12.4.tar.gz/kiwitcms-12.4/tcms/node_modules/moment/dist/moment.js
|
var hookCallback;
function hooks() {
return hookCallback.apply(null, arguments);
}
// This is done to register the method called with moment()
// without creating circular dependencies.
function setHookCallback(callback) {
hookCallback = callback;
}
function isArray(input) {
return (
input instanceof Array ||
Object.prototype.toString.call(input) === '[object Array]'
);
}
function isObject(input) {
// IE8 will treat undefined and null as object if it wasn't for
// input != null
return (
input != null &&
Object.prototype.toString.call(input) === '[object Object]'
);
}
function hasOwnProp(a, b) {
return Object.prototype.hasOwnProperty.call(a, b);
}
function isObjectEmpty(obj) {
if (Object.getOwnPropertyNames) {
return Object.getOwnPropertyNames(obj).length === 0;
} else {
var k;
for (k in obj) {
if (hasOwnProp(obj, k)) {
return false;
}
}
return true;
}
}
function isUndefined(input) {
return input === void 0;
}
function isNumber(input) {
return (
typeof input === 'number' ||
Object.prototype.toString.call(input) === '[object Number]'
);
}
function isDate(input) {
return (
input instanceof Date ||
Object.prototype.toString.call(input) === '[object Date]'
);
}
function map(arr, fn) {
var res = [],
i,
arrLen = arr.length;
for (i = 0; i < arrLen; ++i) {
res.push(fn(arr[i], i));
}
return res;
}
function extend(a, b) {
for (var i in b) {
if (hasOwnProp(b, i)) {
a[i] = b[i];
}
}
if (hasOwnProp(b, 'toString')) {
a.toString = b.toString;
}
if (hasOwnProp(b, 'valueOf')) {
a.valueOf = b.valueOf;
}
return a;
}
function createUTC(input, format, locale, strict) {
return createLocalOrUTC(input, format, locale, strict, true).utc();
}
function defaultParsingFlags() {
// We need to deep clone this object.
return {
empty: false,
unusedTokens: [],
unusedInput: [],
overflow: -2,
charsLeftOver: 0,
nullInput: false,
invalidEra: null,
invalidMonth: null,
invalidFormat: false,
userInvalidated: false,
iso: false,
parsedDateParts: [],
era: null,
meridiem: null,
rfc2822: false,
weekdayMismatch: false,
};
}
function getParsingFlags(m) {
if (m._pf == null) {
m._pf = defaultParsingFlags();
}
return m._pf;
}
var some;
if (Array.prototype.some) {
some = Array.prototype.some;
} else {
some = function (fun) {
var t = Object(this),
len = t.length >>> 0,
i;
for (i = 0; i < len; i++) {
if (i in t && fun.call(this, t[i], i, t)) {
return true;
}
}
return false;
};
}
function isValid(m) {
if (m._isValid == null) {
var flags = getParsingFlags(m),
parsedParts = some.call(flags.parsedDateParts, function (i) {
return i != null;
}),
isNowValid =
!isNaN(m._d.getTime()) &&
flags.overflow < 0 &&
!flags.empty &&
!flags.invalidEra &&
!flags.invalidMonth &&
!flags.invalidWeekday &&
!flags.weekdayMismatch &&
!flags.nullInput &&
!flags.invalidFormat &&
!flags.userInvalidated &&
(!flags.meridiem || (flags.meridiem && parsedParts));
if (m._strict) {
isNowValid =
isNowValid &&
flags.charsLeftOver === 0 &&
flags.unusedTokens.length === 0 &&
flags.bigHour === undefined;
}
if (Object.isFrozen == null || !Object.isFrozen(m)) {
m._isValid = isNowValid;
} else {
return isNowValid;
}
}
return m._isValid;
}
function createInvalid(flags) {
var m = createUTC(NaN);
if (flags != null) {
extend(getParsingFlags(m), flags);
} else {
getParsingFlags(m).userInvalidated = true;
}
return m;
}
// Plugins that add properties should also add the key here (null value),
// so we can properly clone ourselves.
var momentProperties = (hooks.momentProperties = []),
updateInProgress = false;
function copyConfig(to, from) {
var i,
prop,
val,
momentPropertiesLen = momentProperties.length;
if (!isUndefined(from._isAMomentObject)) {
to._isAMomentObject = from._isAMomentObject;
}
if (!isUndefined(from._i)) {
to._i = from._i;
}
if (!isUndefined(from._f)) {
to._f = from._f;
}
if (!isUndefined(from._l)) {
to._l = from._l;
}
if (!isUndefined(from._strict)) {
to._strict = from._strict;
}
if (!isUndefined(from._tzm)) {
to._tzm = from._tzm;
}
if (!isUndefined(from._isUTC)) {
to._isUTC = from._isUTC;
}
if (!isUndefined(from._offset)) {
to._offset = from._offset;
}
if (!isUndefined(from._pf)) {
to._pf = getParsingFlags(from);
}
if (!isUndefined(from._locale)) {
to._locale = from._locale;
}
if (momentPropertiesLen > 0) {
for (i = 0; i < momentPropertiesLen; i++) {
prop = momentProperties[i];
val = from[prop];
if (!isUndefined(val)) {
to[prop] = val;
}
}
}
return to;
}
// Moment prototype object
function Moment(config) {
copyConfig(this, config);
this._d = new Date(config._d != null ? config._d.getTime() : NaN);
if (!this.isValid()) {
this._d = new Date(NaN);
}
// Prevent infinite loop in case updateOffset creates new moment
// objects.
if (updateInProgress === false) {
updateInProgress = true;
hooks.updateOffset(this);
updateInProgress = false;
}
}
function isMoment(obj) {
return (
obj instanceof Moment || (obj != null && obj._isAMomentObject != null)
);
}
function warn(msg) {
if (
hooks.suppressDeprecationWarnings === false &&
typeof console !== 'undefined' &&
console.warn
) {
console.warn('Deprecation warning: ' + msg);
}
}
function deprecate(msg, fn) {
var firstTime = true;
return extend(function () {
if (hooks.deprecationHandler != null) {
hooks.deprecationHandler(null, msg);
}
if (firstTime) {
var args = [],
arg,
i,
key,
argLen = arguments.length;
for (i = 0; i < argLen; i++) {
arg = '';
if (typeof arguments[i] === 'object') {
arg += '\n[' + i + '] ';
for (key in arguments[0]) {
if (hasOwnProp(arguments[0], key)) {
arg += key + ': ' + arguments[0][key] + ', ';
}
}
arg = arg.slice(0, -2); // Remove trailing comma and space
} else {
arg = arguments[i];
}
args.push(arg);
}
warn(
msg +
'\nArguments: ' +
Array.prototype.slice.call(args).join('') +
'\n' +
new Error().stack
);
firstTime = false;
}
return fn.apply(this, arguments);
}, fn);
}
var deprecations = {};
function deprecateSimple(name, msg) {
if (hooks.deprecationHandler != null) {
hooks.deprecationHandler(name, msg);
}
if (!deprecations[name]) {
warn(msg);
deprecations[name] = true;
}
}
hooks.suppressDeprecationWarnings = false;
hooks.deprecationHandler = null;
function isFunction(input) {
return (
(typeof Function !== 'undefined' && input instanceof Function) ||
Object.prototype.toString.call(input) === '[object Function]'
);
}
function set(config) {
var prop, i;
for (i in config) {
if (hasOwnProp(config, i)) {
prop = config[i];
if (isFunction(prop)) {
this[i] = prop;
} else {
this['_' + i] = prop;
}
}
}
this._config = config;
// Lenient ordinal parsing accepts just a number in addition to
// number + (possibly) stuff coming from _dayOfMonthOrdinalParse.
// TODO: Remove "ordinalParse" fallback in next major release.
this._dayOfMonthOrdinalParseLenient = new RegExp(
(this._dayOfMonthOrdinalParse.source || this._ordinalParse.source) +
'|' +
/\d{1,2}/.source
);
}
function mergeConfigs(parentConfig, childConfig) {
var res = extend({}, parentConfig),
prop;
for (prop in childConfig) {
if (hasOwnProp(childConfig, prop)) {
if (isObject(parentConfig[prop]) && isObject(childConfig[prop])) {
res[prop] = {};
extend(res[prop], parentConfig[prop]);
extend(res[prop], childConfig[prop]);
} else if (childConfig[prop] != null) {
res[prop] = childConfig[prop];
} else {
delete res[prop];
}
}
}
for (prop in parentConfig) {
if (
hasOwnProp(parentConfig, prop) &&
!hasOwnProp(childConfig, prop) &&
isObject(parentConfig[prop])
) {
// make sure changes to properties don't modify parent config
res[prop] = extend({}, res[prop]);
}
}
return res;
}
function Locale(config) {
if (config != null) {
this.set(config);
}
}
var keys;
if (Object.keys) {
keys = Object.keys;
} else {
keys = function (obj) {
var i,
res = [];
for (i in obj) {
if (hasOwnProp(obj, i)) {
res.push(i);
}
}
return res;
};
}
var defaultCalendar = {
sameDay: '[Today at] LT',
nextDay: '[Tomorrow at] LT',
nextWeek: 'dddd [at] LT',
lastDay: '[Yesterday at] LT',
lastWeek: '[Last] dddd [at] LT',
sameElse: 'L',
};
function calendar(key, mom, now) {
var output = this._calendar[key] || this._calendar['sameElse'];
return isFunction(output) ? output.call(mom, now) : output;
}
function zeroFill(number, targetLength, forceSign) {
var absNumber = '' + Math.abs(number),
zerosToFill = targetLength - absNumber.length,
sign = number >= 0;
return (
(sign ? (forceSign ? '+' : '') : '-') +
Math.pow(10, Math.max(0, zerosToFill)).toString().substr(1) +
absNumber
);
}
var formattingTokens =
/(\[[^\[]*\])|(\\)?([Hh]mm(ss)?|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Qo?|N{1,5}|YYYYYY|YYYYY|YYYY|YY|y{2,4}|yo?|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|kk?|mm?|ss?|S{1,9}|x|X|zz?|ZZ?|.)/g,
localFormattingTokens = /(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g,
formatFunctions = {},
formatTokenFunctions = {};
// token: 'M'
// padded: ['MM', 2]
// ordinal: 'Mo'
// callback: function () { this.month() + 1 }
function addFormatToken(token, padded, ordinal, callback) {
var func = callback;
if (typeof callback === 'string') {
func = function () {
return this[callback]();
};
}
if (token) {
formatTokenFunctions[token] = func;
}
if (padded) {
formatTokenFunctions[padded[0]] = function () {
return zeroFill(func.apply(this, arguments), padded[1], padded[2]);
};
}
if (ordinal) {
formatTokenFunctions[ordinal] = function () {
return this.localeData().ordinal(
func.apply(this, arguments),
token
);
};
}
}
function removeFormattingTokens(input) {
if (input.match(/\[[\s\S]/)) {
return input.replace(/^\[|\]$/g, '');
}
return input.replace(/\\/g, '');
}
function makeFormatFunction(format) {
var array = format.match(formattingTokens),
i,
length;
for (i = 0, length = array.length; i < length; i++) {
if (formatTokenFunctions[array[i]]) {
array[i] = formatTokenFunctions[array[i]];
} else {
array[i] = removeFormattingTokens(array[i]);
}
}
return function (mom) {
var output = '',
i;
for (i = 0; i < length; i++) {
output += isFunction(array[i])
? array[i].call(mom, format)
: array[i];
}
return output;
};
}
// format date using native date object
function formatMoment(m, format) {
if (!m.isValid()) {
return m.localeData().invalidDate();
}
format = expandFormat(format, m.localeData());
formatFunctions[format] =
formatFunctions[format] || makeFormatFunction(format);
return formatFunctions[format](m);
}
function expandFormat(format, locale) {
var i = 5;
function replaceLongDateFormatTokens(input) {
return locale.longDateFormat(input) || input;
}
localFormattingTokens.lastIndex = 0;
while (i >= 0 && localFormattingTokens.test(format)) {
format = format.replace(
localFormattingTokens,
replaceLongDateFormatTokens
);
localFormattingTokens.lastIndex = 0;
i -= 1;
}
return format;
}
var defaultLongDateFormat = {
LTS: 'h:mm:ss A',
LT: 'h:mm A',
L: 'MM/DD/YYYY',
LL: 'MMMM D, YYYY',
LLL: 'MMMM D, YYYY h:mm A',
LLLL: 'dddd, MMMM D, YYYY h:mm A',
};
function longDateFormat(key) {
var format = this._longDateFormat[key],
formatUpper = this._longDateFormat[key.toUpperCase()];
if (format || !formatUpper) {
return format;
}
this._longDateFormat[key] = formatUpper
.match(formattingTokens)
.map(function (tok) {
if (
tok === 'MMMM' ||
tok === 'MM' ||
tok === 'DD' ||
tok === 'dddd'
) {
return tok.slice(1);
}
return tok;
})
.join('');
return this._longDateFormat[key];
}
var defaultInvalidDate = 'Invalid date';
function invalidDate() {
return this._invalidDate;
}
var defaultOrdinal = '%d',
defaultDayOfMonthOrdinalParse = /\d{1,2}/;
function ordinal(number) {
return this._ordinal.replace('%d', number);
}
var defaultRelativeTime = {
future: 'in %s',
past: '%s ago',
s: 'a few seconds',
ss: '%d seconds',
m: 'a minute',
mm: '%d minutes',
h: 'an hour',
hh: '%d hours',
d: 'a day',
dd: '%d days',
w: 'a week',
ww: '%d weeks',
M: 'a month',
MM: '%d months',
y: 'a year',
yy: '%d years',
};
function relativeTime(number, withoutSuffix, string, isFuture) {
var output = this._relativeTime[string];
return isFunction(output)
? output(number, withoutSuffix, string, isFuture)
: output.replace(/%d/i, number);
}
function pastFuture(diff, output) {
var format = this._relativeTime[diff > 0 ? 'future' : 'past'];
return isFunction(format) ? format(output) : format.replace(/%s/i, output);
}
var aliases = {};
function addUnitAlias(unit, shorthand) {
var lowerCase = unit.toLowerCase();
aliases[lowerCase] = aliases[lowerCase + 's'] = aliases[shorthand] = unit;
}
function normalizeUnits(units) {
return typeof units === 'string'
? aliases[units] || aliases[units.toLowerCase()]
: undefined;
}
function normalizeObjectUnits(inputObject) {
var normalizedInput = {},
normalizedProp,
prop;
for (prop in inputObject) {
if (hasOwnProp(inputObject, prop)) {
normalizedProp = normalizeUnits(prop);
if (normalizedProp) {
normalizedInput[normalizedProp] = inputObject[prop];
}
}
}
return normalizedInput;
}
var priorities = {};
function addUnitPriority(unit, priority) {
priorities[unit] = priority;
}
function getPrioritizedUnits(unitsObj) {
var units = [],
u;
for (u in unitsObj) {
if (hasOwnProp(unitsObj, u)) {
units.push({ unit: u, priority: priorities[u] });
}
}
units.sort(function (a, b) {
return a.priority - b.priority;
});
return units;
}
function isLeapYear(year) {
return (year % 4 === 0 && year % 100 !== 0) || year % 400 === 0;
}
function absFloor(number) {
if (number < 0) {
// -0 -> 0
return Math.ceil(number) || 0;
} else {
return Math.floor(number);
}
}
function toInt(argumentForCoercion) {
var coercedNumber = +argumentForCoercion,
value = 0;
if (coercedNumber !== 0 && isFinite(coercedNumber)) {
value = absFloor(coercedNumber);
}
return value;
}
function makeGetSet(unit, keepTime) {
return function (value) {
if (value != null) {
set$1(this, unit, value);
hooks.updateOffset(this, keepTime);
return this;
} else {
return get(this, unit);
}
};
}
function get(mom, unit) {
return mom.isValid()
? mom._d['get' + (mom._isUTC ? 'UTC' : '') + unit]()
: NaN;
}
function set$1(mom, unit, value) {
if (mom.isValid() && !isNaN(value)) {
if (
unit === 'FullYear' &&
isLeapYear(mom.year()) &&
mom.month() === 1 &&
mom.date() === 29
) {
value = toInt(value);
mom._d['set' + (mom._isUTC ? 'UTC' : '') + unit](
value,
mom.month(),
daysInMonth(value, mom.month())
);
} else {
mom._d['set' + (mom._isUTC ? 'UTC' : '') + unit](value);
}
}
}
// MOMENTS
function stringGet(units) {
units = normalizeUnits(units);
if (isFunction(this[units])) {
return this[units]();
}
return this;
}
function stringSet(units, value) {
if (typeof units === 'object') {
units = normalizeObjectUnits(units);
var prioritized = getPrioritizedUnits(units),
i,
prioritizedLen = prioritized.length;
for (i = 0; i < prioritizedLen; i++) {
this[prioritized[i].unit](units[prioritized[i].unit]);
}
} else {
units = normalizeUnits(units);
if (isFunction(this[units])) {
return this[units](value);
}
}
return this;
}
var match1 = /\d/, // 0 - 9
match2 = /\d\d/, // 00 - 99
match3 = /\d{3}/, // 000 - 999
match4 = /\d{4}/, // 0000 - 9999
match6 = /[+-]?\d{6}/, // -999999 - 999999
match1to2 = /\d\d?/, // 0 - 99
match3to4 = /\d\d\d\d?/, // 999 - 9999
match5to6 = /\d\d\d\d\d\d?/, // 99999 - 999999
match1to3 = /\d{1,3}/, // 0 - 999
match1to4 = /\d{1,4}/, // 0 - 9999
match1to6 = /[+-]?\d{1,6}/, // -999999 - 999999
matchUnsigned = /\d+/, // 0 - inf
matchSigned = /[+-]?\d+/, // -inf - inf
matchOffset = /Z|[+-]\d\d:?\d\d/gi, // +00:00 -00:00 +0000 -0000 or Z
matchShortOffset = /Z|[+-]\d\d(?::?\d\d)?/gi, // +00 -00 +00:00 -00:00 +0000 -0000 or Z
matchTimestamp = /[+-]?\d+(\.\d{1,3})?/, // 123456789 123456789.123
// any word (or two) characters or numbers including two/three word month in arabic.
// includes scottish gaelic two word and hyphenated months
matchWord =
/[0-9]{0,256}['a-z\u00A0-\u05FF\u0700-\uD7FF\uF900-\uFDCF\uFDF0-\uFF07\uFF10-\uFFEF]{1,256}|[\u0600-\u06FF\/]{1,256}(\s*?[\u0600-\u06FF]{1,256}){1,2}/i,
regexes;
regexes = {};
function addRegexToken(token, regex, strictRegex) {
regexes[token] = isFunction(regex)
? regex
: function (isStrict, localeData) {
return isStrict && strictRegex ? strictRegex : regex;
};
}
function getParseRegexForToken(token, config) {
if (!hasOwnProp(regexes, token)) {
return new RegExp(unescapeFormat(token));
}
return regexes[token](config._strict, config._locale);
}
// Code from http://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
function unescapeFormat(s) {
return regexEscape(
s
.replace('\\', '')
.replace(
/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,
function (matched, p1, p2, p3, p4) {
return p1 || p2 || p3 || p4;
}
)
);
}
function regexEscape(s) {
return s.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&');
}
var tokens = {};
function addParseToken(token, callback) {
var i,
func = callback,
tokenLen;
if (typeof token === 'string') {
token = [token];
}
if (isNumber(callback)) {
func = function (input, array) {
array[callback] = toInt(input);
};
}
tokenLen = token.length;
for (i = 0; i < tokenLen; i++) {
tokens[token[i]] = func;
}
}
function addWeekParseToken(token, callback) {
addParseToken(token, function (input, array, config, token) {
config._w = config._w || {};
callback(input, config._w, config, token);
});
}
function addTimeToArrayFromToken(token, input, config) {
if (input != null && hasOwnProp(tokens, token)) {
tokens[token](input, config._a, config, token);
}
}
var YEAR = 0,
MONTH = 1,
DATE = 2,
HOUR = 3,
MINUTE = 4,
SECOND = 5,
MILLISECOND = 6,
WEEK = 7,
WEEKDAY = 8;
function mod(n, x) {
return ((n % x) + x) % x;
}
var indexOf;
if (Array.prototype.indexOf) {
indexOf = Array.prototype.indexOf;
} else {
indexOf = function (o) {
// I know
var i;
for (i = 0; i < this.length; ++i) {
if (this[i] === o) {
return i;
}
}
return -1;
};
}
function daysInMonth(year, month) {
if (isNaN(year) || isNaN(month)) {
return NaN;
}
var modMonth = mod(month, 12);
year += (month - modMonth) / 12;
return modMonth === 1
? isLeapYear(year)
? 29
: 28
: 31 - ((modMonth % 7) % 2);
}
// FORMATTING
addFormatToken('M', ['MM', 2], 'Mo', function () {
return this.month() + 1;
});
addFormatToken('MMM', 0, 0, function (format) {
return this.localeData().monthsShort(this, format);
});
addFormatToken('MMMM', 0, 0, function (format) {
return this.localeData().months(this, format);
});
// ALIASES
addUnitAlias('month', 'M');
// PRIORITY
addUnitPriority('month', 8);
// PARSING
addRegexToken('M', match1to2);
addRegexToken('MM', match1to2, match2);
addRegexToken('MMM', function (isStrict, locale) {
return locale.monthsShortRegex(isStrict);
});
addRegexToken('MMMM', function (isStrict, locale) {
return locale.monthsRegex(isStrict);
});
addParseToken(['M', 'MM'], function (input, array) {
array[MONTH] = toInt(input) - 1;
});
addParseToken(['MMM', 'MMMM'], function (input, array, config, token) {
var month = config._locale.monthsParse(input, token, config._strict);
// if we didn't find a month name, mark the date as invalid.
if (month != null) {
array[MONTH] = month;
} else {
getParsingFlags(config).invalidMonth = input;
}
});
// LOCALES
var defaultLocaleMonths =
'January_February_March_April_May_June_July_August_September_October_November_December'.split(
'_'
),
defaultLocaleMonthsShort =
'Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec'.split('_'),
MONTHS_IN_FORMAT = /D[oD]?(\[[^\[\]]*\]|\s)+MMMM?/,
defaultMonthsShortRegex = matchWord,
defaultMonthsRegex = matchWord;
function localeMonths(m, format) {
if (!m) {
return isArray(this._months)
? this._months
: this._months['standalone'];
}
return isArray(this._months)
? this._months[m.month()]
: this._months[
(this._months.isFormat || MONTHS_IN_FORMAT).test(format)
? 'format'
: 'standalone'
][m.month()];
}
function localeMonthsShort(m, format) {
if (!m) {
return isArray(this._monthsShort)
? this._monthsShort
: this._monthsShort['standalone'];
}
return isArray(this._monthsShort)
? this._monthsShort[m.month()]
: this._monthsShort[
MONTHS_IN_FORMAT.test(format) ? 'format' : 'standalone'
][m.month()];
}
function handleStrictParse(monthName, format, strict) {
var i,
ii,
mom,
llc = monthName.toLocaleLowerCase();
if (!this._monthsParse) {
            // this._monthsParse is only initialized here for consistency; the strict
            // lookup below uses the long/short arrays
this._monthsParse = [];
this._longMonthsParse = [];
this._shortMonthsParse = [];
for (i = 0; i < 12; ++i) {
mom = createUTC([2000, i]);
this._shortMonthsParse[i] = this.monthsShort(
mom,
''
).toLocaleLowerCase();
this._longMonthsParse[i] = this.months(mom, '').toLocaleLowerCase();
}
}
if (strict) {
if (format === 'MMM') {
ii = indexOf.call(this._shortMonthsParse, llc);
return ii !== -1 ? ii : null;
} else {
ii = indexOf.call(this._longMonthsParse, llc);
return ii !== -1 ? ii : null;
}
} else {
if (format === 'MMM') {
ii = indexOf.call(this._shortMonthsParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._longMonthsParse, llc);
return ii !== -1 ? ii : null;
} else {
ii = indexOf.call(this._longMonthsParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._shortMonthsParse, llc);
return ii !== -1 ? ii : null;
}
}
}
function localeMonthsParse(monthName, format, strict) {
var i, mom, regex;
if (this._monthsParseExact) {
return handleStrictParse.call(this, monthName, format, strict);
}
if (!this._monthsParse) {
this._monthsParse = [];
this._longMonthsParse = [];
this._shortMonthsParse = [];
}
        // TODO: add sorting
        // Sorting makes sure that if one month (or abbr) is a prefix of another,
        // the longer one matches first; see the sorting in computeMonthsParse.
for (i = 0; i < 12; i++) {
// make the regex if we don't have it already
mom = createUTC([2000, i]);
if (strict && !this._longMonthsParse[i]) {
this._longMonthsParse[i] = new RegExp(
'^' + this.months(mom, '').replace('.', '') + '$',
'i'
);
this._shortMonthsParse[i] = new RegExp(
'^' + this.monthsShort(mom, '').replace('.', '') + '$',
'i'
);
}
if (!strict && !this._monthsParse[i]) {
regex =
'^' + this.months(mom, '') + '|^' + this.monthsShort(mom, '');
this._monthsParse[i] = new RegExp(regex.replace('.', ''), 'i');
}
// test the regex
if (
strict &&
format === 'MMMM' &&
this._longMonthsParse[i].test(monthName)
) {
return i;
} else if (
strict &&
format === 'MMM' &&
this._shortMonthsParse[i].test(monthName)
) {
return i;
} else if (!strict && this._monthsParse[i].test(monthName)) {
return i;
}
}
}
// MOMENTS
function setMonth(mom, value) {
var dayOfMonth;
if (!mom.isValid()) {
// No op
return mom;
}
if (typeof value === 'string') {
if (/^\d+$/.test(value)) {
value = toInt(value);
} else {
value = mom.localeData().monthsParse(value);
// TODO: Another silent failure?
if (!isNumber(value)) {
return mom;
}
}
}
dayOfMonth = Math.min(mom.date(), daysInMonth(mom.year(), value));
mom._d['set' + (mom._isUTC ? 'UTC' : '') + 'Month'](value, dayOfMonth);
return mom;
}
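    // Note that setting the month clamps the day-of-month instead of overflowing: a
    // moment on Jan 31 set to month 1 (February) lands on Feb 28, or Feb 29 in a leap year.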
function getSetMonth(value) {
if (value != null) {
setMonth(this, value);
hooks.updateOffset(this, true);
return this;
} else {
return get(this, 'Month');
}
}
function getDaysInMonth() {
return daysInMonth(this.year(), this.month());
}
function monthsShortRegex(isStrict) {
if (this._monthsParseExact) {
if (!hasOwnProp(this, '_monthsRegex')) {
computeMonthsParse.call(this);
}
if (isStrict) {
return this._monthsShortStrictRegex;
} else {
return this._monthsShortRegex;
}
} else {
if (!hasOwnProp(this, '_monthsShortRegex')) {
this._monthsShortRegex = defaultMonthsShortRegex;
}
return this._monthsShortStrictRegex && isStrict
? this._monthsShortStrictRegex
: this._monthsShortRegex;
}
}
function monthsRegex(isStrict) {
if (this._monthsParseExact) {
if (!hasOwnProp(this, '_monthsRegex')) {
computeMonthsParse.call(this);
}
if (isStrict) {
return this._monthsStrictRegex;
} else {
return this._monthsRegex;
}
} else {
if (!hasOwnProp(this, '_monthsRegex')) {
this._monthsRegex = defaultMonthsRegex;
}
return this._monthsStrictRegex && isStrict
? this._monthsStrictRegex
: this._monthsRegex;
}
}
function computeMonthsParse() {
function cmpLenRev(a, b) {
return b.length - a.length;
}
var shortPieces = [],
longPieces = [],
mixedPieces = [],
i,
mom;
for (i = 0; i < 12; i++) {
// make the regex if we don't have it already
mom = createUTC([2000, i]);
shortPieces.push(this.monthsShort(mom, ''));
longPieces.push(this.months(mom, ''));
mixedPieces.push(this.months(mom, ''));
mixedPieces.push(this.monthsShort(mom, ''));
}
// Sorting makes sure if one month (or abbr) is a prefix of another it
// will match the longer piece.
shortPieces.sort(cmpLenRev);
longPieces.sort(cmpLenRev);
mixedPieces.sort(cmpLenRev);
for (i = 0; i < 12; i++) {
shortPieces[i] = regexEscape(shortPieces[i]);
longPieces[i] = regexEscape(longPieces[i]);
}
for (i = 0; i < 24; i++) {
mixedPieces[i] = regexEscape(mixedPieces[i]);
}
this._monthsRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');
this._monthsShortRegex = this._monthsRegex;
this._monthsStrictRegex = new RegExp(
'^(' + longPieces.join('|') + ')',
'i'
);
this._monthsShortStrictRegex = new RegExp(
'^(' + shortPieces.join('|') + ')',
'i'
);
}
// FORMATTING
addFormatToken('Y', 0, 0, function () {
var y = this.year();
return y <= 9999 ? zeroFill(y, 4) : '+' + y;
});
addFormatToken(0, ['YY', 2], 0, function () {
return this.year() % 100;
});
addFormatToken(0, ['YYYY', 4], 0, 'year');
addFormatToken(0, ['YYYYY', 5], 0, 'year');
addFormatToken(0, ['YYYYYY', 6, true], 0, 'year');
// ALIASES
addUnitAlias('year', 'y');
// PRIORITIES
addUnitPriority('year', 1);
// PARSING
addRegexToken('Y', matchSigned);
addRegexToken('YY', match1to2, match2);
addRegexToken('YYYY', match1to4, match4);
addRegexToken('YYYYY', match1to6, match6);
addRegexToken('YYYYYY', match1to6, match6);
addParseToken(['YYYYY', 'YYYYYY'], YEAR);
addParseToken('YYYY', function (input, array) {
array[YEAR] =
input.length === 2 ? hooks.parseTwoDigitYear(input) : toInt(input);
});
addParseToken('YY', function (input, array) {
array[YEAR] = hooks.parseTwoDigitYear(input);
});
addParseToken('Y', function (input, array) {
array[YEAR] = parseInt(input, 10);
});
// HELPERS
function daysInYear(year) {
return isLeapYear(year) ? 366 : 365;
}
// HOOKS
hooks.parseTwoDigitYear = function (input) {
return toInt(input) + (toInt(input) > 68 ? 1900 : 2000);
};
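    // With this default hook, two-digit years 00-68 map to 2000-2068 and 69-99 map to
    // 1969-1999, e.g. parseTwoDigitYear('68') === 2068 and parseTwoDigitYear('69') === 1969.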
// MOMENTS
var getSetYear = makeGetSet('FullYear', true);
function getIsLeapYear() {
return isLeapYear(this.year());
}
function createDate(y, m, d, h, M, s, ms) {
// can't just apply() to create a date:
// https://stackoverflow.com/q/181348
var date;
// the date constructor remaps years 0-99 to 1900-1999
if (y < 100 && y >= 0) {
// preserve leap years using a full 400 year cycle, then reset
date = new Date(y + 400, m, d, h, M, s, ms);
if (isFinite(date.getFullYear())) {
date.setFullYear(y);
}
} else {
date = new Date(y, m, d, h, M, s, ms);
}
return date;
}
function createUTCDate(y) {
var date, args;
// the Date.UTC function remaps years 0-99 to 1900-1999
if (y < 100 && y >= 0) {
args = Array.prototype.slice.call(arguments);
// preserve leap years using a full 400 year cycle, then reset
args[0] = y + 400;
date = new Date(Date.UTC.apply(null, args));
if (isFinite(date.getUTCFullYear())) {
date.setUTCFullYear(y);
}
} else {
date = new Date(Date.UTC.apply(null, arguments));
}
return date;
}
// start-of-first-week - start-of-year
function firstWeekOffset(year, dow, doy) {
        var // first-week day -- the January date that is always in the first week (the 4th for ISO, the 1st otherwise)
fwd = 7 + dow - doy,
// first-week day local weekday -- which local weekday is fwd
fwdlw = (7 + createUTCDate(year, 0, fwd).getUTCDay() - dow) % 7;
return -fwdlw + fwd - 1;
}
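    // For illustration, with the ISO settings (dow = 1, doy = 4):
    // firstWeekOffset(2021, 1, 4) === 3  (ISO week 1 of 2021 starts on Jan 4), and
    // firstWeekOffset(2015, 1, 4) === -3 (ISO week 1 of 2015 starts on Dec 29, 2014).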
// https://en.wikipedia.org/wiki/ISO_week_date#Calculating_a_date_given_the_year.2C_week_number_and_weekday
function dayOfYearFromWeeks(year, week, weekday, dow, doy) {
var localWeekday = (7 + weekday - dow) % 7,
weekOffset = firstWeekOffset(year, dow, doy),
dayOfYear = 1 + 7 * (week - 1) + localWeekday + weekOffset,
resYear,
resDayOfYear;
if (dayOfYear <= 0) {
resYear = year - 1;
resDayOfYear = daysInYear(resYear) + dayOfYear;
} else if (dayOfYear > daysInYear(year)) {
resYear = year + 1;
resDayOfYear = dayOfYear - daysInYear(year);
} else {
resYear = year;
resDayOfYear = dayOfYear;
}
return {
year: resYear,
dayOfYear: resDayOfYear,
};
}
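    // For illustration, with the ISO settings (dow = 1, doy = 4):
    // dayOfYearFromWeeks(2021, 1, 1, 1, 4) -> { year: 2021, dayOfYear: 4 },
    // i.e. the Monday of ISO week 1 of 2021 is Jan 4.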
function weekOfYear(mom, dow, doy) {
var weekOffset = firstWeekOffset(mom.year(), dow, doy),
week = Math.floor((mom.dayOfYear() - weekOffset - 1) / 7) + 1,
resWeek,
resYear;
if (week < 1) {
resYear = mom.year() - 1;
resWeek = week + weeksInYear(resYear, dow, doy);
} else if (week > weeksInYear(mom.year(), dow, doy)) {
resWeek = week - weeksInYear(mom.year(), dow, doy);
resYear = mom.year() + 1;
} else {
resYear = mom.year();
resWeek = week;
}
return {
week: resWeek,
year: resYear,
};
}
function weeksInYear(year, dow, doy) {
var weekOffset = firstWeekOffset(year, dow, doy),
weekOffsetNext = firstWeekOffset(year + 1, dow, doy);
return (daysInYear(year) - weekOffset + weekOffsetNext) / 7;
}
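    // e.g. with the ISO settings, weeksInYear(2020, 1, 4) === 53 and weeksInYear(2021, 1, 4) === 52.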
// FORMATTING
addFormatToken('w', ['ww', 2], 'wo', 'week');
addFormatToken('W', ['WW', 2], 'Wo', 'isoWeek');
// ALIASES
addUnitAlias('week', 'w');
addUnitAlias('isoWeek', 'W');
// PRIORITIES
addUnitPriority('week', 5);
addUnitPriority('isoWeek', 5);
// PARSING
addRegexToken('w', match1to2);
addRegexToken('ww', match1to2, match2);
addRegexToken('W', match1to2);
addRegexToken('WW', match1to2, match2);
addWeekParseToken(
['w', 'ww', 'W', 'WW'],
function (input, week, config, token) {
week[token.substr(0, 1)] = toInt(input);
}
);
// HELPERS
// LOCALES
function localeWeek(mom) {
return weekOfYear(mom, this._week.dow, this._week.doy).week;
}
var defaultLocaleWeek = {
dow: 0, // Sunday is the first day of the week.
doy: 6, // The week that contains Jan 6th is the first week of the year.
};
function localeFirstDayOfWeek() {
return this._week.dow;
}
function localeFirstDayOfYear() {
return this._week.doy;
}
// MOMENTS
function getSetWeek(input) {
var week = this.localeData().week(this);
return input == null ? week : this.add((input - week) * 7, 'd');
}
function getSetISOWeek(input) {
var week = weekOfYear(this, 1, 4).week;
return input == null ? week : this.add((input - week) * 7, 'd');
}
// FORMATTING
addFormatToken('d', 0, 'do', 'day');
addFormatToken('dd', 0, 0, function (format) {
return this.localeData().weekdaysMin(this, format);
});
addFormatToken('ddd', 0, 0, function (format) {
return this.localeData().weekdaysShort(this, format);
});
addFormatToken('dddd', 0, 0, function (format) {
return this.localeData().weekdays(this, format);
});
addFormatToken('e', 0, 0, 'weekday');
addFormatToken('E', 0, 0, 'isoWeekday');
// ALIASES
addUnitAlias('day', 'd');
addUnitAlias('weekday', 'e');
addUnitAlias('isoWeekday', 'E');
// PRIORITY
addUnitPriority('day', 11);
addUnitPriority('weekday', 11);
addUnitPriority('isoWeekday', 11);
// PARSING
addRegexToken('d', match1to2);
addRegexToken('e', match1to2);
addRegexToken('E', match1to2);
addRegexToken('dd', function (isStrict, locale) {
return locale.weekdaysMinRegex(isStrict);
});
addRegexToken('ddd', function (isStrict, locale) {
return locale.weekdaysShortRegex(isStrict);
});
addRegexToken('dddd', function (isStrict, locale) {
return locale.weekdaysRegex(isStrict);
});
addWeekParseToken(['dd', 'ddd', 'dddd'], function (input, week, config, token) {
var weekday = config._locale.weekdaysParse(input, token, config._strict);
// if we didn't get a weekday name, mark the date as invalid
if (weekday != null) {
week.d = weekday;
} else {
getParsingFlags(config).invalidWeekday = input;
}
});
addWeekParseToken(['d', 'e', 'E'], function (input, week, config, token) {
week[token] = toInt(input);
});
// HELPERS
function parseWeekday(input, locale) {
if (typeof input !== 'string') {
return input;
}
if (!isNaN(input)) {
return parseInt(input, 10);
}
input = locale.weekdaysParse(input);
if (typeof input === 'number') {
return input;
}
return null;
}
function parseIsoWeekday(input, locale) {
if (typeof input === 'string') {
return locale.weekdaysParse(input) % 7 || 7;
}
return isNaN(input) ? null : input;
}
// LOCALES
function shiftWeekdays(ws, n) {
return ws.slice(n, 7).concat(ws.slice(0, n));
}
var defaultLocaleWeekdays =
'Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday'.split('_'),
defaultLocaleWeekdaysShort = 'Sun_Mon_Tue_Wed_Thu_Fri_Sat'.split('_'),
defaultLocaleWeekdaysMin = 'Su_Mo_Tu_We_Th_Fr_Sa'.split('_'),
defaultWeekdaysRegex = matchWord,
defaultWeekdaysShortRegex = matchWord,
defaultWeekdaysMinRegex = matchWord;
function localeWeekdays(m, format) {
var weekdays = isArray(this._weekdays)
? this._weekdays
: this._weekdays[
m && m !== true && this._weekdays.isFormat.test(format)
? 'format'
: 'standalone'
];
return m === true
? shiftWeekdays(weekdays, this._week.dow)
: m
? weekdays[m.day()]
: weekdays;
}
function localeWeekdaysShort(m) {
return m === true
? shiftWeekdays(this._weekdaysShort, this._week.dow)
: m
? this._weekdaysShort[m.day()]
: this._weekdaysShort;
}
function localeWeekdaysMin(m) {
return m === true
? shiftWeekdays(this._weekdaysMin, this._week.dow)
: m
? this._weekdaysMin[m.day()]
: this._weekdaysMin;
}
function handleStrictParse$1(weekdayName, format, strict) {
var i,
ii,
mom,
llc = weekdayName.toLocaleLowerCase();
if (!this._weekdaysParse) {
this._weekdaysParse = [];
this._shortWeekdaysParse = [];
this._minWeekdaysParse = [];
for (i = 0; i < 7; ++i) {
mom = createUTC([2000, 1]).day(i);
this._minWeekdaysParse[i] = this.weekdaysMin(
mom,
''
).toLocaleLowerCase();
this._shortWeekdaysParse[i] = this.weekdaysShort(
mom,
''
).toLocaleLowerCase();
this._weekdaysParse[i] = this.weekdays(mom, '').toLocaleLowerCase();
}
}
if (strict) {
if (format === 'dddd') {
ii = indexOf.call(this._weekdaysParse, llc);
return ii !== -1 ? ii : null;
} else if (format === 'ddd') {
ii = indexOf.call(this._shortWeekdaysParse, llc);
return ii !== -1 ? ii : null;
} else {
ii = indexOf.call(this._minWeekdaysParse, llc);
return ii !== -1 ? ii : null;
}
} else {
if (format === 'dddd') {
ii = indexOf.call(this._weekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._shortWeekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._minWeekdaysParse, llc);
return ii !== -1 ? ii : null;
} else if (format === 'ddd') {
ii = indexOf.call(this._shortWeekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._weekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._minWeekdaysParse, llc);
return ii !== -1 ? ii : null;
} else {
ii = indexOf.call(this._minWeekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._weekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._shortWeekdaysParse, llc);
return ii !== -1 ? ii : null;
}
}
}
function localeWeekdaysParse(weekdayName, format, strict) {
var i, mom, regex;
if (this._weekdaysParseExact) {
return handleStrictParse$1.call(this, weekdayName, format, strict);
}
if (!this._weekdaysParse) {
this._weekdaysParse = [];
this._minWeekdaysParse = [];
this._shortWeekdaysParse = [];
this._fullWeekdaysParse = [];
}
for (i = 0; i < 7; i++) {
// make the regex if we don't have it already
mom = createUTC([2000, 1]).day(i);
if (strict && !this._fullWeekdaysParse[i]) {
this._fullWeekdaysParse[i] = new RegExp(
'^' + this.weekdays(mom, '').replace('.', '\\.?') + '$',
'i'
);
this._shortWeekdaysParse[i] = new RegExp(
'^' + this.weekdaysShort(mom, '').replace('.', '\\.?') + '$',
'i'
);
this._minWeekdaysParse[i] = new RegExp(
'^' + this.weekdaysMin(mom, '').replace('.', '\\.?') + '$',
'i'
);
}
if (!this._weekdaysParse[i]) {
regex =
'^' +
this.weekdays(mom, '') +
'|^' +
this.weekdaysShort(mom, '') +
'|^' +
this.weekdaysMin(mom, '');
this._weekdaysParse[i] = new RegExp(regex.replace('.', ''), 'i');
}
// test the regex
if (
strict &&
format === 'dddd' &&
this._fullWeekdaysParse[i].test(weekdayName)
) {
return i;
} else if (
strict &&
format === 'ddd' &&
this._shortWeekdaysParse[i].test(weekdayName)
) {
return i;
} else if (
strict &&
format === 'dd' &&
this._minWeekdaysParse[i].test(weekdayName)
) {
return i;
} else if (!strict && this._weekdaysParse[i].test(weekdayName)) {
return i;
}
}
}
// MOMENTS
function getSetDayOfWeek(input) {
if (!this.isValid()) {
return input != null ? this : NaN;
}
var day = this._isUTC ? this._d.getUTCDay() : this._d.getDay();
if (input != null) {
input = parseWeekday(input, this.localeData());
return this.add(input - day, 'd');
} else {
return day;
}
}
function getSetLocaleDayOfWeek(input) {
if (!this.isValid()) {
return input != null ? this : NaN;
}
var weekday = (this.day() + 7 - this.localeData()._week.dow) % 7;
return input == null ? weekday : this.add(input - weekday, 'd');
}
function getSetISODayOfWeek(input) {
if (!this.isValid()) {
return input != null ? this : NaN;
}
// behaves the same as moment#day except
// as a getter, returns 7 instead of 0 (1-7 range instead of 0-6)
        // as a setter, Sunday should belong to the previous week.
if (input != null) {
var weekday = parseIsoWeekday(input, this.localeData());
return this.day(this.day() % 7 ? weekday : weekday - 7);
} else {
return this.day() || 7;
}
}
function weekdaysRegex(isStrict) {
if (this._weekdaysParseExact) {
if (!hasOwnProp(this, '_weekdaysRegex')) {
computeWeekdaysParse.call(this);
}
if (isStrict) {
return this._weekdaysStrictRegex;
} else {
return this._weekdaysRegex;
}
} else {
if (!hasOwnProp(this, '_weekdaysRegex')) {
this._weekdaysRegex = defaultWeekdaysRegex;
}
return this._weekdaysStrictRegex && isStrict
? this._weekdaysStrictRegex
: this._weekdaysRegex;
}
}
function weekdaysShortRegex(isStrict) {
if (this._weekdaysParseExact) {
if (!hasOwnProp(this, '_weekdaysRegex')) {
computeWeekdaysParse.call(this);
}
if (isStrict) {
return this._weekdaysShortStrictRegex;
} else {
return this._weekdaysShortRegex;
}
} else {
if (!hasOwnProp(this, '_weekdaysShortRegex')) {
this._weekdaysShortRegex = defaultWeekdaysShortRegex;
}
return this._weekdaysShortStrictRegex && isStrict
? this._weekdaysShortStrictRegex
: this._weekdaysShortRegex;
}
}
function weekdaysMinRegex(isStrict) {
if (this._weekdaysParseExact) {
if (!hasOwnProp(this, '_weekdaysRegex')) {
computeWeekdaysParse.call(this);
}
if (isStrict) {
return this._weekdaysMinStrictRegex;
} else {
return this._weekdaysMinRegex;
}
} else {
if (!hasOwnProp(this, '_weekdaysMinRegex')) {
this._weekdaysMinRegex = defaultWeekdaysMinRegex;
}
return this._weekdaysMinStrictRegex && isStrict
? this._weekdaysMinStrictRegex
: this._weekdaysMinRegex;
}
}
function computeWeekdaysParse() {
function cmpLenRev(a, b) {
return b.length - a.length;
}
var minPieces = [],
shortPieces = [],
longPieces = [],
mixedPieces = [],
i,
mom,
minp,
shortp,
longp;
for (i = 0; i < 7; i++) {
// make the regex if we don't have it already
mom = createUTC([2000, 1]).day(i);
minp = regexEscape(this.weekdaysMin(mom, ''));
shortp = regexEscape(this.weekdaysShort(mom, ''));
longp = regexEscape(this.weekdays(mom, ''));
minPieces.push(minp);
shortPieces.push(shortp);
longPieces.push(longp);
mixedPieces.push(minp);
mixedPieces.push(shortp);
mixedPieces.push(longp);
}
// Sorting makes sure if one weekday (or abbr) is a prefix of another it
// will match the longer piece.
minPieces.sort(cmpLenRev);
shortPieces.sort(cmpLenRev);
longPieces.sort(cmpLenRev);
mixedPieces.sort(cmpLenRev);
this._weekdaysRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');
this._weekdaysShortRegex = this._weekdaysRegex;
this._weekdaysMinRegex = this._weekdaysRegex;
this._weekdaysStrictRegex = new RegExp(
'^(' + longPieces.join('|') + ')',
'i'
);
this._weekdaysShortStrictRegex = new RegExp(
'^(' + shortPieces.join('|') + ')',
'i'
);
this._weekdaysMinStrictRegex = new RegExp(
'^(' + minPieces.join('|') + ')',
'i'
);
}
// FORMATTING
function hFormat() {
return this.hours() % 12 || 12;
}
function kFormat() {
return this.hours() || 24;
}
addFormatToken('H', ['HH', 2], 0, 'hour');
addFormatToken('h', ['hh', 2], 0, hFormat);
addFormatToken('k', ['kk', 2], 0, kFormat);
addFormatToken('hmm', 0, 0, function () {
return '' + hFormat.apply(this) + zeroFill(this.minutes(), 2);
});
addFormatToken('hmmss', 0, 0, function () {
return (
'' +
hFormat.apply(this) +
zeroFill(this.minutes(), 2) +
zeroFill(this.seconds(), 2)
);
});
addFormatToken('Hmm', 0, 0, function () {
return '' + this.hours() + zeroFill(this.minutes(), 2);
});
addFormatToken('Hmmss', 0, 0, function () {
return (
'' +
this.hours() +
zeroFill(this.minutes(), 2) +
zeroFill(this.seconds(), 2)
);
});
function meridiem(token, lowercase) {
addFormatToken(token, 0, 0, function () {
return this.localeData().meridiem(
this.hours(),
this.minutes(),
lowercase
);
});
}
meridiem('a', true);
meridiem('A', false);
// ALIASES
addUnitAlias('hour', 'h');
// PRIORITY
addUnitPriority('hour', 13);
// PARSING
function matchMeridiem(isStrict, locale) {
return locale._meridiemParse;
}
addRegexToken('a', matchMeridiem);
addRegexToken('A', matchMeridiem);
addRegexToken('H', match1to2);
addRegexToken('h', match1to2);
addRegexToken('k', match1to2);
addRegexToken('HH', match1to2, match2);
addRegexToken('hh', match1to2, match2);
addRegexToken('kk', match1to2, match2);
addRegexToken('hmm', match3to4);
addRegexToken('hmmss', match5to6);
addRegexToken('Hmm', match3to4);
addRegexToken('Hmmss', match5to6);
addParseToken(['H', 'HH'], HOUR);
addParseToken(['k', 'kk'], function (input, array, config) {
var kInput = toInt(input);
array[HOUR] = kInput === 24 ? 0 : kInput;
});
addParseToken(['a', 'A'], function (input, array, config) {
config._isPm = config._locale.isPM(input);
config._meridiem = input;
});
addParseToken(['h', 'hh'], function (input, array, config) {
array[HOUR] = toInt(input);
getParsingFlags(config).bigHour = true;
});
addParseToken('hmm', function (input, array, config) {
var pos = input.length - 2;
array[HOUR] = toInt(input.substr(0, pos));
array[MINUTE] = toInt(input.substr(pos));
getParsingFlags(config).bigHour = true;
});
addParseToken('hmmss', function (input, array, config) {
var pos1 = input.length - 4,
pos2 = input.length - 2;
array[HOUR] = toInt(input.substr(0, pos1));
array[MINUTE] = toInt(input.substr(pos1, 2));
array[SECOND] = toInt(input.substr(pos2));
getParsingFlags(config).bigHour = true;
});
addParseToken('Hmm', function (input, array, config) {
var pos = input.length - 2;
array[HOUR] = toInt(input.substr(0, pos));
array[MINUTE] = toInt(input.substr(pos));
});
addParseToken('Hmmss', function (input, array, config) {
var pos1 = input.length - 4,
pos2 = input.length - 2;
array[HOUR] = toInt(input.substr(0, pos1));
array[MINUTE] = toInt(input.substr(pos1, 2));
array[SECOND] = toInt(input.substr(pos2));
});
// LOCALES
function localeIsPM(input) {
// IE8 Quirks Mode & IE7 Standards Mode do not allow accessing strings like arrays
// Using charAt should be more compatible.
return (input + '').toLowerCase().charAt(0) === 'p';
}
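    // e.g. localeIsPM('PM'), localeIsPM('pm') and localeIsPM('P.M.') are all true,
    // while localeIsPM('AM') is false.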
var defaultLocaleMeridiemParse = /[ap]\.?m?\.?/i,
// Setting the hour should keep the time, because the user explicitly
// specified which hour they want. So trying to maintain the same hour (in
// a new timezone) makes sense. Adding/subtracting hours does not follow
// this rule.
getSetHour = makeGetSet('Hours', true);
function localeMeridiem(hours, minutes, isLower) {
if (hours > 11) {
return isLower ? 'pm' : 'PM';
} else {
return isLower ? 'am' : 'AM';
}
}
var baseConfig = {
calendar: defaultCalendar,
longDateFormat: defaultLongDateFormat,
invalidDate: defaultInvalidDate,
ordinal: defaultOrdinal,
dayOfMonthOrdinalParse: defaultDayOfMonthOrdinalParse,
relativeTime: defaultRelativeTime,
months: defaultLocaleMonths,
monthsShort: defaultLocaleMonthsShort,
week: defaultLocaleWeek,
weekdays: defaultLocaleWeekdays,
weekdaysMin: defaultLocaleWeekdaysMin,
weekdaysShort: defaultLocaleWeekdaysShort,
meridiemParse: defaultLocaleMeridiemParse,
};
// internal storage for locale config files
var locales = {},
localeFamilies = {},
globalLocale;
function commonPrefix(arr1, arr2) {
var i,
minl = Math.min(arr1.length, arr2.length);
for (i = 0; i < minl; i += 1) {
if (arr1[i] !== arr2[i]) {
return i;
}
}
return minl;
}
function normalizeLocale(key) {
return key ? key.toLowerCase().replace('_', '-') : key;
}
    // pick the locale from the array
    // try ['en-au', 'en-gb'] as 'en-au', 'en-gb', 'en', i.e. move through the list trying each
    // substring from most specific to least specific, but move on to the next array item if it
    // is a more specific variant than the current root
function chooseLocale(names) {
var i = 0,
j,
next,
locale,
split;
while (i < names.length) {
split = normalizeLocale(names[i]).split('-');
j = split.length;
next = normalizeLocale(names[i + 1]);
next = next ? next.split('-') : null;
while (j > 0) {
locale = loadLocale(split.slice(0, j).join('-'));
if (locale) {
return locale;
}
if (
next &&
next.length >= j &&
commonPrefix(split, next) >= j - 1
) {
//the next array item is better than a shallower substring of this one
break;
}
j--;
}
i++;
}
return globalLocale;
}
function isLocaleNameSane(name) {
// Prevent names that look like filesystem paths, i.e contain '/' or '\'
return name.match('^[^/\\\\]*$') != null;
}
function loadLocale(name) {
var oldLocale = null,
aliasedRequire;
// TODO: Find a better way to register and load all the locales in Node
if (
locales[name] === undefined &&
typeof module !== 'undefined' &&
module &&
module.exports &&
isLocaleNameSane(name)
) {
try {
oldLocale = globalLocale._abbr;
aliasedRequire = require;
aliasedRequire('./locale/' + name);
getSetGlobalLocale(oldLocale);
} catch (e) {
// mark as not found to avoid repeating expensive file require call causing high CPU
// when trying to find en-US, en_US, en-us for every format call
locales[name] = null; // null means not found
}
}
return locales[name];
}
// This function will load locale and then set the global locale. If
// no arguments are passed in, it will simply return the current global
// locale key.
function getSetGlobalLocale(key, values) {
var data;
if (key) {
if (isUndefined(values)) {
data = getLocale(key);
} else {
data = defineLocale(key, values);
}
if (data) {
// moment.duration._locale = moment._locale = data;
globalLocale = data;
} else {
if (typeof console !== 'undefined' && console.warn) {
//warn user if arguments are passed but the locale could not be set
console.warn(
'Locale ' + key + ' not found. Did you forget to load it?'
);
}
}
}
return globalLocale._abbr;
}
function defineLocale(name, config) {
if (config !== null) {
var locale,
parentConfig = baseConfig;
config.abbr = name;
if (locales[name] != null) {
deprecateSimple(
'defineLocaleOverride',
'use moment.updateLocale(localeName, config) to change ' +
'an existing locale. moment.defineLocale(localeName, ' +
'config) should only be used for creating a new locale ' +
'See http://momentjs.com/guides/#/warnings/define-locale/ for more info.'
);
parentConfig = locales[name]._config;
} else if (config.parentLocale != null) {
if (locales[config.parentLocale] != null) {
parentConfig = locales[config.parentLocale]._config;
} else {
locale = loadLocale(config.parentLocale);
if (locale != null) {
parentConfig = locale._config;
} else {
if (!localeFamilies[config.parentLocale]) {
localeFamilies[config.parentLocale] = [];
}
localeFamilies[config.parentLocale].push({
name: name,
config: config,
});
return null;
}
}
}
locales[name] = new Locale(mergeConfigs(parentConfig, config));
if (localeFamilies[name]) {
localeFamilies[name].forEach(function (x) {
defineLocale(x.name, x.config);
});
}
// backwards compat for now: also set the locale
// make sure we set the locale AFTER all child locales have been
// created, so we won't end up with the child locale set.
getSetGlobalLocale(name);
return locales[name];
} else {
// useful for testing
delete locales[name];
return null;
}
}
function updateLocale(name, config) {
if (config != null) {
var locale,
tmpLocale,
parentConfig = baseConfig;
if (locales[name] != null && locales[name].parentLocale != null) {
// Update existing child locale in-place to avoid memory-leaks
locales[name].set(mergeConfigs(locales[name]._config, config));
} else {
// MERGE
tmpLocale = loadLocale(name);
if (tmpLocale != null) {
parentConfig = tmpLocale._config;
}
config = mergeConfigs(parentConfig, config);
if (tmpLocale == null) {
// updateLocale is called for creating a new locale
// Set abbr so it will have a name (getters return
// undefined otherwise).
config.abbr = name;
}
locale = new Locale(config);
locale.parentLocale = locales[name];
locales[name] = locale;
}
// backwards compat for now: also set the locale
getSetGlobalLocale(name);
} else {
            // pass null for config to undo a previous updateLocale (useful for tests)
if (locales[name] != null) {
if (locales[name].parentLocale != null) {
locales[name] = locales[name].parentLocale;
if (name === getSetGlobalLocale()) {
getSetGlobalLocale(name);
}
} else if (locales[name] != null) {
delete locales[name];
}
}
}
return locales[name];
}
// returns locale data
function getLocale(key) {
var locale;
if (key && key._locale && key._locale._abbr) {
key = key._locale._abbr;
}
if (!key) {
return globalLocale;
}
if (!isArray(key)) {
//short-circuit everything else
locale = loadLocale(key);
if (locale) {
return locale;
}
key = [key];
}
return chooseLocale(key);
}
function listLocales() {
return keys(locales);
}
function checkOverflow(m) {
var overflow,
a = m._a;
if (a && getParsingFlags(m).overflow === -2) {
overflow =
a[MONTH] < 0 || a[MONTH] > 11
? MONTH
: a[DATE] < 1 || a[DATE] > daysInMonth(a[YEAR], a[MONTH])
? DATE
: a[HOUR] < 0 ||
a[HOUR] > 24 ||
(a[HOUR] === 24 &&
(a[MINUTE] !== 0 ||
a[SECOND] !== 0 ||
a[MILLISECOND] !== 0))
? HOUR
: a[MINUTE] < 0 || a[MINUTE] > 59
? MINUTE
: a[SECOND] < 0 || a[SECOND] > 59
? SECOND
: a[MILLISECOND] < 0 || a[MILLISECOND] > 999
? MILLISECOND
: -1;
if (
getParsingFlags(m)._overflowDayOfYear &&
(overflow < YEAR || overflow > DATE)
) {
overflow = DATE;
}
if (getParsingFlags(m)._overflowWeeks && overflow === -1) {
overflow = WEEK;
}
if (getParsingFlags(m)._overflowWeekday && overflow === -1) {
overflow = WEEKDAY;
}
getParsingFlags(m).overflow = overflow;
}
return m;
}
// iso 8601 regex
    // 0000-00-00, 0000-W00 or 0000-W00-0 + T + 00, 00:00, 00:00:00 or 00:00:00.000 + (+00:00, +0000 or +00)
var extendedIsoRegex =
/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([+-]\d\d(?::?\d\d)?|\s*Z)?)?$/,
basicIsoRegex =
/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d|))(?:(T| )(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([+-]\d\d(?::?\d\d)?|\s*Z)?)?$/,
tzRegex = /Z|[+-]\d\d(?::?\d\d)?/,
isoDates = [
['YYYYYY-MM-DD', /[+-]\d{6}-\d\d-\d\d/],
['YYYY-MM-DD', /\d{4}-\d\d-\d\d/],
['GGGG-[W]WW-E', /\d{4}-W\d\d-\d/],
['GGGG-[W]WW', /\d{4}-W\d\d/, false],
['YYYY-DDD', /\d{4}-\d{3}/],
['YYYY-MM', /\d{4}-\d\d/, false],
['YYYYYYMMDD', /[+-]\d{10}/],
['YYYYMMDD', /\d{8}/],
['GGGG[W]WWE', /\d{4}W\d{3}/],
['GGGG[W]WW', /\d{4}W\d{2}/, false],
['YYYYDDD', /\d{7}/],
['YYYYMM', /\d{6}/, false],
['YYYY', /\d{4}/, false],
],
// iso time formats and regexes
isoTimes = [
['HH:mm:ss.SSSS', /\d\d:\d\d:\d\d\.\d+/],
['HH:mm:ss,SSSS', /\d\d:\d\d:\d\d,\d+/],
['HH:mm:ss', /\d\d:\d\d:\d\d/],
['HH:mm', /\d\d:\d\d/],
['HHmmss.SSSS', /\d\d\d\d\d\d\.\d+/],
['HHmmss,SSSS', /\d\d\d\d\d\d,\d+/],
['HHmmss', /\d\d\d\d\d\d/],
['HHmm', /\d\d\d\d/],
['HH', /\d\d/],
],
aspNetJsonRegex = /^\/?Date\((-?\d+)/i,
// RFC 2822 regex: For details see https://tools.ietf.org/html/rfc2822#section-3.3
rfc2822 =
/^(?:(Mon|Tue|Wed|Thu|Fri|Sat|Sun),?\s)?(\d{1,2})\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s(\d{2,4})\s(\d\d):(\d\d)(?::(\d\d))?\s(?:(UT|GMT|[ECMP][SD]T)|([Zz])|([+-]\d{4}))$/,
obsOffsets = {
UT: 0,
GMT: 0,
EDT: -4 * 60,
EST: -5 * 60,
CDT: -5 * 60,
CST: -6 * 60,
MDT: -6 * 60,
MST: -7 * 60,
PDT: -7 * 60,
PST: -8 * 60,
};
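    // For illustration, the declarations above accept extended ISO strings such as
    // '2013-02-08T09:30:26.123+07:00' and '2013-W06-5T09:30', basic ISO strings such as
    // '20130208T0930', and RFC 2822 strings such as 'Tue, 01 Nov 2016 01:23:45 GMT'.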
// date from iso format
function configFromISO(config) {
var i,
l,
string = config._i,
match = extendedIsoRegex.exec(string) || basicIsoRegex.exec(string),
allowTime,
dateFormat,
timeFormat,
tzFormat,
isoDatesLen = isoDates.length,
isoTimesLen = isoTimes.length;
if (match) {
getParsingFlags(config).iso = true;
for (i = 0, l = isoDatesLen; i < l; i++) {
if (isoDates[i][1].exec(match[1])) {
dateFormat = isoDates[i][0];
allowTime = isoDates[i][2] !== false;
break;
}
}
if (dateFormat == null) {
config._isValid = false;
return;
}
if (match[3]) {
for (i = 0, l = isoTimesLen; i < l; i++) {
if (isoTimes[i][1].exec(match[3])) {
// match[2] should be 'T' or space
timeFormat = (match[2] || ' ') + isoTimes[i][0];
break;
}
}
if (timeFormat == null) {
config._isValid = false;
return;
}
}
if (!allowTime && timeFormat != null) {
config._isValid = false;
return;
}
if (match[4]) {
if (tzRegex.exec(match[4])) {
tzFormat = 'Z';
} else {
config._isValid = false;
return;
}
}
config._f = dateFormat + (timeFormat || '') + (tzFormat || '');
configFromStringAndFormat(config);
} else {
config._isValid = false;
}
}
function extractFromRFC2822Strings(
yearStr,
monthStr,
dayStr,
hourStr,
minuteStr,
secondStr
) {
var result = [
untruncateYear(yearStr),
defaultLocaleMonthsShort.indexOf(monthStr),
parseInt(dayStr, 10),
parseInt(hourStr, 10),
parseInt(minuteStr, 10),
];
if (secondStr) {
result.push(parseInt(secondStr, 10));
}
return result;
}
function untruncateYear(yearStr) {
var year = parseInt(yearStr, 10);
if (year <= 49) {
return 2000 + year;
} else if (year <= 999) {
return 1900 + year;
}
return year;
}
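    // Per the RFC 2822 obsolete-year rules: untruncateYear('49') === 2049,
    // untruncateYear('50') === 1950, and untruncateYear('2016') === 2016.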
function preprocessRFC2822(s) {
// Remove comments and folding whitespace and replace multiple-spaces with a single space
return s
.replace(/\([^()]*\)|[\n\t]/g, ' ')
.replace(/(\s\s+)/g, ' ')
.replace(/^\s\s*/, '')
.replace(/\s\s*$/, '');
}
function checkWeekday(weekdayStr, parsedInput, config) {
if (weekdayStr) {
// TODO: Replace the vanilla JS Date object with an independent day-of-week check.
var weekdayProvided = defaultLocaleWeekdaysShort.indexOf(weekdayStr),
weekdayActual = new Date(
parsedInput[0],
parsedInput[1],
parsedInput[2]
).getDay();
if (weekdayProvided !== weekdayActual) {
getParsingFlags(config).weekdayMismatch = true;
config._isValid = false;
return false;
}
}
return true;
}
function calculateOffset(obsOffset, militaryOffset, numOffset) {
if (obsOffset) {
return obsOffsets[obsOffset];
} else if (militaryOffset) {
// the only allowed military tz is Z
return 0;
} else {
var hm = parseInt(numOffset, 10),
m = hm % 100,
h = (hm - m) / 100;
return h * 60 + m;
}
}
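    // e.g. calculateOffset('EST', null, null) === -300 (minutes),
    // calculateOffset(null, 'Z', null) === 0, and calculateOffset(null, null, '+0530') === 330.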
    // date and time from RFC 2822 format
function configFromRFC2822(config) {
var match = rfc2822.exec(preprocessRFC2822(config._i)),
parsedArray;
if (match) {
parsedArray = extractFromRFC2822Strings(
match[4],
match[3],
match[2],
match[5],
match[6],
match[7]
);
if (!checkWeekday(match[1], parsedArray, config)) {
return;
}
config._a = parsedArray;
config._tzm = calculateOffset(match[8], match[9], match[10]);
config._d = createUTCDate.apply(null, config._a);
config._d.setUTCMinutes(config._d.getUTCMinutes() - config._tzm);
getParsingFlags(config).rfc2822 = true;
} else {
config._isValid = false;
}
}
// date from 1) ASP.NET, 2) ISO, 3) RFC 2822 formats, or 4) optional fallback if parsing isn't strict
function configFromString(config) {
var matched = aspNetJsonRegex.exec(config._i);
if (matched !== null) {
config._d = new Date(+matched[1]);
return;
}
configFromISO(config);
if (config._isValid === false) {
delete config._isValid;
} else {
return;
}
configFromRFC2822(config);
if (config._isValid === false) {
delete config._isValid;
} else {
return;
}
if (config._strict) {
config._isValid = false;
} else {
// Final attempt, use Input Fallback
hooks.createFromInputFallback(config);
}
}
hooks.createFromInputFallback = deprecate(
'value provided is not in a recognized RFC2822 or ISO format. moment construction falls back to js Date(), ' +
'which is not reliable across all browsers and versions. Non RFC2822/ISO date formats are ' +
'discouraged. Please refer to http://momentjs.com/guides/#/warnings/js-date/ for more info.',
function (config) {
config._d = new Date(config._i + (config._useUTC ? ' UTC' : ''));
}
);
// Pick the first defined of two or three arguments.
function defaults(a, b, c) {
if (a != null) {
return a;
}
if (b != null) {
return b;
}
return c;
}
function currentDateArray(config) {
// hooks is actually the exported moment object
var nowValue = new Date(hooks.now());
if (config._useUTC) {
return [
nowValue.getUTCFullYear(),
nowValue.getUTCMonth(),
nowValue.getUTCDate(),
];
}
return [nowValue.getFullYear(), nowValue.getMonth(), nowValue.getDate()];
}
// convert an array to a date.
// the array should mirror the parameters below
// note: all values past the year are optional and will default to the lowest possible value.
    // [year, month, day, hour, minute, second, millisecond]
function configFromArray(config) {
var i,
date,
input = [],
currentDate,
expectedWeekday,
yearToUse;
if (config._d) {
return;
}
currentDate = currentDateArray(config);
//compute day of the year from weeks and weekdays
if (config._w && config._a[DATE] == null && config._a[MONTH] == null) {
dayOfYearFromWeekInfo(config);
}
//if the day of the year is set, figure out what it is
if (config._dayOfYear != null) {
yearToUse = defaults(config._a[YEAR], currentDate[YEAR]);
if (
config._dayOfYear > daysInYear(yearToUse) ||
config._dayOfYear === 0
) {
getParsingFlags(config)._overflowDayOfYear = true;
}
date = createUTCDate(yearToUse, 0, config._dayOfYear);
config._a[MONTH] = date.getUTCMonth();
config._a[DATE] = date.getUTCDate();
}
// Default to current date.
// * if no year, month, day of month are given, default to today
// * if day of month is given, default month and year
// * if month is given, default only year
// * if year is given, don't default anything
for (i = 0; i < 3 && config._a[i] == null; ++i) {
config._a[i] = input[i] = currentDate[i];
}
// Zero out whatever was not defaulted, including time
for (; i < 7; i++) {
config._a[i] = input[i] =
config._a[i] == null ? (i === 2 ? 1 : 0) : config._a[i];
}
// Check for 24:00:00.000
if (
config._a[HOUR] === 24 &&
config._a[MINUTE] === 0 &&
config._a[SECOND] === 0 &&
config._a[MILLISECOND] === 0
) {
config._nextDay = true;
config._a[HOUR] = 0;
}
config._d = (config._useUTC ? createUTCDate : createDate).apply(
null,
input
);
expectedWeekday = config._useUTC
? config._d.getUTCDay()
: config._d.getDay();
// Apply timezone offset from input. The actual utcOffset can be changed
// with parseZone.
if (config._tzm != null) {
config._d.setUTCMinutes(config._d.getUTCMinutes() - config._tzm);
}
if (config._nextDay) {
config._a[HOUR] = 24;
}
// check for mismatching day of week
if (
config._w &&
typeof config._w.d !== 'undefined' &&
config._w.d !== expectedWeekday
) {
getParsingFlags(config).weekdayMismatch = true;
}
}
function dayOfYearFromWeekInfo(config) {
var w, weekYear, week, weekday, dow, doy, temp, weekdayOverflow, curWeek;
w = config._w;
if (w.GG != null || w.W != null || w.E != null) {
dow = 1;
doy = 4;
// TODO: We need to take the current isoWeekYear, but that depends on
// how we interpret now (local, utc, fixed offset). So create
// a now version of current config (take local/utc/offset flags, and
// create now).
weekYear = defaults(
w.GG,
config._a[YEAR],
weekOfYear(createLocal(), 1, 4).year
);
week = defaults(w.W, 1);
weekday = defaults(w.E, 1);
if (weekday < 1 || weekday > 7) {
weekdayOverflow = true;
}
} else {
dow = config._locale._week.dow;
doy = config._locale._week.doy;
curWeek = weekOfYear(createLocal(), dow, doy);
weekYear = defaults(w.gg, config._a[YEAR], curWeek.year);
// Default to current week.
week = defaults(w.w, curWeek.week);
if (w.d != null) {
// weekday -- low day numbers are considered next week
weekday = w.d;
if (weekday < 0 || weekday > 6) {
weekdayOverflow = true;
}
} else if (w.e != null) {
// local weekday -- counting starts from beginning of week
weekday = w.e + dow;
if (w.e < 0 || w.e > 6) {
weekdayOverflow = true;
}
} else {
// default to beginning of week
weekday = dow;
}
}
if (week < 1 || week > weeksInYear(weekYear, dow, doy)) {
getParsingFlags(config)._overflowWeeks = true;
} else if (weekdayOverflow != null) {
getParsingFlags(config)._overflowWeekday = true;
} else {
temp = dayOfYearFromWeeks(weekYear, week, weekday, dow, doy);
config._a[YEAR] = temp.year;
config._dayOfYear = temp.dayOfYear;
}
}
// constant that refers to the ISO standard
hooks.ISO_8601 = function () {};
// constant that refers to the RFC 2822 form
hooks.RFC_2822 = function () {};
// date from string and format string
function configFromStringAndFormat(config) {
// TODO: Move this to another part of the creation flow to prevent circular deps
if (config._f === hooks.ISO_8601) {
configFromISO(config);
return;
}
if (config._f === hooks.RFC_2822) {
configFromRFC2822(config);
return;
}
config._a = [];
getParsingFlags(config).empty = true;
// This array is used to make a Date, either with `new Date` or `Date.UTC`
var string = '' + config._i,
i,
parsedInput,
tokens,
token,
skipped,
stringLength = string.length,
totalParsedInputLength = 0,
era,
tokenLen;
tokens =
expandFormat(config._f, config._locale).match(formattingTokens) || [];
tokenLen = tokens.length;
for (i = 0; i < tokenLen; i++) {
token = tokens[i];
parsedInput = (string.match(getParseRegexForToken(token, config)) ||
[])[0];
if (parsedInput) {
skipped = string.substr(0, string.indexOf(parsedInput));
if (skipped.length > 0) {
getParsingFlags(config).unusedInput.push(skipped);
}
string = string.slice(
string.indexOf(parsedInput) + parsedInput.length
);
totalParsedInputLength += parsedInput.length;
}
// don't parse if it's not a known token
if (formatTokenFunctions[token]) {
if (parsedInput) {
getParsingFlags(config).empty = false;
} else {
getParsingFlags(config).unusedTokens.push(token);
}
addTimeToArrayFromToken(token, parsedInput, config);
} else if (config._strict && !parsedInput) {
getParsingFlags(config).unusedTokens.push(token);
}
}
// add remaining unparsed input length to the string
getParsingFlags(config).charsLeftOver =
stringLength - totalParsedInputLength;
if (string.length > 0) {
getParsingFlags(config).unusedInput.push(string);
}
        // clear the bigHour (12-hour format) flag if the parsed hour is in the 1-12 range
if (
config._a[HOUR] <= 12 &&
getParsingFlags(config).bigHour === true &&
config._a[HOUR] > 0
) {
getParsingFlags(config).bigHour = undefined;
}
getParsingFlags(config).parsedDateParts = config._a.slice(0);
getParsingFlags(config).meridiem = config._meridiem;
// handle meridiem
config._a[HOUR] = meridiemFixWrap(
config._locale,
config._a[HOUR],
config._meridiem
);
// handle era
era = getParsingFlags(config).era;
if (era !== null) {
config._a[YEAR] = config._locale.erasConvertYear(era, config._a[YEAR]);
}
configFromArray(config);
checkOverflow(config);
}
function meridiemFixWrap(locale, hour, meridiem) {
var isPm;
if (meridiem == null) {
// nothing to do
return hour;
}
if (locale.meridiemHour != null) {
return locale.meridiemHour(hour, meridiem);
} else if (locale.isPM != null) {
// Fallback
isPm = locale.isPM(meridiem);
if (isPm && hour < 12) {
hour += 12;
}
if (!isPm && hour === 12) {
hour = 0;
}
return hour;
} else {
// this is not supposed to happen
return hour;
}
}
// date from string and array of format strings
function configFromStringAndArray(config) {
var tempConfig,
bestMoment,
scoreToBeat,
i,
currentScore,
validFormatFound,
bestFormatIsValid = false,
configfLen = config._f.length;
if (configfLen === 0) {
getParsingFlags(config).invalidFormat = true;
config._d = new Date(NaN);
return;
}
for (i = 0; i < configfLen; i++) {
currentScore = 0;
validFormatFound = false;
tempConfig = copyConfig({}, config);
if (config._useUTC != null) {
tempConfig._useUTC = config._useUTC;
}
tempConfig._f = config._f[i];
configFromStringAndFormat(tempConfig);
if (isValid(tempConfig)) {
validFormatFound = true;
}
// if there is any input that was not parsed add a penalty for that format
currentScore += getParsingFlags(tempConfig).charsLeftOver;
            // ...and a heavier penalty for each format token that went unused
currentScore += getParsingFlags(tempConfig).unusedTokens.length * 10;
getParsingFlags(tempConfig).score = currentScore;
if (!bestFormatIsValid) {
if (
scoreToBeat == null ||
currentScore < scoreToBeat ||
validFormatFound
) {
scoreToBeat = currentScore;
bestMoment = tempConfig;
if (validFormatFound) {
bestFormatIsValid = true;
}
}
} else {
if (currentScore < scoreToBeat) {
scoreToBeat = currentScore;
bestMoment = tempConfig;
}
}
}
extend(config, bestMoment || tempConfig);
}
function configFromObject(config) {
if (config._d) {
return;
}
var i = normalizeObjectUnits(config._i),
dayOrDate = i.day === undefined ? i.date : i.day;
config._a = map(
[i.year, i.month, dayOrDate, i.hour, i.minute, i.second, i.millisecond],
function (obj) {
return obj && parseInt(obj, 10);
}
);
configFromArray(config);
}
function createFromConfig(config) {
var res = new Moment(checkOverflow(prepareConfig(config)));
if (res._nextDay) {
// Adding is smart enough around DST
res.add(1, 'd');
res._nextDay = undefined;
}
return res;
}
function prepareConfig(config) {
var input = config._i,
format = config._f;
config._locale = config._locale || getLocale(config._l);
if (input === null || (format === undefined && input === '')) {
return createInvalid({ nullInput: true });
}
if (typeof input === 'string') {
config._i = input = config._locale.preparse(input);
}
if (isMoment(input)) {
return new Moment(checkOverflow(input));
} else if (isDate(input)) {
config._d = input;
} else if (isArray(format)) {
configFromStringAndArray(config);
} else if (format) {
configFromStringAndFormat(config);
} else {
configFromInput(config);
}
if (!isValid(config)) {
config._d = null;
}
return config;
}
function configFromInput(config) {
var input = config._i;
if (isUndefined(input)) {
config._d = new Date(hooks.now());
} else if (isDate(input)) {
config._d = new Date(input.valueOf());
} else if (typeof input === 'string') {
configFromString(config);
} else if (isArray(input)) {
config._a = map(input.slice(0), function (obj) {
return parseInt(obj, 10);
});
configFromArray(config);
} else if (isObject(input)) {
configFromObject(config);
} else if (isNumber(input)) {
// from milliseconds
config._d = new Date(input);
} else {
hooks.createFromInputFallback(config);
}
}
function createLocalOrUTC(input, format, locale, strict, isUTC) {
var c = {};
if (format === true || format === false) {
strict = format;
format = undefined;
}
if (locale === true || locale === false) {
strict = locale;
locale = undefined;
}
if (
(isObject(input) && isObjectEmpty(input)) ||
(isArray(input) && input.length === 0)
) {
input = undefined;
}
// object construction must be done this way.
// https://github.com/moment/moment/issues/1423
c._isAMomentObject = true;
c._useUTC = c._isUTC = isUTC;
c._l = locale;
c._i = input;
c._f = format;
c._strict = strict;
return createFromConfig(c);
}
function createLocal(input, format, locale, strict) {
return createLocalOrUTC(input, format, locale, strict, false);
}
var prototypeMin = deprecate(
'moment().min is deprecated, use moment.max instead. http://momentjs.com/guides/#/warnings/min-max/',
function () {
var other = createLocal.apply(null, arguments);
if (this.isValid() && other.isValid()) {
return other < this ? this : other;
} else {
return createInvalid();
}
}
),
prototypeMax = deprecate(
'moment().max is deprecated, use moment.min instead. http://momentjs.com/guides/#/warnings/min-max/',
function () {
var other = createLocal.apply(null, arguments);
if (this.isValid() && other.isValid()) {
return other > this ? this : other;
} else {
return createInvalid();
}
}
);
// Pick a moment m from moments so that m[fn](other) is true for all
// other. This relies on the function fn to be transitive.
//
    // moments should either be an array of moment objects or an array whose
    // first element is an array of moment objects.
function pickBy(fn, moments) {
var res, i;
if (moments.length === 1 && isArray(moments[0])) {
moments = moments[0];
}
if (!moments.length) {
return createLocal();
}
res = moments[0];
for (i = 1; i < moments.length; ++i) {
if (!moments[i].isValid() || moments[i][fn](res)) {
res = moments[i];
}
}
return res;
}
// TODO: Use [].sort instead?
function min() {
var args = [].slice.call(arguments, 0);
return pickBy('isBefore', args);
}
function max() {
var args = [].slice.call(arguments, 0);
return pickBy('isAfter', args);
}
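    // These back the public moment.min/moment.max, e.g.
    // moment.max(moment('2020-01-01'), moment('2021-01-01')) returns the later moment;
    // if any argument is invalid, the invalid moment is propagated as the result.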
var now = function () {
return Date.now ? Date.now() : +new Date();
};
var ordering = [
'year',
'quarter',
'month',
'week',
'day',
'hour',
'minute',
'second',
'millisecond',
];
function isDurationValid(m) {
var key,
unitHasDecimal = false,
i,
orderLen = ordering.length;
for (key in m) {
if (
hasOwnProp(m, key) &&
!(
indexOf.call(ordering, key) !== -1 &&
(m[key] == null || !isNaN(m[key]))
)
) {
return false;
}
}
for (i = 0; i < orderLen; ++i) {
if (m[ordering[i]]) {
if (unitHasDecimal) {
return false; // only allow non-integers for smallest unit
}
if (parseFloat(m[ordering[i]]) !== toInt(m[ordering[i]])) {
unitHasDecimal = true;
}
}
}
return true;
}
function isValid$1() {
return this._isValid;
}
function createInvalid$1() {
return createDuration(NaN);
}
function Duration(duration) {
var normalizedInput = normalizeObjectUnits(duration),
years = normalizedInput.year || 0,
quarters = normalizedInput.quarter || 0,
months = normalizedInput.month || 0,
weeks = normalizedInput.week || normalizedInput.isoWeek || 0,
days = normalizedInput.day || 0,
hours = normalizedInput.hour || 0,
minutes = normalizedInput.minute || 0,
seconds = normalizedInput.second || 0,
milliseconds = normalizedInput.millisecond || 0;
this._isValid = isDurationValid(normalizedInput);
// representation for dateAddRemove
this._milliseconds =
+milliseconds +
seconds * 1e3 + // 1000
minutes * 6e4 + // 1000 * 60
hours * 1000 * 60 * 60; //using 1000 * 60 * 60 instead of 36e5 to avoid floating point rounding errors https://github.com/moment/moment/issues/2978
        // Because dateAddRemove treats 24 hours as different from a
// day when working around DST, we need to store them separately
this._days = +days + weeks * 7;
// It is impossible to translate months into days without knowing
        // which months you are talking about, so we have to store
// it separately.
this._months = +months + quarters * 3 + years * 12;
this._data = {};
this._locale = getLocale();
this._bubble();
}
function isDuration(obj) {
return obj instanceof Duration;
}
function absRound(number) {
if (number < 0) {
return Math.round(-1 * number) * -1;
} else {
return Math.round(number);
}
}
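    // Rounds the magnitude and reapplies the sign, so halves round away from zero for
    // negative input too: absRound(-1.5) === -2, whereas Math.round(-1.5) === -1.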
// compare two arrays, return the number of differences
function compareArrays(array1, array2, dontConvert) {
var len = Math.min(array1.length, array2.length),
lengthDiff = Math.abs(array1.length - array2.length),
diffs = 0,
i;
for (i = 0; i < len; i++) {
if (
(dontConvert && array1[i] !== array2[i]) ||
(!dontConvert && toInt(array1[i]) !== toInt(array2[i]))
) {
diffs++;
}
}
return diffs + lengthDiff;
}
// FORMATTING
function offset(token, separator) {
addFormatToken(token, 0, 0, function () {
var offset = this.utcOffset(),
sign = '+';
if (offset < 0) {
offset = -offset;
sign = '-';
}
return (
sign +
zeroFill(~~(offset / 60), 2) +
separator +
zeroFill(~~offset % 60, 2)
);
});
}
offset('Z', ':');
offset('ZZ', '');
// PARSING
addRegexToken('Z', matchShortOffset);
addRegexToken('ZZ', matchShortOffset);
addParseToken(['Z', 'ZZ'], function (input, array, config) {
config._useUTC = true;
config._tzm = offsetFromString(matchShortOffset, input);
});
// HELPERS
// timezone chunker
// '+10:00' > ['10', '00']
// '-1530' > ['-15', '30']
var chunkOffset = /([\+\-]|\d\d)/gi;
function offsetFromString(matcher, string) {
var matches = (string || '').match(matcher),
chunk,
parts,
minutes;
if (matches === null) {
return null;
}
chunk = matches[matches.length - 1] || [];
parts = (chunk + '').match(chunkOffset) || ['-', 0, 0];
minutes = +(parts[1] * 60) + toInt(parts[2]);
return minutes === 0 ? 0 : parts[0] === '+' ? minutes : -minutes;
}
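    // e.g. offsetFromString(matchShortOffset, '+05:30') === 330 and
    // offsetFromString(matchShortOffset, '-0800') === -480 (both in minutes).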
// Return a moment from input, that is local/utc/zone equivalent to model.
function cloneWithOffset(input, model) {
var res, diff;
if (model._isUTC) {
res = model.clone();
diff =
(isMoment(input) || isDate(input)
? input.valueOf()
: createLocal(input).valueOf()) - res.valueOf();
// Use low-level api, because this fn is low-level api.
res._d.setTime(res._d.valueOf() + diff);
hooks.updateOffset(res, false);
return res;
} else {
return createLocal(input).local();
}
}
function getDateOffset(m) {
        // On Firefox 24, Date#getTimezoneOffset returns a floating point number.
// https://github.com/moment/moment/pull/1871
return -Math.round(m._d.getTimezoneOffset());
}
// HOOKS
// This function will be called whenever a moment is mutated.
// It is intended to keep the offset in sync with the timezone.
hooks.updateOffset = function () {};
// MOMENTS
// keepLocalTime = true means only change the timezone, without
// affecting the local hour. So 5:31:26 +0300 --[utcOffset(2, true)]-->
// 5:31:26 +0200 It is possible that 5:31:26 doesn't exist with offset
// +0200, so we adjust the time as needed, to be valid.
//
    // Keeping the local time actually adds/subtracts (one hour)
    // from the actual represented time. That is why we call updateOffset
    // a second time. In case it wants us to change the offset again
    // (the _changeInProgress === true case), we have to adjust, because
    // there is no such time in the given timezone.
function getSetOffset(input, keepLocalTime, keepMinutes) {
var offset = this._offset || 0,
localAdjust;
if (!this.isValid()) {
return input != null ? this : NaN;
}
if (input != null) {
if (typeof input === 'string') {
input = offsetFromString(matchShortOffset, input);
if (input === null) {
return this;
}
} else if (Math.abs(input) < 16 && !keepMinutes) {
input = input * 60;
}
if (!this._isUTC && keepLocalTime) {
localAdjust = getDateOffset(this);
}
this._offset = input;
this._isUTC = true;
if (localAdjust != null) {
this.add(localAdjust, 'm');
}
if (offset !== input) {
if (!keepLocalTime || this._changeInProgress) {
addSubtract(
this,
createDuration(input - offset, 'm'),
1,
false
);
} else if (!this._changeInProgress) {
this._changeInProgress = true;
hooks.updateOffset(this, true);
this._changeInProgress = null;
}
}
return this;
} else {
return this._isUTC ? offset : getDateOffset(this);
}
}
function getSetZone(input, keepLocalTime) {
if (input != null) {
if (typeof input !== 'string') {
input = -input;
}
this.utcOffset(input, keepLocalTime);
return this;
} else {
return -this.utcOffset();
}
}
function setOffsetToUTC(keepLocalTime) {
return this.utcOffset(0, keepLocalTime);
}
function setOffsetToLocal(keepLocalTime) {
if (this._isUTC) {
this.utcOffset(0, keepLocalTime);
this._isUTC = false;
if (keepLocalTime) {
this.subtract(getDateOffset(this), 'm');
}
}
return this;
}
function setOffsetToParsedOffset() {
if (this._tzm != null) {
this.utcOffset(this._tzm, false, true);
} else if (typeof this._i === 'string') {
var tZone = offsetFromString(matchOffset, this._i);
if (tZone != null) {
this.utcOffset(tZone);
} else {
this.utcOffset(0, true);
}
}
return this;
}
function hasAlignedHourOffset(input) {
if (!this.isValid()) {
return false;
}
input = input ? createLocal(input).utcOffset() : 0;
return (this.utcOffset() - input) % 60 === 0;
}
function isDaylightSavingTime() {
return (
this.utcOffset() > this.clone().month(0).utcOffset() ||
this.utcOffset() > this.clone().month(5).utcOffset()
);
}
function isDaylightSavingTimeShifted() {
if (!isUndefined(this._isDSTShifted)) {
return this._isDSTShifted;
}
var c = {},
other;
copyConfig(c, this);
c = prepareConfig(c);
if (c._a) {
other = c._isUTC ? createUTC(c._a) : createLocal(c._a);
this._isDSTShifted =
this.isValid() && compareArrays(c._a, other.toArray()) > 0;
} else {
this._isDSTShifted = false;
}
return this._isDSTShifted;
}
function isLocal() {
return this.isValid() ? !this._isUTC : false;
}
function isUtcOffset() {
return this.isValid() ? this._isUTC : false;
}
function isUtc() {
return this.isValid() ? this._isUTC && this._offset === 0 : false;
}
// ASP.NET json date format regex
var aspNetRegex = /^(-|\+)?(?:(\d*)[. ])?(\d+):(\d+)(?::(\d+)(\.\d*)?)?$/,
// from http://docs.closure-library.googlecode.com/git/closure_goog_date_date.js.source.html
        // somewhat more in line with section 4.4.3.2 of the ISO 8601:2004 spec, but allows decimals anywhere
        // and further modified to allow for strings containing both week and day
isoRegex =
/^(-|\+)?P(?:([-+]?[0-9,.]*)Y)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)W)?(?:([-+]?[0-9,.]*)D)?(?:T(?:([-+]?[0-9,.]*)H)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)S)?)?$/;
function createDuration(input, key) {
var duration = input,
// matching against regexp is expensive, do it on demand
match = null,
sign,
ret,
diffRes;
if (isDuration(input)) {
duration = {
ms: input._milliseconds,
d: input._days,
M: input._months,
};
} else if (isNumber(input) || !isNaN(+input)) {
duration = {};
if (key) {
duration[key] = +input;
} else {
duration.milliseconds = +input;
}
} else if ((match = aspNetRegex.exec(input))) {
sign = match[1] === '-' ? -1 : 1;
duration = {
y: 0,
d: toInt(match[DATE]) * sign,
h: toInt(match[HOUR]) * sign,
m: toInt(match[MINUTE]) * sign,
s: toInt(match[SECOND]) * sign,
ms: toInt(absRound(match[MILLISECOND] * 1000)) * sign, // the millisecond decimal point is included in the match
};
} else if ((match = isoRegex.exec(input))) {
sign = match[1] === '-' ? -1 : 1;
duration = {
y: parseIso(match[2], sign),
M: parseIso(match[3], sign),
w: parseIso(match[4], sign),
d: parseIso(match[5], sign),
h: parseIso(match[6], sign),
m: parseIso(match[7], sign),
s: parseIso(match[8], sign),
};
} else if (duration == null) {
// checks for null or undefined
duration = {};
} else if (
typeof duration === 'object' &&
('from' in duration || 'to' in duration)
) {
diffRes = momentsDifference(
createLocal(duration.from),
createLocal(duration.to)
);
duration = {};
duration.ms = diffRes.milliseconds;
duration.M = diffRes.months;
}
ret = new Duration(duration);
if (isDuration(input) && hasOwnProp(input, '_locale')) {
ret._locale = input._locale;
}
if (isDuration(input) && hasOwnProp(input, '_isValid')) {
ret._isValid = input._isValid;
}
return ret;
}
createDuration.fn = Duration.prototype;
createDuration.invalid = createInvalid$1;
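// Quick sketch of the input formats handled above (illustrative, not part of
// the source); createDuration is exposed later as moment.duration:
//
//     createDuration(90, 'm');            // 90 minutes
//     createDuration('P1Y2M10DT2H30M');   // ISO 8601 duration (isoRegex)
//     createDuration('3.07:05:45');       // ASP.NET TimeSpan style (aspNetRegex): 3 days, 07:05:45
//     createDuration({ from: a, to: b }); // difference between two moment inputs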
function parseIso(inp, sign) {
// We'd normally use ~~inp for this, but unfortunately it also
// converts floats to ints.
// inp may be undefined, so careful calling replace on it.
var res = inp && parseFloat(inp.replace(',', '.'));
// apply sign while we're at it
return (isNaN(res) ? 0 : res) * sign;
}
function positiveMomentsDifference(base, other) {
var res = {};
res.months =
other.month() - base.month() + (other.year() - base.year()) * 12;
if (base.clone().add(res.months, 'M').isAfter(other)) {
--res.months;
}
res.milliseconds = +other - +base.clone().add(res.months, 'M');
return res;
}
function momentsDifference(base, other) {
var res;
if (!(base.isValid() && other.isValid())) {
return { milliseconds: 0, months: 0 };
}
other = cloneWithOffset(other, base);
if (base.isBefore(other)) {
res = positiveMomentsDifference(base, other);
} else {
res = positiveMomentsDifference(other, base);
res.milliseconds = -res.milliseconds;
res.months = -res.months;
}
return res;
}
// TODO: remove 'name' arg after deprecation is removed
function createAdder(direction, name) {
return function (val, period) {
var dur, tmp;
//invert the arguments, but complain about it
if (period !== null && !isNaN(+period)) {
deprecateSimple(
name,
'moment().' +
name +
'(period, number) is deprecated. Please use moment().' +
name +
'(number, period). ' +
'See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info.'
);
tmp = val;
val = period;
period = tmp;
}
dur = createDuration(val, period);
addSubtract(this, dur, direction);
return this;
};
}
function addSubtract(mom, duration, isAdding, updateOffset) {
var milliseconds = duration._milliseconds,
days = absRound(duration._days),
months = absRound(duration._months);
if (!mom.isValid()) {
// No op
return;
}
updateOffset = updateOffset == null ? true : updateOffset;
if (months) {
setMonth(mom, get(mom, 'Month') + months * isAdding);
}
if (days) {
set$1(mom, 'Date', get(mom, 'Date') + days * isAdding);
}
if (milliseconds) {
mom._d.setTime(mom._d.valueOf() + milliseconds * isAdding);
}
if (updateOffset) {
hooks.updateOffset(mom, days || months);
}
}
var add = createAdder(1, 'add'),
subtract = createAdder(-1, 'subtract');
function isString(input) {
return typeof input === 'string' || input instanceof String;
}
// type MomentInput = Moment | Date | string | number | (number | string)[] | MomentInputObject | void; // null | undefined
function isMomentInput(input) {
return (
isMoment(input) ||
isDate(input) ||
isString(input) ||
isNumber(input) ||
isNumberOrStringArray(input) ||
isMomentInputObject(input) ||
input === null ||
input === undefined
);
}
function isMomentInputObject(input) {
var objectTest = isObject(input) && !isObjectEmpty(input),
propertyTest = false,
properties = [
'years',
'year',
'y',
'months',
'month',
'M',
'days',
'day',
'd',
'dates',
'date',
'D',
'hours',
'hour',
'h',
'minutes',
'minute',
'm',
'seconds',
'second',
's',
'milliseconds',
'millisecond',
'ms',
],
i,
property,
propertyLen = properties.length;
for (i = 0; i < propertyLen; i += 1) {
property = properties[i];
propertyTest = propertyTest || hasOwnProp(input, property);
}
return objectTest && propertyTest;
}
function isNumberOrStringArray(input) {
var arrayTest = isArray(input),
dataTypeTest = false;
if (arrayTest) {
dataTypeTest =
input.filter(function (item) {
                return !isNumber(item) && isString(item);
}).length === 0;
}
return arrayTest && dataTypeTest;
}
function isCalendarSpec(input) {
var objectTest = isObject(input) && !isObjectEmpty(input),
propertyTest = false,
properties = [
'sameDay',
'nextDay',
'lastDay',
'nextWeek',
'lastWeek',
'sameElse',
],
i,
property;
for (i = 0; i < properties.length; i += 1) {
property = properties[i];
propertyTest = propertyTest || hasOwnProp(input, property);
}
return objectTest && propertyTest;
}
function getCalendarFormat(myMoment, now) {
var diff = myMoment.diff(now, 'days', true);
return diff < -6
? 'sameElse'
: diff < -1
? 'lastWeek'
: diff < 0
? 'lastDay'
: diff < 1
? 'sameDay'
: diff < 2
? 'nextDay'
: diff < 7
? 'nextWeek'
: 'sameElse';
}
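// Bucketing sketch (illustrative): with diff = myMoment.diff(now, 'days', true),
// a moment about 3 days ahead gives diff ~ 3 -> 'nextWeek', about 3 days back
// gives diff ~ -3 -> 'lastWeek', and 0 <= diff < 1 -> 'sameDay'.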
function calendar$1(time, formats) {
// Support for single parameter, formats only overload to the calendar function
if (arguments.length === 1) {
if (!arguments[0]) {
time = undefined;
formats = undefined;
} else if (isMomentInput(arguments[0])) {
time = arguments[0];
formats = undefined;
} else if (isCalendarSpec(arguments[0])) {
formats = arguments[0];
time = undefined;
}
}
// We want to compare the start of today, vs this.
// Getting start-of-today depends on whether we're local/utc/offset or not.
var now = time || createLocal(),
sod = cloneWithOffset(now, this).startOf('day'),
format = hooks.calendarFormat(this, sod) || 'sameElse',
output =
formats &&
(isFunction(formats[format])
? formats[format].call(this, now)
: formats[format]);
return this.format(
output || this.localeData().calendar(format, this, createLocal(now))
);
}
function clone() {
return new Moment(this);
}
function isAfter(input, units) {
var localInput = isMoment(input) ? input : createLocal(input);
if (!(this.isValid() && localInput.isValid())) {
return false;
}
units = normalizeUnits(units) || 'millisecond';
if (units === 'millisecond') {
return this.valueOf() > localInput.valueOf();
} else {
return localInput.valueOf() < this.clone().startOf(units).valueOf();
}
}
function isBefore(input, units) {
var localInput = isMoment(input) ? input : createLocal(input);
if (!(this.isValid() && localInput.isValid())) {
return false;
}
units = normalizeUnits(units) || 'millisecond';
if (units === 'millisecond') {
return this.valueOf() < localInput.valueOf();
} else {
return this.clone().endOf(units).valueOf() < localInput.valueOf();
}
}
function isBetween(from, to, units, inclusivity) {
var localFrom = isMoment(from) ? from : createLocal(from),
localTo = isMoment(to) ? to : createLocal(to);
if (!(this.isValid() && localFrom.isValid() && localTo.isValid())) {
return false;
}
inclusivity = inclusivity || '()';
return (
(inclusivity[0] === '('
? this.isAfter(localFrom, units)
: !this.isBefore(localFrom, units)) &&
(inclusivity[1] === ')'
? this.isBefore(localTo, units)
: !this.isAfter(localTo, units))
);
}
function isSame(input, units) {
var localInput = isMoment(input) ? input : createLocal(input),
inputMs;
if (!(this.isValid() && localInput.isValid())) {
return false;
}
units = normalizeUnits(units) || 'millisecond';
if (units === 'millisecond') {
return this.valueOf() === localInput.valueOf();
} else {
inputMs = localInput.valueOf();
return (
this.clone().startOf(units).valueOf() <= inputMs &&
inputMs <= this.clone().endOf(units).valueOf()
);
}
}
function isSameOrAfter(input, units) {
return this.isSame(input, units) || this.isAfter(input, units);
}
function isSameOrBefore(input, units) {
return this.isSame(input, units) || this.isBefore(input, units);
}
function diff(input, units, asFloat) {
var that, zoneDelta, output;
if (!this.isValid()) {
return NaN;
}
that = cloneWithOffset(input, this);
if (!that.isValid()) {
return NaN;
}
zoneDelta = (that.utcOffset() - this.utcOffset()) * 6e4;
units = normalizeUnits(units);
switch (units) {
case 'year':
output = monthDiff(this, that) / 12;
break;
case 'month':
output = monthDiff(this, that);
break;
case 'quarter':
output = monthDiff(this, that) / 3;
break;
case 'second':
output = (this - that) / 1e3;
break; // 1000
case 'minute':
output = (this - that) / 6e4;
break; // 1000 * 60
case 'hour':
output = (this - that) / 36e5;
break; // 1000 * 60 * 60
case 'day':
output = (this - that - zoneDelta) / 864e5;
break; // 1000 * 60 * 60 * 24, negate dst
case 'week':
output = (this - that - zoneDelta) / 6048e5;
break; // 1000 * 60 * 60 * 24 * 7, negate dst
default:
output = this - that;
}
return asFloat ? output : absFloor(output);
}
function monthDiff(a, b) {
if (a.date() < b.date()) {
        // end-of-month calculations work correctly when the start month has more
        // days than the end month.
return -monthDiff(b, a);
}
// difference in months
var wholeMonthDiff = (b.year() - a.year()) * 12 + (b.month() - a.month()),
// b is in (anchor - 1 month, anchor + 1 month)
anchor = a.clone().add(wholeMonthDiff, 'months'),
anchor2,
adjust;
if (b - anchor < 0) {
anchor2 = a.clone().add(wholeMonthDiff - 1, 'months');
// linear across the month
adjust = (b - anchor) / (anchor - anchor2);
} else {
anchor2 = a.clone().add(wholeMonthDiff + 1, 'months');
// linear across the month
adjust = (b - anchor) / (anchor2 - anchor);
}
//check for negative zero, return zero if negative zero
return -(wholeMonthDiff + adjust) || 0;
}
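// Worked example (illustrative): monthDiff(hooks('2020-01-31'), hooks('2020-03-01'))
// has wholeMonthDiff = 2, but the anchor 2020-03-31 overshoots b, so anchor2 is
// 2020-02-29 and adjust ~ -0.97, giving about -1.03; hence
// hooks('2020-03-01').diff(hooks('2020-01-31'), 'months') floors to 1.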
hooks.defaultFormat = 'YYYY-MM-DDTHH:mm:ssZ';
hooks.defaultFormatUtc = 'YYYY-MM-DDTHH:mm:ss[Z]';
function toString() {
return this.clone().locale('en').format('ddd MMM DD YYYY HH:mm:ss [GMT]ZZ');
}
function toISOString(keepOffset) {
if (!this.isValid()) {
return null;
}
var utc = keepOffset !== true,
m = utc ? this.clone().utc() : this;
if (m.year() < 0 || m.year() > 9999) {
return formatMoment(
m,
utc
? 'YYYYYY-MM-DD[T]HH:mm:ss.SSS[Z]'
: 'YYYYYY-MM-DD[T]HH:mm:ss.SSSZ'
);
}
if (isFunction(Date.prototype.toISOString)) {
// native implementation is ~50x faster, use it when we can
if (utc) {
return this.toDate().toISOString();
} else {
return new Date(this.valueOf() + this.utcOffset() * 60 * 1000)
.toISOString()
.replace('Z', formatMoment(m, 'Z'));
}
}
return formatMoment(
m,
utc ? 'YYYY-MM-DD[T]HH:mm:ss.SSS[Z]' : 'YYYY-MM-DD[T]HH:mm:ss.SSSZ'
);
}
/**
* Return a human readable representation of a moment that can
* also be evaluated to get a new moment which is the same
*
* @link https://nodejs.org/dist/latest/docs/api/util.html#util_custom_inspect_function_on_objects
*/
function inspect() {
if (!this.isValid()) {
return 'moment.invalid(/* ' + this._i + ' */)';
}
var func = 'moment',
zone = '',
prefix,
year,
datetime,
suffix;
if (!this.isLocal()) {
func = this.utcOffset() === 0 ? 'moment.utc' : 'moment.parseZone';
zone = 'Z';
}
prefix = '[' + func + '("]';
year = 0 <= this.year() && this.year() <= 9999 ? 'YYYY' : 'YYYYYY';
datetime = '-MM-DD[T]HH:mm:ss.SSS';
suffix = zone + '[")]';
return this.format(prefix + year + datetime + suffix);
}
function format(inputString) {
if (!inputString) {
inputString = this.isUtc()
? hooks.defaultFormatUtc
: hooks.defaultFormat;
}
var output = formatMoment(this, inputString);
return this.localeData().postformat(output);
}
function from(time, withoutSuffix) {
if (
this.isValid() &&
((isMoment(time) && time.isValid()) || createLocal(time).isValid())
) {
return createDuration({ to: this, from: time })
.locale(this.locale())
.humanize(!withoutSuffix);
} else {
return this.localeData().invalidDate();
}
}
function fromNow(withoutSuffix) {
return this.from(createLocal(), withoutSuffix);
}
function to(time, withoutSuffix) {
if (
this.isValid() &&
((isMoment(time) && time.isValid()) || createLocal(time).isValid())
) {
return createDuration({ from: this, to: time })
.locale(this.locale())
.humanize(!withoutSuffix);
} else {
return this.localeData().invalidDate();
}
}
function toNow(withoutSuffix) {
return this.to(createLocal(), withoutSuffix);
}
// If passed a locale key, it will set the locale for this
// instance. Otherwise, it will return the locale configuration
// variables for this instance.
function locale(key) {
var newLocaleData;
if (key === undefined) {
return this._locale._abbr;
} else {
newLocaleData = getLocale(key);
if (newLocaleData != null) {
this._locale = newLocaleData;
}
return this;
}
}
var lang = deprecate(
'moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.',
function (key) {
if (key === undefined) {
return this.localeData();
} else {
return this.locale(key);
}
}
);
function localeData() {
return this._locale;
}
var MS_PER_SECOND = 1000,
MS_PER_MINUTE = 60 * MS_PER_SECOND,
MS_PER_HOUR = 60 * MS_PER_MINUTE,
MS_PER_400_YEARS = (365 * 400 + 97) * 24 * MS_PER_HOUR;
// actual modulo - handles negative numbers (for dates before 1970):
function mod$1(dividend, divisor) {
return ((dividend % divisor) + divisor) % divisor;
}
function localStartOfDate(y, m, d) {
// the date constructor remaps years 0-99 to 1900-1999
if (y < 100 && y >= 0) {
// preserve leap years using a full 400 year cycle, then reset
return new Date(y + 400, m, d) - MS_PER_400_YEARS;
} else {
return new Date(y, m, d).valueOf();
}
}
function utcStartOfDate(y, m, d) {
// Date.UTC remaps years 0-99 to 1900-1999
if (y < 100 && y >= 0) {
// preserve leap years using a full 400 year cycle, then reset
return Date.UTC(y + 400, m, d) - MS_PER_400_YEARS;
} else {
return Date.UTC(y, m, d);
}
}
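// Sketch of the 0-99 workaround (illustrative): localStartOfDate(50, 0, 1)
// builds new Date(450, 0, 1) and subtracts MS_PER_400_YEARS, landing on local
// midnight of year 50 -- whereas new Date(50, 0, 1) would be remapped to 1950.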
function startOf(units) {
var time, startOfDate;
units = normalizeUnits(units);
if (units === undefined || units === 'millisecond' || !this.isValid()) {
return this;
}
startOfDate = this._isUTC ? utcStartOfDate : localStartOfDate;
switch (units) {
case 'year':
time = startOfDate(this.year(), 0, 1);
break;
case 'quarter':
time = startOfDate(
this.year(),
this.month() - (this.month() % 3),
1
);
break;
case 'month':
time = startOfDate(this.year(), this.month(), 1);
break;
case 'week':
time = startOfDate(
this.year(),
this.month(),
this.date() - this.weekday()
);
break;
case 'isoWeek':
time = startOfDate(
this.year(),
this.month(),
this.date() - (this.isoWeekday() - 1)
);
break;
case 'day':
case 'date':
time = startOfDate(this.year(), this.month(), this.date());
break;
case 'hour':
time = this._d.valueOf();
time -= mod$1(
time + (this._isUTC ? 0 : this.utcOffset() * MS_PER_MINUTE),
MS_PER_HOUR
);
break;
case 'minute':
time = this._d.valueOf();
time -= mod$1(time, MS_PER_MINUTE);
break;
case 'second':
time = this._d.valueOf();
time -= mod$1(time, MS_PER_SECOND);
break;
}
this._d.setTime(time);
hooks.updateOffset(this, true);
return this;
}
function endOf(units) {
var time, startOfDate;
units = normalizeUnits(units);
if (units === undefined || units === 'millisecond' || !this.isValid()) {
return this;
}
startOfDate = this._isUTC ? utcStartOfDate : localStartOfDate;
switch (units) {
case 'year':
time = startOfDate(this.year() + 1, 0, 1) - 1;
break;
case 'quarter':
time =
startOfDate(
this.year(),
this.month() - (this.month() % 3) + 3,
1
) - 1;
break;
case 'month':
time = startOfDate(this.year(), this.month() + 1, 1) - 1;
break;
case 'week':
time =
startOfDate(
this.year(),
this.month(),
this.date() - this.weekday() + 7
) - 1;
break;
case 'isoWeek':
time =
startOfDate(
this.year(),
this.month(),
this.date() - (this.isoWeekday() - 1) + 7
) - 1;
break;
case 'day':
case 'date':
time = startOfDate(this.year(), this.month(), this.date() + 1) - 1;
break;
case 'hour':
time = this._d.valueOf();
time +=
MS_PER_HOUR -
mod$1(
time + (this._isUTC ? 0 : this.utcOffset() * MS_PER_MINUTE),
MS_PER_HOUR
) -
1;
break;
case 'minute':
time = this._d.valueOf();
time += MS_PER_MINUTE - mod$1(time, MS_PER_MINUTE) - 1;
break;
case 'second':
time = this._d.valueOf();
time += MS_PER_SECOND - mod$1(time, MS_PER_SECOND) - 1;
break;
}
this._d.setTime(time);
hooks.updateOffset(this, true);
return this;
}
function valueOf() {
return this._d.valueOf() - (this._offset || 0) * 60000;
}
function unix() {
return Math.floor(this.valueOf() / 1000);
}
function toDate() {
return new Date(this.valueOf());
}
function toArray() {
var m = this;
return [
m.year(),
m.month(),
m.date(),
m.hour(),
m.minute(),
m.second(),
m.millisecond(),
];
}
function toObject() {
var m = this;
return {
years: m.year(),
months: m.month(),
date: m.date(),
hours: m.hours(),
minutes: m.minutes(),
seconds: m.seconds(),
milliseconds: m.milliseconds(),
};
}
function toJSON() {
// new Date(NaN).toJSON() === null
return this.isValid() ? this.toISOString() : null;
}
function isValid$2() {
return isValid(this);
}
function parsingFlags() {
return extend({}, getParsingFlags(this));
}
function invalidAt() {
return getParsingFlags(this).overflow;
}
function creationData() {
return {
input: this._i,
format: this._f,
locale: this._locale,
isUTC: this._isUTC,
strict: this._strict,
};
}
addFormatToken('N', 0, 0, 'eraAbbr');
addFormatToken('NN', 0, 0, 'eraAbbr');
addFormatToken('NNN', 0, 0, 'eraAbbr');
addFormatToken('NNNN', 0, 0, 'eraName');
addFormatToken('NNNNN', 0, 0, 'eraNarrow');
addFormatToken('y', ['y', 1], 'yo', 'eraYear');
addFormatToken('y', ['yy', 2], 0, 'eraYear');
addFormatToken('y', ['yyy', 3], 0, 'eraYear');
addFormatToken('y', ['yyyy', 4], 0, 'eraYear');
addRegexToken('N', matchEraAbbr);
addRegexToken('NN', matchEraAbbr);
addRegexToken('NNN', matchEraAbbr);
addRegexToken('NNNN', matchEraName);
addRegexToken('NNNNN', matchEraNarrow);
addParseToken(
['N', 'NN', 'NNN', 'NNNN', 'NNNNN'],
function (input, array, config, token) {
var era = config._locale.erasParse(input, token, config._strict);
if (era) {
getParsingFlags(config).era = era;
} else {
getParsingFlags(config).invalidEra = input;
}
}
);
addRegexToken('y', matchUnsigned);
addRegexToken('yy', matchUnsigned);
addRegexToken('yyy', matchUnsigned);
addRegexToken('yyyy', matchUnsigned);
addRegexToken('yo', matchEraYearOrdinal);
addParseToken(['y', 'yy', 'yyy', 'yyyy'], YEAR);
addParseToken(['yo'], function (input, array, config, token) {
var match;
if (config._locale._eraYearOrdinalRegex) {
match = input.match(config._locale._eraYearOrdinalRegex);
}
if (config._locale.eraYearOrdinalParse) {
array[YEAR] = config._locale.eraYearOrdinalParse(input, match);
} else {
array[YEAR] = parseInt(input, 10);
}
});
function localeEras(m, format) {
var i,
l,
date,
eras = this._eras || getLocale('en')._eras;
for (i = 0, l = eras.length; i < l; ++i) {
switch (typeof eras[i].since) {
case 'string':
// truncate time
date = hooks(eras[i].since).startOf('day');
eras[i].since = date.valueOf();
break;
}
switch (typeof eras[i].until) {
case 'undefined':
eras[i].until = +Infinity;
break;
case 'string':
// truncate time
date = hooks(eras[i].until).startOf('day').valueOf();
eras[i].until = date.valueOf();
break;
}
}
return eras;
}
function localeErasParse(eraName, format, strict) {
var i,
l,
eras = this.eras(),
name,
abbr,
narrow;
eraName = eraName.toUpperCase();
for (i = 0, l = eras.length; i < l; ++i) {
name = eras[i].name.toUpperCase();
abbr = eras[i].abbr.toUpperCase();
narrow = eras[i].narrow.toUpperCase();
if (strict) {
switch (format) {
case 'N':
case 'NN':
case 'NNN':
if (abbr === eraName) {
return eras[i];
}
break;
case 'NNNN':
if (name === eraName) {
return eras[i];
}
break;
case 'NNNNN':
if (narrow === eraName) {
return eras[i];
}
break;
}
} else if ([name, abbr, narrow].indexOf(eraName) >= 0) {
return eras[i];
}
}
}
function localeErasConvertYear(era, year) {
var dir = era.since <= era.until ? +1 : -1;
if (year === undefined) {
return hooks(era.since).year();
} else {
return hooks(era.since).year() + (year - era.offset) * dir;
}
}
function getEraName() {
var i,
l,
val,
eras = this.localeData().eras();
for (i = 0, l = eras.length; i < l; ++i) {
// truncate time
val = this.clone().startOf('day').valueOf();
if (eras[i].since <= val && val <= eras[i].until) {
return eras[i].name;
}
if (eras[i].until <= val && val <= eras[i].since) {
return eras[i].name;
}
}
return '';
}
function getEraNarrow() {
var i,
l,
val,
eras = this.localeData().eras();
for (i = 0, l = eras.length; i < l; ++i) {
// truncate time
val = this.clone().startOf('day').valueOf();
if (eras[i].since <= val && val <= eras[i].until) {
return eras[i].narrow;
}
if (eras[i].until <= val && val <= eras[i].since) {
return eras[i].narrow;
}
}
return '';
}
function getEraAbbr() {
var i,
l,
val,
eras = this.localeData().eras();
for (i = 0, l = eras.length; i < l; ++i) {
// truncate time
val = this.clone().startOf('day').valueOf();
if (eras[i].since <= val && val <= eras[i].until) {
return eras[i].abbr;
}
if (eras[i].until <= val && val <= eras[i].since) {
return eras[i].abbr;
}
}
return '';
}
function getEraYear() {
var i,
l,
dir,
val,
eras = this.localeData().eras();
for (i = 0, l = eras.length; i < l; ++i) {
dir = eras[i].since <= eras[i].until ? +1 : -1;
// truncate time
val = this.clone().startOf('day').valueOf();
if (
(eras[i].since <= val && val <= eras[i].until) ||
(eras[i].until <= val && val <= eras[i].since)
) {
return (
(this.year() - hooks(eras[i].since).year()) * dir +
eras[i].offset
);
}
}
return this.year();
}
function erasNameRegex(isStrict) {
if (!hasOwnProp(this, '_erasNameRegex')) {
computeErasParse.call(this);
}
return isStrict ? this._erasNameRegex : this._erasRegex;
}
function erasAbbrRegex(isStrict) {
if (!hasOwnProp(this, '_erasAbbrRegex')) {
computeErasParse.call(this);
}
return isStrict ? this._erasAbbrRegex : this._erasRegex;
}
function erasNarrowRegex(isStrict) {
if (!hasOwnProp(this, '_erasNarrowRegex')) {
computeErasParse.call(this);
}
return isStrict ? this._erasNarrowRegex : this._erasRegex;
}
function matchEraAbbr(isStrict, locale) {
return locale.erasAbbrRegex(isStrict);
}
function matchEraName(isStrict, locale) {
return locale.erasNameRegex(isStrict);
}
function matchEraNarrow(isStrict, locale) {
return locale.erasNarrowRegex(isStrict);
}
function matchEraYearOrdinal(isStrict, locale) {
return locale._eraYearOrdinalRegex || matchUnsigned;
}
function computeErasParse() {
var abbrPieces = [],
namePieces = [],
narrowPieces = [],
mixedPieces = [],
i,
l,
eras = this.eras();
for (i = 0, l = eras.length; i < l; ++i) {
namePieces.push(regexEscape(eras[i].name));
abbrPieces.push(regexEscape(eras[i].abbr));
narrowPieces.push(regexEscape(eras[i].narrow));
mixedPieces.push(regexEscape(eras[i].name));
mixedPieces.push(regexEscape(eras[i].abbr));
mixedPieces.push(regexEscape(eras[i].narrow));
}
this._erasRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');
this._erasNameRegex = new RegExp('^(' + namePieces.join('|') + ')', 'i');
this._erasAbbrRegex = new RegExp('^(' + abbrPieces.join('|') + ')', 'i');
this._erasNarrowRegex = new RegExp(
'^(' + narrowPieces.join('|') + ')',
'i'
);
}
// FORMATTING
addFormatToken(0, ['gg', 2], 0, function () {
return this.weekYear() % 100;
});
addFormatToken(0, ['GG', 2], 0, function () {
return this.isoWeekYear() % 100;
});
function addWeekYearFormatToken(token, getter) {
addFormatToken(0, [token, token.length], 0, getter);
}
addWeekYearFormatToken('gggg', 'weekYear');
addWeekYearFormatToken('ggggg', 'weekYear');
addWeekYearFormatToken('GGGG', 'isoWeekYear');
addWeekYearFormatToken('GGGGG', 'isoWeekYear');
// ALIASES
addUnitAlias('weekYear', 'gg');
addUnitAlias('isoWeekYear', 'GG');
// PRIORITY
addUnitPriority('weekYear', 1);
addUnitPriority('isoWeekYear', 1);
// PARSING
addRegexToken('G', matchSigned);
addRegexToken('g', matchSigned);
addRegexToken('GG', match1to2, match2);
addRegexToken('gg', match1to2, match2);
addRegexToken('GGGG', match1to4, match4);
addRegexToken('gggg', match1to4, match4);
addRegexToken('GGGGG', match1to6, match6);
addRegexToken('ggggg', match1to6, match6);
addWeekParseToken(
['gggg', 'ggggg', 'GGGG', 'GGGGG'],
function (input, week, config, token) {
week[token.substr(0, 2)] = toInt(input);
}
);
addWeekParseToken(['gg', 'GG'], function (input, week, config, token) {
week[token] = hooks.parseTwoDigitYear(input);
});
// MOMENTS
function getSetWeekYear(input) {
return getSetWeekYearHelper.call(
this,
input,
this.week(),
this.weekday(),
this.localeData()._week.dow,
this.localeData()._week.doy
);
}
function getSetISOWeekYear(input) {
return getSetWeekYearHelper.call(
this,
input,
this.isoWeek(),
this.isoWeekday(),
1,
4
);
}
function getISOWeeksInYear() {
return weeksInYear(this.year(), 1, 4);
}
function getISOWeeksInISOWeekYear() {
return weeksInYear(this.isoWeekYear(), 1, 4);
}
function getWeeksInYear() {
var weekInfo = this.localeData()._week;
return weeksInYear(this.year(), weekInfo.dow, weekInfo.doy);
}
function getWeeksInWeekYear() {
var weekInfo = this.localeData()._week;
return weeksInYear(this.weekYear(), weekInfo.dow, weekInfo.doy);
}
function getSetWeekYearHelper(input, week, weekday, dow, doy) {
var weeksTarget;
if (input == null) {
return weekOfYear(this, dow, doy).year;
} else {
weeksTarget = weeksInYear(input, dow, doy);
if (week > weeksTarget) {
week = weeksTarget;
}
return setWeekAll.call(this, input, week, weekday, dow, doy);
}
}
function setWeekAll(weekYear, week, weekday, dow, doy) {
var dayOfYearData = dayOfYearFromWeeks(weekYear, week, weekday, dow, doy),
date = createUTCDate(dayOfYearData.year, 0, dayOfYearData.dayOfYear);
this.year(date.getUTCFullYear());
this.month(date.getUTCMonth());
this.date(date.getUTCDate());
return this;
}
// FORMATTING
addFormatToken('Q', 0, 'Qo', 'quarter');
// ALIASES
addUnitAlias('quarter', 'Q');
// PRIORITY
addUnitPriority('quarter', 7);
// PARSING
addRegexToken('Q', match1);
addParseToken('Q', function (input, array) {
array[MONTH] = (toInt(input) - 1) * 3;
});
// MOMENTS
function getSetQuarter(input) {
return input == null
? Math.ceil((this.month() + 1) / 3)
: this.month((input - 1) * 3 + (this.month() % 3));
}
// FORMATTING
addFormatToken('D', ['DD', 2], 'Do', 'date');
// ALIASES
addUnitAlias('date', 'D');
// PRIORITY
addUnitPriority('date', 9);
// PARSING
addRegexToken('D', match1to2);
addRegexToken('DD', match1to2, match2);
addRegexToken('Do', function (isStrict, locale) {
// TODO: Remove "ordinalParse" fallback in next major release.
return isStrict
? locale._dayOfMonthOrdinalParse || locale._ordinalParse
: locale._dayOfMonthOrdinalParseLenient;
});
addParseToken(['D', 'DD'], DATE);
addParseToken('Do', function (input, array) {
array[DATE] = toInt(input.match(match1to2)[0]);
});
// MOMENTS
var getSetDayOfMonth = makeGetSet('Date', true);
// FORMATTING
addFormatToken('DDD', ['DDDD', 3], 'DDDo', 'dayOfYear');
// ALIASES
addUnitAlias('dayOfYear', 'DDD');
// PRIORITY
addUnitPriority('dayOfYear', 4);
// PARSING
addRegexToken('DDD', match1to3);
addRegexToken('DDDD', match3);
addParseToken(['DDD', 'DDDD'], function (input, array, config) {
config._dayOfYear = toInt(input);
});
// HELPERS
// MOMENTS
function getSetDayOfYear(input) {
var dayOfYear =
Math.round(
(this.clone().startOf('day') - this.clone().startOf('year')) / 864e5
) + 1;
return input == null ? dayOfYear : this.add(input - dayOfYear, 'd');
}
// FORMATTING
addFormatToken('m', ['mm', 2], 0, 'minute');
// ALIASES
addUnitAlias('minute', 'm');
// PRIORITY
addUnitPriority('minute', 14);
// PARSING
addRegexToken('m', match1to2);
addRegexToken('mm', match1to2, match2);
addParseToken(['m', 'mm'], MINUTE);
// MOMENTS
var getSetMinute = makeGetSet('Minutes', false);
// FORMATTING
addFormatToken('s', ['ss', 2], 0, 'second');
// ALIASES
addUnitAlias('second', 's');
// PRIORITY
addUnitPriority('second', 15);
// PARSING
addRegexToken('s', match1to2);
addRegexToken('ss', match1to2, match2);
addParseToken(['s', 'ss'], SECOND);
// MOMENTS
var getSetSecond = makeGetSet('Seconds', false);
// FORMATTING
addFormatToken('S', 0, 0, function () {
return ~~(this.millisecond() / 100);
});
addFormatToken(0, ['SS', 2], 0, function () {
return ~~(this.millisecond() / 10);
});
addFormatToken(0, ['SSS', 3], 0, 'millisecond');
addFormatToken(0, ['SSSS', 4], 0, function () {
return this.millisecond() * 10;
});
addFormatToken(0, ['SSSSS', 5], 0, function () {
return this.millisecond() * 100;
});
addFormatToken(0, ['SSSSSS', 6], 0, function () {
return this.millisecond() * 1000;
});
addFormatToken(0, ['SSSSSSS', 7], 0, function () {
return this.millisecond() * 10000;
});
addFormatToken(0, ['SSSSSSSS', 8], 0, function () {
return this.millisecond() * 100000;
});
addFormatToken(0, ['SSSSSSSSS', 9], 0, function () {
return this.millisecond() * 1000000;
});
// ALIASES
addUnitAlias('millisecond', 'ms');
// PRIORITY
addUnitPriority('millisecond', 16);
// PARSING
addRegexToken('S', match1to3, match1);
addRegexToken('SS', match1to3, match2);
addRegexToken('SSS', match1to3, match3);
var token, getSetMillisecond;
for (token = 'SSSS'; token.length <= 9; token += 'S') {
addRegexToken(token, matchUnsigned);
}
function parseMs(input, array) {
array[MILLISECOND] = toInt(('0.' + input) * 1000);
}
for (token = 'S'; token.length <= 9; token += 'S') {
addParseToken(token, parseMs);
}
getSetMillisecond = makeGetSet('Milliseconds', false);
// FORMATTING
addFormatToken('z', 0, 0, 'zoneAbbr');
addFormatToken('zz', 0, 0, 'zoneName');
// MOMENTS
function getZoneAbbr() {
return this._isUTC ? 'UTC' : '';
}
function getZoneName() {
return this._isUTC ? 'Coordinated Universal Time' : '';
}
var proto = Moment.prototype;
proto.add = add;
proto.calendar = calendar$1;
proto.clone = clone;
proto.diff = diff;
proto.endOf = endOf;
proto.format = format;
proto.from = from;
proto.fromNow = fromNow;
proto.to = to;
proto.toNow = toNow;
proto.get = stringGet;
proto.invalidAt = invalidAt;
proto.isAfter = isAfter;
proto.isBefore = isBefore;
proto.isBetween = isBetween;
proto.isSame = isSame;
proto.isSameOrAfter = isSameOrAfter;
proto.isSameOrBefore = isSameOrBefore;
proto.isValid = isValid$2;
proto.lang = lang;
proto.locale = locale;
proto.localeData = localeData;
proto.max = prototypeMax;
proto.min = prototypeMin;
proto.parsingFlags = parsingFlags;
proto.set = stringSet;
proto.startOf = startOf;
proto.subtract = subtract;
proto.toArray = toArray;
proto.toObject = toObject;
proto.toDate = toDate;
proto.toISOString = toISOString;
proto.inspect = inspect;
if (typeof Symbol !== 'undefined' && Symbol.for != null) {
proto[Symbol.for('nodejs.util.inspect.custom')] = function () {
return 'Moment<' + this.format() + '>';
};
}
proto.toJSON = toJSON;
proto.toString = toString;
proto.unix = unix;
proto.valueOf = valueOf;
proto.creationData = creationData;
proto.eraName = getEraName;
proto.eraNarrow = getEraNarrow;
proto.eraAbbr = getEraAbbr;
proto.eraYear = getEraYear;
proto.year = getSetYear;
proto.isLeapYear = getIsLeapYear;
proto.weekYear = getSetWeekYear;
proto.isoWeekYear = getSetISOWeekYear;
proto.quarter = proto.quarters = getSetQuarter;
proto.month = getSetMonth;
proto.daysInMonth = getDaysInMonth;
proto.week = proto.weeks = getSetWeek;
proto.isoWeek = proto.isoWeeks = getSetISOWeek;
proto.weeksInYear = getWeeksInYear;
proto.weeksInWeekYear = getWeeksInWeekYear;
proto.isoWeeksInYear = getISOWeeksInYear;
proto.isoWeeksInISOWeekYear = getISOWeeksInISOWeekYear;
proto.date = getSetDayOfMonth;
proto.day = proto.days = getSetDayOfWeek;
proto.weekday = getSetLocaleDayOfWeek;
proto.isoWeekday = getSetISODayOfWeek;
proto.dayOfYear = getSetDayOfYear;
proto.hour = proto.hours = getSetHour;
proto.minute = proto.minutes = getSetMinute;
proto.second = proto.seconds = getSetSecond;
proto.millisecond = proto.milliseconds = getSetMillisecond;
proto.utcOffset = getSetOffset;
proto.utc = setOffsetToUTC;
proto.local = setOffsetToLocal;
proto.parseZone = setOffsetToParsedOffset;
proto.hasAlignedHourOffset = hasAlignedHourOffset;
proto.isDST = isDaylightSavingTime;
proto.isLocal = isLocal;
proto.isUtcOffset = isUtcOffset;
proto.isUtc = isUtc;
proto.isUTC = isUtc;
proto.zoneAbbr = getZoneAbbr;
proto.zoneName = getZoneName;
proto.dates = deprecate(
'dates accessor is deprecated. Use date instead.',
getSetDayOfMonth
);
proto.months = deprecate(
'months accessor is deprecated. Use month instead',
getSetMonth
);
proto.years = deprecate(
'years accessor is deprecated. Use year instead',
getSetYear
);
proto.zone = deprecate(
'moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/',
getSetZone
);
proto.isDSTShifted = deprecate(
'isDSTShifted is deprecated. See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information',
isDaylightSavingTimeShifted
);
function createUnix(input) {
return createLocal(input * 1000);
}
function createInZone() {
return createLocal.apply(null, arguments).parseZone();
}
function preParsePostFormat(string) {
return string;
}
var proto$1 = Locale.prototype;
proto$1.calendar = calendar;
proto$1.longDateFormat = longDateFormat;
proto$1.invalidDate = invalidDate;
proto$1.ordinal = ordinal;
proto$1.preparse = preParsePostFormat;
proto$1.postformat = preParsePostFormat;
proto$1.relativeTime = relativeTime;
proto$1.pastFuture = pastFuture;
proto$1.set = set;
proto$1.eras = localeEras;
proto$1.erasParse = localeErasParse;
proto$1.erasConvertYear = localeErasConvertYear;
proto$1.erasAbbrRegex = erasAbbrRegex;
proto$1.erasNameRegex = erasNameRegex;
proto$1.erasNarrowRegex = erasNarrowRegex;
proto$1.months = localeMonths;
proto$1.monthsShort = localeMonthsShort;
proto$1.monthsParse = localeMonthsParse;
proto$1.monthsRegex = monthsRegex;
proto$1.monthsShortRegex = monthsShortRegex;
proto$1.week = localeWeek;
proto$1.firstDayOfYear = localeFirstDayOfYear;
proto$1.firstDayOfWeek = localeFirstDayOfWeek;
proto$1.weekdays = localeWeekdays;
proto$1.weekdaysMin = localeWeekdaysMin;
proto$1.weekdaysShort = localeWeekdaysShort;
proto$1.weekdaysParse = localeWeekdaysParse;
proto$1.weekdaysRegex = weekdaysRegex;
proto$1.weekdaysShortRegex = weekdaysShortRegex;
proto$1.weekdaysMinRegex = weekdaysMinRegex;
proto$1.isPM = localeIsPM;
proto$1.meridiem = localeMeridiem;
function get$1(format, index, field, setter) {
var locale = getLocale(),
utc = createUTC().set(setter, index);
return locale[field](utc, format);
}
function listMonthsImpl(format, index, field) {
if (isNumber(format)) {
index = format;
format = undefined;
}
format = format || '';
if (index != null) {
return get$1(format, index, field, 'month');
}
var i,
out = [];
for (i = 0; i < 12; i++) {
out[i] = get$1(format, i, field, 'month');
}
return out;
}
// ()
// (5)
// (fmt, 5)
// (fmt)
// (true)
// (true, 5)
// (true, fmt, 5)
// (true, fmt)
function listWeekdaysImpl(localeSorted, format, index, field) {
if (typeof localeSorted === 'boolean') {
if (isNumber(format)) {
index = format;
format = undefined;
}
format = format || '';
} else {
format = localeSorted;
index = format;
localeSorted = false;
if (isNumber(format)) {
index = format;
format = undefined;
}
format = format || '';
}
var locale = getLocale(),
shift = localeSorted ? locale._week.dow : 0,
i,
out = [];
if (index != null) {
return get$1(format, (index + shift) % 7, field, 'day');
}
for (i = 0; i < 7; i++) {
out[i] = get$1(format, (i + shift) % 7, field, 'day');
}
return out;
}
function listMonths(format, index) {
return listMonthsImpl(format, index, 'months');
}
function listMonthsShort(format, index) {
return listMonthsImpl(format, index, 'monthsShort');
}
function listWeekdays(localeSorted, format, index) {
return listWeekdaysImpl(localeSorted, format, index, 'weekdays');
}
function listWeekdaysShort(localeSorted, format, index) {
return listWeekdaysImpl(localeSorted, format, index, 'weekdaysShort');
}
function listWeekdaysMin(localeSorted, format, index) {
return listWeekdaysImpl(localeSorted, format, index, 'weekdaysMin');
}
getSetGlobalLocale('en', {
eras: [
{
since: '0001-01-01',
until: +Infinity,
offset: 1,
name: 'Anno Domini',
narrow: 'AD',
abbr: 'AD',
},
{
since: '0000-12-31',
until: -Infinity,
offset: 1,
name: 'Before Christ',
narrow: 'BC',
abbr: 'BC',
},
],
dayOfMonthOrdinalParse: /\d{1,2}(th|st|nd|rd)/,
ordinal: function (number) {
var b = number % 10,
output =
toInt((number % 100) / 10) === 1
? 'th'
: b === 1
? 'st'
: b === 2
? 'nd'
: b === 3
? 'rd'
: 'th';
return number + output;
},
});
// Side effect imports
hooks.lang = deprecate(
'moment.lang is deprecated. Use moment.locale instead.',
getSetGlobalLocale
);
hooks.langData = deprecate(
'moment.langData is deprecated. Use moment.localeData instead.',
getLocale
);
var mathAbs = Math.abs;
function abs() {
var data = this._data;
this._milliseconds = mathAbs(this._milliseconds);
this._days = mathAbs(this._days);
this._months = mathAbs(this._months);
data.milliseconds = mathAbs(data.milliseconds);
data.seconds = mathAbs(data.seconds);
data.minutes = mathAbs(data.minutes);
data.hours = mathAbs(data.hours);
data.months = mathAbs(data.months);
data.years = mathAbs(data.years);
return this;
}
function addSubtract$1(duration, input, value, direction) {
var other = createDuration(input, value);
duration._milliseconds += direction * other._milliseconds;
duration._days += direction * other._days;
duration._months += direction * other._months;
return duration._bubble();
}
// supports only 2.0-style add(1, 's') or add(duration)
function add$1(input, value) {
return addSubtract$1(this, input, value, 1);
}
// supports only 2.0-style subtract(1, 's') or subtract(duration)
function subtract$1(input, value) {
return addSubtract$1(this, input, value, -1);
}
function absCeil(number) {
if (number < 0) {
return Math.floor(number);
} else {
return Math.ceil(number);
}
}
function bubble() {
var milliseconds = this._milliseconds,
days = this._days,
months = this._months,
data = this._data,
seconds,
minutes,
hours,
years,
monthsFromDays;
// if we have a mix of positive and negative values, bubble down first
// check: https://github.com/moment/moment/issues/2166
if (
!(
(milliseconds >= 0 && days >= 0 && months >= 0) ||
(milliseconds <= 0 && days <= 0 && months <= 0)
)
) {
milliseconds += absCeil(monthsToDays(months) + days) * 864e5;
days = 0;
months = 0;
}
// The following code bubbles up values, see the tests for
// examples of what that means.
data.milliseconds = milliseconds % 1000;
seconds = absFloor(milliseconds / 1000);
data.seconds = seconds % 60;
minutes = absFloor(seconds / 60);
data.minutes = minutes % 60;
hours = absFloor(minutes / 60);
data.hours = hours % 24;
days += absFloor(hours / 24);
// convert days to months
monthsFromDays = absFloor(daysToMonths(days));
months += monthsFromDays;
days -= absCeil(monthsToDays(monthsFromDays));
// 12 months -> 1 year
years = absFloor(months / 12);
months %= 12;
data.days = days;
data.months = months;
data.years = years;
return this;
}
function daysToMonths(days) {
// 400 years have 146097 days (taking into account leap year rules)
// 400 years have 12 months === 4800
return (days * 4800) / 146097;
}
function monthsToDays(months) {
// the reverse of daysToMonths
return (months * 146097) / 4800;
}
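// Quick sanity check of the ratio (illustrative): 146097 / 4800 ~ 30.44 days
// per month, so daysToMonths(365) ~ 11.99 and monthsToDays(12) ~ 365.2425.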
function as(units) {
if (!this.isValid()) {
return NaN;
}
var days,
months,
milliseconds = this._milliseconds;
units = normalizeUnits(units);
if (units === 'month' || units === 'quarter' || units === 'year') {
days = this._days + milliseconds / 864e5;
months = this._months + daysToMonths(days);
switch (units) {
case 'month':
return months;
case 'quarter':
return months / 3;
case 'year':
return months / 12;
}
} else {
// handle milliseconds separately because of floating point math errors (issue #1867)
days = this._days + Math.round(monthsToDays(this._months));
switch (units) {
case 'week':
return days / 7 + milliseconds / 6048e5;
case 'day':
return days + milliseconds / 864e5;
case 'hour':
return days * 24 + milliseconds / 36e5;
case 'minute':
return days * 1440 + milliseconds / 6e4;
case 'second':
return days * 86400 + milliseconds / 1000;
// Math.floor prevents floating point math errors here
case 'millisecond':
return Math.floor(days * 864e5) + milliseconds;
default:
throw new Error('Unknown unit ' + units);
}
}
}
// TODO: Use this.as('ms')?
function valueOf$1() {
if (!this.isValid()) {
return NaN;
}
return (
this._milliseconds +
this._days * 864e5 +
(this._months % 12) * 2592e6 +
toInt(this._months / 12) * 31536e6
);
}
function makeAs(alias) {
return function () {
return this.as(alias);
};
}
var asMilliseconds = makeAs('ms'),
asSeconds = makeAs('s'),
asMinutes = makeAs('m'),
asHours = makeAs('h'),
asDays = makeAs('d'),
asWeeks = makeAs('w'),
asMonths = makeAs('M'),
asQuarters = makeAs('Q'),
asYears = makeAs('y');
function clone$1() {
return createDuration(this);
}
function get$2(units) {
units = normalizeUnits(units);
return this.isValid() ? this[units + 's']() : NaN;
}
function makeGetter(name) {
return function () {
return this.isValid() ? this._data[name] : NaN;
};
}
var milliseconds = makeGetter('milliseconds'),
seconds = makeGetter('seconds'),
minutes = makeGetter('minutes'),
hours = makeGetter('hours'),
days = makeGetter('days'),
months = makeGetter('months'),
years = makeGetter('years');
function weeks() {
return absFloor(this.days() / 7);
}
var round = Math.round,
thresholds = {
ss: 44, // a few seconds to seconds
s: 45, // seconds to minute
m: 45, // minutes to hour
h: 22, // hours to day
d: 26, // days to month/week
w: null, // weeks to month
M: 11, // months to year
};
// helper function for moment.fn.from, moment.fn.fromNow, and moment.duration.fn.humanize
function substituteTimeAgo(string, number, withoutSuffix, isFuture, locale) {
return locale.relativeTime(number || 1, !!withoutSuffix, string, isFuture);
}
function relativeTime$1(posNegDuration, withoutSuffix, thresholds, locale) {
var duration = createDuration(posNegDuration).abs(),
seconds = round(duration.as('s')),
minutes = round(duration.as('m')),
hours = round(duration.as('h')),
days = round(duration.as('d')),
months = round(duration.as('M')),
weeks = round(duration.as('w')),
years = round(duration.as('y')),
a =
(seconds <= thresholds.ss && ['s', seconds]) ||
(seconds < thresholds.s && ['ss', seconds]) ||
(minutes <= 1 && ['m']) ||
(minutes < thresholds.m && ['mm', minutes]) ||
(hours <= 1 && ['h']) ||
(hours < thresholds.h && ['hh', hours]) ||
(days <= 1 && ['d']) ||
(days < thresholds.d && ['dd', days]);
if (thresholds.w != null) {
a =
a ||
(weeks <= 1 && ['w']) ||
(weeks < thresholds.w && ['ww', weeks]);
}
a = a ||
(months <= 1 && ['M']) ||
(months < thresholds.M && ['MM', months]) ||
(years <= 1 && ['y']) || ['yy', years];
a[2] = withoutSuffix;
a[3] = +posNegDuration > 0;
a[4] = locale;
return substituteTimeAgo.apply(null, a);
}
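// Threshold walk-through (illustrative): a 30 second duration satisfies
// seconds <= thresholds.ss (44) and yields ['s', 30] ("a few seconds"); a
// 50 minute duration fails the minute thresholds (50 >= thresholds.m), rounds
// to hours = 1 and yields ['h'] ("an hour").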
// This function allows you to set the rounding function for relative time strings
function getSetRelativeTimeRounding(roundingFunction) {
if (roundingFunction === undefined) {
return round;
}
if (typeof roundingFunction === 'function') {
round = roundingFunction;
return true;
}
return false;
}
// This function allows you to set a threshold for relative time strings
function getSetRelativeTimeThreshold(threshold, limit) {
if (thresholds[threshold] === undefined) {
return false;
}
if (limit === undefined) {
return thresholds[threshold];
}
thresholds[threshold] = limit;
if (threshold === 's') {
thresholds.ss = limit - 1;
}
return true;
}
function humanize(argWithSuffix, argThresholds) {
if (!this.isValid()) {
return this.localeData().invalidDate();
}
var withSuffix = false,
th = thresholds,
locale,
output;
if (typeof argWithSuffix === 'object') {
argThresholds = argWithSuffix;
argWithSuffix = false;
}
if (typeof argWithSuffix === 'boolean') {
withSuffix = argWithSuffix;
}
if (typeof argThresholds === 'object') {
th = Object.assign({}, thresholds, argThresholds);
if (argThresholds.s != null && argThresholds.ss == null) {
th.ss = argThresholds.s - 1;
}
}
locale = this.localeData();
output = relativeTime$1(this, !withSuffix, th, locale);
if (withSuffix) {
output = locale.pastFuture(+this, output);
}
return locale.postformat(output);
}
var abs$1 = Math.abs;
function sign(x) {
return (x > 0) - (x < 0) || +x;
}
function toISOString$1() {
// for ISO strings we do not use the normal bubbling rules:
// * milliseconds bubble up until they become hours
// * days do not bubble at all
// * months bubble up until they become years
// This is because there is no context-free conversion between hours and days
// (think of clock changes)
// and also not between days and months (28-31 days per month)
if (!this.isValid()) {
return this.localeData().invalidDate();
}
var seconds = abs$1(this._milliseconds) / 1000,
days = abs$1(this._days),
months = abs$1(this._months),
minutes,
hours,
years,
s,
total = this.asSeconds(),
totalSign,
ymSign,
daysSign,
hmsSign;
if (!total) {
// this is the same as C#'s (Noda) and python (isodate)...
// but not other JS (goog.date)
return 'P0D';
}
// 3600 seconds -> 60 minutes -> 1 hour
minutes = absFloor(seconds / 60);
hours = absFloor(minutes / 60);
seconds %= 60;
minutes %= 60;
// 12 months -> 1 year
years = absFloor(months / 12);
months %= 12;
// inspired by https://github.com/dordille/moment-isoduration/blob/master/moment.isoduration.js
s = seconds ? seconds.toFixed(3).replace(/\.?0+$/, '') : '';
totalSign = total < 0 ? '-' : '';
ymSign = sign(this._months) !== sign(total) ? '-' : '';
daysSign = sign(this._days) !== sign(total) ? '-' : '';
hmsSign = sign(this._milliseconds) !== sign(total) ? '-' : '';
return (
totalSign +
'P' +
(years ? ymSign + years + 'Y' : '') +
(months ? ymSign + months + 'M' : '') +
(days ? daysSign + days + 'D' : '') +
(hours || minutes || seconds ? 'T' : '') +
(hours ? hmsSign + hours + 'H' : '') +
(minutes ? hmsSign + minutes + 'M' : '') +
(seconds ? hmsSign + s + 'S' : '')
);
}
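// Example output (illustrative): createDuration({ months: 13, days: 40, seconds: 3661 })
// .toISOString() bubbles 13 months to 1Y1M and 3661 seconds to 1H1M1S, but
// leaves the 40 days alone, producing "P1Y1M40DT1H1M1S".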
var proto$2 = Duration.prototype;
proto$2.isValid = isValid$1;
proto$2.abs = abs;
proto$2.add = add$1;
proto$2.subtract = subtract$1;
proto$2.as = as;
proto$2.asMilliseconds = asMilliseconds;
proto$2.asSeconds = asSeconds;
proto$2.asMinutes = asMinutes;
proto$2.asHours = asHours;
proto$2.asDays = asDays;
proto$2.asWeeks = asWeeks;
proto$2.asMonths = asMonths;
proto$2.asQuarters = asQuarters;
proto$2.asYears = asYears;
proto$2.valueOf = valueOf$1;
proto$2._bubble = bubble;
proto$2.clone = clone$1;
proto$2.get = get$2;
proto$2.milliseconds = milliseconds;
proto$2.seconds = seconds;
proto$2.minutes = minutes;
proto$2.hours = hours;
proto$2.days = days;
proto$2.weeks = weeks;
proto$2.months = months;
proto$2.years = years;
proto$2.humanize = humanize;
proto$2.toISOString = toISOString$1;
proto$2.toString = toISOString$1;
proto$2.toJSON = toISOString$1;
proto$2.locale = locale;
proto$2.localeData = localeData;
proto$2.toIsoString = deprecate(
'toIsoString() is deprecated. Please use toISOString() instead (notice the capitals)',
toISOString$1
);
proto$2.lang = lang;
// FORMATTING
addFormatToken('X', 0, 0, 'unix');
addFormatToken('x', 0, 0, 'valueOf');
// PARSING
addRegexToken('x', matchSigned);
addRegexToken('X', matchTimestamp);
addParseToken('X', function (input, array, config) {
config._d = new Date(parseFloat(input) * 1000);
});
addParseToken('x', function (input, array, config) {
config._d = new Date(toInt(input));
});
//! moment.js
hooks.version = '2.29.4';
setHookCallback(createLocal);
hooks.fn = proto;
hooks.min = min;
hooks.max = max;
hooks.now = now;
hooks.utc = createUTC;
hooks.unix = createUnix;
hooks.months = listMonths;
hooks.isDate = isDate;
hooks.locale = getSetGlobalLocale;
hooks.invalid = createInvalid;
hooks.duration = createDuration;
hooks.isMoment = isMoment;
hooks.weekdays = listWeekdays;
hooks.parseZone = createInZone;
hooks.localeData = getLocale;
hooks.isDuration = isDuration;
hooks.monthsShort = listMonthsShort;
hooks.weekdaysMin = listWeekdaysMin;
hooks.defineLocale = defineLocale;
hooks.updateLocale = updateLocale;
hooks.locales = listLocales;
hooks.weekdaysShort = listWeekdaysShort;
hooks.normalizeUnits = normalizeUnits;
hooks.relativeTimeRounding = getSetRelativeTimeRounding;
hooks.relativeTimeThreshold = getSetRelativeTimeThreshold;
hooks.calendarFormat = getCalendarFormat;
hooks.prototype = proto;
// currently HTML5 input type only supports 24-hour formats
hooks.HTML5_FMT = {
DATETIME_LOCAL: 'YYYY-MM-DDTHH:mm', // <input type="datetime-local" />
DATETIME_LOCAL_SECONDS: 'YYYY-MM-DDTHH:mm:ss', // <input type="datetime-local" step="1" />
DATETIME_LOCAL_MS: 'YYYY-MM-DDTHH:mm:ss.SSS', // <input type="datetime-local" step="0.001" />
DATE: 'YYYY-MM-DD', // <input type="date" />
TIME: 'HH:mm', // <input type="time" />
TIME_SECONDS: 'HH:mm:ss', // <input type="time" step="1" />
TIME_MS: 'HH:mm:ss.SSS', // <input type="time" step="0.001" />
WEEK: 'GGGG-[W]WW', // <input type="week" />
MONTH: 'YYYY-MM', // <input type="month" />
};
export default hooks;
|
PypiClean
|
/redash_stmo-2020.5.1-py3-none-any.whl/redash_stmo/query_runner/presto.py
|
import collections
import logging
from pyhive import presto
from redash.query_runner import register
from redash.query_runner.presto import Presto
logger = logging.getLogger(__name__)
class STMOPresto(Presto):
"""
A custom Presto query runner. Currently empty.
"""
@classmethod
def type(cls):
"""Overrides the name to match the name of the parent query runner"""
return "presto"
class STMOConnection(presto.Connection):
"""
A custom Presto connection that uses the custom Presto cursor
as the default cursor.
"""
def cursor(self):
return STMOPrestoCursor(*self._args, **self._kwargs)
class STMOPrestoCursor(presto.Cursor):
"""
A custom Presto cursor that processes the data after it has been
handled by the parent cursor to apply various transformations.
"""
def _process_response(self, response):
super(STMOPrestoCursor, self)._process_response(response)
self._data = self._process_data()
def _process_data(self):
processed_data = collections.deque()
for row in self._data: # the top-level is an iterable of records (i.e. rows)
item = []
            for column, value in zip(self._columns, row):
                item.append(self._format_data(column["typeSignature"], value))
processed_data.append(item)
return processed_data
def _format_data(self, column, data):
"""Given a Presto column and its data, return a more human-readable
format of its data for some data types."""
type = column["rawType"]
try:
iter(data) # check if the data is iterable
except TypeError:
return data # non-iterables can simply be directly shown
# records should have their fields associated with types
# but keep the tuple format for backward-compatibility
if type == "row":
keys = column["literalArguments"]
values = [
self._format_data(c, d) for c, d in zip(column["typeArguments"], data)
]
return tuple(zip(keys, values))
# arrays should have their element types associated with each element
elif type == "array":
rep = [column["typeArguments"][0]] * len(data)
return [self._format_data(c, d) for c, d in zip(rep, data)]
# maps should have their value types associated with each value
# (note that keys are always strings), but keep the tuple format
# for backward-compatibility
elif type == "map":
value_type = column["typeArguments"][1]
return [
(k, self._format_data(value_type, v)) for k, v in data.items()
]
else:
# unknown type, don't process it
return data
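# Illustrative sketch (not part of the original module): for a column whose
# Presto typeSignature looks like
#     {"rawType": "map", "typeArguments": [<varchar signature>, <bigint signature>]}
# a raw cell value of {"a": 1, "b": 2} is returned by _format_data as
# [("a", 1), ("b", 2)], i.e. the key/value pairing is preserved in a readable form.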
def stmo_connect(*args, **kwargs):
"""
A custom connect function to be used to override the
default pyhive.presto.connect function.
"""
return STMOConnection(*args, **kwargs)
def extension(app):
logger.info("Loading Redash Extension for the custom Presto query runner")
# Monkeypatch the pyhive.presto.connect function
presto.connect = stmo_connect
# and register our own STMOPresto query runner class
# which automatically overwrites the default presto query runner
register(STMOPresto)
logger.info("Loaded Redash Extension for the custom Presto query runner")
return stmo_connect
|
PypiClean
|
/django-urlmagic-0.4.6.tar.gz/django-urlmagic-0.4.6/urlmagic/core.py
|
from abc import ABCMeta
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.decorators import permission_required
from django.db.models.fields import SlugField
from urlmagic.views import core
from urlmagic.utils import model_names
class UrlGenerator(object):
__metaclass__ = ABCMeta
default_views = {
"add": core.ContextCreateView,
"delete": core.ContextDeleteView,
"detail": core.ContextDetailView,
"edit": core.ContextUpdateView,
"list": core.ContextListView,
}
@classmethod
def get_model_key(cls, model):
for field in model._meta.fields:
if field.db_index and field._unique and isinstance(field, SlugField):
return field.name
return "pk"
@classmethod
def adjust_result(cls, r):
return r
@classmethod
def adjust_dict(cls, model, d):
return d
@classmethod
def list(cls, model, **kwargs):
return cls.adjust_result(cls.list_inner(model, **cls.adjust_dict(model, kwargs)))
@classmethod
def add(cls, model, **kwargs):
return cls.adjust_result(cls.add_inner(model, **cls.adjust_dict(model, kwargs)))
@classmethod
def detail(cls, model, **kwargs):
return cls.adjust_result(cls.detail_inner(model, **cls.adjust_dict(model, kwargs)))
@classmethod
def edit(cls, model, **kwargs):
return cls.adjust_result(cls.edit_inner(model, **cls.adjust_dict(model, kwargs)))
@classmethod
def delete(cls, model, **kwargs):
return cls.adjust_result(cls.delete_inner(model, **cls.adjust_dict(model, kwargs)))
@classmethod
def list_inner(
cls,
model=None,
queryset=None,
name_format="{role}_{model_system}_list",
permission_format=False,
template_format="{role}/{model_system}_list.html",
url_format="^{model_plural_short}/$",
view=None,
view_kwargs=None,
format_kwargs=None,
):
format_kwargs = format_kwargs or {}
format_kwargs.update(model_names(model))
view_kwargs = view_kwargs or {}
view_kwargs.setdefault("extra_context", {})
view_kwargs["extra_context"].update(format_kwargs)
view_kwargs.setdefault("model", model)
view_kwargs.setdefault("queryset", queryset)
view_kwargs.setdefault("paginate_by", getattr(settings, "PAGINATE_PER_PAGE", 50))
view_kwargs.setdefault("template_name", template_format.format(**format_kwargs))
response = url(
url_format.format(**format_kwargs),
(view or cls.default_views.get("list", core.ContextListView)).as_view(**view_kwargs),
name=name_format.format(**format_kwargs)
)
if permission_format:
if permission_format is True:
permission_format = "{model_module}.change_{model_system}"
response._callback = permission_required(permission_format.format(**format_kwargs))(response._callback)
return response
@classmethod
def add_inner(
cls,
model=None,
queryset=None,
form_class=None,
name_format="{role}_{model_system}_add",
permission_format=False,
template_format="{role}/{model_system}_add.html",
url_format="^{model_plural_short}/add/$",
view=None,
view_kwargs=None,
format_kwargs=None,
):
format_kwargs = format_kwargs or {}
format_kwargs.update(model_names(model))
view_kwargs = view_kwargs or {}
view_kwargs.setdefault("extra_context", {})
view_kwargs["extra_context"].update(format_kwargs)
view_kwargs.setdefault("form_class", form_class)
view_kwargs.setdefault("model", model)
view_kwargs.setdefault("queryset", queryset)
view_kwargs.setdefault("template_name", template_format.format(**format_kwargs))
response = url(
url_format.format(**format_kwargs),
(view or cls.default_views.get("add", core.ContextCreateView)).as_view(**view_kwargs),
name=name_format.format(**format_kwargs)
)
if permission_format:
if permission_format is True:
permission_format = "{model_module}.add_{model_system}"
response._callback = permission_required(permission_format.format(**format_kwargs))(response._callback)
return response
@classmethod
def detail_inner(
cls,
model=None,
queryset=None,
name_format="{role}_{model_system}_detail",
permission_format=False,
template_format="{role}/{model_system}_detail.html",
model_key=None,
url_format="^{model_plural_short}/(?P<{model_key}>[^/]+)/$",
view=None,
view_kwargs=None,
format_kwargs=None,
):
format_kwargs = format_kwargs or {}
format_kwargs.setdefault("model_key", model_key or cls.get_model_key(model))
format_kwargs.update(model_names(model))
view_kwargs = view_kwargs or {}
view_kwargs.setdefault("extra_context", {})
view_kwargs["extra_context"].update(format_kwargs)
view_kwargs.setdefault("model", model)
view_kwargs.setdefault("queryset", queryset)
view_kwargs.setdefault("template_name", template_format.format(**format_kwargs))
response = url(
url_format.format(**format_kwargs),
(view or cls.default_views.get("detail", core.ContextDetailView)).as_view(**view_kwargs),
name=name_format.format(**format_kwargs)
)
if permission_format:
if permission_format is True:
permission_format = "{model_module}.change_{model_system}"
response._callback = permission_required(permission_format.format(**format_kwargs))(response._callback)
return response
@classmethod
def edit_inner(
cls,
model=None,
queryset=None,
form_class=None,
name_format="{role}_{model_system}_edit",
permission_format=False,
template_format="{role}/{model_system}_edit.html",
model_key=None,
url_format="^{model_plural_short}/(?P<{model_key}>[^/]+)/edit/$",
view=None,
view_kwargs=None,
format_kwargs=None,
):
format_kwargs = format_kwargs or {}
format_kwargs.setdefault("model_key", model_key or cls.get_model_key(model))
format_kwargs.update(model_names(model))
view_kwargs = view_kwargs or {}
view_kwargs.setdefault("extra_context", {})
view_kwargs["extra_context"].update(format_kwargs)
view_kwargs.setdefault("form_class", form_class)
view_kwargs.setdefault("model", model)
view_kwargs.setdefault("queryset", queryset)
view_kwargs.setdefault("template_name", template_format.format(**format_kwargs))
response = url(
url_format.format(**format_kwargs),
(view or cls.default_views.get("edit", core.ContextUpdateView)).as_view(**view_kwargs),
name=name_format.format(**format_kwargs)
)
if permission_format:
if permission_format is True:
permission_format = "{model_module}.change_{model_system}"
response._callback = permission_required(permission_format.format(**format_kwargs))(response._callback)
return response
@classmethod
def delete_inner(
cls,
model=None,
queryset=None,
name_format="{role}_{model_system}_delete",
permission_format=False,
template_format="{role}/{model_system}_delete.html",
model_key=None,
url_format="^{model_plural_short}/(?P<{model_key}>[^/]+)/delete/$",
view=None,
view_kwargs=None,
format_kwargs=None,
):
format_kwargs = format_kwargs or {}
format_kwargs.setdefault("model_key", model_key or cls.get_model_key(model))
format_kwargs.update(model_names(model))
view_kwargs = view_kwargs or {}
view_kwargs.setdefault("extra_context", {})
view_kwargs["extra_context"].update(format_kwargs)
view_kwargs.setdefault("success_url", "../..")
view_kwargs.setdefault("model", model)
view_kwargs.setdefault("queryset", queryset)
view_kwargs.setdefault("template_name", template_format.format(**format_kwargs))
response = url(
url_format.format(**format_kwargs),
(view or cls.default_views.get("delete", core.ContextDeleteView)).as_view(**view_kwargs),
name=name_format.format(**format_kwargs)
)
if permission_format:
if permission_format is True:
permission_format = "{model_module}.delete_{model_system}"
response._callback = permission_required(permission_format.format(**format_kwargs))(response._callback)
return response
@classmethod
def singular_add(cls, model, **kwargs):
kwargs.setdefault("name_format", "{role}_{model_system}_add")
kwargs.setdefault("url_format", "^{model_singular_short}/add/$")
return cls.add(model, **kwargs)
@classmethod
def singular_edit(cls, model, **kwargs):
kwargs.setdefault("name_format", "{role}_{model_system}_edit")
kwargs.setdefault("url_format", "^{model_singular_short}/edit/$")
kwargs.setdefault("view_kwargs", {})
kwargs["view_kwargs"].setdefault("success_url", ".")
return cls.edit(model, **kwargs)
@classmethod
def singular_detail(cls, model, **kwargs):
kwargs.setdefault("name_format", "{role}_{model_system}_detail")
kwargs.setdefault("url_format", "^{model_singular_short}/$")
return cls.detail(model, **kwargs)
@classmethod
def singular_delete(cls, model, **kwargs):
kwargs.setdefault("name_format", "{role}_{model_system}_delete")
kwargs.setdefault("url_format", "^{model_singular_short}/delete/$")
return cls.delete(model, **kwargs)
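# Hedged usage sketch (not part of the original module). The concrete registry class name is not
# visible in this file, so "SiteCRUD" and "Article" below are hypothetical; the point is only to
# show how the list/add/detail/edit/delete helpers above are meant to feed Django urlpatterns,
# optionally wrapped in permission_required() when permission_format is truthy.
#
#     from myapp.models import Article                      # hypothetical model
#
#     class SiteCRUD(CRUDPatterns):                          # hypothetical subclass of the class above
#         default_views = {}
#
#     urlpatterns = [
#         SiteCRUD.list(Article, permission_format=True),    # named "{role}_article_list"
#         SiteCRUD.add(Article),
#         SiteCRUD.detail(Article),
#         SiteCRUD.edit(Article),
#         SiteCRUD.delete(Article),
#     ]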
|
PypiClean
|
/pmagpy-4.2.114.tar.gz/pmagpy-4.2.114/help_files/demag_gui_help.py
|
zij_help = """Zijderveld plot of current specimen measurement data and fits
plot interactions:
- click and drag to
zoom or pan
(default=zoom)
- right click to toggle
zoom or pan
- middle click to home
- double click 2 measurements
to set them as bounds
fit symbols:
- diamond = selected
fit
- small diamond =
bad fit
- circle = good fit"""
spec_eqarea_help = """Specimen Eqarea plot shows measurement data in white and all fits
plot interactions:
- click and drag to
zoom or pan
(default=zoom)
- right click to toggle
zoom or pan
- middle click to home
- double click to view
selected fit
fit symbols:
- diamond = selected
fit
- small diamond =
bad fit
- circle = good fit"""
MM0_help = """M/M0 plot for data with the bounds of fits
plot interactions:
- click and drag to
zoom or pan
(default=zoom)
- right click to toggle
zoom or pan
- middle click to home
fit symbols:
- diamond = selected
fit
- small diamond =
bad fit
- circle = good fit"""
high_level_eqarea_help = """High Level Mean Eqarea plot
plot interactions:
- click and drag to
zoom or pan
(default=zoom)
- right click to toggle
zoom or pan
- middle click to home
- double click to view
selected fit
fit symbols and colors:
- diamond = selected
fit
- small diamond =
bad fit
- circle = good fit
- black = fisher mean
of displayed data
check sample orient symbols:
- triangle = wrong
drill direction
- delta = wrong
compass direction
- dotted plane = rotated
sample direction
during measurement
(lighter points are
lower hemisphere)"""
logger_help = """ List of all measurement entries for current specimen
column labels:
- i: index
- Step: type of step
N = NRM
T = Thermal
AF = Alternating
Field
- Dec: Declination
- Inc: Inclination
- M: Magnetic Moment
colored entries:
- blue: measurements
that are part of
current fit
- red: bad measurement
- dark blue: highlighted
(grey on mac)
interaction:
- right click to toggle
measurement bad
- double click two
measurements to set
new bounds of current
fit"""
specimens_box_help = """Displays current specimen and has dropdown of all specimens for which there is valid measurement data. You can also enter another specimen name into the box and when you hit enter the GUI will try to switch to that specimen if it exists."""
nextbutton_help = """Switches current specimen to next specimen. Hotkey: ctrl-left"""
prevbutton_help = """Switches current specimen to previous specimen. Hotkey: ctrl-right"""
coordinates_box_help = """Shows current coordinate system and has a dropdown menu of all coordinate systems for which there are specimens with valid measurement data.
Hotkeys:
- specimen: ctrl-P
- geographic: ctrl-g
- tilt-corrected: ctrl-t"""
orthogonal_box_help = """Zijderveld orientation options"""
fit_box_help = """Displays current fit name and has dropdown of all fits for the current specimen. Fits can be renamed here by typing new name into the box and hitting enter.
Hotkeys:
- ctrl-up: previous fit
- ctrl-down: next fit"""
add_fit_button_help = """Adds a new fit to the current specimen. Hotkey: ctrl-n"""
tmin_box_help = """Shows lower bound of current fit and has dropdown menu of all measurement steps."""
tmax_box_help = """Shows upper bound of current fit and has dropdown menu of all measurement steps."""
save_fit_btn_help = """Saves current interpretations to demag_gui.redo file so they can be reloaded in another session. Hotkey: ctrl-s"""
delete_fit_btn_help = """Deletes current fit and reverts your current fit to the previous fit. Hotkey: ctrl-D"""
PCA_type_help = """Shows the current fit's regression or mean type."""
plane_display_help = """How to display plane fits on eqarea plots:
bfv = Best Fit Vector
wp = Whole plane"""
level_box_help = """Shows the current level at which interpretations will be displayed on the high level mean eqarea plot in the far right of the GUI."""
level_names_help = """Shows the available samples, sites, locations, or studies which can be displayed on the high level mean eqarea plot"""
mean_type_help = """Type of mean to perform on all interpretations currently plotted on the high level mean eqarea plot"""
mean_fit_help = """Which interpretations to display in the high level mean plot. If 'All', then all interpretations are displayed regardless of name; otherwise only interpretations whose name matches the selected value are displayed"""
warning_help = """Message box to display any relevant problems with the current specimen and its interpretations to the user."""
switch_stats_btn_help = """These buttons allow you to cycle through all current high level mean statistics in the case where you are doing a Fisher by polarity mean."""
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/groups/item/sites/item/onenote/notebooks/item/sections/item/pages/item/parent_notebook/parent_notebook_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from .............models import notebook
from .............models.o_data_errors import o_data_error
class ParentNotebookRequestBuilder():
"""
Provides operations to manage the parentNotebook property of the microsoft.graph.onenotePage entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new ParentNotebookRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/groups/{group%2Did}/sites/{site%2Did}/onenote/notebooks/{notebook%2Did}/sections/{onenoteSection%2Did}/pages/{onenotePage%2Did}/parentNotebook{?%24select,%24expand}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_get_request_information(self,request_configuration: Optional[ParentNotebookRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
The notebook that contains the page. Read-only.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = "application/json"
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
async def get(self,request_configuration: Optional[ParentNotebookRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[notebook.Notebook]:
"""
The notebook that contains the page. Read-only.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: Optional[notebook.Notebook]
"""
request_info = self.create_get_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_async(request_info, notebook.Notebook, response_handler, error_mapping)
@dataclass
class ParentNotebookRequestBuilderGetQueryParameters():
"""
The notebook that contains the page. Read-only.
"""
# Expand related entities
expand: Optional[List[str]] = None
# Select properties to be returned
select: Optional[List[str]] = None
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "expand":
return "%24expand"
if original_name == "select":
return "%24select"
return original_name
@dataclass
class ParentNotebookRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[ParentNotebookRequestBuilder.ParentNotebookRequestBuilderGetQueryParameters] = None
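# Hedged usage sketch (not part of the generated file). Building a RequestAdapter depends on the
# kiota authentication setup and is only hinted at here; the raw URL is a placeholder for a fully
# expanded .../pages/{id}/parentNotebook endpoint of an existing OneNote page.
async def _example_fetch_parent_notebook(request_adapter: RequestAdapter, raw_url: str):
    # Instantiate the builder directly from a raw URL and issue the GET request.
    builder = ParentNotebookRequestBuilder(request_adapter, raw_url)
    # Returns a notebook.Notebook instance (or None), as declared by get() above.
    return await builder.get()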
|
PypiClean
|
/idem-vault-2.0.0.tar.gz/idem-vault-2.0.0/idem_vault/states/vault/secrets/kv_v2/secret.py
|
import copy
from typing import Dict
import dict_tools.data
__contracts__ = ["resource"]
async def present(
hub,
ctx,
name: str,
*,
resource_id: (str, "alias=path"),
data: Dict,
disable_read: bool = False,
) -> Dict:
"""
Creates or update a secret stored with Vault KV_v2 secret engine.
Args:
name(string): An Idem name of the resource.
resource_id(string): The full logical path at which to write the data. This argument can also be specified using the alias "path" and should be prefixed with 'secret/'.
data(dict): Data to be written, in the format of a JSON object.
disable_read(bool, optional): Set this field to True if the vault authentication does not have read access.
However, if the value is True, this Idem state operation is not idempotent, and the Idem state comment output
will always assume it is a "create" operation. Defaults to False.
Request Syntax:
[vault-secret-name]:
vault.secrets.kv_v2.secret.present:
- resource_id: 'string' # Can also be specified as "path"
- data: 'string'
- disable_read: 'boolean'
Returns:
Dict[str, Any]
Examples:
.. code-block:: sls
my-secret:
vault.secrets.kv_v2.secret.present:
- resource_id: secret/test # Can also be specified as "path"
- data: '{"my-birthday": "2012-10-17"}'
"""
result = {
"name": name,
"result": True,
"old_state": None,
"new_state": None,
"comment": (),
}
# data is converted to SafeNamespaceDict to avoid it being converted to string and printed to console.
data = dict_tools.data.SafeNamespaceDict(data)
if not disable_read:
read_ret = await hub.exec.vault.secrets.kv_v2.secret.get(ctx, path=resource_id)
if not read_ret["result"]:
if "InvalidPath" not in str(read_ret["comment"]):
result["result"] = False
result["comment"] = read_ret["comment"]
return result
else:
result["old_state"] = {
"name": name,
"path": resource_id,
"data": dict_tools.data.SafeNamespaceDict(read_ret["ret"]["data"]),
}
else:
hub.log.debug(f"vault.secrets.kv_v2.secret '{name}' read has been disabled.")
result["comment"] = (
f"vault.secrets.kv_v2.secret '{name}' read has been disabled.",
)
if (result["old_state"] is not None) and result["old_state"]["data"] == data:
result["comment"] = result["comment"] + (
f"vault.secrets.kv_v2.secret '{name}' has no property need to be updated.",
)
result["new_state"] = copy.deepcopy(result["old_state"])
return result
elif result["old_state"] is None:
if ctx.get("test", False):
result["comment"] = (f"Would create vault.secrets.kv_v2.secret '{name}'.",)
result["new_state"] = {"name": name, "path": resource_id, "data": data}
return result
else:
if ctx.get("test", False):
result["comment"] = (f"Would update vault.secrets.kv_v2.secret '{name}'.",)
result["new_state"] = {"name": name, "path": resource_id, "data": data}
return result
write_ret = await hub.exec.hvac.client.secrets.kv.v2.create_or_update_secret(
ctx, path=resource_id, secret=data
)
if not write_ret["result"]:
result["result"] = False
result["comment"] = write_ret["comment"]
return result
result["new_state"] = {"name": name, "path": resource_id, "data": data}
if result["old_state"] is None:
result["comment"] = (f"Created vault.secrets.kv_v2.secret '{name}'.",)
else:
result["comment"] = (f"Updated vault.secrets.kv_v2.secret '{name}'.",)
return result
async def absent(
hub,
ctx,
name: str,
*,
resource_id: (str, "alias=path"),
delete_all_versions: bool = False,
) -> Dict:
"""
Delete a secret stored with Vault KV_v2 secret engine.
Args:
name(string): An Idem name of the resource.
resource_id (string): The full logical path to write the data.
This argument can also be specified using the alias "path." This should be prefixed with 'secret/'.
delete_all_versions(bool, optional): Set this field to True to destroy all stored versions of the secret
instead of only the latest version. Defaults to False.
Request Syntax:
[vault-secret-name]:
vault.secrets.kv_v2.secret.absent:
- resource_id: 'string' # Can also be specified as "path"
- delete_all_versions: 'boolean'
Returns:
Dict[str, Any]
Examples:
.. code-block:: sls
my-secret:
vault.secrets.kv_v2.secret.absent:
- resource_id: secret/test # Can also be specified as "path"
"""
result = {
"name": name,
"result": True,
"old_state": None,
"new_state": None,
"comment": (),
}
read_ret = await hub.exec.vault.secrets.kv_v2.secret.get(ctx, path=resource_id)
if not read_ret["result"]:
if "InvalidPath" in str(read_ret["comment"]):
result["comment"] = (
f"vault.secrets.kv_v2.secret '{name}' is already absent.",
)
else:
result["result"] = False
result["comment"] = read_ret["comment"]
return result
# "data" is not populated to reduce data exposure.
result["old_state"] = {"name": name, "path": resource_id}
delete_version = [read_ret["metadata"]["version"]]
if delete_all_versions:
version_ret = await hub.exec.hvac.client.secrets.kv.v2.read_secret_metadata(
ctx, path=resource_id
)
if not version_ret["result"]:
result["result"] = False
result["comment"] = version_ret["comment"]
return result
delete_version = list(version_ret["ret"]["data"]["versions"].keys())
if ctx.get("test", False):
if delete_all_versions:
result["comment"] = (
f"Would delete vault.secrets.kv_v2.secret '{name}' all versions.",
)
else:
result["comment"] = (f"Would delete vault.secrets.kv_v2.secret '{name}'.",)
return result
delete_ret = await hub.exec.hvac.client.secrets.kv.v2.destroy_secret_versions(
ctx, path=resource_id, versions=delete_version
)
if not delete_ret["result"]:
result["result"] = False
result["comment"] = read_ret["comment"]
elif delete_all_versions:
result["comment"] = (
f"Deleted vault.secrets.kv_v2.secret '{name}' all versions.",
)
else:
result["comment"] = (f"Deleted vault.secrets.kv_v2.secret '{name}'.",)
return result
async def describe(hub, ctx):
"""
Vault doesn't allow enumeration of secrets
"""
return {}
|
PypiClean
|
/Helios_Scanner-1.1-py3-none-any.whl/helios/webapp/phpmyadmin.py
|
from helios.webapp import base_app
import re
from helios.core.utils import requests_response_to_dict
import json
import requests
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
# This script detects vulnerabilities in the following PHP based products:
# - phpMyAdmin
class Scanner(base_app.BaseAPP):
def __init__(self):
self.name = "phpMyAdmin"
self.types = []
def detect(self, url):
directories = ['phpmyadmin', 'pma', 'phpMyAdmin', '']
for d in directories:
path = urljoin(url, d)
response = self.send(path)
if response and response.status_code == 200 and "phpmyadmin.css.php" in response.text:
self.app_url = path
return True
return False
def test(self, url):
docu_url = urljoin(url, "Documentation.html")
docu_response = self.send(docu_url)
version = None
if docu_response:
get_version = re.search(r'<title>phpMyAdmin\s([\d\.]+)\s', docu_response.text)
if get_version:
version = get_version.group(1)
self.logger.info("phpMyAdmin version %s was identified from the Documentation.html file" % version)
if version:
db = self.get_db("phpmyadmin_vulns.json")
data = json.loads(db)
pma_vulns = data['phpMyAdmin']
self.match_versions(pma_vulns, version, url)
self.test_auth(url)
def test_auth(self, url):
sess = requests.session()
default_creds = ['root:', 'root:admin', 'root:root']
init_req = sess.get(url)
if not init_req:
self.logger.warning("Unable to test authentication, invalid initial response")
return
token_re = re.search('name="token".+?value="(.+?)"', init_req.text)
for entry in default_creds:
if not token_re:
self.logger.warning("Unable to test authentication, no token")
return
user, passwd = entry.split(':')
payload = {'lang': 'en', 'pma_username': user, 'pma_password': passwd, 'token': token_re.group(1)}
post_url = urljoin(url, 'index.php')
post_response = sess.post(post_url, payload)
if post_response and 'Refresh' in post_response.headers:
returl = post_response.headers['Refresh'].split(';')[1].strip()
retdata = sess.get(returl)
if retdata:
if 'class="loginform">' not in retdata.text:
match_str = "Possible positive authentication for user: %s and password %s on %s " % \
(user, passwd, url)
result = {
'request': requests_response_to_dict(post_response),
'match': match_str
}
self.logger.info(match_str)
self.results.append(result)
return
else:
token_re = re.search('name="token".+?value="(.+?)"', retdata.text)
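# Hedged usage sketch (not part of the original module). base_app.BaseAPP is assumed to provide
# send(), logger and the results list used above; the target URL below is a placeholder.
def _example_scan(target_url):
    scanner = Scanner()
    # detect() probes common phpMyAdmin paths and stores the hit on scanner.app_url
    if scanner.detect(target_url):
        # test() matches the detected version against the bundled vulnerability database
        # and then tries a handful of default credentials
        scanner.test(scanner.app_url)
    return scanner.results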
|
PypiClean
|
/z3c.indexer-0.6.1.zip/z3c.indexer-0.6.1/src/z3c/indexer/query.py
|
__docformat__ = "reStructuredText"
import zope.component
import zope.interface
from z3c.indexer import interfaces
class QueryMixin(object):
"""Index query."""
def __init__(self, indexOrName):
if isinstance(indexOrName, basestring):
self.index = zope.component.getUtility(interfaces.IIndex,
name=indexOrName)
else:
# indexOrName is a index
self.index = indexOrName
class TextQuery(QueryMixin):
"""Text query."""
zope.interface.implements(interfaces.ITextQuery)
def __init__(self, indexOrName, value):
super(TextQuery, self).__init__(indexOrName)
self.value = value
def apply(self):
return self.index.apply(self.value)
class Eq(QueryMixin):
"""Equal query."""
zope.interface.implements(interfaces.IEqQuery)
def __init__(self, indexOrName, value):
assert value is not None
super(Eq, self).__init__(indexOrName)
self.value = value
def apply(self):
return self.index.applyEq(self.value)
class NotEq(QueryMixin):
"""Not equal query."""
zope.interface.implements(interfaces.INotEqQuery)
def __init__(self, indexOrName, value):
assert value is not None
super(NotEq, self).__init__(indexOrName)
self.value = value
def apply(self):
return self.index.applyNotEq(self.value)
class Between(QueryMixin):
"""Between query."""
zope.interface.implements(interfaces.IBetweenQuery)
def __init__(self, indexOrName, min_value, max_value, exclude_min=False,
exclude_max=False):
super(Between, self).__init__(indexOrName)
self.min_value = min_value
self.max_value = max_value
self.exclude_min = exclude_min
self.exclude_max = exclude_max
def apply(self):
return self.index.applyBetween(self.min_value, self.max_value,
self.exclude_min, self.exclude_max)
class Ge(QueryMixin):
"""Greater (or equal) query."""
zope.interface.implements(interfaces.IGeQuery)
def __init__(self, indexOrName, min_value, exclude_min=False):
super(Ge, self).__init__(indexOrName)
self.min_value = min_value
self.exclude_min = exclude_min
def apply(self):
return self.index.applyGe(self.min_value, self.exclude_min)
class Le(QueryMixin):
"""Less (or equal) query."""
zope.interface.implements(interfaces.ILeQuery)
def __init__(self, indexOrName, max_value, exclude_max=False):
super(Le, self).__init__(indexOrName)
self.max_value = max_value
self.exclude_max = exclude_max
def apply(self):
return self.index.applyLe(self.max_value, self.exclude_max)
class In(QueryMixin):
"""In query."""
zope.interface.implements(interfaces.IInQuery)
def __init__(self, indexOrName, values):
super(In, self).__init__(indexOrName)
self.values = values
def apply(self):
return self.index.applyIn(self.values)
class AnyOf(QueryMixin):
"""Any of query.
The result will be the docids whose values contain any of the given values.
"""
zope.interface.implements(interfaces.IAnyOfQuery)
def __init__(self, indexOrName, values):
super(AnyOf, self).__init__(indexOrName)
self.values = values
def apply(self):
return self.index.applyAnyOf(self.values)
class AllOf(QueryMixin):
"""Any of query.
The result will be the docids whose values contain all of the given values.
"""
zope.interface.implements(interfaces.IAllOfQuery)
def __init__(self, indexOrName, values):
super(AllOf, self).__init__(indexOrName)
self.values = values
def apply(self):
return self.index.applyAllOf(self.values)
class ExtentAny(QueryMixin):
"""ExtentAny query."""
zope.interface.implements(interfaces.IExtentAnyQuery)
def __init__(self, indexOrName, extent):
super(ExtentAny, self).__init__(indexOrName)
self.extent = extent
def apply(self):
return self.index.applyExtentAny(self.extent)
class ExtentNone(QueryMixin):
"""ExtentNone query."""
zope.interface.implements(interfaces.IExtentNoneQuery)
def __init__(self, indexOrName, extent):
super(ExtentNone, self).__init__(indexOrName)
self.extent = extent
def apply(self):
return self.index.applyExtentNone(self.extent)
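# Hedged usage sketch (not part of the original module). 'city', 'age' and 'tags' are hypothetical
# index utility names registered as IIndex utilities; each apply() returns the docids matched by
# that index, which a caller would then intersect or union into a final result set.
def _example_queries():
    berlin_docs = Eq('city', u'Berlin').apply()
    adult_docs = Between('age', 21, 65).apply()
    tagged_docs = AnyOf('tags', (u'python', u'zope')).apply()
    return berlin_docs, adult_docs, tagged_docs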
|
PypiClean
|
/v3/model/resize_instance_volume_option.py
|
import pprint
import re
import six
class ResizeInstanceVolumeOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'group_id': 'str',
'size': 'int'
}
attribute_map = {
'group_id': 'group_id',
'size': 'size'
}
def __init__(self, group_id=None, size=None):
"""ResizeInstanceVolumeOption - a model defined in huaweicloud sdk"""
self._group_id = None
self._size = None
self.discriminator = None
if group_id is not None:
self.group_id = group_id
self.size = size
@property
def group_id(self):
"""Gets the group_id of this ResizeInstanceVolumeOption.
Role group ID. - For cluster instances, this parameter is the shard group ID. - For replica set and single-node instances, do not pass this parameter.
:return: The group_id of this ResizeInstanceVolumeOption.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this ResizeInstanceVolumeOption.
Role group ID. - For cluster instances, this parameter is the shard group ID. - For replica set and single-node instances, do not pass this parameter.
:param group_id: The group_id of this ResizeInstanceVolumeOption.
:type: str
"""
self._group_id = group_id
@property
def size(self):
"""Gets the size of this ResizeInstanceVolumeOption.
Target disk capacity after scaling, in GB. The value must be a multiple of 10 and greater than the current disk capacity. - For cluster instances, this is the target disk capacity of a single shard group; range: 10GB~2000GB. - For replica set instances, this is the target disk capacity of the instance; range: 10GB~2000GB. - For single-node instances, this is the target disk capacity of the instance; range: 10GB~1000GB.
:return: The size of this ResizeInstanceVolumeOption.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ResizeInstanceVolumeOption.
Target disk capacity after scaling, in GB. The value must be a multiple of 10 and greater than the current disk capacity. - For cluster instances, this is the target disk capacity of a single shard group; range: 10GB~2000GB. - For replica set instances, this is the target disk capacity of the instance; range: 10GB~2000GB. - For single-node instances, this is the target disk capacity of the instance; range: 10GB~1000GB.
:param size: The size of this ResizeInstanceVolumeOption.
:type: int
"""
self._size = size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResizeInstanceVolumeOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
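# Hedged usage sketch (not part of the generated model); the values below are illustrative only.
def _example_usage():
    option = ResizeInstanceVolumeOption(group_id="shard-group-id", size=100)
    # to_dict() serialises the attributes declared in openapi_types
    assert option.to_dict() == {'group_id': 'shard-group-id', 'size': 100}
    # __repr__ delegates to to_str(), i.e. pprint.pformat(option.to_dict())
    return repr(option)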
|
PypiClean
|
/zephyrus-sc2-parser-0.3.8.tar.gz/zephyrus-sc2-parser-0.3.8/zephyrus_sc2_parser/gamedata/16561/unit_data.py
|
units = {'Protoss': {'Probe': {'obj_id': 106, 'priority': 33, 'type': ['unit', 'worker'], 'mineral_cost': 50, 'gas_cost': 0, 'supply': 1}, 'Zealot': {'obj_id': 95, 'priority': 39, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 0, 'supply': 2, 'cooldown': 446}, 'Stalker': {'obj_id': 96, 'priority': 60, 'type': ['unit'], 'mineral_cost': 125, 'gas_cost': 50, 'supply': 2, 'cooldown': 512}, 'Sentry': {'obj_id': 99, 'priority': 87, 'type': ['unit'], 'mineral_cost': 50, 'gas_cost': 100, 'supply': 2, 'energy': 50, 'cooldown': 512}, 'Adept': {'obj_id': 773, 'priority': 57, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 25, 'supply': 2, 'cooldown': 446}, 'HighTemplar': {'obj_id': 97, 'priority': 93, 'type': ['unit'], 'mineral_cost': 50, 'gas_cost': 150, 'supply': 2, 'energy': 50, 'cooldown': 714}, 'DarkTemplar': {'obj_id': 98, 'priority': 56, 'type': ['unit'], 'mineral_cost': 125, 'gas_cost': 125, 'supply': 2, 'cooldown': 714}, 'Archon': {'obj_id': 163, 'priority': 45, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 300, 'supply': 4}, 'Observer': {'obj_id': 104, 'priority': 36, 'type': ['unit'], 'mineral_cost': 25, 'gas_cost': 75, 'supply': 1}, 'ObserverSiegeMode': {'obj_id': [1169], 'priority': 36, 'type': ['unit'], 'mineral_cost': 25, 'gas_cost': 75, 'supply': 1}, 'WarpPrism': {'obj_id': 103, 'priority': 69, 'type': ['unit'], 'mineral_cost': 250, 'gas_cost': 0, 'supply': 2}, 'WarpPrismPhasing': {'obj_id': 158, 'priority': 69, 'type': ['unit'], 'mineral_cost': 250, 'gas_cost': 0, 'supply': 2}, 'Immortal': {'obj_id': 105, 'priority': 44, 'type': ['unit'], 'mineral_cost': 275, 'gas_cost': 100, 'supply': 4}, 'Colossus': {'obj_id': 23, 'priority': 48, 'type': ['unit'], 'mineral_cost': 300, 'gas_cost': 200, 'supply': 6}, 'Disruptor': {'obj_id': 772, 'priority': 72, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 150, 'supply': 3}, 'Phoenix': {'obj_id': 100, 'priority': 81, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 100, 'supply': 2, 'energy': 50}, 'VoidRay': {'obj_id': 102, 'priority': 78, 'type': ['unit'], 'mineral_cost': 200, 'gas_cost': 150, 'supply': 4}, 'Oracle': {'obj_id': 541, 'priority': 84, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 150, 'supply': 3, 'energy': 50}, 'Tempest': {'obj_id': 542, 'priority': 50, 'type': ['unit'], 'mineral_cost': 250, 'gas_cost': 175, 'supply': 5}, 'Carrier': {'obj_id': 101, 'priority': 51, 'type': ['unit'], 'mineral_cost': 350, 'gas_cost': 250, 'supply': 6}, 'Interceptor': {'obj_id': 107, 'priority': None, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'Mothership': {'obj_id': 30, 'priority': 96, 'type': ['unit'], 'mineral_cost': 400, 'gas_cost': 400, 'supply': 8, 'energy': 50}}, 'Terran': {'SCV': {'obj_id': 67, 'priority': 58, 'type': ['unit', 'worker'], 'mineral_cost': 50, 'gas_cost': 0, 'supply': 1}, 'MULE': {'obj_id': 277, 'priority': 56, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'Marine': {'obj_id': 70, 'priority': 78, 'type': ['unit'], 'mineral_cost': 50, 'gas_cost': 0, 'supply': 1}, 'Reaper': {'obj_id': 71, 'priority': 70, 'type': ['unit'], 'mineral_cost': 50, 'gas_cost': 50, 'supply': 1}, 'Marauder': {'obj_id': 73, 'priority': 76, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 25, 'supply': 2}, 'Ghost': {'obj_id': 72, 'priority': 82, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 125, 'supply': 2, 'energy': 75}, 'Hellion': {'obj_id': 75, 'priority': 66, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 0, 'supply': 2}, 'HellionTank': {'obj_id': 527, 'priority': 6, 'type': 
['unit'], 'mineral_cost': 100, 'gas_cost': 0, 'supply': 2}, 'WidowMine': {'obj_id': 544, 'priority': 54, 'type': ['unit'], 'mineral_cost': 75, 'gas_cost': 25, 'supply': 2}, 'WidowMineBurrowed': {'obj_id': 546, 'priority': 54, 'type': ['unit'], 'mineral_cost': 75, 'gas_cost': 25, 'supply': 2}, 'Cyclone': {'obj_id': 770, 'priority': 71, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 100, 'supply': 3}, 'SiegeTank': {'obj_id': 55, 'priority': 74, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 125, 'supply': 3}, 'SiegeTankSieged': {'obj_id': 54, 'priority': 74, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 125, 'supply': 3}, 'Thor': {'obj_id': 74, 'priority': 52, 'type': ['unit'], 'mineral_cost': 300, 'gas_cost': 200, 'supply': 6}, 'ThorAP': {'obj_id': 769, 'priority': 52, 'type': ['unit'], 'mineral_cost': 300, 'gas_cost': 200, 'supply': 6}, 'VikingFighter': {'obj_id': 57, 'priority': 68, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 75, 'supply': 2}, 'VikingAssault': {'obj_id': 56, 'priority': 68, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 75, 'supply': 2}, 'Medivac': {'obj_id': 76, 'priority': 60, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 100, 'supply': 2, 'energy': 50}, 'Liberator': {'obj_id': 766, 'priority': 72, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 150, 'supply': 3}, 'LiberatorAG': {'obj_id': 819, 'priority': 72, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 150, 'supply': 3}, 'Raven': {'obj_id': 78, 'priority': 84, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 200, 'supply': 2, 'energy': 50}, 'AutoTurret': {'obj_id': 53, 'priority': 2, 'type': ['building'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'Banshee': {'obj_id': 77, 'priority': 64, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 100, 'supply': 3, 'energy': 50}, 'Battlecruiser': {'obj_id': 79, 'priority': 80, 'type': ['unit'], 'mineral_cost': 400, 'gas_cost': 300, 'supply': 6}}, 'Zerg': {'Larva': {'obj_id': 176, 'priority': 58, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'Egg': {'obj_id': 125, 'priority': 54, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'Drone': {'obj_id': 126, 'priority': 60, 'type': ['unit', 'worker'], 'mineral_cost': 50, 'gas_cost': 0, 'supply': 1}, 'Overlord': {'obj_id': 128, 'priority': 72, 'type': ['unit', 'supply'], 'mineral_cost': 100, 'gas_cost': 0, 'supply': 0}, 'Queen': {'obj_id': 148, 'priority': 101, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 0, 'supply': 2, 'energy': 25}, 'Zergling': {'obj_id': 127, 'priority': 68, 'type': ['unit'], 'mineral_cost': 25, 'gas_cost': 0, 'supply': 0.5}, 'Baneling': {'obj_id': 29, 'priority': 82, 'type': ['unit'], 'mineral_cost': 50, 'gas_cost': 25, 'supply': 0.5}, 'Roach': {'obj_id': 132, 'priority': 80, 'type': ['unit'], 'mineral_cost': 75, 'gas_cost': 25, 'supply': 2}, 'Ravager': {'obj_id': 765, 'priority': 92, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 100, 'supply': 3}, 'TransportOverlordCocoon': {'obj_id': 948, 'priority': 1, 'type': ['unit', 'supply'], 'mineral_cost': 125, 'gas_cost': 25, 'supply': 0}, 'OverlordCocoon': {'obj_id': 150, 'priority': 1, 'type': ['unit', 'supply'], 'mineral_cost': 150, 'gas_cost': 50, 'supply': 0}, 'Overseer': {'obj_id': 151, 'priority': 74, 'type': ['unit', 'supply'], 'mineral_cost': 150, 'gas_cost': 50, 'supply': 0, 'energy': 50}, 'OverseerSiegeMode': {'obj_id': [151], 'priority': 74, 'type': ['unit', 'supply'], 'mineral_cost': 150, 'gas_cost': 50, 'supply': 0, 'energy': 50}, 'OverlordTransport': {'obj_id': 
949, 'priority': 73, 'type': ['unit', 'supply'], 'mineral_cost': 125, 'gas_cost': 25, 'supply': 0}, 'Hydralisk': {'obj_id': 129, 'priority': 70, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 50, 'supply': 2}, 'LurkerMP': {'obj_id': 548, 'priority': 90, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 150, 'supply': 3}, 'LurkerMPEgg': {'obj_id': 547, 'priority': 54, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 150, 'supply': 3}, 'LurkerMPBurrowed': {'obj_id': 549, 'priority': 90, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 150, 'supply': 3}, 'Mutalisk': {'obj_id': 130, 'priority': 76, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 100, 'supply': 2}, 'Corruptor': {'obj_id': 134, 'priority': 84, 'type': ['unit'], 'mineral_cost': 150, 'gas_cost': 100, 'supply': 2}, 'SwarmHostMP': {'obj_id': 540, 'priority': 86, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 75, 'supply': 3}, 'LocustMP': {'obj_id': 535, 'priority': 54, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'LocustMPFlying': {'obj_id': 771, 'priority': 56, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'LocustMPPrecursor': {'obj_id': 851, 'priority': 54, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'Infestor': {'obj_id': 133, 'priority': 94, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 150, 'supply': 2, 'energy': 50}, 'InfestedTerransEgg': {'obj_id': 175, 'priority': 54, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'InfestorTerran': {'obj_id': 27, 'priority': 66, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'Viper': {'obj_id': 545, 'priority': 96, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 200, 'supply': 3, 'energy': 50}, 'Ultralisk': {'obj_id': 131, 'priority': 88, 'type': ['unit'], 'mineral_cost': 300, 'gas_cost': 200, 'supply': 6}, 'BroodLord': {'obj_id': 136, 'priority': 78, 'type': ['unit'], 'mineral_cost': 300, 'gas_cost': 250, 'supply': 4}, 'BroodlingEscort': {'obj_id': 165, 'priority': None, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'Broodling': {'obj_id': 300, 'priority': 62, 'type': ['unit'], 'mineral_cost': 0, 'gas_cost': 0, 'supply': 0}, 'RavagerCocoon': {'obj_id': 764, 'priority': 54, 'type': ['unit'], 'mineral_cost': 100, 'gas_cost': 100, 'supply': 3}, 'BanelingCocoon': {'obj_id': 28, 'priority': 54, 'type': ['unit'], 'mineral_cost': 50, 'gas_cost': 25, 'supply': 0.5}, 'BroodLordCocoon': {'obj_id': 135, 'priority': 1, 'type': ['unit'], 'mineral_cost': 300, 'gas_cost': 250, 'supply': 3}}}
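# Hedged usage sketch (not part of the original module): a small helper showing how this table
# can be read; the army composition below is arbitrary.
def army_cost(race, composition):
    # composition maps unit names to counts, e.g. {'Zealot': 10, 'Stalker': 6}
    minerals = sum(units[race][name]['mineral_cost'] * n for name, n in composition.items())
    gas = sum(units[race][name]['gas_cost'] * n for name, n in composition.items())
    supply = sum(units[race][name]['supply'] * n for name, n in composition.items())
    return minerals, gas, supply

# army_cost('Protoss', {'Zealot': 10, 'Stalker': 6}) -> (1750, 300, 32)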
|
PypiClean
|
/opencv_contrib_cuda_python-4.8.0.74-cp36-abi3-win_amd64.whl/cv2/gapi/__init__.py
|
__all__ = ['op', 'kernel']
import sys
import cv2 as cv
# NB: Register function in specific module
def register(mname):
def parameterized(func):
sys.modules[mname].__dict__[func.__name__] = func
return func
return parameterized
@register('cv2.gapi')
def networks(*args):
return cv.gapi_GNetPackage(list(map(cv.detail.strip, args)))
@register('cv2.gapi')
def compile_args(*args):
return list(map(cv.GCompileArg, args))
@register('cv2')
def GIn(*args):
return [*args]
@register('cv2')
def GOut(*args):
return [*args]
@register('cv2')
def gin(*args):
return [*args]
@register('cv2.gapi')
def descr_of(*args):
return [*args]
@register('cv2')
class GOpaque():
# NB: Inheritance from the C++ class causes a segfault.
# So just aggregate cv.GOpaqueT instead of inheriting
def __new__(cls, argtype):
return cv.GOpaqueT(argtype)
class Bool():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_BOOL)
class Int():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_INT)
class Double():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_DOUBLE)
class Float():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_FLOAT)
class String():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_STRING)
class Point():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_POINT)
class Point2f():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_POINT2F)
class Point3f():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_POINT3F)
class Size():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_SIZE)
class Rect():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_RECT)
class Prim():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_DRAW_PRIM)
class Any():
def __new__(self):
return cv.GOpaqueT(cv.gapi.CV_ANY)
@register('cv2')
class GArray():
# NB: Inheritance from the C++ class causes a segfault.
# So just aggregate cv.GArrayT instead of inheriting
def __new__(cls, argtype):
return cv.GArrayT(argtype)
class Bool():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_BOOL)
class Int():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_INT)
class Double():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_DOUBLE)
class Float():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_FLOAT)
class String():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_STRING)
class Point():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_POINT)
class Point2f():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_POINT2F)
class Point3f():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_POINT3F)
class Size():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_SIZE)
class Rect():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_RECT)
class Scalar():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_SCALAR)
class Mat():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_MAT)
class GMat():
def __new__(self):
return cv.GArrayT(cv.gapi.CV_GMAT)
class Prim():
def __new__(self):
return cv.GArray(cv.gapi.CV_DRAW_PRIM)
class Any():
def __new__(self):
return cv.GArray(cv.gapi.CV_ANY)
# NB: Top lvl decorator takes arguments
def op(op_id, in_types, out_types):
garray_types= {
cv.GArray.Bool: cv.gapi.CV_BOOL,
cv.GArray.Int: cv.gapi.CV_INT,
cv.GArray.Double: cv.gapi.CV_DOUBLE,
cv.GArray.Float: cv.gapi.CV_FLOAT,
cv.GArray.String: cv.gapi.CV_STRING,
cv.GArray.Point: cv.gapi.CV_POINT,
cv.GArray.Point2f: cv.gapi.CV_POINT2F,
cv.GArray.Point3f: cv.gapi.CV_POINT3F,
cv.GArray.Size: cv.gapi.CV_SIZE,
cv.GArray.Rect: cv.gapi.CV_RECT,
cv.GArray.Scalar: cv.gapi.CV_SCALAR,
cv.GArray.Mat: cv.gapi.CV_MAT,
cv.GArray.GMat: cv.gapi.CV_GMAT,
cv.GArray.Prim: cv.gapi.CV_DRAW_PRIM,
cv.GArray.Any: cv.gapi.CV_ANY
}
gopaque_types= {
cv.GOpaque.Size: cv.gapi.CV_SIZE,
cv.GOpaque.Rect: cv.gapi.CV_RECT,
cv.GOpaque.Bool: cv.gapi.CV_BOOL,
cv.GOpaque.Int: cv.gapi.CV_INT,
cv.GOpaque.Double: cv.gapi.CV_DOUBLE,
cv.GOpaque.Float: cv.gapi.CV_FLOAT,
cv.GOpaque.String: cv.gapi.CV_STRING,
cv.GOpaque.Point: cv.gapi.CV_POINT,
cv.GOpaque.Point2f: cv.gapi.CV_POINT2F,
cv.GOpaque.Point3f: cv.gapi.CV_POINT3F,
cv.GOpaque.Size: cv.gapi.CV_SIZE,
cv.GOpaque.Rect: cv.gapi.CV_RECT,
cv.GOpaque.Prim: cv.gapi.CV_DRAW_PRIM,
cv.GOpaque.Any: cv.gapi.CV_ANY
}
type2str = {
cv.gapi.CV_BOOL: 'cv.gapi.CV_BOOL' ,
cv.gapi.CV_INT: 'cv.gapi.CV_INT' ,
cv.gapi.CV_DOUBLE: 'cv.gapi.CV_DOUBLE' ,
cv.gapi.CV_FLOAT: 'cv.gapi.CV_FLOAT' ,
cv.gapi.CV_STRING: 'cv.gapi.CV_STRING' ,
cv.gapi.CV_POINT: 'cv.gapi.CV_POINT' ,
cv.gapi.CV_POINT2F: 'cv.gapi.CV_POINT2F' ,
cv.gapi.CV_POINT3F: 'cv.gapi.CV_POINT3F' ,
cv.gapi.CV_SIZE: 'cv.gapi.CV_SIZE',
cv.gapi.CV_RECT: 'cv.gapi.CV_RECT',
cv.gapi.CV_SCALAR: 'cv.gapi.CV_SCALAR',
cv.gapi.CV_MAT: 'cv.gapi.CV_MAT',
cv.gapi.CV_GMAT: 'cv.gapi.CV_GMAT',
cv.gapi.CV_DRAW_PRIM: 'cv.gapi.CV_DRAW_PRIM'
}
# NB: Second lvl decorator takes class to decorate
def op_with_params(cls):
if not in_types:
raise Exception('{} operation should have at least one input!'.format(cls.__name__))
if not out_types:
raise Exception('{} operation should have at least one output!'.format(cls.__name__))
for i, t in enumerate(out_types):
if t not in [cv.GMat, cv.GScalar, *garray_types, *gopaque_types]:
raise Exception('{} unsupported output type: {} in position: {}'
.format(cls.__name__, t.__name__, i))
def on(*args):
if len(in_types) != len(args):
raise Exception('Invalid number of input elements!\nExpected: {}, Actual: {}'
.format(len(in_types), len(args)))
for i, (t, a) in enumerate(zip(in_types, args)):
if t in garray_types:
if not isinstance(a, cv.GArrayT):
raise Exception("{} invalid type for argument {}.\nExpected: {}, Actual: {}"
.format(cls.__name__, i, cv.GArrayT.__name__, type(a).__name__))
elif a.type() != garray_types[t]:
raise Exception("{} invalid GArrayT type for argument {}.\nExpected: {}, Actual: {}"
.format(cls.__name__, i, type2str[garray_types[t]], type2str[a.type()]))
elif t in gopaque_types:
if not isinstance(a, cv.GOpaqueT):
raise Exception("{} invalid type for argument {}.\nExpected: {}, Actual: {}"
.format(cls.__name__, i, cv.GOpaqueT.__name__, type(a).__name__))
elif a.type() != gopaque_types[t]:
raise Exception("{} invalid GOpaque type for argument {}.\nExpected: {}, Actual: {}"
.format(cls.__name__, i, type2str[gopaque_types[t]], type2str[a.type()]))
else:
if t != type(a):
raise Exception('{} invalid input type for argument {}.\nExpected: {}, Actual: {}'
.format(cls.__name__, i, t.__name__, type(a).__name__))
op = cv.gapi.__op(op_id, cls.outMeta, *args)
out_protos = []
for i, out_type in enumerate(out_types):
if out_type == cv.GMat:
out_protos.append(op.getGMat())
elif out_type == cv.GScalar:
out_protos.append(op.getGScalar())
elif out_type in gopaque_types:
out_protos.append(op.getGOpaque(gopaque_types[out_type]))
elif out_type in garray_types:
out_protos.append(op.getGArray(garray_types[out_type]))
else:
raise Exception("""In {}: G-API operation can't produce the output with type: {} in position: {}"""
.format(cls.__name__, out_type.__name__, i))
return tuple(out_protos) if len(out_protos) != 1 else out_protos[0]
# NB: Extend operation class
cls.id = op_id
cls.on = staticmethod(on)
return cls
return op_with_params
def kernel(op_cls):
# NB: Second lvl decorator takes class to decorate
def kernel_with_params(cls):
# NB: Add new members to kernel class
cls.id = op_cls.id
cls.outMeta = op_cls.outMeta
return cls
return kernel_with_params
cv.gapi.wip.GStreamerPipeline = cv.gapi_wip_gst_GStreamerPipeline
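# Hedged usage sketch (not part of the original module). A minimal custom operation/kernel pair
# built with the op() and kernel() decorators above; the op id string and class names are
# arbitrary, and the commented apply() line assumes the standard G-API Python workflow.
def _example_custom_op():
    @op('custom.add', in_types=[cv.GMat, cv.GMat], out_types=[cv.GMat])
    class GAdd:
        @staticmethod
        def outMeta(desc1, desc2):
            # Output meta mirrors the first input
            return desc1

    @kernel(GAdd)
    class GAddImpl:
        @staticmethod
        def run(img1, img2):
            return img1 + img2

    g1, g2 = cv.GMat(), cv.GMat()
    g_out = GAdd.on(g1, g2)  # type-checked by the on() wrapper generated in op_with_params
    comp = cv.GComputation(cv.GIn(g1, g2), cv.GOut(g_out))
    # out = comp.apply(cv.gin(img_a, img_b), args=compile_args(cv.gapi.kernels(GAddImpl)))
    return comp, GAddImpl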
|
PypiClean
|
/pyhtm-0.1.5.tar.gz/pyhtm-0.1.5/docs/source/installation.rst
|
.. highlight:: shell
============
Installation
============
From PyPI
---------
You can install the library from PyPI through `pip`:
.. code::
$ pip install pyhtm
From Source
-----------
The source code for ``htm`` can be obtained from its `GitLab repo`_
and installed like so:
.. parsed-literal::
$ git clone https://gitlab.com/noao/antares/htm
$ cd htm
$ git checkout |version|
$ python setup.py install
.. _GitLab repo: https://gitlab.com/noao/antares/htm
Installing for Development
--------------------------
If you plan on contributing to the development of the ANTARES HTM utilities
or if you want to run the latest (unreleased and maybe unstable) version
of the library, you may want to install in development mode:
.. parsed-literal::
$ git clone https://gitlab.com/noao/antares/htm
$ cd htm
$ python setup.py develop
|
PypiClean
|
/rst2html5-tools-0.5.3.tar.gz/rst2html5-tools-0.5.3/html5css3/thirdparty/deckjs/test/spec.goto.js
|
describe('Deck JS Quick Go-To', function() {
var $d = $(document);
beforeEach(function() {
loadFixtures('standard.html');
if (Modernizr.history) {
history.replaceState({}, "", "#")
}
else {
window.location.hash = '#';
}
$.deck('.slide');
});
describe('showGoTo()', function() {
it('should show the go-to helper', function() {
expect($(defaults.selectors.container)).not.toHaveClass(defaults.classes.goto);
$.deck('showGoTo');
expect($(defaults.selectors.container)).toHaveClass(defaults.classes.goto);
});
it('should focus the go-to input', function() {
$.deck('showGoTo');
expect($(defaults.selectors.gotoInput)[0]).toEqual(document.activeElement);
});
it('should set aria-hidden to false', function() {
var $gotoForm = $(defaults.selectors.gotoForm);
$.deck('showGoTo');
expect($gotoForm).toHaveAttr('aria-hidden', 'false');
});
});
describe('hideGoTo()', function() {
beforeEach(function() {
$.deck('showGoTo');
$.deck('hideGoTo');
});
it('should hide the go-to helper', function() {
expect($(defaults.selectors.container)).not.toHaveClass(defaults.classes.goto);
});
it('should blur the go-to input', function() {
expect($(defaults.selectors.gotoInput)[0]).not.toEqual(document.activeElement);
});
it('should set aria-hidden to true', function() {
var $gotoForm = $(defaults.selectors.gotoForm);
$.deck('hideGoTo');
expect($gotoForm).toHaveAttr('aria-hidden', 'true');
});
});
describe('toggleGoTo()', function() {
it('should toggle the go-to helper on and off', function() {
expect($(defaults.selectors.container)).not.toHaveClass(defaults.classes.goto);
$.deck('toggleGoTo');
expect($(defaults.selectors.container)).toHaveClass(defaults.classes.goto);
$.deck('toggleGoTo');
expect($(defaults.selectors.container)).not.toHaveClass(defaults.classes.goto);
});
});
describe('Go-To submit', function() {
beforeEach(function() {
$.deck('showGoTo');
});
it('should hide the go-to helper', function() {
$(defaults.selectors.gotoInput).val('3');
$(defaults.selectors.gotoForm).submit();
expect($(defaults.selectors.container)).not.toHaveClass(defaults.classes.goto);
});
it('should go to the slide number entered', function() {
$(defaults.selectors.gotoInput).val('3');
$(defaults.selectors.gotoForm).submit();
expect($.deck('getSlide')).toEqual($.deck('getSlide'), 2);
});
it('should go to the slide id entered', function() {
$(defaults.selectors.gotoInput).val('custom-id');
$(defaults.selectors.gotoForm).submit();
expect($.deck('getSlide')).toEqual($.deck('getSlide'), 1);
});
it('should go nowhere if the number is negative', function() {
$(defaults.selectors.gotoInput).val('-2');
$(defaults.selectors.gotoForm).submit();
expect($.deck('getSlide')).toEqual($.deck('getSlide'), 0);
});
it('should go nowhere if the number is greater than the number of slides', function() {
$(defaults.selectors.gotoInput).val('9');
$(defaults.selectors.gotoForm).submit();
expect($.deck('getSlide')).toEqual($.deck('getSlide'), 0);
});
it('should go nowhere if the id does not exist', function() {
$(defaults.selectors.gotoInput).val('do-not-exist');
$(defaults.selectors.gotoForm).submit();
expect($.deck('getSlide')).toEqual($.deck('getSlide'), 0);
});
});
describe('Datalist population', function() {
it('should fill in options with all the slide ids', function() {
var $dataOptions = $(defaults.selectors.gotoDatalist).find('option');
expect($dataOptions.length).toEqual(5);
expect($dataOptions.eq(0).attr('value')).toEqual('slide-0');
expect($dataOptions.eq(1).attr('value')).toEqual('custom-id');
});
});
describe('key bindings', function() {
var e;
beforeEach(function() {
e = jQuery.Event('keydown.deckgoto');
});
it('should toggle the go-to helper if the specified key is pressed', function() {
e.which = 71; // g
$d.trigger(e);
expect($(defaults.selectors.container)).toHaveClass(defaults.classes.goto);
$d.trigger(e);
expect($(defaults.selectors.container)).not.toHaveClass(defaults.classes.goto);
});
});
describe('countNested false', function() {
beforeEach(function() {
loadFixtures('nesteds.html');
$.deck('.slide', {
countNested: false
});
$.deck('showGoTo');
});
it('should ignore nested slides when given a slide number', function() {
$(defaults.selectors.gotoInput).val('4');
$(defaults.selectors.gotoForm).submit();
expect($.deck('getSlide')).toHaveId('after');
});
it('should respect top side of new slide range', function() {
$.deck('go', 0);
$(defaults.selectors.gotoInput).val('6');
$(defaults.selectors.gotoForm).submit();
expect($.deck('getSlide')).toHaveId('slide-0');
});
});
});
|
PypiClean
|
/nexuscloud-client-1.0.9.tar.gz/nexuscloud-client-1.0.9/docs/NexusInsightsApiV1EndpointsSummaryGet200Response.md
|
# NexusInsightsApiV1EndpointsSummaryGet200Response
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**endpoint_stats** | [**NexusInsightsApiV1EndpointsSummaryGet200ResponseEndpointStats**](NexusInsightsApiV1EndpointsSummaryGet200ResponseEndpointStats.md) | | [optional]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
PypiClean
|
/file_io-0.4.6-py3-none-any.whl/fileio/providers/filesys_cloud.py
|
from __future__ import annotations
import os
import hashlib
import datetime
from typing import ClassVar
from fsspec.callbacks import Callback
from fileio.aiopath.wrap import to_thread
from fileio.core.flavours import _pathz_windows_flavour, _pathz_posix_flavour
from fileio.providers.base import *
from fileio.utils import logger
from fileio.providers.filesys import get_accessor, get_cloud_filesystem, AccessorLike, CloudFileSystemLike
if TYPE_CHECKING:
from fileio.core.generic import PathLike
class CloudFileSystemPurePath(PurePath):
_prefix: str = None
_provider: str = None
_win_pathz: ClassVar = 'PureCloudFileSystemWindowsPath'
_posix_pathz: ClassVar = 'PureCloudFileSystemPosixPath'
def _init(self, template: Optional[PurePath] = None):
self._accessor: AccessorLike = get_accessor(self._prefix)
def __new__(cls, *args):
if cls is CloudFileSystemPurePath or issubclass(cls, CloudFileSystemPurePath):
cls = cls._win_pathz if os.name == 'nt' else cls._posix_pathz
cls = globals()[cls]
return cls._from_parts(args)
def _new(self, *parts):
"""Create a new `Path` child of same type."""
return type(self)(*parts)
class PureCloudFileSystemPosixPath(CloudFileSystemPurePath):
"""PurePath subclass for non-Windows systems.
On a POSIX system, instantiating a PurePath should return this object.
However, you can also instantiate it directly on any system.
"""
_flavour = _pathz_posix_flavour
_pathlike = posixpath
__slots__ = ()
class PureCloudFileSystemWindowsPath(CloudFileSystemPurePath):
"""PurePath subclass for Windows systems.
On a Windows system, instantiating a PurePath should return this object.
However, you can also instantiate it directly on any system.
"""
_flavour = _pathz_windows_flavour
_pathlike = ntpath
__slots__ = ()
class CloudFileSystemPath(Path, CloudFileSystemPurePath):
"""
Our customized class that incorporates both sync and async methods
"""
_flavour = _pathz_windows_flavour if os.name == 'nt' else _pathz_posix_flavour
_accessor: AccessorLike = None
_pathlike = posixpath
_prefix = None
_provider = None
_win_pathz: ClassVar = 'CloudFileSystemWindowsPath'
_posix_pathz: ClassVar = 'CloudFileSystemPosixPath'
def _init(self, template: Optional['CloudFileSystemPath'] = None):
self._accessor: AccessorLike = get_accessor(self._prefix)
self._closed = False
self._fileio = None
def __new__(cls, *parts, **kwargs):
if cls is CloudFileSystemPath or issubclass(cls, CloudFileSystemPath):
cls = cls._win_pathz if os.name == 'nt' else cls._posix_pathz
cls = globals()[cls]
self = cls._from_parts(parts, init=False)
if not self._flavour.is_supported:
name: str = cls.__name__
raise NotImplementedError(f"cannot instantiate {name} on your system")
self._init()
return self
def __repr__(self):
return f'{self.__class__.__name__}("{self.string}")'
def __str__(self):
return self.string
@property
def _path(self) -> str:
return self._cloudstr if self.is_cloud else str(self)
@property
def _cloudpath(self) -> str:
"""
Returns the `__fspath__` string representation without the uri_scheme
"""
if self._prefix in self.parts[0]: return self._pathlike.join(*self.parts[1:])
return self._pathlike.join(*self.parts)
@property
def _bucket(self) -> str:
"""
Returns the bucket name (the first path component after the uri_scheme)
"""
if self._prefix in self.parts[0]: return self.parts[1]
return self.parts[0]
@property
def _bucketstr(self) -> str:
"""
Returns the bucket with the uri_scheme, e.g. '<prefix>://<bucket>'
"""
return f'{self._prefix}://{self._bucket}'
@property
def _pathkeys(self) -> str:
"""
Returns the object key portion of the path, relative to the bucket
"""
if self._bucket in self.parts[0]: return self._pathlike.join(*self.parts[1:])
if self._bucket in self.parts[1]: return self._pathlike.join(*self.parts[2:])
return self._pathlike.join(*self.parts)
def get_path_key(self, filename: Optional[str] = None) -> str:
"""
Used to return relative/path/to/file.ext
"""
filename = filename or self.name
parts = None
if self._bucket in self.parts[0]: parts = self.parts[1:-1]
elif self._bucket in self.parts[1]: parts = self.parts[2:-1]
else: parts = self.parts[:-1]
return self._pathlike.join(*parts, filename)
@property
def _cloudstr(self) -> str:
"""
Reconstructs the proper cloud URI
"""
if self._prefix not in self.parts[0]:
return f'{self._prefix}://' + '/'.join(self.parts)
return f'{self._prefix}://' + '/'.join(self.parts[1:])
@property
def posix_(self):
"""Return the string representation of the path with forward (/)
slashes."""
f = self._flavour
return str(self).replace(f.sep, '/')
@property
def string(self) -> str:
return self._cloudstr if self.is_cloud else self.posix_
@property
def filename_(self) -> str:
"""
Returns the filename if is file, else ''
"""
return self.parts[-1] if self.is_file() else ''
@property
def ext_(self) -> str:
"""
Returns the extension for a file
"""
return self.suffix
@property
def extension(self) -> str:
"""
Returns the extension for a file
"""
return self.suffix
@property
def stat_(self) -> stat_result:
"""
Returns the stat results for path
"""
return self.stat()
@property
def hash_(self) -> str:
"""
Hash of file properties, to tell if it has changed
"""
return self._accessor.ukey(self._cloudpath)
@property
def info_(self):
"""
Return info of path
"""
return self._accessor.info(path=self._path)
@property
def metadata_(self):
"""
Return metadata of path
"""
return self._accessor.metadata(self._cloudpath)
@property
def path_info_(self):
"""
Return info of path
"""
return self._accessor.info(path=self._cloudpath)
@property
def size_(self) -> Optional[Union[float, int]]:
"""
Size in bytes of file
"""
return self._accessor.size(self._cloudpath) if self.is_file_ else None
@property
def modified_(self) -> 'datetime.datetime':
"""
Return the last modified timestamp of file at path as a datetime
"""
r = self.stat_ #self._accessor.modified(self._cloudpath)
ts = r.get('updated', '')
return datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%fZ') if ts else None
#return r.get('updated')
@property
def checksum(self):
return self._accessor.checksum(path=self._path)
@property
def last_modified(self) -> 'datetime.datetime':
ts = self.info_.get('LastModified')
return ts or self.modified_
@property
def etag(self):
"""
returns the file etag
"""
rez = self.path_info_.get('ETag')
if rez: rez = rez.replace('"', '').strip()
return rez
@property
def file_size(self):
"""
returns the total file size in bytes
"""
return self.path_info_.get('Size')
@property
def content_type(self):
"""
returns the ContentType attribute of the file
"""
return self.path_info_.get('ContentType')
@property
def object_type(self):
"""
returns the Type attribute of the file
"""
return self.path_info_.get('type')
@property
def is_cloud(self) -> bool:
if not self._prefix or not self.parts: return False
return self._prefix in self.parts[0] or (len(self.parts) > 1 and self._prefix in self.parts[1])
@property
def is_pathz(self) -> bool:
return True
@property
def exists_(self) -> bool:
return self.exists()
@property
def is_file_(self) -> bool:
return self.is_file()
@property
def is_dir_(self) -> bool:
return self.is_dir()
@property
def home_(self) -> Type['CloudFileSystemPath']:
return self.home()
@property
async def async_exists_(self) -> bool:
return await self.async_exists()
@property
async def async_is_file_(self) -> bool:
return await self.async_is_file()
@property
async def async_is_dir_(self) -> bool:
return await self.async_is_dir()
@property
async def async_home_(self) -> Type['CloudFileSystemPath']:
return await self.async_home()
@property
async def async_stat_(self) -> stat_result:
"""
Returns the stat results for path
"""
return await self.async_stat()
@property
async def async_hash_(self) -> str:
"""
Hash of file properties, to tell if it has changed
"""
return await self._accessor.async_ukey(self._cloudpath)
@property
async def async_size_(self) -> Optional[Union[float, int]]:
"""
Size in bytes of file
"""
if await self.async_is_file_: return await self._accessor.async_size(self._cloudpath)
return None
@property
async def async_metadata_(self):
"""
Return metadata of path
"""
return await self._accessor.async_metadata(self._cloudpath)
@property
async def async_modified_(self) -> 'datetime.datetime':
"""
Return the last modified timestamp of file at path as a datetime
"""
if self._prefix == 'gs':
r = await self.async_stat_
ts = r.get('updated', '')
if ts: return datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%fZ')#.isoformat()
return ts
return await self._accessor.async_modified(self._cloudpath)
@property
async def async_path_info_(self):
"""
Return info of path
"""
return await self.async_info()
def open(self, mode: FileMode = 'r', buffering: int = -1, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE, block_size: int = 5242880, compression: str = None, **kwargs: Any) -> IO[Union[str, bytes]]:
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
return self._accessor.open(self._cloudpath, mode=mode, buffering=buffering, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, **kwargs)
def async_open(self, mode: FileMode = 'r', buffering: int = -1, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE, block_size: int = 5242880, compression: str = None, **kwargs: Any) -> IterableAIOFile:
"""
Asynchronously open the file pointed by this path and return a file object, as
the built-in open() function does.
compression = infer doesn't work all that well.
"""
#self._fileio = self._accessor.open(self._cloudpath, mode=mode, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, buffering=buffering, **kwargs)
#print(type(self._fileio))
#return get_cloud_file(self._fileio)
return get_cloud_file(self._accessor.open(self._cloudpath, mode=mode, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, buffering=buffering, **kwargs))
def reader(self, mode: FileMode = 'r', buffering: int = -1, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE, block_size: int = 5242880, compression: str = None, **kwargs: Any) -> IO[Union[str, bytes]]:
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
return self._accessor.open(self._cloudpath, mode=mode, buffering=buffering, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, **kwargs)
def async_reader(self, mode: FileMode = 'r', buffering: int = -1, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE, block_size: int = 5242880, compression: str = None, **kwargs: Any) -> IterableAIOFile:
"""
Asynchronously open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
return get_cloud_file(self._accessor.open(self._cloudpath, mode=mode, buffering=buffering, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, **kwargs))
def appender(self, mode: FileMode = 'a', buffering: int = -1, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE, block_size: int = 5242880, compression: str = None, **kwargs: Any) -> IO[Union[str, bytes]]:
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
return self._accessor.open(self._cloudpath, mode=mode, buffering=buffering, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, **kwargs)
def async_appender(self, mode: FileMode = 'a', buffering: int = -1, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE, block_size: int = 5242880, compression: str = None, **kwargs: Any) -> IterableAIOFile:
"""
Asynchronously open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
return get_cloud_file(self._accessor.open(self._cloudpath, mode=mode, buffering=buffering, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, **kwargs))
def writer(self, mode: FileMode = 'w', buffering: int = -1, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE, block_size: int = 5242880, compression: str = None, **kwargs: Any) -> IO[Union[str, bytes]]:
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
#self.touch()
return self._accessor.open(self._cloudpath, mode=mode, buffering=buffering, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, **kwargs)
def async_writer(self, mode: FileMode = 'w', buffering: int = -1, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE, block_size: int = 5242880, compression: str = None, **kwargs: Any) -> IterableAIOFile:
"""
Asynchronously open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
#self.touch()
return get_cloud_file(self._accessor.open(self._cloudpath, mode=mode, buffering=buffering, encoding=encoding, errors=errors, block_size=block_size, compression=compression, newline=newline, **kwargs))
def read_text(self, encoding: str | None = DEFAULT_ENCODING, errors: str | None = ON_ERRORS) -> str:
with self.open('r', encoding=encoding, errors=errors) as file:
return file.read()
async def async_read_text(self, encoding: str | None = DEFAULT_ENCODING, errors: str | None = ON_ERRORS) -> str:
async with self.async_open('r', encoding=encoding, errors=errors) as file:
return await file.read()
def read_bytes(self) -> bytes:
with self.open('rb') as file:
return file.read()
async def async_read_bytes(self) -> bytes:
async with self.async_open('rb') as file:
return await file.read()
def write_bytes(self, data: bytes) -> int:
"""
Open the file in bytes mode, write to it, and close the file.
"""
# type-check for the buffer interface before truncating the file
view = memoryview(data)
with self.open(mode='wb') as f:
return f.write(data)
async def async_write_bytes(self, data: bytes) -> int:
"""
Open the file in bytes mode, write to it, and close the file.
"""
# type-check for the buffer interface before truncating the file
view = memoryview(data)
async with self.async_open(mode='wb') as f:
return await f.write(data)
def append_text(self, data: str, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE) -> int:
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, str): raise TypeError(f'data must be str, not {type(data).__name__}')
with self.open(mode='a', encoding=encoding, errors=errors, newline=newline) as f:
n = f.write(data)
n += f.write(newline)
return n
async def async_append_text(self, data: str, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE) -> int:
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, str): raise TypeError(f'data must be str, not {type(data).__name__}')
async with self.async_open(mode='a', encoding=encoding, errors=errors, newline=newline) as f:
n = await f.write(data)
n += await f.write(newline)
return n
def write_text(self, data: str, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE) -> int:
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, str): raise TypeError(f'data must be str, not {type(data).__name__}')
with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
return f.write(data)
async def async_write_text(self, data: str, encoding: Optional[str] = DEFAULT_ENCODING, errors: Optional[str] = ON_ERRORS, newline: Optional[str] = NEWLINE) -> int:
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, str): raise TypeError(f'data must be str, not {type(data).__name__}')
async with self.async_open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
return await f.write(data)
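# Usage sketch (assumption, illustrative only): the text helpers above form a
# simple round trip, e.g.
#   p = CloudFileSystemPath('gs://my-bucket/notes.txt')   # hypothetical URI
#   p.write_text('hello')
#   assert p.read_text() == 'hello'
#   text = await p.async_read_text()                      # inside a coroutine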
def touch(self, truncate: bool = True, data = None, exist_ok: bool = True, **kwargs):
"""
Create this file with the given access mode, if it doesn't exist.
"""
if exist_ok:
try: self._accessor.stat(self._cloudpath)
# Avoid exception chaining
except OSError: pass
else: return
try:
self._accessor.touch(self._cloudpath, truncate = truncate, data = data, **kwargs)
except Exception as e:
with self.open('wb') as f:
f.write(b'')
f.flush()
async def async_touch(self, truncate: bool = True, data = None, exist_ok: bool = True, **kwargs):
"""
Create this file with the given access mode, if it doesn't exist.
"""
if exist_ok:
try: await self._accessor.async_stat(self._cloudpath)
# Avoid exception chaining
except OSError: pass
else: return
await self._accessor.async_touch(self._cloudpath, truncate = truncate, data = data, **kwargs)
def mkdir(self, mode: int = 0o777, parents: bool = True, exist_ok: bool = True):
"""
Create a new directory at this given path.
"""
try: self._accessor.mkdir(self._cloudpath, parents = parents, exist_ok = exist_ok)
except FileNotFoundError:
if not parents or self.parent == self: raise
self.parent.mkdir(parents=True, exist_ok=True)
self.mkdir(mode, parents=False, exist_ok=exist_ok)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not self.is_dir(): raise
async def async_mkdir(self, parents: bool = True, exist_ok: bool = True):
"""
Create a new directory at this given path.
"""
try: await self._accessor.async_mkdir(self._cloudpath, create_parents = parents, exist_ok = exist_ok)
except FileNotFoundError:
if not parents or self.parent == self: raise
await self.parent.async_mkdir(parents=True, exist_ok=True)
await self.async_mkdir(parents=False, exist_ok=exist_ok)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not await self.async_is_dir(): raise
async def async_makedirs(self, parents: bool = True, exist_ok: bool = True):
"""
Create a new directory at this given path.
"""
try: await self._accessor.async_makedirs(self._cloudpath, exist_ok = exist_ok)
except FileNotFoundError:
if not parents or self.parent == self: raise
await self.parent.async_makedirs(exist_ok=True)
await self.async_makedirs(exist_ok=exist_ok)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not await self.async_is_dir(): raise
async def chmod(self, mode: int):
"""
Change the permissions of the path, like os.chmod().
"""
raise NotImplementedError
async def async_chmod(self, mode: int):
"""
Change the permissions of the path, like os.chmod().
"""
raise NotImplementedError
def lchmod(self, mode: int):
"""
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
"""
raise NotImplementedError
async def async_lchmod(self, mode: int):
"""
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
"""
raise NotImplementedError
def unlink(self, missing_ok: bool = False):
"""
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
try: self._accessor.rm_file(self._cloudpath)
except FileNotFoundError:
if not missing_ok: raise
async def async_unlink(self, missing_ok: bool = False):
"""
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
try: await self._accessor.async_rm_file(self._cloudpath)
except FileNotFoundError:
if not missing_ok: raise
def rm(self, recursive: bool = False, maxdepth: int = None, missing_ok: bool = False):
"""
Remove this file.
If the path is a directory, use rmdir() instead.
"""
try: self._accessor.rm(self._path, recursive = recursive, maxdepth = maxdepth)
except Exception:
    if not missing_ok: raise
async def async_rm(self, recursive: bool = False, maxdepth: int = None, missing_ok: bool = False):
"""
Remove this file.
If the path is a directory, use rmdir() instead.
"""
try: await self._accessor.async_rm(self._path, recursive = recursive, maxdepth = maxdepth)
except Exception:
    if not missing_ok: raise
def rm_file(self, missing_ok: bool = True):
"""
Remove this file.
If the path is a directory, use rmdir() instead.
"""
try:
self._accessor.rm_file(self._path)
return True
except Exception as e:
if missing_ok: return False
raise e from e
async def async_rm_file(self, missing_ok: bool = True):
"""
Remove this file.
If the path is a directory, use rmdir() instead.
"""
try:
await self._accessor.async_rm_file(self._path)
return True
except Exception as e:
if missing_ok: return False
raise e from e
# async def async_unlink(self, missing_ok: bool = False):
# """
# Remove this file or link.
# If the path is a directory, use rmdir() instead.
# """
# try: await self._accessor.async_unlink(self._cloudpath, missing_ok = missing_ok)
# except FileNotFoundError:
# if not missing_ok: raise
def rmdir(self, force: bool = False, recursive: bool = True, skip_errors: bool = True):
"""
Remove this directory. The directory must be empty.
"""
try:
return self._accessor.rmdir(self._cloudpath)
except Exception as e:
if force: return self._accessor.rmdir(self._cloudpath, recursive = recursive)
if skip_errors: return
raise e
async def async_rmdir(self, force: bool = False, recursive: bool = True, skip_errors: bool = True):
"""
Remove this directory. The directory must be empty.
"""
try:
return await self._accessor.async_rmdir(self._cloudpath)
except Exception as e:
if force: return await self._accessor.async_rmdir(self._cloudpath, recursive = recursive)
if skip_errors: return
raise e
def link_to(self, target: str):
"""
Create a hard link pointing to a path named target.
"""
raise NotImplementedError
async def async_link_to(self, target: str):
"""
Create a hard link pointing to a path named target.
"""
raise NotImplementedError
def rename(self, target: Union[str, Type['CloudFileSystemPath']]) -> Type['CloudFileSystemPath']:
"""
Rename this path to the target path.
The target path may be absolute or relative. Relative paths are
interpreted relative to the current working directory, *not* the
directory of the Path object.
Returns the new Path instance pointing to the target path.
"""
self._accessor.rename(self._cloudpath, target)
return type(self)(target)
async def async_rename(self, target: Union[str, Type['CloudFileSystemPath']]) -> Type['CloudFileSystemPath']:
"""
Rename this path to the target path.
The target path may be absolute or relative. Relative paths are
interpreted relative to the current working directory, *not* the
directory of the Path object.
Returns the new Path instance pointing to the target path.
"""
await self._accessor.async_rename(self._cloudpath, target)
return type(self)(target)
def replace(self, target: str) -> Type['CloudFileSystemPath']:
"""
Rename this path to the target path, overwriting if that path exists.
The target path may be absolute or relative. Relative paths are
interpreted relative to the current working directory, *not* the
directory of the Path object.
Returns the new Path instance pointing to the target path.
"""
self._accessor.replace(self._cloudpath, target)
return type(self)(target)
async def async_replace(self, target: str) -> Type['CloudFileSystemPath']:
"""
Rename this path to the target path, overwriting if that path exists.
The target path may be absolute or relative. Relative paths are
interpreted relative to the current working directory, *not* the
directory of the Path object.
Returns the new Path instance pointing to the target path.
"""
await self._accessor.async_replace(self._cloudpath, target)
return type(self)(target)
def symlink_to(self, target: str, target_is_directory: bool = False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
"""
raise NotImplementedError
async def async_symlink_to(self, target: str, target_is_directory: bool = False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
"""
raise NotImplementedError
def exists(self) -> bool:
"""
Whether this path exists.
"""
return self._accessor.exists(self._cloudpath)
async def async_exists(self) -> bool:
"""
Whether this path exists.
"""
return await self._accessor.async_exists(self._cloudpath)
@classmethod
def cwd(cls: type) -> str:
"""Return a new path pointing to the current working directory
(as returned by os.getcwd()).
"""
cwd: str = os.getcwd()
return cls(cwd)
@classmethod
def home(cls: type) -> Type['CloudFileSystemPath']:
"""Return a new path pointing to the user's home directory (as
returned by os.path.expanduser('~')).
"""
homedir: str = cls()._flavour.gethomedir(None)
return cls(homedir)
@classmethod
async def async_home(cls: type) -> Type['CloudFileSystemPath']:
"""Return a new path pointing to the user's home directory (as
returned by os.path.expanduser('~')).
"""
coro = cls()._flavour.async_gethomedir(None)
homedir: str = await coro
return cls(homedir)
def samefile(self, other_path: Union[Type['CloudFileSystemPath'], Paths]) -> bool:
"""Return whether other_path is the same or not as this file
(as returned by os.path.samefile()).
"""
if isinstance(other_path, Paths.__args__): other_path = type(self)(other_path)
if isinstance(other_path, CloudFileSystemPath):
try: other_st = other_path.stat()
except AttributeError: other_st = self._accessor.stat(other_path)
else:
try: other_st = other_path.stat()
except AttributeError: other_st = other_path._accessor.stat(other_path)
return os.path.samestat(self.stat(), other_st)
async def async_samefile(self, other_path: Union[Type['CloudFileSystemPath'], Paths]) -> bool:
"""Return whether other_path is the same or not as this file
(as returned by os.path.samefile()).
"""
if isinstance(other_path, Paths.__args__): other_path = type(self)(other_path)
if isinstance(other_path, CloudFileSystemPath):
try: other_st = await other_path.async_stat()
except AttributeError: other_st = await self._accessor.async_stat(other_path)
else:
try: other_st = await to_thread(other_path.stat)
except AttributeError: other_st = await to_thread(other_path._accessor.stat, other_path)
return os.path.samestat(await self.async_stat(),other_st)
def iterdir(self) -> Iterable[Type['CloudFileSystemPath']]:
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
for name in self._accessor.listdir(self):
if name in {'.', '..'}: continue
yield self._make_child_relpath(name)
async def async_iterdir(self) -> AsyncIterable[Type['CloudFileSystemPath']]:
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
# for name in await self._accessor.async_listdir(self):
async for name in self._accessor.async_listdir(self):
if name in {'.', '..'}: continue
yield self._make_child_relpath(name)
def glob(self, pattern: str = '*', as_path: bool = True) -> Iterable[Union[str, Type['CloudFileSystemPath']]]:
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given relative pattern.
Warning: may not behave as expected; prefer find() instead.
"""
if not pattern: raise ValueError("Unacceptable pattern: {!r}".format(pattern))
#if self.is_cloud:
glob_pattern = self._cloudpath + ('/' if self.is_dir() and not self._cloudpath.endswith('/') and not pattern.startswith('/') else '') + pattern
try:
matches = self._accessor.glob(glob_pattern)
if not matches: return matches
if self.is_cloud: matches = [f'{self._prefix}://{m}' for m in matches]
if as_path: matches = [type(self)(m) for m in matches]
return matches
except Exception as e:
logger.error(e)
return self.find(pattern = pattern, as_string = not as_path)
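# Usage sketch (assumption): glob() delegates to the accessor and falls back to
# find() when the accessor raises, so for broad recursive listings find() is
# usually the more reliable entry point, e.g.
#   path.glob('*.json')        # children matching the pattern, as path objects
#   path.find(as_string=True)  # every file below the path, as plain strings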
async def async_glob(self, pattern: str = '*', as_path: bool = True) -> AsyncIterable[Type['CloudFileSystemPath']]:
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given relative pattern.
"""
if not pattern: raise ValueError("Unacceptable pattern: {!r}".format(pattern))
glob_pattern = self._cloudpath + ('/' if self.is_dir() and not self._cloudpath.endswith('/') and not pattern.startswith('/') else '') + pattern
try:
matches = await self._accessor.async_glob(glob_pattern)
if not matches: return matches
if self.is_cloud: matches = [f'{self._prefix}://{m}' for m in matches]
if as_path: matches = [type(self)(m) for m in matches]
return matches
except Exception as e:
logger.error(e)
return await self.async_find(pattern = pattern, as_string = not as_path)
def find(self, pattern: str = "*", as_string: bool = False, maxdepth: int = None, withdirs: bool = None, detail: bool = False) -> Union[List[str], List[Type['CloudFileSystemPath']]]:
"""
List all files below path. Like posix find command without conditions
"""
matches = self._accessor.find(path = self._cloudstr, maxdepth = maxdepth, withdirs = withdirs, detail = detail, prefix = pattern)
if self.is_cloud:
matches = [f'{self._prefix}://{m}' for m in matches]
if not as_string:
matches = [type(self)(m) for m in matches]
return matches
async def async_find(self, pattern: str = "*", as_string: bool = False, maxdepth: int = None, withdirs: bool = None, detail: bool = False) -> Union[List[str], List[Type['CloudFileSystemPath']]]:
"""
List all files below path. Like posix find command without conditions
"""
matches = await self._accessor.async_find(path = self._cloudstr, maxdepth = maxdepth, withdirs = withdirs, detail = detail, prefix = pattern)
if self.is_cloud:
matches = [f'{self._prefix}://{m}' for m in matches]
if not as_string:
matches = [type(self)(m) for m in matches]
return matches
def rglob(self, pattern: str, as_path: bool = True) -> Iterable[Union[str, Type['CloudFileSystemPath']]]:
"""Recursively yield all existing files (of any kind, including
directories) matching the given relative pattern, anywhere in
this subtree.
"""
return self.glob(pattern = f'**/{pattern}', as_path = as_path)
async def async_rglob(self, pattern: str) -> AsyncIterable[Union[str, Type['CloudFileSystemPath']]]:
"""Recursively yield all existing files (of any kind, including
directories) matching the given relative pattern, anywhere in
this subtree.
"""
return await self.async_glob(f'**/{pattern}')
def cat(self, recursive: bool = False, on_error: str = 'raise', **kwargs):
"""
Fetch paths’ contents
Parameters
recursive: bool
If True, assume the path(s) are directories, and get all the contained files
on_error: "raise", "omit", "return"
If raise, an underlying exception will be raised (converted to KeyError if the type is in self.missing_exceptions);
if omit, keys with exception will simply not be included in the output; if “return”, all keys are included in the output,
but the value will be bytes or an exception instance.
kwargs: passed to cat_file
"""
return self._accessor.cat(self._cloudstr, recursive = recursive, on_error = on_error, **kwargs)
async def async_cat(self, recursive: bool = False, on_error: str = 'raise', **kwargs):
"""
Fetch paths’ contents
Parameters
recursive: bool
If True, assume the path(s) are directories, and get all the contained files
on_error: "raise", "omit", "return"
If raise, an underlying exception will be raised (converted to KeyError if the type is in self.missing_exceptions);
if omit, keys with exception will simply not be included in the output; if “return”, all keys are included in the output,
but the value will be bytes or an exception instance.
kwargs: passed to cat_file
"""
return await self._accessor.async_cat(self._cloudstr, recursive = recursive, on_error = on_error, **kwargs)
def cat_file(self, as_bytes: bool = False, start: int = None, end: int = None, **kwargs):
"""
Parameters
start, end: int
Bytes limits of the read. If negative, backwards from end, like usual python slices. Either can be None for start or end of file, respectively
kwargs: passed to ``open()``.
"""
res = self._accessor.cat_file(self._cloudstr, start = start, end = end, **kwargs)
if not as_bytes and isinstance(res, bytes): res = res.decode('UTF-8')
return res
async def async_cat_file(self, as_bytes: bool = False, start: int = None, end: int = None, **kwargs):
"""
Parameters
start, end: int
Bytes limits of the read. If negative, backwards from end, like usual python slices. Either can be None for start or end of file, respectively
kwargs: passed to ``open()``.
"""
res = await self._accessor.async_cat_file(self._cloudstr, start = start, end = end, **kwargs)
if not as_bytes and isinstance(res, bytes): res = res.decode('UTF-8')
return res
def pipe(self, value: Union[bytes, str], **kwargs):
"""
Put value into path
(counterpart to cat)
"""
if not isinstance(value, bytes): value = value.encode('UTF-8')
return self._accessor.pipe(self._cloudstr, value = value, **kwargs)
async def async_pipe(self, value: Union[bytes, str], **kwargs):
"""
Put value into path
(counterpart to cat)
"""
if not isinstance(value, bytes): value = value.encode('UTF-8')
return await self._accessor.async_pipe(self._cloudstr, value = value, **kwargs)
def pipe_file(self, value: Union[bytes, str], **kwargs):
"""
Put value into path
(counterpart to cat)
"""
if not isinstance(value, bytes): value = value.encode('UTF-8')
return self._accessor.pipe_file(self._cloudstr, value = value, **kwargs)
async def async_pipe_file(self, value: Union[bytes, str], **kwargs):
"""
Put value into path
(counterpart to cat)
"""
if not isinstance(value, bytes): value = value.encode('UTF-8')
return await self._accessor.async_pipe_file(self._cloudstr, value = value, **kwargs)
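# Usage sketch (assumption): pipe()/cat_file() form a raw-bytes write/read pair, e.g.
#   path.pipe(b'payload')   # upload bytes to this path
#   path.cat_file()         # -> 'payload' (decoded unless as_bytes=True)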
def absolute(self) -> Type['CloudFileSystemPath']:
"""Return an absolute version of this path. This function works
even if the path doesn't point to anything.
No normalization is done, i.e. all '.' and '..' will be kept along.
Use resolve() to get the canonical path to a file.
"""
raise NotImplementedError
def resolve(self, strict: bool = False) -> Type['CloudFileSystemPath']:
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
s: Optional[str] = self._flavour.resolve(self, strict=strict)
if s is None:
self.stat()
path = self.absolute()
s = str(path)
# Now we have no symlinks in the path, it's safe to normalize it.
normed: str = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj
async def async_resolve(self, strict: bool = False) -> Type['CloudFileSystemPath']:
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
s: Optional[str] = await self._flavour.async_resolve(self, strict=strict)
if s is None:
await self.async_stat()
path = await self.absolute()
s = str(path)
# Now we have no symlinks in the path, it's safe to normalize it.
normed: str = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj
def stat(self) -> stat_result:
"""
Return the result of the stat() system call on this path, like
os.stat() does.
"""
return self._accessor.stat(self._cloudpath)
async def async_stat(self) -> stat_result:
"""
Return the result of the stat() system call on this path, like
os.stat() does.
"""
return await self._accessor.async_stat(self._cloudpath)
def info(self):
"""
Return the filesystem info for this path, as reported by the accessor.
"""
return self._accessor.info(self._cloudpath)
async def async_info(self):
"""
Return the filesystem info for this path, as reported by the accessor.
"""
return await self._accessor.async_info(self._cloudpath)
def lstat(self) -> stat_result:
"""
Like stat(), except if the path points to a symlink, the symlink's
status information is returned, rather than its target's.
"""
raise NotImplementedError
async def async_lstat(self) -> stat_result:
"""
Like stat(), except if the path points to a symlink, the symlink's
status information is returned, rather than its target's.
"""
raise NotImplementedError
def owner(self) -> str:
"""
Return the login name of the file owner.
"""
raise NotImplementedError
async def async_owner(self) -> str:
"""
Return the login name of the file owner.
"""
raise NotImplementedError
def group(self) -> str:
"""
Return the group name of the file gid.
"""
raise NotImplementedError
async def async_group(self) -> str:
"""
Return the group name of the file gid.
"""
raise NotImplementedError
def is_dir(self) -> bool:
"""
Whether this path is a directory.
"""
return self._accessor.is_dir(self._cloudpath)
async def async_is_dir(self) -> bool:
"""
Whether this path is a directory.
"""
return await self._accessor.async_is_dir(self._cloudpath)
def is_symlink(self) -> bool:
"""
Whether this path is a symbolic link.
"""
raise NotImplementedError
async def async_is_symlink(self) -> bool:
"""
Whether this path is a symbolic link.
"""
raise NotImplementedError
def is_file(self) -> bool:
"""
Whether this path is a regular file (also True for symlinks pointing
to regular files).
"""
return self._accessor.is_file(self._cloudpath)
async def async_is_file(self) -> bool:
"""
Whether this path is a regular file (also True for symlinks pointing
to regular files).
"""
return await self._accessor.async_is_file(self._cloudpath)
@staticmethod
def _get_pathlike(path: 'PathLike'):
"""
Returns the path of the file.
"""
from fileio.core.generic import get_path
return get_path(path)
def copy(self, dest: 'PathLike', recursive: bool = False, overwrite: bool = False, skip_errors: bool = False):
"""
Copies the File to the Dir/File.
"""
dest = self._get_pathlike(dest)
if dest.is_dir() and self.is_file():
dest = dest.joinpath(self.filename_)
if dest.exists() and not overwrite and dest.is_file():
if skip_errors: return dest
raise ValueError(f'File {dest._path} exists')
if dest.is_cloud: self._accessor.copy(self._path, dest._path, recursive)
else: self._accessor.get(self._path, dest._path, recursive)
return dest
async def async_copy(self, dest: 'PathLike', recursive: bool = False, overwrite: bool = False, skip_errors: bool = False):
"""
Copies the File to the Dir/File.
"""
dest = self._get_pathlike(dest)
if await dest.async_is_dir() and await self.async_is_file():
dest = dest.joinpath(self.filename_)
if await dest.async_exists() and not overwrite and await dest.async_is_file():
if skip_errors: return dest
raise ValueError(f'File {dest._path} exists')
if dest.is_cloud: await self._accessor.async_copy(self._cloudpath, dest._cloudpath, recursive = recursive)
else: await self._accessor.async_get(self._cloudpath, dest.string, recursive = recursive)
return dest
def copy_file(self, dest: 'PathLike', recursive: bool = False, overwrite: bool = False, skip_errors: bool = False):
"""
Copies this File to the Dest Path
"""
dest = self._get_pathlike(dest)
if dest.is_dir() and self.is_file():
dest = dest.joinpath(self.filename_)
if dest.exists() and not overwrite and dest.is_file():
if skip_errors: return dest
raise ValueError(f'File {dest._path} exists')
if dest.is_cloud: self._accessor.copy(self._path, dest._path, recursive)
else: self._accessor.get(self._path, dest._path, recursive)
return dest
async def async_copy_file(self, dest: 'PathLike', recursive: bool = False, overwrite: bool = False, skip_errors: bool = False):
"""
Copies this File to the Dest Path
"""
dest = self._get_pathlike(dest)
if await dest.async_is_dir() and await self.async_is_file():
dest = dest.joinpath(self.filename_)
if await dest.async_exists() and not overwrite and await dest.async_is_file():
if skip_errors: return dest
raise ValueError(f'File {dest._path} exists')
if dest.is_cloud: await self._accessor.async_copy(self._cloudpath, dest._cloudpath, recursive = recursive)
else: await self._accessor.async_get(self._cloudpath, dest.string, recursive = recursive)
return dest
def put(self, src: 'PathLike', recursive: bool = False, callback: Optional[Callable] = Callback(), **kwargs):
"""
Copy file(s) from src to this FilePath
WIP support for cloud-to-cloud
"""
src = self._get_pathlike(src)
assert not src.is_cloud, 'Cloud-to-cloud transfers are not supported at this time'
return self._accessor.put(src.string, self._cloudpath, recursive=recursive, callback=callback, **kwargs)
async def async_put(self, src: 'PathLike', recursive: bool = False, callback: Optional[Callable] = Callback(), **kwargs):
"""
Copy file(s) from src to this FilePath
WIP support for cloud-to-cloud
"""
src = self._get_pathlike(src)
assert not src.is_cloud, 'Cloud-to-cloud transfers are not supported at this time'
return await self._accessor.async_put(src.string, self._cloudpath, recursive=recursive, callback=callback, **kwargs)
def put_file(self, src: 'PathLike', callback: Optional[Callable] = Callback(), **kwargs):
"""
Copy single file to remote
WIP support for cloud-to-cloud
"""
src = self._get_pathlike(src)
assert not src.is_cloud, 'Cloud-to-cloud transfers are not supported at this time'
return self._accessor.put_file(src.string, self._cloudpath, callback=callback, **kwargs)
async def async_put_file(self, src: 'PathLike', callback: Optional[Callable] = Callback(), **kwargs):
"""
Copy single file to remote
WIP support for cloud-to-cloud
"""
src = self._get_pathlike(src)
assert not src.is_cloud, 'Cloud-to-cloud transfers are not supported at this time'
return await self._accessor.async_put_file(src.string, self._cloudpath, callback=callback, **kwargs)
def get(self, dest: 'PathLike', recursive: bool = False, callback: Optional[Callable] = Callback(), **kwargs):
"""
Copy the remote file(s) to dest (local)
WIP support for cloud-to-cloud
"""
dest = self._get_pathlike(dest)
assert not dest.is_cloud, 'Cloud-to-cloud transfers are not supported at this time'
return self._accessor.get(self._cloudpath, dest.string, recursive=recursive, callback=callback, **kwargs)
async def async_get(self, dest: 'PathLike', recursive: bool = False, callback: Optional[Callable] = Callback(), **kwargs):
"""
Copy the remote file(s) to dest (local)
WIP support for cloud-to-cloud
"""
dest = self._get_pathlike(dest)
assert not dest.is_cloud, 'Cloud-to-cloud transfers are not supported at this time'
return await self._accessor.async_get(self._cloudpath, dest.string, recursive=recursive, callback=callback, **kwargs)
def get_file(self, dest: 'PathLike', callback: Optional[Callable] = Callback(), **kwargs):
"""
Copies this file to dest (local)
WIP support for cloud-to-cloud
"""
dest = self._get_pathlike(dest)
assert not dest.is_cloud, 'Cloud-to-cloud transfers are not supported at this time'
return self._accessor.get_file(self._cloudpath, dest.string, callback=callback, **kwargs)
async def async_get_file(self, dest: 'PathLike', callback: Optional[Callable] = Callback(), **kwargs):
"""
Copies this file to dest (local)
WIP support for cloud-to-cloud
"""
dest = self._get_pathlike(dest)
assert not dest.is_cloud, 'Cloud-to-cloud transfers are not supported at this time'
return await self._accessor.async_get_file(self._cloudpath, dest.string, callback=callback, **kwargs)
def is_mount(self) -> bool:
"""
Check if this path is a POSIX mount point
"""
# Cloud object stores have no POSIX mount points, so this always returns False.
return False
async def async_is_mount(self) -> bool:
"""
Check if this path is a POSIX mount point
"""
# Cloud object stores have no POSIX mount points, so this always returns False.
return False
def is_block_device(self) -> bool:
"""
Whether this path is a block device.
"""
return False
#raise NotImplementedError
async def async_is_block_device(self) -> bool:
"""
Whether this path is a block device.
"""
return False
#raise NotImplementedError
def is_char_device(self) -> bool:
"""
Whether this path is a character device.
"""
return False
#raise NotImplementedError
async def async_is_char_device(self) -> bool:
"""
Whether this path is a character device.
"""
return False
def is_fifo(self) -> bool:
"""
Whether this path is a FIFO.
"""
return False
async def async_is_fifo(self) -> bool:
"""
Whether this path is a FIFO.
"""
return False
def is_socket(self) -> bool:
"""
Whether this path is a socket.
"""
return False
async def async_is_socket(self) -> bool:
"""
Whether this path is a socket.
"""
return False
def expanduser(self) -> Type['CloudFileSystemPath']:
""" Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser)
"""
if (not self._drv and not self._root and self._parts and self._parts[0][:1] == '~'):
homedir = self._flavour.gethomedir(self._parts[0][1:])
return self._from_parts([homedir] + self._parts[1:])
return self
async def async_expanduser(self) -> Type['CloudFileSystemPath']:
""" Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser)
"""
if (not self._drv and not self._root and self._parts and self._parts[0][:1] == '~'):
homedir = await self._flavour.async_gethomedir(self._parts[0][1:])
return self._from_parts([homedir] + self._parts[1:])
return self
def iterdir(self) -> Iterable[Type['CloudFileSystemPath']]:
names = self._accessor.listdir(self)
for name in names:
if name in {'.', '..'}: continue
yield self._make_child_relpath(name)
async def async_iterdir(self) -> AsyncIterable[Type['CloudFileSystemPath']]:
names = await self._accessor.async_listdir(self)
for name in names:
if name in {'.', '..'}: continue
yield self._make_child_relpath(name)
def _raise_closed(self):
raise ValueError("I/O operation on closed path")
def _raise_open(self):
raise ValueError("I/O operation on already open path")
# We sort of assume that it will be used to open a file
def __enter__(self):
#if self._fileio: self._raise_open()
#if not self._fileio:
# self._fileio = self.open()
if self._closed: self._raise_closed()
return self
def __exit__(self, t, v, tb):
self._closed = True
async def __aenter__(self):
if self._closed: self._raise_closed()
return self
async def __aexit__(self, exc_type, exc, tb):
self._closed = True
"""
Other Methods
"""
def url(self, **kwargs):
return self._accessor.url(self._cloudpath, **kwargs)
async def async_url(self, **kwargs):
return await self._accessor.async_url(self._cloudpath, **kwargs)
def setxattr(self, **kwargs):
return self._accessor.setxattr(self._cloudpath, **kwargs)
async def async_setxattr(self, **kwargs):
return await self._accessor.async_setxattr(self._cloudpath, **kwargs)
def cloze(self, **kwargs):
if self._fileio:
self._fileio.commit()
return self._accessor.invalidate_cache(self._cloudpath)
async def async_cloze(self, **kwargs):
return await self._accessor.async_invalidate_cache(self._cloudpath)
def get_checksum(
self,
method: str = 'md5',
chunk_size: int = 1024,
**kwargs
):
"""
Creates the checksum for the file
"""
hashmethod = getattr(hashlib, method)
hasher = hashmethod()
with self.open('rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
hasher.update(chunk)
checksum = hasher.hexdigest()
del hasher
return checksum
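# Usage sketch (assumption): any hashlib algorithm name can be passed through,
# e.g. path.get_checksum('sha256', chunk_size=65536) for larger read chunks.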
async def async_get_checksum(
self,
method: str = 'md5',
chunk_size: int = 1024,
**kwargs
):
"""
Creates the checksum for the file
"""
hashmethod = getattr(hashlib, method)
hasher = hashmethod()
async with self.async_open('rb') as f:
    # read and hash in chunks until EOF
    chunk = await f.read(chunk_size)
    while chunk:
        hasher.update(chunk)
        chunk = await f.read(chunk_size)
checksum = hasher.hexdigest()
del hasher
return checksum
class CloudFileSystemPosixPath(PosixPath, CloudFileSystemPath, PureCloudFileSystemPosixPath):
__slots__ = ()
class CloudFileSystemWindowsPath(WindowsPath, CloudFileSystemPath, PureCloudFileSystemWindowsPath):
__slots__ = ()
def is_mount(self) -> bool:
raise NotImplementedError("CloudFileSystemPath.is_mount() is unsupported on this system")
async def async_is_mount(self) -> bool:
raise NotImplementedError("CloudFileSystemPath.async_is_mount() is unsupported on this system")
os.PathLike.register(CloudFileSystemPurePath)
os.PathLike.register(CloudFileSystemPath)
os.PathLike.register(PureCloudFileSystemPosixPath)
os.PathLike.register(CloudFileSystemWindowsPath)
os.PathLike.register(CloudFileSystemPosixPath)
os.PathLike.register(PureCloudFileSystemWindowsPath)
def register_pathlike(pathz: List[Union[PosixPath, CloudFileSystemPath, WindowsPath, CloudFileSystemWindowsPath, CloudFileSystemPosixPath, PureCloudFileSystemWindowsPath, Any]]):
for p in pathz:
os.PathLike.register(p)
__all__ = (
'ClassVar',
'AccessorLike',
'CloudFileSystemLike',
'get_accessor',
'get_cloud_filesystem',
'CloudFileSystemPurePath',
'PurePath',
'PureCloudFileSystemPosixPath',
'PureCloudFileSystemWindowsPath',
'CloudFileSystemPath',
'Path',
'_pathz_windows_flavour',
'_pathz_posix_flavour',
'CloudFileSystemPosixPath',
'CloudFileSystemWindowsPath',
'register_pathlike'
)
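# Usage sketch (assumption, not part of the original source): a concrete provider
# subclass would set its own prefix and be registered so that os.PathLike checks
# accept it, e.g.
#   class MyCloudPath(CloudFileSystemPath):
#       _prefix = 'mycloud'
#   register_pathlike([MyCloudPath])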
/SwiftCodeGen-0.3.tar.gz/SwiftCodeGen-0.3/README.rst
************
SwiftCodeGen
************
A command line utility to generate model and web service classes in the Swift programming language. It is a simple, lightweight tool that automates part of the development effort: with a single command it generates all the required model classes and web service classes based on `Alamofire <https://github.com/Alamofire/Alamofire>`_ and `ObjectMapper <https://github.com/Hearst-DD/ObjectMapper>`_.
============
Installation
============
SwiftCodeGen is written in Python and requires Python 2.7 or above.
Install SwiftCodeGen with pip::
    $ pip install SwiftCodeGen
If you already have a version installed, upgrade with::
    $ pip install --upgrade SwiftCodeGen
=====
Usage
=====
SwiftCodeGen uses a `SwiftGenConf.json` file to keep API details and other common properties for basic configuration. It also uses template files to generate model and service classes. More documentation can be found `here <https://github.com/akhilraj-rajkumar/swift-code-gen#swiftcodegen>`_.
/func_timeouts-1.0.2.tar.gz/func_timeouts-1.0.2/func_timeouts/dafunc.py
import copy
import inspect
import threading
import time
import types
import sys
from .exceptions import FunctionTimedOut
from .StoppableThread import StoppableThread
try:
from .py3_raise import raise_exception
except SyntaxError:
from .py2_raise import raise_exception
except ImportError:
from .py2_raise import raise_exception
from functools import wraps
__all__ = ('func_timeout', 'func_set_timeout')
def func_timeout(timeout, func, args=(), kwargs=None):
'''
func_timeout - Runs the given function for up to #timeout# seconds.
Raises any exceptions #func# would raise, and returns what #func# would return, unless the timeout is exceeded, in which case it raises FunctionTimedOut
@param timeout <float> - Maximum number of seconds to run #func# before terminating
@param func <function> - The function to call
@param args <tuple> - Any ordered arguments to pass to the function
@param kwargs <dict/None> - Keyword arguments to pass to the function.
@raises - FunctionTimedOut if #timeout# is exceeded, otherwise anything #func# could raise will be raised
If the timeout is exceeded, FunctionTimedOut will be raised within the context of the called function every two seconds until it terminates,
but will not block the calling thread (a new thread will be created to perform the join). If possible, you should try/except FunctionTimedOut
to return cleanly, but in most cases it will 'just work'.
@return - The return value that #func# gives
'''
if not kwargs:
kwargs = {}
if not args:
args = ()
ret = []
exception = []
isStopped = False
def funcwrap(args2, kwargs2):
try:
ret.append( func(*args2, **kwargs2) )
except FunctionTimedOut:
# Don't print traceback to stderr if we time out
pass
except Exception as e:
exc_info = sys.exc_info()
if isStopped is False:
# Assemble the alternate traceback, excluding this function
# from the trace (by going to next frame)
# Python 3 reads the traceback natively from __traceback__,
# Python 2 has a different form for "raise"
e.__traceback__ = exc_info[2].tb_next
exception.append( e )
thread = StoppableThread(target=funcwrap, args=(args, kwargs))
thread.daemon = True
thread.start()
thread.join(timeout)
stopException = None
if thread.is_alive():
isStopped = True
class FunctionTimedOutTempType(FunctionTimedOut):
def __init__(self):
return FunctionTimedOut.__init__(self, '', timeout, func, args, kwargs)
FunctionTimedOutTemp = type('FunctionTimedOut' + str( hash( "%d_%d_%d_%d" %(id(timeout), id(func), id(args), id(kwargs))) ), FunctionTimedOutTempType.__bases__, dict(FunctionTimedOutTempType.__dict__))
stopException = FunctionTimedOutTemp
thread._stopThread(stopException)
thread.join(min(.1, timeout / 50.0))
raise FunctionTimedOut('', timeout, func, args, kwargs)
else:
# We can still cleanup the thread here..
# Still give a timeout... just... cuz..
thread.join(.5)
if exception:
raise_exception(exception)
if ret:
return ret[0]
def func_set_timeout(timeout, allowOverride=False):
'''
func_set_timeout - Decorator to run a function with a given/calculated timeout (max execution time).
Optionally (if #allowOverride is True), adds a parameter, "forceTimeout", to the
function which, if provided, will override the default timeout for that invocation.
If #timeout is provided as a lambda/function, it will be called
prior to each invocation of the decorated function to calculate the timeout to be used
for that call, based on the arguments passed to the decorated function.
For example, you may have a "processData" function whose execution time
depends on the number of "data" elements, so you may want a million elements to have a
much higher timeout than seven elements.
If #allowOverride is True AND a kwarg of "forceTimeout" is passed to the wrapped function, that timeout
will be used for that single call.
@param timeout <float OR lambda/function> -
**If float:**
Default number of seconds max to allow function to execute
before throwing FunctionTimedOut
**If lambda/function:**
If a function/lambda is provided, it will be called for every
invocation of the decorated function (unless #allowOverride=True and "forceTimeout" was passed)
to determine the timeout to use based on the arguments to the decorated function.
The arguments as passed into the decorated function will be passed to this function.
They either must match exactly to what the decorated function has, OR
if you prefer to get the *args (list of ordered args) and **kwargs ( key : value keyword args form),
define your calculate function like:
def calculateTimeout(*args, **kwargs):
...
or lambda like:
calculateTimeout = lambda *args, **kwargs : ...
otherwise the args to your calculate function should match exactly the decorated function.
@param allowOverride <bool> Default False, if True adds a keyword argument to the decorated function,
"forceTimeout" which, if provided, will override the #timeout. If #timeout was provided as a lambda / function, it
will not be called.
@throws FunctionTimedOut If the allotted time passes without the function returning naturally
@see func_timeout
'''
# Try to be as efficient as possible... don't compare the args more than once
# Helps closure issue on some versions of python
defaultTimeout = copy.copy(timeout)
isTimeoutAFunction = bool( issubclass(timeout.__class__, (types.FunctionType, types.MethodType, types.LambdaType, types.BuiltinFunctionType, types.BuiltinMethodType) ) )
if not isTimeoutAFunction:
if not issubclass(timeout.__class__, (float, int)):
try:
timeout = float(timeout)
except:
raise ValueError('timeout argument must be a float/int for number of seconds, or a function/lambda which gets passed the function arguments and returns a calculated timeout (as float or int). Passed type: < %s > is not of any of these, and cannot be converted to a float.' %( timeout.__class__.__name__, ))
if not allowOverride and not isTimeoutAFunction:
# Only defaultTimeout provided. Simple function wrapper
def _function_decorator(func):
return wraps(func)(lambda *args, **kwargs : func_timeout(defaultTimeout, func, args=args, kwargs=kwargs))
# def _function_wrapper(*args, **kwargs):
# return func_timeout(defaultTimeout, func, args=args, kwargs=kwargs)
# return _function_wrapper
return _function_decorator
if not isTimeoutAFunction:
# allowOverride is True and timeout is not a function. Simple conditional on every call
def _function_decorator(func):
def _function_wrapper(*args, **kwargs):
if 'forceTimeout' in kwargs:
useTimeout = kwargs.pop('forceTimeout')
else:
useTimeout = defaultTimeout
return func_timeout(useTimeout, func, args=args, kwargs=kwargs)
return wraps(func)(_function_wrapper)
return _function_decorator
# At this point, timeout IS known to be a function.
timeoutFunction = timeout
if allowOverride:
# Could use a lambda here... but want traceback to highlight the calculate function,
# and not the invoked function
def _function_decorator(func):
def _function_wrapper(*args, **kwargs):
if 'forceTimeout' in kwargs:
useTimeout = kwargs.pop('forceTimeout')
else:
useTimeout = timeoutFunction(*args, **kwargs)
return func_timeout(useTimeout, func, args=args, kwargs=kwargs)
return wraps(func)(_function_wrapper)
return _function_decorator
# Cannot override, and calculate timeout function
def _function_decorator(func):
def _function_wrapper(*args, **kwargs):
useTimeout = timeoutFunction(*args, **kwargs)
return func_timeout(useTimeout, func, args=args, kwargs=kwargs)
return wraps(func)(_function_wrapper)
return _function_decorator
def timeout():
def _function_decorator(func):
def _function_wrapper(*args, **kwargs):
if 'timeout' in kwargs:
timeout_val = kwargs.pop('timeout')
else:
return func(*args, **kwargs)
return func_timeout(timeout_val, func, args=args, kwargs=kwargs)
return wraps(func)(_function_wrapper)
return _function_decorator
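# Hedged usage sketch (not part of the original module): demonstrates the two
# public entry points above; it only runs when this file is executed directly.
if __name__ == '__main__':
    def _slow_add(a, b, delay=2.0):
        time.sleep(delay)
        return a + b

    # Direct call with a 0.5 second limit -- expected to raise FunctionTimedOut.
    try:
        func_timeout(0.5, _slow_add, args=(1, 2))
    except FunctionTimedOut:
        print('_slow_add timed out as expected')

    # Decorator form; forceTimeout overrides the default because allowOverride=True.
    @func_set_timeout(0.5, allowOverride=True)
    def _slow_echo(msg, delay=0.1):
        time.sleep(delay)
        return msg

    print(_slow_echo('fast enough'))                                   # completes within 0.5s
    print(_slow_echo('given more time', delay=1.0, forceTimeout=2.0))  # overridden limit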
/ruamel.yaml.cmd-0.6.5-py3-none-any.whl/ruamel/yaml/cmd/__main__.py
import argparse
import importlib
import sys
import typing
from . import __version__
class HelpFormatter(argparse.RawDescriptionHelpFormatter):
def __init__(self, *args: typing.Any, **kw: typing.Any):
kw['max_help_position'] = 40
super().__init__(*args, **kw)
def _fill_text(self, text: str, width: int, indent: str) -> str:
import textwrap
paragraphs = []
for paragraph in text.splitlines():
paragraphs.append(textwrap.fill(paragraph, width,
initial_indent=indent,
subsequent_indent=indent))
return '\n'.join(paragraphs)
class ArgumentParser(argparse.ArgumentParser):
def __init__(self, *args: typing.Any, **kw: typing.Any):
kw['formatter_class'] = HelpFormatter
super().__init__(*args, **kw)
class DefaultVal(str):
def __init__(self, val: typing.Any):
self.val = val
def __str__(self) -> str:
return str(self.val)
class CountAction(argparse.Action):
def __call__(
self,
parser: typing.Any,
namespace: argparse.Namespace,
values: typing.Union[str, typing.Sequence[str], None],
option_string: typing.Optional[str] = None,
) -> None:
if self.const is None:
self.const = 1
try:
val = getattr(namespace, self.dest) + self.const
except TypeError: # probably None
val = self.const
setattr(namespace, self.dest, val)
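# Note (assumption, illustrative only): CountAction mirrors argparse's built-in
# 'count' action but also tolerates a non-int DefaultVal default via the
# TypeError branch above, so repeating a flag such as -v -v accumulates to 2.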
def main(cmdarg: typing.Optional[typing.List[str]]=None) -> int:
cmdarg = sys.argv if cmdarg is None else cmdarg
parsers = []
parsers.append(ArgumentParser())
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), dest='_gl_verbose', metavar='VERBOSE', nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=None, dest='_gl_indent', metavar='IND', help='set indent level (default: auto)', action='store')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)', action='store')
parsers[-1].add_argument('--flow', default=None, dest='_gl_flow', action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=None, dest='_gl_semi', action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=None, dest='_gl_literal', action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=None, dest='_gl_write', action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=None, dest='_gl_output', metavar='OUT', help='write to file %(metavar)s instead of stdout', action='store')
parsers[-1].add_argument('--smart-string', default=None, dest='_gl_smart_string', action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
parsers[-1].add_argument('--version', action='store_true', help='show program\'s version number and exit')
subp = parsers[-1].add_subparsers()
px = subp.add_parser('rt', aliases=['round-trip'], description='round trip on YAML document, test if first or second round stabilizes document', help='test round trip on YAML document')
px.set_defaults(subparser_func='rt')
parsers.append(px)
parsers[-1].add_argument('--save', action='store_true', help="save the rewritten data back\n to the input file (if it doesn't exist a '.orig' backup will be made)\n ")
parsers[-1].add_argument('--width', default=80, metavar='W', type=int, help='set width of output (default: %(default)s)')
parsers[-1].add_argument('file', nargs='+')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('me', aliases=['merge-expand'], description='expand merges in input file to output file', help='expand merges in input file to output file')
px.set_defaults(subparser_func='me')
parsers.append(px)
parsers[-1].add_argument('--allow-anchors', action='store_true', help='allow "normal" anchors/aliases in output')
parsers[-1].add_argument('file', nargs=2)
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('json', aliases=['from-json'], description='convert JSON to block-style YAML', help='convert JSON to block-style YAML')
px.set_defaults(subparser_func='json')
parsers.append(px)
parsers[-1].add_argument('--width', default=80, metavar='W', type=int, help='set width of output (default: %(default)s)')
parsers[-1].add_argument('--mozlz4', action='store_true', help='decode mozilla lz4')
parsers[-1].add_argument('file', nargs='+')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('ini', aliases=['from-ini'], description='convert .ini/config file to block YAML', help='convert .ini/config to block YAML')
px.set_defaults(subparser_func='ini')
parsers.append(px)
parsers[-1].add_argument('--basename', '-b', action='store_true', help='re-use basename of .ini file for .yaml file, instead of writing to stdout')
parsers[-1].add_argument('--test', action='store_true')
parsers[-1].add_argument('file')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('pon', aliases=['from-pon'], description='convert .pon config file to block YAML', help='convert .pon config file to block YAML')
px.set_defaults(subparser_func='pon')
parsers.append(px)
parsers[-1].add_argument('file', nargs='+')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('htmltable', description='convert YAML to HTML tables. If the hierarchy is two levels deep\n(sequence/mapping over sequence/mapping) this is mapped to one table.\nIf the hierarchy is three levels deep, a list of two-level tables is assumed, but\nany non-list/mapping second-level items are considered text.\nRow-level keys are inserted in the first column (unless --no-row-key),\nitem-level keys are used as classes for the TD.\n', help='convert YAML to HTML tables')
px.set_defaults(subparser_func='htmltable')
parsers.append(px)
parsers[-1].add_argument('--level', action='store_true', help='print # levels and exit')
parsers[-1].add_argument('--check')
parsers[-1].add_argument('file')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('from-html', description='convert HTML to YAML. Tags become keys with as\nvalue a list. The first item in the list is a key value pair with\nkey ".attribute" if attributes are available followed by tag and string\nsegment items. Lists with one item are by default flattened.\n', help='convert HTML to YAML')
px.set_defaults(subparser_func='from_html')
parsers.append(px)
parsers[-1].add_argument('--no-body', action='store_true', help='drop top level html and body from HTML code segments')
parsers[-1].add_argument('--strip', action='store_true', help='strip whitespace surrounding strings')
parsers[-1].add_argument('file')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('from-csv', aliases=['csv'], description='convert CSV to YAML.\nBy default generates a sequence of rows, with the items in a 2nd level\nsequence.\n', help='convert CSV to YAML')
px.set_defaults(subparser_func='from_csv')
parsers.append(px)
parsers[-1].add_argument('--mapping', '-m', action='store_true', help='generate sequence of mappings with first line as keys')
parsers[-1].add_argument('--delimeter', default=",", metavar='DELIM', help='field delimiter (default %(default)s)')
parsers[-1].add_argument('--strip', action='store_true', help='strip leading & trailing spaces from strings')
parsers[-1].add_argument('--no-process', dest='process', action='store_false', help='do not try to convert elements into int/float/bool/datetime')
parsers[-1].add_argument('file')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('from-dirs', aliases=['fromdirs'], description='Combine multiple YAML files into one.\nPath chunks (directories) are converted to mapping entries; the YAML contents become\nthe value of the (last) key. If there are multiple files in one directory, the\nfilenames are used as well (or specify --use-file-names).\n', help='combine multiple YAML files into one')
px.set_defaults(subparser_func='from_dirs')
parsers.append(px)
parsers[-1].add_argument('--use-file-names', action='store_true')
parsers[-1].add_argument('--sequence', action='store_true', help='no paths, each YAML content is made an element of a root level sequence')
parsers[-1].add_argument('file', nargs='+', help='full path names (a/b/data.yaml)')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('pickle', aliases=['from-pickle', 'frompickle'], description='Load Python pickle file(s) and dump as YAML\n', help='convert Python pickle file(s) to YAML')
px.set_defaults(subparser_func='pickle')
parsers.append(px)
parsers[-1].add_argument('--create-to-yaml', action='store_true', help='create a tagged to_yaml method even if available')
parsers[-1].add_argument('file', nargs='*')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('mapping', aliases=['map'], help='create new YAML file with at root a mapping with key and file content')
px.set_defaults(subparser_func='mapping')
parsers.append(px)
parsers[-1].add_argument('key', help='key of the new root-level mapping')
parsers[-1].add_argument('file', help='file with YAML content that will be value for key')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('add', help='add a value to a path in the data structure loaded from YAML', description='Add a value to a path in the data structure loaded from YAML. Values are resolved like in YAML; use --str if necessary. The value is the last args token. The "path" in the data structure is taken from all other args, interpreting numerical values as indices in list/seq.\nE.g.:\n yaml add --parents --value Windows test.yaml computers os type\n yaml add --file test.yaml computers os secure false\n yaml add --str test.yaml computers.os.year 2019\n')
px.set_defaults(subparser_func='add')
parsers.append(px)
parsers[-1].add_argument('--parents', action='store_true', help='create parents if necessary')
parsers[-1].add_argument('--item', action='store_true', help='create item')
parsers[-1].add_argument('--key', action='store_true', help='create key, even if not found in siblings of item')
parsers[-1].add_argument('--str', action='store_true', help='store value as string')
parsers[-1].add_argument('--file', help='use FILE instead of first argument as YAML file')
parsers[-1].add_argument('--value', help='use VALUE instead of the last argument as the value to add')
parsers[-1].add_argument('--sep', help='set separator for splitting single element path')
parsers[-1].add_argument('args', nargs='*', help='[file] path in yaml/path.in.yaml [value]')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('sort', description='Load the file, check if path leads to a mapping, sort by key\nand write back. No path -> work on root of data structure.\nFile is not written if mapping is already in sorted order.\n', help='sort the keys of a mapping in a YAML file')
px.set_defaults(subparser_func='sort')
parsers.append(px)
parsers[-1].add_argument('--file', help='use FILE instead of first argument as YAML file')
parsers[-1].add_argument('args', nargs='*', help='[file] [path in yaml/path.in.yaml]')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('edit', help='Edit a YAML document, save over the original only when loadable', description='Edits a copy of the file argument and only updates the file when the copy is loadable YAML. If the copy is not parseable it is not removed after exiting the editor, and it is reused (if not older than the original file) to continue editing.\nThe copy is named .ye.<filename>\n')
px.set_defaults(subparser_func='edit')
parsers.append(px)
parsers[-1].add_argument('file', help='file to edit using $EDITOR')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('tokens', help='show tokens')
px.set_defaults(subparser_func='tokens')
parsers.append(px)
parsers[-1].add_argument('file', help='file to edit using $EDITOR')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('events', help='show events')
px.set_defaults(subparser_func='events')
parsers.append(px)
parsers[-1].add_argument('file', help='file to edit using $EDITOR')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('generate', description='generate a file filled with random YAML until it reaches size\n', help='generate a file filled with random YAML until it reaches size\n')
px.set_defaults(subparser_func='generate')
parsers.append(px)
parsers[-1].add_argument('--size', default=10, help='size in Kb')
parsers[-1].add_argument('--levels', help='levels in file (e.g. sm_s1m) ')
parsers[-1].add_argument('file', help='name of the file to generate')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
px = subp.add_parser('analyse')
px.set_defaults(subparser_func='analyse')
parsers.append(px)
parsers[-1].add_argument('--typ', help='YAML typ to create')
parsers[-1].add_argument('--pure', action='store_true', help='create pure YAML instance')
parsers[-1].add_argument('file', help='name of the file to load')
parsers[-1].add_argument('--verbose', '-v', default=DefaultVal(0), nargs=0, help='increase verbosity level', action=CountAction, const=1)
parsers[-1].add_argument('--indent', default=DefaultVal(None), metavar='IND', help='set indent level (default: auto)')
parsers[-1].add_argument('--bsi', dest='block_seq_indent', metavar='BLOCK_SEQ_IND', type=int, help='set block sequence indent level (default: auto)')
parsers[-1].add_argument('--flow', default=DefaultVal(False), action='store_true', help='use flow-style YAML instead of block style')
parsers[-1].add_argument('--semi', default=DefaultVal(False), action='store_true', help='write block style YAML except for "leaf" mapping/dict')
parsers[-1].add_argument('--literal', default=DefaultVal(False), action='store_true', help='convert scalars with newlines to literal block style')
parsers[-1].add_argument('--write', '-w', default=DefaultVal(False), action='store_true', help='write individual .yaml files (reusing basename), instead of stdout')
parsers[-1].add_argument('--output', '-o', default=DefaultVal(None), metavar='OUT', help='write to file %(metavar)s instead of stdout')
parsers[-1].add_argument('--smart-string', default=DefaultVal(False), action='store_true', help='set literal block style on strings with \\n otherwise plain if possible')
parsers.pop()
if '--version' in cmdarg[1:]:
if '-v' in cmdarg[1:] or '--verbose' in cmdarg[1:]:
return list_versions(pkg_name='ruamel.yaml.cmd', version=None, pkgs=['configobj', 'ruamel.yaml.convert', 'ruamel.yaml', 'ruamel.yaml.base', 'lz4'])
print(__version__)
return 0
if '--help-all' in cmdarg[1:]:
try:
parsers[0].parse_args(['--help'])
except SystemExit:
pass
for sc in parsers[1:]:
print('-' * 72)
try:
parsers[0].parse_args([sc.prog.split()[1], '--help'])
except SystemExit:
pass
sys.exit(0)
args = parsers[0].parse_args(args=cmdarg[1:])
for gl in ['verbose', 'indent', 'flow', 'semi', 'literal', 'write', 'output', 'smart_string']:
glv = getattr(args, '_gl_' + gl, None)
if isinstance(getattr(args, gl, None), (DefaultVal, type(None))) and glv is not None:
setattr(args, gl, glv)
delattr(args, '_gl_' + gl)
if isinstance(getattr(args, gl, None), DefaultVal):
setattr(args, gl, getattr(args, gl).val)
cls = getattr(importlib.import_module('ruamel.yaml.cmd.yaml_cmd'), 'YAMLCommand')
obj = cls(args)
funcname = getattr(args, 'subparser_func', None)
if funcname is None:
parsers[0].parse_args(['--help'])
fun = getattr(obj, funcname + '_subcommand', None)
if fun is None:
fun = getattr(obj, funcname)
ret_val = fun()
if ret_val is None:
return 0
if isinstance(ret_val, int):
return ret_val
return -1
def list_versions(pkg_name: str, version: typing.Union[str, None], pkgs: typing.Sequence[str]) -> int:
version_data = [
('Python', '{v.major}.{v.minor}.{v.micro}'.format(v=sys.version_info)),
(pkg_name, __version__ if version is None else version),
]
for pkg in pkgs:
try:
version_data.append(
(pkg, getattr(importlib.import_module(pkg), '__version__', '--'))
)
except ModuleNotFoundError:
version_data.append((pkg, 'NA'))
except KeyError:
pass
longest = max([len(x[0]) for x in version_data]) + 1
for pkg, ver in version_data:
print('{:{}s} {}'.format(pkg + ':', longest, ver))
return 0
if __name__ == '__main__':
sys.exit(main())
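# Hedged usage sketch (not part of the generated parser code above): main()
# takes a full argv-style list, where the first element is the program name
# and the rest is parsed; the file names below are placeholders and the
# import location is an assumption.
#
#   from ruamel.yaml.cmd import main
#   main(['yaml', 'rt', '--save', 'input.yaml'])          # round-trip and save
#   main(['yaml', 'json', '-o', 'out.yaml', 'in.json'])   # JSON -> block YAML
#   main(['yaml', '--version'])                           # print the version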
/hetdex_api-0.8.7.tar.gz/hetdex_api-0.8.7/hetdex_api/sqlite_utils.py
import sqlite3
from sqlite3 import Error
import numpy as np
import os.path as op
FILENAME_PREFIX = "elixer_reports_" #keep the trailing underscore
REPORT_TYPES = ["report","nei","mini"]
try:
from hetdex_api.config import HDRconfig
except:
# print("Warning! Cannot find or import HDRconfig from hetdex_api!!")
pass
#key is the HDR version number, value is a list of directories that contain ELiXer imaging databases
#Base paths
# xx0 = standard hetdex
# xx6 = broad emission lines (still in with the xx0 detections as of hdr2.1)
# xx9 = continuum sources
DICT_DB_PATHS = {10: ["/work/03946/hetdex/hdr1/detect/image_db"
],
20: ["/scratch/03261/polonius/hdr2/detect/image_db",
"/work/03261/polonius/hdr2/detect/image_db",
"/work/03946/hetdex/hdr2/detect/image_db"
],
21: ["/scratch/03946/hetdex/hdr2.1/detect/image_db",
"/scratch/03261/polonius/hdr2.1/detect/image_db",
"/work/03946/hetdex/hdr2.1/detect/image_db",
"/work/03261/polonius/hdr2.1/detect/image_db"
],
}
#
# add paths from hetdex_api to search (place in first position)
#
for v in DICT_DB_PATHS.keys():
try:
release_number = v/10.0
if v % 10 == 0:
release_string = "hdr{:d}".format(int(release_number))
else:
release_string = "hdr{:2.1f}".format(release_number)
DICT_DB_PATHS[v].insert(0,op.join(HDRconfig(survey=release_string).elix_dir))
except:# Exception as e:
#print(e)
continue
def get_elixer_report_db_path(detectid,report_type="report"):
"""
Return the top (first found) path to database file based on the detectid (assumes the HDR version is part of the
prefix, i.e. HDR1 files are 1000*, HDR2 are 2000*, and so on)
:param detectid:
:param report_type: choose one of "report" (normal ELiXer report image) [default]
"nei" (ELiXer neighborhood image)
"mini" (ELiXer mini-report image for phone app)
:return: None or database filename
"""
detect_prefix = None
db_path = None
try:
detect_prefix = int(np.int64(detectid) / 1e5)
hdr_prefix = int(np.int64(detectid)/1e8)
#keep the leading underscore
if report_type == "report":
ext = ""
elif report_type == "nei":
ext = "_nei"
elif report_type == "mini":
ext = "_mini"
else: #assume same as report
ext = ""
if detect_prefix is not None:
if hdr_prefix in DICT_DB_PATHS.keys():
paths = DICT_DB_PATHS[hdr_prefix]
for p in paths:
if op.exists(p):
fqfn = op.join(p, FILENAME_PREFIX + str(detect_prefix) + ext + ".db")
if op.isfile(fqfn):
db_path = fqfn
break
else:
#print("Invalid HDR version")
return None
except Error as e:
print(e)
return db_path
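# Hedged usage sketch: the detectid below is a made-up HDR2.1-style value
# (the leading "21" selects the HDR2.1 path list, the leading "21001" selects
# an elixer_reports_21001*.db file); None is returned when no db file is found.
#
#   db_path = get_elixer_report_db_path(2100100000, report_type="report")
#   nei_path = get_elixer_report_db_path(2100100000, report_type="nei")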
def get_db_connection(fn,readonly=True):
"""
Return a SQLite3 database connection object for the provided database filename.
Assumes the file exists (will trap the exception and return None if not).
:param fn:
:return: None or connection object
"""
conn = None
try:
if fn is not None:
if readonly:
conn = sqlite3.connect("file:" +fn + "?mode=ro",uri=True)
else:
conn = sqlite3.connect(fn)
except Error as e:
print(e)
return conn
def fetch_elixer_report_image(conn,detectid):
"""
Return a single image. The image type (png, jpg) and report type (report, neighborhood, mini) depend on the database connection.
:param conn: a sqlite3.Connection object or a path to a database
:param detectid: HETDEX detectid (int64 or string)
:return: None or single image
"""
try:
keep_conn_open = True
if type(conn) != sqlite3.Connection:
#could be a file
if op.isfile(conn):
conn = get_db_connection(conn,readonly=True)
if type(conn) != sqlite3.Connection:
print("Invalid databse connection.")
return None
keep_conn_open = False
else:
print("Invalid database connection.")
return None
cursor = conn.cursor()
sql_read_blob = """SELECT image from report where detectid = ?"""
cursor.execute(sql_read_blob, (str(detectid),))
image = cursor.fetchall()
#fetchall returns a list of tuples (each list entry is one row, the tuple is that row);
#detectid is unique, so there should be one row or none, and its only column is the image
cursor.close()
if not keep_conn_open:
conn.close()
if image is not None:
if len(image) == 1:
return image[0][0]
elif len(image) == 0:
print("No matching detectid found")
return None
else:
print("Unexpected number of images returned")
return None
else:
print("None returned from image fetch.")
return None
except Error as e:
print(e)
return None
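# Hedged usage sketch: conn may be an open sqlite3.Connection or a path to a
# .db file (e.g. the db_path from get_elixer_report_db_path above); the return
# value is the raw image blob (bytes) or None. The detectid is a placeholder.
#
#   img = fetch_elixer_report_image(db_path, 2100100000)
#   if img is not None:
#       with open("2100100000.png", "wb") as f:
#           f.write(img)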
def build_elixer_report_image_db(db_name,img_dir,img_regex):
"""
Not for general use. Should normally be called once per image type per grouping of (100,000) per
data release.
If db already exists, this will insert new images and replace existing images (if detectIDs match)
This DOES NOT validate the images or check that they are appropriate for the db_name.
Progress is reported per 100 inserts
Insert speed does not depend on database size, but does depend on disk speed (local vs network, SSD vs HD, etc)
:param db_name: name (with path) of the SQLite output db
:param img_dir: location of all images to be imported (is NOT recursive)
:param img_regex: generally simple wildcard, like "*.png" or "*nei.png" or "*.jpg", etc
:return:
"""
import glob
import re
import time
# import gzip #only get about 5% compression on pngs at much higher cpu cost
import datetime
def build_schema(conn):
# build up sql commands to issue
try:
sql_create_report_image_table = """ CREATE TABLE IF NOT EXISTS report (
detectid BIGINT PRIMARY KEY,
image BLOB NOT NULL
); """
# create report table
cursor = conn.cursor()
cursor.execute(sql_create_report_image_table)
cursor.close()
conn.commit()
#create index: not necessary; it is autocreated with the BIGINT primary key
# sql_create_index = """ CREATE UNIQUE INDEX idx_detectid ON report (detectid); """
# cursor = conn.cursor(sql_create_index)
# cursor.execute()
# cursor.close()
# conn.commit()
return True
except Exception as e:
print(e)
return False
def read_image(fn):
blob_data = None
detectid = None
try: #assume name like <detectid><optional chars>.png
detectid = int(re.findall(r'\d+', op.basename(fn).split(".")[0])[0])
with open(fn, 'rb') as file:
blob_data = file.read()
except Exception as e:
print("Exception in read_image (bad detectid):", fn, e)
return blob_data, detectid
def insert_image(conn, detectid, data):
if (detectid is None) or (data is None):
return
try:
cursor = conn.cursor()
sql_insert_blob = """ INSERT INTO report
(detectid, image) VALUES (?, ?)"""
try:
cursor.execute(sql_insert_blob, (detectid, data))
conn.commit()
except Exception as e:
if type(e) == sqlite3.IntegrityError:
# could be image already exists, in which case overwrite
try:
sql_update_blob = """ UPDATE report
SET image = ? WHERE detectid = ?"""
cursor.execute(sql_update_blob, (data, detectid))
conn.commit()
except Exception as e:
print("second exception in insert_image:", detectid, e)
else:
print("exception in insert_image:", detectid, e)
cursor.close()
except Exception as e:
print(e)
try:
cursor.close()
except:
pass
def import_images_from_path(conn,img_dir,img_regex):
ct = 0
total_inserts = 0
estimated_total = 0
modulo = 100 #print a status statement every modulo inserts
filelist = glob.glob(op.join(img_dir, img_regex)) # all files in img_dir matching img_regex (not recursive)
estimated_total = len(filelist)
print(f"Inserting {estimated_total} images ... ")
if estimated_total < 1:
return #nothing to do
start_time = int(round(time.time() * 1000))
for f in filelist:
try:
blob, detectid = read_image(f)
insert_image(conn, detectid, blob)
ct += 1
except Exception as e:
print("exception in import_images_from_path:", e)
if ct >= modulo:
try:
time_diff = int(round(time.time() * 1000)) - start_time
total_inserts += ct
per_insert_time = time_diff / ct / 1000.
print(f"{db_name}: Inserted {ct} ({per_insert_time:#0.3f}s per insert). Total {total_inserts}/{estimated_total} "
f"({float(total_inserts / estimated_total) * 100.:#0.1f}%). "
f"Remaining time ({datetime.timedelta(seconds=round(per_insert_time * (estimated_total - total_inserts)))}) ...")
start_time = int(round(time.time() * 1000)) # reset the timer
ct = 0
except:
print("print progress failed....")
#final log (remainder after the last block of inserts)
try:
time_diff = int(round(time.time() * 1000)) - start_time
total_inserts += ct
per_insert_time = time_diff / ct / 1000.
print(f"Inserted {ct} ({per_insert_time:#0.3f}s per insert). Total {total_inserts}/{estimated_total} "
f"({float(total_inserts / estimated_total) * 100.:#0.1f}%). ")
except:
print("print progress failed....")
#
# main execution part
#
try:
if not op.isfile(db_name):
conn = sqlite3.connect(db_name) #not read only
if type(conn) != sqlite3.Connection:
print("Failed to create db connection")
return False
elif not build_schema(conn):
print("Failed to build schema")
return False
else:
conn = get_db_connection(db_name,readonly=False)
if type(conn) != sqlite3.Connection:
print("Failed to create db connection")
return False
import_images_from_path(conn,img_dir,img_regex)
except Exception as e:
print(e)
# 20200529 DD
# will revisit later if this idea becomes useful
#
# def build_local_imaging_dbs():
# """
# super simplified for testing
# build the imaging databases using default settings in the current directory
# :return:
# """
# import glob
# #get the list of images and report types
# rpt_min, rpt_max = None, None
# rpt_list = sorted(glob.glob("*[0-9].png"))
# if (rpt_list is not None) and (len(rpt_list) > 0):
# rpt_min = int(rpt_list[0].rstrip(".png"))
# rpt_max = int(rpt_list[-1].rstrip(".png"))
#
# nei_min, nei_max = None, None
# nei_list = sorted(glob.glob("*[0-9]*nei.png"))
# if (nei_list is not None) and (len(nei_list) > 0):
# nei_min = rpt_list[0].rstrip("nei.png")
# nei_max = rpt_list[-1].rstrip("nei.png")
# nei_min = int(nei_min.replace("_","")) #might not have an "_"
# nei_max = int(nei_max.replace("_", ""))
#
# mini_min, mini_max = None, None
# mini_list = sorted(glob.glob("*[0-9]*mini.png"))
# if (mini_list is not None) and (len(mini_list) > 0):
# mini_min = rpt_list[0].rstrip("mini.png")
# mini_max = rpt_list[-1].rstrip("mini.png")
# mini_min = int(mini_min.replace("_","")) #might not have an "_"
# mini_max = int(mini_max.replace("_", ""))
#
# #organize by
#
class ConnMgr():
"""
Primitive container for managing SQLite connection (to avoid repeated path search and connection building)
Just for reading
"""
def __init__(self):
self.conn_dict = {} #key = detectid_prefix + report_type (i.e. "10003" or "10004nei" or "20007mini")
def __del__(self):
self.close_conns()
def get_connection(self,detectid,report_type="report"):
conn = None
try:
if report_type not in REPORT_TYPES:
return None
detect_prefix = int(np.int64(detectid) / 1e5)
dkey = str(detect_prefix)+report_type
if dkey in self.conn_dict.keys():
conn = self.conn_dict[dkey]
else:
try:
#all ConnMgr connections are read-only (uri=True)
conn = get_db_connection(get_elixer_report_db_path(detectid,report_type),readonly=True)
if type(conn) != sqlite3.Connection:
conn = None
else:
self.conn_dict[dkey] = conn
except Exception as e:
print(e)
except Exception as e:
print(e)
return conn
def close_conns(self):
for key in self.conn_dict.keys():
try:
self.conn_dict[key].close()
except:
pass
self.conn_dict.clear()
def fetch_image(self,detectid,report_type="report"):
"""
wrapper just to make code cleaner
:param detectid:
:param report_type:
:return:
"""
img = None
try:
conn = self.get_connection(detectid,report_type)
if type(conn) == sqlite3.Connection:
img = fetch_elixer_report_image(conn,detectid)
except Exception as e:
print(e)
raise
return img
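# Hedged usage sketch for ConnMgr: read-only connections are cached per
# detectid-prefix/report-type and reused across calls; the detectid below is
# a placeholder.
#
#   mgr = ConnMgr()
#   img = mgr.fetch_image(2100100000, report_type="report")
#   mini = mgr.fetch_image(2100100000, report_type="mini")
#   mgr.close_conns()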
/defi_protocols-0.0.9.tar.gz/defi_protocols-0.0.9/defi_protocols/Bancor.py
from defi_protocols.functions import *
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# LITERALS
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Contracts for calling liquidity pools and underlying tokens
BANCOR_NETWORK_ADDRESS = '0xeEF417e1D5CC832e619ae18D2F140De2999dD4fB'
BANCOR_NETWORK_INFO_ADDRESS = '0x8E303D296851B320e6a697bAcB979d13c9D6E760'
BNT_TOKEN = '0x1F573D6Fb3F13d689FF844B4cE37794d79a7FF1C'
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ABIs
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Network ABI - liquidityPools
ABI_NETWORK = '[{"inputs":[],"name":"liquidityPools","outputs":[{"internalType":"contract Token[]","name":"","type":"address[]"}],"stateMutability":"view","type":"function"}]'
# NetworkInfo ABI - poolToken, withdrawalAmounts
ABI_NETWORK_INFO = '[{"inputs":[{"internalType":"contract Token","name":"pool","type":"address"}],"name":"poolToken","outputs":[{"internalType":"contract IPoolToken","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"contract Token","name":"pool","type":"address"},{"internalType":"uint256","name":"poolTokenAmount","type":"uint256"}],"name":"withdrawalAmounts","outputs":[{"components":[{"internalType":"uint256","name":"totalAmount","type":"uint256"},{"internalType":"uint256","name":"baseTokenAmount","type":"uint256"},{"internalType":"uint256","name":"bntAmount","type":"uint256"}],"internalType":"struct WithdrawalAmounts","name":"","type":"tuple"}],"stateMutability":"view","type":"function"}]'
# ABI of the pools - balanceOf, reserveToken
ABI_POOL = '[{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"reserveToken","outputs":[{"internalType":"contract Token","name":"","type":"address"}],"stateMutability":"view","type":"function"}]'
def underlying(token_address: str, wallet: str, block: int, blockchain: str, web3=None, execution=1, index=0, decimals=True, reward=False) -> list:
# If the number of executions is greater than the MAX_EXECUTIONS variable -> returns None and halts
if execution > MAX_EXECUTIONS:
return None
balances = []
try:
if web3 is None:
web3 = get_node(blockchain, block=block, index=index)
wallet = web3.toChecksumAddress(wallet)
bancor_poolcontract = get_contract(token_address, blockchain, web3=web3, abi=ABI_POOL, block=block)
balance = bancor_poolcontract.functions.balanceOf(wallet).call(block_identifier=block)
reserve_token = bancor_poolcontract.functions.reserveToken().call()
pooltokens_contract = get_contract(BANCOR_NETWORK_INFO_ADDRESS, blockchain, web3=web3, abi=ABI_NETWORK_INFO, block=block)
bancor_pool = pooltokens_contract.functions.withdrawalAmounts(reserve_token,balance).call()
if balance != 0:
if decimals is True:
decimals0 = get_decimals(reserve_token, blockchain, web3=web3)
decimals1 = get_decimals(BNT_TOKEN, blockchain, web3=web3)
amount0 = bancor_pool[1] / 10 ** decimals0
amount1 = bancor_pool[2] / 10 ** decimals1
else:
amount0 = bancor_pool[1]  # baseTokenAmount (raw), matching the decimals branch above
amount1 = bancor_pool[2]
balances.append([reserve_token, amount0])
balances.append([BNT_TOKEN, amount1])
return balances
except GetNodeIndexError:
return underlying(token_address, wallet, block, blockchain, reward=reward, decimals=decimals, index=0, execution=execution + 1)
except:
return underlying(token_address, wallet, block, blockchain, reward=reward, decimals=decimals, index=index + 1, execution=execution)
def underlying_all(wallet: str, block: int, blockchain: str, web3=None, execution=1, index=0, decimals=True, reward=False) -> list:
"""
:param wallet:
:param block:
:param blockchain:
:param web3:
:param execution:
:param index:
:param decimals:
:param reward:
:return:
"""
# If the number of executions is greater than the MAX_EXECUTIONS variable -> returns None and halts
if execution > MAX_EXECUTIONS:
return None
balances = []
try:
if web3 is None:
web3 = get_node(blockchain, block=block, index=index)
wallet = web3.toChecksumAddress(wallet)
liquiditypools_contract = get_contract(BANCOR_NETWORK_ADDRESS, blockchain, web3=web3, abi=ABI_NETWORK, block=block)
liquidity_pools = liquiditypools_contract.functions.liquidityPools().call()
network_info_address = get_contract(BANCOR_NETWORK_INFO_ADDRESS, blockchain, web3=web3, abi=ABI_NETWORK_INFO, block=block)
for pool in liquidity_pools:
bn_token = network_info_address.functions.poolToken(pool).call()  # pool token for this liquidity pool (poolToken is the lookup exposed by ABI_NETWORK_INFO)
balance = underlying(bn_token, wallet, block, blockchain, web3, execution, index, decimals, reward)
balances.append(balance)
return balances
except GetNodeIndexError:
return underlying_all(wallet, block, blockchain, reward=reward, decimals=decimals, index=0, execution=execution + 1)
except:
return underlying_all(wallet, block, blockchain, reward=reward, decimals=decimals, index=index + 1, execution=execution)
#to test
#wallet='0x849d52316331967b6ff1198e5e32a0eb168d039d'
#token_address = '0x36FAbE4cAeF8c190550b6f93c306A5644E7dCef6'
#bancor = underlying(token_address, wallet,'latest',ETHEREUM)
#print(bancor)
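#to test underlying_all (hedged sketch: the wallet address is reused from the example above,
#and the call iterates over every Bancor v3 liquidity pool, so it issues many node requests)
#wallet = '0x849d52316331967b6ff1198e5e32a0eb168d039d'
#balances = underlying_all(wallet, 'latest', ETHEREUM)
#print(balances)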
/zarenacord-2.0.0.tar.gz/zarenacord-2.0.0/discord/errors.py
from __future__ import annotations
from typing import Dict, List, Optional, TYPE_CHECKING, Any, Tuple, Union
if TYPE_CHECKING:
from aiohttp import ClientResponse, ClientWebSocketResponse
try:
from requests import Response
_ResponseType = Union[ClientResponse, Response]
except ModuleNotFoundError:
_ResponseType = ClientResponse
from .interactions import Interaction
__all__ = (
'DiscordException',
'ClientException',
'NoMoreItems',
'GatewayNotFound',
'HTTPException',
'Forbidden',
'NotFound',
'DiscordServerError',
'InvalidData',
'InvalidArgument',
'LoginFailure',
'ConnectionClosed',
'PrivilegedIntentsRequired',
'InteractionResponded',
)
class DiscordException(Exception):
"""Base exception class for discord.py
Ideally speaking, this could be caught to handle any exceptions raised from this library.
"""
pass
class ClientException(DiscordException):
"""Exception that's raised when an operation in the :class:`Client` fails.
These are usually for exceptions that happened due to user input.
"""
pass
class NoMoreItems(DiscordException):
"""Exception that is raised when an async iteration operation has no more items."""
pass
class GatewayNotFound(DiscordException):
"""An exception that is raised when the gateway for Discord could not be found"""
def __init__(self):
message = 'The gateway to connect to discord was not found.'
super().__init__(message)
def _flatten_error_dict(d: Dict[str, Any], key: str = '') -> Dict[str, str]:
items: List[Tuple[str, str]] = []
for k, v in d.items():
new_key = key + '.' + k if key else k
if isinstance(v, dict):
try:
_errors: List[Dict[str, Any]] = v['_errors']
except KeyError:
items.extend(_flatten_error_dict(v, new_key).items())
else:
items.append((new_key, ' '.join(x.get('message', '') for x in _errors)))
else:
items.append((new_key, v))
return dict(items)
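# Illustrative sketch (not part of the library API): _flatten_error_dict turns
# the nested error payload attached to an HTTP error response into dotted keys
# mapped to the joined '_errors' messages, e.g.
#
#   _flatten_error_dict({
#       'content': {'_errors': [{'code': 'BASE_TYPE_REQUIRED',
#                                'message': 'This field is required'}]},
#       'embed': {'fields': {'0': {'name': {'_errors': [
#           {'message': 'Must be 256 or fewer in length.'}]}}}},
#   })
#   # -> {'content': 'This field is required',
#   #     'embed.fields.0.name': 'Must be 256 or fewer in length.'}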
class HTTPException(DiscordException):
"""Exception that's raised when an HTTP request operation fails.
Attributes
------------
response: :class:`aiohttp.ClientResponse`
The response of the failed HTTP request. This is an
instance of :class:`aiohttp.ClientResponse`. In some cases
this could also be a :class:`requests.Response`.
text: :class:`str`
The text of the error. Could be an empty string.
status: :class:`int`
The status code of the HTTP request.
code: :class:`int`
The Discord specific error code for the failure.
"""
def __init__(self, response: _ResponseType, message: Optional[Union[str, Dict[str, Any]]]):
self.response: _ResponseType = response
self.status: int = response.status # type: ignore
self.code: int
self.text: str
if isinstance(message, dict):
self.code = message.get('code', 0)
base = message.get('message', '')
errors = message.get('errors')
if errors:
errors = _flatten_error_dict(errors)
helpful = '\n'.join('In %s: %s' % t for t in errors.items())
self.text = base + '\n' + helpful
else:
self.text = base
else:
self.text = message or ''
self.code = 0
fmt = '{0.status} {0.reason} (error code: {1})'
if len(self.text):
fmt += ': {2}'
super().__init__(fmt.format(self.response, self.code, self.text))
class Forbidden(HTTPException):
"""Exception that's raised for when status code 403 occurs.
Subclass of :exc:`HTTPException`
"""
pass
class NotFound(HTTPException):
"""Exception that's raised for when status code 404 occurs.
Subclass of :exc:`HTTPException`
"""
pass
class DiscordServerError(HTTPException):
"""Exception that's raised for when a 500 range status code occurs.
Subclass of :exc:`HTTPException`.
.. versionadded:: 1.5
"""
pass
class InvalidData(ClientException):
"""Exception that's raised when the library encounters unknown
or invalid data from Discord.
"""
pass
class InvalidArgument(ClientException):
"""Exception that's raised when an argument to a function
is invalid some way (e.g. wrong value or wrong type).
This could be considered the analogous of ``ValueError`` and
``TypeError`` except inherited from :exc:`ClientException` and thus
:exc:`DiscordException`.
"""
pass
class LoginFailure(ClientException):
"""Exception that's raised when the :meth:`Client.login` function
fails to log you in from improper credentials or some other misc.
failure.
"""
pass
class ConnectionClosed(ClientException):
"""Exception that's raised when the gateway connection is
closed for reasons that could not be handled internally.
Attributes
-----------
code: :class:`int`
The close code of the websocket.
reason: :class:`str`
The reason provided for the closure.
shard_id: Optional[:class:`int`]
The shard ID that got closed if applicable.
"""
def __init__(self, socket: ClientWebSocketResponse, *, shard_id: Optional[int], code: Optional[int] = None):
# This exception is just the same exception except
# reconfigured to subclass ClientException for users
self.code: int = code or socket.close_code or -1
# aiohttp doesn't seem to consistently provide close reason
self.reason: str = ''
self.shard_id: Optional[int] = shard_id
super().__init__(f'Shard ID {self.shard_id} WebSocket closed with {self.code}')
class PrivilegedIntentsRequired(ClientException):
"""Exception that's raised when the gateway is requesting privileged intents
but they're not ticked in the developer page yet.
Go to https://discord.com/developers/applications/ and enable the intents
that are required. Currently these are as follows:
- :attr:`Intents.members`
- :attr:`Intents.presences`
Attributes
-----------
shard_id: Optional[:class:`int`]
The shard ID that got closed if applicable.
"""
def __init__(self, shard_id: Optional[int]):
self.shard_id: Optional[int] = shard_id
msg = (
'Shard ID %s is requesting privileged intents that have not been explicitly enabled in the '
'developer portal. It is recommended to go to https://discord.com/developers/applications/ '
'and explicitly enable the privileged intents within your application\'s page. If this is not '
'possible, then consider disabling the privileged intents instead.'
)
super().__init__(msg % shard_id)
class InteractionResponded(ClientException):
"""Exception that's raised when sending another interaction response using
:class:`InteractionResponse` when one has already been done before.
An interaction can only respond once.
.. versionadded:: 2.0
Attributes
-----------
interaction: :class:`Interaction`
The interaction that's already been responded to.
"""
def __init__(self, interaction: Interaction):
self.interaction: Interaction = interaction
super().__init__('This interaction has already been responded to before')
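# Hedged usage sketch (illustrative only, not part of this module): callers
# usually catch the specific subclasses first and fall back to HTTPException;
# channel.send below stands in for any HTTP-backed API call.
#
#   try:
#       await channel.send('hello')
#   except Forbidden:
#       pass                                  # 403: missing permissions
#   except NotFound:
#       pass                                  # 404: target no longer exists
#   except HTTPException as exc:
#       print(exc.status, exc.code, exc.text)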
|
PypiClean
|
/hassmart_homeassistant-0.65.4.tar.gz/hassmart_homeassistant-0.65.4/homeassistant/components/system_log/__init__.py
|
import asyncio
from collections import deque
from io import StringIO
import logging
import re
import traceback
import voluptuous as vol
from homeassistant import __path__ as HOMEASSISTANT_PATH
from homeassistant.components.http import HomeAssistantView
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
CONF_MAX_ENTRIES = 'max_entries'
CONF_MESSAGE = 'message'
CONF_LEVEL = 'level'
CONF_LOGGER = 'logger'
DATA_SYSTEM_LOG = 'system_log'
DEFAULT_MAX_ENTRIES = 50
DEPENDENCIES = ['http']
DOMAIN = 'system_log'
EVENT_SYSTEM_LOG = 'system_log_event'
SERVICE_CLEAR = 'clear'
SERVICE_WRITE = 'write'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_MAX_ENTRIES, default=DEFAULT_MAX_ENTRIES):
cv.positive_int,
}),
}, extra=vol.ALLOW_EXTRA)
SERVICE_CLEAR_SCHEMA = vol.Schema({})
SERVICE_WRITE_SCHEMA = vol.Schema({
vol.Required(CONF_MESSAGE): cv.string,
vol.Optional(CONF_LEVEL, default='error'):
vol.In(['debug', 'info', 'warning', 'error', 'critical']),
vol.Optional(CONF_LOGGER): cv.string,
})
def _figure_out_source(record, call_stack, hass):
paths = [HOMEASSISTANT_PATH[0], hass.config.config_dir]
try:
# If netdisco is installed check its path too.
from netdisco import __path__ as netdisco_path
paths.append(netdisco_path[0])
except ImportError:
pass
# If a stack trace exists, extract file names from the entire call stack.
# The other case is when a regular "log" is made (without an attached
# exception). In that case, just use the file where the log was made from.
if record.exc_info:
stack = [x[0] for x in traceback.extract_tb(record.exc_info[2])]
else:
index = -1
for i, frame in enumerate(call_stack):
if frame == record.pathname:
index = i
break
if index == -1:
# For some reason we couldn't find pathname in the stack.
stack = [record.pathname]
else:
stack = call_stack[0:index+1]
    # Iterate through the call stack (in reverse) and find the last call from
    # a file in Home Assistant. Try to figure out where the error happened.
paths_re = r'(?:{})/(.*)'.format('|'.join([re.escape(x) for x in paths]))
for pathname in reversed(stack):
# Try to match with a file within Home Assistant
match = re.match(paths_re, pathname)
if match:
return match.group(1)
# Ok, we don't know what this is
return record.pathname
def _exception_as_string(exc_info):
buf = StringIO()
if exc_info:
traceback.print_exception(*exc_info, file=buf)
return buf.getvalue()
class LogErrorHandler(logging.Handler):
"""Log handler for error messages."""
def __init__(self, hass, maxlen):
"""Initialize a new LogErrorHandler."""
super().__init__()
self.hass = hass
self.records = deque(maxlen=maxlen)
def _create_entry(self, record, call_stack):
return {
'timestamp': record.created,
'level': record.levelname,
'message': record.getMessage(),
'exception': _exception_as_string(record.exc_info),
'source': _figure_out_source(record, call_stack, self.hass),
}
def emit(self, record):
"""Save error and warning logs.
        Everything logged with error or warning is saved in a local buffer. A
default upper limit is set to 50 (older entries are discarded) but can
be changed if needed.
"""
if record.levelno >= logging.WARN:
stack = []
if not record.exc_info:
try:
stack = [f for f, _, _, _ in traceback.extract_stack()]
except ValueError:
# On Python 3.4 under py.test getting the stack might fail.
pass
entry = self._create_entry(record, stack)
self.records.appendleft(entry)
self.hass.bus.fire(EVENT_SYSTEM_LOG, entry)
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the logger component."""
conf = config.get(DOMAIN)
if conf is None:
conf = CONFIG_SCHEMA({DOMAIN: {}})[DOMAIN]
handler = LogErrorHandler(hass, conf.get(CONF_MAX_ENTRIES))
logging.getLogger().addHandler(handler)
hass.http.register_view(AllErrorsView(handler))
@asyncio.coroutine
def async_service_handler(service):
"""Handle logger services."""
if service.service == 'clear':
handler.records.clear()
return
if service.service == 'write':
logger = logging.getLogger(
service.data.get(CONF_LOGGER, '{}.external'.format(__name__)))
level = service.data[CONF_LEVEL]
getattr(logger, level)(service.data[CONF_MESSAGE])
@asyncio.coroutine
def async_shutdown_handler(event):
"""Remove logging handler when Home Assistant is shutdown."""
# This is needed as older logger instances will remain
logging.getLogger().removeHandler(handler)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP,
async_shutdown_handler)
hass.services.async_register(
DOMAIN, SERVICE_CLEAR, async_service_handler,
schema=SERVICE_CLEAR_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_WRITE, async_service_handler,
schema=SERVICE_WRITE_SCHEMA)
return True
class AllErrorsView(HomeAssistantView):
"""Get all logged errors and warnings."""
url = "/api/error/all"
name = "api:error:all"
def __init__(self, handler):
"""Initialize a new AllErrorsView."""
self.handler = handler
@asyncio.coroutine
def get(self, request):
"""Get all errors and warnings."""
# deque is not serializable (it's just "list-like") so it must be
# converted to a list before it can be serialized to json
return self.json(list(self.handler.records))
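# Hypothetical usage sketch (not part of the component): exercising
# LogErrorHandler outside Home Assistant with a minimal stand-in for ``hass``.
# Only the attributes the handler actually touches (``bus.fire`` and
# ``config.config_dir``) are stubbed out; the values are placeholders.
if __name__ == '__main__':
    class _FakeBus:
        def fire(self, event_type, event_data):
            print('fired', event_type, event_data['level'], event_data['message'])
    class _FakeConfig:
        config_dir = '/tmp'
    class _FakeHass:
        bus = _FakeBus()
        config = _FakeConfig()
    example_handler = LogErrorHandler(_FakeHass(), maxlen=10)
    logging.getLogger(__name__).addHandler(example_handler)
    logging.getLogger(__name__).error('something went wrong')
    print(list(example_handler.records))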
|
PypiClean
|
/eric-ide-22.7.1.tar.gz/eric-ide-22.7.1/eric7/Plugins/UiExtensionPlugins/Translator/TranslatorEngines/GoogleV1Engine.py
|
# Copyright (c) 2014 - 2022 Detlev Offenbach <[email protected]>
#
"""
Module implementing the Google V1 translation engine.
"""
import json
import re
from PyQt6.QtCore import QByteArray, QUrl, QTimer
import Utilities
from .TranslationEngine import TranslationEngine
class GoogleV1Engine(TranslationEngine):
"""
Class implementing the translation engine for the old Google
translation service.
"""
TranslatorUrl = "https://translate.googleapis.com/translate_a/single"
TextToSpeechUrl = "https://translate.google.com/translate_tts"
TextToSpeechLimit = 100
def __init__(self, plugin, parent=None):
"""
Constructor
@param plugin reference to the plugin object
@type TranslatorPlugin
@param parent reference to the parent object
@type QObject
"""
super().__init__(plugin, parent)
QTimer.singleShot(0, self.availableTranslationsLoaded.emit)
def engineName(self):
"""
Public method to return the name of the engine.
@return engine name
@rtype str
"""
return "googlev1"
def supportedLanguages(self):
"""
Public method to get the supported languages.
@return list of supported language codes
@rtype list of str
"""
return ["ar", "be", "bg", "bs", "ca", "cs", "da", "de", "el", "en",
"es", "et", "fi", "fr", "ga", "gl", "hi", "hr", "hu", "id",
"is", "it", "iw", "ja", "ka", "ko", "lt", "lv", "mk", "mt",
"nl", "no", "pl", "pt", "ro", "ru", "sk", "sl", "sq", "sr",
"sv", "th", "tl", "tr", "uk", "vi", "zh-CN", "zh-TW",
]
def hasTTS(self):
"""
Public method indicating the Text-to-Speech capability.
@return flag indicating the Text-to-Speech capability
@rtype bool
"""
return True
def getTranslation(self, requestObject, text, originalLanguage,
translationLanguage):
"""
Public method to translate the given text.
@param requestObject reference to the request object
@type TranslatorRequest
@param text text to be translated
@type str
@param originalLanguage language code of the original
@type str
@param translationLanguage language code of the translation
@type str
@return tuple of translated text and flag indicating success
@rtype tuple of (str, bool)
"""
params = QByteArray(
"client=gtx&sl={0}&tl={1}&dt=t&dt=bd&ie=utf-8&oe=utf-8&q=".format(
originalLanguage, translationLanguage).encode("utf-8"))
encodedText = (
QByteArray(Utilities.html_encode(text).encode("utf-8"))
.toPercentEncoding()
)
request = params + encodedText
response, ok = requestObject.post(QUrl(self.TranslatorUrl), request)
if ok:
try:
# clean up the response
response = re.sub(r',{2,}', ',', response)
responseDict = json.loads(response)
except ValueError:
return self.tr("Google V1: Invalid response received"), False
if isinstance(responseDict, dict):
sentences = responseDict["sentences"]
result = ""
for sentence in sentences:
result += sentence["trans"].replace("\n", "<br/>")
if (
self.plugin.getPreferences("GoogleEnableDictionary") and
"dict" in responseDict
):
dictionary = responseDict["dict"]
for value in dictionary:
result += "<hr/><u><b>{0}</b> - {1}</u><br/>".format(
text, value["pos"])
for entry in value["entry"]:
previous = (entry["previous_word"] + " "
if "previous_word" in entry else "")
word = entry["word"]
reverse = entry["reverse_translation"]
result += "<br/>{0}<b>{1}</b> - {2}".format(
previous, word, ", ".join(reverse))
if value != dictionary[-1]:
result += "<br/>"
elif isinstance(responseDict, list):
sentences = responseDict[0]
result = (
"".join([s[0] for s in sentences]).replace("\n", "<br/>")
)
if (
self.plugin.getPreferences("GoogleEnableDictionary") and
len(responseDict) > 2
):
if not responseDict[1]:
result = self.tr("Google V1: No translation found.")
ok = False
else:
for wordTypeList in responseDict[1]:
result += "<hr/><u><b>{0}</b> - {1}</u>".format(
wordTypeList[0], wordTypeList[-2])
for wordsList in wordTypeList[2]:
reverse = wordsList[0]
words = wordsList[1]
result += "<br/><b>{0}</b> - {1}".format(
reverse, ", ".join(words))
else:
result = responseDict
else:
result = response
return result, ok
def getTextToSpeechData(self, requestObject, text, language):
"""
Public method to pronounce the given text.
@param requestObject reference to the request object
@type TranslatorRequest
@param text text to be pronounced
@type str
@param language language code of the text
@type str
@return tuple with pronounce data or error string and success flag
@rtype tuple of (QByteArray or str, bool)
"""
text = text.split("\n\n", 1)[0]
if len(text) > self.TextToSpeechLimit:
return (self.tr("Google V1: Only texts up to {0} characters are"
" allowed.")
.format(self.TextToSpeechLimit), False)
url = QUrl(self.TextToSpeechUrl +
"?client=tw-ob&ie=utf-8&tl={0}&q={1}".format(
language, text))
return requestObject.get(url)
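# Illustrative sketch using made-up sample data (not a captured Google
# response): the list-form payload is cleaned of repeated commas and decoded
# exactly as getTranslation() does above.
if __name__ == "__main__":
    sampleResponse = '[[["Hallo Welt","Hello world",,,1]],,"en"]'
    cleaned = re.sub(r',{2,}', ',', sampleResponse)
    sentences = json.loads(cleaned)[0]
    print("".join(s[0] for s in sentences))  # prints: Hallo Welt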
|
PypiClean
|
/sentry-nos-9.0.0.tar.gz/sentry-nos-9.0.0/src/sentry/south_migrations/0143_fill_project_orgs.py
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import IntegrityError, models, transaction
def atomic_save(model):
try:
with transaction.atomic():
model.save()
except transaction.TransactionManagementError:
        # sqlite isn't happy with the atomic block; fall back to a plain save
model.save()
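# Hypothetical usage sketch: ``project`` stands for any Django model instance
# with ``team`` and ``organization`` fields; forwards() below applies the same
# pattern to every project that is missing an organization.
def _example_backfill(project):
    """Copy the owning team's organization onto the project and persist it."""
    project.organization = project.team.organization
    atomic_save(project)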
class Migration(DataMigration):
def forwards(self, orm):
from sentry.constants import RESERVED_ORGANIZATION_SLUGS
from sentry.db.models.utils import slugify_instance
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
Project = orm['sentry.Project']
queryset = Project.objects.filter(organization__isnull=True).select_related(
'team', 'team__organization'
)
for project in RangeQuerySetWrapperWithProgressBar(queryset):
project.organization = project.team.organization
try:
atomic_save(project)
except IntegrityError:
# we also need to update the slug here based on the new constraints
slugify_instance(
project,
project.name,
(models.Q(organization=project.organization) | models.Q(team=project.team), )
)
project.save()
def backwards(self, orm):
pass
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.alert': {
'Meta': {
'object_name': 'Alert'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'related_groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'related_alerts'",
'symmetrical': 'False',
'through': "orm['sentry.AlertRelatedGroup']",
'to': "orm['sentry.Group']"
}
),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.alertrelatedgroup': {
'Meta': {
'unique_together': "(('group', 'alert'),)",
'object_name': 'AlertRelatedGroup'
},
'alert':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Alert']"
}),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'audit_actors'",
'to': "orm['sentry.User']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object': ('django.db.models.fields.PositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'badge': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments':
('django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'null': 'True'
}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent': ('django.db.models.fields.IntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments':
('django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'null': 'True'
}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'rule':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'slug': (
'django.db.models.fields.SlugField', [], {
'max_length': '50',
'unique': 'True',
'null': 'True'
}
),
'status': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'type': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.pendingteammember': {
'Meta': {
'unique_together': "(('team', 'email'),)",
'object_name': 'PendingTeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'user_added': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'keys_added_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
}),
'user':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
|
PypiClean
|
/bronx_beta-2.0.0b2-py3-none-any.whl/bronx/system/mf.py
|
from __future__ import print_function, absolute_import, unicode_literals, division
from six import BytesIO
import ftplib
import netrc
import re
import uuid
from bronx.fancies import loggers
logger = loggers.getLogger(__name__)
#: No automatic export
__all__ = []
def prestage(resource_paths,
mail=None,
archive_machine='hendrix',
stagedir='/DemandeMig/ChargeEnEspaceRapide'):
"""
Puts a pre-staging request on **archive_machine** for the given list of
    resources **resource_paths**, and returns the path to the submitted request
file.
:param resource_paths: list of paths to requested resources
:param mail: if given, used for informing about the request progress.
:param archive_machine: name of the archive machine. Will probably not work
                            for machines other than *hendrix* for now.
    :param stagedir: directory in which prestaging requests are to be put
on **archive_machine**
.. note::
Uses *~/.netrc* to connect to **archive_machine**.
"""
# build request
if mail is not None:
        if re.match(r'([a-zA-Z\-]+)\.([a-zA-Z\-]+)@meteo\.fr', mail):
request = ["#MAIL=" + mail + '\n', ]
else:
logger.warning('invalid **mail** format: ' + mail)
request = []
else:
request = []
request += [r + '\n' for r in resource_paths]
# connect to archive
try:
(_login, _, _passwd) = netrc.netrc().authenticators(archive_machine)
except TypeError:
if netrc.netrc().authenticators(archive_machine) is None:
raise IOError("host " + archive_machine + " is unknown in .netrc")
else:
raise
ftp = ftplib.FTP(archive_machine)
ftp.login(_login, _passwd)
# send request
request_filename = '.'.join([_login,
'staging_request',
uuid.uuid4().hex[:8], # [:8] safe enough ?
'MIG'])
f = BytesIO()
f.writelines([line.encode('utf-8') for line in request])
f.seek(0)
ftp.cwd(stagedir)
ftp.storbinary('STOR ' + request_filename, f)
f.close()
ftp.quit()
# send back request identifier
return '/'.join([stagedir, request_filename])
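# Hypothetical usage sketch: the resource paths and mail address are
# placeholders. A valid ~/.netrc entry for the archive machine is required,
# and running this actually submits an FTP pre-staging request.
if __name__ == '__main__':
    request_path = prestage(
        ['/home/someuser/archive/data1.grib',
         '/home/someuser/archive/data2.grib'],
        mail='firstname.lastname@meteo.fr',
    )
    print('pre-staging request submitted as:', request_path)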
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/request/MybankCreditSceneprodBillQueryRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MybankCreditSceneprodBillQueryModel import MybankCreditSceneprodBillQueryModel
class MybankCreditSceneprodBillQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, MybankCreditSceneprodBillQueryModel):
self._biz_content = value
else:
self._biz_content = MybankCreditSceneprodBillQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'mybank.credit.sceneprod.bill.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
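# Hypothetical usage sketch: only attributes defined in this class are used;
# the notify URL and extra parameter below are placeholders, not documented
# values of the Alipay API.
if __name__ == '__main__':
    example_request = MybankCreditSceneprodBillQueryRequest()
    example_request.notify_url = 'https://example.com/alipay/notify'
    example_request.add_other_text_param('trace_id', 'example-trace-id')
    print(example_request.get_params())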
|
PypiClean
|
/scml-agents-0.4.2.tar.gz/scml-agents-0.4.2/scml_agents/scml2020/team_17/whagent.py
|
import functools
import math
import time
from abc import abstractmethod
from dataclasses import dataclass
from pprint import pformat, pprint
from typing import Any, Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from negmas import (
AgentMechanismInterface,
AspirationNegotiator,
Breach,
Contract,
Issue,
LinearUtilityFunction,
MechanismState,
Negotiator,
SAONegotiator,
UtilityFunction,
)
from negmas.helpers import get_class, humanize_time, instantiate
from negmas.outcomes.base_issue import make_issue
from negmas.outcomes.issue_ops import enumerate_issues
from scml.scml2020 import (
AWI,
DecentralizingAgent,
Failure,
MovingRangeNegotiationManager,
PredictionBasedTradingStrategy,
RandomAgent,
SCML2020Agent,
SCML2020World,
TradeDrivenProductionStrategy,
)
from scml.scml2020.agents import (
BuyCheapSellExpensiveAgent,
DecentralizingAgent,
DoNothingAgent,
IndDecentralizingAgent,
)
from scml.scml2020.common import ANY_LINE, NO_COMMAND, TIME, is_system_agent
from scml.scml2020.components import (
FixedTradePredictionStrategy,
SignAllPossible,
TradePredictionStrategy,
)
from scml.scml2020.components.negotiation import (
IndependentNegotiationsManager,
NegotiationManager,
StepNegotiationManager,
)
from scml.scml2020.components.prediction import MeanERPStrategy
from scml.scml2020.components.production import (
DemandDrivenProductionStrategy,
ProductionStrategy,
SupplyDrivenProductionStrategy,
)
from scml.scml2020.components.trading import TradingStrategy
from scml.scml2020.services.controllers import StepController, SyncController
from scml.utils import anac2020_collusion, anac2020_std
from tabulate import tabulate
__all__ = ["WhAgent"]
class AllmakeProductionStrategy(ProductionStrategy):
def step(self):
super().step()
commands = NO_COMMAND * np.ones(self.awi.n_lines, dtype=int)
inputs = min(self.awi.state.inventory[self.awi.my_input_product], len(commands))
commands[:inputs] = self.awi.my_input_product
commands[inputs:] = NO_COMMAND
self.awi.set_commands(commands)
def on_contracts_finalized(
self: "SCML2020Agent",
signed: List[Contract],
cancelled: List[Contract],
rejectors: List[List[str]],
) -> None:
latest = self.awi.n_steps - 2
earliest_production = self.awi.current_step
for contract in signed:
is_seller = contract.annotation["seller"] == self.id
if is_seller:
continue
step = contract.agreement["time"]
if step > latest + 1 or step < earliest_production:
continue
input_product = contract.annotation["product"]
if self.awi.current_step == (self.awi.n_steps - 1):
if self.awi.profile.costs[0, self.awi.my_input_product] > (
self.awi.catalog_prices[self.awi.my_input_product] / 2
):
continue
steps, _ = self.awi.schedule_production(
process=input_product,
repeats=contract.agreement["quantity"],
# step=(step, latest),
step=-1,
line=-1,
method="earliest",
partial_ok=True,
)
self.schedule_range[contract.id] = (
min(steps) if len(steps) > 0 else -1,
max(steps) if len(steps) > 0 else -1,
is_seller,
)
class AvoidOverproductionTradingStrategy(
FixedTradePredictionStrategy, MeanERPStrategy, TradingStrategy
):
def init(self):
super().init()
self.inputs_needed = np.zeros(self.awi.n_steps, dtype=int)
self.outputs_needed = np.zeros(self.awi.n_steps, dtype=int)
self.inputs_secured = np.zeros(self.awi.n_steps, dtype=int)
self.outputs_secured = np.zeros(self.awi.n_steps, dtype=int)
def on_contracts_finalized(
self,
signed: List[Contract],
cancelled: List[Contract],
rejectors: List[List[str]],
) -> None:
super().on_contracts_finalized(signed, cancelled, rejectors)
for contract in signed:
q, u, t = (
contract.agreement["quantity"],
contract.agreement["unit_price"],
contract.agreement["time"],
)
if contract.annotation["seller"] == self.id:
self.outputs_secured[t] += q
else:
self.inputs_secured[t] += q
def sign_all_contracts(self, contracts: List[Contract]) -> List[Optional[str]]:
super().sign_all_contracts(contracts)
signatures = [None] * len(contracts)
contracts = sorted(
zip(contracts, range(len(contracts))),
key=lambda x: (
x[0].agreement["unit_price"],
x[0].agreement["time"],
0
if is_system_agent(x[0].annotation["seller"])
or is_system_agent(x[0].annotation["buyer"])
else 1,
x[0].agreement["unit_price"],
# x[0].agreement["time"],
),
)
sold, bought = 0, 0
s = self.awi.current_step
for contract, indx in contracts:
is_seller = contract.annotation["seller"] == self.id
q, u, t = (
contract.agreement["quantity"],
contract.agreement["unit_price"],
contract.agreement["time"],
)
if t < s and len(contract.issues) == 3:
continue
if self.awi.my_suppliers == ["SELLER"]:
if q > self.awi.n_lines:
continue
if is_seller:
if t > self.awi.n_steps - 2:
continue
zaiko = 0
for zzaiko in self.outputs_needed:
zaiko += zzaiko
if zaiko < 1:
if t < s + 3:
continue
sellprice = max(
(
self.awi.catalog_prices[self.awi.my_output_product]
- self.awi.catalog_prices[self.awi.my_input_product]
- self.awi.profile.costs[0, self.awi.my_input_product]
)
// 2
- 1,
0,
)
if (
u
< self.awi.catalog_prices[self.awi.my_output_product]
- sellprice
):
continue
cansell = zaiko + (self.awi.n_lines - self.inputs_needed[t])
if q <= cansell:
self.outputs_needed[t] -= q
self.inputs_needed[t] += q
else:
continue
else:
wantbuy = 0
needtime = -1
for step in range(self.awi.n_steps):
wantbuy += self.inputs_needed[step]
if self.inputs_needed[step] > 0 and needtime == -1:
needtime = step
if wantbuy > 0:
self.outputs_needed[t] += q
self.inputs_needed[t] -= q
else:
continue
elif self.awi.my_consumers == ["BUYER"]:
if q > self.awi.n_lines:
continue
if is_seller:
zaiko = 0
for zzaiko in self.outputs_needed:
zaiko += zzaiko
if zaiko < 1:
if t < s + 2:
continue
cansell = zaiko
if q <= cansell:
self.outputs_needed[t] -= q
self.inputs_needed[t] += q
else:
continue
else:
if t > s + 5:
continue
wantbuy = self.awi.n_lines - self.outputs_needed[t]
if wantbuy > 0:
self.outputs_needed[t] += q
self.inputs_needed[t] -= q
else:
continue
else:
if q > self.awi.n_lines:
continue
if is_seller:
if t > self.awi.n_steps - 2:
continue
zaiko = 0
for zzaiko in self.outputs_needed:
zaiko += zzaiko
if zaiko < q:
if t < s + 2:
continue
sellprice = max(
(
self.awi.catalog_prices[self.awi.my_output_product]
- self.awi.catalog_prices[self.awi.my_input_product]
- self.awi.profile.costs[0, self.awi.my_input_product]
)
// 2
- 1,
0,
)
if (
u
< self.awi.catalog_prices[self.awi.my_output_product]
- sellprice
):
continue
cansell = zaiko + (self.awi.n_lines - self.inputs_needed[t])
if q <= cansell:
self.outputs_needed[t] -= q
self.inputs_needed[t] += q
else:
continue
else:
if t < s:
continue
havetobuy = 0
needtime = s - 1
for step in range(self.awi.n_steps):
havetobuy += self.inputs_needed[step]
if self.inputs_needed[step] > 0 and needtime <= (s - 1):
needtime = step
if t >= needtime:
continue
if needtime == s + 1:
if u < self.awi.catalog_prices[self.awi.my_input_product]:
continue
elif needtime < s + 3:
buyprice2 = max(
(
self.awi.catalog_prices[self.awi.my_output_product]
- self.awi.catalog_prices[self.awi.my_input_product]
)
// 2
- 1,
0,
)
if (
u
< self.awi.catalog_prices[self.awi.my_input_product]
+ buyprice2
):
continue
else:
buyprice = max(
(
self.awi.catalog_prices[self.awi.my_output_product]
- self.awi.catalog_prices[self.awi.my_input_product]
- self.awi.profile.costs[0, self.awi.my_input_product]
)
// 2
- 1,
0,
)
if (
u
< self.awi.catalog_prices[self.awi.my_input_product]
+ buyprice
):
continue
if havetobuy > 0:
self.outputs_needed[t] += q
self.inputs_needed[t] -= q
else:
continue
signatures[indx] = self.id
if is_seller:
sold += q
else:
bought += q
return signatures
def _format(self, c: Contract):
super()._format(c)
return (
f"{f'>' if c.annotation['seller'] == self.id else '<'}"
f"{c.annotation['buyer'] if c.annotation['seller'] == self.id else c.annotation['seller']}: "
f"{c.agreement['quantity']} of {c.annotation['product']} @ {c.agreement['unit_price']} on {c.agreement['time']}"
)
def on_agent_bankrupt(
self,
agent: str,
contracts: List[Contract],
quantities: List[int],
compensation_money: int,
) -> None:
super().on_agent_bankrupt(agent, contracts, quantities, compensation_money)
for contract, new_quantity in zip(contracts, quantities):
q = contract.agreement["quantity"]
if new_quantity == q:
continue
t = contract.agreement["time"]
missing = q - new_quantity
if t < self.awi.current_step:
continue
if contract.annotation["seller"] == self.id:
self.outputs_secured[t] -= missing
if t > 0:
self.inputs_needed[t - 1] -= missing
else:
self.inputs_secured[t] += missing
if t < self.awi.n_steps - 1:
self.outputs_needed[t + 1] -= missing
@dataclass
class ControllerInfo:
controller: StepController
time_step: int
is_seller: bool
time_range: Tuple[int, int]
target: int
expected: int
done: bool = False
class PreNegotiationManager(IndependentNegotiationsManager):
def respond_to_negotiation_request(
self,
initiator: str,
issues: List[Issue],
annotation: Dict[str, Any],
mechanism: AgentMechanismInterface,
) -> Optional[Negotiator]:
return self.negotiator(annotation["seller"] == self.id, issues=issues)
def negotiator(
self, is_seller: bool, issues=None, outcomes=None, partner=None
) -> SAONegotiator:
params = self.negotiator_params
params["ufun"] = self.create_ufun(
is_seller=is_seller, outcomes=outcomes, issues=issues
)
return instantiate(self.negotiator_type, **params)
def _start_negotiations(
self,
product: int,
sell: bool,
step: int,
qvalues: Tuple[int, int],
uvalues: Tuple[int, int],
tvalues: Tuple[int, int],
partners: List[str],
) -> None:
super()._start_negotiations(
product, sell, step, qvalues, uvalues, tvalues, partners
)
issues = [
make_issue((int(qvalues[0]), int(max(qvalues))), name="quantity"),
make_issue((int(tvalues[0]), int(max(tvalues))), name="time"),
make_issue((int(uvalues[0]), int(max(uvalues))), name="unit_price"),
]
sortpartner = {}
if self.awi.current_step > 4:
reportstep = ((self.awi.current_step // 5) - 1) * 5
for k in self.awi.reports_at_step(reportstep).values():
for ne in partners:
if ne == k.agent_id and k.breach_level < 1.0:
sortpartner[k.agent_id] = k.breach_level
if len(sortpartner) != 0:
sortpartners = sorted(sortpartner.items(), key=lambda x: x[1])
sortpartners_list = [i[0] for i in sortpartners]
for partner in sortpartners_list:
self.awi.request_negotiation(
is_buy=not sell,
product=product,
quantity=qvalues,
unit_price=uvalues,
time=tvalues,
partner=partner,
negotiator=self.negotiator(sell, issues=issues),
)
else:
for partner in partners:
self.awi.request_negotiation(
is_buy=not sell,
product=product,
quantity=qvalues,
unit_price=uvalues,
time=tvalues,
partner=partner,
negotiator=self.negotiator(sell, issues=issues),
)
else:
for partner in partners:
self.awi.request_negotiation(
is_buy=not sell,
product=product,
quantity=qvalues,
unit_price=uvalues,
time=tvalues,
partner=partner,
negotiator=self.negotiator(sell, issues=issues),
)
def target_quantity(self, step: int, sell: bool) -> int:
if sell:
if sum(self.outputs_secured) < self.awi.n_lines * (
self.awi.n_steps - self.awi.current_step - 2
):
sellnum = min(self.awi.n_lines, self.outputs_secured[step])
else:
sellnum = 0
else:
if sum(self.inputs_secured) < self.awi.n_lines * (
self.awi.n_steps - self.awi.current_step - 2
):
buynum = min(self.awi.n_lines, sum(self.outputs_secured))
else:
buynum = 0
if step == self.awi.current_step - 1:
return 0 if sell else 0
return sellnum if sell else buynum
def acceptable_unit_price(self, step: int, sell: bool) -> int:
return (
self.awi.catalog_prices[self.awi.my_output_product]
if sell
else self.awi.catalog_prices[self.awi.my_input_product]
)
def create_ufun(
self, is_seller: bool, issues=None, outcomes=None
) -> UtilityFunction:
if is_seller:
return LinearUtilityFunction((0, 0.25, 1), issues=issues, outcomes=outcomes)
return LinearUtilityFunction((0, -0.5, -0.8), issues=issues, outcomes=outcomes)
class WhAgent(
AvoidOverproductionTradingStrategy,
PreNegotiationManager,
AllmakeProductionStrategy,
SCML2020Agent,
):
def init(self):
super().init()
def step(self):
super().step()
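# Hypothetical usage sketch: this follows the common SCML quick-start pattern
# (SCML2020World.generate(...) then world.run()); treat the exact keyword
# arguments as assumptions rather than a verified recipe for this agent.
if __name__ == "__main__":
    world = SCML2020World(
        **SCML2020World.generate(
            agent_types=[WhAgent, DecentralizingAgent, RandomAgent],
            n_steps=10,
        )
    )
    world.run()
    print("simulation finished")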
|
PypiClean
|
/uk_bin_collection-0.9.0.tar.gz/uk_bin_collection-0.9.0/uk_bin_collection/uk_bin_collection/councils/DerbyshireDalesDistrictCouncil.py
|
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import \
AbstractGetBinDataClass
# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
"""
Concrete classes have to implement all abstract operations of the
base class. They can also override some operations with a default
implementation.
"""
def parse_data(self, page: str, **kwargs) -> dict:
page = "https://selfserve.derbyshiredales.gov.uk/renderform.aspx?t=103&k=9644C066D2168A4C21BCDA351DA2642526359DFF"
data = {"bins": []}
user_uprn = kwargs.get("uprn")
user_postcode = kwargs.get("postcode")
check_uprn(user_uprn)
check_postcode(user_postcode)
# Set up Selenium to run 'headless'
options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--disable-dev-shm-usage")
options.add_experimental_option("excludeSwitches", ["enable-logging"])
# Create Selenium webdriver
driver = webdriver.Chrome(options=options)
driver.get(page)
# Populate postcode field
inputElement_postcode = driver.find_element(
By.ID,
"ctl00_ContentPlaceHolder1_FF2924TB",
)
inputElement_postcode.send_keys(user_postcode)
# Click search button
driver.find_element(
By.ID,
"ctl00_ContentPlaceHolder1_FF2924BTN",
).click()
# Wait for the 'Select address' dropdown to appear and select option matching UPRN
dropdown = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_FF2924DDL"))
)
# Create a 'Select' for it, then select the matching URPN option
dropdownSelect = Select(dropdown)
dropdownSelect.select_by_value("U" + user_uprn)
# Wait for the submit button to appear, then click it to get the collection dates
submit = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_btnSubmit"))
)
submit.click()
        soup = BeautifulSoup(driver.page_source, features="html.parser")
        driver.quit()
bin_rows = soup.find("div", id="ctl00_ContentPlaceHolder1_pnlConfirmation") \
.find("div", {"class": "row"}).find_all("div", {"class": "row"})
if bin_rows:
for bin_row in bin_rows:
bin_data = bin_row.find_all("div")
if bin_data and bin_data[0] and bin_data[1]:
collection_date = datetime.strptime(bin_data[0].get_text(strip=True), "%A%d %B, %Y")
dict_data = {
"type": bin_data[1].get_text(strip=True),
"collectionDate": collection_date.strftime(date_format),
}
data["bins"].append(dict_data)
data["bins"].sort(
key=lambda x: datetime.strptime(x.get("collectionDate"), date_format)
)
        # Tidy up the headless browser session before returning so Chrome processes are not leaked
        driver.quit()
        return data
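# --- Editor's usage sketch (hedged illustration, not part of the original module) ---
# The abstract base class drives parse_data() with the address details passed as
# keyword arguments; the kwarg names below come from the code above, the values are
# placeholders:
#
#     council = CouncilClass()
#     result = council.parse_data("", uprn="<your-UPRN>", postcode="<your-postcode>")
#     # -> {"bins": [{"type": ..., "collectionDate": ...}, ...]} sorted by collection date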
|
PypiClean
|
/azureml_pipeline_core-1.53.0-py3-none-any.whl/azureml/pipeline/core/_parallel_run_step_base.py
|
"""Contains functionality to add a step to run user script in parallel mode on multiple AmlCompute targets."""
import argparse
import logging
import re
import json
import os
import sys
import warnings
from itertools import chain
import azureml.core
from azureml.core.runconfig import RunConfiguration
from azureml.core.compute import AmlCompute
from azureml.data import TabularDataset, FileDataset
from azureml.data.constants import UPLOAD_MODE
from azureml.data.data_reference import DataReference
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
from azureml.data.output_dataset_config import (
OutputTabularDatasetConfig,
OutputFileDatasetConfig,
OutputDatasetConfig,
LinkFileOutputDatasetConfig,
LinkTabularOutputDatasetConfig,
)
from azureml.pipeline.core._parallel_run_config_base import _ParallelRunConfigBase
from azureml.pipeline.core._python_script_step_base import _PythonScriptStepBase
from azureml.pipeline.core.graph import PipelineParameter
from azureml.pipeline.core.graph import ParamDef
from azureml.pipeline.core.pipeline_output_dataset import PipelineOutputFileDataset
from azureml.pipeline.core.pipeline_output_dataset import PipelineOutputTabularDataset
from azureml.pipeline.core.builder import PipelineData
DEFAULT_BATCH_SCORE_MAIN_FILE_NAME = "driver/amlbi_main.py"
DEFAULT_MINI_BATCH_SIZE = 1
DEFAULT_MINI_BATCH_SIZE_FILEDATASET = 10
DEFAULT_MINI_BATCH_SIZE_TABULARDATASET = 1024 * 1024
FILE_TYPE_INPUT = "file"
TABULAR_TYPE_INPUT = "tabular"
REQUIRED_DATAPREP_EXTRAS = {
TABULAR_TYPE_INPUT: "fuse,pandas",
FILE_TYPE_INPUT: "fuse",
}
ALLOWED_INPUT_TYPES = (
DatasetConsumptionConfig,
PipelineOutputFileDataset,
PipelineOutputTabularDataset,
OutputFileDatasetConfig,
OutputTabularDatasetConfig,
LinkFileOutputDatasetConfig,
LinkTabularOutputDatasetConfig,
)
INPUT_TYPE_DICT = {
TabularDataset: TABULAR_TYPE_INPUT,
PipelineOutputTabularDataset: TABULAR_TYPE_INPUT,
OutputTabularDatasetConfig: TABULAR_TYPE_INPUT,
LinkTabularOutputDatasetConfig: TABULAR_TYPE_INPUT,
FileDataset: FILE_TYPE_INPUT,
PipelineOutputFileDataset: FILE_TYPE_INPUT,
OutputFileDatasetConfig: FILE_TYPE_INPUT,
LinkFileOutputDatasetConfig: FILE_TYPE_INPUT,
}
PARALLEL_RUN_VERSION = "v1"
# current packages which also install azureml-dataprep[fuse,pandas]
DATAPREP_FUSE_PANDAS_PACKAGES = [
"azureml-dataprep[fuse,pandas]",
"azureml-dataprep[pandas,fuse]",
"azureml-automl-runtime",
"azureml-contrib-dataset",
"azureml-datadrift",
"azureml-dataset-runtime[pandas,fuse]",
"azureml-dataset-runtime[fuse,pandas]",
"azureml-opendatasets",
"azureml-train-automl",
"azureml-train-automl-runtime",
]
# current packages which also install azureml-dataprep[fuse]
DATAPREP_FUSE_ONLY_PACKAGES = [
"azureml-dataprep[fuse]",
"azureml-dataset-runtime[fuse]",
"azureml-defaults",
"azureml-sdk",
]
DATAPREP_FUSE_PACKAGES = list(chain(DATAPREP_FUSE_PANDAS_PACKAGES, DATAPREP_FUSE_ONLY_PACKAGES))
REQUIRED_DATAPREP_PACKAGES = {
TABULAR_TYPE_INPUT: DATAPREP_FUSE_PANDAS_PACKAGES,
FILE_TYPE_INPUT: DATAPREP_FUSE_PACKAGES,
}
class _ParallelRunStepBase(_PythonScriptStepBase):
r"""
Creates an Azure Machine Learning Pipeline step to process large amounts of data asynchronously and in parallel.
For an example of using ParallelRunStep, see the notebook https://aka.ms/batch-inference-notebooks.
.. remarks::
_ParallelRunStepBase can be used for processing large amounts of data in parallel. Common use cases are
training an ML model or running offline inference to generate predictions on a batch of observations.
    _ParallelRunStepBase works by breaking up your data into batches that are processed in parallel. The batch
    size, node count, and other tunable parameters to speed up your parallel processing can be controlled with
the :class:`azureml.pipeline.steps.ParallelRunConfig` class. _ParallelRunStepBase can work with either
:class:`azureml.data.TabularDataset` or :class:`azureml.data.FileDataset` as input.
To use _ParallelRunStepBase:
* Create a :class:`azureml.pipeline.steps.ParallelRunConfig` object to specify how batch
processing is performed, with parameters to control batch size, number of nodes per compute target,
and a reference to your custom Python script.
* Create a _ParallelRunStepBase object that uses the ParallelRunConfig object, define inputs and
outputs for the step.
* Use the configured _ParallelRunStepBase object in a :class:`azureml.pipeline.core.Pipeline`
just as you would with pipeline step types defined in the :mod:`azureml.pipeline.steps` package.
Examples of working with _ParallelRunStepBase and ParallelRunConfig classes for batch inference are
discussed in the following articles:
* `Tutorial: Build an Azure Machine Learning pipeline for batch
scoring <https://docs.microsoft.com/azure/machine-learning/tutorial-pipeline-batch-scoring-classification>`_.
This article shows how to use these two classes for asynchronous batch scoring in a pipeline and enable a
REST endpoint to run the pipeline.
* `Run batch inference on large amounts of data by using Azure Machine
Learning <https://docs.microsoft.com/azure/machine-learning/how-to-use-parallel-run-step>`_. This article
shows how to process large amounts of data asynchronously and in parallel with a custom inference script
        and a pre-trained image classification model based on the MNIST dataset.
.. code:: python
from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig
parallel_run_config = ParallelRunConfig(
source_directory=scripts_folder,
entry_script=script_file,
mini_batch_size="5",
error_threshold=10, # Optional, allowed failed count on mini batch items
allowed_failed_count=15, # Optional, allowed failed count on mini batches
allowed_failed_percent=10, # Optional, allowed failed percent on mini batches
output_action="append_row",
environment=batch_env,
compute_target=compute_target,
node_count=2)
parallelrun_step = ParallelRunStep(
name="predict-digits-mnist",
parallel_run_config=parallel_run_config,
inputs=[ named_mnist_ds ],
output=output_dir,
arguments=[ "--extra_arg", "example_value" ],
allow_reuse=True
)
For more information about this example, see the notebook https://aka.ms/batch-inference-notebooks.
:param name: Name of the step. Must be unique to the workspace, only consist of lowercase letters,
numbers, or dashes, start with a letter, and be between 3 and 32 characters long.
:type name: str
:param parallel_run_config: A _ParallelRunConfigBase object used to determine required run properties.
:type parallel_run_config: azureml.pipeline.core._ParallelRunConfigBase
    :param inputs: List of input datasets. All datasets in the list should be of the same type.
Input data will be partitioned for parallel processing. Each dataset in the list is partitioned
into mini-batches separately, and each of the mini-batches is treated equally in the parallel processing.
:type inputs: list[azureml.data.dataset_consumption_config.DatasetConsumptionConfig
or azureml.data.dataset_consumption_config.PipelineOutputFileDataset
or azureml.data.dataset_consumption_config.PipelineOutputTabularDataset]
:param output: Output port binding, may be used by later pipeline steps.
:type output: azureml.pipeline.core.builder.PipelineData, azureml.pipeline.core.graph.OutputPortBinding
:param side_inputs: List of side input reference data. Side inputs will not be partitioned as input data.
:type side_inputs: list[azureml.pipeline.core.graph.InputPortBinding
or azureml.data.data_reference.DataReference
or azureml.pipeline.core.PortDataReference
or azureml.pipeline.core.builder.PipelineData
or azureml.pipeline.core.pipeline_output_dataset.PipelineOutputFileDataset
or azureml.pipeline.core.pipeline_output_dataset.PipelineOutputTabularDataset
or azureml.data.dataset_consumption_config.DatasetConsumptionConfig]
:param arguments: List of command-line arguments to pass to the Python entry_script.
:type arguments: list[str]
:param allow_reuse: Whether the step should reuse previous results when run with the same settings/inputs.
If this is false, a new run will always be generated for this step during pipeline execution.
:type allow_reuse: bool
"""
def __init__(
self,
name,
parallel_run_config,
inputs,
output=None,
side_inputs=None,
arguments=None,
allow_reuse=True,
):
r"""Create an Azure ML Pipeline step to process large amounts of data asynchronously and in parallel.
For an example of using ParallelRunStep, see the notebook link https://aka.ms/batch-inference-notebooks.
:param name: Name of the step. Must be unique to the workspace, only consist of lowercase letters,
numbers, or dashes, start with a letter, and be between 3 and 32 characters long.
:type name: str
:param parallel_run_config: A ParallelRunConfig object used to determine required run properties.
:type parallel_run_config: azureml.pipeline.steps.ParallelRunConfig
        :param inputs: List of input datasets. All datasets in the list should be of the same type.
Input data will be partitioned for parallel processing.
:type inputs: list[azureml.data.dataset_consumption_config.DatasetConsumptionConfig
or azureml.data.dataset_consumption_config.PipelineOutputFileDataset
or azureml.data.dataset_consumption_config.PipelineOutputTabularDataset]
:param output: Output port binding, may be used by later pipeline steps.
:type output: azureml.pipeline.core.builder.PipelineData, azureml.pipeline.core.graph.OutputPortBinding
:param side_inputs: List of side input reference data. Side inputs will not be partitioned as input data.
:type side_inputs: list[azureml.pipeline.core.graph.InputPortBinding
or azureml.data.data_reference.DataReference
or azureml.pipeline.core.PortDataReference
or azureml.pipeline.core.builder.PipelineData
or azureml.pipeline.core.pipeline_output_dataset.PipelineOutputFileDataset
or azureml.pipeline.core.pipeline_output_dataset.PipelineOutputTabularDataset
or azureml.data.dataset_consumption_config.DatasetConsumptionConfig]
:param arguments: List of command-line arguments to pass to the Python entry_script.
:type arguments: list[str]
:param allow_reuse: Whether the step should reuse previous results when run with the same settings/inputs.
If this is false, a new run will always be generated for this step during pipeline execution.
:type allow_reuse: bool
"""
self._name = name
self._parallel_run_config = parallel_run_config
self._inputs = inputs
self._output = output
self._side_inputs = side_inputs
self._arguments = arguments
self._node_count = self._parallel_run_config.node_count
self._process_count_per_node = self._parallel_run_config.process_count_per_node
self._mini_batch_size = self._parallel_run_config.mini_batch_size
self._partition_keys = self._parallel_run_config.partition_keys
self._error_threshold = self._parallel_run_config.error_threshold
self._allowed_failed_count = self._parallel_run_config.allowed_failed_count
self._allowed_failed_percent = self._parallel_run_config.allowed_failed_percent
self._logging_level = self._parallel_run_config.logging_level
self._run_invocation_timeout = self._parallel_run_config.run_invocation_timeout
self._run_max_try = self._parallel_run_config.run_max_try
self._input_compute_target = self._parallel_run_config.compute_target
self._pystep_inputs = []
self._pystep_side_inputs = []
self._input_ds_type = None
self._glob_syntax_pattern = re.compile(r"[\^\\\$\|\?\*\+\(\)\[\]\{\}]")
self._module_logger = logging.getLogger(__name__)
self._rank_mini_batch_count = self._get_rank_mini_batch_count()
self._platform = self._get_os_type_from_env(self._parallel_run_config.environment)
self._process_inputs_output_dataset_configs()
self._validate()
self._get_pystep_inputs()
if self._side_inputs:
self._handle_side_inputs()
pipeline_runconfig_params = self._get_pipeline_runconfig_params()
prun_runconfig = self._generate_runconfig()
prun_main_file_args = self._generate_main_file_args()
if self._side_inputs:
self._pystep_inputs += self._pystep_side_inputs
compute_target = self._input_compute_target
if isinstance(compute_target, str):
compute_target = (compute_target, AmlCompute._compute_type)
super(_ParallelRunStepBase, self).__init__(
name=self._name,
source_directory=self._parallel_run_config.source_directory,
script_name=self._parallel_run_config.entry_script,
runconfig=prun_runconfig,
runconfig_pipeline_params=pipeline_runconfig_params,
arguments=prun_main_file_args,
compute_target=compute_target,
inputs=self._pystep_inputs,
outputs=self._output,
allow_reuse=allow_reuse,
)
@staticmethod
def _get_os_type_from_env(env):
"""Return os of the given Environment, return either "windows" or "linux".
If no os specified in given env, defaults to "Linux".
"""
if env is not None and env.docker is not None and env.docker.platform is not None:
os_type = env.docker.platform.os
if os_type and isinstance(os_type, str):
return os_type.lower()
return "linux"
@property
def running_on_windows(self):
"""Return True if the platform is windows."""
return self._platform.lower() == "windows"
def _get_rank_mini_batch_count(self):
"""Return the number of rank mini batch."""
if not self._arguments:
return 0
parser = argparse.ArgumentParser(description="Parallel Run Step")
parser.add_argument(
"--rank_mini_batch_count",
type=int,
required=False,
default=0,
help="The number of rank mini batches to create."
" A rank mini batch doesn't take any input. It is used to run entry script without any input dataset."
" For example, start N processes to run the entry script."
" The default value is '0', where there is no rank mini batch. A negative value will be considered as '0'."
" If this value is greater than 0, other input will be ignored .",
)
args, _ = parser.parse_known_args([str(arg) for arg in self._arguments])
return args.rank_mini_batch_count
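    # Editor's note (hedged illustration): "--rank_mini_batch_count" is read from the step's
    # own `arguments` list, e.g. arguments=["--rank_mini_batch_count", "4"] schedules four
    # input-less mini batches for the entry script.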
def _validate(self):
"""Validate input params to init parallel run step class."""
self._validate_arguments()
self._validate_inputs()
self._validate_output()
self._validate_parallel_run_config()
self._validate_source_directory()
self._validate_entry_script()
def _validate_arguments(self):
"""Validate the additional arguments."""
reserved_args = [
"mini_batch_size",
"error_threshold",
"allowed_failed_count",
"allowed_failed_percent",
"output",
"output_action",
"logging_level",
"process_count_per_node",
"run_invocation_timeout",
"run_max_try",
"append_row_file_name",
"partition_keys",
]
if not self._arguments:
return
        # Ensure the first one starts with "-"
        if not self._arguments[0].startswith("-"):
            raise ValueError(
                "Found invalid argument '{}'."
                " As your arguments will be merged with reserved arguments,"
                " you can only use keyword arguments.".format(self._arguments[0])
            )
for item in self._arguments:
# Check argument with "--"
if isinstance(item, str) and item.startswith("--"):
name = item[2:]
parts = name.split("=")
if len(parts) > 1:
name = parts[0]
if name in reserved_args:
raise ValueError(
"'{}' is a reserved argument in ParallelRunStep, "
"please use another argument name instead.".format(name)
)
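    # Editor's note (hedged illustration): a reserved name such as
    # arguments=["--output_action", "summary_only"] would raise the ValueError above,
    # while custom flags like ["--extra_arg", "example_value"] pass this validation.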
def _get_input_type(self, in_ds, is_primary_input=True):
input_type = type(in_ds)
ds_mapping_type = None
if input_type == DatasetConsumptionConfig:
            # Dataset mode needs to be direct except when we convert it to a data reference.
            # This will be removed in the next release.
real_ds_obj = in_ds.dataset
if isinstance(in_ds.dataset, PipelineParameter):
real_ds_obj = in_ds.dataset.default_value
if (
isinstance(real_ds_obj, (TabularDataset, OutputTabularDatasetConfig, LinkTabularOutputDatasetConfig))
) and in_ds.mode != "direct":
raise Exception("Please ensure input dataset consumption mode is direct")
if isinstance(real_ds_obj, FileDataset) and self._partition_keys and in_ds.mode != "direct":
if is_primary_input:
raise Exception("Please ensure input dataset consumption mode is direct for partitioned dataset")
ds_mapping_type = INPUT_TYPE_DICT[type(real_ds_obj)]
elif input_type == PipelineOutputFileDataset or input_type == PipelineOutputTabularDataset:
            # Dataset mode needs to be direct except when we convert it to a data reference.
            # This will be removed in the next release.
if input_type == PipelineOutputTabularDataset and in_ds._input_mode != "direct":
raise Exception("Please ensure pipeline input dataset consumption mode is direct")
ds_mapping_type = INPUT_TYPE_DICT[input_type]
else:
raise Exception("Step input must be of any type: {}, found {}".format(ALLOWED_INPUT_TYPES, input_type))
return ds_mapping_type
def _validate_inputs(self):
"""Validate all inputs are same type and ensure they meet dataset requirement."""
assert (isinstance(self._inputs, list) and self._inputs != []) or self._rank_mini_batch_count > 0, (
"The parameter 'inputs' must be a list and have at least one element"
" or rank_mini_batch_count must be greater than zero."
)
if self._inputs:
self._input_ds_type = self._get_input_type(self._inputs[0])
for input_ds in self._inputs:
if self._input_ds_type != self._get_input_type(input_ds):
raise Exception("All inputs of step must be same type")
        else:  # self._rank_mini_batch_count > 0: default to FileDataset for input-less (rank-only) tasks.
self._input_ds_type = FILE_TYPE_INPUT
def _validate_output(self):
if self._parallel_run_config.output_action.lower() != "summary_only" and self._output is None:
raise Exception("Please specify output parameter.")
if self._output is not None:
self._output = [self._output]
def _validate_parallel_run_config(self):
"""Validate parallel run config."""
if not isinstance(self._parallel_run_config, _ParallelRunConfigBase):
raise Exception("Param parallel_run_config must be a azureml.pipeline.steps.ParallelRunConfig")
if self._parallel_run_config.mini_batch_size is None:
if self._input_ds_type == FILE_TYPE_INPUT:
self._mini_batch_size = DEFAULT_MINI_BATCH_SIZE_FILEDATASET
elif self._input_ds_type == TABULAR_TYPE_INPUT:
self._mini_batch_size = DEFAULT_MINI_BATCH_SIZE_TABULARDATASET
def _validate_source_directory(self):
"""Validate the source_directory param."""
source_dir = self._parallel_run_config.source_directory
if source_dir and source_dir != "":
if not os.path.exists(source_dir):
raise ValueError("The value '{0}' specified in source_directory doesn't exist.".format(source_dir))
if not os.path.isdir(source_dir):
raise ValueError(
"The value '{0}' specified in source_directory is not a directory.".format(source_dir)
)
full_path = os.path.abspath(source_dir)
if full_path not in sys.path:
sys.path.insert(0, full_path)
def _validate_entry_script(self):
"""Validate the entry script."""
source_dir = self._parallel_run_config.source_directory
entry_script = self._parallel_run_config.entry_script
        # Whether entry_script is required is verified during ParallelRunConfig validation,
        # so we don't verify that again here.
if entry_script and entry_script != "":
if source_dir and source_dir != "":
# entry script must be in this directory
full_path = os.path.join(source_dir, entry_script)
if not os.path.exists(full_path):
raise ValueError("The value '{0}' specified in entry_script doesn't exist.".format(entry_script))
if not os.path.isfile(full_path):
raise ValueError("The value '{0}' specified in entry_script is not a file.".format(entry_script))
def _convert_to_mount_mode(self, in_ds):
"""Convert inputs into mount mode."""
if isinstance(in_ds, PipelineOutputFileDataset):
if in_ds._input_mode != "mount" or in_ds._input_path_on_compute is None:
return in_ds.as_mount()
elif isinstance(in_ds, DatasetConsumptionConfig):
if in_ds.mode != "mount" or in_ds.path_on_compute is None:
return in_ds.as_mount()
return in_ds
def _get_pystep_inputs(self):
"""Process and convert inputs before adding to pystep_inputs."""
def _process_file_dataset(file_ds):
"""Process file dataset."""
if self.running_on_windows:
self._pystep_inputs.append(file_ds)
else:
mounted_ds = self._convert_to_mount_mode(file_ds)
self._pystep_inputs.append(mounted_ds)
if self._partition_keys:
self._pystep_inputs = self._inputs
else:
if self._input_ds_type == FILE_TYPE_INPUT:
for input_ds in self._inputs:
_process_file_dataset(input_ds)
elif self._input_ds_type == TABULAR_TYPE_INPUT:
self._pystep_inputs = self._inputs
def _handle_side_inputs(self):
"""Handle side inputs."""
for input_ds in self._side_inputs:
if type(input_ds) != PipelineData and type(input_ds) != DataReference:
input_type = self._get_input_type(input_ds, is_primary_input=False)
if input_type == FILE_TYPE_INPUT:
mounted_ds = self._convert_to_mount_mode(input_ds)
self._pystep_side_inputs.append(mounted_ds)
# Update original DatasetConsumptionConfig reference in arguments to
# new DatasetConsumptionConfig with mount
if self._arguments is not None and isinstance(self._arguments, list):
for arg_index, side_input_arg in enumerate(self._arguments):
if side_input_arg == input_ds:
self._arguments[arg_index] = mounted_ds
break
continue
self._pystep_side_inputs.append(input_ds)
def _get_pipeline_runconfig_params(self):
"""
Generate pipeline parameters for runconfig.
:return: runconfig pipeline parameters
:rtype: dict
"""
prun_runconfig_pipeline_params = {}
if isinstance(self._node_count, PipelineParameter):
prun_runconfig_pipeline_params["NodeCount"] = self._node_count
return prun_runconfig_pipeline_params
def _generate_runconfig(self):
"""
Generate runconfig for parallel run step.
:return: runConfig
:rtype: RunConfig
"""
run_config = RunConfiguration()
if isinstance(self._node_count, PipelineParameter):
run_config.node_count = self._node_count.default_value
else:
run_config.node_count = self._node_count
if isinstance(self._input_compute_target, AmlCompute):
run_config.target = self._input_compute_target
run_config.framework = "Python"
# For AmlCompute we need to set run_config.docker.use_docker = True
run_config.docker.use_docker = True
run_config.environment = self._parallel_run_config.environment
run_config.environment_variables.update(self._parallel_run_config.environment_variables)
if self.running_on_windows:
# For windows compute, always use "ParallelTask".
run_config.communicator = "ParallelTask"
conda_dependencies = run_config.environment.python.conda_dependencies
pip_packages = list(conda_dependencies.pip_packages) if conda_dependencies else []
self._check_required_pip_packages(pip_packages)
return run_config
def _check_required_pip_packages(self, pip_packages):
"""Check whether required pip package added"""
findings_core = [
pip for pip in pip_packages if pip.startswith("azureml") and not pip.startswith("azureml-dataset-runtime")
]
if not findings_core:
warnings.warn(
"""
ParallelRunStep requires azureml-core package to provide the functionality.
Please add azureml-core package in CondaDependencies.""",
UserWarning,
)
        # Search to see if any other package may have included it as a direct or transitive dependency
required_dataprep_packages = REQUIRED_DATAPREP_PACKAGES[self._input_ds_type]
findings_dataprep = filter(
lambda x: [pip for pip in pip_packages if pip.startswith(x)], required_dataprep_packages
)
if not next(findings_dataprep, False):
extra = REQUIRED_DATAPREP_EXTRAS[self._input_ds_type]
warnings.warn(
"""
ParallelRunStep requires azureml-dataset-runtime[{}] for {} dataset.
Please add relevant package in CondaDependencies.""".format(
extra, self._input_ds_type
),
UserWarning,
)
def _generate_main_file_args(self):
"""
Generate main args for entry script.
:return: The generated main args for entry script.
:rtype: array
"""
main_args = [
"--client_sdk_version",
azureml.core.VERSION,
"--scoring_module_name",
self._parallel_run_config.entry_script,
"--mini_batch_size",
self._mini_batch_size,
"--error_threshold",
self._error_threshold,
"--output_action",
self._parallel_run_config.output_action,
"--logging_level",
self._logging_level,
"--run_invocation_timeout",
self._run_invocation_timeout,
"--run_max_try",
self._run_max_try,
"--create_snapshot_at_runtime",
"True",
]
if self._allowed_failed_count is not None:
main_args += ["--allowed_failed_count", self._allowed_failed_count]
if self._allowed_failed_percent is not None:
main_args += ["--allowed_failed_percent", self._allowed_failed_percent]
        # Use this intermediate variable to avoid flake8 W503 (line break before binary operator)
is_append_row = self._parallel_run_config.output_action.lower() == "append_row"
if is_append_row and self._parallel_run_config.append_row_file_name is not None:
main_args += ["--append_row_file_name", self._parallel_run_config.append_row_file_name]
if self._output is not None:
main_args += ["--output", self._output[0]]
if self._process_count_per_node is not None:
main_args += ["--process_count_per_node", self._process_count_per_node]
if self._partition_keys is not None:
main_args += ["--partition_keys"]
if isinstance(self._partition_keys, PipelineParameter):
main_args += [self._partition_keys]
else:
main_args += [json.dumps(self._partition_keys)]
if self._arguments is not None and isinstance(self._arguments, list):
main_args += self._arguments
if self._input_ds_type == TABULAR_TYPE_INPUT:
for index, in_ds in enumerate(self._pystep_inputs):
ds_name = in_ds.input_name if isinstance(in_ds, PipelineOutputTabularDataset) else in_ds.name
main_args += ["--input_ds_{0}".format(index), ds_name]
elif self._input_ds_type == FILE_TYPE_INPUT:
for index, in_ds in enumerate(self._pystep_inputs):
if isinstance(in_ds, DatasetConsumptionConfig) or isinstance(in_ds, PipelineOutputFileDataset):
ds_name = in_ds.input_name if isinstance(in_ds, PipelineOutputFileDataset) else in_ds.name
main_args += ["--input_fds_{0}".format(index), ds_name]
else:
main_args += ["--input{0}".format(index), in_ds]
        # In order to make dataset-as-pipeline-parameter work, we need to add it as a param in main_args
for index, in_ds in enumerate(self._pystep_inputs):
if isinstance(in_ds, DatasetConsumptionConfig) and isinstance(in_ds.dataset, PipelineParameter):
main_args += ["--input_pipeline_param_{0}".format(index), in_ds]
return main_args
def _generate_batch_inference_metadata(self):
"""
        Generate batch inference metadata which will be registered with the MMS service.
:return: The generated batch inference metadata.
:rtype: str
"""
def _get_default_value(in_param):
default_value = in_param
            # partition_keys is of list type and needs to be JSON dumped
if isinstance(in_param, list):
default_value = json.dumps(in_param)
if isinstance(in_param, PipelineParameter):
default_value = in_param.default_value
return default_value
batch_inferencing_metadata = {
"Name": self._name,
"ComputeName": self._input_compute_target
if isinstance(self._input_compute_target, str)
else self._input_compute_target.name,
"EntryScript": self._parallel_run_config.entry_script,
"NodeCount": _get_default_value(self._node_count),
"ProcessCountPerNode": _get_default_value(self._process_count_per_node),
"MiniBatchSize": _get_default_value(self._mini_batch_size),
"PartitionKeys": _get_default_value(self._partition_keys),
"ErrorThreshold": _get_default_value(self._parallel_run_config.error_threshold),
"OutputAction": self._parallel_run_config.output_action,
"EnvironmentName": self._parallel_run_config.environment.name,
"EnvironmentVersion": self._parallel_run_config.environment.version,
"version": PARALLEL_RUN_VERSION,
"platform": self._platform,
"RunInvocationTimeout": _get_default_value(self._run_invocation_timeout),
"RunMaxTry": _get_default_value(self._run_max_try),
"LoggingLevel": _get_default_value(self._logging_level),
}
val = _get_default_value(self._parallel_run_config.allowed_failed_count)
if val is not None:
batch_inferencing_metadata["AllowedFailedCount"] = val
val = _get_default_value(self._parallel_run_config.allowed_failed_percent)
if val is not None:
batch_inferencing_metadata["AllowedFailedPercent"] = val
return json.dumps(batch_inferencing_metadata)
def _process_inputs_output_dataset_configs(self):
if not self._inputs:
return
for i in range(len(self._inputs)):
input = self._inputs[i]
if isinstance(input, OutputDatasetConfig):
self._inputs[i] = input.as_input()
if self._arguments and input in self._arguments:
arg_index = self._arguments.index(input)
self._arguments[arg_index] = self._inputs[i]
if self._side_inputs:
for i in range(len(self._side_inputs)):
side_input = self._side_inputs[i]
if isinstance(side_input, OutputDatasetConfig):
self._side_inputs[i] = side_input.as_input()
if self._arguments and side_input in self._arguments:
arg_index = self._arguments.index(side_input)
self._arguments[arg_index] = self._side_inputs[i]
if isinstance(self._output, OutputFileDatasetConfig):
# Windows compute does not support mount, set to upload mode here.
if self.running_on_windows:
if self._output.mode != UPLOAD_MODE:
self._output = self._output.as_upload()
def create_node(self, graph, default_datastore, context):
"""
Create a node for :class:`azureml.pipeline.steps.PythonScriptStep` and add it to the specified graph.
This method is not intended to be used directly. When a pipeline is instantiated with ParallelRunStep,
Azure Machine Learning automatically passes the parameters required through this method so that the step
can be added to a pipeline graph that represents the workflow.
:param graph: Graph object.
:type graph: azureml.pipeline.core.graph.Graph
:param default_datastore: Default datastore.
:type default_datastore: azureml.data.azure_storage_datastore.AbstractAzureStorageDatastore or
azureml.data.azure_data_lake_datastore.AzureDataLakeDatastore
:param context: Context.
:type context: azureml.pipeline.core._GraphContext
:return: The created node.
:rtype: azureml.pipeline.core.graph.Node
"""
node = super(_ParallelRunStepBase, self).create_node(graph, default_datastore, context)
node.get_param("BatchInferencingMetaData").set_value(self._generate_batch_inference_metadata())
node.get_param("Script").set_value(DEFAULT_BATCH_SCORE_MAIN_FILE_NAME)
return node
def create_module_def(
self,
execution_type,
input_bindings,
output_bindings,
param_defs=None,
create_sequencing_ports=True,
allow_reuse=True,
version=None,
arguments=None,
):
"""
Create the module definition object that describes the step.
This method is not intended to be used directly.
:param execution_type: The execution type of the module.
:type execution_type: str
:param input_bindings: The step input bindings.
:type input_bindings: list
:param output_bindings: The step output bindings.
:type output_bindings: list
:param param_defs: The step param definitions.
:type param_defs: list
:param create_sequencing_ports: If true, sequencing ports will be created for the module.
:type create_sequencing_ports: bool
:param allow_reuse: If true, the module will be available to be reused in future Pipelines.
:type allow_reuse: bool
:param version: The version of the module.
:type version: str
:param arguments: Annotated arguments list to use when calling this module.
:type arguments: builtin.list
:return: The module def object.
:rtype: azureml.pipeline.core.graph.ModuleDef
"""
if param_defs is None:
param_defs = []
else:
param_defs = list(param_defs)
batch_inference_metadata_param_def = ParamDef(
name="BatchInferencingMetaData",
set_env_var=False,
is_metadata_param=True,
default_value="None",
env_var_override=False,
)
param_defs.append(batch_inference_metadata_param_def)
return super(_ParallelRunStepBase, self).create_module_def(
execution_type=execution_type,
input_bindings=input_bindings,
output_bindings=output_bindings,
param_defs=param_defs,
create_sequencing_ports=create_sequencing_ports,
allow_reuse=allow_reuse,
version=version,
module_type="BatchInferencing",
arguments=arguments,
)
|
PypiClean
|
/octodns_hetzner-0.0.2-py3-none-any.whl/octodns_hetzner/__init__.py
|
from collections import defaultdict
from requests import Session
import logging
from octodns.record import Record
from octodns.provider import ProviderException
from octodns.provider.base import BaseProvider
__VERSION__ = '0.0.2'
class HetznerClientException(ProviderException):
pass
class HetznerClientNotFound(HetznerClientException):
def __init__(self):
super(HetznerClientNotFound, self).__init__('Not Found')
class HetznerClientUnauthorized(HetznerClientException):
def __init__(self):
super(HetznerClientUnauthorized, self).__init__('Unauthorized')
class HetznerClient(object):
BASE_URL = 'https://dns.hetzner.com/api/v1'
def __init__(self, token):
session = Session()
session.headers.update({'Auth-API-Token': token})
self._session = session
def _do(self, method, path, params=None, data=None):
url = f'{self.BASE_URL}{path}'
response = self._session.request(method, url, params=params, json=data)
if response.status_code == 401:
raise HetznerClientUnauthorized()
if response.status_code == 404:
raise HetznerClientNotFound()
response.raise_for_status()
return response
def _do_json(self, method, path, params=None, data=None):
return self._do(method, path, params, data).json()
def zone_get(self, name):
params = {'name': name}
return self._do_json('GET', '/zones', params)['zones'][0]
def zone_create(self, name, ttl=None):
data = {'name': name, 'ttl': ttl}
return self._do_json('POST', '/zones', data=data)['zone']
def zone_records_get(self, zone_id):
params = {'zone_id': zone_id}
records = self._do_json('GET', '/records', params=params)['records']
for record in records:
if record['name'] == '@':
record['name'] = ''
return records
def zone_record_create(self, zone_id, name, _type, value, ttl=None):
data = {'name': name or '@', 'ttl': ttl, 'type': _type, 'value': value,
'zone_id': zone_id}
self._do('POST', '/records', data=data)
def zone_record_delete(self, zone_id, record_id):
self._do('DELETE', f'/records/{record_id}')
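# --- Editor's usage sketch (hedged illustration, not part of the original module) ---
# Driving the low-level client directly, assuming a valid Hetzner DNS API token;
# octodns normally constructs it through HetznerProvider below.
#
#     client = HetznerClient('<api-token>')
#     zone = client.zone_get('example.com')              # zone name without trailing dot
#     records = client.zone_records_get(zone['id'])
#     client.zone_record_create(zone['id'], 'www', 'A', '192.0.2.1', ttl=3600)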
class HetznerProvider(BaseProvider):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS_ROOT_NS = True
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'))
def __init__(self, id, token, *args, **kwargs):
self.log = logging.getLogger(f'HetznerProvider[{id}]')
self.log.debug('__init__: id=%s, token=***', id)
super(HetznerProvider, self).__init__(id, *args, **kwargs)
self._client = HetznerClient(token)
self._zone_records = {}
self._zone_metadata = {}
self._zone_name_to_id = {}
def _append_dot(self, value):
if value == '@' or value[-1] == '.':
return value
return f'{value}.'
def zone_metadata(self, zone_id=None, zone_name=None):
if zone_name is not None:
if zone_name in self._zone_name_to_id:
zone_id = self._zone_name_to_id[zone_name]
else:
zone = self._client.zone_get(name=zone_name[:-1])
zone_id = zone['id']
self._zone_name_to_id[zone_name] = zone_id
self._zone_metadata[zone_id] = zone
return self._zone_metadata[zone_id]
def _record_ttl(self, record):
default_ttl = self.zone_metadata(zone_id=record['zone_id'])['ttl']
return record['ttl'] if 'ttl' in record else default_ttl
def _data_for_multiple(self, _type, records):
values = [record['value'].replace(';', '\\;') for record in records]
return {
'ttl': self._record_ttl(records[0]),
'type': _type,
'values': values
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
def _data_for_CAA(self, _type, records):
values = []
for record in records:
            # CAA values come back as 'flags tag "value"'; drop the spaces to read the
            # flags digit and tag, but take the quoted value from the original string
            value_without_spaces = record['value'].replace(' ', '')
            flags = value_without_spaces[0]
            tag = value_without_spaces[1:].split('"')[0]
            value = record['value'].split('"')[1]
values.append({
'flags': int(flags),
'tag': tag,
'value': value,
})
return {
'ttl': self._record_ttl(records[0]),
'type': _type,
'values': values
}
def _data_for_CNAME(self, _type, records):
record = records[0]
return {
'ttl': self._record_ttl(record),
'type': _type,
'value': self._append_dot(record['value'])
}
def _data_for_MX(self, _type, records):
values = []
for record in records:
value_stripped_split = record['value'].strip().split(' ')
preference = value_stripped_split[0]
exchange = value_stripped_split[-1]
values.append({
'preference': int(preference),
'exchange': self._append_dot(exchange)
})
return {
'ttl': self._record_ttl(records[0]),
'type': _type,
'values': values
}
def _data_for_NS(self, _type, records):
values = []
for record in records:
values.append(self._append_dot(record['value']))
return {
'ttl': self._record_ttl(records[0]),
'type': _type,
'values': values,
}
def _data_for_SRV(self, _type, records):
values = []
for record in records:
            # SRV data is a single "priority weight port target" string; slice from
            # both ends so extra internal whitespace between fields is tolerated
            value_stripped = record['value'].strip()
            priority = value_stripped.split(' ')[0]
            weight = value_stripped[len(priority):].strip().split(' ')[0]
            target = value_stripped.split(' ')[-1]
            port = value_stripped[:-len(target)].strip().split(' ')[-1]
values.append({
'port': int(port),
'priority': int(priority),
'target': self._append_dot(target),
'weight': int(weight)
})
return {
'ttl': self._record_ttl(records[0]),
'type': _type,
'values': values
}
_data_for_TXT = _data_for_multiple
def zone_records(self, zone):
if zone.name not in self._zone_records:
try:
zone_id = self.zone_metadata(zone_name=zone.name)['id']
self._zone_records[zone.name] = \
self._client.zone_records_get(zone_id)
except HetznerClientNotFound:
return []
return self._zone_records[zone.name]
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
values = defaultdict(lambda: defaultdict(list))
for record in self.zone_records(zone):
_type = record['type']
if _type not in self.SUPPORTS:
self.log.warning('populate: skipping unsupported %s record',
_type)
continue
values[record['name']][record['type']].append(record)
before = len(zone.records)
for name, types in values.items():
for _type, records in types.items():
data_for = getattr(self, f'_data_for_{_type}')
record = Record.new(zone, name, data_for(_type, records),
source=self, lenient=lenient)
zone.add_record(record, lenient=lenient)
exists = zone.name in self._zone_records
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _params_for_multiple(self, record):
for value in record.values:
yield {
'value': value.replace('\\;', ';'),
'name': record.name,
'ttl': record.ttl,
'type': record._type
}
_params_for_A = _params_for_multiple
_params_for_AAAA = _params_for_multiple
def _params_for_CAA(self, record):
for value in record.values:
data = f'{value.flags} {value.tag} "{value.value}"'
yield {
'value': data,
'name': record.name,
'ttl': record.ttl,
'type': record._type
}
def _params_for_single(self, record):
yield {
'value': record.value,
'name': record.name,
'ttl': record.ttl,
'type': record._type
}
_params_for_CNAME = _params_for_single
def _params_for_MX(self, record):
for value in record.values:
data = f'{value.preference} {value.exchange}'
yield {
'value': data,
'name': record.name,
'ttl': record.ttl,
'type': record._type
}
_params_for_NS = _params_for_multiple
def _params_for_SRV(self, record):
for value in record.values:
data = f'{value.priority} {value.weight} {value.port} ' \
f'{value.target}'
yield {
'value': data,
'name': record.name,
'ttl': record.ttl,
'type': record._type
}
_params_for_TXT = _params_for_multiple
def _apply_Create(self, zone_id, change):
new = change.new
params_for = getattr(self, f'_params_for_{new._type}')
for params in params_for(new):
self._client.zone_record_create(zone_id, params['name'],
params['type'], params['value'],
params['ttl'])
def _apply_Update(self, zone_id, change):
# It's way simpler to delete-then-recreate than to update
self._apply_Delete(zone_id, change)
self._apply_Create(zone_id, change)
def _apply_Delete(self, zone_id, change):
existing = change.existing
zone = existing.zone
for record in self.zone_records(zone):
if existing.name == record['name'] and \
existing._type == record['type']:
self._client.zone_record_delete(zone_id, record['id'])
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
try:
zone_id = self.zone_metadata(zone_name=desired.name)['id']
except HetznerClientNotFound:
self.log.debug('_apply: no matching zone, creating domain')
zone_id = self._client.zone_create(desired.name[:-1])['id']
for change in changes:
class_name = change.__class__.__name__
getattr(self, f'_apply_{class_name}')(zone_id, change)
# Clear out the cache if any
self._zone_records.pop(desired.name, None)
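# --- Editor's usage sketch (hedged illustration, not part of the original module) ---
# octodns normally instantiates the provider from its YAML config; built directly it
# only needs an id and the API token (remaining args are passed to BaseProvider):
#
#     provider = HetznerProvider('hetzner', token='<api-token>')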
|
PypiClean
|
/CoCa_pytorch-0.0.9-py3-none-any.whl/coca_pytorch/coca_pytorch.py
|
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
# to latents
class EmbedToLatents(nn.Module):
def __init__(self, dim, dim_latents):
super().__init__()
self.to_latents = nn.Linear(dim, dim_latents, bias=False)
def forward(self, x):
latents = self.to_latents(x)
return F.normalize(latents, dim=-1)
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.mask = None
self.pos_emb = None
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n].to(device)
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.mask = mask
return mask
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n].to(device)
pos_emb = self.rotary_emb(n, device=device)
self.pos_emb = pos_emb
return pos_emb
def forward(self, x, attn_mask=None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# extra attention mask - for masking out attention from text CLS token to padding
if exists(attn_mask):
attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j')
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
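# editor's note - a hedged shape sketch for the block above, with illustrative sizes:
#   block = ParallelTransformerBlock(dim=512, dim_head=64, heads=8)
#   x = torch.randn(2, 128, 512)   # (batch, seq, dim)
#   block(x).shape                 # -> torch.Size([2, 128, 512]), causal attention + SwiGLU ff in parallel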
# cross attention - using multi-query + one-headed key / values as in PaLM w/ optional parallel feedforward
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim=None,
dim_head=64,
heads=8,
parallel_ff=False,
ff_mult=4,
norm_context=False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity()
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether to have parallel feedforward
ff_inner_dim = ff_mult * dim
self.ff = nn.Sequential(
nn.Linear(dim, ff_inner_dim * 2, bias=False),
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
) if parallel_ff else None
def forward(self, x, context):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
# pre-layernorm, for queries and context
x = self.norm(x)
context = self.context_norm(context)
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# scale
q = q * self.scale
# get key / values
k, v = self.to_kv(context).chunk(2, dim=-1)
# query / key similarity
sim = einsum('b h i d, b j d -> b h i j', q, k)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True)
attn = sim.softmax(dim=-1)
# aggregate
out = einsum('b h i j, b j d -> b h i d', attn, v)
# merge and combine heads
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
# add parallel feedforward (for multimodal layers)
if exists(self.ff):
out = out + self.ff(x)
return out
# transformer
class CoCa(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
unimodal_depth,
multimodal_depth,
dim_latents = None,
image_dim = None,
num_img_queries=256,
dim_head=64,
heads=8,
ff_mult=4,
img_encoder=None,
caption_loss_weight=1.,
contrastive_loss_weight=1.,
pad_id=0
):
super().__init__()
self.dim = dim
self.pad_id = pad_id
self.caption_loss_weight = caption_loss_weight
self.contrastive_loss_weight = contrastive_loss_weight
# token embeddings
self.token_emb = nn.Embedding(num_tokens, dim)
self.text_cls_token = nn.Parameter(torch.randn(dim))
# image encoder
self.img_encoder = img_encoder
# attention pooling for image tokens
self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning
self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True)
self.img_attn_pool_norm = LayerNorm(dim)
self.text_cls_norm = LayerNorm(dim)
# to latents
dim_latents = default(dim_latents, dim)
self.img_to_latents = EmbedToLatents(dim, dim_latents)
self.text_to_latents = EmbedToLatents(dim, dim_latents)
# contrastive learning temperature
self.temperature = nn.Parameter(torch.Tensor([1.]))
# unimodal layers
self.unimodal_layers = nn.ModuleList([])
for ind in range(unimodal_depth):
self.unimodal_layers.append(
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
)
# multimodal layers
self.multimodal_layers = nn.ModuleList([])
for ind in range(multimodal_depth):
self.multimodal_layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult))
]))
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
def embed_text(self, text):
batch, device = text.shape[0], text.device
seq = text.shape[1]
text_tokens = self.token_emb(text)
# append text cls tokens
text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch)
text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2)
# create specific mask for text cls token at the end
# to prevent it from attending to padding
cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j')
attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True)
# go through unimodal layers
for attn_ff in self.unimodal_layers:
text_tokens = attn_ff(text_tokens, attn_mask=attn_mask)
# get text cls token
text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1]
text_embeds = self.text_cls_norm(text_cls_tokens)
return text_embeds, text_tokens
def embed_image(self, images=None, image_tokens=None):
# encode images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image tokens
assert not (exists(images) and exists(image_tokens))
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
image_tokens = self.img_encoder(images)
# attention pool image tokens
img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0])
img_queries = self.img_attn_pool(img_queries, image_tokens)
img_queries = self.img_attn_pool_norm(img_queries)
return img_queries[:, 0], img_queries[:, 1:]
def forward(
self,
text,
images=None,
image_tokens=None,
labels=None,
return_loss=False,
return_embeddings=False
):
batch, device = text.shape[0], text.device
if return_loss and not exists(labels):
text, labels = text[:, :-1], text[:, 1:]
text_embeds, text_tokens = self.embed_text(text)
image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens)
# return embeddings if that is what the researcher wants
if return_embeddings:
return text_embeds, image_embeds
# go through multimodal layers
for attn_ff, cross_attn in self.multimodal_layers:
text_tokens = attn_ff(text_tokens)
text_tokens = cross_attn(text_tokens, image_tokens)
logits = self.to_logits(text_tokens)
if not return_loss:
return logits
# shorthand
ce = F.cross_entropy
# calculate caption loss (cross entropy loss)
logits = rearrange(logits, 'b n c -> b c n')
caption_loss = ce(logits, labels, ignore_index=self.pad_id)
caption_loss = caption_loss * self.caption_loss_weight
# embedding to latents
text_latents = self.text_to_latents(text_embeds)
image_latents = self.img_to_latents(image_embeds)
# calculate contrastive loss
sim = einsum('i d, j d -> i j', text_latents, image_latents)
sim = sim * self.temperature.exp()
contrastive_labels = torch.arange(batch, device=device)
contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5
contrastive_loss = contrastive_loss * self.contrastive_loss_weight
return caption_loss + contrastive_loss
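# --- Editor's usage sketch (hedged illustration, not part of the original module) ---
# Illustrative smoke test with made-up hyperparameters and random tensors; real use
# would pass a vision backbone via `img_encoder`, or precomputed `image_tokens` of
# shape (batch, num_image_tokens, image_dim) as done here.
if __name__ == "__main__":
    coca = CoCa(
        dim=512,
        num_tokens=20000,
        unimodal_depth=2,
        multimodal_depth=2,
        image_dim=1024,
        num_img_queries=64,
    )
    text = torch.randint(0, 20000, (2, 128))      # token ids
    image_tokens = torch.randn(2, 196, 1024)      # e.g. ViT patch embeddings
    # combined caption + contrastive loss
    loss = coca(text, image_tokens=image_tokens, return_loss=True)
    # paired embeddings for retrieval-style use
    text_embeds, image_embeds = coca(text, image_tokens=image_tokens, return_embeddings=True)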
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/governance_role_assignment_requests/item/subject/subject_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from ....models import governance_subject
from ....models.o_data_errors import o_data_error
class SubjectRequestBuilder():
"""
Provides operations to manage the subject property of the microsoft.graph.governanceRoleAssignmentRequest entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new SubjectRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/governanceRoleAssignmentRequests/{governanceRoleAssignmentRequest%2Did}/subject{?%24select,%24expand}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
async def delete(self,request_configuration: Optional[SubjectRequestBuilderDeleteRequestConfiguration] = None) -> None:
"""
Delete navigation property subject for governanceRoleAssignmentRequests
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
"""
request_info = self.to_delete_request_information(
request_configuration
)
from ....models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_no_response_content_async(request_info, error_mapping)
async def get(self,request_configuration: Optional[SubjectRequestBuilderGetRequestConfiguration] = None) -> Optional[governance_subject.GovernanceSubject]:
"""
Read-only. The user/group principal.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[governance_subject.GovernanceSubject]
"""
request_info = self.to_get_request_information(
request_configuration
)
from ....models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from ....models import governance_subject
return await self.request_adapter.send_async(request_info, governance_subject.GovernanceSubject, error_mapping)
async def patch(self,body: Optional[governance_subject.GovernanceSubject] = None, request_configuration: Optional[SubjectRequestBuilderPatchRequestConfiguration] = None) -> Optional[governance_subject.GovernanceSubject]:
"""
Update the navigation property subject in governanceRoleAssignmentRequests
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[governance_subject.GovernanceSubject]
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.to_patch_request_information(
body, request_configuration
)
from ....models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from ....models import governance_subject
return await self.request_adapter.send_async(request_info, governance_subject.GovernanceSubject, error_mapping)
def to_delete_request_information(self,request_configuration: Optional[SubjectRequestBuilderDeleteRequestConfiguration] = None) -> RequestInformation:
"""
Delete navigation property subject for governanceRoleAssignmentRequests
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.DELETE
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
return request_info
def to_get_request_information(self,request_configuration: Optional[SubjectRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
Read-only. The user/group principal.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
def to_patch_request_information(self,body: Optional[governance_subject.GovernanceSubject] = None, request_configuration: Optional[SubjectRequestBuilderPatchRequestConfiguration] = None) -> RequestInformation:
"""
Update the navigation property subject in governanceRoleAssignmentRequests
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.PATCH
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
@dataclass
class SubjectRequestBuilderDeleteRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
@dataclass
class SubjectRequestBuilderGetQueryParameters():
"""
Read-only. The user/group principal.
"""
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "expand":
return "%24expand"
if original_name == "select":
return "%24select"
return original_name
# Expand related entities
expand: Optional[List[str]] = None
# Select properties to be returned
select: Optional[List[str]] = None
@dataclass
class SubjectRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[SubjectRequestBuilder.SubjectRequestBuilderGetQueryParameters] = None
@dataclass
class SubjectRequestBuilderPatchRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
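# --- Hypothetical usage sketch (not part of the generated file) ---
# `builder` is assumed to be an already constructed SubjectRequestBuilder wired to a
# request adapter; the generated delete/get/patch methods are awaitable coroutines.
async def _example_read_subject(builder: SubjectRequestBuilder) -> None:
    # Sends GET .../subject and deserializes the payload into a GovernanceSubject.
    subject = await builder.get()
    if subject:
        print(subject.id)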
/api-shop-1.14.1.tar.gz/api-shop-1.14.1/api_shop/api_shop.py
import json
import traceback
import os
import re
import time
import importlib
from .i18n import i18n_init
from .__init__ import __version__
from .url_parse import parse_rule
from .autofill import auto_fill, check_fill_methods
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
i18 = i18n_init('zh')
_ = i18._
class Namespace(dict):
def __getattr__(self, name):
if name in self.keys():
return self.get(name)
else:
raise AttributeError(_('no attributes found')+'{}'.format(name))
def __setattr__(self, name, value):
self.update({name: value})
class ApiInit(Namespace):
'''Interface initialization container, used internally to pass state and to load configuration for multi-framework support'''
# Basic framework configuration
class FW(Namespace):
# Methods that each framework needs to expose.
# For example, if api.framework.name=='django', api.framework.JsonResponse
# loads Django's JsonResponse method.
framework_models = {
'django': {
'django.http': ['JsonResponse', 'HttpResponse'],
},
'flask': {
'flask': ['render_template_string', 'jsonify']
},
'bottle': {
'bottle': ['template', 'HTTPResponse']
}
}
framework_order = ['django', 'flask', 'bottle']
framework_return = {
'django': {
'json': 'JsonResponse',
'json_status_code_in_func': True,
'http': 'HttpResponse',
'http_status_code_in_func': True,
'template': 'HttpResponse'
},
'flask': {
'json': 'jsonify',
'json_status_code_in_func': False,
'http': None,
'http_status_code_in_func': False,
'template': None # return the string directly
},
'bottle': {
'json': 'HTTPResponse',
'json_status_code_in_func': True, # whether the JSON status code is passed inside the call
'http': 'HTTPResponse',
'http_status_code_in_func': True, # whether the HTTP status code is passed inside the call
'template': None # return the string directly
},
}
def template(self, string):
# Render a template string
if self.func_dict.get('template'):
return self[self.func_dict.get('template')](string)
else:
return string
def json(self, data, status_code=200):
# Return a JSON response
model_name = self.func_dict.get('json')
flag = self.func_dict.get('json_status_code_in_func')
if model_name:
if flag:
return self[model_name](data, status=status_code)
else:
return self[model_name](data), status_code
else:
return data, status_code
def http(self, data, status_code=200):
# Return an HTTP response
model_name = self.func_dict.get('http')
flag = self.func_dict.get('http_status_code_in_func')
if model_name:
if flag:
return self[model_name](data, status=status_code)
else:
return self[model_name](data), status_code
else:
return data, status_code
def load_fw_model(self, fwname, err_out=False):
'''Load the methods required by the given framework'''
if not self.framework_models.get(fwname):
# This framework is not supported yet
raise BaseException(_('Not support') + ' {} , ('.format(fwname) + _(
'supported framework as follows:') + ' , '.join(self.framework_order) + ')')
current_fw = self.framework_models.get(fwname)
haserr = False
for path in current_fw:
try:
model = importlib.import_module(path)
except:
return
for key in current_fw[path]:
if not hasattr(model, key):
if err_out:
raise BaseException(
_('Framework version is not compatible.'))
else:
haserr = True
self[key] = getattr(model, key)
if not haserr:
self.name = fwname
def __init__(self, fwname=None):
self.name = None
if fwname and type(fwname) == str:
self.load_fw_model(fwname.lower(), True)
else:
# Try the frameworks in the default order
for fwname in self.framework_order:
self.load_fw_model(fwname)
if not self.name:
if not fwname:
raise BaseException(
_('supported framework as follows:') + ' , '.join(self.framework_order))
else:
raise BaseException(_('Did not find the framework') + fwname)
self.func_dict = self.framework_return.get(self.name)
# Use a model to build the return-value documentation
class ApiResponseModelFields():
'''Wraps a subset of a model's fields'''
def __init__(self, model, fields: list = None, return_type=set):
self.model = model
self.type = return_type
self.fwname = ''
if 'django.db.models' in str(type(model)):
self.fwname = 'django'
elif 'sqlalchemy.orm' in str(type(model)):
self.fwname = 'flask'
self.fields = fields
def __new__(cls, *args, **kwargs):
model = kwargs.get('model')
fields = kwargs.get('fields')
if len(args) >= 1:
model = args[0]
if len(args) >= 2:
fields = args[1]
if fields:
return object.__new__(cls)
return model
def get_fields(self):
# Return the model fields
nameList = []
for field in self.fields:
key = None
if type(field) == str:
# A plain string naming the field
key = field
elif self.fwname == 'django' and hasattr(field, 'field_name'):
key = field.field_name
elif self.fwname == 'flask' and hasattr(field, 'name'):
key = field.name
# raise BaseException(_('request.method and method are not equal'))
if key:
nameList.append(key)
objList = []
if self.fwname == 'django':
for obj in self.model._meta.fields:
if obj.name in nameList:
objList.append(obj)
if self.fwname == 'flask':
for obj in self.model.__table__._columns:
if obj.name in nameList:
objList.append(obj)
if self.type == set:
return set(objList)
return objList
api = ApiInit()
class ApiDataClass(Namespace):
'''API data container class'''
def __init__(self, data=None):
if data:
self.update(data)
def dict(self):
return self
def to_dict(self):
return self
def get_json(self):
return self
def is_ajax(self):
return False
def get_api_result_json(api_class, method, data=None, request=None, not200=True):
'''
Call the api code directly and get the returned json.
api_class is the business api class object (not an instance)
method is the request method, as a str
data is the extra data, as a dict
request=None is the current request; if method and request.method differ, wrap a request suitable for the business code yourself; if the business code does not use the request, do not pass it in.
not200=True allows results whose status_code is not 200; when False, anything other than 200 aborts and raises an error
return json, status_code
'''
print(_('Please use the ApiShop.api_run instance method instead of this method, this method will be removed in later versions!!'))
response = get_api_result_response(
api_class, method, data, request, not200)
if not response:
# No result
return None
status_code = getattr(response, 'status_code')
if not200 == False and status_code != 200:
raise BaseException(_('api-shop return result is not success.'))
fw = api.get('framework')
fwname = fw.get('name')
if fwname == 'flask':
if hasattr(response, 'is_json') and response.is_json:
ret = response.get_json()
else:
ret = None
if fwname == 'django':
if response.content:
ret = json.loads(response.content)
else:
ret = None
if fwname == 'bottle':
ret = response.body
return ret, status_code
def get_api_result_response(api_class, method, data=None, request=None, not200=True):
'''
Call the api business code while bypassing parameter checks.
Returns the response.
api_class is the business api class object (not an instance)
method is the request method, as a str
data is the extra data, as a dict
request=None is the current request; it can be omitted if the business code does not use it
not200=True allows results whose status_code is not 200; when False, anything other than 200 aborts and raises an error
The return value is the response
'''
print(_('Please use the ApiShop.api_run instance method instead of this method, this method will be removed in later versions!!'))
d_ = ApiDataClass(data)
fw = api.get('framework')
fwname = fw.get('name')
if request:
if request.method != method:
raise BaseException(_('request.method and method are not equal'))
else:
request = ApiDataClass(data)
request.method = method
tup = api_class(request, d_)
if type(tup) == tuple:
status_code = tup[1]
res = tup[0]
elif hasattr(tup, 'status_code'):
res = tup
status_code = getattr(tup, 'status_code')
if not200 == False and status_code != 200:
raise BaseException(_('api-shop return result is not success.'))
return res
def return_response(msg=None, status_code=400):
# Return an error message
if msg:
ret = {'msg': msg}
else:
ret = {}
if not api.BAD_REQUEST:
ret.update({'status': api.bad_request_error_status})
status_code = 200
if api.use_gateway:
single_apishop = SingleApiShop.get_single_apishop()
gateway_response = None
if single_apishop:
gateway_response = single_apishop.before_running(
response=(ret, status_code)
)
if gateway_response:
return gateway_response
return api.framework.json(ret, status_code)
class Api():
'''Inherit from this class when writing the final api interface methods; usage is similar to Flask-RESTful's Resource.
Example:
class your_api(Api):
def get(self,request,data):
# request is the current request context
# data is the request payload after it has been filtered and formatted by the parameter filters
return response
def post(self,request,data):
# same as above
def put(self,request,data):
# same as above
def delete(self,request,data):
# same as above
def patch(self,request,data):
# same as above
'''
def __new__(self, request, data=None, json=None, method=None):
if not method:
method = request.method
method = method.lower()
single = SingleApiShop.get_single_apishop()
if hasattr(self, method):
func = getattr(self, method)
retdata = func(self, request, data)
status_code = 200
if type(retdata) == tuple:
ret = retdata[0]
status_code = retdata[1] or 200
else:
ret = retdata
# An empty body is allowed
if ret == None:
ret = {}
if json:
return ret, status_code
elif type(ret) == dict:
return api.framework.json(ret, status_code)
else:
return api.framework.http(ret, status_code)
else:
return return_response(_('not found in conf') + '{}'.format(method))
class ApiShop():
def __init__(self, conf=None, options=None):
'''
Configure the api factory parameters, in the following format:
conf = [
{
'url': 'login',
'class': 'account.views.api_login',
'name': 'Account login',
'methods': {
'POST': [
{'name':'username', 'type': str, 'required': True, 'min': 3, 'max': 24, 'description': 'user name'},
{'name':'password', 'type': str, 'required': True, 'min': 3, 'max': 24, 'description': 'password'},
]
}
},
]
options = {
'base_url':'/api/',# base url, used to compose the api urls exposed to the front end
}
'''
if not conf:
conf = []
self.i18n = i18
self.options = {
'version': __version__,
'base_url': '/api/', # base url, used to compose the api urls exposed to the front end
# If bad_request is true, errors return a bad request to the front end; otherwise a 200 response is always returned, carrying status=error and the error message in msg
'bad_request': True,
'bad_request_error_status': 'error',
'document': BASE_DIR + '/api_shop/static/document.html', # template used to render the documentation route
'lang': 'en',
'debug': True, # debug output is enabled by default
'docs_mock':False, # in documentation mode, whether to use mock data
'use_gateway':False, # whether a gateway layer wraps api-shop; if so, api-shop does not handle the request and returns it to the gateway instead
'auto_create_folder': False, # auto-create folders
'auto_create_file': False, # auto-create files
'auto_create_class': False, # auto-create classes
'auto_create_method': False, # auto-create methods
}
self.document_version = ''
if options:
self.options.update(options)
try:
if self.options.get('document'):
self.document_name = self.options.get('document')
else:
self.document_name = BASE_DIR + '/api_shop/static/document.html'
self.document_version = time.ctime(
os.stat(self.document_name).st_mtime)
self.document = open(self.document_name,
mode='r', encoding='utf-8').read()
except:
self.document = '<h1>' + _('document template not found') + '</h1>'
# Select the framework
api.framework = FW(self.options.get('framework'))
# Extend the language pack
if type(self.options.get('lang_pack')) == dict:
self.i18n.lang.update(self.options.get('lang_pack'))
# Switch the language
self.i18n.lang_name = self.options.get('lang')
api.BAD_REQUEST = self.options.get('bad_request', True)
api.use_gateway = self.options.get('use_gateway')
api.bad_request_error_status = self.options.get(
'bad_request_error_status')
# Dict of the currently loaded urls and their functions
self.url_dict = {}
# Dict of methods per url
self.url_methods = {}
self.api_data = []
self.conf = self.__make_model(conf)
self.__init_url_rules()
def __class_to_json(self, methods):
'''Convert the class objects inside a Python dict into strings'''
string = str(methods) # .__str__()
# Replace class names
class_list = re.compile(r"""<class '[\w|\.]*'>""", 0).findall(string)
for line in class_list:
string = string.replace(line, "'{}'".format(line.split("'")[1]))
# Replace other object names
others = re.compile(r'''<[\s|\S.]*>''', 0).findall(string)
for line in others:
try:
string = string.replace(
line, "'{}'".format(line.split(" ")[1]))
except:
string = string.replace(line, "'{}'".format(line))
return eval(string)
def __dynamic_import(self, thisconf):
name = thisconf['class']
if type(name) != str:
# An object was passed in directly
return name
components = name.split('.')
path = '.'.join(components[:-1])
try:
exec('from {} import {}'.format(path, components[-1]))
return eval(components[-1])
except Exception as ie:
if self.options.get('debug') == True:
print('\n\n******* api-shop errmsg *******\n')
print('current_api:\nurl: {}\nclass: {}\n'.format(
thisconf.get('url'), thisconf.get('class')))
traceback.print_exc()
if auto_fill(thisconf, self.options) == True:
# Auto-generate the file or method, then retry once after success.
return self.__dynamic_import(thisconf)
else:
os._exit(0)
def __make_model(self, conf):
# Load the function modules and functions referenced by strings and write them into the run_function field for convenient calling.
for i in range(len(conf)):
# model = dynamic_import(conf[i]['class'])
model = self.__dynamic_import(conf[i])
model.api_run = self.api_run
if type(conf[i]['class']) != str:
# Use the object directly
conf[i]['class'] = self.__class_to_json(conf[i]['class'])
if type(conf[i]['url']) == list:
# Multiple url forms are supported
for url in conf[i]['url']:
self.url_dict.update({
url: model,
})
self.url_methods.update({
url: conf[i]['methods'],
})
else:
self.url_dict.update({
conf[i]['url']: model,
})
self.url_methods.update({
conf[i]['url']: conf[i]['methods'],
})
conf[i]['methods'] = self.__class_to_json(conf[i]['methods'])
if hasattr(model, '__doc__'):
# Interface documentation
conf[i]['document'] = getattr(model, '__doc__')
conf[i]['methods_documents'] = {} # per-method documentation
conf[i]['methods_return'] = {} # per-method return-value documentation
if hasattr(model, 'response_docs'):
docs_obj = getattr(model, 'response_docs')
response_docs = {}
for key in ['get', 'post', 'delete', 'put', 'patch']:
if docs_obj.get(key):
nodes = docs_obj.get(key)
roots = self.__find_response_docs(key.upper(), nodes)
response_docs.update({key.upper(): roots})
conf[i]['methods_return'] = response_docs
if self.options.get('auto_create_method'):
check_fill_methods(model, conf[i])
for key in ['get', 'post', 'delete', 'put', 'patch']:
# Add the docstrings of the business-class methods to the data table
if hasattr(model, key):
mt = getattr(model, key)
if hasattr(mt, '__doc__'):
conf[i]['methods_documents'].update(
{key.upper(): getattr(mt, '__doc__')})
return conf
def __mk_django_model_field_doc(self, field):
# Turn a Django model field into a documentation field
if not hasattr(field, 'column'):
raise BaseException(
_("Django's independent fields must use the ApiResponseModelFields class"))
return {
'name': field.column,
'type': type(field).__name__,
'desc': field.verbose_name,
}
def __mk_flask_model_field_doc(self, field):
return {
'name': field.name,
'type': str(field.type),
'desc': field.comment,
}
def __find_response_docs(self, key, docs_node):
# Container layer
children = []
type_ = ''
desc = ''
str_type = str(type(docs_node))
if type(docs_node) == str:
# Hand-written descriptions use the format key:type:desc
# e.g. photos:Array:list data made up of photo url strings
arr = docs_node.split(':')
if len(arr) == 3:
key = arr[0]
type_ = arr[1]
desc = arr[2]
else:
desc = docs_node
elif type(docs_node) == dict:
# Recurse into dict containers
type_ = 'Object'
for k, v in docs_node.items():
this = self.__find_response_docs(k, v)
if type(this) == list:
children += this
elif type(this) == dict:
children.append(this)
elif type(docs_node) == set:
# A set is used to represent a single object
type_ = 'Object'
for v in docs_node:
this = self.__find_response_docs(key, v)
if type(this) == list:
children += this
elif type(this) == dict:
children.append(this)
elif type(docs_node) == list:
# A list object, possibly containing several groups of fields
type_ = 'Array'
for v in docs_node:
this = self.__find_response_docs(key, v)
if type(this) == list:
children += this
elif type(this) == dict:
children.append(this)
elif 'django.db.models' in str_type:
if hasattr(docs_node, '_meta'):
for obj in docs_node._meta.fields:
children.append(self.__mk_django_model_field_doc(obj))
else:
# A standalone Django field
children.append(self.__mk_django_model_field_doc(docs_node))
return children
elif 'ApiResponseModelFields' in str_type:
# Resolve the selected subset of fields
fields = docs_node.get_fields()
this = self.__find_response_docs(key, fields)
return this['children']
elif 'sqlalchemy.sql.schema.Column' in str_type or 'sqlalchemy.orm.attributes' in str_type:
# A standalone flask (SQLAlchemy) field
return self.__mk_flask_model_field_doc(docs_node)
elif 'sqlalchemy.orm' in str_type:
# A flask (SQLAlchemy) model
if hasattr(docs_node, '__table__'):
for obj in docs_node.__table__._columns:
children.append(self.__mk_flask_model_field_doc(obj))
return {
'name': key,
'type': type_,
'desc': desc,
'children': children
}
def __not_find_url_function(self, request):
# Fallback when no business module is found
return return_response(_('no such interface'))
def __find_api_function(self, url):
# Find the module that the api url points to
if (type(url) == tuple and len(url) > 0):
url = url[0]
key, value_dict = self.__find_url_rule(url)
model = self.url_dict.get(key)
if model:
return model, key, value_dict
return self.__not_find_url_function, None, None
def __find_api_methons(self, url):
# Find the methods configured for the api url
return self.url_methods.get(url)
def __find_url_rule(self, url):
# Match the requested url against the rule list
value_dict = {}
for obj in self.rule_maps:
url_ = url
line = obj['line']
key = obj['key']
for rule in line:
m = re.match(rule['regex'], url_)
if not m:
break
pos, end = m.span()
url_ = url_[end:] # truncate the url
if rule['type'] == 'variable':
# Dynamic lookup
value_dict.update({
rule['variable']: m.group(0) # {'value':'test'}
})
if url_:
# Leftover url means this rule line does not match
continue
else:
return key, value_dict
return None, None
def __add_url_rules(self, rule):
this_rule = []
_converter_args_re = re.compile(r'''
(?P<value>
(\w+)
)''', re.VERBOSE | re.UNICODE)
for converter, arguments, variable in parse_rule(rule):
if converter is None:
# Static segment
this_rule.append({
'regex': re.escape(variable),
'type': 'static'
})
else:
# Dynamic segment
this_rule.append({
'regex': _converter_args_re,
'variable': variable,
'converter': converter,
'type': 'variable'
})
self.rule_maps.append({
'line': this_rule,
'key': rule
})
def __init_url_rules(self):
self.rule_maps = [] # rule mapping table
for rule in self.url_dict.keys():
self.__add_url_rules(rule)
def get_parameter(self, request):
# Collect the request parameters
data = {}
# Get request data for django
if api.framework.name == 'django':
if hasattr(request,'GET') and request.GET:
for k, v in request.GET.items():
if k.endswith('[]'):
# Same-named parameters passed via GET need to be regrouped into a list
v = request.GET.getlist(k)
k = k[:-2]
data.update({k: v})
elif hasattr(request,'POST') and request.POST:
data.update(request.POST.dict())
elif hasattr(request,'is_ajax') and request.is_ajax():
# Data passed as an axios payload
# When using axios, 'X-Requested-With'='XMLHttpRequest' must be set
data.update(json.loads(request.body))
else:
try:
data.update(json.loads(request.body))
except:
pass
if api.framework.name == 'flask':
if request.args:
data.update(request.args.to_dict())
elif request.form:
data.update(request.form.to_dict())
else:
try:
# Handle some oddly wrapped requests that send a GET with Content-Type: application/json
jd = request.get_json()
if jd:
data.update(jd)
except:
pass
if api.framework.name == 'bottle':
if request.GET:
data.update(request.GET)
if request.POST:
data.update(request.POST)
if request.json:
data.update(request.json)
return data
def __verify(self, conf, name, value):
# Validate the data and convert its format
required_ = conf.get('required')
type_ = conf.get('type')
min_ = conf.get('min')
max_ = conf.get('max')
default_ = conf.get('default')
options = conf.get('options')
not_converting = False
# Cases with no value include '', [] and (); 0 is excluded because 0 is often used as a flag value
if not value and value != 0 and not default_ is None:
# If the default is a callable, run it and skip the type conversion check
if callable(default_):
value = default_()
not_converting = True
else:
value = default_
# Check required values
if required_ == True and not value and value != 0:
return _('parameter')+' {} '.format(name)+_('is required'), None
if value == '' and type_ != str:
# An empty string whose expected type is not str is converted to None
value = None
# Check empty values; some of them are still in string form here (e.g. '[]') and may therefore slip through
if not value and value != 0:
if required_:
return _('parameter')+' {} '.format(name)+_('can not be empty'), None,
else:
return None, value
# Check and convert the type
if not_converting == False and type_ and type(value) != type_:
try:
if type_ in [list, dict, set, tuple]:
# Container types: convert after JSON validation
value = type_(json.loads(value))
elif type_ == bool:
if value == 'true':
value = True
else:
value = False
else:
# Other types or type converters
value = type_(value)
except:
return _('parameter')+' {} '.format(name)+_('must be type')+' {} '.format(type_), None
# After conversion, '', [], () and {} skip the length check; 0 still goes through the size check.
if not value and value != 0:
if required_:
return _('parameter')+' {} '.format(name)+_('can not be empty'), None
else:
return None, value
# Check the allowed options
if options and type(options) == list:
if value not in options:
return _('parameter') + ' {} '.format(name) + _('must be in the list of options') + ' : {}'.format(json.dumps(options)), None
# Check the minimum value/length
if min_:
if type(value) in [str, list, dict, set]:
if len(value) < min_:
return _('parameter')+' {} '.format(name)+_('minimum length')+' {} '.format(min_), None
elif type(value) in [int, float, complex]:
if value < min_:
return _('parameter')+' {} '.format(name)+_('minimum value')+' {} '.format(min_), None
else:
# Other custom types
if value < type_(min_):
return _('parameter')+' {} '.format(name)+_('minimum value')+' {} '.format(min_), None
# Check the maximum value/length
if max_:
if type(value) in [str, list, dict, set]:
if len(value) > max_:
return _('parameter')+' {} '.format(name)+_('maximum length')+' {} '.format(max_), None
elif type(value) in [int, float, bool, complex]:
if value > max_:
return _('parameter')+' {} '.format(name)+_('maximum value')+' {} '.format(max_), None
else:
# Other custom types
if value > type_(max_):
return _('parameter')+' {} '.format(name)+_('maximum value')+' {} '.format(max_), None
return None, value
def verify_parameter(self, request, par_conf, value_dict=None, parameter=None):
# Validate the parameters and convert them into a parameter object
if type(par_conf) != list:
return _('The wrong configuration, methons must be loaded inside the list container.'), None
if parameter is None:
parameter = self.get_parameter(request)
if value_dict:
parameter.update(value_dict)
adc = ApiDataClass()
errmsg = []
# Pull values according to the parameter configuration
for line in par_conf:
key = line['name']
errmsg_, value = self.__verify(line, key, parameter.get(key))
if errmsg_:
errmsg.append(errmsg_)
else:
setattr(adc, key, value)
return errmsg, adc
def api_run(self, request, url, method=None, parameter=None, json=True):
'''Run an interface directly from code, so interface code can be reused.
request: pass in the current request directly
url: the interface url you want to access
method: if omitted, defaults to request.method
parameter: request parameters; if omitted, no parameters are passed to the api
json: True (default) returns json data, False returns the response
'''
if not method:
method = request.method
model, key, value_dict = self.__find_api_function(url)
methons = self.__find_api_methons(key)
errmsg = ''
code = 400
if methons and not method is None:
# Methods and parameters are configured; validate and convert the parameters
par_conf = methons.get(method.upper())
if parameter is None:
parameter = {}
errmsg, data = self.verify_parameter(
None, par_conf, value_dict=value_dict, parameter=parameter)
else:
code = 404
errmsg = _('no such interface method')
if errmsg:
if json:
return {'msg': errmsg}, code
return return_response(errmsg)
ret = model(request, data, json, method)
return ret
def before_running(self, **kwargs):
'''Hook executed before the interface runs'''
def after_running(self, **kwargs):
'''Hook executed after the interface runs'''
def add_api(self, name, url, methods):
'''Add an interface via a decorator'''
def wrap(cls):
conf = self.__make_model([{
'url': url,
'class': cls,
'name': name,
'methods': methods
}])
self.conf += conf
urllist = []
if type(url) == str:
urllist.append(url)
else:
urllist = url
for u in urllist:
self.__add_url_rules(u)
return cls
return wrap
def api_entry(self, request, *url):
'''API entry point'''
model, key, value_dict = self.__find_api_function(url)
methons = self.__find_api_methons(key)
if methons and not methons.get(request.method) is None:
# Methods and parameters are configured; validate and convert the parameters
errmsg, data = self.verify_parameter(
request, methons.get(request.method), value_dict)
if errmsg:
return return_response(errmsg)
if hasattr(self, 'before_running'):
before_running_ret = self.before_running(
request=request, data=data, model=model, key=key)
if before_running_ret:
return before_running_ret
# Run the interface; if a gateway is used, return plain data first instead of a response
ret = model(request, data,json=self.options.get('use_gateway'))
if hasattr(self, 'after_running'):
new_ret = self.after_running(
request=request, response=ret, model=model, key=key)
if new_ret:
return new_ret
return ret
else:
return return_response(_('no such interface method'), 404)
def render_documents(self, request, *url):
'''Render the documentation page'''
if self.document_version != time.ctime(os.stat(self.document_name).st_mtime):
# Re-read the document if it has changed.
self.document = open(self.document_name,
mode='r', encoding='utf-8').read()
return api.framework.template(self.document)
def get_api_data(self, request, *url):
'''Return the data used by the documentation page'''
return api.framework.json({'data': self.conf, 'options': self.options})
single_apishop = None
def singleton(cls):
class SingletonWrapper(cls):
def __new__(cls, *args, **kwargs):
global single_apishop
if not single_apishop:
single_apishop = super().__new__(cls)
elif kwargs and kwargs.get('force'):
# force=True in the arguments forcibly overrides the previous instance
single_apishop = super().__new__(cls)
elif args or kwargs:
# Any arguments automatically override the previous instance
single_apishop = super().__new__(cls)
return single_apishop
return SingletonWrapper
@singleton
class SingleApiShop(ApiShop):
'''Singleton version of ApiShop.
The difference between this singleton and the singleton decorator is:
1. On each initialization, if arguments are given, the previous class and arguments are overridden.
2. On each initialization, if force=True is passed, the previous class and arguments are overridden (including subclasses).
Use get_single_apishop to obtain the instance.
'''
@staticmethod
def get_single_apishop():
'''Get the singleton instance'''
global single_apishop
return single_apishop
get_single_apishop = SingleApiShop.get_single_apishop
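# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal end-to-end example, assuming Flask is installed so the 'flask' adapter can
# be loaded; DemoApi, demo_conf and app are illustrative names, not part of api-shop.
if __name__ == "__main__":
    from flask import Flask, request

    class DemoApi(Api):
        def get(self, request, data):
            # `data` carries the validated parameters as attributes
            return {'echo': data.name}

    demo_conf = [{
        'url': 'demo',
        'class': DemoApi,  # a class object is accepted as well as a dotted-path string
        'name': 'demo endpoint',
        'methods': {
            'GET': [{'name': 'name', 'type': str, 'required': True, 'description': 'name to echo'}],
        },
    }]

    app = Flask(__name__)
    shop = ApiShop(demo_conf, {'framework': 'flask', 'base_url': '/api/'})

    @app.route('/api/<path:url>', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH'])
    def entry(url):
        # api_entry validates the parameters against demo_conf and dispatches to DemoApi
        return shop.api_entry(request, url)

    # The same interface can also be reused from other code via shop.api_run(request, 'demo')
    app.run(debug=True)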
/dq-notebook-6.0.2.tar.gz/dq-notebook-6.0.2/docs/source/examples/Notebook/Running Code.ipynb
# Running Code
First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel and therefore runs Python code.
## Code cells allow you to enter and run code
Run a code cell using `Shift-Enter` or by pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
```
a = 10
print(a)
```
There are two other keyboard shortcuts for running code:
* `Alt-Enter` runs the current cell and inserts a new one below.
* `Ctrl-Enter` runs the current cell and enters command mode.
## Managing the Kernel
Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
```
import time
time.sleep(10)
```
If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
ctypes to segfault the Python interpreter:
```
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
```
## Cell menu
The "Cell" menu has a number of menu items for running code in different ways. These includes:
* Run and Select Below
* Run and Insert Below
* Run All
* Run All Above
* Run All Below
## Restarting the kernel
The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
## sys.stdout and sys.stderr
The stdout and stderr streams are displayed as text in the output area.
```
print("hi, stdout")
from __future__ import print_function
print('hi, stderr', file=sys.stderr)
```
## Output is asynchronous
All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
```
import time, sys
for i in range(8):
print(i)
time.sleep(0.5)
```
## Large outputs
To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double-click on the active area to the left of the output:
```
for i in range(50):
print(i)
```
Beyond a certain point, output will scroll automatically:
```
for i in range(500):
print(2**i - 1)
```
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/pure1/Pure1_1_0/models/support_contract_get_response.py
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.pure1.Pure1_1_0 import models
class SupportContractGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[SupportContract]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.SupportContract]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the continuation_token query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying filter params.
items (list[SupportContract])
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SupportContractGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SupportContractGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SupportContractGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
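# --- Hypothetical usage sketch (not part of the generated module) ---
# Shows how the guarded attribute handling and to_dict() behave; the field values
# used here are made up for illustration.
if __name__ == "__main__":
    resp = SupportContractGetResponse(continuation_token="token-123", total_item_count=1)
    print(resp.to_dict())   # {'continuation_token': 'token-123', 'total_item_count': 1}
    try:
        resp.unknown = "x"  # keys outside attribute_map are rejected
    except KeyError as err:
        print(err)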
/azure-mgmt-privatedns-1.1.0b1.zip/azure-mgmt-privatedns-1.1.0b1/azure/mgmt/privatedns/operations/_private_zones_operations.py
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
resource_group_name: str,
private_zone_name: str,
subscription_id: str,
*,
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-06-01")) # type: Literal["2020-06-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"privateZoneName": _SERIALIZER.url("private_zone_name", private_zone_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if if_match is not None:
_headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
if if_none_match is not None:
_headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str,
private_zone_name: str,
subscription_id: str,
*,
if_match: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-06-01")) # type: Literal["2020-06-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"privateZoneName": _SERIALIZER.url("private_zone_name", private_zone_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if if_match is not None:
_headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str,
private_zone_name: str,
subscription_id: str,
*,
if_match: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-06-01")) # type: Literal["2020-06-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"privateZoneName": _SERIALIZER.url("private_zone_name", private_zone_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if if_match is not None:
_headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, private_zone_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-06-01")) # type: Literal["2020-06-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"privateZoneName": _SERIALIZER.url("private_zone_name", private_zone_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(subscription_id: str, *, top: Optional[int] = None, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-06-01")) # type: Literal["2020-06-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateDnsZones")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if top is not None:
_params["$top"] = _SERIALIZER.query("top", top, "int")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(
resource_group_name: str, subscription_id: str, *, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-06-01")) # type: Literal["2020-06-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if top is not None:
_params["$top"] = _SERIALIZER.query("top", top, "int")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class PrivateZonesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.privatedns.PrivateDnsManagementClient`'s
:attr:`private_zones` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
def _create_or_update_initial(
self,
resource_group_name: str,
private_zone_name: str,
parameters: Union[_models.PrivateZone, IO],
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> Optional[_models.PrivateZone]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.PrivateZone]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "PrivateZone")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
private_zone_name=private_zone_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
if_none_match=if_none_match,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PrivateZone", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("PrivateZone", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}"} # type: ignore
@overload
def begin_create_or_update(
self,
resource_group_name: str,
private_zone_name: str,
parameters: _models.PrivateZone,
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.PrivateZone]:
"""Creates or updates a Private DNS zone. Does not modify Links to virtual networks or DNS records
within the zone.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param private_zone_name: The name of the Private DNS zone (without a terminating dot).
Required.
:type private_zone_name: str
:param parameters: Parameters supplied to the CreateOrUpdate operation. Required.
:type parameters: ~azure.mgmt.privatedns.models.PrivateZone
:param if_match: The ETag of the Private DNS zone. Omit this value to always overwrite the
current zone. Specify the last-seen ETag value to prevent accidentally overwriting any
concurrent changes. Default value is None.
:type if_match: str
:param if_none_match: Set to '*' to allow a new Private DNS zone to be created, but to prevent
updating an existing zone. Other values will be ignored. Default value is None.
:type if_none_match: str
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateZone or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.privatedns.models.PrivateZone]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
private_zone_name: str,
parameters: IO,
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.PrivateZone]:
"""Creates or updates a Private DNS zone. Does not modify Links to virtual networks or DNS records
within the zone.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param private_zone_name: The name of the Private DNS zone (without a terminating dot).
Required.
:type private_zone_name: str
:param parameters: Parameters supplied to the CreateOrUpdate operation. Required.
:type parameters: IO
:param if_match: The ETag of the Private DNS zone. Omit this value to always overwrite the
current zone. Specify the last-seen ETag value to prevent accidentally overwriting any
concurrent changes. Default value is None.
:type if_match: str
:param if_none_match: Set to '*' to allow a new Private DNS zone to be created, but to prevent
updating an existing zone. Other values will be ignored. Default value is None.
:type if_none_match: str
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateZone or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.privatedns.models.PrivateZone]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
private_zone_name: str,
parameters: Union[_models.PrivateZone, IO],
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> LROPoller[_models.PrivateZone]:
"""Creates or updates a Private DNS zone. Does not modify Links to virtual networks or DNS records
within the zone.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param private_zone_name: The name of the Private DNS zone (without a terminating dot).
Required.
:type private_zone_name: str
:param parameters: Parameters supplied to the CreateOrUpdate operation. Is either a model type
or an IO type. Required.
:type parameters: ~azure.mgmt.privatedns.models.PrivateZone or IO
:param if_match: The ETag of the Private DNS zone. Omit this value to always overwrite the
current zone. Specify the last-seen ETag value to prevent accidentally overwriting any
concurrent changes. Default value is None.
:type if_match: str
:param if_none_match: Set to '*' to allow a new Private DNS zone to be created, but to prevent
updating an existing zone. Other values will be ignored. Default value is None.
:type if_none_match: str
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateZone or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.privatedns.models.PrivateZone]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PrivateZone]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
private_zone_name=private_zone_name,
parameters=parameters,
if_match=if_match,
if_none_match=if_none_match,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PrivateZone", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}"} # type: ignore
def _update_initial(
self,
resource_group_name: str,
private_zone_name: str,
parameters: Union[_models.PrivateZone, IO],
if_match: Optional[str] = None,
**kwargs: Any
) -> Optional[_models.PrivateZone]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.PrivateZone]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "PrivateZone")
request = build_update_request(
resource_group_name=resource_group_name,
private_zone_name=private_zone_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PrivateZone", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}"} # type: ignore
@overload
def begin_update(
self,
resource_group_name: str,
private_zone_name: str,
parameters: _models.PrivateZone,
if_match: Optional[str] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.PrivateZone]:
"""Updates a Private DNS zone. Does not modify virtual network links or DNS records within the
zone.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param private_zone_name: The name of the Private DNS zone (without a terminating dot).
Required.
:type private_zone_name: str
:param parameters: Parameters supplied to the Update operation. Required.
:type parameters: ~azure.mgmt.privatedns.models.PrivateZone
:param if_match: The ETag of the Private DNS zone. Omit this value to always overwrite the
current zone. Specify the last-seen ETag value to prevent accidentally overwriting any
concurrent changes. Default value is None.
:type if_match: str
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateZone or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.privatedns.models.PrivateZone]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
private_zone_name: str,
parameters: IO,
if_match: Optional[str] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.PrivateZone]:
"""Updates a Private DNS zone. Does not modify virtual network links or DNS records within the
zone.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param private_zone_name: The name of the Private DNS zone (without a terminating dot).
Required.
:type private_zone_name: str
:param parameters: Parameters supplied to the Update operation. Required.
:type parameters: IO
:param if_match: The ETag of the Private DNS zone. Omit this value to always overwrite the
current zone. Specify the last-seen ETag value to prevent accidentally overwriting any
concurrent changes. Default value is None.
:type if_match: str
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateZone or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.privatedns.models.PrivateZone]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self,
resource_group_name: str,
private_zone_name: str,
parameters: Union[_models.PrivateZone, IO],
if_match: Optional[str] = None,
**kwargs: Any
) -> LROPoller[_models.PrivateZone]:
"""Updates a Private DNS zone. Does not modify virtual network links or DNS records within the
zone.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param private_zone_name: The name of the Private DNS zone (without a terminating dot).
Required.
:type private_zone_name: str
:param parameters: Parameters supplied to the Update operation. Is either a model type or a IO
type. Required.
:type parameters: ~azure.mgmt.privatedns.models.PrivateZone or IO
:param if_match: The ETag of the Private DNS zone. Omit this value to always overwrite the
current zone. Specify the last-seen ETag value to prevent accidentally overwriting any
concurrent changes. Default value is None.
:type if_match: str
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateZone or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.privatedns.models.PrivateZone]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PrivateZone]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial( # type: ignore
resource_group_name=resource_group_name,
private_zone_name=private_zone_name,
parameters=parameters,
if_match=if_match,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PrivateZone", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, private_zone_name: str, if_match: Optional[str] = None, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
private_zone_name=private_zone_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}"} # type: ignore
@distributed_trace
def begin_delete(
self, resource_group_name: str, private_zone_name: str, if_match: Optional[str] = None, **kwargs: Any
) -> LROPoller[None]:
"""Deletes a Private DNS zone. WARNING: All DNS records in the zone will also be deleted. This
operation cannot be undone. A Private DNS zone cannot be deleted unless all virtual network links
to it are removed.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param private_zone_name: The name of the Private DNS zone (without a terminating dot).
Required.
:type private_zone_name: str
:param if_match: The ETag of the Private DNS zone. Omit this value to always delete the current
zone. Specify the last-seen ETag value to prevent accidentally deleting any concurrent changes.
Default value is None.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
private_zone_name=private_zone_name,
if_match=if_match,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}"} # type: ignore
@distributed_trace
def get(self, resource_group_name: str, private_zone_name: str, **kwargs: Any) -> _models.PrivateZone:
"""Gets a Private DNS zone. Retrieves the zone properties, but not the virtual networks links or
the record sets within the zone.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param private_zone_name: The name of the Private DNS zone (without a terminating dot).
Required.
:type private_zone_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateZone or the result of cls(response)
:rtype: ~azure.mgmt.privatedns.models.PrivateZone
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PrivateZone]
request = build_get_request(
resource_group_name=resource_group_name,
private_zone_name=private_zone_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateZone", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}"} # type: ignore
@distributed_trace
def list(self, top: Optional[int] = None, **kwargs: Any) -> Iterable["_models.PrivateZone"]:
"""Lists the Private DNS zones in all resource groups in a subscription.
:param top: The maximum number of Private DNS zones to return. If not specified, returns up to
100 zones. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateZone or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.privatedns.models.PrivateZone]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PrivateZoneListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
top=top,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateZoneListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateDnsZones"} # type: ignore
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, top: Optional[int] = None, **kwargs: Any
) -> Iterable["_models.PrivateZone"]:
"""Lists the Private DNS zones within a resource group.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param top: The maximum number of Private DNS zones to return. If not specified, returns up to
100 zones. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateZone or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.privatedns.models.PrivateZone]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2020-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PrivateZoneListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
top=top,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateZoneListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones"} # type: ignore
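# ---------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the generated client code above.
# It assumes ``azure-identity`` is installed; the subscription ID, resource
# group, zone name, and tag values are placeholders.
# ---------------------------------------------------------------------------
def _example_private_zones_usage():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.privatedns import PrivateDnsManagementClient
    from azure.mgmt.privatedns.models import PrivateZone

    client = PrivateDnsManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # begin_create_or_update returns an LROPoller; .result() blocks until the
    # long-running operation finishes and returns the deserialized PrivateZone.
    zone = client.private_zones.begin_create_or_update(
        "<resource-group>", "contoso.internal", PrivateZone(location="Global")
    ).result()
    # Pass the last-seen ETag as if_match so concurrent changes are not overwritten.
    client.private_zones.begin_update(
        "<resource-group>",
        "contoso.internal",
        PrivateZone(tags={"env": "dev"}),
        if_match=zone.etag,
    ).result()
    # list() returns an ItemPaged iterator that follows nextLink pages transparently.
    for z in client.private_zones.list():
        print(z.name)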
/pulumi_fortios-0.0.9.tar.gz/pulumi_fortios-0.0.9/pulumi_fortios/switch_controller_switch_profile.py
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SwitchControllerSwitchProfileArgs', 'SwitchControllerSwitchProfile']
@pulumi.input_type
class SwitchControllerSwitchProfileArgs:
def __init__(__self__, *,
login: Optional[pulumi.Input[str]] = None,
login_passwd: Optional[pulumi.Input[str]] = None,
login_passwd_override: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
revision_backup_on_logout: Optional[pulumi.Input[str]] = None,
revision_backup_on_upgrade: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SwitchControllerSwitchProfile resource.
"""
if login is not None:
pulumi.set(__self__, "login", login)
if login_passwd is not None:
pulumi.set(__self__, "login_passwd", login_passwd)
if login_passwd_override is not None:
pulumi.set(__self__, "login_passwd_override", login_passwd_override)
if name is not None:
pulumi.set(__self__, "name", name)
if revision_backup_on_logout is not None:
pulumi.set(__self__, "revision_backup_on_logout", revision_backup_on_logout)
if revision_backup_on_upgrade is not None:
pulumi.set(__self__, "revision_backup_on_upgrade", revision_backup_on_upgrade)
if vdomparam is not None:
pulumi.set(__self__, "vdomparam", vdomparam)
@property
@pulumi.getter
def login(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "login")
@login.setter
def login(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "login", value)
@property
@pulumi.getter(name="loginPasswd")
def login_passwd(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "login_passwd")
@login_passwd.setter
def login_passwd(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "login_passwd", value)
@property
@pulumi.getter(name="loginPasswdOverride")
def login_passwd_override(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "login_passwd_override")
@login_passwd_override.setter
def login_passwd_override(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "login_passwd_override", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="revisionBackupOnLogout")
def revision_backup_on_logout(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revision_backup_on_logout")
@revision_backup_on_logout.setter
def revision_backup_on_logout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revision_backup_on_logout", value)
@property
@pulumi.getter(name="revisionBackupOnUpgrade")
def revision_backup_on_upgrade(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revision_backup_on_upgrade")
@revision_backup_on_upgrade.setter
def revision_backup_on_upgrade(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revision_backup_on_upgrade", value)
@property
@pulumi.getter
def vdomparam(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "vdomparam")
@vdomparam.setter
def vdomparam(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vdomparam", value)
@pulumi.input_type
class _SwitchControllerSwitchProfileState:
def __init__(__self__, *,
login: Optional[pulumi.Input[str]] = None,
login_passwd: Optional[pulumi.Input[str]] = None,
login_passwd_override: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
revision_backup_on_logout: Optional[pulumi.Input[str]] = None,
revision_backup_on_upgrade: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SwitchControllerSwitchProfile resources.
"""
if login is not None:
pulumi.set(__self__, "login", login)
if login_passwd is not None:
pulumi.set(__self__, "login_passwd", login_passwd)
if login_passwd_override is not None:
pulumi.set(__self__, "login_passwd_override", login_passwd_override)
if name is not None:
pulumi.set(__self__, "name", name)
if revision_backup_on_logout is not None:
pulumi.set(__self__, "revision_backup_on_logout", revision_backup_on_logout)
if revision_backup_on_upgrade is not None:
pulumi.set(__self__, "revision_backup_on_upgrade", revision_backup_on_upgrade)
if vdomparam is not None:
pulumi.set(__self__, "vdomparam", vdomparam)
@property
@pulumi.getter
def login(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "login")
@login.setter
def login(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "login", value)
@property
@pulumi.getter(name="loginPasswd")
def login_passwd(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "login_passwd")
@login_passwd.setter
def login_passwd(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "login_passwd", value)
@property
@pulumi.getter(name="loginPasswdOverride")
def login_passwd_override(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "login_passwd_override")
@login_passwd_override.setter
def login_passwd_override(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "login_passwd_override", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="revisionBackupOnLogout")
def revision_backup_on_logout(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revision_backup_on_logout")
@revision_backup_on_logout.setter
def revision_backup_on_logout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revision_backup_on_logout", value)
@property
@pulumi.getter(name="revisionBackupOnUpgrade")
def revision_backup_on_upgrade(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revision_backup_on_upgrade")
@revision_backup_on_upgrade.setter
def revision_backup_on_upgrade(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revision_backup_on_upgrade", value)
@property
@pulumi.getter
def vdomparam(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "vdomparam")
@vdomparam.setter
def vdomparam(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vdomparam", value)
class SwitchControllerSwitchProfile(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
login: Optional[pulumi.Input[str]] = None,
login_passwd: Optional[pulumi.Input[str]] = None,
login_passwd_override: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
revision_backup_on_logout: Optional[pulumi.Input[str]] = None,
revision_backup_on_upgrade: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a SwitchControllerSwitchProfile resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[SwitchControllerSwitchProfileArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a SwitchControllerSwitchProfile resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param SwitchControllerSwitchProfileArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SwitchControllerSwitchProfileArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
login: Optional[pulumi.Input[str]] = None,
login_passwd: Optional[pulumi.Input[str]] = None,
login_passwd_override: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
revision_backup_on_logout: Optional[pulumi.Input[str]] = None,
revision_backup_on_upgrade: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SwitchControllerSwitchProfileArgs.__new__(SwitchControllerSwitchProfileArgs)
__props__.__dict__["login"] = login
__props__.__dict__["login_passwd"] = None if login_passwd is None else pulumi.Output.secret(login_passwd)
__props__.__dict__["login_passwd_override"] = login_passwd_override
__props__.__dict__["name"] = name
__props__.__dict__["revision_backup_on_logout"] = revision_backup_on_logout
__props__.__dict__["revision_backup_on_upgrade"] = revision_backup_on_upgrade
__props__.__dict__["vdomparam"] = vdomparam
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["loginPasswd"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(SwitchControllerSwitchProfile, __self__).__init__(
'fortios:index/switchControllerSwitchProfile:SwitchControllerSwitchProfile',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
login: Optional[pulumi.Input[str]] = None,
login_passwd: Optional[pulumi.Input[str]] = None,
login_passwd_override: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
revision_backup_on_logout: Optional[pulumi.Input[str]] = None,
revision_backup_on_upgrade: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None) -> 'SwitchControllerSwitchProfile':
"""
Get an existing SwitchControllerSwitchProfile resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SwitchControllerSwitchProfileState.__new__(_SwitchControllerSwitchProfileState)
__props__.__dict__["login"] = login
__props__.__dict__["login_passwd"] = login_passwd
__props__.__dict__["login_passwd_override"] = login_passwd_override
__props__.__dict__["name"] = name
__props__.__dict__["revision_backup_on_logout"] = revision_backup_on_logout
__props__.__dict__["revision_backup_on_upgrade"] = revision_backup_on_upgrade
__props__.__dict__["vdomparam"] = vdomparam
return SwitchControllerSwitchProfile(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def login(self) -> pulumi.Output[str]:
return pulumi.get(self, "login")
@property
@pulumi.getter(name="loginPasswd")
def login_passwd(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "login_passwd")
@property
@pulumi.getter(name="loginPasswdOverride")
def login_passwd_override(self) -> pulumi.Output[str]:
return pulumi.get(self, "login_passwd_override")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="revisionBackupOnLogout")
def revision_backup_on_logout(self) -> pulumi.Output[str]:
return pulumi.get(self, "revision_backup_on_logout")
@property
@pulumi.getter(name="revisionBackupOnUpgrade")
def revision_backup_on_upgrade(self) -> pulumi.Output[str]:
return pulumi.get(self, "revision_backup_on_upgrade")
@property
@pulumi.getter
def vdomparam(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "vdomparam")
/Pyomo-6.6.2-cp39-cp39-win_amd64.whl/pyomo/gdp/plugins/bigm.py
import logging
from pyomo.common.collections import ComponentMap
from pyomo.common.config import ConfigDict, ConfigValue
from pyomo.common.modeling import unique_component_name
from pyomo.common.deprecation import deprecated, deprecation_warning
from pyomo.contrib.cp.transform.logical_to_disjunctive_program import (
LogicalToDisjunctive,
)
from pyomo.core import (
Block,
BooleanVar,
Connector,
Constraint,
Param,
Set,
SetOf,
Var,
Expression,
SortComponents,
TraversalStrategy,
value,
RangeSet,
NonNegativeIntegers,
Binary,
Any,
)
from pyomo.core.base import TransformationFactory, Reference
import pyomo.core.expr as EXPR
from pyomo.gdp import Disjunct, Disjunction, GDP_Error
from pyomo.gdp.plugins.bigm_mixin import (
_BigM_MixIn,
_get_bigM_suffix_list,
_warn_for_unused_bigM_args,
)
from pyomo.gdp.plugins.gdp_to_mip_transformation import GDP_to_MIP_Transformation
from pyomo.gdp.transformed_disjunct import _TransformedDisjunct
from pyomo.gdp.util import is_child_of, _get_constraint_transBlock, _to_dict
from pyomo.core.util import target_list
from pyomo.network import Port
from pyomo.repn import generate_standard_repn
from weakref import ref as weakref_ref, ReferenceType
logger = logging.getLogger('pyomo.gdp.bigm')
@TransformationFactory.register(
'gdp.bigm', doc="Relax disjunctive model using big-M terms."
)
class BigM_Transformation(GDP_to_MIP_Transformation, _BigM_MixIn):
"""Relax disjunctive model using big-M terms.
Relaxes a disjunctive model into an algebraic model by adding Big-M
terms to all disjunctive constraints.
This transformation accepts the following keyword arguments:
bigM: A user-specified value (or dict) of M values to use (see below)
targets: the targets to transform [default: the instance]
M values are determined as follows:
1) if the constraint appears in the bigM argument dict
2) if the constraint parent_component appears in the bigM
argument dict
3) if any block which is an ancestor to the constraint appears in
the bigM argument dict
4) if 'None' is in the bigM argument dict
5) if the constraint or the constraint parent_component appear in
a BigM Suffix attached to any parent_block() beginning with the
constraint's parent_block and moving up to the root model.
6) if None appears in a BigM Suffix attached to any
parent_block() between the constraint and the root model.
7) if the constraint is linear, estimate M using the variable bounds
M values may be a single value or a 2-tuple specifying the M for the
lower bound and the upper bound of the constraint body.
Specifying "bigM=N" is automatically mapped to "bigM={None: N}".
The transformation will create a new Block with a unique
name beginning "_pyomo_gdp_bigm_reformulation". That Block will
contain an indexed Block named "relaxedDisjuncts", which will hold
the relaxed disjuncts. This block is indexed by an integer
indicating the order in which the disjuncts were relaxed.
Each block has a dictionary "_constraintMap":
'srcConstraints': ComponentMap(<transformed constraint>:
<src constraint>)
'transformedConstraints': ComponentMap(<src constraint>:
<transformed constraint>)
All transformed Disjuncts will have a pointer to the block their transformed
constraints are on, and all transformed Disjunctions will have a
pointer to the corresponding 'Or' or 'ExactlyOne' constraint.
"""
CONFIG = ConfigDict("gdp.bigm")
CONFIG.declare(
'targets',
ConfigValue(
default=None,
domain=target_list,
description="target or list of targets that will be relaxed",
doc="""
This specifies the list of components to relax. If None (default), the
entire model is transformed. Note that if the transformation is done out
of place, the list of targets should be attached to the model before it
is cloned, and the list will specify the targets on the cloned
instance.""",
),
)
CONFIG.declare(
'bigM',
ConfigValue(
default=None,
domain=_to_dict,
description="Big-M value used for constraint relaxation",
doc="""
A user-specified value, dict, or ComponentMap of M values that override
M-values found through model Suffixes or that would otherwise be
calculated using variable domains.""",
),
)
CONFIG.declare(
'assume_fixed_vars_permanent',
ConfigValue(
default=False,
domain=bool,
description="Boolean indicating whether or not to transform so that "
"the transformed model will still be valid when fixed Vars are "
"unfixed.",
doc="""
This is only relevant when the transformation will be estimating values
for M. If True, the transformation will calculate M values assuming that
fixed variables will always be fixed to their current values. This means
that if a fixed variable is unfixed after transformation, the
transformed model is potentially no longer valid. By default, the
transformation will assume fixed variables could be unfixed in the
future and will use their bounds to calculate the M value rather than
their value. Note that this could make for a weaker LP relaxation
while the variables remain fixed.
""",
),
)
transformation_name = 'bigm'
def __init__(self):
super().__init__(logger)
def _apply_to(self, instance, **kwds):
self.used_args = ComponentMap() # If everything was sure to go well,
# this could be a dictionary. But if
# someone messes up and gives us a Var
# as a key in bigMargs, I need the error
# not to be when I try to put it into
# this map!
try:
self._apply_to_impl(instance, **kwds)
finally:
self._restore_state()
self.used_args.clear()
def _apply_to_impl(self, instance, **kwds):
self._process_arguments(instance, **kwds)
# filter out inactive targets and handle case where targets aren't
# specified.
targets = self._filter_targets(instance)
# transform logical constraints based on targets
self._transform_logical_constraints(instance, targets)
# we need to preprocess targets to make sure that if there are any
# disjunctions in targets that their disjuncts appear before them in
# the list.
gdp_tree = self._get_gdp_tree_from_targets(instance, targets)
preprocessed_targets = gdp_tree.reverse_topological_sort()
bigM = self._config.bigM
for t in preprocessed_targets:
if t.ctype is Disjunction:
self._transform_disjunctionData(
t,
t.index(),
parent_disjunct=gdp_tree.parent(t),
root_disjunct=gdp_tree.root_disjunct(t),
)
else: # We know t is a Disjunct after preprocessing
self._transform_disjunct(
t, bigM, root_disjunct=gdp_tree.root_disjunct(t)
)
# issue warnings about anything that was in the bigM args dict that we
# didn't use
_warn_for_unused_bigM_args(bigM, self.used_args, logger)
def _transform_disjunctionData(
self, obj, index, parent_disjunct=None, root_disjunct=None
):
(transBlock, xorConstraint) = self._setup_transform_disjunctionData(
obj, root_disjunct
)
# add or (or xor) constraint
or_expr = sum(disjunct.binary_indicator_var for disjunct in obj.disjuncts)
rhs = 1 if parent_disjunct is None else parent_disjunct.binary_indicator_var
if obj.xor:
xorConstraint[index] = or_expr == rhs
else:
xorConstraint[index] = or_expr >= rhs
# Mark the DisjunctionData as transformed by mapping it to its XOR
# constraint.
obj._algebraic_constraint = weakref_ref(xorConstraint[index])
# and deactivate for the writers
obj.deactivate()
def _transform_disjunct(self, obj, bigM, root_disjunct):
root = (
root_disjunct.parent_block()
if root_disjunct is not None
else obj.parent_block()
)
transBlock = self._add_transformation_block(root)[0]
suffix_list = _get_bigM_suffix_list(obj)
arg_list = self._get_bigM_arg_list(bigM, obj)
relaxationBlock = self._get_disjunct_transformation_block(obj, transBlock)
# we will keep a map of constraints (hashable, ha!) to a tuple to
# indicate what their M value is and where it came from, of the form:
# ((lower_value, lower_source, lower_key), (upper_value, upper_source,
# upper_key)), where the first tuple is the information for the lower M,
# the second tuple is the info for the upper M, source is the Suffix or
# argument dictionary and None if the value was calculated, and key is
# the key in the Suffix or argument dictionary, and None if it was
# calculated. (Note that it is possible the lower or upper is
# user-specified and the other is not, hence the need to store
# information for both.)
relaxationBlock.bigm_src = {}
# This is crazy, but if the disjunction has been previously
# relaxed, the disjunct *could* be deactivated. This is a big
# deal for Hull, as it uses the component_objects /
# component_data_objects generators. For BigM, that is OK,
# because we never use those generators with active=True. I am
# only noting it here for the future when someone (me?) is
# comparing the two relaxations.
#
# Transform each component within this disjunct
self._transform_block_components(obj, obj, bigM, arg_list, suffix_list)
# deactivate disjunct to keep the writers happy
obj._deactivate_without_fixing_indicator()
def _transform_constraint(
self, obj, disjunct, bigMargs, arg_list, disjunct_suffix_list
):
# add constraint to the transformation block, we'll transform it there.
transBlock = disjunct._transformation_block()
bigm_src = transBlock.bigm_src
constraintMap = transBlock._constraintMap
disjunctionRelaxationBlock = transBlock.parent_block()
# We will make indexes from ({obj.local_name} x obj.index_set() x ['lb',
# 'ub']), but don't bother construct that set here, as taking Cartesian
# products is kind of expensive (and redundant since we have the
# original model)
newConstraint = transBlock.transformedConstraints
for i in sorted(obj.keys()):
c = obj[i]
if not c.active:
continue
lower = (None, None, None)
upper = (None, None, None)
# first, we see if an M value was specified in the arguments.
# (This returns None if not)
lower, upper = self._get_M_from_args(c, bigMargs, arg_list, lower, upper)
M = (lower[0], upper[0])
if self._generate_debug_messages:
logger.debug(
"GDP(BigM): The value for M for constraint '%s' "
"from the BigM argument is %s." % (c.name, str(M))
)
# if we didn't get something we need from args, try suffixes:
if (M[0] is None and c.lower is not None) or (
M[1] is None and c.upper is not None
):
# first get anything parent to c but below disjunct
suffix_list = _get_bigM_suffix_list(
c.parent_block(), stopping_block=disjunct
)
# prepend that to what we already collected for the disjunct.
suffix_list.extend(disjunct_suffix_list)
lower, upper = self._update_M_from_suffixes(
c, suffix_list, lower, upper
)
M = (lower[0], upper[0])
if self._generate_debug_messages:
logger.debug(
"GDP(BigM): The value for M for constraint '%s' "
"after checking suffixes is %s." % (c.name, str(M))
)
if c.lower is not None and M[0] is None:
M = (self._estimate_M(c.body, c)[0] - c.lower, M[1])
lower = (M[0], None, None)
if c.upper is not None and M[1] is None:
M = (M[0], self._estimate_M(c.body, c)[1] - c.upper)
upper = (M[1], None, None)
if self._generate_debug_messages:
logger.debug(
"GDP(BigM): The value for M for constraint '%s' "
"after estimating (if needed) is %s." % (c.name, str(M))
)
# save the source information
bigm_src[c] = (lower, upper)
self._add_constraint_expressions(
c, i, M, disjunct.binary_indicator_var, newConstraint, constraintMap
)
# deactivate because we relaxed
c.deactivate()
def _update_M_from_suffixes(self, constraint, suffix_list, lower, upper):
# It's possible we found half the answer in args, but we are still
# looking for half the answer.
need_lower = constraint.lower is not None and lower[0] is None
need_upper = constraint.upper is not None and upper[0] is None
M = None
# first we check if the constraint or its parent is a key in any of the
# suffix lists
for bigm in suffix_list:
if constraint in bigm:
M = bigm[constraint]
(lower, upper, need_lower, need_upper) = self._process_M_value(
M,
lower,
upper,
need_lower,
need_upper,
bigm,
constraint,
constraint,
)
if not need_lower and not need_upper:
return lower, upper
# if c is indexed, check for the parent component
if constraint.parent_component() in bigm:
parent = constraint.parent_component()
M = bigm[parent]
(lower, upper, need_lower, need_upper) = self._process_M_value(
M, lower, upper, need_lower, need_upper, bigm, parent, constraint
)
if not need_lower and not need_upper:
return lower, upper
# if we didn't get an M that way, traverse upwards through the blocks
# and see if None has a value on any of them.
if M is None:
for bigm in suffix_list:
if None in bigm:
M = bigm[None]
(lower, upper, need_lower, need_upper) = self._process_M_value(
M, lower, upper, need_lower, need_upper, bigm, None, constraint
)
if not need_lower and not need_upper:
return lower, upper
return lower, upper
@deprecated(
"The get_m_value_src function is deprecated. Use "
"the get_M_value_src function if you need source "
"information or the get_M_value function if you "
"only need values.",
version='5.7.1',
)
def get_m_value_src(self, constraint):
transBlock = _get_constraint_transBlock(constraint)
(
(lower_val, lower_source, lower_key),
(upper_val, upper_source, upper_key),
) = transBlock.bigm_src[constraint]
if (
constraint.lower is not None
and constraint.upper is not None
and (lower_source is not upper_source or lower_key is not upper_key)
):
raise GDP_Error(
"This is why this method is deprecated: The lower "
"and upper M values for constraint %s came from "
"different sources, please use the get_M_value_src "
"method." % constraint.name
)
# if source and key are equal for the two, this is representable in the
# old format.
if constraint.lower is not None and lower_source is not None:
return (lower_source, lower_key)
if constraint.upper is not None and upper_source is not None:
return (upper_source, upper_key)
# else it was calculated:
return (lower_val, upper_val)
def get_M_value_src(self, constraint):
"""Return a tuple indicating how the M value used to transform
constraint was specified. (In particular, this can be used to
verify which BigM Suffixes were actually necessary to the
transformation.)
Return is of the form: ((lower_M_val, lower_M_source, lower_M_key),
(upper_M_val, upper_M_source, upper_M_key))
If the constraint does not have a lower bound (or an upper bound),
the first (second) element will be (None, None, None). Note that if
a constraint is of the form a <= expr <= b or is an equality constraint,
it is not necessarily true that the source of lower_M and upper_M
are the same.
If the M value came from an arg, source is the dictionary itself and
key is the key in that dictionary which gave us the M value.
If the M value came from a Suffix, source is the BigM suffix used and
key is the key in that Suffix.
If the transformation calculated the value, both source and key are
None.
Parameters
----------
constraint: Constraint, which must be in the subtree of a transformed
Disjunct
"""
transBlock = _get_constraint_transBlock(constraint)
# This is a KeyError if it fails, but it is also my fault if it
# fails... (That is, it's a bug in the mapping.)
return transBlock.bigm_src[constraint]
def get_M_value(self, constraint):
"""Returns the M values used to transform constraint. Return is a tuple:
(lower_M_value, upper_M_value). Either can be None if constraint does
not have a lower or upper bound, respectively.
Parameters
----------
constraint: Constraint, which must be in the subtree of a transformed
Disjunct
"""
transBlock = _get_constraint_transBlock(constraint)
# This is a KeyError if it fails, but it is also my fault if it
# fails... (That is, it's a bug in the mapping.)
lower, upper = transBlock.bigm_src[constraint]
return (lower[0], upper[0])
def get_all_M_values_by_constraint(self, model):
"""Returns a dictionary mapping each constraint to a tuple:
(lower_M_value, upper_M_value), where either can be None if the
constraint does not have a lower or upper bound (respectively).
Parameters
----------
model: A GDP model that has been transformed with BigM
"""
m_values = {}
for disj in model.component_data_objects(
Disjunct, active=None, descend_into=(Block, Disjunct)
):
transBlock = disj.transformation_block
# First check if it was transformed at all.
if transBlock is not None:
# If it was transformed with BigM, we get the M values.
if hasattr(transBlock, 'bigm_src'):
for cons in transBlock.bigm_src:
m_values[cons] = self.get_M_value(cons)
return m_values
def get_largest_M_value(self, model):
"""Returns the largest M value for any constraint on the model.
Parameters
----------
model: A GDP model that has been transformed with BigM
"""
return max(
max(abs(m) for m in m_values if m is not None)
for m_values in self.get_all_M_values_by_constraint(model).values()
)
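# ---------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the plugin itself.  It builds a
# minimal GDP model and relaxes it with the 'gdp.bigm' transformation
# registered above; the bigM value of 100 is an arbitrary placeholder.
# ---------------------------------------------------------------------------
def _bigm_example():
    from pyomo.environ import ConcreteModel, Objective, TransformationFactory, Var

    m = ConcreteModel()
    m.x = Var(bounds=(0, 10))
    # Two disjuncts: either x <= 2 or x >= 8.
    m.choice = Disjunction(expr=[[m.x <= 2], [m.x >= 8]])
    m.obj = Objective(expr=m.x)
    # A scalar bigM is mapped to {None: 100}; a dict or ComponentMap keyed by
    # constraint, parent block, or None can supply per-component values.
    TransformationFactory('gdp.bigm').apply_to(m, bigM=100)
    return m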
/ast-toolbox-2020.9.1.7.tar.gz/ast-toolbox-2020.9.1.7/examples/Plot/plot_top1.py
import csv
import os.path
from matplotlib import pyplot as plt
n_trial = 5
top_k = 10
batch_size = 4000
# max_step = 5e6
prepath = "../"
exps = ["CartpoleNdRewardt"]
policies = ["GATRDInterStep1.0Fmax", "PSMCTSInterStep1.0Ec1.414K0.5A0.5Qmax",
"PSMCTSTRInterStep1.0Ec1.414K0.5A0.5Qmax", "PSMCTSTRCInterStep1.0Ec1.414K0.5A0.5Qmax"]
plot_name = "GATRD_PSMCTS"
# colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
colors = []
for i in range(len(policies)):
colors.append('C' + str(i))
for exp in exps:
plts = []
legends = []
fig = plt.figure(figsize=(10, 10))
for (policy_index, policy) in enumerate(policies):
print(policy)
for trial in range(n_trial):
print(trial)
steps = []
rewards = []
file_path = prepath + exp + '/Data/AST/Lexington/' + policy + '/' + str(trial) + '/process.csv'
if os.path.exists(file_path):
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for (i, row) in enumerate(csv_reader):
if i == 0:
entry_dict = {}
for index in range(len(row)):
entry_dict[row[index]] = index
else:
# if int(row[entry_dict["StepNum"]]) > max_step:
# break
if int(row[entry_dict["StepNum"]]) % batch_size == 0:
steps.append(int(row[entry_dict["StepNum"]]))
rewards.append(max(0.0, float(row[entry_dict["reward 0"]])))
plot, = plt.plot(steps, rewards, color=colors[policy_index])
plts.append(plot)
legends.append(policy + ' ' + str(trial))
plt.legend(plts, legends)
plt.xlabel('Step number')
plt.ylabel('Best reward')
fig.savefig(prepath + exp + '/Data/Plot/top1/' + plot_name + '_top1.pdf')
plt.close(fig)
/poetry-codeartifact-login-0.2.1.tar.gz/poetry-codeartifact-login-0.2.1/README.md
# Poetry AWS CodeArtifact Login
A Poetry plugin for authenticating with AWS CodeArtifact.
## Requirements
- `poetry >= 1.2.0`
Install using the dedicated installation script. See [here](https://python-poetry.org/docs/#installation).
- `AWS CLI v2`
See [here](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) for installation guide.
## Installation
```
poetry self add poetry-codeartifact-login
```
## Usage
AWS credentials will need to be configured on the system prior to usage. Typically this is done using the `aws configure` command and/or directly modifying the configuration files. See [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) for more info. They can also be set through [environment variables](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html), which will take precedence over any configuration file values.
Once credentials have been configured, you can log in to CodeArtifact:
```
poetry aws-login <source_name>
```
Assuming the credentials are configured properly and the identity they belong to has proper permissions, `poetry` will be configured with a short-lived authentication token that will automatically be used for installation of any packages in the authenticated source. See [here](https://python-poetry.org/docs/repositories/#private-repository-example) for more information on working with private repositories through `poetry`.
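If the CodeArtifact repository has not yet been added as a source in `pyproject.toml`, it can be registered first with `poetry source add`. The command below is only a sketch; the placeholder URL should be replaced with the repository endpoint returned by `aws codeartifact get-repository-endpoint` (using `--format pypi`):
```
poetry source add <source_name> https://<domain>-<aws_account_id>.d.codeartifact.<region>.amazonaws.com/pypi/<repository>/simple/
```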
If you want to log in with a profile other than the default, you can do:
```
poetry aws-login <source_name> --profile <profile_name>
```
## CLI Reference
```
poetry aws-login --help
```
/Transcrypt-3.7.16.tar.gz/Transcrypt-3.7.16/transcrypt/demos/parcel_demo/node_modules/htmlnano/node_modules/postcss-minify-font-values/lib/minify-family.js
var stringify = require('postcss-value-parser').stringify;
var uniqs = require('./uniqs')('monospace');
// Note that monospace is intentionally missing from this list; we should not
// remove duplicated monospace keywords, because doing so causes the font to be
// rendered smaller in Chrome.
var keywords = [
'sans-serif',
'serif',
'fantasy',
'cursive'
];
function intersection(haystack, array) {
return array.some(function (v) {
return ~haystack.indexOf(v);
});
}
module.exports = function (nodes, opts) {
var family = [];
var last = null;
var i, max;
nodes.forEach(function (node, index, nodes) {
var value = node.value;
if (node.type === 'string' || node.type === 'function') {
family.push(node);
} else if (node.type === 'word') {
if (!last) {
last = { type: 'word', value: '' };
family.push(last);
}
last.value += node.value;
} else if (node.type === 'space') {
if (last && index !== nodes.length - 1) {
last.value += ' ';
}
} else {
last = null;
}
});
family = family.map(function (node) {
if (node.type === 'string') {
if (
!opts.removeQuotes ||
intersection(node.value, keywords) ||
/[0-9]/.test(node.value.slice(0, 1))
) {
return stringify(node);
}
var escaped = node.value.split(/\s/).map(function (word, index, words) {
var next = words[index + 1];
if (next && /^[^a-z]/i.test(next)) {
return word + '\\';
}
if (!/^[^a-z\d\xa0-\uffff_-]/i.test(word)) {
return word.replace(/([^a-z\d\xa0-\uffff_-])/gi, '\\$1');
}
if (/^[^a-z]/i.test(word) && index < 1) {
return '\\' + word;
}
return word;
}).join(' ');
if (escaped.length < node.value.length + 2) {
return escaped;
}
}
return stringify(node);
});
if (opts.removeAfterKeyword) {
for (i = 0, max = family.length; i < max; i += 1) {
if (~keywords.indexOf(family[i])) {
family = family.slice(0, i + 1);
break;
}
}
}
if (opts.removeDuplicates) {
family = uniqs(family);
}
return [
{
type: 'word',
value: family.join()
}
];
};
/pchem-0.2.tar.gz/pchem-0.2/apps/kinetics.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
import io
import base64
import re
from collections import defaultdict
from util import find, write_excel, process_file
import util
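# combine_spectra expects every DataFrame to share the same x axis (within `tol`)
# and returns one wide table: the x column plus one y column per label.
# Hypothetical example:
#   df1 = pd.DataFrame({'Wavelength (nm)': [400.0, 401.0], 'Absorbance': [0.10, 0.20]})
#   df2 = pd.DataFrame({'Wavelength (nm)': [400.0, 401.0], 'Absorbance': [0.30, 0.40]})
#   combine_spectra([df1, df2], ['run1', 'run2'], 'Wavelength (nm)', 'Absorbance')
#   # -> columns: ['Wavelength (nm)', 'run1', 'run2']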
def combine_spectra(dataframes, labels, xcol, ycol, tol=1e-6):
x_data = dataframes[0][xcol].values
all_data = [x_data]
col_names = [xcol]
col_names.extend(labels)
for df in dataframes:
x = df[xcol].values
if (len(x) != len(x_data)) or abs(x - x_data).max() > tol:
st.write("X axes are different - Try deselecting `Same x axis?` and Submit again.")
raise ValueError("X axis of each dataset should be the same!")
y = df[ycol].values
all_data.append(y)
# ind, fname = ind_fname
# before_ext = fname.split(".")[0]
# col_names.append(f"{ind}-{before_ext}")
return pd.DataFrame(np.array(all_data).T, columns=col_names)
def limit_x_values(combined_data, x_column, settings):
st.markdown("### Limit x Range")
x_data = combined_data[x_column].values
x_min_val = st.selectbox("Choose minimum x:", x_data, index=0 )
i_min = find(x_min_val, x_data)
x_max_val = st.selectbox("Choose maximum x:", x_data, index=len(x_data)-1 )
i_max = find(x_max_val, x_data)
combined_data = combined_data.iloc[i_min:i_max, :]
settings['x_min'] = x_min_val
settings['x_max'] = x_max_val
return combined_data, settings
def normalize_data(combined_data, x_column, settings):
st.markdown("### Normalization options")
x_data = combined_data[x_column].values
processing_options = ['None', "Normalized", "Relative"]
processing = st.selectbox("Processing?", processing_options)
settings['processing'] = processing
if processing == 'Normalized':
normalize_wavelength = st.selectbox("Normalize data at: ", x_data)
settings['normalization_wavelength'] = normalize_wavelength
else:
settings.pop('normalization_wavelength', 0)
if processing == "Normalized":
norm_ind = find(normalize_wavelength, x_data)
y_data = combined_data.values[:, 1:]
combined_data.values[:, 1:] = y_data / y_data[norm_ind]
if processing == "Relative":
# Should probably be tweaked a bit to be more convenient...
y_data = combined_data.values[:, 1:]
combined_data.values[:, 1:] = y_data / y_data.max(axis=0)
return combined_data, settings
def check_nans(df, col, threshold=0.5):
return df[col].isna().sum() / len(df) > threshold
@st.cache
def sort_files_and_create_data(files, sort_files):
if sort_files:
files = sorted(files, key=lambda x: x.name.split('__')[-1])
else:
files = files
filenames = [(i, f.name) for i, f in enumerate(files)]
data = [process_file(f) for f in files]
return filenames, data
@st.cache
def create_data_dict(filenames, data):
files_dict = defaultdict(lambda : dict(times=[], data=[], number=[], time=[]))
# df_all = pd.DataFrame()
for filename, d in zip(filenames, data):
dataname, number, time = filename[1].split('__')
        # str.strip() removes a *set* of characters; use replace() to drop the literal suffix
        dataname_short = dataname.replace('_Absorbance', '')
hr, min_, sec, msec = time.split('-')
msec = msec.split('.')[0]
time = int(hr) * 3600 + int(min_)*60 + int(sec) + int(msec)/1000.0
dict_entry = files_dict[dataname_short]
dict_entry['times'].append(time)
dict_entry['data'].append(d)
dict_entry['number'].append(number)
dict_entry['time'].append(f'{hr}:{min_}:{sec}.{msec}')
return files_dict
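# Illustrative note (not part of the original module): create_data_dict assumes
# OceanView-style filenames of the form "<name>_Absorbance__<number>__HH-MM-SS-mmm.csv".
# A minimal sketch of the parsing it performs, assuming such a filename:
#
#     dataname, number, time = "dye_Absorbance__3__10-05-30-250.csv".split('__')
#     hr, min_, sec, msec = time.split('-')          # '10', '05', '30', '250.csv'
#     msec = msec.split('.')[0]                      # '250'
#     seconds = int(hr) * 3600 + int(min_) * 60 + int(sec) + int(msec) / 1000.0
#     # seconds == 36330.25, stored under the short experiment name 'dye'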
def run():
df = None
cols = None
x_column = 'Wavelength (nm)'
y_column = 'Absorbance'
combined_data = None
if 'ever_submitted' not in st.session_state:
st.session_state.ever_submitted = False
settings = {}
st.markdown("""## UV-Vis Kinetics Analysis
    This helper will combine multiple UV-Vis files (from Ocean Optics Ocean View .csv export),
plot/normalize the spectra, and output a single Excel file for easy plotting and analysis.
""")
sort_files = st.checkbox("Sort files?", value=True)
files = st.file_uploader("Upload CSV or Excel Files",
accept_multiple_files=True)
if files:
filenames, data = sort_files_and_create_data(files, sort_files)
files_dict = create_data_dict(filenames, data)
st.write("""### Data Summary""")
st.markdown(f"{len(filenames)} data files from {len(files_dict)} experiments.")
st.write("""## Labels
Use the boxes below to change the labels for each kinetics experiment.
""")
labels = [st.text_input(key, value=key) for key in files_dict.keys()]
same_x = False
data_example = list(files_dict.values())[0]['data'][0]
x_data = data_example[x_column].values # Use the first x data to set all the limits.
wavelength_monitor = st.number_input("Monitor wavelength:", min_value=x_data.min(),
max_value=x_data.max(), value=x_data.mean())
wavelength_bandwidth = st.number_input("Bandwidth", min_value=0.5, value=3.0)
# Assuming all have the same x axis data
kinetics_mask = ((x_data > wavelength_monitor-wavelength_bandwidth/2)
* (x_data < wavelength_monitor+wavelength_bandwidth/2))
plot_kinetics = st.checkbox("Plot kinetics data?")
dfs = []
for key, val in files_dict.items():
times = np.array(val['times'])
times = times - times.min() # Times in seconds, relative to start of experiment
data = np.array([np.mean(d[y_column].values[kinetics_mask]) for d in val['data']])
dfs.append(
pd.DataFrame.from_dict({'Time (s)':times, 'A': data, 'name':key,
'wavelength': wavelength_monitor,
'bandwidth': wavelength_bandwidth,
'number': val['number'],
'time': val['time']},
orient='columns')
)
df_kinetics = pd.concat(dfs, ignore_index=True)
df_kinetics['Time (min)'] = df_kinetics['Time (s)'] / 60.0
st.write(df_kinetics)
if plot_kinetics:
scatter = px.line(df_kinetics, x='Time (s)', y='A', color='name',
labels={'A': f'A @ {wavelength_monitor:.1f}±{wavelength_bandwidth/2} nm'})
st.plotly_chart(scatter)
st.markdown("### Output options")
filename = st.text_input("Filename:", value="kinetics-data")
write_excel(df_kinetics, filename)
if __name__ == "__main__":
run()
|
PypiClean
|
/PyCIM-15.15.0.tar.gz/PyCIM-15.15.0/CIM15/IEC61968/PaymentMetering/__init__.py
|
from CIM15.IEC61968.PaymentMetering.VendorShift import VendorShift
from CIM15.IEC61968.PaymentMetering.Transactor import Transactor
from CIM15.IEC61968.PaymentMetering.CashierShift import CashierShift
from CIM15.IEC61968.PaymentMetering.TariffProfile import TariffProfile
from CIM15.IEC61968.PaymentMetering.AccountingUnit import AccountingUnit
from CIM15.IEC61968.PaymentMetering.Transaction import Transaction
from CIM15.IEC61968.PaymentMetering.TimeTariffInterval import TimeTariffInterval
from CIM15.IEC61968.PaymentMetering.Charge import Charge
from CIM15.IEC61968.PaymentMetering.AuxiliaryAgreement import AuxiliaryAgreement
from CIM15.IEC61968.PaymentMetering.Tender import Tender
from CIM15.IEC61968.PaymentMetering.ServiceSupplier import ServiceSupplier
from CIM15.IEC61968.PaymentMetering.MerchantAgreement import MerchantAgreement
from CIM15.IEC61968.PaymentMetering.LineDetail import LineDetail
from CIM15.IEC61968.PaymentMetering.ConsumptionTariffInterval import ConsumptionTariffInterval
from CIM15.IEC61968.PaymentMetering.Vendor import Vendor
from CIM15.IEC61968.PaymentMetering.Cheque import Cheque
from CIM15.IEC61968.PaymentMetering.AccountMovement import AccountMovement
from CIM15.IEC61968.PaymentMetering.Shift import Shift
from CIM15.IEC61968.PaymentMetering.Receipt import Receipt
from CIM15.IEC61968.PaymentMetering.Due import Due
from CIM15.IEC61968.PaymentMetering.BankAccountDetail import BankAccountDetail
from CIM15.IEC61968.PaymentMetering.AuxiliaryAccount import AuxiliaryAccount
from CIM15.IEC61968.PaymentMetering.Cashier import Cashier
from CIM15.IEC61968.PaymentMetering.Card import Card
from CIM15.IEC61968.PaymentMetering.MerchantAccount import MerchantAccount
from CIM15.IEC61968.PaymentMetering.PointOfSale import PointOfSale
nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15#PaymentMetering"
nsPrefix = "cimPaymentMetering"
class TenderKind(str):
"""Values are: unspecified, cheque, other, cash, card
"""
pass
class ChequeKind(str):
"""Values are: other, postalOrder, bankOrder
"""
pass
class ChargeKind(str):
"""Values are: other, demandCharge, consumptionCharge, auxiliaryCharge, taxCharge
"""
pass
class TransactionKind(str):
"""Values are: other, serviceChargePayment, accountPayment, tokenSalePayment, tokenCancellation, taxChargePayment, tokenExchange, tokenGrant, diversePayment, auxiliaryChargePayment, meterConfigurationToken, tokenFreeIssue, transactionReversal
"""
pass
class SupplierKind(str):
"""Values are: other, retailer, utility
"""
pass
|
PypiClean
|
/petab_select-0.1.10-py3-none-any.whl/petab_select/petab.py
|
from pathlib import Path
from typing import List, Optional
from more_itertools import one
import petab
from petab.C import ESTIMATE, NOMINAL_VALUE
from .constants import PETAB_ESTIMATE_FALSE, TYPE_PARAMETER_DICT, TYPE_PATH
class PetabMixin:
"""Useful things for classes that contain a PEtab problem.
All attributes/methods are prefixed with `petab_`.
"""
def __init__(
self,
petab_yaml: Optional[TYPE_PATH] = None,
petab_problem: Optional[petab.Problem] = None,
parameters_as_lists: bool = False,
):
if petab_yaml is None and petab_problem is None:
raise ValueError(
'Please supply at least one of either the location of the '
'PEtab problem YAML file, or an instance of the PEtab problem.'
)
self.petab_yaml = petab_yaml
if self.petab_yaml is not None:
self.petab_yaml = Path(self.petab_yaml)
self.petab_problem = petab_problem
if self.petab_problem is None:
self.petab_problem = petab.Problem.from_yaml(str(petab_yaml))
self.petab_parameters = {
parameter_id: (
row[NOMINAL_VALUE]
if row[ESTIMATE] == PETAB_ESTIMATE_FALSE
else ESTIMATE
)
for parameter_id, row in self.petab_problem.parameter_df.iterrows()
}
if parameters_as_lists:
self.petab_parameters = {
k: [v] for k, v in self.petab_parameters.items()
}
@property
def petab_parameter_ids_estimated(self) -> List[str]:
return [
parameter_id
for parameter_id, parameter_value in self.petab_parameters.items()
if parameter_value == ESTIMATE
]
@property
def petab_parameter_ids_fixed(self) -> List[str]:
estimated = self.petab_parameter_ids_estimated
return [
parameter_id
for parameter_id in self.petab_parameters
if parameter_id not in estimated
]
@property
def petab_parameters_singular(self) -> TYPE_PARAMETER_DICT:
return {
parameter_id: one(parameter_value)
            for parameter_id, parameter_value in self.petab_parameters.items()
}
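# Hedged usage sketch (not part of the original module; the YAML path below is a placeholder):
#
#     mixin = PetabMixin(petab_yaml="model/petab_problem.yaml")
#     mixin.petab_parameter_ids_estimated   # IDs of parameters marked for estimation
#     mixin.petab_parameter_ids_fixed       # IDs of parameters fixed to their nominal values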
|
PypiClean
|
/Django_patch-2.2.19-py3-none-any.whl/django/core/management/commands/sqlmigrate.py
|
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
class Command(BaseCommand):
help = "Prints the SQL statements for the named migration."
output_transaction = True
def add_arguments(self, parser):
parser.add_argument('app_label', help='App label of the application containing the migration.')
parser.add_argument('migration_name', help='Migration name to print the SQL for.')
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to create SQL for. Defaults to the "default" database.',
)
parser.add_argument(
'--backwards', action='store_true',
help='Creates SQL to unapply the migration, rather than to apply it',
)
def execute(self, *args, **options):
# sqlmigrate doesn't support coloring its output but we need to force
# no_color=True so that the BEGIN/COMMIT statements added by
# output_transaction don't get colored either.
options['no_color'] = True
return super().execute(*args, **options)
def handle(self, *args, **options):
# Get the database we're operating from
connection = connections[options['database']]
# Load up an executor to get all the migration data
executor = MigrationExecutor(connection)
# Resolve command-line arguments into a migration
app_label, migration_name = options['app_label'], options['migration_name']
# Validate app_label
try:
apps.get_app_config(app_label)
except LookupError as err:
raise CommandError(str(err))
if app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations" % app_label)
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % (
migration_name, app_label))
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'. Is it in INSTALLED_APPS?" % (
migration_name, app_label))
targets = [(app_label, migration.name)]
# Show begin/end around output only for atomic migrations
self.output_transaction = migration.atomic
# Make a plan that represents just the requested migrations and show SQL
# for it
plan = [(executor.loader.graph.nodes[targets[0]], options['backwards'])]
sql_statements = executor.collect_sql(plan)
return '\n'.join(sql_statements)
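# Illustrative invocations (not part of the original file; `myapp` and `0001` are placeholders):
#
#     python manage.py sqlmigrate myapp 0001 --database default
#     python manage.py sqlmigrate myapp 0001 --backwards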
|
PypiClean
|
/buckaroo-0.3.17.tar.gz/buckaroo-0.3.17/docs/source/contributing.rst
|
.. _Contributing:
========================
Contributing to Buckaroo
========================
Buckaroo is actively looking for contributors. All forms of participation are welcome, from bug reports, to suggestions, to code contributions.
Developing in the Jupyter Lab environment
=========================================
The easiest way to develop and contribute to Buckaroo is to add ``Commands``. When I use Buckaroo to clean and explore a new dataset, I first try to use the built-in Buckaroo commands in the UI. When I want to perform a manipulation that doesn't yet exist in Buckaroo, I first drop down to raw pandas/python like I would before Buckaroo... Then I figure out how to expose that functionality as a ``Command``. While working with manatee data, I recognized that a column was probably date times, but a ``to_datetime`` ``Command`` didn't exist. So I wrote one.
.. code-block:: python
pd.to_datetime(df['REPDATE']).head()
#outputs ->
#0 1974-04-03 00:00:00+00:00
#1 1974-06-27 00:00:00+00:00
#Name: REPDATE, dtype: datetime64[ns, UTC]
#pd.to_datetime is the transform I want... so I write it as a Command
#and add it to the w widget with the @w.add_command decorator
@w.add_command
class to_datetime(Command):
command_default = [s('to_datetime'), s('df'), "col"]
command_pattern = [None]
@staticmethod
def transform(df, col):
df[col] = pd.to_datetime(df[col])
return df
@staticmethod
def transform_to_py(df, col):
return " df['%s'] = pd.to_datetime(df['%s'])" % (col, col)
When you use the ``add_command`` decorator, the command is instantly added to the UI of the corresponding widget. Subsequent re-evaluations of the same cell will replace a ``Command`` in the widget with the same name. This allows you to iteratively develop commands.
Once you have developed a ``Command`` you can either continue to use it internally as with the ``add_command`` decorator or you can open a PR and add it to the builtin commands for Buckaroo `all_transforms.py <https://github.com/paddymul/buckaroo/blob/main/buckaroo/all_transforms.py>`_.
The upside of just using the @add_command decorator is that you don't have to set up a development environment.
Setting up a development environment
====================================
First, you need to fork the project. Then setup your environment:
.. code-block:: bash
# create a new conda environment
conda create -n buckaroo-dev jupyterlab pandas nodejs yarn pip
conda activate buckaroo-dev
pip install build twine
# download buckaroo from your GitHub fork
git clone https://github.com/<your-github-username>/buckaroo.git
# or start by cloning the main repo
git clone https://github.com/paddymul/buckaroo.git
# install JS dependencies and build js assets
cd buckaroo
yarn install
# install Buckaroo in editable mode
python -m pip install -ve .
#in another shell, setup the typescript watcher
conda activate buckaroo-dev
yarn build && yarn watch
#this will build the jupyter lab extension, and recompile on any code changes
#start your jupyter lab server in another shell
conda activate buckaroo-dev
jupyter lab
#work on your jupyter notebook from that lab server
.. note::
Getting typescript updates from the widget into a jupyter lab notebook is a little tricky. The following steps ensure that typescript code changes are picked up.
Loading typescript changes
==========================
I make my changes and confirm that ``yarn watch`` has successfully compiled them; **then** I follow these steps to ensure the new code is loaded:
#. Go to the jupyter lab notebook in a browser
#. Click the Kernel Menu > Restart Kernel and Clear all outputs
#. Save the notebook
#. Reload the web browser
#. Execute the relevant cells
It is sometimes helpful to put a console.log in ``js/plugin.ts`` and check that the updated log statement shows up in the browser to make sure you are executing the code you think you are.
|
PypiClean
|
/pytorch-functional-0.7.1.post4.tar.gz/pytorch-functional-0.7.1.post4/pytorch_functional/functions_utility.py
|
from __future__ import annotations
from typing import Callable, Dict, Hashable, List, Tuple
from torch import nn
from . import useful_layers
from .functional_model import SymbolicTensor
def add_module_to_model(module, *args):
assert isinstance(args[0], SymbolicTensor)
return args[0](module, *args[1:])
def _replace_symbolic_with_value(container, extracted, navigation):
"""Search recursively for all occurences of SymbolicTensor and replace them with their value.
At the same time save navigation to know, how to do indexing to get to them.
If navigation for container ends up as [..., [1, 2, 0, "TEST", 5], ...]
this means that to get the element you should index container[1][2][0]["TEST"][5].
"""
if isinstance(container, SymbolicTensor):
navigation.append(navigation[-1].copy())
extracted.append(container)
return container.v
if isinstance(container, List) or isinstance(container, Tuple):
new_list = []
for i, x in enumerate(container):
navigation[-1].append(i)
new_list.append(_replace_symbolic_with_value(x, extracted, navigation))
navigation[-1].pop()
return new_list
if isinstance(container, Dict):
new_dict = {}
for k, v in container.items():
navigation[-1].append(k)
new_dict[k] = _replace_symbolic_with_value(v, extracted, navigation)
navigation[-1].pop()
return new_dict
return container
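# Illustrative note (not part of the original module; sym_a/sym_b stand in for SymbolicTensor
# placeholders). For a call such as
#     _replace_symbolic_with_value({"w": [sym_a, 3], "b": sym_b}, extracted, navigation=[[]])
# extracted ends up as [sym_a, sym_b]; after dropping the trailing scratch path
# (the navigation.pop() performed by add_to_model), navigation is [["w", 0], ["b"]],
# i.e. the positions container["w"][0] and container["b"] where the symbols' values belong.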
def add_to_model(func: Callable | nn.Module, *args, **kwds):
"""Register a custom func or module in the computation graph.
    This should work with arbitrary functions and modules.
    In the case of functions it might add a small delay to the call, because it is figuring out
where the arguments should go. If this is unacceptable, please create a nn.Module from your func.
All arguments, including Symbolic Tensors, should be passed after the ``func`` argument.
They can be mixed and matched, even nested in lists, tuples and dictionaries.
Convolution func example::
inputs = Input(shape=(3, 32, 32))
kernel = Input(shape=(16, 3, 3, 3), include_batch=False)
bias = Input(shape=(16,), include_batch=False)
        output = add_to_model(F.conv2d, input=inputs, weight=kernel, bias=bias, padding=1)
"""
if isinstance(func, nn.Module) and not kwds:
return add_module_to_model(func, *args)
extracted_symbols: List[SymbolicTensor] = []
real_call_args = []
real_call_kwds = {}
navigation: List[List[Hashable]] = [[]]
real_call_args = _replace_symbolic_with_value(args, extracted_symbols, navigation)
real_call_kwds = _replace_symbolic_with_value(kwds, extracted_symbols, navigation)
navigation.pop()
assert len(extracted_symbols) > 0, "No Symbolic Tensors detected in the input!"
assert all((isinstance(symbol, SymbolicTensor) for symbol in extracted_symbols))
assert len(extracted_symbols) == len(navigation)
def wrapper_function(*args):
assert len(args) == len(navigation), f"Expected {len(navigation)} inputs, not {len(args)}!"
for arg, navi in zip(args, navigation):
obj = real_call_kwds if isinstance(navi[0], str) else real_call_args
for idx in navi[:-1]:
obj = obj[idx]
obj[navi[-1]] = arg
return func(*real_call_args, **real_call_kwds)
module = useful_layers.NamedAnyOpLayer(op=wrapper_function, name=f"{func.__name__}({len(navigation)})")
return extracted_symbols[0](module, *extracted_symbols[1:])
|
PypiClean
|
/pymvpa2-2.6.5.tar.gz/pymvpa2-2.6.5/mvpa2/measures/noiseperturbation.py
|
"""Derive sensitivity maps for a metric by selective noise perturbation"""
__docformat__ = 'restructuredtext'
if __debug__:
from mvpa2.base import debug
from mvpa2.support.copy import deepcopy
import numpy as np
from mvpa2.measures.base import FeaturewiseMeasure
from mvpa2.datasets.base import Dataset
class NoisePerturbationSensitivity(FeaturewiseMeasure):
"""Sensitivity based on the effect of noise perturbation on a measure.
This is a `FeaturewiseMeasure` that uses a scalar `Measure`
and selective noise perturbation to compute a sensitivity map.
    First the scalar `Measure` is computed using the original dataset. Next
    the data measure is computed multiple times, each time with a single feature in
the dataset perturbed by noise. The resulting difference in the
scalar `Measure` is used as the sensitivity for the respective
perturbed feature. Large differences are treated as an indicator of a
feature having great impact on the scalar `Measure`.
Notes
-----
The computed sensitivity map might have positive and negative values!
"""
is_trained = True
"""Indicate that this measure is always trained."""
def __init__(self, datameasure,
noise=np.random.normal):
"""
Parameters
----------
datameasure : `Measure`
Used to quantify the effect of noise perturbation.
noise: Callable
          Used to generate noise. The noise generator has to return a 1d array
          of n values when called with the `size=n` keyword argument. This is the
default interface of the random number generators in NumPy's
`random` module.
"""
# init base classes first
FeaturewiseMeasure.__init__(self)
self.__datameasure = datameasure
self.__noise = noise
def _call(self, dataset):
# first cast to floating point dtype, because noise is most likely
# floating point as well and '+=' on int would not do the right thing
if not np.issubdtype(dataset.samples.dtype, np.float):
ds = dataset.copy(deep=False)
ds.samples = dataset.samples.astype('float32')
dataset = ds
if __debug__:
nfeatures = dataset.nfeatures
# using a list here, to be able to handle output of unknown
# dimensionality
sens_map = []
# compute the datameasure on the original dataset
# this is used as a baseline
orig_measure = self.__datameasure(dataset)
# do for every _single_ feature in the dataset
for feature in xrange(dataset.nfeatures):
if __debug__:
debug('PSA', "Analyzing %i features: %i [%i%%]" \
% (nfeatures,
feature+1,
float(feature+1)/nfeatures*100,), cr=True)
# store current feature to restore it later on
current_feature = dataset.samples[:, feature].copy()
# add noise to current feature
dataset.samples[:, feature] += self.__noise(size=len(dataset))
# compute the datameasure on the perturbed dataset
perturbed_measure = self.__datameasure(dataset)
# restore the current feature
dataset.samples[:, feature] = current_feature
# difference from original datameasure is sensitivity
sens_map.append(perturbed_measure.samples - orig_measure.samples)
if __debug__:
debug('PSA', '')
# turn into an array and get rid of unnecessary axes -- ideally yielding
# 2D array
sens_map = np.array(sens_map).squeeze()
# swap first to axis: we have nfeatures on first but want it as second
# in a dataset
sens_map = np.swapaxes(sens_map, 0, 1)
return Dataset(sens_map)
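# Hedged usage sketch (not part of the original module; `scalar_measure` stands for
# any scalar `Measure`, e.g. a cross-validated classification error):
#
#     sens = NoisePerturbationSensitivity(scalar_measure, noise=np.random.normal)
#     sens_map = sens(dataset)   # Dataset holding one sensitivity value per feature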
|
PypiClean
|
/PyGall-0.12.tar.gz/PyGall-0.12/pygall/static/app/js/jquery.iframe-transport.js
|
(function ($) {
'use strict';
// Helper variable to create unique names for the transport iframes:
var counter = 0;
// The iframe transport accepts three additional options:
// options.fileInput: a jQuery collection of file input fields
// options.paramName: the parameter name for the file form data,
// overrides the name property of the file input field(s)
// options.formData: an array of objects with name and value properties,
// equivalent to the return data of .serializeArray(), e.g.:
// [{name: a, value: 1}, {name: b, value: 2}]
$.ajaxTransport('iframe', function (options, originalOptions, jqXHR) {
if (options.type === 'POST' || options.type === 'GET') {
var form,
iframe;
return {
send: function (headers, completeCallback) {
form = $('<form style="display:none;"></form>');
// javascript:false as initial iframe src
// prevents warning popups on HTTPS in IE6.
// IE versions below IE8 cannot set the name property of
// elements that have already been added to the DOM,
// so we set the name along with the iframe HTML markup:
iframe = $(
'<iframe src="javascript:false;" name="iframe-transport-' +
(counter += 1) + '"></iframe>'
).bind('load', function () {
var fileInputClones;
iframe
.unbind('load')
.bind('load', function () {
var response;
// Wrap in a try/catch block to catch exceptions thrown
// when trying to access cross-domain iframe contents:
try {
response = iframe.contents();
// Google Chrome and Firefox do not throw an
// exception when calling iframe.contents() on
// cross-domain requests, so we unify the response:
if (!response.length || !response[0].firstChild) {
throw new Error();
}
} catch (e) {
response = undefined;
}
// The complete callback returns the
// iframe content document as response object:
completeCallback(
200,
'success',
{'iframe': response}
);
// Fix for IE endless progress bar activity bug
// (happens on form submits to iframe targets):
$('<iframe src="javascript:false;"></iframe>')
.appendTo(form);
form.remove();
});
form
.prop('target', iframe.prop('name'))
.prop('action', options.url)
.prop('method', options.type);
if (options.formData) {
$.each(options.formData, function (index, field) {
$('<input type="hidden"/>')
.prop('name', field.name)
.val(field.value)
.appendTo(form);
});
}
if (options.fileInput && options.fileInput.length &&
options.type === 'POST') {
fileInputClones = options.fileInput.clone();
// Insert a clone for each file input field:
options.fileInput.after(function (index) {
return fileInputClones[index];
});
if (options.paramName) {
options.fileInput.each(function () {
$(this).prop('name', options.paramName);
});
}
// Appending the file input fields to the hidden form
// removes them from their original location:
form
.append(options.fileInput)
.prop('enctype', 'multipart/form-data')
// enctype must be set as encoding for IE:
.prop('encoding', 'multipart/form-data');
}
form.submit();
// Insert the file input fields at their original location
// by replacing the clones with the originals:
if (fileInputClones && fileInputClones.length) {
options.fileInput.each(function (index, input) {
var clone = $(fileInputClones[index]);
$(input).prop('name', clone.prop('name'));
clone.replaceWith(input);
});
}
});
form.append(iframe).appendTo('body');
},
abort: function () {
if (iframe) {
// javascript:false as iframe src aborts the request
// and prevents warning popups on HTTPS in IE6.
// concat is used to avoid the "Script URL" JSLint error:
iframe
.unbind('load')
.prop('src', 'javascript'.concat(':false;'));
}
if (form) {
form.remove();
}
}
};
}
});
// The iframe transport returns the iframe content document as response.
// The following adds converters from iframe to text, json, html, and script:
$.ajaxSetup({
converters: {
'iframe text': function (iframe) {
return iframe.text();
},
'iframe json': function (iframe) {
return $.parseJSON(iframe.text());
},
'iframe html': function (iframe) {
return iframe.find('body').html();
},
'iframe script': function (iframe) {
return $.globalEval(iframe.text());
}
}
});
}(jQuery));
|
PypiClean
|
/multi_task_nmt_lawhy-0.1.3-py3-none-any.whl/mnmt/translator/basic_translator.py
|
class BasicTranslator:
def __init__(self, quiet_translate=True):
"""
Args:
quiet_translate: determine whether to print translation or not
"""
self.quiet_translate = quiet_translate
def translate(self, output, trg, trg_field, output_file=None):
"""
Args:
output: [trg_length, batch_size, output_dim], model's output
trg: [trg_length, batch_size], target reference
trg_field: target language field
output_file: save translation results in output_file
"""
raise NotImplementedError
@staticmethod
def matching(pred, ref, trg_field, quiet_translate, output_file=None):
"""
Args:
pred: model's prediction, modified from model's output
ref: target reference, modified from raw target reference {trg}
trg_field: target language field
quiet_translate: determine whether to print translation or not
output_file: save translation results in output_file
"""
tally = 0
for j in range(pred.shape[0]):
pred_j = pred[j, :]
pred_j_toks = []
for t in pred_j:
tok = trg_field.vocab.itos[t]
if tok == '<eos>':
break
else:
pred_j_toks.append(tok)
pred_j = ''.join(pred_j_toks)
ref_j = ref[j, :]
ref_j_toks = []
for t in ref_j:
tok = trg_field.vocab.itos[t]
if tok == '<eos>':
break
else:
ref_j_toks.append(tok)
ref_j = ''.join(ref_j_toks)
if not quiet_translate:
print("Pred: {} | Ref: {}".format(pred_j, ref_j))
if output_file is not None:
output_file.write(pred_j + '\t' + ref_j + '\n') # save output results in file
if pred_j == ref_j:
tally += 1
return tally
|
PypiClean
|
/alipay-sdk-python-pycryptodome-3.3.202.tar.gz/alipay-sdk-python-pycryptodome-3.3.202/alipay/aop/api/domain/AlipayCommerceIotSupplierAssetBatchqueryModel.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceIotSupplierAssetBatchqueryModel(object):
def __init__(self):
self._cur_page_num = None
self._keyword = None
self._page_size = None
self._supplier_pid = None
@property
def cur_page_num(self):
return self._cur_page_num
@cur_page_num.setter
def cur_page_num(self, value):
self._cur_page_num = value
@property
def keyword(self):
return self._keyword
@keyword.setter
def keyword(self, value):
self._keyword = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def supplier_pid(self):
return self._supplier_pid
@supplier_pid.setter
def supplier_pid(self, value):
self._supplier_pid = value
def to_alipay_dict(self):
params = dict()
if self.cur_page_num:
if hasattr(self.cur_page_num, 'to_alipay_dict'):
params['cur_page_num'] = self.cur_page_num.to_alipay_dict()
else:
params['cur_page_num'] = self.cur_page_num
if self.keyword:
if hasattr(self.keyword, 'to_alipay_dict'):
params['keyword'] = self.keyword.to_alipay_dict()
else:
params['keyword'] = self.keyword
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
if self.supplier_pid:
if hasattr(self.supplier_pid, 'to_alipay_dict'):
params['supplier_pid'] = self.supplier_pid.to_alipay_dict()
else:
params['supplier_pid'] = self.supplier_pid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceIotSupplierAssetBatchqueryModel()
if 'cur_page_num' in d:
o.cur_page_num = d['cur_page_num']
if 'keyword' in d:
o.keyword = d['keyword']
if 'page_size' in d:
o.page_size = d['page_size']
if 'supplier_pid' in d:
o.supplier_pid = d['supplier_pid']
return o
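# Hedged round-trip sketch (not part of the original module; values are placeholders):
#
#     model = AlipayCommerceIotSupplierAssetBatchqueryModel()
#     model.cur_page_num = 1
#     model.page_size = 20
#     model.keyword = 'vending'
#     params = model.to_alipay_dict()   # {'cur_page_num': 1, 'keyword': 'vending', 'page_size': 20}
#     clone = AlipayCommerceIotSupplierAssetBatchqueryModel.from_alipay_dict(params)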
|
PypiClean
|
/h2o_pysparkling_2.4-3.42.0.2.post1.tar.gz/h2o_pysparkling_2.4-3.42.0.2.post1/ai/h2o/sparkling/ml/models/H2ODRFMOJOModel.py
|
from ai.h2o.sparkling.ml.params.H2OMOJOModelParams import H2OTreeBasedSupervisedMOJOModelParams
from pyspark.ml.util import _jvm
from py4j.java_gateway import JavaObject
from ai.h2o.sparkling.Initializer import Initializer
from ai.h2o.sparkling.ml.models.H2OMOJOSettings import H2OMOJOSettings
from ai.h2o.sparkling.ml.params.H2OTypeConverters import H2OTypeConverters
from ai.h2o.sparkling.H2ODataFrameConverters import H2ODataFrameConverters
from ai.h2o.sparkling.ml.params.HasIgnoredColsOnMOJO import HasIgnoredColsOnMOJO
class H2ODRFMOJOModel(H2OTreeBasedSupervisedMOJOModelParams, HasIgnoredColsOnMOJO):
@staticmethod
def createFromMojo(pathToMojo, settings=H2OMOJOSettings.default()):
# We need to make sure that Sparkling Water classes are available on the Spark driver and executor paths
Initializer.load_sparkling_jar()
javaModel = _jvm().ai.h2o.sparkling.ml.models.H2ODRFMOJOModel.createFromMojo(pathToMojo, settings.toJavaObject())
return H2ODRFMOJOModel(javaModel)
def getCrossValidationModels(self):
cvModels = self._java_obj.getCrossValidationModelsAsArray()
if cvModels is None:
return None
elif isinstance(cvModels, JavaObject):
return [H2ODRFMOJOModel(v) for v in cvModels]
else:
raise TypeError("Invalid type.")
def getMtries(self):
value = self._java_obj.getMtries()
return value
def getBinomialDoubleTrees(self):
value = self._java_obj.getBinomialDoubleTrees()
return value
def getSampleRate(self):
value = self._java_obj.getSampleRate()
return value
def getBalanceClasses(self):
value = self._java_obj.getBalanceClasses()
return value
def getClassSamplingFactors(self):
value = self._java_obj.getClassSamplingFactors()
return H2OTypeConverters.scalaArrayToPythonArray(value)
def getMaxAfterBalanceSize(self):
value = self._java_obj.getMaxAfterBalanceSize()
return value
def getMaxConfusionMatrixSize(self):
value = self._java_obj.getMaxConfusionMatrixSize()
return value
def getMaxDepth(self):
value = self._java_obj.getMaxDepth()
return value
def getMinRows(self):
value = self._java_obj.getMinRows()
return value
def getNbins(self):
value = self._java_obj.getNbins()
return value
def getNbinsTopLevel(self):
value = self._java_obj.getNbinsTopLevel()
return value
def getNbinsCats(self):
value = self._java_obj.getNbinsCats()
return value
def getSeed(self):
value = self._java_obj.getSeed()
return value
def getBuildTreeOneNode(self):
value = self._java_obj.getBuildTreeOneNode()
return value
def getSampleRatePerClass(self):
value = self._java_obj.getSampleRatePerClass()
return H2OTypeConverters.scalaArrayToPythonArray(value)
def getColSampleRatePerTree(self):
value = self._java_obj.getColSampleRatePerTree()
return value
def getColSampleRateChangePerLevel(self):
value = self._java_obj.getColSampleRateChangePerLevel()
return value
def getScoreTreeInterval(self):
value = self._java_obj.getScoreTreeInterval()
return value
def getMinSplitImprovement(self):
value = self._java_obj.getMinSplitImprovement()
return value
def getHistogramType(self):
value = self._java_obj.getHistogramType()
return value
def getCalibrateModel(self):
value = self._java_obj.getCalibrateModel()
return value
def getCalibrationMethod(self):
value = self._java_obj.getCalibrationMethod()
return value
def getCheckConstantResponse(self):
value = self._java_obj.getCheckConstantResponse()
return value
def getNfolds(self):
value = self._java_obj.getNfolds()
return value
def getKeepCrossValidationModels(self):
value = self._java_obj.getKeepCrossValidationModels()
return value
def getKeepCrossValidationPredictions(self):
value = self._java_obj.getKeepCrossValidationPredictions()
return value
def getKeepCrossValidationFoldAssignment(self):
value = self._java_obj.getKeepCrossValidationFoldAssignment()
return value
def getDistribution(self):
value = self._java_obj.getDistribution()
return value
def getLabelCol(self):
value = self._java_obj.getLabelCol()
return value
def getWeightCol(self):
value = self._java_obj.getWeightCol()
return value
def getFoldCol(self):
value = self._java_obj.getFoldCol()
return value
def getFoldAssignment(self):
value = self._java_obj.getFoldAssignment()
return value
def getCategoricalEncoding(self):
value = self._java_obj.getCategoricalEncoding()
return value
def getIgnoreConstCols(self):
value = self._java_obj.getIgnoreConstCols()
return value
def getScoreEachIteration(self):
value = self._java_obj.getScoreEachIteration()
return value
def getStoppingRounds(self):
value = self._java_obj.getStoppingRounds()
return value
def getMaxRuntimeSecs(self):
value = self._java_obj.getMaxRuntimeSecs()
return value
def getStoppingMetric(self):
value = self._java_obj.getStoppingMetric()
return value
def getStoppingTolerance(self):
value = self._java_obj.getStoppingTolerance()
return value
def getGainsliftBins(self):
value = self._java_obj.getGainsliftBins()
return value
def getCustomMetricFunc(self):
value = self._java_obj.getCustomMetricFunc()
return value
def getExportCheckpointsDir(self):
value = self._java_obj.getExportCheckpointsDir()
return value
def getAucType(self):
value = self._java_obj.getAucType()
return value
# Outputs
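# Hedged usage sketch (not part of the original module; the MOJO path and `spark_df`
# are placeholders for illustration only):
#
#     model = H2ODRFMOJOModel.createFromMojo("file:///tmp/drf_model.mojo")
#     predictions = model.transform(spark_df)   # standard Spark ML Transformer usage
#     model.getMaxDepth(), model.getSampleRate()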
|
PypiClean
|
/jd/security/tde_client/http_report_client.py
|
import socket
import platform
from threading import RLock
import os
from seven_jd.jd.api.base import RestApi
from jd import appinfo
import random
import time
import json
ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=+-*/_|<>^~@?%&"
class HttpReportClient(object):
def __init__(self, tde_client, server_url, access_token, app_key, app_secret):
self.reports = dict()
self.host_info = HttpReportClient.__get_host_info()
self.parent = tde_client
self.server_url = server_url
self.access_token = access_token
self.app_key = app_key
self.app_secret = app_secret
self.env_label = platform.system() + "|" + platform.version() + "|" + platform.python_version()
self.type = {"init": 1, "exception": 2, "statistic": 3, "event": 4}
self.level = {"info": 1, "warn": 2, "error": 3, "severe": 4}
self.__lock = RLock()
self.__add_cup_info()
# statistic record present in long array
# index: enccnt(0) deccnt(1) encerrcnt(2) decerrcnt(3)
self.statistic = [0, 0, 0, 0]
@staticmethod
def __get_host_info():
name = socket.getfqdn(socket.gethostname())
addr = socket.gethostbyname(name)
if addr is not None:
return addr
return "Unknown host"
def __add_cup_info(self):
lower_env_label = self.env_label.lower()
cpu_info = 'Unknown'
if lower_env_label.find('linux') != -1:
cpu_info = os.popen('cat /proc/cpuinfo | grep "model name" | uniq').read().split(':')[1].rstrip(
'\n').strip()
elif lower_env_label.find('mac') != -1:
cpu_info = os.popen('sysctl -n machdep.cpu.brand_string').read().rstrip('\n').strip()
elif lower_env_label.find('windows') != -1:
cmd_result = os.popen('wmic cpu get name')
cpu_info = cmd_result.read().replace('Name', '').replace('\n', '', -1).strip()
cmd_result.read()
self.env_label = self.env_label + '|' + cpu_info
def flush(self):
self.insert_statistic_report()
self.send_all_reports()
def send_all_reports(self):
with self.__lock:
            # iterate over a snapshot of the keys so entries can be deleted while looping
            for key in list(self.reports.keys()):
val = self.reports[key]
request = JosSecretApiReportRequest(self.server_url, 80)
request.businessId = val['businessId']
request.text = val['text']
request.attribute = json.dumps(val['attributes'])
request.set_app_info(appinfo(self.app_key, self.app_secret))
res = request.getResponse(self.access_token)
if res is not None and res.get('serviceCode') == 0:
del self.reports[key]
def insert_init_report(self):
with self.__lock:
init_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'INIT',
'attributes': {
'type': self.type['init'],
'host': self.host_info,
'level': self.level['info'],
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000)
}
}
self.reports[self.type['init']] = init_msg
def insert_statistic_report(self):
with self.__lock:
statistic_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'STATISTIC',
'attributes': {
'enccnt': str(self.statistic[0]),
'deccnt': str(self.statistic[1]),
'encerrcnt': str(self.statistic[2]),
'decerrcnt': str(self.statistic[3]),
'type': self.type['statistic'],
'host': self.host_info,
'level': self.level['info'],
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000)
}
}
self.reports[self.type['statistic']] = statistic_msg
self.statistic = [0, 0, 0, 0]
def insert_event_report(self, event_code, event_detail):
with self.__lock:
event_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'EVENT',
'attributes': {
'code': event_code,
'event': event_detail,
'type': self.type['event'],
'host': self.host_info,
'level': self.level['info'],
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000)
}
}
request = JosSecretApiReportRequest(self.server_url, 80)
request.businessId = event_msg['businessId']
            request.text = event_msg['text']
            request.attribute = json.dumps(event_msg['attributes'])
request.set_app_info(appinfo(self.app_key, self.app_secret))
request.getResponse(self.access_token)
def insert_key_update_event_report(self, event_code, event_detail, major_key_ver, key_list):
with self.__lock:
key_update_event_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'EVENT',
'attributes': {
'cur_key': major_key_ver,
'keylist': key_list,
'type': self.type['event'],
'host': self.host_info,
'level': self.level['info'],
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000)
}
}
request = JosSecretApiReportRequest(self.server_url, 80)
request.businessId = key_update_event_msg['businessId']
request.text = key_update_event_msg['text']
request.attribute = json.dumps(key_update_event_msg['attributes'])
request.set_app_info(appinfo(self.app_key, self.app_secret))
request.getResponse(self.access_token)
def insert_err_report(self, code, detail, stack_trace, level):
with self.__lock:
err_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'EXCEPTION',
'attributes': {
'type': self.type['exception'],
'host': self.host_info,
'level': level,
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000),
'code': code,
'msg': detail,
'heap': stack_trace
}
}
if level == self.level['error'] or level == self.level['severe']:
request = JosSecretApiReportRequest(self.server_url, 80)
request.businessId = err_msg['businessId']
request.text = err_msg['text']
request.attribute = json.dumps(err_msg['attributes'])
request.set_app_info(appinfo(self.app_key, self.app_secret))
request.getResponse(self.access_token)
else:
self.reports[code] = err_msg
class JosSecretApiReportRequest(RestApi):
def __init__(self, domain, port=80):
RestApi.__init__(self, domain, port)
self.serverUrl = None
self.businessId = None
self.text = None
self.attribute = None
def process_with_url_before_request(self, url):
self.serverUrl = url
def getapiname(self):
return 'jingdong.jos.secret.api.report.get'
|
PypiClean
|
/authok-python-1.22.1.tar.gz/authok-python-1.22.1/authok/v3/management/rules.py
|
from .rest import RestClient
class Rules(object):
"""Rules endpoint implementation.
Args:
domain (str): Your AuthOK domain, e.g: 'username.cn.authok.cn'
token (str): Management API v1 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
rest_options (RestClientOptions): Pass an instance of
RestClientOptions to configure additional RestClient
options, such as rate-limit retries.
(defaults to None)
"""
def __init__(self, domain, token, telemetry=True, timeout=5.0, protocol="https", rest_options=None):
self.domain = domain
self.protocol = protocol
self.client = RestClient(jwt=token, telemetry=telemetry, timeout=timeout, options=rest_options)
def _url(self, id=None):
url = '{}://{}/api/v1/rules'.format(self.protocol, self.domain)
if id is not None:
return '{}/{}'.format(url, id)
return url
def all(self, stage='login_success', enabled=True, fields=None,
include_fields=True, page=None, per_page=None, include_totals=False):
"""Retrieves a list of all rules.
Args:
stage (str, optional): Retrieves rules that match the execution stage.
Defaults to login_success.
enabled (bool, optional): If provided, retrieves rules that match
the value, otherwise all rules are retrieved.
fields (list, optional): A list of fields to include or exclude
(depending on include_fields) from the result. Leave empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise. Defaults to True.
page (int, optional): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise. Defaults to False.
See: https://docs.authok.cn/api/management/v1#!/Rules/get_rules
"""
params = {
'stage': stage,
'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower(),
'page': page,
'per_page': per_page,
'include_totals': str(include_totals).lower()
}
# since the default is True, this is here to disable the filter
if enabled is not None:
params['enabled'] = str(enabled).lower()
return self.client.get(self._url(), params=params)
def create(self, body):
"""Creates a new rule.
Args:
body (dict): Attributes for the newly created rule.
See: https://docs.authok.cn/api/v1#!/Rules/post_rules
"""
return self.client.post(self._url(), data=body)
def get(self, id, fields=None, include_fields=True):
"""Retrieves a rule by its ID.
Args:
id (str): The id of the rule to retrieve.
fields (list, optional): A list of fields to include or exclude
(depending on include_fields) from the result. Leave empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise. Defaults to True.
See: https://docs.authok.cn/api/management/v1#!/Rules/get_rules_by_id
"""
params = {'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower()}
return self.client.get(self._url(id), params=params)
def delete(self, id):
"""Delete a rule.
Args:
id (str): The id of the rule to delete.
See: https://docs.authok.cn/api/management/v1#!/Rules/delete_rules_by_id
"""
return self.client.delete(self._url(id))
def update(self, id, body):
"""Update an existing rule
Args:
id (str): The id of the rule to modify.
body (dict): Attributes to modify.
See: https://docs.authok.cn/api/v1#!/Rules/patch_rules_by_id
"""
return self.client.patch(self._url(id), data=body)
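# Hedged usage sketch (not part of the original module; domain/token/rule id are placeholders):
#
#     rules = Rules(domain='username.cn.authok.cn', token='MGMT_API_TOKEN')
#     enabled_rules = rules.all(stage='login_success', enabled=True)
#     rules.update('rul_123', {'enabled': False})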
|
PypiClean
|
/sherpa_client-0.12.7-py3-none-any.whl/sherpa_client/models/delete_many_response.py
|
from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.item_count import ItemCount
from ..models.sherpa_job_bean import SherpaJobBean
T = TypeVar("T", bound="DeleteManyResponse")
@attr.s(auto_attribs=True)
class DeleteManyResponse:
"""
Attributes:
removed (int):
details (Union[Unset, List['ItemCount']]):
job (Union[Unset, SherpaJobBean]):
"""
removed: int
details: Union[Unset, List["ItemCount"]] = UNSET
job: Union[Unset, "SherpaJobBean"] = UNSET
def to_dict(self) -> Dict[str, Any]:
removed = self.removed
details: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.details, Unset):
details = []
for details_item_data in self.details:
details_item = details_item_data.to_dict()
details.append(details_item)
job: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.job, Unset):
job = self.job.to_dict()
field_dict: Dict[str, Any] = {}
field_dict.update(
{
"removed": removed,
}
)
if details is not UNSET:
field_dict["details"] = details
if job is not UNSET:
field_dict["job"] = job
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
from ..models.item_count import ItemCount
from ..models.sherpa_job_bean import SherpaJobBean
d = src_dict.copy()
removed = d.pop("removed")
details = []
_details = d.pop("details", UNSET)
for details_item_data in _details or []:
details_item = ItemCount.from_dict(details_item_data)
details.append(details_item)
_job = d.pop("job", UNSET)
job: Union[Unset, SherpaJobBean]
if isinstance(_job, Unset):
job = UNSET
else:
job = SherpaJobBean.from_dict(_job)
delete_many_response = cls(
removed=removed,
details=details,
job=job,
)
return delete_many_response
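# Hedged round-trip sketch (not part of the original module; the payload is illustrative
# and omits the optional "details"/"job" fields):
#
#     resp = DeleteManyResponse.from_dict({"removed": 3})
#     resp.removed        # 3
#     resp.to_dict()      # {"removed": 3, "details": []} -- details defaults to an empty list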
|
PypiClean
|
/clime_issue_density-0.3.1-py3-none-any.whl/clime_issue_density/args.py
|
from argparse import ArgumentParser, Namespace
name: str = "CLIME"
authors: list = [
"Nicholas M. Synovic",
"Matthew Hyatt",
"Sohini Thota",
"George K. Thiruvathukal",
]
def mainArgs() -> Namespace:
parser: ArgumentParser = ArgumentParser(
prog=f"{name} Issue Density",
description="A tool to calculate the issue density of a repository",
epilog=f"Author(s): {', '.join(authors)}",
)
parser.add_argument(
"-c",
"--commits",
help="Commits JSON file. DEFAULT: ./commits_loc.json",
default="commits_loc.json",
required=False,
type=str,
)
parser.add_argument(
"-i",
"--issues",
help="Issues JSON file. DEFAULT: ./github_issues.json",
default="github_issues.json",
required=False,
type=str,
)
parser.add_argument(
"-o",
"--output",
help="Output JSON file. DEFAULT: ./issue_density.json",
default="issue_density.json",
required=False,
type=str,
)
parser.add_argument(
"-v",
"--version",
help="Display version of the tool",
action="store_true",
default=False,
)
return parser.parse_args()
def graphArgs() -> Namespace:
parser: ArgumentParser = ArgumentParser(
prog=f"{name} Issue Density Grapher",
description="A tool to graph the issue density of a repository",
epilog=f"Author(s): {', '.join(authors)}",
)
parser.add_argument(
"-i",
"--input",
help=f"JSON export from {name} GitHub Issue Density Compute. DEFAULT: ./issue_density.json",
type=str,
required=False,
default="issue_density.json",
)
parser.add_argument(
"-o",
"--output",
help="Filename of the graph. DEFAULT: ./issue_density.pdf",
type=str,
required=False,
default="issue_density.pdf",
)
parser.add_argument(
"--type",
help="Type of figure to plot. DEFAULT: line",
type=str,
required=False,
default="line",
)
parser.add_argument(
"--title",
help='Title of the figure. DEFAULT: ""',
type=str,
required=False,
default="",
)
parser.add_argument(
"--x-label",
help='X axis label of the figure. DEFAULT: ""',
type=str,
required=False,
default="",
)
parser.add_argument(
"--y-label",
help='Y axis label of the figure. DEFAULT: ""',
type=str,
required=False,
default="",
)
parser.add_argument(
"--stylesheet",
help='Filepath of matplotlib stylesheet to use. DEFAULT: ""',
type=str,
required=False,
default="",
)
parser.add_argument(
"-v",
"--version",
help="Display version of the tool",
action="store_true",
default=False,
)
return parser.parse_args()
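# Hedged usage sketch (not part of the original module; the argv values are placeholders):
#
#     import sys
#     sys.argv = ["clime-issue-density", "-c", "commits_loc.json", "-i", "github_issues.json"]
#     args = mainArgs()
#     args.commits, args.issues, args.output   # 'commits_loc.json', 'github_issues.json', 'issue_density.json'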
|
PypiClean
|
/tensorflow_tflex-1.13.1rc1-cp27-cp27mu-manylinux1_x86_64.whl/tensorflow_tflex-1.13.1rc1.data/purelib/tensorflow/python/ops/linalg/linear_operator_algebra.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.framework import ops
from tensorflow.python.util import tf_inspect
_CHOLESKY_DECOMPS = {}
_MATMUL = {}
def _registered_function(type_list, registry):
"""Given a list of classes, finds the most specific function registered."""
enumerated_hierarchies = [enumerate(tf_inspect.getmro(t)) for t in type_list]
# Get all possible combinations of hierarchies.
cls_combinations = list(itertools.product(*enumerated_hierarchies))
def hierarchy_distance(cls_combination):
candidate_distance = sum(c[0] for c in cls_combination)
if tuple(c[1] for c in cls_combination) in registry:
return candidate_distance
return 10000
registered_combination = min(cls_combinations, key=hierarchy_distance)
return registry.get(tuple(r[1] for r in registered_combination), None)
def _registered_cholesky(type_a):
"""Get the Cholesky function registered for class a."""
return _registered_function([type_a], _CHOLESKY_DECOMPS)
def _registered_matmul(type_a, type_b):
"""Get the Matmul function registered for classes a and b."""
return _registered_function([type_a, type_b], _MATMUL)
def cholesky(lin_op_a, name=None):
"""Get the Cholesky factor associated to lin_op_a.
Args:
lin_op_a: The LinearOperator to decompose.
name: Name to use for this operation.
Returns:
A LinearOperator that represents the lower Cholesky factor of `lin_op_a`.
Raises:
    ValueError: If no Cholesky method is defined for the LinearOperator
type of `lin_op_a`.
"""
cholesky_fn = _registered_cholesky(type(lin_op_a))
if cholesky_fn is None:
raise ValueError("No cholesky decomposition registered for {}".format(
type(lin_op_a)))
with ops.name_scope(name, "Cholesky"):
return cholesky_fn(lin_op_a)
def matmul(lin_op_a, lin_op_b, name=None):
"""Compute lin_op_a.matmul(lin_op_b).
Args:
lin_op_a: The LinearOperator on the left.
lin_op_b: The LinearOperator on the right.
name: Name to use for this operation.
Returns:
A LinearOperator that represents the matmul between `lin_op_a` and
`lin_op_b`.
Raises:
    ValueError: If no matmul method is defined between types of
`lin_op_a` and `lin_op_b`.
"""
matmul_fn = _registered_matmul(type(lin_op_a), type(lin_op_b))
if matmul_fn is None:
raise ValueError("No matmul registered for {}.matmul({})".format(
type(lin_op_a), type(lin_op_b)))
with ops.name_scope(name, "Matmul"):
return matmul_fn(lin_op_a, lin_op_b)
class RegisterCholesky(object):
"""Decorator to register a Cholesky implementation function.
Usage:
@linear_operator_algebra.RegisterCholesky(lin_op.LinearOperatorIdentity)
def _cholesky_identity(lin_op_a):
# Return the identity matrix.
"""
def __init__(self, lin_op_cls_a):
"""Initialize the LinearOperator registrar.
Args:
lin_op_cls_a: the class of the LinearOperator to decompose.
"""
self._key = (lin_op_cls_a,)
def __call__(self, cholesky_fn):
"""Perform the Cholesky registration.
Args:
cholesky_fn: The function to use for the Cholesky.
Returns:
cholesky_fn
Raises:
TypeError: if cholesky_fn is not a callable.
ValueError: if a Cholesky function has already been registered for
the given argument classes.
"""
if not callable(cholesky_fn):
raise TypeError(
"cholesky_fn must be callable, received: {}".format(cholesky_fn))
if self._key in _CHOLESKY_DECOMPS:
raise ValueError("Cholesky({}) has already been registered to: {}".format(
self._key[0].__name__, _CHOLESKY_DECOMPS[self._key]))
_CHOLESKY_DECOMPS[self._key] = cholesky_fn
return cholesky_fn
class RegisterMatmul(object):
"""Decorator to register a Matmul implementation function.
Usage:
@linear_operator_algebra.RegisterMatmul(
lin_op.LinearOperatorIdentity,
lin_op.LinearOperatorIdentity)
def _matmul_identity(a, b):
# Return the identity matrix.
"""
def __init__(self, lin_op_cls_a, lin_op_cls_b):
"""Initialize the LinearOperator registrar.
Args:
lin_op_cls_a: the class of the LinearOperator to multiply.
lin_op_cls_b: the class of the second LinearOperator to multiply.
"""
self._key = (lin_op_cls_a, lin_op_cls_b)
def __call__(self, matmul_fn):
"""Perform the Matmul registration.
Args:
matmul_fn: The function to use for the Matmul.
Returns:
matmul_fn
Raises:
TypeError: if matmul_fn is not a callable.
ValueError: if a Matmul function has already been registered for
the given argument classes.
"""
if not callable(matmul_fn):
raise TypeError(
"matmul_fn must be callable, received: {}".format(matmul_fn))
if self._key in _MATMUL:
raise ValueError("Matmul({}, {}) has already been registered.".format(
self._key[0].__name__,
self._key[1].__name__))
_MATMUL[self._key] = matmul_fn
return matmul_fn
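# Hedged usage sketch (not part of the original module; `MyOperator` is a placeholder
# LinearOperator subclass used only for illustration):
#
#     @RegisterCholesky(MyOperator)
#     def _cholesky_my_operator(lin_op_a):
#         return ...  # build and return a lower-triangular LinearOperator
#
#     chol = cholesky(my_operator_instance)   # dispatches to _cholesky_my_operator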
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flashblade/FB_2_10/models/__init__.py
|
from __future__ import absolute_import
class ReferenceType(object):
"""Class just for type annotations.
    It's used for the reference arg on API functions. This allows users to pass collections of Model objects
    to the method without transforming them to ids or names.
    Should be a Protocol type once the typing module supports it.
"""
def __init__(self):
self.id = ''
self.name = ''
def quoteString(s):
r"""Quote string according to
https://wiki.purestorage.com/display/UXReviewers/Filtering
>>> quote("a")
"'a'"
>>> quote("a\\b")
"'a\\\\b'"
>>> quote("a\\b")
"'a\\\\b'"
>>> quote("a'b")
"'a\\'b'"
>>> quote(None)
None
"""
if s is None:
return None
quoted = str(s).replace("\\", "\\\\").replace("'", "\\'")
return "'{}'".format(quoted)
def quoteStrings(s):
if s is None:
return None
return [quoteString(x) for x in s]
# import models into model package
from .active_directory import ActiveDirectory
from .active_directory_get_response import ActiveDirectoryGetResponse
from .active_directory_patch import ActiveDirectoryPatch
from .active_directory_post import ActiveDirectoryPost
from .active_directory_response import ActiveDirectoryResponse
from .admin import Admin
from .admin_api_token import AdminApiToken
from .admin_api_token_get_response import AdminApiTokenGetResponse
from .admin_api_token_response import AdminApiTokenResponse
from .admin_cache import AdminCache
from .admin_cache_get_response import AdminCacheGetResponse
from .admin_get_response import AdminGetResponse
from .admin_patch import AdminPatch
from .admin_response import AdminResponse
from .admin_setting import AdminSetting
from .admin_settings_get_response import AdminSettingsGetResponse
from .admin_settings_response import AdminSettingsResponse
from .alert import Alert
from .alert_get_response import AlertGetResponse
from .alert_response import AlertResponse
from .alert_watcher import AlertWatcher
from .alert_watcher_get_response import AlertWatcherGetResponse
from .alert_watcher_post import AlertWatcherPost
from .alert_watcher_response import AlertWatcherResponse
from .api_client import ApiClient
from .api_clients_post import ApiClientsPost
from .api_clients_response import ApiClientsResponse
from .api_token import ApiToken
from .api_version import ApiVersion
from .array import Array
from .array_connection import ArrayConnection
from .array_connection_get_response import ArrayConnectionGetResponse
from .array_connection_key import ArrayConnectionKey
from .array_connection_key_get_response import ArrayConnectionKeyGetResponse
from .array_connection_key_response import ArrayConnectionKeyResponse
from .array_connection_path import ArrayConnectionPath
from .array_connection_path_get_response import ArrayConnectionPathGetResponse
from .array_connection_post import ArrayConnectionPost
from .array_connection_response import ArrayConnectionResponse
from .array_encryption import ArrayEncryption
from .array_encryption_data_at_rest import ArrayEncryptionDataAtRest
from .array_eradication_config import ArrayEradicationConfig
from .array_factory_reset_token import ArrayFactoryResetToken
from .array_factory_reset_token_get_response import ArrayFactoryResetTokenGetResponse
from .array_factory_reset_token_response import ArrayFactoryResetTokenResponse
from .array_get_response import ArrayGetResponse
from .array_http_specific_performance import ArrayHttpSpecificPerformance
from .array_http_specific_performance_get import ArrayHttpSpecificPerformanceGet
from .array_nfs_specific_performance import ArrayNfsSpecificPerformance
from .array_nfs_specific_performance_get import ArrayNfsSpecificPerformanceGet
from .array_performance import ArrayPerformance
from .array_performance_get_response import ArrayPerformanceGetResponse
from .array_performance_replication_get_resp import ArrayPerformanceReplicationGetResp
from .array_response import ArrayResponse
from .array_s3_specific_performance import ArrayS3SpecificPerformance
from .array_s3_specific_performance_get_resp import ArrayS3SpecificPerformanceGetResp
from .array_space import ArraySpace
from .array_space_get_response import ArraySpaceGetResponse
from .arrays_supported_time_zones_get_response import ArraysSupportedTimeZonesGetResponse
from .audit import Audit
from .audit_get_response import AuditGetResponse
from .audit_response import AuditResponse
from .blade import Blade
from .blade_get_response import BladeGetResponse
from .bucket import Bucket
from .bucket_defaults import BucketDefaults
from .bucket_defaults_readonly import BucketDefaultsReadonly
from .bucket_eradication_config import BucketEradicationConfig
from .bucket_get_response import BucketGetResponse
from .bucket_patch import BucketPatch
from .bucket_performance import BucketPerformance
from .bucket_performance_get_response import BucketPerformanceGetResponse
from .bucket_post import BucketPost
from .bucket_replica_link import BucketReplicaLink
from .bucket_replica_link_get_response import BucketReplicaLinkGetResponse
from .bucket_replica_link_post import BucketReplicaLinkPost
from .bucket_replica_link_response import BucketReplicaLinkResponse
from .bucket_response import BucketResponse
from .bucket_s3_specific_performance import BucketS3SpecificPerformance
from .bucket_s3_specific_performance_get_resp import BucketS3SpecificPerformanceGetResp
from .built_in import BuiltIn
from .built_in_no_id import BuiltInNoId
from .built_in_relationship import BuiltInRelationship
from .certificate import Certificate
from .certificate_certificate_group_get_resp import CertificateCertificateGroupGetResp
from .certificate_certificate_group_response import CertificateCertificateGroupResponse
from .certificate_get_response import CertificateGetResponse
from .certificate_group import CertificateGroup
from .certificate_group_certificate_get_resp import CertificateGroupCertificateGetResp
from .certificate_group_certificate_response import CertificateGroupCertificateResponse
from .certificate_group_get_response import CertificateGroupGetResponse
from .certificate_group_response import CertificateGroupResponse
from .certificate_group_use import CertificateGroupUse
from .certificate_group_use_get_response import CertificateGroupUseGetResponse
from .certificate_patch import CertificatePatch
from .certificate_post import CertificatePost
from .certificate_response import CertificateResponse
from .certificate_use import CertificateUse
from .certificate_use_get_response import CertificateUseGetResponse
from .client_performance import ClientPerformance
from .client_performance_get_response import ClientPerformanceGetResponse
from .continuous_replication_performance import ContinuousReplicationPerformance
from .direction import Direction
from .directory_service import DirectoryService
from .directory_service_get_response import DirectoryServiceGetResponse
from .directory_service_management import DirectoryServiceManagement
from .directory_service_nfs import DirectoryServiceNfs
from .directory_service_response import DirectoryServiceResponse
from .directory_service_role import DirectoryServiceRole
from .directory_service_roles_get_response import DirectoryServiceRolesGetResponse
from .directory_service_roles_response import DirectoryServiceRolesResponse
from .directory_service_smb import DirectoryServiceSmb
from .dns import Dns
from .dns_get_response import DnsGetResponse
from .dns_response import DnsResponse
from .drive import Drive
from .drive_get_response import DriveGetResponse
from .eula import Eula
from .eula_get_response import EulaGetResponse
from .eula_response import EulaResponse
from .eula_signature import EulaSignature
from .file_info import FileInfo
from .file_lock import FileLock
from .file_lock_get_response import FileLockGetResponse
from .file_lock_nlm_reclamation_response import FileLockNlmReclamationResponse
from .file_lock_range import FileLockRange
from .file_lock_response import FileLockResponse
from .file_session import FileSession
from .file_session_get_response import FileSessionGetResponse
from .file_session_response import FileSessionResponse
from .file_system import FileSystem
from .file_system_client import FileSystemClient
from .file_system_clients_get_response import FileSystemClientsGetResponse
from .file_system_clients_response import FileSystemClientsResponse
from .file_system_get_response import FileSystemGetResponse
from .file_system_group_performance import FileSystemGroupPerformance
from .file_system_groups_performance_get_response import FileSystemGroupsPerformanceGetResponse
from .file_system_lock_nlm_reclamation import FileSystemLockNlmReclamation
from .file_system_patch import FileSystemPatch
from .file_system_performance import FileSystemPerformance
from .file_system_performance_get_response import FileSystemPerformanceGetResponse
from .file_system_post import FileSystemPost
from .file_system_replica_link import FileSystemReplicaLink
from .file_system_replica_link_get_response import FileSystemReplicaLinkGetResponse
from .file_system_replica_link_response import FileSystemReplicaLinkResponse
from .file_system_response import FileSystemResponse
from .file_system_snapshot import FileSystemSnapshot
from .file_system_snapshot_get_response import FileSystemSnapshotGetResponse
from .file_system_snapshot_get_transfer_response import FileSystemSnapshotGetTransferResponse
from .file_system_snapshot_post import FileSystemSnapshotPost
from .file_system_snapshot_response import FileSystemSnapshotResponse
from .file_system_snapshot_transfer import FileSystemSnapshotTransfer
from .file_system_snapshot_transfer_response import FileSystemSnapshotTransferResponse
from .file_system_user_performance import FileSystemUserPerformance
from .file_system_users_performance_get_response import FileSystemUsersPerformanceGetResponse
from .fixed_location_reference import FixedLocationReference
from .fixed_reference import FixedReference
from .fixed_reference_name_only import FixedReferenceNameOnly
from .fixed_reference_no_id import FixedReferenceNoId
from .fixed_reference_no_resource_type import FixedReferenceNoResourceType
from .fixed_reference_with_remote import FixedReferenceWithRemote
from .group import Group
from .group_quota import GroupQuota
from .group_quota_get_response import GroupQuotaGetResponse
from .group_quota_patch import GroupQuotaPatch
from .group_quota_post import GroupQuotaPost
from .group_quota_response import GroupQuotaResponse
from .hardware import Hardware
from .hardware_connector import HardwareConnector
from .hardware_connector_get_response import HardwareConnectorGetResponse
from .hardware_connector_performance import HardwareConnectorPerformance
from .hardware_connector_performance_get_response import HardwareConnectorPerformanceGetResponse
from .hardware_connector_response import HardwareConnectorResponse
from .hardware_get_response import HardwareGetResponse
from .hardware_response import HardwareResponse
from .http import Http
from .inline_response400 import InlineResponse400
from .inline_response401 import InlineResponse401
from .keytab import Keytab
from .keytab_file_base64 import KeytabFileBase64
from .keytab_file_binary import KeytabFileBinary
from .keytab_file_response import KeytabFileResponse
from .keytab_get_response import KeytabGetResponse
from .keytab_post import KeytabPost
from .keytab_response import KeytabResponse
from .kmip_server import KmipServer
from .kmip_server_response import KmipServerResponse
from .lifecycle_rule import LifecycleRule
from .lifecycle_rule_config_extension import LifecycleRuleConfigExtension
from .lifecycle_rule_get_response import LifecycleRuleGetResponse
from .lifecycle_rule_patch import LifecycleRulePatch
from .lifecycle_rule_post import LifecycleRulePost
from .lifecycle_rule_response import LifecycleRuleResponse
from .link_aggregation_group import LinkAggregationGroup
from .link_aggregation_group_get_response import LinkAggregationGroupGetResponse
from .link_aggregation_group_response import LinkAggregationGroupResponse
from .linkaggregationgroup import Linkaggregationgroup
from .location_reference import LocationReference
from .login import Login
from .login_banner_get_response import LoginBannerGetResponse
from .logs_async import LogsAsync
from .logs_async_get_response import LogsAsyncGetResponse
from .logs_async_response import LogsAsyncResponse
from .member import Member
from .member_link import MemberLink
from .multi_protocol import MultiProtocol
from .multi_protocol_post import MultiProtocolPost
from .network_interface import NetworkInterface
from .network_interface_get_response import NetworkInterfaceGetResponse
from .network_interface_patch import NetworkInterfacePatch
from .network_interface_ping import NetworkInterfacePing
from .network_interface_ping_get_response import NetworkInterfacePingGetResponse
from .network_interface_ping_response import NetworkInterfacePingResponse
from .network_interface_response import NetworkInterfaceResponse
from .network_interface_trace import NetworkInterfaceTrace
from .network_interface_trace_get_response import NetworkInterfaceTraceGetResponse
from .network_interface_trace_response import NetworkInterfaceTraceResponse
from .nfs import Nfs
from .nfs_export_policy import NfsExportPolicy
from .nfs_export_policy_get_response import NfsExportPolicyGetResponse
from .nfs_export_policy_post import NfsExportPolicyPost
from .nfs_export_policy_response import NfsExportPolicyResponse
from .nfs_export_policy_rule import NfsExportPolicyRule
from .nfs_export_policy_rule_base import NfsExportPolicyRuleBase
from .nfs_export_policy_rule_get_response import NfsExportPolicyRuleGetResponse
from .nfs_export_policy_rule_in_policy import NfsExportPolicyRuleInPolicy
from .nfs_export_policy_rule_response import NfsExportPolicyRuleResponse
from .nfs_patch import NfsPatch
from .oauth_token_response import OauthTokenResponse
from .object_backlog import ObjectBacklog
from .object_lock_config_base import ObjectLockConfigBase
from .object_lock_config_request_body import ObjectLockConfigRequestBody
from .object_lock_config_response import ObjectLockConfigResponse
from .object_store_access_key import ObjectStoreAccessKey
from .object_store_access_key_get_response import ObjectStoreAccessKeyGetResponse
from .object_store_access_key_post import ObjectStoreAccessKeyPost
from .object_store_access_key_response import ObjectStoreAccessKeyResponse
from .object_store_access_policy import ObjectStoreAccessPolicy
from .object_store_access_policy_action import ObjectStoreAccessPolicyAction
from .object_store_access_policy_action_get_response import ObjectStoreAccessPolicyActionGetResponse
from .object_store_access_policy_action_response import ObjectStoreAccessPolicyActionResponse
from .object_store_access_policy_get_response import ObjectStoreAccessPolicyGetResponse
from .object_store_access_policy_patch import ObjectStoreAccessPolicyPatch
from .object_store_access_policy_post import ObjectStoreAccessPolicyPost
from .object_store_access_policy_response import ObjectStoreAccessPolicyResponse
from .object_store_access_policy_rule import ObjectStoreAccessPolicyRule
from .object_store_access_policy_rule_get_response import ObjectStoreAccessPolicyRuleGetResponse
from .object_store_access_policy_rule_response import ObjectStoreAccessPolicyRuleResponse
from .object_store_account import ObjectStoreAccount
from .object_store_account_get_response import ObjectStoreAccountGetResponse
from .object_store_account_patch import ObjectStoreAccountPatch
from .object_store_account_post import ObjectStoreAccountPost
from .object_store_account_response import ObjectStoreAccountResponse
from .object_store_remote_credential_get_resp import ObjectStoreRemoteCredentialGetResp
from .object_store_remote_credentials import ObjectStoreRemoteCredentials
from .object_store_remote_credentials_post import ObjectStoreRemoteCredentialsPost
from .object_store_remote_credentials_resp import ObjectStoreRemoteCredentialsResp
from .object_store_user import ObjectStoreUser
from .object_store_user_get_response import ObjectStoreUserGetResponse
from .object_store_user_response import ObjectStoreUserResponse
from .object_store_virtual_host import ObjectStoreVirtualHost
from .object_store_virtual_host_get_response import ObjectStoreVirtualHostGetResponse
from .object_store_virtual_host_response import ObjectStoreVirtualHostResponse
from .page_info import PageInfo
from .permission import Permission
from .policy import Policy
from .policy_base import PolicyBase
from .policy_base_get_response import PolicyBaseGetResponse
from .policy_base_renameable import PolicyBaseRenameable
from .policy_base_response import PolicyBaseResponse
from .policy_file_system_snapshot import PolicyFileSystemSnapshot
from .policy_file_system_snapshot_get_response import PolicyFileSystemSnapshotGetResponse
from .policy_file_system_snapshot_response import PolicyFileSystemSnapshotResponse
from .policy_get_response import PolicyGetResponse
from .policy_local_member import PolicyLocalMember
from .policy_member import PolicyMember
from .policy_member_get_response import PolicyMemberGetResponse
from .policy_member_response import PolicyMemberResponse
from .policy_member_with_remote import PolicyMemberWithRemote
from .policy_member_with_remote_get_response import PolicyMemberWithRemoteGetResponse
from .policy_member_with_remote_response import PolicyMemberWithRemoteResponse
from .policy_patch import PolicyPatch
from .policy_response import PolicyResponse
from .policy_rule import PolicyRule
from .policy_rule_index import PolicyRuleIndex
from .policy_rule_index_in_policy import PolicyRuleIndexInPolicy
from .policy_rule_object_access import PolicyRuleObjectAccess
from .policy_rule_object_access_bulk_manage import PolicyRuleObjectAccessBulkManage
from .policy_rule_object_access_condition import PolicyRuleObjectAccessCondition
from .policy_rule_object_access_post import PolicyRuleObjectAccessPost
from .quota_setting import QuotaSetting
from .quota_setting_get_response import QuotaSettingGetResponse
from .quota_setting_response import QuotaSettingResponse
from .rapid_data_locking import RapidDataLocking
from .rapid_data_locking_response import RapidDataLockingResponse
from .reference import Reference
from .relationship_performance_replication import RelationshipPerformanceReplication
from .relationship_performance_replication_get_resp import RelationshipPerformanceReplicationGetResp
from .replica_link_built_in import ReplicaLinkBuiltIn
from .replication_performance import ReplicationPerformance
from .resource import Resource
from .resource_performance_replication import ResourcePerformanceReplication
from .resource_performance_replication_get_response import ResourcePerformanceReplicationGetResponse
from .resource_type import ResourceType
from .role import Role
from .role_get_response import RoleGetResponse
from .smtp import SMTP
from .session import Session
from .session_get_response import SessionGetResponse
from .smb import Smb
from .smb_client_policy import SmbClientPolicy
from .smb_client_policy_get_response import SmbClientPolicyGetResponse
from .smb_client_policy_post import SmbClientPolicyPost
from .smb_client_policy_response import SmbClientPolicyResponse
from .smb_client_policy_rule import SmbClientPolicyRule
from .smb_client_policy_rule_base import SmbClientPolicyRuleBase
from .smb_client_policy_rule_get_response import SmbClientPolicyRuleGetResponse
from .smb_client_policy_rule_in_policy import SmbClientPolicyRuleInPolicy
from .smb_client_policy_rule_post import SmbClientPolicyRulePost
from .smb_client_policy_rule_post_base import SmbClientPolicyRulePostBase
from .smb_client_policy_rule_post_in_policy import SmbClientPolicyRulePostInPolicy
from .smb_client_policy_rule_response import SmbClientPolicyRuleResponse
from .smb_post import SmbPost
from .smb_share_policy import SmbSharePolicy
from .smb_share_policy_get_response import SmbSharePolicyGetResponse
from .smb_share_policy_post import SmbSharePolicyPost
from .smb_share_policy_response import SmbSharePolicyResponse
from .smb_share_policy_rule import SmbSharePolicyRule
from .smb_share_policy_rule_get_response import SmbSharePolicyRuleGetResponse
from .smb_share_policy_rule_post import SmbSharePolicyRulePost
from .smb_share_policy_rule_response import SmbSharePolicyRuleResponse
from .smtp_server import SmtpServer
from .smtp_server_get_response import SmtpServerGetResponse
from .smtp_server_response import SmtpServerResponse
from .snmp_agent import SnmpAgent
from .snmp_agent_get_response import SnmpAgentGetResponse
from .snmp_agent_mib import SnmpAgentMib
from .snmp_agent_mib_response import SnmpAgentMibResponse
from .snmp_agent_response import SnmpAgentResponse
from .snmp_manager import SnmpManager
from .snmp_manager_get_response import SnmpManagerGetResponse
from .snmp_manager_post import SnmpManagerPost
from .snmp_manager_response import SnmpManagerResponse
from .snmp_manager_test import SnmpManagerTest
from .snmp_v2c import SnmpV2c
from .snmp_v3 import SnmpV3
from .snmp_v3_post import SnmpV3Post
from .space import Space
from .space_extended import SpaceExtended
from .subnet import Subnet
from .subnet_get_response import SubnetGetResponse
from .subnet_response import SubnetResponse
from .support import Support
from .support_get_response import SupportGetResponse
from .support_remote_assist_paths import SupportRemoteAssistPaths
from .support_response import SupportResponse
from .syslog_server import SyslogServer
from .syslog_server_get_response import SyslogServerGetResponse
from .syslog_server_post_or_patch import SyslogServerPostOrPatch
from .syslog_server_response import SyslogServerResponse
from .syslog_server_settings import SyslogServerSettings
from .syslog_server_settings_get_response import SyslogServerSettingsGetResponse
from .syslog_server_settings_response import SyslogServerSettingsResponse
from .target import Target
from .target_get_response import TargetGetResponse
from .target_post import TargetPost
from .target_response import TargetResponse
from .test_result import TestResult
from .test_result_get_response import TestResultGetResponse
from .test_result_response import TestResultResponse
from .throttle import Throttle
from .time_window import TimeWindow
from .time_zone import TimeZone
from .user import User
from .user_no_id import UserNoId
from .user_quota import UserQuota
from .user_quota_get_response import UserQuotaGetResponse
from .user_quota_patch import UserQuotaPatch
from .user_quota_post import UserQuotaPost
from .user_quota_response import UserQuotaResponse
from .verification_key import VerificationKey
from .verification_key_get_response import VerificationKeyGetResponse
from .verification_key_patch import VerificationKeyPatch
from .verification_key_response import VerificationKeyResponse
from .version import Version
|
PypiClean
|
/edb-deployment-3.15.0.tar.gz/edb-deployment-3.15.0/edbdeploy/commands/vmware.py
|
import argparse
from ..options import *
from .default import default_subcommand_parsers
# VMWare sub-commands and options
def subcommands(subparser):
# List of the sub-commands we want to be available for the vmwarewkstn
# command
available_subcommands = [
'configure', 'provision', 'deploy', 'destroy', 'remove', 'logs', 'list', 'display', 'ssh'
]
# Get sub-commands parsers
subcommand_parsers = default_subcommand_parsers(
subparser, available_subcommands
)
# vmware configure sub-command options
subcommand_parsers['configure'].add_argument(
'-a', '--reference-architecture',
dest='reference_architecture',
choices=ReferenceArchitectureOption.choices,
default=ReferenceArchitectureOption.default,
metavar='<ref-arch-code>',
help=ReferenceArchitectureOption.help
)
subcommand_parsers['configure'].add_argument(
'-u', '--edb-credentials',
dest='edb_credentials',
required=True,
type=EDBCredentialsType,
metavar='"<username>:<password>"',
help="EDB Packages repository credentials."
).completer = edb_credentials_completer
subcommand_parsers['configure'].add_argument(
'-o', '--os',
dest='operating_system',
choices=VMWareOSOption.choices,
default=VMWareOSOption.default,
metavar='<operating-system>',
help=OSOption.help
)
subcommand_parsers['configure'].add_argument(
'-t', '--pg-type',
dest='postgres_type',
choices=PgTypeOption.choices,
default=PgTypeOption.default,
metavar='<postgres-engine-type>',
help=PgTypeOption.help
)
subcommand_parsers['configure'].add_argument(
'-v', '--pg-version',
dest='postgres_version',
choices=PgVersionOption.choices,
default=PgVersionOption.default,
metavar='<postgres-version>',
help=PgVersionOption.help
)
subcommand_parsers['configure'].add_argument(
'-e', '--efm-version',
dest='efm_version',
choices=EFMVersionOptionVMWare.choices,
default=EFMVersionOptionVMWare.default,
metavar='<efm-version>',
help=EFMVersionOptionVMWare.help
)
subcommand_parsers['configure'].add_argument(
'--use-hostname',
dest='use_hostname',
choices=UseHostnameOption.choices,
default=UseHostnameOption.default,
metavar='<use-hostname>',
help=UseHostnameOption.help
)
subcommand_parsers['configure'].add_argument(
'-k', '--ssh-pub-key',
dest='ssh_pub_key',
type=argparse.FileType('r'),
default=SSHPubKeyOption.default(),
metavar='<ssh-public-key-file>',
help=SSHPubKeyOption.help
)
subcommand_parsers['configure'].add_argument(
'-K', '--ssh-private-key',
dest='ssh_priv_key',
type=argparse.FileType('r'),
default=SSHPrivKeyOption.default(),
metavar='<ssh-private-key-file>',
help=SSHPrivKeyOption.help
)
subcommand_parsers['configure'].add_argument(
'-s', '--spec',
dest='spec_file',
type=argparse.FileType('r'),
metavar='<vmware-spec-file>',
help="VMWare instances specification file, in JSON."
)
subcommand_parsers['configure'].add_argument(
'-m', '--mem-size',
dest='mem_size',
required=True,
choices=MemSizeOptionsVMWare.choices,
default=MemSizeOptionsVMWare.default,
metavar="<mem-size>",
help="Amount of memory to assign"
)
subcommand_parsers['configure'].add_argument(
'-c', '--cpu-count',
dest='cpu_count',
required=True,
choices=CPUCountOptionsVMWare.choices,
default=CPUCountOptionsVMWare.default,
metavar="<cpu-count>",
help="Number of CPUS to configure"
)
# vmware provision sub-command options
subcommand_parsers['provision'].add_argument(
'-S', '--skip-main-playbook',
dest='skip_main_playbook',
action='store_true',
help="Skip main playbook of the reference architecture."
)
# vmware deploy sub-command options
subcommand_parsers['deploy'].add_argument(
'-n', '--no-install-collection',
dest='no_install_collection',
action='store_true',
help="Do not install the Ansible collection."
)
subcommand_parsers['deploy'].add_argument(
'-p', '--pre-deploy-ansible',
dest='pre_deploy_ansible',
type=argparse.FileType('r'),
metavar='<pre-deploy-ansible-playbook>',
help="Pre deploy ansible playbook."
)
subcommand_parsers['deploy'].add_argument(
'-P', '--post-deploy-ansible',
dest='post_deploy_ansible',
type=argparse.FileType('r'),
metavar='<post-deploy-ansible-playbook>',
help="Post deploy ansible playbook."
)
subcommand_parsers['deploy'].add_argument(
'-S', '--skip-main-playbook',
dest='skip_main_playbook',
action='store_true',
help="Skip main playbook of the reference architecture."
)
# vmware destroy sub-command options
subcommand_parsers['destroy'].add_argument(
'-S', '--skip-main-playbook',
dest='skip_main_playbook',
action='store_true',
help="Skip main playbook of the reference architecture."
)
# vmware logs sub-command options
subcommand_parsers['logs'].add_argument(
'-t', '--tail',
dest='tail',
action='store_true',
help="Do not stop at the end of file."
)
subcommand_parsers['deploy'].add_argument(
'--disable-pipelining',
dest='disable_pipelining',
action='store_true',
help="Disable Ansible pipelining."
)
subcommand_parsers['ssh'].add_argument(
metavar='<host-name>',
dest='host',
help="Node hostname"
)
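# Minimal wiring sketch for the sub-command registration above. The top-level parser,
# program name, and 'vmware' sub-parser name are assumptions for illustration; the
# actual CLI layout lives elsewhere in edb-deployment.
import argparse

parser = argparse.ArgumentParser(prog='edb-deployment')
cloud_subparsers = parser.add_subparsers(dest='cloud', metavar='<cloud-vendor>')
vmware_parser = cloud_subparsers.add_parser('vmware', help='VMWare deployment commands')
vmware_subparsers = vmware_parser.add_subparsers(dest='sub_command', metavar='<sub-command>')
subcommands(vmware_subparsers)  # registers configure/provision/deploy/... as defined above
vmware_parser.print_help()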
|
PypiClean
|
/DXC-RL-1.0.3.5.tar.gz/DXC-RL-1.0.3.5/dxc/ai/visualization/visualization.py
|
from yellowbrick.features import Rank2D #exploring raw data
import matplotlib.pyplot as plt
import missingno as msno #gauge dataset completeness
import seaborn as sns #data exploration, distribution plotting
import pandas as pd
from datacleaner import autoclean
import math
from pandas.api.types import is_numeric_dtype
from pandas_profiling import ProfileReport
#VISUALIZATION
#display the correlations in pairwise comparisons of all features
def explore_features(df):
df_copy = df.copy()
#for some reason, the visualizer doesn't accept categorical
#variables; those have to be converted to strings
for (col,data) in df_copy.iteritems():
if df_copy[col].dtype.name == "category":
df_copy[col] = df_copy[col].astype(str)
numeric_df = autoclean(df_copy)
visualizer = Rank2D(algorithm="pearson")
visualizer.fit_transform(numeric_df)
visualizer.poof()
#display a visual representation of missing fields in the given data
def visualize_missing_data(df):
msno.matrix(df, figsize=(15,8))
def explore_complete_data(df, title='Complete Data Report'):
profile = ProfileReport(df, title, html={'style':{'full_width':False}})
return profile
#plot the distribution of values of each field in the given data
def plot_distributions(df):
#set plot style
sns.set(style="darkgrid")
features = len(df.columns)
#determine the number of columns in the plot grid and the width and height of each plot
grid_cols = 3
plot_width = 5
plot_height = 3
#determine the width of the plot grid and number of rows
grid_width = plot_width * grid_cols
num_rows = math.ceil(features/grid_cols)
#determine the height of the plot grid
grid_height = plot_height * num_rows
#lay out the plot grid
fig1 = plt.figure(constrained_layout=True, figsize = (grid_width,grid_height))
gs = fig1.add_gridspec(ncols = grid_cols, nrows = num_rows)
#step through the dataframe and add plots for each feature
current_column = 0
current_row = 0
for col in df.columns:
#set up a plot
f1_ax1 = fig1.add_subplot(gs[current_row, current_column])
f1_ax1.set_title(col)
#create a plot for numeric values
if is_numeric_dtype(df[col]):
sns.distplot(df[col], ax = f1_ax1).set_xlabel('')
#create a plot for categorical values
if df[col].dtype.name == "category":
sns.countplot(df[col], ax = f1_ax1, order = df[col].value_counts().index).set_xlabel('')
#move to the next column
current_column +=1
#determine if it is time to start a new row
if current_column == grid_cols:
current_column = 0
current_row +=1
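# Illustrative usage of the helpers above on a tiny DataFrame (column names and
# values are assumptions). Each call opens a matplotlib/yellowbrick figure.
import pandas as pd

sample = pd.DataFrame({
    'age': [23, 31, 45, None, 52],
    'segment': pd.Categorical(['a', 'b', 'a', 'c', 'b']),
})

visualize_missing_data(sample)   # missingno matrix of missing fields
plot_distributions(sample)       # distplot for numeric, countplot for categorical columns
explore_features(sample)         # pairwise Pearson correlations via Rank2D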
|
PypiClean
|
/django-compressor-parceljs-2.3.6.tar.gz/django-compressor-parceljs-2.3.6/compressor/contrib/sekizai.py
|
from compressor.templatetags.compress import CompressorNode
from compressor.exceptions import UncompressableFileError
from compressor.base import Compressor
from compressor.conf import settings
from compressor.utils import get_class
from django.template.base import TextNode
def compress(context, data, name):
"""
Data is the markup collected from the template (the <script> or <link> tags)
Name is either 'js' or 'css' (the sekizai namespace)
Basically passes the string through the {% compress %} template tag
"""
# separate compressable from uncompressable files
parser = get_class(settings.COMPRESS_PARSER)(data)
js_compressor, css_compressor = Compressor('js'), Compressor('css')
compressable_elements, expanded_elements, deferred_elements = [], [], []
if name == 'js':
for elem in parser.js_elems():
attribs = parser.elem_attribs(elem)
try:
if 'src' in attribs:
js_compressor.get_basename(attribs['src'])
except UncompressableFileError:
if 'defer' in attribs:
deferred_elements.append(elem)
else:
expanded_elements.append(elem)
else:
compressable_elements.append(elem)
elif name == 'css':
for elem in parser.css_elems():
attribs = parser.elem_attribs(elem)
try:
if parser.elem_name(elem) == 'link' and attribs['rel'].lower() == 'stylesheet':
css_compressor.get_basename(attribs['href'])
except UncompressableFileError:
expanded_elements.append(elem)
else:
compressable_elements.append(elem)
# reconcatenate them
data = ''.join(parser.elem_str(e) for e in expanded_elements)
expanded_node = CompressorNode(nodelist=TextNode(data), kind=name, mode='file')
data = ''.join(parser.elem_str(e) for e in compressable_elements)
compressable_node = CompressorNode(nodelist=TextNode(data), kind=name, mode='file')
data = ''.join(parser.elem_str(e) for e in deferred_elements)
deferred_node = CompressorNode(nodelist=TextNode(data), kind=name, mode='file')
return '\n'.join([
expanded_node.get_original_content(context=context),
compressable_node.render(context=context),
deferred_node.get_original_content(context=context),
])
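# Hedged usage sketch. django-sekizai can hand a collected block to this function as
# a postprocessor from a template, e.g.:
#
#   {% load sekizai_tags %}
#   {% render_block "js" postprocessor "compressor.contrib.sekizai.compress" %}
#
# The direct call below shows the same contract, assuming a configured Django project
# with django-compressor settings in place; the <script> tag is an illustrative value.
from django.template import Context

combined_markup = compress(
    context=Context(),
    data='<script src="/static/js/app.js"></script>',
    name='js',
)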
|
PypiClean
|
/django-arquea-2.4.17.tar.gz/django-arquea-2.4.17/configuracao/static/js/tiny_mce/langs/bg.js
|
tinyMCE.addI18n({bg:{common:{more_colors:"\u041e\u0449\u0435 \u0446\u0432\u0435\u0442\u043e\u0432\u0435",invalid_data:"\u0413\u0440\u0435\u0448\u043a\u0430: \u0412\u044a\u0432\u0435\u0434\u0435\u043d\u0438 \u0441\u0430 \u043d\u0435\u0432\u0430\u043b\u0438\u0434\u043d\u0438 \u0441\u0442\u043e\u0439\u043d\u043e\u0441\u0442\u0438, \u0442\u0435 \u0441\u0430 \u043c\u0430\u0440\u043a\u0438\u0440\u0430\u043d\u0438 \u0432 \u0447\u0435\u0440\u0432\u0435\u043d\u043e.",popup_blocked:"\u0421\u044a\u0436\u0430\u043b\u044f\u0432\u0430\u043c\u0435, \u043d\u043e \u0437\u0430\u0431\u0435\u043b\u044f\u0437\u0430\u0445\u043c\u0435, \u0447\u0435 \u0432\u0430\u0448\u0438\u044f\u0442 popup-blocker \u0435 \u0441\u043f\u0440\u044f\u043b \u043f\u0440\u043e\u0437\u043e\u0440\u0435\u0446 \u043a\u043e\u0439\u0442\u043e \u0441\u0435 \u0438\u0437\u043f\u043e\u043b\u0437\u0432\u0430 \u043e\u0442 \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u0430\u0442\u0430. \u0429\u0435 \u0442\u0440\u044f\u0431\u0432\u0430 \u0434\u0430 \u0438\u0437\u043a\u043b\u044e\u0447\u0438\u0442\u0435 \u0431\u043b\u043e\u043a\u0438\u0440\u0430\u043d\u0435\u0442\u043e \u043d\u0430 \u043f\u043e\u043f\u044a\u043f\u0438 \u0437\u0430 \u0442\u043e\u0437\u0438 \u0441\u0430\u0439\u0442 \u0437\u0430 \u0434\u0430 \u0438\u0437\u043f\u043e\u043b\u0437\u0432\u0430\u0442\u0435 \u043f\u044a\u043b\u043d\u0430\u0442\u0430 \u0444\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442.",clipboard_no_support:"\u041d\u0435 \u0441\u0435 \u043f\u043e\u0434\u0434\u044a\u0440\u0436\u0430 \u043e\u0442 \u0432\u0430\u0448\u0438\u044f\u0442 \u0431\u0440\u0430\u0443\u0437\u044a\u0440, \u0438\u0437\u043f\u043e\u043b\u0437\u0432\u0430\u0439\u0442\u0435 \u043a\u043b\u0430\u0432\u0438\u0430\u0442\u0443\u0440\u043d\u0438 \u043a\u043e\u043c\u0430\u043d\u0434\u0438.",clipboard_msg:"\u041a\u043e\u043f\u0438\u0440\u0430\u043d\u0435/\u041e\u0442\u0440\u044f\u0437\u0432\u0430\u043d\u0435/\u041f\u043e\u0441\u0442\u0430\u0432\u044f\u043d\u0435 \u043d\u0435 \u0435 \u0434\u043e\u0441\u0442\u044a\u043f\u043d\u043e \u043f\u043e\u0434 Mozilla \u0438 Firefox.\\n\u0416\u0435\u043b\u0430\u0435\u0442\u0435 \u043b\u0438 \u043f\u043e\u0432\u0435\u0447\u0435 \u0438\u043d\u0444\u043e\u0440\u043c\u0430\u0446\u0438\u044f \u0437\u0430 \u043f\u0440\u043e\u0431\u043b\u0435\u043c\u0430?",not_set:"-- \u041d\u0435\u0443\u0441\u0442\u0430\u043d\u043e\u0432\u0435\u043d\u043e --",class_name:"\u041a\u043b\u0430\u0441",browse:"Browse",close:"\u0417\u0430\u0442\u0432\u043e\u0440\u0438",cancel:"\u041e\u0442\u043a\u0430\u0436\u0438",update:"\u041e\u0431\u043d\u043e\u0432\u0438",insert:"\u0412\u043c\u044a\u043a\u043d\u0438",apply:"\u041f\u043e\u0442\u0432\u044a\u0440\u0434\u0438",edit_confirm:"\u0418\u0441\u043a\u0430\u0442\u0435 \u043b\u0438 \u0434\u0430 \u0438\u0437\u043f\u043e\u043b\u0437\u0432\u0430\u0442\u0435 WYSIWYG \u0440\u0435\u0436\u0438\u043c \u0437\u0430 \u0442\u043e\u0432\u0430 \u0442\u0435\u043a\u0441\u0442\u043e\u0432\u043e 
\u043f\u043e\u043b\u0435?"},contextmenu:{full:"\u0414\u0432\u0443\u0441\u0442\u0440\u0430\u043d\u043d\u043e",right:"\u0414\u044f\u0441\u043d\u043e",center:"\u0426\u0435\u043d\u0442\u044a\u0440",left:"\u041b\u044f\u0432\u043e",align:"\u041f\u043e\u0434\u0440\u0430\u0432\u043d\u044f\u0432\u0430\u043d\u0435"},insertdatetime:{day_short:"\u041d\u0434,\u041f\u043d,\u0412\u0442,\u0421\u0440,\u0427\u0442,\u041f\u0442,\u0421\u0431,\u041d\u0434",day_long:"\u041d\u0435\u0434\u0435\u043b\u044f,\u041f\u043e\u043d\u0435\u0434\u0435\u043b\u043d\u0438\u043a,\u0412\u0442\u043e\u0440\u043d\u0438\u043a,\u0421\u0440\u044f\u0434\u0430,\u0427\u0435\u0442\u0432\u044a\u0440\u0442\u044a\u043a,\u041f\u0435\u0442\u044a\u043a,\u0421\u044a\u0431\u043e\u0442\u0430,\u041d\u0435\u0434\u0435\u043b\u044f",months_short:"\u042f\u043d\u0443,\u0424\u0435\u0432,\u041c\u0430\u0440,\u0410\u043f\u0440,\u041c\u0430\u0439,\u042e\u043d\u0438,\u042e\u043b\u0438,\u0410\u0432\u0433,\u0421\u0435\u043f,\u041e\u043a\u0442,\u041d\u043e\u0435,\u0414\u0435\u043a",months_long:"\u042f\u043d\u0443\u0430\u0440\u0438,\u0424\u0435\u0432\u0440\u0443\u0430\u0440\u0438,\u041c\u0430\u0440\u0442,\u0410\u043f\u0440\u0438\u043b,\u041c\u0430\u0439,\u042e\u043d\u0438,\u042e\u043b\u0438,\u0410\u0432\u0433\u0443\u0441\u0442,\u0421\u0435\u043f\u0442\u0435\u043c\u0432\u0440\u0438,\u041e\u043a\u0442\u043e\u043c\u0432\u0440\u0438,\u041d\u043e\u0435\u043c\u0432\u0440\u0438,\u0414\u0435\u043a\u0435\u043c\u0432\u0440\u0438",inserttime_desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u0432\u0440\u0435\u043c\u0435",insertdate_desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u0434\u0430\u0442\u0430",time_fmt:"%H:%M:%S",date_fmt:"%Y-%m-%d"},print:{print_desc:"\u041e\u0442\u043f\u0435\u0447\u0430\u0442\u0430\u0439"},preview:{preview_desc:"\u041f\u0440\u0435\u0433\u043b\u0435\u0434"},directionality:{rtl_desc:"\u041f\u043e\u0441\u043e\u043a\u0430 \u043e\u0442\u0434\u044f\u0441\u043d\u043e \u043d\u0430 \u043b\u044f\u0432\u043e",ltr_desc:"\u041f\u043e\u0441\u043e\u043a\u0430 \u043e\u0442\u043b\u044f\u0432\u043e \u043d\u0430 \u0434\u044f\u0441\u043d\u043e"},layer:{content:"\u041d\u043e\u0432 \u0441\u043b\u043e\u0439...",absolute_desc:"\u0412\u043a\u043b\u044e\u0447\u0438 \u0430\u0431\u0441\u043e\u043b\u044e\u0442\u043d\u043e \u043f\u043e\u0437\u0438\u0446\u0438\u043e\u043d\u0438\u0440\u0430\u043d\u0435",backward_desc:"\u041f\u043e\u043a\u0430\u0436\u0438 \u043e\u0442\u0437\u0430\u0434",forward_desc:"\u041f\u043e\u043a\u0430\u0436\u0438 \u043e\u0442\u043f\u0440\u0435\u0434",insertlayer_desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u043d\u043e\u0432 \u0441\u043b\u043e\u0439"},save:{save_desc:"\u0417\u0430\u043f\u0438\u0448\u0438",cancel_desc:"\u041e\u0442\u043a\u0430\u0436\u0438 \u0432\u0441\u0438\u0447\u043a\u0438 \u043f\u0440\u043e\u043c\u0435\u043d\u0438"},nonbreaking:{nonbreaking_desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u043d\u0435\u043f\u0440\u0435\u043a\u044a\u0441\u0432\u0430\u0435\u043c \u0438\u043d\u0442\u0435\u0440\u0432\u0430\u043b"},iespell:{download:"ieSpell \u043d\u0435 \u0435 \u043e\u0442\u043a\u0440\u0438\u0442. 
\u0416\u0435\u043b\u0430\u0435\u0442\u0435 \u043b\u0438 \u0434\u0430 \u0433\u043e \u0438\u043d\u0441\u0442\u0430\u043b\u0438\u0440\u0430\u0442\u0435 \u0441\u0435\u0433\u0430?",iespell_desc:"\u041f\u0440\u043e\u0432\u0435\u0440\u0438 \u043f\u0440\u0430\u0432\u043e\u043f\u0438\u0441\u0430"},advhr:{advhr_desc:"\u0425\u043e\u0440\u0438\u0437\u043e\u043d\u0442\u0430\u043b\u043d\u0430 \u043b\u0438\u043d\u0438\u044f",delta_height:"",delta_width:""},emotions:{emotions_desc:"\u0415\u043c\u043e\u0442\u0438\u043a\u043e\u043d\u0438",delta_height:"",delta_width:""},searchreplace:{replace_desc:"\u0422\u044a\u0440\u0441\u0438/\u0417\u0430\u043c\u0435\u0441\u0442\u0438",search_desc:"\u0422\u044a\u0440\u0441\u0438",delta_width:"",delta_height:""},advimage:{image_desc:"\u0412\u043c\u044a\u043a\u043d\u0438/\u0420\u0435\u0434\u0430\u043a\u0442\u0438\u0440\u0430\u0439 \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0430",delta_width:"",delta_height:""},advlink:{link_desc:"\u0412\u043c\u044a\u043a\u043d\u0438/\u0420\u0435\u0434\u0430\u043a\u0442\u0438\u0440\u0430\u0439 \u0445\u0438\u043f\u0435\u0440\u0432\u0440\u044a\u0437\u043a\u0430",delta_height:"",delta_width:""},xhtmlxtras:{attribs_desc:"\u0412\u043c\u044a\u043a\u043d\u0438/\u0420\u0435\u0434\u0430\u043a\u0442\u0438\u0440\u0430\u0439 \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u0438",ins_desc:"\u0412\u043c\u044a\u043a\u0432\u0430\u043d\u0435",del_desc:"\u0418\u0437\u0442\u0440\u0438\u0432\u0430\u043d\u0435",acronym_desc:"\u0410\u043a\u0440\u043e\u043d\u0438\u043c",abbr_desc:"\u0421\u044a\u043a\u0440\u0430\u0449\u0435\u043d\u0438\u0435",cite_desc:"\u0426\u0438\u0442\u0430\u0442",attribs_delta_height:"",attribs_delta_width:"",ins_delta_height:"",ins_delta_width:"",del_delta_height:"",del_delta_width:"",acronym_delta_height:"",acronym_delta_width:"",abbr_delta_height:"",abbr_delta_width:"",cite_delta_height:"",cite_delta_width:""},style:{desc:"\u0420\u0435\u0434\u0430\u043a\u0442\u0438\u0440\u0430\u0439 CSS \u0441\u0442\u0438\u043b",delta_height:"",delta_width:""},paste:{plaintext_mode:"\u041f\u043e\u0441\u0442\u0430\u0432\u0435\u043d\u043e\u0442\u043e \u0435 \u0432 \u0438\u0437\u0447\u0438\u0441\u0442\u0435\u043d \u0442\u0435\u043a\u0441\u0442\u043e\u0432 \u0440\u0435\u0436\u0438\u043c. \u0429\u0440\u0430\u043a\u043d\u0435\u0442\u0435 \u043e\u0442\u043d\u043e\u0432\u043e \u0434\u0430 \u043f\u0440\u0435\u043c\u0438\u043d\u0435\u0442\u0435 \u0432 \u043e\u0431\u0438\u043a\u043d\u043e\u0432\u0435\u043d \u0440\u0435\u0436\u0438\u043c \u043d\u0430 \u043f\u043e\u0441\u0442\u0430\u0432\u044f\u043d\u0435.",plaintext_mode_sticky:"\u041f\u043e\u0441\u0442\u0430\u0432\u0435\u043d\u043e\u0442\u043e \u0435 \u0432 \u0438\u0437\u0447\u0438\u0441\u0442\u0435\u043d \u0442\u0435\u043a\u0441\u0442\u043e\u0432 \u0440\u0435\u0436\u0438\u043c. \u0429\u0440\u0430\u043a\u043d\u0435\u0442\u0435 \u043e\u0442\u043d\u043e\u0432\u043e \u0434\u0430 \u043f\u0440\u0435\u043c\u0438\u043d\u0435\u0442\u0435 \u0432 \u043e\u0431\u0438\u043a\u043d\u043e\u0432\u0435\u043d \u0440\u0435\u0436\u0438\u043c \u043d\u0430 \u043f\u043e\u0441\u0442\u0430\u0432\u044f\u043d\u0435. 
\u0421\u043b\u0435\u0434 \u043a\u0430\u0442\u043e \u043f\u043e\u0441\u0442\u0430\u0432\u0438\u0442\u0435 \u0435\u043b\u0435\u043c\u0435\u043d\u0442\u0430 \u0449\u0435 \u0441\u0435 \u0432\u044a\u0440\u043d\u0435\u0442\u0435 \u0432 \u043d\u043e\u0440\u043c\u0430\u043b\u0435\u043d \u0440\u0435\u0436\u0438\u043c.",selectall_desc:"\u0418\u0437\u0431\u0435\u0440\u0438 \u0432\u0441\u0438\u0447\u043a\u0438",paste_word_desc:"\u041f\u043e\u0441\u0442\u0430\u0432\u0438 \u043e\u0442 Word",paste_text_desc:"\u041f\u043e\u0441\u0442\u0430\u0432\u0438 \u043a\u0430\u0442\u043e \u0442\u0435\u043a\u0441\u0442"},paste_dlg:{word_title:"\u0418\u0437\u043f\u043e\u043b\u0437\u0432\u0430\u0439\u0442\u0435 CTRL+V \u043d\u0430 \u043a\u043b\u0430\u0432\u0438\u0430\u0442\u0443\u0440\u0430\u0442\u0430 \u0437\u0430 \u0434\u0430 \u043f\u043e\u0441\u0442\u0430\u0432\u0438\u0442\u0435 \u0442\u0435\u043a\u0441\u0442\u0430 \u0432 \u043f\u0440\u043e\u0437\u043e\u0440\u0435\u0446\u0430.",text_linebreaks:"\u0417\u0430\u043f\u0430\u0437\u0438 \u043d\u043e\u0432\u0438\u0442\u0435 \u0440\u0435\u0434\u043e\u0432\u0435",text_title:"\u0418\u0437\u043f\u043e\u043b\u0437\u0432\u0430\u0439\u0442\u0435 CTRL+V \u043d\u0430 \u043a\u043b\u0430\u0432\u0438\u0430\u0442\u0443\u0440\u0430\u0442\u0430 \u0437\u0430 \u0434\u0430 \u043f\u043e\u0441\u0442\u0430\u0432\u0438\u0442\u0435 \u0442\u0435\u043a\u0441\u0442\u0430 \u0432 \u043f\u0440\u043e\u0437\u043e\u0440\u0435\u0446\u0430."},table:{cell:"\u041a\u043b\u0435\u0442\u043a\u0430",col:"\u041a\u043e\u043b\u043e\u043d\u0430",row:"\u0420\u0435\u0434",del:"\u0418\u0437\u0442\u0440\u0438\u0439 \u0442\u0430\u0431\u043b\u0438\u0446\u0430",copy_row_desc:"\u041a\u043e\u043f\u0438\u0440\u0430\u0439 \u0440\u0435\u0434",cut_row_desc:"\u041e\u0442\u0440\u0435\u0436\u0438 \u0440\u0435\u0434",paste_row_after_desc:"\u041f\u043e\u0441\u0442\u0430\u0432\u0438 \u0440\u0435\u0434 \u0441\u043b\u0435\u0434",paste_row_before_desc:"\u041f\u043e\u0441\u0442\u0430\u0432\u0438 \u0440\u0435\u0434 \u043f\u0440\u0435\u0434\u0438",props_desc:"\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u0430 \u043d\u0430 \u0442\u0430\u0431\u043b\u0438\u0446\u0430\u0442\u0430",cell_desc:"\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u0430 \u043d\u0430 \u043a\u043b\u0435\u0442\u043a\u0430\u0442\u0430",row_desc:"\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u0430 \u043d\u0430 \u0440\u0435\u0434\u0430",merge_cells_desc:"\u0421\u043b\u0435\u0439 \u043a\u043b\u0435\u0442\u043a\u0438",split_cells_desc:"\u0420\u0430\u0437\u0434\u0435\u043b\u0438 \u0441\u043b\u0435\u0442\u0438 \u043a\u043b\u0435\u0442\u043a\u0438",delete_col_desc:"\u0418\u0437\u0442\u0440\u0438\u0439 \u043a\u043e\u043b\u043e\u043d\u0430",col_after_desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u043a\u043e\u043b\u043e\u043d\u0430 \u0441\u043b\u0435\u0434",col_before_desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u043a\u043e\u043b\u043e\u043d\u0430 \u043f\u0440\u0435\u0434\u0438",delete_row_desc:"\u0418\u0437\u0442\u0440\u0438\u0439 \u0440\u0435\u0434",row_after_desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u0440\u0435\u0434 \u0441\u043b\u0435\u0434",row_before_desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u0440\u0435\u0434 \u043f\u0440\u0435\u0434\u0438",desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u043d\u043e\u0432\u0430 
\u0442\u0430\u0431\u043b\u0438\u0446\u0430",merge_cells_delta_height:"",merge_cells_delta_width:"",table_delta_height:"",table_delta_width:"",cellprops_delta_height:"",cellprops_delta_width:"",rowprops_delta_height:"",rowprops_delta_width:""},autosave:{warning_message:"\u0412\u0441\u0438\u0447\u043a\u0438 \u043d\u0430\u043f\u0440\u0430\u0432\u0435\u043d\u0438 \u043f\u0440\u043e\u043c\u0435\u043d\u0438 \u0449\u0435 \u0431\u044a\u0434\u0430\u0442 \u0437\u0430\u0433\u0443\u0431\u0435\u043d\u0438.\\n\\n\u0421\u0438\u0433\u0443\u0440\u043d\u0438 \u043b\u0438 \u0441\u0442\u0435, \u0447\u0435 \u0438\u0441\u043a\u0430\u0442\u0435 \u0434\u0430 \u0432\u044a\u0437\u0441\u0442\u0430\u043d\u043e\u0432\u0438\u0442\u0435 \u0437\u0430\u043f\u0430\u0437\u0435\u043d\u043e\u0442\u043e \u0441\u044a\u0434\u044a\u0440\u0436\u0430\u043d\u0438\u0435?.",restore_content:"\u0412\u044a\u0437\u0441\u0442\u0430\u043d\u043e\u0432\u044f\u0432\u0430\u043d\u0435 \u043d\u0430 \u0430\u0432\u0442\u043e\u043c\u0430\u0442\u0438\u0447\u043d\u043e \u0437\u0430\u043f\u0430\u0437\u0435\u043d\u043e\u0442\u043e.",unload_msg:"\u041f\u0440\u043e\u043c\u0435\u043d\u0438\u0442\u0435 \u043a\u043e\u0438\u0442\u043e \u043d\u0430\u043f\u0440\u0430\u0432\u0438\u0445\u0442\u0435 \u0449\u0435 \u0441\u0435 \u0437\u0430\u0433\u0443\u0431\u044f\u0442 \u0430\u043a\u043e \u043e\u0442\u0438\u0434\u0435\u0442\u0435 \u043d\u0430 \u0434\u0440\u0443\u0433\u0430 \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u0430."},fullscreen:{desc:"\u0412\u043a\u043b./\u0418\u0437\u043a\u043b. \u0446\u044f\u043b \u0435\u043a\u0440\u0430\u043d"},media:{edit:"\u0420\u0435\u0434\u0430\u043a\u0442\u0438\u0440\u0430\u043a \u043c\u0435\u0434\u0438\u0430",desc:"\u0412\u043c\u044a\u043a\u043d\u0438/\u0440\u0435\u0434\u0430\u043a\u0442\u0438\u0440\u0430\u0439 \u043c\u0435\u0434\u0438\u0430\u0442\u0430",delta_height:"",delta_width:""},fullpage:{desc:"\u041d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0438 \u043d\u0430 \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430",delta_width:"",delta_height:""},template:{desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u0441\u044a\u0434\u044a\u0440\u0436\u0430\u043d\u0438\u0435\u0442\u043e \u043d\u0430 \u0442\u0435\u043c\u043f\u043b\u0435\u0439\u0442"},visualchars:{desc:"\u0412\u043a\u043b./\u0418\u0437\u043a\u043b. \u043d\u0430 \u043a\u043e\u043d\u0442\u0440\u043e\u043b\u043d\u0438\u0442\u0435 \u0441\u0438\u043c\u0432\u043e\u043b\u0438."},spellchecker:{desc:"\u0412\u043a\u043b./\u0418\u0437\u043a\u043b. 
\u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430 \u043d\u0430 \u043f\u0440\u0430\u0432\u043e\u043f\u0438\u0441\u0430",menu:"\u041d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0438 \u043d\u0430 \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430\u0442\u0430 \u043d\u0430 \u043f\u0440\u0430\u0432\u043e\u043f\u0438\u0441",ignore_word:"\u0418\u0433\u043d\u043e\u0440\u0438\u0440\u0430\u0439 \u0434\u0443\u043c\u0430",ignore_words:"\u0418\u0433\u043d\u043e\u0440\u0438\u0440\u0430\u0439 \u0432\u0441\u0438\u0447\u043a\u0438",langs:"\u0415\u0437\u0438\u0446\u0438",wait:"\u041c\u043e\u043b\u044f \u0438\u0437\u0447\u0430\u043a\u0430\u0439\u0442\u0435...",sug:"\u041f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f",no_sug:"\u041d\u044f\u043c\u0430 \u043f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f",no_mpell:"\u041d\u044f\u043c\u0430 \u0433\u0440\u0435\u0448\u043d\u043e \u043d\u0430\u043f\u0438\u0441\u0430\u043d\u0438 \u0434\u0443\u043c\u0438."},pagebreak:{desc:"\u0412\u043c\u044a\u043a\u043d\u0438 \u043d\u043e\u0432\u0430 \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u0430."},advlist:{types:"\u0421\u0438\u043c\u0432\u043e\u043b\u0438",def:"\u0421\u0442\u0430\u043d\u0434\u0430\u0440\u0442\u043d\u0438",lower_alpha:"\u041c\u0430\u043b\u043a\u0438 \u043b\u0430\u0442\u0438\u043d\u0441\u043a\u0438 \u0431\u0443\u043a\u0432\u0438",lower_greek:"\u041c\u0430\u043b\u043a\u0438 \u0433\u0440\u044a\u0446\u043a\u0438 \u0431\u0443\u043a\u0432\u0438",lower_roman:"\u041c\u0430\u043b\u043a\u0438 \u0440\u0438\u043c\u0441\u043a\u0438 \u0446\u0438\u0444\u0440\u0438",upper_alpha:"\u0417\u0430\u0433\u043b\u0430\u0432\u043d\u0438 \u043b\u0430\u0442\u0438\u043d\u0441\u043a\u0438 \u0431\u0443\u043a\u0432\u0438",upper_roman:"\u0417\u0430\u0433\u043b\u0430\u0432\u043d\u0438 \u0440\u0438\u043c\u0441\u043a\u0438 \u0446\u0438\u0444\u0440\u0438",circle:"\u041a\u0440\u044a\u0433",disc:"\u041e\u043a\u0440\u044a\u0436\u043d\u043e\u0441\u0442",square:"\u041a\u0432\u0430\u0434\u0440\u0430\u0442"}}});
|
PypiClean
|
/rogii-solo-0.4.0.tar.gz/rogii-solo-0.4.0/src/rogii_solo/base.py
|
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, TypeVar
from pandas import DataFrame
from rogii_solo.calculations.converters import convert_value, radians_to_degrees
from rogii_solo.calculations.enums import EMeasureUnits
from rogii_solo.papi.client import PapiClient
from rogii_solo.types import DataList
class Convertible:
@staticmethod
def convert_xy(value: float, measure_units: EMeasureUnits, force_to_meters: bool = False) -> Optional[float]:
if value is not None:
return convert_value(value, measure_units=measure_units, force_to_meters=force_to_meters)
@staticmethod
def convert_z(value: float, measure_units: EMeasureUnits) -> Optional[float]:
if value is not None:
return convert_value(value=value, measure_units=measure_units)
@staticmethod
def convert_angle(value: float) -> Optional[float]:
if value is not None:
return radians_to_degrees(value)
class BaseObject(ABC, Convertible):
"""
Base data object
"""
@abstractmethod
def to_dict(self, *args, **kwargs) -> Dict[str, Any]:
"""
Convert object to dict
:return:
"""
pass
@abstractmethod
def to_df(self, *args, **kwargs) -> DataFrame:
"""
Convert object to DataFrame
:return:
"""
pass
def _find_by_path(self,
obj: Dict or Iterable[Dict],
path: str or Iterable[str],
default: Any = None,
divider: str = None,
check_none: bool = False,
to_list: bool = False,
) -> Any:
"""
Find nested key value in dict
:param obj:
:param path:
:param default:
:param divider:
:param check_none:
:param to_list:
:return:
"""
if not obj:
return None if not to_list else []
if not isinstance(obj, (List, Tuple, Set)):
obj = [obj]
if not isinstance(path, (List, Tuple, Set)):
path = [path]
result = [] if to_list else None
for o in obj:
for p in path:
res = self.__find_by_path(
obj=o,
path=p,
default=default,
divider=divider,
check_none=check_none,
to_list=to_list,
)
if to_list:
result.extend(res)
elif not to_list and res:
result = res
break
return result
def __find_by_path(self,
obj: Dict,
path: str,
default: Any = None,
divider: str = None,
check_none: bool = False,
to_list: bool = False,
) -> Any:
if not obj:
return None if not to_list else []
for p in path.split(divider or "."):
if p not in obj or not obj[p]:
return default if not to_list else []
obj = obj[p]
obj = obj if not check_none else default if obj is None else obj
if not to_list:
return obj
return obj if isinstance(obj, list) else [obj] if obj else []
class ComplexObject(BaseObject):
"""
Object with access to PAPI
"""
def __init__(self, papi_client: PapiClient):
super().__init__()
self._papi_client = papi_client
def to_dict(self, *args, **kwargs) -> Dict[str, Any]:
return {}
def to_df(self, *args, **kwargs) -> DataFrame:
return DataFrame([self.to_dict(*args, **kwargs)])
T = TypeVar('T', bound=BaseObject)
class ObjectRepository(list[T]):
"""
List of objects with utility methods
"""
def __init__(self, objects: List[T] = None):
if objects is None:
objects = []
super().__init__(objects)
def to_dict(self, get_converted: bool = True) -> DataList:
"""
Return list of dicts
:return:
"""
return [object_.to_dict(get_converted) for object_ in self]
def to_df(self, get_converted: bool = True) -> DataFrame:
"""
Convert list to Pandas DataFrame
:return:
"""
return DataFrame(self.to_dict(get_converted))
def find_by_id(self, value) -> Optional[T]:
"""
Find object by ID
:param value:
:return:
"""
return self._find_by_attr(attr='uuid', value=value)
def find_by_name(self, value) -> Optional[T]:
"""
Find object by name
:param value:
:return:
"""
return self._find_by_attr(attr='name', value=value)
def _find_by_attr(self, attr: str, value) -> Optional[T]:
return next((item for item in self if getattr(item, attr, None) == value), None)
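# Illustrative sketch: a minimal BaseObject subclass (not part of rogii_solo) used to
# show ObjectRepository lookups and DataFrame conversion. Dict, Any and DataFrame are
# already imported at the top of this module.
class _DemoObject(BaseObject):
    def __init__(self, uuid: str, name: str):
        self.uuid = uuid
        self.name = name

    def to_dict(self, get_converted: bool = True) -> Dict[str, Any]:
        return {'uuid': self.uuid, 'name': self.name}

    def to_df(self, get_converted: bool = True) -> DataFrame:
        return DataFrame([self.to_dict(get_converted)])


demo_repository = ObjectRepository([_DemoObject('uuid-1', 'Lateral-1'), _DemoObject('uuid-2', 'Pilot')])
assert demo_repository.find_by_name('Lateral-1').uuid == 'uuid-1'
assert demo_repository.find_by_id('uuid-2').name == 'Pilot'
print(demo_repository.to_df())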
|
PypiClean
|
/janis-pipelines.core-0.13.1.tar.gz/janis-pipelines.core-0.13.1/janis_core/ingestion/galaxy/gxwrappers/downloads/wrappers.py
|
import requests
import tarfile
import os
from typing import Optional
from janis_core.ingestion.galaxy.utils import galaxy as utils
from janis_core.ingestion.galaxy.gxwrappers.downloads.cache import DownloadCache
CACHE: DownloadCache = DownloadCache()
def fetch_xml(owner: str, repo: str, revision: str, tool_id: str) -> str:
"""gets the wrapper locally or from toolshed then returns path to xml"""
path: Optional[str] = None
if not path:
path = _fetch_cache(repo, revision, tool_id)
if not path:
path = _fetch_builtin(tool_id)
if not path:
path = _fetch_toolshed(owner, repo, revision, tool_id)
if not path:
raise RuntimeError(f'could not find wrapper for {tool_id}:{revision}')
else:
return path
def get_builtin_tool_path(tool_id: str) -> Optional[str]:
"""returns path to xml file with id='tool_id'"""
tool_directories = _get_builtin_tool_directories()
for directory in tool_directories:
xmlfile = utils.get_xml_by_id(directory, tool_id)
if xmlfile:
return os.path.join(directory, xmlfile)
return None
def _get_builtin_tool_directories() -> list[str]:
out: list[str] = []
out += _get_builtin_tools_directories()
out += _get_datatype_converter_directories()
return out
def _get_builtin_tools_directories() -> list[str]:
import galaxy.tools
tools_folder = str(galaxy.tools.__file__).rsplit('/', 1)[0]
bundled_folders = os.listdir(f'{tools_folder}/bundled')
bundled_folders = [f for f in bundled_folders if not f.startswith('__')]
bundled_folders = [f'{tools_folder}/bundled/{f}' for f in bundled_folders]
bundled_folders = [f for f in bundled_folders if os.path.isdir(f)]
return [tools_folder] + bundled_folders
def _get_datatype_converter_directories() -> list[str]:
import galaxy.datatypes
datatypes_folder = str(galaxy.datatypes.__file__).rsplit('/', 1)[0]
converters_folder = f'{datatypes_folder}/converters'
return [converters_folder]
def _fetch_builtin(tool_id: str) -> Optional[str]:
return get_builtin_tool_path(tool_id)
def _fetch_cache(repo: str, revision: str, tool_id: str) -> Optional[str]:
wrapper = CACHE.get(repo, revision)
if wrapper:
xml = utils.get_xml_by_id(wrapper, tool_id)
if xml:
return os.path.join(wrapper, xml)
return None
def _fetch_toolshed(owner: str, repo: str, revision: str, tool_id: str) -> Optional[str]:
# download and add to cache
url = _get_url_via_revision(owner, repo, revision)
# logging.msg_downloading_tool(url)
tar = _download_wrapper(url)
CACHE.add(tar)
# fetch from cache
return _fetch_cache(repo, revision, tool_id)
def _get_url_via_revision(owner: str, repo: str, revision: str) -> str:
return f'https://toolshed.g2.bx.psu.edu/repos/{owner}/{repo}/archive/{revision}.tar.gz'
def _download_wrapper(url: str) -> tarfile.TarFile:
response = requests.get(url, stream=True)
return tarfile.open(fileobj=response.raw, mode='r:gz')
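# Hedged usage sketch: the toolshed coordinates below are placeholders, not verified
# owner/repo/revision values. With real coordinates the tarball is downloaded once,
# cached, and the local path to the tool's XML is returned.
xml_path = fetch_xml(
    owner='example_owner',
    repo='example_repo',
    revision='0123456789ab',
    tool_id='example_tool',
)
print(xml_path)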
|
PypiClean
|
/satnogs-db-1.51.tar.gz/satnogs-db-1.51/db/static/lib/admin-lte/plugins/moment/locale/tet.js
|
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
//! moment.js locale configuration
var tet = moment.defineLocale('tet', {
months: 'Janeiru_Fevereiru_Marsu_Abril_Maiu_Juñu_Jullu_Agustu_Setembru_Outubru_Novembru_Dezembru'.split(
'_'
),
monthsShort: 'Jan_Fev_Mar_Abr_Mai_Jun_Jul_Ago_Set_Out_Nov_Dez'.split('_'),
weekdays: 'Domingu_Segunda_Tersa_Kuarta_Kinta_Sesta_Sabadu'.split('_'),
weekdaysShort: 'Dom_Seg_Ters_Kua_Kint_Sest_Sab'.split('_'),
weekdaysMin: 'Do_Seg_Te_Ku_Ki_Ses_Sa'.split('_'),
longDateFormat: {
LT: 'HH:mm',
LTS: 'HH:mm:ss',
L: 'DD/MM/YYYY',
LL: 'D MMMM YYYY',
LLL: 'D MMMM YYYY HH:mm',
LLLL: 'dddd, D MMMM YYYY HH:mm',
},
calendar: {
sameDay: '[Ohin iha] LT',
nextDay: '[Aban iha] LT',
nextWeek: 'dddd [iha] LT',
lastDay: '[Horiseik iha] LT',
lastWeek: 'dddd [semana kotuk] [iha] LT',
sameElse: 'L',
},
relativeTime: {
future: 'iha %s',
past: '%s liuba',
s: 'segundu balun',
ss: 'segundu %d',
m: 'minutu ida',
mm: 'minutu %d',
h: 'oras ida',
hh: 'oras %d',
d: 'loron ida',
dd: 'loron %d',
M: 'fulan ida',
MM: 'fulan %d',
y: 'tinan ida',
yy: 'tinan %d',
},
dayOfMonthOrdinalParse: /\d{1,2}(st|nd|rd|th)/,
ordinal: function (number) {
var b = number % 10,
output =
~~((number % 100) / 10) === 1
? 'th'
: b === 1
? 'st'
: b === 2
? 'nd'
: b === 3
? 'rd'
: 'th';
return number + output;
},
week: {
dow: 1, // Monday is the first day of the week.
doy: 4, // The week that contains Jan 4th is the first week of the year.
},
});
return tet;
})));
|
PypiClean
|
/napalm-yang-0.1.0.tar.gz/napalm-yang-0.1.0/napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/__init__.py
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/l3vpn-ipv6-multicast/prefix-limit/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"l3vpn-ipv6-multicast",
"prefix-limit",
"config",
]
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """max_prefixes must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__max_prefixes = t
if hasattr(self, "_set"):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
def _get_prevent_teardown(self):
"""
Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/prevent_teardown (boolean)
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prevent_teardown must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__prevent_teardown = t
if hasattr(self, "_set"):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
def _get_shutdown_threshold_pct(self):
"""
Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
"defined-type": "oc-types:percentage",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=True)""",
}
)
self.__shutdown_threshold_pct = t
if hasattr(self, "_set"):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
def _get_restart_timer(self):
"""
Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/restart_timer (decimal64)
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_timer must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""",
}
)
self.__restart_timer = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
max_prefixes = __builtin__.property(_get_max_prefixes, _set_max_prefixes)
prevent_teardown = __builtin__.property(
_get_prevent_teardown, _set_prevent_teardown
)
shutdown_threshold_pct = __builtin__.property(
_get_shutdown_threshold_pct, _set_shutdown_threshold_pct
)
restart_timer = __builtin__.property(_get_restart_timer, _set_restart_timer)
_pyangbind_elements = OrderedDict(
[
("max_prefixes", max_prefixes),
("prevent_teardown", prevent_teardown),
("shutdown_threshold_pct", shutdown_threshold_pct),
("restart_timer", restart_timer),
]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/l3vpn-ipv6-multicast/prefix-limit/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"l3vpn-ipv6-multicast",
"prefix-limit",
"config",
]
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """max_prefixes must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__max_prefixes = t
if hasattr(self, "_set"):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
def _get_prevent_teardown(self):
"""
Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/prevent_teardown (boolean)
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prevent_teardown must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__prevent_teardown = t
if hasattr(self, "_set"):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
def _get_shutdown_threshold_pct(self):
"""
Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
"defined-type": "oc-types:percentage",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=True)""",
}
)
self.__shutdown_threshold_pct = t
if hasattr(self, "_set"):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
def _get_restart_timer(self):
"""
Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/restart_timer (decimal64)
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_timer must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""",
}
)
self.__restart_timer = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
max_prefixes = __builtin__.property(_get_max_prefixes, _set_max_prefixes)
prevent_teardown = __builtin__.property(
_get_prevent_teardown, _set_prevent_teardown
)
shutdown_threshold_pct = __builtin__.property(
_get_shutdown_threshold_pct, _set_shutdown_threshold_pct
)
restart_timer = __builtin__.property(_get_restart_timer, _set_restart_timer)
_pyangbind_elements = OrderedDict(
[
("max_prefixes", max_prefixes),
("prevent_teardown", prevent_teardown),
("shutdown_threshold_pct", shutdown_threshold_pct),
("restart_timer", restart_timer),
]
)
|
PypiClean
|
/llm_toys-0.1.1-py3-none-any.whl/llm_toys/hf/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
"""
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
bos_token="[SEP]",
eos_token="[SEP]",
sep_token="[SEP]",
unk_token="[UNK]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
sp_model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
unk_token=unk_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
sp_model_kwargs=self.sp_model_kwargs,
**kwargs,
)
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece"
)
raise
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
tok = f"[unused{i}]"
self.fairseq_tokens_to_ids[tok] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
self.fairseq_offset = 12
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(k)
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece"
)
raise
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return ([0] * len(token_ids_0)) + [1]
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet
does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0]
return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
return len(self.sp_model) + self.fairseq_offset
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A XLMProphetNet sequence has the following format:
- single sequence: `X [SEP]`
- pair of sequences: `A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return token_ids_0 + [self.sep_token_id]
sep = [self.sep_token_id]
return token_ids_0 + sep + token_ids_1 + sep
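# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the sequence formats described in
# build_inputs_with_special_tokens() above, assuming a local SentencePiece
# model file "prophetnet.tokenizer" is available (the path is a placeholder):
#
#     tokenizer = XLMProphetNetTokenizer("prophetnet.tokenizer")
#     single = tokenizer.build_inputs_with_special_tokens([10, 11, 12])
#     # -> [10, 11, 12, tokenizer.sep_token_id]                              # X [SEP]
#     pair = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
#     # -> [10, 11, tokenizer.sep_token_id, 20, 21, tokenizer.sep_token_id]  # A [SEP] B [SEP]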
|
PypiClean
|
/django-adminlte3-amigne-0.1.99.tar.gz/django-adminlte3-amigne-0.1.99/adminlte3/static/admin-lte/plugins/codemirror/mode/rust/rust.js
|
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"), require("../../addon/mode/simple"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror", "../../addon/mode/simple"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineSimpleMode("rust",{
start: [
// string and byte string
{regex: /b?"/, token: "string", next: "string"},
// raw string and raw byte string
{regex: /b?r"/, token: "string", next: "string_raw"},
{regex: /b?r#+"/, token: "string", next: "string_raw_hash"},
// character
{regex: /'(?:[^'\\]|\\(?:[nrt0'"]|x[\da-fA-F]{2}|u\{[\da-fA-F]{6}\}))'/, token: "string-2"},
// byte
{regex: /b'(?:[^']|\\(?:['\\nrt0]|x[\da-fA-F]{2}))'/, token: "string-2"},
{regex: /(?:(?:[0-9][0-9_]*)(?:(?:[Ee][+-]?[0-9_]+)|\.[0-9_]+(?:[Ee][+-]?[0-9_]+)?)(?:f32|f64)?)|(?:0(?:b[01_]+|(?:o[0-7_]+)|(?:x[0-9a-fA-F_]+))|(?:[0-9][0-9_]*))(?:u8|u16|u32|u64|i8|i16|i32|i64|isize|usize)?/,
token: "number"},
{regex: /(let(?:\s+mut)?|fn|enum|mod|struct|type|union)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)/, token: ["keyword", null, "def"]},
{regex: /(?:abstract|alignof|as|async|await|box|break|continue|const|crate|do|dyn|else|enum|extern|fn|for|final|if|impl|in|loop|macro|match|mod|move|offsetof|override|priv|proc|pub|pure|ref|return|self|sizeof|static|struct|super|trait|type|typeof|union|unsafe|unsized|use|virtual|where|while|yield)\b/, token: "keyword"},
{regex: /\b(?:Self|isize|usize|char|bool|u8|u16|u32|u64|f16|f32|f64|i8|i16|i32|i64|str|Option)\b/, token: "atom"},
{regex: /\b(?:true|false|Some|None|Ok|Err)\b/, token: "builtin"},
{regex: /\b(fn)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)/,
token: ["keyword", null ,"def"]},
{regex: /#!?\[.*\]/, token: "meta"},
{regex: /\/\/.*/, token: "comment"},
{regex: /\/\*/, token: "comment", next: "comment"},
{regex: /[-+\/*=<>!]+/, token: "operator"},
{regex: /[a-zA-Z_]\w*!/,token: "variable-3"},
{regex: /[a-zA-Z_]\w*/, token: "variable"},
{regex: /[\{\[\(]/, indent: true},
{regex: /[\}\]\)]/, dedent: true}
],
string: [
{regex: /"/, token: "string", next: "start"},
{regex: /(?:[^\\"]|\\(?:.|$))*/, token: "string"}
],
string_raw: [
{regex: /"/, token: "string", next: "start"},
{regex: /[^"]*/, token: "string"}
],
string_raw_hash: [
{regex: /"#+/, token: "string", next: "start"},
{regex: /(?:[^"]|"(?!#))*/, token: "string"}
],
comment: [
{regex: /.*?\*\//, token: "comment", next: "start"},
{regex: /.*/, token: "comment"}
],
meta: {
dontIndentStates: ["comment"],
electricInput: /^\s*\}$/,
blockCommentStart: "/*",
blockCommentEnd: "*/",
lineComment: "//",
fold: "brace"
}
});
CodeMirror.defineMIME("text/x-rustsrc", "rust");
CodeMirror.defineMIME("text/rust", "rust");
});
|
PypiClean
|
/console_widgets-0.0.603.tar.gz/console_widgets-0.0.603/console_widgets/console_widgets.py
|
from .errors import *
from .themes import *
import inspect
from collections import OrderedDict
def write_roman(num):
    """Convert a positive integer into its Roman-numeral string (e.g. 14 -> "XIV")."""
roman = OrderedDict()
roman[1000] = "M"
roman[900] = "CM"
roman[500] = "D"
roman[400] = "CD"
roman[100] = "C"
roman[90] = "XC"
roman[50] = "L"
roman[40] = "XL"
roman[10] = "X"
roman[9] = "IX"
roman[5] = "V"
roman[4] = "IV"
roman[1] = "I"
def roman_num(num):
for r in roman.keys():
x, y = divmod(num, r)
yield roman[r] * x
num -= (r * x)
if num <= 0:
break
return "".join([a for a in roman_num(num)])
### IDEAS
## ------------------------------------------------------------------------------------------------
## add a modify_width method which automagically modifies the width of a widget after using the set method
## add the possibility to add a multi-line body for the widgets and other attributes
## ^ done only for the ConsoleWidget currently
## (+?) change allignment checking from ConsoleBox to another method (kinda done)
## (easy!) add Corners() class for the Box() class so that you don't need to use lists
## !!!!!!!!!!!!!!!!!
## (!!!important!!!) check the parsing of items in ConsoleList
## !!!!!!!!!!!!!!!!!
## add box and list themes
## remove subtitles from all objects, not just boxes
def parse_text(text):
if type(text) is str:
parsed_text = text.split('\n')
return parsed_text
raise ParseError(text)
class ConsoleWidget():
def __init__(self, title: str = "", subtitle: str = "",
body: str = "", width: int = None):
self.title = parse_text(title.upper())
self.subtitle = parse_text(subtitle.title())
self.body = parse_text(body)
def show(self):
for attribute in [self.title, self.subtitle, self.body]:
if any(attribute):
for line in attribute:
print(line)
print()
def set(self, attr_val: dict = {}):
if type(attr_val) is not dict:
raise SetError(attr_val)
[setattr(self, attribute, value) for attribute, value in attr_val.items()]
## ! This is a temporary solution to possibly changing the max width when resetting an attribute
#if type(self) is ConsoleList():
# self.width = max(17 + len(str(len(items))), max(map(len, map(str, [self.title, self.subtitle] + self.items))))
def get_attributes(self):
### This code is courtesy of Matt Luongo from StackOverflow
### https://stackoverflow.com/questions/9058305/getting-attributes-of-a-class
attributes = inspect.getmembers(self, lambda a:not(inspect.isroutine(a)))
return [a for a in attributes if not(a[0].startswith('__') and a[0].endswith('__'))]
class ConsoleBox(ConsoleWidget):
def __init__(self, title: str = "", subtitle: str = "", body: str = "", theme: str = "default", horizontal_margin: int = 0, vertical_margin: int = 0, allignment: str = "CENTER"):
super(ConsoleBox, self).__init__()
global box_themes
self.title = parse_text(title)
self.body = parse_text(body)
self.horizontal_margin = horizontal_margin
self.vertical_margin = vertical_margin
self.allignment = allignment
self.set_theme(theme)
self.width = max(map(len, self.title + self.subtitle + self.body)) + 2 * self.horizontal_margin
def set_theme(self, theme):
self.box = theme
if not self.box in box_themes:
self.box = "default"
self.set(box_themes[self.box])
def return_padding(self, padding):
## The function returns the left and right paddings
if self.allignment == "CENTER":
return (" " * ((padding // 2) + (padding % 2)), " " * (padding // 2))
if self.allignment == "LEFT":
return ("", " " * padding)
if self.allignment == "RIGHT":
return (" " * padding, "")
def show(self):
## for i in range(self.border_width):
print(self.upper_left + self.upper_border * self.width + self.upper_right)
if any(self.title):
for title_line in self.title:
title_padding = self.width - len(title_line)
(left_t_padding, right_t_padding) = self.return_padding(title_padding)
print(self.left_border + left_t_padding + title_line + right_t_padding + self.right_border)
print(self.left_vertical + self.horizontal_line * self.width + self.right_vertical)
if any(self.body):
for body_line in self.body:
body_padding = self.width - len(body_line)
(left_b_padding, right_b_padding) = self.return_padding(body_padding)
print(self.left_border + left_b_padding + body_line + right_b_padding + self.right_border)
print(self.lower_left + self.lower_border * self.width + self.lower_right)
class ConsoleList(ConsoleWidget):
def __init__(self, title: str = "", subtitle: str = "", items: list = [], list_type: str = "default"):
super(ConsoleList, self).__init__()
self.title = parse_text(title)
self.subtitle = parse_text(subtitle)
self.items = [parse_text(item) for item in items]
self.list_type = list_type
self.width = max(map(len, map(str, self.title + self.subtitle + self.items)))
def show(self):
print("─" * self.width)
for attribute in [self.title, self.subtitle]:
if attribute:
for line in attribute:
print(line)
print("─" * self.width)
for item_number, item in enumerate(self.items):
if item:
print(f"{item_number + 1}. {item[0]}")
for line in item[1:]:
print(line)
print("─" * self.width)
class ConsoleSelection(ConsoleList):
def __init__(self, title: str = "", subtitle: str = "", items: list = []):
self.title = title
self.subtitle = subtitle
self.items = items
self.width = max(17 + len(str(len(items))), max(map(len, map(str, [self.title, self.subtitle] + self.items))))
def select(self):
super(ConsoleSelection, self).show()
selection = input(f"Select [1-{len(self.items)}]: ")
#print("─" * self.width)
return selection
|
PypiClean
|
/tensorflow_cpu-2.14.0rc1-cp311-cp311-macosx_10_15_x86_64.whl/tensorflow/python/ops/ragged/segment_id_ops.py
|
"""Ops for converting between row_splits and segment_ids."""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# For background on "segments" and "segment ids", see:
# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation
@tf_export("ragged.row_splits_to_segment_ids")
@dispatch.add_dispatch_support
def row_splits_to_segment_ids(splits, name=None, out_type=None):
"""Generates the segmentation corresponding to a RaggedTensor `row_splits`.
Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if
`splits[j] <= i < splits[j+1]`. Example:
>>> print(tf.ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9]))
tf.Tensor([0 0 0 2 2 3 4 4 4], shape=(9,), dtype=int64)
Args:
splits: A sorted 1-D integer Tensor. `splits[0]` must be zero.
name: A name prefix for the returned tensor (optional).
out_type: The dtype for the return value. Defaults to `splits.dtype`,
or `tf.int64` if `splits` does not have a dtype.
Returns:
A sorted 1-D integer Tensor, with `shape=[splits[-1]]`
Raises:
ValueError: If `splits` is invalid.
"""
with ops.name_scope(name, "RaggedSplitsToSegmentIds", [splits]) as name:
splits = ops.convert_to_tensor(
splits, name="splits",
preferred_dtype=dtypes.int64)
if splits.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("splits must have dtype int32 or int64")
splits.shape.assert_has_rank(1)
if tensor_shape.dimension_value(splits.shape[0]) == 0:
raise ValueError("Invalid row_splits: []")
if out_type is None:
out_type = splits.dtype
else:
out_type = dtypes.as_dtype(out_type)
row_lengths = splits[1:] - splits[:-1]
nrows = array_ops.shape(splits, out_type=out_type)[-1] - 1
indices = math_ops.range(nrows)
return ragged_util.repeat(indices, repeats=row_lengths, axis=0)
# For background on "segments" and "segment ids", see:
# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation
@tf_export("ragged.segment_ids_to_row_splits")
@dispatch.add_dispatch_support
def segment_ids_to_row_splits(segment_ids, num_segments=None,
out_type=None, name=None):
"""Generates the RaggedTensor `row_splits` corresponding to a segmentation.
Returns an integer vector `splits`, where `splits[0] = 0` and
`splits[i] = splits[i-1] + count(segment_ids==i)`. Example:
>>> print(tf.ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4]))
tf.Tensor([0 3 3 5 6 9], shape=(6,), dtype=int64)
Args:
segment_ids: A 1-D integer Tensor.
num_segments: A scalar integer indicating the number of segments. Defaults
to `max(segment_ids) + 1` (or zero if `segment_ids` is empty).
out_type: The dtype for the return value. Defaults to `segment_ids.dtype`,
or `tf.int64` if `segment_ids` does not have a dtype.
name: A name prefix for the returned tensor (optional).
Returns:
A sorted 1-D integer Tensor, with `shape=[num_segments + 1]`.
"""
# Local import bincount_ops to avoid import-cycle.
from tensorflow.python.ops import bincount_ops # pylint: disable=g-import-not-at-top
if out_type is None:
if isinstance(segment_ids, tensor.Tensor):
out_type = segment_ids.dtype
elif isinstance(num_segments, tensor.Tensor):
out_type = num_segments.dtype
else:
out_type = dtypes.int64
else:
out_type = dtypes.as_dtype(out_type)
with ops.name_scope(name, "SegmentIdsToRaggedSplits", [segment_ids]) as name:
# Note: we cast int64 tensors to int32, since bincount currently only
# supports int32 inputs.
segment_ids = ragged_util.convert_to_int_tensor(segment_ids, "segment_ids",
dtype=dtypes.int32)
segment_ids.shape.assert_has_rank(1)
if num_segments is not None:
num_segments = ragged_util.convert_to_int_tensor(num_segments,
"num_segments",
dtype=dtypes.int32)
num_segments.shape.assert_has_rank(0)
row_lengths = bincount_ops.bincount(
segment_ids,
minlength=num_segments,
maxlength=num_segments,
dtype=out_type)
splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
# Update shape information, if possible.
if num_segments is not None:
const_num_segments = tensor_util.constant_value(num_segments)
if const_num_segments is not None:
splits.set_shape(tensor_shape.TensorShape([const_num_segments + 1]))
return splits
|
PypiClean
|
/sphinxcontrib-imagehelper-1.1.1.tar.gz/sphinxcontrib-imagehelper-1.1.1/README.rst
|
sphinxcontrib-imagehelper
==========================
.. image:: https://travis-ci.org/tk0miya/sphinxcontrib-imagehelper.svg?branch=master
:target: https://travis-ci.org/tk0miya/sphinxcontrib-imagehelper
.. image:: https://coveralls.io/repos/tk0miya/sphinxcontrib-imagehelper/badge.png?branch=master
:target: https://coveralls.io/r/tk0miya/sphinxcontrib-imagehelper?branch=master
.. image:: https://codeclimate.com/github/tk0miya/sphinxcontrib-imagehelper/badges/gpa.svg
:target: https://codeclimate.com/github/tk0miya/sphinxcontrib-imagehelper
`sphinxcontrib-imagehelper` provides helpers for creating image-handling Sphinx extensions.
Adding support for a new image format to Sphinx is tedious boilerplate work,
and this package takes care of most of it for you.
It provides these features:
* Caching converted images
* Conceal the Sphinx directory structure; image paths are determined automatically
* Support common options for the image directive (`:height:`, `:scale:`, `:align:`, and so on)
* Enhance the standard `image` and `figure` directives so they can embed the new image format
With `sphinxcontrib-imagehelper`, all you have to do is convert the new image format to a
well-known one (PNG, JPG, and so on).
Install
=======
::
$ pip install sphinxcontrib-imagehelper
Example
=======
::
from hashlib import sha1
from sphinxcontrib.imagehelper import (
add_image_type, add_image_directive, add_figure_directive, ImageConverter
)
# Declare converter class inherits ImageConverter
class MyImageConverter(ImageConverter):
# Override `get_filename_for()` to determine filename
def get_filename_for(self, node):
# The filename is derived from the image URI and the extension's configuration
hashed = sha1((node['uri'] + self.app.config.some_convert_settings).encode('utf-8')).hexdigest()
return 'myimage-%s.png' % hashed
# Override `convert()` to convert the new image format to a well-known one (PNG, JPG and so on)
def convert(self, node, filename, to):
# Hint: you can refer self.app.builder.format to switch conversion behavior
succeeded = convert_myimage_to_png(filename, to,
option1=node['option'],
option2=self.app.config.some_convert_settings)
if succeeded:
return True # return True if conversion succeeded
else:
return False
def setup(app):
# Register new image type: myimage
add_image_type(app, 'my', 'img', MyImageConverter)
# Register my-image directive
add_image_directive(app, 'my')
# Register my-figure directive
add_figure_directive(app, 'my')
Helpers
=======
`sphinxcontrib.imagehelper.add_image_type(app, name, ext, handler)`
Register a new image type which is identified by the file extension `ext`.
The `handler` is used to convert image formats.
`sphinxcontrib.imagehelper.ImageConverter`
A handler class for converting image formats. It is used by `add_image_type()`.
Developers of Sphinx extensions should create a handler class which inherits from `ImageConverter`
and override the following attributes and methods:
`ImageConverter.option_spec`
A definition of additional options.
By default, it is an empty dict.
`ImageConverter.get_last_modified_for(self, node)`
Determine the last-modified time of the target image.
By default, this method returns the timestamp of the image file.
`ImageConverter.get_filename_for(self, node)`
Determine the filename of the converted image.
By default, this method returns the filename with its extension replaced by '.png'::
def get_filename_for(self, node):
return os.path.splitext(node['uri'])[0] + '.png'
`ImageConverter.convert(self, node, filename, to)`
Convert the image to an embeddable format.
By default, this method does nothing.
`sphinxcontrib.imagehelper.add_image_directive(app, name, option_spec={})`
Add a custom image directive to Sphinx.
The directive is named `name`-image (e.g. astah-image).
If `option_spec` is given, the new directive accepts custom options.
`sphinxcontrib.imagehelper.add_figure_directive(app, name, option_spec={})`
Add a custom figure directive to Sphinx.
The directive is named `name`-figure (e.g. astah-figure).
If `option_spec` is given, the new directive accepts custom options.
`sphinxcontrib.imagehelper.generate_image_directive(name, option_spec={})`
Generate a custom image directive class. The class is not registered to Sphinx.
You can enhance the directive class with subclassing.
`sphinxcontrib.imagehelper.generate_figure_directive(name, option_spec={})`
Generate a custom figure directive class. The class is not registered to Sphinx.
You can enhance the directive class with subclassing.
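As a rough sketch of how these helpers fit together, the example below registers a hypothetical `foo` image type whose converter declares one extra directive option; the option name and the `foo2png()` conversion call are illustrative placeholders, not part of this package::
    import os
    from docutils.parsers.rst import directives
    from sphinxcontrib.imagehelper import (
        ImageConverter, add_image_type, add_image_directive, add_figure_directive
    )

    class FooImageConverter(ImageConverter):
        # Extra option accepted by the foo-image/foo-figure directives (illustrative)
        option_spec = {'quality': directives.nonnegative_int}

        def get_filename_for(self, node):
            # Cache the converted image as a PNG named after the source file
            return os.path.splitext(node['uri'])[0] + '.png'

        def convert(self, node, filename, to):
            # foo2png() stands in for whatever real conversion tool you would call;
            # return True on success, False on failure
            return foo2png(filename, to, quality=node.get('quality', 75))

    def setup(app):
        add_image_type(app, 'foo', 'foo', FooImageConverter)
        add_image_directive(app, 'foo', FooImageConverter.option_spec)
        add_figure_directive(app, 'foo', FooImageConverter.option_spec)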
|
PypiClean
|
/jupancon-0.3.3.tar.gz/jupancon-0.3.3/README.md
|
# jupancon
Database Connectors and SQL magics for [Jupyter](https://docs.jupyter.org/en/latest/). `jupancon` = Jupyter + Pandas + Connectors.
### Features
- Connector to Redshift
- Using user/pass
- Using IAM profile
- Connector to Bigquery (using google profile)
- Connector to Databricks
- Optional automatic tunnel setting through an SSH Bastion
- Querying capabilities
- IPython kernel magics for querying
- Always returns [Pandas DataFrames](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
- Some hidden stuff I'd rather not document just yet, so you don't nuke your Warehouse :) Will do when it's safer to use
### Install
```bash
pip install jupancon
```
### Configure
Write a `~/.jupancon/config.yml` YAML file that looks similar to the following C&P from my actual config file (heavily censored for obvious reasons):
```yaml
default: my-redshift-cluster
my-redshift-cluster:
  type: redshift
  host: XXXXXX.XXXXXX.XXXXXXX.redshift.amazonaws.com
  # explicitly setting redshift port (optional)
  port: 5439
  user: XXXXXXXX
  pass: XXXXXXXX
  dbname: XXXXXX
my-redshift-using-iamprofile:
  type: redshift
  host: XXXXXX.XXXXXX.XXXXXXX.redshift.amazonaws.com
  profile: XXXXXXXXX
  dbname: XXXXXX
  # NOTE: you can choose dbuser and it will be auto-created if it doesn't exist
  dbuser: XXXXXX
  cluster: XXXXXX
my-gcp:
  type: bigquery
  project: XXXXX-XXXXX-123456
my-databricks:
  type: databricks
  server_hostname: XXXXXX.cloud.databricks.com
  http_path: /sql/XXX/XXXX/XXXXXXXXXX
  # optional
  catalog: XXXXXXX
  token: XXXXXXXXX
my-redshift-behind-sshbastion:
  type: redshift
  use_bastion: true
  bastion_server: censored.bastion.server.com
  bastion_user: XXXXXX
  bastion_host: XXXXXX.XXXXXX.XXXXXX.redshift.amazonaws.com
  host: censored.main.server.com
  user: XXXXXXXX
  pass: XXXXXXXX
  dbname: XXXXXX
```
Jupancon will also pick up environment variables, which take precedence over the `config.yml` (see the sketch after this list).
- `JPC_DB_TYPE`: `redshift` or `bigquery`
- `JPC_HOST`: for example, `XXXXXX.XXXXXX.XXXXXX.redshift.amazonaws.com`
- `JPC_USER`: User name
- `JPC_DB`: Database name
- `JPC_PASS`: Password
- `JPC_USE_BASTION`: `true` or leave blank
- `JPC_BASTION_SERVER`
- `JPC_BASTION_HOST`
- `JPC_PROFILE`: IAM profile (for IAM connection only)
- `JPC_CLUSTER`: Redshift cluster (for IAM connection only)
- `JPC_DBUSER`: Redshift user (for IAM connection only)
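For example, a minimal sketch of configuring a Redshift connection purely through environment variables (all values are placeholders, and it's assumed the `JPC_*` variables are set before the first query runs):
```python
import os

# Placeholder credentials -- replace with your own cluster details
os.environ["JPC_DB_TYPE"] = "redshift"
os.environ["JPC_HOST"] = "mycluster.abc123.eu-west-1.redshift.amazonaws.com"
os.environ["JPC_USER"] = "analyst"
os.environ["JPC_PASS"] = "supersecret"
os.environ["JPC_DB"] = "analytics"

from jupancon import query  # jupancon picks these up instead of config.yml

df = query("select 1 as one")
```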
# How to use
This library is developed primarily for use within [Jupyter Lab](https://jupyterlab.readthedocs.io/en/stable/getting_started/overview.html). It's likely to work in Jupyter Notebook and IPython, but those environments are untested and unsupported at this stage. It also works and is being used in regular scripts, but [it obviously loses its magic](https://ipython.readthedocs.io/en/stable/interactive/magics.html).
### Regular usage
```python
from jupancon import query, list_schemas, list_tables
list_schemas()
list_tables()
query("select * from foo")
```
### Magical usage
```python
from jupancon import load_magics
load_magics()
```
```sql
select * from foo
```
```sql
df = %select * from foo
```
```sql
%%sql
select *
from foo
where cond = 1
and label = 'my nice label'
```
# Development
Current status: Jupancon has enough basic features that it's worth open sourcing, but the documentation is still lacking.
### TODO list
- `list_table("schema")` should detect when the schema doesn't exist and return an error
- Add query monitoring and cancelling functionality
- Complete docs (low level stuff, exhaustive features, maybe sphinx/rdd?)
- Add animated gifs to docs
### Features that aren't worth adding right now
- Autocomplete and autodiscovery of databases are possible, but not trivial at all. In addition, I'd like to find a way of doing it without adding any extra configuration. Regardless, it's not worth it until the TODO list above is tackled. See [this project](https://github.com/jupyter-lsp/jupyterlab-lsp) for a successful example.
- Because of the current architecture of Jupyter Lab, syntax highlighting is not feasible to add (as it was in Jupyter Notebook). This might change in the future. See this [git issue](https://github.com/jupyterlab/jupyterlab/issues/3869) for more details.
### A note about Unit Testing
I would like to publish decent unit tests, but this library is hard to test because all the databases currently queried during its development are either test databases that cost me money or private (my clients') databases. Any ideas on how to write an open-source, non-exploitable set of unit tests for Redshift or BigQuery are very welcome.
|
PypiClean
|
/shimao-nosidebar-frontend-20180926.0.tar.gz/shimao-nosidebar-frontend-20180926.0/hass_frontend_es5/custom-panel-7baaafff.js
|
!function(e){function n(n){for(var t,o,u=n[0],i=n[1],c=0,l=[];c<u.length;c++)o=u[c],r[o]&&l.push(r[o][0]),r[o]=0;for(t in i)Object.prototype.hasOwnProperty.call(i,t)&&(e[t]=i[t]);for(a&&a(n);l.length;)l.shift()()}var t={},r={11:0};function o(n){if(t[n])return t[n].exports;var r=t[n]={i:n,l:!1,exports:{}};return e[n].call(r.exports,r,r.exports,o),r.l=!0,r.exports}o.e=function(e){var n=[],t=r[e];if(0!==t)if(t)n.push(t[2]);else{var u=new Promise(function(n,o){t=r[e]=[n,o]});n.push(t[2]=u);var i,c=document.getElementsByTagName("head")[0],a=document.createElement("script");a.charset="utf-8",a.timeout=120,o.nc&&a.setAttribute("nonce",o.nc),a.src=function(e){return o.p+""+{43:"61dbd42bf439af741251",44:"870fcc2320e7d91600a1",45:"a0c6ceca1f06db7c24f3",52:"fcdd53ccee1700ace44d"}[e]+".chunk.js"}(e),i=function(n){a.onerror=a.onload=null,clearTimeout(l);var t=r[e];if(0!==t){if(t){var o=n&&("load"===n.type?"missing":n.type),u=n&&n.target&&n.target.src,i=new Error("Loading chunk "+e+" failed.\n("+o+": "+u+")");i.type=o,i.request=u,t[1](i)}r[e]=void 0}};var l=setTimeout(function(){i({type:"timeout",target:a})},12e4);a.onerror=a.onload=i,c.appendChild(a)}return Promise.all(n)},o.m=e,o.c=t,o.d=function(e,n,t){o.o(e,n)||Object.defineProperty(e,n,{enumerable:!0,get:t})},o.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},o.t=function(e,n){if(1&n&&(e=o(e)),8&n)return e;if(4&n&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(o.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&n&&"string"!=typeof e)for(var r in e)o.d(t,r,function(n){return e[n]}.bind(null,r));return t},o.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return o.d(n,"a",n),n},o.o=function(e,n){return Object.prototype.hasOwnProperty.call(e,n)},o.p="/frontend_es5/",o.oe=function(e){throw console.error(e),e};var u=window.webpackJsonp=window.webpackJsonp||[],i=u.push.bind(u);u.push=n,u=u.slice();for(var c=0;c<u.length;c++)n(u[c]);var a=i;o(o.s=188)}({145:function(e,n,t){"use strict";t.d(n,"a",function(){return i});var r=t(82),o=function(e,n){if(Array.isArray(e))return e;if(Symbol.iterator in Object(e))return function(e,n){var t=[],r=!0,o=!1,u=void 0;try{for(var i,c=e[Symbol.iterator]();!(r=(i=c.next()).done)&&(t.push(i.value),!n||t.length!==n);r=!0);}catch(e){o=!0,u=e}finally{try{!r&&c.return&&c.return()}finally{if(o)throw u}}return t}(e,n);throw new TypeError("Invalid attempt to destructure non-iterable instance")},u={};function i(e){if(e.html_url){var n=[t.e(44).then(t.bind(null,251))];return e.embed_iframe||n.push(Promise.all([t.e(52),t.e(45)]).then(t.bind(null,139))),Promise.all(n).then(function(n){return(0,o(n,1)[0].importHrefPromise)(e.html_url)})}return e.js_url?(e.js_url in u||(u[e.js_url]=Object(r.b)(e.js_url)),u[e.js_url]):e.module_url?Object(r.c)(e.module_url):Promise.reject("No valid url found in panel config.")}},146:function(e,n,t){"use strict";function r(e){var n="html_url"in e?"ha-panel-"+e.name:e.name;return document.createElement(n)}t.d(n,"a",function(){return r})},147:function(e,n,t){"use strict";function r(e,n){"setProperties"in e?e.setProperties(n):Object.keys(n).forEach(function(t){e[t]=n[t]})}t.d(n,"a",function(){return r})},188:function(e,n,t){"use strict";t.r(n);var r=t(82),o=t(145),u=t(146),i=t(147),c="customElements"in window&&"import"in document.createElement("link")&&"content"in 
document.createElement("template"),a=null;window.loadES5Adapter=function(){return a||(a=Promise.all([Object(r.b)("/static/custom-elements-es5-adapter.js").catch(),t.e(43).then(t.bind(null,143))])),a};var l=null;function s(e){null!==l&&Object(i.a)(l,e)}function d(e,n){var t=document.createElement("style");t.innerHTML="body{margin:0}",document.head.appendChild(t);var i=e.config._panel_custom,d=Promise.resolve();c||(d=d.then(function(){return Object(r.b)("/static/webcomponents-bundle.js")})),(d=d.then(function(){return window.loadES5Adapter()})).then(function(){return Object(o.a)(i)}).then(function(){return a||Promise.resolve()}).then(function(){var t=function(e){return window.parent.customPanel.fire(e.type,e.detail)};(l=Object(u.a)(i)).addEventListener("hass-open-menu",t),l.addEventListener("hass-close-menu",t),l.addEventListener("location-changed",function(){return window.parent.customPanel.navigate(window.location.pathname)}),s(Object.assign({panel:e},n)),document.body.appendChild(l)},function(n){console.error(n,e),alert("Unable to load the panel source: "+n+".")})}document.addEventListener("DOMContentLoaded",function(){return window.parent.customPanel.registerIframe(d,s)},{once:!0})},82:function(e,n,t){"use strict";function r(e,n,t){return new Promise(function(r,o){var u=document.createElement(e),i="src",c="body";switch(u.onload=function(){return r(n)},u.onerror=function(){return o(n)},e){case"script":u.async=!0,t&&(u.type=t);break;case"link":u.type="text/css",u.rel="stylesheet",i="href",c="head"}u[i]=n,document[c].appendChild(u)})}t.d(n,"a",function(){return o}),t.d(n,"b",function(){return u}),t.d(n,"c",function(){return i});var o=function(e){return r("link",e)},u=function(e){return r("script",e)},i=function(e){return r("script",e,"module")}}});
//# sourceMappingURL=custom-panel-7baaafff.js.map
|
PypiClean
|
/robotframework-python3-2.9.tar.gz/robotframework-python3-2.9/src/robot/model/keyword.py
|
from itertools import chain
from operator import attrgetter
from robot.utils import setter, unic
from .itemlist import ItemList
from .message import Message, Messages
from .modelobject import ModelObject
from .tags import Tags
class Keyword(ModelObject):
"""Base model for single keyword."""
__slots__ = ['_name', 'doc', 'args', 'assign', 'timeout',
'type', '_sort_key', '_next_child_sort_key']
KEYWORD_TYPE = 'kw'
SETUP_TYPE = 'setup'
TEARDOWN_TYPE = 'teardown'
FOR_LOOP_TYPE = 'for'
FOR_ITEM_TYPE = 'foritem'
keyword_class = None
message_class = Message
def __init__(self, name='', doc='', args=(), assign=(), tags=(),
timeout=None, type='kw'):
#: :class:`~.model.testsuite.TestSuite` or
#: :class:`~.model.testcase.TestCase` or
#: :class:`~.model.keyword.Keyword` that contains this keyword.
self.parent = None
self._name = name
#: Keyword documentation.
self.doc = doc
#: Keyword arguments as a list of strings.
self.args = args
#: Assigned variables as a list of strings.
self.assign = assign
#: Keyword tags as a list-like :class:`~.model.tags.Tags` object.
self.tags = tags
#: Keyword timeout.
self.timeout = timeout
#: Keyword type as a string. See class level ``XXX_TYPE`` constants.
self.type = type
#: Keyword messages as :class:`~.model.message.Message` instances.
self.messages = None
#: Child keywords as :class:`~.model.keyword.Keyword` instances.
self.keywords = None
self._sort_key = -1
self._next_child_sort_key = 0
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@setter
def parent(self, parent):
if parent and parent is not self.parent:
self._sort_key = getattr(parent, '_child_sort_key', -1)
return parent
@property
def _child_sort_key(self):
self._next_child_sort_key += 1
return self._next_child_sort_key
@setter
def tags(self, tags):
return Tags(tags)
@setter
def keywords(self, keywords):
return Keywords(self.keyword_class or self.__class__, self, keywords)
@setter
def messages(self, messages):
return Messages(self.message_class, self, messages)
@property
def children(self):
"""Child keywords and messages in creation order."""
# It would be cleaner to store keywords/messages in same `children`
# list and turn `keywords` and `messages` to properties that pick items
# from it. That would require bigger changes to the model, though.
return sorted(chain(self.keywords, self.messages),
key=attrgetter('_sort_key'))
@property
def id(self):
if not self.parent:
return 'k1'
return '%s-k%d' % (self.parent.id, self.parent.keywords.index(self)+1)
def visit(self, visitor):
visitor.visit_keyword(self)
class Keywords(ItemList):
__slots__ = []
def __init__(self, keyword_class=Keyword, parent=None, keywords=None):
ItemList.__init__(self, keyword_class, {'parent': parent}, keywords)
@property
def setup(self):
return self[0] if (self and self[0].type == 'setup') else None
@property
def teardown(self):
return self[-1] if (self and self[-1].type == 'teardown') else None
@property
def all(self):
return self
@property
def normal(self):
kws = [kw for kw in self if kw.type not in ('setup', 'teardown')]
return Keywords(self._item_class, self._common_attrs['parent'], kws)
def __setitem__(self, index, item):
old = self[index]
ItemList.__setitem__(self, index, item)
self[index]._sort_key = old._sort_key
|
PypiClean
|
/deepfos-celery-1.1.2.tar.gz/deepfos-celery-1.1.2/docs/history/changelog-2.3.rst
|
.. _changelog-2.3:
===============================
Change history for Celery 2.3
===============================
.. contents::
:local:
.. _version-2.3.4:
2.3.4
=====
:release-date: 2011-11-25 04:00 p.m. GMT
:release-by: Ask Solem
.. _v234-security-fixes:
Security Fixes
--------------
* [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than
real id's when the :option:`--uid <celery --uid>`/
:option:`--gid <celery --gid>` arguments to :program:`celery multi`,
:program:`celeryd_detach`, :program:`celery beat` and
:program:`celery events` were used.
This means privileges weren't properly dropped, and that it would
be possible to regain supervisor privileges later.
.. _`CELERYSA-0001`:
https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt
Fixes
-----
* Backported fix for #455 from 2.4 to 2.3.
* StateDB wasn't saved at shutdown.
* Fixes worker sometimes hanging when hard time limit exceeded.
.. _version-2.3.3:
2.3.3
=====
:release-date: 2011-09-16 05:00 p.m. BST
:release-by: Mher Movsisyan
* Monkey patching :attr:`sys.stdout` could result in the worker
crashing if the replacing object didn't define :meth:`isatty`
(Issue #477).
* ``CELERYD`` option in :file:`/etc/default/celeryd` shouldn't
be used with generic init-scripts.
.. _version-2.3.2:
2.3.2
=====
:release-date: 2011-10-07 05:00 p.m. BST
:release-by: Ask Solem
.. _v232-news:
News
----
* Improved Contributing guide.
If you'd like to contribute to Celery you should read the
:ref:`Contributing Guide <contributing>`.
We're looking for contributors at all skill levels, so don't
hesitate!
* Now depends on Kombu 1.3.1
* ``Task.request`` now contains the current worker host name (Issue #460).
Available as ``task.request.hostname``.
* It's now easier for app subclasses to extend how they're pickled.
(see :class:`celery.app.AppPickler`).
.. _v232-fixes:
Fixes
-----
* `purge/discard_all` wasn't working correctly (Issue #455).
* The coloring of log messages didn't handle non-ASCII data well
(Issue #427).
* [Windows] the multiprocessing pool tried to import ``os.kill``
even though this isn't available there (Issue #450).
* Fixes case where the worker could become unresponsive because of tasks
exceeding the hard time limit.
* The :event:`task-sent` event was missing from the event reference.
* ``ResultSet.iterate`` now returns results as they finish (Issue #459).
This wasn't the case previously, even though the documentation
states this was the expected behavior.
* Retries will no longer be performed when tasks are called directly
(using ``__call__``).
Instead the exception passed to ``retry`` will be re-raised.
* Eventlet no longer crashes if autoscale is enabled.
Growing and shrinking eventlet pools is still not supported.
* ``py24`` target removed from :file:`tox.ini`.
.. _version-2.3.1:
2.3.1
=====
:release-date: 2011-08-07 08:00 p.m. BST
:release-by: Ask Solem
Fixes
-----
* The :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting didn't work,
resulting in an AMQP related error about not being able to serialize
floats while trying to publish task states (Issue #446).
.. _version-2.3.0:
2.3.0
=====
:release-date: 2011-08-05 12:00 p.m. BST
:tested: CPython: 2.5, 2.6, 2.7; PyPy: 1.5; Jython: 2.5.2
:release-by: Ask Solem
.. _v230-important:
Important Notes
---------------
* Now requires Kombu 1.2.1
* Results are now disabled by default.
The AMQP backend wasn't a good default because often the users were
not consuming the results, resulting in thousands of queues.
While the queues can be configured to expire if left unused, it wasn't
possible to enable this by default because this was only available in
recent RabbitMQ versions (2.1.1+)
With this change enabling a result backend will be a conscious choice,
which will hopefully lead the user to read the documentation and be aware
of any common pitfalls with the particular backend.
The default backend is now a dummy backend
(:class:`celery.backends.base.DisabledBackend`). Saving state is simply a
no-op, and AsyncResult.wait(), .result, .state, etc. will raise
a :exc:`NotImplementedError` telling the user to configure the result backend.
For help choosing a backend please see :ref:`task-result-backends`.
If you depend on the previous default which was the AMQP backend, then
you have to set this explicitly before upgrading::
CELERY_RESULT_BACKEND = 'amqp'
.. note::
For :pypi:`django-celery` users the default backend is
still ``database``, and results are not disabled by default.
* The Debian init-scripts have been deprecated in favor of the generic-init.d
init-scripts.
In addition, generic init-scripts for ``celerybeat`` and ``celeryev`` have
been added.
.. _v230-news:
News
----
* Automatic connection pool support.
The pool is used by everything that requires a broker connection, for
example calling tasks, sending broadcast commands, retrieving results
with the AMQP result backend, and so on.
The pool is disabled by default, but you can enable it by configuring the
:setting:`BROKER_POOL_LIMIT` setting::
BROKER_POOL_LIMIT = 10
A limit of 10 means a maximum of 10 simultaneous connections can co-exist.
Only a single connection will ever be used in a single-thread
environment, but in a concurrent environment (threads, greenlets, etc., but
not processes) when the limit has been exceeded, any attempt to acquire a
connection will block the thread and wait for a connection to be released.
This is something to take into consideration when choosing a limit.
A limit of :const:`None` or 0 means no limit, and connections will be
established and closed every time.
* Introducing Chords (taskset callbacks).
A chord is a task that only executes after all of the tasks in a taskset
have finished executing. It's a fancy term for "taskset callbacks"
adopted from
`Cω <http://research.microsoft.com/en-us/um/cambridge/projects/comega/>`_.
It works with all result backends, but the best implementation is
currently provided by the Redis result backend.
Here's an example chord::
>>> chord(add.subtask((i, i))
... for i in xrange(100))(tsum.subtask()).get()
9900
Please read the :ref:`Chords section in the user guide <canvas-chord>`, if you
want to know more.
* Time limits can now be set for individual tasks.
To set the soft and hard time limits for a task use the ``time_limit``
and ``soft_time_limit`` attributes:
.. code-block:: python
import time
@task(time_limit=60, soft_time_limit=30)
def sleeptask(seconds):
time.sleep(seconds)
If the attributes are not set, then the workers default time limits
will be used.
New in this version you can also change the time limits for a task
at runtime using the :func:`time_limit` remote control command::
>>> from celery.task import control
>>> control.time_limit('tasks.sleeptask',
... soft=60, hard=120, reply=True)
[{'worker1.example.com': {'ok': 'time limits set successfully'}}]
Only tasks that start executing after the time limit change will be affected.
.. note::
Soft time limits will still not work on Windows or other platforms
that don't have the ``SIGUSR1`` signal.
* Redis backend configuration directive names changed to include the
``CELERY_`` prefix.
====================  ========================
**Old setting name**  **Replace with**
====================  ========================
`REDIS_HOST`          `CELERY_REDIS_HOST`
`REDIS_PORT`          `CELERY_REDIS_PORT`
`REDIS_DB`            `CELERY_REDIS_DB`
`REDIS_PASSWORD`      `CELERY_REDIS_PASSWORD`
====================  ========================
The old names are still supported but pending deprecation.
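For example, an existing Redis configuration would be updated by adding the prefix to each setting (the values below are placeholders)::
    CELERY_REDIS_HOST = 'localhost'
    CELERY_REDIS_PORT = 6379
    CELERY_REDIS_DB = 0
    CELERY_REDIS_PASSWORD = 'secret'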
* PyPy: The default pool implementation used is now multiprocessing
if running on PyPy 1.5.
* multi: now supports "pass through" options.
Pass-through options make it easier to use Celery without a
configuration file, or just add last-minute options on the command
line.
Example use:
.. code-block:: console
$ celery multi start 4 -c 2 -- broker.host=amqp.example.com \
broker.vhost=/ \
celery.disable_rate_limits=yes
* ``celerybeat``: Now retries establishing the connection (Issue #419).
* ``celeryctl``: New ``list bindings`` command.
Lists the current or all available bindings, depending on the
broker transport used.
* Heartbeat is now sent every 30 seconds (previously every 2 minutes).
* ``ResultSet.join_native()`` and ``iter_native()`` are now supported by
the Redis and Cache result backends.
This is an optimized version of ``join()`` using the underlying
backend's ability to fetch multiple results at once.
* Can now use SSL when sending error e-mails by enabling the
:setting:`EMAIL_USE_SSL` setting.
* ``events.default_dispatcher()``: Context manager to easily obtain
an event dispatcher instance using the connection pool.
* Import errors in the configuration module won't be silenced anymore.
* ResultSet.iterate: Now supports the ``timeout``, ``propagate`` and
``interval`` arguments.
* ``with_default_connection`` -> ``with default_connection``
* TaskPool.apply_async: Keyword arguments ``callbacks`` and ``errbacks``
have been renamed to ``callback`` and ``errback`` and take a single scalar
value instead of a list.
* No longer propagates errors occurring during process cleanup (Issue #365)
* Added ``TaskSetResult.delete()``, which will delete a previously
saved taskset result.
* ``celerybeat`` now syncs every 3 minutes instead of only at
shutdown (Issue #382).
* Monitors now properly handle unknown events, so user-defined events
are displayed.
* Terminating a task on Windows now also terminates all of the tasks child
processes (Issue #384).
* worker: ``-I|--include`` option now always searches the current directory
to import the specified modules.
* Cassandra backend: Now expires results by using TTLs.
* Functional test suite in ``funtests`` is now actually working properly, and
passing tests.
.. _v230-fixes:
Fixes
-----
* ``celeryev`` was trying to create the pidfile twice.
* celery.contrib.batches: Fixed problem where tasks failed
silently (Issue #393).
* Fixed an issue where logging objects would give "<Unrepresentable",
even though the objects were representable.
* ``CELERY_TASK_ERROR_WHITE_LIST`` is now properly initialized
in all loaders.
* ``celeryd_detach`` now passes through command line configuration.
* Remote control command ``add_consumer`` now does nothing if the
queue is already being consumed from.
|
PypiClean
|
/pyrundeck-0.10.0.tar.gz/pyrundeck-0.10.0/README.md
|
# Rundeck REST API client




This is a Python REST API client for Rundeck 2.6+
## Example
```python
from pyrundeck import Rundeck
rundeck = Rundeck('http://rundeck-url',
                  token='sometoken',
                  api_version=32,  # this is not mandatory, it defaults to 18
                  )
run = rundeck.run_job(RUNDECK_JOB_ID, options={'option1': 'foo'})
running_jobs = rundeck.get_executions_for_job(job_id=RUNDECK_JOB_ID, status='running')
for job in running_jobs['executions']:
print("%s is running" % job['id'])
```
A token can be generated in the 'profile' page of Rundeck. Alternatively you
can log in with a username and password.
Example using the file upload option:
```python
from pyrundeck import Rundeck
rd = Rundeck(
    rundeck_url,
    username=username,
    password=password,
    verify=False,
    api_version=19,  # Required for file upload option
)
# Use the file_key returned in the response to reference the file when running a job
# Per documentation at https://docs.rundeck.com/docs/api/rundeck-api.html#upload-a-file-for-a-job-option
response = rd.upload_file(RUNDECK_JOB_ID, OPTION_NAME, FILE_NAME_STRING_OR_IOFILEWRAPPER)
file_key = response['options'][OPTION_NAME]
rd.run_job(RUNDECK_JOB_ID, options={OPTION_NAME: file_key})
```
## See also
- https://github.com/marklap/rundeckrun
## LICENSE
GPL3
|
PypiClean
|
/infra_operator-1.0.23-py3-none-any.whl/infra_operator/clients/mod.py
|
import os
import types
import boto3
from infra_operator.clients.ghe import GHE
def get_account_id(self):
return self.get_caller_identity()["Account"]
def find_distribution(self, Id=None, Alias=None, DomainName=None):
if Id is None:
pager = self.get_paginator("list_distributions")
matched = []
for res in pager.paginate():
for dist in res["DistributionList"]["Items"]:
if DomainName is None:
if Alias in dist["Aliases"].get("Items", []):
matched.append(dist)
else:
if dist["DomainName"] == DomainName:
matched.append(dist)
if len(matched) == 1:
Id = matched[0]["Id"]
elif len(matched) == 0:
return None
else:
raise Exception(
f"found {len(matched)} distribution with Alias: {Alias}. please specify by Id")
res = self.get_distribution(Id=Id)
res["Distribution"]["DistributionConfig"]["Id"] = Id
res["Distribution"]["DistributionConfig"]["ETag"] = res["ETag"]
res["Distribution"]["DistributionConfig"]["DomainName"] = res["Distribution"]["DomainName"]
return res
def list_continuous_deployment_policies_(self):
# Paginate manually using NextMarker
marker = None
while True:
if marker:
res = self.list_continuous_deployment_policies(Marker=marker)
else:
res = self.list_continuous_deployment_policies()
marker = res["ContinuousDeploymentPolicyList"].get("NextMarker")
for one in res["ContinuousDeploymentPolicyList"]["Items"]:
one["ContinuousDeploymentPolicy"]["ContinuousDeploymentPolicyConfig"]["Id"] = one["ContinuousDeploymentPolicy"]["Id"]
yield one["ContinuousDeploymentPolicy"]["ContinuousDeploymentPolicyConfig"]
if marker is None:
break
def try_get_continuous_deployment_policy(self, Id=None, StagingDistributionDnsNames=None):
if Id is None:
for one in self.list_continuous_deployment_policies_():
if set(one["StagingDistributionDnsNames"]["Items"]) == set(StagingDistributionDnsNames):
Id = one["Id"]
break
if Id:
return self.get_continuous_deployment_policy(Id=Id)
def list_cache_policies_(self):
marker = None
while True:
if marker:
res = self.list_cache_policies(Marker=marker)
else:
res = self.list_cache_policies()
marker = res["CachePolicyList"].get("NextMarker")
for one in res["CachePolicyList"]["Items"]:
one["CachePolicy"]["CachePolicyConfig"]["Id"] = one["CachePolicy"]["Id"]
yield one["CachePolicy"]["CachePolicyConfig"]
if marker is None:
break
def find_cache_policy(self, Id=None, Name=None):
if Id is None:
cache_policies = list(self.list_cache_policies_())
res = [one for one in cache_policies if one["Name"] == Name]
if len(res) == 0:
return None
Id = res[0]["Id"]
result = self.get_cache_policy(Id=Id)
if result is None:
return None
Id = result["CachePolicy"]["Id"]
config = result["CachePolicy"]["CachePolicyConfig"]
config["Id"] = Id
config["ETag"] = result["ETag"]
return config
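# Bind the CloudFront helper functions defined above to a shared boto3 client
# as methods via types.MethodType; the STS helper is bound to its own client below.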
cloudfront = boto3.client('cloudfront')
cloudfront.find_distribution = types.MethodType(find_distribution, cloudfront)
cloudfront.list_cache_policies_ = types.MethodType(
list_cache_policies_, cloudfront)
cloudfront.find_cache_policy = types.MethodType(find_cache_policy, cloudfront)
cloudfront.list_continuous_deployment_policies_ = types.MethodType(
list_continuous_deployment_policies_, cloudfront)
cloudfront.try_get_continuous_deployment_policy = types.MethodType(
try_get_continuous_deployment_policy, cloudfront)
sts = boto3.client("sts")
sts.get_account_id = types.MethodType(get_account_id, sts)
def find_instance(this, InstanceId, Name):
if InstanceId:
return this.describe_instances(InstanceIds=[InstanceId])
else:
return this.describe_instances(Filters=[{
'Name': 'tag:Name',
'Values': [Name]
}])
def find_volume(this, VolumeId=None, Name=None):
if VolumeId:
return this.describe_volumes(VolumeIds=[VolumeId])
else:
return this.describe_volumes(Filters=[{
'Name': 'tag:Name',
'Values': [Name]
}])
def find_launch_template(this, LaunchTemplateId=None, LaunchTemplateName=None):
if LaunchTemplateId:
return this.describe_launch_templates(LaunchTemplateIds=[LaunchTemplateId])
else:
return this.describe_launch_templates(LaunchTemplateNames=[LaunchTemplateName])
def update_instance(this, content, current):
fields = [
"SourceDestCheck",
"Attribute",
"BlockDeviceMappings",
"DisableApiTermination",
"EbsOptimized",
"EnaSupport",
"Groups",
"InstanceId",
"InstanceInitiatedShutdownBehavior",
"InstanceType",
"Kernel",
"Ramdisk",
"SriovNetSupport",
"UserData",
"Value",
"DisableApiStop",
]
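# NOTE: attribute updates are not implemented yet; the loop below is currently a no-op stub.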
for field in fields:
pass
ec2 = boto3.client("ec2")
ec2.find_instance = types.MethodType(find_instance, ec2)
ec2.find_volume = types.MethodType(find_volume, ec2)
ec2.find_launch_template = types.MethodType(find_launch_template, ec2)
def find_repository(this, registryId, repositoryName):
if registryId:
return this.describe_repositories(registryId=registryId, repositoryNames=[repositoryName])
else:
return this.describe_repositories(repositoryNames=[repositoryName])
ecr = boto3.client('ecr')
ecr.find_repository = types.MethodType(find_repository, ecr)
clients = {
"ecr":
ecr,
"ecs":
boto3.client('ecs'),
"elbv2":
boto3.client("elbv2"),
"ec2": ec2,
"iam":
boto3.client("iam"),
"appmesh":
boto3.client("appmesh"),
"servicediscovery":
boto3.client("servicediscovery"),
"secretsmanager":
boto3.client("secretsmanager"),
"ghe":
GHE(base_url="https://git.toolsfdg.net/api/v3",
login_or_token=os.getenv('GITHUB_TOKEN')),
"application-autoscaling":
boto3.client("application-autoscaling"),
"sqs":
boto3.resource('sqs'),
"lambda":
boto3.client('lambda'),
"asg":
boto3.client('autoscaling'),
"cloudwatch":
boto3.client('cloudwatch'),
"cloudfront":
cloudfront,
"sts":
sts,
}
|
PypiClean
|
/pdc_dp_means-0.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/sklearn/neighbors/_graph.py
|
# Author: Jake Vanderplas <[email protected]>
# Tom Dupre la Tour
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from ._base import KNeighborsMixin, RadiusNeighborsMixin
from ._base import NeighborsBase
from ._unsupervised import NearestNeighbors
from ..base import TransformerMixin, ClassNamePrefixFeaturesOutMixin
from ..utils._param_validation import StrOptions
from ..utils.validation import check_is_fitted
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(["metric", "p", "metric_params"], [metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for the same parameter."
% (func_param, param_name, est_params[param_name])
)
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
if include_self == "auto":
include_self = mode == "connectivity"
# it does not include each sample as its own neighbors
if not include_self:
X = None
return X
def kneighbors_graph(
X,
n_neighbors,
*,
mode="connectivity",
metric="minkowski",
p=2,
metric_params=None,
include_self=False,
n_jobs=None,
):
"""Compute the (weighted) graph of k-Neighbors for points in X.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like of shape (n_samples, n_features) or BallTree
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
include_self : bool or 'auto', default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
Graph where A[i, j] is assigned the weight of edge that
connects i to j. The matrix is of CSR format.
See Also
--------
radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(
n_neighbors=n_neighbors,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X._fit_X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(
X,
radius,
*,
mode="connectivity",
metric="minkowski",
p=2,
metric_params=None,
include_self=False,
n_jobs=None,
):
"""Compute the (weighted) graph of Neighbors for points in X.
Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like of shape (n_samples, n_features) or BallTree
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
include_self : bool or 'auto', default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
Graph where A[i, j] is assigned the weight of edge that connects
i to j. The matrix is of CSR format.
See Also
--------
kneighbors_graph: Compute the weighted graph of k-neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
... include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(
radius=radius,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X._fit_X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
class KNeighborsTransformer(
ClassNamePrefixFeaturesOutMixin, KNeighborsMixin, TransformerMixin, NeighborsBase
):
"""Transform X into a (weighted) graph of k nearest neighbors.
The transformed data is a sparse graph as returned by kneighbors_graph.
Read more in the :ref:`User Guide <neighbors_transformer>`.
.. versionadded:: 0.22
Parameters
----------
mode : {'distance', 'connectivity'}, default='distance'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
n_neighbors : int, default=5
Number of neighbors for each sample in the transformed sparse graph.
For compatibility reasons, as each sample is considered as its own
neighbor, one extra neighbor will be computed when mode == 'distance'.
In this case, the sparse graph contains (n_neighbors + 1) neighbors.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
p : int, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
See Also
--------
kneighbors_graph : Compute the weighted graph of k-neighbors for
points in X.
RadiusNeighborsTransformer : Transform X into a weighted graph of
neighbors nearer than a radius.
Examples
--------
>>> from sklearn.datasets import load_wine
>>> from sklearn.neighbors import KNeighborsTransformer
>>> X, _ = load_wine(return_X_y=True)
>>> X.shape
(178, 13)
>>> transformer = KNeighborsTransformer(n_neighbors=5, mode='distance')
>>> X_dist_graph = transformer.fit_transform(X)
>>> X_dist_graph.shape
(178, 178)
"""
_parameter_constraints: dict = {
**NeighborsBase._parameter_constraints,
"mode": [StrOptions({"distance", "connectivity"})],
}
_parameter_constraints.pop("radius")
def __init__(
self,
*,
mode="distance",
n_neighbors=5,
algorithm="auto",
leaf_size=30,
metric="minkowski",
p=2,
metric_params=None,
n_jobs=None,
):
super(KNeighborsTransformer, self).__init__(
n_neighbors=n_neighbors,
radius=None,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.mode = mode
def fit(self, X, y=None):
"""Fit the k-nearest neighbors transformer from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : KNeighborsTransformer
The fitted k-nearest neighbors transformer.
"""
self._validate_params()
self._fit(X)
self._n_features_out = self.n_samples_fit_
return self
def transform(self, X):
"""Compute the (weighted) graph of Neighbors for points in X.
Parameters
----------
X : array-like of shape (n_samples_transform, n_features)
Sample data.
Returns
-------
Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
check_is_fitted(self)
add_one = self.mode == "distance"
return self.kneighbors_graph(
X, mode=self.mode, n_neighbors=self.n_neighbors + add_one
)
def fit_transform(self, X, y=None):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : sparse matrix of shape (n_samples, n_samples)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
return self.fit(X).transform(X)
def _more_tags(self):
return {
"_xfail_checks": {
"check_methods_sample_order_invariance": "check is not applicable."
}
}
class RadiusNeighborsTransformer(
ClassNamePrefixFeaturesOutMixin,
RadiusNeighborsMixin,
TransformerMixin,
NeighborsBase,
):
"""Transform X into a (weighted) graph of neighbors nearer than a radius.
The transformed data is a sparse graph as returned by
`radius_neighbors_graph`.
Read more in the :ref:`User Guide <neighbors_transformer>`.
.. versionadded:: 0.22
Parameters
----------
mode : {'distance', 'connectivity'}, default='distance'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
radius : float, default=1.0
Radius of neighborhood in the transformed sparse graph.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
p : int, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
See Also
--------
kneighbors_graph : Compute the weighted graph of k-neighbors for
points in X.
KNeighborsTransformer : Transform X into a weighted graph of k
nearest neighbors.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import load_wine
>>> from sklearn.cluster import DBSCAN
>>> from sklearn.neighbors import RadiusNeighborsTransformer
>>> from sklearn.pipeline import make_pipeline
>>> X, _ = load_wine(return_X_y=True)
>>> estimator = make_pipeline(
... RadiusNeighborsTransformer(radius=42.0, mode='distance'),
... DBSCAN(eps=25.0, metric='precomputed'))
>>> X_clustered = estimator.fit_predict(X)
>>> clusters, counts = np.unique(X_clustered, return_counts=True)
>>> print(counts)
[ 29 15 111 11 12]
"""
_parameter_constraints: dict = {
**NeighborsBase._parameter_constraints,
"mode": [StrOptions({"distance", "connectivity"})],
}
_parameter_constraints.pop("n_neighbors")
def __init__(
self,
*,
mode="distance",
radius=1.0,
algorithm="auto",
leaf_size=30,
metric="minkowski",
p=2,
metric_params=None,
n_jobs=None,
):
super(RadiusNeighborsTransformer, self).__init__(
n_neighbors=None,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.mode = mode
def fit(self, X, y=None):
"""Fit the radius neighbors transformer from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : RadiusNeighborsTransformer
The fitted radius neighbors transformer.
"""
self._validate_params()
self._fit(X)
self._n_features_out = self.n_samples_fit_
return self
def transform(self, X):
"""Compute the (weighted) graph of Neighbors for points in X.
Parameters
----------
X : array-like of shape (n_samples_transform, n_features)
Sample data.
Returns
-------
Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
check_is_fitted(self)
return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True)
def fit_transform(self, X, y=None):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : sparse matrix of shape (n_samples, n_samples)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
return self.fit(X).transform(X)
def _more_tags(self):
return {
"_xfail_checks": {
"check_methods_sample_order_invariance": "check is not applicable."
}
}
|
PypiClean
|
/backtradermsms-1.9.75.123-py3-none-any.whl/backtrader/lineiterator.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import operator
import sys
from .utils.py3 import map, range, zip, with_metaclass, string_types
from .utils import DotDict
from .lineroot import LineRoot, LineSingle
from .linebuffer import LineActions, LineNum
from .lineseries import LineSeries, LineSeriesMaker
from .dataseries import DataSeries
from . import metabase
class MetaLineIterator(LineSeries.__class__):
def donew(cls, *args, **kwargs):
_obj, args, kwargs = \
super(MetaLineIterator, cls).donew(*args, **kwargs)
# Prepare to hold children that need to be calculated and
# influence minperiod - Moved here to support LineNum below
_obj._lineiterators = collections.defaultdict(list)
# Scan args for datas ... if none are found,
# use the _owner (to have a clock)
mindatas = _obj._mindatas
lastarg = 0
_obj.datas = []
for arg in args:
if isinstance(arg, LineRoot):
_obj.datas.append(LineSeriesMaker(arg))
elif not mindatas:
break  # found a non-data and no more datas need to be collected
else:
try:
_obj.datas.append(LineSeriesMaker(LineNum(arg)))
except:
# Not a LineNum and is not a LineSeries - bail out
break
mindatas = max(0, mindatas - 1)
lastarg += 1
newargs = args[lastarg:]
# If no datas have been passed to an indicator ... use the
# main datas of the owner, easing up adding "self.data" ...
if not _obj.datas and isinstance(_obj, (IndicatorBase, ObserverBase)):
_obj.datas = _obj._owner.datas[0:mindatas]
# Create a dictionary to be able to check for presence
# lists in python use "==" operator when testing for presence with "in"
# which doesn't really check for presence but for equality
_obj.ddatas = {x: None for x in _obj.datas}
# For each found data add access member -
# for the first data 2 (data and data0)
if _obj.datas:
_obj.data = data = _obj.datas[0]
for l, line in enumerate(data.lines):
linealias = data._getlinealias(l)
if linealias:
setattr(_obj, 'data_%s' % linealias, line)
setattr(_obj, 'data_%d' % l, line)
for d, data in enumerate(_obj.datas):
setattr(_obj, 'data%d' % d, data)
for l, line in enumerate(data.lines):
linealias = data._getlinealias(l)
if linealias:
setattr(_obj, 'data%d_%s' % (d, linealias), line)
setattr(_obj, 'data%d_%d' % (d, l), line)
# Parameter values have now been set before __init__
_obj.dnames = DotDict([(d._name, d)
for d in _obj.datas if getattr(d, '_name', '')])
return _obj, newargs, kwargs
def dopreinit(cls, _obj, *args, **kwargs):
_obj, args, kwargs = \
super(MetaLineIterator, cls).dopreinit(_obj, *args, **kwargs)
# if no datas were found use, use the _owner (to have a clock)
_obj.datas = _obj.datas or [_obj._owner]
# 1st data source is our ticking clock
_obj._clock = _obj.datas[0]
# To automatically set the period Start by scanning the found datas
# No calculation can take place until all datas have yielded "data"
# A data could be an indicator and it could take x bars until
# something is produced
_obj._minperiod = \
max([x._minperiod for x in _obj.datas] or [_obj._minperiod])
# The lines carry at least the same minperiod as
# that provided by the datas
for line in _obj.lines:
line.addminperiod(_obj._minperiod)
return _obj, args, kwargs
def dopostinit(cls, _obj, *args, **kwargs):
_obj, args, kwargs = \
super(MetaLineIterator, cls).dopostinit(_obj, *args, **kwargs)
# my minperiod is as large as the minperiod of my lines
_obj._minperiod = max([x._minperiod for x in _obj.lines])
# Recalc the period
_obj._periodrecalc()
# Register (my)self as indicator to owner once
# _minperiod has been calculated
if _obj._owner is not None:
_obj._owner.addindicator(_obj)
return _obj, args, kwargs
class LineIterator(with_metaclass(MetaLineIterator, LineSeries)):
_nextforce = False # force cerebro to run in next mode (runonce=False)
_mindatas = 1
_ltype = LineSeries.IndType
plotinfo = dict(plot=True,
subplot=True,
plotname='',
plotskip=False,
plotabove=False,
plotlinelabels=False,
plotlinevalues=True,
plotvaluetags=True,
plotymargin=0.0,
plotyhlines=[],
plotyticks=[],
plothlines=[],
plotforce=False,
plotmaster=None,)
def _periodrecalc(self):
# last check in case not all lineiterators were assigned to
# lines (directly or indirectly after some operations)
# An example is Kaufman's Adaptive Moving Average
indicators = self._lineiterators[LineIterator.IndType]
indperiods = [ind._minperiod for ind in indicators]
indminperiod = max(indperiods or [self._minperiod])
self.updateminperiod(indminperiod)
def _stage2(self):
super(LineIterator, self)._stage2()
for data in self.datas:
data._stage2()
for lineiterators in self._lineiterators.values():
for lineiterator in lineiterators:
lineiterator._stage2()
def _stage1(self):
super(LineIterator, self)._stage1()
for data in self.datas:
data._stage1()
for lineiterators in self._lineiterators.values():
for lineiterator in lineiterators:
lineiterator._stage1()
def getindicators(self):
return self._lineiterators[LineIterator.IndType]
def getindicators_lines(self):
return [x for x in self._lineiterators[LineIterator.IndType]
if hasattr(x.lines, 'getlinealiases')]
def getobservers(self):
return self._lineiterators[LineIterator.ObsType]
def addindicator(self, indicator):
# store in right queue
self._lineiterators[indicator._ltype].append(indicator)
# use getattr because line buffers don't have this attribute
if getattr(indicator, '_nextforce', False):
# the indicator needs runonce=False
o = self
while o is not None:
if o._ltype == LineIterator.StratType:
o.cerebro._disable_runonce()
break
o = o._owner # move up the hierarchy
def bindlines(self, owner=None, own=None):
if not owner:
owner = 0
if isinstance(owner, string_types):
owner = [owner]
elif not isinstance(owner, collections.Iterable):
owner = [owner]
if not own:
own = range(len(owner))
if isinstance(own, string_types):
own = [own]
elif not isinstance(own, collections.Iterable):
own = [own]
for lineowner, lineown in zip(owner, own):
if isinstance(lineowner, string_types):
lownerref = getattr(self._owner.lines, lineowner)
else:
lownerref = self._owner.lines[lineowner]
if isinstance(lineown, string_types):
lownref = getattr(self.lines, lineown)
else:
lownref = self.lines[lineown]
lownref.addbinding(lownerref)
return self
# Alias which may be more readable
bind2lines = bindlines
bind2line = bind2lines
def _next(self):
clock_len = self._clk_update()
for indicator in self._lineiterators[LineIterator.IndType]:
indicator._next()
self._notify()
if self._ltype == LineIterator.StratType:
# supporting datas with different lengths
minperstatus = self._getminperstatus()
if minperstatus < 0:
self.next()
elif minperstatus == 0:
self.nextstart() # only called for the 1st value
else:
self.prenext()
else:
# assume indicators and others operate on same length datas
# although the above operation can be generalized
if clock_len > self._minperiod:
self.next()
elif clock_len == self._minperiod:
self.nextstart() # only called for the 1st value
elif clock_len:
self.prenext()
def _clk_update(self):
clock_len = len(self._clock)
if clock_len != len(self):
self.forward()
return clock_len
def _once(self):
self.forward(size=self._clock.buflen())
for indicator in self._lineiterators[LineIterator.IndType]:
indicator._once()
for observer in self._lineiterators[LineIterator.ObsType]:
observer.forward(size=self.buflen())
for data in self.datas:
data.home()
for indicator in self._lineiterators[LineIterator.IndType]:
indicator.home()
for observer in self._lineiterators[LineIterator.ObsType]:
observer.home()
self.home()
# These 3 remain empty for a strategy and therefore play no role
# because a strategy will always be executed on a next basis
# indicators are each called with its min period
self.preonce(0, self._minperiod - 1)
self.oncestart(self._minperiod - 1, self._minperiod)
self.once(self._minperiod, self.buflen())
for line in self.lines:
line.oncebinding()
def preonce(self, start, end):
pass
def oncestart(self, start, end):
self.once(start, end)
def once(self, start, end):
pass
def prenext(self):
'''
This method will be called before the minimum period of all
        datas/indicators has been met for the strategy to start executing
'''
pass
def nextstart(self):
'''
This method will be called once, exactly when the minimum period for
        all datas/indicators has been met. The default behavior is to call
next
'''
# Called once for 1st full calculation - defaults to regular next
self.next()
def next(self):
'''
This method will be called for all remaining data points when the
        minimum period for all datas/indicators has been met.
'''
pass
def _addnotification(self, *args, **kwargs):
pass
def _notify(self):
pass
def _plotinit(self):
pass
def qbuffer(self, savemem=0):
if savemem:
for line in self.lines:
line.qbuffer()
# If called, anything under it, must save
for obj in self._lineiterators[self.IndType]:
obj.qbuffer(savemem=1)
# Tell datas to adjust buffer to minimum period
for data in self.datas:
data.minbuffer(self._minperiod)
# This 3 subclasses can be used for identification purposes within LineIterator
# or even outside (like in LineObservers)
# for the 3 subbranches without generating circular import references
class DataAccessor(LineIterator):
PriceClose = DataSeries.Close
PriceLow = DataSeries.Low
PriceHigh = DataSeries.High
PriceOpen = DataSeries.Open
PriceVolume = DataSeries.Volume
PriceOpenInteres = DataSeries.OpenInterest
PriceDateTime = DataSeries.DateTime
class IndicatorBase(DataAccessor):
pass
class ObserverBase(DataAccessor):
pass
class StrategyBase(DataAccessor):
pass
# Utility class to couple lines/lineiterators which may have different lengths
# Will only work when runonce=False is passed to Cerebro
class SingleCoupler(LineActions):
def __init__(self, cdata, clock=None):
super(SingleCoupler, self).__init__()
self._clock = clock if clock is not None else self._owner
self.cdata = cdata
self.dlen = 0
self.val = float('NaN')
def next(self):
if len(self.cdata) > self.dlen:
self.val = self.cdata[0]
self.dlen += 1
self[0] = self.val
class MultiCoupler(LineIterator):
_ltype = LineIterator.IndType
def __init__(self):
super(MultiCoupler, self).__init__()
self.dlen = 0
        self.dsize = self.fullsize()  # shortcut for number of lines
self.dvals = [float('NaN')] * self.dsize
def next(self):
if len(self.data) > self.dlen:
self.dlen += 1
for i in range(self.dsize):
self.dvals[i] = self.data.lines[i][0]
for i in range(self.dsize):
self.lines[i][0] = self.dvals[i]
def LinesCoupler(cdata, clock=None, **kwargs):
if isinstance(cdata, LineSingle):
return SingleCoupler(cdata, clock) # return for single line
cdatacls = cdata.__class__ # copy important structures before creation
try:
LinesCoupler.counter += 1 # counter for unique class name
except AttributeError:
LinesCoupler.counter = 0
# Prepare a MultiCoupler subclass
nclsname = str('LinesCoupler_%d' % LinesCoupler.counter)
ncls = type(nclsname, (MultiCoupler,), {})
thismod = sys.modules[LinesCoupler.__module__]
setattr(thismod, ncls.__name__, ncls)
# Replace lines et al., to get a sensible clone
ncls.lines = cdatacls.lines
ncls.params = cdatacls.params
ncls.plotinfo = cdatacls.plotinfo
ncls.plotlines = cdatacls.plotlines
obj = ncls(cdata, **kwargs) # instantiate
# The clock is set here to avoid it being interpreted as a data by the
# LineIterator background scanning code
if clock is None:
clock = getattr(cdata, '_clock', None)
if clock is not None:
nclock = getattr(clock, '_clock', None)
if nclock is not None:
clock = nclock
else:
nclock = getattr(clock, 'data', None)
if nclock is not None:
clock = nclock
if clock is None:
clock = obj._owner
obj._clock = clock
return obj
# Add an alias (which seems a lot more sensible) for "Single Line" lines
LineCoupler = LinesCoupler
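# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# how the couplers above are typically reached from user code. Calling a line
# object with no arguments (e.g. ``self.data1.close()``) routes through
# LinesCoupler and yields a Single/MultiCoupler that keeps lines of different
# lengths (e.g. daily vs. weekly bars) in step. The strategy name and data
# setup below are hypothetical; as noted above, couplers only work when
# Cerebro runs with runonce=False.
def _coupler_usage_sketch():
    import backtrader as bt

    class CrossTimeframeStrategy(bt.Strategy):
        def __init__(self):
            # Assumes the caller added two feeds: data0 (daily), data1 (weekly).
            # Calling the weekly close line couples it to the daily clock so it
            # can be compared bar-by-bar against the daily close.
            weekly_close = self.data1.close()   # -> SingleCoupler
            self.signal = self.data0.close > weekly_close

        def next(self):
            if self.signal[0]:
                pass  # e.g. place an order here

    cerebro = bt.Cerebro(runonce=False)  # couplers require event-driven "next" mode
    return cerebro, CrossTimeframeStrategy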
/machnamh-unmakingyou-0.0.23.tar.gz/machnamh-unmakingyou-0.0.23/machnamh/helper.py
# In[8]:
from io import StringIO
import os
import sys
from datetime import date
import re
import random
import math
from collections import Counter, namedtuple
import gc
import threading
import time
from itertools import cycle, chain, combinations
import itertools
import warnings
import kaleido
from contextlib import suppress
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import cross_validate
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.pipeline import Pipeline
from sklearn import preprocessing
#import pprint
import pandas_profiling
from pandas_profiling.config import config
from pandas_profiling.model.base import Variable
import phik
import ipywidgets as widgets
from ipywidgets import Layout, Button, Box
from ipywidgets import interact, interact_manual
from IPython.display import display, clear_output, HTML
import plotly.express as px
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
#from plotly.graph_objs import graph_objs as go
import ipyfilechooser
from ipyfilechooser import FileChooser
import dill
from IPython.core.display import display, HTML
import shap
from aequitas.group import Group
from aequitas.bias import Bias
from aequitas.fairness import Fairness
from aequitas.plotting import Plot
from aequitas.preprocessing import preprocess_input_df
from scipy import stats
from scipy.stats import chi2_contingency
from scipy.stats import chi2
from typing import Callable, Dict, List, Optional, Union
import benfordslaw as bl
import missingno as msno
class helper_methods():
def __init__(self):
self.text_color = "green"
get_ipython().run_cell_magic('javascript', '', 'IPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}')
self.worldview = """
<b>Worldview:</b> In the context of this framework a "Worldview" is a set of assumptions about a
physical and social reality pertaining to a human feature or attribute, or to the measurement of same.
        As context must be taken into consideration, there is no one fundamentally correct worldview but rather
        a reflection of a particular philosophy of life, or a conception of the world, as it relates to each of an
        individual's apparently quantifiable features or attributes. In the case of this framework, the focus is, in particular, on the worldview
held concerning any disparities in features or attributes that might be detected across groups within protected
features such as race, gender, age etc.
A disparity may, for example, refer to a non-proportionate representation or a significant difference in
distribution. <br><br>
Two worldviews have been defined for this purpose: <br>
"""
self.worldview_biological = """
<b>Inherent or biological worldview: </b>This worldview postulates that either chance or innate,
inherent physiological, biochemical, neurological, cultural and/or genetic factors influence any
disparities in features or attributes that might be detected across groups (categorised by race, gender,
age etc).
This worldview could be quite easily applied to the measurements of weight, height, BMI or similar easily
quantifiable features to be used as predictors for a specific outcome. The worldview, however, becomes
more complex for those human attributes or features which are harder to quantify, such as grit, determination,
intelligence, cognitive ability, self-control, growth mindset, reasoning, imagination, reliability etc.
This Inherent or biological worldview is closely aligned with the concept of <b>individual fairness</b>,
where the fairness goal is to ensure that people who are ‘similar’ concerning a combination of the specific
observable and measurable features or attributes deemed relevant to the task or capabilities at hand,
        should receive close or similar rankings and therefore achieve similar outcomes.
With this worldview, observable and measurable features are considered to be inherently objective
with no adjustments deemed necessary albeit with the knowledge that the human attributes or features
considered critical to success may have been identified as such by the dominant group. Notwithstanding
that a significant amount of the measurements used to gauge and/or measure these human features or attributes
have been conceptualised, created or implemented by that same dominant group or that those historic
        outcomes may also have been influenced by prejudice towards a protected group, or via favouritism
towards the dominant group.
This worldview might lead one to accept the idea that race, gender or class gaps are due to group
shortcomings, not structural or systemic ones, and therefore the outcome “is what it is”, such that
individuals should be ranked with no consideration to differences in outcome across groups.
        According to this worldview, structural inequalities often perpetuated by racism, sexism and other prejudices
<b>are not considered</b> to have any causal influence on outcomes.
This worldview may also lead one to believe that representation of certain groups in specific fields
        (such as STEM) is disproportionate to the representation in the population due to inherently different
preferences and/or abilities as opposed to the influence of social factors such as the exclusion,
marginalisation, and undermining of the potential of the underrepresented group or to the favouritism
(manifested through cognitive biases such as similarity bias etc) shown to other members of the dominant group.
This worldview might lead one to conclude that certain groups of individuals do not avoid careers in certain
        sectors due to lack of mentorship or the existence of (or the perception of the existence of) an exclusionary
workplace culture but rather because of their individual and inherent characteristics.
"""
self.worldview_social = """
<b>Social and environmental worldview: </b> This worldview postulates that social
and environmental factors, such as family income, parental educational backgrounds,
school, peer group, workplace, community, environmental availability of nutrition,
        correct environment for sleep, stereotype threat (and other cognitive biases)
often perpetuated by racism, sexism and other prejudices have influenced outcomes
in terms of any detected disparities across groups. Differences in outcome may be
        a reflection of inequalities in a society which have led to these
        outcomes. Identifying this has important
implications for the financial, professional, and social futures of particular
protected groups within the population. Discrimination, privilege, institutional
        racism, sexism and ableism are examples of causal influences which may impact outcomes
        or representation. Disparities may have been caused by intentional, explicit
discrimination against a protected group or by subtle, unconscious,
automatic discrimination as the result of favoritism towards the reference group,
or by other social and systemic factors. The term "affirmative action" is often
used to justify the offering of opportunities to members of protected groups who
do not otherwise appear to merit the opportunity. The offering of the opportunity is
often based upon personal qualities that are usually hard to quantify in an entirely
        objective way. However, it is important to note that due to social and environmental
        factors many measurements relating to human performance, merit, ability, etc.
        are also not necessarily objective.
"""
def display_html(self, text, color, size):
content = "<" + size + ">" + "<text style='color:" + color + "'>" + text + "</text>" + "</" + size + ">"
display (widgets.HTML(content, layout=Layout(width='100%')))
#################################################################################################
# VIEW Group representation in the data and display in the output area provided
#
#################################################################################################
def display_group_representation(self, data_frame, protected_features_list, output_area, _w=600, _h=600):
try:
with output_area:
clear_output(wait = True)
fig_wig_a, fig_wig_b = self.plot_donut(protected_features_list,
data_frame,
w=_w, h=_h,
title = "Representation of Protected group(s) in the data");
accordion = widgets.Accordion(children=[fig_wig_b, fig_wig_a])
accordion.set_title(0, 'Tree Map View')
accordion.set_title(1, 'Donut View')
display(accordion)
del fig_wig_a
del fig_wig_b
except:
print ("Error in display_group_representation")
#################################################################################################
    # VIEW analysis of NUMERIC features across protected groups, also used to show outcome distribution
# across groups
#################################################################################################
def numeric_feature_analysis_across_groups(self,
df,
feature,
protected_attributes_list,
label_y,
group_descriptions_dict,#will remove after refactor
label_encoding_dict,#will remove after refactor
reference_groups_dict,#will remove after refactor
_w=600, _h=600,
high_range_pos = True,
feature_data_dict = None):
local_layout = {'width': 'auto', 'visibility':'visible'}
local_layout_hidden = {'width': 'auto', 'visibility':'hidden'}
local_style = {'description_width':'initial'}
HIGH_RANGE_POSITIVE = high_range_pos
#If any of the protected features have a description replace the entry in the data-frame
#with the description so it is easier to read.
def show_analysis(selected_protected, label, curve_type, remove_outliers):
#local method
#plot the representation of data in the dataframe per protected group
if selected_protected != "--select--":
#define a progress bar thread
data_frame = df.copy()
#########TO REFACTOR- OBTAINING THE ORIGINAL PROTECTED FOR ANALYSIS#########
#label_encoding_dict only used here.
# if group_descriptions_dict.get(selected_protected, False) != False:
# print("Descriptions have been saved for the Feature values")
#if label_encoding_dict.get(selected_protected, False) != False:
#print("Feature has been label encoded. view with pre-encoded values?")
#if selected_protected+"_bm" in data_frame.columns:
                #print ("feature has had values merged, view analysis with pre-merged values?")
if feature_data_dict == None: #refactor to only use a feature_data_dict
for feat in protected_attributes_list:
#get_feature_info returns _choice_dict_for_drop, original_values, label_encoded_values, descriptions
mapping = self.get_feature_info(feat,
data_frame[feat].dropna().unique(),
group_descriptions_dict,
label_encoding_dict,
{},
{})[0]
keys = list( mapping.keys())
values = list (mapping.values())
reverse_mapping = dict(zip(values, keys))
data_frame[feat] = data_frame[feat].map(reverse_mapping)###
#now the dataframe has the description.
elif feature_data_dict != None:#HERE we will keep this after refactoring
mapped_cols_df = self.map_values (data_frame,
protected_attributes_list,
feature_data_dict)
#swap original cols with mapped cols...
for feat in protected_attributes_list:
data_frame[feat] = mapped_cols_df[feat]
#########END TO REFACTOR-OBTAINING THE ORIGINAL PROTECTED FOR ANALYSIS END#########
#data_frame[feat] now contains the values in the way we want to analyse.
#ie merged, not-merged, encoded or not, with descriptions or not.
progress = widgets.FloatProgress(value=0.0,
min=0.0,
max=1.0)
progress.layout.width = '100%'
finished = False
def work(progress):
total = 200
for i in range(total):
if finished != True:
time.sleep(0.2)
progress.value = float(i+1)/total
else:
progress.value = 200
progress.style.bar_color = "green"
break
thread = threading.Thread(target=work, args=(progress,))
display(progress)
#start the progress bar thread
thread.start()
#If a description was saved, use the desc rather than the actual values
#to achieve this we change the contents of the column to reflect the
#description, not the value.
groups = data_frame[selected_protected].dropna().unique()
tab = widgets.Tab()
widget_html_arr = []
tab_titles = []
for group in groups:
filtered = data_frame[data_frame[selected_protected]==group]
html_summary, outliers = self.detect_outlier_and_describe(filtered[feature],
3,
data_type = "numeric")
widget_html_arr.append(widgets.HTML(html_summary))
tab_titles.append(str(group))
if remove_outliers == True:
for val in outliers:
indexNames = data_frame[ (data_frame[selected_protected] == group) & (data_frame[feature] == val) ].index
data_frame.drop(indexNames , inplace=True)
tab.children = widget_html_arr
for x in range(len(tab_titles)):
tab.set_title(x, tab_titles[x])
if curve_type == "normal":
text = ''' <b>Normal distribution:</b> A parametric approach which represents the behavior of most of the situations in
                    the universe. It is characterised by a bell-shaped curve. The diameter, weight, strength,
and many other characteristics of natural, human or machine-made items are normally distributed.
In humans, performance, outcomes, grade point averages etc. are all normally distributed.
The normal distribution really is a normal occurrence. If we compare the normal distribution
                    of training data outcomes across two groups we can perform a statistical test (such as the one below)
to determine if there is a <b>significant variance</b> between groups'''
if curve_type == "kde":
text = ''' <b>Kernel Density estimate:</b> is a nonparametric approach. Parametric estimation requires a
                    parametric family of distributions to be assumed (e.g. Normal distribution).
                    If you have a basis to believe the model is approximately correct it is advantageous to do parametric
inference. On the other hand it is possible that the data does not fit well to any member of the family.
In that case it is better to use kernel density estimation because it will construct a density that
                    reasonably fits the data. It does not require any assumption regarding parametric families.'''
fig_wig_dist, dist_output_per_group, groups = self.plot_distribution(selected_protected,
feature,
data_frame,
w=_w, h=_h,
y_high_positive = HIGH_RANGE_POSITIVE,
curve_type = curve_type)
distOut = widgets.Output(layout={})
with distOut:
display(fig_wig_dist)#as this returns an array of widgets
display(HTML("""Interactive version available <a href="output_dist.html" target="_blank"> here</a>"""))
self.display_html(text, "grey", "p")
#########TO REFACTOR- OBTAINING THE Priviliged/Reference group#########
#reference_groups_dict and group_descriptions_dict only used here
#reference_group for t_test is the actual value in the dataframe (not the description)
reference_group_to_use = ''
if feature_data_dict == None:
reference_group = reference_groups_dict[selected_protected]
#Now if there is a description we should convert to the description
try:
reference_group_to_use = group_descriptions_dict [selected_protected][reference_group]
except:
reference_group_to_use = reference_group
else:
if feature_data_dict != None: #Here, keep this one after refactoring
reference_group_to_use = feature_data_dict[selected_protected]['privileged_description']
if reference_group_to_use == '':
reference_group_to_use = feature_data_dict[selected_protected]['original_privileged']
#'label_enc_privileged'
#########TO REFACTOR END#########
#Now add the two tailed T-test*************
t_testOut = widgets.Output(layout={})
with t_testOut:
clear_output(wait = True)
self.get_t_test_info(dist_output_per_group, groups, reference_group_to_use)
#Now add correlation matrix*************
correlationOut = widgets.Output(layout={})
with correlationOut:
clear_output(wait = True)
self.feature_analysis_plot_correlation(data_frame[[feature]+[selected_protected]+[label_y]],
label_y,
feature,
selected_protected)
#Now add scatter plot*************
scatterPlotOut = widgets.Output(layout={})
if label_y != feature:
with scatterPlotOut:
tab_scat = widgets.Tab()
clear_output(wait = True)
wig1 = go.FigureWidget(px.scatter_3d(data_frame[[feature]+[selected_protected]+[label_y]], x=label_y, y=feature, z=selected_protected,
color=selected_protected,
width=600, height=600,
title=label_y+" "+feature+ " " + selected_protected))
wig2 = go.FigureWidget(px.scatter(data_frame[[feature]+[selected_protected]+[label_y]], x=label_y, y=feature,
color=selected_protected,
width=600, height=600,
title=label_y+" "+feature))
tab_scat.children = [wig1,wig2]
tab_scat.set_title(0, "3D view")
tab_scat.set_title(1, "2D view")
display(tab_scat)
BenfordsLawOut = widgets.Output(layout={})
with BenfordsLawOut:
benHTML = widgets.HTML("""
Also known as the Law of First Digits or the Phenomenon of Significant Digits,
this law is the finding that the first numerals of the numbers found in series
of records of the most varied sources do not display a uniform distribution,
but rather are arranged in such a way that the digit “1” is the most frequent,
followed by “2”, “3”...in a successively decreasing manner down to “9”. This
can be a useful way of analysing data for fraud detection for example.
<br><b>Note:</b> The law is not applicable to all numeric series but rather to those:<br>
<b>*</b> With a high order of magnitude.<br>
<b>*</b> No pre-established min or max <br>
<b>*</b> Not numbers used as identifiers, e.g social security, identity, bank acc.<br>
<b>*</b> Have a mean which is less than the median.<br>
<b>*</b> Data is not concentrated around the mean.<br>
""")
display(benHTML)
display (self.Benfords_law(data_frame[[feature]+[selected_protected]+[label_y]],
feature,
selected_protected))
if label_y != feature:
accordion = widgets.Accordion(children=[distOut,
tab,
t_testOut,
correlationOut,
scatterPlotOut,
BenfordsLawOut])
accordion.set_title(0, 'Distribution of ' + feature + ' grouped by '+selected_protected)
accordion.set_title(1, 'Describe (min/max/mean/outliers) for ' + feature + ' grouped by '+selected_protected)
accordion.set_title(2, 'Two tailed T-test for ' + feature + ' based on ' + selected_protected)
accordion.set_title(3, 'Correlation between ' + feature + ", " + label_y + ' and '+ selected_protected)
accordion.set_title(4, 'Scatter plot ' + feature + ' and ' + label_y)
accordion.set_title(5, 'Benfords_law for ' + feature + ' based on ' + selected_protected )
accordion.selected_index=0
if label_y == feature:
accordion = widgets.Accordion(children=[distOut,
tab,
t_testOut,
correlationOut,
BenfordsLawOut])
accordion.set_title(0, 'Distribution of ' + feature + ' grouped by '+selected_protected)
accordion.set_title(1, 'Describe (min/max/mean/outliers) for ' + feature + ' grouped by '+selected_protected)
accordion.set_title(2, 'Two tailed T-test for ' + feature + ' based on ' + selected_protected)
accordion.set_title(3, 'Correlation between ' + feature + ' and '+ selected_protected)
accordion.set_title(4,'Newcomb/Benford law for ' + feature + ' based on ' + selected_protected )
accordion.selected_index=0
display (accordion)
finished = True
del data_frame
if feature == label_y:
self.display_html("Analysis of the distribution of the target ("+ feature + ") across groups", "black", "h4")
else:
display(HTML("<h4>Select the protected feature:</h4> "))
interact(show_analysis,
selected_protected = widgets.Dropdown(description = "Protected Feature",
options = ["--select--"] + protected_attributes_list,
layout = local_layout,
style = local_style),
label = widgets.HTML(description=f"<b><font color='black'>{'Density estimation configuration :'}</b>",
style = {'description_width': 'initial'},
layout=Layout(width='90%')
),
curve_type = widgets.Dropdown(description = "Density Estimation",
options = {"Normal Distribution":"normal", "Kernel Density Estimation":"kde"},
layout = local_layout,
style = local_style),
remove_outliers = widgets.Checkbox(value=False,
description='Remove outliers (per group) for analysis',
disabled=False,
layout = local_layout,
style = local_style,
indent=False),
);
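    # ----------------------------------------------------------------------
    # Illustrative sketch (added for clarity, not called anywhere in this
    # module): the "normal" vs "kde" options above correspond to fitting a
    # parametric Normal curve versus a nonparametric kernel density estimate.
    # This hypothetical helper shows both estimates side by side using only
    # numpy/scipy, which are already imported above; the name and defaults
    # are assumptions, not part of the original API.
    def _density_estimate_sketch(self, values, grid_points=200):
        """Return (grid, normal_pdf, kde_pdf) for a 1-D numeric sample."""
        values = np.asarray(values, dtype=float)
        values = values[~np.isnan(values)]
        grid = np.linspace(values.min(), values.max(), grid_points)
        # Parametric: estimate mean/std and evaluate the Normal pdf on the grid.
        mu, sigma = stats.norm.fit(values)
        normal_pdf = stats.norm.pdf(grid, mu, sigma)
        # Nonparametric: Gaussian kernel density estimate evaluated on the grid.
        kde_pdf = stats.gaussian_kde(values)(grid)
        return grid, normal_pdf, kde_pdf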
#################################################################################################
    # VIEW analysis of CATEGORIC features across protected groups, also used to show outcome distribution
# across groups
#################################################################################################
def categoric_feature_analysis_across_groups(self,
df,
feature,
protected_attributes_list,
label_y,
group_descriptions_dict,
encoding_dict,
reference_groups_dict,
_w=600, _h=600,
high_range_pos = True):
local_layout = {'width': 'auto', 'visibility':'visible'}
local_layout_hidden = {'width': 'auto', 'visibility':'hidden'}
local_style = {'description_width':'initial'}
HIGH_RANGE_POSITIVE = high_range_pos
def show_analysis(selected_protected): #local method
#choose is the protected attribute we will analyze against
if selected_protected != "--select--":
#If a description was saved, use the desc rather than the actual values
#to achieve this we change the contents of the column to reflect the
#description, not the value.
data_frame = df.copy()
for feat in protected_attributes_list:
mapping = self.get_feature_info(feat,
data_frame[feat].dropna().unique(),
group_descriptions_dict,
encoding_dict,
{},{})[0]
keys = list( mapping.keys())
values = list (mapping.values())
reverse_mapping = dict(zip(values, keys))
data_frame[feat] = data_frame[feat].map(reverse_mapping)
####
#set up a threaded progress bar
progress = widgets.FloatProgress(value=0.0, min=0.0, max=1.0)
progress.layout.width = '100%'
finished = False
def work(progress):
total = 200
for i in range(total):
if finished != True:
time.sleep(0.2)
progress.value = float(i+1)/total
else:
progress.value = 200
progress.style.bar_color = "green"
break
thread = threading.Thread(target=work, args=(progress,))
display(progress)
#start the progress bar
thread.start()
groups = data_frame[selected_protected].dropna().unique()
output_values = data_frame[feature].dropna().unique()
layout = go.Layout(xaxis=dict(type='category'))
fig_hist_count = go.FigureWidget(layout=layout)
fig_hist_percent = go.FigureWidget(layout=layout)
with fig_hist_count.batch_update():
for group in groups:
temp = data_frame[[selected_protected, feature]].fillna("@Unknown")
temp = temp[temp[selected_protected]==group]
if feature == label_y:
if high_range_pos == True:
temp.loc[(temp[feature] == 1)] = "1(Positive impact)"
temp.loc[(temp[feature] == 0)] = "0(Negative impact)"
elif high_range_pos == False:
temp.loc[(temp[feature] == 0)] = "0(Positive impact)"
temp.loc[(temp[feature] == 1)] = "1(Negative impact)"
fig_hist_count.add_trace(go.Histogram(
x=temp[feature],
name = selected_protected +":"+group,
histfunc="count",
opacity=0.75))
fig_hist_percent.add_trace(go.Histogram(
x=temp[feature],
name = selected_protected +":"+group,
histnorm = 'percent',
opacity=0.75))
fig_hist_count.update_layout(
title_text='Count across groups', # title of plot
xaxis_title_text=feature, # xaxis label
yaxis_title_text='Count', # yaxis label
bargap = 0.2, # gap between bars of adjacent location coordinates
bargroupgap = 0.1, # gap between bars of the same location coordinates
legend_title = selected_protected,
autosize = False
)
fig_hist_percent.update_layout(
title_text='Percentage across groups', # title of plot
xaxis_title_text = selected_protected, # xaxis label
yaxis_title_text='Percent', # yaxis label
bargap=0.2, # gap between bars of adjacent location coordinates
bargroupgap=0.1, # gap between bars of the same location coordinates
legend_title = selected_protected,
autosize=False
)
####get information about each group, such as the count, num unique values.
describe_tab = widgets.Tab()
widget_html_arr = []
tab_titles = []
for group in groups:
filtered = data_frame[data_frame[selected_protected]==group]
html_summary = self.detect_outlier_and_describe(filtered[feature],
3,
data_type = "categoric")[0]
widget_html_arr.append(widgets.HTML(html_summary))
tab_titles.append(str(group))
describe_tab.children = widget_html_arr
for x in range(len(tab_titles)):
describe_tab.set_title(x, tab_titles[x])
histOut = widgets.Output(layout={})
with histOut:
hist_tab = widgets.Tab()
hist_tab.children = [fig_hist_count,fig_hist_percent]
hist_tab.set_title(0, "Count")
hist_tab.set_title(1, "Percentage")
display(hist_tab)
describeOut = widgets.Output(layout={})
with describeOut:
display(describe_tab)
sigOut = widgets.Output(layout={})
with sigOut:
#reference_group for t_test is the actual value in the dataframe (not the description)
reference_group = reference_groups_dict[selected_protected]
#Now if there is a description we should convert to the description
try:
reference_group_to_use = group_descriptions_dict [selected_protected][reference_group]
except:
reference_group_to_use = reference_group
self.get_chi_square_test_info(data_frame[[feature]+[selected_protected]],
feature,
selected_protected,
reference_group_to_use)
correlationOut = widgets.Output(layout={})
with correlationOut:
self.feature_analysis_plot_correlation(data_frame[[feature]+[selected_protected]+[label_y]],
label_y,feature,
selected_protected)
accordion = widgets.Accordion(children=[histOut,
describeOut,
sigOut,
correlationOut,
])
accordion.set_title(0, 'Count of ' + feature + ' grouped by '+ selected_protected)
accordion.set_title(1, 'Describe ' + feature + ' grouped by '+ selected_protected)
accordion.set_title(2, 'Pearson’s chi-squared significance test ' + feature + ' based on ' + selected_protected)
accordion.set_title(3, 'Correlation between ' + feature + ", " + label_y + ' and '+ selected_protected)
accordion.selected_index=0
display(accordion)
#end the progress bar thread
finished = True
del data_frame
if feature == label_y:
self.display_html("Analysis of the distribution of the target ("+ feature + ") across groups.", "black", "h4")
else:
self.display_html("Analysis of input feature: "+ feature + " across groups.", "black", "h4")
interact(show_analysis,
selected_protected = widgets.Dropdown(description = "Protected Feature",
options = ["--select--"] + protected_attributes_list,
layout = local_layout,
style = local_style),
)
################################################################################################
# Correlation plot for protected group and all values
#
################################################################################################
def plot_correlation_per_group(self, data_frame, protected_feature):
widget_dict = {}
plt.figure(figsize=(8, 8))
for group in data_frame[protected_feature].dropna().unique():
print(group)
temp_df = data_frame[data_frame[protected_feature]== group]
temp_df.drop(protected_feature, axis=1, inplace = True )
corr = self.phi_k_correlation(temp_df)
corr.reset_index(drop=True, inplace=True)
corr["index"] = pd.Series(list(corr.columns))
corr = corr.set_index("index")
heatmap = go.FigureWidget(go.Heatmap(z=corr,
zmin=0,
zmax=1,
x=corr.columns,
y=corr.columns,
xgap=1, ygap=1,
colorscale= px.colors.sequential.Blues,
colorbar_thickness=20,
colorbar_ticklen=3))
title = 'Correlation Matrix'
with heatmap.batch_update():
heatmap.update_layout(go.Layout(title_text=title, title_x=0.5,
width=300, height=300,
xaxis_showgrid=False,
yaxis_showgrid=False,
yaxis_autorange='reversed'
))
widget_dict[group] = heatmap
return widget_dict
        # Note: this code is unreachable because the method returns widget_dict above.
        box = widgets.HBox(list(widget_dict.values()))
        display(box)
#################################################################################################
# Correlation plot for feature or label vs protected feature
#
################################################################################################
def feature_analysis_plot_correlation(self, data_frame, label_y, feature, protected_feature):
#remove any duplicate column that might occur when feature is the label
data_frame = data_frame.loc[:,~data_frame.columns.duplicated()]
html = widgets.HTML("""<b>Phik (φk)</b><br>
Phik (φk) is a new and practical correlation coefficient that
works consistently between categorical, ordinal and interval
variables, captures non-linear dependency and reverts to
the Pearson correlation coefficient in case of a bivariate
normal input distribution. There is extensive documentation
available here https://phik.readthedocs.io/en/latest/index.html""")
display(html)
plt.figure(figsize=(6, 6))
if label_y != feature:
corr = self.phi_k_correlation(data_frame[[feature]+[protected_feature]+[label_y]])
res1 = corr.loc[ feature , : ][protected_feature]
res2 = corr.loc[ feature , : ][label_y]
text = "Correlation value for " + feature + " and " + protected_feature + " is " + str (res1)
text = text + "<br>Correlation value for " + feature + " and " + label_y + " is " + str (res2)
elif label_y == feature:
corr = self.phi_k_correlation(data_frame[[label_y]+[protected_feature]])
res1 = corr.loc[ feature , : ][protected_feature]
text = "Correlation value for " + feature + " and " + protected_feature + " is " + str (res1)
corr.reset_index(drop=True, inplace=True)
corr["index"] = pd.Series(list(corr.columns))
corr = corr.set_index("index")
heatmap = go.FigureWidget(go.Heatmap(z=corr,
x=corr.columns,
y=corr.columns,
xgap=1, ygap=1,
colorscale= px.colors.sequential.Blues,
colorbar_thickness=20,
colorbar_ticklen=3))
title = 'Correlation Matrix'
with heatmap.batch_update():
heatmap.update_layout(go.Layout(title_text=title, title_x=0.5,
width=300, height=300,
xaxis_showgrid=False,
yaxis_showgrid=False,
yaxis_autorange='reversed'
))
display(heatmap)
display (HTML(text))
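    # ----------------------------------------------------------------------
    # Illustrative sketch (added for clarity, not called anywhere in this
    # module): computing the Phik correlation matrix described above directly
    # on a small toy frame, via the pandas accessor registered by
    # ``import phik``. The column names and the interval_cols choice are
    # example assumptions.
    def _phik_matrix_sketch(self):
        toy = pd.DataFrame({
            "score": [52, 61, 58, 70, 66, 73, 49, 81],          # interval-scaled
            "group": ["a", "a", "b", "b", "a", "b", "a", "b"],  # categorical
            "hired": [0, 0, 1, 1, 0, 1, 0, 1],                  # binary label
        })
        # interval_cols tells phik which columns should be treated as numeric.
        return toy.phik_matrix(interval_cols=["score"])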
#################################################################################################
# VIEW Counts of categorical features or output
# view_categorical_counts(data_frame, feature, high_range_pos)
################################################################################################
def view_categorical_counts (self, data_frame, feature, high_range_pos = True ):
layout = go.Layout(xaxis=dict(type='category'))
if high_range_pos == True:
data_frame.loc[(data_frame[feature] == 1),feature] = "1(Positive impact)"
data_frame.loc[(data_frame[feature] == 0),feature] = "0(Negative impact)"
elif high_range_pos == False:
data_frame.loc[(data_frame[feature] == 0),feature] = "0(Positive impact)"
data_frame.loc[(data_frame[feature] == 1),feature] = "1(Negative impact)"
count = go.FigureWidget(layout=layout)
pcnt = go.FigureWidget(layout=layout)
count.add_trace(go.Histogram(
x=data_frame[feature],
histfunc="count",
opacity=0.75))
pcnt.add_trace(go.Histogram(
x=data_frame[feature],
histnorm = 'percent',
opacity=0.75))
count.update_layout(
title_text='Count of total', # title of plot
xaxis_title_text=feature, # xaxis label
yaxis_title_text='Count', # yaxis label
bargap = 0.2, # gap between bars of adjacent location coordinates
bargroupgap = 0.1, # gap between bars of the same location coordinates
autosize = False
)
pcnt.update_layout(
title_text='Percent of total', # title of plot
xaxis_title_text=feature, # xaxis label
yaxis_title_text='Percent', # yaxis label
bargap=0.2, # gap between bars of adjacent location coordinates
bargroupgap=0.1, # gap between bars of the same location coordinates
autosize=False
)
return count, pcnt
#################################################################################################
    # VIEW Counts of categorical features or output per protected group
    # view_categorical_counts_for_protected(data_frame, protected_feature, feature, label_y, high_range_pos)
#################################################################################################
def view_categorical_counts_for_protected(self,
data_frame,
protected_feature,
feature,
label_y,
high_range_pos = True):
groups = data_frame[protected_feature].dropna().unique()
output_values = data_frame[protected_feature].dropna().unique()
layout = go.Layout(xaxis=dict(type='category'))
fig_hist_count = go.FigureWidget(layout=layout)
fig_hist_percent = go.FigureWidget(layout=layout)
with fig_hist_count.batch_update():
for group in groups:
temp = data_frame[[protected_feature, feature]].fillna("@Unknown")
temp = temp[temp[protected_feature]==group]
if feature == label_y:
if high_range_pos == True:
temp.loc[(temp[feature] == 1),feature] = "1(Positive impact)"
temp.loc[(temp[feature] == 0),feature] = "0(Negative impact)"
elif high_range_pos == False:
temp.loc[(temp[feature] == 0),feature] = "0(Positive impact)"
temp.loc[(temp[feature] == 1),feature] = "1(Negative impact)"
fig_hist_count.add_trace(go.Histogram(
x=temp[feature],
name = protected_feature +":"+group,
histfunc="count",
opacity=0.75))
fig_hist_percent.add_trace(go.Histogram(
x=temp[feature],
name = protected_feature +":"+group,
histnorm = 'percent',
opacity=0.75))
fig_hist_count.update_layout(
title_text='Count across groups', # title of plot
xaxis_title_text=feature, # xaxis label
yaxis_title_text='Count', # yaxis label
bargap = 0.2, # gap between bars of adjacent location coordinates
bargroupgap = 0.1, # gap between bars of the same location coordinates
legend_title = protected_feature,
autosize = False
)
fig_hist_percent.update_layout(
title_text='Percentage across groups', # title of plot
xaxis_title_text = feature, # xaxis label
yaxis_title_text='Percent', # yaxis label
bargap=0.2, # gap between bars of adjacent location coordinates
bargroupgap=0.1, # gap between bars of the same location coordinates
legend_title = protected_feature,
autosize=False
)
return fig_hist_count, fig_hist_percent
#################################################################################################
# Convert encoded or merged features back to original values to allow for more
# intuitive analysis of fairness.
#################################################################################################
def convert_feature_column(self,
df,
feature,
feature_data):
# one-hot-encode (done)
# label_encode (done)
# merged (done)
# merged and one-hot-encode (TODO)
# merged and label_encode (TODO)
if len(feature_data['values_description']) == 0:
feature_data['values_description'] = feature_data['original_values']
# If the feature was one_hot_encoded but not merged, de-encode and return the original col,
# and original vals with description(if exists)
if feature_data['one_hot_enc'] == True and feature_data['values_merged'] == False:
df[feature] = self.de_hot_encode_feature (df,
feature_data['one_hot_enc_col_before'],
feature_data['one_hot_enc_cols_after'])
mapping_dict = dict(zip(feature_data['original_values'],
feature_data['values_description']))
df[feature] = df[feature].map(mapping_dict)
return df[feature]
# If the feature was label encoded but not merged, return the original col,
# and original vals with description(if exists)
elif feature_data['label_enc'] == True and feature_data['values_merged'] == False:
mapping_dict = dict(zip(feature_data['label_enc_values'],
feature_data['values_description']))
df[feature] = df[feature].map(mapping_dict)
return df[feature]
# If the feature was merged but not one-hot encoded or label encoded
elif feature_data['values_merged'] == True:
print ("merged!")
df[feature] = df[feature_data['before_merge_col']]
mapping_dict = dict(zip(feature_data['original_values'],
feature_data['values_description']))
df[feature] = df[feature].map(mapping_dict)
return df[feature]
else:
return df[feature]
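    # ----------------------------------------------------------------------
    # Illustrative sketch (added for clarity; this is NOT the module's
    # de_hot_encode_feature): one common way to collapse one-hot columns back
    # into a single categorical column, as convert_feature_column above
    # assumes. It assumes each row has a single 1 across the one-hot columns;
    # the "<feature>_<value>" naming convention is a hypothetical example.
    def _reverse_one_hot_sketch(self, df, feature, one_hot_cols):
        prefix = feature + "_"
        # idxmax picks, per row, the name of the one-hot column holding the 1,
        # then the prefix is stripped to recover the original category value.
        return df[one_hot_cols].idxmax(axis=1).str[len(prefix):]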
#################################################################################################
    # VIEW STATISTICS AROUND THE PROTECTED FEATURES/ATTRIBUTES
#
#################################################################################################
def set_decision_boundary(self, df, data_summary):
#if the function was already called, remove the generated column to start again.
if data_summary.y_value +'_binary' in df.columns:
df.drop(data_summary.y_value +'_binary', axis = 1, inplace = True)
#copy the input data_frame to avoid permanent changes as we will de-encode etc.
data_frame = df.copy()
out1 = widgets.Output(layout={})
local_layout = {'width': 'auto', 'visibility':'visible'}
local_layout_hidden = {'width': 'auto', 'visibility':'hidden'}
local_style = {'description_width':'initial'}
display (out1)
layout = go.Layout(xaxis=dict(type='category'))
try:
with out1:
clear_output(wait = True)
self.display_html("Description of Target", self.text_color, "h3")
if data_summary.Y_BINARY == True:
self.display_html("The target is a binary value(1 or 0)", "black", "p")
if data_summary.HIGH_RANGE_POSITIVE == True:
impactTxt = "<b>Positive</b>"
if data_summary.HIGH_RANGE_POSITIVE == False:
impactTxt = "<b>Negative</b>"
x = "An output of 1 has a <b>" + impactTxt + "</b> impact on an individual"
self.display_html(str(x), self.text_color, "h4")
elif data_summary.Y_CONTINUOUS == True:
self.display_html("The target is a continuous value", "black", "p")
y_Min = round(data_frame[data_summary.y_value].min(), 3)
y_Mean = round(data_frame[data_summary.y_value].mean(), 3)
y_Max = round(data_frame[data_summary.y_value].max(), 3)
text = ""
text = text + "Minimum: " + str(y_Min) + "<br>"
text = text + "Mean: " +str(y_Mean)+ "<br>"
text = text + "Max: " +str(y_Max)+ "<br>"
self.display_html(text, "black", "p")
if data_summary.HIGH_RANGE_POSITIVE == True:
impactTxt = "<b>Positive</b>"
if data_summary.HIGH_RANGE_POSITIVE == False:
impactTxt = "<b>Negative</b>"
x = "The Impact of a high output(ranking) on an individual or group is <b>" + impactTxt + ".</b>"
self.display_html(str(x), "black", "p")
self.display_html("Select the decision boundary between a positive and negative outcome for logistic regression training.", "black", "h4")
text = """Logistic regression is a predictive modelling algorithm that is used when the target (Y )
is binary categorical. That is, it can take only two values e.g 1 or 0.
The goal is to determine a mathematical equation that can be used to predict the probability
                of event 1. If you wish to use logistic regression to predict a successful outcome in terms of """ + data_summary.y_value + """,
                you must select a decision boundary at or above which the continuous value will represent 1"""
self.display_html(text, "black", "p")
                #revert to the values as we wish to see them
for protected_feat in data_summary.protected_before_transform:
data_frame[protected_feat] = self.convert_feature_column(
data_frame,
protected_feat,
data_summary.feature_data_dict[protected_feat])
def select_boundary(choose): #local method
#plot the representation of data in the dataframe per protected group
slider = widgets.IntSlider()
def set_outcome (s):
#data_summary.y_value
if slider.description == "Select Percentile":
try:
data_frame.drop(data_summary.y_value +'_binary', axis = 1, inplace = True)
except:
pass
s = s/100
data_frame[data_summary.y_value +'_Percentile_Rank'] = data_frame[data_summary.y_value].rank(pct = True)
data_frame.loc[data_frame[data_summary.y_value +'_Percentile_Rank'] >= s, data_summary.y_value+'_binary'] = 1
data_frame.loc[data_frame[data_summary.y_value +'_Percentile_Rank'] < s, data_summary.y_value+'_binary'] = 0
data_frame.drop(data_summary.y_value +'_Percentile_Rank', axis = 1, inplace = True)
#self.display_html(_text, self.text_color, "p")
#total = data_frame[[data_summary.y_value, data_summary.y_value+'_binary']].groupby(data_summary.y_value+'_binary').count().reset_index()
fig_widget_arr = []
tab_titles = []
hist_tab = widgets.Tab()
count, pcnt = self.view_categorical_counts(data_frame.copy(),
data_summary.y_value+'_binary',
data_summary.HIGH_RANGE_POSITIVE)
tab_titles.append("Total count")
fig_widget_arr.append(count)
tab_titles.append("Total percentage")
fig_widget_arr.append(pcnt)
for protected_feat in data_summary.protected_before_transform:
#protected_total = data_frame[[data_summary.y_value, data_summary.y_value+'_binary', protected_feat]].groupby([data_summary.y_value+'_binary',protected_feat]).count().reset_index()
#view_categorical_counts returns go.FigureWidget type.
count, pcnt = self.view_categorical_counts_for_protected(data_frame.copy(),
protected_feat,
data_summary.y_value+'_binary',
data_summary.y_value+'_binary',
data_summary.HIGH_RANGE_POSITIVE)
tab_titles.append(str(protected_feat) + " count")
fig_widget_arr.append(count)
tab_titles.append(str(protected_feat) + " percentage")
fig_widget_arr.append(pcnt)
hist_tab.children = fig_widget_arr
for x in range(len(tab_titles)):
hist_tab.set_title(x, tab_titles[x])
display(hist_tab)
#Now apply the modification to the original input df
df[data_summary.y_value+'_binary'] = data_frame[data_summary.y_value+'_binary']
elif slider.description == "Select n for Top_n":
try:
data_frame.drop(data_summary.y_value +'_binary', axis = 1, inplace = True)
except:
pass
                        #Sort descending (largest first), take the top s rows; the smallest of those is the cut-off value in position s.
yDivPoint = data_frame.sort_values(data_summary.y_value,ascending = False).head(s).min()[data_summary.y_value]
data_frame.loc[data_frame[data_summary.y_value ] >= yDivPoint, data_summary.y_value+'_binary'] = 1
data_frame.loc[data_frame[data_summary.y_value ] < yDivPoint, data_summary.y_value+'_binary'] = 0
self.display_html("""The """ + str(s) + """th position has value of <b>""" + str(yDivPoint) + """</b>, any value equal to or above
this will be set to <b>1</b>. Any value below this will be set to <b>0</b>""", "black", "p")
fig_widget_arr = []
tab_titles = []
hist_tab = widgets.Tab()
count, pcnt = self.view_categorical_counts(data_frame.copy(),
data_summary.y_value+'_binary',
data_summary.HIGH_RANGE_POSITIVE)
tab_titles.append("Total count")
fig_widget_arr.append(count)
tab_titles.append("Total percentage")
fig_widget_arr.append(pcnt)
for protected_feat in data_summary.protected_before_transform:
#protected_total = data_frame[[data_summary.y_value, data_summary.y_value+'_binary', protected_feat]].groupby([data_summary.y_value+'_binary',protected_feat]).count().reset_index()
#view_categorical_counts returns go.FigureWidget type.
count, pcnt = self.view_categorical_counts_for_protected(data_frame.copy(),
protected_feat,
data_summary.y_value+'_binary',
data_summary.y_value+'_binary',
data_summary.HIGH_RANGE_POSITIVE)
tab_titles.append(str(protected_feat) + " count")
fig_widget_arr.append(count)
tab_titles.append(str(protected_feat) + " percentage")
fig_widget_arr.append(pcnt)
hist_tab.children = fig_widget_arr
for x in range(len(tab_titles)):
hist_tab.set_title(x, tab_titles[x])
display(hist_tab)
#Now apply the modification to the original input df
df[data_summary.y_value+'_binary'] = data_frame[data_summary.y_value+'_binary']
if choose == "Mean":
try:
data_frame.drop(data_summary.y_value +'_binary', axis = 1, inplace = True)
except:
pass
text = "Values between " + str(y_Min) + " and " + str(y_Mean) + " will be converted to <b>0</b><br>"
text = text + "Values between " + str(y_Mean) + " and " + str(y_Max) + " will be converted to <b>1</b>"
self.display_html(text, "black", "p")
data_frame.loc[data_frame[data_summary.y_value] >= y_Mean, data_summary.y_value+'_binary'] = 1
data_frame.loc[data_frame[data_summary.y_value] < y_Mean, data_summary.y_value+'_binary'] = 0
fig_widget_arr = []
tab_titles = []
hist_tab = widgets.Tab()
count, pcnt = self.view_categorical_counts(data_frame.copy(),
data_summary.y_value+'_binary',
data_summary.HIGH_RANGE_POSITIVE)
tab_titles.append("Total count")
fig_widget_arr.append(count)
tab_titles.append("Total percentage")
fig_widget_arr.append(pcnt)
for protected_feat in data_summary.protected_before_transform:
#protected_total = data_frame[[data_summary.y_value, data_summary.y_value+'_binary', protected_feat]].groupby([data_summary.y_value+'_binary',protected_feat]).count().reset_index()
#view_categorical_counts returns go.FigureWidget type.
count, pcnt = self.view_categorical_counts_for_protected(data_frame.copy(),
protected_feat,
data_summary.y_value+'_binary',
data_summary.y_value+'_binary',
data_summary.HIGH_RANGE_POSITIVE)
tab_titles.append(str(protected_feat) + " count")
fig_widget_arr.append(count)
tab_titles.append(str(protected_feat) + " percentage")
fig_widget_arr.append(pcnt)
hist_tab.children = fig_widget_arr
for x in range(len(tab_titles)):
hist_tab.set_title(x, tab_titles[x])
display(hist_tab)
#Now apply the modification to the original input df
df[data_summary.y_value+'_binary'] = data_frame[data_summary.y_value+'_binary']
if choose == "Percentile":
slider = widgets.IntSlider(
description = "Select Percentile",
min=0, max=100,
step=1, value=80,
continuous_update=False,
style = local_style)
interact(set_outcome, s = slider)
if choose == "Top-n":
slider = widgets.IntSlider(
description = "Select n for Top_n",
min=10, max=1000,
step=10, value=100,
continuous_update=False,
style = local_style)
interact(set_outcome, s = slider)
_choose = widgets.Dropdown(
description = "Decision boundary determined by",
options = ["Mean","Top-n","Percentile"],
layout = local_layout,
style = local_style)
interact(select_boundary, choose = _choose)
change = widgets.Button(description="View dataframe head")
button_output = widgets.Output()
def on_button_clicked(b):
with button_output:
clear_output(wait = True)
display(df.head(5))
change.on_click(on_button_clicked)
display(change, button_output)
except Exception as e:
self.display_html("Something went wrong in method", self.text_color, "h4")
print (e)
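    # ----------------------------------------------------------------------
    # Illustrative sketch (added for clarity, not called anywhere in this
    # module): the "Percentile" option above binarises a continuous target by
    # percentile rank. This hypothetical helper shows the same idea in a few
    # lines; the parameter names are assumptions.
    def _percentile_boundary_sketch(self, frame, target, percentile=80):
        """Return a 0/1 series: 1 where ``target`` is at or above the cut-off."""
        ranks = frame[target].rank(pct=True)              # percentile ranks in (0, 1]
        return (ranks >= percentile / 100.0).astype(int)  # 1 above the boundary, else 0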
#################################################################################################
#
#
#################################################################################################
def create_label (self, row):
names = list (row.index)
values = list( row.values)
text = ""
for i in range (len(names)):
text = text + ":" + names[i] + "_" + str(values[i])
text = text[1:]
return text
#################################################################################################
#
#
#################################################################################################
def plot_donut(self, attributes_list, data_frame, w=800, h=800, title = "Result"):
num_of_donuts = len(attributes_list)
if num_of_donuts > 6:
num_of_donuts = 6
display (HTML("showing only the first 6 attributes"))
sequential_color_list = [
px.colors.sequential.Blues,
px.colors.sequential.Greens,
px.colors.sequential.Oranges,
px.colors.sequential.Purples,
px.colors.sequential.Reds,
px.colors.sequential.Greys,
px.colors.sequential.algae,
px.colors.sequential.amp]
color_pool = cycle(sequential_color_list)
pie_list = []
labels_arr = []
values_arr = []
color_arr = []
annotations_arr = []
annotate = dict(text='woops',
x=0.5, y=0.6,
font_size=15,
showarrow=False)
attribute_hierarchy = []
for a, pos in zip (attributes_list, range(len(attributes_list))):
attribute_hierarchy.append(a)
annotate['text'] = a
annotate['y'] = annotate['y']-0.05
annotations_arr.append(annotate.copy())
data_frame["count"] = 0
df = data_frame[attribute_hierarchy+["count"]].fillna("@Unknown").groupby(attribute_hierarchy).count().reset_index().rename(columns={"count": "values"})
df['labels'] = df.apply(lambda row : self.create_label(row[attribute_hierarchy]), axis = 1)
df['values'].fillna(0,inplace=True)
c = []
s = []
if pos == 0:
for l in range (len(df['labels'].to_numpy())):
c.append(next(color_pool)[0])
if l >= len(sequential_color_list):
l = l - len(sequential_color_list)
s.append(l)
df['colors'] = c
df['color_pool_pos'] = s
else:
temp_list = list(df['values'].to_numpy())#changed from .list
for count, color_index in zip(prev_counts, prev_color_pool) :
match = 0
for value, pos in zip (temp_list, range(len(temp_list))):
s.append(color_index)
try:
c.append (sequential_color_list[color_index][pos+1])
except:
c.append (sequential_color_list[color_index][2])
match = match + value
if match == count:
del temp_list[0:pos+1]
break
df['colors'] = c
df['color_pool_pos'] = s
labels_arr.append (df['labels'])
values_arr.append (df['values'])
color_arr.append (df['colors'])
prev_counts = df['values'].values
prev_color_pool = df['color_pool_pos'].values
hole = 0.8
x1 = 0
x2 =1
y1 = 0
y2 = 1
adjust = round((1.0 - hole)* 0.5,2)
for x in range (num_of_donuts):
pie_list.append(go.Pie(
hole=hole, #Sets the fraction of the radius to cut out of the pie. Use this to make a donut chart
sort=False,
direction='clockwise',
domain={'x': [x1, x2], 'y': [y1, y2]},
values=values_arr[x],
labels=labels_arr[x],
textinfo='label+percent',
textposition='inside',
name=attributes_list[x],
marker={'colors': color_arr[x],'line': {'color': 'black', 'width': 1}}
))
hole= round(hole - adjust, 2)
x1 = round (x1 + adjust, 2)
x2 = round (x2 - adjust, 2)
y1 = round (y1 + adjust, 2)
y2 = round (y2 - adjust, 2)
fig = go.FigureWidget(data=pie_list);#need to reverse the order?
fig.update_layout(autosize=False,
width=w,
height=h,
margin=dict(l=50,r=50,b=100, t=100,pad=4),
title=str(attribute_hierarchy),
#Add annotations in the center of the donut pies.
annotations=annotations_arr,
legend_orientation="h",
#paper_bgcolor='rgba(113, 136, 136, 1)', #for transparent set to (0,0,0,0)
#plot_bgcolor='rgba(113, 136, 136, 1)',
);
fig.update_traces(textposition='inside');
fig.update(layout_title_text=title,
layout_showlegend=False );
df["all"] = "all"
fig_2 = px.treemap(df,
path=["all"]+attributes_list,
values='values',
)
fig_2.data[0].textinfo = 'current path+ label+value+percent parent+percent root'
# # # # # Now create one donut per protected attribute for a clearer view if the call specifies this# # # # # #
fig_2.update(layout_title_text=title,
layout_showlegend=True );
fig_wig_2 = go.FigureWidget(fig_2);
#as this can be a pointer to the input, clean it up
data_frame.drop(["count"], axis=1, inplace = True)
gc.collect()
return fig, fig_wig_2
#################################################################################################
# Pearson’s Chi-Squared Test....
    # METHOD USED TO perform an independent chi-squared test. Can be used as a test for independence
# between categorical variables
#################################################################################################
#################################################################################################
def get_chi_square_test_info(self, df, feature, protected_feature, ref_group):
'''A categorical variable is a variable that may take on one of a set of labels.
        Here we examine how a categorical variable, specifically a protected feature such as
        Gender (Male, Female) or Race (Black, White), relates to another categorical variable
        such as Score or Success.
        Large values of X^2 indicate that observed and expected frequencies are far apart.
        Small values of X^2 indicate that observed frequencies are close to expected frequencies.
        X^2 gives a measure of the distance between observed and expected frequencies.
        The null hypothesis is that there is no difference between observed and expected
        frequencies beyond what would be expected by chance (no statistically significant difference).'''
try:
groups = df[protected_feature].dropna().unique()
table = pd.crosstab(df[protected_feature], df[feature])
prob = 0.95
#can be used to create multiple plots, however we only call it with attribute_list of len 1.
def test_res(group_1, group_2):
filter_table = table[table.index.isin([group_1,group_2])]
for col in filter_table.columns:
if filter_table[col].sum() == 0:
                        filter_table.drop(col, axis=1, inplace=True)
chi2_stat, p_value, dof, expected = chi2_contingency(filter_table)
######
#Interprert the critical value
# critical = chi2.ppf(prob, dof)
#print ("critical(chi2.ppf(prob, dof)): ", critical)
#if abs(chi2_stat) >= critical:
#print('Dependent (reject H0)')
#else:
#print('Independent (fail to reject H0)')
#######
# interpret p-value for consistency with other test
#alpha = 1.0 - prob
#print('significance=%.3f, p=%.3f' % (alpha, p_value))
#if p_value <= alpha:
# print('Dependent (reject H0)')
#else:
# print('Independent (fail to reject H0)')
####
matrix_twosample = [
['', 'Chi-2 Test Statistic(T-Value)', 'P-value'],
['Sample Data', abs(chi2_stat), p_value]
]
wig2 = go.FigureWidget(ff.create_table(matrix_twosample, index=True))
display (wig2)
text = "There is a "+ str (round ((p_value*100),3)) + "% probability that a difference of " + str(chi2_stat)
                text = text + """ occurred by chance. A usual interpretation is that a p-value of less than 0.05 (5% probability)
                is deemed to indicate that the difference has not occurred by chance (rejecting H0)"""
self.display_html(text, self.text_color, "p")
self.display_html("Chi-Squared T-Test ", "black", "h3")
text = ''' <b>Significant variance:</b> The statistic test will tell us if there is a significant difference in the
distribution of categories, if this difference is due to chance, or how likely it is that it is not due to chance but
rather to an unobserved factor. <br>
<b>T-Value:</b>This value represents the distance between the observed distribution
and the expected distribution in a fair world.
The larger the value of T, the greater the evidence against the difference
                occurring by chance in a fair world. <br>'''
self.display_html(text, "black", "p")
interact(test_res,
group_1 = widgets.Dropdown(description = "Reference Group",
options = groups,
value = ref_group,
style = {'description_width': 'initial'}),
group_2 = widgets.Dropdown(description = "Focal Group",
options = groups)
);
except Exception as e:
self.display_html("Something went wrong generating the distribution, change the distribution type and ensure group is represented sufficiently to generate dist",
self.text_color, "h4")
print (e)
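# Usage sketch (illustrative only, not part of the class API). Assumes a pandas DataFrame
# `df` with a protected feature column 'Gender' and a categorical outcome column 'Outcome';
# these column names are hypothetical.
#
#   import pandas as pd
#   from scipy.stats import chi2_contingency
#   table = pd.crosstab(df['Gender'], df['Outcome'])             # observed frequency table
#   chi2_stat, p_value, dof, expected = chi2_contingency(table)  # X^2, p-value, dof, expected counts
#   # A small p-value (commonly <= 0.05) is read as evidence that the two variables are not independent.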
#################################################################################################
#
# METHOD USED TO PERFORM AN INDEPENDENT T-TEST. A t-test is an inferential statistic used to
# determine whether there is a significant difference between the means of two groups that may be
# related in certain features.
#################################################################################################
def get_t_test_info(self, dist_output_per_group, groups, ref_group):
try:
#can be used to create multiple plots, however we only call it with attribute_list of len 1.
def test_res(group_1, group_2):
group_index_1 = list(groups).index(group_1)
group_index_2 = list(groups).index(group_2)
twosample_results = stats.ttest_ind(dist_output_per_group[group_index_1], dist_output_per_group[group_index_2])
matrix_twosample = [
['', 'Test Statistic(T-Value)', 'P-value'],
['Sample Data', twosample_results[0], twosample_results[1]]
]
wig2 = go.FigureWidget(ff.create_table(matrix_twosample, index=True))
display (wig2)
text = "There is a "+ str (round ((twosample_results[1]*100),3)) + "% probability that a difference of " +str(twosample_results[0]) +" occured by chance."
self.display_html(text, self.text_color, "p")
self.display_html("Two-Tailed T-Test ", "black", "h3")
text = ''' <b>Significant variance:</b> The statistical test tells us whether there is a significant variance in the distribution,
and how likely it is that this variance is due not to chance but
rather to an unobserved factor. <br>
<b>T-Value:</b> This value represents the distance between the observed distribution
and the expected distribution in a fair world.
The larger the value of T, the greater the evidence against the difference
occurring by chance in a fair world. <br>'''
self.display_html(text, "black", "p")
interact(test_res,
group_1 = widgets.Dropdown(description = "Reference Group",
options = groups,
value = ref_group,
style = {'description_width': 'initial'}),
group_2 = widgets.Dropdown(description = "Focal Group",
options = groups)
);
except Exception as e:
self.display_html("Something went wrong generating the distribution, change the distribution type and ensure group is represented sufficiently to generate dist",
self.text_color, "h4")
print (e)
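# Usage sketch (illustrative only, not part of the class API). `scores_a` and `scores_b`
# are hypothetical arrays of outcomes for two groups.
#
#   from scipy import stats
#   twosample_results = stats.ttest_ind(scores_a, scores_b)   # independent two-sample t-test
#   t_value, p_value = twosample_results[0], twosample_results[1]
#   # A small p-value suggests the difference in group means is unlikely to be due to chance alone.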
#################################################################################################
#
# METHOD USED TO PLOT THE DISTRIBUTION OF THE OUTCOME ACROSS GROUPS
#################################################################################################
def plot_distribution(self,
attribute,
y,
data_frame,
w=800, h=800,
y_high_positive = True,
curve_type = "kde"):
try:
#can be used to create multiple plots; however, we only call it with an attribute_list of length 1.
dist_output_per_group = []
group_labels = []
groups = data_frame[attribute].dropna().unique()
#For every group in the protected feature
for group in range(len(groups)):
group_df = data_frame[data_frame[attribute] == groups[group]]
dist_output_per_group.append(group_df[y])
group_labels.append(attribute + "-" + str(groups[group]))
# Add histogram data
# Group data together
#Add the dist of all combined groups
dist_output_per_group.append(data_frame[y])
group_labels.append("All")
# Create distplot with custom bin_size
# Add title
wig = go.Figure(ff.create_distplot(dist_output_per_group,
group_labels,
curve_type = curve_type,
show_hist=False) )#, bin_size=[.1, .25, .5, 1])
with wig.batch_update():
wig.update_layout(autosize=False,
width=900,
height=500,
#margin=dict(l=50,r=50,b=100, t=100,pad=4),
#paper_bgcolor="LightSteelBlue",
title=y +' distribution across ' + attribute,
xaxis=dict(range=[data_frame[y].min(), data_frame[y].max()])
)
img_bytes = wig.to_image(format="png", engine="kaleido")
wig.write_html("output_dist.html")
image_wig = widgets.Image(value=img_bytes,
format='png',
width=800,
height=500)
del wig
return image_wig, dist_output_per_group, groups
except Exception as e:
self.display_html("Something went wrong generating the distribution, change the distribution type and ensure group is represented sufficiently to generate dist",
self.text_color, "h4")
print (e)
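# Usage sketch (illustrative only, not part of the class API). `dist_output_per_group` is a
# list of numeric Series (one per group) and `group_labels` the matching list of labels, as built above.
#
#   import plotly.figure_factory as ff
#   import plotly.graph_objects as go
#   fig = go.Figure(ff.create_distplot(dist_output_per_group, group_labels,
#                                      curve_type="kde", show_hist=False))
#   fig.update_layout(title="Outcome distribution across groups")
#   # fig.show()  # or render a static image with fig.to_image(format="png", engine="kaleido")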
#################################################################################################
# #NOT FULLY IMPLEMENTED
#
#################################################################################################
def plot_output_ratios(self, data_frame, attributes_list, impact_col_name, y_value):
sequential_color_list = [
px.colors.sequential.Greens,
px.colors.sequential.Greys]
df_ratios = data_frame[attributes_list+[impact_col_name]+[y_value]].groupby(attributes_list+[impact_col_name]).count().reset_index().rename(columns={y_value: "values"})
_total = df_ratios['values'].sum()
_total_pos = df_ratios[(df_ratios[impact_col_name]=="Pos")]['values'].sum()
_total_neg = df_ratios[(df_ratios[impact_col_name]=="Neg")]['values'].sum()
one, isto = self.ratio(_total_pos, _total_neg)
df_ratios['labels'] = df_ratios.apply(lambda row : self.create_label(row[attributes_list]), axis = 1)
df_ratios['labels'].fillna(0,inplace=True)
attr_list = []
attr_list.append("All")
isto_list = []
isto_list.append(isto)
pcnt_list = []
pcnt_list.append((_total_pos/(_total_pos+_total_neg))*100)
for label in df_ratios['labels'].dropna().unique():
pos = df_ratios[(df_ratios['labels']==label) & (df_ratios[impact_col_name]=="Pos")]['values']
neg = df_ratios[(df_ratios['labels']==label) & (df_ratios[impact_col_name]=="Neg")]['values']
one, isto = self.ratio(pos.values[0], neg.values[0])
attr_list.append(label)
isto_list.append(isto)
pcnt = (pos.values[0]/(pos.values[0]+neg.values[0]))*100
if math.isnan(pcnt):
pcnt_list.append(0)
else:
pcnt_list.append(pcnt)
fig1 = go.Figure()
fig1.add_trace(go.Bar(
x=attr_list,
y=isto_list,
marker_color='indianred'
))
# Here we modify the tickangle of the xaxis, resulting in rotated labels.
fig1.update_layout(xaxis_tickangle=-45,
title = "Ratio of Positive to Negative (for each 1 positive effect)",
autosize=False,
width=900,
height=400,)
fw = go.FigureWidget(fig1)
#################################################################################################
#
#
#################################################################################################
def label_encoding(self, attributes_list, data_frame):
# creating initial dataframe
labelencoder = LabelEncoder()
return_dict = {}
for attribute in attributes_list:
categories = data_frame[attribute].dropna().unique()
temp_df = pd.DataFrame(categories, columns=[attribute])
# Assigning numerical values and storing in another column
temp_df[attribute+"_benc"] = temp_df[attribute]
temp_df[attribute] = labelencoder.fit_transform(temp_df[attribute])
# Convert this Temp_df into a dictionary
temp_df.set_index(attribute+"_benc", inplace=True)
return_dict.update(temp_df.to_dict())
data_frame[attribute+"_benc"] = data_frame[attribute]
data_frame[attribute] = labelencoder.fit_transform(data_frame[attribute])
return return_dict
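# Usage sketch (illustrative only, not part of the class API). Shows the kind of mapping
# sklearn's LabelEncoder produces; the column name 'Gender' is hypothetical.
#
#   import pandas as pd
#   from sklearn.preprocessing import LabelEncoder
#   df = pd.DataFrame({'Gender': ['Female', 'Male', 'Female']})
#   enc = LabelEncoder()
#   df['Gender_enc'] = enc.fit_transform(df['Gender'])        # e.g. Female -> 0, Male -> 1
#   mapping = dict(zip(enc.classes_, range(len(enc.classes_))))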
#################################################################################################
#
#
#################################################################################################
def gcd(self, p, q):
#Euclid's algorithm: greatest common divisor of two non-negative integers.
if (q == 0):
return p
else:
return self.gcd(q, p % q)
def ratio(self, a, b):
#Express a:b in the form 1:x.
_gcd = self.gcd(a, b)
one = round(a/_gcd, 2)
isto = round(b/_gcd, 2)
if one != 1 and one != 0:
isto = round(isto/one, 2)
one = 1.0
return one, isto
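# Worked example (illustrative only): ratio(30, 45) reduces 30:45 via the gcd (15) to 2:3,
# then normalises to 1:1.5, i.e. roughly 1.5 negative outcomes for every positive outcome.
#
#   one, isto = self.ratio(30, 45)   # -> (1.0, 1.5)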
#################################################################################################
# VIEW FAIRNESS METRICS WITH AEQUITAS
#
#################################################################################################
"""Difference in means: The difference between the probability of a member of group-a being selected
and the probability of a member of group-b being selected.
Disparate impact: The probability of a member of group-a being selected divided by
the probability of a member of group-b being selected.
False positive rate: Ratio of false positive rates among protected groups.
False negative rate: Ratio of false negative rates among protected groups."""
def view_aequitas_fairness_metrics(self,
X_df,
y_target,
y_pred,
data_summary):
#copying it here so we do not make any modifications to the original
X_data_frame = X_df.copy()
y_column_name = y_target.name
_w=600
_h=600
protected_attributes_list = data_summary.protected_before_transform
feature_data_dict = data_summary.feature_data_dict
y_high_positive = data_summary.HIGH_RANGE_POSITIVE
aeq_Plot = Plot()
aeq_Group = Group()
aeq_Bias = Bias()
aeq_Fairness = Fairness()
out1 = widgets.Output(layout={})
out2 = widgets.Output(layout={})
out3 = widgets.Output(layout={})
out4 = widgets.Output(layout={})
out5 = widgets.Output(layout={})
out6 = widgets.Output(layout={})
out7 = widgets.Output(layout={})
tab_contents = [out1, out2, out3, out4, out5, out6,out7]
children = tab_contents
tab = widgets.Tab(style={'description_layout':'auto', 'title_layout':'auto'})
tab.children = children
tab.set_title(0, "Confusion Matrix")
tab.set_title(1, "False Positive")
tab.set_title(2, "False Negative")
tab.set_title(3, "All Metrics")
tab.set_title(4, "Disparate Impact")
tab.set_title(5, "Fairness")
tab.set_title(6, "Metrics reminder")
local_layout = {'width': 'auto', 'visibility':'visible'}
local_layout_hidden = {'width': 'auto', 'visibility':'hidden'}
local_style = {'description_width':'initial'}
#Convert the protected feature columns back to their values
#before label encoding or one-hot encoding
for feature in protected_attributes_list:
X_data_frame[feature] = self.convert_feature_column(X_data_frame,
feature,
feature_data_dict[feature])
_choose_a = widgets.Dropdown(description = "Select protected feature",
options = protected_attributes_list,
layout = local_layout,
style = local_style)
_choose_b = widgets.Dropdown(description = "Select protected group",
options = X_data_frame[_choose_a.value].dropna().unique(),
layout = local_layout,
style = local_style)
_choose_measure = widgets.Dropdown(description = "Select metric",
options = {'False Omission Rate' : 'for',
'False Discovery Rate' :'fdr',
'False Positive Rate': 'fpr',
'False Negative Rate': 'fnr',
'Negative Predictive Value': 'npv',
'Precision': 'precision',
'Predicted Positive Ratio_k' :'ppr',
'Predicted Positive Ratio_g': 'pprev',
'Group Prevalence':'prev'},
layout = local_layout,
value = 'precision',
style = local_style)
_choose_disparity_measure = widgets.Dropdown(description = "Select disparity metric",
options = {'False Positive Rate disparity': 'fpr_disparity',
'False Negative Rate disparity': 'fnr_disparity',
'Predicted Positive Ratio_k' : 'ppr_disparity',
'Predicted Positive Ratio_g disparity' :'pprev_disparity',
'Precision Disparity': 'precision_disparity',
'False Discovery Rate disparity': 'fdr_disparity',
'False Omission Rate disparity': 'for_disparity',
'True Positive Rate disparity': 'tpr_disparity',
'True Negative Rate disparity': 'tnr_disparity',
'npv_disparity': 'npv_disparity',},
layout = local_layout,
value = 'fpr_disparity',
style = local_style)
html = '''<h3>Aequitas fairness via Machnamh: </h3> Aequitas is an open-source bias audit toolkit for
machine learning developers, analysts, and policymakers to audit machine learning models for discrimination
and bias, and to make informed and equitable decisions around developing and deploying predictive risk-assessment
tools.<br>The Machnamh framework provides a dynamic interface to the Aequitas API, allowing for quick analysis and a
user interface that helps with interpreting the results.'''
display (HTML(html))
display(tab)
df_aequitas = pd.concat([X_data_frame[protected_attributes_list],
y_target,
pd.DataFrame(y_pred,
index=X_data_frame.index)],
axis=1, sort=False);
#Aequitas needs the true y-value column to be called 'label_value'
# and the prediction to be called 'score'
df_aequitas.rename(columns={y_column_name: 'label_value',
0: 'score'},
inplace=True);
df_aequitas[df_aequitas.columns.difference(['label_value', 'score'])] = df_aequitas[
df_aequitas.columns.difference(['label_value', 'score'])].astype(str);
cross_tab, _ = aeq_Group.get_crosstabs(df_aequitas)
cross_tab.fillna(0, inplace = True)
absolute_metrics = aeq_Group.list_absolute_metrics(cross_tab)
#Columns not in the list of absolute metrics
counts_metrics = list (cross_tab[[col for col in cross_tab.columns if col not in absolute_metrics]].columns.values)
counts_metrics.remove('model_id')
counts_metrics.remove('score_threshold')
counts_metrics.remove('k')
## Read images from file as raw bytes (simpler here than going through BytesIO)
path = os.path.dirname(sys.modules[__name__].__file__)
img2 = open(path + '/data/count.png', 'rb').read()
img1 = open(path + '/data/absolute.png', 'rb').read()
## Create image widgets; set the image value, format and dimensions.
wi1 = widgets.Image(value=img1, format='png', width=300, height=400)
wi2 = widgets.Image(value=img2, format='png', width=300, height=400)
## Side by side thanks to HBox widgets
sidebyside = widgets.HBox([wi1, wi2])
## Finally, show.
with out1:
clear_output(wait = True)
tab = widgets.Tab()
tab_names = []
output_arr = {}
for feature in protected_attributes_list:
priv = str(feature_data_dict[feature]['privileged_description'])
output_arr[feature] = widgets.Output(layout={})
with output_arr[feature]:
beneficial = ""
punative = ""
if y_high_positive == True:
html = """You have indicated that a high ranking is beneficial to an individual or
group, therefor <font color='red'><b>false negatives</b></font> can be particularally harmful.
in terms of fairness!"""
beneficial = ['tp','fp']
punitive = ['tn','fn']
equal_oppertunity = '''In this case a high outcome(or binary one) is <font color='green'>beneficial </font>so we are quantifying the equal oppertunity to have an apparently deserved <font color='green'>beneficial outcomes</font> (TPR)'''
equal_odds = ''', and to have an apparently undeserved<font color='green'> beneficial outcome</font>'''
equal_oppertunity_2 = ''
elif y_high_positive == False:
html = """You have indicated that a low ranking is beneficial to an individual or
group, therefor <font color='red'><b>false positives</b></font> can
be particularally harmful in terms of fairness!"""
beneficial = ['tn','fn']
punitive = ['tp','fp']
equal_oppertunity = '''In this case a high outcome(or binary one) is <font color='red'>not beneficial </font>so we are quantifying the equal oppertunity to have <font color='red'>an
apparently deserved non-beneficial outcome</font> '''
equal_odds = ''', and to have an apparently undeserved<font color='green'> non-beneficial outcome.</font>'''
equal_oppertunity_2 = ''' Rectifying any discrepencies in
oppertunity to a non-benificial outcome will cause negative outcomes for additional individuals.'''
widgHTML = widgets.GridBox(children=[widgets.HTML(html)],
layout=Layout(
width='90%',
)
)
display (widgHTML)
one = widgets.Output(layout={})
two = widgets.Output(layout={})
three = widgets.Output(layout={})
accordion1 = widgets.Accordion(children=[one, two, three])
accordion1.set_title(0, "Absolute Metrics across " + feature + " groups")
accordion1.set_title(1, "Group counts across " + feature + " groups")
accordion1.set_title(2, "Metrics description ")
with one:
included = ['attribute_name', 'attribute_value'] + absolute_metrics
display ( cross_tab[included][ cross_tab[included]['attribute_name'] == feature].round(2))
with two:
display (cross_tab[counts_metrics][ cross_tab[counts_metrics]['attribute_name'] == feature] )
with three:
display(sidebyside)
accordion1.selected_index = None
display(accordion1)
self.make_confusion_matrix(cross_tab[counts_metrics],
feature,
group_names = ['True Neg','False Pos','False Neg','True Pos'],
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=(3,3),
cmap='Blues',
title=None)
fairness_measures_pd = cross_tab[counts_metrics][ cross_tab[counts_metrics]['attribute_name'] == feature]
###### Proportional parity#########
fairness_measures_pd["Beneficial outcome percentage"] = fairness_measures_pd.apply(lambda row: ((row[beneficial[0]] + row[beneficial[1]])/row["group_size"])*100, axis=1)
fairness_measures_pd["Beneficial outcome percentage"] = round(fairness_measures_pd["Beneficial outcome percentage"],2)
fairness_measures_pd["Punative outcome percentage"] = fairness_measures_pd.apply(lambda row: ((row[punitive[0]] + row[punitive[1]])/row["group_size"])*100, axis=1)
fairness_measures_pd["Punative outcome percentage"] = round(fairness_measures_pd["Punative outcome percentage"],2)
display (HTML("""<h2><font color='green'>Proportional parity</h2><font color='black'>
<b>Proportional parity:</b>
Proportional parity is a representational based group fairness metric
which states that each group should have the same proportion of
beneficial(non-punative) outcomes. A desire to correct for the
absence of proportional parity (when no biological or inherent
reason accounts for its' absence) reflects a worldview which recognises
the existance of prejudice and a wish to create a "decision maker"
willing to apply corrective measures to counter historical discrimination
against a particular group or groups and ensure that all groups are
proportionately represented in beneficial outcomes. The "decision maker" is
aware that such intervention may be reflected in a reduction of perceived utility
of <i>current</i> model accuracy.<br>
<b>Note</b> These values are calculated based on the group
representation in the sample which does not necessarally match that of
the population or the domain in which the model will be used.<br>"""))
display (HTML("The privileged group has been set as: " + priv))
display(fairness_measures_pd[["attribute_value","Beneficial outcome percentage","Punative outcome percentage"]].rename(columns = {'attribute_value': 'Group'}))
####### Demographic parity (TP+FP) or (TN+FN) should be the same#########
fairness_measures_pd["Beneficial outcome count"] = fairness_measures_pd.apply(lambda row: (row[beneficial[0]] + row[beneficial[1]]), axis=1)
fairness_measures_pd["Punative outcome count"] = fairness_measures_pd.apply(lambda row: (row[punitive[0]] + row[punitive[1]]), axis=1)
display (HTML("""<h2><font color='green'>Demographic parity</h2><font color='black'>
<b>Demographic parity:</b> (also known as Independence or Statistical Parity,) is a
representation based group fairness metric which states that each group
should have an equal number of beneficial(non-punative) outcomes.
A desire to correct for the absence of demographic parity (when no biological
or inherent reason accounts for its' absence) reflects a worldview which recognises
the existance of prejudice and a wish to create a "decision maker" willing to apply
corrective measures to counter historical discrimination against a particular group
or groups and ensure that all groups are proportionately represented in beneficial
outcomes. The "decision maker" is aware that such intervention may be reflected in a
reduction of perceived utility of <i>current</i> model accuracy. """))
display (HTML("The privileged group has been set as: " + priv))
display(fairness_measures_pd[["attribute_value","Beneficial outcome count","Punative outcome count"]].rename(columns = {'attribute_value': 'Group'}))
####### Equal opportunity###############################
def tpr(row):
try:
rate = row["tp"] / (row["tp"] + row["fn"])
except:
rate = 0
return round(rate * 100, 2)
fairness_measures_pd["True positive rate percentage"] = fairness_measures_pd.apply(lambda row: tpr(row),axis=1)
fairness_measures_pd["Proportional to "+priv] = fairness_measures_pd.apply(lambda row: tpr(row),axis=1)
display (HTML("""<h2><font color='green'>Equality of oppertunity</h2><font color='black'>
<b>Equality of opportunity:</b> is an accuracy related fairness that
is satisfied if the model correctly predicts class 1
outcomes at equal rates across groups. A desire by a 'decision maker' to
satisfy equality of oppertunity reflects a worldview belief that we
should ensure that those who appear to deserve a certain outcome(assistive or punitive )
should obtain that outcome independant of the group they belong to and that this outcome should be
the same rate across groups, the desire is to ensure that no further prejudice or unfairness
occurs although there is no consideration to actively apply corrective measures to counter
historical discrimination reflected in the features used to determine the outcome.
There is also no concern given to those situations where an outcome is incorrectly
given when not deserved(which may indicate favoritism towards a particular group)<br> """ + equal_oppertunity + equal_oppertunity_2 +'''<br><br>The True Positive Rate (TPR) should be the same for each group, to satisfy Equality of opportunity.'''))
display (HTML("The privileged group has been set as: " + priv))
display(fairness_measures_pd[["attribute_value","True positive rate percentage"]].rename(columns = {'attribute_value': 'Group'}))
######################Equalized odds ######################
def fpr(row):
try:
rate = row["fp"] / (row["fp"] + row["tn"])
except:
rate = 0
return round(rate * 100, 2)
fairness_measures_pd["False positive rate percentage"] = fairness_measures_pd.apply(lambda row: fpr(row),axis=1)
display (HTML("""<h2><font color='green'>Equalized odds</h2><font color='black'>
<b>Equalized odds:</b> is an accuracy related fairness that
is satisfied if the model correctly predicts true class 1
outcomes at equal rates across groups. A desire by a 'decision maker' to
satisfy equality of oppertunity reflects a worldview belief that we
should ensure that those who appear to deserve a certain outcome(assistive or punitive )
should obtain that outcome independant of the group they belong to, and that those who
do not deserve the outcome, should not obtain it(should not be mis-classified) and this should be the
same rate across groups, the desire is to ensure that no further prejudice or unfairness occurs either through prejudice or favoritism, although there is no consideration to actively apply corrective measures to counter historical discrimination reflected in the features used to determine the outcome.
)<br> """ + equal_oppertunity + equal_odds + equal_oppertunity_2 +'''<br><br>The True Positive Rate (TPR) and False Positive Rate (FPR) should be the same for each group to satisfy Equalised odds.'''))
display (HTML("The privileged group has been set as: " + priv))
display(fairness_measures_pd[["attribute_value","True positive rate percentage", "False positive rate percentage"]].rename(columns = {'attribute_value': 'Group'}))
tab.children = list(output_arr.values())
for i in range(len(protected_attributes_list)):
tab.set_title(i, protected_attributes_list[i])
display (tab)
with out2: #False Positive
clear_output(wait = True)
html = """<b>False Positive Rate:</b> The model predicted the subjects outcome
was positive when in fact it was not, in other words <b>an incorrect decision to
recommend for action!</b>"""
if y_high_positive == True:
html = html + '''You have indicated that a high outcome (ranking) has a positive impact on an individual
therefore a high false positive rate will have a <font color='green'><b>positive impact</b></font> on an individual or group.
'''
elif y_high_positive == False:
html = html + '''You have indicated that a high outcome (ranking) has a negative impact on an individual
therefore a high false positive rate will have a <font color='red'><b>negative impact</b></font> on an individual or group.
'''
widgHTML = widgets.GridBox(children=[widgets.HTML(html)],
layout=Layout(
width='90%',
)
)
display (widgHTML)
fig1, (ax1) = plt.subplots(nrows=1, figsize=(10 ,5));
ax1 = aeq_Plot.plot_group_metric(cross_tab,'fpr', ax1);
plt.tight_layout();
ax1.set_title('False Positive ratios');
plt.show();
plt.close(fig1);
plt.clf();
with out3:#False Negative
clear_output(wait = True)
html = """<b>False Negative Rate:</b> The model predicted the subjects outcome was negative
when in fact it was not, in other words <b>an incorrect decision not to recommend for action!</b> """
if y_high_positive == True:
html = html + '''You have indicated that a high outcome (ranking) has a positive impact on an individual
therefore a high false negative rate will have a<font color='red'> <b>negative impact </b></font> on an individual or group.
'''
elif y_high_positive == False:
html = html + '''You have indicated that a high outcome (ranking) has a negative impact on an individual
therefore a high false negative rate will have a <font color='green'><b>positive impact </b></font> on an individual or group.
'''
widgHTML = widgets.GridBox(children=[widgets.HTML(html)],
layout=Layout(
width='90%',
)
)
display (widgHTML)
fig1, (ax1) = plt.subplots(nrows=1, figsize=(10 ,5));
ax1 = aeq_Plot.plot_group_metric(cross_tab,'fnr', ax1);
plt.tight_layout();
ax1.set_title('False Negative ratios');
plt.show()
plt.close(fig1);
plt.clf();
with out4:#ALL metrics
clear_output(wait = True)
if y_high_positive == True:
html = '''You have indicated that a <b>high outcome(ranking)</b> has a<font color='green'> <b>positive impact</b></font> on an individual.
'''
elif y_high_positive == False:
html = '''You have indicated that a <b>high outcome (ranking)</b> has a <font color='red'><b>negative impact</b></font> on an individual.
'''
display (HTML(html))
def show_any(choose_measure):
fig1, (ax1) = plt.subplots(nrows=1);
ax1 = aeq_Plot.plot_group_metric(cross_tab, choose_measure, ax1)
plt.tight_layout()
ax1.set_title(choose_measure)
plt.show()
plt.close(fig1)
plt.clf()
interact(show_any, choose_measure = _choose_measure);
with out5: #Disparate
clear_output(wait = True)
dict_of_controls = {}
dis_imp_html = '''<b>Disparate Impact:</b> A decision-making process suffers from disparate impact if the outcome
of the decision disproportionately benefits one group or disproportionately hurts another group.
It generally results from unintentional discrimination in decision-making systems.
Disparities are calculated as the ratio of a metric for a group of interest to the same metric for a reference group.
For example, the False Negative Rate Disparity for Group-A compared to a reference Group-B is: FNR-A/FNR-B.
The calculated disparities are in relation to a reference group, which will always
have a disparity of 1.0. Disparate impact is often measured by the eighty percent or four-fifths rule. '''
widgHTML = widgets.GridBox(children=[widgets.HTML(dis_imp_html)],
layout=Layout(
width='90%',
)
)
display (widgHTML)
for feature in protected_attributes_list:
dict_of_controls[feature] = widgets.Dropdown(description = "Reference group for "+feature,
options = X_data_frame[feature].dropna().unique(),
value = feature_data_dict[feature]['privileged_description'],
layout = local_layout,
style = local_style)
display(dict_of_controls[feature])
def show_disparity(button, space, choose_disparity_measure): #local method
_ref_groups_dict = {}
for c in dict_of_controls:
_ref_groups_dict[c] = str(dict_of_controls[c].value)
disparity = aeq_Bias.get_disparity_predefined_groups(cross_tab,
original_df=df_aequitas,
ref_groups_dict=_ref_groups_dict,
alpha=0.05,
mask_significance=True);
num_rows = math.ceil( (len(protected_attributes_list))/2)
fig = plt.figure(figsize=(12 ,6*num_rows))
plt.tight_layout()
ax_dict = {}
for x, num in zip (protected_attributes_list, range(len(protected_attributes_list))):
ax_dict[x] = plt.subplot(num_rows, 2, num+1)
ax_dict[x] = aeq_Plot.plot_disparity(disparity,
group_metric=choose_disparity_measure,
attribute_name=x,
significance_alpha=0.05,
fig = fig,
ax = ax_dict[x]);
if y_high_positive == True:
html = '''<b></b>You have indicated that a high outcome (ranking) has a <font color='green'><b>positive</b> </font> impact on a group or individual.
<br>
'''
elif y_high_positive == False:
html = '''<b></b>You have indicated that a high outcome (ranking) has a <font color='red'><b>negative impact</b></font> on a group or individual.
'''
display(HTML(html))
plt.show()
display(HTML('''Squares are sized by group size and coloured by disparity magnitude.<br>
Reference groups are displayed in grey with disparity = 1. <br>
Disparities greater than 10x will show as 10x.<br>
Disparities less than 0.1x will show as 0.1x.<br>
Statistical significance (default 0.05) will show as ** on a square.'''))
plt.close(fig)
plt.clf()
one = widgets.Output(layout={})
accordion1 = widgets.Accordion(children=[one])
accordion1.set_title(0, "All Calculated values ")
with one:
pd.set_option('display.max_columns', None)
display (disparity)
accordion1.selected_index = None
display(accordion1)
with out6:
clear_output(wait = True)
for ref in _ref_groups_dict:
display(HTML("Reference group is " +_ref_groups_dict[ref] + " for " + ref))
display(HTML("Green bar indicates Fair.<br>Red bar indicates unfair."))
group_val_fairness= aeq_Fairness.get_group_value_fairness(disparity)
parity_determinations = aeq_Fairness.list_parities(group_val_fairness)
aeq_Plot.plot_fairness_group_all(group_val_fairness, ncols=5, metrics = "all")
one = widgets.Output(layout={})
accordion1 = widgets.Accordion(children=[one])
accordion1.set_title(0, "All Calculated values ")
with one:
display (group_val_fairness[['attribute_name', 'attribute_value']+parity_determinations])
accordion1.selected_index = None
display(accordion1)
interact(show_disparity,
button = widgets.ToggleButton(
description='Apply selected Reference group',
layout = local_layout,
style = local_style),
space = widgets.Label(' ', layout=widgets.Layout(width='100%')),
choose_disparity_measure = _choose_disparity_measure
)
with out7:
html = '''<table class="wikitable" align="center" style="text-align:center; border:none; background:transparent;">
<tbody><tr>
<td style="border:none;" colspan="2">
</td>
<td style="background:#eeeebb;" colspan="2"><b>True condition</b>
</td></tr>
<tr>
<td style="border:none;">
</td>
<td style="background:#dddddd;"><a href="https://en.wikipedia.org/wiki/Statistical_population" title="Statistical population">Total population</a>
</td>
<td style="background:#ffffcc;">Condition positive
</td>
<td style="background:#ddddaa;">Condition negative
</td>
<td style="background:#eeeecc;font-size:90%;"><a href="https://en.wikipedia.org/wiki/Prevalence" title="Prevalence">Prevalence</a> <span style="font-size:118%;white-space:nowrap;">= <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ Condition positive</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Total population</span></span></span>
</td>
<td style="background:#cceecc;border-left:double silver;font-size:90%;" colspan="2"><a href="https://en.wikipedia.org/wiki/Accuracy_and_precision" title="Accuracy and precision">Accuracy</a> (ACC) = <span style="font-size:118%;"><span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ True positive + Σ True negative</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Total population</span></span></span>
</td></tr>
<tr>
<td rowspan="2" class="nowrap unsortable" style="line-height:99%;vertical-align:middle;padding:.4em .4em .2em;background-position:50% .4em !important;min-width:0.875em;max-width:0.875em;width:0.875em;overflow:hidden;background:#bbeeee;"><div style="-webkit-writing-mode: vertical-rl; -o-writing-mode: vertical-rl; -ms-writing-mode: tb-rl;writing-mode: tb-rl; writing-mode: vertical-rl; layout-flow: vertical-ideographic;display: inline-block; -ms-transform: rotate(180deg); -webkit-transform: rotate(180deg); transform: rotate(180deg);;-ms-transform: none ;padding-left:1px;text-align:center;"><b>Predicted condition</b></div>
</td>
<td style="background:#ccffff;">Predicted condition<br />positive
</td>
<td style="background:#ccffcc;"><span style="color:#006600;"><b><a href="https://en.wikipedia.org/wiki/True_positive" class="mw-redirect" title="True positive">True positive</a></b></span>
</td>
<td style="background:#eedddd;"><span style="color:#cc0000;"><b><a href="https://en.wikipedia.org/wiki/False_positive" class="mw-redirect" title="False positive">False positive</a></b>,<br /><a href="https://en.wikipedia.org/wiki/Type_I_error" class="mw-redirect" title="Type I error">Type I error</a></span>
</td>
<td style="background:#ccffee;border-top:double silver;font-size:90%;"><a href="https://en.wikipedia.org/wiki/Positive_predictive_value" class="mw-redirect" title="Positive predictive value">Positive predictive value</a> (PPV), <a href="https://en.wikipedia.org/wiki/Precision_(information_retrieval)" class="mw-redirect" title="Precision (information retrieval)">Precision</a> = <span style="font-size:118%;white-space:nowrap;"><span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ True positive</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Predicted condition positive</span></span></span>
</td>
<td style="background:#cceeff;border-top:double silver;font-size:90%;" colspan="2"><a href="https://en.wikipedia.org/wiki/False_discovery_rate" title="False discovery rate">False discovery rate</a> (FDR) = <span style="font-size:118%;white-space:nowrap;"><span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ False positive</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Predicted condition positive</span></span></span>
</td></tr>
<tr>
<td style="background:#aadddd;">Predicted condition<br />negative
</td>
<td style="background:#ffdddd;"><span style="color:#cc0000;"><b><a href="https://en.wikipedia.org/wiki/False_negative" class="mw-redirect" title="False negative">False negative</a></b>,<br /><a href="https://en.wikipedia.org/wiki/Type_II_error" class="mw-redirect" title="Type II error">Type II error</a></span>
</td>
<td style="background:#bbeebb;"><span style="color:#006600;"><b><a href="https://en.wikipedia.org/wiki/True_negative" class="mw-redirect" title="True negative">True negative</a></b></span>
</td>
<td style="background:#eeddee;border-bottom:double silver;font-size:90%;"><a href="https://en.wikipedia.org/wiki/False_omission_rate" class="mw-redirect" title="False omission rate">False omission rate</a> (FOR) = <span style="font-size:118%;white-space:nowrap;"><span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ False negative</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Predicted condition negative</span></span></span>
</td>
<td style="background:#aaddcc;border-bottom:double silver;font-size:90%;" colspan="2"><a href="https://en.wikipedia.org/wiki/Negative_predictive_value" class="mw-redirect" title="Negative predictive value">Negative predictive value</a> (NPV) = <span style="font-size:118%;white-space:nowrap;"><span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ True negative</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Predicted condition negative</span></span></span>
</td></tr>
<tr style="font-size:90%;">
<td style="border:none;vertical-align:bottom;padding:0 2px 0 0;color:#999999;" colspan="2" rowspan="2">
</td>
<td style="background:#eeffcc;"><a href="https://en.wikipedia.org/wiki/True_positive_rate" class="mw-redirect" title="True positive rate">True positive rate</a> (TPR), <a href="https://en.wikipedia.org/wiki/Recall_(information_retrieval)" class="mw-redirect" title="Recall (information retrieval)">Recall</a>, <a href="https://en.wikipedia.org/wiki/Sensitivity_(tests)" class="mw-redirect" title="Sensitivity (tests)">Sensitivity</a>, probability of detection, <a href="https://en.wikipedia.org/wiki/Statistical_power" class="mw-redirect" title="Statistical power">Power</a> <span style="font-size:118%;white-space:nowrap;">= <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ True positive</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Condition positive</span></span></span>
</td>
<td style="background:#eeddbb;"><a href="https://en.wikipedia.org/wiki/False_positive_rate" title="False positive rate">False positive rate</a> (FPR), <a href="https://en.wikipedia.org/wiki/Information_retrieval" title="Information retrieval"><span class="nowrap">Fall-out</span></a>, probability of false alarm <span style="font-size:118%;white-space:nowrap;">= <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ False positive</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Condition negative</span></span></span>
</td>
<td style="background:#eeeeee;"><a href="https://en.wikipedia.org/wiki/Positive_likelihood_ratio" class="mw-redirect" title="Positive likelihood ratio">Positive likelihood ratio</a> <span class="nowrap">(LR+)</span> <span style="font-size:118%;white-space:nowrap;">= <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">TPR</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">FPR</span></span></span>
</td>
<td style="background:#dddddd;" rowspan="2"><a href="https://en.wikipedia.org/wiki/Diagnostic_odds_ratio" title="Diagnostic odds ratio">Diagnostic odds ratio</a> (DOR) <span style="font-size:118%;white-space:nowrap;">= <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">LR+</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">LR−</span></span></span>
</td>
<td style="background:#ddffdd;border-left:double silver;line-height:2;" rowspan="2"><a class="mw-selflink selflink">F<sub>1</sub> score</a> = <span style="font-size:118%;white-space:nowrap;">2 · <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Precision · Recall</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Precision + Recall</span></span></span>
</td></tr>
<tr style="font-size:90%;">
<td style="background:#ffeecc;"><a href="https://en.wikipedia.org/wiki/False_negative_rate" class="mw-redirect" title="False negative rate">False negative rate</a> (FNR), Miss rate <span style="font-size:118%;white-space:nowrap;">= <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ False negative</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Condition positive</span></span></span>
</td>
<td style="background:#ddeebb;"><a href="https://en.wikipedia.org/wiki/Specificity_(tests)" class="mw-redirect" title="Specificity (tests)">Specificity</a> (SPC), Selectivity, <a href="https://en.wikipedia.org/wiki/True_negative_rate" class="mw-redirect" title="True negative rate">True negative rate</a> (TNR) <span style="font-size:118%;white-space:nowrap;">= <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">Σ True negative</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">Σ Condition negative</span></span></span>
</td>
<td style="background:#cccccc;"><a href="https://en.wikipedia.org/wiki/Negative_likelihood_ratio" class="mw-redirect" title="Negative likelihood ratio">Negative likelihood ratio</a> <span class="nowrap">(LR−)</span> <span style="font-size:118%;white-space:nowrap;">= <span role="math" class="sfrac nowrap tion" style="display:inline-block; vertical-align:-0.5em; font-size:85%; text-align:center;"><span class="num" style="display:block; line-height:1em; margin:0 0.1em;">FNR</span><span class="slash visualhide">/</span><span class="den" style="display:block; line-height:1em; margin:0 0.1em; border-top:1px solid;">TNR</span></span></span>
</td></tr></tbody></table>'''
display(HTML("<b>Wikipedia Metrics overview</b>"))
display(HTML(html))
gc.collect()
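# Minimal sketch of the underlying Aequitas flow used above (illustrative only; the import
# paths are assumed to match the aequitas package, and the reference-group dictionary is
# hypothetical). `df_aequitas` must contain the protected attribute columns plus the
# 'score' and 'label_value' columns, as prepared in view_aequitas_fairness_metrics().
#
#   from aequitas.group import Group
#   from aequitas.bias import Bias
#   from aequitas.fairness import Fairness
#   cross_tab, _ = Group().get_crosstabs(df_aequitas)
#   disparity = Bias().get_disparity_predefined_groups(cross_tab,
#                                                      original_df=df_aequitas,
#                                                      ref_groups_dict={'Gender': 'Male'},
#                                                      alpha=0.05,
#                                                      mask_significance=True)
#   fairness_df = Fairness().get_group_value_fairness(disparity)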
def visualise_RMSE_model_eval(self, y_train, y_test, y_pred_train, y_pred_test):
y_predictedTrain = y_pred_train
y_predictedTest = y_pred_test
# We use Root Mean Squared Error (RMSE) and the Coefficient of Determination (R^2 score) to evaluate the model.
# RMSE is the square root of the average of the squared residuals, i.e. the square root of the variance of the residuals.
# It indicates the absolute fit of the model to the data: how close the observed data points are to the model's
# predicted values. Whereas R-squared is a relative measure of fit, RMSE is an absolute measure of fit.
# As the square root of a variance, RMSE can be interpreted as the standard deviation of the unexplained variance,
# and has the useful property of being in the same units as the response variable.
# Lower values of RMSE indicate a better fit.
# RMSE is a good measure of how accurately the model predicts the response, and it is the most important criterion
# for fit if the main purpose of the model is prediction.
# Coefficient of Determination (R^2 score): the best possible score is 1.0 and it can be negative
# (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y,
# disregarding the input features, would get an R^2 score of 0.0. Higher values are better because they mean
# more of the variance is explained by the model.
display("*****EVALUATING MODEL WITH TRAINING DATA:***")
rmse = mean_squared_error(y_train, y_predictedTrain)
r2 = r2_score(y_train, y_predictedTrain)
display(' Root mean squared error: '+ str(rmse))
display(' R2 score: '+ str(r2))
display("*****EVALUATING MODEL WITH TEST DATA:******")
rmse = mean_squared_error(y_test, y_predictedTest)
r2 = r2_score(y_test, y_predictedTest)
display(' Root mean squared error: ' + str(rmse))
display(' R2 score: '+ str(r2))
n_train = len(y_train)
plt.figure(figsize=(20, 10))
plt.plot(range(n_train), y_train, label="train")
plt.plot(range(n_train, len(y_test) + n_train), y_test, '-', label="test")
plt.plot(range(n_train), y_predictedTrain, '--', label="prediction train")
plt.plot(range(n_train, len(y_test) + n_train), y_predictedTest, '--', label="prediction test")
plt.legend(loc=(1.01, 0))
plt.xlabel("Score")
plt.ylabel("prediction")
return plt
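# Worked sketch of the two metrics reported above (illustrative only). `y_true` and `y_hat`
# are hypothetical arrays of observed and predicted values.
#
#   import numpy as np
#   from sklearn.metrics import mean_squared_error, r2_score
#   y_true = np.array([3.0, 5.0, 2.5, 7.0])
#   y_hat  = np.array([2.5, 5.0, 4.0, 8.0])
#   rmse = np.sqrt(mean_squared_error(y_true, y_hat))   # square root of the mean squared residual
#   r2 = r2_score(y_true, y_hat)                         # 1.0 is a perfect fit; 0.0 matches a constant predictor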
def reload_data (self, pickle_path, data_frame_path, print_summary = True):
# Reload the file
data_summary = dill.load(open(pickle_path, "rb"))
data_frame = pd.read_csv(data_frame_path)
html = ""
protected = []
non_protected = []
y_value = ""
if print_summary == True:
display(HTML("<b>Number of samples in dataset:</b> " + str(data_frame.shape[0])))
display (HTML("<b>All columns in data frame:</b> "+ str(data_frame.columns)))
display(HTML("<b>Target:</b> "+ str (data_summary.y_value)))
if data_summary.Y_CONTINUOUS == True:
display(HTML("<b>Target type: </b>continuous"))
display(HTML("<b>Num of unique values in target</b>: " + str ( len(data_frame[data_summary.y_value].dropna().unique()))))
display(HTML("<b>Min:</b> " + str (data_frame[data_summary.y_value].min())))
display(HTML("<b>Max </b>: "+ str ( data_frame[data_summary.y_value].max())))
if data_summary.Y_BINARY == True:
display(HTML("Output is binary"))
if data_summary.HIGH_RANGE_POSITIVE == True:
display(HTML("Output in high range has positive effect"))
if data_summary.HIGH_RANGE_POSITIVE == False:
display(HTML("Output in high range has negative effect"))
display(HTML("<h3>Summary of Data transformation per feature:</h3>"))
feature_data = data_summary.feature_data_dict
html = html + "<ul>"
for feature in feature_data:#in order of target, protected, other.
html = html +"<li><b>"+feature+"</b><br>"
html = html + "<ul>"
html = html + "<li>Type: " + str(feature_data[feature]['type']) + "<br>"
if feature_data[feature]['target'] == True:
html = html + "<li>This is the target(y)<br>"
y_value = feature
elif feature_data[feature]['protected'] == True:
html = html + "<li>This is a protected feature<br>"
protected.append(feature)
else:
non_protected.append(feature)
if feature_data[feature]['type'] == "categorical":
html = html + "<li>Original Values: " + str(feature_data[feature]['original_values']) + "<br>"
html = html + "<li>Value descriptions: " + str(feature_data[feature]['values_description']) + "<br>"
if feature_data[feature]['label_enc'] == True:
html = html + "<li>Label encoding was applied to this feature." + "<br>"
html = html + "<li>Label encoded values: " + str(feature_data[feature]['label_enc_values']) + "<br>"
if feature_data[feature]['one_hot_enc'] == True:
html = html + "<li>One-Hot-Encoding was applied to this feature" + "<br>"
html = html + "<li>The new columns are:" + str(feature_data[feature]['one_hot_enc_cols_after']) + "<br>"
html = html + "<li>The original column before one-hot encoding:" + str(feature_data[feature]['one_hot_enc_col_before']) + "<br>"
if feature_data[feature]['values_merged'] == True:
html = html + "<li>Some values within the feature were merged." + "<br>"
html = html + "<li>The values before the merge were: " + str(feature_data[feature]['before_merge_values']) + "<br>"
html = html + "<li>The values after the merge are: " + str(feature_data[feature]['original_values']) + "<br>"
if feature_data[feature]['scaled_using'] != "":
html = html + "<li>Scaled/Normalised using: " + feature_data[feature]['scaled_using'] + "<br>"
html = html + "<br>"
html = html + "</ul>"
html = html + "</ul>"
display (HTML(html))
return data_frame, data_summary
#################################################################################################
# Detect outliers and return for one column in dataset.
# We find the z-score for each data point in the dataset
# and if the z-score is greater than 3 we classify that point as an outlier.
# Any point outside "thresh" (= 3) standard deviations is treated as an outlier.
#
#################################################################################################
def detect_outlier_and_describe(self, series, thresh = 3, data_type = "numeric"):
outliers=[]
threshold=thresh
size = series.count()
missing = series.isnull().sum()
unique = len(series.unique())
pcnt_missing = missing/size *100
html = ""
if data_type == "numeric":
mean_1 = np.mean(series)
std_1 =np.std(series)
html = "Outlier is defind as any point outside " + str(thresh) + " standard deviations<br>"
html = html + "Min: " + str(np.min(series)) + "<br>"
html = html + "Max: " + str( np.max(series)) + "<br>"
html = html + "Mean: " + str( mean_1) + "<br>"
html = html + "Standard Deviation: "+ str( std_1) + "<br>"
for y in series:
z_score= (y - mean_1)/std_1
if np.abs(z_score) > threshold:
outliers.append(y)
html = html + "Number of outliers: "+ str( len(outliers)) + "<br>"
html = html + "Outliers: "+ str(outliers) + "<br>"
html = html + "Number of observations: "+ str( size) + "<br>"
html = html + "Num of unique values: "+ str(unique) + "<br>"
html = html + "Missing cells: "+ str( missing) + "<br>"
html = html + "Missing cells%: "+ str( missing) + "<br>"
return html, outliers
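# Sketch of the z-score rule applied above (illustrative only; `series` is a hypothetical
# numeric array and `thresh` plays the same role as the method's parameter):
#
#   import numpy as np
#   z_scores = (series - np.mean(series)) / np.std(series)
#   outliers = series[np.abs(z_scores) > thresh]   # points more than `thresh` standard deviations from the mean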
#################################################################################################
# This function will return a Features dataframe, a Series with the Target and a list of the
# features that will be used to train the model. The Features dataframe may contain additional features,
# such as the protected features (when not used for training), and the columns containing values from before
# any kind of merge of the data.
#################################################################################################
def get_features_and_target(self,
data_frame,
data_summary,
include_protected,
continuous_to_binary_target = False):
if include_protected == True:
cols = data_summary.protected_after_transform + data_summary.non_protected_after_transform
else:
cols = data_summary.non_protected_after_transform
if data_summary.y_value in cols:
cols.remove(data_summary.y_value)
if continuous_to_binary_target == True:
if not data_summary.y_value + "_binary" in list(data_frame.columns):
display(HTML("""<font style='color:orange;'>You have set <b>'continuous_to_binary_target = True'</b>, but no translation from continuous to binary was detected!<br>
<font style='color:black;'>If the label is a
continuous number then run the helper method
<b>set_decision_boundary(data_frame, data_summary)</b>
to convert a binary label to a continuous label
before calling <b>get_features_and_target</b>.
"""))
return data_frame.drop(data_summary.y_value, axis = 1), data_frame[data_summary.y_value], cols
else:
return data_frame.drop([data_summary.y_value, data_summary.y_value + "_binary"], axis = 1), data_frame[data_summary.y_value + "_binary"], cols
return data_frame.drop(data_summary.y_value, axis = 1), data_frame[data_summary.y_value], cols
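# Usage sketch (illustrative only). `data_frame` and `data_summary` are the objects returned
# by reload_data(); `helper` is a hypothetical instance of this class.
#
#   X_df, y_series, training_cols = helper.get_features_and_target(data_frame,
#                                                                  data_summary,
#                                                                  include_protected=False)
#   # X_df may still carry protected/bookkeeping columns; train only on X_df[training_cols].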
#################################################################################################
#
#
#################################################################################################
def shap_analysis(self, shap_values, explainer, x, data_summary):
try:
shap.initjs()
except:
print ( 'shap.initjs() failed to load javascript')
outOverview = widgets.Output(layout={})
out1 = widgets.Output(layout={})
out2 = widgets.Output(layout={})
out3 = widgets.Output(layout={})
out4 = widgets.Output(layout={})
out5 = widgets.Output(layout={})
tab_contents = [out1, out2, out3, out4, out5]
children = tab_contents
tab = widgets.Tab(style={'description_layout':'auto', 'title_layout':'auto'})
tab.children = children
tab.set_title(0, "Summary Importance plot")
tab.set_title(1, "Importance plot")
tab.set_title(2, "Dependence plot")
tab.set_title(3, "Individual force plot")
tab.set_title(4, "Collective force plot")
local_layout = {'width': 'auto', 'visibility':'visible'}
local_layout_hidden = {'width': 'auto', 'visibility':'hidden'}
local_style = {'description_width':'initial'}
display(outOverview)
display(tab)
_choose = widgets.Dropdown(description = "Select Feature",
options = list(x.columns),
layout = local_layout,
style = local_style)
all_comb = {}
for f in data_summary.protected_after_transform:
for a in x[f].unique():
all_comb[(f+":"+ str(a))] = a
_protected = widgets.Dropdown(description = "Filter by Protected Feature",
options = all_comb,
layout = local_layout,
style = local_style)
toggle = widgets.ToggleButton(
value=False,
description='Generate',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
)
with outOverview:
display (HTML('''<h3>SHAP interpretability via Machnamh</h3>(SHapley Additive exPlanations) KernelExplainer is a
model-agnostic method which builds a weighted linear regression by using training/test data,
training/test predictions, and whatever function produces the predicted values.
SHAP values represent a feature's responsibility for a change in the model output.
It computes the variable importance values based on the Shapley values from game theory,
and the coefficients from a local linear regression. </br>
see: https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf <br>
It offers a high level of interpretability for a model, through two distinct approaches:
<br><b>Global interpretability</b> — the SHAP values can show how much each predictor contributes,
either positively or negatively, to the target variable. This is similar to a variable importance plot, but it also indicates the positive or negative relationship between each feature and the target output.
<br><b>Local interpretability</b> — each observation is assigned its own SHAP value.
This provides a very granular level of transparency and interpretability where we can
determine why an individual case receives a specific prediction and the contribution of
each feature to that prediction. Variable importance algorithms usually
only show results across the entire dataset, not for each individual case.'''))
with out1:
html_desc = '''
<b>Summary importance plot </b><br><b>Feature importance:</b> Variables are ranked in descending order. The top variables contribute more to the model than the bottom ones and thus have high predictive power.<br>
'''
wi1 = widgets.Output(layout=Layout(width='60%'))
with wi1:
shap.summary_plot(shap_values, x, plot_type="bar");
wi2 = widgets.HTML(value=html_desc,layout=Layout(width='30%') ) ;
sidebyside = widgets.HBox([wi1, wi2])
display (sidebyside)
with out2:
display (HTML('''<b>Importance plot:</b> lists the most significant variables in descending order.
The top variables contribute more to the model than the bottom ones and thus have high predictive power.'''))
html_desc = '''
<b>Feature importance:</b> Variables are ranked in descending order.<br>
<b>Impact:</b> The horizontal location indicates whether the effect of the feature is associated with a higher or lower prediction.<br>
<b>Original value:</b> Colour indicates whether the feature value is high (red) or low (blue) for the particular observation.<br>
<b>Correlation:</b> A high or low impact (indicated by colour) combined with a positive or negative impact (indicated by position on the x-axis).
'''
wi1 = widgets.Output(layout={})
with wi1:
shap.summary_plot(shap_values, x);
wi2 = widgets.HTML(value=html_desc,layout=Layout(width='30%') )
sidebyside = widgets.HBox([wi1, wi2])
display (sidebyside)
with out3:
display (HTML ("The decision variable is " + str(data_summary.y_value)))
display (HTML ('''To understand how a single feature affects the output of the model, a
dependence plot plots the SHAP value of that feature vs. the value of
the feature for all the examples in a dataset.'''))
def show_dependancy_plot(choose):
html_desc = '''The dependence plots show the relationship between the target ('''+ data_summary.y_value + ''')
and the selected feature ('''+ choose + ''') to review whether it is linear, monotonic or
more complex. The additional variable is the variable that the selected feature (''' + choose + ''')
interacts with most frequently. Vertical dispersion at a single value represents interaction
effects with the other features. '''
display (HTML(html_desc))
display (shap.dependence_plot(choose, shap_values, x))
interact(show_dependancy_plot, choose = _choose);
with out4:
display (HTML ("The decision variable is " + str(data_summary.y_value)))
display (HTML ('''<b>Individual Force plot</b> shows how each feature contributes to pushing the model output
away from the base value (the average model output over the dataset passed) towards the
final prediction. Features pushing the prediction higher are shown in red,
those pushing the prediction lower are shown in blue.'''))
# visualize a single prediction's explanation (matplotlib=True avoids the Javascript renderer)
display (HTML ("<b>Generate random sample to investigate:</b>"))
def show_individual_force_plot(protected, toggle):
feat = _protected.label.split(':')[0]
index = x[x[feat] == _protected.value].sample(1).index[0]
display (shap.force_plot(explainer.expected_value,
shap_values[index,:],
x.iloc[index,:],
matplotlib=True))
interact(show_individual_force_plot, protected = _protected, toggle = toggle);
with out5:
display (HTML ("The decision variable is " + str(data_summary.y_value)))
display (HTML ('''<b>Collective Force plot:</b> a combination of all the individual force plots, each rotated 90 degrees and stacked
horizontally, to explain an entire dataset.'''))
display (shap.force_plot(explainer.expected_value,
shap_values,
x))
#################################################################################################
#
#
#################################################################################################
def get_protected (self, summary):
return summary.protected_before_transform
#################################################################################################
#
#
#################################################################################################
#def get_protected_before_merge (self, summary):
# return summary.all_columns_in_x
# print (list([all_columns_in_x.contains('_bm')]))
# print (list(X_train.columns[X_train.columns.str.contains('_benc')]))
#################################################################################################
#
#
#################################################################################################
#def get_protected_before_transform (self, summary):
# all_cols = summary.all_columns_in_x
# prot = summary.protected_x
# new_prot = []
# for f in prot:
# found = False
# if f+'_bm' in all_cols:
# new_prot.append(f+'_bm')
# found = True
# if f+'_benc' in all_cols:
# new_prot.append(f+'_benc')
# found = True
# if f.endswith('_oh_benc'):
# new_prot.append(f)
# found = True
#
# if found == False:
# new_prot.append(f)
# return new_prot
#################################################################################################
#
#
#################################################################################################
def serialise_ranked_list(self, X, y, y_prob_actual, y_prob_1, y_pred, save_to_path = './', name = ""):
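# dill is used for serialisation here and in the reload methods below; unlike the
# standard pickle module it can also handle lambdas and locally defined functions.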
ranked_dict = {}
ranked_dict['X'] = X
ranked_dict['y'] = y
ranked_dict['y_prob_actual'] = y_prob_actual
ranked_dict[ 'y_prob_1'] = y_prob_1
ranked_dict['y_pred'] = y_pred
path = save_to_path + name + "_ranked_data.pickle"
dill.dump(ranked_dict, file = open(path, "wb"))
display (HTML("Serialised data to dictionary 'ranked_dict', at " + path))
return path
#################################################################################################
#
#
#################################################################################################
def reload_ranked_list (self, _path):
path = _path
ranked_dict = dill.load(open(path, "rb"))
X = ranked_dict['X']
y = ranked_dict['y']
y_prob_actual = ranked_dict['y_prob_actual']
y_prob_1 = ranked_dict[ 'y_prob_1']
y_pred = ranked_dict['y_pred']
return X, y, y_prob_actual, y_prob_1, y_pred
#################################################################################################
#
#
#################################################################################################
def run_shap_and_serialise_response(self, X_in,
model_predict,
count = 100,
save_to_path = './'):
x = shap.sample( X_in, count)
x = x.reset_index(drop=True)
explainer = shap.KernelExplainer(model_predict, x ) # The second argument is the "background" dataset; a size of 100 rows is gently encouraged by the code
shap_values = explainer.shap_values(x, l1_reg="num_features(10)")
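# Note: l1_reg="num_features(10)" asks KernelExplainer to keep only the ten most
# influential features in each local linear model (via L1 regularisation), which
# speeds up the estimate at the cost of some fidelity.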
print(f'length of SHAP values: {len(shap_values)}')
print(f'Shape of each element: {shap_values[0].shape}')
path = save_to_path + "shap_values.pickle"
print ("Shap_values saved to", path)
dill.dump(shap_values, file = open(path, "wb"))
path = save_to_path + "shap_explainer.pickle"
print ("Shap_explainer saved to", path)
dill.dump(explainer, file = open(path, "wb"))
path = save_to_path +"shap_x.pickle"
print ("Shap_explainer saved to", path)
dill.dump(x, file = open(path, "wb"))
display (HTML ("The model-agnostic SHAP explainer <b>'KernelExplainer'</b> has been used."))
return explainer, shap_values, x
#################################################################################################
#
#
#################################################################################################
def reload_shap_data (self, _path):
path = _path
shap_values_path = path + "/shap_values.pickle"
explainer_path = path + "/shap_explainer.pickle"
x_path = path + "/shap_x.pickle"
shap_values = dill.load(open(shap_values_path, "rb"))
explainer = dill.load(open(explainer_path, "rb"))
x = dill.load(open(x_path, "rb"))
# Reload the file
return shap_values, explainer, x
#################################################################################################
#
#
#################################################################################################
def get_features_type(self, df, unique_max):
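# A column is treated as categorical if its dtype is object/category/bool, or if it is
# numeric but has at most `unique_max` distinct non-null values; everything else is numeric.
# Hypothetical example (column names are illustrative only):
#     get_features_type(df, unique_max=10)
#     -> (['gender', 'grade', 'approved'], ['income', 'age'])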
numeric_cat = []
obj = []
cat = []
boo = []
#pandas data types.
# datetime64 - currently not supported by the tool
# timedelta[ns] - currently not supported by the tool
# object
# int64
# float64
# bool
# category
for col in df.select_dtypes(include='number').columns:
if len(df[col].dropna().unique()) <= unique_max:
numeric_cat.append(col)
for col in df.select_dtypes(include='object').columns:
if len(df[col].dropna().unique()) <= unique_max:
obj.append(col)
for col in df.select_dtypes(include='category').columns:
if len(df[col].dropna().unique()) <= unique_max:
cat.append(col)
for col in df.select_dtypes(include='bool').columns:
if len(df[col].dropna().unique()) <= unique_max:
boo.append(col)
all_categorical = cat + obj + numeric_cat + boo
all_numeric = list (df.columns)
all_numeric = [ele for ele in all_numeric if ele not in all_categorical]
return all_categorical, all_numeric
#################################################################################################
#
#
#################################################################################################
def get_feature_info (self, feature, unique_values, group_descriptions_dict, label_encoding_dict, oh_encoding_dict, merged_dict, trace = False):
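# Returns four related views of a feature's values, derived from the supplied
# encoding/description dictionaries:
#   _choice_dict_for_drop  - {display label: stored value} pairs for a dropdown widget
#   original_values        - values before any label encoding
#   label_encoded_values   - encoded values ([] if the feature was not label encoded)
#   descriptions           - human-readable descriptions (falling back to the values themselves)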
values = unique_values
decoded_values = []
original_values = []
label_encoded_values = []
keys = []
if feature in oh_encoding_dict:
original_feature_bohe = oh_encoding_dict[feature]["Original_col"]
original_value_bohe = oh_encoding_dict[feature]["Original_val"]
if trace == True:
print ("One Hot Encoded from feature:", original_feature_bohe, "value:",original_value_bohe)
print ("One Hot Encoded values:", values)
_choice_dict_for_drop = dict(zip(values, values))
original_values = values
label_encoded_values = []
descriptions = values
return _choice_dict_for_drop, original_values, label_encoded_values, descriptions
def get_key(val): #local method in get_feature_info
for key, value in label_encoding_dict[feature].items():
if val == value:
return key
return None
# If the feature is already encoded, unique_values will be the encoded versions.
# If the feature has no saved description, return {value: value, ...} as the
# key/value pairs for any dropdown, regardless of whether it is encoded or not.
if feature not in group_descriptions_dict:
original_values = values
if feature in label_encoding_dict:
for value in values:
decoded_values.append(get_key(value))
original_values = decoded_values
label_encoded_values = values
descriptions = original_values
_choice_dict_for_drop = dict(zip(original_values, label_encoded_values))
else:
_choice_dict_for_drop = dict(zip(values, values))
descriptions = values
if trace == True:
print ("Original values ", original_values)
print ("Label Encoded values ", label_encoded_values )
print ("Description ", descriptions )
print ("Key/Value for dropdown: ", _choice_dict_for_drop)
return _choice_dict_for_drop, original_values, label_encoded_values, descriptions
if feature in group_descriptions_dict:
#first check if the input feature unique_values are the result of an Encode
if feature in label_encoding_dict:
for value in values:
decoded_values.append(get_key(value))
original_values = decoded_values
label_encoded_values = values
if trace == True:
print ("Original values ", original_values)
print ("Label Encoded values ", label_encoded_values )
if feature not in label_encoding_dict:
original_values = values
label_encoded_values = []
if trace == True:
print ("Original values ", original_values)
print ("Label Encoded values ", label_encoded_values )
for key in original_values:
if key not in group_descriptions_dict[feature]:
keys.append(key)
else:
keys.append(group_descriptions_dict[feature][key])
# using zip()
# to convert lists to dictionary
_choice_dict_for_drop = dict(zip(keys,values))
descriptions = keys
if trace == True:
print ("Description: ", keys)
print ("Key/Value for dropdown: ", _choice_dict_for_drop)
if feature in merged_dict:
print ("Merged Values: ", merged_dict[feature])
return _choice_dict_for_drop, original_values, label_encoded_values, descriptions
#################################################################################################
#
#
#################################################################################################
def phi_k_correlation(self, df):
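# Phi_K is a correlation coefficient that works across mixed variable types
# (categorical, ordinal and interval), hence numeric columns are passed as
# interval_cols while low-cardinality columns are included as categorical.
# Note: `config["categorical_maximum_correlation_distinct"]` follows the
# pandas-profiling convention and is assumed to be available in this module.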
intcols = []
selcols = []
for col in df.columns.tolist():
try:
tmp = (
df[col]
.value_counts(dropna=False)
.reset_index()
.dropna()
.set_index("index")
.iloc[:, 0]
)
if tmp.index.inferred_type == "mixed":
continue
if pd.api.types.is_numeric_dtype(df[col]):
intcols.append(col)
selcols.append(col)
elif df[col].nunique() <= config[
"categorical_maximum_correlation_distinct"
].get(int):
selcols.append(col)
except (TypeError, ValueError):
continue
if len(selcols) > 1:
correlation = df[selcols].phik_matrix(interval_cols=intcols)
return correlation
else:
return None
#################################################################################################
#
#
#################################################################################################
def Benfords_law(self, df, feature, protected):
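# Benford's law predicts the frequency of leading digits in many naturally occurring
# numeric datasets (1 appears as the leading digit roughly 30% of the time, 9 under 5%).
# Comparing each protected group's distribution of `feature` against this expectation
# can flag data-quality or manipulation issues affecting a particular group.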
groups = df[protected].dropna().unique()
tab = widgets.Tab()
widget_arr = {}
tab_titles = []
fit_output = widgets.Output(layout={})
for group in groups:
filtered = df[df[protected]==group]
X = filtered[feature].values
# # Make fit
with fit_output:
out = bl.fit(X)
# # Plot
widget_arr[group] = widgets.Output(layout={})
with widget_arr[group]:
display(bl.plot(out,
title="Benford's law for "+ str(feature) + ' and group '+ str(group),
figsize=(8,4)));
tab_titles.append(str(group))
widget_arr["Output"] = fit_output
tab.children = list(widget_arr.values())
for x in range(len(tab_titles)):
tab.set_title(x, tab_titles[x])
tab.set_title(x+1,"Output Trace")
return tab
def de_hot_encode_feature (self, df, original_col, hot_encoded_cols):
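# Reverses one-hot encoding by stripping the "<original_col>_" prefix from whichever
# dummy column is set to 1. Hypothetical example: with original_col='gender' and
# hot_encoded_cols=['gender_male', 'gender_female'], a row with gender_male == 1
# gets df['gender'] = 'male'.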
num_chars = len(original_col) + 1
df[original_col] = 0
for col in hot_encoded_cols:
map_to = col[num_chars:]
df.loc[df[col] == 1, original_col] = map_to
return df[original_col]
def map_values (self, data_frame, protected_attributes_list, feature_data_dict):
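# Walks each protected attribute and, using the metadata in feature_data_dict,
# converts its stored (possibly one-hot or label encoded) values back into
# human-readable descriptions so that downstream reports show the original group names.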
for feat in protected_attributes_list:
#create a dictionary out of two lists..
if feature_data_dict[feat]['values_merged'] == True:
display ("the feature was merged")
display ("column before merge is ", feature_data_dict[feat]['before_merge_col'])
#data_frame[feat] = data_frame['before_merge_col']
if feature_data_dict[feat]['one_hot_enc'] == True:
#One_hot was applied
original_col = feature_data_dict[feat]['one_hot_enc_col_before']
hot_encoded_cols = feature_data_dict[feat]['one_hot_enc_cols_after']
data_frame[feat] = self.de_hot_encode_feature(data_frame, original_col, hot_encoded_cols)
#now if there are descriptions use the descriptions
if len(feature_data_dict[feat]['values_description']) != 0:
map_dictionary = dict(zip(feature_data_dict[feat]['original_values'],
feature_data_dict[feat]['values_description']))
data_frame[feat] = data_frame[feat].map(map_dictionary)
elif feature_data_dict[feat]['label_enc'] == True:
#Label encoding was applied
#if there are descriptions use the descriptions otherwise use the non-label encoded values
if len(feature_data_dict[feat]['values_description']) != 0:
map_dictionary = dict(zip(feature_data_dict[feat]['label_enc_values'],
feature_data_dict[feat]['values_description']))
else:
map_dictionary = dict(zip(feature_data_dict[feat]['label_enc_values'],
feature_data_dict[feat]['original_values']))
data_frame[feat] = data_frame[feat].map(map_dictionary)
else:
#No encoding was applied
if len(feature_data_dict[feat]['values_description']) != 0:
map_dictionary = dict(zip(feature_data_dict[feat]['original_values'],
feature_data_dict[feat]['values_description']))
data_frame[feat] = data_frame[feat].map(map_dictionary)
return data_frame[protected_attributes_list]
#################################################################################################
# Adapted from a Medium article on plotting sklearn confusion matrices with seaborn;
# used here to render one confusion-matrix heatmap per group of the selected protected feature.
#################################################################################################
def make_confusion_matrix(self,
conf_matrix,
feature,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=None,
cmap='Blues',
title=None):
cm_all = conf_matrix.copy()
'''
This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization.
Arguments
---------
conf_matrix: The full Aequitas confusion-matrix DataFrame.
feature: The protected feature to view.
group_names: List of strings that represent the labels, row by row, to be shown in each square.
categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'.
count: If True, show the raw counts in the confusion matrix. Default is True.
percent: If True, show the proportion of observations in each cell. Default is True.
cbar: If True, show the colour bar. The cbar values are based on the values in the confusion matrix.
Default is True.
xyticks: If True, show x and y ticks. Default is True.
xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
sum_stats: If True, display summary statistics below the figure. Default is True.
figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
See http://matplotlib.org/examples/color/colormaps_reference.html
title: Title for the heatmap. Default is None.
'''
out_dict = {}
cm_all = cm_all[cm_all['attribute_name'] == feature]
groups = cm_all['attribute_value'].unique()
for group in groups:
cm_group = cm_all[cm_all['attribute_value'] == group].squeeze() # squeeze() converts the single-row DataFrame to a Series
tn = cm_group['tn']
fp = cm_group['fp']
fn = cm_group['fn']
tp = cm_group['tp']
cf = np.array([[tn, fp],
[ fn, tp]])
out_dict[group] = widgets.Output(layout = {'border': 'solid 1px white', 'padding': '25px'})
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
blanks = ['' for i in range(cf.size)]
if group_names and len(group_names)==cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten()/np.sum(cf)]
else:
group_percentages = blanks
box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels,group_counts,group_percentages)]
box_labels = np.asarray(box_labels).reshape(cf.shape[0],cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
if sum_stats:
#Accuracy is sum of diagonal divided by total observations
accuracy = np.trace(cf) / float(np.sum(cf))
#if it is a binary confusion matrix, show some more stats
if len(cf)==2:
#Metrics for Binary Confusion Matrices
precision = cf[1,1] / sum(cf[:,1])
recall = cf[1,1] / sum(cf[1,:])
f1_score = 2*precision*recall / (precision + recall)
stats_text = """\n\nAccuracy={:0.3f}
\nPrecision/Positive predictive value(PPV)={:0.3f}
\nRecall/True Positive Rate/Sensitivity={:0.3f}
\nF1 Score={:0.3f}""".format(
accuracy,precision,recall,f1_score)
html = '<font style="font-family:sans-serif; font-size:10px;color:black;">'
html = html + "Accuracy: " + str(round(accuracy,2)) + "<br>"
html = html + "Precision/Positive predictive value(PPV): " + str(round(precision,2)) + "<br>"
html = html + "Recall/True Positive Rate/Sensitivity: " + str(round(recall,2)) + "<br>"
html = html + "F1 Score=: " + str(round(f1_score,2)) + "<br>"
else:
stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
else:
stats_text = ""
stats_text = '' # the per-group HTML summary built above is displayed instead of appending stats to the axis label
# SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
if figsize==None:
#Get default figure size if not set
figsize = plt.rcParams.get('figure.figsize')
if xyticks==False:
#Do not show categories if xyticks is False
categories=False
# MAKE THE HEATMAP VISUALIZATION
plt.figure(figsize=figsize)
sns.heatmap(cf,annot=box_labels,
fmt="",
cmap=cmap,
cbar=cbar,
xticklabels=categories,
yticklabels=categories
)
if xyplotlabels:
plt.ylabel('True label')
plt.xlabel('Predicted label' + stats_text)
else:
plt.xlabel(stats_text)
if title:
plt.title(title)
with out_dict[group]:
display(HTML(group))
plt.show()
display(HTML(html))
l = list(out_dict.values())
n = 3 # display the per-group confusion matrices three to a row
outList = []
for i in range(n, len(l) + n, n):
outList.append(l[i-n:i])
for chunk_of_3 in outList:
display (widgets.HBox([*chunk_of_3], layout = Layout(
padding = '10px',
width='100%',
display='flex',
align_items='stretch',
align_content='space-between',
)
))
|
PypiClean
|
/ptcn-0.1.3.tar.gz/ptcn-0.1.3/README.md
|
# ptcn
TensorFlow (2.x) implementation of a Temporal Convolutional Network architecture, with a probabilistic twist.
This project indulges a couple of curiosities:
1. Working with convolutional sequence-to-sequence models à la [An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling](https://arxiv.org/abs/1803.01271)
2. Adding a Bayesian twist to the network à la [Bayesian Segnet: Model Uncertainty in Deep Convolutional Encoder-Decoder Architectures for Scene Understanding](https://arxiv.org/abs/1511.02680)
This implementation has been inspired by other projects, including:
- https://github.com/locuslab/TCN
- https://github.com/Baichenjia/Tensorflow-TCN
- https://github.com/philipperemy/keras-tcn
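
As a rough, illustrative sketch (not ptcn's actual API; the function and argument names here are assumptions), a single dilated causal convolution block in the spirit of the TCN paper, with dropout kept active at inference time for the Bayesian/MC-dropout twist, could look like:

```python
import tensorflow as tf

def temporal_block(x, filters, kernel_size, dilation_rate, dropout_rate=0.1, mc_dropout=True):
    """One residual TCN block: two dilated causal convolutions plus spatial dropout."""
    skip = x
    for _ in range(2):
        x = tf.keras.layers.Conv1D(filters, kernel_size,
                                   padding="causal",
                                   dilation_rate=dilation_rate,
                                   activation="relu")(x)
        # training=True keeps dropout stochastic at inference, enabling MC-dropout
        # uncertainty estimates à la Bayesian SegNet.
        x = tf.keras.layers.SpatialDropout1D(dropout_rate)(x, training=mc_dropout)
    if skip.shape[-1] != filters:
        skip = tf.keras.layers.Conv1D(filters, 1, padding="same")(skip)
    return tf.keras.layers.Add()([x, skip])
```

Stacking such blocks with exponentially increasing `dilation_rate` (1, 2, 4, ...) gives the receptive-field growth described in the TCN paper; running several stochastic forward passes yields an uncertainty estimate over the sequence output.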
|
PypiClean
|