id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value)
---|---|---
/idem_aws-4.0.1-py3-none-any.whl/idem_aws/exec/aws/apigateway/domain_name.py
|
__func_alias__ = {"list_": "list"}
from typing import Dict
async def get(hub, ctx, name: str, resource_id: str) -> Dict:
"""
Get an API Gateway domain name resource from AWS with the domain name as the resource_id.
Args:
name(str):
The name of the Idem state domain name.
resource_id(str):
AWS API Gateway domain name.
Returns:
Dict[str, Any]
Examples:
Calling from the CLI:
.. code-block:: bash
$ idem exec aws.apigateway.domain_name.get name="unmanaged_domain_names" resource_id="resource_id"
Using in a state:
.. code-block:: yaml
my_unmanaged_resource:
exec.run:
- path: aws.apigateway.domain_name.get
- kwargs:
name: unmanaged_domain_name
resource_id: resource_id
"""
result = dict(comment=[], ret=None, result=True)
ret = await hub.exec.boto3.client.apigateway.get_domain_name(
ctx=ctx, domainName=resource_id
)
if not ret["result"]:
if "NotFoundException" in str(ret["comment"]):
result["comment"].append(
hub.tool.aws.comment_utils.get_empty_comment(
resource_type="aws.apigateway.domain_name", name=name
)
)
result["comment"] += list(ret["comment"])
return result
result["comment"] += list(ret["comment"])
result["result"] = False
return result
result[
"ret"
] = hub.tool.aws.apigateway.domain_name.convert_raw_domain_name_to_present(
raw_resource=ret["ret"], idem_resource_name=name
)
return result
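# From other idem plugins this function is typically reachable on the hub as
# hub.exec.aws.apigateway.domain_name.get(ctx, name=..., resource_id=...), matching the
# "aws.apigateway.domain_name.get" path used in the state example above (a convention
# sketch; the exact hub layout depends on how idem merges this plugin).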
async def list_(hub, ctx, name: str = None):
"""Get the list of domain names for AWS APIGateway.
Args:
name (str, Optional):
The name of the Idem state for logging.
Returns:
Dict[str, Any]
Examples:
Calling from the CLI:
.. code-block:: bash
idem exec aws.apigateway.domain_name.list
Calling this exec module function from within a state:
.. code-block:: yaml
my_unmanaged_resource:
exec.run:
- path: aws.apigateway.domain_name.list
- kwargs:
name: my-resource-name
"""
result = dict(comment=[], ret=[], result=True)
get_domain_names_ret = await hub.exec.boto3.client.apigateway.get_domain_names(ctx)
if (
"NotFoundException" in str(get_domain_names_ret["comment"])
or get_domain_names_ret["ret"]["items"] == []
):
result["comment"].append(
hub.tool.aws.comment_utils.get_empty_comment(
resource_type="aws.apigateway.domain_name", name="domain_name_resource"
)
)
return result
for domain_name in get_domain_names_ret["ret"]["items"]:
idem_resource_name = domain_name["domainName"]
get_translated_resource = (
hub.tool.aws.apigateway.domain_name.convert_raw_domain_name_to_present(
raw_resource=domain_name, idem_resource_name=idem_resource_name
)
)
result["ret"].append(get_translated_resource)
return result
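# Return contract shared by get() and list_() above: a dict of the form
#   {"result": bool, "comment": list[str], "ret": ...}
# where get() places a single "present"-form resource dict (or None) in "ret" and
# list_() places a list of such dicts, one per domain name returned by AWS.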
|
PypiClean
|
/TRAPpy-6.0.1-py3-none-any.whl/trappy/plotter/AbstractDataPlotter.py
|
"""This is the template class that all Plotters inherit"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from builtins import object
from abc import abstractmethod, ABCMeta
from pandas import DataFrame
import re
from trappy.utils import listify
from functools import reduce
from future.utils import with_metaclass
# pylint: disable=R0921
# pylint: disable=R0903
class AbstractDataPlotter(with_metaclass(ABCMeta, object)):
"""This is an abstract data plotting Class defining an interface
for the various Plotting Classes"""
def __init__(self, traces=None, attr=None, templates=None):
self._event_map = {}
self._attr = attr if attr else {}
self.traces = traces
self.templates = templates
@abstractmethod
def view(self):
"""View the graph"""
raise NotImplementedError("Method Not Implemented")
@abstractmethod
def savefig(self, path):
"""Save the image as a file
:param path: Location of the Saved File
:type path: str
"""
raise NotImplementedError("Method Not Implemented")
def _check_data(self):
"""Internal function to check the received data"""
data = listify(self.traces)
if len(data):
mask = [isinstance(x, DataFrame) for x in data]
data_frame = reduce(lambda x, y: x and y, mask)
sig_or_template = self.templates or "signals" in self._attr
if not data_frame and not sig_or_template:
raise ValueError(
"Cannot understand data. Accepted DataFormats are pandas.DataFrame or trappy.FTrace/BareTrace/SysTrace (with templates)")
elif data_frame and "column" not in self._attr:
raise ValueError("Column not specified for DataFrame input")
else:
raise ValueError("Empty Data received")
def _parse_value(self, signal_def):
"""Parse a signal definition into a (template, column) tuple
:param signal_def: A signal definition. E.g. "trace_class:column"
:type signal_def: str
"""
match = re.match(r"(?P<event>[^:]+):(?P<column>[^:]+)(?P<color>:.+)?",
signal_def)
if not match:
raise ValueError(
'Invalid signal definition "{}". '
'Should have the form "trace_class:column" '
'e.g. "cpu_frequency:frequency"'.format(signal_def))
event = match.group("event")
column = match.group("column")
color_match = match.group("color")
if color_match:
color_list = color_match[1:].split(",", 2)
color = [int(n, 16) if n.startswith("0x") else int(n) for n in color_list]
else:
color = None
try:
return self._event_map[event], column, color
except KeyError:
for trace in listify(self.traces):
if event in trace.class_definitions:
self._event_map[event] = trace.class_definitions[event]
return self._event_map[event], column, color
raise ValueError(
"Event: " +
event +
" not found in Trace Object")
def _describe_signals(self):
"""Internal Function for populating templates and columns
from signals
"""
if "column" in self._attr or self.templates:
raise ValueError("column/templates specified with values")
self._attr["column"] = []
self.templates = []
colors = []
for value in listify(self._attr["signals"]):
template, column, color = self._parse_value(value)
self.templates.append(template)
self._attr["column"].append(column)
colors.append(color)
if any(colors):
self._attr["colors"] = colors
|
PypiClean
|
/pymaclab-0.95.9.tar.gz/pymaclab-0.95.9/sympycore/heads/differential.py
|
from ..core import init_module
from .base import AtomicHead, Expr, heads_precedence
init_module.import_heads()
init_module.import_lowlevel_operations()
class DifferentialHead(AtomicHead):
def base_exp(self, cls, expr):
return expr, 1
def term_coeff(self, cls, expr):
return expr, 1
def commutative_mul_number(self, cls, lhs, rhs):
return term_coeff_new(cls, (lhs, rhs))
def add(self, cls, lhs, rhs):
h,d = rhs.pair
if h is NUMBER:
if d==0: return lhs
return cls(TERM_COEFF_DICT, {lhs:1, cls(NUMBER,1):d})
elif h is self:
if lhs.data == d:
return cls(TERM_COEFF, (lhs, 2))
return cls(TERM_COEFF_DICT, {lhs:1, rhs:1})
elif h is TERM_COEFF:
t,c = d
if lhs==t:
return term_coeff_new(cls, (t, c+1))
return cls(TERM_COEFF_DICT, {t:c, lhs:1})
elif h is TERM_COEFF_DICT:
data = d.copy()
dict_add_item(cls, data, lhs, 1)
return term_coeff_dict_new(cls, data)
raise NotImplementedError(repr((self, h)))
def commutative_mul(self, cls, lhs, rhs):
h, d = rhs.pair
if h is NUMBER:
return cls.commutative_mul_number(cls, lhs, d)
if h is self:
if lhs.data==d:
return cls(POW, (lhs, 2))
return cls(BASE_EXP_DICT, {lhs:1, rhs:1})
if h is POW:
base, exp = d
if base==lhs:
return pow_new(cls, (base, exp + 1))
return cls(BASE_EXP_DICT, {base:exp, lhs:1})
raise NotImplementedError(repr((self, h)))
def pow(self, cls, base, exp):
return pow_new(cls, (base, exp))
pow_number = pow
class DiffHead(DifferentialHead):
"""
FunctionAlgebra(DIFF, x) - differential with respect to x
x is symbol
"""
def __repr__(self): return 'DIFF'
def is_data_ok(self, cls, data):
if isinstance(data, Expr):
if data.head is SYMBOL: return
return 'data must have SYMBOL head but got %r' % (data.head)
return 'data must be a symbol but got %s' % (type(data))
def data_to_str_and_precedence(self, cls, data):
return SUBSCRIPT.data_to_str_and_precedence(cls, (Expr(SYMBOL, 'D'), (data,)))
def diff_apply(self, cls, data, diff, expr):
return expr.head.diff(type(expr), expr.data, expr, data.data, 1)
class FDiffHead(DifferentialHead):
"""
FunctionAlgebra(FDIFF, x) - differential with respect to x-th argument
x is number or symbol
"""
def __repr__(self): return 'FDIFF'
def is_data_ok(self, cls, data):
if isinstance(data, Expr):
if data.head is SYMBOL or data.head is NUMBER: return
return 'data must have SYMBOL|NUMBER head but got %r' % (data.head)
return 'data must be a symbol or a number but got %s' % (type(data))
def data_to_str_and_precedence(self, cls, data):
return SUBSCRIPT.data_to_str_and_precedence(cls, (Expr(SYMBOL, 'FD'), (data,)))
def diff_apply(self, cls, data, diff, expr):
return expr.head.fdiff(type(expr), expr.data, expr, data.data, 1)
DIFF = DiffHead()
FDIFF = FDiffHead()
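# Both heads are exposed as module-level singletons. Applying them goes through
# diff_apply: DIFF dispatches to expr.head.diff(...) with the symbol carried in its
# data and differentiation order 1, while FDIFF dispatches to expr.head.fdiff(...)
# with the argument index (a symbol or a number) carried in its data.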
|
PypiClean
|
/pulumi_fortios-0.0.9.tar.gz/pulumi_fortios-0.0.9/pulumi_fortios/system_fortiguard.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SystemFortiguardArgs', 'SystemFortiguard']
@pulumi.input_type
class SystemFortiguardArgs:
def __init__(__self__, *,
antispam_timeout: pulumi.Input[int],
outbreak_prevention_timeout: pulumi.Input[int],
webfilter_timeout: pulumi.Input[int],
antispam_cache: Optional[pulumi.Input[str]] = None,
antispam_cache_mpercent: Optional[pulumi.Input[int]] = None,
antispam_cache_mpermille: Optional[pulumi.Input[int]] = None,
antispam_cache_ttl: Optional[pulumi.Input[int]] = None,
antispam_expiration: Optional[pulumi.Input[int]] = None,
antispam_force_off: Optional[pulumi.Input[str]] = None,
antispam_license: Optional[pulumi.Input[int]] = None,
anycast_sdns_server_ip: Optional[pulumi.Input[str]] = None,
anycast_sdns_server_port: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_day: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_delay: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_end_hour: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_start_hour: Optional[pulumi.Input[int]] = None,
auto_join_forticloud: Optional[pulumi.Input[str]] = None,
ddns_server_ip: Optional[pulumi.Input[str]] = None,
ddns_server_ip6: Optional[pulumi.Input[str]] = None,
ddns_server_port: Optional[pulumi.Input[int]] = None,
fds_license_expiring_days: Optional[pulumi.Input[int]] = None,
fortiguard_anycast: Optional[pulumi.Input[str]] = None,
fortiguard_anycast_source: Optional[pulumi.Input[str]] = None,
interface: Optional[pulumi.Input[str]] = None,
interface_select_method: Optional[pulumi.Input[str]] = None,
load_balance_servers: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache: Optional[pulumi.Input[str]] = None,
outbreak_prevention_cache_mpercent: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_mpermille: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_ttl: Optional[pulumi.Input[int]] = None,
outbreak_prevention_expiration: Optional[pulumi.Input[int]] = None,
outbreak_prevention_force_off: Optional[pulumi.Input[str]] = None,
outbreak_prevention_license: Optional[pulumi.Input[int]] = None,
persistent_connection: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_password: Optional[pulumi.Input[str]] = None,
proxy_server_ip: Optional[pulumi.Input[str]] = None,
proxy_server_port: Optional[pulumi.Input[int]] = None,
proxy_username: Optional[pulumi.Input[str]] = None,
sandbox_inline_scan: Optional[pulumi.Input[str]] = None,
sandbox_region: Optional[pulumi.Input[str]] = None,
sdns_options: Optional[pulumi.Input[str]] = None,
sdns_server_ip: Optional[pulumi.Input[str]] = None,
sdns_server_port: Optional[pulumi.Input[int]] = None,
service_account_id: Optional[pulumi.Input[str]] = None,
source_ip: Optional[pulumi.Input[str]] = None,
source_ip6: Optional[pulumi.Input[str]] = None,
update_build_proxy: Optional[pulumi.Input[str]] = None,
update_dldb: Optional[pulumi.Input[str]] = None,
update_extdb: Optional[pulumi.Input[str]] = None,
update_ffdb: Optional[pulumi.Input[str]] = None,
update_server_location: Optional[pulumi.Input[str]] = None,
update_uwdb: Optional[pulumi.Input[str]] = None,
vdom: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
videofilter_expiration: Optional[pulumi.Input[int]] = None,
videofilter_license: Optional[pulumi.Input[int]] = None,
webfilter_cache: Optional[pulumi.Input[str]] = None,
webfilter_cache_ttl: Optional[pulumi.Input[int]] = None,
webfilter_expiration: Optional[pulumi.Input[int]] = None,
webfilter_force_off: Optional[pulumi.Input[str]] = None,
webfilter_license: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a SystemFortiguard resource.
"""
pulumi.set(__self__, "antispam_timeout", antispam_timeout)
pulumi.set(__self__, "outbreak_prevention_timeout", outbreak_prevention_timeout)
pulumi.set(__self__, "webfilter_timeout", webfilter_timeout)
if antispam_cache is not None:
pulumi.set(__self__, "antispam_cache", antispam_cache)
if antispam_cache_mpercent is not None:
pulumi.set(__self__, "antispam_cache_mpercent", antispam_cache_mpercent)
if antispam_cache_mpermille is not None:
pulumi.set(__self__, "antispam_cache_mpermille", antispam_cache_mpermille)
if antispam_cache_ttl is not None:
pulumi.set(__self__, "antispam_cache_ttl", antispam_cache_ttl)
if antispam_expiration is not None:
pulumi.set(__self__, "antispam_expiration", antispam_expiration)
if antispam_force_off is not None:
pulumi.set(__self__, "antispam_force_off", antispam_force_off)
if antispam_license is not None:
pulumi.set(__self__, "antispam_license", antispam_license)
if anycast_sdns_server_ip is not None:
pulumi.set(__self__, "anycast_sdns_server_ip", anycast_sdns_server_ip)
if anycast_sdns_server_port is not None:
pulumi.set(__self__, "anycast_sdns_server_port", anycast_sdns_server_port)
if auto_firmware_upgrade is not None:
pulumi.set(__self__, "auto_firmware_upgrade", auto_firmware_upgrade)
if auto_firmware_upgrade_day is not None:
pulumi.set(__self__, "auto_firmware_upgrade_day", auto_firmware_upgrade_day)
if auto_firmware_upgrade_delay is not None:
pulumi.set(__self__, "auto_firmware_upgrade_delay", auto_firmware_upgrade_delay)
if auto_firmware_upgrade_end_hour is not None:
pulumi.set(__self__, "auto_firmware_upgrade_end_hour", auto_firmware_upgrade_end_hour)
if auto_firmware_upgrade_start_hour is not None:
pulumi.set(__self__, "auto_firmware_upgrade_start_hour", auto_firmware_upgrade_start_hour)
if auto_join_forticloud is not None:
pulumi.set(__self__, "auto_join_forticloud", auto_join_forticloud)
if ddns_server_ip is not None:
pulumi.set(__self__, "ddns_server_ip", ddns_server_ip)
if ddns_server_ip6 is not None:
pulumi.set(__self__, "ddns_server_ip6", ddns_server_ip6)
if ddns_server_port is not None:
pulumi.set(__self__, "ddns_server_port", ddns_server_port)
if fds_license_expiring_days is not None:
pulumi.set(__self__, "fds_license_expiring_days", fds_license_expiring_days)
if fortiguard_anycast is not None:
pulumi.set(__self__, "fortiguard_anycast", fortiguard_anycast)
if fortiguard_anycast_source is not None:
pulumi.set(__self__, "fortiguard_anycast_source", fortiguard_anycast_source)
if interface is not None:
pulumi.set(__self__, "interface", interface)
if interface_select_method is not None:
pulumi.set(__self__, "interface_select_method", interface_select_method)
if load_balance_servers is not None:
pulumi.set(__self__, "load_balance_servers", load_balance_servers)
if outbreak_prevention_cache is not None:
pulumi.set(__self__, "outbreak_prevention_cache", outbreak_prevention_cache)
if outbreak_prevention_cache_mpercent is not None:
pulumi.set(__self__, "outbreak_prevention_cache_mpercent", outbreak_prevention_cache_mpercent)
if outbreak_prevention_cache_mpermille is not None:
pulumi.set(__self__, "outbreak_prevention_cache_mpermille", outbreak_prevention_cache_mpermille)
if outbreak_prevention_cache_ttl is not None:
pulumi.set(__self__, "outbreak_prevention_cache_ttl", outbreak_prevention_cache_ttl)
if outbreak_prevention_expiration is not None:
pulumi.set(__self__, "outbreak_prevention_expiration", outbreak_prevention_expiration)
if outbreak_prevention_force_off is not None:
pulumi.set(__self__, "outbreak_prevention_force_off", outbreak_prevention_force_off)
if outbreak_prevention_license is not None:
pulumi.set(__self__, "outbreak_prevention_license", outbreak_prevention_license)
if persistent_connection is not None:
pulumi.set(__self__, "persistent_connection", persistent_connection)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if proxy_password is not None:
pulumi.set(__self__, "proxy_password", proxy_password)
if proxy_server_ip is not None:
pulumi.set(__self__, "proxy_server_ip", proxy_server_ip)
if proxy_server_port is not None:
pulumi.set(__self__, "proxy_server_port", proxy_server_port)
if proxy_username is not None:
pulumi.set(__self__, "proxy_username", proxy_username)
if sandbox_inline_scan is not None:
pulumi.set(__self__, "sandbox_inline_scan", sandbox_inline_scan)
if sandbox_region is not None:
pulumi.set(__self__, "sandbox_region", sandbox_region)
if sdns_options is not None:
pulumi.set(__self__, "sdns_options", sdns_options)
if sdns_server_ip is not None:
pulumi.set(__self__, "sdns_server_ip", sdns_server_ip)
if sdns_server_port is not None:
pulumi.set(__self__, "sdns_server_port", sdns_server_port)
if service_account_id is not None:
pulumi.set(__self__, "service_account_id", service_account_id)
if source_ip is not None:
pulumi.set(__self__, "source_ip", source_ip)
if source_ip6 is not None:
pulumi.set(__self__, "source_ip6", source_ip6)
if update_build_proxy is not None:
pulumi.set(__self__, "update_build_proxy", update_build_proxy)
if update_dldb is not None:
pulumi.set(__self__, "update_dldb", update_dldb)
if update_extdb is not None:
pulumi.set(__self__, "update_extdb", update_extdb)
if update_ffdb is not None:
pulumi.set(__self__, "update_ffdb", update_ffdb)
if update_server_location is not None:
pulumi.set(__self__, "update_server_location", update_server_location)
if update_uwdb is not None:
pulumi.set(__self__, "update_uwdb", update_uwdb)
if vdom is not None:
pulumi.set(__self__, "vdom", vdom)
if vdomparam is not None:
pulumi.set(__self__, "vdomparam", vdomparam)
if videofilter_expiration is not None:
pulumi.set(__self__, "videofilter_expiration", videofilter_expiration)
if videofilter_license is not None:
pulumi.set(__self__, "videofilter_license", videofilter_license)
if webfilter_cache is not None:
pulumi.set(__self__, "webfilter_cache", webfilter_cache)
if webfilter_cache_ttl is not None:
pulumi.set(__self__, "webfilter_cache_ttl", webfilter_cache_ttl)
if webfilter_expiration is not None:
pulumi.set(__self__, "webfilter_expiration", webfilter_expiration)
if webfilter_force_off is not None:
pulumi.set(__self__, "webfilter_force_off", webfilter_force_off)
if webfilter_license is not None:
pulumi.set(__self__, "webfilter_license", webfilter_license)
@property
@pulumi.getter(name="antispamTimeout")
def antispam_timeout(self) -> pulumi.Input[int]:
return pulumi.get(self, "antispam_timeout")
@antispam_timeout.setter
def antispam_timeout(self, value: pulumi.Input[int]):
pulumi.set(self, "antispam_timeout", value)
@property
@pulumi.getter(name="outbreakPreventionTimeout")
def outbreak_prevention_timeout(self) -> pulumi.Input[int]:
return pulumi.get(self, "outbreak_prevention_timeout")
@outbreak_prevention_timeout.setter
def outbreak_prevention_timeout(self, value: pulumi.Input[int]):
pulumi.set(self, "outbreak_prevention_timeout", value)
@property
@pulumi.getter(name="webfilterTimeout")
def webfilter_timeout(self) -> pulumi.Input[int]:
return pulumi.get(self, "webfilter_timeout")
@webfilter_timeout.setter
def webfilter_timeout(self, value: pulumi.Input[int]):
pulumi.set(self, "webfilter_timeout", value)
@property
@pulumi.getter(name="antispamCache")
def antispam_cache(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "antispam_cache")
@antispam_cache.setter
def antispam_cache(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "antispam_cache", value)
@property
@pulumi.getter(name="antispamCacheMpercent")
def antispam_cache_mpercent(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_cache_mpercent")
@antispam_cache_mpercent.setter
def antispam_cache_mpercent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_cache_mpercent", value)
@property
@pulumi.getter(name="antispamCacheMpermille")
def antispam_cache_mpermille(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_cache_mpermille")
@antispam_cache_mpermille.setter
def antispam_cache_mpermille(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_cache_mpermille", value)
@property
@pulumi.getter(name="antispamCacheTtl")
def antispam_cache_ttl(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_cache_ttl")
@antispam_cache_ttl.setter
def antispam_cache_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_cache_ttl", value)
@property
@pulumi.getter(name="antispamExpiration")
def antispam_expiration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_expiration")
@antispam_expiration.setter
def antispam_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_expiration", value)
@property
@pulumi.getter(name="antispamForceOff")
def antispam_force_off(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "antispam_force_off")
@antispam_force_off.setter
def antispam_force_off(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "antispam_force_off", value)
@property
@pulumi.getter(name="antispamLicense")
def antispam_license(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_license")
@antispam_license.setter
def antispam_license(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_license", value)
@property
@pulumi.getter(name="anycastSdnsServerIp")
def anycast_sdns_server_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "anycast_sdns_server_ip")
@anycast_sdns_server_ip.setter
def anycast_sdns_server_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "anycast_sdns_server_ip", value)
@property
@pulumi.getter(name="anycastSdnsServerPort")
def anycast_sdns_server_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "anycast_sdns_server_port")
@anycast_sdns_server_port.setter
def anycast_sdns_server_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "anycast_sdns_server_port", value)
@property
@pulumi.getter(name="autoFirmwareUpgrade")
def auto_firmware_upgrade(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "auto_firmware_upgrade")
@auto_firmware_upgrade.setter
def auto_firmware_upgrade(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_firmware_upgrade", value)
@property
@pulumi.getter(name="autoFirmwareUpgradeDay")
def auto_firmware_upgrade_day(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "auto_firmware_upgrade_day")
@auto_firmware_upgrade_day.setter
def auto_firmware_upgrade_day(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_firmware_upgrade_day", value)
@property
@pulumi.getter(name="autoFirmwareUpgradeDelay")
def auto_firmware_upgrade_delay(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "auto_firmware_upgrade_delay")
@auto_firmware_upgrade_delay.setter
def auto_firmware_upgrade_delay(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "auto_firmware_upgrade_delay", value)
@property
@pulumi.getter(name="autoFirmwareUpgradeEndHour")
def auto_firmware_upgrade_end_hour(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "auto_firmware_upgrade_end_hour")
@auto_firmware_upgrade_end_hour.setter
def auto_firmware_upgrade_end_hour(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "auto_firmware_upgrade_end_hour", value)
@property
@pulumi.getter(name="autoFirmwareUpgradeStartHour")
def auto_firmware_upgrade_start_hour(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "auto_firmware_upgrade_start_hour")
@auto_firmware_upgrade_start_hour.setter
def auto_firmware_upgrade_start_hour(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "auto_firmware_upgrade_start_hour", value)
@property
@pulumi.getter(name="autoJoinForticloud")
def auto_join_forticloud(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "auto_join_forticloud")
@auto_join_forticloud.setter
def auto_join_forticloud(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_join_forticloud", value)
@property
@pulumi.getter(name="ddnsServerIp")
def ddns_server_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ddns_server_ip")
@ddns_server_ip.setter
def ddns_server_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ddns_server_ip", value)
@property
@pulumi.getter(name="ddnsServerIp6")
def ddns_server_ip6(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ddns_server_ip6")
@ddns_server_ip6.setter
def ddns_server_ip6(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ddns_server_ip6", value)
@property
@pulumi.getter(name="ddnsServerPort")
def ddns_server_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "ddns_server_port")
@ddns_server_port.setter
def ddns_server_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ddns_server_port", value)
@property
@pulumi.getter(name="fdsLicenseExpiringDays")
def fds_license_expiring_days(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "fds_license_expiring_days")
@fds_license_expiring_days.setter
def fds_license_expiring_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fds_license_expiring_days", value)
@property
@pulumi.getter(name="fortiguardAnycast")
def fortiguard_anycast(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "fortiguard_anycast")
@fortiguard_anycast.setter
def fortiguard_anycast(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fortiguard_anycast", value)
@property
@pulumi.getter(name="fortiguardAnycastSource")
def fortiguard_anycast_source(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "fortiguard_anycast_source")
@fortiguard_anycast_source.setter
def fortiguard_anycast_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fortiguard_anycast_source", value)
@property
@pulumi.getter
def interface(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "interface")
@interface.setter
def interface(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interface", value)
@property
@pulumi.getter(name="interfaceSelectMethod")
def interface_select_method(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "interface_select_method")
@interface_select_method.setter
def interface_select_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interface_select_method", value)
@property
@pulumi.getter(name="loadBalanceServers")
def load_balance_servers(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "load_balance_servers")
@load_balance_servers.setter
def load_balance_servers(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "load_balance_servers", value)
@property
@pulumi.getter(name="outbreakPreventionCache")
def outbreak_prevention_cache(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "outbreak_prevention_cache")
@outbreak_prevention_cache.setter
def outbreak_prevention_cache(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "outbreak_prevention_cache", value)
@property
@pulumi.getter(name="outbreakPreventionCacheMpercent")
def outbreak_prevention_cache_mpercent(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_cache_mpercent")
@outbreak_prevention_cache_mpercent.setter
def outbreak_prevention_cache_mpercent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_cache_mpercent", value)
@property
@pulumi.getter(name="outbreakPreventionCacheMpermille")
def outbreak_prevention_cache_mpermille(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_cache_mpermille")
@outbreak_prevention_cache_mpermille.setter
def outbreak_prevention_cache_mpermille(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_cache_mpermille", value)
@property
@pulumi.getter(name="outbreakPreventionCacheTtl")
def outbreak_prevention_cache_ttl(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_cache_ttl")
@outbreak_prevention_cache_ttl.setter
def outbreak_prevention_cache_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_cache_ttl", value)
@property
@pulumi.getter(name="outbreakPreventionExpiration")
def outbreak_prevention_expiration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_expiration")
@outbreak_prevention_expiration.setter
def outbreak_prevention_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_expiration", value)
@property
@pulumi.getter(name="outbreakPreventionForceOff")
def outbreak_prevention_force_off(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "outbreak_prevention_force_off")
@outbreak_prevention_force_off.setter
def outbreak_prevention_force_off(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "outbreak_prevention_force_off", value)
@property
@pulumi.getter(name="outbreakPreventionLicense")
def outbreak_prevention_license(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_license")
@outbreak_prevention_license.setter
def outbreak_prevention_license(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_license", value)
@property
@pulumi.getter(name="persistentConnection")
def persistent_connection(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "persistent_connection")
@persistent_connection.setter
def persistent_connection(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "persistent_connection", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="proxyPassword")
def proxy_password(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "proxy_password")
@proxy_password.setter
def proxy_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proxy_password", value)
@property
@pulumi.getter(name="proxyServerIp")
def proxy_server_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "proxy_server_ip")
@proxy_server_ip.setter
def proxy_server_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proxy_server_ip", value)
@property
@pulumi.getter(name="proxyServerPort")
def proxy_server_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "proxy_server_port")
@proxy_server_port.setter
def proxy_server_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "proxy_server_port", value)
@property
@pulumi.getter(name="proxyUsername")
def proxy_username(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "proxy_username")
@proxy_username.setter
def proxy_username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proxy_username", value)
@property
@pulumi.getter(name="sandboxInlineScan")
def sandbox_inline_scan(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sandbox_inline_scan")
@sandbox_inline_scan.setter
def sandbox_inline_scan(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sandbox_inline_scan", value)
@property
@pulumi.getter(name="sandboxRegion")
def sandbox_region(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sandbox_region")
@sandbox_region.setter
def sandbox_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sandbox_region", value)
@property
@pulumi.getter(name="sdnsOptions")
def sdns_options(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sdns_options")
@sdns_options.setter
def sdns_options(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sdns_options", value)
@property
@pulumi.getter(name="sdnsServerIp")
def sdns_server_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sdns_server_ip")
@sdns_server_ip.setter
def sdns_server_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sdns_server_ip", value)
@property
@pulumi.getter(name="sdnsServerPort")
def sdns_server_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "sdns_server_port")
@sdns_server_port.setter
def sdns_server_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sdns_server_port", value)
@property
@pulumi.getter(name="serviceAccountId")
def service_account_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account_id")
@service_account_id.setter
def service_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_id", value)
@property
@pulumi.getter(name="sourceIp")
def source_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "source_ip")
@source_ip.setter
def source_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_ip", value)
@property
@pulumi.getter(name="sourceIp6")
def source_ip6(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "source_ip6")
@source_ip6.setter
def source_ip6(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_ip6", value)
@property
@pulumi.getter(name="updateBuildProxy")
def update_build_proxy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_build_proxy")
@update_build_proxy.setter
def update_build_proxy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_build_proxy", value)
@property
@pulumi.getter(name="updateDldb")
def update_dldb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_dldb")
@update_dldb.setter
def update_dldb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_dldb", value)
@property
@pulumi.getter(name="updateExtdb")
def update_extdb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_extdb")
@update_extdb.setter
def update_extdb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_extdb", value)
@property
@pulumi.getter(name="updateFfdb")
def update_ffdb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_ffdb")
@update_ffdb.setter
def update_ffdb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_ffdb", value)
@property
@pulumi.getter(name="updateServerLocation")
def update_server_location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_server_location")
@update_server_location.setter
def update_server_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_server_location", value)
@property
@pulumi.getter(name="updateUwdb")
def update_uwdb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_uwdb")
@update_uwdb.setter
def update_uwdb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_uwdb", value)
@property
@pulumi.getter
def vdom(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "vdom")
@vdom.setter
def vdom(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vdom", value)
@property
@pulumi.getter
def vdomparam(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "vdomparam")
@vdomparam.setter
def vdomparam(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vdomparam", value)
@property
@pulumi.getter(name="videofilterExpiration")
def videofilter_expiration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "videofilter_expiration")
@videofilter_expiration.setter
def videofilter_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "videofilter_expiration", value)
@property
@pulumi.getter(name="videofilterLicense")
def videofilter_license(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "videofilter_license")
@videofilter_license.setter
def videofilter_license(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "videofilter_license", value)
@property
@pulumi.getter(name="webfilterCache")
def webfilter_cache(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "webfilter_cache")
@webfilter_cache.setter
def webfilter_cache(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webfilter_cache", value)
@property
@pulumi.getter(name="webfilterCacheTtl")
def webfilter_cache_ttl(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "webfilter_cache_ttl")
@webfilter_cache_ttl.setter
def webfilter_cache_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "webfilter_cache_ttl", value)
@property
@pulumi.getter(name="webfilterExpiration")
def webfilter_expiration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "webfilter_expiration")
@webfilter_expiration.setter
def webfilter_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "webfilter_expiration", value)
@property
@pulumi.getter(name="webfilterForceOff")
def webfilter_force_off(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "webfilter_force_off")
@webfilter_force_off.setter
def webfilter_force_off(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webfilter_force_off", value)
@property
@pulumi.getter(name="webfilterLicense")
def webfilter_license(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "webfilter_license")
@webfilter_license.setter
def webfilter_license(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "webfilter_license", value)
@pulumi.input_type
class _SystemFortiguardState:
def __init__(__self__, *,
antispam_cache: Optional[pulumi.Input[str]] = None,
antispam_cache_mpercent: Optional[pulumi.Input[int]] = None,
antispam_cache_mpermille: Optional[pulumi.Input[int]] = None,
antispam_cache_ttl: Optional[pulumi.Input[int]] = None,
antispam_expiration: Optional[pulumi.Input[int]] = None,
antispam_force_off: Optional[pulumi.Input[str]] = None,
antispam_license: Optional[pulumi.Input[int]] = None,
antispam_timeout: Optional[pulumi.Input[int]] = None,
anycast_sdns_server_ip: Optional[pulumi.Input[str]] = None,
anycast_sdns_server_port: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_day: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_delay: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_end_hour: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_start_hour: Optional[pulumi.Input[int]] = None,
auto_join_forticloud: Optional[pulumi.Input[str]] = None,
ddns_server_ip: Optional[pulumi.Input[str]] = None,
ddns_server_ip6: Optional[pulumi.Input[str]] = None,
ddns_server_port: Optional[pulumi.Input[int]] = None,
fds_license_expiring_days: Optional[pulumi.Input[int]] = None,
fortiguard_anycast: Optional[pulumi.Input[str]] = None,
fortiguard_anycast_source: Optional[pulumi.Input[str]] = None,
interface: Optional[pulumi.Input[str]] = None,
interface_select_method: Optional[pulumi.Input[str]] = None,
load_balance_servers: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache: Optional[pulumi.Input[str]] = None,
outbreak_prevention_cache_mpercent: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_mpermille: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_ttl: Optional[pulumi.Input[int]] = None,
outbreak_prevention_expiration: Optional[pulumi.Input[int]] = None,
outbreak_prevention_force_off: Optional[pulumi.Input[str]] = None,
outbreak_prevention_license: Optional[pulumi.Input[int]] = None,
outbreak_prevention_timeout: Optional[pulumi.Input[int]] = None,
persistent_connection: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_password: Optional[pulumi.Input[str]] = None,
proxy_server_ip: Optional[pulumi.Input[str]] = None,
proxy_server_port: Optional[pulumi.Input[int]] = None,
proxy_username: Optional[pulumi.Input[str]] = None,
sandbox_inline_scan: Optional[pulumi.Input[str]] = None,
sandbox_region: Optional[pulumi.Input[str]] = None,
sdns_options: Optional[pulumi.Input[str]] = None,
sdns_server_ip: Optional[pulumi.Input[str]] = None,
sdns_server_port: Optional[pulumi.Input[int]] = None,
service_account_id: Optional[pulumi.Input[str]] = None,
source_ip: Optional[pulumi.Input[str]] = None,
source_ip6: Optional[pulumi.Input[str]] = None,
update_build_proxy: Optional[pulumi.Input[str]] = None,
update_dldb: Optional[pulumi.Input[str]] = None,
update_extdb: Optional[pulumi.Input[str]] = None,
update_ffdb: Optional[pulumi.Input[str]] = None,
update_server_location: Optional[pulumi.Input[str]] = None,
update_uwdb: Optional[pulumi.Input[str]] = None,
vdom: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
videofilter_expiration: Optional[pulumi.Input[int]] = None,
videofilter_license: Optional[pulumi.Input[int]] = None,
webfilter_cache: Optional[pulumi.Input[str]] = None,
webfilter_cache_ttl: Optional[pulumi.Input[int]] = None,
webfilter_expiration: Optional[pulumi.Input[int]] = None,
webfilter_force_off: Optional[pulumi.Input[str]] = None,
webfilter_license: Optional[pulumi.Input[int]] = None,
webfilter_timeout: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering SystemFortiguard resources.
"""
if antispam_cache is not None:
pulumi.set(__self__, "antispam_cache", antispam_cache)
if antispam_cache_mpercent is not None:
pulumi.set(__self__, "antispam_cache_mpercent", antispam_cache_mpercent)
if antispam_cache_mpermille is not None:
pulumi.set(__self__, "antispam_cache_mpermille", antispam_cache_mpermille)
if antispam_cache_ttl is not None:
pulumi.set(__self__, "antispam_cache_ttl", antispam_cache_ttl)
if antispam_expiration is not None:
pulumi.set(__self__, "antispam_expiration", antispam_expiration)
if antispam_force_off is not None:
pulumi.set(__self__, "antispam_force_off", antispam_force_off)
if antispam_license is not None:
pulumi.set(__self__, "antispam_license", antispam_license)
if antispam_timeout is not None:
pulumi.set(__self__, "antispam_timeout", antispam_timeout)
if anycast_sdns_server_ip is not None:
pulumi.set(__self__, "anycast_sdns_server_ip", anycast_sdns_server_ip)
if anycast_sdns_server_port is not None:
pulumi.set(__self__, "anycast_sdns_server_port", anycast_sdns_server_port)
if auto_firmware_upgrade is not None:
pulumi.set(__self__, "auto_firmware_upgrade", auto_firmware_upgrade)
if auto_firmware_upgrade_day is not None:
pulumi.set(__self__, "auto_firmware_upgrade_day", auto_firmware_upgrade_day)
if auto_firmware_upgrade_delay is not None:
pulumi.set(__self__, "auto_firmware_upgrade_delay", auto_firmware_upgrade_delay)
if auto_firmware_upgrade_end_hour is not None:
pulumi.set(__self__, "auto_firmware_upgrade_end_hour", auto_firmware_upgrade_end_hour)
if auto_firmware_upgrade_start_hour is not None:
pulumi.set(__self__, "auto_firmware_upgrade_start_hour", auto_firmware_upgrade_start_hour)
if auto_join_forticloud is not None:
pulumi.set(__self__, "auto_join_forticloud", auto_join_forticloud)
if ddns_server_ip is not None:
pulumi.set(__self__, "ddns_server_ip", ddns_server_ip)
if ddns_server_ip6 is not None:
pulumi.set(__self__, "ddns_server_ip6", ddns_server_ip6)
if ddns_server_port is not None:
pulumi.set(__self__, "ddns_server_port", ddns_server_port)
if fds_license_expiring_days is not None:
pulumi.set(__self__, "fds_license_expiring_days", fds_license_expiring_days)
if fortiguard_anycast is not None:
pulumi.set(__self__, "fortiguard_anycast", fortiguard_anycast)
if fortiguard_anycast_source is not None:
pulumi.set(__self__, "fortiguard_anycast_source", fortiguard_anycast_source)
if interface is not None:
pulumi.set(__self__, "interface", interface)
if interface_select_method is not None:
pulumi.set(__self__, "interface_select_method", interface_select_method)
if load_balance_servers is not None:
pulumi.set(__self__, "load_balance_servers", load_balance_servers)
if outbreak_prevention_cache is not None:
pulumi.set(__self__, "outbreak_prevention_cache", outbreak_prevention_cache)
if outbreak_prevention_cache_mpercent is not None:
pulumi.set(__self__, "outbreak_prevention_cache_mpercent", outbreak_prevention_cache_mpercent)
if outbreak_prevention_cache_mpermille is not None:
pulumi.set(__self__, "outbreak_prevention_cache_mpermille", outbreak_prevention_cache_mpermille)
if outbreak_prevention_cache_ttl is not None:
pulumi.set(__self__, "outbreak_prevention_cache_ttl", outbreak_prevention_cache_ttl)
if outbreak_prevention_expiration is not None:
pulumi.set(__self__, "outbreak_prevention_expiration", outbreak_prevention_expiration)
if outbreak_prevention_force_off is not None:
pulumi.set(__self__, "outbreak_prevention_force_off", outbreak_prevention_force_off)
if outbreak_prevention_license is not None:
pulumi.set(__self__, "outbreak_prevention_license", outbreak_prevention_license)
if outbreak_prevention_timeout is not None:
pulumi.set(__self__, "outbreak_prevention_timeout", outbreak_prevention_timeout)
if persistent_connection is not None:
pulumi.set(__self__, "persistent_connection", persistent_connection)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if proxy_password is not None:
pulumi.set(__self__, "proxy_password", proxy_password)
if proxy_server_ip is not None:
pulumi.set(__self__, "proxy_server_ip", proxy_server_ip)
if proxy_server_port is not None:
pulumi.set(__self__, "proxy_server_port", proxy_server_port)
if proxy_username is not None:
pulumi.set(__self__, "proxy_username", proxy_username)
if sandbox_inline_scan is not None:
pulumi.set(__self__, "sandbox_inline_scan", sandbox_inline_scan)
if sandbox_region is not None:
pulumi.set(__self__, "sandbox_region", sandbox_region)
if sdns_options is not None:
pulumi.set(__self__, "sdns_options", sdns_options)
if sdns_server_ip is not None:
pulumi.set(__self__, "sdns_server_ip", sdns_server_ip)
if sdns_server_port is not None:
pulumi.set(__self__, "sdns_server_port", sdns_server_port)
if service_account_id is not None:
pulumi.set(__self__, "service_account_id", service_account_id)
if source_ip is not None:
pulumi.set(__self__, "source_ip", source_ip)
if source_ip6 is not None:
pulumi.set(__self__, "source_ip6", source_ip6)
if update_build_proxy is not None:
pulumi.set(__self__, "update_build_proxy", update_build_proxy)
if update_dldb is not None:
pulumi.set(__self__, "update_dldb", update_dldb)
if update_extdb is not None:
pulumi.set(__self__, "update_extdb", update_extdb)
if update_ffdb is not None:
pulumi.set(__self__, "update_ffdb", update_ffdb)
if update_server_location is not None:
pulumi.set(__self__, "update_server_location", update_server_location)
if update_uwdb is not None:
pulumi.set(__self__, "update_uwdb", update_uwdb)
if vdom is not None:
pulumi.set(__self__, "vdom", vdom)
if vdomparam is not None:
pulumi.set(__self__, "vdomparam", vdomparam)
if videofilter_expiration is not None:
pulumi.set(__self__, "videofilter_expiration", videofilter_expiration)
if videofilter_license is not None:
pulumi.set(__self__, "videofilter_license", videofilter_license)
if webfilter_cache is not None:
pulumi.set(__self__, "webfilter_cache", webfilter_cache)
if webfilter_cache_ttl is not None:
pulumi.set(__self__, "webfilter_cache_ttl", webfilter_cache_ttl)
if webfilter_expiration is not None:
pulumi.set(__self__, "webfilter_expiration", webfilter_expiration)
if webfilter_force_off is not None:
pulumi.set(__self__, "webfilter_force_off", webfilter_force_off)
if webfilter_license is not None:
pulumi.set(__self__, "webfilter_license", webfilter_license)
if webfilter_timeout is not None:
pulumi.set(__self__, "webfilter_timeout", webfilter_timeout)
@property
@pulumi.getter(name="antispamCache")
def antispam_cache(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "antispam_cache")
@antispam_cache.setter
def antispam_cache(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "antispam_cache", value)
@property
@pulumi.getter(name="antispamCacheMpercent")
def antispam_cache_mpercent(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_cache_mpercent")
@antispam_cache_mpercent.setter
def antispam_cache_mpercent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_cache_mpercent", value)
@property
@pulumi.getter(name="antispamCacheMpermille")
def antispam_cache_mpermille(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_cache_mpermille")
@antispam_cache_mpermille.setter
def antispam_cache_mpermille(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_cache_mpermille", value)
@property
@pulumi.getter(name="antispamCacheTtl")
def antispam_cache_ttl(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_cache_ttl")
@antispam_cache_ttl.setter
def antispam_cache_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_cache_ttl", value)
@property
@pulumi.getter(name="antispamExpiration")
def antispam_expiration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_expiration")
@antispam_expiration.setter
def antispam_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_expiration", value)
@property
@pulumi.getter(name="antispamForceOff")
def antispam_force_off(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "antispam_force_off")
@antispam_force_off.setter
def antispam_force_off(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "antispam_force_off", value)
@property
@pulumi.getter(name="antispamLicense")
def antispam_license(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_license")
@antispam_license.setter
def antispam_license(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_license", value)
@property
@pulumi.getter(name="antispamTimeout")
def antispam_timeout(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "antispam_timeout")
@antispam_timeout.setter
def antispam_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "antispam_timeout", value)
@property
@pulumi.getter(name="anycastSdnsServerIp")
def anycast_sdns_server_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "anycast_sdns_server_ip")
@anycast_sdns_server_ip.setter
def anycast_sdns_server_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "anycast_sdns_server_ip", value)
@property
@pulumi.getter(name="anycastSdnsServerPort")
def anycast_sdns_server_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "anycast_sdns_server_port")
@anycast_sdns_server_port.setter
def anycast_sdns_server_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "anycast_sdns_server_port", value)
@property
@pulumi.getter(name="autoFirmwareUpgrade")
def auto_firmware_upgrade(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "auto_firmware_upgrade")
@auto_firmware_upgrade.setter
def auto_firmware_upgrade(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_firmware_upgrade", value)
@property
@pulumi.getter(name="autoFirmwareUpgradeDay")
def auto_firmware_upgrade_day(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "auto_firmware_upgrade_day")
@auto_firmware_upgrade_day.setter
def auto_firmware_upgrade_day(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_firmware_upgrade_day", value)
@property
@pulumi.getter(name="autoFirmwareUpgradeDelay")
def auto_firmware_upgrade_delay(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "auto_firmware_upgrade_delay")
@auto_firmware_upgrade_delay.setter
def auto_firmware_upgrade_delay(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "auto_firmware_upgrade_delay", value)
@property
@pulumi.getter(name="autoFirmwareUpgradeEndHour")
def auto_firmware_upgrade_end_hour(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "auto_firmware_upgrade_end_hour")
@auto_firmware_upgrade_end_hour.setter
def auto_firmware_upgrade_end_hour(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "auto_firmware_upgrade_end_hour", value)
@property
@pulumi.getter(name="autoFirmwareUpgradeStartHour")
def auto_firmware_upgrade_start_hour(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "auto_firmware_upgrade_start_hour")
@auto_firmware_upgrade_start_hour.setter
def auto_firmware_upgrade_start_hour(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "auto_firmware_upgrade_start_hour", value)
@property
@pulumi.getter(name="autoJoinForticloud")
def auto_join_forticloud(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "auto_join_forticloud")
@auto_join_forticloud.setter
def auto_join_forticloud(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_join_forticloud", value)
@property
@pulumi.getter(name="ddnsServerIp")
def ddns_server_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ddns_server_ip")
@ddns_server_ip.setter
def ddns_server_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ddns_server_ip", value)
@property
@pulumi.getter(name="ddnsServerIp6")
def ddns_server_ip6(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ddns_server_ip6")
@ddns_server_ip6.setter
def ddns_server_ip6(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ddns_server_ip6", value)
@property
@pulumi.getter(name="ddnsServerPort")
def ddns_server_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "ddns_server_port")
@ddns_server_port.setter
def ddns_server_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ddns_server_port", value)
@property
@pulumi.getter(name="fdsLicenseExpiringDays")
def fds_license_expiring_days(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "fds_license_expiring_days")
@fds_license_expiring_days.setter
def fds_license_expiring_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fds_license_expiring_days", value)
@property
@pulumi.getter(name="fortiguardAnycast")
def fortiguard_anycast(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "fortiguard_anycast")
@fortiguard_anycast.setter
def fortiguard_anycast(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fortiguard_anycast", value)
@property
@pulumi.getter(name="fortiguardAnycastSource")
def fortiguard_anycast_source(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "fortiguard_anycast_source")
@fortiguard_anycast_source.setter
def fortiguard_anycast_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fortiguard_anycast_source", value)
@property
@pulumi.getter
def interface(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "interface")
@interface.setter
def interface(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interface", value)
@property
@pulumi.getter(name="interfaceSelectMethod")
def interface_select_method(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "interface_select_method")
@interface_select_method.setter
def interface_select_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interface_select_method", value)
@property
@pulumi.getter(name="loadBalanceServers")
def load_balance_servers(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "load_balance_servers")
@load_balance_servers.setter
def load_balance_servers(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "load_balance_servers", value)
@property
@pulumi.getter(name="outbreakPreventionCache")
def outbreak_prevention_cache(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "outbreak_prevention_cache")
@outbreak_prevention_cache.setter
def outbreak_prevention_cache(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "outbreak_prevention_cache", value)
@property
@pulumi.getter(name="outbreakPreventionCacheMpercent")
def outbreak_prevention_cache_mpercent(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_cache_mpercent")
@outbreak_prevention_cache_mpercent.setter
def outbreak_prevention_cache_mpercent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_cache_mpercent", value)
@property
@pulumi.getter(name="outbreakPreventionCacheMpermille")
def outbreak_prevention_cache_mpermille(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_cache_mpermille")
@outbreak_prevention_cache_mpermille.setter
def outbreak_prevention_cache_mpermille(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_cache_mpermille", value)
@property
@pulumi.getter(name="outbreakPreventionCacheTtl")
def outbreak_prevention_cache_ttl(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_cache_ttl")
@outbreak_prevention_cache_ttl.setter
def outbreak_prevention_cache_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_cache_ttl", value)
@property
@pulumi.getter(name="outbreakPreventionExpiration")
def outbreak_prevention_expiration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_expiration")
@outbreak_prevention_expiration.setter
def outbreak_prevention_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_expiration", value)
@property
@pulumi.getter(name="outbreakPreventionForceOff")
def outbreak_prevention_force_off(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "outbreak_prevention_force_off")
@outbreak_prevention_force_off.setter
def outbreak_prevention_force_off(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "outbreak_prevention_force_off", value)
@property
@pulumi.getter(name="outbreakPreventionLicense")
def outbreak_prevention_license(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_license")
@outbreak_prevention_license.setter
def outbreak_prevention_license(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_license", value)
@property
@pulumi.getter(name="outbreakPreventionTimeout")
def outbreak_prevention_timeout(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "outbreak_prevention_timeout")
@outbreak_prevention_timeout.setter
def outbreak_prevention_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "outbreak_prevention_timeout", value)
@property
@pulumi.getter(name="persistentConnection")
def persistent_connection(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "persistent_connection")
@persistent_connection.setter
def persistent_connection(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "persistent_connection", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="proxyPassword")
def proxy_password(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "proxy_password")
@proxy_password.setter
def proxy_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proxy_password", value)
@property
@pulumi.getter(name="proxyServerIp")
def proxy_server_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "proxy_server_ip")
@proxy_server_ip.setter
def proxy_server_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proxy_server_ip", value)
@property
@pulumi.getter(name="proxyServerPort")
def proxy_server_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "proxy_server_port")
@proxy_server_port.setter
def proxy_server_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "proxy_server_port", value)
@property
@pulumi.getter(name="proxyUsername")
def proxy_username(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "proxy_username")
@proxy_username.setter
def proxy_username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proxy_username", value)
@property
@pulumi.getter(name="sandboxInlineScan")
def sandbox_inline_scan(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sandbox_inline_scan")
@sandbox_inline_scan.setter
def sandbox_inline_scan(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sandbox_inline_scan", value)
@property
@pulumi.getter(name="sandboxRegion")
def sandbox_region(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sandbox_region")
@sandbox_region.setter
def sandbox_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sandbox_region", value)
@property
@pulumi.getter(name="sdnsOptions")
def sdns_options(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sdns_options")
@sdns_options.setter
def sdns_options(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sdns_options", value)
@property
@pulumi.getter(name="sdnsServerIp")
def sdns_server_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sdns_server_ip")
@sdns_server_ip.setter
def sdns_server_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sdns_server_ip", value)
@property
@pulumi.getter(name="sdnsServerPort")
def sdns_server_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "sdns_server_port")
@sdns_server_port.setter
def sdns_server_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sdns_server_port", value)
@property
@pulumi.getter(name="serviceAccountId")
def service_account_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account_id")
@service_account_id.setter
def service_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_id", value)
@property
@pulumi.getter(name="sourceIp")
def source_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "source_ip")
@source_ip.setter
def source_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_ip", value)
@property
@pulumi.getter(name="sourceIp6")
def source_ip6(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "source_ip6")
@source_ip6.setter
def source_ip6(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_ip6", value)
@property
@pulumi.getter(name="updateBuildProxy")
def update_build_proxy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_build_proxy")
@update_build_proxy.setter
def update_build_proxy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_build_proxy", value)
@property
@pulumi.getter(name="updateDldb")
def update_dldb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_dldb")
@update_dldb.setter
def update_dldb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_dldb", value)
@property
@pulumi.getter(name="updateExtdb")
def update_extdb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_extdb")
@update_extdb.setter
def update_extdb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_extdb", value)
@property
@pulumi.getter(name="updateFfdb")
def update_ffdb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_ffdb")
@update_ffdb.setter
def update_ffdb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_ffdb", value)
@property
@pulumi.getter(name="updateServerLocation")
def update_server_location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_server_location")
@update_server_location.setter
def update_server_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_server_location", value)
@property
@pulumi.getter(name="updateUwdb")
def update_uwdb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "update_uwdb")
@update_uwdb.setter
def update_uwdb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_uwdb", value)
@property
@pulumi.getter
def vdom(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "vdom")
@vdom.setter
def vdom(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vdom", value)
@property
@pulumi.getter
def vdomparam(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "vdomparam")
@vdomparam.setter
def vdomparam(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vdomparam", value)
@property
@pulumi.getter(name="videofilterExpiration")
def videofilter_expiration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "videofilter_expiration")
@videofilter_expiration.setter
def videofilter_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "videofilter_expiration", value)
@property
@pulumi.getter(name="videofilterLicense")
def videofilter_license(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "videofilter_license")
@videofilter_license.setter
def videofilter_license(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "videofilter_license", value)
@property
@pulumi.getter(name="webfilterCache")
def webfilter_cache(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "webfilter_cache")
@webfilter_cache.setter
def webfilter_cache(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webfilter_cache", value)
@property
@pulumi.getter(name="webfilterCacheTtl")
def webfilter_cache_ttl(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "webfilter_cache_ttl")
@webfilter_cache_ttl.setter
def webfilter_cache_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "webfilter_cache_ttl", value)
@property
@pulumi.getter(name="webfilterExpiration")
def webfilter_expiration(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "webfilter_expiration")
@webfilter_expiration.setter
def webfilter_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "webfilter_expiration", value)
@property
@pulumi.getter(name="webfilterForceOff")
def webfilter_force_off(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "webfilter_force_off")
@webfilter_force_off.setter
def webfilter_force_off(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webfilter_force_off", value)
@property
@pulumi.getter(name="webfilterLicense")
def webfilter_license(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "webfilter_license")
@webfilter_license.setter
def webfilter_license(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "webfilter_license", value)
@property
@pulumi.getter(name="webfilterTimeout")
def webfilter_timeout(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "webfilter_timeout")
@webfilter_timeout.setter
def webfilter_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "webfilter_timeout", value)
class SystemFortiguard(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
antispam_cache: Optional[pulumi.Input[str]] = None,
antispam_cache_mpercent: Optional[pulumi.Input[int]] = None,
antispam_cache_mpermille: Optional[pulumi.Input[int]] = None,
antispam_cache_ttl: Optional[pulumi.Input[int]] = None,
antispam_expiration: Optional[pulumi.Input[int]] = None,
antispam_force_off: Optional[pulumi.Input[str]] = None,
antispam_license: Optional[pulumi.Input[int]] = None,
antispam_timeout: Optional[pulumi.Input[int]] = None,
anycast_sdns_server_ip: Optional[pulumi.Input[str]] = None,
anycast_sdns_server_port: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_day: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_delay: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_end_hour: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_start_hour: Optional[pulumi.Input[int]] = None,
auto_join_forticloud: Optional[pulumi.Input[str]] = None,
ddns_server_ip: Optional[pulumi.Input[str]] = None,
ddns_server_ip6: Optional[pulumi.Input[str]] = None,
ddns_server_port: Optional[pulumi.Input[int]] = None,
fds_license_expiring_days: Optional[pulumi.Input[int]] = None,
fortiguard_anycast: Optional[pulumi.Input[str]] = None,
fortiguard_anycast_source: Optional[pulumi.Input[str]] = None,
interface: Optional[pulumi.Input[str]] = None,
interface_select_method: Optional[pulumi.Input[str]] = None,
load_balance_servers: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache: Optional[pulumi.Input[str]] = None,
outbreak_prevention_cache_mpercent: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_mpermille: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_ttl: Optional[pulumi.Input[int]] = None,
outbreak_prevention_expiration: Optional[pulumi.Input[int]] = None,
outbreak_prevention_force_off: Optional[pulumi.Input[str]] = None,
outbreak_prevention_license: Optional[pulumi.Input[int]] = None,
outbreak_prevention_timeout: Optional[pulumi.Input[int]] = None,
persistent_connection: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_password: Optional[pulumi.Input[str]] = None,
proxy_server_ip: Optional[pulumi.Input[str]] = None,
proxy_server_port: Optional[pulumi.Input[int]] = None,
proxy_username: Optional[pulumi.Input[str]] = None,
sandbox_inline_scan: Optional[pulumi.Input[str]] = None,
sandbox_region: Optional[pulumi.Input[str]] = None,
sdns_options: Optional[pulumi.Input[str]] = None,
sdns_server_ip: Optional[pulumi.Input[str]] = None,
sdns_server_port: Optional[pulumi.Input[int]] = None,
service_account_id: Optional[pulumi.Input[str]] = None,
source_ip: Optional[pulumi.Input[str]] = None,
source_ip6: Optional[pulumi.Input[str]] = None,
update_build_proxy: Optional[pulumi.Input[str]] = None,
update_dldb: Optional[pulumi.Input[str]] = None,
update_extdb: Optional[pulumi.Input[str]] = None,
update_ffdb: Optional[pulumi.Input[str]] = None,
update_server_location: Optional[pulumi.Input[str]] = None,
update_uwdb: Optional[pulumi.Input[str]] = None,
vdom: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
videofilter_expiration: Optional[pulumi.Input[int]] = None,
videofilter_license: Optional[pulumi.Input[int]] = None,
webfilter_cache: Optional[pulumi.Input[str]] = None,
webfilter_cache_ttl: Optional[pulumi.Input[int]] = None,
webfilter_expiration: Optional[pulumi.Input[int]] = None,
webfilter_force_off: Optional[pulumi.Input[str]] = None,
webfilter_license: Optional[pulumi.Input[int]] = None,
webfilter_timeout: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Create a SystemFortiguard resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SystemFortiguardArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a SystemFortiguard resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param SystemFortiguardArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SystemFortiguardArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
antispam_cache: Optional[pulumi.Input[str]] = None,
antispam_cache_mpercent: Optional[pulumi.Input[int]] = None,
antispam_cache_mpermille: Optional[pulumi.Input[int]] = None,
antispam_cache_ttl: Optional[pulumi.Input[int]] = None,
antispam_expiration: Optional[pulumi.Input[int]] = None,
antispam_force_off: Optional[pulumi.Input[str]] = None,
antispam_license: Optional[pulumi.Input[int]] = None,
antispam_timeout: Optional[pulumi.Input[int]] = None,
anycast_sdns_server_ip: Optional[pulumi.Input[str]] = None,
anycast_sdns_server_port: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_day: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_delay: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_end_hour: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_start_hour: Optional[pulumi.Input[int]] = None,
auto_join_forticloud: Optional[pulumi.Input[str]] = None,
ddns_server_ip: Optional[pulumi.Input[str]] = None,
ddns_server_ip6: Optional[pulumi.Input[str]] = None,
ddns_server_port: Optional[pulumi.Input[int]] = None,
fds_license_expiring_days: Optional[pulumi.Input[int]] = None,
fortiguard_anycast: Optional[pulumi.Input[str]] = None,
fortiguard_anycast_source: Optional[pulumi.Input[str]] = None,
interface: Optional[pulumi.Input[str]] = None,
interface_select_method: Optional[pulumi.Input[str]] = None,
load_balance_servers: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache: Optional[pulumi.Input[str]] = None,
outbreak_prevention_cache_mpercent: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_mpermille: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_ttl: Optional[pulumi.Input[int]] = None,
outbreak_prevention_expiration: Optional[pulumi.Input[int]] = None,
outbreak_prevention_force_off: Optional[pulumi.Input[str]] = None,
outbreak_prevention_license: Optional[pulumi.Input[int]] = None,
outbreak_prevention_timeout: Optional[pulumi.Input[int]] = None,
persistent_connection: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_password: Optional[pulumi.Input[str]] = None,
proxy_server_ip: Optional[pulumi.Input[str]] = None,
proxy_server_port: Optional[pulumi.Input[int]] = None,
proxy_username: Optional[pulumi.Input[str]] = None,
sandbox_inline_scan: Optional[pulumi.Input[str]] = None,
sandbox_region: Optional[pulumi.Input[str]] = None,
sdns_options: Optional[pulumi.Input[str]] = None,
sdns_server_ip: Optional[pulumi.Input[str]] = None,
sdns_server_port: Optional[pulumi.Input[int]] = None,
service_account_id: Optional[pulumi.Input[str]] = None,
source_ip: Optional[pulumi.Input[str]] = None,
source_ip6: Optional[pulumi.Input[str]] = None,
update_build_proxy: Optional[pulumi.Input[str]] = None,
update_dldb: Optional[pulumi.Input[str]] = None,
update_extdb: Optional[pulumi.Input[str]] = None,
update_ffdb: Optional[pulumi.Input[str]] = None,
update_server_location: Optional[pulumi.Input[str]] = None,
update_uwdb: Optional[pulumi.Input[str]] = None,
vdom: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
videofilter_expiration: Optional[pulumi.Input[int]] = None,
videofilter_license: Optional[pulumi.Input[int]] = None,
webfilter_cache: Optional[pulumi.Input[str]] = None,
webfilter_cache_ttl: Optional[pulumi.Input[int]] = None,
webfilter_expiration: Optional[pulumi.Input[int]] = None,
webfilter_force_off: Optional[pulumi.Input[str]] = None,
webfilter_license: Optional[pulumi.Input[int]] = None,
webfilter_timeout: Optional[pulumi.Input[int]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SystemFortiguardArgs.__new__(SystemFortiguardArgs)
__props__.__dict__["antispam_cache"] = antispam_cache
__props__.__dict__["antispam_cache_mpercent"] = antispam_cache_mpercent
__props__.__dict__["antispam_cache_mpermille"] = antispam_cache_mpermille
__props__.__dict__["antispam_cache_ttl"] = antispam_cache_ttl
__props__.__dict__["antispam_expiration"] = antispam_expiration
__props__.__dict__["antispam_force_off"] = antispam_force_off
__props__.__dict__["antispam_license"] = antispam_license
if antispam_timeout is None and not opts.urn:
raise TypeError("Missing required property 'antispam_timeout'")
__props__.__dict__["antispam_timeout"] = antispam_timeout
__props__.__dict__["anycast_sdns_server_ip"] = anycast_sdns_server_ip
__props__.__dict__["anycast_sdns_server_port"] = anycast_sdns_server_port
__props__.__dict__["auto_firmware_upgrade"] = auto_firmware_upgrade
__props__.__dict__["auto_firmware_upgrade_day"] = auto_firmware_upgrade_day
__props__.__dict__["auto_firmware_upgrade_delay"] = auto_firmware_upgrade_delay
__props__.__dict__["auto_firmware_upgrade_end_hour"] = auto_firmware_upgrade_end_hour
__props__.__dict__["auto_firmware_upgrade_start_hour"] = auto_firmware_upgrade_start_hour
__props__.__dict__["auto_join_forticloud"] = auto_join_forticloud
__props__.__dict__["ddns_server_ip"] = ddns_server_ip
__props__.__dict__["ddns_server_ip6"] = ddns_server_ip6
__props__.__dict__["ddns_server_port"] = ddns_server_port
__props__.__dict__["fds_license_expiring_days"] = fds_license_expiring_days
__props__.__dict__["fortiguard_anycast"] = fortiguard_anycast
__props__.__dict__["fortiguard_anycast_source"] = fortiguard_anycast_source
__props__.__dict__["interface"] = interface
__props__.__dict__["interface_select_method"] = interface_select_method
__props__.__dict__["load_balance_servers"] = load_balance_servers
__props__.__dict__["outbreak_prevention_cache"] = outbreak_prevention_cache
__props__.__dict__["outbreak_prevention_cache_mpercent"] = outbreak_prevention_cache_mpercent
__props__.__dict__["outbreak_prevention_cache_mpermille"] = outbreak_prevention_cache_mpermille
__props__.__dict__["outbreak_prevention_cache_ttl"] = outbreak_prevention_cache_ttl
__props__.__dict__["outbreak_prevention_expiration"] = outbreak_prevention_expiration
__props__.__dict__["outbreak_prevention_force_off"] = outbreak_prevention_force_off
__props__.__dict__["outbreak_prevention_license"] = outbreak_prevention_license
if outbreak_prevention_timeout is None and not opts.urn:
raise TypeError("Missing required property 'outbreak_prevention_timeout'")
__props__.__dict__["outbreak_prevention_timeout"] = outbreak_prevention_timeout
__props__.__dict__["persistent_connection"] = persistent_connection
__props__.__dict__["port"] = port
__props__.__dict__["protocol"] = protocol
__props__.__dict__["proxy_password"] = None if proxy_password is None else pulumi.Output.secret(proxy_password)
__props__.__dict__["proxy_server_ip"] = proxy_server_ip
__props__.__dict__["proxy_server_port"] = proxy_server_port
__props__.__dict__["proxy_username"] = proxy_username
__props__.__dict__["sandbox_inline_scan"] = sandbox_inline_scan
__props__.__dict__["sandbox_region"] = sandbox_region
__props__.__dict__["sdns_options"] = sdns_options
__props__.__dict__["sdns_server_ip"] = sdns_server_ip
__props__.__dict__["sdns_server_port"] = sdns_server_port
__props__.__dict__["service_account_id"] = service_account_id
__props__.__dict__["source_ip"] = source_ip
__props__.__dict__["source_ip6"] = source_ip6
__props__.__dict__["update_build_proxy"] = update_build_proxy
__props__.__dict__["update_dldb"] = update_dldb
__props__.__dict__["update_extdb"] = update_extdb
__props__.__dict__["update_ffdb"] = update_ffdb
__props__.__dict__["update_server_location"] = update_server_location
__props__.__dict__["update_uwdb"] = update_uwdb
__props__.__dict__["vdom"] = vdom
__props__.__dict__["vdomparam"] = vdomparam
__props__.__dict__["videofilter_expiration"] = videofilter_expiration
__props__.__dict__["videofilter_license"] = videofilter_license
__props__.__dict__["webfilter_cache"] = webfilter_cache
__props__.__dict__["webfilter_cache_ttl"] = webfilter_cache_ttl
__props__.__dict__["webfilter_expiration"] = webfilter_expiration
__props__.__dict__["webfilter_force_off"] = webfilter_force_off
__props__.__dict__["webfilter_license"] = webfilter_license
if webfilter_timeout is None and not opts.urn:
raise TypeError("Missing required property 'webfilter_timeout'")
__props__.__dict__["webfilter_timeout"] = webfilter_timeout
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["proxyPassword"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(SystemFortiguard, __self__).__init__(
'fortios:index/systemFortiguard:SystemFortiguard',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
antispam_cache: Optional[pulumi.Input[str]] = None,
antispam_cache_mpercent: Optional[pulumi.Input[int]] = None,
antispam_cache_mpermille: Optional[pulumi.Input[int]] = None,
antispam_cache_ttl: Optional[pulumi.Input[int]] = None,
antispam_expiration: Optional[pulumi.Input[int]] = None,
antispam_force_off: Optional[pulumi.Input[str]] = None,
antispam_license: Optional[pulumi.Input[int]] = None,
antispam_timeout: Optional[pulumi.Input[int]] = None,
anycast_sdns_server_ip: Optional[pulumi.Input[str]] = None,
anycast_sdns_server_port: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_day: Optional[pulumi.Input[str]] = None,
auto_firmware_upgrade_delay: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_end_hour: Optional[pulumi.Input[int]] = None,
auto_firmware_upgrade_start_hour: Optional[pulumi.Input[int]] = None,
auto_join_forticloud: Optional[pulumi.Input[str]] = None,
ddns_server_ip: Optional[pulumi.Input[str]] = None,
ddns_server_ip6: Optional[pulumi.Input[str]] = None,
ddns_server_port: Optional[pulumi.Input[int]] = None,
fds_license_expiring_days: Optional[pulumi.Input[int]] = None,
fortiguard_anycast: Optional[pulumi.Input[str]] = None,
fortiguard_anycast_source: Optional[pulumi.Input[str]] = None,
interface: Optional[pulumi.Input[str]] = None,
interface_select_method: Optional[pulumi.Input[str]] = None,
load_balance_servers: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache: Optional[pulumi.Input[str]] = None,
outbreak_prevention_cache_mpercent: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_mpermille: Optional[pulumi.Input[int]] = None,
outbreak_prevention_cache_ttl: Optional[pulumi.Input[int]] = None,
outbreak_prevention_expiration: Optional[pulumi.Input[int]] = None,
outbreak_prevention_force_off: Optional[pulumi.Input[str]] = None,
outbreak_prevention_license: Optional[pulumi.Input[int]] = None,
outbreak_prevention_timeout: Optional[pulumi.Input[int]] = None,
persistent_connection: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_password: Optional[pulumi.Input[str]] = None,
proxy_server_ip: Optional[pulumi.Input[str]] = None,
proxy_server_port: Optional[pulumi.Input[int]] = None,
proxy_username: Optional[pulumi.Input[str]] = None,
sandbox_inline_scan: Optional[pulumi.Input[str]] = None,
sandbox_region: Optional[pulumi.Input[str]] = None,
sdns_options: Optional[pulumi.Input[str]] = None,
sdns_server_ip: Optional[pulumi.Input[str]] = None,
sdns_server_port: Optional[pulumi.Input[int]] = None,
service_account_id: Optional[pulumi.Input[str]] = None,
source_ip: Optional[pulumi.Input[str]] = None,
source_ip6: Optional[pulumi.Input[str]] = None,
update_build_proxy: Optional[pulumi.Input[str]] = None,
update_dldb: Optional[pulumi.Input[str]] = None,
update_extdb: Optional[pulumi.Input[str]] = None,
update_ffdb: Optional[pulumi.Input[str]] = None,
update_server_location: Optional[pulumi.Input[str]] = None,
update_uwdb: Optional[pulumi.Input[str]] = None,
vdom: Optional[pulumi.Input[str]] = None,
vdomparam: Optional[pulumi.Input[str]] = None,
videofilter_expiration: Optional[pulumi.Input[int]] = None,
videofilter_license: Optional[pulumi.Input[int]] = None,
webfilter_cache: Optional[pulumi.Input[str]] = None,
webfilter_cache_ttl: Optional[pulumi.Input[int]] = None,
webfilter_expiration: Optional[pulumi.Input[int]] = None,
webfilter_force_off: Optional[pulumi.Input[str]] = None,
webfilter_license: Optional[pulumi.Input[int]] = None,
webfilter_timeout: Optional[pulumi.Input[int]] = None) -> 'SystemFortiguard':
"""
Get an existing SystemFortiguard resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SystemFortiguardState.__new__(_SystemFortiguardState)
__props__.__dict__["antispam_cache"] = antispam_cache
__props__.__dict__["antispam_cache_mpercent"] = antispam_cache_mpercent
__props__.__dict__["antispam_cache_mpermille"] = antispam_cache_mpermille
__props__.__dict__["antispam_cache_ttl"] = antispam_cache_ttl
__props__.__dict__["antispam_expiration"] = antispam_expiration
__props__.__dict__["antispam_force_off"] = antispam_force_off
__props__.__dict__["antispam_license"] = antispam_license
__props__.__dict__["antispam_timeout"] = antispam_timeout
__props__.__dict__["anycast_sdns_server_ip"] = anycast_sdns_server_ip
__props__.__dict__["anycast_sdns_server_port"] = anycast_sdns_server_port
__props__.__dict__["auto_firmware_upgrade"] = auto_firmware_upgrade
__props__.__dict__["auto_firmware_upgrade_day"] = auto_firmware_upgrade_day
__props__.__dict__["auto_firmware_upgrade_delay"] = auto_firmware_upgrade_delay
__props__.__dict__["auto_firmware_upgrade_end_hour"] = auto_firmware_upgrade_end_hour
__props__.__dict__["auto_firmware_upgrade_start_hour"] = auto_firmware_upgrade_start_hour
__props__.__dict__["auto_join_forticloud"] = auto_join_forticloud
__props__.__dict__["ddns_server_ip"] = ddns_server_ip
__props__.__dict__["ddns_server_ip6"] = ddns_server_ip6
__props__.__dict__["ddns_server_port"] = ddns_server_port
__props__.__dict__["fds_license_expiring_days"] = fds_license_expiring_days
__props__.__dict__["fortiguard_anycast"] = fortiguard_anycast
__props__.__dict__["fortiguard_anycast_source"] = fortiguard_anycast_source
__props__.__dict__["interface"] = interface
__props__.__dict__["interface_select_method"] = interface_select_method
__props__.__dict__["load_balance_servers"] = load_balance_servers
__props__.__dict__["outbreak_prevention_cache"] = outbreak_prevention_cache
__props__.__dict__["outbreak_prevention_cache_mpercent"] = outbreak_prevention_cache_mpercent
__props__.__dict__["outbreak_prevention_cache_mpermille"] = outbreak_prevention_cache_mpermille
__props__.__dict__["outbreak_prevention_cache_ttl"] = outbreak_prevention_cache_ttl
__props__.__dict__["outbreak_prevention_expiration"] = outbreak_prevention_expiration
__props__.__dict__["outbreak_prevention_force_off"] = outbreak_prevention_force_off
__props__.__dict__["outbreak_prevention_license"] = outbreak_prevention_license
__props__.__dict__["outbreak_prevention_timeout"] = outbreak_prevention_timeout
__props__.__dict__["persistent_connection"] = persistent_connection
__props__.__dict__["port"] = port
__props__.__dict__["protocol"] = protocol
__props__.__dict__["proxy_password"] = proxy_password
__props__.__dict__["proxy_server_ip"] = proxy_server_ip
__props__.__dict__["proxy_server_port"] = proxy_server_port
__props__.__dict__["proxy_username"] = proxy_username
__props__.__dict__["sandbox_inline_scan"] = sandbox_inline_scan
__props__.__dict__["sandbox_region"] = sandbox_region
__props__.__dict__["sdns_options"] = sdns_options
__props__.__dict__["sdns_server_ip"] = sdns_server_ip
__props__.__dict__["sdns_server_port"] = sdns_server_port
__props__.__dict__["service_account_id"] = service_account_id
__props__.__dict__["source_ip"] = source_ip
__props__.__dict__["source_ip6"] = source_ip6
__props__.__dict__["update_build_proxy"] = update_build_proxy
__props__.__dict__["update_dldb"] = update_dldb
__props__.__dict__["update_extdb"] = update_extdb
__props__.__dict__["update_ffdb"] = update_ffdb
__props__.__dict__["update_server_location"] = update_server_location
__props__.__dict__["update_uwdb"] = update_uwdb
__props__.__dict__["vdom"] = vdom
__props__.__dict__["vdomparam"] = vdomparam
__props__.__dict__["videofilter_expiration"] = videofilter_expiration
__props__.__dict__["videofilter_license"] = videofilter_license
__props__.__dict__["webfilter_cache"] = webfilter_cache
__props__.__dict__["webfilter_cache_ttl"] = webfilter_cache_ttl
__props__.__dict__["webfilter_expiration"] = webfilter_expiration
__props__.__dict__["webfilter_force_off"] = webfilter_force_off
__props__.__dict__["webfilter_license"] = webfilter_license
__props__.__dict__["webfilter_timeout"] = webfilter_timeout
return SystemFortiguard(resource_name, opts=opts, __props__=__props__)
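# Usage sketch (not part of the generated code; the resource name and ID below
# are placeholders): adopting an already-provisioned FortiGuard configuration
# into a Pulumi program via the static get() lookup above.
#
#     existing = SystemFortiguard.get("existing-fortiguard", id="<provider-id>")
#     pulumi.export("fortiguardPort", existing.port)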
@property
@pulumi.getter(name="antispamCache")
def antispam_cache(self) -> pulumi.Output[str]:
return pulumi.get(self, "antispam_cache")
@property
@pulumi.getter(name="antispamCacheMpercent")
def antispam_cache_mpercent(self) -> pulumi.Output[int]:
return pulumi.get(self, "antispam_cache_mpercent")
@property
@pulumi.getter(name="antispamCacheMpermille")
def antispam_cache_mpermille(self) -> pulumi.Output[int]:
return pulumi.get(self, "antispam_cache_mpermille")
@property
@pulumi.getter(name="antispamCacheTtl")
def antispam_cache_ttl(self) -> pulumi.Output[int]:
return pulumi.get(self, "antispam_cache_ttl")
@property
@pulumi.getter(name="antispamExpiration")
def antispam_expiration(self) -> pulumi.Output[int]:
return pulumi.get(self, "antispam_expiration")
@property
@pulumi.getter(name="antispamForceOff")
def antispam_force_off(self) -> pulumi.Output[str]:
return pulumi.get(self, "antispam_force_off")
@property
@pulumi.getter(name="antispamLicense")
def antispam_license(self) -> pulumi.Output[int]:
return pulumi.get(self, "antispam_license")
@property
@pulumi.getter(name="antispamTimeout")
def antispam_timeout(self) -> pulumi.Output[int]:
return pulumi.get(self, "antispam_timeout")
@property
@pulumi.getter(name="anycastSdnsServerIp")
def anycast_sdns_server_ip(self) -> pulumi.Output[str]:
return pulumi.get(self, "anycast_sdns_server_ip")
@property
@pulumi.getter(name="anycastSdnsServerPort")
def anycast_sdns_server_port(self) -> pulumi.Output[int]:
return pulumi.get(self, "anycast_sdns_server_port")
@property
@pulumi.getter(name="autoFirmwareUpgrade")
def auto_firmware_upgrade(self) -> pulumi.Output[str]:
return pulumi.get(self, "auto_firmware_upgrade")
@property
@pulumi.getter(name="autoFirmwareUpgradeDay")
def auto_firmware_upgrade_day(self) -> pulumi.Output[str]:
return pulumi.get(self, "auto_firmware_upgrade_day")
@property
@pulumi.getter(name="autoFirmwareUpgradeDelay")
def auto_firmware_upgrade_delay(self) -> pulumi.Output[int]:
return pulumi.get(self, "auto_firmware_upgrade_delay")
@property
@pulumi.getter(name="autoFirmwareUpgradeEndHour")
def auto_firmware_upgrade_end_hour(self) -> pulumi.Output[int]:
return pulumi.get(self, "auto_firmware_upgrade_end_hour")
@property
@pulumi.getter(name="autoFirmwareUpgradeStartHour")
def auto_firmware_upgrade_start_hour(self) -> pulumi.Output[int]:
return pulumi.get(self, "auto_firmware_upgrade_start_hour")
@property
@pulumi.getter(name="autoJoinForticloud")
def auto_join_forticloud(self) -> pulumi.Output[str]:
return pulumi.get(self, "auto_join_forticloud")
@property
@pulumi.getter(name="ddnsServerIp")
def ddns_server_ip(self) -> pulumi.Output[str]:
return pulumi.get(self, "ddns_server_ip")
@property
@pulumi.getter(name="ddnsServerIp6")
def ddns_server_ip6(self) -> pulumi.Output[str]:
return pulumi.get(self, "ddns_server_ip6")
@property
@pulumi.getter(name="ddnsServerPort")
def ddns_server_port(self) -> pulumi.Output[int]:
return pulumi.get(self, "ddns_server_port")
@property
@pulumi.getter(name="fdsLicenseExpiringDays")
def fds_license_expiring_days(self) -> pulumi.Output[int]:
return pulumi.get(self, "fds_license_expiring_days")
@property
@pulumi.getter(name="fortiguardAnycast")
def fortiguard_anycast(self) -> pulumi.Output[str]:
return pulumi.get(self, "fortiguard_anycast")
@property
@pulumi.getter(name="fortiguardAnycastSource")
def fortiguard_anycast_source(self) -> pulumi.Output[str]:
return pulumi.get(self, "fortiguard_anycast_source")
@property
@pulumi.getter
def interface(self) -> pulumi.Output[str]:
return pulumi.get(self, "interface")
@property
@pulumi.getter(name="interfaceSelectMethod")
def interface_select_method(self) -> pulumi.Output[str]:
return pulumi.get(self, "interface_select_method")
@property
@pulumi.getter(name="loadBalanceServers")
def load_balance_servers(self) -> pulumi.Output[int]:
return pulumi.get(self, "load_balance_servers")
@property
@pulumi.getter(name="outbreakPreventionCache")
def outbreak_prevention_cache(self) -> pulumi.Output[str]:
return pulumi.get(self, "outbreak_prevention_cache")
@property
@pulumi.getter(name="outbreakPreventionCacheMpercent")
def outbreak_prevention_cache_mpercent(self) -> pulumi.Output[int]:
return pulumi.get(self, "outbreak_prevention_cache_mpercent")
@property
@pulumi.getter(name="outbreakPreventionCacheMpermille")
def outbreak_prevention_cache_mpermille(self) -> pulumi.Output[int]:
return pulumi.get(self, "outbreak_prevention_cache_mpermille")
@property
@pulumi.getter(name="outbreakPreventionCacheTtl")
def outbreak_prevention_cache_ttl(self) -> pulumi.Output[int]:
return pulumi.get(self, "outbreak_prevention_cache_ttl")
@property
@pulumi.getter(name="outbreakPreventionExpiration")
def outbreak_prevention_expiration(self) -> pulumi.Output[int]:
return pulumi.get(self, "outbreak_prevention_expiration")
@property
@pulumi.getter(name="outbreakPreventionForceOff")
def outbreak_prevention_force_off(self) -> pulumi.Output[str]:
return pulumi.get(self, "outbreak_prevention_force_off")
@property
@pulumi.getter(name="outbreakPreventionLicense")
def outbreak_prevention_license(self) -> pulumi.Output[int]:
return pulumi.get(self, "outbreak_prevention_license")
@property
@pulumi.getter(name="outbreakPreventionTimeout")
def outbreak_prevention_timeout(self) -> pulumi.Output[int]:
return pulumi.get(self, "outbreak_prevention_timeout")
@property
@pulumi.getter(name="persistentConnection")
def persistent_connection(self) -> pulumi.Output[str]:
return pulumi.get(self, "persistent_connection")
@property
@pulumi.getter
def port(self) -> pulumi.Output[str]:
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="proxyPassword")
def proxy_password(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "proxy_password")
@property
@pulumi.getter(name="proxyServerIp")
def proxy_server_ip(self) -> pulumi.Output[str]:
return pulumi.get(self, "proxy_server_ip")
@property
@pulumi.getter(name="proxyServerPort")
def proxy_server_port(self) -> pulumi.Output[int]:
return pulumi.get(self, "proxy_server_port")
@property
@pulumi.getter(name="proxyUsername")
def proxy_username(self) -> pulumi.Output[str]:
return pulumi.get(self, "proxy_username")
@property
@pulumi.getter(name="sandboxInlineScan")
def sandbox_inline_scan(self) -> pulumi.Output[str]:
return pulumi.get(self, "sandbox_inline_scan")
@property
@pulumi.getter(name="sandboxRegion")
def sandbox_region(self) -> pulumi.Output[str]:
return pulumi.get(self, "sandbox_region")
@property
@pulumi.getter(name="sdnsOptions")
def sdns_options(self) -> pulumi.Output[str]:
return pulumi.get(self, "sdns_options")
@property
@pulumi.getter(name="sdnsServerIp")
def sdns_server_ip(self) -> pulumi.Output[str]:
return pulumi.get(self, "sdns_server_ip")
@property
@pulumi.getter(name="sdnsServerPort")
def sdns_server_port(self) -> pulumi.Output[int]:
return pulumi.get(self, "sdns_server_port")
@property
@pulumi.getter(name="serviceAccountId")
def service_account_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "service_account_id")
@property
@pulumi.getter(name="sourceIp")
def source_ip(self) -> pulumi.Output[str]:
return pulumi.get(self, "source_ip")
@property
@pulumi.getter(name="sourceIp6")
def source_ip6(self) -> pulumi.Output[str]:
return pulumi.get(self, "source_ip6")
@property
@pulumi.getter(name="updateBuildProxy")
def update_build_proxy(self) -> pulumi.Output[str]:
return pulumi.get(self, "update_build_proxy")
@property
@pulumi.getter(name="updateDldb")
def update_dldb(self) -> pulumi.Output[str]:
return pulumi.get(self, "update_dldb")
@property
@pulumi.getter(name="updateExtdb")
def update_extdb(self) -> pulumi.Output[str]:
return pulumi.get(self, "update_extdb")
@property
@pulumi.getter(name="updateFfdb")
def update_ffdb(self) -> pulumi.Output[str]:
return pulumi.get(self, "update_ffdb")
@property
@pulumi.getter(name="updateServerLocation")
def update_server_location(self) -> pulumi.Output[str]:
return pulumi.get(self, "update_server_location")
@property
@pulumi.getter(name="updateUwdb")
def update_uwdb(self) -> pulumi.Output[str]:
return pulumi.get(self, "update_uwdb")
@property
@pulumi.getter
def vdom(self) -> pulumi.Output[str]:
return pulumi.get(self, "vdom")
@property
@pulumi.getter
def vdomparam(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "vdomparam")
@property
@pulumi.getter(name="videofilterExpiration")
def videofilter_expiration(self) -> pulumi.Output[int]:
return pulumi.get(self, "videofilter_expiration")
@property
@pulumi.getter(name="videofilterLicense")
def videofilter_license(self) -> pulumi.Output[int]:
return pulumi.get(self, "videofilter_license")
@property
@pulumi.getter(name="webfilterCache")
def webfilter_cache(self) -> pulumi.Output[str]:
return pulumi.get(self, "webfilter_cache")
@property
@pulumi.getter(name="webfilterCacheTtl")
def webfilter_cache_ttl(self) -> pulumi.Output[int]:
return pulumi.get(self, "webfilter_cache_ttl")
@property
@pulumi.getter(name="webfilterExpiration")
def webfilter_expiration(self) -> pulumi.Output[int]:
return pulumi.get(self, "webfilter_expiration")
@property
@pulumi.getter(name="webfilterForceOff")
def webfilter_force_off(self) -> pulumi.Output[str]:
return pulumi.get(self, "webfilter_force_off")
@property
@pulumi.getter(name="webfilterLicense")
def webfilter_license(self) -> pulumi.Output[int]:
return pulumi.get(self, "webfilter_license")
@property
@pulumi.getter(name="webfilterTimeout")
def webfilter_timeout(self) -> pulumi.Output[int]:
return pulumi.get(self, "webfilter_timeout")
/pulumi_consul-3.10.0a1692854781.tar.gz/pulumi_consul-3.10.0a1692854781/pulumi_consul/get_network_area_members.py
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'GetNetworkAreaMembersResult',
'AwaitableGetNetworkAreaMembersResult',
'get_network_area_members',
'get_network_area_members_output',
]
@pulumi.output_type
class GetNetworkAreaMembersResult:
"""
A collection of values returned by getNetworkAreaMembers.
"""
def __init__(__self__, datacenter=None, id=None, members=None, token=None, uuid=None):
if datacenter and not isinstance(datacenter, str):
raise TypeError("Expected argument 'datacenter' to be a str")
pulumi.set(__self__, "datacenter", datacenter)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if members and not isinstance(members, list):
raise TypeError("Expected argument 'members' to be a list")
pulumi.set(__self__, "members", members)
if token and not isinstance(token, str):
raise TypeError("Expected argument 'token' to be a str")
pulumi.set(__self__, "token", token)
if uuid and not isinstance(uuid, str):
raise TypeError("Expected argument 'uuid' to be a str")
pulumi.set(__self__, "uuid", uuid)
@property
@pulumi.getter
def datacenter(self) -> str:
"""
The node's Consul datacenter.
"""
return pulumi.get(self, "datacenter")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def members(self) -> Sequence['outputs.GetNetworkAreaMembersMemberResult']:
"""
The list of Consul servers in this network area
"""
return pulumi.get(self, "members")
@property
@pulumi.getter
def token(self) -> Optional[str]:
warnings.warn("""The token argument has been deprecated and will be removed in a future release.
Please use the token argument in the provider configuration""", DeprecationWarning)
pulumi.log.warn("""token is deprecated: The token argument has been deprecated and will be removed in a future release.
Please use the token argument in the provider configuration""")
return pulumi.get(self, "token")
@property
@pulumi.getter
def uuid(self) -> str:
"""
The UUID of the Network Area being queried.
"""
return pulumi.get(self, "uuid")
class AwaitableGetNetworkAreaMembersResult(GetNetworkAreaMembersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkAreaMembersResult(
datacenter=self.datacenter,
id=self.id,
members=self.members,
token=self.token,
uuid=self.uuid)
def get_network_area_members(datacenter: Optional[str] = None,
token: Optional[str] = None,
uuid: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkAreaMembersResult:
"""
> **NOTE:** This feature requires [Consul Enterprise](https://www.consul.io/docs/enterprise/index.html).
The `get_network_area_members` data source provides a list of the Consul
servers present in a specific network area.
## Example Usage
```python
import pulumi
import pulumi_consul as consul
dc2_network_area = consul.NetworkArea("dc2NetworkArea",
peer_datacenter="dc2",
retry_joins=["1.2.3.4"],
use_tls=True)
dc2_network_area_members = consul.get_network_area_members_output(uuid=dc2_network_area.id)
pulumi.export("members", dc2_network_area_members.members)
```
:param str datacenter: The datacenter to use. This overrides the
agent's default datacenter and the datacenter in the provider setup.
:param str token: The ACL token to use. This overrides the
token that the agent provides by default.
:param str uuid: The UUID of the area to list.
"""
__args__ = dict()
__args__['datacenter'] = datacenter
__args__['token'] = token
__args__['uuid'] = uuid
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('consul:index/getNetworkAreaMembers:getNetworkAreaMembers', __args__, opts=opts, typ=GetNetworkAreaMembersResult).value
return AwaitableGetNetworkAreaMembersResult(
datacenter=pulumi.get(__ret__, 'datacenter'),
id=pulumi.get(__ret__, 'id'),
members=pulumi.get(__ret__, 'members'),
token=pulumi.get(__ret__, 'token'),
uuid=pulumi.get(__ret__, 'uuid'))
@_utilities.lift_output_func(get_network_area_members)
def get_network_area_members_output(datacenter: Optional[pulumi.Input[Optional[str]]] = None,
token: Optional[pulumi.Input[Optional[str]]] = None,
uuid: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkAreaMembersResult]:
"""
> **NOTE:** This feature requires [Consul Enterprise](https://www.consul.io/docs/enterprise/index.html).
The `get_network_area_members` data source provides a list of the Consul
servers present in a specific network area.
## Example Usage
```python
import pulumi
import pulumi_consul as consul
dc2_network_area = consul.NetworkArea("dc2NetworkArea",
peer_datacenter="dc2",
retry_joins=["1.2.3.4"],
use_tls=True)
dc2_network_area_members = consul.get_network_area_members_output(uuid=dc2_network_area.id)
pulumi.export("members", dc2_network_area_members.members)
```
:param str datacenter: The datacenter to use. This overrides the
agent's default datacenter and the datacenter in the provider setup.
:param str token: The ACL token to use. This overrides the
token that the agent provides by default.
:param str uuid: The UUID of the area to list.
"""
...
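# Minimal sketch of the eager (non-output) form, assuming the module is imported
# as "consul" and the area UUID is already known; the UUID below is a placeholder.
#
#     area = consul.get_network_area_members(uuid="<area-uuid>")
#     for member in area.members:
#         print(member)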
/compas_fab-0.28.0.tar.gz/compas_fab-0.28.0/src/compas_fab/sensors/baumer.py
import time
from compas_fab.sensors import SerialSensor
from compas_fab.sensors.exceptions import ProtocolError
from compas_fab.sensors.exceptions import SensorTimeoutError
__all__ = ["PosCon3D", "PosConCM"]
class PosCon3D(SerialSensor):
"""Provides an interface for the Baumer PosCon3D edge measurement sensor.
The sensor has different interfaces to retrieve its data. This
class provides access to the serial interface (RS-485). This
class is a context manager type, so it's best used in combination
with the ``with`` statement to ensure resource deallocation. The
protocol of the sensor when operated via RS-485 indicates that
access to it must be locked programmatically before starting operations
and unlocked on completion. This is handled automatically if you use
this class on a ``with`` statement, otherwise, the methods ``begin()`` and
``end()`` must be invoked by hand.
Attributes
----------
serial : :obj:`serial.Serial`
Instance of the serial port used to communicate with the sensor.
address : :obj:`int`
PosCon3D sensors have an address assigned, which defaults to 1.
There's also a broadcast address (``PosCon3D.BROADCAST_ADDRESS``)
that can be used to query the address of the sensor connected to
the RS-485 bus. Only one sensor can be in the bus when using the
broadcast address to query for sensor's address.
Examples
--------
>>> from serial import Serial # doctest: +SKIP
>>> with Serial('COM5', 57600, timeout=1) as serial: # doctest: +SKIP
>>> with PosCon3D(serial, PosCon3D.BROADCAST_ADDRESS) as broadcast_query: # doctest: +SKIP
... addr = broadcast_query.get_address() # doctest: +SKIP
... # doctest: +SKIP
... with PosCon3D(serial, addr) as sensor: # doctest: +SKIP
... sensor.set_measurement_type('Edge L rise') # doctest: +SKIP
... sensor.set_precision(2) # doctest: +SKIP
... data = sensor.get_measurement() # doctest: +SKIP
"""
FRAME_HEAD = "{%s,%s,%s"
FRAME_TAIL = "%s%s}"
BROADCAST_ADDRESS = 0
MEASUREMENT_TYPES = (
"Edge L rise",
"Edge L fall",
"Edge R rise",
"Edge R fall",
"Width",
"Center width",
"Gap",
"Center gap",
)
QUALITY = {0: "Valid", 1: "Low signal", 2: "No edge", 3: "Low signal, no edge", 4: "No signal"}
ERROR_CODES = {
"000": "No error",
"001": "False checksum",
"002": "False command",
"003": "False frame",
"004": "False value or parameter",
"005": "Missed command 000 to begin RS-485 control",
"006": "Out of range",
"007": "Buffer overflow",
"010": "All outputs Off",
"020": "Display Off",
"99": "Argument out of Range",
"100": "Distance out of Range (see FSP)",
"101": "Angle out of Range (see FSP)",
"102": "Flatness out of Range (see FSP)",
"103": "Length out of Range (see FSP)",
"200": "Fatal Error (Reset sensor, Power Off / On)",
}
def __init__(self, serial, address):
super(PosCon3D, self).__init__(serial)
self.address = address
def __enter__(self):
self.begin()
return self
def __exit__(self, *args):
self.end()
def begin(self):
"""Locks the sensor to start RS-485 communication.
Note
----
This method only needs to be called if not using
a ``with`` statement to handle lifetime of the `PosCon3D` instance.
"""
return self.send_command(self.address, "000", "1")
def end(self):
"""Unlocks the sensor from RS-485 communication.
Note
----
This method only needs to be called if not using
a ``with`` statement to handle lifetime of the `PosCon3D` instance.
"""
return self.send_command(self.address, "000", "0")
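# Sketch of manual lock handling (the serial port object and address are
# placeholders): when the context manager is not used, begin() and end() must
# bracket every other command so the RS-485 lock is always released.
#
#     sensor = PosCon3D(serial, 1)
#     sensor.begin()
#     try:
#         value, quality = sensor.get_measurement()
#     finally:
#         sensor.end()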
def send_command(self, address, command, data=None):
"""Sends a command to the sensor's address specified. The command
can optionally contain a data string.
This method is mostly for internal use, as the higher-level API is
exposed via dedicated methods.
Parameters
----------
address : :obj:`int`
PosCon3D sensors have an address assigned, which defaults to 1.
There's also a broadcast address (``PosCon3D.BROADCAST_ADDRESS``)
that can be used to query the address of the sensor connected to
the RS-485 bus. Only one sensor can be in the bus when using the
broadcast address to query for sensor's address.
command : :obj:`string`
A string indicating the command number to be executed.
data : :obj:`string`
An optional string of data that is sent together with
the command.
Returns
-------
`list` or value
Result of the command. It can be a list or a single value depending on the operation.
"""
cmd = self.format_command(address, command, data)
# Python 2 vs 3
if hasattr(cmd, "decode"):
self.serial.write(cmd)
result = self.serial.readline()
else:
self.serial.write(cmd.encode("ascii"))
result = self.serial.readline().decode("ascii")
if result:
frame_head = result[:-4]
checksum = result[-4:-1]
expected = self.calculate_checksum(frame_head)
if expected != checksum:
raise ProtocolError("Invalid response, checksum mismatch. Expected=%s, Got=%s" % (expected, checksum))
expected_frame_head = self.FRAME_HEAD % (address, command, "")
if not result.startswith(expected_frame_head):
raise ProtocolError(
'Invalid response, command/address mismatch. Expected to start with="%s", Got="%s"'
% (expected_frame_head, result)
)
return self.get_payload(result)
return None
def format_command(self, address, command, data=None):
"""Formats the command."""
data = data + "," if data else ""
frame = self.FRAME_HEAD % (address, command, data)
return self.FRAME_TAIL % (frame, self.calculate_checksum(frame))
def calculate_checksum(self, command):
"""Checks that message is complete."""
code_points = [ord(c) for c in list(command)]
checksum = 0
for i in code_points:
checksum = checksum ^ i
return str(checksum).zfill(3)
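# Worked example (illustration only, derived from the formats above):
# format_command(1, "031") produces the frame head "{1,031,"; XOR-ing the code
# points of those characters gives 120, so the complete command written to the
# serial port is "{1,031,120}".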
def get_payload(self, result):
"""Gets payload."""
data = result.split(",")[2:-1]
if not data:
return None
elif len(data) == 1:
return data[0]
else:
if data[0] == "E":
raise ProtocolError(self.ERROR_CODES[str(data[1])])
return data
def get_address(self):
"""Gets the address of the RS-485 sensors currently connected to the bus. This command
is only really useful when this class is initialized with the broadcast address,
with the purpose of retrieving the address of a sensor connected.
Returns
-------
`int`
Address of the PosCon3D sensor connected to the RS-485 bus.
Note
----
Only one PosCon3D sensor can be connected to the bus for this operation to succeed.
"""
return int(self.send_command(self.address, "013"))
def set_measurement_type(self, measurement_type):
"""Defines the measurement type to use.
=================== =========== ======
Measurement type    Function    Value
=================== =========== ======
"Edge L rise"       Edge        0
"Edge L fall"       Edge        1
"Edge R rise"       Edge        2
"Edge R fall"       Edge        3
"Width"             Width       4
"Center width"      Width       5
"Gap"               Gap         6
"Center gap"        Gap         7
Parameters
----------
measurement_type : :obj:`string`
Measurement type.
"""
if measurement_type not in self.MEASUREMENT_TYPES:
raise ProtocolError("Unsupported measure type, must be one of " + str(self.MEASUREMENT_TYPES))
return self.send_command(self.address, "020", str(self.MEASUREMENT_TYPES.index(measurement_type)))
def set_precision(self, precision):
"""Defines the precision the sensor will use to determine edges:
======== ============ ===============
Value    Precision    Function values
======== ============ ===============
``0``    Standard     Median=off, Moving Average=off
``1``    High         Median=7, Moving Average=16
``2``    Very High    Median=15, Moving Average=128
======== ============ ===============
Parameters
----------
precision : :obj:`int`
Sensor precision to use.
Note
----
The higher the precision, the slower the measurement gets.
"""
if precision < 0 or precision > 2:
raise ProtocolError("Precision must be 0 (standard), 1 (high) or 2 (very high)")
return self.send_command(self.address, "040", str(precision))
def set_edge_height(self, height):
"""Defines the minimum height of an edge to be detected.
Parameters
----------
height : :obj:`float`
Minimum edge height.
"""
return self.send_command(self.address, "042", str(height))
def get_measurement(self):
"""Retrieves the current measurement of the sensor according to the current settings.
Returns
-------
`tuple`
The current measurement together with a label indicating the quality of the measured value.
"""
result = self.send_command(self.address, "031")
if len(result) != 2:
raise ProtocolError("Unexpected result: " + str(result))
value = result[0]
quality = int(result[1])
# If Valid
if quality == 0:
value = float(value)
return (value, self.QUALITY[quality])
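# Illustrative return values (not measured data): a valid reading might come
# back as (42.17, "Valid"), while a failed one keeps the raw string value and a
# quality label such as "No edge".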
def get_live_monitor_data(self):
"""Retrieves the distance to the surface in the center of the laser beam and the
angle at which it's found.
Returns
-------
`list`
Angle and distance to the reference surface.
Note
----
This function is designed to aid in the installation of the sensor at an angle.
"""
result = self.send_command(self.address, "093")
if len(result) != 2:
raise ProtocolError("Unexpected result: " + str(result))
return map(float, result)
def reset(self):
"""Resets the sensor to factory settings."""
self.send_command(self.address, "003")
def activate_flex_mount(self, reference_thickness):
"""Activates the FLEX Mount feature of the sensor to allow positioning it on an
angled installation. The reference thickness is only required if the surface is
        uneven and an additional leveling auxiliary plate has been added."""
result = self.send_command(self.address, "062", str(reference_thickness))
return map(float, result)
def deactivate_flex_mount(self):
"""Deactivates the FLEX Mount feature."""
self.send_command(self.address, "063")
def set_flex_mount(self, angle, distance):
"""Sets the FLEX Mount feature to a specific angle and distance."""
result = self.send_command(self.address, "060", "%.2f,%.2f" % (angle, distance))
return map(float, result)
def adjust_to_dark_object(self, is_dark_object):
"""Adjusts the sensor to detect darker or lighter surfaces."""
data = "1" if is_dark_object else "0"
return self.send_command(self.address, "044", data)
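# Usage sketch for PosCon3D (illustrative only; the port name, baud rate, parity and the
# assumption that PosCon3D is a context manager with a BROADCAST_ADDRESS mirror the
# PosConCM example below and are not taken verbatim from this module):
#
#   from serial import Serial, PARITY_EVEN
#   with Serial('/dev/ttyUSB0', 57600, parity=PARITY_EVEN, timeout=1) as port:
#       with PosCon3D(port, PosCon3D.BROADCAST_ADDRESS) as query:
#           addr = query.get_address()              # requires a single sensor on the bus
#       with PosCon3D(port, addr) as sensor:
#           sensor.set_measurement_type("Width")    # one of MEASUREMENT_TYPES
#           sensor.set_precision(1)                 # 0=standard, 1=high, 2=very high
#           value, quality = sensor.get_measurement()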
class PosConCM(SerialSensor):
"""Provides an interface for the Baumer PosConCM round objects measurement sensor.
The sensor has different interfaces to retrieve its data. This
class provides access to the serial interface (RS-485).
This class is a context manager type, so it's best used in combination
with the ``with`` statement to ensure resource deallocation.
The protocol of the sensor when operated via RS-485 indicates that
access to it must be locked programmatically before starting operations
and unlocked on completion. This is handled automatically if you use
this class on a ``with`` statement, otherwise, the methods ``begin()`` and
``end()`` must be invoked by hand.
Attributes
----------
serial : :obj:`serial.Serial`
Instance of the serial port used to communicate with the sensor.
address : :obj:`int`
PosConCM sensors have an address assigned, which defaults to 1.
There's also a broadcast address (``PosConCM.BROADCAST_ADDRESS``)
that can be used to query the address of the sensor connected to
the RS-485 bus. Only one sensor can be in the bus when using the
broadcast address to query for sensor's address.
Examples
--------
>>> from serial import Serial # doctest: +SKIP
>>> with Serial('COM5', 57600, parity=PARITY_EVEN, timeout=1) as serial: # doctest: +SKIP
>>> with PosConCM(serial, PosConCM.BROADCAST_ADDRESS) as broadcast_query: # doctest: +SKIP
... addr = broadcast_query.get_address() # doctest: +SKIP
... # doctest: +SKIP
... with PosConCM(serial, addr) as sensor: # doctest: +SKIP
    ...             sensor.set_measurement_type('X_center') # doctest: +SKIP
... sensor.set_precision(2) # doctest: +SKIP
... data = sensor.get_measurement() # doctest: +SKIP
"""
FRAME_HEAD = ":%s%s;%s;"
FRAME_TAIL = "%s%s\r\n"
BROADCAST_ADDRESS = 0
MEASUREMENT_TYPES = {"Diameter": 28, "X_center": 29, "Z_center": 30, "X_left": 31, "X_right": 32, "Z_top": 33}
QUALITY = {0: "Valid", 1: "Low signal", 2: "No edge", 3: "Low signal, no edge", 4: "No signal"}
    ERROR_CODES = {
        "1": "Wrong message type",
        "2": "Wrong payload format",
        "3": "Wrong argument",
        "4": "Wrong argument count",
        "5": "Not enough data",
        "6": "Index does not exist",
        "7": "Index locked",
        "8": "Access not allowed",
        "9": "Not enough memory for encoding",
        "10": "Not possible to encode argument",
        "11": "Application specific error",
        "12": "Wrong state",
    }
def __init__(self, serial, address):
super(PosConCM, self).__init__(serial)
self.address = address
def __enter__(self):
self.begin()
return self
def __exit__(self, *args):
self.end()
def begin(self):
"""Locks the sensor to start RS-485 communication.
Note
----
This method only needs to be called if not using
a ``with`` statement to handle lifetime of the `PosConCM` instance.
"""
return self.send_command(self.address, "W010", "0")
def end(self):
"""Unlocks the sensor from RS-485 communication.
Note
----
This method only needs to be called if not using
a ``with`` statement to handle lifetime of the `PosConCM` instance.
"""
return self.send_command(self.address, "W010", "1")
def format_command(self, address, command, data=None):
"""Formats the command."""
data = data or ""
frame = self.FRAME_HEAD % (str(address).zfill(2), command, data)
return self.FRAME_TAIL % (frame, self.calculate_checksum(frame))
    def calculate_checksum(self, command):
        """Returns the frame checksum; the current implementation returns the '****'
        placeholder, which the sensor accepts in place of a computed checksum."""
return "****"
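    # Frame sketch, derived from FRAME_HEAD/FRAME_TAIL above: format_command(1, "R021")
    # produces ":01R021;;****\r\n" -- two-digit zero-padded address, command, the (empty)
    # data field, and the '****' placeholder occupying the checksum position.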
def get_payload(self, result):
"""Gets payload."""
frame_head = result[:-6]
result_type = frame_head[3]
if result_type == "a":
raise SensorTimeoutError("Sensor has not completed reading")
if result_type == "E":
error_index = frame_head.split(";")
            raise ProtocolError(
                "Application error, Result=%s; Error type: %s"
                % (frame_head, self.ERROR_CODES[str(error_index[1])])
            )
if result_type == "B":
raise ProtocolError("Sensor is busy, Result=%s" % frame_head)
return result[5:-6].split(";")
def send_command(self, address, command, data=None):
"""Sends a command to the sensor's address specified. The command
can optionally contain a data string.
This method is mostly for internal use, as the higher-level API is
exposed via dedicated methods.
Parameters
----------
address : :obj:`int`
PosConCM sensors have an address assigned, which defaults to 1.
There's also a broadcast address (``PosConCM.BROADCAST_ADDRESS``)
that can be used to query the address of the sensor connected to
the RS-485 bus. Only one sensor can be in the bus when using the
broadcast address to query for sensor's address.
command : :obj:`string`
            A string indicating the command number to be executed, prefixed with
            W or R depending on whether it is a write or read operation.
data : :obj:`string`
An optional string of data that is sent together with the command.
Returns
-------
`list` or value
Result of the command. It can be a list or a single value depending on the operation.
"""
for i in range(2):
cmd = self.format_command(address, command, data)
            # Python 2 vs 3: under Python 2 the command is already a byte string
            # (str has .decode); under Python 3 it must be encoded/decoded as ASCII.
if hasattr(cmd, "decode"):
self.serial.write(cmd)
result = self.serial.readline()
else:
self.serial.write(cmd.encode("ascii"))
result = self.serial.readline().decode("ascii")
if result:
try:
return self.get_payload(result)
except SensorTimeoutError:
time.sleep(0.5)
continue
return None
def get_address(self):
"""Gets the address of the RS-485 sensors currently connected to the bus. This command
is only really useful when this class is initialized with the broadcast address,
with the purpose of retrieving the address of a sensor connected.
Returns
-------
`int`
Address of the PosConCM sensor connected to the RS-485 bus.
Note
----
Only one PosConCM sensor can be connected to the bus for this operation to succeed.
"""
result = self.send_command(self.address, "R005")
return int(result[0])
def set_measurement_type(self, measurement_type):
"""Defines the measurement type to use.
        =================== ======
        Measurement type    Value
        =================== ======
        "Diameter"          28
        "X_center"          29
        "Z_center"          30
        "X_left"            31
        "X_right"           32
        "Z_top"             33
        =================== ======
Parameters
----------
measurement_type : :obj:`string`
Measurement type.
"""
if measurement_type not in self.MEASUREMENT_TYPES:
raise ProtocolError("Unsupported measure type, must be one of " + str(self.MEASUREMENT_TYPES))
return self.send_command(self.address, "W020", str(self.MEASUREMENT_TYPES[measurement_type]))
def set_precision(self, precision):
"""Defines the precision the sensor will use to determine edges:
======== ============ ===============================
Value Precision Function values
======== ============ ===============================
``0`` Standard Median=off, Moving Average=off
``1`` High Median=7, Moving Average=16
``2`` Very High Median=15, Moving Average=128
======== ============ ===============================
Parameters
----------
precision : :obj:`int`
Sensor precision to use.
Note
        ----
The higher the precision, the slower the measurement gets.
"""
if precision < 0 or precision > 2:
raise ProtocolError("Precision must be 0 (standard), 1 (high) or 2 (very high)")
return self.send_command(self.address, "W033", str(precision))
def get_measurement(self):
"""Retrieves the current measurement of the sensor according to the current settings.
Returns
-------
`tuple`
The current measurement and additionally a value indicating the quality of the measured value.
"""
result = self.send_command(self.address, "R021")
if len(result) != 3:
raise ProtocolError("Unexpected result: " + str(result))
value = result[0]
quality = int(result[1])
# If Valid
if quality == 0:
value = float(value)
return (value, self.QUALITY[quality])
    def activate_flex_mount(self):
        """Activates the FLEX Mount feature of the sensor to allow positioning it on an
        angled installation."""
result = self.send_command(self.address, "W035", "1")
return map(float, result)
def deactivate_flex_mount(self):
"""Deactivates the FLEX Mount feature."""
result = self.send_command(self.address, "W035", "0")
return map(float, result)
def set_flex_mount(self, angle, distance):
"""Sets the FLEX Mount feature to a specific angle and distance."""
data = "{:.2f};{:.2f}".format(angle, distance)
return self.send_command(self.address, "W036", data)
    def teach_flex_mount(self, reference_thickness):
        """Teaches the FLEX Mount feature using a reference plate of the given thickness."""
return self.send_command(self.address, "W037", str(reference_thickness))
def adjust_to_dark_object(self, is_dark_object):
"""Adjusts the sensor to detect darker or lighter surfaces."""
data = "1" if is_dark_object else "0"
result = self.send_command(self.address, "W032", data)
return map(float, result)
def reset(self):
"""Resets the sensor to factory settings."""
self.send_command(self.address, "W202", "0")
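# FLEX Mount sketch for PosConCM (illustrative; the thickness value, its unit and the
# serial/address setup -- reused from the class docstring example -- are assumptions,
# not values from this module): within the locked session, either teach the mounting
# angle from a reference plate or set it explicitly, then read measurements as usual.
#
#   with PosConCM(port, addr) as sensor:
#       sensor.teach_flex_mount(5.0)          # assumed reference plate thickness
#       # ...or explicitly: sensor.set_flex_mount(30.0, 120.0)
#       value, quality = sensor.get_measurement()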
/gds-django-jet-1.0.10.tar.gz/gds-django-jet-1.0.10/jet/dashboard/forms.py
import json
from django import forms
from django.core.exceptions import ValidationError
from jet.dashboard.models import UserDashboardModule
from jet.dashboard.utils import get_current_dashboard
from jet.utils import user_is_authenticated
class UpdateDashboardModulesForm(forms.Form):
app_label = forms.CharField(required=False)
modules = forms.CharField()
modules_objects = []
    def __init__(self, request, *args, **kwargs):
        self.request = request
        # Use an instance list so saved modules do not accumulate on the shared
        # class attribute across form instances.
        self.modules_objects = []
        super(UpdateDashboardModulesForm, self).__init__(*args, **kwargs)
def clean(self):
data = super(UpdateDashboardModulesForm, self).clean()
if not user_is_authenticated(self.request.user) or not self.request.user.is_staff:
raise ValidationError('error')
try:
modules = json.loads(data['modules'])
for module in modules:
db_module = UserDashboardModule.objects.get(
user=self.request.user.pk,
app_label=data['app_label'] if data['app_label'] else None,
pk=module['id']
)
column = module['column']
order = module['order']
if db_module.column != column or db_module.order != order:
db_module.column = column
db_module.order = order
self.modules_objects.append(db_module)
except Exception:
raise ValidationError('error')
return data
def save(self):
for module in self.modules_objects:
module.save()
class AddUserDashboardModuleForm(forms.ModelForm):
type = forms.CharField()
module = forms.IntegerField()
module_cls = None
def __init__(self, request, *args, **kwargs):
self.request = request
super(AddUserDashboardModuleForm, self).__init__(*args, **kwargs)
class Meta:
model = UserDashboardModule
fields = ['app_label']
def clean_app_label(self):
data = self.cleaned_data['app_label']
return data if data != '' else None
def clean(self):
data = super(AddUserDashboardModuleForm, self).clean()
if not user_is_authenticated(self.request.user) or not self.request.user.is_staff:
raise ValidationError('error')
if 'app_label' in data:
index_dashboard_cls = get_current_dashboard('app_index' if data['app_label'] else 'index')
index_dashboard = index_dashboard_cls({'request': self.request}, app_label=data['app_label'])
if 'type' in data:
if data['type'] == 'children':
module = index_dashboard.children[data['module']]
elif data['type'] == 'available_children':
module = index_dashboard.available_children[data['module']]()
else:
raise ValidationError('error')
self.module_cls = module
return data
def save(self, commit=True):
self.instance.title = self.module_cls.title
self.instance.module = self.module_cls.fullname()
self.instance.user = self.request.user.pk
self.instance.column = 0
self.instance.order = -1
self.instance.settings = self.module_cls.dump_settings()
self.instance.children = self.module_cls.dump_children()
return super(AddUserDashboardModuleForm, self).save(commit)
class UpdateDashboardModuleCollapseForm(forms.ModelForm):
def __init__(self, request, *args, **kwargs):
self.request = request
super(UpdateDashboardModuleCollapseForm, self).__init__(*args, **kwargs)
class Meta:
model = UserDashboardModule
fields = ['collapsed']
def clean(self):
data = super(UpdateDashboardModuleCollapseForm, self).clean()
if not user_is_authenticated(self.request.user) or not self.request.user.is_staff:
raise ValidationError('error')
if self.instance.user != self.request.user.pk:
raise ValidationError('error')
return data
class RemoveDashboardModuleForm(forms.ModelForm):
def __init__(self, request, *args, **kwargs):
self.request = request
super(RemoveDashboardModuleForm, self).__init__(*args, **kwargs)
class Meta:
model = UserDashboardModule
fields = []
def clean(self):
cleaned_data = super(RemoveDashboardModuleForm, self).clean()
if not user_is_authenticated(self.request.user) or self.instance.user != self.request.user.pk:
raise ValidationError('error')
return cleaned_data
def save(self, commit=True):
if commit:
self.instance.delete()
class ResetDashboardForm(forms.Form):
app_label = forms.CharField(required=False)
def __init__(self, request, *args, **kwargs):
self.request = request
super(ResetDashboardForm, self).__init__(*args, **kwargs)
class Meta:
model = UserDashboardModule
fields = []
def clean(self):
data = super(ResetDashboardForm, self).clean()
data['app_label'] = data['app_label'] if data['app_label'] else None
if not user_is_authenticated(self.request.user) or not self.request.user.is_staff:
raise ValidationError('error')
return data
def save(self, commit=True):
if commit:
UserDashboardModule.objects.filter(
user=self.request.user.pk,
app_label=self.cleaned_data['app_label']
).delete()
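# Usage sketch (hypothetical view code, not part of this module): every form here takes the
# request as its first argument so that clean() can authorize against request.user.
#
#   from django.http import JsonResponse
#
#   def update_dashboard_modules(request):
#       form = UpdateDashboardModulesForm(request, request.POST)
#       if form.is_valid():
#           form.save()
#           return JsonResponse({'error': False})
#       return JsonResponse({'error': True})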
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/botservice/v20220915/_inputs.py
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AcsChatChannelArgs',
'AlexaChannelPropertiesArgs',
'AlexaChannelArgs',
'BotPropertiesArgs',
'ConnectionSettingParameterArgs',
'ConnectionSettingPropertiesArgs',
'DirectLineChannelPropertiesArgs',
'DirectLineChannelArgs',
'DirectLineSiteArgs',
'DirectLineSpeechChannelPropertiesArgs',
'DirectLineSpeechChannelArgs',
'EmailChannelPropertiesArgs',
'EmailChannelArgs',
'FacebookChannelPropertiesArgs',
'FacebookChannelArgs',
'FacebookPageArgs',
'KikChannelPropertiesArgs',
'KikChannelArgs',
'LineChannelPropertiesArgs',
'LineChannelArgs',
'LineRegistrationArgs',
'M365ExtensionsArgs',
'MsTeamsChannelPropertiesArgs',
'MsTeamsChannelArgs',
'OmnichannelArgs',
'OutlookChannelArgs',
'PrivateLinkServiceConnectionStateArgs',
'SearchAssistantArgs',
'SkuArgs',
'SkypeChannelPropertiesArgs',
'SkypeChannelArgs',
'SlackChannelPropertiesArgs',
'SlackChannelArgs',
'SmsChannelPropertiesArgs',
'SmsChannelArgs',
'TelegramChannelPropertiesArgs',
'TelegramChannelArgs',
'TelephonyChannelPropertiesArgs',
'TelephonyChannelResourceApiConfigurationArgs',
'TelephonyChannelArgs',
'TelephonyPhoneNumbersArgs',
'WebChatChannelPropertiesArgs',
'WebChatChannelArgs',
'WebChatSiteArgs',
]
@pulumi.input_type
class AcsChatChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
AcsChat channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'AcsChatChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'AcsChatChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'AcsChatChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class AlexaChannelPropertiesArgs:
def __init__(__self__, *,
alexa_skill_id: pulumi.Input[str],
is_enabled: pulumi.Input[bool]):
"""
The parameters to provide for the Alexa channel.
:param pulumi.Input[str] alexa_skill_id: The Alexa skill Id
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
"""
pulumi.set(__self__, "alexa_skill_id", alexa_skill_id)
pulumi.set(__self__, "is_enabled", is_enabled)
@property
@pulumi.getter(name="alexaSkillId")
def alexa_skill_id(self) -> pulumi.Input[str]:
"""
The Alexa skill Id
"""
return pulumi.get(self, "alexa_skill_id")
@alexa_skill_id.setter
def alexa_skill_id(self, value: pulumi.Input[str]):
pulumi.set(self, "alexa_skill_id", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@pulumi.input_type
class AlexaChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['AlexaChannelPropertiesArgs']] = None):
"""
Alexa channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'AlexaChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['AlexaChannelPropertiesArgs'] properties: The set of properties specific to Alexa channel resource
"""
pulumi.set(__self__, "channel_name", 'AlexaChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'AlexaChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['AlexaChannelPropertiesArgs']]:
"""
The set of properties specific to Alexa channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['AlexaChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class BotPropertiesArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
endpoint: pulumi.Input[str],
msa_app_id: pulumi.Input[str],
all_settings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
app_password_hint: Optional[pulumi.Input[str]] = None,
cmek_key_vault_url: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
developer_app_insight_key: Optional[pulumi.Input[str]] = None,
developer_app_insights_api_key: Optional[pulumi.Input[str]] = None,
developer_app_insights_application_id: Optional[pulumi.Input[str]] = None,
disable_local_auth: Optional[pulumi.Input[bool]] = None,
icon_url: Optional[pulumi.Input[str]] = None,
is_cmek_enabled: Optional[pulumi.Input[bool]] = None,
is_streaming_supported: Optional[pulumi.Input[bool]] = None,
luis_app_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
luis_key: Optional[pulumi.Input[str]] = None,
manifest_url: Optional[pulumi.Input[str]] = None,
msa_app_msi_resource_id: Optional[pulumi.Input[str]] = None,
msa_app_tenant_id: Optional[pulumi.Input[str]] = None,
msa_app_type: Optional[pulumi.Input[Union[str, 'MsaAppType']]] = None,
open_with_hint: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
publishing_credentials: Optional[pulumi.Input[str]] = None,
schema_transformation_version: Optional[pulumi.Input[str]] = None,
storage_resource_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Bot.
:param pulumi.Input[str] display_name: The Name of the bot
:param pulumi.Input[str] endpoint: The bot's endpoint
:param pulumi.Input[str] msa_app_id: Microsoft App Id for the bot
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] all_settings: Contains resource all settings defined as key/value pairs.
:param pulumi.Input[str] app_password_hint: The hint (e.g. keyVault secret resourceId) on how to fetch the app secret
:param pulumi.Input[str] cmek_key_vault_url: The CMK Url
:param pulumi.Input[str] description: The description of the bot
:param pulumi.Input[str] developer_app_insight_key: The Application Insights key
:param pulumi.Input[str] developer_app_insights_api_key: The Application Insights Api Key
:param pulumi.Input[str] developer_app_insights_application_id: The Application Insights App Id
        :param pulumi.Input[bool] disable_local_auth: Opt out of local authentication so that only MSI and AAD can be used for authentication.
:param pulumi.Input[str] icon_url: The Icon Url of the bot
:param pulumi.Input[bool] is_cmek_enabled: Whether Cmek is enabled
:param pulumi.Input[bool] is_streaming_supported: Whether the bot is streaming supported
:param pulumi.Input[Sequence[pulumi.Input[str]]] luis_app_ids: Collection of LUIS App Ids
:param pulumi.Input[str] luis_key: The LUIS Key
:param pulumi.Input[str] manifest_url: The bot's manifest url
:param pulumi.Input[str] msa_app_msi_resource_id: Microsoft App Managed Identity Resource Id for the bot
:param pulumi.Input[str] msa_app_tenant_id: Microsoft App Tenant Id for the bot
:param pulumi.Input[Union[str, 'MsaAppType']] msa_app_type: Microsoft App Type for the bot
:param pulumi.Input[str] open_with_hint: The hint to browser (e.g. protocol handler) on how to open the bot for authoring
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Contains resource parameters defined as key/value pairs.
:param pulumi.Input[Union[str, 'PublicNetworkAccess']] public_network_access: Whether the bot is in an isolated network
:param pulumi.Input[str] publishing_credentials: Publishing credentials of the resource
:param pulumi.Input[str] schema_transformation_version: The channel schema transformation version for the bot
:param pulumi.Input[str] storage_resource_id: The storage resourceId for the bot
:param pulumi.Input[str] tenant_id: The Tenant Id for the bot
"""
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "endpoint", endpoint)
pulumi.set(__self__, "msa_app_id", msa_app_id)
if all_settings is not None:
pulumi.set(__self__, "all_settings", all_settings)
if app_password_hint is not None:
pulumi.set(__self__, "app_password_hint", app_password_hint)
if cmek_key_vault_url is not None:
pulumi.set(__self__, "cmek_key_vault_url", cmek_key_vault_url)
if description is not None:
pulumi.set(__self__, "description", description)
if developer_app_insight_key is not None:
pulumi.set(__self__, "developer_app_insight_key", developer_app_insight_key)
if developer_app_insights_api_key is not None:
pulumi.set(__self__, "developer_app_insights_api_key", developer_app_insights_api_key)
if developer_app_insights_application_id is not None:
pulumi.set(__self__, "developer_app_insights_application_id", developer_app_insights_application_id)
if disable_local_auth is not None:
pulumi.set(__self__, "disable_local_auth", disable_local_auth)
if icon_url is None:
icon_url = ''
if icon_url is not None:
pulumi.set(__self__, "icon_url", icon_url)
if is_cmek_enabled is None:
is_cmek_enabled = False
if is_cmek_enabled is not None:
pulumi.set(__self__, "is_cmek_enabled", is_cmek_enabled)
if is_streaming_supported is None:
is_streaming_supported = False
if is_streaming_supported is not None:
pulumi.set(__self__, "is_streaming_supported", is_streaming_supported)
if luis_app_ids is not None:
pulumi.set(__self__, "luis_app_ids", luis_app_ids)
if luis_key is not None:
pulumi.set(__self__, "luis_key", luis_key)
if manifest_url is not None:
pulumi.set(__self__, "manifest_url", manifest_url)
if msa_app_msi_resource_id is not None:
pulumi.set(__self__, "msa_app_msi_resource_id", msa_app_msi_resource_id)
if msa_app_tenant_id is not None:
pulumi.set(__self__, "msa_app_tenant_id", msa_app_tenant_id)
if msa_app_type is not None:
pulumi.set(__self__, "msa_app_type", msa_app_type)
if open_with_hint is not None:
pulumi.set(__self__, "open_with_hint", open_with_hint)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if public_network_access is None:
public_network_access = 'Enabled'
if public_network_access is not None:
pulumi.set(__self__, "public_network_access", public_network_access)
if publishing_credentials is not None:
pulumi.set(__self__, "publishing_credentials", publishing_credentials)
if schema_transformation_version is not None:
pulumi.set(__self__, "schema_transformation_version", schema_transformation_version)
if storage_resource_id is not None:
pulumi.set(__self__, "storage_resource_id", storage_resource_id)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
The Name of the bot
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def endpoint(self) -> pulumi.Input[str]:
"""
The bot's endpoint
"""
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: pulumi.Input[str]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="msaAppId")
def msa_app_id(self) -> pulumi.Input[str]:
"""
Microsoft App Id for the bot
"""
return pulumi.get(self, "msa_app_id")
@msa_app_id.setter
def msa_app_id(self, value: pulumi.Input[str]):
pulumi.set(self, "msa_app_id", value)
@property
@pulumi.getter(name="allSettings")
def all_settings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Contains resource all settings defined as key/value pairs.
"""
return pulumi.get(self, "all_settings")
@all_settings.setter
def all_settings(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "all_settings", value)
@property
@pulumi.getter(name="appPasswordHint")
def app_password_hint(self) -> Optional[pulumi.Input[str]]:
"""
The hint (e.g. keyVault secret resourceId) on how to fetch the app secret
"""
return pulumi.get(self, "app_password_hint")
@app_password_hint.setter
def app_password_hint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_password_hint", value)
@property
@pulumi.getter(name="cmekKeyVaultUrl")
def cmek_key_vault_url(self) -> Optional[pulumi.Input[str]]:
"""
The CMK Url
"""
return pulumi.get(self, "cmek_key_vault_url")
@cmek_key_vault_url.setter
def cmek_key_vault_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cmek_key_vault_url", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the bot
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="developerAppInsightKey")
def developer_app_insight_key(self) -> Optional[pulumi.Input[str]]:
"""
The Application Insights key
"""
return pulumi.get(self, "developer_app_insight_key")
@developer_app_insight_key.setter
def developer_app_insight_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "developer_app_insight_key", value)
@property
@pulumi.getter(name="developerAppInsightsApiKey")
def developer_app_insights_api_key(self) -> Optional[pulumi.Input[str]]:
"""
The Application Insights Api Key
"""
return pulumi.get(self, "developer_app_insights_api_key")
@developer_app_insights_api_key.setter
def developer_app_insights_api_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "developer_app_insights_api_key", value)
@property
@pulumi.getter(name="developerAppInsightsApplicationId")
def developer_app_insights_application_id(self) -> Optional[pulumi.Input[str]]:
"""
The Application Insights App Id
"""
return pulumi.get(self, "developer_app_insights_application_id")
@developer_app_insights_application_id.setter
def developer_app_insights_application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "developer_app_insights_application_id", value)
@property
@pulumi.getter(name="disableLocalAuth")
def disable_local_auth(self) -> Optional[pulumi.Input[bool]]:
"""
        Opt out of local authentication so that only MSI and AAD can be used for authentication.
"""
return pulumi.get(self, "disable_local_auth")
@disable_local_auth.setter
def disable_local_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_local_auth", value)
@property
@pulumi.getter(name="iconUrl")
def icon_url(self) -> Optional[pulumi.Input[str]]:
"""
The Icon Url of the bot
"""
return pulumi.get(self, "icon_url")
@icon_url.setter
def icon_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "icon_url", value)
@property
@pulumi.getter(name="isCmekEnabled")
def is_cmek_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether Cmek is enabled
"""
return pulumi.get(self, "is_cmek_enabled")
@is_cmek_enabled.setter
def is_cmek_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_cmek_enabled", value)
@property
@pulumi.getter(name="isStreamingSupported")
def is_streaming_supported(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the bot is streaming supported
"""
return pulumi.get(self, "is_streaming_supported")
@is_streaming_supported.setter
def is_streaming_supported(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_streaming_supported", value)
@property
@pulumi.getter(name="luisAppIds")
def luis_app_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Collection of LUIS App Ids
"""
return pulumi.get(self, "luis_app_ids")
@luis_app_ids.setter
def luis_app_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "luis_app_ids", value)
@property
@pulumi.getter(name="luisKey")
def luis_key(self) -> Optional[pulumi.Input[str]]:
"""
The LUIS Key
"""
return pulumi.get(self, "luis_key")
@luis_key.setter
def luis_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "luis_key", value)
@property
@pulumi.getter(name="manifestUrl")
def manifest_url(self) -> Optional[pulumi.Input[str]]:
"""
The bot's manifest url
"""
return pulumi.get(self, "manifest_url")
@manifest_url.setter
def manifest_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "manifest_url", value)
@property
@pulumi.getter(name="msaAppMSIResourceId")
def msa_app_msi_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
Microsoft App Managed Identity Resource Id for the bot
"""
return pulumi.get(self, "msa_app_msi_resource_id")
@msa_app_msi_resource_id.setter
def msa_app_msi_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msa_app_msi_resource_id", value)
@property
@pulumi.getter(name="msaAppTenantId")
def msa_app_tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
Microsoft App Tenant Id for the bot
"""
return pulumi.get(self, "msa_app_tenant_id")
@msa_app_tenant_id.setter
def msa_app_tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msa_app_tenant_id", value)
@property
@pulumi.getter(name="msaAppType")
def msa_app_type(self) -> Optional[pulumi.Input[Union[str, 'MsaAppType']]]:
"""
Microsoft App Type for the bot
"""
return pulumi.get(self, "msa_app_type")
@msa_app_type.setter
def msa_app_type(self, value: Optional[pulumi.Input[Union[str, 'MsaAppType']]]):
pulumi.set(self, "msa_app_type", value)
@property
@pulumi.getter(name="openWithHint")
def open_with_hint(self) -> Optional[pulumi.Input[str]]:
"""
The hint to browser (e.g. protocol handler) on how to open the bot for authoring
"""
return pulumi.get(self, "open_with_hint")
@open_with_hint.setter
def open_with_hint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "open_with_hint", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Contains resource parameters defined as key/value pairs.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]:
"""
Whether the bot is in an isolated network
"""
return pulumi.get(self, "public_network_access")
@public_network_access.setter
def public_network_access(self, value: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]):
pulumi.set(self, "public_network_access", value)
@property
@pulumi.getter(name="publishingCredentials")
def publishing_credentials(self) -> Optional[pulumi.Input[str]]:
"""
Publishing credentials of the resource
"""
return pulumi.get(self, "publishing_credentials")
@publishing_credentials.setter
def publishing_credentials(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "publishing_credentials", value)
@property
@pulumi.getter(name="schemaTransformationVersion")
def schema_transformation_version(self) -> Optional[pulumi.Input[str]]:
"""
The channel schema transformation version for the bot
"""
return pulumi.get(self, "schema_transformation_version")
@schema_transformation_version.setter
def schema_transformation_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema_transformation_version", value)
@property
@pulumi.getter(name="storageResourceId")
def storage_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The storage resourceId for the bot
"""
return pulumi.get(self, "storage_resource_id")
@storage_resource_id.setter
def storage_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_resource_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The Tenant Id for the bot
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
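    # Construction sketch (placeholder values): only display_name, endpoint and msa_app_id
    # are required; everything else falls back to the defaults applied in __init__ above
    # (e.g. public_network_access='Enabled', icon_url='').
    #
    #   props = BotPropertiesArgs(
    #       display_name="my-bot",
    #       endpoint="https://example.com/api/messages",
    #       msa_app_id="00000000-0000-0000-0000-000000000000",
    #   )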
@pulumi.input_type
class ConnectionSettingParameterArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Extra Parameter in a Connection Setting Properties to indicate service provider specific properties
:param pulumi.Input[str] key: Key for the Connection Setting Parameter.
:param pulumi.Input[str] value: Value associated with the Connection Setting Parameter.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key for the Connection Setting Parameter.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Value associated with the Connection Setting Parameter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ConnectionSettingPropertiesArgs:
def __init__(__self__, *,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionSettingParameterArgs']]]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
scopes: Optional[pulumi.Input[str]] = None,
service_provider_display_name: Optional[pulumi.Input[str]] = None,
service_provider_id: Optional[pulumi.Input[str]] = None):
"""
Properties for a Connection Setting Item
:param pulumi.Input[str] client_id: Client Id associated with the Connection Setting.
:param pulumi.Input[str] client_secret: Client Secret associated with the Connection Setting
:param pulumi.Input[str] id: Id of the Connection Setting.
:param pulumi.Input[str] name: Name of the Connection Setting.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionSettingParameterArgs']]] parameters: Service Provider Parameters associated with the Connection Setting
:param pulumi.Input[str] provisioning_state: Provisioning state of the resource
:param pulumi.Input[str] scopes: Scopes associated with the Connection Setting
:param pulumi.Input[str] service_provider_display_name: Service Provider Display Name associated with the Connection Setting
:param pulumi.Input[str] service_provider_id: Service Provider Id associated with the Connection Setting
"""
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if scopes is None:
scopes = ''
if scopes is not None:
pulumi.set(__self__, "scopes", scopes)
if service_provider_display_name is not None:
pulumi.set(__self__, "service_provider_display_name", service_provider_display_name)
if service_provider_id is not None:
pulumi.set(__self__, "service_provider_id", service_provider_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
Client Id associated with the Connection Setting.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
"""
Client Secret associated with the Connection Setting
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the Connection Setting.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Connection Setting.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionSettingParameterArgs']]]]:
"""
Service Provider Parameters associated with the Connection Setting
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionSettingParameterArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning state of the resource
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter
def scopes(self) -> Optional[pulumi.Input[str]]:
"""
Scopes associated with the Connection Setting
"""
return pulumi.get(self, "scopes")
@scopes.setter
def scopes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scopes", value)
@property
@pulumi.getter(name="serviceProviderDisplayName")
def service_provider_display_name(self) -> Optional[pulumi.Input[str]]:
"""
Service Provider Display Name associated with the Connection Setting
"""
return pulumi.get(self, "service_provider_display_name")
@service_provider_display_name.setter
def service_provider_display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_provider_display_name", value)
@property
@pulumi.getter(name="serviceProviderId")
def service_provider_id(self) -> Optional[pulumi.Input[str]]:
"""
Service Provider Id associated with the Connection Setting
"""
return pulumi.get(self, "service_provider_id")
@service_provider_id.setter
def service_provider_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_provider_id", value)
@pulumi.input_type
class DirectLineChannelPropertiesArgs:
def __init__(__self__, *,
direct_line_embed_code: Optional[pulumi.Input[str]] = None,
extension_key1: Optional[pulumi.Input[str]] = None,
extension_key2: Optional[pulumi.Input[str]] = None,
sites: Optional[pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]]] = None):
"""
The parameters to provide for the Direct Line channel.
:param pulumi.Input[str] direct_line_embed_code: Direct Line embed code of the resource
:param pulumi.Input[str] extension_key1: The extensionKey1
:param pulumi.Input[str] extension_key2: The extensionKey2
:param pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]] sites: The list of Direct Line sites
"""
if direct_line_embed_code is not None:
pulumi.set(__self__, "direct_line_embed_code", direct_line_embed_code)
if extension_key1 is None:
extension_key1 = ''
if extension_key1 is not None:
pulumi.set(__self__, "extension_key1", extension_key1)
if extension_key2 is None:
extension_key2 = ''
if extension_key2 is not None:
pulumi.set(__self__, "extension_key2", extension_key2)
if sites is not None:
pulumi.set(__self__, "sites", sites)
@property
@pulumi.getter(name="directLineEmbedCode")
def direct_line_embed_code(self) -> Optional[pulumi.Input[str]]:
"""
Direct Line embed code of the resource
"""
return pulumi.get(self, "direct_line_embed_code")
@direct_line_embed_code.setter
def direct_line_embed_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "direct_line_embed_code", value)
@property
@pulumi.getter(name="extensionKey1")
def extension_key1(self) -> Optional[pulumi.Input[str]]:
"""
The extensionKey1
"""
return pulumi.get(self, "extension_key1")
@extension_key1.setter
def extension_key1(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extension_key1", value)
@property
@pulumi.getter(name="extensionKey2")
def extension_key2(self) -> Optional[pulumi.Input[str]]:
"""
The extensionKey2
"""
return pulumi.get(self, "extension_key2")
@extension_key2.setter
def extension_key2(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extension_key2", value)
@property
@pulumi.getter
def sites(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]]]:
"""
The list of Direct Line sites
"""
return pulumi.get(self, "sites")
@sites.setter
def sites(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]]]):
pulumi.set(self, "sites", value)
@pulumi.input_type
class DirectLineChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['DirectLineChannelPropertiesArgs']] = None):
"""
Direct Line channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'DirectLineChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['DirectLineChannelPropertiesArgs'] properties: The set of properties specific to Direct Line channel resource
"""
pulumi.set(__self__, "channel_name", 'DirectLineChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'DirectLineChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['DirectLineChannelPropertiesArgs']]:
"""
The set of properties specific to Direct Line channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['DirectLineChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class DirectLineSiteArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
is_v1_enabled: pulumi.Input[bool],
is_v3_enabled: pulumi.Input[bool],
site_name: pulumi.Input[str],
app_id: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
is_block_user_upload_enabled: Optional[pulumi.Input[bool]] = None,
is_detailed_logging_enabled: Optional[pulumi.Input[bool]] = None,
is_endpoint_parameters_enabled: Optional[pulumi.Input[bool]] = None,
is_no_storage_enabled: Optional[pulumi.Input[bool]] = None,
is_secure_site_enabled: Optional[pulumi.Input[bool]] = None,
is_web_chat_speech_enabled: Optional[pulumi.Input[bool]] = None,
is_webchat_preview_enabled: Optional[pulumi.Input[bool]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
trusted_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A site for the Direct Line channel
:param pulumi.Input[bool] is_enabled: Whether this site is enabled for DirectLine channel
:param pulumi.Input[bool] is_v1_enabled: Whether this site is enabled for Bot Framework V1 protocol.
:param pulumi.Input[bool] is_v3_enabled: Whether this site is enabled for Bot Framework V3 protocol.
:param pulumi.Input[str] site_name: Site name
:param pulumi.Input[str] app_id: DirectLine application id
:param pulumi.Input[str] e_tag: Entity Tag
:param pulumi.Input[bool] is_block_user_upload_enabled: Whether this site is enabled for block user upload.
        :param pulumi.Input[bool] is_detailed_logging_enabled: Whether detailed logging is disabled for this site
        :param pulumi.Input[bool] is_endpoint_parameters_enabled: Whether EndpointParameters is enabled for this site
        :param pulumi.Input[bool] is_no_storage_enabled: Whether detailed logging is disabled for this no-storage site
:param pulumi.Input[bool] is_secure_site_enabled: Whether this site is enabled for authentication with Bot Framework.
:param pulumi.Input[bool] is_web_chat_speech_enabled: Whether this site is enabled for Webchat Speech
:param pulumi.Input[bool] is_webchat_preview_enabled: Whether this site is enabled for preview versions of Webchat
:param pulumi.Input[str] tenant_id: Tenant Id
:param pulumi.Input[Sequence[pulumi.Input[str]]] trusted_origins: List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "is_v1_enabled", is_v1_enabled)
pulumi.set(__self__, "is_v3_enabled", is_v3_enabled)
pulumi.set(__self__, "site_name", site_name)
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if is_block_user_upload_enabled is not None:
pulumi.set(__self__, "is_block_user_upload_enabled", is_block_user_upload_enabled)
if is_detailed_logging_enabled is not None:
pulumi.set(__self__, "is_detailed_logging_enabled", is_detailed_logging_enabled)
if is_endpoint_parameters_enabled is not None:
pulumi.set(__self__, "is_endpoint_parameters_enabled", is_endpoint_parameters_enabled)
if is_no_storage_enabled is not None:
pulumi.set(__self__, "is_no_storage_enabled", is_no_storage_enabled)
if is_secure_site_enabled is not None:
pulumi.set(__self__, "is_secure_site_enabled", is_secure_site_enabled)
if is_web_chat_speech_enabled is None:
is_web_chat_speech_enabled = False
if is_web_chat_speech_enabled is not None:
pulumi.set(__self__, "is_web_chat_speech_enabled", is_web_chat_speech_enabled)
if is_webchat_preview_enabled is None:
is_webchat_preview_enabled = False
if is_webchat_preview_enabled is not None:
pulumi.set(__self__, "is_webchat_preview_enabled", is_webchat_preview_enabled)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if trusted_origins is not None:
pulumi.set(__self__, "trusted_origins", trusted_origins)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for DirectLine channel
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="isV1Enabled")
def is_v1_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for Bot Framework V1 protocol.
"""
return pulumi.get(self, "is_v1_enabled")
@is_v1_enabled.setter
def is_v1_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_v1_enabled", value)
@property
@pulumi.getter(name="isV3Enabled")
def is_v3_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for Bot Framework V3 protocol.
"""
return pulumi.get(self, "is_v3_enabled")
@is_v3_enabled.setter
def is_v3_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_v3_enabled", value)
@property
@pulumi.getter(name="siteName")
def site_name(self) -> pulumi.Input[str]:
"""
Site name
"""
return pulumi.get(self, "site_name")
@site_name.setter
def site_name(self, value: pulumi.Input[str]):
pulumi.set(self, "site_name", value)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[str]]:
"""
DirectLine application id
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag
"""
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter(name="isBlockUserUploadEnabled")
def is_block_user_upload_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for block user upload.
"""
return pulumi.get(self, "is_block_user_upload_enabled")
@is_block_user_upload_enabled.setter
def is_block_user_upload_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_block_user_upload_enabled", value)
@property
@pulumi.getter(name="isDetailedLoggingEnabled")
def is_detailed_logging_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether detailed logging is disabled for this site
"""
return pulumi.get(self, "is_detailed_logging_enabled")
@is_detailed_logging_enabled.setter
def is_detailed_logging_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_detailed_logging_enabled", value)
@property
@pulumi.getter(name="isEndpointParametersEnabled")
def is_endpoint_parameters_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether EndpointParameters is enabled for this site
"""
return pulumi.get(self, "is_endpoint_parameters_enabled")
@is_endpoint_parameters_enabled.setter
def is_endpoint_parameters_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_endpoint_parameters_enabled", value)
@property
@pulumi.getter(name="isNoStorageEnabled")
def is_no_storage_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether detailed logging is disabled for this no-storage site
"""
return pulumi.get(self, "is_no_storage_enabled")
@is_no_storage_enabled.setter
def is_no_storage_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_no_storage_enabled", value)
@property
@pulumi.getter(name="isSecureSiteEnabled")
def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for authentication with Bot Framework.
"""
return pulumi.get(self, "is_secure_site_enabled")
@is_secure_site_enabled.setter
def is_secure_site_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_secure_site_enabled", value)
@property
@pulumi.getter(name="isWebChatSpeechEnabled")
def is_web_chat_speech_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for Webchat Speech
"""
return pulumi.get(self, "is_web_chat_speech_enabled")
@is_web_chat_speech_enabled.setter
def is_web_chat_speech_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_web_chat_speech_enabled", value)
@property
@pulumi.getter(name="isWebchatPreviewEnabled")
def is_webchat_preview_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for preview versions of Webchat
"""
return pulumi.get(self, "is_webchat_preview_enabled")
@is_webchat_preview_enabled.setter
def is_webchat_preview_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_webchat_preview_enabled", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
Tenant Id
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="trustedOrigins")
def trusted_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
return pulumi.get(self, "trusted_origins")
@trusted_origins.setter
def trusted_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "trusted_origins", value)
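    # Nesting sketch (placeholder values): a Direct Line channel is typically assembled from
    # one or more sites wrapped in DirectLineChannelPropertiesArgs, which in turn goes into
    # the DirectLineChannelArgs defined earlier in this module.
    #
    #   site = DirectLineSiteArgs(site_name="default", is_enabled=True,
    #                             is_v1_enabled=False, is_v3_enabled=True)
    #   channel = DirectLineChannelArgs(
    #       channel_name="DirectLineChannel",
    #       properties=DirectLineChannelPropertiesArgs(sites=[site]),
    #   )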
@pulumi.input_type
class DirectLineSpeechChannelPropertiesArgs:
def __init__(__self__, *,
cognitive_service_region: Optional[pulumi.Input[str]] = None,
cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,
cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,
custom_speech_model_id: Optional[pulumi.Input[str]] = None,
custom_voice_deployment_id: Optional[pulumi.Input[str]] = None,
is_default_bot_for_cog_svc_account: Optional[pulumi.Input[bool]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None):
"""
The parameters to provide for the DirectLine Speech channel.
:param pulumi.Input[str] cognitive_service_region: The cognitive service region with this channel registration.
:param pulumi.Input[str] cognitive_service_resource_id: The cognitive service id with this channel registration.
:param pulumi.Input[str] cognitive_service_subscription_key: The cognitive service subscription key to use with this channel registration.
        :param pulumi.Input[str] custom_speech_model_id: Custom speech model id (optional).
        :param pulumi.Input[str] custom_voice_deployment_id: Custom voice deployment id (optional).
:param pulumi.Input[bool] is_default_bot_for_cog_svc_account: Make this a default bot for chosen cognitive service account.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled or not.
"""
if cognitive_service_region is not None:
pulumi.set(__self__, "cognitive_service_region", cognitive_service_region)
if cognitive_service_resource_id is not None:
pulumi.set(__self__, "cognitive_service_resource_id", cognitive_service_resource_id)
if cognitive_service_subscription_key is not None:
pulumi.set(__self__, "cognitive_service_subscription_key", cognitive_service_subscription_key)
if custom_speech_model_id is not None:
pulumi.set(__self__, "custom_speech_model_id", custom_speech_model_id)
if custom_voice_deployment_id is not None:
pulumi.set(__self__, "custom_voice_deployment_id", custom_voice_deployment_id)
if is_default_bot_for_cog_svc_account is not None:
pulumi.set(__self__, "is_default_bot_for_cog_svc_account", is_default_bot_for_cog_svc_account)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
@property
@pulumi.getter(name="cognitiveServiceRegion")
def cognitive_service_region(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service region with this channel registration.
"""
return pulumi.get(self, "cognitive_service_region")
@cognitive_service_region.setter
def cognitive_service_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_region", value)
@property
@pulumi.getter(name="cognitiveServiceResourceId")
def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service id with this channel registration.
"""
return pulumi.get(self, "cognitive_service_resource_id")
@cognitive_service_resource_id.setter
def cognitive_service_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_resource_id", value)
@property
@pulumi.getter(name="cognitiveServiceSubscriptionKey")
def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service subscription key to use with this channel registration.
"""
return pulumi.get(self, "cognitive_service_subscription_key")
@cognitive_service_subscription_key.setter
def cognitive_service_subscription_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_subscription_key", value)
@property
@pulumi.getter(name="customSpeechModelId")
def custom_speech_model_id(self) -> Optional[pulumi.Input[str]]:
"""
        Custom speech model id (optional).
"""
return pulumi.get(self, "custom_speech_model_id")
@custom_speech_model_id.setter
def custom_speech_model_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_speech_model_id", value)
@property
@pulumi.getter(name="customVoiceDeploymentId")
def custom_voice_deployment_id(self) -> Optional[pulumi.Input[str]]:
"""
        Custom voice deployment id (optional).
"""
return pulumi.get(self, "custom_voice_deployment_id")
@custom_voice_deployment_id.setter
def custom_voice_deployment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_voice_deployment_id", value)
@property
@pulumi.getter(name="isDefaultBotForCogSvcAccount")
def is_default_bot_for_cog_svc_account(self) -> Optional[pulumi.Input[bool]]:
"""
Make this a default bot for chosen cognitive service account.
"""
return pulumi.get(self, "is_default_bot_for_cog_svc_account")
@is_default_bot_for_cog_svc_account.setter
def is_default_bot_for_cog_svc_account(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_default_bot_for_cog_svc_account", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel is enabled or not.
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_enabled", value)
@pulumi.input_type
class DirectLineSpeechChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['DirectLineSpeechChannelPropertiesArgs']] = None):
"""
DirectLine Speech channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'DirectLineSpeechChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['DirectLineSpeechChannelPropertiesArgs'] properties: The set of properties specific to DirectLine Speech channel resource
"""
pulumi.set(__self__, "channel_name", 'DirectLineSpeechChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'DirectLineSpeechChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['DirectLineSpeechChannelPropertiesArgs']]:
"""
The set of properties specific to DirectLine Speech channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['DirectLineSpeechChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class EmailChannelPropertiesArgs:
def __init__(__self__, *,
email_address: pulumi.Input[str],
is_enabled: pulumi.Input[bool],
auth_method: Optional[pulumi.Input[float]] = None,
magic_code: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Email channel.
:param pulumi.Input[str] email_address: The email address
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[float] auth_method: Email channel auth method. 0 Password (Default); 1 Graph.
:param pulumi.Input[str] magic_code: The magic code for setting up the modern authentication.
:param pulumi.Input[str] password: The password for the email address. Value only returned through POST to the action Channel List API, otherwise empty.
"""
pulumi.set(__self__, "email_address", email_address)
pulumi.set(__self__, "is_enabled", is_enabled)
if auth_method is not None:
pulumi.set(__self__, "auth_method", auth_method)
if magic_code is not None:
pulumi.set(__self__, "magic_code", magic_code)
if password is not None:
pulumi.set(__self__, "password", password)
@property
@pulumi.getter(name="emailAddress")
def email_address(self) -> pulumi.Input[str]:
"""
The email address
"""
return pulumi.get(self, "email_address")
@email_address.setter
def email_address(self, value: pulumi.Input[str]):
pulumi.set(self, "email_address", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="authMethod")
def auth_method(self) -> Optional[pulumi.Input[float]]:
"""
Email channel auth method. 0 Password (Default); 1 Graph.
"""
return pulumi.get(self, "auth_method")
@auth_method.setter
def auth_method(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "auth_method", value)
@property
@pulumi.getter(name="magicCode")
def magic_code(self) -> Optional[pulumi.Input[str]]:
"""
The magic code for setting up the modern authentication.
"""
return pulumi.get(self, "magic_code")
@magic_code.setter
def magic_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "magic_code", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The password for the email address. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@pulumi.input_type
class EmailChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['EmailChannelPropertiesArgs']] = None):
"""
Email channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'EmailChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['EmailChannelPropertiesArgs'] properties: The set of properties specific to email channel resource
"""
pulumi.set(__self__, "channel_name", 'EmailChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'EmailChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['EmailChannelPropertiesArgs']]:
"""
The set of properties specific to email channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['EmailChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class FacebookChannelPropertiesArgs:
def __init__(__self__, *,
app_id: pulumi.Input[str],
is_enabled: pulumi.Input[bool],
app_secret: Optional[pulumi.Input[str]] = None,
pages: Optional[pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]]] = None):
"""
The parameters to provide for the Facebook channel.
:param pulumi.Input[str] app_id: Facebook application id
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] app_secret: Facebook application secret. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]] pages: The list of Facebook pages
"""
pulumi.set(__self__, "app_id", app_id)
pulumi.set(__self__, "is_enabled", is_enabled)
if app_secret is not None:
pulumi.set(__self__, "app_secret", app_secret)
if pages is not None:
pulumi.set(__self__, "pages", pages)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Input[str]:
"""
Facebook application id
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: pulumi.Input[str]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="appSecret")
def app_secret(self) -> Optional[pulumi.Input[str]]:
"""
Facebook application secret. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "app_secret")
@app_secret.setter
def app_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_secret", value)
@property
@pulumi.getter
def pages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]]]:
"""
The list of Facebook pages
"""
return pulumi.get(self, "pages")
@pages.setter
def pages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]]]):
pulumi.set(self, "pages", value)
@pulumi.input_type
class FacebookChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['FacebookChannelPropertiesArgs']] = None):
"""
Facebook channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'FacebookChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['FacebookChannelPropertiesArgs'] properties: The set of properties specific to bot facebook channel
"""
pulumi.set(__self__, "channel_name", 'FacebookChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'FacebookChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['FacebookChannelPropertiesArgs']]:
"""
The set of properties specific to bot facebook channel
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['FacebookChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class FacebookPageArgs:
def __init__(__self__, *,
id: pulumi.Input[str],
access_token: Optional[pulumi.Input[str]] = None):
"""
A Facebook page for Facebook channel registration
:param pulumi.Input[str] id: Page id
:param pulumi.Input[str] access_token: Facebook application access token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
pulumi.set(__self__, "id", id)
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
Page id
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[pulumi.Input[str]]:
"""
Facebook application access token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "access_token")
@access_token.setter
def access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_token", value)
@pulumi.input_type
class KikChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
user_name: pulumi.Input[str],
api_key: Optional[pulumi.Input[str]] = None,
is_validated: Optional[pulumi.Input[bool]] = None):
"""
The parameters to provide for the Kik channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] user_name: The Kik user name
:param pulumi.Input[str] api_key: Kik API key. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[bool] is_validated: Whether this channel is validated for the bot
"""
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "user_name", user_name)
if api_key is not None:
pulumi.set(__self__, "api_key", api_key)
if is_validated is not None:
pulumi.set(__self__, "is_validated", is_validated)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> pulumi.Input[str]:
"""
The Kik user name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: pulumi.Input[str]):
pulumi.set(self, "user_name", value)
@property
@pulumi.getter(name="apiKey")
def api_key(self) -> Optional[pulumi.Input[str]]:
"""
Kik API key. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "api_key")
@api_key.setter
def api_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_key", value)
@property
@pulumi.getter(name="isValidated")
def is_validated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel is validated for the bot
"""
return pulumi.get(self, "is_validated")
@is_validated.setter
def is_validated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_validated", value)
@pulumi.input_type
class KikChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['KikChannelPropertiesArgs']] = None):
"""
Kik channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'KikChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['KikChannelPropertiesArgs'] properties: The set of properties specific to Kik channel resource
"""
pulumi.set(__self__, "channel_name", 'KikChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'KikChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['KikChannelPropertiesArgs']]:
"""
The set of properties specific to Kik channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['KikChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class LineChannelPropertiesArgs:
def __init__(__self__, *,
line_registrations: pulumi.Input[Sequence[pulumi.Input['LineRegistrationArgs']]]):
"""
The parameters to provide for the Line channel.
:param pulumi.Input[Sequence[pulumi.Input['LineRegistrationArgs']]] line_registrations: The list of line channel registrations
"""
pulumi.set(__self__, "line_registrations", line_registrations)
@property
@pulumi.getter(name="lineRegistrations")
def line_registrations(self) -> pulumi.Input[Sequence[pulumi.Input['LineRegistrationArgs']]]:
"""
The list of line channel registrations
"""
return pulumi.get(self, "line_registrations")
@line_registrations.setter
def line_registrations(self, value: pulumi.Input[Sequence[pulumi.Input['LineRegistrationArgs']]]):
pulumi.set(self, "line_registrations", value)
@pulumi.input_type
class LineChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['LineChannelPropertiesArgs']] = None):
"""
Line channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'LineChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['LineChannelPropertiesArgs'] properties: The set of properties specific to line channel resource
"""
pulumi.set(__self__, "channel_name", 'LineChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'LineChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['LineChannelPropertiesArgs']]:
"""
The set of properties specific to line channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['LineChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class LineRegistrationArgs:
def __init__(__self__, *,
channel_access_token: Optional[pulumi.Input[str]] = None,
channel_secret: Optional[pulumi.Input[str]] = None):
"""
The properties corresponding to a line channel registration
:param pulumi.Input[str] channel_access_token: Access token for the line channel registration
:param pulumi.Input[str] channel_secret: Secret for the line channel registration
"""
if channel_access_token is not None:
pulumi.set(__self__, "channel_access_token", channel_access_token)
if channel_secret is not None:
pulumi.set(__self__, "channel_secret", channel_secret)
@property
@pulumi.getter(name="channelAccessToken")
def channel_access_token(self) -> Optional[pulumi.Input[str]]:
"""
Access token for the line channel registration
"""
return pulumi.get(self, "channel_access_token")
@channel_access_token.setter
def channel_access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "channel_access_token", value)
@property
@pulumi.getter(name="channelSecret")
def channel_secret(self) -> Optional[pulumi.Input[str]]:
"""
Secret for the line channel registration
"""
return pulumi.get(self, "channel_secret")
@channel_secret.setter
def channel_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "channel_secret", value)
@pulumi.input_type
class M365ExtensionsArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
M365 Extensions definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'M365Extensions'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'M365Extensions')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'M365Extensions'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class MsTeamsChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
accepted_terms: Optional[pulumi.Input[bool]] = None,
calling_webhook: Optional[pulumi.Input[str]] = None,
deployment_environment: Optional[pulumi.Input[str]] = None,
enable_calling: Optional[pulumi.Input[bool]] = None,
incoming_call_route: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Microsoft Teams channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[bool] accepted_terms: Whether this channel accepted terms
:param pulumi.Input[str] calling_webhook: Webhook for Microsoft Teams channel calls
:param pulumi.Input[str] deployment_environment: Deployment environment for Microsoft Teams channel calls
:param pulumi.Input[bool] enable_calling: Enable calling for Microsoft Teams channel
        :param pulumi.Input[str] incoming_call_route: Incoming call route for Microsoft Teams channel calls
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if accepted_terms is not None:
pulumi.set(__self__, "accepted_terms", accepted_terms)
if calling_webhook is not None:
pulumi.set(__self__, "calling_webhook", calling_webhook)
if deployment_environment is None:
deployment_environment = 'FallbackDeploymentEnvironment'
if deployment_environment is not None:
pulumi.set(__self__, "deployment_environment", deployment_environment)
if enable_calling is None:
enable_calling = False
if enable_calling is not None:
pulumi.set(__self__, "enable_calling", enable_calling)
if incoming_call_route is not None:
pulumi.set(__self__, "incoming_call_route", incoming_call_route)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="acceptedTerms")
def accepted_terms(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel accepted terms
"""
return pulumi.get(self, "accepted_terms")
@accepted_terms.setter
def accepted_terms(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "accepted_terms", value)
@property
@pulumi.getter(name="callingWebhook")
def calling_webhook(self) -> Optional[pulumi.Input[str]]:
"""
Webhook for Microsoft Teams channel calls
"""
return pulumi.get(self, "calling_webhook")
@calling_webhook.setter
def calling_webhook(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "calling_webhook", value)
@property
@pulumi.getter(name="deploymentEnvironment")
def deployment_environment(self) -> Optional[pulumi.Input[str]]:
"""
Deployment environment for Microsoft Teams channel calls
"""
return pulumi.get(self, "deployment_environment")
@deployment_environment.setter
def deployment_environment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_environment", value)
@property
@pulumi.getter(name="enableCalling")
def enable_calling(self) -> Optional[pulumi.Input[bool]]:
"""
Enable calling for Microsoft Teams channel
"""
return pulumi.get(self, "enable_calling")
@enable_calling.setter
def enable_calling(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_calling", value)
@property
@pulumi.getter(name="incomingCallRoute")
def incoming_call_route(self) -> Optional[pulumi.Input[str]]:
"""
        Incoming call route for Microsoft Teams channel calls
"""
return pulumi.get(self, "incoming_call_route")
@incoming_call_route.setter
def incoming_call_route(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incoming_call_route", value)
@pulumi.input_type
class MsTeamsChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['MsTeamsChannelPropertiesArgs']] = None):
"""
Microsoft Teams channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'MsTeamsChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['MsTeamsChannelPropertiesArgs'] properties: The set of properties specific to Microsoft Teams channel resource
"""
pulumi.set(__self__, "channel_name", 'MsTeamsChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'MsTeamsChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['MsTeamsChannelPropertiesArgs']]:
"""
The set of properties specific to Microsoft Teams channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['MsTeamsChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class OmnichannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
Omnichannel channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'Omnichannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'Omnichannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'Omnichannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class OutlookChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
Outlook channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'OutlookChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'OutlookChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'OutlookChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
def __init__(__self__, *,
actions_required: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None):
"""
A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] actions_required: A message indicating if changes on the service provider require any updates on the consumer.
:param pulumi.Input[str] description: The reason for approval/rejection of the connection.
:param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
if actions_required is not None:
pulumi.set(__self__, "actions_required", actions_required)
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="actionsRequired")
def actions_required(self) -> Optional[pulumi.Input[str]]:
"""
A message indicating if changes on the service provider require any updates on the consumer.
"""
return pulumi.get(self, "actions_required")
@actions_required.setter
def actions_required(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "actions_required", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The reason for approval/rejection of the connection.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class SearchAssistantArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
SearchAssistant definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'SearchAssistant'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'SearchAssistant')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'SearchAssistant'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input[Union[str, 'SkuName']]):
"""
        The SKU of the bot service account.
:param pulumi.Input[Union[str, 'SkuName']] name: The sku name
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[Union[str, 'SkuName']]:
"""
The sku name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[Union[str, 'SkuName']]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SkypeChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
calling_web_hook: Optional[pulumi.Input[str]] = None,
enable_calling: Optional[pulumi.Input[bool]] = None,
enable_groups: Optional[pulumi.Input[bool]] = None,
enable_media_cards: Optional[pulumi.Input[bool]] = None,
enable_messaging: Optional[pulumi.Input[bool]] = None,
enable_screen_sharing: Optional[pulumi.Input[bool]] = None,
enable_video: Optional[pulumi.Input[bool]] = None,
groups_mode: Optional[pulumi.Input[str]] = None,
incoming_call_route: Optional[pulumi.Input[str]] = None):
"""
        The parameters to provide for the Skype channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] calling_web_hook: Calling web hook for Skype channel
:param pulumi.Input[bool] enable_calling: Enable calling for Skype channel
:param pulumi.Input[bool] enable_groups: Enable groups for Skype channel
:param pulumi.Input[bool] enable_media_cards: Enable media cards for Skype channel
:param pulumi.Input[bool] enable_messaging: Enable messaging for Skype channel
:param pulumi.Input[bool] enable_screen_sharing: Enable screen sharing for Skype channel
:param pulumi.Input[bool] enable_video: Enable video for Skype channel
:param pulumi.Input[str] groups_mode: Group mode for Skype channel
:param pulumi.Input[str] incoming_call_route: Incoming call route for Skype channel
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if calling_web_hook is not None:
pulumi.set(__self__, "calling_web_hook", calling_web_hook)
if enable_calling is None:
enable_calling = False
if enable_calling is not None:
pulumi.set(__self__, "enable_calling", enable_calling)
if enable_groups is not None:
pulumi.set(__self__, "enable_groups", enable_groups)
if enable_media_cards is not None:
pulumi.set(__self__, "enable_media_cards", enable_media_cards)
if enable_messaging is not None:
pulumi.set(__self__, "enable_messaging", enable_messaging)
if enable_screen_sharing is not None:
pulumi.set(__self__, "enable_screen_sharing", enable_screen_sharing)
if enable_video is not None:
pulumi.set(__self__, "enable_video", enable_video)
if groups_mode is not None:
pulumi.set(__self__, "groups_mode", groups_mode)
if incoming_call_route is not None:
pulumi.set(__self__, "incoming_call_route", incoming_call_route)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="callingWebHook")
def calling_web_hook(self) -> Optional[pulumi.Input[str]]:
"""
Calling web hook for Skype channel
"""
return pulumi.get(self, "calling_web_hook")
@calling_web_hook.setter
def calling_web_hook(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "calling_web_hook", value)
@property
@pulumi.getter(name="enableCalling")
def enable_calling(self) -> Optional[pulumi.Input[bool]]:
"""
Enable calling for Skype channel
"""
return pulumi.get(self, "enable_calling")
@enable_calling.setter
def enable_calling(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_calling", value)
@property
@pulumi.getter(name="enableGroups")
def enable_groups(self) -> Optional[pulumi.Input[bool]]:
"""
Enable groups for Skype channel
"""
return pulumi.get(self, "enable_groups")
@enable_groups.setter
def enable_groups(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_groups", value)
@property
@pulumi.getter(name="enableMediaCards")
def enable_media_cards(self) -> Optional[pulumi.Input[bool]]:
"""
Enable media cards for Skype channel
"""
return pulumi.get(self, "enable_media_cards")
@enable_media_cards.setter
def enable_media_cards(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_media_cards", value)
@property
@pulumi.getter(name="enableMessaging")
def enable_messaging(self) -> Optional[pulumi.Input[bool]]:
"""
Enable messaging for Skype channel
"""
return pulumi.get(self, "enable_messaging")
@enable_messaging.setter
def enable_messaging(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_messaging", value)
@property
@pulumi.getter(name="enableScreenSharing")
def enable_screen_sharing(self) -> Optional[pulumi.Input[bool]]:
"""
Enable screen sharing for Skype channel
"""
return pulumi.get(self, "enable_screen_sharing")
@enable_screen_sharing.setter
def enable_screen_sharing(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_screen_sharing", value)
@property
@pulumi.getter(name="enableVideo")
def enable_video(self) -> Optional[pulumi.Input[bool]]:
"""
Enable video for Skype channel
"""
return pulumi.get(self, "enable_video")
@enable_video.setter
def enable_video(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_video", value)
@property
@pulumi.getter(name="groupsMode")
def groups_mode(self) -> Optional[pulumi.Input[str]]:
"""
Group mode for Skype channel
"""
return pulumi.get(self, "groups_mode")
@groups_mode.setter
def groups_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "groups_mode", value)
@property
@pulumi.getter(name="incomingCallRoute")
def incoming_call_route(self) -> Optional[pulumi.Input[str]]:
"""
Incoming call route for Skype channel
"""
return pulumi.get(self, "incoming_call_route")
@incoming_call_route.setter
def incoming_call_route(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incoming_call_route", value)
@pulumi.input_type
class SkypeChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['SkypeChannelPropertiesArgs']] = None):
"""
Skype channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'SkypeChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['SkypeChannelPropertiesArgs'] properties: The set of properties specific to Skype channel resource
"""
pulumi.set(__self__, "channel_name", 'SkypeChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'SkypeChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['SkypeChannelPropertiesArgs']]:
"""
The set of properties specific to Skype channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['SkypeChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class SlackChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
landing_page_url: Optional[pulumi.Input[str]] = None,
register_before_o_auth_flow: Optional[pulumi.Input[bool]] = None,
scopes: Optional[pulumi.Input[str]] = None,
signing_secret: Optional[pulumi.Input[str]] = None,
verification_token: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Slack channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] client_id: The Slack client id
:param pulumi.Input[str] client_secret: The Slack client secret. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[str] landing_page_url: The Slack landing page Url
        :param pulumi.Input[bool] register_before_o_auth_flow: Whether to register the settings before OAuth validation is performed. Recommended value is True.
:param pulumi.Input[str] scopes: The Slack permission scopes.
:param pulumi.Input[str] signing_secret: The Slack signing secret.
:param pulumi.Input[str] verification_token: The Slack verification token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if landing_page_url is not None:
pulumi.set(__self__, "landing_page_url", landing_page_url)
if register_before_o_auth_flow is not None:
pulumi.set(__self__, "register_before_o_auth_flow", register_before_o_auth_flow)
if scopes is not None:
pulumi.set(__self__, "scopes", scopes)
if signing_secret is not None:
pulumi.set(__self__, "signing_secret", signing_secret)
if verification_token is not None:
pulumi.set(__self__, "verification_token", verification_token)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
The Slack client id
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The Slack client secret. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="landingPageUrl")
def landing_page_url(self) -> Optional[pulumi.Input[str]]:
"""
The Slack landing page Url
"""
return pulumi.get(self, "landing_page_url")
@landing_page_url.setter
def landing_page_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "landing_page_url", value)
@property
@pulumi.getter(name="registerBeforeOAuthFlow")
def register_before_o_auth_flow(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether to register the settings before OAuth validation is performed. Recommended value is True.
"""
return pulumi.get(self, "register_before_o_auth_flow")
@register_before_o_auth_flow.setter
def register_before_o_auth_flow(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "register_before_o_auth_flow", value)
@property
@pulumi.getter
def scopes(self) -> Optional[pulumi.Input[str]]:
"""
The Slack permission scopes.
"""
return pulumi.get(self, "scopes")
@scopes.setter
def scopes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scopes", value)
@property
@pulumi.getter(name="signingSecret")
def signing_secret(self) -> Optional[pulumi.Input[str]]:
"""
The Slack signing secret.
"""
return pulumi.get(self, "signing_secret")
@signing_secret.setter
def signing_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "signing_secret", value)
@property
@pulumi.getter(name="verificationToken")
def verification_token(self) -> Optional[pulumi.Input[str]]:
"""
The Slack verification token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "verification_token")
@verification_token.setter
def verification_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "verification_token", value)
@pulumi.input_type
class SlackChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['SlackChannelPropertiesArgs']] = None):
"""
Slack channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'SlackChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['SlackChannelPropertiesArgs'] properties: The set of properties specific to Slack channel resource
"""
pulumi.set(__self__, "channel_name", 'SlackChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'SlackChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['SlackChannelPropertiesArgs']]:
"""
The set of properties specific to Slack channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['SlackChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class SmsChannelPropertiesArgs:
def __init__(__self__, *,
account_sid: pulumi.Input[str],
is_enabled: pulumi.Input[bool],
phone: pulumi.Input[str],
auth_token: Optional[pulumi.Input[str]] = None,
is_validated: Optional[pulumi.Input[bool]] = None):
"""
The parameters to provide for the Sms channel.
:param pulumi.Input[str] account_sid: The Sms account SID. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
        :param pulumi.Input[str] phone: The Sms phone number
:param pulumi.Input[str] auth_token: The Sms auth token. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[bool] is_validated: Whether this channel is validated for the bot
"""
pulumi.set(__self__, "account_sid", account_sid)
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "phone", phone)
if auth_token is not None:
pulumi.set(__self__, "auth_token", auth_token)
if is_validated is not None:
pulumi.set(__self__, "is_validated", is_validated)
@property
@pulumi.getter(name="accountSID")
def account_sid(self) -> pulumi.Input[str]:
"""
The Sms account SID. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "account_sid")
@account_sid.setter
def account_sid(self, value: pulumi.Input[str]):
pulumi.set(self, "account_sid", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter
def phone(self) -> pulumi.Input[str]:
"""
        The Sms phone number
"""
return pulumi.get(self, "phone")
@phone.setter
def phone(self, value: pulumi.Input[str]):
pulumi.set(self, "phone", value)
@property
@pulumi.getter(name="authToken")
def auth_token(self) -> Optional[pulumi.Input[str]]:
"""
The Sms auth token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "auth_token")
@auth_token.setter
def auth_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_token", value)
@property
@pulumi.getter(name="isValidated")
def is_validated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel is validated for the bot
"""
return pulumi.get(self, "is_validated")
@is_validated.setter
def is_validated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_validated", value)
@pulumi.input_type
class SmsChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['SmsChannelPropertiesArgs']] = None):
"""
Sms channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'SmsChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['SmsChannelPropertiesArgs'] properties: The set of properties specific to Sms channel resource
"""
pulumi.set(__self__, "channel_name", 'SmsChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'SmsChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['SmsChannelPropertiesArgs']]:
"""
The set of properties specific to Sms channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['SmsChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class TelegramChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
access_token: Optional[pulumi.Input[str]] = None,
is_validated: Optional[pulumi.Input[bool]] = None):
"""
The parameters to provide for the Telegram channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] access_token: The Telegram access token. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[bool] is_validated: Whether this channel is validated for the bot
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if is_validated is not None:
pulumi.set(__self__, "is_validated", is_validated)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[pulumi.Input[str]]:
"""
The Telegram access token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "access_token")
@access_token.setter
def access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_token", value)
@property
@pulumi.getter(name="isValidated")
def is_validated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel is validated for the bot
"""
return pulumi.get(self, "is_validated")
@is_validated.setter
def is_validated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_validated", value)
@pulumi.input_type
class TelegramChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['TelegramChannelPropertiesArgs']] = None):
"""
Telegram channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'TelegramChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['TelegramChannelPropertiesArgs'] properties: The set of properties specific to Telegram channel resource
"""
pulumi.set(__self__, "channel_name", 'TelegramChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'TelegramChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['TelegramChannelPropertiesArgs']]:
"""
The set of properties specific to Telegram channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['TelegramChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class TelephonyChannelPropertiesArgs:
def __init__(__self__, *,
api_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyChannelResourceApiConfigurationArgs']]]] = None,
cognitive_service_region: Optional[pulumi.Input[str]] = None,
cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,
default_locale: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
phone_numbers: Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]]] = None,
premium_sku: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Direct Line channel.
:param pulumi.Input[Sequence[pulumi.Input['TelephonyChannelResourceApiConfigurationArgs']]] api_configurations: The list of Telephony api configuration
:param pulumi.Input[str] cognitive_service_region: The extensionKey2
:param pulumi.Input[str] cognitive_service_subscription_key: The extensionKey1
:param pulumi.Input[str] default_locale: The default locale of the channel
:param pulumi.Input[bool] is_enabled: Whether the channel is enabled
:param pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]] phone_numbers: The list of Telephony phone numbers
:param pulumi.Input[str] premium_sku: The premium SKU applied to the channel
"""
if api_configurations is not None:
pulumi.set(__self__, "api_configurations", api_configurations)
if cognitive_service_region is not None:
pulumi.set(__self__, "cognitive_service_region", cognitive_service_region)
if cognitive_service_subscription_key is not None:
pulumi.set(__self__, "cognitive_service_subscription_key", cognitive_service_subscription_key)
if default_locale is not None:
pulumi.set(__self__, "default_locale", default_locale)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
if phone_numbers is not None:
pulumi.set(__self__, "phone_numbers", phone_numbers)
if premium_sku is not None:
pulumi.set(__self__, "premium_sku", premium_sku)
@property
@pulumi.getter(name="apiConfigurations")
def api_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyChannelResourceApiConfigurationArgs']]]]:
"""
The list of Telephony api configuration
"""
return pulumi.get(self, "api_configurations")
@api_configurations.setter
def api_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyChannelResourceApiConfigurationArgs']]]]):
pulumi.set(self, "api_configurations", value)
@property
@pulumi.getter(name="cognitiveServiceRegion")
def cognitive_service_region(self) -> Optional[pulumi.Input[str]]:
"""
The extensionKey2
"""
return pulumi.get(self, "cognitive_service_region")
@cognitive_service_region.setter
def cognitive_service_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_region", value)
@property
@pulumi.getter(name="cognitiveServiceSubscriptionKey")
def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:
"""
The extensionKey1
"""
return pulumi.get(self, "cognitive_service_subscription_key")
@cognitive_service_subscription_key.setter
def cognitive_service_subscription_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_subscription_key", value)
@property
@pulumi.getter(name="defaultLocale")
def default_locale(self) -> Optional[pulumi.Input[str]]:
"""
The default locale of the channel
"""
return pulumi.get(self, "default_locale")
@default_locale.setter
def default_locale(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_locale", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the channel is enabled
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="phoneNumbers")
def phone_numbers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]]]:
"""
The list of Telephony phone numbers
"""
return pulumi.get(self, "phone_numbers")
@phone_numbers.setter
def phone_numbers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]]]):
pulumi.set(self, "phone_numbers", value)
@property
@pulumi.getter(name="premiumSKU")
def premium_sku(self) -> Optional[pulumi.Input[str]]:
"""
The premium SKU applied to the channel
"""
return pulumi.get(self, "premium_sku")
@premium_sku.setter
def premium_sku(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "premium_sku", value)
@pulumi.input_type
class TelephonyChannelResourceApiConfigurationArgs:
def __init__(__self__, *,
cognitive_service_region: Optional[pulumi.Input[str]] = None,
cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,
cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,
default_locale: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
provider_name: Optional[pulumi.Input[str]] = None):
"""
A resource Api configuration for the Telephony channel
:param pulumi.Input[str] cognitive_service_region: The cognitive service region.
:param pulumi.Input[str] cognitive_service_resource_id: The cognitive service resourceId.
:param pulumi.Input[str] cognitive_service_subscription_key: The cognitive service subscription key.
:param pulumi.Input[str] default_locale: The default locale.
:param pulumi.Input[str] id: The id of config.
:param pulumi.Input[str] provider_name: The provider name.
"""
if cognitive_service_region is not None:
pulumi.set(__self__, "cognitive_service_region", cognitive_service_region)
if cognitive_service_resource_id is not None:
pulumi.set(__self__, "cognitive_service_resource_id", cognitive_service_resource_id)
if cognitive_service_subscription_key is not None:
pulumi.set(__self__, "cognitive_service_subscription_key", cognitive_service_subscription_key)
if default_locale is not None:
pulumi.set(__self__, "default_locale", default_locale)
if id is not None:
pulumi.set(__self__, "id", id)
if provider_name is not None:
pulumi.set(__self__, "provider_name", provider_name)
@property
@pulumi.getter(name="cognitiveServiceRegion")
def cognitive_service_region(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service region.
"""
return pulumi.get(self, "cognitive_service_region")
@cognitive_service_region.setter
def cognitive_service_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_region", value)
@property
@pulumi.getter(name="cognitiveServiceResourceId")
def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service resourceId.
"""
return pulumi.get(self, "cognitive_service_resource_id")
@cognitive_service_resource_id.setter
def cognitive_service_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_resource_id", value)
@property
@pulumi.getter(name="cognitiveServiceSubscriptionKey")
def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service subscription key.
"""
return pulumi.get(self, "cognitive_service_subscription_key")
@cognitive_service_subscription_key.setter
def cognitive_service_subscription_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_subscription_key", value)
@property
@pulumi.getter(name="defaultLocale")
def default_locale(self) -> Optional[pulumi.Input[str]]:
"""
The default locale.
"""
return pulumi.get(self, "default_locale")
@default_locale.setter
def default_locale(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_locale", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The id of config.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="providerName")
def provider_name(self) -> Optional[pulumi.Input[str]]:
"""
The provider name.
"""
return pulumi.get(self, "provider_name")
@provider_name.setter
def provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provider_name", value)
@pulumi.input_type
class TelephonyChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['TelephonyChannelPropertiesArgs']] = None):
"""
Telephony channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'TelephonyChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['TelephonyChannelPropertiesArgs'] properties: The set of properties specific to Telephony channel resource
"""
pulumi.set(__self__, "channel_name", 'TelephonyChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'TelephonyChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['TelephonyChannelPropertiesArgs']]:
"""
The set of properties specific to Telephony channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['TelephonyChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class TelephonyPhoneNumbersArgs:
def __init__(__self__, *,
acs_endpoint: Optional[pulumi.Input[str]] = None,
acs_resource_id: Optional[pulumi.Input[str]] = None,
acs_secret: Optional[pulumi.Input[str]] = None,
cognitive_service_region: Optional[pulumi.Input[str]] = None,
cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,
cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,
default_locale: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
offer_type: Optional[pulumi.Input[str]] = None,
phone_number: Optional[pulumi.Input[str]] = None):
"""
A telephone number for the Telephony channel
:param pulumi.Input[str] acs_endpoint: The endpoint of ACS.
:param pulumi.Input[str] acs_resource_id: The resource id of ACS.
:param pulumi.Input[str] acs_secret: The secret of ACS.
:param pulumi.Input[str] cognitive_service_region: The service region of cognitive service.
:param pulumi.Input[str] cognitive_service_resource_id: The resource id of cognitive service.
:param pulumi.Input[str] cognitive_service_subscription_key: The subscription key of cognitive service.
:param pulumi.Input[str] default_locale: The default locale of the phone number.
:param pulumi.Input[str] id: The element id.
:param pulumi.Input[str] offer_type: Optional Property that will determine the offering type of the phone.
:param pulumi.Input[str] phone_number: The phone number.
"""
if acs_endpoint is not None:
pulumi.set(__self__, "acs_endpoint", acs_endpoint)
if acs_resource_id is not None:
pulumi.set(__self__, "acs_resource_id", acs_resource_id)
if acs_secret is not None:
pulumi.set(__self__, "acs_secret", acs_secret)
if cognitive_service_region is not None:
pulumi.set(__self__, "cognitive_service_region", cognitive_service_region)
if cognitive_service_resource_id is not None:
pulumi.set(__self__, "cognitive_service_resource_id", cognitive_service_resource_id)
if cognitive_service_subscription_key is not None:
pulumi.set(__self__, "cognitive_service_subscription_key", cognitive_service_subscription_key)
if default_locale is not None:
pulumi.set(__self__, "default_locale", default_locale)
if id is not None:
pulumi.set(__self__, "id", id)
if offer_type is not None:
pulumi.set(__self__, "offer_type", offer_type)
if phone_number is not None:
pulumi.set(__self__, "phone_number", phone_number)
@property
@pulumi.getter(name="acsEndpoint")
def acs_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The endpoint of ACS.
"""
return pulumi.get(self, "acs_endpoint")
@acs_endpoint.setter
def acs_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acs_endpoint", value)
@property
@pulumi.getter(name="acsResourceId")
def acs_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource id of ACS.
"""
return pulumi.get(self, "acs_resource_id")
@acs_resource_id.setter
def acs_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acs_resource_id", value)
@property
@pulumi.getter(name="acsSecret")
def acs_secret(self) -> Optional[pulumi.Input[str]]:
"""
The secret of ACS.
"""
return pulumi.get(self, "acs_secret")
@acs_secret.setter
def acs_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acs_secret", value)
@property
@pulumi.getter(name="cognitiveServiceRegion")
def cognitive_service_region(self) -> Optional[pulumi.Input[str]]:
"""
The service region of cognitive service.
"""
return pulumi.get(self, "cognitive_service_region")
@cognitive_service_region.setter
def cognitive_service_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_region", value)
@property
@pulumi.getter(name="cognitiveServiceResourceId")
def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource id of cognitive service.
"""
return pulumi.get(self, "cognitive_service_resource_id")
@cognitive_service_resource_id.setter
def cognitive_service_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_resource_id", value)
@property
@pulumi.getter(name="cognitiveServiceSubscriptionKey")
def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:
"""
The subscription key of cognitive service.
"""
return pulumi.get(self, "cognitive_service_subscription_key")
@cognitive_service_subscription_key.setter
def cognitive_service_subscription_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_subscription_key", value)
@property
@pulumi.getter(name="defaultLocale")
def default_locale(self) -> Optional[pulumi.Input[str]]:
"""
The default locale of the phone number.
"""
return pulumi.get(self, "default_locale")
@default_locale.setter
def default_locale(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_locale", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The element id.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="offerType")
def offer_type(self) -> Optional[pulumi.Input[str]]:
"""
Optional Property that will determine the offering type of the phone.
"""
return pulumi.get(self, "offer_type")
@offer_type.setter
def offer_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "offer_type", value)
@property
@pulumi.getter(name="phoneNumber")
def phone_number(self) -> Optional[pulumi.Input[str]]:
"""
The phone number.
"""
return pulumi.get(self, "phone_number")
@phone_number.setter
def phone_number(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phone_number", value)
@pulumi.input_type
class WebChatChannelPropertiesArgs:
def __init__(__self__, *,
sites: Optional[pulumi.Input[Sequence[pulumi.Input['WebChatSiteArgs']]]] = None):
"""
The parameters to provide for the Web Chat channel.
:param pulumi.Input[Sequence[pulumi.Input['WebChatSiteArgs']]] sites: The list of Web Chat sites
"""
if sites is not None:
pulumi.set(__self__, "sites", sites)
@property
@pulumi.getter
def sites(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['WebChatSiteArgs']]]]:
"""
The list of Web Chat sites
"""
return pulumi.get(self, "sites")
@sites.setter
def sites(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['WebChatSiteArgs']]]]):
pulumi.set(self, "sites", value)
@pulumi.input_type
class WebChatChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['WebChatChannelPropertiesArgs']] = None):
"""
Web Chat channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'WebChatChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['WebChatChannelPropertiesArgs'] properties: The set of properties specific to Web Chat channel resource
"""
pulumi.set(__self__, "channel_name", 'WebChatChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'WebChatChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['WebChatChannelPropertiesArgs']]:
"""
The set of properties specific to Web Chat channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['WebChatChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class WebChatSiteArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
is_webchat_preview_enabled: pulumi.Input[bool],
site_name: pulumi.Input[str],
app_id: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
is_block_user_upload_enabled: Optional[pulumi.Input[bool]] = None,
is_detailed_logging_enabled: Optional[pulumi.Input[bool]] = None,
is_endpoint_parameters_enabled: Optional[pulumi.Input[bool]] = None,
is_no_storage_enabled: Optional[pulumi.Input[bool]] = None,
is_secure_site_enabled: Optional[pulumi.Input[bool]] = None,
is_v1_enabled: Optional[pulumi.Input[bool]] = None,
is_v3_enabled: Optional[pulumi.Input[bool]] = None,
is_web_chat_speech_enabled: Optional[pulumi.Input[bool]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
trusted_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A site for the Webchat channel
:param pulumi.Input[bool] is_enabled: Whether this site is enabled for DirectLine channel
:param pulumi.Input[bool] is_webchat_preview_enabled: Whether this site is enabled for preview versions of Webchat
:param pulumi.Input[str] site_name: Site name
:param pulumi.Input[str] app_id: DirectLine application id
:param pulumi.Input[str] e_tag: Entity Tag
:param pulumi.Input[bool] is_block_user_upload_enabled: Whether this site is enabled for block user upload.
        :param pulumi.Input[bool] is_detailed_logging_enabled: Whether detailed logging is disabled for this site
:param pulumi.Input[bool] is_endpoint_parameters_enabled: Whether this site is EndpointParameters enabled for channel
        :param pulumi.Input[bool] is_no_storage_enabled: Whether detailed logging is disabled for this no-storage site
:param pulumi.Input[bool] is_secure_site_enabled: Whether this site is enabled for authentication with Bot Framework.
:param pulumi.Input[bool] is_v1_enabled: Whether this site is enabled for Bot Framework V1 protocol.
:param pulumi.Input[bool] is_v3_enabled: Whether this site is enabled for Bot Framework V3 protocol.
:param pulumi.Input[bool] is_web_chat_speech_enabled: Whether this site is enabled for Webchat Speech
:param pulumi.Input[str] tenant_id: Tenant Id
:param pulumi.Input[Sequence[pulumi.Input[str]]] trusted_origins: List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if is_webchat_preview_enabled is None:
is_webchat_preview_enabled = False
pulumi.set(__self__, "is_webchat_preview_enabled", is_webchat_preview_enabled)
pulumi.set(__self__, "site_name", site_name)
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if is_block_user_upload_enabled is not None:
pulumi.set(__self__, "is_block_user_upload_enabled", is_block_user_upload_enabled)
if is_detailed_logging_enabled is not None:
pulumi.set(__self__, "is_detailed_logging_enabled", is_detailed_logging_enabled)
if is_endpoint_parameters_enabled is not None:
pulumi.set(__self__, "is_endpoint_parameters_enabled", is_endpoint_parameters_enabled)
if is_no_storage_enabled is not None:
pulumi.set(__self__, "is_no_storage_enabled", is_no_storage_enabled)
if is_secure_site_enabled is not None:
pulumi.set(__self__, "is_secure_site_enabled", is_secure_site_enabled)
if is_v1_enabled is not None:
pulumi.set(__self__, "is_v1_enabled", is_v1_enabled)
if is_v3_enabled is not None:
pulumi.set(__self__, "is_v3_enabled", is_v3_enabled)
if is_web_chat_speech_enabled is None:
is_web_chat_speech_enabled = False
if is_web_chat_speech_enabled is not None:
pulumi.set(__self__, "is_web_chat_speech_enabled", is_web_chat_speech_enabled)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if trusted_origins is not None:
pulumi.set(__self__, "trusted_origins", trusted_origins)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for DirectLine channel
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="isWebchatPreviewEnabled")
def is_webchat_preview_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for preview versions of Webchat
"""
return pulumi.get(self, "is_webchat_preview_enabled")
@is_webchat_preview_enabled.setter
def is_webchat_preview_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_webchat_preview_enabled", value)
@property
@pulumi.getter(name="siteName")
def site_name(self) -> pulumi.Input[str]:
"""
Site name
"""
return pulumi.get(self, "site_name")
@site_name.setter
def site_name(self, value: pulumi.Input[str]):
pulumi.set(self, "site_name", value)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[str]]:
"""
DirectLine application id
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag
"""
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter(name="isBlockUserUploadEnabled")
def is_block_user_upload_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for block user upload.
"""
return pulumi.get(self, "is_block_user_upload_enabled")
@is_block_user_upload_enabled.setter
def is_block_user_upload_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_block_user_upload_enabled", value)
@property
@pulumi.getter(name="isDetailedLoggingEnabled")
def is_detailed_logging_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether detailed logging is disabled for this site
"""
return pulumi.get(self, "is_detailed_logging_enabled")
@is_detailed_logging_enabled.setter
def is_detailed_logging_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_detailed_logging_enabled", value)
@property
@pulumi.getter(name="isEndpointParametersEnabled")
def is_endpoint_parameters_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is EndpointParameters enabled for channel
"""
return pulumi.get(self, "is_endpoint_parameters_enabled")
@is_endpoint_parameters_enabled.setter
def is_endpoint_parameters_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_endpoint_parameters_enabled", value)
@property
@pulumi.getter(name="isNoStorageEnabled")
def is_no_storage_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether detailed logging is disabled for this no-storage site
"""
return pulumi.get(self, "is_no_storage_enabled")
@is_no_storage_enabled.setter
def is_no_storage_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_no_storage_enabled", value)
@property
@pulumi.getter(name="isSecureSiteEnabled")
def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for authentication with Bot Framework.
"""
return pulumi.get(self, "is_secure_site_enabled")
@is_secure_site_enabled.setter
def is_secure_site_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_secure_site_enabled", value)
@property
@pulumi.getter(name="isV1Enabled")
def is_v1_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for Bot Framework V1 protocol.
"""
return pulumi.get(self, "is_v1_enabled")
@is_v1_enabled.setter
def is_v1_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_v1_enabled", value)
@property
@pulumi.getter(name="isV3Enabled")
def is_v3_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for Bot Framework V3 protocol.
"""
return pulumi.get(self, "is_v3_enabled")
@is_v3_enabled.setter
def is_v3_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_v3_enabled", value)
@property
@pulumi.getter(name="isWebChatSpeechEnabled")
def is_web_chat_speech_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for Webchat Speech
"""
return pulumi.get(self, "is_web_chat_speech_enabled")
@is_web_chat_speech_enabled.setter
def is_web_chat_speech_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_web_chat_speech_enabled", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
Tenant Id
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="trustedOrigins")
def trusted_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
return pulumi.get(self, "trusted_origins")
@trusted_origins.setter
def trusted_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "trusted_origins", value)
|
PypiClean
|
/mLib-1.2.4.tar.gz/mLib-1.2.4/src/misc.py
|
try:
import re2 as re
except ImportError:
import re
import enum
from functools import wraps
from copy import deepcopy
import gzip as _gz
import errno
import signal
import os
import string
import StringIO
import random
import hashlib
#import log
#log = log.get_logger(__name__)
def chunks(l, n): return [l[x: x + n] for x in xrange(0, len(l), n)]
BASEPATH = ''
def re_match(r, d, cstr):
if cstr:
r += '\x00'
return map(lambda x: x.strip("\x00"), re.findall(r, d))
get_urls = lambda d, cstr=False: re_match("https?://[\x21-\x7e]{6,}", d, cstr)
get_strings = lambda d, cstr=False: re_match('[ -~]{3,}', d, cstr)
class E(enum.Enum):
@classmethod
def from_val(cls, val):
for n, e in cls.__members__.items():
if e.value == val:
return e
return None
# def generic_parse(_data):
# try:
# return _generic_parse(_data)
# except:
# print _data
# raise Exception('asdf')
def generic_unparse(data, do_rest=False):
cfg = deepcopy(data)
r = ['INJECTS']
r.append('=' * 80)
for inj in cfg['injects']:
r.append('Target: ' + inj['target'])
for i in inj['injects']:
if 'pre' in i:
r.append('<data_before>')
r.append(i['pre'])
r.append('<data_end>')
if 'post' in i:
r.append('<data_after>')
r.append(i['post'])
r.append('<data_end>')
if 'inj' in i:
r.append('<data_inject>')
r.append(i['inj'])
r.append('<data_end>')
r.append('\n\nACTIONS')
r.append('=' * 80)
for a in cfg.get('actions', []):
r.append('Target: %s | Action: %s | Type: %s' %
(a['target'], a['action'], a['type']))
r.append("\n")
if do_rest:
for el in cfg:
if el in ['injects', 'actions']:
continue
r.append(el.upper())
r.append('=' * 80)
for e in cfg[el]:
r.append(str(e))
r.append("\n")
return "\n".join(r)
def realpath(p):
my_path = os.path.abspath(os.path.expanduser(p))
if os.path.islink(my_path):
my_path = os.readlink(my_path)
return my_path
def realdir(p):
my_path = realpath(p)
return os.path.dirname(os.path.dirname(my_path))
def get_my_path():
global BASEPATH
if not BASEPATH:
BASEPATH = realdir(__file__) + os.sep + __name__.split('.')[0]
return BASEPATH
def ngrams(data, cnt=4):
a = [data]
for i in range(1, cnt):
        a.append(data[i:])
return zip(*a)
def generic_parse(_data):
if not _data:
return None
off = _data.find('set_url')
off2 = 0
ret = []
while off < len(_data):
off2 = _data.find('set_url', off + 7)
if off2 == -1:
ret.append(_process_ent(_data[off:]))
break
else:
ret.append(_process_ent(_data[off:off2]))
off = off2
# print off
return ret
def _process_ent(d):
try:
ret = {}
__process_ent(d, ret)
except Exception as e:
import traceback
print '-' * 20
print d
print '#' * 20
print ret
print '-' * 20
traceback.print_exc()
return ret
def __process_ent(d, ret):
TRNSL = {'data_before': 'pre', 'data_after': 'post',
'data_inject': 'inj', 'data_count': 'cnt'}
d = d.strip()
# try:
trgt, rest = d.split("\n", 1)
# except:
# print '#'*20
# print rest
# raise Exception('e1')
try:
_, url, fl = trgt.strip().split(' ')
except ValueError:
if trgt.strip() == 'set_url':
# huh target url in new line?
url, rest = rest.split("\n", 1)
else:
_, url = trgt.strip().split(' ')
fl = ''
ret['flags'] = fl
ret['target'] = url
ret['injects'] = []
r = {}
while rest:
# skip comments?
if rest.startswith(';') or rest.startswith('#'):
o = rest.find("\n")
if o == -1:
return ret
rest = rest[o + 1:]
continue
# try:
tag, rest2 = rest.split("\n", 1)
# except:
# print '#'*20
# print `rest`
# raise Exception('e2')
rest = rest2
tag = tag.strip()
if not tag:
continue
if tag == 'data_end':
            #log.error('malformed config entry... skipping it...')
            print '[-] malformed config entry... skipping it...'
continue
_end = rest.find('data_end')
r[TRNSL[tag]] = rest[:_end].strip().decode('unicode-escape')
if tag == 'data_after':
ret['injects'].append(r)
r = {}
rest = rest[_end + 8:].strip()
def load_dll(path):
import ctypes
p = get_my_path()
return ctypes.cdll.LoadLibrary(os.path.join(p, path))
def get_thread_pool(c):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes=c)
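
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): feeding a tiny,
# hand-written webinject-style config through generic_parse()/generic_unparse(),
# plus the get_urls() helper. The URL and markers below are made up purely for
# demonstration.
if __name__ == "__main__":
    sample_cfg = (
        "set_url https://example.com/login* GP\n"
        "data_before\n<body>\ndata_end\n"
        "data_inject\n<div>injected</div>\ndata_end\n"
        "data_after\n</body>\ndata_end\n"
    )
    parsed = generic_parse(sample_cfg)
    print(generic_unparse({'injects': parsed, 'actions': []}))
    print(get_urls("visit https://example.com/page now"))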
|
PypiClean
|
/apollox_connector_python-1.1.0-py3-none-any.whl/apollox/websocket/client/stream.py
|
from apollox.websocket.websocket_client import ApolloxWebsocketClient
class WebsocketClient(ApolloxWebsocketClient):
def __init__(self, stream_url="wss://fstream.apollox.finance"):
super().__init__(stream_url)
def agg_trade(self, symbol: str, id: int, callback, **kwargs):
"""
| **Aggregate Trade Stream**
| *The Aggregate Trade Streams push market trade information that is aggregated for a single taker order every 100 milliseconds.*
:Stream name: ``<symbol>@aggTrade``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#aggregate-trade-streams
"""
self.live_subscribe(
"{}@aggTrade".format(symbol.lower()), id, callback, **kwargs
)
def mark_price(self, symbol: str, id: int, callback, speed=None, **kwargs):
"""
| **Mark Price Stream**
| *Mark price and funding rate for a single symbol pushed every 3 seconds or every second.*
:Stream name: ``<symbol>@markPrice`` or ``<symbol>@markPrice@1s``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#mark-price-stream
"""
if speed is None:
self.live_subscribe(
"{}@markPrice".format(symbol.lower()), id, callback, **kwargs
)
else:
self.live_subscribe(
"{}@markPrice@{}s".format(symbol.lower(), speed), id, callback, **kwargs
)
def mark_price_all_market(self, id: int, callback, speed=None, **kwargs):
"""
| **Mark Price Stream for All market**
| *Mark price and funding rate for all symbols pushed every 3 seconds or every second.*
:Stream name: ``!markPrice@arr`` or ``!markPrice@arr@1s``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#mark-price-stream-for-all-market
"""
if speed is None:
self.live_subscribe("{!markPrice@arr", id, callback, **kwargs)
else:
self.live_subscribe(
"{!markPrice@arr@{}s".format(speed), id, callback, **kwargs
)
def kline(self, symbol: str, id: int, interval: str, callback, **kwargs):
"""
| **Kline/Candlestick Streams**
| *The Kline/Candlestick Stream push updates to the current klines/candlestick every 250 milliseconds (if existing).*
:Stream name: ``<symbol>@kline_<interval>``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#klinecandlestick-streams
"""
self.live_subscribe(
"{}@kline_{}".format(symbol.lower(), interval), id, callback, **kwargs
)
def mini_ticker(self, id: int, callback, symbol=None, **kwargs):
"""
| **Individual Symbol or All Market Mini Ticker Stream**
| *24hr rolling window mini-ticker statistics for a single symbol or all market. These are NOT the statistics of the UTC day, but a 24hr rolling window from requestTime to 24hrs before.*
:Stream name: ``<symbol>@miniTicker``
:Stream name: ``!miniTicker@arr``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#individual-symbol-mini-ticker-stream
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#all-market-mini-tickers-stream
"""
if symbol is None:
self.live_subscribe("!miniTicker@arr", id, callback, **kwargs)
else:
self.live_subscribe(
"{}@miniTicker".format(symbol.lower()), id, callback, **kwargs
)
def ticker(self, id: int, callback, symbol=None, **kwargs):
"""
| **Individual Symbol or All Market Ticker Streams**
        | *24hr rolling window ticker statistics for a single symbol or all market. These are NOT the statistics of the UTC day, but a 24hr rolling window from requestTime to 24hrs before.*
:Stream name: ``<symbol>@ticker``
:Stream name: ``!ticker@arr``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#individual-symbol-ticker-streams
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#all-market-tickers-streams
"""
if symbol is None:
self.live_subscribe("!ticker@arr", id, callback, **kwargs)
else:
self.live_subscribe(
"{}@ticker".format(symbol.lower()), id, callback, **kwargs
)
def book_ticker(self, id: int, callback, symbol=None, **kwargs):
"""
| **Individual Symbol or All Market Book Ticker Streams**
| *Pushes any update to the best bid or ask's price or quantity in real-time for a specified symbol or all market.*
:Stream name: ``<symbol>@bookTicker``
:Stream name: ``!bookTicker``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#individual-symbol-book-ticker-streams
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#all-book-tickers-stream
"""
if symbol is None:
self.live_subscribe("!bookTicker", id, callback, **kwargs)
else:
self.live_subscribe(
"{}@bookTicker".format(symbol.lower()), id, callback, **kwargs
)
def liquidation_order(self, id: int, callback, symbol=None, **kwargs):
"""
| **Liquidation Order Streams**
| *The Liquidation Order Snapshot Streams push force liquidation order information for specific symbol or all market.*
:Stream name: ``<symbol>@forceOrder``
:Stream name: ``!forceOrder@arr``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#liquidation-order-streams
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#all-market-liquidation-order-streams
"""
if symbol is None:
self.live_subscribe("!forceOrder@arr", id, callback, **kwargs)
else:
self.live_subscribe(
"{}@forceOrder".format(symbol.lower()), id, callback, **kwargs
)
def partial_book_depth(self, symbol: str, id: int, level, speed, callback, **kwargs):
"""
| **Partial Book Depth Streams**
| *Top <levels> bids and asks, Valid <levels> are 5, 10, or 20. Valid <speed> are 250, 500, or 100*
:Stream name: ``<symbol>@depth<levels>`` or ``<symbol>@depth<levels>@500ms`` or ``<symbol>@depth<levels>@100ms``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#partial-book-depth-streams
"""
self.live_subscribe(
"{}@depth{}@{}ms".format(symbol.lower(), level, speed), id, callback, **kwargs
)
def diff_book_depth(self, symbol: str, id: int, speed, callback, **kwargs):
"""
| **Diff. Book Depth Streams**
| *Bids and asks, pushed every 250 milliseconds, 500 milliseconds, 100 milliseconds (if existing)*
:Stream name: ``<symbol>@depth`` or ``<symbol>@depth@500ms`` or ``<symbol>@depth@100ms``
:Doc: https://github.com/apollox-finance/apollox-finance-api-docs/blob/master/apollox-finance-api.md#diff-book-depth-streams
"""
self.live_subscribe(
"{}@depth@{}ms".format(symbol.lower(), speed), id, callback, **kwargs
)
def user_data(self, listen_key: str, id: int, callback, **kwargs):
"""listen to user data by provided listenkey"""
self.live_subscribe(listen_key, id, callback, **kwargs)
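
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library): wiring a callback to a couple
# of the streams defined above. The start()/stop() lifecycle methods are assumed
# to come from the parent ApolloxWebsocketClient; check that class before relying
# on them. The function is defined but not called, since calling it would open a
# live websocket connection.
def _example_usage():
    def message_handler(message):
        print(message)

    client = WebsocketClient()
    client.start()  # assumed lifecycle method on ApolloxWebsocketClient
    client.kline(symbol="btcusdt", id=1, interval="1m", callback=message_handler)
    client.mini_ticker(id=2, callback=message_handler)
    # ... later, when done:
    client.stop()   # assumed lifecycle method on ApolloxWebsocketClient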
|
PypiClean
|
/alvin_integration-1.0.0b0.tar.gz/alvin_integration-1.0.0b0/CHANGELOG.md
|
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.0.0-beta.0] - 2022-04-28
### Feature
* Add interpolation in python ([`c637563`](https://github.com/alvindotai/alvin-integration/commit/c6375636440202bc00e1232e3f40d056cfa659ac))
* Some features ([`68f46ff`](https://github.com/alvindotai/alvin-integration/commit/68f46fffd0435aedc7de571398dc6f4e83bb9117))
### Fix
* Move alvin-integration ([`94b54e2`](https://github.com/alvindotai/alvin-integration/commit/94b54e298272fdd705120902e85fc1ac5076bafb))
* More changes on tmp.changes.md ([`fe070b1`](https://github.com/alvindotai/alvin-integration/commit/fe070b18cbab21de28b1166f7156fb3531dbafae))
* More changes ([`61591ca`](https://github.com/alvindotai/alvin-integration/commit/61591ca89bfdac35b1e9da0c558c875a44e01002))
* Some fixes ([`36b787b`](https://github.com/alvindotai/alvin-integration/commit/36b787bd26a9602efa1f16134895c3d255e70796))
* SOME SPACES ([`269f585`](https://github.com/alvindotai/alvin-integration/commit/269f5858acc6cbe570f4c7e24c4400f94b4e63a3))
|
PypiClean
|
/pulsar-app-0.15.4.tar.gz/pulsar-app-0.15.4/pulsar/managers/base/base_drmaa.py
|
import logging
try:
from drmaa import JobState
except (OSError, ImportError, RuntimeError):
JobState = None
from pulsar.managers import status
from .external import ExternalBaseManager
from ..util.drmaa import DrmaaSessionFactory
log = logging.getLogger(__name__)
IGNORE_SUBMISSION_SPEC_MESSAGE = "Submission received native_specification but it is being overridden by the manager specification."
class BaseDrmaaManager(ExternalBaseManager):
"""Base class for Pulsar managers using DRMAA."""
def __init__(self, name, app, **kwds):
"""Setup native specification and drmaa session factory."""
super().__init__(name, app, **kwds)
self.native_specification = kwds.get('native_specification', None)
drmaa_session_factory_class = kwds.get('drmaa_session_factory_class', DrmaaSessionFactory)
drmaa_session_factory = drmaa_session_factory_class()
self.drmaa_session = drmaa_session_factory.get()
def shutdown(self, timeout=None):
"""Cleanup DRMAA session and call shutdown of parent."""
try:
super().shutdown(timeout)
except Exception:
pass
self.drmaa_session.close()
def _get_status_external(self, external_id):
drmaa_state = self.drmaa_session.job_status(external_id)
return {
JobState.UNDETERMINED: status.COMPLETE,
JobState.QUEUED_ACTIVE: status.QUEUED,
JobState.SYSTEM_ON_HOLD: status.QUEUED,
JobState.USER_ON_HOLD: status.QUEUED,
JobState.USER_SYSTEM_ON_HOLD: status.QUEUED,
JobState.RUNNING: status.RUNNING,
JobState.SYSTEM_SUSPENDED: status.QUEUED,
JobState.USER_SUSPENDED: status.QUEUED,
JobState.DONE: status.COMPLETE,
JobState.FAILED: status.COMPLETE, # Should be a FAILED state here as well
}[drmaa_state]
def _build_template_attributes(self, job_id, command_line, dependencies_description=None, env=[], submit_params={}, setup_params=None):
stdout_path = self._job_stdout_path(job_id)
stderr_path = self._job_stderr_path(job_id)
working_directory = self.job_directory(job_id).working_directory()
attributes = {
"remoteCommand": self._setup_job_file(
job_id,
command_line,
dependencies_description=dependencies_description,
env=env,
setup_params=setup_params
),
"jobName": self._job_name(job_id),
"outputPath": ":%s" % stdout_path,
"errorPath": ":%s" % stderr_path,
"workingDirectory": working_directory,
}
submit_native_specification = submit_params.get("native_specification", None)
native_specification = None
if self.native_specification:
native_specification = self.native_specification
if submit_native_specification is not None:
log.warn(IGNORE_SUBMISSION_SPEC_MESSAGE)
elif submit_native_specification:
native_specification = submit_params["native_specification"]
if native_specification is not None:
attributes["nativeSpecification"] = native_specification
log.info("Submitting DRMAA job with nativeSpecification [%s]" % native_specification)
else:
log.debug("No native specification supplied, DRMAA job will be submitted with default parameters.")
return attributes
__all__ = ("BaseDrmaaManager",)
|
PypiClean
|
/tw.dojo-0.9.181.tar.gz/tw.dojo-0.9.181/tw/dojo/static/1.8.1/debug/dijit/a11yclick.js
|
define("dijit/a11yclick", [
"dojo/on",
"dojo/_base/array", // array.forEach
"dojo/keys", // keys.ENTER keys.SPACE
"dojo/_base/declare", // declare
"dojo/has", // has("dom-addeventlistener")
"dojo/_base/unload", // unload.addOnWindowUnload
"dojo/_base/window" // win.doc.addEventListener win.doc.attachEvent win.doc.detachEvent
], function(on, array, keys, declare, has, unload, win){
// module:
// dijit/a11yclick
// Keep track of where the last keydown event was, to help avoid generating
// spurious ondijitclick events when:
// 1. focus is on a <button> or <a>
// 2. user presses then releases the ENTER key
// 3. onclick handler fires and shifts focus to another node, with an ondijitclick handler
// 4. onkeyup event fires, causing the ondijitclick handler to fire
var lastKeyDownNode = null;
if(has("dom-addeventlistener")){
win.doc.addEventListener('keydown', function(evt){
lastKeyDownNode = evt.target;
}, true);
}else{
// Fallback path for IE6-8
(function(){
var keydownCallback = function(evt){
lastKeyDownNode = evt.srcElement;
};
win.doc.attachEvent('onkeydown', keydownCallback);
unload.addOnWindowUnload(function(){
win.doc.detachEvent('onkeydown', keydownCallback);
});
})();
}
function clickKey(/*Event*/ e){
return (e.keyCode === keys.ENTER || e.keyCode === keys.SPACE) &&
!e.ctrlKey && !e.shiftKey && !e.altKey && !e.metaKey;
}
return function(node, listener){
// summary:
// Custom a11yclick (a.k.a. ondijitclick) event
// which triggers on a mouse click, touch, or space/enter keyup.
if(/input|button/i.test(node.nodeName)){
// pass through, the browser already generates click event on SPACE/ENTER key
return on(node, "click", listener);
}else{
// Don't fire the click event unless both the keydown and keyup occur on this node.
// Avoids problems where focus shifted to this node or away from the node on keydown,
// either causing this node to process a stray keyup event, or causing another node
// to get a stray keyup event.
var handles = [
on(node, "keydown", function(e){
//console.log(this.id + ": onkeydown, e.target = ", e.target, ", lastKeyDownNode was ", lastKeyDownNode, ", equality is ", (e.target === lastKeyDownNode));
if(clickKey(e)){
// needed on IE for when focus changes between keydown and keyup - otherwise dropdown menus do not work
lastKeyDownNode = e.target;
// Prevent viewport scrolling on space key in IE<9.
// (Reproducible on test_Button.html on any of the first dijit/form/Button examples)
e.preventDefault();
}
}),
on(node, "keyup", function(e){
//console.log(this.id + ": onkeyup, e.target = ", e.target, ", lastKeyDownNode was ", lastKeyDownNode, ", equality is ", (e.target === lastKeyDownNode));
if(clickKey(e) && e.target == lastKeyDownNode){ // === breaks greasemonkey
//need reset here or have problems in FF when focus returns to trigger element after closing popup/alert
lastKeyDownNode = null;
on.emit(e.target, "click", {
cancelable: true,
bubbles: true
});
}
}),
on(node, "click", function(e){
// catch mouse clicks, plus the on.emit() calls from above and below
listener.call(this, e);
})
];
if(has("touch")){
// touchstart-->touchend will automatically generate a click event, but there are problems
// on iOS after focus has been programatically shifted (#14604, #14918), so setup a failsafe
// if click doesn't fire naturally.
var clickTimer;
handles.push(
on(node, "touchend", function(e){
var target = e.target;
clickTimer = setTimeout(function(){
clickTimer = null;
on.emit(target, "click", {
cancelable: true,
bubbles: true
});
}, 600);
}),
on(node, "click", function(e){
// If browser generates a click naturally, clear the timer to fire a synthetic click event
if(clickTimer){
clearTimeout(clickTimer);
}
})
// TODO: if the touchstart and touchend were <100ms apart, and then there's another touchstart
// event <300ms after the touchend event, then clear the synthetic click timer, because user
// is doing a zoom. Alternately monitor screen.deviceXDPI (or something similar) to see if
// zoom level has changed.
);
}
return {
remove: function(){
array.forEach(handles, function(h){ h.remove(); });
if(clickTimer){
clearTimeout(clickTimer);
clickTimer = null;
}
}
};
}
};
});
|
PypiClean
|
/tencentcloud-sdk-python-intl-en-3.0.786.tar.gz/tencentcloud-sdk-python-intl-en-3.0.786/tencentcloud/dts/v20180330/errorcodes.py
|
# The current user is not allowed to perform this operation as the authentication failed.
AUTHFAILURE_UNAUTHORIZEDOPERATIONERROR = 'AuthFailure.UnauthorizedOperationError'
# This operation is prohibited.
FAILEDOPERATION_NOTALLOWOPERATION = 'FailedOperation.NotAllowOperation'
# Failed to start the task.
FAILEDOPERATION_STARTJOBFAILED = 'FailedOperation.StartJobFailed'
# This operation cannot be performed due to status conflict.
FAILEDOPERATION_STATUSINCONFLICT = 'FailedOperation.StatusInConflict'
# An internal error occurred.
INTERNALERROR = 'InternalError'
# Failed to create the async task.
INTERNALERROR_ADDTASKERROR = 'InternalError.AddTaskError'
# Internal scheduling system error.
INTERNALERROR_CELERYERROR = 'InternalError.CeleryError'
# CGW system error.
INTERNALERROR_CGWSYSTEMERROR = 'InternalError.CgwSystemError'
# Failed to access the database on the DTS platform.
INTERNALERROR_DATABASEERROR = 'InternalError.DatabaseError'
# Migration tasks are in conflict.
INTERNALERROR_DUPLICATEJOB = 'InternalError.DuplicateJob'
# Locks are in conflict.
INTERNALERROR_LOCKERROR = 'InternalError.LockError'
# Communication protocol error.
INTERNALERROR_PROTOCOLERROR = 'InternalError.ProtocolError'
# Internal error.
INTERNALERROR_UNDEFINEDERROR = 'InternalError.UndefinedError'
# Unknown internal error.
INTERNALERROR_UNKNOWNERROR = 'InternalError.UnknownError'
# A parameter error occurred.
INVALIDPARAMETER = 'InvalidParameter'
# Parameter value error.
INVALIDPARAMETER_BIZINVALIDPARAMETERVALUEERROR = 'InvalidParameter.BizInvalidParameterValueError'
# The instance does not exist.
INVALIDPARAMETER_INSTANCENOTFOUND = 'InvalidParameter.InstanceNotFound'
# Incorrect parameter value.
INVALIDPARAMETERVALUE_INVALIDPARAMETERVALUE = 'InvalidParameterValue.InvalidParameterValue'
# The number of idle migration tasks exceeds the limit.
LIMITEXCEEDED_MAXUNUSEDJOBS = 'LimitExceeded.MaxUnusedJobs'
# Operation denied.
OPERATIONDENIED = 'OperationDenied'
# The operation was denied as the condition was not met.
OPERATIONDENIED_BIZOPERATIONDENIEDERROR = 'OperationDenied.BizOperationDeniedError'
# Task operation failure.
OPERATIONDENIED_JOBOPERATIONDENIEDERROR = 'OperationDenied.JobOperationDeniedError'
# DTS does not support the current migration type.
OPERATIONDENIED_MIGRATESERVICESUPPORTERROR = 'OperationDenied.MigrateServiceSupportError'
# This operation cannot be performed.
OPERATIONDENIED_OPERATIONDENIED = 'OperationDenied.OperationDenied'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# Resource not found.
RESOURCENOTFOUND_BIZRESOURCENOTFOUNDERROR = 'ResourceNotFound.BizResourceNotFoundError'
# The migration task does not exist.
RESOURCENOTFOUND_JOBNOTEXIST = 'ResourceNotFound.JobNotExist'
# The instance is not found.
RESOURCENOTFOUND_RESOURCENOTFOUND = 'ResourceNotFound.ResourceNotFound'
# Verification failed. Insufficient permissions.
UNAUTHORIZEDOPERATION_NOTENOUGHPRIVILEGES = 'UnauthorizedOperation.NotEnoughPrivileges'
# Unsupported operation
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
PypiClean
|
/onegov.libres-0.3.0-py3-none-any.whl/onegov/libres/collection.py
|
from onegov.core.utils import normalize_for_url
from onegov.libres.models import Resource
from uuid import uuid4
any_type = object()
class ResourceCollection(object):
""" Manages a list of resources.
"""
def __init__(self, libres_context):
assert hasattr(libres_context, 'get_service'), """
            The ResourceCollection expected the libres_context, not the session.
"""
self.libres_context = libres_context
self.session = libres_context.get_service('session_provider').session()
def query(self):
return self.session.query(Resource)
def add(self, title, timezone, type=None, name=None, meta={}, content={},
definition=None, group=None):
# look up the right class depending on the type (we need to do
        # this a bit awkwardly here, because Resource does not use the
# ModelBase as declarative base)
resource = Resource.get_polymorphic_class(type, Resource)()
        resource.id = uuid4()
resource.name = name or normalize_for_url(title)
resource.title = title
resource.timezone = timezone
resource.meta = meta
resource.content = content
resource.definition = definition
resource.group = group
self.session.add(resource)
self.session.flush()
return self.bind(resource)
def bind(self, resource):
if resource:
resource.bind_to_libres_context(self.libres_context)
return resource
def by_id(self, id, ensure_type=any_type):
query = self.query().filter(Resource.id == id)
if ensure_type is not any_type:
            query = query.filter(Resource.type == ensure_type)
return self.bind(query.first())
def by_name(self, name, ensure_type=any_type):
query = self.query().filter(Resource.name == name)
if ensure_type is not any_type:
query = query.filter(Resource.type == type)
return self.bind(query.first())
def by_allocation(self, allocation):
return self.by_id(allocation.resource)
def by_reservation(self, reservation):
return self.by_id(reservation.resource)
def delete(self, resource, including_reservations=False):
scheduler = resource.get_scheduler(self.libres_context)
if not including_reservations:
assert not scheduler.managed_reserved_slots().first()
assert not scheduler.managed_reservations().first()
scheduler.managed_allocations().delete('fetch')
else:
scheduler.extinguish_managed_records()
self.session.delete(resource)
self.session.flush()
|
PypiClean
|
/uncompyle6-3.7.2.tar.gz/uncompyle6-3.7.2/test/ok_lib2.7/bsddb/dbobj.py
|
#
# TODO it would be *really nice* to have an automatic shadow class populator
# so that new methods don't need to be added here manually after being
# added to _bsddb.c.
#
import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
# Because this syntax is not valid before Python 2.5
exec("from . import db")
else :
import db
if sys.version_info < (2, 6) :
from UserDict import DictMixin as MutableMapping
else :
import collections
MutableMapping = collections.MutableMapping
class DBEnv:
def __init__(self, *args, **kwargs):
self._cobj = db.DBEnv(*args, **kwargs)
def close(self, *args, **kwargs):
return self._cobj.close(*args, **kwargs)
def open(self, *args, **kwargs):
return self._cobj.open(*args, **kwargs)
def remove(self, *args, **kwargs):
return self._cobj.remove(*args, **kwargs)
def set_shm_key(self, *args, **kwargs):
return self._cobj.set_shm_key(*args, **kwargs)
def set_cachesize(self, *args, **kwargs):
return self._cobj.set_cachesize(*args, **kwargs)
def set_data_dir(self, *args, **kwargs):
return self._cobj.set_data_dir(*args, **kwargs)
def set_flags(self, *args, **kwargs):
return self._cobj.set_flags(*args, **kwargs)
def set_lg_bsize(self, *args, **kwargs):
return self._cobj.set_lg_bsize(*args, **kwargs)
def set_lg_dir(self, *args, **kwargs):
return self._cobj.set_lg_dir(*args, **kwargs)
def set_lg_max(self, *args, **kwargs):
return self._cobj.set_lg_max(*args, **kwargs)
def set_lk_detect(self, *args, **kwargs):
return self._cobj.set_lk_detect(*args, **kwargs)
if db.version() < (4,5):
def set_lk_max(self, *args, **kwargs):
return self._cobj.set_lk_max(*args, **kwargs)
def set_lk_max_locks(self, *args, **kwargs):
return self._cobj.set_lk_max_locks(*args, **kwargs)
def set_lk_max_lockers(self, *args, **kwargs):
return self._cobj.set_lk_max_lockers(*args, **kwargs)
def set_lk_max_objects(self, *args, **kwargs):
return self._cobj.set_lk_max_objects(*args, **kwargs)
def set_mp_mmapsize(self, *args, **kwargs):
return self._cobj.set_mp_mmapsize(*args, **kwargs)
def set_timeout(self, *args, **kwargs):
return self._cobj.set_timeout(*args, **kwargs)
def set_tmp_dir(self, *args, **kwargs):
return self._cobj.set_tmp_dir(*args, **kwargs)
def txn_begin(self, *args, **kwargs):
return self._cobj.txn_begin(*args, **kwargs)
def txn_checkpoint(self, *args, **kwargs):
return self._cobj.txn_checkpoint(*args, **kwargs)
def txn_stat(self, *args, **kwargs):
return self._cobj.txn_stat(*args, **kwargs)
def set_tx_max(self, *args, **kwargs):
return self._cobj.set_tx_max(*args, **kwargs)
def set_tx_timestamp(self, *args, **kwargs):
return self._cobj.set_tx_timestamp(*args, **kwargs)
def lock_detect(self, *args, **kwargs):
return self._cobj.lock_detect(*args, **kwargs)
def lock_get(self, *args, **kwargs):
return self._cobj.lock_get(*args, **kwargs)
def lock_id(self, *args, **kwargs):
return self._cobj.lock_id(*args, **kwargs)
def lock_put(self, *args, **kwargs):
return self._cobj.lock_put(*args, **kwargs)
def lock_stat(self, *args, **kwargs):
return self._cobj.lock_stat(*args, **kwargs)
def log_archive(self, *args, **kwargs):
return self._cobj.log_archive(*args, **kwargs)
def set_get_returns_none(self, *args, **kwargs):
return self._cobj.set_get_returns_none(*args, **kwargs)
def log_stat(self, *args, **kwargs):
return self._cobj.log_stat(*args, **kwargs)
def dbremove(self, *args, **kwargs):
return self._cobj.dbremove(*args, **kwargs)
def dbrename(self, *args, **kwargs):
return self._cobj.dbrename(*args, **kwargs)
def set_encrypt(self, *args, **kwargs):
return self._cobj.set_encrypt(*args, **kwargs)
if db.version() >= (4,4):
def fileid_reset(self, *args, **kwargs):
return self._cobj.fileid_reset(*args, **kwargs)
def lsn_reset(self, *args, **kwargs):
return self._cobj.lsn_reset(*args, **kwargs)
class DB(MutableMapping):
def __init__(self, dbenv, *args, **kwargs):
# give it the proper DBEnv C object that it's expecting
self._cobj = db.DB(*((dbenv._cobj,) + args), **kwargs)
# TODO are there other dict methods that need to be overridden?
def __len__(self):
return len(self._cobj)
def __getitem__(self, arg):
return self._cobj[arg]
def __setitem__(self, key, value):
self._cobj[key] = value
def __delitem__(self, arg):
del self._cobj[arg]
if sys.version_info >= (2, 6) :
def __iter__(self) :
return self._cobj.__iter__()
def append(self, *args, **kwargs):
return self._cobj.append(*args, **kwargs)
def associate(self, *args, **kwargs):
return self._cobj.associate(*args, **kwargs)
def close(self, *args, **kwargs):
return self._cobj.close(*args, **kwargs)
def consume(self, *args, **kwargs):
return self._cobj.consume(*args, **kwargs)
def consume_wait(self, *args, **kwargs):
return self._cobj.consume_wait(*args, **kwargs)
def cursor(self, *args, **kwargs):
return self._cobj.cursor(*args, **kwargs)
def delete(self, *args, **kwargs):
return self._cobj.delete(*args, **kwargs)
def fd(self, *args, **kwargs):
return self._cobj.fd(*args, **kwargs)
def get(self, *args, **kwargs):
return self._cobj.get(*args, **kwargs)
def pget(self, *args, **kwargs):
return self._cobj.pget(*args, **kwargs)
def get_both(self, *args, **kwargs):
return self._cobj.get_both(*args, **kwargs)
def get_byteswapped(self, *args, **kwargs):
return self._cobj.get_byteswapped(*args, **kwargs)
def get_size(self, *args, **kwargs):
return self._cobj.get_size(*args, **kwargs)
def get_type(self, *args, **kwargs):
return self._cobj.get_type(*args, **kwargs)
def join(self, *args, **kwargs):
return self._cobj.join(*args, **kwargs)
def key_range(self, *args, **kwargs):
return self._cobj.key_range(*args, **kwargs)
def has_key(self, *args, **kwargs):
return self._cobj.has_key(*args, **kwargs)
def items(self, *args, **kwargs):
return self._cobj.items(*args, **kwargs)
def keys(self, *args, **kwargs):
return self._cobj.keys(*args, **kwargs)
def open(self, *args, **kwargs):
return self._cobj.open(*args, **kwargs)
def put(self, *args, **kwargs):
return self._cobj.put(*args, **kwargs)
def remove(self, *args, **kwargs):
return self._cobj.remove(*args, **kwargs)
def rename(self, *args, **kwargs):
return self._cobj.rename(*args, **kwargs)
def set_bt_minkey(self, *args, **kwargs):
return self._cobj.set_bt_minkey(*args, **kwargs)
def set_bt_compare(self, *args, **kwargs):
return self._cobj.set_bt_compare(*args, **kwargs)
def set_cachesize(self, *args, **kwargs):
return self._cobj.set_cachesize(*args, **kwargs)
def set_dup_compare(self, *args, **kwargs) :
return self._cobj.set_dup_compare(*args, **kwargs)
def set_flags(self, *args, **kwargs):
return self._cobj.set_flags(*args, **kwargs)
def set_h_ffactor(self, *args, **kwargs):
return self._cobj.set_h_ffactor(*args, **kwargs)
def set_h_nelem(self, *args, **kwargs):
return self._cobj.set_h_nelem(*args, **kwargs)
def set_lorder(self, *args, **kwargs):
return self._cobj.set_lorder(*args, **kwargs)
def set_pagesize(self, *args, **kwargs):
return self._cobj.set_pagesize(*args, **kwargs)
def set_re_delim(self, *args, **kwargs):
return self._cobj.set_re_delim(*args, **kwargs)
def set_re_len(self, *args, **kwargs):
return self._cobj.set_re_len(*args, **kwargs)
def set_re_pad(self, *args, **kwargs):
return self._cobj.set_re_pad(*args, **kwargs)
def set_re_source(self, *args, **kwargs):
return self._cobj.set_re_source(*args, **kwargs)
def set_q_extentsize(self, *args, **kwargs):
return self._cobj.set_q_extentsize(*args, **kwargs)
def stat(self, *args, **kwargs):
return self._cobj.stat(*args, **kwargs)
def sync(self, *args, **kwargs):
return self._cobj.sync(*args, **kwargs)
def type(self, *args, **kwargs):
return self._cobj.type(*args, **kwargs)
def upgrade(self, *args, **kwargs):
return self._cobj.upgrade(*args, **kwargs)
def values(self, *args, **kwargs):
return self._cobj.values(*args, **kwargs)
def verify(self, *args, **kwargs):
return self._cobj.verify(*args, **kwargs)
def set_get_returns_none(self, *args, **kwargs):
return self._cobj.set_get_returns_none(*args, **kwargs)
def set_encrypt(self, *args, **kwargs):
return self._cobj.set_encrypt(*args, **kwargs)
class DBSequence:
def __init__(self, *args, **kwargs):
self._cobj = db.DBSequence(*args, **kwargs)
def close(self, *args, **kwargs):
return self._cobj.close(*args, **kwargs)
def get(self, *args, **kwargs):
return self._cobj.get(*args, **kwargs)
def get_dbp(self, *args, **kwargs):
return self._cobj.get_dbp(*args, **kwargs)
def get_key(self, *args, **kwargs):
return self._cobj.get_key(*args, **kwargs)
def init_value(self, *args, **kwargs):
return self._cobj.init_value(*args, **kwargs)
def open(self, *args, **kwargs):
return self._cobj.open(*args, **kwargs)
def remove(self, *args, **kwargs):
return self._cobj.remove(*args, **kwargs)
def stat(self, *args, **kwargs):
return self._cobj.stat(*args, **kwargs)
def set_cachesize(self, *args, **kwargs):
return self._cobj.set_cachesize(*args, **kwargs)
def set_flags(self, *args, **kwargs):
return self._cobj.set_flags(*args, **kwargs)
def set_range(self, *args, **kwargs):
return self._cobj.set_range(*args, **kwargs)
def get_cachesize(self, *args, **kwargs):
return self._cobj.get_cachesize(*args, **kwargs)
def get_flags(self, *args, **kwargs):
return self._cobj.get_flags(*args, **kwargs)
def get_range(self, *args, **kwargs):
return self._cobj.get_range(*args, **kwargs)
|
PypiClean
|
/django-attention-0.2.tar.gz/django-attention-0.2/README
|
<!--*-markdown-*-->
# `django-attention`
`django-attention` is a small session-based flash notice system for Django. It can be used to send messages to the next (or any future) page, with an optional ‘level’ indicating the urgency or purpose of the message.
## Installation
* Install `django-attention` from PyPI:
$ easy_install django-attention # OR
$ pip install django-attention
* Add `djattn` to your `INSTALLED_APPS` setting. You’ll also need to have
`django.contrib.sessions` installed.
* Add `'djattn.AttentionMiddleware'` to your `MIDDLEWARE_CLASSES` setting,
making sure it comes after the session middleware.
* If you want to access the notices from your templates, just add
`'django.core.context_processors.request'` to your
`TEMPLATE_CONTEXT_PROCESSORS` setting.
## Usage
### Quickstart
From view code:
def some_view(request):
# ...process the request...
request.attn.info('Something has happened!')
# ...return a response...
From the template:
<ul id="notices">
{% for notice in request.attn %}
<li class="{{ notice.level }}">{{ notice.message|escape }}</li>
{% endfor %}
</ul>
### `request.attn`
The `AttentionMiddleware` adds an `attn` attribute to each request. This is an
instance of `djattn.AttentionHandler`, and it implements all the methods needed
for setting/getting notices.
You can customize the attribute it’s set to with the `ATTENTION_REQUEST_ATTR`
setting.
`request.attn` also supports iteration (as you can see from
`{% for notice in request.attn %}` above). Iteration removes the notices from
the session as it yields them.
Note that you can also control the session key where the messages are stored
with the `ATTENTION_SESSION_KEY` setting. This defaults to `'_attn'`.
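For example, a minimal sketch of both settings in a project's `settings.py`
(the attribute and key names below are illustrative, not required defaults):

    # settings.py -- hypothetical values
    ATTENTION_REQUEST_ATTR = 'notices'       # notices become available as request.notices
    ATTENTION_SESSION_KEY = '_site_notices'  # session key used to store pending notices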
## (Un)license
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
|
PypiClean
|
/tensorflow_macos-2.14.0rc0-cp311-cp311-macosx_12_0_arm64.whl/tensorflow/python/training/moving_averages.py
|
"""Maintain moving averages of parameters."""
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.ops import cond
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variable_v1
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
@tf_export("__internal__.train.assign_moving_average", v1=[])
def assign_moving_average(variable, value, decay, zero_debias=True, name=None):
"""Compute the moving average of a variable.
The moving average of 'variable' updated with 'value' is:
variable * decay + value * (1 - decay)
The returned Operation sets 'variable' to the newly computed moving average,
by performing this subtraction:
variable -= (1 - decay) * (variable - value)
Since variables that are initialized to a `0` value will be `0` biased,
`zero_debias` optionally enables scaling by the mathematically correct
debiasing factor of
1 - decay ** num_updates
See Section 3 of (Kingma et al., 2015) for more details.
The names of the debias shadow variables, by default, include both the scope
they were created in and the scope of the variables they debias. They are also
given a uniquifying-suffix.
E.g.:
```
with tf.compat.v1.variable_scope('scope1'):
with tf.compat.v1.variable_scope('scope2'):
var = tf.compat.v1.get_variable('foo')
update_1 = tf.assign_moving_average(var, 0.0, 1.0)
update_2 = tf.assign_moving_average(var, 0.0, 0.9)
# var.name: 'scope1/scope2/foo'
# shadow var names: 'scope1/scope2/scope1/scope2/foo/biased'
# 'scope1/scope2/scope1/scope2/foo/biased_1'
```
Args:
variable: A Variable.
value: A tensor with the same shape as 'variable'.
decay: A float `Tensor` or float value. The moving average decay.
zero_debias: A python bool. If true, assume the variable is 0-initialized
and unbias it, as in (Kingma et al., 2015). See docstring in
`_zero_debias` for more details.
name: Optional name of the returned operation.
Returns:
A tensor which if evaluated will compute and return the new moving average.
References:
Adam - A Method for Stochastic Optimization:
[Kingma et al., 2015](https://arxiv.org/abs/1412.6980)
([pdf](https://arxiv.org/pdf/1412.6980.pdf))
"""
with ops.name_scope(name, "AssignMovingAvg",
[variable, value, decay]) as scope:
decay = ops.convert_to_tensor(1.0 - decay, name="decay")
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
def update_fn(v, value):
return state_ops.assign_sub(v, (v - value) * decay, name=scope)
def update(strategy, v, value):
if zero_debias:
return _zero_debias(strategy, v, value, decay)
else:
return _update(strategy, v, update_fn, args=(value,))
replica_context = distribute_lib.get_replica_context()
if replica_context:
# In a replica context, we update variable using the mean of value across
# replicas.
def merge_fn(strategy, v, value):
value = strategy.extended.reduce_to(ds_reduce_util.ReduceOp.MEAN, value,
v)
return update(strategy, v, value)
return replica_context.merge_call(merge_fn, args=(variable, value))
else:
strategy = distribute_lib.get_cross_replica_context()
return update(strategy, variable, value)
def weighted_moving_average(value,
decay,
weight,
truediv=True,
collections=None,
name=None):
"""Compute the weighted moving average of `value`.
Conceptually, the weighted moving average is:
`moving_average(value * weight) / moving_average(weight)`,
where a moving average updates by the rule
`new_value = decay * old_value + (1 - decay) * update`
Internally, this Op keeps moving average variables of both `value * weight`
and `weight`.
Args:
value: A numeric `Tensor`.
decay: A float `Tensor` or float value. The moving average decay.
weight: `Tensor` that keeps the current value of a weight. Shape should be
able to multiply `value`.
truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
floating point division. If `False`, use division implied by dtypes.
collections: List of graph collections keys to add the internal variables
`value * weight` and `weight` to. Defaults to
`[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name of the returned operation. Defaults to
"WeightedMovingAvg".
Returns:
An Operation that updates and returns the weighted moving average.
"""
# Unlike assign_moving_average, the weighted moving average doesn't modify
# user-visible variables. It is the ratio of two internal variables, which are
# moving averages of the updates. Thus, the signature of this function is
# quite different than assign_moving_average.
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
with variable_scope.variable_scope(name, "WeightedMovingAvg",
[value, weight, decay]) as scope:
value_x_weight_var = variable_scope.get_variable(
"value_x_weight",
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
weight_var = variable_scope.get_variable(
"weight",
shape=weight.get_shape(),
dtype=weight.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
numerator = assign_moving_average(
value_x_weight_var, value * weight, decay, zero_debias=False)
denominator = assign_moving_average(
weight_var, weight, decay, zero_debias=False)
if truediv:
return math_ops.truediv(numerator, denominator, name=scope.name)
else:
return math_ops.divide(numerator, denominator, name=scope.name)
def _update(strategy, var, update_fn, args):
"""Applies updates depending on the context."""
assert distribute_lib.in_cross_replica_context(), (
"_update can only be called in cross-replica context")
if distribute_lib.get_update_replica_id() is not None:
# Call update_fn on var to delegate the implementation. We expect `var` will
# do the right thing in update context, e.g, if `var` is a MirroredVariable,
# it should pick its component variable based on `update_replica_id` and
# only update that.
return update_fn(var, *args)
else:
return strategy.extended.update(var, update_fn, args)
def _zero_debias(strategy, unbiased_var, value, decay):
"""Compute the delta required for a debiased Variable.
All exponential moving averages initialized with Tensors are initialized to 0,
and therefore are biased to 0. Variables initialized to 0 and used as EMAs are
similarly biased. This function creates the debias updated amount according to
a scale factor, as in (Kingma et al., 2015).
To demonstrate the bias that results from 0-initialization, take an EMA that
was initialized to `0` with decay `b`. After `t` timesteps of seeing the
constant `c`, the variable will have the following value:
```
EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ...
= c*(1 - b^t)
```
To have the true value `c`, we would divide by the scale factor `1 - b^t`.
In order to perform debiasing, we use two shadow variables. One keeps track of
the biased estimate, and the other keeps track of the number of updates that
have occurred.
Args:
strategy: `Strategy` used to create and update variables.
unbiased_var: A Variable representing the current value of the unbiased EMA.
value: A Tensor representing the most recent value.
decay: A Tensor representing `1-decay` for the EMA.
Returns:
The amount that the unbiased variable should be updated. Computing this
tensor will also update the shadow variables appropriately.
References:
Adam - A Method for Stochastic Optimization:
[Kingma et al., 2015](https://arxiv.org/abs/1412.6980)
([pdf](https://arxiv.org/pdf/1412.6980.pdf))
"""
with variable_scope.variable_scope(
unbiased_var.name[:-len(":0")], values=[unbiased_var, value, decay]):
with ops.init_scope():
biased_initializer = init_ops.zeros_initializer()
local_step_initializer = init_ops.zeros_initializer()
def _maybe_get_unique(name):
"""Get name for a unique variable, if not `reuse=True`."""
if variable_scope.get_variable_scope().reuse:
return name
vs_vars = [
x.op.name
for x in variable_scope.get_variable_scope().global_variables()
]
full_name = variable_scope.get_variable_scope().name + "/" + name
if full_name not in vs_vars:
return name
idx = 1
while full_name + ("_%d" % idx) in vs_vars:
idx += 1
return name + ("_%d" % idx)
with strategy.extended.colocate_vars_with(unbiased_var):
biased_var = variable_scope.get_variable(
_maybe_get_unique("biased"),
initializer=biased_initializer,
shape=unbiased_var.get_shape(),
dtype=unbiased_var.dtype,
trainable=False)
local_step = variable_scope.get_variable(
_maybe_get_unique("local_step"),
shape=[],
dtype=unbiased_var.dtype,
initializer=local_step_initializer,
trainable=False)
def update_fn(v, value, biased_var, local_step):
update_biased = state_ops.assign_sub(biased_var,
(biased_var - value) * decay)
update_local_step = local_step.assign_add(1)
# This function gets `1 - decay`, so use `1.0 - decay` in the exponent.
bias_factor = 1 - math_ops.pow(1.0 - decay, update_local_step)
return state_ops.assign(
v, update_biased / bias_factor, name=ops.get_name_scope() + "/")
return _update(
strategy, unbiased_var, update_fn, args=(value, biased_var, local_step))
@tf_export("train.ExponentialMovingAverage")
class ExponentialMovingAverage:
"""Maintains moving averages of variables by employing an exponential decay.
When training a model, it is often beneficial to maintain moving averages of
the trained parameters. Evaluations that use averaged parameters sometimes
produce significantly better results than the final trained values.
The `apply()` method adds shadow copies of trained variables the first time
it is called, and maintains a moving average of the trained variables in
their shadow copies at every additional invocation.
It should generally be called immediately after creating the model weights,
and then after each training step.
The `average()` method gives access to the shadow variables.
It allows you to use the moving averages in place of the last trained values
for evaluations, by loading the moving averages into your model via
`var.assign(ema.average(var))`.
Additionally, although `ExponentialMovingAverage`
objects are not directly trackable by checkpoints,
`average()` returns the moving average variables for your model weights,
which you can then checkpoint. (There is an example
of this near the bottom of this docstring).
So, `average()` is useful when
building an evaluation model, or when restoring a model from a checkpoint
file.
The moving averages are computed using exponential decay. You specify the
decay value (as a scalar float value, `Tensor`, or `Variable`) when creating
the `ExponentialMovingAverage` object. The shadow variables are initialized
with the same initial values as the trained variables. When you run `apply`
to update the moving averages, each shadow variable is updated with the
formula:
`shadow_variable -= (1 - decay) * (shadow_variable - variable)`
This is mathematically equivalent to the classic formula below, but the use
of an `assign_sub` op (the `"-="` in the formula) allows concurrent lockless
updates to the variables:
`shadow_variable = decay * shadow_variable + (1 - decay) * variable`
Reasonable values for `decay` are close to 1.0, typically in the
multiple-nines range: 0.999, 0.9999, etc.
To have fine-grained control over the value of the decay parameter during
training, pass a scalar `tf.Variable` as the `decay` value to the constructor,
and update the variable as needed.
Example usage when creating a training model:
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
# Create an ExponentialMovingAverage object
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
# The first `apply` creates the shadow variables that hold the moving averages
ema.apply([var0, var1])
# grab the moving averages for checkpointing purposes or to be able to
# load the moving averages into the model weights
averages = [ema.average(var0), ema.average(var1)]
...
def train_step(...):
...
# Apply the optimizer.
opt.minimize(my_loss, [var0, var1])
# Update the moving averages
# of var0 and var1 with additional calls to `apply`
ema.apply([var0, var1])
...train the model by running train_step multiple times...
```
There are several ways to use the moving averages for evaluations:
1. Assign the values of the shadow variables to your model variables with
`Variable.assign(...)` before evaluating your
model. You can use the `average()`
method to get the shadow variable for a given variable. To continue
training after using this approach, make sure to record the unaveraged
weights and restore them before continuing to train. You can see the
tensorflow-addons' MovingAverage optimizer's `swap_weights` method for
one example of how to swap variables efficiently in distributed settings:
https://github.com/tensorflow/addons/blob/v0.13.0/tensorflow_addons/optimizers/moving_average.py#L151
2. Make sure to checkpoint out your moving average variables in your
`tf.train.Checkpoint`. At evaluation time, create your shadow variables and
use `tf.train.Checkpoint` to restore the moving averages into the shadow
variables. Then, load the moving averages into the actual model weights via
`var.assign(moving_avg)`.
3. Checkpoint out your moving average variables in your `tf.train.Checkpoint`.
For evaluation, restore your model weights directly from the moving
averages instead of from the non-averaged weights.
Caution: If you choose this approach, include only the object-graph paths
to the averaged variables in your checkpoint restore.
If you point both the unaveraged and averaged paths in a checkpoint
restore to the same variables, it is hard to reason about whether your
model will restore the averaged or non-averaged variables.
Example of saving out then restoring the shadow variable values:
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
# Create an ExponentialMovingAverage object, create the shadow variables,
# and grab the moving averages for checkpointing purposes.
# (The ExponentialMovingAverage object itself is not checkpointable)
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
ema.apply([var0, var1])
avg_var0 = ema.average(var0)
avg_var1 = ema.average(var1)
# Create a Checkpoint that will manage the model weights and the averages,
checkpoint = tf.train.Checkpoint(model_weights=[var0, var1],
averaged_weights=[avg_var0, avg_var1])
... # Do training
# Save out the checkpoint including the model weights and the moving averages
checkpoint.save(...)
```
Restore option: restore all averaged & non-averaged weights, then load
moving averages into the model via `var.assign()`
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
# Create an ExponentialMovingAverage object, create the shadow variables,
# and grab the moving averages for checkpoint restore purposes.
# (The ExponentialMovingAverage object itself is not checkpointable)
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
ema.apply([var0, var1])
avg_var0 = ema.average(var0)
avg_var1 = ema.average(var1)
# Create a Checkpoint that will manage the model weights and the averages,
checkpoint = tf.train.Checkpoint(model_weights=[var0, var1],
averaged_weights=[avg_var0, avg_var1])
checkpoint.restore(...)
var0.assign(avg_var0)
var1.assign(avg_var1)
# var0 and var1 now hold the moving average values
```
Restore option: Directly restore the moving averages into the model weights.
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
# Create a Checkpoint that will manage two objects with trackable state,
checkpoint = tf.train.Checkpoint(averaged_weights=[var0, var1])
checkpoint.restore(...)
# var0 and var1 now hold the moving average values
```
"""
def __init__(self,
decay,
num_updates=None,
zero_debias=False,
name="ExponentialMovingAverage"):
"""Creates a new ExponentialMovingAverage object.
The `apply()` method has to be called to create shadow variables.
Follow-on calls to the `apply()` method will update the moving averages
in the shadow variables.
(In TF 1.x graphs `apply()` will return an update op to update
the moving averages which must be explicitly run).
The optional `num_updates` parameter allows one to tweak the decay rate
dynamically. It is typical to pass the count of training steps, usually
kept in a variable that is incremented at each step, in which case the
decay rate is lower at the start of training. This makes moving averages
move faster. If passed, the actual decay rate used is:
`min(decay, (1 + num_updates) / (10 + num_updates))`
Args:
decay: A scalar float value, `Tensor`, or `Variable`. The decay parameter.
num_updates: Optional count of number of updates applied to variables.
zero_debias: If `True`, zero debias moving-averages that are initialized
with tensors. (Note: moving averages may not be initialized with
non-variable tensors when eager execution is enabled).
name: String. Optional prefix name to use for the name of ops added in
`apply()`.
"""
self._decay = decay
self._num_updates = num_updates
self._zero_debias = zero_debias
self._name = name
self._averages = {}
@property
def name(self):
"""The name of this ExponentialMovingAverage object."""
return self._name
def apply(self, var_list=None):
"""Maintains moving averages of variables.
`var_list` must be a list of `Variable` objects. This method
creates shadow variables (holding the moving averages)
for all elements of `var_list`, and
updates the moving averages using the current `var_list` values. Shadow
variables for `Variable` objects are initialized to the variable's initial
value.
Shadow variables are created with `trainable=False`. To access them you
can use the EMA object's `average` method. Note that `EMA` objects are
not trackable by checkpoints, so if you want to checkpoint or restore the
moving variables you will need to manually grab the shadow
variables via `average()` and assign them as `tf.Module` properties or
directly pass them to your `tf.train.Checkpoint`.
Note that `apply()` can be called multiple times. When eager execution is
enabled each call to apply will update the variables once, so this needs to
be called in a loop.
In legacy TF 1.x graphs, this method returns an op that updates all
shadow variables from the current value of their associated variables. In
TF 1.x graphs without automatic control dependencies this op needs to be
manually run.
Args:
var_list: A list of Variable objects. The variables
must be of types bfloat16, float16, float32, or float64.
(In legacy TF 1.x graphs these may be tensors, but this is unsupported
when eager execution is enabled.)
Returns:
An Operation that updates the moving averages.
Raises:
TypeError: If the arguments are not an allowed type.
"""
# TODO(touts): op_scope
if var_list is None:
var_list = variables.trainable_variables()
for v in var_list:
if (isinstance(v, tensor.Tensor)
and ops.executing_eagerly_outside_functions()):
raise TypeError(
"tf.train.ExponentialMovingAverage does not support non-Variable"
" tensors when eager execution is enabled.")
zero_debias_true = set() # set of vars to set `zero_debias=True`
for var in var_list:
if var.dtype.base_dtype not in [
dtypes.bfloat16, dtypes.float16, dtypes.float32, dtypes.float64
]:
raise TypeError("The variables must be half, float, or double: %s" %
var.name)
if var.ref() not in self._averages:
# For variables: to lower communication bandwidth across devices we keep
# the moving averages on the same device as the variables. For other
# tensors, we rely on the existing device allocation mechanism.
with ops.init_scope():
if isinstance(var, variables.Variable):
with ops.device(var.device):
initialized_value = cond.cond(
variable_v1.is_variable_initialized(var), var.read_value,
lambda: var.initial_value) # pylint: disable=cell-var-from-loop
avg = slot_creator.create_slot(
var,
initialized_value,
self.name,
colocate_with_primary=True,
copy_xla_sharding=True)
# NOTE(mrry): We only add `tf.Variable` objects to the
# `MOVING_AVERAGE_VARIABLES` collection.
ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
else:
avg = slot_creator.create_zeros_slot(
var,
self.name,
colocate_with_primary=(var.op.type in [
"Variable", "VariableV2", "VarHandleOp"
]),
copy_xla_sharding=True)
if self._zero_debias:
zero_debias_true.add(avg.ref())
self._averages[var.ref()] = avg
with ops.name_scope(self.name) as scope:
decay = ops.convert_to_tensor(
self._decay, dtype=dtypes.float32, name="decay")
if self._num_updates is not None:
num_updates = math_ops.cast(
self._num_updates, dtypes.float32, name="num_updates")
decay = math_ops.minimum(decay,
(1.0 + num_updates) / (10.0 + num_updates))
updates = []
for var in var_list:
avg = self._averages[var.ref()]
zero_debias = avg.ref() in zero_debias_true
updates.append(assign_moving_average(avg, var, decay, zero_debias))
return control_flow_ops.group(*updates, name=scope)
def average(self, var):
"""Returns the `Variable` holding the average of `var`.
Args:
var: A `Variable` object.
Returns:
A `Variable` object or `None` if the moving average of `var`
is not maintained.
"""
return self._averages.get(var.ref(), None)
@doc_controls.do_not_generate_docs
def average_name(self, var):
"""[Meant for TF1] Returns name of `Variable` holding the average for `var`.
(Designed to work with legacy `tf.compat.v1.train.Saver`, it is sensitive to
specific variable names and not recommended for TF2)
The typical scenario for `ExponentialMovingAverage` is to compute moving
averages of variables during training, and restore the variables from the
computed moving averages during evaluations.
To restore variables, you have to know the name of the shadow variables.
That name and the original variable can then be passed to a `Saver()` object
to restore the variable from the moving average value with:
`saver = tf.compat.v1.train.Saver({ema.average_name(var): var})`
`average_name()` can be called whether or not `apply()` has been called.
Args:
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of
`var`.
"""
if var.ref() in self._averages:
return self._averages[var.ref()].name[:-len(":0")]
return ops.get_default_graph().unique_name(
var.name[:-len(":0")] + "/" + self.name, mark_as_used=False)
@doc_controls.do_not_generate_docs
def variables_to_restore(self, moving_avg_variables=None):
"""[Designed for TF 1.x] Returns a map of names to `Variables` to restore.
(Designed to work with legacy `tf.compat.v1.train.Saver`, sensitive to
specific variable names and not recommended for TF2)
If a variable has a moving average, use the moving average variable name as
the restore name; otherwise, use the variable name.
For example,
```python
variables_to_restore = ema.variables_to_restore()
saver = tf.compat.v1.train.Saver(variables_to_restore)
```
Below is an example of such mapping:
```
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
```
Args:
moving_avg_variables: a list of variables that require the use of the
moving average variable name to be restored. If None, it will default to
variables.moving_average_variables() + variables.trainable_variables()
Returns:
A map from restore_names to variables. The restore_name is either the
original or the moving average version of the variable name, depending
on whether the variable name is in the `moving_avg_variables`.
"""
name_map = {}
if moving_avg_variables is None:
# Include trainable variables and variables which have been explicitly
# added to the moving_average_variables collection.
moving_avg_variables = variables.trainable_variables()
moving_avg_variables += variables.moving_average_variables()
# Remove duplicates
moving_avg_variables = set(v.ref() for v in moving_avg_variables)
# Collect all the variables with moving average,
for v in moving_avg_variables:
name_map[self.average_name(v.deref())] = v.deref()
# Make sure we restore variables without moving averages as well.
moving_avg_variable_names = set(
v.deref().name for v in moving_avg_variables)
for v in list(set(variables.global_variables())):
if v.name not in moving_avg_variable_names and v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
|
PypiClean
|
/pipicat-0.0.1.tar.gz/pipicat-0.0.1/ml/my_cross_val.py
|
import numpy as np
# This function returns the accuracy score of the prediction for classification
def my_accuracy_score_classification(ytrue,ypred):
ytrue = np.array(ytrue)
ypred = np.array(ypred)
if ytrue.shape[0] != ypred.shape[0]:
raise Exception('ERROR: ytrue and ypred not same length!')
accuracy_score = 0;
for i in range(0,ytrue.shape[0]):
if ytrue[i] == ypred[i]:
accuracy_score = accuracy_score + 1;
return (float(accuracy_score)/float(ytrue.shape[0]))
# This function returns the R^2 (coefficient of determination) of the prediction for regression
def my_accuracy_score_regression(ytrue,ypred):
ytrue = np.array(ytrue)
ypred = np.array(ypred)
if ytrue.shape[0] != ypred.shape[0]:
raise Exception('ERROR: ytrue and ypred not same length!')
# Here we use R^2(R Square) to evaluate the performance of the model
y_bar = np.mean(ytrue)
sum_hat_sqr = 0
sum_bar_sqr = 0
for i in range(0,ytrue.shape[0]):
sum_hat_sqr = sum_hat_sqr + (ytrue[i]-ypred[i])*(ytrue[i]-ypred[i])
sum_bar_sqr = sum_bar_sqr + (ytrue[i]-y_bar)*(ytrue[i]-y_bar)
R_sqr = 1 - sum_hat_sqr/sum_bar_sqr
return R_sqr
# Main function
# ml_type = 0 means classification
# ml_type = 1 means regression
def my_cross_val(method,X,y,k,ml_type):
X = np.array(X)
y = np.array(y)
y = np.reshape(y,(X.shape[0],1))
# Initialize array for the test set error
errRat = np.empty([k, 1])
# Permute the indices randomly
rndInd = np.random.permutation(y.size)
# Start and end index of test set
sttInd = 0;
endInd = (np.array(y.size/k).astype(int))
indLen = (np.array(y.size/k).astype(int))
for i in range(0, k):
# Prepare training data and test data
Xtrain = np.concatenate((X[rndInd[0:sttInd],:],X[rndInd[endInd:y.size],:]), axis=0)
ytrain = np.concatenate((y[rndInd[0:sttInd]],y[rndInd[endInd:y.size]]), axis=0)
Xtest = X[rndInd[sttInd:endInd],:]
ytest = y[rndInd[sttInd:endInd]]
sttInd = endInd
endInd = endInd + indLen
# Create the model
myMethod = method()
# Fit the data
myMethod.fit(Xtrain,ytrain.ravel())
# Test the model on (new) data
ypred = myMethod.predict(Xtest)
#print("ytest:",ytest)
#print("ypred:",ypred)
# Save error rate (classification) or R^2 score (regression)
if ml_type == 0:
errRat[i] = 1 - my_accuracy_score_classification(ytest, ypred)
elif ml_type == 1:
errRat[i] = my_accuracy_score_regression(ytest, ypred)
else:
raise Exception('Invalid ml_type!')
return errRat
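# Illustrative usage sketch (not part of the original module). It assumes
# scikit-learn is installed; any estimator class exposing fit/predict works.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier
    iris = load_iris()
    # 5-fold cross-validation of a decision tree classifier (ml_type=0 -> classification)
    err = my_cross_val(DecisionTreeClassifier, iris.data, iris.target, 5, 0)
    print("per-fold error rates:", err.ravel())
    print("mean error rate:", err.mean())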
|
PypiClean
|
/mibi_bin_tools-0.2.10-cp310-cp310-macosx_10_9_x86_64.whl/mibi_bin_tools/panel_utils.py
|
from typing import Union, List
import pandas as pd
from alpineer import misc_utils
def make_panel(mass: Union[float, List[float]],
target_name: Union[str, List[str], None] = None,
low_range: Union[float, List[float]] = 0.3,
high_range: Union[float, List[float]] = 0.0) -> pd.DataFrame:
""" Creates single mass panel
Args:
mass (float | List[float]):
central m/z for signal
target_name (str | List[str] | None):
naming for target. 'Target' if None
low_range (float | List[float]):
units below central mass to start integration
high_range (float | List[float]):
units above central mass to stop integration
Returns:
pd.DataFrame:
single mass panel as pandas dataframe
"""
mass = misc_utils.make_iterable(mass)
if target_name is not None:
target_name = misc_utils.make_iterable(target_name)
if len(mass) != len(target_name):
raise ValueError(
'`mass` and `target_name` did not contain the same number of elements. '
'If target names aren\'t required, then set `target_name=None`. '
)
else:
target_name = [f'targ{i}' for i in range(len(mass))]
# check for range lists
for r in (low_range, high_range):
if misc_utils.make_iterable(r) == r:
if len(r) != len(mass):
raise ValueError(
'`mass` and a range argument did not contain the same number of elements. '
'If only one integration range is required, `low_range` and `high_range` can '
'be set to float values, e.g `low_range=0.3`'
)
low_range = misc_utils.make_iterable(low_range)
high_range = misc_utils.make_iterable(high_range)
if len(low_range) != len(mass):
low_range = low_range * len(mass)
if len(high_range) != len(mass):
high_range = high_range * len(mass)
rows = []
for m, ch, low, high in zip(mass, target_name, low_range, high_range):
rows.append({
'Mass': m,
'Target': ch,
'Start': m - low,
'Stop': m + high,
})
return pd.DataFrame(rows)
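# Illustrative usage sketch (not part of the original module); the masses and
# target names below are hypothetical placeholders.
if __name__ == "__main__":
    example_panel = make_panel(
        mass=[89.0, 113.0],
        target_name=["Target89", "Target113"],
        low_range=0.3,
        high_range=0.0,
    )
    print(example_panel)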
|
PypiClean
|
/mpi-age-aegis-1.0.tar.gz/mpi-age-aegis-1.0/aegis/Core/Plotter.py
|
# NOTE only plots age_distribution if it was recorded for all stages
# NOTE only plots survival curve if age distribution was recorded for all stages
# and last_K is less than the number of stages (last_K is defined in ./__init__.py)
from .functions import make_windows, timenow, get_runtime
import numpy as np, pandas as pd, os, shutil
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import seaborn as sns
try:
import cPickle as pickle
except:
import pickle
class Plotter:
"""Wrapper class for storing dataframes and their associated plots."""
def __init__(self, inpath, verbose=False, outpath=""):
"""Import csv files as pandas dataframes and initialise plotting methods."""
self.verbose = verbose
if self.verbose:
self.starttime = timenow(False)
print "\nBeginning plotting {}.".format(timenow())
print "Working directory: "+os.getcwd()
print "Reading csv files from {}.".format(os.path.join(\
os.path.abspath(inpath),"csv_files"))
self.abort = False
try:
inp = os.path.join(inpath)
self.single_df = pd.read_csv(os.path.join(inp,"single.csv"))\
.set_index("name")
self.nstagex1_df = pd.read_csv(os.path.join(inp,"nstage-x-1.csv"))
self.has_agedist = self.single_df.loc["age_dist_N","value"]=="all"
if self.has_agedist:
self.nstagexmaxls_df = pd.read_csv(os.path.join(inp,"nstage-x-maxls.csv"))
self.has_surv_curve = os.path.exists(os.path.join(inp,"maxls-x-1.csv"))
if self.has_surv_curve:
self.maxlsx1_df = pd.read_csv(os.path.join(inp,"maxls-x-1.csv"))
self.nsnapxmaxls_df = pd.read_csv(os.path.join(inp,"nsnap-x-maxls.csv"))
self.nsnapxnbit_df = pd.read_csv(os.path.join(inp,"nsnap-x-nbit.csv"))
self.sliding_window_df = pd.read_csv(os.path.join(inp,"sliding_window.csv"))
self.nsnapxnloci_df = pd.read_csv(os.path.join(inp,"nsnap-x-nloci.csv"))
self.nsnapx1_df = pd.read_csv(os.path.join(inp,"nsnap-x-1.csv"))
except:
print "Error occured while reading csv files. Makes sure the provided path contains a directory 'csv_files' that was generated by running aegis read --csv. Aborting..."
self.abort = True
self.plot_methods = ["plot_pop_res",\
"plot_genotype_mean_snaps",\
"plot_genotype_mean",\
"plot_fitness_term_snaps",\
"plot_fitness_term",\
"plot_n1_sliding_window_snaps",\
"plot_surv_curve",\
"plot_observed_repr",\
"plot_age_dist",\
#"plot_bit_variance"\
#"plot_generation"\
]
self.plot_names = ["01_pop-res",\
"02_genotype-mean-snaps",\
"02_genotype-mean",\
"03_fitness-term-snaps",\
"03_fitness-term",\
"04_bits-snaps",\
"05_surv-curve",\
"06_observed-repr",\
"07_age-dist",\
#"08_bit-variance"\
#"09_generation"\
]
self.figures = []
self.outpath = os.getcwd() if outpath=="" else outpath
self.outdir = os.path.join(self.outpath,\
self.single_df.loc["output_prefix","value"] + "_plots")
# set style for seaborn plots
sns.set(style="darkgrid")
def generate_figures(self):
if self.abort: return
if self.verbose: print "\nGenerating figures:"
for m in self.plot_methods:
if self.verbose: print m,
p = getattr(self,m)()
if p:
self.figures.append(p)
if self.verbose: print ": OK"
elif self.verbose: print ": not generated"
def save_figures(self):
if self.abort: return
# Remove not generated plot names
if not self.has_surv_curve:
self.plot_methods.remove("plot_surv_curve")
self.plot_names.remove("05_surv-curve")
self.plot_methods.remove("plot_observed_repr")
self.plot_names.remove("06_observed-repr")
if not self.has_agedist:
self.plot_methods.remove("plot_age_dist")
self.plot_names.remove("07_age-dist")
if not int(self.single_df.loc["n_snapshots","value"])>1:
self.plot_methods.remove("plot_genotype_mean_snaps")
self.plot_names.remove("02_genotype-mean-snaps")
self.plot_methods.remove("plot_fitness_term_snaps")
self.plot_names.remove("03_fitness-term-snaps")
self.plot_methods.remove("plot_n1_sliding_window_snaps")
self.plot_names.remove("04_bits-snaps")
# Make/replace output directory
pm,pn,p = self.plot_methods, self.plot_names, self.figures
if not len(pm) == len(pn) == len(p):
errstr = "Plot names, methods and images are of different lengths."
raise ValueError(errstr)
if os.path.exists(self.outdir): # Overwrite existing output
shutil.rmtree(self.outdir)
os.makedirs(self.outdir)
if self.verbose:
print "\nSaving figures in "+os.path.join(os.getcwd(),self.outdir)+":"
for n in xrange(len(self.figures)):
outpath = os.path.join(self.outdir, self.plot_names[n] + ".png")
if self.verbose: print self.plot_names[n]
self.figures[n].savefig(outpath)
s = "\nSuccessfully saved all figures. Total runtime"
if self.verbose: print get_runtime(self.starttime, timenow(False), s)
def plot_pop_res(self):
f,ax = plt.subplots()
df = self.nstagex1_df.loc[:,["stage","popsize","resources"]].set_index("stage")
sns.lineplot(data=df, ax=ax)
ax.set_ylim(bottom=0)
f.suptitle("population size")
s1 = "mean starvation length: {0}".format(np.around(float(self.single_df.loc["avg_starvation_length","value"]),2))
s2 = "mean growth length: {0}".format(np.around(float(self.single_df.loc["avg_growth_length","value"]),2))
s = "{0}\n{1}".format(s1,s2)
ax.text(0.65, 1, s, fontsize=8, transform=ax.transAxes)
return f
def plot_bit_variance(self):
f,ax = plt.subplots()
df = self.nstagex1_df.loc[:,["stage","bit_variance_premature","bit_variance_mature"]].set_index("stage")
sns.lineplot(data=df, ax=ax)
ax.set_ylim(bottom=0)
f.suptitle("mean bit variance")
return f
def plot_genotype_mean_snaps(self):
if not int(self.single_df.loc["n_snapshots","value"])>1: return
df = self.nsnapxnloci_df
g = sns.relplot(data=df, x="locus", y="mean_gt", hue="type", col="snap", col_wrap=4)
#ax.set_ylim(bottom=0,top=2*int(self.single_df.loc["n_base","value"]))
plt.subplots_adjust(top=0.92)
g.fig.suptitle("mean genotype sum")
return g.fig
def plot_genotype_mean(self):
f,ax = plt.subplots()
df = self.nsnapxnloci_df[self.nsnapxnloci_df.snap==self.nsnapxnloci_df.snap.max()]
df = df.drop(columns=["snap"])
sns.scatterplot(data=df, x="locus", y="mean_gt", hue="type", ax=ax)
ax.set_ylim(bottom=0,top=2*int(self.single_df.loc["n_base","value"]))
f.suptitle("mean genotype sum (last snap)")
return f
def plot_fitness_term_snaps(self):
if not int(self.single_df.loc["n_snapshots","value"])>1: return
#f,ax = plt.subplots()
df = self.nsnapxmaxls_df.loc[:,["snap","age","fitness_term"]]
df = df[df.age >= int(self.single_df.loc["maturity","value"])]
g = sns.relplot(data=df, x="age", y="fitness_term", col="snap", col_wrap=4,\
kind="line", marker="o")
#ax.set_ylim(bottom=0)
plt.subplots_adjust(top=0.92)
g.fig.suptitle("fitness term")
return g.fig
def plot_fitness_term(self):
f,ax = plt.subplots()
df = self.nsnapxmaxls_df.loc[:,["snap","age","fitness_term"]]
df = df[df.snap==df.snap.max()]
df = df.drop(columns=["snap"])
df = df[df.age >= int(self.single_df.loc["maturity","value"])]
sns.lineplot(data=df, x="age", y="fitness_term", ax=ax, marker="o")
ax.set_ylim(bottom=0)
f.suptitle("fitness term (last snap)")
return f
def plot_n1_sliding_window_snaps(self):
if not int(self.single_df.loc["n_snapshots","value"])>1: return
#f,ax = plt.subplots()
df = self.sliding_window_df
g = sns.relplot(data=df, x="bit", y="n1_window_mean", col="snap", col_wrap=4,\
linewidths=0)
#ax.set_ylim(bottom=0)
plt.subplots_adjust(top=0.92)
g.fig.suptitle("bit distribution - sliding window\nsize: %s" %\
self.single_df.loc["n1_window_size","value"])
return g.fig
def plot_surv_curve(self):
if not self.has_surv_curve: return
f,ax = plt.subplots()
df = self.maxlsx1_df
sns.lineplot(data=df, x="age", y="kaplan-meier", ci=None, ax=ax)
ax.set_ylim(bottom=0)
f.suptitle("kaplan-meier survival curve (mean over all stages)")
return f
def plot_observed_repr(self):
if not self.has_surv_curve: return
f,ax = plt.subplots()
df = self.maxlsx1_df
df = df[df.age >= int(self.single_df.loc["maturity","value"])]
sns.lineplot(data=df, x="age", y="observed_repr_rate", ci=None, ax=ax)
ax.set_ylim(bottom=0)
f.suptitle("observed reproduction rate (mean over all stages)")
return f
def plot_generation(self):
f,ax = plt.subplots()
df = self.nstagex1_df.loc[:,["stage", "generation_min", "generation_max"]]\
.set_index("stage")
sns.lineplot(data=df, ax=ax)
f.suptitle("generation")
return f
def plot_age_dist(self):
if not self.has_agedist: return
f,ax = plt.subplots()
df = self.nstagexmaxls_df
sns.lineplot(data=df, x="age", y="age_distribution", ci=None, ax=ax)
f.suptitle("age distribution (mean over all stages)")
return f
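# Illustrative usage sketch (not part of the original module); the input path
# below is hypothetical and must point at the csv output of `aegis read --csv`.
if __name__ == "__main__":
    plotter = Plotter("path/to/record_csv_files", verbose=True)
    plotter.generate_figures()
    plotter.save_figures()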
|
PypiClean
|
/scorepower-0.1.1.tar.gz/scorepower-0.1.1/config/config.py
|
import configparser,os
import abc
from .parameters import cfgPara
## Abstract class defining the interface for parameter-access methods
class Ipara_op(metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_para(self,name):
pass
#TODO
# Get parameters from the command line
class para_cmdHandl(Ipara_op):
def __init__(self, args):
self.args_string = args
def get_para(self, name):
pass
# Get parameters from a configuration file
class para_cfgHandl(Ipara_op):
def __init__(self,cfgfile):
self.__cfgPara=configparser.ConfigParser()
self.__cfgPara.read(cfgfile)
def get_para(self,name,section_name="task"):
return self.__cfgPara.get(section_name, name)
class config(cfgPara):
def __init__(self,cfgfile_handl):
self._cfgPara=cfgfile_handl
self.get_all_paras()
# Fetch all parameters
def get_all_paras(self):
print("*****************----[start get all config parameters]-----*****************")
#*****************----task section----*****************# cpu_core
print("----------task section----------")
cfgPara.task.task = self._cfgPara.get_para("task",cfgPara.task.section)
print("[config] :cfgPara.task.task:",cfgPara.task.task )
cfgPara.task.cpu_core = int(self._cfgPara.get_para("cpu.cores", cfgPara.task.section))
print("[config] :cfgPara.task.cpu_core:", cfgPara.task.cpu_core)
cfgPara.task.basePoint = float(self._cfgPara.get_para("basePoint", cfgPara.task.section))
print("[config] :cfgPara.task.basePoint:", cfgPara.task.basePoint)
cfgPara.task.odds = float(self._cfgPara.get_para("odds", cfgPara.task.section))
print("[config] :cfgPara.train.odds:", cfgPara.task.odds)
# *****************----train section----*****************#
print("----------train section----------")
cfgPara.train.method = self._cfgPara.get_para("method", cfgPara.train.section)
print("[config] :cfgPara.train.method:", cfgPara.train.method)
cfgPara.train.data_path = self._cfgPara.get_para("data.path", cfgPara.train.section)
print("[config] :cfgPara.data.data_path:", cfgPara.train.data_path)
cfgPara.train.data_label = self._cfgPara.get_para("data.label", cfgPara.train.section)
print("[config] :cfgPara.data.data_label:", cfgPara.train.data_label)
cfgPara.train.data_label_good = self._cfgPara.get_para("data.label.good", cfgPara.train.section)
print("[config] :cfgPara.data.data_label_good:", cfgPara.train.data_label_good)
cfgPara.train.data_label_bad = self._cfgPara.get_para("data.label.bad", cfgPara.train.section)
print("[config] :cfgPara.data.data_label_bad:", cfgPara.train.data_label_bad)
# *****************----task_train_lr section----*****************#
print("------task_train_lr section------")
cfgPara.task_train_lr.reports_baseDir = self._cfgPara.get_para("lr.reports.baseDir", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_baseDir:", cfgPara.task_train_lr.reports_baseDir)
cfgPara.task_train_lr.reports_missRate = self._cfgPara.get_para("lr.reports.missRate", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_missRate:", cfgPara.task_train_lr.reports_missRate)
cfgPara.task_train_lr.reports_highMissRate = self._cfgPara.get_para("lr.reports.highMissRate",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_highMissRate:", cfgPara.task_train_lr.reports_highMissRate)
cfgPara.task_train_lr.reports_missRate_genType = self._cfgPara.get_para("lr.reports.missRate.genType",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_missRate_genType:", cfgPara.task_train_lr.reports_missRate_genType)
cfgPara.task_train_lr.reports_maxPercent = self._cfgPara.get_para("lr.reports.maxPercent",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_maxPercent:",cfgPara.task_train_lr.reports_maxPercent)
cfgPara.task_train_lr.reports_maxPercent_genType = self._cfgPara.get_para("lr.reports.maxPercent.genType",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_maxPercent_genType:", cfgPara.task_train_lr.reports_maxPercent_genType)
cfgPara.task_train_lr.reports_corr = self._cfgPara.get_para("lr.reports.corr",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_corr:", cfgPara.task_train_lr.reports_corr)
cfgPara.task_train_lr.reports_highCorr = self._cfgPara.get_para("lr.reports.highCorr", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_highCorr:", cfgPara.task_train_lr.reports_highCorr)
cfgPara.task_train_lr.reports_corr_genType = self._cfgPara.get_para("lr.reports.corr.genType",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_corr_genType:", cfgPara.task_train_lr.reports_corr_genType)
cfgPara.task_train_lr.reports_vif = self._cfgPara.get_para("lr.reports.vif", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_vif::", cfgPara.task_train_lr.reports_vif)
cfgPara.task_train_lr.reports_vif_genType = self._cfgPara.get_para("lr.reports.vif.genType", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_vif_genType", cfgPara.task_train_lr.reports_vif_genType)
cfgPara.task_train_lr.reports_featureImportance = self._cfgPara.get_para("lr.reports.featureImportance", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_featureImportance:", cfgPara.task_train_lr.reports_featureImportance)
cfgPara.task_train_lr.reports_featureImportance_png = self._cfgPara.get_para("lr.reports.featureImportance.png",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_featureImportance_png:",cfgPara.task_train_lr.reports_featureImportance_png)
cfgPara.task_train_lr.reports_featureImportance_genType = self._cfgPara.get_para("lr.reports.featureImportance.genType",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_featureImportance_genType:",cfgPara.task_train_lr.reports_featureImportance_genType)
cfgPara.task_train_lr.reports_ks = self._cfgPara.get_para("lr.reports.ks",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_ks:",cfgPara.task_train_lr.reports_ks)
cfgPara.task_train_lr.reports_ks_png = self._cfgPara.get_para("lr.reports.ks.png", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_ks_png:", cfgPara.task_train_lr.reports_ks_png)
cfgPara.task_train_lr.reports_ks_scores_bin = int(self._cfgPara.get_para("lr.reports.ks.scores.bin",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.reports_ks_scores_bin:", cfgPara.task_train_lr.reports_ks_scores_bin)
cfgPara.task_train_lr.reports_ks_genType = self._cfgPara.get_para("lr.reports.ks.genType", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_ks_genType:", cfgPara.task_train_lr.reports_ks_genType)
cfgPara.task_train_lr.reports_confusionMatrix_Png = self._cfgPara.get_para("lr.reports.confusionMatrix.Png", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_confusionMatrix_Png:", cfgPara.task_train_lr.reports_confusionMatrix_Png)
cfgPara.task_train_lr.reports_confusionMatrix_genType = self._cfgPara.get_para("lr.reports.confusionMatrix.genType",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.reports_confusionMatrix_genType:",cfgPara.task_train_lr.reports_confusionMatrix_genType)
cfgPara.task_train_lr.model_baseDir = self._cfgPara.get_para("lr.model.baseDir",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_baseDir:",cfgPara.task_train_lr.model_baseDir)
cfgPara.task_train_lr.model_ivParaPickle = self._cfgPara.get_para("lr.model.ivParaPickle", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_ivParaPickle:", cfgPara.task_train_lr.model_ivParaPickle)
cfgPara.task_train_lr.model_woeParaPickle = self._cfgPara.get_para("lr.model.woeParaPickle",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_woeParaPickle:", cfgPara.task_train_lr.model_woeParaPickle)
cfgPara.task_train_lr.model_cutoffParaPickle = self._cfgPara.get_para("lr.model.cutoffParaPickle",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_cutoffParaPickle:", cfgPara.task_train_lr.model_cutoffParaPickle)
cfgPara.task_train_lr.model_ivWoeCutoff = self._cfgPara.get_para("lr.model.ivWoeCutoff", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_ivWoeCutoff:", cfgPara.task_train_lr.model_ivWoeCutoff)
cfgPara.task_train_lr.model_singBinPcn = self._cfgPara.get_para("lr.model.singBinPcn",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_singBinPcn:", cfgPara.task_train_lr.model_singBinPcn)
cfgPara.task_train_lr.model_singBinPcnOverThreshold = self._cfgPara.get_para("lr.model.singBinPcnOverThreshold", cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_singBinPcnOverThreshold:",cfgPara.task_train_lr.model_singBinPcnOverThreshold)
cfgPara.task_train_lr.model_ivWoeCutoff_genType = self._cfgPara.get_para("lr.model.ivWoeCutoff.genType",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_ivWoeCutoff_genType:",cfgPara.task_train_lr.model_ivWoeCutoff_genType)
cfgPara.task_train_lr.model_pValuePara = self._cfgPara.get_para("lr.model.pValuePara",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_pValuePara:", cfgPara.task_train_lr.model_pValuePara)
cfgPara.task_train_lr.model_pValuePara_genType = self._cfgPara.get_para("lr.model.pValuePara.genType",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_pValuePara_genType:", cfgPara.task_train_lr.model_pValuePara_genType)
cfgPara.task_train_lr.model_lrSummary_png = self._cfgPara.get_para("lr.model.lrSummary.png",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_lrSummary_png:", cfgPara.task_train_lr.model_lrSummary_png)
cfgPara.task_train_lr.model_lrSummary_png_genType = self._cfgPara.get_para("lr.model.lrSummary.png.genType",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_lrSummary_png_genType:", cfgPara.task_train_lr.model_lrSummary_png_genType)
cfgPara.task_train_lr.model_coef = self._cfgPara.get_para("lr.model.coef",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_coef:", cfgPara.task_train_lr.model_coef)
cfgPara.task_train_lr.model_joblib_pkl = self._cfgPara.get_para("lr.model.joblib_pkl",cfgPara.task_train_lr.section)
print("[config] :cfgPara.task_train_lr.model_joblib_pkl:",cfgPara.task_train_lr.model_joblib_pkl)
cfgPara.task_train_lr.para_highMissThreshold = float(self._cfgPara.get_para("lr.para.highMissThreshold",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_highMissThreshold:", cfgPara.task_train_lr.para_highMissThreshold)
cfgPara.task_train_lr.para_maxPercent = float(self._cfgPara.get_para("lr.para.maxPercent",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_maxPercent:",cfgPara.task_train_lr.para_maxPercent)
cfgPara.task_train_lr.para_min_div_max_badrate = float(self._cfgPara.get_para("lr.para.min_div_max_badrate", cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_min_div_max_badrate:", cfgPara.task_train_lr.para_min_div_max_badrate)
cfgPara.task_train_lr.para_ivThreshold_low = float(self._cfgPara.get_para("lr.para.ivThreshold_low",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_ivThreshold_low:",cfgPara.task_train_lr.para_ivThreshold_low)
cfgPara.task_train_lr.para_ivThreshold_high = float(self._cfgPara.get_para("lr.para.ivThreshold_high",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_ivThreshold_high:", cfgPara.task_train_lr.para_ivThreshold_high)
cfgPara.task_train_lr.para_singBin_maxPercentThreshHold =float( self._cfgPara.get_para("lr.para.singBin_maxPercentThreshHold",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_ivThreshold_high:",cfgPara.task_train_lr.para_singBin_maxPercentThreshHold)
cfgPara.task_train_lr.para_highCorrThreshhold_max = float(self._cfgPara.get_para("lr.para.highCorrThreshhold_max", cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_highCorrThreshhold_max:",cfgPara.task_train_lr.para_highCorrThreshhold_max)
cfgPara.task_train_lr.para_highCorrThreshhold_min = float(self._cfgPara.get_para("lr.para.highCorrThreshhold_min",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_highCorrThreshhold_min:",cfgPara.task_train_lr.para_highCorrThreshhold_min)
cfgPara.task_train_lr.para_vif_threshold = float(self._cfgPara.get_para("lr.para.vif_threshold",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_highCorrThreshhold_min:",cfgPara.task_train_lr.para_vif_threshold)
cfgPara.task_train_lr.para_pValuer_threshold = float(self._cfgPara.get_para("lr.para.pValuer_threshold",cfgPara.task_train_lr.section))
print("[config] :cfgPara.task_train_lr.para_pValuer_threshold:",cfgPara.task_train_lr.para_pValuer_threshold)
# *****************----task_train_lgb section----*****************#
print("------task_train_lgb section------")
cfgPara.task_train_lgb.reports_baseDir = self._cfgPara.get_para("lgb.reports.baseDir",cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_baseDir:", cfgPara.task_train_lgb.reports_baseDir)
cfgPara.task_train_lgb.reports_missRate = self._cfgPara.get_para("lgb.reports.missRate", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_missRate:", cfgPara.task_train_lgb.reports_missRate)
cfgPara.task_train_lgb.reports_highMissRate = self._cfgPara.get_para("lgb.reports.highMissRate",cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_highMissRate:", cfgPara.task_train_lgb.reports_highMissRate)
cfgPara.task_train_lgb.reports_missRate_genType = self._cfgPara.get_para("lgb.reports.missRate.genType", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_missRate_genType:", cfgPara.task_train_lgb.reports_missRate_genType)
cfgPara.task_train_lgb.reports_maxPercent = self._cfgPara.get_para("lgb.reports.maxPercent", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_maxPercent:", cfgPara.task_train_lgb.reports_maxPercent)
cfgPara.task_train_lgb.reports_maxPercent_genType = self._cfgPara.get_para("lgb.reports.maxPercent.genType", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_maxPercent_genType:", cfgPara.task_train_lgb.reports_maxPercent_genType)
cfgPara.task_train_lgb.reports_corr = self._cfgPara.get_para("lgb.reports.corr", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_corr:", cfgPara.task_train_lgb.reports_corr)
cfgPara.task_train_lgb.reports_highCorr = self._cfgPara.get_para("lgb.reports.highCorr", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_highCorr:", cfgPara.task_train_lgb.reports_highCorr)
cfgPara.task_train_lgb.reports_corr_genType = self._cfgPara.get_para("lgb.reports.corr.genType", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_corr_genType:", cfgPara.task_train_lgb.reports_corr_genType)
cfgPara.task_train_lgb.reports_ks = self._cfgPara.get_para("lgb.reports.ks", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_ks:", cfgPara.task_train_lgb.reports_ks)
cfgPara.task_train_lgb.reports_ks_png = self._cfgPara.get_para("lgb.reports.ks.png",cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_ks_png:", cfgPara.task_train_lgb.reports_ks_png)
cfgPara.task_train_lgb.reports_ks_scores_bin = int(self._cfgPara.get_para("lgb.reports.ks.scores.bin", cfgPara.task_train_lgb.section))
print("[config] :cfgPara.task_train_lgb.reports_ks_scores_bin:", cfgPara.task_train_lgb.reports_ks_scores_bin)
cfgPara.task_train_lgb.reports_ks_genType = self._cfgPara.get_para("lgb.reports.ks.genType",cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_ks_genType:", cfgPara.task_train_lgb.reports_ks_genType)
cfgPara.task_train_lgb.reports_featureImportance = self._cfgPara.get_para("lgb.reports.featureImportance",cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_featureImportance:",cfgPara.task_train_lr.reports_featureImportance)
cfgPara.task_train_lgb.reports_featureImportance_png = self._cfgPara.get_para("lgb.reports.featureImportance.png",cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_featureImportance_png:",cfgPara.task_train_lr.reports_featureImportance_png)
cfgPara.task_train_lgb.reports_featureImportance_genType = self._cfgPara.get_para("lgb.reports.featureImportance.genType", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.reports_featureImportance_genType:",cfgPara.task_train_lr.reports_featureImportance_genType)
cfgPara.task_train_lgb.para_highMissThreshold = float(self._cfgPara.get_para("lgb.para.highMissThreshold", cfgPara.task_train_lgb.section))
print("[config] :cfgPara.task_train_lgb.para_highMissThreshold:", cfgPara.task_train_lgb.para_highMissThreshold)
cfgPara.task_train_lgb.para_maxPercent = float(self._cfgPara.get_para("lgb.para.maxPercent", cfgPara.task_train_lgb.section))
print("[config] :cfgPara.task_train_lgb.para_maxPercent:", cfgPara.task_train_lgb.para_maxPercent)
cfgPara.task_train_lgb.para_min_div_max_badrate = float(self._cfgPara.get_para("lgb.para.min_div_max_badrate", cfgPara.task_train_lgb.section))
print("[config] :cfgPara.task_train_lgb.para_min_div_max_badrate:",cfgPara.task_train_lgb.para_min_div_max_badrate)
cfgPara.task_train_lgb.model_baseDir = self._cfgPara.get_para("lgb.model.baseDir", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.model_baseDir:", cfgPara.task_train_lgb.model_baseDir)
cfgPara.task_train_lgb.model_trian_type = (self._cfgPara.get_para("lgb.model.trian.type", cfgPara.task_train_lgb.section))
print("[config] :cfgPara.task_train_lgb.model_trian_type:",cfgPara.task_train_lgb.model_trian_type)
cfgPara.task_train_lgb.model_coef = self._cfgPara.get_para("lgb.model.saveFile", cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.model_coef:", cfgPara.task_train_lgb.model_coef)
cfgPara.task_train_lgb.model_joblib_pkl = self._cfgPara.get_para("lgb.model.joblib_pkl",cfgPara.task_train_lgb.section)
print("[config] :cfgPara.task_train_lgb.model_joblib_pkl:", cfgPara.task_train_lgb.model_joblib_pkl)
# *****************----task_train_catboost section----*****************#
print("------task_train_catboost section------")
cfgPara.task_train_catboost.reports_baseDir = self._cfgPara.get_para("cb.reports.baseDir", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_baseDir:", cfgPara.task_train_catboost.reports_baseDir)
cfgPara.task_train_catboost.reports_missRate = self._cfgPara.get_para("cb.reports.missRate", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_missRate:", cfgPara.task_train_catboost.reports_missRate)
cfgPara.task_train_catboost.reports_highMissRate = self._cfgPara.get_para("cb.reports.highMissRate", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_highMissRate:", cfgPara.task_train_catboost.reports_highMissRate)
cfgPara.task_train_catboost.reports_missRate_genType = self._cfgPara.get_para("cb.reports.missRate.genType", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_missRate_genType:", cfgPara.task_train_catboost.reports_missRate_genType)
cfgPara.task_train_catboost.reports_maxPercent = self._cfgPara.get_para("cb.reports.maxPercent", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_maxPercent:", cfgPara.task_train_catboost.reports_maxPercent)
cfgPara.task_train_catboost.reports_maxPercent_genType = self._cfgPara.get_para("cb.reports.maxPercent.genType", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_maxPercent_genType:", cfgPara.task_train_catboost.reports_maxPercent_genType)
cfgPara.task_train_catboost.reports_corr = self._cfgPara.get_para("cb.reports.corr", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_corr:", cfgPara.task_train_catboost.reports_corr)
cfgPara.task_train_catboost.reports_highCorr = self._cfgPara.get_para("cb.reports.highCorr", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_highCorr:", cfgPara.task_train_catboost.reports_highCorr)
cfgPara.task_train_catboost.reports_corr_genType = self._cfgPara.get_para("cb.reports.corr.genType", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_corr_genType:", cfgPara.task_train_catboost.reports_corr_genType)
cfgPara.task_train_catboost.reports_ks = self._cfgPara.get_para("cb.reports.ks", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_ks:", cfgPara.task_train_catboost.reports_ks)
cfgPara.task_train_catboost.reports_ks_png = self._cfgPara.get_para("cb.reports.ks.png", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_ks_png:", cfgPara.task_train_catboost.reports_ks_png)
cfgPara.task_train_catboost.reports_ks_scores_bin = int(self._cfgPara.get_para("cb.reports.ks.scores.bin",cfgPara.task_train_catboost.section))
print("[config] :cfgPara.task_train_catboost.reports_ks_scores_bin:", cfgPara.task_train_catboost.reports_ks_scores_bin)
cfgPara.task_train_catboost.reports_ks_genType = self._cfgPara.get_para("cb.reports.ks.genType", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_ks_genType:", cfgPara.task_train_catboost.reports_ks_genType)
cfgPara.task_train_catboost.reports_psi = (self._cfgPara.get_para("cb.reports.psi", cfgPara.task_train_catboost.section))
print("[config] :cfgPara.task_train_catboost.reports_psi:",cfgPara.task_train_catboost.reports_psi)
cfgPara.task_train_catboost.reports_psi_png = self._cfgPara.get_para("cb.reports.psi.png",cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_psi_png:",cfgPara.task_train_catboost.reports_psi_png)
cfgPara.task_train_catboost.reports_psi_bin = int(self._cfgPara.get_para("cb.reports.psi.bin",cfgPara.task_train_catboost.section))
print("[config] :cfgPara.task_train_catboost.reports_psi_bin:", cfgPara.task_train_catboost.reports_psi_bin)
cfgPara.task_train_catboost.reports_featureImportance = self._cfgPara.get_para("cb.reports.featureImportance", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_featureImportance:", cfgPara.task_train_lr.reports_featureImportance)
cfgPara.task_train_catboost.reports_featureImportance_png = self._cfgPara.get_para("cb.reports.featureImportance.png",cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_featureImportance_png:",cfgPara.task_train_lr.reports_featureImportance_png)
cfgPara.task_train_catboost.reports_featureImportance_genType = self._cfgPara.get_para("cb.reports.featureImportance.genType", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.reports_featureImportance_genType:", cfgPara.task_train_lr.reports_featureImportance_genType)
cfgPara.task_train_catboost.para_highMissThreshold = float( self._cfgPara.get_para("cb.para.highMissThreshold", cfgPara.task_train_catboost.section))
print("[config] :cfgPara.task_train_catboost.para_highMissThreshold:", cfgPara.task_train_catboost.para_highMissThreshold)
cfgPara.task_train_catboost.para_maxPercent = float(self._cfgPara.get_para("cb.para.maxPercent", cfgPara.task_train_catboost.section))
print("[config] :cfgPara.task_train_catboost.para_maxPercent:", cfgPara.task_train_catboost.para_maxPercent)
cfgPara.task_train_catboost.para_min_div_max_badrate = float(self._cfgPara.get_para("cb.para.min_div_max_badrate", cfgPara.task_train_catboost.section))
print("[config] :cfgPara.task_train_catboost.para_min_div_max_badrate:", cfgPara.task_train_catboost.para_min_div_max_badrate)
cfgPara.task_train_catboost.model_baseDir = self._cfgPara.get_para("cb.model.baseDir", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_lgb.model_baseDir:", cfgPara.task_train_catboost.model_baseDir)
cfgPara.task_train_catboost.model_trian_type = ( self._cfgPara.get_para("cb.model.trian.type", cfgPara.task_train_catboost.section))
print("[config] :cfgPara.task_train_catboost.model_trian_type:", cfgPara.task_train_catboost.model_trian_type)
cfgPara.task_train_catboost.model_coef = self._cfgPara.get_para("cb.model.saveFile", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.model_coef:", cfgPara.task_train_catboost.model_coef)
cfgPara.task_train_catboost.model_joblib_pkl = self._cfgPara.get_para("cb.model.joblib_pkl", cfgPara.task_train_catboost.section)
print("[config] :cfgPara.task_train_catboost.model_joblib_pkl:", cfgPara.task_train_catboost.model_joblib_pkl)
# *****************----task_train_mlp section----*****************#
print("------task_train_mlp section------")
cfgPara.task_train_mlp.reports_baseDir = self._cfgPara.get_para("mlp.reports.baseDir",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_baseDir:", cfgPara.task_train_mlp.reports_baseDir)
cfgPara.task_train_mlp.reports_missRate = self._cfgPara.get_para("mlp.reports.missRate",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_missRate:", cfgPara.task_train_mlp.reports_missRate)
cfgPara.task_train_mlp.reports_highMissRate = self._cfgPara.get_para("mlp.reports.highMissRate",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_highMissRate:", cfgPara.task_train_mlp.reports_highMissRate)
cfgPara.task_train_mlp.reports_missRate_genType = self._cfgPara.get_para("mlp.reports.missRate.genType",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_missRate_genType:",cfgPara.task_train_mlp.reports_missRate_genType)
cfgPara.task_train_mlp.reports_maxPercent = self._cfgPara.get_para("mlp.reports.maxPercent",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_maxPercent:", cfgPara.task_train_mlp.reports_maxPercent)
cfgPara.task_train_mlp.reports_maxPercent_genType = self._cfgPara.get_para("mlp.reports.maxPercent.genType",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_maxPercent_genType:",cfgPara.task_train_mlp.reports_maxPercent_genType)
cfgPara.task_train_mlp.reports_corr = self._cfgPara.get_para("mlp.reports.corr", cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_corr:", cfgPara.task_train_mlp.reports_corr)
cfgPara.task_train_mlp.reports_highCorr = self._cfgPara.get_para("mlp.reports.highCorr",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_highCorr:", cfgPara.task_train_mlp.reports_highCorr)
cfgPara.task_train_mlp.reports_corr_genType = self._cfgPara.get_para("mlp.reports.corr.genType",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_corr_genType:", cfgPara.task_train_mlp.reports_corr_genType)
cfgPara.task_train_mlp.reports_ks = self._cfgPara.get_para("mlp.reports.ks", cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_ks:", cfgPara.task_train_mlp.reports_ks)
cfgPara.task_train_mlp.reports_ks_png = self._cfgPara.get_para("mlp.reports.ks.png",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_ks_png:", cfgPara.task_train_mlp.reports_ks_png)
cfgPara.task_train_mlp.reports_ks_genType = self._cfgPara.get_para("mlp.reports.ks.genType",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_ks_genType:", cfgPara.task_train_mlp.reports_ks_genType)
cfgPara.task_train_mlp.reports_featureImportance = self._cfgPara.get_para("mlp.reports.featureImportance",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_featureImportance:",cfgPara.task_train_mlp.reports_featureImportance)
cfgPara.task_train_mlp.reports_featureImportance_png = self._cfgPara.get_para("mlp.reports.featureImportance.png",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_featureImportance_png:",cfgPara.task_train_mlp.reports_featureImportance_png)
cfgPara.task_train_mlp.reports_featureImportance_genType = self._cfgPara.get_para("mlp.reports.featureImportance.genType", cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.reports_featureImportance_genType:",cfgPara.task_train_mlp.reports_featureImportance_genType)
cfgPara.task_train_mlp.para_highMissThreshold = float(self._cfgPara.get_para("mlp.para.highMissThreshold", cfgPara.task_train_mlp.section))
print("[config] :cfgPara.task_train_mlp.para_highMissThreshold:", cfgPara.task_train_mlp.para_highMissThreshold)
cfgPara.task_train_mlp.para_maxPercent = float(self._cfgPara.get_para("mlp.para.maxPercent", cfgPara.task_train_mlp.section))
print("[config] :cfgPara.task_train_mlp.para_maxPercent:", cfgPara.task_train_mlp.para_maxPercent)
cfgPara.task_train_mlp.para_min_div_max_badrate = float(self._cfgPara.get_para("mlp.para.min_div_max_badrate", cfgPara.task_train_mlp.section))
print("[config] :cfgPara.task_train_mlp.para_min_div_max_badrate:",cfgPara.task_train_mlp.para_min_div_max_badrate)
cfgPara.task_train_mlp.model_baseDir = self._cfgPara.get_para("mlp.model.baseDir",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.model_baseDir:", cfgPara.task_train_mlp.model_baseDir)
cfgPara.task_train_mlp.model_trian_type = (self._cfgPara.get_para("mlp.model.trian.type", cfgPara.task_train_mlp.section))
print("[config] :cfgPara.task_train_mlp.model_trian_type:", cfgPara.task_train_mlp.model_trian_type)
cfgPara.task_train_mlp.model_coef = self._cfgPara.get_para("mlp.model.saveFile", cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.model_coef:", cfgPara.task_train_mlp.model_coef)
cfgPara.task_train_mlp.model_joblib_pkl = self._cfgPara.get_para("mlp.model.joblib_pkl",cfgPara.task_train_mlp.section)
print("[config] :cfgPara.task_train_mlp.model_joblib_pkl:", cfgPara.task_train_mlp.model_joblib_pkl)
# *****************----task_psi section----*****************#
print("------task_psi section------")
cfgPara.task_psi.reports_baseDir = self._cfgPara.get_para("psi.reports.baseDir",cfgPara.task_psi.section)
print("[config] :cfgPara.task_psi.reports_baseDir:", cfgPara.task_psi.reports_baseDir)
cfgPara.task_psi.reports_psi = self._cfgPara.get_para("psi.reports",cfgPara.task_psi.section)
print("[config] :cfgPara.task_psi.reports_psi:", cfgPara.task_psi.reports_psi)
cfgPara.task_psi.reports_psi_png = self._cfgPara.get_para("psi.reports.png", cfgPara.task_psi.section)
print("[config] :cfgPara.task_psi.reports_psi_png:", cfgPara.task_psi.reports_psi_png)
cfgPara.task_psi.psi_data1 = self._cfgPara.get_para("psi.data1", cfgPara.task_psi.section)
print("[config] :cfgPara.task_psi.psi_data1:", cfgPara.task_psi.psi_data1)
cfgPara.task_psi.psi_data2 = self._cfgPara.get_para("psi.data2", cfgPara.task_psi.section)
print("[config] :cfgPara.task_psi.psi_data2:", cfgPara.task_psi.psi_data2)
cfgPara.task_psi.psi_data1_name = self._cfgPara.get_para("psi.data1.name", cfgPara.task_psi.section)
print("[config] :cfgPara.task_psi.psi_data1_name:", cfgPara.task_psi.psi_data1_name)
cfgPara.task_psi.psi_data2_name = self._cfgPara.get_para("psi.data2.name", cfgPara.task_psi.section)
print("[config] :cfgPara.task_psi.psi_data2_name:", cfgPara.task_psi.psi_data2_name)
cfgPara.task_psi.psi_bins = int(self._cfgPara.get_para("psi.bins", cfgPara.task_psi.section))
print("[config] :cfgPara.task_psi.psi_bins:", cfgPara.task_psi.psi_bins)
cfgPara.task_psi.psi_model_pkl = self._cfgPara.get_para("psi.mode.pkl", cfgPara.task_psi.section)
print("[config] :cfgPara.task_psi.psi_model_pkl:", cfgPara.task_psi.psi_model_pkl)
print("*****************----[end get all config parameters]-----*****************")
|
PypiClean
|
/dj_url_filter-0.4.4.tar.gz/dj_url_filter-0.4.4/url_filter/backends/plain.py
|
from __future__ import absolute_import, print_function, unicode_literals
import re
from ..utils import dictify
from .base import BaseFilterBackend
class PlainFilterBackend(BaseFilterBackend):
"""
Filter backend for filtering plain Python iterables.
.. warning::
The filter backend does filtering inside a regular loop
by comparing attributes of individual objects within iterable.
As a result, this is **NOT** an efficient method for filtering
large amounts of data. In those cases, it would probably
be better to find a more appropriate and efficient way to filter the data.
"""
name = "plain"
enforce_same_models = False
supported_lookups = {
"contains",
"day",
"endswith",
"exact",
"gt",
"gte",
"hour",
"icontains",
"iendswith",
"iexact",
"iin",
"in",
"iregex",
"isnull",
"istartswith",
"lt",
"lte",
"minute",
"month",
"range",
"regex",
"second",
"startswith",
"week_day",
"year",
}
def empty(self):
"""
Get empty queryset
"""
return []
def get_model(self):
"""
Get the model from the given queryset
Since there is no specific model for filtering Python lists,
this simply returns ``object``
"""
return object
def filter_by_specs(self, queryset):
"""
Filter queryset by applying all filter specifications
The filtering is done by manually looping over all
items in the iterable and comparing inner attributes with the
filter specification.
"""
if not self.regular_specs:
return queryset
return list(filter(self._filter_callable, queryset))
def _filter_callable(self, item):
return all(self._filter_by_spec(item, spec) for spec in self.regular_specs)
def _filter_by_spec(self, item, spec):
filtered = self._filter_by_spec_and_value(item, spec.components, spec)
if spec.is_negated:
return not filtered
return filtered
def _filter_by_spec_and_value(self, item, components, spec):
if not components and not isinstance(item, (list, tuple)):
comparator = getattr(self, "_compare_{}".format(spec.lookup))
try:
return comparator(item, spec)
except Exception:
return True
if isinstance(item, (list, tuple)):
return any(
self._filter_by_spec_and_value(i, components, spec) for i in item
)
if not isinstance(item, dict):
item = dictify(item)
return self._filter_by_spec_and_value(
item.get(components[0], {}), components[1:], spec
)
def _compare_contains(self, value, spec):
return spec.value in value
def _compare_day(self, value, spec):
return value.day == spec.value
def _compare_endswith(self, value, spec):
return value.endswith(spec.value)
def _compare_exact(self, value, spec):
return value == spec.value
def _compare_gt(self, value, spec):
return value > spec.value
def _compare_gte(self, value, spec):
return value >= spec.value
def _compare_hour(self, value, spec):
return value.hour == spec.value
def _compare_icontains(self, value, spec):
return spec.value.lower() in value.lower()
def _compare_iendswith(self, value, spec):
return value.lower().endswith(spec.value.lower())
def _compare_iexact(self, value, spec):
return value.lower() == spec.value.lower()
def _compare_in(self, value, spec):
return value in spec.value
def _compare_iin(self, value, spec):
return value.lower() in [i.lower() for i in spec.value]
def _compare_iregex(self, value, spec):
return bool(re.match(spec.value, value, re.IGNORECASE))
def _compare_isnull(self, value, spec):
if spec.value:
return value is None
else:
return value is not None
def _compare_istartswith(self, value, spec):
return value.lower().startswith(spec.value.lower())
def _compare_lt(self, value, spec):
return value < spec.value
def _compare_lte(self, value, spec):
return value <= spec.value
def _compare_minute(self, value, spec):
return value.minute == spec.value
def _compare_month(self, value, spec):
return value.month == spec.value
def _compare_range(self, value, spec):
return spec.value[0] <= value <= spec.value[1]
def _compare_regex(self, value, spec):
return bool(re.match(spec.value, value))
def _compare_second(self, value, spec):
return value.second == spec.value
def _compare_startswith(self, value, spec):
return value.startswith(spec.value)
def _compare_week_day(self, value, spec):
# expected 1-Sunday and 7-Saturday
return ((value.weekday() + 1) % 7) + 1 == spec.value
def _compare_year(self, value, spec):
return value.year == spec.value
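# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). In url_filter the
# backend is normally constructed and bound to filter specs elsewhere, so to
# keep this example self-contained we bypass __init__ and call the per-item
# helper directly; the FakeSpec namedtuple below only mimics the attributes a
# real spec exposes (components, lookup, value, is_negated), and the data is
# made up.
if __name__ == "__main__":
    from collections import namedtuple

    FakeSpec = namedtuple("FakeSpec", ["components", "lookup", "value", "is_negated"])

    people = [
        {"name": "Ada", "address": {"city": "London"}},
        {"name": "Linus", "address": {"city": "Helsinki"}},
    ]
    spec = FakeSpec(components=["address", "city"], lookup="iexact",
                    value="london", is_negated=False)

    backend = PlainFilterBackend.__new__(PlainFilterBackend)  # skip __init__ for the sketch
    matches = [item for item in people if backend._filter_by_spec(item, spec)]
    print(matches)  # -> [{'name': 'Ada', 'address': {'city': 'London'}}]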
|
PypiClean
|
/loggingex-1.2.0.tar.gz/loggingex-1.2.0/noxfile.py
|
from json import loads
from pathlib import Path
from typing import Any, Dict, Iterable, List, Tuple, Union
import nox
from nox.sessions import Session
DepsT = Union[List[str], Tuple[str]]
TEST_DEPENDENCIES = ("pytest", "pytest-mock", "webtest")
BLACK_DEPENDENCIES = ("black",)
BLACKEN_DEPENDENCIES = ("black",)
FLAKE8_DEPENDENCIES = (
"flake8",
"flake8-bugbear",
"flake8-builtins",
"flake8-commas",
"flake8-comprehensions",
"flake8-docstrings",
"flake8-eradicate",
"flake8-import-order",
"flake8-pytest",
"flake8-quotes",
"flake8-super-call",
"pep8-naming",
)
nox.options.sessions = ["black", "flake8", "tests", "example"]
nox.options.reuse_existing_virtualenvs = True
nox.options.error_on_missing_interpreters = True
PROJECT_ROOT = Path(__file__).parent
EXAMPLES_ROOT = Path(PROJECT_ROOT, "examples")
@nox.session(python="3.7")
def blacken(session: Session):
install_dependencies(session, BLACKEN_DEPENDENCIES, install_self=False)
show_environment_info(session, ("black", "--version"))
session.run("black", ".")
@nox.session(python="3.7")
def black(session: Session):
install_dependencies(session, BLACK_DEPENDENCIES, install_self=False)
show_environment_info(session, ("black", "--version"))
session.run("black", "--check", ".")
@nox.session(python="3.7")
def flake8(session: Session):
install_dependencies(session, FLAKE8_DEPENDENCIES, install_self=False)
show_environment_info(session, ("flake8", "--version"))
session.run("flake8", ".")
@nox.session(python=["3.5", "3.6", "3.7"])
def tests(session: Session):
install_dependencies(session, TEST_DEPENDENCIES)
session.install(".")
show_environment_info(session, ("pytest", "--version"))
session.run("pytest")
def example_run(session: Session, name: str = None):
descr = get_example_descr(name)
install_dependencies(session, descr.get("requires", []))
show_environment_info(session)
session.log("Running example %r", descr.get("name"))
session.chdir(str(descr["dir"].absolute()))
result = session.run(*descr["command"], silent=True)
output = get_contents(Path(descr["dir"], descr["output"]))
if result != output:
session.error(
"Example output did not match expected output:\n"
"===== EXPECTED OUTPUT BEGIN =====\n%s\n"
"===== EXPECTED OUTPUT END =====\n"
"===== ACTUAL OUTPUT BEGIN =====\n%s\n"
"===== ACTUAL OUTPUT END =====\n",
output,
result,
)
session.log("Example output matched expected output, all is well.")
def install_dependencies(
session: Session, dependencies: DepsT = None, install_self: bool = True
):
if dependencies:
session.install(*dependencies)
if install_self:
session.install(".")
def show_environment_info(session: Session, *args):
session.run("python", "--version")
session.run("pip", "--version")
session.run("pip", "list", "--format=columns")
for arg in args:
session.run(*arg)
def get_contents(filename: Path) -> str:
with filename.open("r") as f:
return f.read()
def get_example_descr(name: str) -> Dict[str, Any]:
example_dir = Path(EXAMPLES_ROOT, name)
assert example_dir.exists()
assert example_dir.is_dir()
example_json = Path(example_dir, "example.json")
assert example_json.exists()
assert example_json.is_file()
descr = loads(get_contents(example_json))
descr.update({"dir": example_dir, "json": example_json})
return descr
def get_all_example_names(examples_root: Path = EXAMPLES_ROOT) -> Iterable[str]:
"""Return a list of directories, that have 'example.py' file."""
examples_dir = Path(examples_root)
assert examples_dir.exists()
examples = []
for item in examples_dir.iterdir():
if not item.is_dir():
continue
example_json = Path(item, "example.json")
if example_json.exists() and example_json.is_file():
examples.append(item.name)
return examples
EXAMPLES = get_all_example_names()
def example(session: Session, name: str = None):
if name is None:
session.log("There are no examples.")
return
example_run(session, name)
# Only create the actual example session, when there are examples present
if EXAMPLES:
example = nox.parametrize("name", EXAMPLES)(example)
example = nox.session(example, python=["3.5", "3.6", "3.7"])
else:
example = nox.session(example)
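# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original noxfile): get_example_descr() above
# expects each examples/<name>/ directory to contain an example.json file.
# Judging only from the keys read in this file ("name", "requires", "command",
# "output"), a minimal descriptor could look like the dict below; the concrete
# values are invented for illustration.
EXAMPLE_JSON_SKETCH = {
    "name": "hello-context",              # label printed by session.log()
    "requires": ["loggingex"],            # extra pip installs; may be empty
    "command": ["python", "example.py"],  # argv passed to session.run()
    "output": "expected_output.txt",      # file compared against captured stdout
}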
|
PypiClean
|
/accelbyte_py_sdk-0.48.0.tar.gz/accelbyte_py_sdk-0.48.0/accelbyte_py_sdk/api/platform/operations/catalog_changes/query_changes.py
|
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# AccelByte Gaming Services Platform Service (4.34.0)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from .....core import StrEnum
from ...models import CatalogChangePagingSlicedResult
class ActionEnum(StrEnum):
CREATE = "CREATE"
DELETE = "DELETE"
UPDATE = "UPDATE"
class ItemTypeEnum(StrEnum):
APP = "APP"
BUNDLE = "BUNDLE"
CODE = "CODE"
COINS = "COINS"
EXTENSION = "EXTENSION"
INGAMEITEM = "INGAMEITEM"
LOOTBOX = "LOOTBOX"
MEDIA = "MEDIA"
OPTIONBOX = "OPTIONBOX"
SEASON = "SEASON"
SUBSCRIPTION = "SUBSCRIPTION"
class SortByEnum(StrEnum):
CREATEDAT = "createdAt"
CREATEDAT_ASC = "createdAt:asc"
CREATEDAT_DESC = "createdAt:desc"
UPDATEDAT = "updatedAt"
UPDATEDAT_ASC = "updatedAt:asc"
UPDATEDAT_DESC = "updatedAt:desc"
class StatusEnum(StrEnum):
PUBLISHED = "PUBLISHED"
UNPUBLISHED = "UNPUBLISHED"
class TypeEnum(StrEnum):
CATEGORY = "CATEGORY"
ITEM = "ITEM"
SECTION = "SECTION"
STORE = "STORE"
VIEW = "VIEW"
class QueryChanges(Operation):
"""Query catalog changes (queryChanges)
This API is used to query changes.
Other detail info:
* Required permission : resource="ADMIN:NAMESPACE:{namespace}:STORE", action=2 (READ)
* Returns : the pagination of changes
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:STORE [READ]
Properties:
url: /platform/admin/namespaces/{namespace}/stores/{storeId}/catalogChanges/byCriteria
method: GET
tags: ["CatalogChanges"]
consumes: []
produces: ["application/json"]
securities: [BEARER_AUTH] or [BEARER_AUTH]
namespace: (namespace) REQUIRED str in path
store_id: (storeId) REQUIRED str in path
action: (action) OPTIONAL Union[str, ActionEnum] in query
item_sku: (itemSku) OPTIONAL str in query
item_type: (itemType) OPTIONAL Union[str, ItemTypeEnum] in query
limit: (limit) OPTIONAL int in query
offset: (offset) OPTIONAL int in query
selected: (selected) OPTIONAL bool in query
sort_by: (sortBy) OPTIONAL List[Union[str, SortByEnum]] in query
status: (status) OPTIONAL Union[str, StatusEnum] in query
type_: (type) OPTIONAL Union[str, TypeEnum] in query
updated_at_end: (updatedAtEnd) OPTIONAL str in query
updated_at_start: (updatedAtStart) OPTIONAL str in query
Responses:
200: OK - CatalogChangePagingSlicedResult (successful operation)
"""
# region fields
_url: str = "/platform/admin/namespaces/{namespace}/stores/{storeId}/catalogChanges/byCriteria"
_method: str = "GET"
_consumes: List[str] = []
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"], ["BEARER_AUTH"]]
_location_query: str = None
namespace: str # REQUIRED in [path]
store_id: str # REQUIRED in [path]
action: Union[str, ActionEnum] # OPTIONAL in [query]
item_sku: str # OPTIONAL in [query]
item_type: Union[str, ItemTypeEnum] # OPTIONAL in [query]
limit: int # OPTIONAL in [query]
offset: int # OPTIONAL in [query]
selected: bool # OPTIONAL in [query]
sort_by: List[Union[str, SortByEnum]] # OPTIONAL in [query]
status: Union[str, StatusEnum] # OPTIONAL in [query]
type_: Union[str, TypeEnum] # OPTIONAL in [query]
updated_at_end: str # OPTIONAL in [query]
updated_at_start: str # OPTIONAL in [query]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
"query": self.get_query_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "store_id"):
result["storeId"] = self.store_id
return result
def get_query_params(self) -> dict:
result = {}
if hasattr(self, "action"):
result["action"] = self.action
if hasattr(self, "item_sku"):
result["itemSku"] = self.item_sku
if hasattr(self, "item_type"):
result["itemType"] = self.item_type
if hasattr(self, "limit"):
result["limit"] = self.limit
if hasattr(self, "offset"):
result["offset"] = self.offset
if hasattr(self, "selected"):
result["selected"] = self.selected
if hasattr(self, "sort_by"):
result["sortBy"] = self.sort_by
if hasattr(self, "status"):
result["status"] = self.status
if hasattr(self, "type_"):
result["type"] = self.type_
if hasattr(self, "updated_at_end"):
result["updatedAtEnd"] = self.updated_at_end
if hasattr(self, "updated_at_start"):
result["updatedAtStart"] = self.updated_at_start
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_namespace(self, value: str) -> QueryChanges:
self.namespace = value
return self
def with_store_id(self, value: str) -> QueryChanges:
self.store_id = value
return self
def with_action(self, value: Union[str, ActionEnum]) -> QueryChanges:
self.action = value
return self
def with_item_sku(self, value: str) -> QueryChanges:
self.item_sku = value
return self
def with_item_type(self, value: Union[str, ItemTypeEnum]) -> QueryChanges:
self.item_type = value
return self
def with_limit(self, value: int) -> QueryChanges:
self.limit = value
return self
def with_offset(self, value: int) -> QueryChanges:
self.offset = value
return self
def with_selected(self, value: bool) -> QueryChanges:
self.selected = value
return self
def with_sort_by(self, value: List[Union[str, SortByEnum]]) -> QueryChanges:
self.sort_by = value
return self
def with_status(self, value: Union[str, StatusEnum]) -> QueryChanges:
self.status = value
return self
def with_type_(self, value: Union[str, TypeEnum]) -> QueryChanges:
self.type_ = value
return self
def with_updated_at_end(self, value: str) -> QueryChanges:
self.updated_at_end = value
return self
def with_updated_at_start(self, value: str) -> QueryChanges:
self.updated_at_start = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "store_id") and self.store_id:
result["storeId"] = str(self.store_id)
elif include_empty:
result["storeId"] = ""
if hasattr(self, "action") and self.action:
result["action"] = str(self.action)
elif include_empty:
result["action"] = Union[str, ActionEnum]()
if hasattr(self, "item_sku") and self.item_sku:
result["itemSku"] = str(self.item_sku)
elif include_empty:
result["itemSku"] = ""
if hasattr(self, "item_type") and self.item_type:
result["itemType"] = str(self.item_type)
elif include_empty:
result["itemType"] = Union[str, ItemTypeEnum]()
if hasattr(self, "limit") and self.limit:
result["limit"] = int(self.limit)
elif include_empty:
result["limit"] = 0
if hasattr(self, "offset") and self.offset:
result["offset"] = int(self.offset)
elif include_empty:
result["offset"] = 0
if hasattr(self, "selected") and self.selected:
result["selected"] = bool(self.selected)
elif include_empty:
result["selected"] = False
if hasattr(self, "sort_by") and self.sort_by:
result["sortBy"] = [str(i0) for i0 in self.sort_by]
elif include_empty:
result["sortBy"] = []
if hasattr(self, "status") and self.status:
result["status"] = str(self.status)
elif include_empty:
result["status"] = Union[str, StatusEnum]()
if hasattr(self, "type_") and self.type_:
result["type"] = str(self.type_)
elif include_empty:
result["type"] = Union[str, TypeEnum]()
if hasattr(self, "updated_at_end") and self.updated_at_end:
result["updatedAtEnd"] = str(self.updated_at_end)
elif include_empty:
result["updatedAtEnd"] = ""
if hasattr(self, "updated_at_start") and self.updated_at_start:
result["updatedAtStart"] = str(self.updated_at_start)
elif include_empty:
result["updatedAtStart"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(
self, code: int, content_type: str, content: Any
) -> Tuple[Union[None, CatalogChangePagingSlicedResult], Union[None, HttpResponse]]:
"""Parse the given response.
200: OK - CatalogChangePagingSlicedResult (successful operation)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(
code=code, content_type=content_type, content=content
)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return CatalogChangePagingSlicedResult.create_from_dict(content), None
return self.handle_undocumented_response(
code=code, content_type=content_type, content=content
)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
store_id: str,
action: Optional[Union[str, ActionEnum]] = None,
item_sku: Optional[str] = None,
item_type: Optional[Union[str, ItemTypeEnum]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
selected: Optional[bool] = None,
sort_by: Optional[List[Union[str, SortByEnum]]] = None,
status: Optional[Union[str, StatusEnum]] = None,
type_: Optional[Union[str, TypeEnum]] = None,
updated_at_end: Optional[str] = None,
updated_at_start: Optional[str] = None,
**kwargs,
) -> QueryChanges:
instance = cls()
instance.namespace = namespace
instance.store_id = store_id
if action is not None:
instance.action = action
if item_sku is not None:
instance.item_sku = item_sku
if item_type is not None:
instance.item_type = item_type
if limit is not None:
instance.limit = limit
if offset is not None:
instance.offset = offset
if selected is not None:
instance.selected = selected
if sort_by is not None:
instance.sort_by = sort_by
if status is not None:
instance.status = status
if type_ is not None:
instance.type_ = type_
if updated_at_end is not None:
instance.updated_at_end = updated_at_end
if updated_at_start is not None:
instance.updated_at_start = updated_at_start
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> QueryChanges:
instance = cls()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "storeId" in dict_ and dict_["storeId"] is not None:
instance.store_id = str(dict_["storeId"])
elif include_empty:
instance.store_id = ""
if "action" in dict_ and dict_["action"] is not None:
instance.action = str(dict_["action"])
elif include_empty:
instance.action = Union[str, ActionEnum]()
if "itemSku" in dict_ and dict_["itemSku"] is not None:
instance.item_sku = str(dict_["itemSku"])
elif include_empty:
instance.item_sku = ""
if "itemType" in dict_ and dict_["itemType"] is not None:
instance.item_type = str(dict_["itemType"])
elif include_empty:
instance.item_type = Union[str, ItemTypeEnum]()
if "limit" in dict_ and dict_["limit"] is not None:
instance.limit = int(dict_["limit"])
elif include_empty:
instance.limit = 0
if "offset" in dict_ and dict_["offset"] is not None:
instance.offset = int(dict_["offset"])
elif include_empty:
instance.offset = 0
if "selected" in dict_ and dict_["selected"] is not None:
instance.selected = bool(dict_["selected"])
elif include_empty:
instance.selected = False
if "sortBy" in dict_ and dict_["sortBy"] is not None:
instance.sort_by = [str(i0) for i0 in dict_["sortBy"]]
elif include_empty:
instance.sort_by = []
if "status" in dict_ and dict_["status"] is not None:
instance.status = str(dict_["status"])
elif include_empty:
instance.status = Union[str, StatusEnum]()
if "type" in dict_ and dict_["type"] is not None:
instance.type_ = str(dict_["type"])
elif include_empty:
instance.type_ = Union[str, TypeEnum]()
if "updatedAtEnd" in dict_ and dict_["updatedAtEnd"] is not None:
instance.updated_at_end = str(dict_["updatedAtEnd"])
elif include_empty:
instance.updated_at_end = ""
if "updatedAtStart" in dict_ and dict_["updatedAtStart"] is not None:
instance.updated_at_start = str(dict_["updatedAtStart"])
elif include_empty:
instance.updated_at_start = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"namespace": "namespace",
"storeId": "store_id",
"action": "action",
"itemSku": "item_sku",
"itemType": "item_type",
"limit": "limit",
"offset": "offset",
"selected": "selected",
"sortBy": "sort_by",
"status": "status",
"type": "type_",
"updatedAtEnd": "updated_at_end",
"updatedAtStart": "updated_at_start",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"namespace": True,
"storeId": True,
"action": False,
"itemSku": False,
"itemType": False,
"limit": False,
"offset": False,
"selected": False,
"sortBy": False,
"status": False,
"type": False,
"updatedAtEnd": False,
"updatedAtStart": False,
}
@staticmethod
def get_collection_format_map() -> Dict[str, Union[None, str]]:
return {
"sortBy": "csv", # in query
}
@staticmethod
def get_enum_map() -> Dict[str, List[Any]]:
return {
"action": ["CREATE", "DELETE", "UPDATE"], # in query
"itemType": [
"APP",
"BUNDLE",
"CODE",
"COINS",
"EXTENSION",
"INGAMEITEM",
"LOOTBOX",
"MEDIA",
"OPTIONBOX",
"SEASON",
"SUBSCRIPTION",
], # in query
"sortBy": [
"createdAt",
"createdAt:asc",
"createdAt:desc",
"updatedAt",
"updatedAt:asc",
"updatedAt:desc",
], # in query
"status": ["PUBLISHED", "UNPUBLISHED"], # in query
"type": ["CATEGORY", "ITEM", "SECTION", "STORE", "VIEW"], # in query
}
# endregion static methods
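# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated SDK file): the operation is
# normally executed through the SDK's request runner, which lives elsewhere;
# here we only build the operation object and inspect the request parameters
# it would send. The namespace and store ID values are placeholders.
if __name__ == "__main__":
    op = QueryChanges.create(
        namespace="my-namespace",
        store_id="my-store-id",
        action=ActionEnum.UPDATE,
        limit=20,
        sort_by=[SortByEnum.UPDATEDAT_DESC],
    )
    print(op.url)                 # path template with {namespace} and {storeId}
    print(op.get_path_params())   # {'namespace': 'my-namespace', 'storeId': 'my-store-id'}
    print(op.get_query_params())  # query parameters keyed by their wire names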
|
PypiClean
|
/Calkulate-23.5.tar.gz/Calkulate-23.5/README.md
|
# Calkulate

[](https://pypi.org/project/calkulate/)
[](https://doi.org/10.5281/zenodo.2634304)
[](https://calkulate.readthedocs.io/en/latest/)
[](https://github.com/mvdh7/calkulate/blob/main/.misc/coverage.txt)
[](https://www.gnu.org/licenses/gpl-3.0)
[](https://github.com/psf/black)
Calkulate is a Python package for finding total alkalinity from titration data using [PyCO2SYS](https://PyCO2SYS.rtfd.io).
## Installation
pip install calkulate
## Use
If the data for each individual titration is in its own text file and you have a spreadsheet containing the metadata for each titration on separate rows — all formatted as expected — then all you need to do with Calkulate is:
```python
import calkulate as calk
data = calk.read_csv("path/to/metadata_file.csv").calkulate()
data.alkalinity # <== here are your alkalinity results
```
For more detail, see [the online documentation](https://mvdh.xyz/calkulate/).
## About
Calkulate is being developed primarily by [Dr Matthew P. Humphreys](https://www.nioz.nl/en/about/organisation/staff/matthew-humphreys) at the Royal Netherlands Institute for Sea Research ([NIOZ, Texel](https://www.nioz.nl/en)).
## Citation
If you use Calkulate in your work, please cite it as:
> Humphreys, M. P. and Matthews, R. S. (2023). Calkulate: total alkalinity from titration data in Python. *Zenodo.* [doi:10.5281/zenodo.2634304](https://doi.org/10.5281/zenodo.2634304).
Please report which version you are using. To find this out:
```python
import calkulate as calk
calk.hello()
```
## License
Calkulate is licensed under the [GNU General Public License version 3 (GPLv3)](https://www.gnu.org/licenses/gpl-3.0.en.html).
|
PypiClean
|
/snmpfwd_lextudio-0.4.4.tar.gz/snmpfwd_lextudio-0.4.4/snmpfwd/plugins/manager.py
|
import os
import sys
from snmpfwd.plugins.status import *
from snmpfwd import log, error
class PluginManager(object):
def __init__(self, path, progId, apiVer):
self.__path = path
self.__progId = progId
self.__apiVer = apiVer
self.__plugins = {}
def hasPlugin(self, pluginId):
return pluginId in self.__plugins
def loadPlugin(self, pluginId, pluginModuleName, pluginOptions):
if pluginId in self.__plugins:
raise error.SnmpfwdError('duplicate plugin ID %s' % pluginId)
for pluginModulesDir in self.__path:
log.info('scanning "%s" directory for plugin modules...' % pluginModulesDir)
if not os.path.exists(pluginModulesDir):
log.error('directory "%s" does not exist' % pluginModulesDir)
continue
modPath = os.path.join(pluginModulesDir, pluginModuleName + '.py')
if not os.path.exists(modPath):
log.error('plugin module "%s" not found' % modPath)
continue
ctx = {'modulePath': modPath,
'moduleContext': {},
'moduleOptions': pluginOptions}
modData = open(modPath).read()
try:
exec(compile(modData, modPath, 'exec'), ctx)
except Exception:
raise error.SnmpfwdError('plugin module "%s" execution failure: %s' % (modPath, sys.exc_info()[1]))
else:
pluginModule = ctx
try:
if self.__progId not in pluginModule['hostProgs']:
log.error('ignoring plugin module "%s" (unmatched program ID)' % modPath)
continue
if self.__apiVer not in pluginModule['apiVersions']:
log.error('ignoring plugin module "%s" (incompatible API version)' % modPath)
continue
except KeyError:
log.error('ignoring plugin module "%s" (missing versioning info)' % modPath)
continue
self.__plugins[pluginId] = pluginModule
log.info('plugin module "%s" loaded' % modPath)
break
else:
raise error.SnmpfwdError('plugin module "%s" not found in search path(s): %s' % (pluginModuleName, ', '.join(self.__path)))
def processCommandRequest(self, pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx):
if pluginId not in self.__plugins:
log.error('skipping non-existing plugin %s' % pluginId)
return NEXT, pdu
if 'processCommandRequest' not in self.__plugins[pluginId]:
return NEXT, pdu
plugin = self.__plugins[pluginId]['processCommandRequest']
return plugin(pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx)
def processCommandResponse(self, pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx):
if pluginId not in self.__plugins:
log.error('skipping non-existing plugin %s' % pluginId)
return NEXT, pdu
if 'processCommandResponse' not in self.__plugins[pluginId]:
return NEXT, pdu
plugin = self.__plugins[pluginId]['processCommandResponse']
return plugin(pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx)
def processNotificationRequest(self, pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx):
if pluginId not in self.__plugins:
log.error('skipping non-existing plugin %s' % pluginId)
return NEXT, pdu
if 'processNotificationRequest' not in self.__plugins[pluginId]:
return NEXT, pdu
plugin = self.__plugins[pluginId]['processNotificationRequest']
return plugin(pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx)
def processNotificationResponse(self, pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx):
if pluginId not in self.__plugins:
log.error('skipping non-existing plugin %s' % pluginId)
return NEXT, pdu
if 'processNotificationResponse' not in self.__plugins[pluginId]:
return NEXT, pdu
plugin = self.__plugins[pluginId]['processNotificationResponse']
return plugin(pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx)
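# ---------------------------------------------------------------------------
# Hedged sketch (not part of this module): loadPlugin() above exec()s a plugin
# file and then checks its global namespace for 'hostProgs', 'apiVersions' and
# the optional process*() callables. A minimal plugin module that would pass
# those checks could look like the following; the program ID and API version
# values are illustrative assumptions, not documented constants.
#
#     # contents of <plugin-modules-dir>/passthrough.py
#     from snmpfwd.plugins.status import NEXT
#
#     hostProgs = ('snmpfwd-server',)   # must include this process's progId
#     apiVersions = (2,)                # must include the manager's apiVer
#
#     def processCommandRequest(pluginId, snmpEngine, pdu, snmpReqInfo, reqCtx):
#         # pass the PDU through unchanged
#         return NEXT, pdu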
|
PypiClean
|
/riptide_cli-0.8.0b3-py3-none-any.whl/riptide_cli/helpers.py
|
import asyncio
import traceback
from click import style, echo, ClickException
from click._compat import get_text_stderr
from functools import update_wrapper
from schema import SchemaError
def get_is_verbose(ctx):
"""Returns whether or not verbose mode is enabled"""
if hasattr(ctx, "riptide_options"):
return ctx.riptide_options["verbose"]
if hasattr(ctx, "parent"):
if hasattr(ctx.parent, "riptide_options"):
return ctx.parent.riptide_options["verbose"]
return True
class RiptideCliError(ClickException):
"""Custom error class for displaying errors in the Riptide CLI"""
def __init__(self, message, ctx):
super().__init__(message)
self.ctx = ctx
def show(self, file=None):
if self.ctx.resilient_parsing:
return
verbose = get_is_verbose(self.ctx) or file is not None
if file is None:
file = get_text_stderr()
if verbose:
echo(style(traceback.format_exc(), bg='red'), file=file)
else:
echo(style(self.message, bg='red', fg='white', bold=True), file=file)
current_err = self
previous_message = str(self)
while current_err.__context__ is not None:
current_err = current_err.__context__
# Filter duplicate exception messages. 'schema' used by configcrunch does that for example.
if previous_message != str(current_err):
echo(style(f'>> Caused by: {str(current_err)}', bg='red', fg='white'), file=file)
previous_message = str(current_err)
echo()
echo(style('Use -v to show stack traces.', fg='yellow'), file=file)
def __str__(self):
error_string = self.__class__.__name__ + ": " + self.message
if self.__cause__:
error_string += ": " + self.__cause__.__class__.__name__ + ": " + str(self.__cause__)
return error_string
def warn(msg, with_prefix=True):
echo((style('Warning: ', fg='yellow', bold=True) if with_prefix else "") + style(msg, fg='yellow'))
def cli_section(section):
"""
Assigns commands to a section. Must be added as an annotation to commands,
and therefore BEFORE the @click.command.
:param section:
:return:
"""
def decorator(f):
f.riptide_section = section
return f
return decorator
def async_command(interrupt_handler=lambda _, __: True):
"""
Makes a Click command be wrapped inside the execution of an asyncio loop
SOURCE: https://github.com/pallets/click/issues/85
"""
def decorator(f):
def wrapper(ctx, *args, **kwargs):
loop = asyncio.get_event_loop()
try:
return loop.run_until_complete(f(ctx, *args, **kwargs))
except (KeyboardInterrupt, SystemExit) as ex:
interrupt_handler(ctx, ex)
return update_wrapper(wrapper, f)
return decorator
def header(msg, bold=False):
"""Uniform header style"""
return style(msg, bg='cyan', fg='white', bold=bold)
TAB = ' '
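# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a tiny Click command
# built with the helpers above. The command name and body are invented for
# illustration; @click.pass_context is assumed so that the wrapper produced by
# async_command() receives ctx as its first argument.
import click


@click.command()
@click.pass_context
@async_command(interrupt_handler=lambda ctx, ex: warn("Interrupted, shutting down."))
async def example_status(ctx):
    """Pretend to poll something asynchronously, then print a header."""
    await asyncio.sleep(0.1)  # stand-in for real async work
    echo(header("Everything is fine", bold=True))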
|
PypiClean
|
/Python-EasyGraph1-0.2a29.tar.gz/Python-EasyGraph1-0.2a29/easygraph/functions/components/connected.py
|
import easygraph
from easygraph.utils.decorators import only_implemented_for_UnDirected_graph
from threading import Thread
__all__ = [
"is_connected",
"number_connected_components",
"connected_components",
"connected_component_of_node"
]
@only_implemented_for_UnDirected_graph
def is_connected(G):
"""Returns whether the graph is connected or not.
Parameters
----------
G : easygraph.Graph or easygraph.DiGraph
Returns
-------
is_connected : boolean
`True` if the graph is connected.
Examples
--------
>>> is_connected(G)
"""
assert len(G) != 0, "No node in the graph."
arbitrary_node = next(iter(G)) # Pick an arbitrary node to run BFS
return len(G) == sum(1 for node in _plain_bfs(G, arbitrary_node))
@only_implemented_for_UnDirected_graph
def number_connected_components(G):
"""Returns the number of connected components.
Parameters
----------
G : easygraph.Graph
Returns
-------
number_connected_components : int
The number of connected components.
Examples
--------
>>> number_connected_components(G)
"""
return sum(1 for component in _generator_connected_components(G))
@only_implemented_for_UnDirected_graph
def connected_components(G):
"""Returns a list of connected components, each of which denotes the edges set of a connected component.
Parameters
----------
G : easygraph.Graph or easygraph.DiGraph
Returns
-------
connected_components : list of set
Each element is the node set of one connected component.
Examples
--------
>>> connected_components(G)
"""
# Return all components ordered by number of nodes included
all_components = sorted(list(_generator_connected_components(G)), key=len)
return all_components
@only_implemented_for_UnDirected_graph
def _generator_connected_components(G):
seen = set()
for v in G:
if v not in seen:
component = set(_plain_bfs(G, v))
yield component
seen.update(component)
@only_implemented_for_UnDirected_graph
def connected_component_of_node(G, node):
"""Returns the connected component that *node* belongs to.
Parameters
----------
G : easygraph.Graph
node : object
The target node
Returns
-------
connected_component_of_node : set
The connected component that *node* belongs to.
Examples
--------
Returns the connected component of one node `Jack`.
>>> connected_component_of_node(G, node='Jack')
"""
return set(_plain_bfs(G, node))
def _plain_bfs(G, source):
"""
A fast BFS node generator
"""
G_adj = G.adj
seen = set()
nextlevel = {source}
while nextlevel:
thislevel = nextlevel
nextlevel = set()
for v in thislevel:
if v not in seen:
yield v
seen.add(v)
nextlevel.update(G_adj[v])
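# Hedged usage sketch (assumes easygraph's Graph exposes `add_edge`; not verified here):
# G = easygraph.Graph()
# G.add_edge('Jack', 'Maria')
# G.add_edge('Maria', 'Andy')
# G.add_edge('Tom', 'Lily')                 # a second, disconnected component
# is_connected(G)                           # False
# number_connected_components(G)            # 2
# connected_component_of_node(G, 'Jack')    # {'Jack', 'Maria', 'Andy'}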
|
PypiClean
|
/Minetorch-0.6.17.tar.gz/Minetorch-0.6.17/minetorch/metrics.py
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from sklearn.metrics import classification_report, cohen_kappa_score, confusion_matrix
from .plugin import Plugin
class MultiClassesClassificationMetricWithLogic(Plugin):
"""MultiClassesClassificationMetric
This can be used directly if your loss function is torch.nn.CrossEntropy
"""
def __init__(
self,
accuracy=True,
confusion_matrix=True,
kappa_score=True,
plot_confusion_matrix=True,
classification_report=True,
sheet_key_prefix="",
):
super().__init__(sheet_key_prefix)
self.accuracy = accuracy
self.confusion_matrix = confusion_matrix
self.kappa_score = kappa_score
self.plot_confusion_matrix = plot_confusion_matrix
self.classification_report = classification_report
self.sheet_key_prefix = sheet_key_prefix
def before_init(self):
self.create_sheet_column("latest_confusion_matrix", "Latest Confusion Matrix")
self.create_sheet_column("kappa_score", "Kappa Score")
self.create_sheet_column("accuracy", "Accuracy")
def before_epoch_start(self, epoch):
self.raw_output = []
self.predicts = []
self.targets = []
def after_val_iteration_ended(self, predicts, data, **ignore):
raw_output = predicts.detach().cpu().numpy()
predicts = np.argmax(raw_output, axis=1)
targets = data[1].cpu().numpy()
self.raw_output.append(raw_output)
self.predicts.append(predicts)
self.targets.append(targets)
def after_epoch_end(self, val_loss, **ignore):
self.predicts = np.concatenate(self.predicts)
self.targets = np.concatenate(self.targets)
self._save_results()
self.accuracy and self._accuracy()
self.confusion_matrix and self._confusion_matrix()
self.kappa_score and self._kappa_score()
self.classification_report and self._classification_report()
self.plot_confusion_matrix and self._plot_confusion_matrix(val_loss)
def _classification_report(self):
result = classification_report(self.targets, self.predicts)
self.print_txt(result, "classification_report")
def _plot_confusion_matrix(self, val_loss):
matrix = confusion_matrix(self.targets, self.predicts)
df_cm = pd.DataFrame(matrix)
svm = sn.heatmap(df_cm, annot=True, cmap="OrRd", fmt=".3g")
figure = svm.get_figure()
if val_loss < self.lowest_val_loss:
figure.savefig(
self.plugin_file("confusion_matrix_epoch_best.png"), facecolor="#F0FFFC"
)
figure.savefig(
self.plugin_file(f"confusion_matrix_epoch_{self.current_epoch}.png"),
facecolor="#F0FFFC",
)
figure.savefig(
self.plugin_file("confusion_matrix_epoch_latest.png"), facecolor="#F0FFFC"
)
plt.clf()
self.update_sheet(
"latest_confusion_matrix",
{
"raw": self.plugin_file("confusion_matrix_epoch_latest.png"),
"processor": "upload_image",
},
)
def _accuracy(self):
png_file = self.scalars(
{"accuracy": (self.predicts == self.targets).sum() / len(self.predicts)},
"accuracy",
)
if png_file:
self.update_sheet(
"accuracy", {"raw": png_file, "processor": "upload_image"}
)
def _confusion_matrix(self):
matrix = confusion_matrix(self.targets, self.predicts)
self.print_txt(matrix, "confusion_matrix")
def _kappa_score(self):
png_file = self.scalars(
{
"kappa_score": cohen_kappa_score(
self.targets, self.predicts, weights="quadratic"
)
},
"kappa_score",
)
if png_file:
self.update_sheet(
"kappa_score", {"raw": png_file, "processor": "upload_image"}
)
def _save_results(self):
file_name = self.plugin_file(f"result.{self.current_epoch}.npz")
raw_output = np.concatenate(self.raw_output)
np.savez_compressed(file_name, predicts=self.predicts, targets=self.targets, raw_output=raw_output)
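# Hedged usage sketch: the plugin is instantiated like any other Minetorch plugin and is
# expected to be handed to the trainer; the exact trainer wiring below is an assumption.
# metric = MultiClassesClassificationMetricWithLogic(kappa_score=False, sheet_key_prefix="val_")
# trainer = minetorch.Miner(..., plugins=[metric])   # hypothetical wiring, not verified here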
|
PypiClean
|
/fonttools-4.42.1-cp39-cp39-macosx_10_9_x86_64.whl/fontTools/pens/momentsPen.py
|
from fontTools.pens.basePen import BasePen, OpenContourError
try:
import cython
COMPILED = cython.compiled
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
COMPILED = False
__all__ = ["MomentsPen"]
class MomentsPen(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
self.area = 0
self.momentX = 0
self.momentY = 0
self.momentXX = 0
self.momentXY = 0
self.momentYY = 0
def _moveTo(self, p0):
self.__startPoint = p0
def _closePath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
self._lineTo(self.__startPoint)
def _endPath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
# Green's theorem is not defined on open contours.
raise OpenContourError("Green theorem is not defined on open contours.")
@cython.locals(r0=cython.double)
@cython.locals(r1=cython.double)
@cython.locals(r2=cython.double)
@cython.locals(r3=cython.double)
@cython.locals(r4=cython.double)
@cython.locals(r5=cython.double)
@cython.locals(r6=cython.double)
@cython.locals(r7=cython.double)
@cython.locals(r8=cython.double)
@cython.locals(r9=cython.double)
@cython.locals(r10=cython.double)
@cython.locals(r11=cython.double)
@cython.locals(r12=cython.double)
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
def _lineTo(self, p1):
x0, y0 = self._getCurrentPoint()
x1, y1 = p1
r0 = x1 * y0
r1 = x1 * y1
r2 = x1**2
r3 = r2 * y1
r4 = y0 - y1
r5 = r4 * x0
r6 = x0**2
r7 = 2 * y0
r8 = y0**2
r9 = y1**2
r10 = x1**3
r11 = y0**3
r12 = y1**3
self.area += -r0 / 2 - r1 / 2 + x0 * (y0 + y1) / 2
self.momentX += -r2 * y0 / 6 - r3 / 3 - r5 * x1 / 6 + r6 * (r7 + y1) / 6
self.momentY += (
-r0 * y1 / 6 - r8 * x1 / 6 - r9 * x1 / 6 + x0 * (r8 + r9 + y0 * y1) / 6
)
self.momentXX += (
-r10 * y0 / 12
- r10 * y1 / 4
- r2 * r5 / 12
- r4 * r6 * x1 / 12
+ x0**3 * (3 * y0 + y1) / 12
)
self.momentXY += (
-r2 * r8 / 24
- r2 * r9 / 8
- r3 * r7 / 24
+ r6 * (r7 * y1 + 3 * r8 + r9) / 24
- x0 * x1 * (r8 - r9) / 12
)
self.momentYY += (
-r0 * r9 / 12
- r1 * r8 / 12
- r11 * x1 / 12
- r12 * x1 / 12
+ x0 * (r11 + r12 + r8 * y1 + r9 * y0) / 12
)
@cython.locals(r0=cython.double)
@cython.locals(r1=cython.double)
@cython.locals(r2=cython.double)
@cython.locals(r3=cython.double)
@cython.locals(r4=cython.double)
@cython.locals(r5=cython.double)
@cython.locals(r6=cython.double)
@cython.locals(r7=cython.double)
@cython.locals(r8=cython.double)
@cython.locals(r9=cython.double)
@cython.locals(r10=cython.double)
@cython.locals(r11=cython.double)
@cython.locals(r12=cython.double)
@cython.locals(r13=cython.double)
@cython.locals(r14=cython.double)
@cython.locals(r15=cython.double)
@cython.locals(r16=cython.double)
@cython.locals(r17=cython.double)
@cython.locals(r18=cython.double)
@cython.locals(r19=cython.double)
@cython.locals(r20=cython.double)
@cython.locals(r21=cython.double)
@cython.locals(r22=cython.double)
@cython.locals(r23=cython.double)
@cython.locals(r24=cython.double)
@cython.locals(r25=cython.double)
@cython.locals(r26=cython.double)
@cython.locals(r27=cython.double)
@cython.locals(r28=cython.double)
@cython.locals(r29=cython.double)
@cython.locals(r30=cython.double)
@cython.locals(r31=cython.double)
@cython.locals(r32=cython.double)
@cython.locals(r33=cython.double)
@cython.locals(r34=cython.double)
@cython.locals(r35=cython.double)
@cython.locals(r36=cython.double)
@cython.locals(r37=cython.double)
@cython.locals(r38=cython.double)
@cython.locals(r39=cython.double)
@cython.locals(r40=cython.double)
@cython.locals(r41=cython.double)
@cython.locals(r42=cython.double)
@cython.locals(r43=cython.double)
@cython.locals(r44=cython.double)
@cython.locals(r45=cython.double)
@cython.locals(r46=cython.double)
@cython.locals(r47=cython.double)
@cython.locals(r48=cython.double)
@cython.locals(r49=cython.double)
@cython.locals(r50=cython.double)
@cython.locals(r51=cython.double)
@cython.locals(r52=cython.double)
@cython.locals(r53=cython.double)
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
def _qCurveToOne(self, p1, p2):
x0, y0 = self._getCurrentPoint()
x1, y1 = p1
x2, y2 = p2
r0 = 2 * y1
r1 = r0 * x2
r2 = x2 * y2
r3 = 3 * r2
r4 = 2 * x1
r5 = 3 * y0
r6 = x1**2
r7 = x2**2
r8 = 4 * y1
r9 = 10 * y2
r10 = 2 * y2
r11 = r4 * x2
r12 = x0**2
r13 = 10 * y0
r14 = r4 * y2
r15 = x2 * y0
r16 = 4 * x1
r17 = r0 * x1 + r2
r18 = r2 * r8
r19 = y1**2
r20 = 2 * r19
r21 = y2**2
r22 = r21 * x2
r23 = 5 * r22
r24 = y0**2
r25 = y0 * y2
r26 = 5 * r24
r27 = x1**3
r28 = x2**3
r29 = 30 * y1
r30 = 6 * y1
r31 = 10 * r7 * x1
r32 = 5 * y2
r33 = 12 * r6
r34 = 30 * x1
r35 = x1 * y1
r36 = r3 + 20 * r35
r37 = 12 * x1
r38 = 20 * r6
r39 = 8 * r6 * y1
r40 = r32 * r7
r41 = 60 * y1
r42 = 20 * r19
r43 = 4 * r19
r44 = 15 * r21
r45 = 12 * x2
r46 = 12 * y2
r47 = 6 * x1
r48 = 8 * r19 * x1 + r23
r49 = 8 * y1**3
r50 = y2**3
r51 = y0**3
r52 = 10 * y1
r53 = 12 * y1
self.area += (
-r1 / 6
- r3 / 6
+ x0 * (r0 + r5 + y2) / 6
+ x1 * y2 / 3
- y0 * (r4 + x2) / 6
)
self.momentX += (
-r11 * (-r10 + y1) / 30
+ r12 * (r13 + r8 + y2) / 30
+ r6 * y2 / 15
- r7 * r8 / 30
- r7 * r9 / 30
+ x0 * (r14 - r15 - r16 * y0 + r17) / 30
- y0 * (r11 + 2 * r6 + r7) / 30
)
self.momentY += (
-r18 / 30
- r20 * x2 / 30
- r23 / 30
- r24 * (r16 + x2) / 30
+ x0 * (r0 * y2 + r20 + r21 + r25 + r26 + r8 * y0) / 30
+ x1 * y2 * (r10 + y1) / 15
- y0 * (r1 + r17) / 30
)
self.momentXX += (
r12 * (r1 - 5 * r15 - r34 * y0 + r36 + r9 * x1) / 420
+ 2 * r27 * y2 / 105
- r28 * r29 / 420
- r28 * y2 / 4
- r31 * (r0 - 3 * y2) / 420
- r6 * x2 * (r0 - r32) / 105
+ x0**3 * (r30 + 21 * y0 + y2) / 84
- x0
* (
r0 * r7
+ r15 * r37
- r2 * r37
- r33 * y2
+ r38 * y0
- r39
- r40
+ r5 * r7
)
/ 420
- y0 * (8 * r27 + 5 * r28 + r31 + r33 * x2) / 420
)
self.momentXY += (
r12 * (r13 * y2 + 3 * r21 + 105 * r24 + r41 * y0 + r42 + r46 * y1) / 840
- r16 * x2 * (r43 - r44) / 840
- r21 * r7 / 8
- r24 * (r38 + r45 * x1 + 3 * r7) / 840
- r41 * r7 * y2 / 840
- r42 * r7 / 840
+ r6 * y2 * (r32 + r8) / 210
+ x0
* (
-r15 * r8
+ r16 * r25
+ r18
+ r21 * r47
- r24 * r34
- r26 * x2
+ r35 * r46
+ r48
)
/ 420
- y0 * (r16 * r2 + r30 * r7 + r35 * r45 + r39 + r40) / 420
)
self.momentYY += (
-r2 * r42 / 420
- r22 * r29 / 420
- r24 * (r14 + r36 + r52 * x2) / 420
- r49 * x2 / 420
- r50 * x2 / 12
- r51 * (r47 + x2) / 84
+ x0
* (
r19 * r46
+ r21 * r5
+ r21 * r52
+ r24 * r29
+ r25 * r53
+ r26 * y2
+ r42 * y0
+ r49
+ 5 * r50
+ 35 * r51
)
/ 420
+ x1 * y2 * (r43 + r44 + r9 * y1) / 210
- y0 * (r19 * r45 + r2 * r53 - r21 * r4 + r48) / 420
)
@cython.locals(r0=cython.double)
@cython.locals(r1=cython.double)
@cython.locals(r2=cython.double)
@cython.locals(r3=cython.double)
@cython.locals(r4=cython.double)
@cython.locals(r5=cython.double)
@cython.locals(r6=cython.double)
@cython.locals(r7=cython.double)
@cython.locals(r8=cython.double)
@cython.locals(r9=cython.double)
@cython.locals(r10=cython.double)
@cython.locals(r11=cython.double)
@cython.locals(r12=cython.double)
@cython.locals(r13=cython.double)
@cython.locals(r14=cython.double)
@cython.locals(r15=cython.double)
@cython.locals(r16=cython.double)
@cython.locals(r17=cython.double)
@cython.locals(r18=cython.double)
@cython.locals(r19=cython.double)
@cython.locals(r20=cython.double)
@cython.locals(r21=cython.double)
@cython.locals(r22=cython.double)
@cython.locals(r23=cython.double)
@cython.locals(r24=cython.double)
@cython.locals(r25=cython.double)
@cython.locals(r26=cython.double)
@cython.locals(r27=cython.double)
@cython.locals(r28=cython.double)
@cython.locals(r29=cython.double)
@cython.locals(r30=cython.double)
@cython.locals(r31=cython.double)
@cython.locals(r32=cython.double)
@cython.locals(r33=cython.double)
@cython.locals(r34=cython.double)
@cython.locals(r35=cython.double)
@cython.locals(r36=cython.double)
@cython.locals(r37=cython.double)
@cython.locals(r38=cython.double)
@cython.locals(r39=cython.double)
@cython.locals(r40=cython.double)
@cython.locals(r41=cython.double)
@cython.locals(r42=cython.double)
@cython.locals(r43=cython.double)
@cython.locals(r44=cython.double)
@cython.locals(r45=cython.double)
@cython.locals(r46=cython.double)
@cython.locals(r47=cython.double)
@cython.locals(r48=cython.double)
@cython.locals(r49=cython.double)
@cython.locals(r50=cython.double)
@cython.locals(r51=cython.double)
@cython.locals(r52=cython.double)
@cython.locals(r53=cython.double)
@cython.locals(r54=cython.double)
@cython.locals(r55=cython.double)
@cython.locals(r56=cython.double)
@cython.locals(r57=cython.double)
@cython.locals(r58=cython.double)
@cython.locals(r59=cython.double)
@cython.locals(r60=cython.double)
@cython.locals(r61=cython.double)
@cython.locals(r62=cython.double)
@cython.locals(r63=cython.double)
@cython.locals(r64=cython.double)
@cython.locals(r65=cython.double)
@cython.locals(r66=cython.double)
@cython.locals(r67=cython.double)
@cython.locals(r68=cython.double)
@cython.locals(r69=cython.double)
@cython.locals(r70=cython.double)
@cython.locals(r71=cython.double)
@cython.locals(r72=cython.double)
@cython.locals(r73=cython.double)
@cython.locals(r74=cython.double)
@cython.locals(r75=cython.double)
@cython.locals(r76=cython.double)
@cython.locals(r77=cython.double)
@cython.locals(r78=cython.double)
@cython.locals(r79=cython.double)
@cython.locals(r80=cython.double)
@cython.locals(r81=cython.double)
@cython.locals(r82=cython.double)
@cython.locals(r83=cython.double)
@cython.locals(r84=cython.double)
@cython.locals(r85=cython.double)
@cython.locals(r86=cython.double)
@cython.locals(r87=cython.double)
@cython.locals(r88=cython.double)
@cython.locals(r89=cython.double)
@cython.locals(r90=cython.double)
@cython.locals(r91=cython.double)
@cython.locals(r92=cython.double)
@cython.locals(r93=cython.double)
@cython.locals(r94=cython.double)
@cython.locals(r95=cython.double)
@cython.locals(r96=cython.double)
@cython.locals(r97=cython.double)
@cython.locals(r98=cython.double)
@cython.locals(r99=cython.double)
@cython.locals(r100=cython.double)
@cython.locals(r101=cython.double)
@cython.locals(r102=cython.double)
@cython.locals(r103=cython.double)
@cython.locals(r104=cython.double)
@cython.locals(r105=cython.double)
@cython.locals(r106=cython.double)
@cython.locals(r107=cython.double)
@cython.locals(r108=cython.double)
@cython.locals(r109=cython.double)
@cython.locals(r110=cython.double)
@cython.locals(r111=cython.double)
@cython.locals(r112=cython.double)
@cython.locals(r113=cython.double)
@cython.locals(r114=cython.double)
@cython.locals(r115=cython.double)
@cython.locals(r116=cython.double)
@cython.locals(r117=cython.double)
@cython.locals(r118=cython.double)
@cython.locals(r119=cython.double)
@cython.locals(r120=cython.double)
@cython.locals(r121=cython.double)
@cython.locals(r122=cython.double)
@cython.locals(r123=cython.double)
@cython.locals(r124=cython.double)
@cython.locals(r125=cython.double)
@cython.locals(r126=cython.double)
@cython.locals(r127=cython.double)
@cython.locals(r128=cython.double)
@cython.locals(r129=cython.double)
@cython.locals(r130=cython.double)
@cython.locals(r131=cython.double)
@cython.locals(r132=cython.double)
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
@cython.locals(x3=cython.double, y3=cython.double)
def _curveToOne(self, p1, p2, p3):
x0, y0 = self._getCurrentPoint()
x1, y1 = p1
x2, y2 = p2
x3, y3 = p3
r0 = 6 * y2
r1 = r0 * x3
r2 = 10 * y3
r3 = r2 * x3
r4 = 3 * y1
r5 = 6 * x1
r6 = 3 * x2
r7 = 6 * y1
r8 = 3 * y2
r9 = x2**2
r10 = 45 * r9
r11 = r10 * y3
r12 = x3**2
r13 = r12 * y2
r14 = r12 * y3
r15 = 7 * y3
r16 = 15 * x3
r17 = r16 * x2
r18 = x1**2
r19 = 9 * r18
r20 = x0**2
r21 = 21 * y1
r22 = 9 * r9
r23 = r7 * x3
r24 = 9 * y2
r25 = r24 * x2 + r3
r26 = 9 * x2
r27 = x2 * y3
r28 = -r26 * y1 + 15 * r27
r29 = 3 * x1
r30 = 45 * x1
r31 = 12 * x3
r32 = 45 * r18
r33 = 5 * r12
r34 = r8 * x3
r35 = 105 * y0
r36 = 30 * y0
r37 = r36 * x2
r38 = 5 * x3
r39 = 15 * y3
r40 = 5 * y3
r41 = r40 * x3
r42 = x2 * y2
r43 = 18 * r42
r44 = 45 * y1
r45 = r41 + r43 + r44 * x1
r46 = y2 * y3
r47 = r46 * x3
r48 = y2**2
r49 = 45 * r48
r50 = r49 * x3
r51 = y3**2
r52 = r51 * x3
r53 = y1**2
r54 = 9 * r53
r55 = y0**2
r56 = 21 * x1
r57 = 6 * x2
r58 = r16 * y2
r59 = r39 * y2
r60 = 9 * r48
r61 = r6 * y3
r62 = 3 * y3
r63 = r36 * y2
r64 = y1 * y3
r65 = 45 * r53
r66 = 5 * r51
r67 = x2**3
r68 = x3**3
r69 = 630 * y2
r70 = 126 * x3
r71 = x1**3
r72 = 126 * x2
r73 = 63 * r9
r74 = r73 * x3
r75 = r15 * x3 + 15 * r42
r76 = 630 * x1
r77 = 14 * x3
r78 = 21 * r27
r79 = 42 * x1
r80 = 42 * x2
r81 = x1 * y2
r82 = 63 * r42
r83 = x1 * y1
r84 = r41 + r82 + 378 * r83
r85 = x2 * x3
r86 = r85 * y1
r87 = r27 * x3
r88 = 27 * r9
r89 = r88 * y2
r90 = 42 * r14
r91 = 90 * x1
r92 = 189 * r18
r93 = 378 * r18
r94 = r12 * y1
r95 = 252 * x1 * x2
r96 = r79 * x3
r97 = 30 * r85
r98 = r83 * x3
r99 = 30 * x3
r100 = 42 * x3
r101 = r42 * x1
r102 = r10 * y2 + 14 * r14 + 126 * r18 * y1 + r81 * r99
r103 = 378 * r48
r104 = 18 * y1
r105 = r104 * y2
r106 = y0 * y1
r107 = 252 * y2
r108 = r107 * y0
r109 = y0 * y3
r110 = 42 * r64
r111 = 378 * r53
r112 = 63 * r48
r113 = 27 * x2
r114 = r27 * y2
r115 = r113 * r48 + 42 * r52
r116 = x3 * y3
r117 = 54 * r42
r118 = r51 * x1
r119 = r51 * x2
r120 = r48 * x1
r121 = 21 * x3
r122 = r64 * x1
r123 = r81 * y3
r124 = 30 * r27 * y1 + r49 * x2 + 14 * r52 + 126 * r53 * x1
r125 = y2**3
r126 = y3**3
r127 = y1**3
r128 = y0**3
r129 = r51 * y2
r130 = r112 * y3 + r21 * r51
r131 = 189 * r53
r132 = 90 * y2
self.area += (
-r1 / 20
- r3 / 20
- r4 * (x2 + x3) / 20
+ x0 * (r7 + r8 + 10 * y0 + y3) / 20
+ 3 * x1 * (y2 + y3) / 20
+ 3 * x2 * y3 / 10
- y0 * (r5 + r6 + x3) / 20
)
self.momentX += (
r11 / 840
- r13 / 8
- r14 / 3
- r17 * (-r15 + r8) / 840
+ r19 * (r8 + 2 * y3) / 840
+ r20 * (r0 + r21 + 56 * y0 + y3) / 168
+ r29 * (-r23 + r25 + r28) / 840
- r4 * (10 * r12 + r17 + r22) / 840
+ x0
* (
12 * r27
+ r30 * y2
+ r34
- r35 * x1
- r37
- r38 * y0
+ r39 * x1
- r4 * x3
+ r45
)
/ 840
- y0 * (r17 + r30 * x2 + r31 * x1 + r32 + r33 + 18 * r9) / 840
)
self.momentY += (
-r4 * (r25 + r58) / 840
- r47 / 8
- r50 / 840
- r52 / 6
- r54 * (r6 + 2 * x3) / 840
- r55 * (r56 + r57 + x3) / 168
+ x0
* (
r35 * y1
+ r40 * y0
+ r44 * y2
+ 18 * r48
+ 140 * r55
+ r59
+ r63
+ 12 * r64
+ r65
+ r66
)
/ 840
+ x1 * (r24 * y1 + 10 * r51 + r59 + r60 + r7 * y3) / 280
+ x2 * y3 * (r15 + r8) / 56
- y0 * (r16 * y1 + r31 * y2 + r44 * x2 + r45 + r61 - r62 * x1) / 840
)
self.momentXX += (
-r12 * r72 * (-r40 + r8) / 9240
+ 3 * r18 * (r28 + r34 - r38 * y1 + r75) / 3080
+ r20
* (
r24 * x3
- r72 * y0
- r76 * y0
- r77 * y0
+ r78
+ r79 * y3
+ r80 * y1
+ 210 * r81
+ r84
)
/ 9240
- r29
* (
r12 * r21
+ 14 * r13
+ r44 * r9
- r73 * y3
+ 54 * r86
- 84 * r87
- r89
- r90
)
/ 9240
- r4 * (70 * r12 * x2 + 27 * r67 + 42 * r68 + r74) / 9240
+ 3 * r67 * y3 / 220
- r68 * r69 / 9240
- r68 * y3 / 4
- r70 * r9 * (-r62 + y2) / 9240
+ 3 * r71 * (r24 + r40) / 3080
+ x0**3 * (r24 + r44 + 165 * y0 + y3) / 660
+ x0
* (
r100 * r27
+ 162 * r101
+ r102
+ r11
+ 63 * r18 * y3
+ r27 * r91
- r33 * y0
- r37 * x3
+ r43 * x3
- r73 * y0
- r88 * y1
+ r92 * y2
- r93 * y0
- 9 * r94
- r95 * y0
- r96 * y0
- r97 * y1
- 18 * r98
+ r99 * x1 * y3
)
/ 9240
- y0
* (
r12 * r56
+ r12 * r80
+ r32 * x3
+ 45 * r67
+ 14 * r68
+ 126 * r71
+ r74
+ r85 * r91
+ 135 * r9 * x1
+ r92 * x2
)
/ 9240
)
self.momentXY += (
-r103 * r12 / 18480
- r12 * r51 / 8
- 3 * r14 * y2 / 44
+ 3 * r18 * (r105 + r2 * y1 + 18 * r46 + 15 * r48 + 7 * r51) / 6160
+ r20
* (
1260 * r106
+ r107 * y1
+ r108
+ 28 * r109
+ r110
+ r111
+ r112
+ 30 * r46
+ 2310 * r55
+ r66
)
/ 18480
- r54 * (7 * r12 + 18 * r85 + 15 * r9) / 18480
- r55 * (r33 + r73 + r93 + r95 + r96 + r97) / 18480
- r7 * (42 * r13 + r82 * x3 + 28 * r87 + r89 + r90) / 18480
- 3 * r85 * (r48 - r66) / 220
+ 3 * r9 * y3 * (r62 + 2 * y2) / 440
+ x0
* (
-r1 * y0
- 84 * r106 * x2
+ r109 * r56
+ 54 * r114
+ r117 * y1
+ 15 * r118
+ 21 * r119
+ 81 * r120
+ r121 * r46
+ 54 * r122
+ 60 * r123
+ r124
- r21 * x3 * y0
+ r23 * y3
- r54 * x3
- r55 * r72
- r55 * r76
- r55 * r77
+ r57 * y0 * y3
+ r60 * x3
+ 84 * r81 * y0
+ 189 * r81 * y1
)
/ 9240
+ x1
* (
r104 * r27
- r105 * x3
- r113 * r53
+ 63 * r114
+ r115
- r16 * r53
+ 28 * r47
+ r51 * r80
)
/ 3080
- y0
* (
54 * r101
+ r102
+ r116 * r5
+ r117 * x3
+ 21 * r13
- r19 * y3
+ r22 * y3
+ r78 * x3
+ 189 * r83 * x2
+ 60 * r86
+ 81 * r9 * y1
+ 15 * r94
+ 54 * r98
)
/ 9240
)
self.momentYY += (
-r103 * r116 / 9240
- r125 * r70 / 9240
- r126 * x3 / 12
- 3 * r127 * (r26 + r38) / 3080
- r128 * (r26 + r30 + x3) / 660
- r4 * (r112 * x3 + r115 - 14 * r119 + 84 * r47) / 9240
- r52 * r69 / 9240
- r54 * (r58 + r61 + r75) / 9240
- r55
* (r100 * y1 + r121 * y2 + r26 * y3 + r79 * y2 + r84 + 210 * x2 * y1)
/ 9240
+ x0
* (
r108 * y1
+ r110 * y0
+ r111 * y0
+ r112 * y0
+ 45 * r125
+ 14 * r126
+ 126 * r127
+ 770 * r128
+ 42 * r129
+ r130
+ r131 * y2
+ r132 * r64
+ 135 * r48 * y1
+ 630 * r55 * y1
+ 126 * r55 * y2
+ 14 * r55 * y3
+ r63 * y3
+ r65 * y3
+ r66 * y0
)
/ 9240
+ x1
* (
27 * r125
+ 42 * r126
+ 70 * r129
+ r130
+ r39 * r53
+ r44 * r48
+ 27 * r53 * y2
+ 54 * r64 * y2
)
/ 3080
+ 3 * x2 * y3 * (r48 + r66 + r8 * y3) / 220
- y0
* (
r100 * r46
+ 18 * r114
- 9 * r118
- 27 * r120
- 18 * r122
- 30 * r123
+ r124
+ r131 * x2
+ r132 * x3 * y1
+ 162 * r42 * y1
+ r50
+ 63 * r53 * x3
+ r64 * r99
)
/ 9240
)
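# Hedged usage sketch: MomentsPen follows the standard BasePen drawing interface, so a
# closed contour can be fed manually and the accumulated Green-theorem integrals read back.
# For the counter-clockwise 100x100 square below, pen.area should come out as 10000.0
# (the sign flips if the contour is wound the other way).
# pen = MomentsPen()
# pen.moveTo((0, 0))
# pen.lineTo((100, 0))
# pen.lineTo((100, 100))
# pen.lineTo((0, 100))
# pen.closePath()
# pen.area, pen.momentX, pen.momentY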
if __name__ == "__main__":
from fontTools.misc.symfont import x, y, printGreenPen
printGreenPen(
"MomentsPen",
[
("area", 1),
("momentX", x),
("momentY", y),
("momentXX", x**2),
("momentXY", x * y),
("momentYY", y**2),
],
)
|
PypiClean
|
/vns_web3-0.0.2.tar.gz/vns_web3-0.0.2/vns_web3/web3/_utils/threads.py
|
import threading
import time
class Timeout(Exception):
"""
A limited subset of the `gevent.Timeout` context manager.
"""
seconds = None
exception = None
begun_at = None
is_running = None
def __init__(self, seconds=None, exception=None, *args, **kwargs):
self.seconds = seconds
self.exception = exception
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def __str__(self):
if self.seconds is None:
return ''
return "{0} seconds".format(self.seconds)
@property
def expire_at(self):
if self.seconds is None:
raise ValueError("Timeouts with `seconds == None` do not have an expiration time")
elif self.begun_at is None:
raise ValueError("Timeout has not been started")
return self.begun_at + self.seconds
def start(self):
if self.is_running is not None:
raise ValueError("Timeout has already been started")
self.begun_at = time.time()
self.is_running = True
def check(self):
if self.is_running is None:
raise ValueError("Timeout has not been started")
elif self.is_running is False:
raise ValueError("Timeout has already been cancelled")
elif self.seconds is None:
return
elif time.time() > self.expire_at:
self.is_running = False
if isinstance(self.exception, type):
raise self.exception(str(self))
elif isinstance(self.exception, Exception):
raise self.exception
else:
raise self
def cancel(self):
self.is_running = False
def sleep(self, seconds):
time.sleep(seconds)
self.check()
class ThreadWithReturn(threading.Thread):
def __init__(self, target=None, args=None, kwargs=None):
super().__init__(
target=target,
args=args or tuple(),
kwargs=kwargs or {},
)
self.target = target
self.args = args
self.kwargs = kwargs
def run(self):
self._return = self.target(*self.args, **self.kwargs)
def get(self, timeout=None):
self.join(timeout)
try:
return self._return
except AttributeError:
raise RuntimeError("Something went wrong. No `_return` property was set")
class TimerClass(threading.Thread):
def __init__(self, interval, callback, *args):
threading.Thread.__init__(self)
self.callback = callback
self.terminate_event = threading.Event()
self.interval = interval
self.args = args
def run(self):
while not self.terminate_event.is_set():
self.callback(*self.args)
self.terminate_event.wait(self.interval)
def stop(self):
self.terminate_event.set()
def spawn(target, *args, thread_class=ThreadWithReturn, **kwargs):
thread = thread_class(
target=target,
args=args,
kwargs=kwargs,
)
thread.daemon = True
thread.start()
return thread
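# Hedged usage sketch of the helpers above (standard library only; `still_waiting` is a
# hypothetical predicate used purely for illustration):
# with Timeout(0.5, exception=TimeoutError("deadline exceeded")) as timeout:
#     while still_waiting():
#         timeout.sleep(0.1)          # sleeps, then raises TimeoutError once 0.5s have passed
#
# worker = spawn(lambda a, b: a + b, 1, 2)
# worker.get(timeout=1)               # -> 3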
|
PypiClean
|
/sdnn-cl-2.2.0.tar.gz/sdnn-cl-2.2.0/tvm/topi/testing/conv3d_transpose_ncdhw_python.py
|
"""Convolution 3D transpose in python"""
import numpy as np
import tvm.topi.testing
from tvm.topi.nn.utils import get_pad_tuple3d
def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding, output_padding):
"""Transposed 3d convolution operator in NCDHW layout.
Parameters
----------
a_np : numpy.ndarray
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
w_np : numpy.ndarray
5-D with shape [in_channel, num_filter, filter_depth, filter_height, filter_width]
stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str
Padding size
output_padding : int or list/tuple of three ints
Used to disambiguate output shape.
Returns
-------
b_np : np.ndarray
5-D with shape [batch, out_channel, out_depth, out_height, out_width]
"""
batch, in_c, in_d, in_h, in_w = a_np.shape
_, out_c, filter_d, filter_h, filter_w = w_np.shape
if isinstance(stride, int):
stride_d = stride_h = stride_w = stride
else:
stride_d, stride_h, stride_w = stride
if isinstance(output_padding, int):
opad_d = opad_h = opad_w = output_padding
else:
opad_d, opad_h, opad_w = output_padding
assert opad_d < stride_d and opad_h < stride_h and opad_w < stride_w
# dilate stage
dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w])
# padding stage
fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d(
padding, (filter_d, filter_h, filter_w)
)
bpad_front = filter_d - 1 - fpad_front
bpad_back = filter_d - 1 - fpad_back + opad_d
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
padded_a_np = np.zeros(
(
batch,
in_c,
dilated_a_np.shape[2] + bpad_front + bpad_back,
dilated_a_np.shape[3] + bpad_top + bpad_bottom,
dilated_a_np.shape[4] + bpad_left + bpad_right,
)
)
padded_a_np[
:,
:,
bpad_front : dilated_a_np.shape[2] + bpad_front,
bpad_top : dilated_a_np.shape[3] + bpad_top,
bpad_left : dilated_a_np.shape[4] + bpad_left,
] = dilated_a_np
# convolution stage
out_d = (in_d - 1) * stride_d - bpad_front - bpad_back + filter_d
out_h = (in_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h
out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w
w_np = np.flip(w_np, axis=[2, 3, 4]).transpose((1, 0, 2, 3, 4))
b_np = tvm.topi.testing.conv3d_ncdhw_python(
padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0)
)
return b_np
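# Hedged usage sketch (assumes TVM's topi.testing helpers used above are importable):
# a = np.random.uniform(size=(1, 3, 8, 8, 8)).astype("float32")
# w = np.random.uniform(size=(3, 4, 3, 3, 3)).astype("float32")
# out = conv3d_transpose_ncdhw_python(a, w, stride=(2, 2, 2), padding=(1, 1, 1), output_padding=(1, 1, 1))
# out.shape  # -> (1, 4, 16, 16, 16): (in - 1) * stride - 2 * pad + kernel + output_padding per spatial dim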
|
PypiClean
|
/autogluon.tabular-0.7.0b20230217-py3-none-any.whl/autogluon/tabular/configs/config_helper.py
|
from __future__ import annotations
import copy
from typing import Union
from sklearn.base import BaseEstimator
from autogluon.core.scheduler import scheduler_factory
from autogluon.features import AutoMLPipelineFeatureGenerator
from autogluon.tabular.configs.hyperparameter_configs import hyperparameter_config_dict
from autogluon.tabular.configs.presets_configs import tabular_presets_dict
from autogluon.tabular.trainer.model_presets.presets import MODEL_TYPES
class FeatureGeneratorBuilder:
def __init__(self, parent=None):
self.parent = parent
self.config = {}
def enable_numeric_features(self, value: bool = True) -> FeatureGeneratorBuilder:
"""
Whether to keep features of 'int' and 'float' raw types.
These features are passed without alteration to the models.
Appends IdentityFeatureGenerator(infer_features_in_args=dict(valid_raw_types=['int', 'float'])) to the generator group.
"""
self.config['enable_numeric_features'] = value
return self
def enable_categorical_features(self, value: bool = True) -> FeatureGeneratorBuilder:
"""
Whether to keep features of 'object' and 'category' raw types.
These features are processed into memory optimized 'category' features.
Appends CategoryFeatureGenerator() to the generator group.
"""
self.config['enable_categorical_features'] = value
return self
def enable_datetime_features(self, value: bool = True) -> FeatureGeneratorBuilder:
"""
Whether to keep features of 'datetime' raw type and 'object' features identified as 'datetime_as_object' features.
These features will be converted to 'int' features representing milliseconds since epoch.
Appends DatetimeFeatureGenerator() to the generator group.
"""
self.config['enable_datetime_features'] = value
return self
def enable_text_special_features(self, value: bool = True) -> FeatureGeneratorBuilder:
"""
Whether to use 'object' features identified as 'text' features to generate 'text_special' features such as word count, capital letter ratio, and symbol counts.
Appends TextSpecialFeatureGenerator() to the generator group.
"""
self.config['enable_text_special_features'] = value
return self
def enable_text_ngram_features(self, value: bool = True) -> FeatureGeneratorBuilder:
"""
Whether to use 'object' features identified as 'text' features to generate 'text_ngram' features.
Appends TextNgramFeatureGenerator(vectorizer=vectorizer) to the generator group.
"""
self.config['enable_text_ngram_features'] = value
return self
def enable_raw_text_features(self, value: bool = True) -> FeatureGeneratorBuilder:
"""
Whether to keep the raw text features.
Appends IdentityFeatureGenerator(infer_features_in_args=dict(required_special_types=['text'])) to the generator group.
"""
self.config['enable_raw_text_features'] = value
return self
def enable_vision_features(self, value: bool = True) -> FeatureGeneratorBuilder:
"""
[Experimental]
Whether to keep 'object' features identified as 'image_path' special type. Features of this form should have a string path to an image file as their value.
Only vision models can leverage these features, and these features will not be treated as categorical.
Note: 'image_path' features will not be automatically inferred. These features must be explicitly specified as such in a custom FeatureMetadata object.
Note: It is recommended that the string paths use absolute paths rather than relative, as it will likely be more stable.
"""
self.config['enable_vision_features'] = value
return self
def vectorizer(self, value: BaseEstimator) -> FeatureGeneratorBuilder:
"""
sklearn CountVectorizer object to use in TextNgramFeatureGenerator.
Only used if `enable_text_ngram_features=True`.
"""
self.config['vectorizer'] = value
return self
def text_ngram_params(self, value: dict = None) -> FeatureGeneratorBuilder:
"""
Appends TextNgramFeatureGenerator(vectorizer=vectorizer, text_ngram_params) to the generator group. See text_ngram.py for valid parameters.
"""
self.config['text_ngram_params'] = value
return self
def build(self) -> Union[ConfigBuilder, AutoMLPipelineFeatureGenerator]:
generator = AutoMLPipelineFeatureGenerator(**self.config)
if self.parent:
self.parent.config['feature_generator'] = generator
return self.parent
else:
return generator
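# Hedged usage sketch: built standalone, the builder yields an AutoMLPipelineFeatureGenerator;
# built through ConfigBuilder (defined below), it stores the generator in the parent config.
# generator = FeatureGeneratorBuilder().enable_text_ngram_features(False).build()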
class ConfigBuilder:
def __init__(self):
self.config = {}
def presets(self, presets: Union[str, list, dict]) -> ConfigBuilder:
"""
List of preset configurations for various arguments in `fit()`. Can significantly impact predictive accuracy, memory-footprint, and inference latency of trained models, and various other properties of the returned `predictor`.
It is recommended to specify presets and avoid specifying most other `fit()` arguments or model hyperparameters prior to becoming familiar with AutoGluon.
Available Presets: ['best_quality', 'high_quality', 'good_quality', 'medium_quality', 'optimize_for_deployment', 'ignore_text']
It is recommended to only use one `quality` based preset in a given call to `fit()` as they alter many of the same arguments and are not compatible with each other.
If there is an overlap in presets keys, the latter presets will override the earlier ones.
"""
valid_keys = list(tabular_presets_dict.keys())
if isinstance(presets, str):
presets = [presets]
if isinstance(presets, list):
unknown_keys = [k for k in presets if k not in valid_keys]
assert len(unknown_keys) == 0, f'The following presets are not recognized: {unknown_keys} - use one of the valid presets: {valid_keys}'
self.config['presets'] = presets
return self
def time_limit(self, time_limit: int) -> ConfigBuilder:
"""
Approximately how long `fit()` should run for (wallclock time in seconds).
If not specified, `fit()` will run until all models have completed training, but will not repeatedly bag models unless `num_bag_sets` is specified.
"""
if time_limit is not None:
assert time_limit > 0, 'time_limit must be greater than zero'
self.config['time_limit'] = time_limit
return self
def hyperparameters(self, hyperparameters: Union[str, dict]) -> ConfigBuilder:
valid_keys = [m for m in MODEL_TYPES.keys() if m not in ['ENS_WEIGHTED', 'SIMPLE_ENS_WEIGHTED']]
valid_str_values = list(hyperparameter_config_dict.keys())
if isinstance(hyperparameters, str):
assert hyperparameters in hyperparameter_config_dict, f'{hyperparameters} is not one of the valid presets {valid_str_values}'
elif isinstance(hyperparameters, dict):
unknown_keys = [k for k in hyperparameters.keys() if isinstance(k, str) and (k not in valid_keys)]
assert len(unknown_keys) == 0, f'The following model types are not recognized: {unknown_keys} - use one of the valid models: {valid_keys}'
else:
raise ValueError(f'hyperparameters must be either str: {valid_str_values} or dict with keys of {valid_keys}')
self.config['hyperparameters'] = hyperparameters
return self
def auto_stack(self, auto_stack: bool = True) -> ConfigBuilder:
"""
Whether AutoGluon should automatically utilize bagging and multi-layer stack ensembling to boost predictive accuracy.
Set this = True if you are willing to tolerate longer training times in order to maximize predictive accuracy!
Automatically sets `num_bag_folds` and `num_stack_levels` arguments based on dataset properties.
Note: Setting `num_bag_folds` and `num_stack_levels` arguments will override `auto_stack`.
Note: This can increase training time (and inference time) by up to 20x, but can greatly improve predictive performance.
"""
self.config['auto_stack'] = auto_stack
return self
def num_bag_folds(self, num_bag_folds: int) -> ConfigBuilder:
"""
Number of folds used for bagging of models. When `num_bag_folds = k`, training time is roughly increased by a factor of `k` (set = 0 to disable bagging).
Disabled by default (0), but we recommend values between 5-10 to maximize predictive performance.
Increasing num_bag_folds will result in models with lower bias but that are more prone to overfitting.
`num_bag_folds = 1` is an invalid value, and will raise a ValueError.
Values > 10 may produce diminishing returns, and can even harm overall results due to overfitting.
To further improve predictions, avoid increasing `num_bag_folds` much beyond 10 and instead increase `num_bag_sets`.
"""
assert num_bag_folds >= 0, 'num_bag_folds must be greater or equal than zero'
self.config['num_bag_folds'] = num_bag_folds
return self
def num_bag_sets(self, num_bag_sets: int) -> ConfigBuilder:
"""
Number of repeats of kfold bagging to perform (values must be >= 1). Total number of models trained during bagging = `num_bag_folds * num_bag_sets`.
Defaults to 1 if `time_limit` is not specified, otherwise 20 (always disabled if `num_bag_folds` is not specified).
Values greater than 1 will result in superior predictive performance, especially on smaller problems and with stacking enabled (reduces overall variance).
"""
assert num_bag_sets > 0, 'num_bag_sets must be greater than zero'
self.config['num_bag_sets'] = num_bag_sets
return self
def num_stack_levels(self, num_stack_levels: int) -> ConfigBuilder:
"""
Number of stacking levels to use in stack ensemble. Roughly increases model training time by factor of `num_stack_levels+1` (set = 0 to disable stack ensembling).
Disabled by default (0), but we recommend values between 1-3 to maximize predictive performance.
To prevent overfitting, `num_bag_folds >= 2` must also be set or else a ValueError will be raised.
"""
assert num_stack_levels >= 0, 'num_stack_levels must be greater or equal than zero'
self.config['num_stack_levels'] = num_stack_levels
return self
def holdout_frac(self, holdout_frac: float) -> ConfigBuilder:
"""
Fraction of train_data to holdout as tuning data for optimizing hyperparameters (ignored unless `tuning_data = None`, ignored if `num_bag_folds != 0` unless `use_bag_holdout == True`).
Default value (if None) is selected based on the number of rows in the training data. Default values range from 0.2 at 2,500 rows to 0.01 at 250,000 rows.
Default value is doubled if `hyperparameter_tune_kwargs` is set, up to a maximum of 0.2.
Disabled if `num_bag_folds >= 2` unless `use_bag_holdout == True`.
"""
assert (holdout_frac >= 0) & (holdout_frac <= 1), 'holdout_frac must be between 0 and 1'
self.config['holdout_frac'] = holdout_frac
return self
def use_bag_holdout(self, use_bag_holdout: bool = True) -> ConfigBuilder:
"""
If True, a `holdout_frac` portion of the data is held-out from model bagging.
This held-out data is only used to score models and determine weighted ensemble weights.
Enable this if there is a large gap between score_val and score_test in stack models.
Note: If `tuning_data` was specified, `tuning_data` is used as the holdout data.
Disabled if not bagging.
"""
self.config['use_bag_holdout'] = use_bag_holdout
return self
def hyperparameter_tune_kwargs(self, hyperparameter_tune_kwargs: Union[str, dict]) -> ConfigBuilder:
"""
Hyperparameter tuning strategy and kwargs (for example, how many HPO trials to run).
If None, then hyperparameter tuning will not be performed.
Valid preset values:
'auto': Uses the 'random' preset.
'random': Performs HPO via random search using local scheduler.
The 'searcher' key is required when providing a dict.
"""
valid_str_values = scheduler_factory._scheduler_presets.keys()
if isinstance(hyperparameter_tune_kwargs, str):
assert hyperparameter_tune_kwargs in valid_str_values, f'{hyperparameter_tune_kwargs} string must be one of {valid_str_values}'
elif not isinstance(hyperparameter_tune_kwargs, dict):
raise ValueError(f'hyperparameter_tune_kwargs must be either str: {valid_str_values} or dict')
self.config['hyperparameter_tune_kwargs'] = hyperparameter_tune_kwargs
return self
def ag_args(self, ag_args: dict) -> ConfigBuilder:
"""
Keyword arguments to pass to all models (i.e. common hyperparameters shared by all AutoGluon models).
See the `ag_args` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args` parameter for all models in `hyperparameters`.
If a key in `ag_args` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
"""
self.config['ag_args'] = ag_args
return self
def ag_args_fit(self, ag_args_fit: dict) -> ConfigBuilder:
"""
Keyword arguments to pass to all models.
See the `ag_args_fit` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args_fit` parameter for all models in `hyperparameters`.
If a key in `ag_args_fit` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
"""
self.config['ag_args_fit'] = ag_args_fit
return self
def ag_args_ensemble(self, ag_args_ensemble: dict) -> ConfigBuilder:
"""
Keyword arguments to pass to all models.
See the `ag_args_ensemble` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args_ensemble` parameter for all models in `hyperparameters`.
If a key in `ag_args_ensemble` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
"""
self.config['ag_args_ensemble'] = ag_args_ensemble
return self
def excluded_model_types(self, models: Union[str, list]) -> ConfigBuilder:
"""
Banned subset of model types to avoid training during `fit()`, even if present in `hyperparameters`.
Reference `hyperparameters` documentation for what models correspond to each value.
Useful when a particular model type such as 'KNN' or 'custom' is not desired but altering the `hyperparameters` dictionary is difficult or time-consuming.
Example: To exclude both 'KNN' and 'custom' models, specify `excluded_model_types=['KNN', 'custom']`.
"""
valid_keys = [m for m in MODEL_TYPES.keys() if m not in ['ENS_WEIGHTED', 'SIMPLE_ENS_WEIGHTED']]
if not isinstance(models, list):
models = [models]
for model in models:
assert model in valid_keys, f'{model} is not one of the valid models {valid_keys}'
self.config['excluded_model_types'] = sorted(list(set(models)))
return self
def included_model_types(self, models: Union[str, list]) -> ConfigBuilder:
"""
Subset of model types to train during `fit()`.
Reference `hyperparameters` documentation for what models correspond to each value.
Useful when only the particular models should be trained such as 'KNN' or 'custom', but altering the `hyperparameters` dictionary is difficult or time-consuming.
Example: To keep only 'KNN' and 'custom' models, specify `included_model_types=['KNN', 'custom']`.
"""
valid_keys = [m for m in MODEL_TYPES.keys() if m not in ['ENS_WEIGHTED', 'SIMPLE_ENS_WEIGHTED']]
if not isinstance(models, list):
models = [models]
unknown_keys = [k for k in models if isinstance(k, str) and (k not in valid_keys)]
assert len(unknown_keys) == 0, f'The following model types are not recognized: {unknown_keys} - use one of the valid models: {valid_keys}'
models = [m for m in valid_keys if m not in models]
self.config['excluded_model_types'] = models
return self
def refit_full(self, refit_full: Union[bool, str] = True) -> ConfigBuilder:
"""
Whether to retrain all models on all of the data (training + validation) after the normal training procedure.
This is equivalent to calling `predictor.refit_full(model=refit_full)` after fit.
If `refit_full=True`, it will be treated as `refit_full='all'`.
If `refit_full=False`, refitting will not occur.
Valid str values:
`all`: refits all models.
`best`: refits only the best model (and its ancestors if it is a stacker model).
`{model_name}`: refits only the specified model (and its ancestors if it is a stacker model).
"""
self.config['refit_full'] = refit_full
return self
def set_best_to_refit_full(self, set_best_to_refit_full=True) -> ConfigBuilder:
"""
If True, will change the default model that Predictor uses for prediction when model is not specified to the refit_full version of the model that exhibited the highest validation score.
Only valid if `refit_full` is set.
"""
self.config['set_best_to_refit_full'] = set_best_to_refit_full
return self
def keep_only_best(self, keep_only_best=True) -> ConfigBuilder:
"""
If True, only the best model and its ancestor models are saved in the outputted `predictor`. All other models are deleted.
If you only care about deploying the most accurate predictor with the smallest file-size and no longer need any of the other trained models or functionality beyond prediction on new data, then set: `keep_only_best=True`, `save_space=True`.
This is equivalent to calling `predictor.delete_models(models_to_keep='best', dry_run=False)` directly after `fit()`.
If used with `refit_full` and `set_best_to_refit_full`, the best model will be the refit_full model, and the original bagged best model will be deleted.
`refit_full` will be automatically set to 'best' in this case to avoid training models which will be later deleted.
"""
self.config['keep_only_best'] = keep_only_best
return self
def save_space(self, save_space=True) -> ConfigBuilder:
"""
If True, reduces the memory and disk size of predictor by deleting auxiliary model files that aren't needed for prediction on new data.
This is equivalent to calling `predictor.save_space()` directly after `fit()`.
This has NO impact on inference accuracy.
It is recommended if the only goal is to use the trained model for prediction.
Certain advanced functionality may no longer be available if `save_space=True`. Refer to `predictor.save_space()` documentation for more details.
"""
self.config['save_space'] = save_space
return self
def feature_generator(self) -> FeatureGeneratorBuilder:
"""
The feature generator used by AutoGluon to process the input data to the form sent to the models. This often includes automated feature generation and data cleaning.
It is generally recommended to keep the default feature generator unless handling an advanced use-case.
"""
return FeatureGeneratorBuilder(self)
def calibrate(self, calibrate=True) -> ConfigBuilder:
"""
If True and the problem_type is classification, temperature scaling will be used to calibrate the Predictor's estimated class probabilities
(which may improve metrics like log_loss) and will train a scalar parameter on the validation set.
If True and the problem_type is quantile regression, conformalization will be used to calibrate the Predictor's estimated quantiles
(which may improve the prediction interval coverage, and bagging could further improve it) and will compute a set of scalar parameters on the validation set.
"""
self.config['calibrate'] = calibrate
return self
def build(self) -> dict:
"""
Build the config.
"""
return copy.deepcopy(self.config)
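# Hedged usage sketch: chaining the builder produces a plain kwargs dict; passing it on to
# TabularPredictor.fit(**config) is the intended use but is an assumption not shown here.
# config = (
#     ConfigBuilder()
#     .presets('medium_quality')
#     .time_limit(600)
#     .excluded_model_types(['KNN'])
#     .feature_generator().enable_vision_features(False).build()
#     .build()
# )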
|
PypiClean
|
/iknl-flasgger-0.9.2.post1.tar.gz/iknl-flasgger-0.9.2.post1/flasgger/ui2/static/lang/ru.js
|
'use strict';
/* jshint quotmark: double */
window.SwaggerTranslator.learn({
"Warning: Deprecated":"Предупреждение: Устарело",
"Implementation Notes":"Заметки",
"Response Class":"Пример ответа",
"Status":"Статус",
"Parameters":"Параметры",
"Parameter":"Параметр",
"Value":"Значение",
"Description":"Описание",
"Parameter Type":"Тип параметра",
"Data Type":"Тип данных",
"HTTP Status Code":"HTTP код",
"Reason":"Причина",
"Response Model":"Структура ответа",
"Request URL":"URL запроса",
"Response Body":"Тело ответа",
"Response Code":"HTTP код ответа",
"Response Headers":"Заголовки ответа",
"Hide Response":"Спрятать ответ",
"Headers":"Заголовки",
"Response Messages":"Что может прийти в ответ",
"Try it out!":"Попробовать!",
"Show/Hide":"Показать/Скрыть",
"List Operations":"Операции кратко",
"Expand Operations":"Операции подробно",
"Raw":"В сыром виде",
"can't parse JSON. Raw result":"Не удается распарсить ответ:",
"Example Value":"Пример",
"Model Schema":"Структура",
"Model":"Описание",
"Click to set as parameter value":"Нажмите, чтобы испльзовать в качестве значения параметра",
"apply":"применить",
"Username":"Имя пользователя",
"Password":"Пароль",
"Terms of service":"Условия использования",
"Created by":"Разработано",
"See more at":"Еще тут",
"Contact the developer":"Связаться с разработчиком",
"api version":"Версия API",
"Response Content Type":"Content Type ответа",
"Parameter content type:":"Content Type параметра:",
"fetching resource":"Получение ресурса",
"fetching resource list":"Получение ресурсов",
"Explore":"Показать",
"Show Swagger Petstore Example Apis":"Показать примеры АПИ",
"Can't read from server. It may not have the appropriate access-control-origin settings.":"Не удается получить ответ от сервера. Возможно, проблема с настройками доступа",
"Please specify the protocol for":"Пожалуйста, укажите протокол для",
"Can't read swagger JSON from":"Не получается прочитать swagger json из",
"Finished Loading Resource Information. Rendering Swagger UI":"Загрузка информации о ресурсах завершена. Рендерим",
"Unable to read api":"Не удалось прочитать api",
"from path":"по адресу",
"server returned":"сервер сказал"
});
|
PypiClean
|
/malaya_speech-1.4.0rc1-py3-none-any.whl/malaya_speech/streaming/torchaudio.py
|
import collections
from datetime import datetime
from malaya_speech.utils.validator import check_pipeline
from malaya_speech.utils.torch_featurization import StreamReader, torchaudio_available
from malaya_speech.torch_model.torchaudio import Conformer
from malaya_speech.streaming import stream as base_stream
from functools import partial
import torch
import logging
logger = logging.getLogger(__name__)
if StreamReader is None:
logger.warning(f'`torchaudio.io.StreamReader` is not available, `{__name__}` cannot be used.')
class ContextCacher:
"""Cache the end of input data and prepend the next input data with it.
Args:
segment_length (int): The size of main segment.
If the incoming segment is shorter, then the segment is padded.
context_length (int): The size of the context, cached and appended.
"""
def __init__(self, segment_length: int, context_length: int):
self.segment_length = segment_length
self.context_length = context_length
self.context = torch.zeros([context_length])
def __call__(self, chunk: torch.Tensor):
if chunk.size(0) < self.segment_length:
chunk = torch.nn.functional.pad(chunk, (0, self.segment_length - chunk.size(0)))
chunk_with_context = torch.cat((self.context, chunk))
self.context = chunk[-self.context_length:]
return chunk_with_context
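# Hedged illustration of the caching behaviour (values chosen only for readability):
# cacher = ContextCacher(segment_length=4, context_length=2)
# cacher(torch.tensor([1., 2., 3., 4.]))  # -> [0, 0, 1, 2, 3, 4] (zero context prepended)
# cacher(torch.tensor([5., 6.]))          # padded to 4 samples -> [3, 4, 5, 6, 0, 0]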
def _base_stream(
src,
format=None,
option=None,
buffer_size: int = 4096,
sample_rate: int = 16000,
segment_length: int = 2560,
):
if StreamReader is None:
raise ValueError('`torchaudio.io.StreamReader` is not available, please make sure ffmpeg is installed properly.')
streamer = StreamReader(src=src, format=format, option=option, buffer_size=buffer_size)
streamer.add_basic_audio_stream(frames_per_chunk=segment_length, sample_rate=sample_rate)
logger.info(streamer.get_src_stream_info(0))
stream_iterator = streamer.stream()
return stream_iterator
class Audio:
def __init__(
self,
src,
vad_model=None,
format=None,
option=None,
buffer_size: int = 4096,
sample_rate: int = 16000,
segment_length: int = 2560,
**kwargs,
):
self.vad_model = vad_model
self.stream_iterator = _base_stream(
src=src,
format=format,
option=option,
buffer_size=buffer_size,
sample_rate=sample_rate,
segment_length=segment_length,
)
self.segment_length = segment_length
def destroy(self):
pass
def vad_collector(self, num_padding_frames=20, ratio=0.75):
"""
Generator that yields series of consecutive audio frames comprising each utterence, separated by yielding a single None.
Determines voice activity by ratio of frames in padding_ms. Uses a buffer to include padding_ms prior to being triggered.
Example: (frame, ..., frame, None, frame, ..., frame, None, ...)
|---utterence---| |---utterence---|
"""
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
for i, (chunk,) in enumerate(self.stream_iterator, start=1):
frame = chunk[:, 0].numpy()
if len(frame) != self.segment_length:
continue
if self.vad_model:
try:
is_speech = self.vad_model(frame)
if isinstance(is_speech, dict):
is_speech = is_speech['vad']
except Exception as e:
logger.debug(e)
is_speech = False
else:
is_speech = True
logger.debug(is_speech)
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
if num_voiced > ratio * ring_buffer.maxlen:
triggered = True
for f, s in ring_buffer:
yield f
ring_buffer.clear()
else:
yield frame
ring_buffer.append((frame, is_speech))
num_unvoiced = len(
[f for f, speech in ring_buffer if not speech]
)
if num_unvoiced > ratio * ring_buffer.maxlen:
triggered = False
yield None
ring_buffer.clear()
def stream(
src,
vad_model=None,
asr_model=None,
classification_model=None,
format=None,
option=None,
buffer_size: int = 4096,
sample_rate: int = 16000,
segment_length: int = 2560,
num_padding_frames: int = 20,
ratio: float = 0.75,
min_length: float = 0.1,
max_length: float = 10.0,
realtime_print: bool = True,
**kwargs,
):
"""
Stream an audio using torchaudio library.
Parameters
----------
vad_model: object, optional (default=None)
vad model / pipeline.
asr_model: object, optional (default=None)
ASR model / pipeline, will transcribe each subsamples realtime.
classification_model: object, optional (default=None)
classification pipeline, will classify each subsamples realtime.
format: str, optional (default=None)
Supported `format` for `torchaudio.io.StreamReader`,
https://pytorch.org/audio/stable/generated/torchaudio.io.StreamReader.html#torchaudio.io.StreamReader
option: dict, optional (default=None)
Supported `option` for `torchaudio.io.StreamReader`,
https://pytorch.org/audio/stable/generated/torchaudio.io.StreamReader.html#torchaudio.io.StreamReader
buffer_size: int, optional (default=4096)
Supported `buffer_size` for `torchaudio.io.StreamReader`, buffer size in byte. Used only when src is file-like object,
https://pytorch.org/audio/stable/generated/torchaudio.io.StreamReader.html#torchaudio.io.StreamReader
sample_rate: int, optional (default = 16000)
output sample rate.
segment_length: int, optional (default=2560)
usually derived from asr_model.segment_length * asr_model.hop_length,
size of audio chunks; the actual size in seconds is `segment_length` / `sample_rate`.
num_padding_frames: int, optional (default=20)
size of acceptable padding frames for queue.
ratio: float, optional (default = 0.75)
if this fraction of the queue (75% by default) is voiced, it is treated as voice activity.
min_length: float, optional (default=0.1)
minimum length (in seconds) to accept a subsample.
max_length: float, optional (default=10.0)
maximum length (in seconds) to accept a subsample.
realtime_print: bool, optional (default=True)
Will print results for ASR.
**kwargs: vector argument
vector argument pass to malaya_speech.streaming.pyaudio.Audio interface.
Returns
-------
result : List[dict]
"""
return base_stream(
audio_class=partial(Audio, src=src, format=format, option=option, buffer_size=buffer_size),
vad_model=vad_model,
asr_model=asr_model,
classification_model=classification_model,
sample_rate=sample_rate,
segment_length=segment_length,
num_padding_frames=num_padding_frames,
ratio=ratio,
min_length=min_length,
max_length=max_length,
realtime_print=realtime_print,
**kwargs,
)
def stream_rnnt(
src,
asr_model=None,
classification_model=None,
format=None,
option=None,
beam_width: int = 10,
buffer_size: int = 4096,
sample_rate: int = 16000,
segment_length: int = 2560,
context_length: int = 640,
realtime_print: bool = True,
**kwargs,
):
"""
Parameters
-----------
src: str
Supported `src` for `torchaudio.io.StreamReader`
Read more at https://pytorch.org/audio/stable/tutorials/streamreader_basic_tutorial.html#sphx-glr-tutorials-streamreader-basic-tutorial-py
or https://pytorch.org/audio/stable/tutorials/streamreader_advanced_tutorial.html#sphx-glr-tutorials-streamreader-advanced-tutorial-py
asr_model: object, optional (default=None)
ASR model / pipeline, will transcribe each subsamples realtime.
must be an object of `malaya_speech.torch_model.torchaudio.Conformer`.
classification_model: object, optional (default=None)
classification pipeline, will classify each subsamples realtime.
format: str, optional (default=None)
Supported `format` for `torchaudio.io.StreamReader`,
https://pytorch.org/audio/stable/generated/torchaudio.io.StreamReader.html#torchaudio.io.StreamReader
option: dict, optional (default=None)
Supported `option` for `torchaudio.io.StreamReader`,
https://pytorch.org/audio/stable/generated/torchaudio.io.StreamReader.html#torchaudio.io.StreamReader
buffer_size: int, optional (default=4096)
Supported `buffer_size` for `torchaudio.io.StreamReader`, buffer size in bytes. Used only when `src` is a file-like object,
https://pytorch.org/audio/stable/generated/torchaudio.io.StreamReader.html#torchaudio.io.StreamReader
sample_rate: int, optional (default=16000)
sample rate from the input device; the audio will be automatically resampled.
segment_length: int, optional (default=2560)
usually derived from asr_model.segment_length * asr_model.hop_length,
size of audio chunks; the actual size in seconds is `segment_length` / `sample_rate`.
context_length: int, optional (default=640)
usually derived from asr_model.right_context_length * asr_model.hop_length,
size of appended context chunks; only useful for streaming RNNT.
beam_width: int, optional (default=10)
width for beam decoding.
realtime_print: bool, optional (default=True)
Will print results for ASR.
"""
if not isinstance(asr_model, Conformer):
raise ValueError('`asr_model` only supports Enformer RNNT.')
if not getattr(asr_model, 'rnnt_streaming', False):
raise ValueError('`asr_model` only supports Enformer RNNT.')
if classification_model:
check_pipeline(
classification_model, 'classification', 'classification_model'
)
if asr_model.feature_extractor.pad:
asr_model.feature_extractor.pad = False
stream_iterator = _base_stream(
src=src,
format=format,
option=option,
buffer_size=buffer_size,
sample_rate=sample_rate,
)
cacher = ContextCacher(segment_length, context_length)
@torch.inference_mode()
def run_inference(state=None, hypothesis=None):
results = []
try:
for i, (chunk,) in enumerate(stream_iterator, start=1):
audio = chunk[:, 0]
wav_data = {
'wav_data': audio.numpy(),
'timestamp': datetime.now(),
}
segment = cacher(audio)
features, length = asr_model.feature_extractor(segment)
hypos, state = asr_model.decoder.infer(features, length, beam_width, state=state, hypothesis=hypothesis)
hypothesis = hypos[0]
transcript = asr_model.tokenizer(hypothesis[0], lstrip=False)
wav_data['asr_model'] = transcript
if len(transcript.strip()) and classification_model:
t_ = classification_model(wav_data['wav_data'])
if isinstance(t_, dict):
t_ = t_['classification']
wav_data['classification_model'] = t_
if realtime_print:
print(transcript, end='', flush=True)
results.append(wav_data)
except KeyboardInterrupt:
pass
except Exception as e:
raise e
return results
return run_inference()
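# Illustrative call sketch (not part of the original module). The variable names below are
# placeholders; `asr_model` must be a streaming-capable
# `malaya_speech.torch_model.torchaudio.Conformer`, and `src` can be anything accepted by
# `torchaudio.io.StreamReader` (file path, device, URL).
#
#     results = stream_rnnt(
#         src='speech.wav',
#         asr_model=conformer_rnnt_model,
#         segment_length=2560,
#         context_length=640,
#     )
#     # each element of `results` is a dict with 'wav_data', 'timestamp' and 'asr_model' keys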
|
PypiClean
|
/certora_cli_alpha_CVL_rewrite-20230330.12.28.586030-py3-none-any.whl/certora_cli/certoraRun.py
|
import sys
import time
import logging
from typing import List, Optional
from pathlib import Path
scripts_dir_path = Path(__file__).parent.resolve() # containing directory
sys.path.insert(0, str(scripts_dir_path))
from Shared.certoraUtils import run_jar_cmd
from Shared.certoraUtils import check_results_from_file, is_ci_or_git_action, run_local_spec_check
from Shared.certoraUtils import remove_file, is_new_api
from Shared.certoraUtils import CertoraUserInputError
from Shared.certoraUtils import get_certora_internal_dir, safe_create_dir
from Shared.certoraUtils import Mode, reset_certora_internal_dir
from Shared.certoraUtils import print_completion_message, mode_has_spec_file
from EVMVerifier.certoraCloudIO import CloudVerification, validate_version_and_branch
from EVMVerifier.certoraCollectRunMetadata import collect_run_metadata
from Shared.certoraLogging import LoggingManager
from EVMVerifier.certoraBuild import build
from EVMVerifier.certoraContext import get_local_run_cmd, get_args, handle_flags_in_args
from EVMVerifier import certoraContextValidator as Cv
BUILD_SCRIPT_PATH = Path("EVMVerifier/certoraBuild.py")
# logger for issues regarding the general run flow.
# Also serves as the default logger for errors originating from unexpected places.
run_logger = logging.getLogger("run")
def run_certora(args: List[str], is_library: bool = False) -> Optional[Path]:
"""
The main function that is responsible for the general flow of the script.
The general flow is:
1. Parse program arguments
2. Run the necessary steps (type checking/ build/ cloud verification/ local verification)
3. Shut down
IMPORTANT - if run_certora is not run with is_library set to True, we assume the script always reaches the
shut-down code. DO NOT USE SYS.EXIT() IN THE SCRIPT FILES!
If is_library is set to False, the program terminates with an exit code of 0 in case of success and 1 otherwise.
If is_library is set to True and the prover does not run locally, the link to the status URL is returned; otherwise None
is returned
"""
# If we are not in debug mode, we do not want to print the traceback in case of exceptions.
if '--debug' not in args: # We check manually, because we want no traceback in argument parsing exceptions
sys.tracebacklimit = 0
# creating the default internal dir, files may be copied to user defined build directory after
# parsing the input
reset_certora_internal_dir()
safe_create_dir(get_certora_internal_dir(), revert=False)
logging_manager = LoggingManager()
# adds ' around arguments with spaces
pretty_args = [f"'{arg}'" if ' ' in str(arg) else str(arg) for arg in args]
if is_new_api():
handle_flags_in_args(args)
context, conf_dict = get_args(args) # Parse arguments
logging_manager.set_log_level_and_format(is_quiet=context.short_output,
debug=context.debug,
debug_topics=context.debug_topics,
show_debug_topics=context.show_debug_topics)
if context.short_output is False:
if is_ci_or_git_action():
context.short_output = True
timings = {}
exit_code = 0 # The exit code of the script. 0 means success, any other number is an error.
return_value = None
try:
collect_run_metadata(wd=Path.cwd(), raw_args=sys.argv, conf_dict=conf_dict, context=context) \
.dump()
# When a TAC file is provided, no build arguments will be processed
if context.mode not in [Mode.TAC]:
run_logger.debug(f"There is no TAC file. Going to script {BUILD_SCRIPT_PATH} to main_with_args()")
build_start = time.perf_counter()
# If we are not in CI, we also check the spec for Syntax errors.
build(context, ignore_spec_syntax_check=is_library)
build_end = time.perf_counter()
timings["buildTime"] = round(build_end - build_start, 4)
if not context.build_only and exit_code == 0: # either we skipped building (TAC MODE) or build succeeded
if context.local:
compare_with_expected_file = Path(context.expected_file).exists()
specified_tool_output = context.tool_output is not None
# If we want to compare results we have to tell the jar where to store the output of the current run,
# but we don't want to override the path if it was specified
if compare_with_expected_file and not specified_tool_output:
context.tool_output = 'tmpOutput.json'
check_cmd = get_local_run_cmd(context)
# In local mode, this is reserved for Certora devs, so let the script print it
print(f"Verifier run command:\n {check_cmd}", flush=True)
run_result = \
run_jar_cmd(check_cmd, compare_with_expected_file, logger_topic="verification", print_output=True)
if run_result != 0:
exit_code = 1
else:
print_completion_message("Finished running verifier:")
print(f"\t{check_cmd}")
if compare_with_expected_file:
print("Comparing tool output to the expected output:")
result = check_results_from_file(context.tool_output, context.expected_file)
if not result:
exit_code = 1
if not specified_tool_output:
# Remove actual before starting the current test
remove_file(context.tool_output)
else: # Remote run
# In cloud mode, we first run a local type checker
"""
Before running the local type checker, we see if the current package version is compatible with
the latest. We check it before running the local type checker, because local type checking
errors could simply be a result of syntax introduced in the newest version.
The line below will raise an exception if the local version is incompatible.
"""
validate_version_and_branch(context.cloud if context.cloud else context.staging,
context.commit_sha1)
# Syntax checking and typechecking
if mode_has_spec_file(context.mode):
if context.disableLocalTypeChecking:
run_logger.warning(
"Local checks of CVL specification files disabled. It is recommended to enable "
"the checks.")
else:
typechecking_start = time.perf_counter()
spec_check_failed = run_local_spec_check(with_typechecking=True)
if spec_check_failed:
raise CertoraUserInputError("CVL specification syntax and type check failed")
else:
typechecking_end = time.perf_counter()
timings['typecheckingTime'] = round(typechecking_end - typechecking_start, 4)
if not context.typecheck_only and exit_code == 0: # Local typechecking either succeeded or skipped
context.key = Cv.validate_certora_key()
cloud_verifier = CloudVerification(context, timings)
# Wrap strings with space with ' so it can be copied and pasted to shell
pretty_args = [f"'{arg}'" if ' ' in arg else arg for arg in args]
cl_args = ' '.join(pretty_args)
logging_manager.remove_debug_logger()
result = cloud_verifier.cli_verify_and_report(cl_args, context.send_only)
if cloud_verifier.statusUrl:
return_value = Path(cloud_verifier.statusUrl)
if not result:
exit_code = 1
except Exception as e:
err_msg = "Encountered an error running Certora Prover"
if isinstance(e, CertoraUserInputError):
err_msg = f"{err_msg}:\n{e}"
else:
err_msg += ", please contact Certora"
if not logging_manager.is_debugging:
err_msg += "; consider running the script again with --debug to find out why it failed"
run_logger.debug("Failure traceback: ", exc_info=e)
run_logger.fatal(err_msg)
exit_code = 1
except KeyboardInterrupt:
print('\nInterrupted by user', flush=True) # We go down a line because last characters in terminal were ^C
sys.exit(1) # We exit ALWAYS, even if we are running from a library
# If the exit_code is 0, we do not call sys.exit() -> calling sys.exit() also exits any script that wraps this one
if not is_library and exit_code != 0:
sys.exit(exit_code)
return return_value
def entry_point() -> None:
"""
This function is the entry point of the certora_cli customer-facing package, as well as this script.
It is important this function gets no arguments!
"""
run_certora(sys.argv[1:], is_library=False)
if __name__ == '__main__':
entry_point()
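# Library-usage sketch (illustrative; the argument list below is hypothetical and must form a
# valid prover command line):
#
#     status_url = run_certora(['myProject.conf'], is_library=True)
#     # For remote runs `status_url` is a Path wrapping the job status URL; for local runs it
#     # is None (is_library=True suppresses sys.exit on failure, except for KeyboardInterrupt).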
|
PypiClean
|
/comt-2.6.4.tar.gz/comt-2.6.4/src/cm/media/js/lib/yui/yui3-3.15.0/build/imageloader/imageloader-min.js
|
YUI.add("imageloader",function(e,t){e.ImgLoadGroup=function(){this._init(),e.ImgLoadGroup.superclass.constructor.apply(this,arguments)},e.ImgLoadGroup.NAME="imgLoadGroup",e.ImgLoadGroup.ATTRS={name:{value:""},timeLimit:{value:null},foldDistance:{validator:e.Lang.isNumber,setter:function(e){return this._setFoldTriggers(),e},lazyAdd:!1},className:{value:null,setter:function(e){return this._className=e,e},lazyAdd:!1},classNameAction:{value:"default"}};var n={_init:function(){this._triggers=[],this._imgObjs={},this._timeout=null,this._classImageEls=null,this._className=null,this._areFoldTriggersSet=!1,this._maxKnownHLimit=0,e.on("domready",this._onloadTasks,this)},addTrigger:function(t,n){if(!t||!n)return this;var r=function(){this.fetch()};return this._triggers.push(e.on(n,r,t,this)),this},addCustomTrigger:function(t,n){if(!t)return this;var r=function(){this.fetch()};return e.Lang.isUndefined(n)?this._triggers.push(e.on(t,r,this)):this._triggers.push(n.on(t,r,this)),this},_setFoldTriggers:function(){if(this._areFoldTriggersSet)return;var t=function(){this._foldCheck()};this._triggers.push(e.on("scroll",t,window,this)),this._triggers.push(e.on("resize",t,window,this)),this._areFoldTriggersSet=!0},_onloadTasks:function(){var t=this.get("timeLimit");t&&t>0&&(this._timeout=setTimeout(this._getFetchTimeout(),t*1e3)),e.Lang.isUndefined(this.get("foldDistance"))||this._foldCheck()},_getFetchTimeout:function(){var e=this;return function(){e.fetch()}},registerImage:function(){var t=arguments[0].domId;return t?(this._imgObjs[t]=new e.ImgLoadImgObj(arguments[0]),this._imgObjs[t]):null},fetch:function(){this._clearTriggers(),this._fetchByClass();for(var e in this._imgObjs)this._imgObjs.hasOwnProperty(e)&&this._imgObjs[e].fetch()},_clearTriggers:function(){clearTimeout(this._timeout);for(var e=0,t=this._triggers.length;e<t;e++)this._triggers[e].detach()},_foldCheck:function(){var t=!0,n=e.DOM.viewportRegion(),r=n.bottom+this.get("foldDistance"),i,s,o,u,a;if(r<=this._maxKnownHLimit)return;this._maxKnownHLimit=r;for(i in this._imgObjs)this._imgObjs.hasOwnProperty(i)&&(s=this._imgObjs[i].fetch(r),t=t&&s);if(this._className){this._classImageEls===null&&(this._classImageEls=[],o=e.all("."+this._className),o.each(function(e){this._classImageEls.push({el:e,y:e.getY(),fetched:!1})},this)),o=this._classImageEls;for(u=0,a=o.length;u<a;u++){if(o[u].fetched)continue;o[u].y&&o[u].y<=r?(this._updateNodeClassName(o[u].el),o[u].fetched=!0):t=!1}}t&&this._clearTriggers()},_updateNodeClassName:function(e){var t;this.get("classNameAction")=="enhanced"&&e.get("tagName").toLowerCase()=="img"&&(t=e.getStyle("backgroundImage"),/url\(["']?(.*?)["']?\)/.test(t),t=RegExp.$1,e.set("src",t),e.setStyle("backgroundImage","")),e.removeClass(this._className)},_fetchByClass:function(){if(!this._className)return;e.all("."+this._className).each(e.bind(this._updateNodeClassName,this))}};e.extend(e.ImgLoadGroup,e.Base,n),e.ImgLoadImgObj=function(){e.ImgLoadImgObj.superclass.constructor.apply(this,arguments),this._init()},e.ImgLoadImgObj.NAME="imgLoadImgObj",e.ImgLoadImgObj.ATTRS={domId:{value:null,writeOnce:!0},bgUrl:{value:null},srcUrl:{value:null},width:{value:null},height:{value:null},setVisible:{value:!1},isPng:{value:!1},sizingMethod:{value:"scale"},enabled:{value:"true"}};var r={_init:function(){this._fetched=!1,this._imgEl=null,this._yPos=null},fetch:function(t){if(this._fetched)return!0;var n=this._getImgEl(),r;if(!n)return!1;if(t){r=this._getYPos();if(!r||r>t)return!1}return 
this.get("bgUrl")!==null?this.get("isPng")&&e.UA.ie&&e.UA.ie<=6?n.setStyle("filter",'progid:DXImageTransform.Microsoft.AlphaImageLoader(src="'+this.get("bgUrl")+'", sizingMethod="'+this.get("sizingMethod")+'", enabled="'+this.get("enabled")+'")'):n.setStyle("backgroundImage","url('"+this.get("bgUrl")+"')"):this.get("srcUrl")!==null&&n.setAttribute("src",this.get("srcUrl")),this.get("setVisible")&&n.setStyle("visibility","visible"),this.get("width")&&n.setAttribute("width",this.get("width")),this.get("height")&&n.setAttribute("height",this.get("height")),this._fetched=!0,!0},_getImgEl:function(){return this._imgEl===null&&(this._imgEl=e.one("#"+this.get("domId"))),this._imgEl},_getYPos:function(){return this._yPos===null&&(this._yPos=this._getImgEl().getY()),this._yPos}};e.extend(e.ImgLoadImgObj,e.Base,r)},"@VERSION@",{requires:["base-base","node-style","node-screen"]});
|
PypiClean
|
/reverse-ns-1.0.0.tar.gz/reverse-ns-1.0.0/src/reversens/client.py
|
from json import loads, JSONDecodeError
import re
from .net.http import ApiRequester
from .models.response import Response
from .exceptions.error import ParameterError, EmptyApiKeyError, \
UnparsableApiResponseError
class Client:
__default_url = "https://reverse-ns.whoisxmlapi.com/api/v1"
_api_requester: ApiRequester or None
_api_key: str
_last_result: Response or None
_name_server: str or None
_re_api_key = re.compile(r'^at_[a-z0-9]{29}$', re.IGNORECASE)
_re_domain_name = re.compile(
r'^(?:[0-9a-z_](?:[0-9a-z-_]{0,62}(?<=[0-9a-z-_])[0-9a-z_])?\.)+'
+ r'[0-9a-z][0-9a-z-]{0,62}[a-z0-9]$', re.IGNORECASE)
_PARSABLE_FORMAT = 'json'
JSON_FORMAT = 'json'
XML_FORMAT = 'xml'
def __init__(self, api_key: str, **kwargs):
"""
:param api_key: str: Your API key.
:key base_url: str: (optional) API endpoint URL.
:key timeout: float: (optional) API call timeout in seconds
"""
self._api_key = ''
self.api_key = api_key
self._last_result = None
self._name_server = ''
if 'base_url' not in kwargs:
kwargs['base_url'] = Client.__default_url
self.api_requester = ApiRequester(**kwargs)
@property
def api_key(self) -> str:
return self._api_key
@api_key.setter
def api_key(self, value: str):
self._api_key = Client._validate_api_key(value)
@property
def api_requester(self) -> ApiRequester or None:
return self._api_requester
@api_requester.setter
def api_requester(self, value: ApiRequester):
self._api_requester = value
@property
def base_url(self) -> str:
return self._api_requester.base_url
@base_url.setter
def base_url(self, value: str or None):
if value is None:
self._api_requester.base_url = Client.__default_url
else:
self._api_requester.base_url = value
@property
def last_result(self) -> Response or None:
return self._last_result
@last_result.setter
def last_result(self, value: Response or None):
if value is None:
self._last_result = value
elif isinstance(value, Response):
self._last_result = value
else:
raise ValueError(
"Values should be an instance of reversens.Response or None")
@property
def name_server(self) -> str:
return self._name_server
@name_server.setter
def name_server(self, ns: str):
try:
self._name_server = Client._validate_domain_name(ns)
except ParameterError as err:
raise ValueError(err.message)
@property
def timeout(self) -> float:
return self._api_requester.timeout
@timeout.setter
def timeout(self, value: float):
self._api_requester.timeout = value
def __iter__(self):
if not self._name_server:
raise ValueError('You need to specify a NS first. '
"Use the `name_server` instance's property")
self._last_result = None
return self
def __next__(self):
if not self._name_server:
raise ValueError('You need to specify a NS first. '
"Use the `name_server` instance's property")
if self._last_result:
if not self._last_result.has_next():
self._name_server = ''
raise StopIteration
return self.get(ns=self._name_server,
search_from=self._last_result.result[-1].name)
else:
return self.get(self._name_server)
def get(self, ns: str,
search_from: str = None) -> Response:
"""
Get parsed API response as a `Response` instance.
:param ns: Domain name of the name server, string
:param search_from: The last record of the current page to get
records after it, string
:return: `Response` instance
:raises ConnectionError:
:raises ReverseNsApiError: Base class for all errors below
:raises ResponseError: response contains an error message
:raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
:raises BadRequestError: Server returned 400 or 422 HTTP code
:raises HttpApiError: HTTP code >= 300 and not equal to above codes
:raises ParameterError: invalid parameter's value
"""
output_format = Client._PARSABLE_FORMAT
response = self.get_raw(ns, search_from, output_format)
try:
parsed = loads(str(response))
if 'result' in parsed:
self.last_result = Response(parsed)
self.name_server = ns
return self.last_result
raise UnparsableApiResponseError(
"Could not find the correct root element.", None)
except JSONDecodeError as error:
raise UnparsableApiResponseError("Could not parse API response", error)
def get_raw(self, ns: str, search_from: str = None,
output_format: str or None = None) -> str:
"""
Get raw API response.
:param ns: Domain name of the name server, string
:param search_from: The last record of the current page to get
records after it, string
:param output_format: Use Client.JSON_FORMAT and Client.XML_FORMAT
constants
:return: str
:raises ConnectionError:
:raises ReverseNsApiError: Base class for all errors below
:raises ResponseError: response contains an error message
:raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
:raises BadRequestError: Server returned 400 or 422 HTTP code
:raises HttpApiError: HTTP code >= 300 and not equal to above codes
:raises ParameterError: invalid parameter's value
"""
if self.api_key == '':
raise EmptyApiKeyError('')
_ns = Client._validate_domain_name(ns)
_from = Client._validate_search_from(search_from) \
if search_from is not None else None
_output_format = Client._validate_output_format(output_format) \
if output_format is not None else None
return self._api_requester.get(self._build_payload(
self.api_key,
_ns,
_from,
_output_format
))
@staticmethod
def _validate_api_key(api_key) -> str:
if Client._re_api_key.search(str(api_key)):
return str(api_key)
else:
raise ParameterError("Invalid API key format.")
@staticmethod
def _validate_domain_name(value) -> str:
if Client._re_domain_name.search(str(value)):
return str(value)
raise ParameterError("Invalid ns name")
@staticmethod
def _validate_output_format(value: str):
if str(value).lower() in {Client.JSON_FORMAT, Client.XML_FORMAT}:
return str(value).lower()
raise ParameterError(
f"Response format must be {Client.JSON_FORMAT} "
f"or {Client.XML_FORMAT}")
@staticmethod
def _validate_search_from(value) -> str:
if value and str(value):
return str(value)
raise ParameterError("search_from should be a string")
@staticmethod
def _build_payload(
api_key,
ns,
search_from,
output_format
) -> dict:
tmp = {
'apiKey': api_key,
'ns': ns,
'from': search_from,
'outputFormat': output_format
}
return {k: v for (k, v) in tmp.items() if v is not None}
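# Usage sketch (illustrative; the API key below is a placeholder in the expected
# 'at_' + 29 alphanumeric characters format):
#
#     client = Client('at_00000000000000000000000000000')
#     response = client.get('ns1.example.com')      # one parsed page of results
#     client.name_server = 'ns1.example.com'
#     for page in client:                            # iterate over all result pages
#         print(page.result[0].name)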
|
PypiClean
|
/django-skylark-0.4.6.tar.gz/django-skylark-0.4.6/src/skylark/templates/chirp/media/dojox/lang/async.js
|
if(!dojo._hasResource["dojox.lang.async"]){ //_hasResource checks added by build. Do not use _hasResource directly in your code.
dojo._hasResource["dojox.lang.async"] = true;
dojo.provide("dojox.lang.async");
(function(){
var d = dojo, Deferred = d.Deferred, each = d.forEach, some = d.some,
async = dojox.lang.async, aps = Array.prototype.slice,
opts = Object.prototype.toString;
async.seq = function(x){
// summary:
// Executes functions sequentially. Waits if any of them returns Deferred.
var fs = opts.call(x) == "[object Array]" ? x : arguments;
return function(init){
var x = new Deferred();
each(fs, function(f){ x.addCallback(f); });
x.callback(init);
return x;
};
};
async.par = function(x){
// summary:
// Executes functions in parallel. Waits for all of them to finish.
var fs = opts.call(x) == "[object Array]" ? x : arguments;
return function(init){
var results = new Array(fs.length),
cancel = function(){
each(results, function(v){
if(v instanceof Deferred && v.fired < 0){
v.cancel();
}
});
},
x = new Deferred(cancel),
ready = fs.length;
each(fs, function(f, i){
var x;
try {
x = f(init);
}catch(e){
x = e;
}
results[i] = x;
});
var failed = some(results, function(v){
if(v instanceof Error){
cancel();
x.errback(v);
return true;
}
return false;
});
if(!failed){
each(results, function(v, i){
if(v instanceof Deferred){
v.addCallbacks(
function(v){
results[i] = v;
if(!--ready){
x.callback(results);
}
},
function(v){
cancel();
x.errback(v);
}
);
}else{
--ready;
}
});
}
if(!ready){
x.callback(results);
}
return x;
};
};
async.any = function(x){
// summary:
// Executes functions in parallel. As soon as one of them finishes
// cancels the rest.
var fs = opts.call(x) == "[object Array]" ? x : arguments;
return function(init){
var results = new Array(fs.length), noResult = true,
cancel = function(index){
each(results, function(v, i){
if(i != index && v instanceof Deferred && v.fired < 0){
v.cancel();
}
});
},
x = new Deferred(cancel);
each(fs, function(f, i){
var x;
try {
x = f(init);
}catch(e){
x = e;
}
results[i] = x;
});
var done = some(results, function(v, i){
if(!(v instanceof Deferred)){
cancel(i);
x.callback(v);
return true;
}
return false;
});
if(!done){
each(results, function(v, i){
v.addBoth(
function(v){
if(noResult){
noResult = false;
cancel(i);
x.callback(v);
}
}
);
});
}
return x;
};
};
async.select = function(cond, x){
// summary:
// Executes a condition, waits for it if necessary, and executes
// Nth function from list.
var fs = opts.call(x) == "[object Array]" ? x : aps.call(arguments, 1);
return function(init){
return new Deferred().addCallback(cond).addCallback(function(v){
if(typeof v == "number" && v >= 0 && v < fs.length){
return fs[v](init);
}else{
return new Error("async.select: out of range");
}
}).callback(init);
};
};
async.ifThen = function(cond, ifTrue, ifFalse){
// summary:
// Executes a condition, waits for it if necessary, and executes
// one of two functions.
return function(init){
return new Deferred().addCallback(cond).addCallback(function(v){
return (v ? ifTrue : ifFalse)(init);
}).callback(init);
};
};
async.loop = function(cond, body){
// summary:
// Executes a condition, waits for it if necessary, and executes
// the body if a truthy value was returned.
// Then it repeats the cycle until the condition function returns
// a falsy value.
return function(init){
var x, y = new Deferred(function(){ x.cancel(); });
function ifErr(v){ y.errback(v); }
function loop(v){
if(v){
x.addCallback(body).addCallback(setUp);
}else{
y.callback(v);
}
return v;
}
function setUp(init){
x = new Deferred().
addCallback(cond).
addCallback(loop).
addErrback(ifErr);
x.callback(init);
}
setUp(init);
return y;
};
};
})();
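// Usage sketch (illustrative, not part of the original file; loadConfig, loadUsers,
// loadGroups and render are hypothetical functions that may return dojo.Deferred objects):
//
//   var flow = dojox.lang.async.seq(
//       loadConfig,
//       dojox.lang.async.par(loadUsers, loadGroups),
//       render
//   );
//   flow(initialValue).addCallback(onDone).addErrback(onError);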
/*
Design decisions:
seq() - behaves like the normal Deferred callback chain.
par() - if error, all pending Deferreds are cancelled and the error is signaled,
otherwise return an array of all results.
any() - just like par() but only one result is returned.
select() - any error is returned, otherwise the selected result is returned.
loop() - any error is returned, otherwise the last result is returned.
*/
}
|
PypiClean
|
/cached_interpolate-0.2.0-cp311-cp311-macosx_10_9_universal2.whl/cached_interpolate/interpolate.py
|
import numpy as np
from .build import build_linear_interpolant, build_natural_cubic_spline
class CachingInterpolant:
"""
Efficient evaluation of interpolants at fixed points.
Evaluating interpolants typically requires two stages:
1. finding the closest knot of the interpolant to the new point and the distance from that knot.
2. evaluating the interpolant at that point.
Sometimes it is necessary to evaluate many interpolants with identical knot points and evaluation
points but different functions being approximated and so the first of these stages is done many times unnecessarily.
This can be made more efficient by caching the locations of the evaluation points leaving just the evaluation of the
interpolation coefficients to be done at each iteration.
A further advantage of this, is that it allows broadcasting the interpolation using `cupy`.
This package implements this caching for nearest neighbour, linear, and cubic interpolation.
```python
import numpy as np
from cached_interpolate import CachingInterpolant
x_nodes = np.linspace(0, 1, 10)
y_nodes = np.random.uniform(-1, 1, 10)
evaluation_points = np.random.uniform(0, 1, 10000)
interpolant = CachingInterpolant(x=x_nodes, y=y_nodes, kind="cubic")
interpolated_values = interpolant(evaluation_points)
```
We can now evaluate this interpolant in a loop with the caching.
```python
for _ in range(1000):
y_nodes = np.random.uniform(-1, 1, 10)
interpolant(x=evaluation_points, y=y_nodes)
```
If we need to evaluate for a new set of points, we have to tell the interpolant to reset the cache.
There are two ways to do this:
- create a new interpolant; this will require reevaluating the interpolation coefficients.
- disable the evaluation point caching.
```python
new_evaluation_points = np.random.uniform(0, 1, 10000)
interpolant(x=new_evaluation_points, use_cache=False)
```
If you have access to an `nvidia` GPU and are evaluating the spline at ~ O(10^5) or more points you may want
to switch to the `cupy` backend.
This uses `cupy` just for the evaluation stage, not for computing the interpolation coefficients.
```python
import cupy as cp
evaluation_points = cp.asarray(evaluation_points)
interpolant = CachingInterpolant(x=x_nodes, y=y_nodes, backend=cp)
interpolated_values = interpolant(evaluation_points)
```
"""
def __init__(self, x, y, kind="cubic", backend=np, bc_type="natural"):
"""
Initialize the interpolator
:param x: np.ndarray
The nodes of the interpolant
:param y: np.ndarray
The value of the function being interpolated at the nodes
:param kind: str
The interpolation type, should be in ["nearest", "linear", "cubic"],
default="cubic"
:param backend: module
Backend for array operations, e.g., `numpy` or `cupy`.
This enables simple GPU acceleration.
"""
if bc_type != "natural":
raise NotImplementedError(
"Only natural boundary conditions are supported for the generic interpolant"
)
self.return_float = False
self.bk = backend
allowed_kinds = ["nearest", "linear", "cubic"]
if kind not in allowed_kinds:
raise ValueError(f"kind must be in {allowed_kinds}")
self.x_array = x
self.y_array = y
self._data = None
self.kind = kind
self._cached = False
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, kind):
self._kind = kind
data = self.build()
if data is not None:
data = self.bk.asarray(list(data))
self._data = data
def build(self):
"""
Call the constructor for the interpolant.
:return: tuple
Tuple containing the interpolation coefficients
"""
if self.kind == "cubic":
if self.y_array.dtype == complex:
real_ = self.bk.vstack(
build_natural_cubic_spline(xx=self.x_array, yy=self.y_array.real)
)
imag_ = self.bk.vstack(
build_natural_cubic_spline(xx=self.x_array, yy=self.y_array.imag)
)
return real_ + 1j * imag_
else:
return self.bk.vstack(
build_natural_cubic_spline(xx=self.x_array, yy=self.y_array)
)
elif self.kind == "linear":
return self.bk.asarray(
build_linear_interpolant(xx=self.x_array, yy=self.y_array)
)
elif self.kind == "nearest":
return self.bk.asarray(self.y_array)
def _construct_cache(self, x_values):
"""
Calculate the quantities required for the interpolation.
These are:
- the indices of the reference x node.
- the distance from that node along with the required powers of that distance.
:param x_values: np.ndarray
The values that the interpolant will be evaluated at
"""
x_array = self.bk.asarray(self.x_array)
x_values = self.bk.atleast_1d(x_values)
if x_values.size == 1:
self.return_float = True
input_shape = x_values.shape
x_values = x_values.reshape(-1)
self._cached = True
self._idxs = self.bk.empty(x_values.shape, dtype=int)
if self.kind == "nearest":
for ii, xval in enumerate(x_values):
self._idxs[ii] = self.bk.argmin(abs(xval - x_array))
self._idxs = self._idxs.reshape(input_shape)
else:
for ii, xval in enumerate(x_values):
if xval <= x_array[0]:
self._idxs[ii] = 0
else:
self._idxs[ii] = self.bk.where(xval > x_array)[0][-1]
self._idxs = self._idxs.reshape(input_shape)
x_values = x_values.reshape(input_shape)
diffs = [self.bk.ones(x_values.shape), x_values - x_array[self._idxs]]
if self.kind == "cubic":
diffs += [
(x_values - x_array[self._idxs]) ** 2,
(x_values - x_array[self._idxs]) ** 3,
]
self._diffs = self.bk.stack(diffs)
else:
self._diffs = diffs
def __call__(self, x, y=None, use_cache=True):
"""
Call the interpolant with desired caching
:param x: np.ndarray
The values that the interpolant will be evaluated at
:param y: np.ndarray
New interpolation points, this disables the caching of the target function
:param use_cache: bool
Whether to use the cached x values
:return: np.ndarray
The value of the interpolant at `x`
"""
if y is not None:
self.y_array = y
self._data = self.build()
if not (self._cached and use_cache):
self._construct_cache(x_values=x)
if self.kind == "cubic":
out = self._call_cubic()
elif self.kind == "linear":
out = self._call_linear()
elif self.kind == "nearest":
out = self._call_nearest()
if self.return_float:
out = out[0]
return out
def _call_nearest(self):
return self._data[self._idxs]
def _call_linear(self):
return self.bk.sum(self._data[:, self._idxs] * self._diffs, axis=0)
def _call_cubic(self):
return self.bk.sum(self._data[:, self._idxs] * self._diffs, axis=0)
class RegularCachingInterpolant:
"""
Efficient evaluation of interpolants at fixed points.
Evaluating interpolants typically requires two stages:
1. finding the closest knot of the interpolant to the new point and the distance from that knot.
2. evaluating the interpolant at that point.
Sometimes it is necessary to evaluate many interpolants with identical knot points and evaluation
points but different functions being approximated, so the first of these stages is repeated many times unnecessarily.
This can be made more efficient by caching the locations of the evaluation points leaving just the evaluation of the
interpolation coefficients to be done at each iteration.
A further advantage of this is that it allows broadcasting the interpolation using `cupy`.
This package implements this caching for nearest neighbour, linear, and cubic interpolation.
```python
import numpy as np
from cached_interpolate import CachingInterpolant
x_nodes = np.linspace(0, 1, 10)
y_nodes = np.random.uniform(-1, 1, 10)
evaluation_points = np.random.uniform(0, 1, 10000)
interpolant = CachingInterpolant(x=x_nodes, y=y_nodes, kind="cubic")
interpolated_values = interpolant(evaluation_points)
```
We can now evaluate this interpolant in a loop with the caching.
```python
for _ in range(1000):
y_nodes = np.random.uniform(-1, 1, 10)
interpolant(x=evaluation_points, y=y_nodes)
```
If we need to evaluate for a new set of points, we have to tell the interpolant to reset the cache.
There are two ways to do this:
- create a new interpolant; this will require reevaluating the interpolation coefficients.
- disable the evaluation point caching.
```python
new_evaluation_points = np.random.uniform(0, 1, 10000)
interpolant(x=new_evaluation_points, use_cache=False)
```
If you have access to an `nvidia` GPU and are evaluating the spline at ~ O(10^5) or more points you may want
to switch to the `cupy` backend.
This uses `cupy` just for the evaluation stage, not for computing the interpolation coefficients.
```python
import cupy as cp
evaluation_points = cp.asarray(evaluation_points)
interpolant = CachingInterpolant(x=x_nodes, y=y_nodes, backend=cp)
interpolated_values = interpolant(evaluation_points)
```
"""
def __init__(self, x, y, kind="cubic", backend=np, bc_type="not-a-knot"):
"""
Initialize the interpolator
:param x: np.ndarray
The nodes of the interpolant
:param y: np.ndarray
The value of the function being interpolated at the nodes
:param kind: str
The interpolation type, should be in ["nearest", "linear", "cubic"],
default="cubic"
:param backend: module
Backend for array operations, e.g., `numpy` or `cupy`.
This enables simple GPU acceleration.
"""
from .matrix_forms import MAPPING
self.bk = backend
self.n_nodes = len(x)
if bc_type not in MAPPING:
raise NotImplementedError(
f"bc_type must be one of {list(MAPPING.keys())} not {bc_type}"
)
self.conversion = MAPPING[bc_type](self.n_nodes)
self.return_float = False
allowed_kinds = ["nearest", "linear", "cubic"]
if kind not in allowed_kinds:
raise ValueError(f"kind must be in {allowed_kinds}")
self.x_array = x
self.y_array = y
self._data = None
self.kind = kind
self._cached = False
self.delta = x[1] - x[0]
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, kind):
self._kind = kind
data = self.build()
if data is not None:
data = self.bk.asarray(list(data))
self._data = data
def build(self):
"""
Call the constructor for the interpolant.
:return: tuple
Tuple containing the interpolation coefficients
"""
if self.kind == "cubic":
values = self.conversion @ self.y_array
return self.bk.asarray(
[
self.y_array[:-1],
self.y_array[1:],
values[:-1],
values[1:],
]
)
elif self.kind == "linear":
return self.bk.asarray(
[
self.y_array[: self.n_nodes - 1],
np.diff(self.y_array) / np.diff(self.x_array),
]
)
elif self.kind == "nearest":
return self.bk.asarray(self.y_array)
def _construct_cache(self, x_values):
"""
Calculate the quantities required for the interpolation.
These are:
- the indices of the reference x node.
- the distance from that node along with the required powers of that distance.
:param x_values: np.ndarray
The values that the interpolant will be evaluated at
"""
np = self.bk
x_array = np.asarray(self.x_array)
x_values = np.atleast_1d(x_values)
if x_values.size == 1:
self.return_float = True
scaled = (x_values - x_array[0]) / self.delta
if self.kind == "nearest":
idxs = np.clip(
np.round(scaled).astype(int), a_min=0, a_max=self.n_nodes - 1
)
else:
idxs = np.clip(
np.floor(scaled).astype(int), a_min=0, a_max=self.n_nodes - 2
)
self._idxs = idxs
if self.kind == "cubic":
bb = scaled - idxs
aa = 1 - bb
cc = (aa**3 - aa) / 6
dd = (bb**3 - bb) / 6
self._diffs = self.bk.asarray([aa, bb, cc, dd])
elif self.kind == "linear":
self._diffs = self.bk.asarray(
[self.bk.ones(x_values.shape), x_values - x_array[idxs]]
)
self._cached = True
def __call__(self, x, y=None, use_cache=True):
"""
Call the interpolant with desired caching
:param x: np.ndarray
The values that the interpolant will be evaluated at
:param y: np.ndarray
New interpolation points, this disables the caching of the target function
:param use_cache: bool
Whether to use the cached x values
:return: np.ndarray
The value of the interpolant at `x`
"""
if y is not None:
self.y_array = y
self._data = self.build()
if not (self._cached and use_cache):
self._construct_cache(x_values=x)
if self.kind == "cubic":
out = self._call_cubic()
elif self.kind == "linear":
out = self._call_linear()
elif self.kind == "nearest":
out = self._call_nearest()
if self.return_float:
out = out[0]
return out
def _call_nearest(self):
return self._data[self._idxs]
def _call_linear(self):
return self.bk.sum(self._data[:, self._idxs] * self._diffs, axis=0)
def _call_cubic(self):
return self.bk.sum(self._data[:, self._idxs] * self._diffs, axis=0)
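# Usage sketch (illustrative, not part of the original file). RegularCachingInterpolant
# assumes evenly spaced nodes: evaluation points are located from x[0] and the spacing
# x[1] - x[0] only, so irregular grids should use CachingInterpolant instead.
#
#     import numpy as np
#     x_nodes = np.linspace(0, 1, 10)
#     y_nodes = np.random.uniform(-1, 1, 10)
#     interpolant = RegularCachingInterpolant(x=x_nodes, y=y_nodes, kind="cubic")
#     values = interpolant(np.random.uniform(0, 1, 1000))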
|
PypiClean
|
/django-xvalidate-0.4.2.tar.gz/django-xvalidate-0.4.2/xvalidate/models.py
|
from django.db import models
from django.utils.decorators import classonlymethod
from django.core.exceptions import (ValidationError, NON_FIELD_ERRORS)
from django.core import checks
from .validators import AbnormalValues
__all__ = (
'XValidatedModel',
)
class XValidatedModel(models.Model):
class Meta:
abstract = True
class XVMeta:
spec = []
@classmethod
def _check_xvmeta(cls, **kwargs):
if not issubclass(cls.XVMeta, XValidatedModel.XVMeta):
yield checks.Error(
"Model's XVMeta should inherit from XValidatedModel.XVMeta",
hint="Use XVMeta(XValidatedModel.XVMeta) in the model's definition",
obj=cls,
id='xvalidate.E001',
)
for name in dir(cls.XVMeta):
if name.startswith('_'):
continue
if name not in dir(XValidatedModel.XVMeta):
yield checks.Error(
"Unexpected field '{}' in XVMeta definition".format(name),
hint="Check for typos in XVMeta",
obj=cls,
id='xvalidate.E002',
)
def _raise_validation_error_if_needed(self, result):
value = result['value']
if AbnormalValues.is_abnormal(value):
return
if not value:
message = result['message']
if not message:
message = {
NON_FIELD_ERRORS: "Validation failed, but no message specified"
}
raise ValidationError(message)
def _clean_xvmeta(self):
for s in self.XVMeta.spec:
result = s._clean(self)
self._raise_validation_error_if_needed(result)
@classonlymethod
def check(cls, **kwargs):
errors = super(XValidatedModel, cls).check(**kwargs)
errors.extend(cls._check_xvmeta(**kwargs))
for s in cls.XVMeta.spec:
errors.extend(s._check(cls))
return errors
def clean(self):
super(XValidatedModel, self).clean()
self._clean_xvmeta()
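# Usage sketch (hypothetical; the concrete spec expressions live in xvalidate.validators and
# are not shown in this module):
#
#     class Booking(XValidatedModel):
#         start = models.DateField()
#         end = models.DateField()
#
#         class XVMeta(XValidatedModel.XVMeta):
#             spec = []  # declarative checks; each item is evaluated by _clean_xvmeta() on clean()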
|
PypiClean
|
/yaclog-1.1.2.tar.gz/yaclog-1.1.2/docs/handbook/changelog_files.md
|
# Changelog Files
Yaclog works on Markdown changelog files, using a machine-readable format based on what is proposed by [Keep a Changelog](https://keepachangelog.com). Changelog files can be created using the {command}`yaclog init` command.
## Preamble
The preamble is the text at the top of the file before any version information. It can contain the title, an explanation of the file's purpose, as well as any general machine-readable information you may want to include for use with other tools. Yaclog does not provide any way to manipulate the preamble from the command line due to its open-ended nature.
## Versions
Version information begins with a header, which is an H2 containing the version's name, as well as optionally the date in ISO-8601 form, and any tag metadata. Some example version headers:
```markdown
## 1.0.0
```
```markdown
## 3.2.0 "Columbia" - 1981-07-20
```
```markdown
## Version 8.0.0rc1 1988-11-15 [PRERELEASE]
```
Version names should (but are not required to) include a version number in {pep}`440` format, which is a superset of [semantic versioning](https://semver.org). Versions can be incremented or renamed using the {command}`yaclog release` command.
## Entries
Entries are individual changes made since the previous version. They can be paragraphs, list items, or any markdown block element. Entries can be either uncategorized, or organized into sections using H3 headers. Entries can be added using the {command}`yaclog entry` command.
## Tags
Tags are additional metadata added to a version header, denoted by all-caps text surrounded in square brackets. Tags can be used to mark that a version is a prerelease, that it has been yanked for security reasons, or for marking compatibility with some other piece of software. Tags can be added and removed using the {command}`yaclog tag` command.
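For instance, a version header carrying more than one tag might look like this (an illustrative example, not taken from the yaclog documentation):
```markdown
## 2.1.0 - 2024-05-01 [YANKED] [BETA]
```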
## Example
```markdown
# Changelog
All notable changes to this project will be documented in this file.
## 0.13.0 "Aquarius" - 1970-04-11 [YANKED]
Yanked due to issues with oxygen tanks, currently investigating
### Added
- Extra propellant in preparation for future versions
### Changed
- Replaced Ken Mattingly
- Stirred oxygen tanks
## 0.12.0 "Intrepid" - 1969-11-14
### Added
- New ALSEP package for surface science
- Color cameras
- Surface rendezvous with Surveyor 3
### Fixed
- 1201/1202 alarm distracting crew during landing
### Known Issues
- Lightning strike during launch: No effect on performance
## 0.11.0 "Eagle" - 1969-07-20
Initial stable release
### Changed
- Fully fueled lander to allow landing on the lunar surface
```
|
PypiClean
|
/mongs_kfp-2.0.0a2-py3-none-any.whl/kfp/deprecated/dsl/types.py
|
from typing import Dict, Union
import warnings
from kfp.components.types import type_utils
class BaseType:
"""BaseType is a base type for all scalar and artifact types."""
def to_dict(self) -> Union[Dict, str]:
"""to_dict serializes the type instance into a python dictionary or
string."""
return {
type(self).__name__: self.__dict__
} if self.__dict__ else type(self).__name__
# Primitive Types
class Integer(BaseType):
def __init__(self):
self.openapi_schema_validator = {"type": "integer"}
class String(BaseType):
def __init__(self):
self.openapi_schema_validator = {"type": "string"}
class Float(BaseType):
def __init__(self):
self.openapi_schema_validator = {"type": "number"}
class Bool(BaseType):
def __init__(self):
self.openapi_schema_validator = {"type": "boolean"}
class List(BaseType):
def __init__(self):
self.openapi_schema_validator = {"type": "array"}
class Dict(BaseType):
def __init__(self):
self.openapi_schema_validator = {
"type": "object",
}
# GCP Types
class GCSPath(BaseType):
def __init__(self):
self.openapi_schema_validator = {
"type": "string",
"pattern": "^gs://.*$"
}
class GCRPath(BaseType):
def __init__(self):
self.openapi_schema_validator = {
"type": "string",
"pattern": "^.*gcr\\.io/.*$"
}
class GCPRegion(BaseType):
def __init__(self):
self.openapi_schema_validator = {"type": "string"}
class GCPProjectID(BaseType):
"""MetaGCPProjectID: GCP project id"""
def __init__(self):
self.openapi_schema_validator = {"type": "string"}
# General Types
class LocalPath(BaseType):
#TODO: add restriction to path
def __init__(self):
self.openapi_schema_validator = {"type": "string"}
class InconsistentTypeException(Exception):
"""InconsistencyTypeException is raised when two types are not
consistent."""
pass
class InconsistentTypeWarning(Warning):
"""InconsistentTypeWarning is issued when two types are not consistent."""
pass
TypeSpecType = Union[str, Dict]
def verify_type_compatibility(given_type: TypeSpecType,
expected_type: TypeSpecType,
error_message_prefix: str = ""):
"""verify_type_compatibility verifies that the given argument type is
compatible with the expected input type.
Args:
given_type (str/dict): The type of the argument passed to the
input
expected_type (str/dict): The declared type of the input
"""
# Missing types are treated as being compatible with missing types.
if given_type is None or expected_type is None:
return True
# Generic artifacts resulting from a missing type or an explicit "Artifact" type
# are compatible with any artifact type.
# However, generic artifacts resulting from arbitrary unknown types do not
# have such a "compatible" feature.
if not type_utils.is_parameter_type(
str(expected_type)) and str(given_type).lower() == "artifact":
return True
if not type_utils.is_parameter_type(
str(given_type)) and str(expected_type).lower() == "artifact":
return True
types_are_compatible = check_types(given_type, expected_type)
if not types_are_compatible:
error_text = error_message_prefix + (
'Argument type "{}" is incompatible with the input type "{}"'
).format(str(given_type), str(expected_type))
import kfp.deprecated as kfp
if kfp.TYPE_CHECK:
raise InconsistentTypeException(error_text)
else:
warnings.warn(InconsistentTypeWarning(error_text))
return types_are_compatible
def check_types(checked_type, expected_type):
"""check_types checks the type consistency.
For each of the attribute in checked_type, there is the same attribute
in expected_type with the same value.
However, expected_type could contain more attributes that checked_type
does not contain.
Args:
checked_type (BaseType/str/dict): it describes a type from the
upstream component output
expected_type (BaseType/str/dict): it describes a type from the
downstream component input
"""
if isinstance(checked_type, BaseType):
checked_type = checked_type.to_dict()
if isinstance(checked_type, str):
checked_type = {checked_type: {}}
if isinstance(expected_type, BaseType):
expected_type = expected_type.to_dict()
if isinstance(expected_type, str):
expected_type = {expected_type: {}}
return _check_dict_types(checked_type, expected_type)
def _check_valid_type_dict(payload):
"""_check_valid_type_dict checks whether a dict is a correct serialization
of a type.
Args: payload(dict)
"""
if not isinstance(payload, dict) or len(payload) != 1:
return False
for type_name in payload:
if not isinstance(payload[type_name], dict):
return False
property_types = (int, str, float, bool)
property_value_types = (int, str, float, bool, dict)
for property_name in payload[type_name]:
if not isinstance(property_name, property_types) or not isinstance(
payload[type_name][property_name], property_value_types):
return False
return True
def _check_dict_types(checked_type, expected_type):
"""_check_dict_types checks the type consistency.
Args:
checked_type (dict): A dict that describes a type from the upstream
component output
expected_type (dict): A dict that describes a type from the downstream
component input
"""
if not checked_type or not expected_type:
# If the type is empty, it matches any types
return True
checked_type_name, _ = list(checked_type.items())[0]
expected_type_name, _ = list(expected_type.items())[0]
if checked_type_name == "" or expected_type_name == "":
# If the type name is empty, it matches any types
return True
if checked_type_name != expected_type_name:
print("type name " + str(checked_type_name) +
" is different from expected: " + str(expected_type_name))
return False
type_name = checked_type_name
for type_property in checked_type[type_name]:
if type_property not in expected_type[type_name]:
print(type_name + " has a property " + str(type_property) +
" that the latter does not.")
return False
if checked_type[type_name][type_property] != expected_type[type_name][
type_property]:
print(type_name + " has a property " + str(type_property) +
" with value: " +
str(checked_type[type_name][type_property]) + " and " +
str(expected_type[type_name][type_property]))
return False
return True
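# Illustrative examples (not part of the original module) of the comparisons performed by
# check_types / _check_dict_types:
#
#     check_types('GCSPath', 'GCSPath')   # True: same type name, no properties to compare
#     check_types(GCSPath(), String())    # False: type names differ
#     check_types({'GCSPath': {}}, {'GCSPath': {'pattern': '^gs://.*$'}})
#     # True: the expected type may carry extra properties that the checked type lacks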
|
PypiClean
|
/dupfilefind-1.6.9.tar.gz/dupfilefind-1.6.9/ez_setup.py
|
import os, sys
DEFAULT_VERSION = "0.6c7"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
}
def _validate_md5(egg_name, data):
if egg_name in md5_data:
from md5 import md5
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
# The following code to parse versions is copied from pkg_resources.py so that
# we can parse versions without importing that module.
import re
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
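# Example (illustrative) of how this vendored parse_version orders versions, mirroring
# pkg_resources: pre-release markers sort before the corresponding final release.
#
#     parse_version('0.6c7') < parse_version('0.6.1')   # True: 0.6c7 is a candidate of 0.6
#     parse_version('0.6c7') < parse_version('0.7')     # True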
def setuptools_is_new_enough(required_version):
"""Return True if setuptools is already installed and has a version
number >= required_version."""
(cin, cout, cerr,) = os.popen3("%s -c \"import setuptools;print setuptools.__version__\"" % (sys.executable,))
verstr = cout.read().strip()
ver = parse_version(verstr)
return ver and ver >= parse_version(required_version)
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
min_version=None, download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
if min_version is None:
min_version = version
if not setuptools_is_new_enough(min_version):
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
if setuptools_is_new_enough(version):
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
else:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
from md5 import md5
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if '--md5update' in sys.argv:
sys.argv.remove('--md5update')
update_md5(sys.argv[1:])
else:
main(sys.argv[1:])
|
PypiClean
|
/mxnet_cu110-2.0.0a0-py3-none-manylinux2014_x86_64.whl/mxnet/_deferred_compute.py
|
"""Deferred Compute for NDArray."""
import ctypes
import contextlib
from .base import _LIB, check_call, SymbolHandle, _as_list
from .symbol import Symbol
__all__ = []
def is_deferred_compute():
"""Get status of deferred compute mode."""
curr = ctypes.c_bool()
check_call(_LIB.MXNDArrayIsDeferredCompute(ctypes.byref(curr)))
return curr.value
def set_deferred_compute(state):
"""Enable / Disable deferred compute mode.
Parameters
----------
state: bool
Returns
-------
Previous deferred compute state.
"""
prev = ctypes.c_int()
check_call(_LIB.MXNDArraySetIsDeferredCompute(ctypes.c_int(state), ctypes.byref(prev)))
return bool(prev.value)
@contextlib.contextmanager
def context(state=True):
"""Set deferred compute state to `state` within context. Reset afterwards to previous value."""
# Like other MXNet context managers, this bleeds state across concurrent
# code: "Context managers that have state should use Context Variables
# instead of threading.local() to prevent their state from bleeding to
# other code unexpectedly, when used in concurrent code."
# https://github.com/apache/incubator-mxnet/issues/17495#issuecomment-585461965
val = set_deferred_compute(state)
try:
yield
finally:
set_deferred_compute(val)
def get_symbol(output_arrays, *, sym_cls=Symbol):
"""Get symbolic representation of computation recorded in deferred compute mode.
Parameters
----------
output_arrays: NDArray or List[NDArray]
sym_cls: class used to construct Symbol
Returns
-------
Symbol of sym_cls
"""
output_arrays = _as_list(output_arrays)
# Prepare ctypes array types
output_handles_type = ctypes.c_void_p * len(output_arrays)
# Convert handles
output_handles = output_handles_type(*[array.handle for array in output_arrays])
handle = SymbolHandle()
check_call(_LIB.MXNDArrayGetDeferredComputeSymbol(output_handles, len(output_arrays),
ctypes.byref(handle)))
return sym_cls(handle)
def set_variable(arrays, variables):
"""Associate variables with arrays.
Parameters
----------
arrays: NDArray or List[NDArray]
variables: Symbol or List[Symbol] of variables
"""
arrays = _as_list(arrays)
variables = _as_list(variables)
# Prepare ctypes array types
arrays_type = variables_type = ctypes.c_void_p * len(arrays)
# Convert handles
arrays = arrays_type(*[array.handle for array in arrays])
variables = variables_type(*[symbol.handle for symbol in variables])
check_call(_LIB.MXNDArraySetDeferredComputeVariable(arrays, variables, len(arrays)))
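# Usage sketch (illustrative; this module is internal and the exact array namespace depends
# on the MXNet build, so treat the snippet as an assumption rather than documented API):
#
#     import mxnet as mx
#     x = mx.np.ones((2, 3))
#     set_variable(x, mx.sym.var('x'))   # associate a named variable with the input array
#     with context():
#         y = x * 2 + 1                  # operations on `x` are recorded
#     sym = get_symbol(y)                # symbolic graph of the recorded computation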
|
PypiClean
|
/hamster-gtk-0.11.0.tar.gz/hamster-gtk-0.11.0/hamster_gtk/misc/dialogs/date_range_select_dialog.py
|
# This file is part of 'hamster-gtk'.
#
# 'hamster-gtk' is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 'hamster-gtk' is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 'hamster-gtk'. If not, see <http://www.gnu.org/licenses/>.
"""This module contains Dialog for selecting a date range."""
from __future__ import absolute_import, unicode_literals
import calendar
import datetime
from gettext import gettext as _
from gi.repository import Gtk
from hamster_gtk import helpers
class DateRangeSelectDialog(Gtk.Dialog):
"""
A Dialog that allows selecting two dates that form a 'daterange'.
The core of the dialog is two :class:`Gtk.Calendar` widgets which allow for
manual setting of the start and end date. Additionally, three presets are
provided for the user's convenience.
"""
# Gtk.Calendar returns the month in a ``0``-based ordering, which is why we
# need to add/subtract ``1`` when translating to real-life months.
def __init__(self, parent, *args, **kwargs):
"""
Initialize widget.
Args:
parent (OverviewScreen): Parent window for this dialog.
"""
super(DateRangeSelectDialog, self).__init__(*args, **kwargs)
self.set_transient_for(parent)
self._mainbox = Gtk.Grid()
self._mainbox.set_hexpand(True)
self._mainbox.set_vexpand(True)
self._start_calendar = Gtk.Calendar()
self._end_calendar = Gtk.Calendar()
self._mainbox.attach(self._get_today_widget(), 0, 0, 4, 1)
self._mainbox.attach(self._get_week_widget(), 0, 1, 4, 1)
self._mainbox.attach(self._get_month_widget(), 0, 2, 4, 1)
self._mainbox.attach(self._get_custom_range_label(), 0, 3, 1, 1)
self._mainbox.attach(self._get_custom_range_connection_label(), 2, 3, 1, 1)
self._mainbox.attach(self._start_calendar, 1, 3, 1, 1)
self._mainbox.attach(self._end_calendar, 3, 3, 1, 1)
self.get_content_area().add(self._mainbox)
self.add_action_widget(self._get_apply_button(), Gtk.ResponseType.APPLY)
self.show_all()
@property
def daterange(self):
"""Return start and end date as per calendar widgets."""
start = helpers.calendar_date_to_datetime(self._start_calendar.get_date())
end = helpers.calendar_date_to_datetime(self._end_calendar.get_date())
return (start, end)
@daterange.setter
def daterange(self, daterange):
"""Set calendar dates according to daterange."""
start, end = daterange
self._start_calendar.select_month(start.month - 1, start.year)
self._start_calendar.select_day(start.day)
self._end_calendar.select_month(end.month - 1, end.year)
self._end_calendar.select_day(end.day)
# Widgets
def _get_apply_button(self):
button = Gtk.Button(_('_Apply'), use_underline=True)
return button
def _get_today_widget(self):
"""Return a widget that sets the daterange to today."""
button = self._get_double_label_button(_("Today"), datetime.date.today())
button.set_hexpand(True)
button.set_relief(Gtk.ReliefStyle.NONE)
button.connect('clicked', self._on_today_button_clicked)
return button
def _get_week_widget(self):
"""Return a widget that sets the daterange to the current week."""
start, end = self._get_week_range(datetime.date.today())
date_text = _("{} to {}".format(start, end))
button = self._get_double_label_button(_("Current Week"), date_text)
button.set_hexpand(True)
button.set_relief(Gtk.ReliefStyle.NONE)
button.connect('clicked', self._on_week_button_clicked)
return button
def _get_month_widget(self):
"""Return a widget that sets the daterange to the current month."""
start, end = self._get_month_range(datetime.date.today())
date_text = _("{} to {}".format(start, end))
button = self._get_double_label_button(_("Current Month"), date_text)
button.set_hexpand(True)
button.set_relief(Gtk.ReliefStyle.NONE)
button.connect('clicked', self._on_month_button_clicked)
return button
def _get_start_calendar(self):
"""Return ``Gtk.Calendar`` instance for the start date."""
return Gtk.Calendar()
def _get_end_calendar(self):
"""Return ``Gtk.Calendar`` instance for the end date."""
return Gtk.Calendar()
def _get_custom_range_label(self):
"""Return a 'heading' label for the widget."""
return Gtk.Label(_("Custom Range"))
def _get_custom_range_connection_label(self):
"""Return the label to be displayed between the two calendars."""
return Gtk.Label(_("to"))
# Helper
def _get_double_label_button(self, left_label, right_label):
"""
Return a special button with two label components.
The left label will be left aligned the right one right aligned.
"""
button = Gtk.Button()
grid = Gtk.Grid()
button.add(grid)
left_label = Gtk.Label(left_label)
left_label.set_hexpand(True)
left_label.set_halign(Gtk.Align.START)
right_label = Gtk.Label(right_label)
right_label.set_hexpand(True)
right_label.set_halign(Gtk.Align.END)
grid.attach(left_label, 0, 0, 1, 1)
grid.attach(right_label, 1, 0, 1, 1)
return button
def _get_week_range(self, date):
"""Return the start- and enddate of the week a given date is in."""
def get_offset_to_weekstart(weekday):
"""
Return the distance to the desired start of the week given weekday.
No extra work is required if we want weeks to start on mondays as
in this case ``weekday=0``. If a different start of the week is
desired, we need to add some adjustments.
"""
offset = weekday
return datetime.timedelta(days=offset)
start = date - get_offset_to_weekstart(date.weekday())
end = start + datetime.timedelta(days=6)
return (start, end)
def _get_month_range(self, date):
"""Return the start- and enddate of the month a given date is in."""
start = date - datetime.timedelta(days=date.day - 1)
days_in_month = calendar.monthrange(date.year, date.month)[1]
end = start + datetime.timedelta(days=days_in_month - 1)
return (start, end)
# Callbacks
def _on_today_button_clicked(self, button):
today = datetime.date.today()
self.daterange = (today, today)
self.response(Gtk.ResponseType.APPLY)
def _on_week_button_clicked(self, button):
start, end = self._get_week_range(datetime.date.today())
self.daterange = (start, end)
self.response(Gtk.ResponseType.APPLY)
def _on_month_button_clicked(self, button):
start, end = self._get_month_range(datetime.date.today())
self.daterange = (start, end)
self.response(Gtk.ResponseType.APPLY)
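# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a hedged example of
# driving the dialog from a parent window and reading the selected range on
# APPLY. The bare ``Gtk.Window`` parent is a stand-in used for illustration.
def _demo_date_range_select(parent=None):
    parent = parent or Gtk.Window()
    dialog = DateRangeSelectDialog(parent)
    daterange = None
    if dialog.run() == Gtk.ResponseType.APPLY:
        daterange = dialog.daterange
    dialog.destroy()
    return daterange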
|
PypiClean
|
/pke_zh-0.2.5.tar.gz/pke_zh-0.2.5/pke_zh/keybert.py
|
import itertools
from typing import List
import numpy as np
from loguru import logger
from sklearn.metrics.pairwise import cosine_similarity
from text2vec import SentenceModel
from pke_zh.base import BaseKeywordExtractModel
def max_sum_ranking(
doc_embedding: np.ndarray,
can_embeddings: np.ndarray,
can_names: List[str],
top_n: int,
nr_candidates: int
):
""" Calculate Max Sum Distance for extraction of keywords
We take the 2 x top_n most similar words/phrases to the document.
Then, we take all top_n combinations of the 2 x top_n words and
extract the combination that is least similar to each other
by cosine similarity.
NOTE:
This is O(n^2) and therefore not advised if you use a large top_n
Arguments:
doc_embedding: The document embeddings
can_embeddings: The embeddings of the selected candidate keywords/phrases
can_names: The selected candidate keywords/keyphrases
top_n: The number of keywords/keyphrases to return
nr_candidates: The number of candidates to consider, generally set to top_n * 2
Returns:
List[Tuple[str, float]]: The selected keywords/keyphrases with their distances
"""
# calculate distances and extract words
if len(doc_embedding.shape) == 1:
doc_embedding = doc_embedding.reshape(1, -1)
if len(can_embeddings.shape) == 1:
can_embeddings = can_embeddings.reshape(1, -1)
distances = cosine_similarity(doc_embedding, can_embeddings)
distance_words = cosine_similarity(can_embeddings)
# Get the nr_candidates most similar words as candidates based on cosine similarity
can_idx = list(distances.argsort()[0][-nr_candidates:])
can_name_filter = [can_names[i] for i in can_idx]
cand_distance = distance_words[np.ix_(can_idx, can_idx)]
# Find the top_n words that are least similar to each other
min_sim = 100000
final_candidate = None
# print(can_idx)
for combination in itertools.combinations(range(len(can_idx)), top_n):
sim = sum([cand_distance[i][j] for i in combination for j in combination if i != j])
if sim < min_sim:
final_candidate = combination
min_sim = sim
# return candidate names and scores
result = []
if not final_candidate:
final_candidate = can_idx
for val in final_candidate:
result.append((can_name_filter[val], distances[0][can_idx[val]]))
return result
def mmr_ranking(
doc_embedding: np.ndarray,
can_embeddings: np.ndarray,
can_names: List[str],
top_n: int,
alpha: float = 0.5
):
""" Calculate Maximal Marginal Relevance (MMR)
between candidate keywords and the document.
MMR considers the similarity of keywords/keyphrases with the
document, along with the similarity of already selected
keywords and keyphrases. This results in a selection of keywords
that maximize their diversity with respect to the document.
Arguments:
doc_embedding: The document embeddings
can_embeddings: The embeddings of the selected candidate keywords/phrases
can_names: The selected candidate keywords/keyphrases
top_n: The number of keywords/keyphrases to return
alpha: How diverse the select keywords/keyphrases are.
Values between 0 and 1 with 0 being not diverse at all
and 1 being most diverse.
Returns:
List[Tuple[str, float]]: The selected keywords/keyphrases with their distances
"""
# calculate distances and extract words
doc_can_distances = cosine_similarity(can_embeddings, doc_embedding)
distance_words = cosine_similarity(can_embeddings)
# Initialize candidates and choose the best keyword/keyphrase first
keywords_idx = [np.argmax(doc_can_distances)]
candidates_idx = [i for i in range(len(can_names)) if i != keywords_idx[0]]
for r in range(min(top_n, len(can_embeddings) - 1)):
# extract similarities
candidate_similarities = doc_can_distances[candidates_idx, :]
target_similarities = np.max(distance_words[candidates_idx][:, keywords_idx], axis=1)
# Calculate MMR
mmr = alpha * candidate_similarities - (1 - alpha) * target_similarities.reshape(-1, 1)
mmr_idx = candidates_idx[np.argmax(mmr)]
# Update keywords & candidates
keywords_idx.append(mmr_idx)
candidates_idx.remove(mmr_idx)
# return candidate names and scores
result = []
for val in keywords_idx:
result.append((can_names[val], doc_can_distances[val][0]))
return result
def mmr_norm_ranking(
doc_embedding: np.ndarray,
can_embeddings: np.ndarray,
can_names: List[str],
top_n: int,
alpha: float = 0.5
):
"""Rank candidates according to a query
:param document: np.array, dense representation of document (query)
:param candidates: np.array, dense representation of candidates
:param l: float, ratio between distance to query or distance between
chosen candidates
Returns: a list of candidates rank
"""
def norm(sim, **kwargs):
sim -= sim.min(**kwargs)
sim /= (sim.max(**kwargs) + 1e-10)
sim = 0.5 + (sim - sim.mean(**kwargs)) / (sim.std(**kwargs) + 1e-10)
return sim
def norm2(sim, **kwargs):
min_ = sim.min(**kwargs)
max_ = (sim.max(**kwargs) + 1e-10)
sim = (sim - min_) / max_
sim = 0.5 + (sim - sim.mean(**kwargs)) / (sim.std(**kwargs) + 1e-10)
return sim
sim_doc = cosine_similarity(doc_embedding, can_embeddings)
sim_doc[np.isnan(sim_doc)] = 0.
sim_doc = norm(sim_doc)
sim_doc[np.isnan(sim_doc)] = 0.
sim_can = cosine_similarity(can_embeddings)
sim_can[np.isnan(sim_can)] = 0.
sim_can = norm(sim_can, axis=1)
sim_can[np.isnan(sim_can)] = 0.
sel = np.zeros(len(can_embeddings), dtype=bool)
ranks = [None] * len(can_embeddings)
# Compute first candidate, the second part of the calculation is 0
# as there are no other chosen candidates to maximise distance to
chosen_candidate = (sim_doc * alpha).argmax()
sel[chosen_candidate] = True
ranks[chosen_candidate] = 0
for r in range(1, len(can_embeddings)):
# Remove already chosen candidates
sim_can[sel] = np.nan
# Compute MMR score
scores = alpha * sim_doc - (1 - alpha) * sim_can[:, sel].max(axis=1)
chosen_candidate = np.nanargmax(scores)
# Update output and mask with chosen candidate
sel[chosen_candidate] = True
ranks[chosen_candidate] = r
result = []
for can_id, val in enumerate(ranks):
if val is not None:
result.append((can_names[can_id], (len(ranks) - 1 - val) / (len(ranks) - 1)))
return result
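# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a hedged toy example
# of the ranking helpers above, using random vectors in place of real sentence
# embeddings; the candidate names are made up for illustration only.
def _demo_ranking_functions():
    rng = np.random.RandomState(42)
    doc_embedding = rng.rand(1, 8)     # one "document" vector
    can_embeddings = rng.rand(5, 8)    # five candidate vectors
    can_names = ['cand_%d' % i for i in range(5)]
    by_mmr = mmr_ranking(doc_embedding, can_embeddings, can_names,
                         top_n=3, alpha=0.5)
    by_maxsum = max_sum_ranking(doc_embedding, can_embeddings, can_names,
                                top_n=3, nr_candidates=5)
    return by_mmr, by_maxsum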
class KeyBert(BaseKeywordExtractModel):
def __init__(self, model='shibing624/text2vec-base-chinese'):
"""
model:
Several embedding backends are supported: SentenceModel, SentenceTransformers, Flair, Spacy and gensim.
For Chinese, the default is text2vec.SentenceModel with model="shibing624/text2vec-base-chinese";
for English, model="shibing624/text2vec-base-multilingual" can be used;
for other languages, see the sentence-transformers models:
* https://www.sbert.net/docs/pretrained_models.html
"""
super(KeyBert, self).__init__()
if isinstance(model, str):
try:
self.model = SentenceModel(model)
except Exception as e:
logger.warning(f'wrong url for sentence model, change to default! {e}')
self.model = SentenceModel('shibing624/text2vec-base-chinese')
elif isinstance(model, SentenceModel):
self.model = model
else:
raise ValueError('model must be str or text2vec.SentenceModel')
self.max_length = self.model.max_seq_length
def candidate_selection(self, pos=None):
"""Candidate selection using longest sequences of PoS.
:param pos: set of valid POS tags, defaults to ('NOUN', 'ADJ').
"""
if pos is not None:
self.valid_pos = pos
# select sequence of adjectives and nouns
self.longest_pos_sequence_selection(valid_pos=self.valid_pos)
self.candidate_filtering()
def _flatten_doc_words(self, lower):
"""flatten sentence words whose postags are valid"""
doc = ' '.join(w.lower() if lower else w for s in self.sentences
for i, w in enumerate(s.words)
if s.pos[i] in self.valid_pos)
return doc
def _calculate_candidate_weights(self, rank, cand_name):
for candidate_id, r in enumerate(rank):
if len(rank) > 1:
# Inverting ranks so the first ranked candidate has the biggest score
score = (len(rank) - 1 - r) / (len(rank) - 1)
else:
score = r
self.weights[cand_name[candidate_id]] = score
def _doc_to_sent_list(self):
"""
Merge consecutive sentences so that long-document semantics fit within the model's max sequence length.
:return: list of merged sentence strings
"""
sentence = []
cur_len = 0
cur_sent = []
for i, sent in enumerate(self.sentences):
cur_text = ''.join(sent.words)
cur_len += len(cur_text)
if cur_len >= self.max_length and cur_sent:
sentence.append(''.join(cur_sent))
cur_sent = [cur_text]
cur_len = len(cur_text)
else:
cur_sent.append(cur_text)
if cur_len:
sentence.append("".join(cur_sent))
return sentence
def _weights_update(self, canlist):
for canname, score in canlist:
self.weights[canname] = score
def candidate_weighting(self, use_maxsum=True, use_mmr=False, top_n=10, alpha=0.5, nr_candidates=20):
"""Candidate weighting function using distance to document.
:param use_maxsum: bool, use Max Sum Distance to select the keywords
:param use_mmr: bool, use Maximal Marginal Relevance (MMR) to select the keywords
:param top_n: int, number of keywords/keyphrases to weight
:param alpha: float, diversity factor for MMR, between 0 (no diversity) and 1 (max diversity)
:param nr_candidates: int, number of candidates considered by Max Sum Distance
"""
# get doc's sentences
doc_sents = self._doc_to_sent_list()
doc_embed = self.model.encode(doc_sents)
doc_embed = np.average(doc_embed, axis=0)  # average the sentence embeddings
doc_embed = np.expand_dims(doc_embed, axis=0)  # add a batch dimension
cand_name = list(self.candidates.keys())
cand_embed = self.model.encode(cand_name)
if use_mmr:
can_list = mmr_ranking(doc_embed, cand_embed, cand_name, top_n, alpha)
self._weights_update(can_list)
elif use_maxsum:
can_list = max_sum_ranking(doc_embed, cand_embed, cand_name, top_n, nr_candidates)
self._weights_update(can_list)
else:
distances = cosine_similarity(doc_embed, cand_embed)
for i, score in enumerate(distances[0]):
self.weights[cand_name[i]] = score
def extract(self, input_file_or_string, n_best=10, use_maxsum=True, use_mmr=False, alpha=0.5, nr_candidates=20):
"""
Extract keywords/keyphrases from the text.
:param input_file_or_string: the input document (file path or raw string)
:param n_best: return the top n_best keywords
:param use_maxsum: whether to use Max Sum Similarity for the selection of keywords
:param use_mmr: whether to use Maximal Marginal Relevance (MMR) for the selection of keywords/keyphrases
:param alpha: diversity hyperparameter for MMR, used if use_mmr is set to True, default: 0.5
:param nr_candidates: the number of candidates to consider if use_maxsum is set to True
:return: keyphrases list
"""
# 1. load the content of the document.
keyphrases = []
if not input_file_or_string:
return keyphrases
self.load_document(input=input_file_or_string, language='zh', normalization=None)
# 2. select sequences of nouns and adjectives as candidates.
self.candidate_selection()
if not self.candidates:
return keyphrases
# 3. weight the candidates using EmbedRank method
self.candidate_weighting(use_maxsum, use_mmr, n_best, alpha, nr_candidates)
# 4. get the n_best highest-scored candidates as keyphrases
keyphrases = self.get_n_best(n=n_best, redundancy_removal=True)
return keyphrases
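# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), assuming the
# default text2vec model can be downloaded and that extract() yields
# (keyphrase, score) pairs as in pke's get_n_best().
if __name__ == '__main__':
    extractor = KeyBert()
    sample_text = '自然语言处理是人工智能的重要方向,关键词抽取是其中的基础任务。'
    for phrase, score in extractor.extract(sample_text, n_best=5, use_mmr=True):
        print(phrase, round(float(score), 4))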
|
PypiClean
|
/conversor_divisor-3.3.3.tar.gz/conversor_divisor-3.3.3/conversor_divisor/MP4Box/gui/extensions/H2B2VS/h2b2vs.js
|
extension = {
setup: false,
dialog: null,
uhd_demo_enabled: false,
uhd_demo_on: false,
uhd_state_on: true,
addon_url: null,
scene_url: null,
overlay_position: 0,
icon_width: 0,
icon_height: 0,
movie_width: 0,
movie_height: 0,
toggle_uhd_demo: function (val) {
this.uhd_demo_on = val;
var notif = null;
if (this.uhd_demo_on) {
notif = gw_new_message(null, 'UHD Demo Enabled', 'Click to toggle quality');
} else {
notif = gw_new_message(null, 'UHD Demo Disabled', 'Double-click to re-enable');
this.logo.children[0].url[0] = '';
}
this.do_layout();
notif.set_size(20 * gwskin.default_text_font_size, gwskin.default_icon_height + 2 * gwskin.default_text_font_size);
notif.show();
},
ext_filter_event: function (evt) {
switch (evt.type) {
case GF_EVENT_ADDON_DETECTED:
this.confirm_addon(evt);
return true;
case GF_EVENT_QUIT:
this.save_session();
return false;
case GF_EVENT_DBLCLICK:
if (this.uhd_demo_enabled) {
this.toggle_uhd_demo(!this.uhd_demo_on);
}
return false;
case GF_EVENT_MOUSEUP:
if (this.uhd_demo_on) {
this.uhd_state_on = !this.uhd_state_on;
gpac.switch_quality(this.uhd_state_on);
return true;
}
return false;
case GF_EVENT_MOUSEDOWN:
if (this.uhd_demo_on) {
return true;
}
return false;
case GF_EVENT_SCENE_SIZE:
if (typeof evt.width != 'undefined') {
this.movie_width = evt.width;
this.movie_height = evt.height;
if (this.movie_height > 1080) this.uhd_state_on = true;
if (this.uhd_demo_on) {
this.do_layout();
}
}
return false;
case GF_EVENT_KEYDOWN:
//alert('key is '+evt.keycode);
if (evt.keycode == 'U+003D') {
this.overlay_position++;
if (this.overlay_position==4) {
this.do_deactivate_addon();
} else {
if (this.overlay_position==5) {
this.do_activate_addon();
this.overlay_position=0;
}
this.set_option('OverlayPosition', '' + this.overlay_position);
this.refresh_addon();
}
return true;
}
return false;
default:
return false;
}
},
create_event_filter: function (__anobj) {
return function (evt) {
return __anobj.ext_filter_event(evt);
}
},
do_layout: function () {
if (this.uhd_demo_enabled && this.uhd_demo_on) {
var url = this.get_option('path');
if (this.movie_height > 1080) {
url += 'logo_uhd.png';
this.logo.scale.x = 1;
this.logo.scale.y = 1;
} else {
url += 'logo_hd.png';
this.logo.scale.x = 2;
this.logo.scale.y = 2;
}
this.logo.children[0].url[0] = url;
} else {
this.logo.children[0].url[0] = '';
}
},
start: function () {
//first launch - register event filter and exit
if (!this.setup) {
gwlib_add_event_filter(this.create_event_filter(this), true);
this.setup = true;
this.overlay_position = parseInt(this.get_option('OverlayPosition', '0'));
/*create media nodes element for playback*/
this.logo = gw_new_container();
this.logo.children[0] = new SFNode('Inline');
this.logo.children[0].extension = this;
this.logo.children[0].url[0] = '';
this.logo.children[0].on_scene_size = function (evt) {
this.extension.icon_width = evt.width;
this.extension.icon_height = evt.height;
this.extension.do_layout();
};
gw_add_child(null, this.logo);
this.logo.children[0].addEventListener('gpac_scene_attached', this.logo.children[0].on_scene_size, 0);
this.restore_session();
//check our args
var i, argc = gpac.argc;
for (i = 1; i < argc; i++) {
var arg = gpac.get_arg(i);
if (arg == '-demo-uhd') {
this.uhd_demo_enabled = true;
this.toggle_uhd_demo(true);
gwlog(l_war, 'UHD Demo enabled');
break;
}
}
return;
}
gw_hide_dock();
var wnd = gw_new_window_full(null, true, 'H2B2VS Preferences');
this.dialog = wnd;
this.dialog.extension = this;
wnd.area = gw_new_grid_container(wnd);
wnd.area.spread_h = true;
wnd.area.break_at_hidden = true;
wnd.txt1 = gw_new_text(wnd.area, 'Overlay Position');
gw_new_separator(wnd.area);
wnd.check_pos = function (value) {
this.chk_pos1.set_checked((value == 0) ? true : false);
this.chk_pos2.set_checked((value == 1) ? true : false);
this.chk_pos3.set_checked((value == 2) ? true : false);
this.chk_pos4.set_checked((value == 3) ? true : false);
this.extension.set_option('OverlayPosition', '' + value);
this.extension.refresh_addon();
}
wnd.chk_pos4 = gw_new_checkbox(wnd.area, 'Top-Left');
wnd.chk_pos4.on_check = function (value) {
this.parent.parent.check_pos(3);
}
wnd.chk_pos2 = gw_new_checkbox(wnd.area, 'Top-Right');
wnd.chk_pos2.on_check = function (value) {
this.parent.parent.check_pos(1);
}
wnd.chk_pos3 = gw_new_checkbox(wnd.area, 'Bottom-Left');
wnd.chk_pos3.on_check = function (value) {
this.parent.parent.check_pos(2);
}
wnd.chk_pos1 = gw_new_checkbox(wnd.area, 'Bottom-Right');
wnd.chk_pos1.on_check = function (value) {
this.parent.parent.check_pos(0);
}
wnd.txt2 = gw_new_text(wnd.area, 'Overlay Size');
gw_new_separator(wnd.area);
wnd.check_size = function (value) {
this.chk_size1.set_checked((value == 0) ? true : false);
this.chk_size2.set_checked((value == 1) ? true : false);
this.chk_size3.set_checked((value == 2) ? true : false);
this.extension.set_option('OverlaySize', '' + value);
this.extension.refresh_addon();
}
wnd.chk_size1 = gw_new_checkbox(wnd.area, '1/2 Height');
wnd.chk_size1.on_check = function (value) {
this.parent.parent.check_size(0);
}
wnd.chk_size2 = gw_new_checkbox(wnd.area, '1/3 Height');
wnd.chk_size2.on_check = function (value) {
this.parent.parent.check_size(1);
}
wnd.chk_size3 = gw_new_checkbox(wnd.area, '1/4 Height');
wnd.chk_size3.on_check = function (value) {
this.parent.parent.check_size(2);
}
wnd.txt3 = gw_new_text(wnd.area, 'User Identifier');
gw_new_separator(wnd.area);
wnd.edit = gw_new_text_edit(wnd.area, this.get_option('UserID', 'H2B2VSUser'));
gpac.set_focus(wnd.edit);
wnd.edit.on_text = function (val) {
if (val != '') {
this.parent.parent.extension.set_option('UserID', val);
}
}
gw_new_separator(wnd.area);
wnd.chk_addon = gw_new_checkbox(wnd.area, 'Auto-select addon');
wnd.chk_addon.on_check = function (value) {
this.parent.parent.extension.set_option('AutoSelect', value ? 'yes' : 'no');
}
var do_sel = this.get_option('AutoSelect', 'no');
wnd.chk_addon.set_checked((do_sel == 'yes') ? true : false);
wnd.dbg_addon = gw_new_checkbox(wnd.area, 'Debug PVR addon');
wnd.dbg_addon.on_check = function (value) {
gpac.set_option('Systems', 'DebugPVRScene', value ? 'yes' : 'no');
}
do_sel = gpac.get_option('Systems', 'DebugPVRScene');
wnd.dbg_addon.set_checked((do_sel == 'yes') ? true : false);
gw_new_separator(wnd.area);
wnd.uhd_demo = gw_new_checkbox(wnd.area, 'UHD Demo');
wnd.uhd_demo.on_check = function (value) {
this.parent.parent.extension.uhd_demo_enabled = value;
this.parent.parent.extension.set_option('UHDDemo', value ? 'yes' : 'no');
}
do_sel = this.get_option('UHDDemo', 'no');
this.uhd_demo_enabled = (do_sel == 'yes') ? true : false;
wnd.uhd_demo.set_checked(this.uhd_demo_enabled);
if (this.uhd_demo_enabled) this.uhd_demo_on = true;
wnd.on_display_size = function (width, height) {
w = 0.9 * width;
if (w > 500) w = 500;
this.txt1.set_size(w / 3, gwskin.default_icon_height);
this.chk_pos1.set_size(w / 2, gwskin.default_control_height);
this.chk_pos2.set_size(w / 2, gwskin.default_control_height);
this.chk_pos3.set_size(w / 2, gwskin.default_control_height);
this.chk_pos4.set_size(w / 2, gwskin.default_control_height);
this.txt2.set_size(w / 3, gwskin.default_icon_height);
this.chk_size1.set_size(w / 3, gwskin.default_control_height);
this.chk_size2.set_size(w / 3, gwskin.default_control_height);
this.chk_size3.set_size(w / 3, gwskin.default_control_height);
this.txt3.set_size(w / 3, gwskin.default_icon_height);
this.edit.set_size(w / 2, gwskin.default_icon_height);
this.chk_addon.set_size(w / 2, gwskin.default_icon_height);
this.dbg_addon.set_size(w / 2, gwskin.default_icon_height);
this.uhd_demo.set_size(w / 2, gwskin.default_icon_height);
this.set_size(w, 13 * gwskin.default_icon_height);
}
wnd.check_pos(this.overlay_position);
wnd.check_size(parseInt(this.get_option('OverlaySize', '0')));
wnd.on_display_size(gw_display_width, gw_display_height);
wnd.set_alpha(0.9);
wnd.show();
wnd.on_close = function () {
gw_show_dock();
wnd.extension.dialog = null;
};
},
refresh_addon: function () {
if (this.scene_url) {
var odm = gpac.get_object_manager(this.scene_url);
if (odm) {
odm.addon_layout(parseInt(this.get_option('OverlayPosition', '0')), parseInt(this.get_option('OverlaySize', '0')));
}
}
},
do_activate_addon: function () {
var odm = gpac.get_object_manager(this.scene_url);
if (odm) {
odm.enable_addon(this.addon_url);
odm.addon_layout(parseInt(this.get_option('OverlayPosition', '0')), parseInt(this.get_option('OverlaySize', '0')));
}
},
do_deactivate_addon: function () {
var odm = gpac.get_object_manager(this.scene_url);
if (odm) {
odm.enable_addon(this.addon_url, true);
}
},
confirm_addon: function (evt) {
if (this.get_option('AutoSelect', 'no') == 'yes') {
this.scene_url = evt.scene_url;
this.addon_url = evt.addon_url;
this.do_activate_addon();
return;
}
var names = evt.addon_url.split('/');
if (names.length == 0) names = evt.addon_url.split('\\');
var dlg = gw_new_confirm_wnd(null, 'Addon detected (' + names.pop() + '), enable it ?');
dlg.set_alpha(0.95);
dlg.show();
dlg.extension = this;
dlg.scene_url = evt.scene_url;
dlg.addon_url = evt.addon_url;
dlg.on_confirm = function (value) {
if (!value) return;
this.extension.scene_url = evt.scene_url;
this.extension.addon_url = evt.addon_url;
this.extension.do_activate_addon();
}
},
do_xhr: function (url, cmd) {
var xhr = new XMLHttpRequest();
xhr.open('POST', url, false);
xhr.setRequestHeader('Content-Type', 'application/json');
xhr.setRequestHeader('Content-Length', cmd.length);
xhr.send(cmd);
if ((xhr.status != 200) || (xhr.readyState != 4)) {
if (xhr.status) {
gwlog(l_err, '[H2B2VS] Failed to query server: ' + xhr.responseText);
} else {
gwlog(l_err, '[H2B2VS] Failed to send request');
}
return null;
}
gwlog(l_deb, 'Command sent is ' + cmd + ' - response is ' + xhr.responseText);
var obj = gwskin.parse(xhr.responseText);
if (typeof obj.result == 'undefined') {
gwlog(l_err, '[H2B2VS] Non conformant response object ' + xhr.responseText);
return null;
}
if (obj.result != 0) {
gwlog(l_inf, '[H2B2VS] No session found for user - ' + xhr.responseText);
return null;
}
return obj;
},
restore_session: function () {
if (gwskin.media_url) {
gwlog(l_inf, 'URL was given when opening, skipping session restore');
return;
}
var server = this.get_option('SessionServer', null);
var user = this.get_option('UserID', null);
if (!server || !user) return;
var url = server + 'getData';
var cmd = 'ID=' + user;
var obj = this.do_xhr(url, cmd);
if (!obj) return;
var dlg = gw_new_confirm_wnd(null, 'Restore last session ?');
dlg.set_alpha(0.95);
dlg.show();
dlg.sess = obj.data;
dlg.on_confirm = function (value) {
if (!value) return;
gwskin.restore_session(this.sess.url, this.sess.media_time, this.sess.media_clock);
}
},
save_session: function () {
var server = this.get_option('SessionServer', null);
var user = this.get_option('UserID', null);
if (!server || !user) return;
var obj = {};
var url = gwskin.pvr_url;
if (url === '') url = gwskin.media_url;
obj.url = url.replace(/\\/g, "/");
obj.media_time = 0;
obj.media_clock = 0;
if (typeof gwskin.media_time != 'undefined') obj.media_time = gwskin.media_time;
if (typeof gwskin.media_clock != 'undefined') obj.media_clock = gwskin.media_clock;
var str = gwskin.stringify(obj);
var url = server + 'setData';
var cmd = 'ID=' + user + '&Data=' + str;
this.do_xhr(url, cmd);
}
};
|
PypiClean
|
/sparseml_nightly-1.6.0.20230829-py3-none-any.whl/sparseml/pytorch/sparsification/training/modifier_lr.py
|
import math
import sys
from typing import Dict, List, Optional, Union
from torch.nn import Module
from torch.optim.lr_scheduler import (
CosineAnnealingWarmRestarts,
ExponentialLR,
MultiStepLR,
StepLR,
)
from torch.optim.optimizer import Optimizer
from sparseml.optim import BaseModifier
from sparseml.pytorch.sparsification.modifier import (
ModifierProp,
PyTorchModifierYAML,
ScheduledModifier,
ScheduledUpdateModifier,
)
from sparseml.pytorch.utils import (
get_optim_groups_learning_rates,
set_optim_learning_rate,
)
from sparseml.sparsification import LearningRateModifier as BaseLearningRateModifier
from sparseml.sparsification import (
SetLearningRateModifier as BaseSetLearningRateModifier,
)
from sparseml.sparsification import SparsificationTypes
from sparseml.utils import convert_to_bool
__all__ = [
"SetLearningRateModifier",
"LearningRateFunctionModifier",
"LearningRateModifier",
]
CONSTRUCTORS = {
"StepLR": StepLR,
"MultiStepLR": MultiStepLR,
"ExponentialLR": ExponentialLR,
"CosineAnnealingWarmRestarts": CosineAnnealingWarmRestarts,
}
@PyTorchModifierYAML()
class SetLearningRateModifier(BaseSetLearningRateModifier, ScheduledModifier):
"""
Modifier to set the learning rate to a specific value at a certain point in the
training process.
Once that point is reached,
will update the optimizer's params with the learning rate.
| Sample yaml:
| !SetLearningRateModifier
| start_epoch: 0.0
| learning_rate: 0.001
| constant_logging: True
:param learning_rate: The learning rate to use once this modifier starts
:param start_epoch: The epoch to start the modifier at
(set to -1.0 so it starts immediately)
:param end_epoch: unused and should not be set
:param constant_logging: True to constantly log on every step,
False to only log on an LR change and min once per epoch, default False
"""
def __init__(
self,
learning_rate: Union[float, None],
param_groups: Optional[List[int]] = None,
start_epoch: float = -1.0,
end_epoch: float = -1.0,
constant_logging: bool = False,
):
super(SetLearningRateModifier, self).__init__(
learning_rate=learning_rate,
start_epoch=start_epoch,
end_epoch=-1,
end_comparator=None,
)
self._param_groups = param_groups
self._lr_set = False
self._applied = -1.0
self._constant_logging = convert_to_bool(constant_logging)
self._last_logged_lr = None
@ModifierProp()
def param_groups(self) -> Optional[List[int]]:
"""
:return: The param group indices to set the lr for within the optimizer,
if not set will set the lr for all param groups
"""
return self._param_groups
@param_groups.setter
def param_groups(self, value: Optional[List[int]]):
"""
:param value: The param group indices to set the lr for within the optimizer,
if not set will set the lr for all param groups
"""
self._param_groups = value
@ModifierProp()
def constant_logging(self) -> bool:
"""
:return: True to constantly log on every step,
False to only log on an LR change, default False
"""
return self._constant_logging
@constant_logging.setter
def constant_logging(self, value: bool):
"""
:param value: True to constantly log on every step,
False to only log on an LR change, default False
"""
self._constant_logging = value
@ModifierProp(serializable=False)
def applied_learning_rate(self) -> float:
"""
:return: the last applied learning rate to the optimizer,
-1.0 if hasn't been applied
"""
return self._applied
@ScheduledModifier.log_call
def update(
self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
):
"""
Check whether to update the learning rate for the optimizer or not
:param module: module to modify
:param optimizer: optimizer to modify
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
"""
super().update(module, optimizer, epoch, steps_per_epoch)
self._check_set_lr(optimizer, epoch)
def log_update(
self,
module: Module,
optimizer: Optimizer,
epoch: float,
steps_per_epoch: int,
):
"""
Check whether to log an update for the learning rate of the modifier
If constant logging is enabled, then will always log
Otherwise checks for a change in the LR before logging
:param module: module to modify
:param optimizer: optimizer to modify
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
"""
super().log_update(module, optimizer, epoch, steps_per_epoch)
group_lrs = [
(f"ParamGroup{index}", lr)
for (index, lr) in enumerate(get_optim_groups_learning_rates(optimizer))
if not self.param_groups or index in self.param_groups
]
if not group_lrs:
raise ValueError(
"Could not find param groups in the optimizer "
f"for given param_groups {self.param_groups}"
)
current_lr = group_lrs[-1][1]
if self._constant_logging or self._last_logged_lr != current_lr:
self._last_logged_lr = current_lr
self.log_named_scalars(
name_value_pairs=group_lrs,
epoch=epoch,
steps_per_epoch=steps_per_epoch,
)
def _check_set_lr(self, optimizer: Optimizer, epoch: float):
if (
(
self.start_epoch < 0.0
or (self.start_epoch - epoch) < sys.float_info.epsilon
)
and not self._lr_set
and self._learning_rate is not None
):
for (index, group) in enumerate(optimizer.param_groups):
if not self.param_groups or index in self.param_groups:
group["lr"] = self.learning_rate
self._applied = self.learning_rate
self._lr_set = True
@PyTorchModifierYAML()
class LearningRateFunctionModifier(ScheduledUpdateModifier):
"""
Modifier to set the learning rate based on supported math functions scaling between
an init_lr and a final_lr.
Any time an update point is reached, the LR is updated for the parameters groups
in the optimizer.
Specific parameter groups can be targeted for the optimizer as well.
| Sample yaml:
| !LearningRateFunctionModifier
| start_epoch: 0.0
| end_epoch: 10.0
| lr_func: linear
| init_lr: 0.1
| final_lr: 0.001
:param lr_func: The name of the lr function to use: [linear, cosine, cyclic_linear]
:param init_lr: The initial learning rate to use once this modifier starts
:param final_lr: The final learning rate to use once this modifier ends
:param start_epoch: The epoch to start the modifier at
(set to -1.0 so it starts immediately)
:param end_epoch: The epoch to end the modifier at,
(set to -1.0 so it doesn't end)
:param cycle_epochs: The number of epochs between two consecutive LR rewinds;
used by the cyclic_linear schedule only.
:param param_groups: The param group indices to set the lr for within the optimizer,
if not set will set the lr for all param groups
:param update_frequency: unused and should not be set
:param constant_logging: True to constantly log on every step,
False to only log on an LR change and min once per epoch, default False
"""
def __init__(
self,
lr_func: str,
init_lr: float,
final_lr: float,
start_epoch: float,
end_epoch: float,
cycle_epochs: float = 1.0,
param_groups: Optional[List[int]] = None,
update_frequency: float = -1.0,
):
super().__init__(
start_epoch=start_epoch,
end_epoch=end_epoch,
update_frequency=-1.0,
end_comparator=1,
)
self._lr_func = lr_func
self._init_lr = init_lr
self._final_lr = final_lr
self._cycle_epochs = cycle_epochs
self._param_groups = param_groups
self._learning_rate = None
self._last_applied_lr = None
self._last_logged_lr = None
self.validate()
@BaseModifier.sparsification_types.getter
def sparsification_types(self) -> List[SparsificationTypes]:
"""
:return: the sparsification types this modifier instance will apply
"""
return [SparsificationTypes.learning_rate]
@ModifierProp()
def lr_func(self) -> str:
"""
:return: The name of the lr function to use: [linear, cosine, cyclic_linear]
"""
return self._lr_func
@lr_func.setter
def lr_func(self, value: str):
"""
:param value: The name of the lr function to use: [linear, cosine, cyclic_linear]
"""
self._lr_func = value
self.validate()
@ModifierProp()
def init_lr(self) -> float:
"""
:return: The initial learning rate to use once this modifier starts
"""
return self._init_lr
@init_lr.setter
def init_lr(self, value: float):
"""
:param value: The initial learning rate to use once this modifier starts
"""
self._init_lr = value
self.validate()
@ModifierProp()
def final_lr(self) -> float:
"""
:return: The final learning rate to use once this modifier starts
"""
return self._final_lr
@final_lr.setter
def final_lr(self, value: float):
"""
:param value: The final learning rate to use once this modifier starts
"""
self._final_lr = value
self.validate()
@ModifierProp()
def cycle_epochs(self) -> float:
return self._cycle_epochs
@cycle_epochs.setter
def cycle_epochs(self, value: float):
self._cycle_epochs = value
self.validate()
@ModifierProp()
def param_groups(self) -> Optional[List[int]]:
"""
:return: The param group indices to set the lr for within the optimizer,
if not set will set the lr for all param groups
"""
return self._param_groups
@param_groups.setter
def param_groups(self, value: Optional[List[int]]):
"""
:param value: The param group indices to set the lr for within the optimizer,
if not set will set the lr for all param groups
"""
self._param_groups = value
self.validate()
@ScheduledModifier.log_call
def update(
self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
):
"""
Updates the LR based on the given epoch for the optimizer
:param module: module to modify
:param optimizer: optimizer to modify
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
"""
super().update(module, optimizer, epoch, steps_per_epoch)
lambda_func = getattr(LearningRateFunctionModifier, f"_{self._lr_func}")
self._learning_rate = lambda_func(self, epoch, steps_per_epoch)
set_optim_learning_rate(optimizer, self._learning_rate, self.param_groups)
def log_update(
self,
module: Module,
optimizer: Optimizer,
epoch: float,
steps_per_epoch: int,
):
"""
Check whether to log an update for the learning rate of the modifier.
Checks for a change in the LR or epoch before logging
:param module: module to modify
:param optimizer: optimizer to modify
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
"""
super().log_update(module, optimizer, epoch, steps_per_epoch)
group_lrs = [
(f"ParamGroup{index}", lr)
for (index, lr) in enumerate(get_optim_groups_learning_rates(optimizer))
if not self.param_groups or index in self.param_groups
]
if not group_lrs:
raise ValueError(
"Could not find param groups in the optimizer "
f"for given param_groups {self.param_groups}"
)
current_lr = group_lrs[-1][1]
if current_lr != self._last_logged_lr:
self.log_named_scalars(
name_value_pairs=group_lrs,
epoch=epoch,
steps_per_epoch=steps_per_epoch,
)
self._last_logged_lr = current_lr
def validate(self):
"""
Validate the values of the params for the current instance are valid
"""
lr_funcs = ["linear", "cosine", "cyclic_linear"]
if self.lr_func not in lr_funcs:
raise ValueError(f"lr_func must be one of {lr_funcs}")
if lr_funcs == "cyclic_linear" and self.cycle_epochs <= 0.0:
raise ValueError(
"cycle_epochs in the cyclic_linear schedule must be positive"
)
if isinstance(self.init_lr, str):
self.init_lr = float(self.init_lr)
if (
(not self.init_lr and self.init_lr != 0)
or self.init_lr < 0.0
or self.init_lr > 1.0
):
raise ValueError(
f"init_lr must be within range [0.0, 1.0], given {self.init_lr}"
)
if isinstance(self.final_lr, str):
self.final_lr = float(self.final_lr)
if (
(not self.final_lr and self.final_lr != 0)
or self.final_lr < 0.0
or self.final_lr > 1.0
):
raise ValueError(
f"final_lr must be within range [0.0, 1.0], given {self.final_lr}"
)
if self.update_frequency != -1.0:
raise ValueError("update_frequency must be kept at -1.0")
def _linear(self, epoch: float, steps_per_epoch: int) -> float:
# y = y1 + ((x - x1) / (x2 - x1)) * (y2 - y1)
start = self.start_epoch if self.start_epoch > 0 else 0.0
end = self.end_epoch
return self.init_lr + ((epoch - start) / (end - start)) * (
self.final_lr - self.init_lr
)
def _cosine(self, epoch: float, steps_per_epoch: int) -> float:
start = self.start_epoch if self.start_epoch > 0 else 0.0
end = self.end_epoch
# scale x to [0-1] for use with cosine
x_norm = (epoch - start) / (end - start)
# conditional to support cosine down to a value and up to a value
if self.final_lr < self.init_lr:
y_range = self.init_lr - self.final_lr
y_shift = self.final_lr
x_shift = 0
else:
y_range = self.final_lr - self.init_lr
y_shift = self.init_lr
x_shift = math.pi
return (
math.cos(x_norm * math.pi + x_shift) * y_range / 2 + y_range / 2 + y_shift
)
def _cyclic_linear(self, epoch: float, steps_per_epoch: int):
end_step = self.end_epoch * steps_per_epoch
start_step = self.start_epoch * steps_per_epoch
cycle_steps = self.cycle_epochs * steps_per_epoch
current_step = (epoch - self.start_epoch) * steps_per_epoch
if current_step > int((end_step - start_step) / cycle_steps) * cycle_steps:
cycle_steps = (end_step - start_step) % cycle_steps
adjusted_step = current_step % cycle_steps
lr = self.init_lr - (adjusted_step / (cycle_steps - 1)) * (
self.init_lr - self.final_lr
)
return lr
@PyTorchModifierYAML()
class LearningRateModifier(BaseLearningRateModifier, ScheduledUpdateModifier):
"""
Modifier to set the learning rate to specific values at certain points in the
training process between set epochs.
Any time an update point is reached, the LR is updated for the parameters
in the optimizer.
Builds on top of the builtin LR schedulers in PyTorch.
| Sample yaml:
| !LearningRateModifier
| start_epoch: 0.0
| end_epoch: 10.0
| lr_class: ExponentialLR
| lr_kwargs:
| gamma: 0.95
| init_lr: 0.01
| constant_logging: True
:param lr_class: The name of the lr scheduler class to use:
[StepLR, MultiStepLR, ExponentialLR, CosineAnnealingWarmRestarts]
:param lr_kwargs: The dictionary of keyword arguments to pass to the constructor
for the lr_class
:param init_lr: The initial learning rate to use once this modifier starts
:param start_epoch: The epoch to start the modifier at
(set to -1.0 so it starts immediately)
:param end_epoch: The epoch to end the modifier at,
(set to -1.0 so it doesn't end)
:param update_frequency: unused and should not be set
:param constant_logging: True to constantly log on every step,
False to only log on an LR change and min once per epoch, default False
"""
def __init__(
self,
lr_class: str,
lr_kwargs: Dict,
init_lr: float,
start_epoch: float,
end_epoch: float = -1.0,
update_frequency: float = -1.0,
constant_logging: bool = False,
):
super(LearningRateModifier, self).__init__(
lr_class=lr_class,
lr_kwargs=lr_kwargs,
init_lr=init_lr,
start_epoch=start_epoch,
end_epoch=end_epoch,
update_frequency=-1.0,
end_comparator=-1,
)
self._lr_scheduler = None
self._base_lr_set = False
self._last_scheduler_epoch = math.floor(start_epoch)
self._constant_logging = convert_to_bool(constant_logging)
self._double_step = False
self._last_logged_lr = None
self._scheduler_steps = 0
self.validate()
@ModifierProp()
def constant_logging(self) -> bool:
"""
:return: True to constantly log on every step,
False to only log on an LR change, default False
"""
return self._constant_logging
@constant_logging.setter
def constant_logging(self, value: bool):
"""
:param value: True to constantly log on every step,
False to only log on an LR change, default False
"""
self._constant_logging = value
@ScheduledModifier.log_call
def update(
self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int
):
"""
Calls into the lr scheduler to step given the epoch
Additionally will first set the lr to the init_lr if not set yet
:param module: module to modify
:param optimizer: optimizer to modify
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
"""
super().update(module, optimizer, epoch, steps_per_epoch)
self._check_init_lr(optimizer)
if epoch <= sys.float_info.epsilon:
# make sure we don't apply an lr step before the optimizer step
# mark the step to be applied on the next update
self._scheduler_steps -= 1
return
if (
abs(self.end_epoch - epoch) <= sys.float_info.epsilon
and self.end_epoch >= 0.0
):
# no cleanup step for LR, so exit before adding another LR step
return
self._check_setup_lr_scheduler(optimizer, epoch, steps_per_epoch)
if self.lr_class != "CosineAnnealingWarmRestarts":
global_step = (
round(epoch * steps_per_epoch)
if self.end_epoch < 0.0 or epoch <= self.end_epoch
else round(self.end_epoch * steps_per_epoch)
)
step_diff = global_step - self._scheduler_steps
if step_diff > 0:
for _ in range(step_diff):
self._lr_scheduler.step()
self._scheduler_steps = global_step
else:
self._lr_scheduler.step(
epoch - self.start_epoch if self.start_epoch >= 0.0 else 0.0
)
def log_update(
self,
module: Module,
optimizer: Optimizer,
epoch: float,
steps_per_epoch: int,
):
"""
Check whether to log an update for the learning rate of the modifier
If constant logging is enabled, then will always log
Otherwise checks for a change in the LR before logging
:param module: module to modify
:param optimizer: optimizer to modify
:param epoch: current epoch and progress within the current epoch
:param steps_per_epoch: number of steps taken within each epoch
(calculate batch number using this and epoch)
"""
super().log_update(module, optimizer, epoch, steps_per_epoch)
group_lrs = [
(f"ParamGroup{index}", lr)
for (index, lr) in enumerate(get_optim_groups_learning_rates(optimizer))
]
if not group_lrs:
raise ValueError("Could not find any param groups in the optimizer")
current_lr = group_lrs[-1][1]
if self._constant_logging or current_lr != self._last_logged_lr:
self._last_logged_lr = current_lr
self.log_named_scalars(
name_value_pairs=group_lrs,
epoch=epoch,
steps_per_epoch=steps_per_epoch,
)
def validate(self):
"""
Validate the values of the params for the current instance are valid
"""
if self.update_frequency != -1.0:
raise ValueError("update_frequency must be kept at -1.0")
def _check_init_lr(self, optimizer: Optimizer):
if self._lr_scheduler is not None:
return
if self._init_lr:
for param_group in optimizer.param_groups:
param_group["lr"] = self._init_lr
def _check_setup_lr_scheduler(
self, optimizer: Optimizer, epoch: float, steps_per_epoch: int
):
if self._lr_scheduler is not None:
return False
lr_class, lr_kwargs = self.corrected_lr_info(
steps_per_epoch, self.start_epoch, self.end_epoch
)
self._lr_scheduler = CONSTRUCTORS[lr_class](optimizer=optimizer, **lr_kwargs)
if hasattr(optimizer, "_step_count"):
# hack to keep pytorch lr scheduler from complaining
optimizer._step_count += 1
global_step = round(epoch * steps_per_epoch)
self._scheduler_steps += global_step
return True
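# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): sample the cosine
# schedule implemented by LearningRateFunctionModifier at a few epochs. The
# private ``_cosine`` method is called directly here, bypassing the usual
# recipe/ScheduledModifierManager-driven workflow, purely for illustration.
def _demo_cosine_schedule():
    modifier = LearningRateFunctionModifier(
        lr_func="cosine",
        init_lr=0.1,
        final_lr=0.001,
        start_epoch=0.0,
        end_epoch=10.0,
    )
    steps_per_epoch = 100
    # expected to start at init_lr (0.1) and decay to final_lr (0.001)
    return [
        round(modifier._cosine(epoch, steps_per_epoch), 5)
        for epoch in (0.0, 2.5, 5.0, 7.5, 10.0)
    ]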
|
PypiClean
|
/unofficial_stog-0.0.21.tar.gz/unofficial_stog-0.0.21/stog/modules/attention/biaffine_attention.py
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class BiaffineAttention(nn.Module):
"""
Adopted from NeuroNLP2:
https://github.com/XuezheMax/NeuroNLP2/blob/master/neuronlp2/nn/modules/attention.py
Bi-Affine attention layer.
"""
def __init__(self, input_size_encoder, input_size_decoder, num_labels=1, biaffine=True, **kwargs):
"""
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
num_labels: int
the number of labels of the crf layer
biaffine: bool
if apply bi-affine parameter.
**kwargs:
"""
super(BiaffineAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.num_labels = num_labels
self.biaffine = biaffine
self.W_d = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder))
self.W_e = Parameter(torch.Tensor(self.num_labels, self.input_size_encoder))
self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder, self.input_size_encoder))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.W_d)
nn.init.xavier_normal_(self.W_e)
nn.init.constant_(self.b, 0.)
if self.biaffine:
nn.init.xavier_uniform_(self.U)
def forward(self, input_d, input_e, mask_d=None, mask_e=None):
"""
Args:
input_d: Tensor
the decoder input tensor with shape = [batch, length_decoder, input_size]
input_e: Tensor
the child input tensor with shape = [batch, length_encoder, input_size]
mask_d: Tensor or None
the mask tensor for decoder with shape = [batch, length_decoder]
mask_e: Tensor or None
the mask tensor for encoder with shape = [batch, length_encoder]
Returns: Tensor
the energy tensor with shape = [batch, num_label, length, length]
"""
assert input_d.size(0) == input_e.size(0), 'batch sizes of encoder and decoder are required to be equal.'
batch, length_decoder, _ = input_d.size()
_, length_encoder, _ = input_e.size()
# compute decoder part: [num_label, input_size_decoder] * [batch, input_size_decoder, length_decoder]
# the output shape is [batch, num_label, length_decoder]
out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
# compute encoder part: [num_label, input_size_encoder] * [batch, input_size_encoder, length_encoder]
# the output shape is [batch, num_label, length_encoder]
out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
# output shape [batch, num_label, length_decoder, length_encoder]
if self.biaffine:
# compute bi-affine part
# [batch, 1, length_decoder, input_size_decoder] * [num_labels, input_size_decoder, input_size_encoder]
# output shape [batch, num_label, length_decoder, input_size_encoder]
output = torch.matmul(input_d.unsqueeze(1), self.U)
# [batch, num_label, length_decoder, input_size_encoder] * [batch, 1, input_size_encoder, length_encoder]
# output shape [batch, num_label, length_decoder, length_encoder]
output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
output = output + out_d + out_e + self.b
else:
output = out_d + out_e + self.b
if mask_d is not None and mask_e is not None:
output = output * mask_d.unsqueeze(1).unsqueeze(3) * mask_e.unsqueeze(1).unsqueeze(2)
return output
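# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a hedged toy example
# checking the documented output shape [batch, num_labels, length_decoder,
# length_encoder] with random encoder/decoder states.
def _demo_biaffine_attention():
    batch, length_decoder, length_encoder = 2, 5, 7
    decoder_dim, encoder_dim, num_labels = 16, 12, 3
    attention = BiaffineAttention(encoder_dim, decoder_dim, num_labels=num_labels)
    input_d = torch.randn(batch, length_decoder, decoder_dim)
    input_e = torch.randn(batch, length_encoder, encoder_dim)
    energy = attention(input_d, input_e)
    assert energy.shape == (batch, num_labels, length_decoder, length_encoder)
    return energy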
|
PypiClean
|
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/jsdom/lib/jsdom/living/nodes/HTMLFormElement-impl.js
|
"use strict";
const DOMException = require("domexception/webidl2js-wrapper");
const { serializeURL } = require("whatwg-url");
const HTMLElementImpl = require("jsdom/lib/jsdom/living/nodes/HTMLElement-impl").implementation;
const { domSymbolTree } = require("jsdom/lib/jsdom/living/helpers/internal-constants");
const { fireAnEvent } = require("jsdom/lib/jsdom/living/helpers/events");
const { isListed, isSubmittable, isSubmitButton } = require("jsdom/lib/jsdom/living/helpers/form-controls");
const HTMLCollection = require("jsdom/lib/jsdom/living/generated/HTMLCollection");
const notImplemented = require("jsdom/lib/jsdom/browser/not-implemented");
const { parseURLToResultingURLRecord } = require("jsdom/lib/jsdom/living/helpers/document-base-url");
const encTypes = new Set([
"application/x-www-form-urlencoded",
"multipart/form-data",
"text/plain"
]);
const methods = new Set([
"get",
"post",
"dialog"
]);
const constraintValidationPositiveResult = Symbol("positive");
const constraintValidationNegativeResult = Symbol("negative");
class HTMLFormElementImpl extends HTMLElementImpl {
_descendantAdded(parent, child) {
const form = this;
for (const el of domSymbolTree.treeIterator(child)) {
if (typeof el._changedFormOwner === "function") {
el._changedFormOwner(form);
}
}
super._descendantAdded.apply(this, arguments);
}
_descendantRemoved(parent, child) {
for (const el of domSymbolTree.treeIterator(child)) {
if (typeof el._changedFormOwner === "function") {
el._changedFormOwner(null);
}
}
super._descendantRemoved.apply(this, arguments);
}
// https://html.spec.whatwg.org/multipage/forms.html#dom-form-elements
get elements() {
// TODO: Return a HTMLFormControlsCollection
return HTMLCollection.createImpl(this._globalObject, [], {
element: this,
query: () => domSymbolTree.treeToArray(this, {
filter: node => isListed(node) && (node._localName !== "input" || node.type !== "image")
})
});
}
get length() {
return this.elements.length;
}
_doSubmit() {
if (!this.isConnected) {
return;
}
this.submit();
}
submit() {
if (!fireAnEvent("submit", this, undefined, { bubbles: true, cancelable: true })) {
return;
}
notImplemented("HTMLFormElement.prototype.submit", this._ownerDocument._defaultView);
}
requestSubmit(submitter = undefined) {
if (submitter !== undefined) {
if (!isSubmitButton(submitter)) {
throw new TypeError("The specified element is not a submit button");
}
if (submitter.form !== this) {
throw DOMException.create(this._globalObject, [
"The specified element is not owned by this form element",
"NotFoundError"
]);
}
}
if (!fireAnEvent("submit", this, undefined, { bubbles: true, cancelable: true })) {
return;
}
notImplemented("HTMLFormElement.prototype.requestSubmit", this._ownerDocument._defaultView);
}
_doReset() {
if (!this.isConnected) {
return;
}
this.reset();
}
reset() {
if (!fireAnEvent("reset", this, undefined, { bubbles: true, cancelable: true })) {
return;
}
for (const el of this.elements) {
if (typeof el._formReset === "function") {
el._formReset();
}
}
}
get method() {
let method = this.getAttributeNS(null, "method");
if (method) {
method = method.toLowerCase();
}
if (methods.has(method)) {
return method;
}
return "get";
}
set method(V) {
this.setAttributeNS(null, "method", V);
}
get enctype() {
let type = this.getAttributeNS(null, "enctype");
if (type) {
type = type.toLowerCase();
}
if (encTypes.has(type)) {
return type;
}
return "application/x-www-form-urlencoded";
}
set enctype(V) {
this.setAttributeNS(null, "enctype", V);
}
get action() {
const attributeValue = this.getAttributeNS(null, "action");
if (attributeValue === null || attributeValue === "") {
return this._ownerDocument.URL;
}
const urlRecord = parseURLToResultingURLRecord(attributeValue, this._ownerDocument);
if (urlRecord === null) {
return attributeValue;
}
return serializeURL(urlRecord);
}
set action(V) {
this.setAttributeNS(null, "action", V);
}
// If the checkValidity() method is invoked, the user agent must statically validate the
// constraints of the form element, and return true if the constraint validation returned
// a positive result, and false if it returned a negative result.
checkValidity() {
return this._staticallyValidateConstraints().result === constraintValidationPositiveResult;
}
// https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#interactively-validate-the-constraints
reportValidity() {
return this.checkValidity();
}
// https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#statically-validate-the-constraints
_staticallyValidateConstraints() {
const controls = [];
for (const el of domSymbolTree.treeIterator(this)) {
if (el.form === this && isSubmittable(el)) {
controls.push(el);
}
}
const invalidControls = [];
for (const control of controls) {
if (control._isCandidateForConstraintValidation() && !control._satisfiesConstraints()) {
invalidControls.push(control);
}
}
if (invalidControls.length === 0) {
return { result: constraintValidationPositiveResult };
}
const unhandledInvalidControls = [];
for (const invalidControl of invalidControls) {
const notCancelled = fireAnEvent("invalid", invalidControl, undefined, { cancelable: true });
if (notCancelled) {
unhandledInvalidControls.push(invalidControl);
}
}
return { result: constraintValidationNegativeResult, unhandledInvalidControls };
}
}
module.exports = {
implementation: HTMLFormElementImpl
};
|
PypiClean
|
/srmd-ncnn-vulkan-python-1.0.2.tar.gz/srmd-ncnn-vulkan-python-1.0.2/srmd_ncnn_vulkan_python/srmd-ncnn-vulkan/src/ncnn/python/pybind11/docs/advanced/pycpp/numpy.rst
|
.. _numpy:
NumPy
#####
Buffer protocol
===============
Python supports an extremely general and convenient approach for exchanging
data between plugin libraries. Types can expose a buffer view [#f2]_, which
provides fast direct access to the raw internal data representation. Suppose we
want to bind the following simplistic Matrix class:
.. code-block:: cpp
class Matrix {
public:
Matrix(size_t rows, size_t cols) : m_rows(rows), m_cols(cols) {
m_data = new float[rows*cols];
}
float *data() { return m_data; }
size_t rows() const { return m_rows; }
size_t cols() const { return m_cols; }
private:
size_t m_rows, m_cols;
float *m_data;
};
The following binding code exposes the ``Matrix`` contents as a buffer object,
making it possible to cast Matrices into NumPy arrays. It is even possible to
completely avoid copy operations with Python expressions like
``np.array(matrix_instance, copy = False)``.
.. code-block:: cpp
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
sizeof(float), /* Size of one scalar */
py::format_descriptor<float>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.cols() }, /* Buffer dimensions */
{ sizeof(float) * m.cols(), /* Strides (in bytes) for each index */
sizeof(float) }
);
});
Supporting the buffer protocol in a new type involves specifying the special
``py::buffer_protocol()`` tag in the ``py::class_`` constructor and calling the
``def_buffer()`` method with a lambda function that creates a
``py::buffer_info`` description record on demand describing a given matrix
instance. The contents of ``py::buffer_info`` mirror the Python buffer protocol
specification.
.. code-block:: cpp
struct buffer_info {
void *ptr;
py::ssize_t itemsize;
std::string format;
py::ssize_t ndim;
std::vector<py::ssize_t> shape;
std::vector<py::ssize_t> strides;
};
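The zero-copy conversion mentioned above can then be exercised from the Python
side. The following is only a sketch; it assumes the bindings are compiled into
a module named ``example``, which is an assumption made here for illustration.

.. code-block:: python

    import numpy as np
    import example  # hypothetical module name for the Matrix bindings above

    m = example.Matrix(3, 4)        # 3x4 float matrix allocated on the C++ side
    a = np.array(m, copy=False)     # wraps the same storage, no copy is made
    assert a.shape == (3, 4) and a.dtype == np.float32
    a[0, 0] = 1.0                   # writes through to the C++ buffer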
To create a C++ function that can take a Python buffer object as an argument,
simply use the type ``py::buffer`` as one of its arguments. Buffers can exist
in a great variety of configurations, hence some safety checks are usually
necessary in the function body. Below, you can see a basic example of how to
define a custom constructor for the Eigen double precision matrix
(``Eigen::MatrixXd``) type, which supports initialization from compatible
buffer objects (e.g. a NumPy matrix).
.. code-block:: cpp
/* Bind MatrixXd (or some other Eigen type) to Python */
typedef Eigen::MatrixXd Matrix;
typedef Matrix::Scalar Scalar;
constexpr bool rowMajor = Matrix::Flags & Eigen::RowMajorBit;
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def(py::init([](py::buffer b) {
typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> Strides;
/* Request a buffer descriptor from Python */
py::buffer_info info = b.request();
/* Some sanity checks ... */
if (info.format != py::format_descriptor<Scalar>::format())
throw std::runtime_error("Incompatible format: expected a double array!");
if (info.ndim != 2)
throw std::runtime_error("Incompatible buffer dimension!");
auto strides = Strides(
info.strides[rowMajor ? 0 : 1] / (py::ssize_t)sizeof(Scalar),
info.strides[rowMajor ? 1 : 0] / (py::ssize_t)sizeof(Scalar));
auto map = Eigen::Map<Matrix, 0, Strides>(
static_cast<Scalar *>(info.ptr), info.shape[0], info.shape[1], strides);
return Matrix(map);
}));
For reference, the ``def_buffer()`` call for this Eigen data type should look
as follows:
.. code-block:: cpp
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
sizeof(Scalar), /* Size of one scalar */
py::format_descriptor<Scalar>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.cols() }, /* Buffer dimensions */
{ sizeof(Scalar) * (rowMajor ? m.cols() : 1),
sizeof(Scalar) * (rowMajor ? 1 : m.rows()) }
/* Strides (in bytes) for each index */
);
})
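From the Python side, the custom constructor above accepts any compatible
buffer, e.g. a double precision NumPy array. A minimal sketch, assuming the
``py::init`` and ``def_buffer()`` snippets above are both attached to the class
and compiled into a hypothetical module ``example``:

.. code-block:: python

    import numpy as np
    import example  # hypothetical module name

    a = np.arange(6, dtype=np.float64).reshape(2, 3)
    m = example.Matrix(a)           # dispatched to the py::init([](py::buffer b) { ... }) overload
    b = np.array(m, copy=False)     # round-trip through the def_buffer() lambda shown above
    assert b.shape == (2, 3)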
For a much easier approach of binding Eigen types (although with some
limitations), refer to the section on :doc:`/advanced/cast/eigen`.
.. seealso::
The file :file:`tests/test_buffers.cpp` contains a complete example
that demonstrates using the buffer protocol with pybind11 in more detail.
.. [#f2] http://docs.python.org/3/c-api/buffer.html
Arrays
======
By exchanging ``py::buffer`` with ``py::array`` in the above snippet, we can
restrict the function so that it only accepts NumPy arrays (rather than any
type of Python object satisfying the buffer protocol).
In many situations, we want to define a function which only accepts a NumPy
array of a certain data type. This is possible via the ``py::array_t<T>``
template. For instance, the following function requires the argument to be a
NumPy array containing double precision values.
.. code-block:: cpp
void f(py::array_t<double> array);
When it is invoked with a different type (e.g. an integer or a list of
integers), the binding code will attempt to cast the input into a NumPy array
of the requested type. This feature requires the :file:`pybind11/numpy.h`
header to be included. Note that :file:`pybind11/numpy.h` does not depend on
the NumPy headers, and thus can be used without declaring a build-time
dependency on NumPy; NumPy>=1.7.0 is a runtime dependency.
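Assuming ``f`` is exposed with something like ``m.def("f", &f)`` in a module
named ``example`` (both names are illustrative assumptions), the conversion
behaviour looks like this from Python:

.. code-block:: python

    import numpy as np
    import example  # hypothetical module exposing f via m.def("f", &f)

    example.f(np.array([1.0, 2.0, 3.0]))  # a float64 array is passed through as-is
    example.f([1, 2, 3])                  # a list of ints is cast to a float64 array
    example.f(7)                          # an integer is cast to a 0-d float64 array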
Data in NumPy arrays is not guaranteed to be packed in a dense manner;
furthermore, entries can be separated by arbitrary column and row strides.
Sometimes, it can be useful to require a function to only accept dense arrays
using either the C (row-major) or Fortran (column-major) ordering. This can be
accomplished via a second template argument with values ``py::array::c_style``
or ``py::array::f_style``.
.. code-block:: cpp
void f(py::array_t<double, py::array::c_style | py::array::forcecast> array);
The ``py::array::forcecast`` argument is the default value of the second
template parameter, and it ensures that non-conforming arguments are converted
into an array satisfying the specified requirements instead of trying the next
function overload.
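In terms of the hypothetical ``example.f`` binding from the previous snippet,
``forcecast`` means a non-conforming argument is converted into a dense,
C-ordered ``float64`` array for the call instead of falling through to the next
overload; the original object is left untouched. A sketch:

.. code-block:: python

    import numpy as np
    import example  # hypothetical module; f declared with c_style | forcecast

    a = np.asfortranarray(np.ones((3, 3)))
    example.f(a)                  # forcecast makes a C-contiguous copy for the call
    example.f([[1, 2], [3, 4]])   # nested list is converted to a dense float64 array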
Structured types
================
In order for ``py::array_t`` to work with structured (record) types, we first
need to register the memory layout of the type. This can be done via
``PYBIND11_NUMPY_DTYPE`` macro, called in the plugin definition code, which
expects the type followed by field names:
.. code-block:: cpp
struct A {
int x;
double y;
};
struct B {
int z;
A a;
};
// ...
PYBIND11_MODULE(test, m) {
// ...
PYBIND11_NUMPY_DTYPE(A, x, y);
PYBIND11_NUMPY_DTYPE(B, z, a);
/* now both A and B can be used as template arguments to py::array_t */
}
The structure should consist of fundamental arithmetic types, ``std::complex``,
previously registered substructures, and arrays of any of the above. Both C++
arrays and ``std::array`` are supported. While there is a static assertion to
prevent many types of unsupported structures, it is still the user's
responsibility to use only "plain" structures that can be safely manipulated as
raw memory without violating invariants.
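On the Python side, an array passed to a ``py::array_t<A>`` or ``py::array_t<B>``
argument needs a structured dtype whose fields mirror the registered struct. The
sketch below shows what such a dtype typically looks like; the exact itemsize and
padding depend on the compiler, so in practice the dtype is usually taken from an
array returned by the extension itself.

.. code-block:: python

    import numpy as np

    # Hypothetical Python-side counterparts of the registered structs A and B.
    A_dtype = np.dtype([("x", np.int32), ("y", np.float64)], align=True)
    B_dtype = np.dtype([("z", np.int32), ("a", A_dtype)], align=True)

    arr = np.zeros(4, dtype=A_dtype)
    arr["x"] = [1, 2, 3, 4]
    arr["y"] = 0.5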
Vectorizing functions
=====================
Suppose we want to bind a function with the following signature to Python so
that it can process arbitrary NumPy array arguments (vectors, matrices, general
N-D arrays) in addition to its normal arguments:
.. code-block:: cpp
double my_func(int x, float y, double z);
After including the ``pybind11/numpy.h`` header, this is extremely simple:
.. code-block:: cpp
m.def("vectorized_func", py::vectorize(my_func));
Invoking the function like below causes 4 calls to be made to ``my_func`` with
each of the array elements. The significant advantage of this compared to
solutions like ``numpy.vectorize()`` is that the loop over the elements runs
entirely on the C++ side and can be crunched down into a tight, optimized loop
by the compiler. The result is returned as a NumPy array of type
``numpy.dtype.float64``.
.. code-block:: pycon
>>> x = np.array([[1, 3],[5, 7]])
>>> y = np.array([[2, 4],[6, 8]])
>>> z = 3
>>> result = vectorized_func(x, y, z)
The scalar argument ``z`` is transparently replicated 4 times. The input
arrays ``x`` and ``y`` are automatically converted into the right types (they
are of type ``numpy.dtype.int64`` but need to be ``numpy.dtype.int32`` and
``numpy.dtype.float32``, respectively).
.. note::
Only arithmetic, complex, and POD types passed by value or by ``const &``
reference are vectorized; all other arguments are passed through as-is.
Functions taking rvalue reference arguments cannot be vectorized.
In cases where the computation is too complicated to be reduced to
``vectorize``, it will be necessary to create and access the buffer contents
manually. The following snippet contains a complete example that shows how this
works (the code is somewhat contrived, since it could have been done more
simply using ``vectorize``).
.. code-block:: cpp
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;
py::array_t<double> add_arrays(py::array_t<double> input1, py::array_t<double> input2) {
py::buffer_info buf1 = input1.request(), buf2 = input2.request();
if (buf1.ndim != 1 || buf2.ndim != 1)
throw std::runtime_error("Number of dimensions must be one");
if (buf1.size != buf2.size)
throw std::runtime_error("Input shapes must match");
/* No pointer is passed, so NumPy will allocate the buffer */
auto result = py::array_t<double>(buf1.size);
py::buffer_info buf3 = result.request();
double *ptr1 = static_cast<double *>(buf1.ptr);
double *ptr2 = static_cast<double *>(buf2.ptr);
double *ptr3 = static_cast<double *>(buf3.ptr);
for (size_t idx = 0; idx < buf1.shape[0]; idx++)
ptr3[idx] = ptr1[idx] + ptr2[idx];
return result;
}
PYBIND11_MODULE(test, m) {
m.def("add_arrays", &add_arrays, "Add two NumPy arrays");
}
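Assuming the snippet is compiled as a module named ``test`` (matching the
``PYBIND11_MODULE(test, m)`` line above), usage from Python looks like this:

.. code-block:: pycon

    >>> import numpy as np
    >>> import test
    >>> test.add_arrays(np.array([1.0, 2.0]), np.array([3.0, 4.0]))
    array([4., 6.])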
.. seealso::
The file :file:`tests/test_numpy_vectorize.cpp` contains a complete
example that demonstrates using :func:`vectorize` in more detail.
Direct access
=============
For performance reasons, particularly when dealing with very large arrays, it
is often desirable to directly access array elements without internal checking
of dimensions and bounds on every access when indices are known to be already
valid. To avoid such checks, the ``array`` class and ``array_t<T>`` template
class offer an unchecked proxy object that can be used for this unchecked
access through the ``unchecked<N>`` and ``mutable_unchecked<N>`` methods,
where ``N`` gives the required dimensionality of the array:
.. code-block:: cpp
m.def("sum_3d", [](py::array_t<double> x) {
auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable
double sum = 0;
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
for (py::ssize_t k = 0; k < r.shape(2); k++)
sum += r(i, j, k);
return sum;
});
m.def("increment_3d", [](py::array_t<double> x) {
auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
for (py::ssize_t k = 0; k < r.shape(2); k++)
r(i, j, k) += 1.0;
}, py::arg().noconvert());
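A quick Python-side check of the two bindings above (a sketch, assuming they are
compiled into a hypothetical module ``example``): ``sum_3d`` accepts any 3-D
array convertible to ``float64``, while ``increment_3d`` insists on a writeable
``float64`` array because of ``py::arg().noconvert()``.

.. code-block:: pycon

    >>> import numpy as np
    >>> import example
    >>> x = np.ones((2, 3, 4))
    >>> example.sum_3d(x)
    24.0
    >>> example.increment_3d(x)   # modifies x in place
    >>> float(x[0, 0, 0])
    2.0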
To obtain the proxy from an ``array`` object, you must specify both the data
type and number of dimensions as template arguments, such as ``auto r =
myarray.mutable_unchecked<float, 2>()``.
If the number of dimensions is not known at compile time, you can omit the
dimensions template parameter (i.e. calling ``arr_t.unchecked()`` or
``arr.unchecked<T>()``). This will give you a proxy object that works in the
same way, but results in less optimizable code and thus a small efficiency
loss in tight loops.
Note that the returned proxy object directly references the array's data, and
only reads its shape, strides, and writeable flag when constructed. You must
take care to ensure that the referenced array is not destroyed or reshaped for
the duration of the returned object, typically by limiting the scope of the
returned instance.
The returned proxy object supports some of the same methods as ``py::array`` so
that it can be used as a drop-in replacement for some existing, index-checked
uses of ``py::array``:
- ``r.ndim()`` returns the number of dimensions.
- ``r.data(1, 2, ...)`` and ``r.mutable_data(1, 2, ...)`` return a pointer to
the ``const T`` or ``T`` data, respectively, at the given indices. The
latter is only available to proxies obtained via ``a.mutable_unchecked()``.
- ``r.itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``.
- ``r.shape(n)`` returns the size of dimension ``n``.
- ``r.size()`` returns the total number of elements (i.e. the product of the shapes).
- ``r.nbytes()`` returns the number of bytes used by the referenced elements
(i.e. ``itemsize()`` times ``size()``).
.. seealso::
The file :file:`tests/test_numpy_array.cpp` contains additional examples
demonstrating the use of this feature.
Ellipsis
========
Python 3 provides a convenient ``...`` ellipsis notation that is often used to
slice multidimensional arrays. For instance, the following snippet extracts the
middle dimensions of a tensor with the first and last index set to zero.
In Python 2, the syntactic sugar ``...`` is not available, but the singleton
``Ellipsis`` (of type ``ellipsis``) can still be used directly.
.. code-block:: python
a = ...  # placeholder for a NumPy array
b = a[0, ..., 0]
The ``py::ellipsis()`` function can be used to perform the same
operation on the C++ side:
.. code-block:: cpp
py::array a = /* A NumPy array */;
py::array b = a[py::make_tuple(0, py::ellipsis(), 0)];
.. versionchanged:: 2.6
``py::ellipsis()`` is now also available in Python 2.
Memory view
===========
When we simply want to provide direct access to a C/C++ buffer without a
concrete class object, we can return a ``memoryview`` object. Suppose we wish
to expose a ``memoryview`` for a 2x4 uint8_t array; we can do the following:
.. code-block:: cpp
const uint8_t buffer[] = {
0, 1, 2, 3,
4, 5, 6, 7
};
m.def("get_memoryview2d", []() {
return py::memoryview::from_buffer(
buffer, // buffer pointer
{ 2, 4 }, // shape (rows, cols)
{ sizeof(uint8_t) * 4, sizeof(uint8_t) } // strides in bytes
);
})
This approach is meant for providing a ``memoryview`` for a C/C++ buffer not
managed by Python. The user is responsible for managing the lifetime of the
buffer. Using a ``memoryview`` created in this way after deleting the buffer on
the C++ side results in undefined behavior.
We can also use ``memoryview::from_memory`` for a simple 1D contiguous buffer:
.. code-block:: cpp
m.def("get_memoryview1d", []() {
return py::memoryview::from_memory(
buffer, // buffer pointer
sizeof(uint8_t) * 8 // buffer size
);
})
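A sketch of the Python side, assuming both bindings are compiled into a
hypothetical module ``example``:

.. code-block:: pycon

    >>> import numpy as np
    >>> import example
    >>> np.asarray(example.get_memoryview2d())
    array([[0, 1, 2, 3],
           [4, 5, 6, 7]], dtype=uint8)
    >>> bytes(example.get_memoryview1d())
    b'\x00\x01\x02\x03\x04\x05\x06\x07'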
.. note::
``memoryview::from_memory`` is not available in Python 2.
.. versionchanged:: 2.6
``memoryview::from_memory`` added.
/hana_ml-2.17.23080800-py3-none-any.whl/hana_ml/algorithms/pal/tsa/exponential_smoothing.py
import logging
import uuid
import warnings
try:
import pyodbc
except ImportError as error:
pass
from hdbcli import dbapi
from hana_ml.dataframe import quotename
from hana_ml.algorithms.pal.tsa.utility import _convert_index_from_timestamp_to_int, _is_index_int, _col_index_check
from hana_ml.algorithms.pal.tsa.utility import _get_forecast_starttime_and_timedelta, _delete_none_key_in_dict
from hana_ml.algorithms.pal.pal_base import (
PALBase,
pal_param_register,
arg,
ParameterTable,
ListOfStrings,
ListOfTuples,
try_drop,
require_pal_usable
)
from hana_ml.algorithms.pal.utility import check_pal_function_exist, _map_param
from hana_ml.algorithms.pal.unified_exponentialsmoothing import UnifiedExponentialSmoothing
from hana_ml.visualizers.report_builder import Page
from hana_ml.visualizers.time_series_report import TimeSeriesExplainer
logger = logging.getLogger(__name__)
class _ExponentialSmoothingBase(PALBase):
trend_test_map = {'mk': 1, 'difference-sign': 2}
def __init__(self,
model_selection=None,# Auto ESM
forecast_model_name=None,# Auto ESM
optimizer_time_budget=None,# Auto ESM
max_iter=None,# Auto ESM
optimizer_random_seed=None,# Auto ESM
thread_ratio=None,# Auto ESM
alpha=None,
beta=None,
gamma=None,
phi=None,
forecast_num=None,
seasonal_period=None,
seasonal=None,
initial_method=None,
training_ratio=None,
damped=None,
accuracy_measure=None,
seasonality_criterion=None,# Auto ESM
trend_test_method=None,# Auto ESM
trend_test_alpha=None,# Auto ESM
alpha_min=None, # Auto ESM
beta_min=None,# Auto ESM
gamma_min=None,# Auto ESM
phi_min=None,# Auto ESM
alpha_max=None,# Auto ESM
beta_max=None,# Auto ESM
gamma_max=None,# Auto ESM
phi_max=None,# Auto ESM
prediction_confidence_1=None,
prediction_confidence_2=None,
level_start=None,
trend_start=None,
season_start=None,
delta=None,#SESM
adaptive_method=None,#SESM
ignore_zero=None,
expost_flag=None,
method=None
):
super(_ExponentialSmoothingBase, self).__init__()
self.model_selection = self._arg('model_selection', model_selection, (int, bool))
self.forecast_model_name = self._arg('forecast_model_name', forecast_model_name, str)
self.optimizer_time_budget = self._arg('optimizer_time_budget', optimizer_time_budget, int)
self.max_iter = self._arg('max_iter', max_iter, int)
self.optimizer_random_seed = self._arg('optimizer_random_seed', optimizer_random_seed, int)
self.thread_ratio = self._arg('thread_ratio', thread_ratio, float)
self.alpha = self._arg('alpha', alpha, float)
self.beta = self._arg('beta', beta, float)
self.gamma = self._arg('gamma', gamma, float)
self.phi = self._arg('phi', phi, float)
self.forecast_num = self._arg('forecast_num', forecast_num, int)
self.seasonal_period = self._arg('seasonal_period', seasonal_period, int)
self.seasonal = self._arg('seasonal', seasonal, (int, str))
if isinstance(self.seasonal, str):
self.seasonal = self._arg('seasonal', seasonal,
dict(multiplicative=0, additive=1))
self.initial_method = self._arg('initial_method', initial_method, int)
self.training_ratio = self._arg('training_ratio', training_ratio, float)
self.damped = self._arg('damped', damped, (int, bool))
self.seasonality_criterion = self._arg('seasonality_criterion', seasonality_criterion, float)
self.trend_test_method = self._arg('trend_test_method', trend_test_method, (int, str))
if isinstance(self.trend_test_method, str):
self.trend_test_method = self._arg('trend_test_method',
trend_test_method,
self.trend_test_map)
self.trend_test_alpha = self._arg('trend_test_alpha', trend_test_alpha, float)
self.alpha_min = self._arg('alpha_min', alpha_min, float)
self.beta_min = self._arg('beta_min', beta_min, float)
self.gamma_min = self._arg('gamma_min', gamma_min, float)
self.phi_min = self._arg('phi_min', phi_min, float)
self.alpha_max = self._arg('alpha_max', alpha_max, float)
self.beta_max = self._arg('beta_max', beta_max, float)
self.gamma_max = self._arg('gamma_max', gamma_max, float)
self.phi_max = self._arg('phi_max', phi_max, float)
self.prediction_confidence_1 = self._arg('prediction_confidence_1', prediction_confidence_1, float)
self.prediction_confidence_2 = self._arg('prediction_confidence_2', prediction_confidence_2, float)
self.level_start = self._arg('level_start', level_start, float)
self.trend_start = self._arg('trend_start', trend_start, float)
self.delta = self._arg('delta', delta, float)
self.adaptive_method = self._arg('adaptive_method', adaptive_method, bool)
self.ignore_zero = self._arg('ignore_zero', ignore_zero, bool)
self.expost_flag = self._arg('expost_flag', expost_flag, bool)
self.method = self._arg('method', method, int)
# accuracy_measure for single/double/triple exp smooth
accuracy_measure_list = {"mpe":"mpe", "mse":"mse", "rmse":"rmse", "et":"et",
"mad":"mad", "mase":"mase", "wmape":"wmape",
"smape":"smape", "mape":"mape"}
if accuracy_measure is not None:
if isinstance(accuracy_measure, str):
accuracy_measure = [accuracy_measure]
for acc in accuracy_measure:
self._arg('accuracy_measure', acc.lower(), accuracy_measure_list)
self.accuracy_measure = [acc.upper() for acc in accuracy_measure]
else:
self.accuracy_measure = None
#check self.season_start which is a list of tuple. Each tuple has two elements and 1st element is int and 2nd is float
self.season_start = self._arg('season_start', season_start, list)
if self.season_start is not None:
if all(isinstance(elm, tuple) for elm in self.season_start):
if not all(len(elm) == 2 for elm in self.season_start):
msg = "If 'season_start' is a list of tuples, the each tuple " +\
"must be of length 2."
logger.error(msg)
raise ValueError(msg)
for element in self.season_start:
if not isinstance(element[0], int):
msg = ('The type of the first element of the tuple of season_start should be int!')
logger.error(msg)
raise ValueError(msg)
if not isinstance(element[1], (float, int)):
msg = ('The type of the second element of the tuple of season_start should be float!')
logger.error(msg)
raise ValueError(msg)
elif not all(isinstance(elm, (float, int)) for elm in self.season_start):
msg = "If 'season_start' is a not a list of tuples, then it must be "+\
"a list of numerical values."
logger.error(msg)
raise ValueError(msg)
self.is_index_int = None
self.forecast_start = None
self.timedelta = None
def _fit_predict(self, exp_smooth_function, data, key, endog):
conn = data.connection_context
require_pal_usable(conn)
setattr(self, "training_data", data)
setattr(self, "key", key)
setattr(self, "endog", endog)
setattr(self, "exog", None)
if not self._disable_hana_execution:
cols = data.columns
if len(cols) < 2:
msg = ("Input data should contain at least 2 columns: " +
"one for ID, another for raw data.")
logger.error(msg)
raise ValueError(msg)
index = data.index
key = self._arg('key', key, str)
if key is not None and key not in cols:
msg = ('Please select key from name of columns!')
logger.error(msg)
raise ValueError(msg)
if index is not None:
if key is None:
if not isinstance(index, str):
key = cols[0]
warn_msg = "The index of data is not a single column and key is None, so the first column of data is used as key!"
warnings.warn(message=warn_msg)
else:
key = index
else:
if key != index:
warn_msg = "Discrepancy between the designated key column '{}' ".format(key) +\
"and the designated index column '{}'.".format(index)
warnings.warn(message=warn_msg)
else:
if key is None:
key = cols[0]
cols.remove(key)
endog = self._arg('endog', endog, str)
if endog is not None:
if endog not in cols:
msg = ('Please select endog from name of columns!')
logger.error(msg)
raise ValueError(msg)
else:
endog = cols[0]
data_ = data[[key] + [endog]]
self.is_index_int = _is_index_int(data_, key)
if not self.is_index_int:
data_ = _convert_index_from_timestamp_to_int(data_, key)
try:
self.forecast_start, self.timedelta = _get_forecast_starttime_and_timedelta(data, key, self.is_index_int)
except Exception as err:
logger.warning(err)
pass
else:
data_ = data
function_map = {1:'PAL_SINGLE_EXPSMOOTH',
2:'PAL_DOUBLE_EXPSMOOTH',
3:'PAL_TRIPLE_EXPSMOOTH',
4:'PAL_AUTO_EXPSMOOTH',
5:'PAL_BROWN_EXPSMOOTH',
6:'PAL_CROSTON'}
unique_id = str(uuid.uuid1()).replace('-', '_').upper()
outputs = ['FORECAST', 'STATISTICS']
outputs = ['#PAL_EXP_SMOOTHING_{}_TBL_{}_{}'.format(name, self.id, unique_id)
for name in outputs]
forecast_tbl, stats_tbl = outputs
param_rows = [
('MODELSELECTION', self.model_selection, None, None),
('FORECAST_MODEL_NAME', None, None, self.forecast_model_name),
('OPTIMIZER_TIME_BUDGET', self.optimizer_time_budget, None, None),
('MAX_ITERATION', self.max_iter, None, None),
('OPTIMIZER_RANDOM_SEED', self.optimizer_random_seed, None, None),
('THREAD_RATIO', None, self.thread_ratio, None),
('ALPHA', None, self.alpha, None),
('BETA', None, self.beta, None),
('GAMMA', None, self.gamma, None),
('PHI', None, self.phi, None),
('FORECAST_NUM', self.forecast_num, None, None),
('CYCLE', self.seasonal_period, None, None),
('SEASONAL', self.seasonal, None, None),
('INITIAL_METHOD', self.initial_method, None, None),
('TRAINING_RATIO', None, self.training_ratio, None),
('DAMPED', self.damped, None, None),
('SEASONALITY_CRITERION', None, self.seasonality_criterion, None),
('TREND_TEST_METHOD', self.trend_test_method, None, None),
('TREND_TEST_ALPHA', None, self.trend_test_alpha, None),
('ALPHA_MIN', None, self.alpha_min, None),
('BETA_MIN', None, self.beta_min, None),
('GAMMA_MIN', None, self.gamma_min, None),
('PHI_MIN', None, self.phi_min, None),
('ALPHA_MAX', None, self.alpha_max, None),
('BETA_MAX', None, self.beta_max, None),
('GAMMA_MAX', None, self.gamma_max, None),
('PHI_MAX', None, self.phi_max, None),
('PREDICTION_CONFIDENCE_1', None, self.prediction_confidence_1, None),
('PREDICTION_CONFIDENCE_2', None, self.prediction_confidence_2, None),
('LEVEL_START', None, self.level_start, None),
('TREND_START', None, self.trend_start, None),
('DELTA', None, self.delta, None),#SESM
('ADAPTIVE_METHOD', self.adaptive_method, None, None),#SESM
('IGNORE_ZERO', self.ignore_zero, None, None),
('EXPOST_FLAG', self.expost_flag, None, None),
('METHOD', self.method, None, None)
]
if self.accuracy_measure is not None:
if isinstance(self.accuracy_measure, str):
self.accuracy_measure = [self.accuracy_measure]
for acc_measure in self.accuracy_measure:
param_rows.extend([('ACCURACY_MEASURE', None, None, acc_measure)])
param_rows.extend([('MEASURE_NAME', None, None, acc_measure)])
if self.season_start is not None:
# 'season_start' was validated in __init__ as either a list of (cycle index, value) tuples or a plain list of values.
if all(isinstance(element, tuple) for element in self.season_start):
param_rows.extend([('SEASON_START', element[0], element[1], None)
for element in self.season_start])
else:
param_rows.extend([('SEASON_START', idx + 1, val, None)
for idx, val in enumerate(self.season_start)])
pal_function = function_map[exp_smooth_function]
if exp_smooth_function == 5:
if check_pal_function_exist(conn, 'BROWN%INTERVAL%', like=True):
pal_function = 'PAL_BROWN_EXPSMOOTH_INTERVAL'
try:
self._call_pal_auto(conn,
pal_function,
data_,
ParameterTable().with_data(param_rows),
forecast_tbl,
stats_tbl)
except dbapi.Error as db_err:
logger.exception(str(db_err))
try_drop(conn, forecast_tbl)
try_drop(conn, stats_tbl)
raise
except pyodbc.Error as db_err:
logger.exception(str(db_err.args[1]))
try_drop(conn, forecast_tbl)
try_drop(conn, stats_tbl)
raise
self.stats_ = conn.table(stats_tbl)
self.forecast_ = conn.table(forecast_tbl)
if not (self.is_index_int or self._disable_hana_execution):
if exp_smooth_function < 5:
fct_ = conn.sql("""
SELECT ADD_SECONDS('{0}', ({1}-{9}) * {2}) AS {10},
{4},
{5},
{6},
{7},
{8}
FROM ({3})
""".format(self.forecast_start,
quotename(self.forecast_.columns[0]),
self.timedelta,
self.forecast_.select_statement,
quotename(self.forecast_.columns[1]),
quotename(self.forecast_.columns[2]),
quotename(self.forecast_.columns[3]),
quotename(self.forecast_.columns[4]),
quotename(self.forecast_.columns[5]),
data.count() + 1,
quotename(key)))
if exp_smooth_function == 5:
if pal_function == 'PAL_BROWN_EXPSMOOTH_INTERVAL':
fct_ = conn.sql("""
SELECT ADD_SECONDS('{0}', ({1}-{9}) * {2}) AS {10},
{4},
{5},
{6},
{7},
{8}
FROM ({3})
""".format(self.forecast_start,
quotename(self.forecast_.columns[0]),
self.timedelta,
self.forecast_.select_statement,
quotename(self.forecast_.columns[1]),
quotename(self.forecast_.columns[2]),
quotename(self.forecast_.columns[3]),
quotename(self.forecast_.columns[4]),
quotename(self.forecast_.columns[5]),
data.count() + 1,
quotename(key)))
else:
fct_ = conn.sql("""
SELECT ADD_SECONDS('{0}', ({1}-{5}) * {2}) AS {6},
{4} FROM ({3})
""".format(self.forecast_start,
quotename(self.forecast_.columns[0]),
self.timedelta,
self.forecast_.select_statement,
quotename(self.forecast_.columns[1]),
data.count() + 1,
quotename(key)))
if exp_smooth_function == 6:
fct_ = conn.sql("""
SELECT ADD_SECONDS('{0}', ({1}-{6}) * {2}) AS {5},
{4} FROM ({3})
""".format(self.forecast_start,
quotename(self.forecast_.columns[0]),
self.timedelta,
self.forecast_.select_statement,
quotename(self.forecast_.columns[1]),
quotename(key),
data.count() + 1))
self.forecast_ = fct_
setattr(self, "forecast_result", self.forecast_)
return self.forecast_
def build_report(self):
r"""
Generate time series report.
"""
from hana_ml.visualizers.time_series_report_template_helper import TimeSeriesTemplateReportHelper #pylint: disable=cyclic-import
if self.key is None:
self.key = self.training_data.columns[0]
if self.endog is None:
self.endog = self.training_data.columns[1]
if len(self.training_data.columns) > 2:
if self.exog is None:
self.exog = self.training_data.columns
self.exog.remove(self.key)
self.exog.remove(self.endog)
self.report = TimeSeriesTemplateReportHelper(self)
pages = []
page0 = Page("Forecast Result Analysis")
tse = TimeSeriesExplainer(key=self.key, endog=self.endog, exog=self.exog)
tse.add_line_to_comparison_item("Training Data", data=self.training_data, x_name=self.key, y_name=self.endog)
tse.add_line_to_comparison_item("Forecast Data", data=self.forecast_result, x_name=self.forecast_result.columns[0], y_name=self.forecast_result.columns[1])
tse.add_line_to_comparison_item('PI1', data=self.forecast_result, x_name=self.forecast_result.columns[0], confidence_interval_names=[self.forecast_result.columns[2], self.forecast_result.columns[3]],color="pink")
tse.add_line_to_comparison_item('PI2', data=self.forecast_result, x_name=self.forecast_result.columns[0], confidence_interval_names=[self.forecast_result.columns[4], self.forecast_result.columns[5]],color="#ccc")
page0.addItems(tse.get_comparison_item())
pages.append(page0)
self.report.add_pages(pages)
self.report.build_report()
def generate_html_report(self, filename=None):
"""
Display function.
"""
self.report.generate_html_report(filename)
def generate_notebook_iframe_report(self):
"""
Display function.
"""
self.report.generate_notebook_iframe_report()
class SingleExponentialSmoothing(_ExponentialSmoothingBase):
r"""
Single exponential smoothing is suitable to model the time series without trend and seasonality.
In the model, the smoothed value is the weighted sum of previous smoothed value and previous observed value.
PAL provides two simple exponential smoothing algorithms: single exponential smoothing and adaptive-response-rate simple exponential smoothing.
The adaptive-response-rate single exponential smoothing algorithm may have an advantage over single exponential smoothing in that it allows the value of alpha to be modified.
Parameters
----------
alpha : float, optional
The smoothing constant alpha for single exponential smoothing,
or the initialization value for adaptive-response-rate single exponential smoothing.
Valid range is (0, 1).
Defaults to 0.1 for single exponential smoothing, and 0.2 for adaptive-response-rate single exponential smoothing.
delta : float, optional
Weight value for At and Mt (relevant to the computation of the adaptive smoothing parameter).
The definitions of At and Mt are stated in
`SAP HANA PAL Single Exponential Smoothing <https://help.sap.com/viewer/2cfbc5cf2bc14f028cfbe2a2bba60a50/2.0.06/en-US/ba4bb85a74d84c2b994aa7192cac3b1b.html>`_
Only valid when ``adaptive_method`` is True.
Defaults to 0.2.
forecast_num : int, optional
Number of values to be forecast.
Defaults to 0.
adaptive_method : bool, optional
- False: Single exponential smoothing.
- True: Adaptive-response-rate single exponential smoothing.
Defaults to False.
accuracy_measure : str or list of str, optional
The metric to quantify how well a model fits input data.
Options: "mpe", "mse", "rmse", "et", "mad", "mase", "wmape", "smape", "mape".
No default value.
.. Note::
Specify a measure name if you want the corresponding measure value to be
reflected in the output statistics self.stats\_.
ignore_zero : bool, optional
- False: Uses zero values in the input dataset when calculating "mpe" or "mape".
- True: Ignores zero values in the input dataset when calculating "mpe" or "mape".
Only valid when ``accuracy_measure`` is "mpe" or "mape".
Defaults to False.
expost_flag : bool, optional
- False: Does not output the expost forecast, and just outputs the forecast values.
- True: Outputs the expost forecast and the forecast values.
Defaults to True.
prediction_confidence_1 : float, optional
Prediction confidence for interval 1.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.8.
prediction_confidence_2 : float, optional
Prediction confidence for interval 2.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.95.
Attributes
----------
forecast_ : DataFrame
Forecast values.
stats_ : DataFrame
Statistics analysis content.
Examples
--------
Input Dataframe df for SingleExponentialSmoothing:
>>> df.collect()
ID RAW_DATA
1 200.0
2 135.0
3 195.0
4 197.5
5 310.0
6 175.0
7 155.0
8 130.0
9 220.0
10 277.5
11 235.0
Create a SingleExponentialSmoothing instance:
>>> sesm = SingleExponentialSmoothing(adaptive_method=False,
accuracy_measure='mse',
alpha=0.1,
delta=0.2,
forecast_num=12,
expost_flag=True,
prediction_confidence_1=0.8,
prediction_confidence_2=0.95)
Perform fit_predict on the given data:
>>> sesm.fit_predict(data=df)
Output:
>>> sesm.forecast_.collect().set_index('TIMESTAMP').head(3)
TIMESTAMP VALUE PI1_LOWER PI1_UPPER PI2_LOWER PI2_UPPER
2 200 NaN NaN NaN NaN
3 193.5 NaN NaN NaN NaN
4 193.65 NaN NaN NaN NaN
>>> sesm.stats_.collect()
STAT_NAME STAT_VALUE
MSE 3438.3321
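A time series report can also be built from the fitted instance (sketch only;
the report renders as HTML, so its content is not shown here):
>>> sesm.build_report()
>>> sesm.generate_notebook_iframe_report()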
"""
op_name = 'SingleExpSm'
def __init__(self,
alpha=None,
delta=None,
forecast_num=None,
adaptive_method=None,
accuracy_measure=None,
ignore_zero=None,
expost_flag=None,
prediction_confidence_1=None,
prediction_confidence_2=None
):
setattr(self, 'hanaml_parameters', pal_param_register())
if delta is not None and adaptive_method is False:
msg = ('delta is only valid when adaptive_method is True!')
logger.error(msg)
raise ValueError(msg)
super(SingleExponentialSmoothing, self).__init__(
alpha=alpha,
delta=delta,
forecast_num=forecast_num,
adaptive_method=adaptive_method,
accuracy_measure=accuracy_measure,
ignore_zero=ignore_zero,
expost_flag=expost_flag,
prediction_confidence_1=prediction_confidence_1,
prediction_confidence_2=prediction_confidence_2)
def fit_predict(self, data, key=None, endog=None):
"""
Fit and predict based on the given time series.
Parameters
----------
data : DataFrame
Input data. At least two columns, one is ID column, the other is raw data.
key : str, optional
The ID column.
Defaults to the first column of data if the index column of data is not provided.
Otherwise, defaults to the index column of data.
endog : str, optional
The column of series to be fitted and predicted.
Defaults to the first non-ID column.
Returns
-------
DataFrame
Forecast values.
"""
setattr(self, 'hanaml_fit_params', pal_param_register())
return super(SingleExponentialSmoothing, self)._fit_predict(exp_smooth_function=1,
data=data,
key=key,
endog=endog)
class DoubleExponentialSmoothing(_ExponentialSmoothingBase):
r"""
Double exponential smoothing is suitable to model the time series with trend but without seasonality.
In the model there are two kinds of smoothed quantities: smoothed signal and smoothed trend.
Parameters
----------
alpha : float, optional
Weight for smoothing. Value range: 0 < alpha < 1.
Defaults to 0.1.
beta : float, optional
Weight for the trend component. Value range: 0 < beta < 1.
Defaults to 0.1.
forecast_num : int, optional
Number of values to be forecast.
Defaults to 0.
phi : float, optional
Value of the damped smoothing constant phi (0 < phi < 1).
Defaults to 0.1.
damped : bool, optional
Specifies whether or not to use damped trend method.
- False: No, uses the Holt's linear trend method.
- True: Yes, use damped trend method.
Defaults to False.
accuracy_measure : str or list of str, optional
The metric to quantify how well a model fits input data.
Options: "mpe", "mse", "rmse", "et", "mad", "mase", "wmape", "smape", "mape".
No default value.
.. Note::
Specify a measure name if you want the corresponding measure value to be
reflected in the output statistics self.stats\_.
ignore_zero : bool, optional
- False: Uses zero values in the input dataset when calculating "mpe" or "mape".
- True: Ignores zero values in the input dataset when calculating "mpe" or "mape".
Only valid when ``accuracy_measure`` is "mpe" or "mape".
Defaults to False.
expost_flag : bool, optional
- False: Does not output the expost forecast, and just outputs the forecast values.
- True: Outputs the expost forecast and the forecast values.
Defaults to True.
prediction_confidence_1 : float, optional
Prediction confidence for interval 1.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.8.
prediction_confidence_2 : float, optional
Prediction confidence for interval 2.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.95.
Attributes
----------
forecast_ : DataFrame
Forecast values.
stats_ : DataFrame
Statistics analysis content.
Examples
--------
Input Dataframe df for DoubleExponentialSmoothing:
>>> df.collect()
ID RAW_DATA
1 143.0
2 152.0
3 161.0
4 139.0
21 223.0
22 242.0
23 239.0
24 266.0
Create a DoubleExponentialSmoothing instance:
>>> desm = DoubleExponentialSmoothing(alpha=0.501,
beta=0.072,
forecast_num=6,
phi=None,
damped=None,
accuracy_measure='mse',
ignore_zero=None,
expost_flag=None,
prediction_confidence_1=0.8,
prediction_confidence_2=0.95)
Perform fit_predict on the given data:
>>> desm.fit_predict(data=df)
Output:
>>> desm.forecast_.collect().set_index('TIMESTAMP').head(3)
TIMESTAMP VALUE PI1_LOWER PI1_UPPER PI2_LOWER PI2_UPPER
2 152 NaN NaN NaN NaN
3 161 NaN NaN NaN NaN
4 170 NaN NaN NaN NaN
>>> desm.stats_.collect()
STAT_NAME STAT_VALUE
0 MSE 274.8960228
"""
op_name = 'DoubleExpSm'
def __init__(self,
alpha=None,
beta=None,
forecast_num=None,
phi=None,
damped=None,
accuracy_measure=None,
ignore_zero=None,
expost_flag=None,
prediction_confidence_1=None,
prediction_confidence_2=None
):
setattr(self, 'hanaml_parameters', pal_param_register())
super(DoubleExponentialSmoothing, self).__init__(
alpha=alpha,
beta=beta,
forecast_num=forecast_num,
phi=phi,
damped=damped,
accuracy_measure=accuracy_measure,
ignore_zero=ignore_zero,
expost_flag=expost_flag,
prediction_confidence_1=prediction_confidence_1,
prediction_confidence_2=prediction_confidence_2)
def fit_predict(self, data, key=None, endog=None):
"""
Fit and predict based on the given time series.
Parameters
----------
data : DataFrame
Input data. At least two columns, one is ID column, the other is raw data.
key : str, optional
The ID column.
Defaults to the first column of data if the index column of data is not provided.
Otherwise, defaults to the index column of data.
endog : str, optional
The column of series to be fitted and predicted.
Defaults to the first non-ID column.
Returns
-------
DataFrame
Forecast values.
"""
setattr(self, 'hanaml_fit_params', pal_param_register())
return super(DoubleExponentialSmoothing, self)._fit_predict(exp_smooth_function=2,
data=data,
key=key,
endog=endog)
class TripleExponentialSmoothing(_ExponentialSmoothingBase):
r"""
Triple exponential smoothing is used to handle the time series data containing a seasonal component.
Parameters
----------
alpha : float, optional
Weight for smoothing. Value range: 0 < alpha < 1.
Defaults to 0.1.
beta : float, optional
Weight for the trend component. Value range: 0 <= beta < 1.
Defaults to 0.1.
gamma : float, optional
Weight for the seasonal component. Value range: 0 < gamma < 1.
Defaults to 0.1.
seasonal_period : int, optional
Length of a seasonal period (should be greater than 1).
For example, the ``seasonal_period`` of quarterly data is 4,
and the ``seasonal_period`` of monthly data is 12.
Defaults to 2.
forecast_num : int, optional
Number of values to be forecast.
Defaults to 0.
seasonal : {'multiplicative', 'additive'}, optional
Specifies the type of model for triple exponential smoothing.
- 'multiplicative': Multiplicative triple exponential smoothing.
- 'additive': Additive triple exponential smoothing.
When ``seasonal`` is set to 'additive', the default value of initial_method is 1;
When ``seasonal`` is set to 'multiplicative', the default value of initial_method is 0.
Defaults to 'multiplicative'.
initial_method : int, optional
Initialization method for the trend and seasonal components.
Defaults to 0 or 1, depending the setting of ``seasonal``.
phi : float, optional
Value of the damped smoothing constant phi (0 < phi < 1).
Defaults to 0.1.
damped : bool, optional
Specifies whether or not to use damped trend method.
- False: No, uses the Holt's linear trend method.
- True: Yes, use damped trend method.
Defaults to False.
accuracy_measure : str or list of str, optional
The metric to quantify how well a model fits input data.
Options: "mpe", "mse", "rmse", "et", "mad", "mase", "wmape", "smape", "mape".
No default value.
.. Note::
Specify a measure name if you want the corresponding measure value to be
reflected in the output statistics self.stats\_.
ignore_zero : bool, optional
- False: Uses zero values in the input dataset when calculating "mpe" or "mape".
- True: Ignores zero values in the input dataset when calculating "mpe" or "mape".
Only valid when ``accuracy_measure`` is "mpe" or "mape".
Defaults to False.
expost_flag : bool, optional
- False: Does not output the expost forecast, and just outputs the forecast values.
- True: Outputs the expost forecast and the forecast values.
Defaults to True.
level_start : float, optional
The initial value for level component S.
If this value is not provided, it will be calculated in the way as described in Triple Exponential Smoothing.
``level_start`` cannot be zero. If it is set to zero, 0.0000000001 will be used instead.
trend_start : float, optional
The initial value for trend component B.
season_start : list of tuple/float, optional
A list of initial values for seasonal component C. If specified, the list
must be of the length specified in ``seasonal_period``, i.e. start values
must be provided for a whole seasonal period.
We can simply give out the start values in a list, where the cycle index of each value is determined by
its index in the list; or we can give out the start values together with their cycle indices in a list of tuples.
For example, suppose the seasonal period is 4, with starting values :math:`x_i, 1 \leq i \leq 4` indexed by their cycle ID.
Then the four season start values can be specified in a list as :math:`[x_1, x_2, x_3, x_4]`,
or equivalently in a list of tuples as :math:`[(1, x_1), (2, x_2), (3, x_3), (4, x_4)]`.
If not provided, start values shall be computed by a default scheme.
prediction_confidence_1 : float, optional
Prediction confidence for interval 1.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.8.
prediction_confidence_2 : float, optional
Prediction confidence for interval 2.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.95.
Attributes
----------
forecast_ : DataFrame
Forecast values.
stats_ : DataFrame
Statistics analysis content.
Examples
--------
Input Dataframe df for TripleExponentialSmoothing:
>>> df.collect()
ID RAW_DATA
1 362.0
2 385.0
3 432.0
4 341.0
5 382.0
...
18 707.0
19 773.0
20 592.0
21 627.0
22 725.0
23 854.0
24 661.0
Create a TripleExponentialSmoothing instance:
>>> tesm = TripleExponentialSmoothing(alpha=0.822,
beta=0.055,
gamma=0.055,
seasonal_period=4,
forecast_num=6,
seasonal=0,
initial_method=0,
phi=None,
damped=None,
accuracy_measure='mse',
ignore_zero=None,
expost_flag=True,
level_start=None,
trend_start=None,
season_start=None,
prediction_confidence_1=0.8,
prediction_confidence_2=0.95)
Perform fit_predict on the given data:
>>> tesm.fit_predict(data=df)
Output:
>>> tesm.forecast_.collect().set_index('TIMESTAMP').head(3)
TIMESTAMP VALUE PI1_LOWER PI1_UPPER PI2_LOWER PI2_UPPER
5 371.288158 NaN NaN NaN NaN
6 414.636207 NaN NaN NaN NaN
7 471.431808 NaN NaN NaN NaN
>>> tesm.stats_.collect()
STAT_NAME STAT_VALUE
MSE 616.541542
"""
op_name = 'TripleExpSm'
def __init__(self,
alpha=None,
beta=None,
gamma=None,
seasonal_period=None,
forecast_num=None,
seasonal=None,
initial_method=None,
phi=None,
damped=None,
accuracy_measure=None,
ignore_zero=None,
expost_flag=None,
level_start=None,
trend_start=None,
season_start=None,
prediction_confidence_1=None,
prediction_confidence_2=None
):
setattr(self, 'hanaml_parameters', pal_param_register())
super(TripleExponentialSmoothing, self).__init__(
alpha=alpha,
beta=beta,
gamma=gamma,
seasonal_period=seasonal_period,
forecast_num=forecast_num,
seasonal=seasonal,
initial_method=initial_method,
phi=phi,
damped=damped,
accuracy_measure=accuracy_measure,
ignore_zero=ignore_zero,
expost_flag=expost_flag,
level_start=level_start,
trend_start=trend_start,
season_start=season_start,
prediction_confidence_1=prediction_confidence_1,
prediction_confidence_2=prediction_confidence_2)
def fit_predict(self, data, key=None, endog=None):
"""
Fit and predict based on the given time series.
Parameters
----------
data : DataFrame
Input data. At least two columns, one is ID column, the other is raw data.
key : str, optional
The ID column.
Defaults to the first column of data if the index column of data is not provided.
Otherwise, defaults to the index column of data.
endog : str, optional
The column of series to be fitted and predicted.
Defaults to the first non-ID column.
Returns
-------
DataFrame
Forecast values.
"""
setattr(self, 'hanaml_fit_params', pal_param_register())
return super(TripleExponentialSmoothing, self)._fit_predict(exp_smooth_function=3,
data=data,
key=key,
endog=endog)
class AutoExponentialSmoothing(_ExponentialSmoothingBase):
r"""
Auto exponential smoothing (previously named forecast smoothing) is used to calculate optimal parameters of a set of smoothing functions in SAP HANA PAL,
including Single Exponential Smoothing, Double Exponential Smoothing, and Triple Exponential Smoothing.
Parameters
----------
model_selection : bool, optional
Specifies whether the algorithms will perform model selection or not.
- True: the algorithm will select the best model among Single/Double/Triple/
Damped Double/Damped Triple Exponential Smoothing models.
- False: the algorithm will not perform the model selection.
If ``forecast_model_name`` is set, the model defined by forecast_model_name will be used.
Defaults to False.
forecast_model_name : str, optional
Name of the statistical model used for calculating the forecast.
- 'SESM': Single Exponential Smoothing.
- 'DESM': Double Exponential Smoothing.
- 'TESM': Triple Exponential Smoothing.
This parameter must be set unless ``model_selection`` is set to 1.
optimizer_time_budget : int, optional
Time budget for Nelder-Mead optimization process.
The time unit is second and the value should be larger than zero.
Defaults to 1.
max_iter : int, optional
Maximum number of iterations for simulated annealing.
Defaults to 100.
optimizer_random_seed : int, optional
Random seed for simulated annealing.
The value should be larger than zero.
Defaults to system time.
thread_ratio : float, optional
Controls the proportion of available threads to use.
- 0: single thread.
- 0~1: percentage.
- Others: heuristically determined.
Defaults to 1.0.
alpha : float, optional
Weight for smoothing. Value range: 0 < alpha < 1.
Default value is computed automatically.
beta : float, optional
Weight for the trend component. Value range: 0 <= beta < 1.
If it is not set, the optimized value will be computed automatically.
Only valid when the model is set by user or identified by the algorithm as 'DESM' or 'TESM'.
Value 0 is allowed under TESM model only.
Default value is computed automatically.
gamma : float, optional
Weight for the seasonal component. Value range: 0 < gamma < 1.
Only valid when the model is set by user or identified by the algorithm as TESM.
Default value is computed automatically.
phi : float, optional
Value of the damped smoothing constant phi (0 < phi < 1).
Only valid when the model is set by user or identified by the algorithm as a damped model.
Default value is computed automatically.
forecast_num : int, optional
Number of values to be forecast.
Defaults to 0.
seasonal_period : int, optional
Length of a seasonal_period (L > 1).
For example, the ``seasonal_period`` of quarterly data is 4,
and the ``seasonal_period`` of monthly data is 12.
Only valid when the model is set by user or identified by the algorithm as 'TESM'.
Default value is computed automatically.
seasonal : {'multiplicative', 'additive'}, optional
Specifies the type of model for triple exponential smoothing.
- 'multiplicative': Multiplicative triple exponential smoothing.
- 'additive': Additive triple exponential smoothing.
When ``seasonal`` is set to 'additive', the default value of initial_method is 1;
When ``seasonal`` is set to 'multiplicative', the default value of initial_method is 0.
Defaults to 'multiplicative'.
initial_method : int, optional
Initialization method for the trend and seasonal components.
Refer to :class:`~hana_ml.algorithms.pal.tsa.exponential_smoothing.TripleExponentialSmoothing` for detailed information on initialization method.
Only valid when the model is set by user or identified by the algorithm as 'TESM'.
Defaults to 0 or 1.
training_ratio : float, optional
The ratio of training data to the whole time series.
Assuming the size of the time series is N and the training ratio is r,
the first N*r observations are used for training and the remaining N*(1-r)
observations are used for testing.
If this parameter is set to 0.0 or 1.0, or the resulting training size
(N*r) is less than 1 or equal to the size of the time series, no
train-and-test procedure is carried out.
Defaults to 1.0.
damped : int, optional
For DESM:
- False: Uses the Holt's linear method.
- True: Uses the additive damped trend Holt's linear method.
For TESM:
- False: Uses the Holt Winter method.
- True: Uses the additive damped seasonal Holt Winter method.
If ``model_selection`` is set to 1, the default value will be computed automatically.
Otherwise, the default value is False.
accuracy_measure : str, {'mse', 'mape'}, optional
The criterion used for the optimization.
Defaults to 'mse'.
seasonality_criterion : float, optional
The criterion of the auto-correlation coefficient for accepting seasonality,
in the range of (0, 1).
The larger it is, the less probable a time series is
regarded to be seasonal.
Only valid when ``forecast_model_name`` is 'TESM' or model_selection
is set to 1, and ``seasonal_period`` is not defined.
Defaults to 0.5.
trend_test_method : {'mk', 'difference-sign'}, optional
- 'mk': Mann-Kendall test.
- 'difference-sign': Difference-sign test.
Defaults to 'mk'.
trend_test_alpha : float, optional
Tolerance probability for trend test. The value range is (0, 0.5).
Only valid when ``model_selection`` is set to 1.
Defaults to 0.05.
alpha_min : float, optional
Sets the minimum value of alpha.
Only valid when ``alpha`` is not defined.
Defaults to 0.0000000001.
beta_min : float, optional
Sets the minimum value of beta.
Only valid when ``beta`` is not defined.
Defaults to 0.0000000001.
gamma_min : float, optional
Sets the minimum value of gamma.
Only valid when ``gamma`` is not defined.
Defaults to 0.0000000001.
phi_min : float, optional
Sets the minimum value of phi.
Only valid when ``phi`` is not defined.
Defaults to 0.0000000001.
alpha_max : float, optional
Sets the maximum value of alpha.
Only valid when ``alpha`` is not defined.
Defaults to 1.0.
beta_max : float, optional
Sets the maximum value of beta.
Only valid when ``beta`` is not defined.
Defaults to 1.0.
gamma_max : float, optional
Sets the maximum value of gamma.
Only valid when ``gamma`` is not defined.
Defaults to 1.0.
phi_max : float, optional
Sets the maximum value of phi.
Only valid when ``phi`` is not defined.
Defaults to 1.0.
prediction_confidence_1 : float, optional
Prediction confidence for interval 1.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.8.
prediction_confidence_2 : float, optional
Prediction confidence for interval 2.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.95.
level_start : float, optional
The initial value for level component S.
If this value is not provided, it will be calculated in the way as described in :class:`~hana_ml.algorithms.pal.tsa.exponential_smoothing.TripleExponentialSmoothing`.
Notice that ``level_start`` cannot be zero.
If it is set to zero, 0.0000000001 will be used instead.
trend_start : float, optional
The initial value for trend component B.
If this value is not provided, it will be calculated in the way as described in :class:`~hana_ml.algorithms.pal.tsa.exponential_smoothing.TripleExponentialSmoothing`.
season_start : list of tuple/float, optional
A list of initial values for seasonal component C. If specified, the list
must be of the length specified in ``seasonal_period``, i.e. start values
must be provided for a whole seasonal period.
We can simply give out the start values in a list, where the cycle index of each value is determined by
its index in the list; or we can give out the start values together with their cycle indices in a list of tuples.
For example, suppose the seasonal period is 4, with starting values :math:`x_i, 1 \leq i \leq 4` indexed by their cycle IDs.
Then the four season start values can be specified in a list as :math:`[x_1, x_2, x_3, x_4]`,
or equivalently in a list of tuples as :math:`[(1, x_1), (2, x_2), (3, x_3), (4, x_4)]`.
If not provided, start values shall be computed by a default scheme.
expost_flag : bool, optional
- False: Does not output the expost forecast, and just outputs the forecast values.
- True: Outputs the expost forecast and the forecast values.
Defaults to True.
Attributes
----------
forecast_ : DataFrame
Forecast values.
stats_ : DataFrame
Statistics analysis content.
Examples
--------
Input Dataframe df for AutoExponentialSmoothing:
>>> df.collect()
TIMESTAMP Y
1 362
2 385
3 432
4 341
5 382
......
21 627
22 725
23 854
24 661
Create AutoExponentialSmoothing instance:
>>> autoExp = time_series.AutoExponentialSmoothing(forecast_model_name='TESM',
alpha=0.4,
beta=0.4,
gamma=0.4,
seasonal_period=4,
forecast_num=3,
seasonal='multiplicative',
initial_method=1,
training_ratio=0.75)
Perform fit on the given data:
>>> autoExp.fit(data=df)
Output:
>>> autoExp.forecast_.collect().set_index('TIMESTAMP').head(6)
TIMESTAMP VALUE PI1_LOWER PI1_UPPER PI2_LOWER PI2_UPPER
1 320.018502 NaN NaN NaN NaN
2 374.225113 NaN NaN NaN NaN
3 458.649782 NaN NaN NaN NaN
4 364.376078 NaN NaN NaN NaN
5 416.009008 NaN NaN NaN NaN
>>> autoExp.stats_.collect().head(4)
STAT_NAME STAT_VALUE
MSE 467.811415778471
NUMBER_OF_ITERATIONS 110
SA_NUMBER_OF_ITERATIONS 100
NM_NUMBER_OF_ITERATIONS 10
"""
def __init__(self,
model_selection=None,# Auto ESM
forecast_model_name=None,# Auto ESM
optimizer_time_budget=None,# Auto ESM
max_iter=None,# Auto ESM
optimizer_random_seed=None,# Auto ESM
thread_ratio=None,# Auto ESM
alpha=None,
beta=None,
gamma=None,
phi=None,
forecast_num=None,
seasonal_period=None,
seasonal=None,
initial_method=None,
training_ratio=None,
damped=None,
accuracy_measure=None,
seasonality_criterion=None,# Auto ESM
trend_test_method=None,# Auto ESM
trend_test_alpha=None,# Auto ESM
alpha_min=None, # Auto ESM
beta_min=None,# Auto ESM
gamma_min=None,# Auto ESM
phi_min=None,# Auto ESM
alpha_max=None,# Auto ESM
beta_max=None,# Auto ESM
gamma_max=None,# Auto ESM
phi_max=None,# Auto ESM
prediction_confidence_1=None,
prediction_confidence_2=None,
level_start=None,
trend_start=None,
season_start=None,
expost_flag=None
):
setattr(self, 'hanaml_parameters', pal_param_register())
if accuracy_measure is not None:
if isinstance(accuracy_measure, str):
accuracy_measure = [accuracy_measure]
if len(accuracy_measure) != 1:
msg = "Please select accuracy_measure from 'mse' OR 'mape'!"
logger.error(msg)
raise ValueError(msg)
self._arg('accuracy_measure', accuracy_measure[0].lower(), {'mse':'mse', 'mape':'mape'})
accuracy_measure = accuracy_measure[0].lower()
super(AutoExponentialSmoothing, self).__init__(
model_selection=model_selection,
forecast_model_name=forecast_model_name,
optimizer_time_budget=optimizer_time_budget,
max_iter=max_iter,
optimizer_random_seed=optimizer_random_seed,
thread_ratio=thread_ratio,
alpha=alpha,
beta=beta,
gamma=gamma,
phi=phi,
forecast_num=forecast_num,
seasonal_period=seasonal_period,
seasonal=seasonal,
initial_method=initial_method,
training_ratio=training_ratio,
damped=damped,
accuracy_measure=accuracy_measure,
seasonality_criterion=seasonality_criterion,
trend_test_method=trend_test_method,
trend_test_alpha=trend_test_alpha,
alpha_min=alpha_min,
beta_min=beta_min,
gamma_min=gamma_min,
phi_min=phi_min,
alpha_max=alpha_max,
beta_max=beta_max,
gamma_max=gamma_max,
phi_max=phi_max,
prediction_confidence_1=prediction_confidence_1,
prediction_confidence_2=prediction_confidence_2,
level_start=level_start,
trend_start=trend_start,
season_start=season_start,
expost_flag=expost_flag)
def fit_predict(self, data, key=None, endog=None):
"""
Fit and predict based on the given time series.
Parameters
----------
data : DataFrame
Input data. At least two columns, one is ID column, the other is raw data.
key : str, optional
The ID column.
Defaults to the first column of data if the index column of data is not provided.
Otherwise, defaults to the index column of data.
endog : str, optional
The column of series to be fitted and predicted.
Defaults to the first non-ID column.
Returns
-------
DataFrame
Forecast values.
"""
if self.training_ratio is None:
self.training_ratio = 1.0
if not self._disable_hana_execution:
rows = data.count() * self.training_ratio
half_row = rows/2
if self.seasonal_period is not None and self.seasonal_period > half_row:
msg = ('seasonal_period should be smaller than' +
' 1/2(row number * training_ratio) of data!')
logger.error(msg)
raise ValueError(msg)
return super(AutoExponentialSmoothing, self)._fit_predict(exp_smooth_function=4, data=data,
key=key, endog=endog)
class BrownExponentialSmoothing(_ExponentialSmoothingBase):
r"""
Brown exponential smoothing is suitable to model the time series with trend but without seasonality.
Both non-adaptive and adaptive brown linear exponential smoothing are provided in PAL.
Parameters
----------
alpha : float, optional
The smoothing constant alpha for brown exponential smoothing or
the initialization value for adaptive brown exponential smoothing (0 < alpha < 1).
- Defaults to 0.1 for Brown exponential smoothing.
- Defaults to 0.2 for adaptive Brown exponential smoothing.
delta : float, optional
The weighting value for At and Mt.
Only valid when ``adaptive_method`` is True.
Defaults to 0.2
forecast_num : int, optional
Number of values to be forecast.
Defaults to 0.
adaptive_method : bool, optional
- False: Brown exponential smoothing.
- True: Adaptive brown exponential smoothing.
Defaults to False.
accuracy_measure : str or list of str, optional
The metric to quantify how well a model fits input data.
Options: "mpe", "mse", "rmse", "et", "mad", "mase", "wmape", "smape", "mape".
No default value.
.. Note::
Specify a measure name if you want the corresponding measure value to be
reflected in the output statistics self.stats\_.
ignore_zero : bool, optional
- False: Uses zero values in the input dataset when calculating "mpe" or "mape".
- True: Ignores zero values in the input dataset when calculating "mpe" or "mape".
Only valid when ``accuracy_measure`` is "mpe" or "mape".
Defaults to False.
expost_flag : bool, optional
- False: Does not output the expost forecast, and just outputs the forecast values.
- True: Outputs the expost forecast and the forecast values.
Defaults to True.
prediction_confidence_1 : float, optional
Prediction confidence for interval 1.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.8.
prediction_confidence_2 : float, optional
Prediction confidence for interval 2.
Only valid when the upper and lower columns are provided in the result table.
Defaults to 0.95.
Attributes
----------
forecast_ : DataFrame
Forecast values.
stats_ : DataFrame
Statistics analysis content.
Examples
--------
Input dataframe df for BrownExponentialSmoothing:
>>> df.collect()
ID RAWDATA
1 143.0
2 152.0
3 161.0
4 139.0
5 137.0
21 223.0
22 242.0
23 239.0
24 266.0
Create BrownExponentialSmoothing instance:
>>> brown_exp_smooth = BrownExponentialSmoothing(alpha=0.1,
delta=0.2,
forecast_num=6,
adaptive_method=False,
accuracy_measure='mse',
ignore_zero=0,
expost_flag=1)
Perform fit on the given data:
>>> brown_exp_smooth.fit_predict(data=df)
Output:
>>> brown_exp_smooth.forecast_.collect().set_index('TIMESTAMP').head(6)
TIMESTAMP VALUE
2 143.00000
3 144.80000
4 148.13000
5 146.55600
6 144.80550
7 150.70954
>>> brown_exp_smooth.stats_.collect()
STAT_NAME STAT_VALUE
MSE 474.142004
"""
op_name = 'BrownExpSm'
def __init__(self,
alpha=None,
delta=None,
forecast_num=None,
adaptive_method=None,
accuracy_measure=None,
ignore_zero=None,
expost_flag=None,
prediction_confidence_1=None,
prediction_confidence_2=None):
setattr(self, 'hanaml_parameters', pal_param_register())
if delta is not None and adaptive_method is False:
msg = 'delta is only valid when adaptive_method is True!'
logger.error(msg)
raise ValueError(msg)
super(BrownExponentialSmoothing, self).__init__(alpha=alpha,
delta=delta,
forecast_num=forecast_num,
adaptive_method=adaptive_method,
accuracy_measure=accuracy_measure,
ignore_zero=ignore_zero,
expost_flag=expost_flag,
prediction_confidence_1=prediction_confidence_1,
prediction_confidence_2=prediction_confidence_2)
def fit_predict(self, data, key=None, endog=None):
"""
Fit and predict based on the given time series.
Parameters
----------
data : DataFrame
Input data. At least two columns, one is ID column, the other is raw data.
key : str, optional
The ID column.
Defaults to the first column of data if the index column of data is not provided.
Otherwise, defaults to the index column of data.
endog : str, optional
The column of series to be fitted and predicted.
Defaults to the first non-ID column.
Returns
-------
DataFrame
Forecast values.
"""
setattr(self, 'hanaml_fit_params', pal_param_register())
return super(BrownExponentialSmoothing, self)._fit_predict(exp_smooth_function=5,
data=data,
key=key,
endog=endog)
class Croston(_ExponentialSmoothingBase):
r"""
The Croston method is a forecasting strategy for products with intermittent demand.
It consists of two steps. First, a separate exponential smoothing estimate is made of the average size of a demand.
Second, the average interval between demands is estimated. The two are then combined in a form of the constant model to predict future demand.
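As a reference sketch of the standard formulation (PAL's handling of initialization and of
the 'sporadic'/'constant' output modes may differ), let :math:`z_t` be the demand size and
:math:`q_t` the inter-demand interval, both smoothed with ``alpha`` only in periods where
demand occurs:

.. math::

    \hat{z}_t = \hat{z}_{t-1} + \alpha (z_t - \hat{z}_{t-1}), \qquad
    \hat{q}_t = \hat{q}_{t-1} + \alpha (q_t - \hat{q}_{t-1}), \qquad
    \hat{y}_t = \hat{z}_t / \hat{q}_t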
Parameters
----------
alpha : float, optional
Value of the smoothing constant alpha (0 < alpha < 1).
Defaults to 0.1.
forecast_num : int, optional
Number of values to be forecast.
When it is set to 1, the algorithm only forecasts one value.
Defaults to 0.
method : str, optional
- 'sporadic': Use the sporadic method.
- 'constant': Use the constant method.
Defaults to 'sporadic'.
accuracy_measure : str or list of str, optional
The metric to quantify how well a model fits input data.
Options: "mpe", "mse", "rmse", "et", "mad", "mase", "wmape", "smape", "mape".
No default value.
.. Note::
Specify a measure name if you want the corresponding measure value to be
reflected in the output statistics self.stats\_.
ignore_zero : bool, optional
- False: Uses zero values in the input dataset when calculating "mpe" or "mape".
- True: Ignores zero values in the input dataset when calculating "mpe" or "mape".
Only valid when ``accuracy_measure`` is "mpe" or "mape".
Defaults to False.
expost_flag : bool, optional
- False: Does not output the expost forecast, and just outputs the forecast values.
- True: Outputs the expost forecast and the forecast values.
Defaults to True.
Attributes
----------
forecast_ : DataFrame
Forecast values.
stats_ : DataFrame
Statistics analysis content.
Examples
--------
Input dataframe df for Croston:
>>> df.collect()
ID RAWDATA
0 0.0
1 1.0
2 4.0
3 0.0
4 0.0
5 0.0
6 5.0
7 3.0
8 0.0
9 0.0
10 0.0
Create a Croston instance:
>>> croston = Croston(alpha=0.1,
forecast_num=1,
method='sporadic',
accuracy_measure='mape')
Perform fit on the given data:
>>> croston.fit_predict(data=df)
Output:
>>> croston.forecast_.collect().set_index('ID').head(6)
ID RAWDATA
0 0.000000
1 3.025000
2 3.122500
3 0.000000
4 0.000000
5 0.000000
>>> croston.stats_.collect()
STAT_NAME STAT_VALUE
MAPE 0.2432181818181818
"""
def __init__(self,
alpha=None,
forecast_num=None,
method=None,
accuracy_measure=None,
ignore_zero=None,
expost_flag=None):
method = self._arg('method', method,
{'sporadic': 0, 'constant': 1})
if alpha is None:
alpha = 0.1
super(Croston, self).__init__(alpha=alpha,
forecast_num=forecast_num,
accuracy_measure=accuracy_measure,
ignore_zero=ignore_zero,
expost_flag=expost_flag,
method=method)
def fit_predict(self, data, key=None, endog=None):
"""
Fit and predict based on the given time series.
Parameters
----------
data : DataFrame
Input data. At least two columns, one is ID column, the other is raw data.
key : str, optional
The ID column.
Defaults to the first column of data if the index column of data is not provided.
Otherwise, defaults to the index column of data.
endog : str, optional
The column of series to be fitted and predicted.
Defaults to the first non-ID column.
Returns
-------
DataFrame
Forecast values.
"""
return super(Croston, self)._fit_predict(exp_smooth_function=6, data=data, key=key,
endog=endog)
def build_report(self):
r"""
Generate time series report.
"""
from hana_ml.visualizers.time_series_report_template_helper import TimeSeriesTemplateReportHelper #pylint: disable=cyclic-import
if self.key is None:
self.key = self.training_data.columns[0]
if self.endog is None:
self.endog = self.training_data.columns[1]
if len(self.training_data.columns) > 2:
if self.exog is None:
self.exog = self.training_data.columns
self.exog.remove(self.key)
self.exog.remove(self.endog)
self.report = TimeSeriesTemplateReportHelper(self)
pages = []
page0 = Page("Forecast Result Analysis")
tse = TimeSeriesExplainer(key=self.key, endog=self.endog, exog=self.exog)
tse.add_line_to_comparison_item("Training Data", data=self.training_data, x_name=self.key, y_name=self.endog)
tse.add_line_to_comparison_item("Forecast Data", data=self.forecast_result, x_name=self.forecast_result.columns[0], y_name=self.forecast_result.columns[1])
page0.addItems(tse.get_comparison_item())
pages.append(page0)
self.report.add_pages(pages)
self.report.build_report()
def _params_check(input_dict, param_map):
update_params = {}
if not input_dict:
return update_params
for parm in input_dict:
if parm in param_map.keys():
if parm == 'accuracy_measure':
if input_dict.get('accuracy_measure') is not None:
ac = input_dict.get('accuracy_measure')
if isinstance(ac, str):
ac = [ac]
acc_list = {"mpe":"mpe", "mse":"mse", "rmse":"rmse", "et":"et",
"mad":"mad", "mase":"mase", "wmape":"wmape",
"smape":"smape", "mape":"mape"}
for acc in ac:
acc = acc.lower()
arg('accuracy_measure', acc, acc_list)
update_params['accuracy_measure'] = (ac, ListOfStrings)
else:
parm_val = input_dict[parm]
arg_map = param_map[parm]
if arg_map[1] == ListOfStrings and isinstance(parm_val, str):
parm_val = [parm_val]
if len(arg_map) == 2:
update_params[arg_map[0]] = (arg(parm, parm_val, arg_map[1]), arg_map[1])
else:
update_params[arg_map[0]] = (arg(parm, parm_val, arg_map[2]), arg_map[1])
else:
err_msg = "'{}' is not a valid parameter name for initializing a Croston TSB model!".format(parm)
logger.error(err_msg)
raise KeyError(err_msg)
return update_params
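# Illustrative only (values are hypothetical): with the mapping table defined in
# CrostonTSB.__init_param_dict below, a call such as
#     _params_check({'alpha': 0.3, 'accuracy_measure': 'mape'},
#                   {'alpha': ('ALPHA', float),
#                    'accuracy_measure': ('MEASURE_NAME', ListOfStrings)})
# is expected to return
#     {'ALPHA': (0.3, float), 'accuracy_measure': (['mape'], ListOfStrings)}
# i.e. validated values paired with their target types, keyed by the PAL parameter name
# (accuracy_measure keeps its original key and is normalized to a list of strings).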
class CrostonTSB(PALBase):
r"""
Croston TSB method (for Teunter, Syntetos & Babai) is a forecast strategy for products
with intermittent demand. It is a modification of Croston's method.
It replaces the demand interval in Croston's method by demand probability
which is updated every period. Compared to Croston's method, the forecast
of TSB method is not biased and its probability forecast can be used to
estimate the risk of obsolescence.
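As a reference sketch of the standard TSB recursion (PAL's exact initialization may differ):
the demand probability :math:`p_t` is smoothed every period with ``beta``, the demand size
:math:`z_t` is smoothed with ``alpha`` only in periods where demand occurs
(:math:`d_t = 1`), and the forecast is their product:

.. math::

    \hat{p}_t = \hat{p}_{t-1} + \beta (d_t - \hat{p}_{t-1}), \qquad
    \hat{z}_t = \hat{z}_{t-1} + \alpha (z_t - \hat{z}_{t-1}) \text{ if } d_t = 1, \qquad
    \hat{y}_t = \hat{p}_t \, \hat{z}_t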
Parameters
----------
alpha : float, optional
Smoothing parameter for demand.
Defaults to 0.1.
beta : float, optional
Smoothing parameter for probability.
Defaults to 0.1.
forecast_num : int, optional
Number of values to be forecast.
When it is set to 1, the algorithm only forecasts one value.
Defaults to 0.
method : str, optional
- 'sporadic': Use the sporadic method.
- 'constant': Use the constant method.
Defaults to 'sporadic'.
accuracy_measure : str or list of str, optional
The metric to quantify how well a model fits input data.
Options: "mpe", "mse", "rmse", "et", "mad", "mase", "wmape", "smape", "mape".
No default value.
.. Note::
Specify a measure name if you want the corresponding measure value to be
reflected in the output statistics self.stats\_.
expost_flag : bool, optional
- False: Does not output the expost forecast, and just outputs the forecast values.
- True: Outputs the expost forecast and the forecast values.
Defaults to True.
ignore_zero : bool, optional
- False: Uses zero values in the input dataset when calculating "mpe" or "mape".
- True: Ignores zero values in the input dataset when calculating "mpe" or "mape".
Only valid when ``accuracy_measure`` is "mpe" or "mape".
Defaults to False.
remove_leading_zeros : bool, optional
- False: Uses leading zero values in the input dataset when smoothing the probability;
- True: Ignores leading zero values in the input dataset when smoothing the probability.
When set to True, the leading zeros are also ignored when calculating the accuracy measure.
Defaults to False.
massive : bool, optional
Specifies whether or not to use massive mode of croston TSB.
- True : massive mode.
- False : single mode.
For parameter setting in massive mode, you can use either
group_params (please see the example below) or the original parameters.
The original parameters apply to all groups. However, if any parameter of a group is defined in group_params,
the original parameter settings are no longer applicable to that group.
An example is as follows:
.. only:: latex
>>> mcr = CrostonTSB(massive=True,
expost_flag=False,
group_params={'Group_1': {'accuracy_measure':'MAPE'}})
>>> res = mcr.fit_predict(data=df,
key='ID',
endog='y',
group_key='GROUP_ID')
.. raw:: html
<iframe allowtransparency="true" style="border:1px solid #ccc; background: #eeffcb;"
src="../../_static/croston_tsb_example.html" width="100%" height="60%">
</iframe>
In this example, as 'accuracy_measure' is set in group_params for Group_1,
parameter setting of 'expost_flag' is not applicable to Group_1.
Defaults to False.
group_params : dict, optional
If massive mode is activated (``massive`` is True),
input data for croston TSB shall be divided into different
groups with different parameters applied.
An example is as follows:
.. only:: latex
>>> mcr = CrostonTSB(massive=True,
expost_flag=False,
group_params={'Group_1': {'accuracy_measure':'MAPE'}})
>>> res = mcr.fit_predict(data=df,
key='ID',
endog='y',
group_key='GROUP_ID')
.. raw:: html
<iframe allowtransparency="true" style="border:1px solid #ccc; background: #eeffcb;"
src="../../_static/croston_tsb_example.html" width="100%" height="60%">
</iframe>
Valid only when ``massive`` is True and defaults to None.
Attributes
----------
forecast_ : DataFrame
Forecast values.
stats_ : DataFrame
Statistics analysis.
metrics_ : DataFrame
Metrics Value.
error_msg_ : DataFrame
Error message.
Only valid if ``massive`` is True when initializing a 'CrostonTSB' instance.
Examples
--------
Input dataframe df:
>>> df.collect()
ID Y
0 0.0
1 0.0
2 4.0
3 0.0
4 0.0
5 0.0
6 5.0
7 3.0
8 0.0
9 0.0
10 0.0
Create an instance:
>>> cr = CrostonTSB(alpha=0.3,
beta=0.1,
forecast_num=10,
method='constant',
accuracy_measure=['mape'],
expost_flag=True,
ignore_zero=False,
remove_leading_zeros=False,
massive=True)
Perform fit on the given data:
>>> forecast = cr.fit_predict(df, key='ID', endog='Y', group_key='GROUP_ID')
Output:
>>> forecast.collect()
ID OUTPUT_VALUE
1 1.440000
2 1.296000
3 1.566400
4 1.409760
...
14 1.225253
15 1.225253
16 1.225253
17 1.225253
18 1.225253
19 1.225253
20 1.225253
>>> cr.stats_.collect()
STAT_NAME STAT_VALUE
MAPE 0.895982
>>> cr.metrics_.collect()
DEMAND_FORECAST 3.990000
PROBABILITY_FORECAST 0.307081
"""
__init_param_dict = {'alpha' : ('ALPHA', float),
'beta' : ('BETA', float),
'forecast_num' : ('FORECAST_NUM', int),
'method' : ('METHOD', int, {'sporadic': 0, 'constant': 1}),
'accuracy_measure' : ('MEASURE_NAME', ListOfStrings),
'ignore_zero' : ('IGNORE_ZERO', bool),
'expost_flag' : ('EXPOST_FLAG', bool),
'remove_leading_zeros' : ('REMOVE_LEADING_ZEROS', bool)}
def __init__(self,
alpha=None,
beta=None,
forecast_num=None,
method=None,
accuracy_measure=None,
ignore_zero=None,
expost_flag=None,
remove_leading_zeros=None,
massive=False,
group_params=None):
super(CrostonTSB, self).__init__()
setattr(self, 'hanaml_parameters', pal_param_register())
init_params = {'alpha' : alpha,
'beta' : beta,
'forecast_num' : forecast_num,
'method' : method,
'accuracy_measure' : accuracy_measure,
'ignore_zero' : ignore_zero,
'expost_flag' : expost_flag,
'remove_leading_zeros' : remove_leading_zeros}
init_params = _delete_none_key_in_dict(init_params)
self.init_params = init_params
self.__pal_params = {}
self.massive = self._arg('massive', massive, bool)
if self.massive is not True:
self.__pal_params = _params_check(input_dict=self.init_params,
param_map=self.__init_param_dict)
else: # massive mode
group_params = self._arg('group_params', group_params, dict)
group_params = {} if group_params is None else group_params
for group in group_params:
self._arg('Parameters with GROUP ID ' + str(group), group_params[group], dict)
self.group_params = group_params
for group in self.group_params:
self.__pal_params[group] = _params_check(input_dict=self.group_params[group],
param_map=self.__init_param_dict)
if self.init_params:
special_group_name = 'PAL_MASSIVE_PROCESSING_SPECIAL_GROUP_ID'
self.__pal_params[special_group_name] = _params_check(input_dict=self.init_params,
param_map=self.__init_param_dict)
self.forecast_ = None
self.stats_ = None
self.metrics_ = None
self.error_msg_ = None
self.is_index_int = None
self.forecast_start = None
self.timedelta = None
def fit_predict(self, data, key=None, endog=None, group_key=None):
"""
Fit and predict based on the given time series.
Parameters
----------
data : DataFrame
Input data. At least two columns, one is ID column, the other is raw data.
key : str, optional
The ID column.
In single mode, defaults to the first column of data if the index column of data is not provided.
Otherwise, defaults to the index column of data.
In massive mode, defaults to the first non-group-key column of data if the index columns of data are not provided.
Otherwise, defaults to the second index column of data, with the first index column used as group_key.
endog : str, optional
The column of series to be fitted and predicted.
In single mode, defaults to the first non-ID column.
In massive mode, defaults to the first non group_key, non key column.
group_key : str, optional
The column of group_key. The data type can be INT or NVARCHAR/VARCHAR.
If data type is INT, only parameters set in the group_params are valid.
This parameter is only valid when ``massive`` is True.
Defaults to the first column of data if the index columns of data is not provided.
Otherwise, defaults to the first column of index columns.
Returns
-------
DataFrame
Forecast values.
"""
conn = data.connection_context
require_pal_usable(conn)
setattr(self, "training_data", data)
setattr(self, "key", key)
setattr(self, "endog", endog)
setattr(self, "exog", None)
param_rows = []
if data is None:
msg = ('The data cannot be None!')
logger.error(msg)
raise ValueError(msg)
cols = data.columns
index = data.index
self.is_index_int = True
group_key_type = None
if self.massive is True:
group_key = self._arg('group_key', group_key, str)
if index is not None:
group_key = _col_index_check(group_key, 'group_key', index[0], cols)
else:
if group_key is None:
group_key = cols[0]
if group_key is not None and group_key not in cols:
msg = ("Please select group_key from {}!".format(cols))
logger.error(msg)
raise ValueError(msg)
data_groups = list(data[[group_key]].collect()[group_key].drop_duplicates())
param_keys = list(self.group_params.keys())
if not self._disable_hana_execution:
gid_type = data[[group_key]].dtypes()[0]
if not all([(int(ky) if 'INT' in gid_type[1] else ky) in data_groups for ky in param_keys]):
msg = 'Invalid group key identified in group parameters!'
logger.error(msg)
raise ValueError(msg)
else:
gid_type = {tp[0]:(tp[0], tp[1], tp[2]) for tp in data.dtypes()}[group_key]
if 'INT' in gid_type[1]:
group_key_type = gid_type[1]
elif 'VARCHAR' in gid_type[1]:
group_key_type = gid_type[1] + '({})'.format(gid_type[2])
cols.remove(group_key)
key = self._arg('key', key, str)
if index is not None:
key = _col_index_check(key, 'key', index[1], cols)
else:
if key is None:
key = cols[0]
else: # single mode
key = self._arg('key', key, str)
if index is not None:
key = _col_index_check(key, 'key', index, cols)
else:
if key is None:
key = cols[0]
if key is not None and key not in cols:
msg = ("Please select key from {}!".format(cols))
logger.error(msg)
raise ValueError(msg)
cols.remove(key)
endog = self._arg('endog', endog, str)
if endog is not None:
if endog not in cols:
msg = ("Please select endog from {}!".format(cols))
logger.error(msg)
raise ValueError(msg)
else:
endog = cols[0]
if self.massive is not True:
data_ = data[[key] + [endog]]
self.is_index_int = _is_index_int(data_, key)
if not self.is_index_int:
data_ = _convert_index_from_timestamp_to_int(data_, key)
try:
self.forecast_start, self.timedelta = _get_forecast_starttime_and_timedelta(data, key, self.is_index_int)
except Exception as err:
logger.warning(err)
for name in self.__pal_params:
value, typ = self.__pal_params[name]
if name == 'accuracy_measure':
if isinstance(value, str):
value = [value]
for each_ac in value:
param_rows.extend([('MEASURE_NAME', None, None, each_ac)])
else:
tpl = [_map_param(name, value, typ)]
param_rows.extend(tpl)
unique_id = str(uuid.uuid1()).replace('-', '_').upper()
outputs = ['FORECAST', 'STATS', 'METRICS']
outputs = ['#PAL_CROSTON_TSB_{}_{}_{}'.format(tbl, self.id, unique_id)
for tbl in outputs]
forecast_tbl, stats_tbl, metrics_tbl = outputs
try:
if check_pal_function_exist(conn, '%MASSIVE_CROSTONTSB%', like=True) or self._disable_hana_execution:
self._call_pal_auto(conn,
'PAL_CROSTONTSB',
data_,
ParameterTable().with_data(param_rows),
*outputs)
else:
msg = 'The version of SAP HANA does not support Croston TSB!'
logger.error(msg)
raise ValueError(msg)
except dbapi.Error as db_err:
logger.error(str(db_err))
try_drop(conn, outputs)
raise
except pyodbc.Error as db_err:
logger.error(str(db_err.args[1]))
try_drop(conn, outputs)
raise
self.forecast_ = conn.table(forecast_tbl)
if not self.is_index_int:
single_sql = """
SELECT
ADD_SECONDS('{2}', ({0}-{7}) *{3}) AS {5},
{1} AS {6}
FROM ({4})
""".format(quotename(self.forecast_.columns[0]),
quotename(self.forecast_.columns[1]),
self.forecast_start,
self.timedelta,
self.forecast_.select_statement,
quotename(key),
quotename(endog),
data.count() + 1)
self.forecast_ = conn.sql(single_sql)
self.stats_ = conn.table(stats_tbl)
self.metrics_ = conn.table(metrics_tbl)
setattr(self, "forecast_result", self.forecast_)
return self.forecast_
# massive mode
if 'INT' in group_key_type and self.init_params:
warn_msg = "If the type of group_key is INTEGER, only parameters in group_params are valid!"
warnings.warn(message=warn_msg)
data_ = data[[group_key, key, endog]]
self.is_index_int = _is_index_int(data_, key)
if not self.is_index_int:# timestamp
recomb_data = None
self.forecast_start = {}
self.timedelta = {}
group_count = {}
for group in data_groups:
group_val = group if 'INT' in group_key_type else "'{}'".format(group)
group_data = data_.filter("{}={}".format(quotename(data_.dtypes()[0][0]),
group_val)).sort(data_.dtypes()[0][0])
group_count[group] = group_data.count()
try:
self.forecast_start[group], self.timedelta[group] =\
_get_forecast_starttime_and_timedelta(group_data,
key,
self.is_index_int)
except Exception as err:
logger.warning(err)
pass
group_data = _convert_index_from_timestamp_to_int(group_data, key)
if recomb_data is None:
recomb_data = group_data
else:
recomb_data = recomb_data.union(group_data)
data_ = recomb_data[[group_key, key + '(INT)', endog]]
for group in self.__pal_params:
is_special_group = False
if group in ['PAL_MASSIVE_PROCESSING_SPECIAL_GROUP_ID']:
group_val = 'PAL_MASSIVE_PROCESSING_SPECIAL_GROUP_ID'
is_special_group = True
else:
group_val = int(group) if 'INT' in group_key_type else group
if 'INT' in group_key_type and is_special_group is True:
continue
for name in self.__pal_params[group]:
value, typ = self.__pal_params[group][name]
if name == 'accuracy_measure':
if isinstance(value, str):
value = [value]
for each_ac in value:
param_rows.extend([(group_val, 'MEASURE_NAME', None, None, each_ac)])
else:
tpl = [tuple([group_val] + list(_map_param(name, value, typ)))]
param_rows.extend(tpl)
if not param_rows:
param_rows = [('1', 'PLACE_HOLDER', None, None, 'place_holder')]
unique_id = str(uuid.uuid1()).replace('-', '_').upper()
outputs = ['FORECAST', 'STATS', 'METRICS', 'ERROR_MSG']
outputs = ['#PAL_MASSIVE_CROSTON_TSB{}_{}_{}'.format(tbl, self.id, unique_id)
for tbl in outputs]
forecast_tbl, stats_tbl, metrics_tbl, error_msg_tbl = outputs
try:
if check_pal_function_exist(conn, '%MASSIVE_CROSTONTSB%', like=True) or self._disable_hana_execution:
self._call_pal_auto(conn,
'PAL_MASSIVE_CROSTONTSB',
data_,
ParameterTable(itype=group_key_type).with_data(param_rows),
*outputs)
else:
msg = 'The version of your SAP HANA does not support massive CrostonTSB!'
logger.error(msg)
raise ValueError(msg)
except dbapi.Error as db_err:
logger.error(str(db_err))
try_drop(conn, outputs)
raise
except pyodbc.Error as db_err:
logger.error(str(db_err.args[1]))
try_drop(conn, outputs)
raise
self.forecast_ = conn.table(forecast_tbl)
if not self._disable_hana_execution:
if not self.is_index_int:
comb_data = None
fct = self.forecast_
for group in data_groups:
group_val = int(group) if 'INT' in group_key_type else "'{}'".format(group)
group_fct = fct.filter('GROUP_ID={}'.format(group_val)).sort(key+'(INT)')
massive_sql = """
SELECT {0},
ADD_SECONDS('{3}', ({1}-{6}) * {4}) AS {7},
{2} AS {8}
FROM ({5})
""".format(quotename(self.forecast_.columns[0]),
quotename(self.forecast_.columns[1]),
quotename(self.forecast_.columns[2]),
self.forecast_start[group],
self.timedelta[group],
group_fct.select_statement,
group_count[group] + 1,
quotename(key),
quotename(endog))
group_fct = conn.sql(massive_sql)
if comb_data is None:
comb_data = group_fct
else:
comb_data = group_fct.union(comb_data)
fct_ = comb_data.sort(['GROUP_ID', key])
self.forecast_ = fct_
self.stats_ = conn.table(stats_tbl)
self.metrics_ = conn.table(metrics_tbl)
self.error_msg_ = conn.table(error_msg_tbl)
if not self.error_msg_.collect().empty:
row = self.error_msg_.count()
for i in range(1, row+1):
warn_msg = "For group_key '{}',".format(self.error_msg_.collect()['GROUP_ID'][i-1]) +\
" the error message is '{}'.".format(self.error_msg_.collect()['MESSAGE'][i-1]) +\
"More information could be seen in the attribute error_msg_!"
warnings.warn(message=warn_msg)
setattr(self, "forecast_result", self.forecast_)
return self.forecast_
def build_report(self):
r"""
Generates time series report.
"""
from hana_ml.visualizers.time_series_report_template_helper import TimeSeriesTemplateReportHelper #pylint: disable=cyclic-import
if self.key is None:
self.key = self.training_data.columns[0]
if self.endog is None:
self.endog = self.training_data.columns[1]
if len(self.training_data.columns) > 2:
if self.exog is None:
self.exog = self.training_data.columns
self.exog.remove(self.key)
self.exog.remove(self.endog)
self.report = TimeSeriesTemplateReportHelper(self)
pages = []
page0 = Page("Forecast Result Analysis")
tse = TimeSeriesExplainer(key=self.key, endog=self.endog, exog=self.exog)
tse.add_line_to_comparison_item("Training Data", data=self.training_data, x_name=self.key, y_name=self.endog)
tse.add_line_to_comparison_item("Forecast Data", data=self.forecast_result, x_name=self.forecast_result.columns[0], y_name=self.forecast_result.columns[1])
page0.addItems(tse.get_comparison_item())
pages.append(page0)
self.report.add_pages(pages)
self.report.build_report()
def generate_html_report(self, filename=None):
"""
Display function.
"""
self.report.generate_html_report(filename)
def generate_notebook_iframe_report(self):
"""
Display function.
"""
self.report.generate_notebook_iframe_report()
|
PypiClean
|
/letigre-moto-0.0.1.tar.gz/letigre-moto-0.0.1/moto/autoscaling/responses.py
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.core.utils import amz_crc32, amzn_request_id
from .models import autoscaling_backends
class AutoScalingResponse(BaseResponse):
@property
def autoscaling_backend(self):
return autoscaling_backends[self.region]
def create_launch_configuration(self):
instance_monitoring_string = self._get_param(
'InstanceMonitoring.Enabled')
if instance_monitoring_string == 'true':
instance_monitoring = True
else:
instance_monitoring = False
self.autoscaling_backend.create_launch_configuration(
name=self._get_param('LaunchConfigurationName'),
image_id=self._get_param('ImageId'),
key_name=self._get_param('KeyName'),
ramdisk_id=self._get_param('RamdiskId'),
kernel_id=self._get_param('KernelId'),
security_groups=self._get_multi_param('SecurityGroups.member'),
user_data=self._get_param('UserData'),
instance_type=self._get_param('InstanceType'),
instance_monitoring=instance_monitoring,
instance_profile_name=self._get_param('IamInstanceProfile'),
spot_price=self._get_param('SpotPrice'),
ebs_optimized=self._get_param('EbsOptimized'),
associate_public_ip_address=self._get_param(
"AssociatePublicIpAddress"),
block_device_mappings=self._get_list_prefix(
'BlockDeviceMappings.member')
)
template = self.response_template(CREATE_LAUNCH_CONFIGURATION_TEMPLATE)
return template.render()
def describe_launch_configurations(self):
names = self._get_multi_param('LaunchConfigurationNames.member')
all_launch_configurations = self.autoscaling_backend.describe_launch_configurations(names)
marker = self._get_param('NextToken')
all_names = [lc.name for lc in all_launch_configurations]
if marker:
start = all_names.index(marker) + 1
else:
start = 0
max_records = self._get_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier
launch_configurations_resp = all_launch_configurations[start:start + max_records]
next_token = None
if len(all_launch_configurations) > start + max_records:
next_token = launch_configurations_resp[-1].name
template = self.response_template(
DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE)
return template.render(launch_configurations=launch_configurations_resp, next_token=next_token)
def delete_launch_configuration(self):
launch_configurations_name = self.querystring.get(
'LaunchConfigurationName')[0]
self.autoscaling_backend.delete_launch_configuration(
launch_configurations_name)
template = self.response_template(DELETE_LAUNCH_CONFIGURATION_TEMPLATE)
return template.render()
def create_auto_scaling_group(self):
self.autoscaling_backend.create_auto_scaling_group(
name=self._get_param('AutoScalingGroupName'),
availability_zones=self._get_multi_param(
'AvailabilityZones.member'),
desired_capacity=self._get_int_param('DesiredCapacity'),
max_size=self._get_int_param('MaxSize'),
min_size=self._get_int_param('MinSize'),
launch_config_name=self._get_param('LaunchConfigurationName'),
vpc_zone_identifier=self._get_param('VPCZoneIdentifier'),
default_cooldown=self._get_int_param('DefaultCooldown'),
health_check_period=self._get_int_param('HealthCheckGracePeriod'),
health_check_type=self._get_param('HealthCheckType'),
load_balancers=self._get_multi_param('LoadBalancerNames.member'),
target_group_arns=self._get_multi_param('TargetGroupARNs.member'),
placement_group=self._get_param('PlacementGroup'),
termination_policies=self._get_multi_param(
'TerminationPolicies.member'),
tags=self._get_list_prefix('Tags.member'),
new_instances_protected_from_scale_in=self._get_bool_param(
'NewInstancesProtectedFromScaleIn', False)
)
template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def attach_instances(self):
group_name = self._get_param('AutoScalingGroupName')
instance_ids = self._get_multi_param('InstanceIds.member')
self.autoscaling_backend.attach_instances(
group_name, instance_ids)
template = self.response_template(ATTACH_INSTANCES_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def set_instance_health(self):
instance_id = self._get_param('InstanceId')
health_status = self._get_param("HealthStatus")
if health_status not in ['Healthy', 'Unhealthy']:
raise ValueError('Valid instance health states are: [Healthy, Unhealthy]')
should_respect_grace_period = self._get_param("ShouldRespectGracePeriod")
self.autoscaling_backend.set_instance_health(instance_id, health_status, should_respect_grace_period)
template = self.response_template(SET_INSTANCE_HEALTH_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def detach_instances(self):
group_name = self._get_param('AutoScalingGroupName')
instance_ids = self._get_multi_param('InstanceIds.member')
should_decrement_string = self._get_param('ShouldDecrementDesiredCapacity')
if should_decrement_string == 'true':
should_decrement = True
else:
should_decrement = False
detached_instances = self.autoscaling_backend.detach_instances(
group_name, instance_ids, should_decrement)
template = self.response_template(DETACH_INSTANCES_TEMPLATE)
return template.render(detached_instances=detached_instances)
@amz_crc32
@amzn_request_id
def attach_load_balancer_target_groups(self):
group_name = self._get_param('AutoScalingGroupName')
target_group_arns = self._get_multi_param('TargetGroupARNs.member')
self.autoscaling_backend.attach_load_balancer_target_groups(
group_name, target_group_arns)
template = self.response_template(ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def describe_load_balancer_target_groups(self):
group_name = self._get_param('AutoScalingGroupName')
target_group_arns = self.autoscaling_backend.describe_load_balancer_target_groups(
group_name)
template = self.response_template(DESCRIBE_LOAD_BALANCER_TARGET_GROUPS)
return template.render(target_group_arns=target_group_arns)
@amz_crc32
@amzn_request_id
def detach_load_balancer_target_groups(self):
group_name = self._get_param('AutoScalingGroupName')
target_group_arns = self._get_multi_param('TargetGroupARNs.member')
self.autoscaling_backend.detach_load_balancer_target_groups(
group_name, target_group_arns)
template = self.response_template(DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE)
return template.render()
def describe_auto_scaling_groups(self):
names = self._get_multi_param("AutoScalingGroupNames.member")
token = self._get_param("NextToken")
all_groups = self.autoscaling_backend.describe_auto_scaling_groups(names)
all_names = [group.name for group in all_groups]
if token:
start = all_names.index(token) + 1
else:
start = 0
max_records = self._get_int_param("MaxRecords", 50)
if max_records > 100:
raise ValueError("MaxRecords must not exceed 100.")
groups = all_groups[start:start + max_records]
next_token = None
if max_records and len(all_groups) > start + max_records:
next_token = groups[-1].name
template = self.response_template(DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE)
return template.render(groups=groups, next_token=next_token)
def update_auto_scaling_group(self):
self.autoscaling_backend.update_auto_scaling_group(
name=self._get_param('AutoScalingGroupName'),
availability_zones=self._get_multi_param(
'AvailabilityZones.member'),
desired_capacity=self._get_int_param('DesiredCapacity'),
max_size=self._get_int_param('MaxSize'),
min_size=self._get_int_param('MinSize'),
launch_config_name=self._get_param('LaunchConfigurationName'),
vpc_zone_identifier=self._get_param('VPCZoneIdentifier'),
default_cooldown=self._get_int_param('DefaultCooldown'),
health_check_period=self._get_int_param('HealthCheckGracePeriod'),
health_check_type=self._get_param('HealthCheckType'),
placement_group=self._get_param('PlacementGroup'),
termination_policies=self._get_multi_param(
'TerminationPolicies.member'),
new_instances_protected_from_scale_in=self._get_bool_param(
'NewInstancesProtectedFromScaleIn', None)
)
template = self.response_template(UPDATE_AUTOSCALING_GROUP_TEMPLATE)
return template.render()
def delete_auto_scaling_group(self):
group_name = self._get_param('AutoScalingGroupName')
self.autoscaling_backend.delete_auto_scaling_group(group_name)
template = self.response_template(DELETE_AUTOSCALING_GROUP_TEMPLATE)
return template.render()
def set_desired_capacity(self):
group_name = self._get_param('AutoScalingGroupName')
desired_capacity = self._get_int_param('DesiredCapacity')
self.autoscaling_backend.set_desired_capacity(
group_name, desired_capacity)
template = self.response_template(SET_DESIRED_CAPACITY_TEMPLATE)
return template.render()
def create_or_update_tags(self):
tags = self._get_list_prefix('Tags.member')
self.autoscaling_backend.create_or_update_tags(tags)
template = self.response_template(UPDATE_AUTOSCALING_GROUP_TEMPLATE)
return template.render()
def describe_auto_scaling_instances(self):
instance_states = self.autoscaling_backend.describe_auto_scaling_instances()
template = self.response_template(
DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE)
return template.render(instance_states=instance_states)
def put_scaling_policy(self):
policy = self.autoscaling_backend.create_autoscaling_policy(
name=self._get_param('PolicyName'),
policy_type=self._get_param('PolicyType'),
adjustment_type=self._get_param('AdjustmentType'),
as_name=self._get_param('AutoScalingGroupName'),
scaling_adjustment=self._get_int_param('ScalingAdjustment'),
cooldown=self._get_int_param('Cooldown'),
)
template = self.response_template(CREATE_SCALING_POLICY_TEMPLATE)
return template.render(policy=policy)
def describe_policies(self):
policies = self.autoscaling_backend.describe_policies(
autoscaling_group_name=self._get_param('AutoScalingGroupName'),
policy_names=self._get_multi_param('PolicyNames.member'),
policy_types=self._get_multi_param('PolicyTypes.member'))
template = self.response_template(DESCRIBE_SCALING_POLICIES_TEMPLATE)
return template.render(policies=policies)
def delete_policy(self):
group_name = self._get_param('PolicyName')
self.autoscaling_backend.delete_policy(group_name)
template = self.response_template(DELETE_POLICY_TEMPLATE)
return template.render()
def execute_policy(self):
group_name = self._get_param('PolicyName')
self.autoscaling_backend.execute_policy(group_name)
template = self.response_template(EXECUTE_POLICY_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def attach_load_balancers(self):
group_name = self._get_param('AutoScalingGroupName')
load_balancer_names = self._get_multi_param("LoadBalancerNames.member")
self.autoscaling_backend.attach_load_balancers(
group_name, load_balancer_names)
template = self.response_template(ATTACH_LOAD_BALANCERS_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def describe_load_balancers(self):
group_name = self._get_param('AutoScalingGroupName')
load_balancers = self.autoscaling_backend.describe_load_balancers(group_name)
template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE)
return template.render(load_balancers=load_balancers)
@amz_crc32
@amzn_request_id
def detach_load_balancers(self):
group_name = self._get_param('AutoScalingGroupName')
load_balancer_names = self._get_multi_param("LoadBalancerNames.member")
self.autoscaling_backend.detach_load_balancers(
group_name, load_balancer_names)
template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE)
return template.render()
def suspend_processes(self):
autoscaling_group_name = self._get_param('AutoScalingGroupName')
scaling_processes = self._get_multi_param('ScalingProcesses.member')
self.autoscaling_backend.suspend_processes(autoscaling_group_name, scaling_processes)
template = self.response_template(SUSPEND_PROCESSES_TEMPLATE)
return template.render()
def set_instance_protection(self):
group_name = self._get_param('AutoScalingGroupName')
instance_ids = self._get_multi_param('InstanceIds.member')
protected_from_scale_in = self._get_bool_param('ProtectedFromScaleIn')
self.autoscaling_backend.set_instance_protection(
group_name, instance_ids, protected_from_scale_in)
template = self.response_template(SET_INSTANCE_PROTECTION_TEMPLATE)
return template.render()
CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
</ResponseMetadata>
</CreateLaunchConfigurationResponse>"""
DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """<DescribeLaunchConfigurationsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DescribeLaunchConfigurationsResult>
<LaunchConfigurations>
{% for launch_configuration in launch_configurations %}
<member>
<AssociatePublicIpAddress>{{ launch_configuration.associate_public_ip_address }}</AssociatePublicIpAddress>
<SecurityGroups>
{% for security_group in launch_configuration.security_groups %}
<member>{{ security_group }}</member>
{% endfor %}
</SecurityGroups>
<CreatedTime>2013-01-21T23:04:42.200Z</CreatedTime>
<KernelId>{{ launch_configuration.kernel_id }}</KernelId>
{% if launch_configuration.instance_profile_name %}
<IamInstanceProfile>{{ launch_configuration.instance_profile_name }}</IamInstanceProfile>
{% endif %}
<LaunchConfigurationName>{{ launch_configuration.name }}</LaunchConfigurationName>
{% if launch_configuration.user_data %}
<UserData>{{ launch_configuration.user_data }}</UserData>
{% else %}
<UserData/>
{% endif %}
<InstanceType>{{ launch_configuration.instance_type }}</InstanceType>
<LaunchConfigurationARN>arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }}</LaunchConfigurationARN>
{% if launch_configuration.block_device_mappings %}
<BlockDeviceMappings>
{% for mount_point, mapping in launch_configuration.block_device_mappings.items() %}
<member>
<DeviceName>{{ mount_point }}</DeviceName>
{% if mapping.ephemeral_name %}
<VirtualName>{{ mapping.ephemeral_name }}</VirtualName>
{% else %}
<Ebs>
{% if mapping.snapshot_id %}
<SnapshotId>{{ mapping.snapshot_id }}</SnapshotId>
{% endif %}
{% if mapping.size %}
<VolumeSize>{{ mapping.size }}</VolumeSize>
{% endif %}
{% if mapping.iops %}
<Iops>{{ mapping.iops }}</Iops>
{% endif %}
<DeleteOnTermination>{{ mapping.delete_on_termination }}</DeleteOnTermination>
<VolumeType>{{ mapping.volume_type }}</VolumeType>
</Ebs>
{% endif %}
</member>
{% endfor %}
</BlockDeviceMappings>
{% else %}
<BlockDeviceMappings/>
{% endif %}
<ImageId>{{ launch_configuration.image_id }}</ImageId>
{% if launch_configuration.key_name %}
<KeyName>{{ launch_configuration.key_name }}</KeyName>
{% else %}
<KeyName/>
{% endif %}
<RamdiskId>{{ launch_configuration.ramdisk_id }}</RamdiskId>
<EbsOptimized>{{ launch_configuration.ebs_optimized }}</EbsOptimized>
<InstanceMonitoring>
<Enabled>{{ launch_configuration.instance_monitoring_enabled }}</Enabled>
</InstanceMonitoring>
{% if launch_configuration.spot_price %}
<SpotPrice>{{ launch_configuration.spot_price }}</SpotPrice>
{% endif %}
</member>
{% endfor %}
</LaunchConfigurations>
{% if next_token %}
<NextToken>{{ next_token }}</NextToken>
{% endif %}
</DescribeLaunchConfigurationsResult>
<ResponseMetadata>
<RequestId>d05a22f8-b690-11e2-bf8e-2113fEXAMPLE</RequestId>
</ResponseMetadata>
</DescribeLaunchConfigurationsResponse>"""
DELETE_LAUNCH_CONFIGURATION_TEMPLATE = """<DeleteLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>7347261f-97df-11e2-8756-35eEXAMPLE</RequestId>
</ResponseMetadata>
</DeleteLaunchConfigurationResponse>"""
CREATE_AUTOSCALING_GROUP_TEMPLATE = """<CreateAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>8d798a29-f083-11e1-bdfb-cb223EXAMPLE</RequestId>
</ResponseMetadata>
</CreateAutoScalingGroupResponse>"""
ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<AttachLoadBalancerTargetGroupsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<AttachLoadBalancerTargetGroupsResult>
</AttachLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</AttachLoadBalancerTargetGroupsResponse>"""
ATTACH_INSTANCES_TEMPLATE = """<AttachInstancesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<AttachInstancesResult>
</AttachInstancesResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</AttachInstancesResponse>"""
DESCRIBE_LOAD_BALANCER_TARGET_GROUPS = """<DescribeLoadBalancerTargetGroupsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DescribeLoadBalancerTargetGroupsResult>
<LoadBalancerTargetGroups>
{% for arn in target_group_arns %}
<member>
<LoadBalancerTargetGroupARN>{{ arn }}</LoadBalancerTargetGroupARN>
<State>Added</State>
</member>
{% endfor %}
</LoadBalancerTargetGroups>
</DescribeLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</DescribeLoadBalancerTargetGroupsResponse>"""
DETACH_INSTANCES_TEMPLATE = """<DetachInstancesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DetachInstancesResult>
<Activities>
{% for instance in detached_instances %}
<member>
<ActivityId>5091cb52-547a-47ce-a236-c9ccbc2cb2c9EXAMPLE</ActivityId>
<AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
<Cause>
At 2017-10-15T15:55:21Z instance {{ instance.instance.id }} was detached in response to a user request.
</Cause>
<Description>Detaching EC2 instance: {{ instance.instance.id }}</Description>
<StartTime>2017-10-15T15:55:21Z</StartTime>
<EndTime>2017-10-15T15:55:21Z</EndTime>
<StatusCode>InProgress</StatusCode>
<StatusMessage>InProgress</StatusMessage>
<Progress>50</Progress>
<Details>details</Details>
</member>
{% endfor %}
</Activities>
</DetachInstancesResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</DetachInstancesResponse>"""
DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<DetachLoadBalancerTargetGroupsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DetachLoadBalancerTargetGroupsResult>
</DetachLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</DetachLoadBalancerTargetGroupsResponse>"""
DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DescribeAutoScalingGroupsResult>
<AutoScalingGroups>
{% for group in groups %}
<member>
<Tags>
{% for tag in group.tags %}
<member>
<ResourceType>{{ tag.resource_type or tag.ResourceType }}</ResourceType>
<ResourceId>{{ tag.resource_id or tag.ResourceId }}</ResourceId>
<PropagateAtLaunch>{{ tag.propagate_at_launch or tag.PropagateAtLaunch }}</PropagateAtLaunch>
<Key>{{ tag.key or tag.Key }}</Key>
<Value>{{ tag.value or tag.Value }}</Value>
</member>
{% endfor %}
</Tags>
<SuspendedProcesses>
{% for suspended_process in group.suspended_processes %}
<member>
<ProcessName>{{suspended_process}}</ProcessName>
<SuspensionReason></SuspensionReason>
</member>
{% endfor %}
</SuspendedProcesses>
<AutoScalingGroupName>{{ group.name }}</AutoScalingGroupName>
<HealthCheckType>{{ group.health_check_type }}</HealthCheckType>
<CreatedTime>2013-05-06T17:47:15.107Z</CreatedTime>
<EnabledMetrics/>
<LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
<Instances>
{% for instance_state in group.instance_states %}
<member>
<HealthStatus>{{ instance_state.health_status }}</HealthStatus>
<AvailabilityZone>us-east-1e</AvailabilityZone>
<InstanceId>{{ instance_state.instance.id }}</InstanceId>
<LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
<LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
<ProtectedFromScaleIn>{{ instance_state.protected_from_scale_in|string|lower }}</ProtectedFromScaleIn>
</member>
{% endfor %}
</Instances>
<DesiredCapacity>{{ group.desired_capacity }}</DesiredCapacity>
<AvailabilityZones>
{% for availability_zone in group.availability_zones %}
<member>{{ availability_zone }}</member>
{% endfor %}
</AvailabilityZones>
{% if group.load_balancers %}
<LoadBalancerNames>
{% for load_balancer in group.load_balancers %}
<member>{{ load_balancer }}</member>
{% endfor %}
</LoadBalancerNames>
{% else %}
<LoadBalancerNames/>
{% endif %}
{% if group.target_group_arns %}
<TargetGroupARNs>
{% for target_group_arn in group.target_group_arns %}
<member>{{ target_group_arn }}</member>
{% endfor %}
</TargetGroupARNs>
{% else %}
<TargetGroupARNs/>
{% endif %}
<MinSize>{{ group.min_size }}</MinSize>
{% if group.vpc_zone_identifier %}
<VPCZoneIdentifier>{{ group.vpc_zone_identifier }}</VPCZoneIdentifier>
{% else %}
<VPCZoneIdentifier/>
{% endif %}
<HealthCheckGracePeriod>{{ group.health_check_period }}</HealthCheckGracePeriod>
<DefaultCooldown>{{ group.default_cooldown }}</DefaultCooldown>
<AutoScalingGroupARN>arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb:autoScalingGroupName/{{ group.name }}</AutoScalingGroupARN>
{% if group.termination_policies %}
<TerminationPolicies>
{% for policy in group.termination_policies %}
<member>{{ policy }}</member>
{% endfor %}
</TerminationPolicies>
{% else %}
<TerminationPolicies/>
{% endif %}
<MaxSize>{{ group.max_size }}</MaxSize>
{% if group.placement_group %}
<PlacementGroup>{{ group.placement_group }}</PlacementGroup>
{% endif %}
<NewInstancesProtectedFromScaleIn>{{ group.new_instances_protected_from_scale_in|string|lower }}</NewInstancesProtectedFromScaleIn>
</member>
{% endfor %}
</AutoScalingGroups>
{% if next_token %}
<NextToken>{{ next_token }}</NextToken>
{% endif %}
</DescribeAutoScalingGroupsResult>
<ResponseMetadata>
<RequestId>0f02a07d-b677-11e2-9eb0-dd50EXAMPLE</RequestId>
</ResponseMetadata>
</DescribeAutoScalingGroupsResponse>"""
UPDATE_AUTOSCALING_GROUP_TEMPLATE = """<UpdateAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>adafead0-ab8a-11e2-ba13-ab0ccEXAMPLE</RequestId>
</ResponseMetadata>
</UpdateAutoScalingGroupResponse>"""
DELETE_AUTOSCALING_GROUP_TEMPLATE = """<DeleteAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>70a76d42-9665-11e2-9fdf-211deEXAMPLE</RequestId>
</ResponseMetadata>
</DeleteAutoScalingGroupResponse>"""
DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """<DescribeAutoScalingInstancesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DescribeAutoScalingInstancesResult>
<AutoScalingInstances>
{% for instance_state in instance_states %}
<member>
<HealthStatus>{{ instance_state.health_status }}</HealthStatus>
<AutoScalingGroupName>{{ instance_state.instance.autoscaling_group.name }}</AutoScalingGroupName>
<AvailabilityZone>us-east-1e</AvailabilityZone>
<InstanceId>{{ instance_state.instance.id }}</InstanceId>
<LaunchConfigurationName>{{ instance_state.instance.autoscaling_group.launch_config_name }}</LaunchConfigurationName>
<LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
<ProtectedFromScaleIn>{{ instance_state.protected_from_scale_in|string|lower }}</ProtectedFromScaleIn>
</member>
{% endfor %}
</AutoScalingInstances>
</DescribeAutoScalingInstancesResult>
<ResponseMetadata>
<RequestId>df992dc3-b72f-11e2-81e1-750aa6EXAMPLE</RequestId>
</ResponseMetadata>
</DescribeAutoScalingInstancesResponse>"""
CREATE_SCALING_POLICY_TEMPLATE = """<PutScalingPolicyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<PutScalingPolicyResult>
<PolicyARN>arn:aws:autoscaling:us-east-1:803981987763:scalingPolicy:b0dcf5e8
-02e6-4e31-9719-0675d0dc31ae:autoScalingGroupName/my-test-asg:policyName/my-scal
eout-policy</PolicyARN>
</PutScalingPolicyResult>
<ResponseMetadata>
<RequestId>3cfc6fef-c08b-11e2-a697-2922EXAMPLE</RequestId>
</ResponseMetadata>
</PutScalingPolicyResponse>"""
DESCRIBE_SCALING_POLICIES_TEMPLATE = """<DescribePoliciesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DescribePoliciesResult>
<ScalingPolicies>
{% for policy in policies %}
<member>
<PolicyARN>arn:aws:autoscaling:us-east-1:803981987763:scalingPolicy:c322
761b-3172-4d56-9a21-0ed9d6161d67:autoScalingGroupName/my-test-asg:policyName/MyScaleDownPolicy</PolicyARN>
<AdjustmentType>{{ policy.adjustment_type }}</AdjustmentType>
<ScalingAdjustment>{{ policy.scaling_adjustment }}</ScalingAdjustment>
<PolicyName>{{ policy.name }}</PolicyName>
<PolicyType>{{ policy.policy_type }}</PolicyType>
<AutoScalingGroupName>{{ policy.as_name }}</AutoScalingGroupName>
<Cooldown>{{ policy.cooldown }}</Cooldown>
<Alarms/>
</member>
{% endfor %}
</ScalingPolicies>
</DescribePoliciesResult>
<ResponseMetadata>
<RequestId>ec3bffad-b739-11e2-b38d-15fbEXAMPLE</RequestId>
</ResponseMetadata>
</DescribePoliciesResponse>"""
SET_DESIRED_CAPACITY_TEMPLATE = """<SetDesiredCapacityResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>9fb7e2db-6998-11e2-a985-57c82EXAMPLE</RequestId>
</ResponseMetadata>
</SetDesiredCapacityResponse>"""
EXECUTE_POLICY_TEMPLATE = """<ExecuteScalingPolicyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>70a76d42-9665-11e2-9fdf-211deEXAMPLE</RequestId>
</ResponseMetadata>
</ExecuteScalingPolicyResponse>"""
DELETE_POLICY_TEMPLATE = """<DeleteScalingPolicyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>70a76d42-9665-11e2-9fdf-211deEXAMPLE</RequestId>
</ResponseMetadata>
</DeleteScalingPolicyResponse>"""
ATTACH_LOAD_BALANCERS_TEMPLATE = """<AttachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<AttachLoadBalancersResult></AttachLoadBalancersResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</AttachLoadBalancersResponse>"""
DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DescribeLoadBalancersResult>
<LoadBalancers>
{% for load_balancer in load_balancers %}
<member>
<LoadBalancerName>{{ load_balancer }}</LoadBalancerName>
<State>Added</State>
</member>
{% endfor %}
</LoadBalancers>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>"""
DETACH_LOAD_BALANCERS_TEMPLATE = """<DetachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DetachLoadBalancersResult></DetachLoadBalancersResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</DetachLoadBalancersResponse>"""
SUSPEND_PROCESSES_TEMPLATE = """<SuspendProcessesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
</ResponseMetadata>
</SuspendProcessesResponse>"""
SET_INSTANCE_HEALTH_TEMPLATE = """<SetInstanceHealthResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<SetInstanceHealthResponse></SetInstanceHealthResponse>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceHealthResponse>"""
SET_INSTANCE_PROTECTION_TEMPLATE = """<SetInstanceProtectionResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<SetInstanceProtectionResult></SetInstanceProtectionResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceProtectionResponse>"""
|
PypiClean
|
/neural_compressor_full-2.1.1.tar.gz/neural_compressor_full-2.1.1/neural_compressor/strategy/bayesian.py
|
import copy
import warnings
import numpy as np
from scipy.optimize import minimize
from sklearn.gaussian_process.kernels import Matern
from sklearn.gaussian_process import GaussianProcessRegressor
from collections import OrderedDict
from copy import deepcopy
from ..utils import logger
from .strategy import strategy_registry, TuneStrategy
from .utils.tuning_sampler import OpWiseTuningSampler
from .utils.tuning_structs import OpTuningConfig
@strategy_registry
class BayesianTuneStrategy(TuneStrategy):
"""The Bayesian tuning strategy."""
def __init__(self, model, conf, q_dataloader, q_func=None, eval_dataloader=None,
eval_func=None, dicts=None, q_hooks=None):
"""Init the BaySian tuning strategy."""
super().__init__(model, conf, q_dataloader, q_func, eval_dataloader,
eval_func, dicts, q_hooks)
self.bayes_opt = None
def __getstate__(self):
"""Magic method for pickle saving.
Returns:
dict: Saved dict for resuming
"""
for history in self.tuning_history:
if self._same_yaml(history['cfg'], self.cfg):
history['bayes_opt'] = self.bayes_opt
save_dict = super().__getstate__()
return save_dict
def _params_to_tune_configs(self, params):
op_tuning_cfg = {}
calib_sampling_size_lst = self.tuning_space.root_item.get_option_by_name('calib_sampling_size').options
for op_name_type, configs in self.op_configs.items():
if len(configs) == 1:
op_tuning_cfg[op_name_type] = configs[0]
else:
op_tuning_cfg[op_name_type] = configs[min(len(configs) - 1, int(params[op_name_type[0]]))]
if len(calib_sampling_size_lst) > 1:
calib_sampling_size = calib_sampling_size_lst[min(len(calib_sampling_size_lst) - 1, int(params['calib_sampling_size']))]
else:
calib_sampling_size = calib_sampling_size_lst[0]
op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
return op_tuning_cfg
def next_tune_cfg(self):
"""Generate the next tuning config according to bayesian search algorithm.
This strategy comes from the Bayesian optimization package and adapts it to a discrete version.
It uses Gaussian processes to define the prior/posterior distribution over the black-box
function with the tuning history and then finds the tuning configuration that maximizes
the expected improvement.
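As a sketch, the standard expected-improvement criterion that such a search maximizes has
the following form (the exact utility function used internally may differ)::

    EI(x) = E[max(f(x) - f_best, 0)]
          = (mu(x) - f_best) * Phi(z) + sigma(x) * phi(z),  where z = (mu(x) - f_best) / sigma(x)

with mu/sigma the GP posterior mean/standard deviation and Phi/phi the standard
normal CDF/PDF.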
Returns:
tune_config (dict): A dict containing the tuning configuration for quantization.
"""
params = None
pbounds = {}
tuning_space = self.tuning_space
calib_sampling_size_lst = tuning_space.root_item.get_option_by_name('calib_sampling_size').options
op_item_dtype_dict, quant_mode_wise_items, initial_op_tuning_cfg = self.initial_tuning_cfg()
op_wise_pool = OpWiseTuningSampler(tuning_space, [], [],
op_item_dtype_dict, initial_op_tuning_cfg)
self.op_configs = op_wise_pool.get_opwise_candidate()
for op_name_type, configs in self.op_configs.items():
if len(configs) > 1:
pbounds[op_name_type[0]] = (0, len(configs))
if len(calib_sampling_size_lst) > 1:
pbounds['calib_sampling_size'] = (0, len(calib_sampling_size_lst))
if len(pbounds) == 0:
yield self._params_to_tune_configs(params)
return
if self.bayes_opt is None:
self.bayes_opt = BayesianOptimization(
pbounds=pbounds, random_seed=self.cfg.tuning.random_seed)
while True:
params = self.bayes_opt.gen_next_params()
logger.debug("Dump current bayesian params:")
logger.debug(params)
yield self._params_to_tune_configs(params)
try:
self.bayes_opt._space.register(params, self.last_tune_result[0])
except KeyError:
logger.debug("Find registered params, skip it.")
pass
# Util part
# Bayesian opt acq function
def acq_max(ac, gp, y_max, bounds, random_seed, n_warmup=10000, n_iter=10):
"""Find the maximum of the acquisition function parameters.
Args:
ac: The acquisition function object that return its point-wise value.
gp: A gaussian process fitted to the relevant data.
y_max: The current maximum known value of the target function.
bounds: The variables bounds to limit the search of the acq max.
        random_seed: seed for the random number generator
n_warmup: number of times to randomly sample the acquisition function
n_iter: number of times to run scipy.minimize
Returns:
x_max: The arg max of the acquisition function.
"""
# Warm up with random points
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],
size=(n_warmup, bounds.shape[0]))
ys = ac(x_tries, gp=gp, y_max=y_max)
x_max = x_tries[ys.argmax()]
max_acq = ys.max()
# Explore the parameter space more thoroughly
x_seeds = np.random.uniform(bounds[:, 0], bounds[:, 1],
size=(n_iter, bounds.shape[0]))
for x_try in x_seeds:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# See if success
if not res.success:
continue
if isinstance(res.fun, float):
res.fun = np.array([res.fun])
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun[0] >= max_acq:
x_max = res.x
max_acq = -res.fun[0]
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
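# Illustrative sketch (not part of the original module): how `acq_max` is meant to be
# driven -- fit a GP on a couple of observations, define a UCB acquisition, and ask for
# the most promising point inside the bounds. The names and values below are hypothetical.
def _acq_max_usage_sketch():  # pragma: no cover
    def ucb(x, gp, y_max, kappa=2.576):
        # same upper-confidence-bound form used by BayesianOptimization._ucb below
        mean, std = gp.predict(x, return_std=True)
        return mean + kappa * std
    gp = GaussianProcessRegressor(kernel=Matern(nu=2.5), alpha=1e-6, normalize_y=True)
    observed_x = np.array([[2.0], [7.0]])
    observed_y = np.array([0.5, 0.9])
    gp.fit(observed_x, observed_y)
    bounds = np.array([[0.0, 10.0]])
    # returns the argmax of the acquisition, clipped to the bounds
    return acq_max(ac=ucb, gp=gp, y_max=observed_y.max(), bounds=bounds, random_seed=9527)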
def _hashable(x):
"""Ensure that an point is hashable by a python dict."""
return tuple(map(float, x))
# Target space part
class TargetSpace(object):
"""Holds the param-space coordinates (X) and target values (Y).
Allows for constant-time appends while ensuring no duplicates are added.
"""
def __init__(self, pbounds, random_seed=9527):
"""Construct a TargetSpace.
Args:
target_func (function): Function to be maximized.
pbounds (dict): Dictionary with parameters names as keys and a tuple with minimum and maximum values.
random_seed (int): Optionally specify a seed for a random number generator
"""
self.random_seed = random_seed
# Get the name of the parameters
names = list(pbounds.keys())
self._keys = deepcopy(names)
# Create an array with parameters bounds
self._bounds = np.array(
[pbounds[name] for name in names],
dtype=np.float32
)
# preallocated memory for X and Y points
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=(0))
# keep track of unique points we have seen so far
self._cache = {}
def __contains__(self, x):
"""Check if param x is cached in this space."""
return _hashable(x) in self._cache
def __len__(self):
"""Get the total count of stored items."""
assert len(self._params) == len(self._target)
return len(self._target)
@property
def empty(self):
"""Check if the space is empty."""
return len(self) == 0
@property
def params(self):
"""Get all params stored in this space."""
return self._params
@property
def target(self):
"""Get all target values in this space."""
return self._target
@property
def dim(self):
"""Get the dimension of this space."""
return len(self._keys)
@property
def keys(self):
"""Get all keys of this space."""
return self._keys
@property
def bounds(self):
"""Get the bounds of this space."""
return self._bounds
def params_to_array(self, params):
"""Generate an array from params.
Args:
params (Dict): The dict contains keys in `self.keys`, and
corresponding param.
Returns:
np.array: An array contains all params.
"""
try:
assert set(params) == set(self.keys)
except AssertionError:
raise ValueError(
"Parameters' keys ({}) do ".format(list(params.keys())) +
"not match the expected set of keys ({}).".format(self.keys)
)
return np.asarray([params[key] for key in self.keys])
def array_to_params(self, x):
"""Generate an params' dict from array.
Args:
x (np.array): The array contains all params.
Returns:
dict: the dict contains keys and the params corresponding to it.
"""
try:
assert len(x) == len(self.keys)
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return dict(zip(self.keys, x))
def _as_array(self, x):
try:
x = np.asarray(x, dtype=float)
except TypeError:
x = self.params_to_array(x)
x = x.ravel()
try:
assert x.size == self.dim
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return x
def register(self, params, target):
"""Append a point and its target value to the known data.
Runs in amortized constant time.
Args:
params (ndarray): a single point, with len(params) == self.dim
target (float): target function value
Raises:
KeyError: if the point is not unique
"""
x = self._as_array(params)
if x in self:
raise KeyError('Params point {} is not unique'.format(x))
# Insert data into unique dictionary
self._cache[_hashable(x.ravel())] = target
self._params = np.concatenate([self._params, x.reshape(1, -1)])
self._target = np.concatenate([self._target, [target]])
def get_target(self, params):
"""Get the target value of params.
Args:
params (ndarray): a single point, with len(params) == self.dim
Returns:
target (float): target function value.
"""
x = self._as_array(params)
target = self._cache[_hashable(x)]
return target
def random_sample(self):
"""Create random points within the bounds of the space.
Returns:
data (ndarray): [num x dim] array points with dimensions corresponding to `self._keys`
"""
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((1, self.dim))
for col, (lower, upper) in enumerate(self._bounds):
data.T[col] = np.random.uniform( # pylint: disable=unsupported-assignment-operation
lower, upper, size=1)
return data.ravel()
def max(self):
"""Get maximum target value found and corresponding parametes."""
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
def res(self):
"""Get all target values found and corresponding parametes."""
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
]
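# Illustrative sketch (not part of the original module): the TargetSpace bookkeeping --
# register observed (params, target) pairs and query the best one seen so far. The
# parameter name "x" and the target values below are hypothetical.
def _target_space_usage_sketch():  # pragma: no cover
    space = TargetSpace(pbounds={"x": (0.0, 10.0)})
    space.register({"x": 2.0}, target=0.5)
    space.register({"x": 7.0}, target=0.9)
    # registering the same point twice raises KeyError, which callers are expected to handle
    return space.max()  # -> {'target': 0.9, 'params': {'x': 7.0}}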
# Tuning part
class BayesianOptimization():
"""The class for bayesian optimization.
This class takes the parameters bounds in order to find which values for
the parameters yield the maximum value using bayesian optimization.
"""
def __init__(self, pbounds, random_seed=9527, verbose=2):
"""Init bayesian optimization.
Args:
pbounds (dict): Dictionary with parameters names as keys and a tuple with
minimum and maximum values.
random_seed (int, optional): The seed for random searching. Default to 9527.
verbose (int, optional): The level of verbosity. Default to 2.
"""
self._random_seed = random_seed
# Data structure containing the bounds of its domain,
# and a record of the points we have evaluated.
self._space = TargetSpace(pbounds, random_seed)
# Internal GP regressor
self._gp = GaussianProcessRegressor(
kernel=Matern(nu=2.5),
alpha=1e-6,
normalize_y=True,
n_restarts_optimizer=5,
random_state=self._random_seed,
)
self._verbose = verbose
@property
def space(self):
"""Get the target space."""
return self._space
@property
def max(self):
"""Get the maximum value of target space."""
return self._space.max()
@property
def res(self):
"""Get the minimum value of target space."""
return self._space.res()
@staticmethod
    def _ucb(x, gp, y_max, kappa=2.576):
        """Upper confidence bound acquisition: predicted mean plus kappa times the predicted std."""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
return mean + kappa * std
def suggest(self):
"""Suggest the most promising points."""
if len(set(self._space.target)) < 2:
return self._space.array_to_params(self._space.random_sample())
# Sklearn's GP throws a large number of warnings at times, but
# we don't really need to see them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self._gp.fit(self._space.params, self._space.target)
# Finding argmax of the acquisition function.
suggestion = acq_max(
ac=self._ucb,
gp=self._gp,
y_max=self._space.target.max(),
bounds=self._space.bounds,
random_seed=self._random_seed
)
return self._space.array_to_params(suggestion)
def gen_next_params(self):
"""Get the next parameter."""
next_params = self.suggest()
return next_params
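# Illustrative sketch (not part of the original module): the suggest/register loop that
# `next_tune_cfg` above drives -- ask the optimizer for the next params, evaluate a
# (hypothetical) black-box objective, and feed the observation back into the space.
def _bayesian_optimization_usage_sketch():  # pragma: no cover
    opt = BayesianOptimization(pbounds={"x": (0.0, 10.0)})
    for _ in range(5):
        params = opt.gen_next_params()
        target = -(params["x"] - 3.0) ** 2  # hypothetical objective to maximize
        try:
            opt._space.register(params, target)
        except KeyError:
            # duplicate suggestion; skip it, exactly as the strategy above does
            continue
    return opt.max  # best observation seen so far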
/python_da_final_vkaul-0.0.1.tar.gz/python_da_final_vkaul-0.0.1/.eggs/pandas-1.2.2-py3.9-win-amd64.egg/pandas/core/reshape/merge.py
import copy
import datetime
from functools import partial
import hashlib
import string
from typing import TYPE_CHECKING, Optional, Tuple, cast
import warnings
import numpy as np
from pandas._libs import Timedelta, hashtable as libhashtable, join as libjoin, lib
from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion
from pandas.errors import MergeError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_object,
is_array_like,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.missing import isna, na_value_for_dtype
from pandas import Categorical, Index, MultiIndex
from pandas.core import groupby
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.frame import _merge_doc
from pandas.core.internals import concatenate_block_managers
from pandas.core.sorting import is_int64_overflow_possible
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import DatetimeArray
@Substitution("\nleft : DataFrame")
@Appender(_merge_doc, indents=0)
def merge(
left,
right,
how: str = "inner",
on=None,
left_on=None,
right_on=None,
left_index: bool = False,
right_index: bool = False,
sort: bool = False,
suffixes=("_x", "_y"),
copy: bool = True,
indicator: bool = False,
validate=None,
) -> "DataFrame":
op = _MergeOperation(
left,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
return op.get_result()
if __debug__:
merge.__doc__ = _merge_doc % "\nleft : DataFrame"
def _groupby_and_merge(by, on, left: "DataFrame", right: "DataFrame", merge_pieces):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: DataFrame
right: DataFrame
merge_pieces: function for merging
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
rby: Optional[groupby.DataFrameGroupBy] = None
# if we can groupby the rhs
# then we can get vastly better perf
if all(item in right.columns for item in by):
rby = right.groupby(by, sort=False)
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should merge_pieces do this?
merged[by] = key
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
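# Illustrative sketch (not part of pandas): what _groupby_and_merge does for
# merge_ordered(..., left_by="group") -- merge each "group" piece of the left frame
# with the right frame and re-assemble the pieces. The frames below are hypothetical.
def _groupby_and_merge_usage_sketch():  # pragma: no cover
    from pandas import DataFrame
    left = DataFrame({"group": ["a", "a", "b"], "key": [1, 2, 1], "lval": [10, 20, 30]})
    right = DataFrame({"key": [1, 2], "rval": [100, 200]})
    result, _ = _groupby_and_merge(
        "group", "key", left, right, lambda lhs, rhs: merge(lhs, rhs, on="key")
    )
    return result  # one merged row per left row, with "rval" attached by "key"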
def merge_ordered(
left,
right,
on=None,
left_on=None,
right_on=None,
left_by=None,
right_by=None,
fill_method=None,
suffixes=("_x", "_y"),
how: str = "outer",
) -> "DataFrame":
"""
Perform merge with optional filling/interpolation.
Designed for ordered data like time series data. Optionally
perform group-wise merge (see examples).
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns.
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs.
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame.
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame.
fill_method : {'ffill', None}, default None
Interpolation method for data.
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
.. versionchanged:: 0.25.0
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join).
Returns
-------
DataFrame
        The merged DataFrame output type will be the same as
'left', if it is a subclass of DataFrame.
See Also
--------
merge : Merge with a database-style join.
merge_asof : Merge on nearest keys.
Examples
--------
>>> df1 = pd.DataFrame(
... {
... "key": ["a", "c", "e", "a", "c", "e"],
... "lvalue": [1, 2, 3, 1, 2, 3],
... "group": ["a", "a", "a", "b", "b", "b"]
... }
... )
>>> df1
key lvalue group
0 a 1 a
1 c 2 a
2 e 3 a
3 a 1 b
4 c 2 b
5 e 3 b
>>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
>>> df2
key rvalue
0 b 1
1 c 2
2 d 3
>>> merge_ordered(df1, df2, fill_method="ffill", left_by="group")
key lvalue group rvalue
0 a 1 a NaN
1 b 1 a 1.0
2 c 2 a 2.0
3 d 2 a 3.0
4 e 3 a 3.0
5 a 1 b NaN
6 b 1 b 1.0
7 c 2 b 2.0
8 d 2 b 3.0
9 e 3 b 3.0
"""
def _merger(x, y):
# perform the ordered merge operation
op = _OrderedMerge(
x,
y,
on=on,
left_on=left_on,
right_on=right_on,
suffixes=suffixes,
fill_method=fill_method,
how=how,
)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError("Can only group either left or right frames")
elif left_by is not None:
if isinstance(left_by, str):
left_by = [left_by]
check = set(left_by).difference(left.columns)
if len(check) != 0:
raise KeyError(f"{check} not found in left columns")
result, _ = _groupby_and_merge(
left_by, on, left, right, lambda x, y: _merger(x, y)
)
elif right_by is not None:
if isinstance(right_by, str):
right_by = [right_by]
check = set(right_by).difference(right.columns)
if len(check) != 0:
raise KeyError(f"{check} not found in right columns")
result, _ = _groupby_and_merge(
right_by, on, right, left, lambda x, y: _merger(y, x)
)
else:
result = _merger(left, right)
return result
def merge_asof(
left,
right,
on=None,
left_on=None,
right_on=None,
left_index: bool = False,
right_index: bool = False,
by=None,
left_by=None,
right_by=None,
suffixes=("_x", "_y"),
tolerance=None,
allow_exact_matches: bool = True,
direction: str = "backward",
) -> "DataFrame":
"""
Perform an asof merge.
This is similar to a left-join except that we match on nearest
key rather than equal keys. Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : bool
Use the index of the left DataFrame as the join key.
right_index : bool
Use the index of the right DataFrame as the join key.
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
right_by : column name
Field names to match on in the right DataFrame.
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : int or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : bool, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than).
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
Returns
-------
merged : DataFrame
See Also
--------
merge : Merge with a database-style join.
merge_ordered : Merge with optional filling/interpolation.
Examples
--------
>>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on="a")
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on="a", allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on="a", direction="forward")
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on="a", direction="nearest")
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world times-series example
>>> quotes = pd.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.030"),
... pd.Timestamp("2016-05-25 13:30:00.041"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.049"),
... pd.Timestamp("2016-05-25 13:30:00.072"),
... pd.Timestamp("2016-05-25 13:30:00.075")
... ],
... "ticker": [
... "GOOG",
... "MSFT",
... "MSFT",
... "MSFT",
... "GOOG",
... "AAPL",
... "GOOG",
... "MSFT"
... ],
... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]
... }
... )
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades = pd.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.038"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048")
... ],
... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
... "price": [51.95, 51.95, 720.77, 720.92, 98.0],
... "quantity": [75, 155, 100, 100, 100]
... }
... )
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes, on="time", by="ticker")
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(
... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
... )
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(
... trades,
... quotes,
... on="time",
... by="ticker",
... tolerance=pd.Timedelta("10ms"),
... allow_exact_matches=False
... )
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
"""
op = _AsOfMerge(
left,
right,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
by=by,
left_by=left_by,
right_by=right_by,
suffixes=suffixes,
how="asof",
tolerance=tolerance,
allow_exact_matches=allow_exact_matches,
direction=direction,
)
return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation:
"""
Perform a database (SQL) merge operation between two DataFrame or Series
objects using either columns as keys or their row indexes
"""
_merge_type = "merge"
def __init__(
self,
left: FrameOrSeriesUnion,
right: FrameOrSeriesUnion,
how: str = "inner",
on=None,
left_on=None,
right_on=None,
axis=1,
left_index: bool = False,
right_index: bool = False,
sort: bool = True,
suffixes=("_x", "_y"),
copy: bool = True,
indicator: bool = False,
validate=None,
):
_left = _validate_operand(left)
_right = _validate_operand(right)
self.left = self.orig_left = _left
self.right = self.orig_right = _right
self.how = how
# bm_axis -> the axis on the BlockManager
self.bm_axis = axis
# axis --> the axis on the Series/DataFrame
self.axis = 1 - axis if self.left.ndim == 2 else 0
self.on = com.maybe_make_list(on)
self.left_on = com.maybe_make_list(left_on)
self.right_on = com.maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
self.indicator_name: Optional[str]
if isinstance(self.indicator, str):
self.indicator_name = self.indicator
elif isinstance(self.indicator, bool):
self.indicator_name = "_merge" if self.indicator else None
else:
raise ValueError(
"indicator option can only accept boolean or string arguments"
)
if not is_bool(left_index):
raise ValueError(
f"left_index parameter must be of type bool, not {type(left_index)}"
)
if not is_bool(right_index):
raise ValueError(
f"right_index parameter must be of type bool, not {type(right_index)}"
)
# warn user when merging between different levels
if _left.columns.nlevels != _right.columns.nlevels:
msg = (
"merging between different levels can give an unintended "
f"result ({left.columns.nlevels} levels on the left,"
f"{right.columns.nlevels} on the right)"
)
warnings.warn(msg, UserWarning)
self._validate_specification()
cross_col = None
if self.how == "cross":
(
self.left,
self.right,
self.how,
cross_col,
) = self._create_cross_configuration(self.left, self.right)
self.left_on = self.right_on = [cross_col]
self._cross = cross_col
# note this function has side effects
(
self.left_join_keys,
self.right_join_keys,
self.join_names,
) = self._get_merge_keys()
# validate the merge keys dtypes. We may need to coerce
# to avoid incompatible dtypes
self._maybe_coerce_merge_keys()
# If argument passed to validate,
# check if columns specified as unique
# are in fact unique.
if validate is not None:
self._validate(validate)
def get_result(self):
if self.indicator:
self.left, self.right = self._indicator_pre_merge(self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
llabels, rlabels = _items_overlap_with_suffix(
self.left._info_axis, self.right._info_axis, self.suffixes
)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(self.left._mgr, lindexers), (self.right._mgr, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0,
copy=self.copy,
)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
self._maybe_restore_index_levels(result)
self._maybe_drop_cross_column(result, self._cross)
return result.__finalize__(self, method="merge")
def _maybe_drop_cross_column(self, result: "DataFrame", cross_col: Optional[str]):
if cross_col is not None:
result.drop(columns=cross_col, inplace=True)
def _indicator_pre_merge(
self, left: "DataFrame", right: "DataFrame"
) -> Tuple["DataFrame", "DataFrame"]:
columns = left.columns.union(right.columns)
for i in ["_left_indicator", "_right_indicator"]:
if i in columns:
raise ValueError(
"Cannot use `indicator=True` option when "
f"data contains a column named {i}"
)
if self.indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column"
)
left = left.copy()
right = right.copy()
left["_left_indicator"] = 1
left["_left_indicator"] = left["_left_indicator"].astype("int8")
right["_right_indicator"] = 2
right["_right_indicator"] = right["_right_indicator"].astype("int8")
return left, right
def _indicator_post_merge(self, result):
result["_left_indicator"] = result["_left_indicator"].fillna(0)
result["_right_indicator"] = result["_right_indicator"].fillna(0)
result[self.indicator_name] = Categorical(
(result["_left_indicator"] + result["_right_indicator"]),
categories=[1, 2, 3],
)
result[self.indicator_name] = result[self.indicator_name].cat.rename_categories(
["left_only", "right_only", "both"]
)
result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1)
return result
def _maybe_restore_index_levels(self, result):
"""
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
"""
names_to_restore = []
for name, left_key, right_key in zip(
self.join_names, self.left_on, self.right_on
):
if (
self.orig_left._is_level_reference(left_key)
and self.orig_right._is_level_reference(right_key)
and name not in result.index.names
):
names_to_restore.append(name)
if names_to_restore:
result.set_index(names_to_restore, inplace=True)
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.left[name].dtype
):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.right[name].dtype
):
take_right = self.right[name]._values
elif left_indexer is not None and is_array_like(self.left_join_keys[i]):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer, fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer, fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values or vice-versa
mask_left = left_indexer == -1
mask_right = right_indexer == -1
if mask_left.all():
key_col = rvals
elif right_indexer is not None and mask_right.all():
key_col = lvals
else:
key_col = Index(lvals).where(~mask_left, rvals)
if result._is_label_reference(name):
result[name] = key_col
elif result._is_level_reference(name):
if isinstance(result.index, MultiIndex):
key_col.name = name
idx_list = [
result.index.get_level_values(level_name)
if level_name != name
else key_col
for level_name in result.index.names
]
result.set_index(idx_list, inplace=True)
else:
result.index = Index(key_col, name=name)
else:
result.insert(i, name or f"key_{i}", key_col)
def _get_join_indexers(self):
""" return the join indexers """
return get_join_indexers(
self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how
)
def _get_join_info(self):
left_ax = self.left.axes[self.axis]
right_ax = self.right.axes[self.axis]
if self.left_index and self.right_index and self.how != "asof":
join_index, left_indexer, right_indexer = left_ax.join(
right_ax, how=self.how, return_indexers=True, sort=self.sort
)
elif self.right_index and self.how == "left":
join_index, left_indexer, right_indexer = _left_join_on_index(
left_ax, right_ax, self.left_join_keys, sort=self.sort
)
elif self.left_index and self.how == "right":
join_index, right_indexer, left_indexer = _left_join_on_index(
right_ax, left_ax, self.right_join_keys, sort=self.sort
)
else:
(left_indexer, right_indexer) = self._get_join_indexers()
if self.right_index:
if len(self.left) > 0:
join_index = self._create_join_index(
self.left.index,
self.right.index,
left_indexer,
right_indexer,
how="right",
)
else:
join_index = self.right.index.take(right_indexer)
left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
if len(self.right) > 0:
join_index = self._create_join_index(
self.right.index,
self.left.index,
right_indexer,
left_indexer,
how="left",
)
else:
join_index = self.left.index.take(left_indexer)
right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
if len(join_index) == 0:
join_index = join_index.astype(object)
return join_index, left_indexer, right_indexer
def _create_join_index(
self,
index: Index,
other_index: Index,
indexer,
other_indexer,
how: str = "left",
):
"""
Create a join index by rearranging one index to match another
Parameters
----------
index: Index being rearranged
other_index: Index used to supply values not found in index
indexer: how to rearrange index
how: replacement is only necessary if indexer based on other_index
Returns
-------
join_index
"""
if self.how in (how, "outer") and not isinstance(other_index, MultiIndex):
# if final index requires values in other_index but not target
# index, indexer may hold missing (-1) values, causing Index.take
# to take the final value in target index. So, we set the last
# element to be the desired fill value. We do not use allow_fill
# and fill_value because it throws a ValueError on integer indices
mask = indexer == -1
if np.any(mask):
fill_value = na_value_for_dtype(index.dtype, compat=False)
index = index.append(Index([fill_value]))
return index.take(indexer)
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
left_keys = []
right_keys = []
# pandas\core\reshape\merge.py:966: error: Need type annotation for
# 'join_names' (hint: "join_names: List[<type>] = ...")
# [var-annotated]
join_names = [] # type: ignore[var-annotated]
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: is_array_like(x) and len(x) == len(left)
is_rkey = lambda x: is_array_like(x) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_specification() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
if rk is not None:
right_keys.append(right._get_label_or_level_values(rk))
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
if rk is not None:
right_keys.append(right._get_label_or_level_values(rk))
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
if lk is not None:
left_keys.append(left._get_label_or_level_values(lk))
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [
lev._values.take(lev_codes)
for lev, lev_codes in zip(
self.right.index.levels, self.right.index.codes
)
]
else:
right_keys = [self.right.index._values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [
lev._values.take(lev_codes)
for lev, lev_codes in zip(
self.left.index.levels, self.left.index.codes
)
]
else:
left_keys = [self.left.index._values]
if left_drop:
self.left = self.left._drop_labels_or_levels(left_drop)
if right_drop:
self.right = self.right._drop_labels_or_levels(right_drop)
return left_keys, right_keys, join_names
def _maybe_coerce_merge_keys(self):
# we have valid merges but we may have to further
# coerce these if they are originally incompatible types
#
# for example if these are categorical, but are not dtype_equal
# or if we have object and integer dtypes
for lk, rk, name in zip(
self.left_join_keys, self.right_join_keys, self.join_names
):
if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
continue
lk_is_cat = is_categorical_dtype(lk.dtype)
rk_is_cat = is_categorical_dtype(rk.dtype)
lk_is_object = is_object_dtype(lk.dtype)
rk_is_object = is_object_dtype(rk.dtype)
# if either left or right is a categorical
# then the must match exactly in categories & ordered
if lk_is_cat and rk_is_cat:
if lk._categories_match_up_to_permutation(rk):
continue
elif lk_is_cat or rk_is_cat:
pass
elif is_dtype_equal(lk.dtype, rk.dtype):
continue
msg = (
f"You are trying to merge on {lk.dtype} and "
f"{rk.dtype} columns. If you wish to proceed you should use pd.concat"
)
# if we are numeric, then allow differing
# kinds to proceed, eg. int64 and int8, int and float
# further if we are object, but we infer to
# the same, then proceed
if is_numeric_dtype(lk.dtype) and is_numeric_dtype(rk.dtype):
if lk.dtype.kind == rk.dtype.kind:
continue
# check whether ints and floats
elif is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype):
if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():
warnings.warn(
"You are merging on int and float "
"columns where the float values "
"are not equal to their int representation",
UserWarning,
)
continue
elif is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype):
if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():
warnings.warn(
"You are merging on int and float "
"columns where the float values "
"are not equal to their int representation",
UserWarning,
)
continue
# let's infer and see if we are ok
elif lib.infer_dtype(lk, skipna=False) == lib.infer_dtype(
rk, skipna=False
):
continue
# Check if we are trying to merge on obviously
# incompatible dtypes GH 9780, GH 15800
# bool values are coerced to object
elif (lk_is_object and is_bool_dtype(rk.dtype)) or (
is_bool_dtype(lk.dtype) and rk_is_object
):
pass
# object values are allowed to be merged
elif (lk_is_object and is_numeric_dtype(rk.dtype)) or (
is_numeric_dtype(lk.dtype) and rk_is_object
):
inferred_left = lib.infer_dtype(lk, skipna=False)
inferred_right = lib.infer_dtype(rk, skipna=False)
bool_types = ["integer", "mixed-integer", "boolean", "empty"]
string_types = ["string", "unicode", "mixed", "bytes", "empty"]
# inferred bool
if inferred_left in bool_types and inferred_right in bool_types:
pass
# unless we are merging non-string-like with string-like
elif (
inferred_left in string_types and inferred_right not in string_types
) or (
inferred_right in string_types and inferred_left not in string_types
):
raise ValueError(msg)
# datetimelikes must match exactly
elif needs_i8_conversion(lk.dtype) and not needs_i8_conversion(rk.dtype):
raise ValueError(msg)
elif not needs_i8_conversion(lk.dtype) and needs_i8_conversion(rk.dtype):
raise ValueError(msg)
elif is_datetime64tz_dtype(lk.dtype) and not is_datetime64tz_dtype(
rk.dtype
):
raise ValueError(msg)
elif not is_datetime64tz_dtype(lk.dtype) and is_datetime64tz_dtype(
rk.dtype
):
raise ValueError(msg)
elif lk_is_object and rk_is_object:
continue
# Houston, we have a problem!
# let's coerce to object if the dtypes aren't
# categorical, otherwise coerce to the category
# dtype. If we coerced categories to object,
# then we would lose type information on some
# columns, and end up trying to merge
# incompatible dtypes. See GH 16900.
if name in self.left.columns:
typ = lk.categories.dtype if lk_is_cat else object
self.left = self.left.assign(**{name: self.left[name].astype(typ)})
if name in self.right.columns:
typ = rk.categories.dtype if rk_is_cat else object
self.right = self.right.assign(**{name: self.right[name].astype(typ)})
def _create_cross_configuration(
self, left, right
) -> Tuple["DataFrame", "DataFrame", str, str]:
"""
Creates the configuration to dispatch the cross operation to inner join,
e.g. adding a join column and resetting parameters. Join column is added
to a new object, no inplace modification
Parameters
----------
left: DataFrame
right DataFrame
Returns
-------
a tuple (left, right, how, cross_col) representing the adjusted
DataFrames with cross_col, the merge operation set to inner and the column
to join over.
"""
cross_col = f"_cross_{hashlib.md5().hexdigest()}"
how = "inner"
return (
left.assign(**{cross_col: 1}),
right.assign(**{cross_col: 1}),
how,
cross_col,
)
def _validate_specification(self):
if self.how == "cross":
if (
self.left_index
or self.right_index
or self.right_on is not None
or self.left_on is not None
or self.on is not None
):
raise MergeError(
"Can not pass on, right_on, left_on or set right_index=True or "
"left_index=True"
)
return
# Hm, any way to make this logic less complicated??
elif self.on is None and self.left_on is None and self.right_on is None:
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
elif self.left_index:
raise MergeError("Must pass right_on or right_index=True")
elif self.right_index:
raise MergeError("Must pass left_on or left_index=True")
else:
# use the common columns
left_cols = self.left.columns
right_cols = self.right.columns
common_cols = left_cols.intersection(right_cols)
if len(common_cols) == 0:
raise MergeError(
"No common columns to perform merge on. "
f"Merge options: left_on={self.left_on}, "
f"right_on={self.right_on}, "
f"left_index={self.left_index}, "
f"right_index={self.right_index}"
)
if (
not left_cols.join(common_cols, how="inner").is_unique
or not right_cols.join(common_cols, how="inner").is_unique
):
raise MergeError(f"Data columns not unique: {repr(common_cols)}")
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
raise MergeError(
'Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.'
)
if self.left_index or self.right_index:
raise MergeError(
'Can only pass argument "on" OR "left_index" '
'and "right_index", not a combination of both.'
)
self.left_on = self.right_on = self.on
elif self.left_on is not None:
if self.left_index:
raise MergeError(
'Can only pass argument "left_on" OR "left_index" not both.'
)
if not self.right_index and self.right_on is None:
raise MergeError('Must pass "right_on" OR "right_index".')
n = len(self.left_on)
if self.right_index:
if len(self.left_on) != self.right.index.nlevels:
raise ValueError(
"len(left_on) must equal the number "
'of levels in the index of "right"'
)
self.right_on = [None] * n
elif self.right_on is not None:
if self.right_index:
raise MergeError(
'Can only pass argument "right_on" OR "right_index" not both.'
)
if not self.left_index and self.left_on is None:
raise MergeError('Must pass "left_on" OR "left_index".')
n = len(self.right_on)
if self.left_index:
if len(self.right_on) != self.left.index.nlevels:
raise ValueError(
"len(right_on) must equal the number "
'of levels in the index of "left"'
)
self.left_on = [None] * n
if self.how != "cross" and len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _validate(self, validate: str):
# Check uniqueness of each
if self.left_index:
left_unique = self.orig_left.index.is_unique
else:
left_unique = MultiIndex.from_arrays(self.left_join_keys).is_unique
if self.right_index:
right_unique = self.orig_right.index.is_unique
else:
right_unique = MultiIndex.from_arrays(self.right_join_keys).is_unique
# Check data integrity
if validate in ["one_to_one", "1:1"]:
if not left_unique and not right_unique:
raise MergeError(
"Merge keys are not unique in either left "
"or right dataset; not a one-to-one merge"
)
elif not left_unique:
raise MergeError(
"Merge keys are not unique in left dataset; not a one-to-one merge"
)
elif not right_unique:
raise MergeError(
"Merge keys are not unique in right dataset; not a one-to-one merge"
)
elif validate in ["one_to_many", "1:m"]:
if not left_unique:
raise MergeError(
"Merge keys are not unique in left dataset; not a one-to-many merge"
)
elif validate in ["many_to_one", "m:1"]:
if not right_unique:
raise MergeError(
"Merge keys are not unique in right dataset; "
"not a many-to-one merge"
)
elif validate in ["many_to_many", "m:m"]:
pass
else:
raise ValueError("Not a valid argument for validate")
def get_join_indexers(
left_keys, right_keys, sort: bool = False, how: str = "inner", **kwargs
):
"""
Parameters
----------
left_keys: ndarray, Index, Series
right_keys: ndarray, Index, Series
sort: bool, default False
how: string {'inner', 'outer', 'left', 'right'}, default 'inner'
Returns
-------
tuple of (left_indexer, right_indexer)
indexers into the left_keys, right_keys
"""
assert len(left_keys) == len(
right_keys
), "left_key and right_keys must be the same length"
# get left & right join labels and num. of levels at each location
mapped = (
_factorize_keys(left_keys[n], right_keys[n], sort=sort, how=how)
for n in range(len(left_keys))
)
zipped = zip(*mapped)
llab, rlab, shape = [list(x) for x in zipped]
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort, how=how)
# preserve left frame order if how == 'left' and sort == False
kwargs = copy.copy(kwargs)
if how in ("left", "right"):
kwargs["sort"] = sort
join_func = {
"inner": libjoin.inner_join,
"left": libjoin.left_outer_join,
"right": lambda x, y, count, **kwargs: libjoin.left_outer_join(
y, x, count, **kwargs
)[::-1],
"outer": libjoin.full_outer_join,
}[how]
return join_func(lkey, rkey, count, **kwargs)
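# Illustrative sketch (not part of pandas): get_join_indexers maps two lists of key
# arrays to positional indexers into each side. The key arrays below are hypothetical.
def _get_join_indexers_usage_sketch():  # pragma: no cover
    left_keys = [np.array(["a", "b", "c"], dtype=object)]
    right_keys = [np.array(["b", "c", "d"], dtype=object)]
    left_indexer, right_indexer = get_join_indexers(left_keys, right_keys, how="inner")
    # the indexers point at the matching rows ("b" and "c") on each side
    return left_indexer, right_indexer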
def restore_dropped_levels_multijoin(
left: MultiIndex,
right: MultiIndex,
dropped_level_names,
join_index,
lindexer,
rindexer,
):
"""
*this is an internal non-public method*
Returns the levels, labels and names of a multi-index to multi-index join.
Depending on the type of join, this method restores the appropriate
dropped levels of the joined multi-index.
The method relies on lidx, rindexer which hold the index positions of
left and right, where a join was feasible
Parameters
----------
left : MultiIndex
left index
right : MultiIndex
right index
dropped_level_names : str array
list of non-common level names
join_index : MultiIndex
the index of the join between the
common levels of left and right
lindexer : intp array
left indexer
rindexer : intp array
right indexer
Returns
-------
levels : list of Index
levels of combined multiindexes
labels : intp array
labels of combined multiindexes
names : str array
names of combined multiindexes
"""
def _convert_to_multiindex(index) -> MultiIndex:
if isinstance(index, MultiIndex):
return index
else:
return MultiIndex.from_arrays([index._values], names=[index.name])
# For multi-multi joins with one overlapping level,
# the returned index if of type Index
# Assure that join_index is of type MultiIndex
# so that dropped levels can be appended
join_index = _convert_to_multiindex(join_index)
join_levels = join_index.levels
join_codes = join_index.codes
join_names = join_index.names
# lindexer and rindexer hold the indexes where the join occurred
# for left and right respectively. If left/right is None then
# the join occurred on all indices of left/right
if lindexer is None:
lindexer = range(left.size)
if rindexer is None:
rindexer = range(right.size)
# Iterate through the levels that must be restored
for dropped_level_name in dropped_level_names:
if dropped_level_name in left.names:
idx = left
indexer = lindexer
else:
idx = right
indexer = rindexer
# The index of the level name to be restored
name_idx = idx.names.index(dropped_level_name)
restore_levels = idx.levels[name_idx]
# Inject -1 in the codes list where a join was not possible
# IOW indexer[i]=-1
codes = idx.codes[name_idx]
restore_codes = algos.take_nd(codes, indexer, fill_value=-1)
join_levels = join_levels + [restore_levels]
join_codes = join_codes + [restore_codes]
join_names = join_names + [dropped_level_name]
return join_levels, join_codes, join_names
class _OrderedMerge(_MergeOperation):
_merge_type = "ordered_merge"
def __init__(
self,
left,
right,
on=None,
left_on=None,
right_on=None,
left_index: bool = False,
right_index: bool = False,
axis=1,
suffixes=("_x", "_y"),
copy: bool = True,
fill_method=None,
how: str = "outer",
):
self.fill_method = fill_method
_MergeOperation.__init__(
self,
left,
right,
on=on,
left_on=left_on,
left_index=left_index,
right_index=right_index,
right_on=right_on,
axis=axis,
how=how,
suffixes=suffixes,
sort=True, # factorize sorts
)
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
llabels, rlabels = _items_overlap_with_suffix(
self.left._info_axis, self.right._info_axis, self.suffixes
)
if self.fill_method == "ffill":
left_join_indexer = libjoin.ffill_indexer(left_indexer)
right_join_indexer = libjoin.ffill_indexer(right_indexer)
else:
left_join_indexer = left_indexer
right_join_indexer = right_indexer
lindexers = {1: left_join_indexer} if left_join_indexer is not None else {}
rindexers = {1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
[(self.left._mgr, lindexers), (self.right._mgr, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0,
copy=self.copy,
)
typ = self.left._constructor
result = typ(result_data)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _asof_function(direction: str):
name = f"asof_join_{direction}"
return getattr(libjoin, name, None)
def _asof_by_function(direction: str):
name = f"asof_join_{direction}_on_X_by_Y"
return getattr(libjoin, name, None)
_type_casters = {
"int64_t": ensure_int64,
"double": ensure_float64,
"object": ensure_object,
}
def _get_cython_type_upcast(dtype):
""" Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return "int64_t"
elif is_float_dtype(dtype):
return "double"
else:
return "object"
class _AsOfMerge(_OrderedMerge):
_merge_type = "asof_merge"
def __init__(
self,
left,
right,
on=None,
left_on=None,
right_on=None,
left_index: bool = False,
right_index: bool = False,
by=None,
left_by=None,
right_by=None,
axis=1,
suffixes=("_x", "_y"),
copy: bool = True,
fill_method=None,
how: str = "asof",
tolerance=None,
allow_exact_matches: bool = True,
direction: str = "backward",
):
self.by = by
self.left_by = left_by
self.right_by = right_by
self.tolerance = tolerance
self.allow_exact_matches = allow_exact_matches
self.direction = direction
_OrderedMerge.__init__(
self,
left,
right,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
axis=axis,
how=how,
suffixes=suffixes,
fill_method=fill_method,
)
def _validate_specification(self):
super()._validate_specification()
# we only allow on to be a single item for on
if len(self.left_on) != 1 and not self.left_index:
raise MergeError("can only asof on a key for left")
if len(self.right_on) != 1 and not self.right_index:
raise MergeError("can only asof on a key for right")
if self.left_index and isinstance(self.left.index, MultiIndex):
raise MergeError("left can only have one index")
if self.right_index and isinstance(self.right.index, MultiIndex):
raise MergeError("right can only have one index")
# set 'by' columns
if self.by is not None:
if self.left_by is not None or self.right_by is not None:
raise MergeError("Can only pass by OR left_by and right_by")
self.left_by = self.right_by = self.by
if self.left_by is None and self.right_by is not None:
raise MergeError("missing left_by")
if self.left_by is not None and self.right_by is None:
raise MergeError("missing right_by")
# add 'by' to our key-list so we can have it in the
# output as a key
if self.left_by is not None:
if not is_list_like(self.left_by):
self.left_by = [self.left_by]
if not is_list_like(self.right_by):
self.right_by = [self.right_by]
if len(self.left_by) != len(self.right_by):
raise MergeError("left_by and right_by must be same length")
self.left_on = self.left_by + list(self.left_on)
self.right_on = self.right_by + list(self.right_on)
# check 'direction' is valid
if self.direction not in ["backward", "forward", "nearest"]:
raise MergeError(f"direction invalid: {self.direction}")
def _get_merge_keys(self):
# note this function has side effects
(left_join_keys, right_join_keys, join_names) = super()._get_merge_keys()
# validate index types are the same
for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)):
if not is_dtype_equal(lk.dtype, rk.dtype):
if is_categorical_dtype(lk.dtype) and is_categorical_dtype(rk.dtype):
# The generic error message is confusing for categoricals.
#
# In this function, the join keys include both the original
# ones of the merge_asof() call, and also the keys passed
# to its by= argument. Unordered but equal categories
# are not supported for the former, but will fail
# later with a ValueError, so we don't *need* to check
# for them here.
msg = (
f"incompatible merge keys [{i}] {repr(lk.dtype)} and "
f"{repr(rk.dtype)}, both sides category, but not equal ones"
)
else:
msg = (
f"incompatible merge keys [{i}] {repr(lk.dtype)} and "
f"{repr(rk.dtype)}, must be the same type"
)
raise MergeError(msg)
# validate tolerance; datetime.timedelta or Timedelta if we have a DTI
if self.tolerance is not None:
if self.left_index:
lt = self.left.index
else:
lt = left_join_keys[-1]
msg = (
f"incompatible tolerance {self.tolerance}, must be compat "
f"with type {repr(lt.dtype)}"
)
if needs_i8_conversion(lt):
if not isinstance(self.tolerance, datetime.timedelta):
raise MergeError(msg)
if self.tolerance < Timedelta(0):
raise MergeError("tolerance must be positive")
elif is_integer_dtype(lt):
if not is_integer(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
elif is_float_dtype(lt):
if not is_number(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
else:
raise MergeError("key must be integer, timestamp or float")
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
msg = (
"allow_exact_matches must be boolean, "
f"passed {self.allow_exact_matches}"
)
raise MergeError(msg)
return left_join_keys, right_join_keys, join_names
def _get_join_indexers(self):
""" return the join indexers """
def flip(xs) -> np.ndarray:
""" unlike np.transpose, this returns an array of tuples """
xs = [
x
if not is_extension_array_dtype(x)
else extract_array(x)._values_for_argsort()
for x in xs
]
labels = list(string.ascii_lowercase[: len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(list(zip(*xs)), labeled_dtypes)
# values to compare
left_values = (
self.left.index._values if self.left_index else self.left_join_keys[-1]
)
right_values = (
self.right.index._values if self.right_index else self.right_join_keys[-1]
)
tolerance = self.tolerance
# we require sortedness and non-null values in the join keys
if not Index(left_values).is_monotonic:
side = "left"
if isna(left_values).any():
raise ValueError(f"Merge keys contain null values on {side} side")
else:
raise ValueError(f"{side} keys must be sorted")
if not Index(right_values).is_monotonic:
side = "right"
if isna(right_values).any():
raise ValueError(f"Merge keys contain null values on {side} side")
else:
raise ValueError(f"{side} keys must be sorted")
# initial type conversion as needed
if needs_i8_conversion(left_values):
left_values = left_values.view("i8")
right_values = right_values.view("i8")
if tolerance is not None:
tolerance = Timedelta(tolerance)
tolerance = tolerance.value
# a "by" parameter requires special handling
if self.left_by is not None:
# remove 'on' parameter from values if one existed
if self.left_index and self.right_index:
left_by_values = self.left_join_keys
right_by_values = self.right_join_keys
else:
left_by_values = self.left_join_keys[0:-1]
right_by_values = self.right_join_keys[0:-1]
# get tuple representation of values if more than one
if len(left_by_values) == 1:
left_by_values = left_by_values[0]
right_by_values = right_by_values[0]
else:
left_by_values = flip(left_by_values)
right_by_values = flip(right_by_values)
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
# choose appropriate function by type
func = _asof_by_function(self.direction)
return func(
left_values,
right_values,
left_by_values,
right_by_values,
self.allow_exact_matches,
tolerance,
)
else:
# choose appropriate function by type
func = _asof_function(self.direction)
return func(left_values, right_values, self.allow_exact_matches, tolerance)
def _get_multiindex_indexer(join_keys, index: MultiIndex, sort: bool):
# left & right join labels and num. of levels at each location
mapped = (
_factorize_keys(index.levels[n], join_keys[n], sort=sort)
for n in range(index.nlevels)
)
zipped = zip(*mapped)
rcodes, lcodes, shape = [list(x) for x in zipped]
if sort:
rcodes = list(map(np.take, rcodes, index.codes))
else:
i8copy = lambda a: a.astype("i8", subok=False, copy=True)
rcodes = list(map(i8copy, index.codes))
# fix right labels if there were any nulls
for i in range(len(join_keys)):
mask = index.codes[i] == -1
if mask.any():
            # check whether there were already any nulls at this location
# if there was, it is factorized to `shape[i] - 1`
a = join_keys[i][lcodes[i] == shape[i] - 1]
if a.size == 0 or not a[0] != a[0]:
shape[i] += 1
rcodes[i][mask] = shape[i] - 1
# get flat i8 join keys
lkey, rkey = _get_join_keys(lcodes, rcodes, shape, sort)
# factorize keys to a dense i8 space
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort: bool = False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
left_indexer, right_indexer = libjoin.left_outer_join(
ensure_int64(left_key), ensure_int64(right_key), count, sort=sort
)
return left_indexer, right_indexer
def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys, sort: bool = False):
if len(join_keys) > 1:
if not (
isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels
):
raise AssertionError(
"If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
"number of join keys must be the number of levels in right_ax"
)
left_indexer, right_indexer = _get_multiindex_indexer(
join_keys, right_ax, sort=sort
)
else:
jkey = join_keys[0]
left_indexer, right_indexer = _get_single_indexer(jkey, right_ax, sort=sort)
if sort or len(left_ax) != len(left_indexer):
# if asked to sort or there are 1-to-many matches
join_index = left_ax.take(left_indexer)
return join_index, left_indexer, right_indexer
# left frame preserves order & length of its index
return left_ax, None, right_indexer
def _factorize_keys(
lk: ArrayLike, rk: ArrayLike, sort: bool = True, how: str = "inner"
) -> Tuple[np.ndarray, np.ndarray, int]:
"""
Encode left and right keys as enumerated types.
This is used to get the join indexers to be used when merging DataFrames.
Parameters
----------
lk : array-like
Left key.
rk : array-like
Right key.
sort : bool, defaults to True
If True, the encoding is done such that the unique elements in the
keys are sorted.
    how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge.
Returns
-------
array
Left (resp. right if called with `key='right'`) labels, as enumerated type.
array
Right (resp. left if called with `key='right'`) labels, as enumerated type.
int
Number of unique elements in union of left and right labels.
See Also
--------
merge : Merge DataFrame or named Series objects
with a database-style join.
algorithms.factorize : Encode the object as an enumerated type
or categorical variable.
Examples
--------
>>> lk = np.array(["a", "c", "b"])
>>> rk = np.array(["a", "c"])
Here, the unique values are `'a', 'b', 'c'`. With the default
`sort=True`, the encoding will be `{0: 'a', 1: 'b', 2: 'c'}`:
>>> pd.core.reshape.merge._factorize_keys(lk, rk)
(array([0, 2, 1]), array([0, 2]), 3)
With the `sort=False`, the encoding will correspond to the order
in which the unique elements first appear: `{0: 'a', 1: 'c', 2: 'b'}`:
>>> pd.core.reshape.merge._factorize_keys(lk, rk, sort=False)
(array([0, 1, 2]), array([0, 1]), 3)
"""
# Some pre-processing for non-ndarray lk / rk
lk = extract_array(lk, extract_numpy=True)
rk = extract_array(rk, extract_numpy=True)
if is_datetime64tz_dtype(lk.dtype) and is_datetime64tz_dtype(rk.dtype):
# Extract the ndarray (UTC-localized) values
# Note: we dont need the dtypes to match, as these can still be compared
lk = cast("DatetimeArray", lk)._ndarray
rk = cast("DatetimeArray", rk)._ndarray
elif (
is_categorical_dtype(lk.dtype)
and is_categorical_dtype(rk.dtype)
and is_dtype_equal(lk.dtype, rk.dtype)
):
assert isinstance(lk, Categorical)
assert isinstance(rk, Categorical)
# Cast rk to encoding so we can compare codes with lk
rk = lk._encode_with_my_categories(rk)
lk = ensure_int64(lk.codes)
rk = ensure_int64(rk.codes)
elif is_extension_array_dtype(lk.dtype) and is_dtype_equal(lk.dtype, rk.dtype):
lk, _ = lk._values_for_factorize()
rk, _ = rk._values_for_factorize()
if is_integer_dtype(lk.dtype) and is_integer_dtype(rk.dtype):
# GH#23917 TODO: needs tests for case where lk is integer-dtype
# and rk is datetime-dtype
klass = libhashtable.Int64Factorizer
lk = ensure_int64(np.asarray(lk))
rk = ensure_int64(np.asarray(rk))
elif needs_i8_conversion(lk.dtype) and is_dtype_equal(lk.dtype, rk.dtype):
# GH#23917 TODO: Needs tests for non-matching dtypes
klass = libhashtable.Int64Factorizer
lk = ensure_int64(np.asarray(lk, dtype=np.int64))
rk = ensure_int64(np.asarray(rk, dtype=np.int64))
else:
klass = libhashtable.Factorizer
lk = ensure_object(lk)
rk = ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
if how == "right":
return rlab, llab, count
return llab, rlab, count
def _sort_labels(uniques: np.ndarray, left, right):
llength = len(left)
labels = np.concatenate([left, right])
_, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1)
new_labels = ensure_int64(new_labels)
new_left, new_right = new_labels[:llength], new_labels[llength:]
return new_left, new_right
def _get_join_keys(llab, rlab, shape, sort: bool):
# how many levels can be done without overflow
nlev = next(
lev
for lev in range(len(shape), 0, -1)
if not is_int64_overflow_possible(shape[:lev])
)
# get keys for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype="i8")
lkey = stride * llab[0].astype("i8", subok=False, copy=False)
rkey = stride * rlab[0].astype("i8", subok=False, copy=False)
for i in range(1, nlev):
with np.errstate(divide="ignore"):
stride //= shape[i]
lkey += llab[i] * stride
rkey += rlab[i] * stride
if nlev == len(shape): # all done!
return lkey, rkey
# densify current keys to avoid overflow
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
llab = [lkey] + llab[nlev:]
rlab = [rkey] + rlab[nlev:]
shape = [count] + shape[nlev:]
return _get_join_keys(llab, rlab, shape, sort)
def _should_fill(lname, rname) -> bool:
if not isinstance(lname, str) or not isinstance(rname, str):
return True
return lname == rname
def _any(x) -> bool:
return x is not None and com.any_not_none(*x)
def _validate_operand(obj: FrameOrSeries) -> "DataFrame":
if isinstance(obj, ABCDataFrame):
return obj
elif isinstance(obj, ABCSeries):
if obj.name is None:
raise ValueError("Cannot merge a Series without a name")
else:
return obj.to_frame()
else:
raise TypeError(
f"Can only merge Series or DataFrame objects, a {type(obj)} was passed"
)
def _items_overlap_with_suffix(left: Index, right: Index, suffixes: Tuple[str, str]):
"""
Suffixes type validation.
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
if not is_list_like(suffixes, allow_sets=False):
warnings.warn(
f"Passing 'suffixes' as a {type(suffixes)}, is not supported and may give "
"unexpected results. Provide 'suffixes' as a tuple instead. In the "
"future a 'TypeError' will be raised.",
FutureWarning,
stacklevel=4,
)
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
lsuffix, rsuffix = suffixes
if not lsuffix and not rsuffix:
raise ValueError(f"columns overlap but no suffix specified: {to_rename}")
def renamer(x, suffix):
"""
Rename the left and right indices.
If there is overlap, and suffix is not None, add
suffix, otherwise, leave it as-is.
Parameters
----------
x : original column name
suffix : str or None
Returns
-------
x : renamed column name
"""
if x in to_rename and suffix is not None:
return f"{x}{suffix}"
return x
lrenamer = partial(renamer, suffix=lsuffix)
rrenamer = partial(renamer, suffix=rsuffix)
return (left._transform_index(lrenamer), right._transform_index(rrenamer))
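# A minimal usage sketch (assuming pandas is importable as pd) of the public
# merge_asof API that exercises the validation implemented above: the join
# keys must be sorted and non-null, and the tolerance must be a positive
# Timedelta when the keys are datetimes.
if __name__ == "__main__":
    import pandas as pd

    quotes = pd.DataFrame(
        {
            "time": pd.to_datetime(["2023-01-01 09:00:00", "2023-01-01 09:00:03"]),
            "bid": [100.0, 101.0],
        }
    )
    trades = pd.DataFrame(
        {
            "time": pd.to_datetime(["2023-01-01 09:00:02", "2023-01-01 09:00:05"]),
            "price": [100.5, 101.5],
        }
    )
    # Keys are sorted; each trade is matched to the most recent quote within 2s.
    merged = pd.merge_asof(
        trades,
        quotes,
        on="time",
        tolerance=pd.Timedelta("2s"),
        direction="backward",
    )
    print(merged)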
|
PypiClean
|
/SuperalloyDataExtractor-0.0.6.tar.gz/SuperalloyDataExtractor-0.0.6/README.md
|
**SuperalloyDataExtractor**
----------------------
The superalloydataextractor toolkit batch-downloads documents in XML and TXT format from the Elsevier database, locates target sentences in the full text, and automatically extracts triple information in the form of <material name, property specifier, value>.
This package is released under the MIT License; please see the LICENSE file for details.
**Features**
----------------------
- Rule-based named entity recognition for superalloys.
- An automated data extraction pipeline for superalloys.
- An algorithm based on entity distance and entity count that performs multi-relation extraction without labeled samples.
**Superalloy Data Extractor Code**
----------------------
This code extracts property data from TXT files, which must be supplied by the researcher. The code is written in Python 3. To run the code:
1. Fork this repository
2. Download the word embeddings and dictionary.ini:
- Available here: https:
3. Download all 5 files and place them in the superalloydataextractor/bin folder
4. Place all files in superalloydataextractor/data
**USAGE**
----------------------
Clone this GitHub repository and run
```
python3 setup.py install
```
Or simply use the code in your own project.
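A minimal, hypothetical usage sketch is shown below; the module and function names are illustrative assumptions rather than the package's documented API, so consult the repository for the actual entry points.
```python
# Hypothetical sketch: the real entry points may differ from these assumed names.
from superalloydataextractor import pipeline  # assumed module

# Run extraction over researcher-supplied TXT files and collect
# <material name, property specifier, value> triples.
triples = pipeline.extract("superalloydataextractor/data")
for material, specifier, value in triples:
    print(material, specifier, value)
```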
**LICENSE**
----------------------
All source code is licensed under the MIT license.
**Install**
----------------------
```
pip install superalloydataextractor
```
or if you are an Anaconda user, run:
```
conda install -c superalloydataextractor superalloydataextractor
```
|
PypiClean
|
/napalm-yang-0.1.0.tar.gz/napalm-yang-0.1.0/napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/__init__.py
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-vpls/prefix-limit/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"afi-safis",
"afi-safi",
"l2vpn-vpls",
"prefix-limit",
"state",
]
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """max_prefixes must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__max_prefixes = t
if hasattr(self, "_set"):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_prevent_teardown(self):
"""
Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/prevent_teardown (boolean)
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prevent_teardown must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__prevent_teardown = t
if hasattr(self, "_set"):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_shutdown_threshold_pct(self):
"""
Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
"defined-type": "oc-types:percentage",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)""",
}
)
self.__shutdown_threshold_pct = t
if hasattr(self, "_set"):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
def _get_restart_timer(self):
"""
Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/restart_timer (decimal64)
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_timer must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)""",
}
)
self.__restart_timer = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
max_prefixes = __builtin__.property(_get_max_prefixes)
prevent_teardown = __builtin__.property(_get_prevent_teardown)
shutdown_threshold_pct = __builtin__.property(_get_shutdown_threshold_pct)
restart_timer = __builtin__.property(_get_restart_timer)
_pyangbind_elements = OrderedDict(
[
("max_prefixes", max_prefixes),
("prevent_teardown", prevent_teardown),
("shutdown_threshold_pct", shutdown_threshold_pct),
("restart_timer", restart_timer),
]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-vpls/prefix-limit/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"afi-safis",
"afi-safi",
"l2vpn-vpls",
"prefix-limit",
"state",
]
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """max_prefixes must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__max_prefixes = t
if hasattr(self, "_set"):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_prevent_teardown(self):
"""
Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/prevent_teardown (boolean)
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prevent_teardown must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__prevent_teardown = t
if hasattr(self, "_set"):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_shutdown_threshold_pct(self):
"""
Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
"defined-type": "oc-types:percentage",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)""",
}
)
self.__shutdown_threshold_pct = t
if hasattr(self, "_set"):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
def _get_restart_timer(self):
"""
Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/restart_timer (decimal64)
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_timer must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)""",
}
)
self.__restart_timer = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
max_prefixes = __builtin__.property(_get_max_prefixes)
prevent_teardown = __builtin__.property(_get_prevent_teardown)
shutdown_threshold_pct = __builtin__.property(_get_shutdown_threshold_pct)
restart_timer = __builtin__.property(_get_restart_timer)
_pyangbind_elements = OrderedDict(
[
("max_prefixes", max_prefixes),
("prevent_teardown", prevent_teardown),
("shutdown_threshold_pct", shutdown_threshold_pct),
("restart_timer", restart_timer),
]
)
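# A minimal usage sketch of the auto-generated container above, assuming
# pyangbind and its dependencies are installed. Direct instantiation is shown
# only for illustration; in practice these classes are built as part of the
# full binding tree.
if __name__ == "__main__":
    s = state()
    # Leaves are exposed as read-only properties with defaults from the YANG model.
    print(s.prevent_teardown)  # False by default
    # Backends populate config-false leaves via the private setters, which
    # validate values against the YANG type (uint32 here).
    s._set_max_prefixes(1000)
    print(int(s.max_prefixes))  # 1000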
|
PypiClean
|
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/UnitPriceSpecification.py
|
from __future__ import annotations
from datetime import *
from time import *
from typing import *
from pydantic import *
class UnitPriceSpecification(BaseModel):
"""The price asked for a given offer by the respective organization or person.
References:
https://schema.org/UnitPriceSpecification
Note:
Model Depth 5
Attributes:
potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing.
url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item.
alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item.
sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
description: (Union[List[Union[str, Any]], str, Any]): A description of the item.
disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
name: (Union[List[Union[str, Any]], str, Any]): The name of the item.
additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
eligibleQuantity: (Optional[Union[List[Union[str, Any]], str, Any]]): The interval and unit of measurement of ordering quantities for which the offer or price specification is valid. This allows e.g. specifying that a certain freight charge is valid only for a certain quantity.
valueAddedTaxIncluded: (Optional[Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]]): Specifies whether the applicable value-added tax (VAT) is included in the price specification or not.
minPrice: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): The lowest price if the price is a range.
price: (Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]): The offer price of a product, or of a price component when attached to PriceSpecification and its subtypes.Usage guidelines:* Use the [[priceCurrency]] property (with standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217), e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies, e.g. "BTC"; well known names for [Local Exchange Trading Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types, e.g. "Ithaca HOUR") instead of including [ambiguous symbols](http://en.wikipedia.org/wiki/Dollar_sign#Currencies_that_use_the_dollar_or_peso_sign) such as '$' in the value.* Use '.' (Unicode 'FULL STOP' (U+002E)) rather than ',' to indicate a decimal point. Avoid using these symbols as a readability separator.* Note that both [RDFa](http://www.w3.org/TR/xhtml-rdfa-primer/#using-the-content-attribute) and Microdata syntax allow the use of a "content=" attribute for publishing simple machine-readable values alongside more human-friendly formatting.* Use values from 0123456789 (Unicode 'DIGIT ZERO' (U+0030) to 'DIGIT NINE' (U+0039)) rather than superficially similar Unicode symbols.
validThrough: (Optional[Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date]]): The date after when the item is not valid. For example the end of an offer, salary period, or a period of opening hours.
maxPrice: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): The highest price if the price is a range.
validFrom: (Optional[Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date]]): The date when the item becomes valid.
eligibleTransactionVolume: (Optional[Union[List[Union[str, Any]], str, Any]]): The transaction volume, in a monetary unit, for which the offer or price specification is valid, e.g. for indicating a minimal purchasing volume, to express free shipping above a certain order volume, or to limit the acceptance of credit cards to purchases to a certain minimal amount.
priceCurrency: (Union[List[Union[str, Any]], str, Any]): The currency of the price, or a price component when attached to [[PriceSpecification]] and its subtypes.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217), e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies, e.g. "BTC"; well known names for [Local Exchange Trading Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types, e.g. "Ithaca HOUR".
priceType: (Union[List[Union[str, Any]], str, Any]): Defines the type of a price specified for an offered product, for example a list price, a (temporary) sale price or a manufacturer suggested retail price. If multiple prices are specified for an offer the [[priceType]] property can be used to identify the type of each such specified price. The value of priceType can be specified as a value from enumeration PriceTypeEnumeration or as a free form text string for price types that are not already predefined in PriceTypeEnumeration.
priceComponentType: (Optional[Union[List[Union[str, Any]], str, Any]]): Identifies a price component (for example, a line item on an invoice), part of the total price for an offer.
billingStart: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): Specifies after how much time this price (or price component) becomes valid and billing starts. Can be used, for example, to model a price increase after the first year of a subscription. The unit of measurement is specified by the unitCode property.
unitCode: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The unit of measurement given using the UN/CEFACT Common Code (3 characters) or a URL. Other codes than the UN/CEFACT Common Code may be used with a prefix followed by a colon.
billingDuration: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): Specifies for how long this price (or price component) will be billed. Can be used, for example, to model the contractual duration of a subscription or payment plan. Type can be either a Duration or a Number (in which case the unit of measurement, for example month, is specified by the unitCode property).
unitText: (Union[List[Union[str, Any]], str, Any]): A string or text indicating the unit of measurement. Useful if you cannot provide a standard unit code for<a href='unitCode'>unitCode</a>.
referenceQuantity: (Optional[Union[List[Union[str, Any]], str, Any]]): The reference quantity for which a certain price applies, e.g. 1 EUR per 4 kWh of electricity. This property is a replacement for unitOfMeasurement for the advanced cases where the price does not relate to a standard unit.
billingIncrement: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): This property specifies the minimal quantity and rounding increment that will be the basis for the billing. The unit of measurement is specified by the unitCode property.
"""
type_: str = Field(default="UnitPriceSpecification", alias="@type", const=True)
potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Indicates a potential Action, which describes an idealized action in which this thing"
"would play an 'object' role.",
)
mainEntityOfPage: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="Indicates a page (or other CreativeWork) for which this thing is the main entity being"
"described. See [background notes](/docs/datamodel.html#mainEntityBackground)"
"for details.",
)
subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="A CreativeWork or Event about this Thing.",
)
url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
default=None,
description="URL of the item.",
)
alternateName: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="An alias for the item.",
)
sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
default=None,
description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the"
"URL of the item's Wikipedia page, Wikidata entry, or official website.",
)
description: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="A description of the item.",
)
disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="A sub property of description. A short description of the item used to disambiguate from"
"other, similar items. Information from other properties (in particular, name) may"
"be necessary for the description to be useful for disambiguation.",
)
identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field(
default=None,
description="The identifier property represents any kind of identifier for any kind of [[Thing]],"
"such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for"
"representing many of these, either as textual strings or as URL (URI) links. See [background"
"notes](/docs/datamodel.html#identifierBg) for more details.",
)
image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
default=None,
description="An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].",
)
name: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The name of the item.",
)
additionalType: Optional[
Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
] = Field(
default=None,
description="An additional type for the item, typically used for adding more specific types from external"
"vocabularies in microdata syntax. This is a relationship between something and a class"
"that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'"
"attribute - for multiple types. Schema.org tools may have only weaker understanding"
"of extra types, in particular those defined externally.",
)
eligibleQuantity: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The interval and unit of measurement of ordering quantities for which the offer or price"
"specification is valid. This allows e.g. specifying that a certain freight charge is"
"valid only for a certain quantity.",
)
valueAddedTaxIncluded: Optional[
Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]
] = Field(
default=None,
description="Specifies whether the applicable value-added tax (VAT) is included in the price specification"
"or not.",
)
minPrice: Optional[
Union[
List[Union[str, Any, StrictInt, StrictFloat]],
str,
Any,
StrictInt,
StrictFloat,
]
] = Field(
default=None,
description="The lowest price if the price is a range.",
)
price: Union[
List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat
] = Field(
default=None,
description="The offer price of a product, or of a price component when attached to PriceSpecification"
"and its subtypes.Usage guidelines:* Use the [[priceCurrency]] property (with standard"
"formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217),"
'e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies)'
'for cryptocurrencies, e.g. "BTC"; well known names for [Local Exchange Trading Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system)'
'(LETS) and other currency types, e.g. "Ithaca HOUR") instead of including [ambiguous'
"symbols](http://en.wikipedia.org/wiki/Dollar_sign#Currencies_that_use_the_dollar_or_peso_sign)"
"such as '$' in the value.* Use '.' (Unicode 'FULL STOP' (U+002E)) rather than ',' to indicate"
"a decimal point. Avoid using these symbols as a readability separator.* Note that both"
"[RDFa](http://www.w3.org/TR/xhtml-rdfa-primer/#using-the-content-attribute)"
'and Microdata syntax allow the use of a "content=" attribute for publishing simple'
"machine-readable values alongside more human-friendly formatting.* Use values from"
"0123456789 (Unicode 'DIGIT ZERO' (U+0030) to 'DIGIT NINE' (U+0039)) rather than superficially"
"similar Unicode symbols.",
)
validThrough: Optional[
Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date]
] = Field(
default=None,
description="The date after when the item is not valid. For example the end of an offer, salary period,"
"or a period of opening hours.",
)
maxPrice: Optional[
Union[
List[Union[str, Any, StrictInt, StrictFloat]],
str,
Any,
StrictInt,
StrictFloat,
]
] = Field(
default=None,
description="The highest price if the price is a range.",
)
validFrom: Optional[
Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date]
] = Field(
default=None,
description="The date when the item becomes valid.",
)
eligibleTransactionVolume: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The transaction volume, in a monetary unit, for which the offer or price specification"
"is valid, e.g. for indicating a minimal purchasing volume, to express free shipping"
"above a certain order volume, or to limit the acceptance of credit cards to purchases"
"to a certain minimal amount.",
)
priceCurrency: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="The currency of the price, or a price component when attached to [[PriceSpecification]]"
"and its subtypes.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217),"
'e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies)'
'for cryptocurrencies, e.g. "BTC"; well known names for [Local Exchange Trading Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system)'
'(LETS) and other currency types, e.g. "Ithaca HOUR".',
)
priceType: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="Defines the type of a price specified for an offered product, for example a list price,"
"a (temporary) sale price or a manufacturer suggested retail price. If multiple prices"
"are specified for an offer the [[priceType]] property can be used to identify the type"
"of each such specified price. The value of priceType can be specified as a value from enumeration"
"PriceTypeEnumeration or as a free form text string for price types that are not already"
"predefined in PriceTypeEnumeration.",
)
priceComponentType: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="Identifies a price component (for example, a line item on an invoice), part of the total"
"price for an offer.",
)
billingStart: Optional[
Union[
List[Union[str, Any, StrictInt, StrictFloat]],
str,
Any,
StrictInt,
StrictFloat,
]
] = Field(
default=None,
description="Specifies after how much time this price (or price component) becomes valid and billing"
"starts. Can be used, for example, to model a price increase after the first year of a subscription."
"The unit of measurement is specified by the unitCode property.",
)
unitCode: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field(
default=None,
description="The unit of measurement given using the UN/CEFACT Common Code (3 characters) or a URL."
"Other codes than the UN/CEFACT Common Code may be used with a prefix followed by a colon.",
)
billingDuration: Optional[
Union[
List[Union[str, Any, StrictInt, StrictFloat]],
str,
Any,
StrictInt,
StrictFloat,
]
] = Field(
default=None,
description="Specifies for how long this price (or price component) will be billed. Can be used, for"
"example, to model the contractual duration of a subscription or payment plan. Type can"
"be either a Duration or a Number (in which case the unit of measurement, for example month,"
"is specified by the unitCode property).",
)
unitText: Union[List[Union[str, Any]], str, Any] = Field(
default=None,
description="A string or text indicating the unit of measurement. Useful if you cannot provide a standard"
"unit code for<a href='unitCode'>unitCode</a>.",
)
referenceQuantity: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
default=None,
description="The reference quantity for which a certain price applies, e.g. 1 EUR per 4 kWh of electricity."
"This property is a replacement for unitOfMeasurement for the advanced cases where the"
"price does not relate to a standard unit.",
)
billingIncrement: Optional[
Union[
List[Union[str, Any, StrictInt, StrictFloat]],
str,
Any,
StrictInt,
StrictFloat,
]
] = Field(
default=None,
description="This property specifies the minimal quantity and rounding increment that will be the"
"basis for the billing. The unit of measurement is specified by the unitCode property.",
)
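# Illustrative note (not part of the generated model above): the field descriptions
# expect the numeric amount and the currency to be supplied separately, with '.' as
# the decimal point and an ISO 4217 code in priceCurrency. A hypothetical
# instantiation might look like the sketch below; the class name
# "UnitPriceSpecification" is an assumption for the example, since the model's own
# name is defined earlier in this module.
#
#   spec = UnitPriceSpecification(
#       price="1499.99",        # '.' as decimal separator, no readability separators
#       priceCurrency="USD",    # ISO 4217 code rather than an ambiguous '$'
#       unitCode="MON",         # UN/CEFACT common code (here: month), or a URL
#   )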
|
PypiClean
|
/adafruit-circuitpython-minimqtt-7.4.1.tar.gz/adafruit-circuitpython-minimqtt-7.4.1/examples/cpython/minimqtt_adafruitio_cpython.py
|
import socket
import ssl
import time
import adafruit_minimqtt.adafruit_minimqtt as MQTT
### Secrets File Setup ###
try:
from secrets import secrets
except ImportError:
print("Connection secrets are kept in secrets.py, please add them there!")
raise
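### Secrets file example ###
# This sketch is not part of the original example: it shows the secrets.py layout
# the script assumes, i.e. a plain dict named `secrets` exposing the two keys read
# below. The values are placeholders, not real credentials.
#
#   secrets = {
#       "aio_username": "my_adafruit_io_username",
#       "aio_key": "aio_XXXXXXXXXXXXXXXX",
#   }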
### Feeds ###
# Setup a feed named 'photocell' for publishing to a feed
photocell_feed = secrets["aio_username"] + "/feeds/photocell"
# Setup a feed named 'onoff' for subscribing to changes
onoff_feed = secrets["aio_username"] + "/feeds/onoff"
### Code ###
# Define callback methods which are called when events occur
# pylint: disable=unused-argument, redefined-outer-name
def connected(client, userdata, flags, rc):
# This function will be called when the client is connected
# successfully to the broker.
print("Connected to Adafruit IO! Listening for topic changes on %s" % onoff_feed)
# Subscribe to all changes on the onoff_feed.
client.subscribe(onoff_feed)
def disconnected(client, userdata, rc):
# This method is called when the client is disconnected
print("Disconnected from Adafruit IO!")
def message(client, topic, message):
# This method is called when a topic the client is subscribed to
# has a new message.
print("New message on topic {0}: {1}".format(topic, message))
# Set up a MiniMQTT Client
mqtt_client = MQTT.MQTT(
broker="io.adafruit.com",
username=secrets["aio_username"],
password=secrets["aio_key"],
socket_pool=socket,
is_ssl=True,
ssl_context=ssl.create_default_context(),
)
# Setup the callback methods above
mqtt_client.on_connect = connected
mqtt_client.on_disconnect = disconnected
mqtt_client.on_message = message
# Connect the client to the MQTT broker.
print("Connecting to Adafruit IO...")
mqtt_client.connect()
photocell_val = 0
while True:
# Poll the message queue
mqtt_client.loop()
# Send a new message
print("Sending photocell value: %d..." % photocell_val)
mqtt_client.publish(photocell_feed, photocell_val)
print("Sent!")
photocell_val += 1
time.sleep(1)
|
PypiClean
|
/pypcapkit-1.2.1-cp310-none-any.whl/pcapkit/protocols/link/ospf.py
|
import ipaddress
import re
from typing import TYPE_CHECKING, cast
from pcapkit.const.ospf.authentication import Authentication as Enum_Authentication
from pcapkit.const.ospf.packet import Packet as Enum_Packet
from pcapkit.protocols.data.link.ospf import OSPF as Data_OSPF
from pcapkit.protocols.data.link.ospf import \
CrytographicAuthentication as Data_CrytographicAuthentication
from pcapkit.protocols.link.link import Link
from pcapkit.protocols.schema.link.ospf import OSPF as Schema_OSPF
from pcapkit.protocols.schema.link.ospf import \
CrytographicAuthentication as Schema_CrytographicAuthentication
from pcapkit.utilities.exceptions import ProtocolError, UnsupportedCall
if TYPE_CHECKING:
from enum import IntEnum as StdlibEnum
from ipaddress import IPv4Address
from typing import Any, NoReturn, Optional, Type
from aenum import IntEnum as AenumEnum
from typing_extensions import Literal
from pcapkit.protocols.protocol import Protocol
from pcapkit.protocols.schema.schema import Schema
__all__ = ['OSPF']
# Ethernet address pattern
PAT_MAC_ADDR = re.compile(rb'(?i)(?:[0-9a-f]{2}[:-]){5}[0-9a-f]{2}')
class OSPF(Link[Data_OSPF, Schema_OSPF],
schema=Schema_OSPF, data=Data_OSPF):
"""This class implements Open Shortest Path First."""
##########################################################################
# Properties.
##########################################################################
@property
def name(self) -> 'str':
"""Name of current protocol."""
return f'Open Shortest Path First version {self._info.version}'
@property
def alias(self) -> 'str':
"""Acronym of current protocol."""
return f'OSPFv{self._info.version}'
@property
def length(self) -> 'Literal[24]':
"""Header length of current protocol."""
return 24
@property
def type(self) -> 'Enum_Packet':
"""OSPF packet type."""
return self._info.type
##########################################################################
# Methods.
##########################################################################
def read(self, length: 'Optional[int]' = None, **kwargs: 'Any') -> 'Data_OSPF':
"""Read Open Shortest Path First.
Structure of OSPF header [:rfc:`2328`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version # | Type | Packet length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Router ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Area ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Checksum | AuType |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Authentication |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Authentication |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
length: Length of packet data.
**kwargs: Arbitrary keyword arguments.
Returns:
Parsed packet data.
"""
schema = self.__schema__
ospf = Data_OSPF(
version=schema.version,
type=schema.type,
len=schema.length,
router_id=schema.router_id,
area_id=schema.area_id,
chksum=schema.checksum,
autype=schema.auth_type,
)
length = schema.length if schema.length else (length or len(self))
if ospf.autype == Enum_Authentication.Cryptographic_authentication:
ospf.__update__([
('auth', self._read_encrypt_auth(
cast('Schema_CrytographicAuthentication', schema.auth_data),
)),
])
else:
ospf.__update__([
('auth', cast('bytes', schema.auth_data)),
])
return self._decode_next_layer(ospf, length - self.length)
def make(self,
version: 'int' = 2,
type: 'Enum_Packet | StdlibEnum | AenumEnum | str | int' = Enum_Packet.Hello,
type_default: 'Optional[int]' = None,
type_namespace: 'Optional[dict[str, int] | dict[int, str] | Type[StdlibEnum] | Type[AenumEnum]]' = None, # pylint: disable=line-too-long
type_reversed: 'bool' = False,
router_id: 'IPv4Address | str | bytes | bytearray' = '0.0.0.0', # nosec: B104
area_id: 'IPv4Address | str | bytes | bytearray' = '0.0.0.0', # nosec: B104
checksum: 'bytes' = b'\x00\x00',
auth_type: 'Enum_Authentication | StdlibEnum | AenumEnum | str | int' = Enum_Authentication.No_Authentication,
auth_type_default: 'Optional[int]' = None,
auth_type_namespace: 'Optional[dict[str, int] | dict[int, str] | Type[StdlibEnum] | Type[AenumEnum]]' = None, # pylint: disable=line-too-long
auth_type_reversed: 'bool' = False,
auth_data: 'bytes | Schema_CrytographicAuthentication | Data_CrytographicAuthentication' = b'\x00\x00\x00\x00\x00\x00\x00\x00',
payload: 'bytes | Protocol | Schema' = b'',
**kwargs: 'Any') -> 'Schema_OSPF':
"""Make (construct) packet data.
Args:
version: OSPF version number.
type: OSPF packet type.
type_default: Default value for ``type`` if not specified.
type_namespace: Namespace for ``type``.
type_reversed: Reverse namespace for ``type``.
router_id: Router ID.
area_id: Area ID.
checksum: Checksum.
auth_type: Authentication type.
auth_type_default: Default value for ``auth_type`` if not specified.
auth_type_namespace: Namespace for ``auth_type``.
auth_type_reversed: Reverse namespace for ``auth_type``.
auth_data: Authentication data.
payload: Payload data.
**kwargs: Arbitrary keyword arguments.
Returns:
Constructed packet data.
"""
type_ = self._make_index(type, type_default, namespace=type_namespace,
reversed=type_reversed, pack=False)
auth_type_ = self._make_index(auth_type, auth_type_default, namespace=auth_type_namespace,
reversed=auth_type_reversed, pack=False)
if auth_type_ == Enum_Authentication.Cryptographic_authentication:
data = self._make_encrypt_auth(auth_data)
else:
if not isinstance(auth_data, bytes):
raise ProtocolError(f'OSPF: invalid type for authentication data: {auth_data!r}')
data = auth_data
return Schema_OSPF(
version=version,
type=type_, # type: ignore[arg-type]
length=24 + len(payload),
router_id=router_id,
area_id=area_id,
checksum=checksum,
auth_type=auth_type_, # type: ignore[arg-type]
auth_data=data,
payload=payload,
)
##########################################################################
# Data models.
##########################################################################
def __length_hint__(self) -> 'Literal[24]':
"""Return an estimated length for the object."""
return 24
@classmethod
def __index__(cls) -> 'NoReturn': # pylint: disable=invalid-index-returned
"""Numeral registry index of the protocol.
Raises:
UnsupportedCall: This protocol has no registry entry.
"""
raise UnsupportedCall(f'{cls.__name__!r} object cannot be interpreted as an integer')
##########################################################################
# Utilities.
##########################################################################
@classmethod
def _make_data(cls, data: 'Data_OSPF') -> 'dict[str, Any]': # type: ignore[override]
"""Create key-value pairs from ``data`` for protocol construction.
Args:
data: protocol data
Returns:
Key-value pairs for protocol construction.
"""
return {
'version': data.version,
'type': data.type,
'router_id': data.router_id,
'area_id': data.area_id,
'checksum': data.chksum,
'auth_type': data.autype,
'auth_data': data.auth,
'payload': cls._make_payload(data)
}
def _read_id_numbers(self, id: 'bytes') -> 'IPv4Address':
"""Read router and area IDs.
Args:
id: ID bytes.
Returns:
Parsed IDs as an IPv4 address.
"""
#_byte = self._read_fileng(4)
#_addr = '.'.join(str(_) for _ in _byte)
return ipaddress.ip_address(id) # type: ignore[return-value]
def _make_id_numbers(self, id: 'IPv4Address | str | bytes | bytearray') -> 'bytes':
"""Make router and area IDs.
Args:
id: ID.
Returns:
ID bytes.
"""
return ipaddress.ip_address(id).packed
def _read_encrypt_auth(self, schema: 'Schema_CrytographicAuthentication') -> 'Data_CrytographicAuthentication':
"""Read Authentication field when Cryptographic Authentication is employed,
i.e. :attr:`~OSPF.autype` is ``2``.
Structure of Cryptographic Authentication [:rfc:`2328`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0 | Key ID | Auth Data Len |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Cryptographic sequence number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
schema: parsed authentication data
Returns:
Parsed packet data.
"""
auth = Data_CrytographicAuthentication(
key_id=schema.key_id,
len=schema.len,
seq=schema.seq,
)
return auth
def _make_encrypt_auth(self,
auth_data: 'bytes | Schema_CrytographicAuthentication | Data_CrytographicAuthentication' # pylint: disable=line-too-long
) -> 'bytes | Schema_CrytographicAuthentication':
"""Make Authentication field when Cryptographic Authentication is employed.
Args:
auth_data: Authentication data.
Returns:
Authentication data, as raw bytes or a cryptographic authentication schema.
"""
if isinstance(auth_data, (Schema_CrytographicAuthentication, bytes)):
return auth_data
if isinstance(auth_data, Data_CrytographicAuthentication):
return Schema_CrytographicAuthentication(
key_id=auth_data.key_id,
len=auth_data.len,
seq=auth_data.seq,
)
raise ProtocolError(f'OSPF: invalid type for auth_data: {auth_data!r}')
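# Illustrative sketch (not part of pcapkit): the router and area IDs handled by
# _read_id_numbers()/_make_id_numbers() above are plain 4-byte values converted
# with the standard library ipaddress module, e.g.:
#
#   import ipaddress
#   packed = ipaddress.ip_address("192.0.2.1").packed    # b'\xc0\x00\x02\x01'
#   restored = ipaddress.ip_address(packed)              # IPv4Address('192.0.2.1')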
|
PypiClean
|
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/eventhub/_inputs.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'CaptureDescriptionArgs',
'ClusterSkuArgs',
'ConnectionStateArgs',
'DestinationArgs',
'EncryptionArgs',
'IdentityArgs',
'KeyVaultPropertiesArgs',
'NWRuleSetIpRulesArgs',
'NWRuleSetVirtualNetworkRulesArgs',
'PrivateEndpointConnectionArgs',
'PrivateEndpointArgs',
'RetentionDescriptionArgs',
'SkuArgs',
'SubnetArgs',
'ThrottlingPolicyArgs',
'UserAssignedIdentityPropertiesArgs',
]
@pulumi.input_type
class CaptureDescriptionArgs:
def __init__(__self__, *,
destination: Optional[pulumi.Input['DestinationArgs']] = None,
enabled: Optional[pulumi.Input[bool]] = None,
encoding: Optional[pulumi.Input['EncodingCaptureDescription']] = None,
interval_in_seconds: Optional[pulumi.Input[int]] = None,
size_limit_in_bytes: Optional[pulumi.Input[int]] = None,
skip_empty_archives: Optional[pulumi.Input[bool]] = None):
"""
Properties to configure capture description for eventhub
:param pulumi.Input['DestinationArgs'] destination: Properties of Destination where capture will be stored. (Storage Account, Blob Names)
:param pulumi.Input[bool] enabled: A value that indicates whether capture description is enabled.
:param pulumi.Input['EncodingCaptureDescription'] encoding: Enumerates the possible values for the encoding format of capture description. Note: 'AvroDeflate' will be deprecated in New API Version
:param pulumi.Input[int] interval_in_seconds: The time window allows you to set the frequency with which the capture to Azure Blobs will happen; the value should be between 60 and 900 seconds
:param pulumi.Input[int] size_limit_in_bytes: The size window defines the amount of data built up in your Event Hub before a capture operation; the value should be between 10485760 and 524288000 bytes
:param pulumi.Input[bool] skip_empty_archives: A value that indicates whether to Skip Empty Archives
"""
if destination is not None:
pulumi.set(__self__, "destination", destination)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if encoding is not None:
pulumi.set(__self__, "encoding", encoding)
if interval_in_seconds is not None:
pulumi.set(__self__, "interval_in_seconds", interval_in_seconds)
if size_limit_in_bytes is not None:
pulumi.set(__self__, "size_limit_in_bytes", size_limit_in_bytes)
if skip_empty_archives is not None:
pulumi.set(__self__, "skip_empty_archives", skip_empty_archives)
@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input['DestinationArgs']]:
"""
Properties of Destination where capture will be stored. (Storage Account, Blob Names)
"""
return pulumi.get(self, "destination")
@destination.setter
def destination(self, value: Optional[pulumi.Input['DestinationArgs']]):
pulumi.set(self, "destination", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
A value that indicates whether capture description is enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def encoding(self) -> Optional[pulumi.Input['EncodingCaptureDescription']]:
"""
Enumerates the possible values for the encoding format of capture description. Note: 'AvroDeflate' will be deprecated in New API Version
"""
return pulumi.get(self, "encoding")
@encoding.setter
def encoding(self, value: Optional[pulumi.Input['EncodingCaptureDescription']]):
pulumi.set(self, "encoding", value)
@property
@pulumi.getter(name="intervalInSeconds")
def interval_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
The time window allows you to set the frequency with which the capture to Azure Blobs will happen; the value should be between 60 and 900 seconds
"""
return pulumi.get(self, "interval_in_seconds")
@interval_in_seconds.setter
def interval_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "interval_in_seconds", value)
@property
@pulumi.getter(name="sizeLimitInBytes")
def size_limit_in_bytes(self) -> Optional[pulumi.Input[int]]:
"""
The size window defines the amount of data built up in your Event Hub before a capture operation; the value should be between 10485760 and 524288000 bytes
"""
return pulumi.get(self, "size_limit_in_bytes")
@size_limit_in_bytes.setter
def size_limit_in_bytes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "size_limit_in_bytes", value)
@property
@pulumi.getter(name="skipEmptyArchives")
def skip_empty_archives(self) -> Optional[pulumi.Input[bool]]:
"""
A value that indicates whether to Skip Empty Archives
"""
return pulumi.get(self, "skip_empty_archives")
@skip_empty_archives.setter
def skip_empty_archives(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_empty_archives", value)
@pulumi.input_type
class ClusterSkuArgs:
def __init__(__self__, *,
name: pulumi.Input[Union[str, 'ClusterSkuName']],
capacity: Optional[pulumi.Input[int]] = None):
"""
SKU parameters particular to a cluster instance.
:param pulumi.Input[Union[str, 'ClusterSkuName']] name: Name of this SKU.
:param pulumi.Input[int] capacity: The quantity of Event Hubs Cluster Capacity Units contained in this cluster.
"""
pulumi.set(__self__, "name", name)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
@property
@pulumi.getter
def name(self) -> pulumi.Input[Union[str, 'ClusterSkuName']]:
"""
Name of this SKU.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[Union[str, 'ClusterSkuName']]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
"""
The quantity of Event Hubs Cluster Capacity Units contained in this cluster.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@pulumi.input_type
class ConnectionStateArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'PrivateLinkConnectionStatus']]] = None):
"""
ConnectionState information.
:param pulumi.Input[str] description: Description of the connection state.
:param pulumi.Input[Union[str, 'PrivateLinkConnectionStatus']] status: Status of the connection.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the connection state.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateLinkConnectionStatus']]]:
"""
Status of the connection.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateLinkConnectionStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class DestinationArgs:
def __init__(__self__, *,
archive_name_format: Optional[pulumi.Input[str]] = None,
blob_container: Optional[pulumi.Input[str]] = None,
data_lake_account_name: Optional[pulumi.Input[str]] = None,
data_lake_folder_path: Optional[pulumi.Input[str]] = None,
data_lake_subscription_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
storage_account_resource_id: Optional[pulumi.Input[str]] = None):
"""
Capture storage details for capture description
:param pulumi.Input[str] archive_name_format: Blob naming convention for archive, e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. Here all the parameters (Namespace,EventHub .. etc) are mandatory irrespective of order
:param pulumi.Input[str] blob_container: Blob container Name
:param pulumi.Input[str] data_lake_account_name: The Azure Data Lake Store name for the captured events
:param pulumi.Input[str] data_lake_folder_path: The destination folder path for the captured events
:param pulumi.Input[str] data_lake_subscription_id: Subscription Id of Azure Data Lake Store
:param pulumi.Input[str] name: Name for capture destination
:param pulumi.Input[str] storage_account_resource_id: Resource id of the storage account to be used to create the blobs
"""
if archive_name_format is not None:
pulumi.set(__self__, "archive_name_format", archive_name_format)
if blob_container is not None:
pulumi.set(__self__, "blob_container", blob_container)
if data_lake_account_name is not None:
pulumi.set(__self__, "data_lake_account_name", data_lake_account_name)
if data_lake_folder_path is not None:
pulumi.set(__self__, "data_lake_folder_path", data_lake_folder_path)
if data_lake_subscription_id is not None:
pulumi.set(__self__, "data_lake_subscription_id", data_lake_subscription_id)
if name is not None:
pulumi.set(__self__, "name", name)
if storage_account_resource_id is not None:
pulumi.set(__self__, "storage_account_resource_id", storage_account_resource_id)
@property
@pulumi.getter(name="archiveNameFormat")
def archive_name_format(self) -> Optional[pulumi.Input[str]]:
"""
Blob naming convention for archive, e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. Here all the parameters (Namespace,EventHub .. etc) are mandatory irrespective of order
"""
return pulumi.get(self, "archive_name_format")
@archive_name_format.setter
def archive_name_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "archive_name_format", value)
@property
@pulumi.getter(name="blobContainer")
def blob_container(self) -> Optional[pulumi.Input[str]]:
"""
Blob container Name
"""
return pulumi.get(self, "blob_container")
@blob_container.setter
def blob_container(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "blob_container", value)
@property
@pulumi.getter(name="dataLakeAccountName")
def data_lake_account_name(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Data Lake Store name for the captured events
"""
return pulumi.get(self, "data_lake_account_name")
@data_lake_account_name.setter
def data_lake_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_lake_account_name", value)
@property
@pulumi.getter(name="dataLakeFolderPath")
def data_lake_folder_path(self) -> Optional[pulumi.Input[str]]:
"""
The destination folder path for the captured events
"""
return pulumi.get(self, "data_lake_folder_path")
@data_lake_folder_path.setter
def data_lake_folder_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_lake_folder_path", value)
@property
@pulumi.getter(name="dataLakeSubscriptionId")
def data_lake_subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
Subscription Id of Azure Data Lake Store
"""
return pulumi.get(self, "data_lake_subscription_id")
@data_lake_subscription_id.setter
def data_lake_subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_lake_subscription_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name for capture destination
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="storageAccountResourceId")
def storage_account_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
Resource id of the storage account to be used to create the blobs
"""
return pulumi.get(self, "storage_account_resource_id")
@storage_account_resource_id.setter
def storage_account_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_account_resource_id", value)
@pulumi.input_type
class EncryptionArgs:
def __init__(__self__, *,
key_source: Optional[pulumi.Input['KeySource']] = None,
key_vault_properties: Optional[pulumi.Input[Sequence[pulumi.Input['KeyVaultPropertiesArgs']]]] = None,
require_infrastructure_encryption: Optional[pulumi.Input[bool]] = None):
"""
Properties to configure Encryption
:param pulumi.Input['KeySource'] key_source: Enumerates the possible value of keySource for Encryption
:param pulumi.Input[Sequence[pulumi.Input['KeyVaultPropertiesArgs']]] key_vault_properties: Properties of KeyVault
:param pulumi.Input[bool] require_infrastructure_encryption: Enable Infrastructure Encryption (Double Encryption)
"""
if key_source is None:
key_source = 'Microsoft.KeyVault'
if key_source is not None:
pulumi.set(__self__, "key_source", key_source)
if key_vault_properties is not None:
pulumi.set(__self__, "key_vault_properties", key_vault_properties)
if require_infrastructure_encryption is not None:
pulumi.set(__self__, "require_infrastructure_encryption", require_infrastructure_encryption)
@property
@pulumi.getter(name="keySource")
def key_source(self) -> Optional[pulumi.Input['KeySource']]:
"""
Enumerates the possible value of keySource for Encryption
"""
return pulumi.get(self, "key_source")
@key_source.setter
def key_source(self, value: Optional[pulumi.Input['KeySource']]):
pulumi.set(self, "key_source", value)
@property
@pulumi.getter(name="keyVaultProperties")
def key_vault_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KeyVaultPropertiesArgs']]]]:
"""
Properties of KeyVault
"""
return pulumi.get(self, "key_vault_properties")
@key_vault_properties.setter
def key_vault_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KeyVaultPropertiesArgs']]]]):
pulumi.set(self, "key_vault_properties", value)
@property
@pulumi.getter(name="requireInfrastructureEncryption")
def require_infrastructure_encryption(self) -> Optional[pulumi.Input[bool]]:
"""
Enable Infrastructure Encryption (Double Encryption)
"""
return pulumi.get(self, "require_infrastructure_encryption")
@require_infrastructure_encryption.setter
def require_infrastructure_encryption(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "require_infrastructure_encryption", value)
@pulumi.input_type
class IdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input['ManagedServiceIdentityType']] = None,
user_assigned_identities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Properties to configure Identity for Bring your Own Keys
:param pulumi.Input['ManagedServiceIdentityType'] type: Type of managed service identity.
:param pulumi.Input[Sequence[pulumi.Input[str]]] user_assigned_identities: Properties for User Assigned Identities
"""
if type is not None:
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ManagedServiceIdentityType']]:
"""
Type of managed service identity.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ManagedServiceIdentityType']]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Properties for User Assigned Identities
"""
return pulumi.get(self, "user_assigned_identities")
@user_assigned_identities.setter
def user_assigned_identities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "user_assigned_identities", value)
@pulumi.input_type
class KeyVaultPropertiesArgs:
def __init__(__self__, *,
identity: Optional[pulumi.Input['UserAssignedIdentityPropertiesArgs']] = None,
key_name: Optional[pulumi.Input[str]] = None,
key_vault_uri: Optional[pulumi.Input[str]] = None,
key_version: Optional[pulumi.Input[str]] = None):
"""
Properties to configure keyVault Properties
:param pulumi.Input[str] key_name: Name of the Key from KeyVault
:param pulumi.Input[str] key_vault_uri: Uri of KeyVault
:param pulumi.Input[str] key_version: Key Version
"""
if identity is not None:
pulumi.set(__self__, "identity", identity)
if key_name is not None:
pulumi.set(__self__, "key_name", key_name)
if key_vault_uri is not None:
pulumi.set(__self__, "key_vault_uri", key_vault_uri)
if key_version is not None:
pulumi.set(__self__, "key_version", key_version)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['UserAssignedIdentityPropertiesArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['UserAssignedIdentityPropertiesArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Key from KeyVault
"""
return pulumi.get(self, "key_name")
@key_name.setter
def key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_name", value)
@property
@pulumi.getter(name="keyVaultUri")
def key_vault_uri(self) -> Optional[pulumi.Input[str]]:
"""
Uri of KeyVault
"""
return pulumi.get(self, "key_vault_uri")
@key_vault_uri.setter
def key_vault_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_vault_uri", value)
@property
@pulumi.getter(name="keyVersion")
def key_version(self) -> Optional[pulumi.Input[str]]:
"""
Key Version
"""
return pulumi.get(self, "key_version")
@key_version.setter
def key_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_version", value)
@pulumi.input_type
class NWRuleSetIpRulesArgs:
def __init__(__self__, *,
action: Optional[pulumi.Input[Union[str, 'NetworkRuleIPAction']]] = None,
ip_mask: Optional[pulumi.Input[str]] = None):
"""
The response from the List namespace operation.
:param pulumi.Input[Union[str, 'NetworkRuleIPAction']] action: The IP Filter Action
:param pulumi.Input[str] ip_mask: IP Mask
"""
if action is not None:
pulumi.set(__self__, "action", action)
if ip_mask is not None:
pulumi.set(__self__, "ip_mask", ip_mask)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[Union[str, 'NetworkRuleIPAction']]]:
"""
The IP Filter Action
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input[Union[str, 'NetworkRuleIPAction']]]):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="ipMask")
def ip_mask(self) -> Optional[pulumi.Input[str]]:
"""
IP Mask
"""
return pulumi.get(self, "ip_mask")
@ip_mask.setter
def ip_mask(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_mask", value)
@pulumi.input_type
class NWRuleSetVirtualNetworkRulesArgs:
def __init__(__self__, *,
ignore_missing_vnet_service_endpoint: Optional[pulumi.Input[bool]] = None,
subnet: Optional[pulumi.Input['SubnetArgs']] = None):
"""
The response from the List namespace operation.
:param pulumi.Input[bool] ignore_missing_vnet_service_endpoint: Value that indicates whether to ignore missing Vnet Service Endpoint
:param pulumi.Input['SubnetArgs'] subnet: Subnet properties
"""
if ignore_missing_vnet_service_endpoint is not None:
pulumi.set(__self__, "ignore_missing_vnet_service_endpoint", ignore_missing_vnet_service_endpoint)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
@property
@pulumi.getter(name="ignoreMissingVnetServiceEndpoint")
def ignore_missing_vnet_service_endpoint(self) -> Optional[pulumi.Input[bool]]:
"""
Value that indicates whether to ignore missing Vnet Service Endpoint
"""
return pulumi.get(self, "ignore_missing_vnet_service_endpoint")
@ignore_missing_vnet_service_endpoint.setter
def ignore_missing_vnet_service_endpoint(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ignore_missing_vnet_service_endpoint", value)
@property
@pulumi.getter
def subnet(self) -> Optional[pulumi.Input['SubnetArgs']]:
"""
Subnet properties
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: Optional[pulumi.Input['SubnetArgs']]):
pulumi.set(self, "subnet", value)
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
private_endpoint: Optional[pulumi.Input['PrivateEndpointArgs']] = None,
private_link_service_connection_state: Optional[pulumi.Input['ConnectionStateArgs']] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'EndPointProvisioningState']]] = None):
"""
Properties of the PrivateEndpointConnection.
:param pulumi.Input['PrivateEndpointArgs'] private_endpoint: The Private Endpoint resource for this Connection.
:param pulumi.Input['ConnectionStateArgs'] private_link_service_connection_state: Details about the state of the connection.
:param pulumi.Input[Union[str, 'EndPointProvisioningState']] provisioning_state: Provisioning state of the Private Endpoint Connection.
"""
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointArgs']]:
"""
The Private Endpoint resource for this Connection.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['ConnectionStateArgs']]:
"""
Details about the state of the connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['ConnectionStateArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'EndPointProvisioningState']]]:
"""
Provisioning state of the Private Endpoint Connection.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'EndPointProvisioningState']]]):
pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class PrivateEndpointArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
PrivateEndpoint information.
:param pulumi.Input[str] id: The ARM identifier for Private Endpoint.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The ARM identifier for Private Endpoint.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class RetentionDescriptionArgs:
def __init__(__self__, *,
cleanup_policy: Optional[pulumi.Input[Union[str, 'CleanupPolicyRetentionDescription']]] = None,
retention_time_in_hours: Optional[pulumi.Input[float]] = None,
tombstone_retention_time_in_hours: Optional[pulumi.Input[int]] = None):
"""
Properties to configure retention settings for the eventhub
:param pulumi.Input[Union[str, 'CleanupPolicyRetentionDescription']] cleanup_policy: Enumerates the possible values for cleanup policy
:param pulumi.Input[float] retention_time_in_hours: Number of hours to retain the events for this Event Hub. This value is only used when cleanupPolicy is Delete. If cleanupPolicy is Compact the returned value of this property is Long.MaxValue
:param pulumi.Input[int] tombstone_retention_time_in_hours: Number of hours to retain the tombstone markers of a compacted Event Hub. This value is only used when cleanupPolicy is Compact. Consumer must complete reading the tombstone marker within this specified amount of time if consumer begins from starting offset to ensure they get a valid snapshot for the specific key described by the tombstone marker within the compacted Event Hub
"""
if cleanup_policy is not None:
pulumi.set(__self__, "cleanup_policy", cleanup_policy)
if retention_time_in_hours is not None:
pulumi.set(__self__, "retention_time_in_hours", retention_time_in_hours)
if tombstone_retention_time_in_hours is not None:
pulumi.set(__self__, "tombstone_retention_time_in_hours", tombstone_retention_time_in_hours)
@property
@pulumi.getter(name="cleanupPolicy")
def cleanup_policy(self) -> Optional[pulumi.Input[Union[str, 'CleanupPolicyRetentionDescription']]]:
"""
Enumerates the possible values for cleanup policy
"""
return pulumi.get(self, "cleanup_policy")
@cleanup_policy.setter
def cleanup_policy(self, value: Optional[pulumi.Input[Union[str, 'CleanupPolicyRetentionDescription']]]):
pulumi.set(self, "cleanup_policy", value)
@property
@pulumi.getter(name="retentionTimeInHours")
def retention_time_in_hours(self) -> Optional[pulumi.Input[float]]:
"""
Number of hours to retain the events for this Event Hub. This value is only used when cleanupPolicy is Delete. If cleanupPolicy is Compact the returned value of this property is Long.MaxValue
"""
return pulumi.get(self, "retention_time_in_hours")
@retention_time_in_hours.setter
def retention_time_in_hours(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "retention_time_in_hours", value)
@property
@pulumi.getter(name="tombstoneRetentionTimeInHours")
def tombstone_retention_time_in_hours(self) -> Optional[pulumi.Input[int]]:
"""
Number of hours to retain the tombstone markers of a compacted Event Hub. This value is only used when cleanupPolicy is Compact. Consumer must complete reading the tombstone marker within this specified amount of time if consumer begins from starting offset to ensure they get a valid snapshot for the specific key described by the tombstone marker within the compacted Event Hub
"""
return pulumi.get(self, "tombstone_retention_time_in_hours")
@tombstone_retention_time_in_hours.setter
def tombstone_retention_time_in_hours(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "tombstone_retention_time_in_hours", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input[Union[str, 'SkuName']],
capacity: Optional[pulumi.Input[int]] = None,
tier: Optional[pulumi.Input[Union[str, 'SkuTier']]] = None):
"""
SKU parameters supplied to the create namespace operation
:param pulumi.Input[Union[str, 'SkuName']] name: Name of this SKU.
:param pulumi.Input[int] capacity: The Event Hubs throughput units for Basic or Standard tiers, where value should be 0 to 20 throughput units. The Event Hubs premium units for Premium tier, where value should be 0 to 10 premium units.
:param pulumi.Input[Union[str, 'SkuTier']] tier: The billing tier of this particular SKU.
"""
pulumi.set(__self__, "name", name)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def name(self) -> pulumi.Input[Union[str, 'SkuName']]:
"""
Name of this SKU.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[Union[str, 'SkuName']]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
"""
The Event Hubs throughput units for Basic or Standard tiers, where value should be 0 to 20 throughput units. The Event Hubs premium units for Premium tier, where value should be 0 to 10 premium units.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[Union[str, 'SkuTier']]]:
"""
The billing tier of this particular SKU.
"""
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[Union[str, 'SkuTier']]]):
pulumi.set(self, "tier", value)
@pulumi.input_type
class SubnetArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
Properties supplied for Subnet
:param pulumi.Input[str] id: Resource ID of Virtual Network Subnet
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID of Virtual Network Subnet
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class ThrottlingPolicyArgs:
def __init__(__self__, *,
metric_id: pulumi.Input[Union[str, 'MetricId']],
name: pulumi.Input[str],
rate_limit_threshold: pulumi.Input[float],
type: pulumi.Input[str]):
"""
Properties of the throttling policy
:param pulumi.Input[Union[str, 'MetricId']] metric_id: Metric Id on which the throttle limit should be set, MetricId can be discovered by hovering over Metric in the Metrics section of Event Hub Namespace inside Azure Portal
:param pulumi.Input[str] name: The Name of this policy
:param pulumi.Input[float] rate_limit_threshold: The threshold limit above which the application group will be throttled. The rate limit is always per second.
:param pulumi.Input[str] type: Application Group Policy types
Expected value is 'ThrottlingPolicy'.
"""
pulumi.set(__self__, "metric_id", metric_id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "rate_limit_threshold", rate_limit_threshold)
pulumi.set(__self__, "type", 'ThrottlingPolicy')
@property
@pulumi.getter(name="metricId")
def metric_id(self) -> pulumi.Input[Union[str, 'MetricId']]:
"""
Metric Id on which the throttle limit should be set, MetricId can be discovered by hovering over Metric in the Metrics section of Event Hub Namespace inside Azure Portal
"""
return pulumi.get(self, "metric_id")
@metric_id.setter
def metric_id(self, value: pulumi.Input[Union[str, 'MetricId']]):
pulumi.set(self, "metric_id", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The Name of this policy
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="rateLimitThreshold")
def rate_limit_threshold(self) -> pulumi.Input[float]:
"""
The threshold limit above which the application group will be throttled. The rate limit is always per second.
"""
return pulumi.get(self, "rate_limit_threshold")
@rate_limit_threshold.setter
def rate_limit_threshold(self, value: pulumi.Input[float]):
pulumi.set(self, "rate_limit_threshold", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Application Group Policy types
Expected value is 'ThrottlingPolicy'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class UserAssignedIdentityPropertiesArgs:
def __init__(__self__, *,
user_assigned_identity: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] user_assigned_identity: ARM ID of user Identity selected for encryption
"""
if user_assigned_identity is not None:
pulumi.set(__self__, "user_assigned_identity", user_assigned_identity)
@property
@pulumi.getter(name="userAssignedIdentity")
def user_assigned_identity(self) -> Optional[pulumi.Input[str]]:
"""
ARM ID of user Identity selected for encryption
"""
return pulumi.get(self, "user_assigned_identity")
@user_assigned_identity.setter
def user_assigned_identity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_assigned_identity", value)
|
PypiClean
|
/odooku_odoo_base-11.0.7-py35-none-any.whl/odoo/addons/web_settings_dashboard/controllers/main.py
|
from datetime import datetime, timedelta
from odoo import fields, http
from odoo.exceptions import AccessError
from odoo.http import request
from odoo import release
class WebSettingsDashboard(http.Controller):
@http.route('/web_settings_dashboard/data', type='json', auth='user')
def web_settings_dashboard_data(self, **kw):
if not request.env.user.has_group('base.group_erp_manager'):
raise AccessError("Access Denied")
installed_apps = request.env['ir.module.module'].search_count([
('application', '=', True),
('state', 'in', ['installed', 'to upgrade', 'to remove'])
])
cr = request.cr
cr.execute("""
SELECT count(*)
FROM res_users
WHERE active=true AND
share=false
""")
active_count = cr.dictfetchall()[0].get('count')
cr.execute("""
SELECT count(u.*)
FROM res_users u
WHERE active=true AND
NOT exists(SELECT 1 FROM res_users_log WHERE create_uid=u.id)
""")
pending_count = cr.dictfetchall()[0].get('count')
cr.execute("""
SELECT id, login
FROM res_users u
WHERE active=true
AND NOT exists(SELECT 1 FROM res_users_log WHERE create_uid=u.id)
ORDER BY id desc
LIMIT 10
""")
pending_users = cr.fetchall()
# See update.py for this computation
limit_date = datetime.now() - timedelta(15)
enterprise_users = request.env['res.users'].search_count([("login_date", ">=", fields.Datetime.to_string(limit_date)), ('share', '=', False)])
expiration_date = request.env['ir.config_parameter'].sudo().get_param('database.expiration_date')
return {
'apps': {
'installed_apps': installed_apps,
'enterprise_users': enterprise_users,
},
'users_info': {
'active_users': active_count,
'pending_count': pending_count,
'pending_users': pending_users,
'user_form_view_id': request.env['ir.model.data'].xmlid_to_res_id("base.view_users_form"),
},
'share': {
'server_version': release.version,
'expiration_date': expiration_date,
'debug': request.debug,
},
'company': {
'company_id': request.env.user.company_id.id,
'company_name': request.env.user.company_id.name
}
}
|
PypiClean
|
/cy-0.5.8.tar.gz/cy-0.5.8/crispy/LMModels.py
|
import logging
import numpy as np
import pandas as pd
from scipy.stats import chi2
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from statsmodels.stats.multitest import multipletests
LOG = logging.getLogger("Crispy")
class LModel:
def __init__(
self,
Y,
X,
M,
M2=None,
normalize=False,
fit_intercept=True,
copy_X=True,
n_jobs=4,
verbose=1,
):
self.samples = set.intersection(
set(Y.index),
set(X.index),
set(M.index),
set(Y.index) if M2 is None else set(M2.index),
)
self.X = X.loc[self.samples]
self.X = self.X.loc[:, self.X.count() > (M.shape[1] + (1 if M2 is None else 2))]
self.X_ma = np.ma.masked_invalid(self.X.values)
self.Y = Y.loc[self.samples]
self.Y = self.Y.loc[:, self.Y.std() > 0]
self.M = M.loc[self.samples]
self.M2 = M2.loc[self.samples, self.X.columns] if M2 is not None else M2
self.normalize = normalize
self.fit_intercept = fit_intercept
self.copy_X = copy_X
self.n_jobs = n_jobs
self.verbose = verbose
self.log = logging.getLogger("Crispy")
def model_regressor(self):
regressor = LinearRegression(
fit_intercept=self.fit_intercept,
normalize=self.normalize,
copy_X=self.copy_X,
n_jobs=self.n_jobs,
)
return regressor
@staticmethod
def loglike(y_true, y_pred):
nobs = len(y_true)
nobs2 = nobs / 2.0
ssr = np.power(y_true - y_pred, 2).sum()
llf = -nobs2 * np.log(2 * np.pi) - nobs2 * np.log(ssr / nobs) - nobs2
return llf
@staticmethod
def multipletests_per(
associations, method="fdr_bh", field="pval", fdr_field="fdr", index_cols=None
):
index_cols = ["y_id"] if index_cols is None else index_cols
d_unique = {tuple(i) for i in associations[index_cols].values}
df = associations.set_index(index_cols)
df = pd.concat(
[
df.loc[i]
.assign(fdr=multipletests(df.loc[i, field], method=method)[1])
.rename(columns={"fdr": fdr_field})
for i in d_unique
]
).reset_index()
return df
def fit_matrix(self):
lms = []
for x_idx, x_var in enumerate(self.X):
if self.verbose > 0:
self.log.info(f"LM={x_var} ({x_idx})")
# Mask NaNs
x_ma = np.ma.mask_rowcols(self.X_ma[:, [x_idx]], axis=0)
# Build matrices
x = self.X.iloc[~x_ma.mask.any(axis=1), [x_idx]]
y = self.Y.iloc[~x_ma.mask.any(axis=1), :]
# Covariate matrix (remove invariable features and add noise)
m = self.M.iloc[~x_ma.mask.any(axis=1), :]
if self.M2 is not None:
m2 = self.M2.iloc[~x_ma.mask.any(axis=1), [x_idx]]
m = pd.concat([m2, m], axis=1)
m = m.loc[:, m.std() > 0]
m += np.random.normal(0, 1e-4, m.shape)
# Fit covariate model
lm_small = self.model_regressor().fit(m, y)
lm_small_ll = self.loglike(y, lm_small.predict(m))
# Fit full model: covariates + feature
lm_full_x = np.concatenate([m, x], axis=1)
lm_full = self.model_regressor().fit(lm_full_x, y)
lm_full_ll = self.loglike(y, lm_full.predict(lm_full_x))
# Log-ratio test
lr = 2 * (lm_full_ll - lm_small_ll)
lr_pval = chi2(1).sf(lr)
# Assemble + append results
res = pd.DataFrame(
dict(
y_id=y.columns,
x_id=x_var,
n=y.attrs["nan_mask"].loc[y.columns, x.index].sum(1) if "nan_mask" in y.attrs else len(x),
beta=lm_full.coef_[:, -1],
lr=lr.values,
covs=m.shape[1],
pval=lr_pval,
fdr=multipletests(lr_pval, method="fdr_bh")[1],
)
)
lms.append(res)
lms = pd.concat(lms, ignore_index=True).sort_values("pval")
return lms
@staticmethod
def lm_residuals(y, x, fit_intercept=True, add_intercept=False):
# Prepare input matrices
ys = y.dropna()
xs = x.loc[ys.index].dropna()
xs = xs.loc[:, xs.std() > 0]
ys = ys.loc[xs.index]
if ys.shape[0] <= xs.shape[1]:
return None
# Linear regression models
lm = LinearRegression(fit_intercept=fit_intercept).fit(xs, ys)
# Calculate residuals
residuals = ys - lm.predict(xs) - lm.intercept_
# Add intercept
if add_intercept:
residuals += lm.intercept_
return residuals
class LMModels:
""""
Class to perform the linear regression models
""" ""
RES_ORDER = [
"y_id",
"x_id",
"beta",
"beta_se",
"pval",
"fdr",
"nsamples",
"ncovariates",
]
def __init__(
self,
y,
x,
k=None,
m=None,
m2=None,
x_feature_type="all",
m2_feature_type="same_y",
add_intercept=True,
lik="normal",
transform_y="scale",
transform_x="scale",
transform_m2="scale",
x_min_events=None,
institute=True,
verbose=1,
):
# Misc
self.verbose = verbose
self.x_feature_type = x_feature_type
self.m2_feature_type = m2_feature_type
self.add_intercept = add_intercept
# LIMIX parameters
self.lik = lik
# Preprocessing steps
self.transform_y = transform_y
self.transform_x = transform_x
self.transform_m2 = transform_m2
self.x_min_events = x_min_events
# Build random effects and covariates matrices
self.k = self.kinship(x) if k is None else k.copy()
self.m = self.define_covariates(institute=institute) if m is None else m.copy()
# Samples overlap
self.samples = list(
set.intersection(
set(y.index),
set(x.index),
set(self.m.index),
set(self.k.index),
set(y.index) if m2 is None else set(m2.index),
)
)
LOG.info(f"Samples: {len(self.samples)}")
# Y matrix
self.y, self.y_columns = self.__build_y(y.copy())
# X matrix
self.x, self.x_columns = self.__build_x(x.copy())
# Covariates
self.m = self.m.loc[self.samples, self.m.std() > 0]
self.m, self.m_columns = self.m.values, np.array(list(self.m.columns))
# Random effects matrix
self.k = self.k.loc[self.samples, self.samples].values
LOG.info(
f"Y: {self.y.shape[1]}; X: {self.x.shape[1]}; M: {self.m.shape[1]}; K: {self.k.shape[1]}"
)
# Second covariates matrix
if m2 is not None:
self.m2, self.m2_columns = self.__build_m2(m2.copy())
LOG.info(f"M2: {self.m2.shape[1]}")
else:
self.m2, self.m2_columns = None, None
def __build_y(self, y):
"""
Method to build the y matrix.
:param y:
:return:
"""
y_ = self.transform_matrix(y.loc[self.samples], t_type=self.transform_y)
return y_.values, np.array(list(y_.columns))
def __build_m2(self, m2):
"""
Method to build the m2 matrix.
:param m2:
:return:
"""
m2_ = self.transform_matrix(m2.loc[self.samples], t_type=self.transform_m2)
return m2_.values, np.array(list(m2_.columns))
def __build_x(self, x):
"""
Method to build the x matrix.
:param x:
:return:
"""
x_ = x.loc[self.samples, x.std() > 0]
if self.x_min_events is not None:
x_ = x_.loc[:, x_.sum() >= self.x_min_events]
else:
x_ = self.transform_matrix(x_, t_type=self.transform_x)
return x_.values, np.array(list(x_.columns))
def __prepare_inputs__(self, y_var):
# Define samples with NaNs
y_idx = list(self.y_columns).index(y_var)
y_nans_idx = np.isnan(self.y[:, y_idx])
if self.verbose > 0:
LOG.info(f"y_id: {y_var} ({y_idx}); N samples: {sum(1 - y_nans_idx)}")
# Remove NaNs from y
y_ = self.y[y_nans_idx == 0][:, [y_idx]]
# Subset X
x_ = self.x[y_nans_idx == 0]
if self.x_feature_type == "drop_y":
if y_var not in self.x_columns:
LOG.warning(f"[x_feature_type=drop_y] Y feature {y_idx} not in X")
x_ = x_[:, self.x_columns != y_var]
x_vars = self.x_columns[self.x_columns != y_var]
elif self.x_feature_type == "same_y":
if y_var not in self.x_columns:
LOG.error(f"[x_feature_type=same_y] Y feature {y_idx} not in X")
x_ = x_[:, self.x_columns == y_var]
x_vars = self.x_columns[self.x_columns == y_var]
else:
x_vars = self.x_columns[np.std(x_, axis=0) > 0]
x_ = x_[:, np.std(x_, axis=0) > 0]
# Subset m
m_ = self.m[y_nans_idx == 0]
m_ = m_[:, np.std(m_, axis=0) > 0]
if (self.m2 is not None) and (self.m2_feature_type == "same_y"):
m_ = np.append(
m_, self.m2[y_nans_idx == 0][:, self.m2_columns == y_var], axis=1
)
if self.add_intercept:
m_ = np.insert(m_, m_.shape[1], values=1, axis=1)
# Subset random effects matrix
k_ = self.k[:, y_nans_idx == 0][y_nans_idx == 0, :]
return y_, y_nans_idx, x_, x_vars, m_, k_
@staticmethod
def log_likelihood(y_true, y_pred):
n = len(y_true)
ssr = np.power(y_true - y_pred, 2).sum()
var = ssr / n
l = np.longdouble(1 / (np.sqrt(2 * np.pi * var))) ** n * np.exp(
-(np.power(y_true - y_pred, 2) / (2 * var)).sum()
)
ln_l = np.log(l)
return float(ln_l)
def lmm(self, y_var):
"""
Linear regression method, using measurements of the y matrix for the variable specified by y_var.
:param y_var: String y variable name
:return: pandas.DataFrame of the associations
"""
import limix
y_, y_nans_idx, x_, x_vars, m_, k_ = self.__prepare_inputs__(y_var)
# Linear Mixed Model
lmm = limix.qtl.scan(G=x_, Y=y_, K=k_, M=m_, lik=self.lik, verbose=False)
# Build results
lmm_betas = lmm.effsizes["h2"].query("effect_type == 'candidate'")
lmm = pd.DataFrame(
dict(
y_id=y_var,
x_id=x_vars,
beta=list(lmm_betas["effsize"].round(5)),
beta_se=list(lmm_betas["effsize_se"].round(5)),
pval=list(lmm.stats.loc[lmm_betas["test"], "pv20"]),
nsamples=sum(1 - y_nans_idx),
ncovariates=m_.shape[1],
)
)
return lmm
def matrix_lmm(self, pval_adj="fdr_bh", pval_adj_overall=False):
# Iterate through Y variables
res = []
for y_var in self.y_columns:
res.append(self.lmm(y_var=y_var))
res = pd.concat(res, ignore_index=True)
# Multiple p-value correction
if pval_adj_overall:
res = res.assign(fdr=multipletests(res["pval"], method=pval_adj)[1])
else:
res = self.multipletests(res, field="pval", pval_method=pval_adj)
return res.sort_values("fdr")[self.RES_ORDER]
def write_lmm(self, output_folder):
for i in self.y_columns:
self.lmm(y_var=i).to_csv(
f"{output_folder}/{i}.csv.gz", index=False, compression="gzip"
)
@staticmethod
def multipletests(
parsed_results, pval_method="fdr_bh", field="pval", idx_cols=None
):
idx_cols = ["y_id"] if idx_cols is None else idx_cols
parsed_results_adj = []
for idx, df in parsed_results.groupby(idx_cols):
df = df.assign(fdr=multipletests(df[field], method=pval_method)[1])
parsed_results_adj.append(df)
parsed_results_adj = pd.concat(parsed_results_adj, ignore_index=True)
return parsed_results_adj
@staticmethod
def transform_matrix(matrix, t_type="scale", add_nan_mask=True, fillna_func=np.mean):
# Create mask attribute
mask_df = matrix.notna()
# Fill NaNs
if fillna_func is not None:
matrix = matrix.T.fillna(matrix.apply(fillna_func, axis=1)).T
# Type of transformation
if t_type == "scale":
from sklearn.preprocessing import StandardScaler
matrix = pd.DataFrame(
StandardScaler().fit_transform(matrix),
index=matrix.index,
columns=matrix.columns,
)
elif t_type == "rank":
matrix = matrix.rank(axis=1).values
else:
LOG.warning(
f"{t_type} transformation not supported. Original matrix returned."
)
if add_nan_mask:
matrix.attrs["nan_mask"] = mask_df.loc[matrix.index, matrix.columns]
return matrix
@staticmethod
def define_covariates(
std_filter=True,
medium=True,
tissuetype=True,
cancertype=True,
mburden=True,
ploidy=True,
institute=True,
):
from crispy.DataImporter import Sample
# Imports
samplesheet = Sample().samplesheet
# Covariates
covariates = []
# CRISPR institute of origin
if type(institute) is pd.Series:
covariates.append(pd.get_dummies(institute).astype(int))
elif institute is True:
covariates.append(pd.get_dummies(samplesheet["institute"]).astype(int))
# Cell lines culture conditions
if medium:
culture = pd.get_dummies(samplesheet["growth_properties"]).drop(
columns=["Unknown"]
)
covariates.append(culture)
# Cancer type
if cancertype:
ctype = pd.get_dummies(samplesheet["cancer_type"])
covariates.append(ctype)
# Cancer type
if tissuetype:
ttype = pd.get_dummies(samplesheet["tissue"])
covariates.append(ttype)
# Mutation burden
if mburden:
m_burdern = samplesheet["mutational_burden"]
covariates.append(m_burdern)
# Ploidy
if ploidy:
ploidy = samplesheet["ploidy"]
covariates.append(ploidy)
# Merge covariates
covariates = pd.concat(covariates, axis=1, sort=False)
# Remove covariates with zero standard deviation
if std_filter:
covariates = covariates.loc[:, covariates.std() > 0]
return covariates.dropna().astype(float)  # np.float was removed in newer NumPy releases
@staticmethod
def kinship(k, decimal_places=5, kinship=False):
if kinship:
from limix.stats import linear_kinship
K = pd.DataFrame(linear_kinship(k.values), index=k.index, columns=k.index)
else:
K = k.dot(k.T)
K /= K.values.diagonal().mean()
return K.round(decimal_places)
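# --- Illustrative sketch (editorial addition, not part of the package) ---
# Minimal example of the limix wiring used by lmm() above, on synthetic arrays.
# The array names, shapes and the toy kinship are assumptions for illustration only.
def _example_limix_scan():
    import numpy as np
    import limix

    rng = np.random.RandomState(0)
    n_samples, n_snps, n_covs = 50, 10, 3
    x = rng.randn(n_samples, n_snps)                  # candidate features (G)
    m = np.hstack([rng.randn(n_samples, n_covs),      # covariates (M)
                   np.ones((n_samples, 1))])          # intercept column
    k = x @ x.T / n_snps                              # simple sample-by-sample kinship (K)
    y = rng.randn(n_samples, 1)                       # phenotype (Y)

    res = limix.qtl.scan(G=x, Y=y, K=k, M=m, lik="normal", verbose=False)
    betas = res.effsizes["h2"].query("effect_type == 'candidate'")
    return betas[["effsize", "effsize_se"]]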
|
PypiClean
|
/geli-python-common-2.0.0.tar.gz/geli-python-common-2.0.0/src/geli_python_common/amqp/message_bus_connector.py
|
import attr
from abc import ABC, abstractmethod
from logbook import Logger
from typing import Type, List
from geli_python_common.amqp.subscriptions import AmqpSubscriptions, Subscriptions
from geli_python_common.dto.dto import Dto
logger = Logger(__name__)
@attr.s
class Message(ABC):
"""
Message intended for a message bus.
:ivar payload: body of the message in form of a Dto
"""
payload = attr.ib(type=Type[Dto])
resource = attr.ib(default=None)
@attr.s
class MessageBusConnector(ABC):
"""
Client class for connecting to a message bus.
"""
client_id = attr.ib()
_message_bus_port = attr.ib(type=int, converter=int)
_message_bus_host = attr.ib(default="127.0.0.1", type=str)
loop_in_own_thread = attr.ib(default=False, type=bool)
# subscriptions for all microservices
_subscriptions = attr.ib(default=attr.Factory(list))
message_bus_protocol = None
# subscriptions for the kubernetes service, which require special handling in this case; to be fixed in a later release
_k8s_subscriptions = attr.ib(default=None)
def __attrs_post_init__(self):
self._k8s_subscriptions = self._k8s_subscriptions if self._k8s_subscriptions else AmqpSubscriptions()
@abstractmethod
def send_message(self, message: Message) -> None:
"""
Publish message to message bus
"""
raise NotImplementedError()
def subscribe(self, subscriptions: List['Subscription']) -> None:
"""
Register callbacks for all microservices subscriptions.
"""
self._subscriptions.extend(subscriptions)
# add callbacks for new subscriptions
for subscription in subscriptions:
self._register_callback(subscription.target, subscription.callback.callback)
def k8s_subscribe(self, subscriptions: Type[Subscriptions]) -> None:
"""
Register callbacks for k8s service subscriptions.
"""
self._k8s_subscriptions.add_all(subscriptions)
# add callbacks for new subscriptions
for subscription, handle in subscriptions.subscriptions.items():
self._register_callback(subscription, handle)
def subscribe_single(self, subscription: 'Subscription') -> None:
"""
Register callbacks for single subscription.
"""
self._register_callback(subscription.target, subscription.callback.callback)
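# --- Illustrative sketch (editorial addition, not part of the package) ---
# A minimal in-memory connector showing how a concrete subclass could satisfy this
# interface. The _register_callback helper is assumed here, since the abstract base
# above references it without defining it; the dispatch-by-resource logic is invented.
@attr.s
class InMemoryMessageBusConnector(MessageBusConnector):
    _callbacks = attr.ib(factory=dict)

    def _register_callback(self, target, callback) -> None:
        # remember which callable should handle messages for this target
        self._callbacks.setdefault(target, []).append(callback)

    def send_message(self, message: Message) -> None:
        # deliver directly to registered callbacks instead of a real broker
        for callback in self._callbacks.get(message.resource, []):
            callback(message.payload)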
|
PypiClean
|
/accelbyte_py_sdk-0.48.0.tar.gz/accelbyte_py_sdk-0.48.0/accelbyte_py_sdk/api/eventlog/models/models_event_response_v2.py
|
# template file: ags_py_codegen
# AccelByte Gaming Services Event Log Service (2.1.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ..models.models_event_v2 import ModelsEventV2
from ..models.models_paging import ModelsPaging
class ModelsEventResponseV2(Model):
"""Models event response V2 (models.EventResponseV2)
Properties:
data: (data) REQUIRED List[ModelsEventV2]
paging: (paging) REQUIRED ModelsPaging
"""
# region fields
data: List[ModelsEventV2] # REQUIRED
paging: ModelsPaging # REQUIRED
# endregion fields
# region with_x methods
def with_data(self, value: List[ModelsEventV2]) -> ModelsEventResponseV2:
self.data = value
return self
def with_paging(self, value: ModelsPaging) -> ModelsEventResponseV2:
self.paging = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "data"):
result["data"] = [
i0.to_dict(include_empty=include_empty) for i0 in self.data
]
elif include_empty:
result["data"] = []
if hasattr(self, "paging"):
result["paging"] = self.paging.to_dict(include_empty=include_empty)
elif include_empty:
result["paging"] = ModelsPaging()
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls, data: List[ModelsEventV2], paging: ModelsPaging, **kwargs
) -> ModelsEventResponseV2:
instance = cls()
instance.data = data
instance.paging = paging
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> ModelsEventResponseV2:
instance = cls()
if not dict_:
return instance
if "data" in dict_ and dict_["data"] is not None:
instance.data = [
ModelsEventV2.create_from_dict(i0, include_empty=include_empty)
for i0 in dict_["data"]
]
elif include_empty:
instance.data = []
if "paging" in dict_ and dict_["paging"] is not None:
instance.paging = ModelsPaging.create_from_dict(
dict_["paging"], include_empty=include_empty
)
elif include_empty:
instance.paging = ModelsPaging()
return instance
@classmethod
def create_many_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> Dict[str, ModelsEventResponseV2]:
return (
{k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()}
if dict_
else {}
)
@classmethod
def create_many_from_list(
cls, list_: list, include_empty: bool = False
) -> List[ModelsEventResponseV2]:
return (
[cls.create_from_dict(i, include_empty=include_empty) for i in list_]
if list_
else []
)
@classmethod
def create_from_any(
cls, any_: Any, include_empty: bool = False, many: bool = False
) -> Union[
ModelsEventResponseV2,
List[ModelsEventResponseV2],
Dict[Any, ModelsEventResponseV2],
]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"data": "data",
"paging": "paging",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"data": True,
"paging": True,
}
# endregion static methods
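# --- Illustrative sketch (editorial addition, not part of the generated SDK) ---
# Round-trip example: build a response model, serialize it, and parse it back.
# The nested payloads are left empty here purely for illustration.
def _example_round_trip() -> ModelsEventResponseV2:
    response = ModelsEventResponseV2.create(data=[], paging=ModelsPaging())
    as_dict = response.to_dict(include_empty=True)  # {"data": [], "paging": {...}}
    return ModelsEventResponseV2.create_from_dict(as_dict)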
|
PypiClean
|
/auto/dataset.py
|
import os
import time
import tempfile
import numpy as np
import pytorch_lightning as pl
from typing import Iterable, List, Dict, Union, Any, Tuple, Callable
from torch.utils.data import IterableDataset
from pytorch_lightning.loggers.base import LoggerCollection
from machin.utils.media import create_video, numpy_array_to_pil_image
Scalar = Any
def determine_precision(models):
dtype = set()
for model in models:
for k, v in model.named_parameters():
dtype.add(v.dtype)
dtype = list(dtype)
if len(dtype) > 1:
raise RuntimeError(
"Multiple data types of parameters detected "
f"in models: {dtype}, this is currently not supported "
"since we need to determine the data type of your "
"model input from your model parameter data type."
)
return dtype[0]
def get_loggers_as_list(module: pl.LightningModule):
if isinstance(module.logger, LoggerCollection):
return module.logger._logger_iterable
else:
return [module.logger]
def log_image(module, name, image: np.ndarray):
for logger in get_loggers_as_list(module):
if hasattr(logger, "log_image") and callable(logger.log_image):
logger.log_image(name, numpy_array_to_pil_image(image))
def log_video(module, name, video_frames: List[np.ndarray]):
# create video temp file
fd, path = tempfile.mkstemp(suffix=".gif")
os.close(fd)
try:
create_video(
video_frames,
os.path.dirname(path),
os.path.basename(os.path.splitext(path)[0]),
extension=".gif",
)
except Exception as e:
print(e)
os.remove(path)
return
size = os.path.getsize(path)
while True:
time.sleep(1)
new_size = os.path.getsize(path)
if size != 0 and new_size == size:
break
size = new_size
for logger in get_loggers_as_list(module):
if hasattr(logger, "log_artifact") and callable(logger.log_artifact):
logger.log_artifact(path, name + ".gif")
if os.path.exists(path):
os.remove(path)
class DatasetResult:
def __init__(
self,
observations: List[Dict[str, Any]] = None,
logs: List[Dict[str, Union[Scalar, Tuple[Scalar, str]]]] = None,
):
self.observations = observations or []
self.logs = logs or []
def add_observation(self, obs: Dict[str, Any]):
self.observations.append(obs)
def add_log(self, log: Dict[str, Union[Scalar, Tuple[Any, Callable]]]):
self.logs.append(log)
def __len__(self):
return len(self.observations)
class RLDataset(IterableDataset):
"""
Base class for all RL Datasets.
"""
early_stopping_monitor = ""
def __init__(self, **_kwargs):
super().__init__()
def __iter__(self) -> Iterable:
return self
def __next__(self):
raise StopIteration()
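# --- Illustrative sketch (editorial addition, not part of the package) ---
# A tiny concrete dataset showing how RLDataset and DatasetResult fit together:
# each iteration yields one DatasetResult carrying observations and log entries.
# The episode logic and field names are invented for illustration.
class ConstantRewardDataset(RLDataset):
    early_stopping_monitor = "total_reward"

    def __init__(self, episodes: int = 3):
        super().__init__()
        self._remaining = episodes

    def __next__(self) -> DatasetResult:
        if self._remaining <= 0:
            raise StopIteration()
        self._remaining -= 1
        result = DatasetResult()
        result.add_observation({"state": np.zeros(4), "action": 0, "reward": 1.0})
        result.add_log({"total_reward": 1.0})
        return result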
|
PypiClean
|
/ScarletNBA-0.0.3.tar.gz/ScarletNBA-0.0.3/training/training_utils.py
|
import os
from math import sqrt
from dateutil import rrule, parser
from itertools import compress
import numpy as np
from sklearn.preprocessing import LabelBinarizer, LabelEncoder, OneHotEncoder, MultiLabelBinarizer, Imputer, RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error
# import matplotlib.pyplot as plt
#deploying pickle files to gcs
import pickle
from datetime import datetime
from gcloud import storage
from tempfile import NamedTemporaryFile
def generate_date_list(date1, date2):
return [x.strftime("%Y-%m-%d") for x in list(rrule.rrule(rrule.DAILY,
dtstart=parser.parse(date1),
until=parser.parse(date2)))]
def get_rolling_game_avgs(df, index_on, games=20):
# Given a dataframe and an index and number of games, compute the rolling averages (do this for player and opponent/pos)
_df = df.groupby(index_on, as_index=False, group_keys=False).rolling(games).mean().reset_index().drop(["index"], axis=1).fillna(0)
df_transformed = _df.set_index(["date"] + index_on).select_dtypes([np.number])
new_col_names = ["{col}_{index_type}_{rolling}g".format(col=c,
index_type = '_'.join(index_on),
rolling=games) for c in df_transformed.columns]
df_transformed.columns = new_col_names
return df_transformed
def model_bakeoff(model, df, dependent_var, test_size, random_state=42):
#make a copy of the model if the accuracy is low, then train on everything
model_deploy = model
X = df.drop([dependent_var], axis=1)
y = df[dependent_var]
numerics = X.select_dtypes([np.number]).columns
categoricals = [x for x in X.columns if x not in numerics]
categorical_pipeline = FeatureUnion(categorical_binarizer(categoricals))
numerical_pipeline = Pipeline([("selector", DataFrameSelector(numerics)),
("imputer", Imputer(strategy="median")),
("rob_scaler", RobustScaler())])
complete_pipeline = FeaturePipeline([
('join_features', FeatureUnion([
('numerical', numerical_pipeline),
('categorical', categorical_pipeline)
]))
])
try:
X_transformed = complete_pipeline.fit_transform(X)
except Exception:  # fall back to numeric-only features if the categorical pipeline fails
X_transformed = X.select_dtypes([np.number])
print(len(X_transformed.columns))
X_train, X_test, y_train, y_test = train_test_split(X_transformed, y, test_size=test_size, random_state=random_state)
model.fit(X=X_train, y=y_train)
y_hat = model.predict(X_test)
testing_accuracy = sqrt(mean_squared_error(y_pred=y_hat, y_true=y_test))
training_accuracy = sqrt(mean_squared_error(y_pred=model.predict(X_train), y_true=y_train))
is_overfit = abs(testing_accuracy - training_accuracy) > 1
if testing_accuracy < 10:
model_deploy.fit(X_transformed, y)
return {"model":model_deploy,
"preprocessing":complete_pipeline,
"accuracy":testing_accuracy,
"overfit":is_overfit}
else:
return {"model":model,
"preprocessing":complete_pipeline,
"accuracy":testing_accuracy,
"overfit":is_overfit}
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names=attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
class FeaturePipeline(Pipeline):
def get_feature_names(self):
feature_names = []
mask = []
for step_name, step in self.steps:
if type(step) is LabelBinarizer:
if step.y_type_ == 'multiclass':
feature_names = [f for f in step.classes_]
if step.y_type_ == 'binary':
feature_names = ['binary']
if type(step) is DataFrameSelector:
feature_names = [f for f in step.attribute_names]
if hasattr(step, 'get_feature_names'):
feature_names.extend([f for f in step.get_feature_names()])
if hasattr(step, 'get_support'):
if len(mask) > 0:
mask = mask & step.get_support()
else:
mask = step.get_support()
if len(mask) > 0:
feature_names = list(compress(feature_names, mask))
return feature_names
def categorical_binarizer(categorical_features):
pipelines = []
for f in categorical_features:
pipelines.append((f, Pipeline([("selector", DataFrameSelector(f)),
("Binarizer", LabelBinarizer())])))
return(pipelines)
def deploy_pickle(obj, project_id, bucket, destination_path, filename, local=False):
if local:
client = storage.Client.from_service_account_json(project=project_id,
json_credentials_path='../scarlet-labs-2e06fe082fb4.json')
else:
client = storage.Client(project=project_id)
with NamedTemporaryFile(mode='wb') as temp:
pickle.dump(obj, temp)
temp.seek(0)
gcs_path = os.path.join(destination_path, datetime.today().strftime("%Y%m%d"), '{filename}.pkl'.format(filename=filename))
client.bucket(bucket).blob(gcs_path).upload_from_filename(temp.name)
def load_pipeline(project_id, bucket, destination_path, filename, local=False):
if local:
client = storage.Client.from_service_account_json(project=project_id,
json_credentials_path='../scarlet-labs-2e06fe082fb4.json')
else:
client = storage.Client(project=project_id)
with NamedTemporaryFile(mode='rb') as tempfile:
gcs_path = os.path.join(destination_path, '{filename}.pkl'.format(filename=filename))
client.bucket(bucket).blob(gcs_path).download_to_filename(tempfile.name)
tempfile.seek(0)
return pickle.load(tempfile)
def check_buckets():
storage_client = storage.Client.from_service_account_json(project='scarlet-labs',
json_credentials_path='../scarlet-labs-2e06fe082fb4.json')
buckets = list(storage_client.list_buckets())
print(buckets)
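# --- Illustrative sketches (editorial additions, not part of the package) ---
# Small demos of two helpers defined above; the column names in the toy frame
# are assumptions for illustration only.
def _example_date_list():
    # inclusive of both endpoints
    return generate_date_list("2018-01-01", "2018-01-03")
    # -> ['2018-01-01', '2018-01-02', '2018-01-03']

def _example_selector():
    import pandas as pd
    toy = pd.DataFrame({"pts": [10, 20], "pos": ["PG", "SG"]})
    # DataFrameSelector returns the selected columns as a plain ndarray
    return DataFrameSelector(["pts"]).fit_transform(toy)  # -> array([[10], [20]])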
|
PypiClean
|
/django-skylark-0.4.6.tar.gz/django-skylark-0.4.6/src/skylark/templates/chirp/media/dojox/grid/enhanced/plugins/Exporter.js
|
if(!dojo._hasResource["dojox.grid.enhanced.plugins.Exporter"]){ //_hasResource checks added by build. Do not use _hasResource directly in your code.
dojo._hasResource["dojox.grid.enhanced.plugins.Exporter"] = true;
dojo.provide("dojox.grid.enhanced.plugins.Exporter");
dojo.require("dojox.grid.enhanced._Plugin");
dojo.require("dojox.grid._RowSelector");
dojo.declare("dojox.grid.enhanced.plugins.Exporter", dojox.grid.enhanced._Plugin, {
// summary:
// Provide functions to export the grid data into a given format.
//
// Acceptable plugin parameters:
// 1. exportFormatter: function(data, cell, rowIndex, item)
// Provide a way to customize how data should look in exported string.
// Note that usually the formatter of grid cell should not be used here (it can return HTML or even widget).
// example:
// | function onExported(exported_text){
// | //custom code here...
// | }
// | dijit.byId("my_grid_id").exportTo("csv", //registered export format, mandatory
// | { //the whole object is optional.
// | fetchArgs: {start:0,count:1000}, //keywordArgs for fetch, optional
// | writerArgs: {separator:';'}, //export writer specific arguments, optional
// | },
// | function(str){
// | //call back function, mandatory
// | });
// | var result = dijit.byId("my_grid_id").exportSelectedTo("table", //registered export format, mandatory
// | {separator:'|'} //export writer specific arguments, optional
// | );
//
// name: String
// Plugin name.
name: "exporter",
constructor: function(grid, args){
// summary:
// only newed by _Plugin
// grid: EnhancedGrid
// The grid to plug in to.
this.grid = grid;
this.formatter = (args && dojo.isObject(args)) && args.exportFormatter;
this._mixinGrid();
},
_mixinGrid: function(){
var g = this.grid;
g.exportTo = dojo.hitch(this, this.exportTo);
g.exportGrid = dojo.hitch(this, this.exportGrid);
g.exportSelected = dojo.hitch(this, this.exportSelected);
g.setExportFormatter = dojo.hitch(this, this.setExportFormatter);
},
setExportFormatter: function(formatter){
this.formatter = formatter;
},
exportGrid: function(type, args, onExported){
// summary:
// Export required rows(fetchArgs) to a kind of format(type)
// using the corresponding writer with given arguments(writerArgs),
// then pass the exported text to a given function(onExported).
// tags:
// public
// type: string
// A registered export format name
// args: object?
// includes:
// {
// fetchArgs: object?
// Any arguments for store.fetch
// writerArgs: object?
// Arguments for the given format writer
// }
// onExported: function(string)
// Call back function when export result is ready
if(dojo.isFunction(args)){
onExported = args;
args = {};
}
if(!dojo.isString(type) || !dojo.isFunction(onExported)){
return;
}
args = args || {};
var g = this.grid, _this = this,
writer = this._getExportWriter(type, args.writerArgs),
fetchArgs = (args.fetchArgs && dojo.isObject(args.fetchArgs)) ? args.fetchArgs : {},
oldFunc = fetchArgs.onComplete;
if(g.store){
fetchArgs.onComplete = function(items, request){
if(oldFunc){
oldFunc(items, request);
}
onExported(_this._goThroughGridData(items, writer));
};
fetchArgs.sort = fetchArgs.sort || g.getSortProps();
g._storeLayerFetch(fetchArgs);
}else{
//Data is defined directly in the structure;
var start = fetchArgs.start || 0,
count = fetchArgs.count || -1,
items = [];
for(var i = start; i != start + count && i < g.rowCount; ++i){
items.push(g.getItem(i));
}
onExported(this._goThroughGridData(items, writer));
}
},
exportSelected: function(type, writerArgs){
// summary:
// Only export selected rows.
// tags:
// public
// type: string
// A registered export format name
// writerArgs: object?
// Arguments for the given format writer
// returns: string
// The exported string
if(!dojo.isString(type)){
return "";
}
var writer = this._getExportWriter(type, writerArgs);
return this._goThroughGridData(this.grid.selection.getSelected(), writer); //String
},
_buildRow: function(/* object */arg_obj,/* ExportWriter */writer){
// summary:
// Use the given export writer(writer) to go through a single row
// which is given in the context object(arg_obj).
// tags:
// private
// returns:
// undefined
var _this = this;
dojo.forEach(arg_obj._views, function(view, vIdx){
arg_obj.view = view;
arg_obj.viewIdx = vIdx;
if(writer.beforeView(arg_obj)){
dojo.forEach(view.structure.cells, function(subrow, srIdx){
arg_obj.subrow = subrow;
arg_obj.subrowIdx = srIdx;
if(writer.beforeSubrow(arg_obj)){
dojo.forEach(subrow, function(cell, cIdx){
if(arg_obj.isHeader && _this._isSpecialCol(cell)){
arg_obj.spCols.push(cell.index);
}
arg_obj.cell = cell;
arg_obj.cellIdx = cIdx;
writer.handleCell(arg_obj);
});
writer.afterSubrow(arg_obj);
}
});
writer.afterView(arg_obj);
}
});
},
_goThroughGridData: function(/* Array */items,/* ExportWriter */writer){
// summary:
// Use the given export writer(writer) to go through the grid structure
// and the given rows(items), then return the writer output.
// tags:
// private
var grid = this.grid,
views = dojo.filter(grid.views.views, function(view){
return !(view instanceof dojox.grid._RowSelector);
}),
arg_obj = {
'grid': grid,
'isHeader': true,
'spCols': [],
'_views': views,
'colOffset': (views.length < grid.views.views.length ? -1 : 0)
};
//go through header
if(writer.beforeHeader(grid)){
this._buildRow(arg_obj,writer);
writer.afterHeader();
}
//go through content
arg_obj.isHeader = false;
if(writer.beforeContent(items)){
dojo.forEach(items, function(item, rIdx){
arg_obj.row = item;
arg_obj.rowIdx = rIdx;
if(writer.beforeContentRow(arg_obj)){
this._buildRow(arg_obj, writer);
writer.afterContentRow(arg_obj);
}
}, this);
writer.afterContent();
}
return writer.toString();
},
_isSpecialCol: function(/* dojox.grid.__CellDef */header_cell){
// summary:
// Row selectors and row indexes should be recognized and handled separately.
// tags:
// private
return header_cell.isRowSelector || header_cell instanceof dojox.grid.cells.RowIndex; //Boolean
},
_getExportWriter: function(/* string */ fileType, /* object? */ writerArgs){
// summary:
// Use the given export format type(fileType)
// and writer arguments(writerArgs) to create
// a ExportWriter and return it.
// tags:
// private
var writerName, cls,
expCls = dojox.grid.enhanced.plugins.Exporter;
if(expCls.writerNames){
writerName = expCls.writerNames[fileType.toLowerCase()];
cls = dojo.getObject(writerName);
if(cls){
var writer = new cls(writerArgs);
writer.formatter = this.formatter;
return writer; //ExportWriter
}else{
throw new Error('Please make sure class "' + writerName + '" is required.');
}
}
throw new Error('The writer for "' + fileType + '" has not been registered.');
}
});
dojox.grid.enhanced.plugins.Exporter.registerWriter = function(/* string */fileType,/* string */writerClsName){
// summary:
// Register a writer(writerClsName) to a export format type(fileType).
// This function separates the Exporter from all kinds of writers.
// tags:
// public
var expCls = dojox.grid.enhanced.plugins.Exporter;
expCls.writerNames = expCls.writerNames || {};
expCls.writerNames[fileType] = writerClsName;
};
dojox.grid.EnhancedGrid.registerPlugin(dojox.grid.enhanced.plugins.Exporter/*name:'exporter'*/);
}
|
PypiClean
|
/django_api_data_cache-0.2.1.tar.gz/django_api_data_cache-0.2.1/api_data_cache/search_filter.py
|
from rest_framework import filters
import re
class RefinerSearchFilter(filters.SearchFilter):
search_field_dict = {}
custom_property_entity = None
def filter_queryset(self, request, queryset, view):
# 1 - search in regular fields that were pre-defined in the view()
qs_regular_search = super().filter_queryset(request, queryset, view)
self.custom_property_entity = view.search_custom_property_entity
search_results_ids = []
try:
included_by_requester = queryset.filter(id__in=view.include_in_search_results)
except Exception:  # the view may not define include_in_search_results
included_by_requester = queryset.none()
# combine: (1) regular field matches, (2) ids found via custom-property search, (3) ids explicitly included by the requester
queryset = qs_regular_search | \
queryset.filter(id__in=search_results_ids) | \
included_by_requester
return queryset.distinct()
def get_search_terms(self, request):
"""
Search terms are set by a ?search=... query parameter,
and may be comma and/or whitespace delimited.
"""
params, search_field_dict = strip_search_fields(request.query_params.get(self.search_param, ''))
self.search_field_dict = search_field_dict
return params.replace(',', ' ').split()
def strip_search_fields(qsearch):
if not qsearch:
return '', {}
search_text = re.sub(':', ' ', re.sub(r'\{([^\{]*)\}', ' ', qsearch))
search_field = {}
sub_string_brackets = re.search(r'\{([^\{]*)\}', qsearch)
if sub_string_brackets:
segments = sub_string_brackets.group().split(':')
for idx in range(0, len(segments), 1):
value = ''
label = segments[idx]
between_quotes = re.findall('"([^"]*)"', label)
if between_quotes: label = between_quotes[-1]
if idx < (len(segments) - 1):
value = segments[idx + 1]
between_quotes = re.findall('"([^"]*)"', value)
if between_quotes: value = between_quotes[0]
if value:
search_field[label] = value
return search_text, search_field
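# --- Illustrative worked example (editorial addition) ---
# strip_search_fields('lebron {"team":"lakers"}') returns roughly:
#   search_text  -> 'lebron  '            (the {...} block and ':' separators removed)
#   search_field -> {'team': 'lakers'}    (label/value pairs parsed from the braces)
# The exact whitespace in search_text depends on the substitutions above.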
|
PypiClean
|
/redshift-console-0.1.3.tar.gz/redshift-console-0.1.3/redshift_console/static/dist/bower_components/momentjs/locale/it.js
|
(function (factory) {
if (typeof define === 'function' && define.amd) {
define(['moment'], factory); // AMD
} else if (typeof exports === 'object') {
module.exports = factory(require('../moment')); // Node
} else {
factory((typeof global !== 'undefined' ? global : this).moment); // node or other global
}
}(function (moment) {
return moment.defineLocale('it', {
months : 'gennaio_febbraio_marzo_aprile_maggio_giugno_luglio_agosto_settembre_ottobre_novembre_dicembre'.split('_'),
monthsShort : 'gen_feb_mar_apr_mag_giu_lug_ago_set_ott_nov_dic'.split('_'),
weekdays : 'Domenica_Lunedì_Martedì_Mercoledì_Giovedì_Venerdì_Sabato'.split('_'),
weekdaysShort : 'Dom_Lun_Mar_Mer_Gio_Ven_Sab'.split('_'),
weekdaysMin : 'D_L_Ma_Me_G_V_S'.split('_'),
longDateFormat : {
LT : 'HH:mm',
LTS : 'LT:ss',
L : 'DD/MM/YYYY',
LL : 'D MMMM YYYY',
LLL : 'D MMMM YYYY LT',
LLLL : 'dddd, D MMMM YYYY LT'
},
calendar : {
sameDay: '[Oggi alle] LT',
nextDay: '[Domani alle] LT',
nextWeek: 'dddd [alle] LT',
lastDay: '[Ieri alle] LT',
lastWeek: function () {
switch (this.day()) {
case 0:
return '[la scorsa] dddd [alle] LT';
default:
return '[lo scorso] dddd [alle] LT';
}
},
sameElse: 'L'
},
relativeTime : {
future : function (s) {
return ((/^[0-9].+$/).test(s) ? 'tra' : 'in') + ' ' + s;
},
past : '%s fa',
s : 'alcuni secondi',
m : 'un minuto',
mm : '%d minuti',
h : 'un\'ora',
hh : '%d ore',
d : 'un giorno',
dd : '%d giorni',
M : 'un mese',
MM : '%d mesi',
y : 'un anno',
yy : '%d anni'
},
ordinalParse : /\d{1,2}º/,
ordinal: '%dº',
week : {
dow : 1, // Monday is the first day of the week.
doy : 4 // The week that contains Jan 4th is the first week of the year.
}
});
}));
|
PypiClean
|
/trixie-0.1.2.tar.gz/trixie-0.1.2/homeassistant/components/persistent_notification/__init__.py
|
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
from homeassistant.loader import bind_hass
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.util import slugify
ATTR_MESSAGE = 'message'
ATTR_NOTIFICATION_ID = 'notification_id'
ATTR_TITLE = 'title'
DOMAIN = 'persistent_notification'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SERVICE_CREATE = 'create'
SERVICE_DISMISS = 'dismiss'
SCHEMA_SERVICE_CREATE = vol.Schema({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
vol.Optional(ATTR_NOTIFICATION_ID): cv.string,
})
SCHEMA_SERVICE_DISMISS = vol.Schema({
vol.Required(ATTR_NOTIFICATION_ID): cv.string,
})
DEFAULT_OBJECT_ID = 'notification'
_LOGGER = logging.getLogger(__name__)
STATE = 'notifying'
@bind_hass
def create(hass, message, title=None, notification_id=None):
"""Generate a notification."""
hass.add_job(async_create, hass, message, title, notification_id)
@bind_hass
def dismiss(hass, notification_id):
"""Remove a notification."""
hass.add_job(async_dismiss, hass, notification_id)
@callback
@bind_hass
def async_create(hass, message, title=None, notification_id=None):
"""Generate a notification."""
data = {
key: value for key, value in [
(ATTR_TITLE, title),
(ATTR_MESSAGE, message),
(ATTR_NOTIFICATION_ID, notification_id),
] if value is not None
}
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_CREATE, data))
@callback
@bind_hass
def async_dismiss(hass, notification_id):
"""Remove a notification."""
data = {ATTR_NOTIFICATION_ID: notification_id}
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_DISMISS, data))
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the persistent notification component."""
@callback
def create_service(call):
"""Handle a create notification service call."""
title = call.data.get(ATTR_TITLE)
message = call.data.get(ATTR_MESSAGE)
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
if notification_id is not None:
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
else:
entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, DEFAULT_OBJECT_ID, hass=hass)
attr = {}
if title is not None:
try:
title.hass = hass
title = title.async_render()
except TemplateError as ex:
_LOGGER.error('Error rendering title %s: %s', title, ex)
title = title.template
attr[ATTR_TITLE] = title
try:
message.hass = hass
message = message.async_render()
except TemplateError as ex:
_LOGGER.error('Error rendering message %s: %s', message, ex)
message = message.template
attr[ATTR_MESSAGE] = message
hass.states.async_set(entity_id, STATE, attr)
@callback
def dismiss_service(call):
"""Handle the dismiss notification service call."""
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
hass.states.async_remove(entity_id)
hass.services.async_register(DOMAIN, SERVICE_CREATE, create_service,
SCHEMA_SERVICE_CREATE)
hass.services.async_register(DOMAIN, SERVICE_DISMISS, dismiss_service,
SCHEMA_SERVICE_DISMISS)
return True
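# --- Illustrative sketch (editorial addition, not part of the component) ---
# How another component or script would typically raise and clear a notification
# through the helpers above (the id and text here are examples only):
#
#   from homeassistant.components import persistent_notification
#
#   persistent_notification.async_create(
#       hass, "Battery below 10%", title="Low battery", notification_id="low_battery")
#   persistent_notification.async_dismiss(hass, notification_id="low_battery")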
|
PypiClean
|
/nso_oc-2.79.2-py3-none-any.whl/package_nso_to_oc/xe/xe_network_instances.py
|
import sys
from importlib.util import find_spec
import copy

# The translator helpers below (common_xe, xe_ospfv2, xe_static_route, xe_bgp,
# xe_mpls) are referenced throughout this module; the sibling-package import path
# is assumed here based on this package's layout.
from package_nso_to_oc.xe import common_xe, xe_ospfv2, xe_static_route, xe_bgp, xe_mpls
network_instances_notes = []
openconfig_network_instances = {
"openconfig-network-instance:network-instances": {
"openconfig-network-instance:network-instance": [
{
"openconfig-network-instance:name": "default",
"openconfig-network-instance:config": {
"openconfig-network-instance:name": "default",
"openconfig-network-instance:type": "DEFAULT_INSTANCE",
"openconfig-network-instance:enabled": "true"
},
"openconfig-network-instance:protocols": {"openconfig-network-instance:protocol": []},
"openconfig-network-instance:interfaces": {"openconfig-network-instance:interface": []},
"openconfig-network-instance:vlans": {}
}
]
}
}
def generate_list_indexes_to_delete(a_list: list, greatest_length: int) -> list:
delete_indexes = []
for i in a_list:
if len(i) <= greatest_length:
delete_indexes.append(a_list.index(i))
delete_indexes.sort(reverse=True)
return delete_indexes
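# --- Illustrative worked example (editorial addition) ---
# generate_list_indexes_to_delete([{"name": "a"}, {}, {"name": "b", "rd": "1:1"}], 1)
# returns [1, 0]: entries 0 and 1 have length <= 1, and the indexes are sorted in
# reverse so callers can delete them without shifting the remaining positions.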
def xe_network_instances(config_before: dict, config_leftover: dict) -> None:
"""
Translates NSO XE NED to MDD OpenConfig Network Instances
"""
if config_before.get("tailf-ned-cisco-ios:vrf", {}).get("definition"):
for vrf_index, vrf in enumerate(config_before.get("tailf-ned-cisco-ios:vrf", {}).get("definition")):
if vrf.get("address-family"):
address_families = []
for key in vrf.get("address-family").keys():
if key == "ipv4":
address_families.append("openconfig-types:IPV4")
if key == "ipv6":
address_families.append("openconfig-types:IPV6")
temp_vrf = {
"openconfig-network-instance:name": vrf["name"],
"openconfig-network-instance:config": {
"openconfig-network-instance:name": vrf["name"],
"openconfig-network-instance:type": "L3VRF",
"openconfig-network-instance:enabled": "true",
"openconfig-network-instance:enabled-address-families": address_families
},
"openconfig-network-instance:protocols": {"openconfig-network-instance:protocol": []},
"openconfig-network-instance:interfaces": {"openconfig-network-instance:interface": []}
}
process_rd_rt(temp_vrf, vrf, vrf_index, config_leftover)
if vrf.get("description"):
temp_vrf["openconfig-network-instance:config"]["openconfig-network-instance:description"] = vrf.get(
"description")
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"][vrf_index]["description"]
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"][vrf_index]["address-family"]
openconfig_network_instances["openconfig-network-instance:network-instances"][
"openconfig-network-instance:network-instance"].append(temp_vrf)
# Clean up VRF remaining
indexes_to_remove = generate_list_indexes_to_delete(
config_leftover.get("tailf-ned-cisco-ios:vrf", {}).get("definition", []), 1)
if indexes_to_remove:
for vrf_index in indexes_to_remove:
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"][vrf_index]
if not config_leftover["tailf-ned-cisco-ios:vrf"]["definition"]:
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"]
if len(config_leftover["tailf-ned-cisco-ios:vrf"]) == 0:
del config_leftover["tailf-ned-cisco-ios:vrf"]
interfaces_by_vrf = get_interfaces_by_vrf(config_before)
route_forwarding_list_by_vrf = get_route_forwarding_list_by_vrf(config_before)
configure_network_instances(config_before, config_leftover, interfaces_by_vrf, route_forwarding_list_by_vrf)
if type(config_before.get("tailf-ned-cisco-ios:ip", {}).get("multicast-routing", {}).get("distributed", '')) is list:
configure_pim_network_instance(config_before, config_leftover)
configure_igmp_network_instance(config_before, config_leftover)
configure_cgmp_network_instance(config_before, config_leftover)
cleanup_null_ospf_leftovers(config_leftover)
cleanup_null_static_route_leftovers(config_leftover)
cleanup_null_bgp_leftovers(config_before, config_leftover)
def get_interfaces_by_vrf(config_before):
interfaces_by_vrf = {}
interfaces = config_before.get("tailf-ned-cisco-ios:interface", {})
for interface_type, interface_list in interfaces.items():
if interface_type == "Port-channel-subinterface":
interface_type = "Port-channel"
interface_list = interface_list[interface_type]
for interface in interface_list:
if (not "ip" in interface or not "address" in interface["ip"]
or not "primary" in interface["ip"]["address"] or not "address" in interface["ip"]["address"][
"primary"]):
continue
interface_copy = copy.deepcopy(interface)
interface_copy["type"] = interface_type
# Ensure we get a string type
interface_copy["name"] = str(interface_copy["name"])
if "vrf" in interface_copy and "forwarding" in interface_copy["vrf"]:
vrf_name = interface_copy["vrf"]["forwarding"]
else:
vrf_name = "default"
if not vrf_name in interfaces_by_vrf:
interfaces_by_vrf[vrf_name] = []
interfaces_by_vrf[vrf_name].append(interface_copy)
return interfaces_by_vrf
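# --- Illustrative note (editorial addition) ---
# Shape of the value returned by get_interfaces_by_vrf: interfaces are grouped by
# their "vrf forwarding" name, falling back to "default", e.g.
#   {"default": [{"type": "GigabitEthernet", "name": "1", "ip": {...}, ...}],
#    "mgmt":    [{"type": "GigabitEthernet", "name": "0/0", ...}]}
# (the interface and VRF names here are invented for illustration).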
def get_route_forwarding_list_by_vrf(config_before):
route_forwarding_list_by_vrf = {}
ip_obj = config_before.get("tailf-ned-cisco-ios:ip", {"route": {}}).get("route", {})
route_forwarding_list_by_vrf["default"] = {
common_xe.IP_FORWARDING_LIST: copy.deepcopy(ip_obj.get(common_xe.IP_FORWARDING_LIST, [])),
common_xe.INTF_LIST: copy.deepcopy(ip_obj.get(common_xe.INTF_LIST, [])),
common_xe.IP_INTF_FORWARDING_LIST: copy.deepcopy(ip_obj.get(common_xe.IP_INTF_FORWARDING_LIST, []))
}
for index, vrf in enumerate(ip_obj.get("vrf", [])):
route_forwarding_list_by_vrf[vrf["name"]] = {
"vrf-index": index,
common_xe.IP_FORWARDING_LIST: copy.deepcopy(vrf.get(common_xe.IP_FORWARDING_LIST, [])),
common_xe.INTF_LIST: copy.deepcopy(vrf.get(common_xe.INTF_LIST, [])),
common_xe.IP_INTF_FORWARDING_LIST: copy.deepcopy(vrf.get(common_xe.IP_INTF_FORWARDING_LIST, []))
}
return route_forwarding_list_by_vrf
def build_router_ospf_by_vrf(config_before):
router_ospf_by_vrf = {}
for index, ospf in enumerate(config_before.get("tailf-ned-cisco-ios:router", {}).get("ospf", [])):
if "vrf" in ospf:
vrf_name = ospf["vrf"]
else:
vrf_name = "default"
if not vrf_name in router_ospf_by_vrf:
router_ospf_by_vrf[vrf_name] = []
router_ospf_by_vrf[vrf_name].append(index)
return router_ospf_by_vrf
def configure_network_instances(config_before, config_leftover, interfaces_by_vrf, route_forwarding_list_by_vrf):
router_ospf_by_vrf = build_router_ospf_by_vrf(config_before)
for net_inst in openconfig_network_instances["openconfig-network-instance:network-instances"][
"openconfig-network-instance:network-instance"]:
configure_network_interfaces(net_inst, interfaces_by_vrf)
if len(interfaces_by_vrf.get(net_inst["openconfig-network-instance:name"], [])) > 0:
vrf_interfaces = interfaces_by_vrf.get(net_inst["openconfig-network-instance:name"])
xe_ospfv2.configure_xe_ospf(net_inst, vrf_interfaces, config_before, config_leftover,
network_instances_notes)
if len(route_forwarding_list_by_vrf.get(net_inst["openconfig-network-instance:name"], [])) > 0:
vrf_forwarding_list = route_forwarding_list_by_vrf.get(net_inst["openconfig-network-instance:name"])
xe_static_route.configure_xe_static_routes(net_inst, vrf_forwarding_list, config_leftover,
network_instances_notes)
xe_ospfv2.configure_xe_ospf_redistribution(net_inst, config_before, config_leftover, router_ospf_by_vrf)
xe_bgp.configure_xe_bgp(net_inst, config_before, config_leftover, network_instances_notes)
xe_bgp.configure_xe_bgp_redistribution(net_inst, config_before, config_leftover)
xe_mpls.configure_xe_mpls(net_inst, config_before, config_leftover, network_instances_notes)
def configure_network_interfaces(net_inst, interfaces_by_vrf):
for interface in interfaces_by_vrf.get(net_inst["openconfig-network-instance:name"], []):
name_split = interface["name"].split(".")
primary_interface = name_split[0]
new_interface = {
"openconfig-network-instance:id": interface["type"] + interface["name"],
"openconfig-network-instance:config": {
"openconfig-network-instance:id": interface["type"] + interface["name"],
"openconfig-network-instance:interface": interface["type"] + primary_interface
}
}
if (interface["type"] != "Tunnel") and (interface["type"] != "Vlan") and (interface["type"] != "Port-channel"):
subinterface = '0' if len(name_split) == 1 else name_split[1]
new_interface["openconfig-network-instance:config"][
"openconfig-network-instance:subinterface"] = subinterface
elif interface["type"] == "Port-channel": # Port-channel's don't have a sub-if 0
if len(name_split) > 1:
new_interface["openconfig-network-instance:config"]["openconfig-network-instance:subinterface"] = \
name_split[1]
net_inst["openconfig-network-instance:interfaces"]["openconfig-network-instance:interface"].append(
new_interface)
def configure_pim_network_instance(config_before, config_leftover):
"""
Translates NSO XE NED to MDD OpenConfig Network Instance for IP multicast and interface PIM configuration
"""
pim_protocol_by_networkinstance = {}
pim_protocol_instance = {
"openconfig-network-instance:identifier": "PIM",
"openconfig-network-instance:name": "PIM",
"openconfig-network-instance:config": {
"openconfig-network-instance:identifier": "PIM",
"openconfig-network-instance:name": "PIM",
"openconfig-network-instance:enabled": True,
"openconfig-network-instance:default-metric": 1
},
"openconfig-network-instance:pim": {
"openconfig-network-instance:interfaces": {
"openconfig-network-instance:interface": [
]
}
}
}
pim_interface = {
"openconfig-network-instance:interface-id": "",
"openconfig-network-instance:config": {
"openconfig-network-instance:enabled": "",
"openconfig-network-instance:interface-id": "",
"openconfig-network-instance:mode": "",
"openconfig-network-instance:dr-priority": 0,
"openconfig-network-instance:hello-interval": 0,
"openconfig-pim-ext:neighbor-filter": ""
},
"openconfig-network-instance:interface-ref": {
"openconfig-network-instance:config": {
"openconfig-network-instance:interface": "",
"openconfig-network-instance:subinterface": ""
}
}
}
for interface_type in config_before.get("tailf-ned-cisco-ios:interface", {}):
for nso_index, value in enumerate(config_before["tailf-ned-cisco-ios:interface"][interface_type]):
tmp_pim_interface = copy.deepcopy(pim_interface)
if value.get("ip", {}).get("pim", {}):
int_num = str(value['name']).split(".")[0]
subint_num = 0
if "." in str(value['name']):
subint_num = value['name'].split(".")[1]
tmp_pim_interface["openconfig-network-instance:interface-id"] = int_num
tmp_pim_interface["openconfig-network-instance:config"]["openconfig-network-instance:enabled"] = True
tmp_pim_interface["openconfig-network-instance:config"]["openconfig-network-instance:interface-id"] = int_num
tmp_pim_interface["openconfig-network-instance:interface-ref"]["openconfig-network-instance:config"]["openconfig-network-instance:interface"] = interface_type + int_num
tmp_pim_interface["openconfig-network-instance:interface-ref"]["openconfig-network-instance:config"]["openconfig-network-instance:subinterface"] = subint_num
for pim_key, pim_value in value.get("ip", {}).get("pim", {}).items():
if "dr-priority" in pim_key:
tmp_pim_interface["openconfig-network-instance:config"]["openconfig-network-instance:dr-priority"] = pim_value
if "query-interval" in pim_key:
tmp_pim_interface["openconfig-network-instance:config"]["openconfig-network-instance:hello-interval"] = pim_value
if "neighbor-filter" in pim_key:
tmp_pim_interface["openconfig-network-instance:config"]["openconfig-pim-ext:neighbor-filter"] = str(pim_value)
if "mode" in pim_key:
if "sparse-dense-mode" in pim_value:
tmp_pim_interface["openconfig-network-instance:config"]["openconfig-network-instance:mode"] = "openconfig-pim-types:PIM_MODE_DENSE"
elif "sparse-mode" in pim_value:
tmp_pim_interface["openconfig-network-instance:config"]["openconfig-network-instance:mode"] = "openconfig-pim-types:PIM_MODE_SPARSE"
if value.get("vrf", {}).get("forwarding", {}):
vrf_name = value["vrf"]["forwarding"]
if pim_protocol_by_networkinstance.get(vrf_name) is None:
pim_protocol_by_networkinstance[vrf_name] = {}
tmp_pim_protocol_instance = copy.deepcopy(pim_protocol_instance)
pim_protocol_by_networkinstance.update({vrf_name : tmp_pim_protocol_instance})
else:
vrf_name = "default"
if pim_protocol_by_networkinstance.get(vrf_name) is None:
pim_protocol_by_networkinstance[vrf_name] = {}
tmp_pim_protocol_instance = copy.deepcopy(pim_protocol_instance)
pim_protocol_by_networkinstance.update({vrf_name : tmp_pim_protocol_instance})
pim_protocol_by_networkinstance[vrf_name]["openconfig-network-instance:pim"]["openconfig-network-instance:interfaces"]["openconfig-network-instance:interface"].append(tmp_pim_interface)
del config_leftover["tailf-ned-cisco-ios:interface"][interface_type][nso_index]["ip"]["pim"]
if "multicast-routing" in config_leftover.get("tailf-ned-cisco-ios:ip", {}):
del config_leftover["tailf-ned-cisco-ios:ip"]["multicast-routing"]
for instance_name, network_instance in pim_protocol_by_networkinstance.items():
index = 0
for oc_name in openconfig_network_instances["openconfig-network-instance:network-instances"]["openconfig-network-instance:network-instance"]:
for oc_instance, oc_instance_name in oc_name.items():
if oc_instance_name == instance_name:
openconfig_network_instances["openconfig-network-instance:network-instances"]["openconfig-network-instance:network-instance"][index]["openconfig-network-instance:protocols"]["openconfig-network-instance:protocol"].append(network_instance)
index += 1
def configure_igmp_network_instance(config_before, config_leftover):
"""
Translates NSO XE NED to MDD OpenConfig Network Instance for IP multicast and interface IGMP configuration
"""
igmp_protocol_by_networkinstance = {}
igmp_protocol_instance = {
"openconfig-network-instance:identifier": "IGMP",
"openconfig-network-instance:name": "IGMP",
"openconfig-network-instance:config": {
"openconfig-network-instance:identifier": "IGMP",
"openconfig-network-instance:name": "IGMP",
"openconfig-network-instance:enabled": True,
"openconfig-network-instance:default-metric": 1
},
"openconfig-network-instance:igmp": {
"openconfig-network-instance:interfaces": {
"openconfig-network-instance:interface": [
]
}
}
}
igmp_interface = {
"openconfig-network-instance:interface-id": "",
"openconfig-network-instance:config": {
"openconfig-network-instance:enabled": "",
"openconfig-network-instance:interface-id": "",
"openconfig-network-instance:version": "",
"openconfig-network-instance:query-interval": "",
"openconfig-network-instance:filter-prefixes": ""
},
"openconfig-network-instance:interface-ref": {
"openconfig-network-instance:config": {
"openconfig-network-instance:interface": "",
"openconfig-network-instance:subinterface": ""
}
}
}
for interface_type in config_before.get("tailf-ned-cisco-ios:interface", {}):
for nso_index, value in enumerate(config_before["tailf-ned-cisco-ios:interface"][interface_type]):
tmp_igmp_interface = copy.deepcopy(igmp_interface)
if value.get("ip", {}).get("igmp", {}):
int_num = str(value['name']).split(".")[0]
subint_num = 0
if "." in str(value['name']):
subint_num = value['name'].split(".")[1]
tmp_igmp_interface["openconfig-network-instance:interface-id"] = int_num
tmp_igmp_interface["openconfig-network-instance:config"]["openconfig-network-instance:enabled"] = True
tmp_igmp_interface["openconfig-network-instance:config"]["openconfig-network-instance:interface-id"] = int_num
tmp_igmp_interface["openconfig-network-instance:interface-ref"]["openconfig-network-instance:config"]["openconfig-network-instance:interface"] = interface_type + int_num
tmp_igmp_interface["openconfig-network-instance:interface-ref"]["openconfig-network-instance:config"]["openconfig-network-instance:subinterface"] = subint_num
for igmp_key, igmp_value in value.get("ip", {}).get("igmp", {}).items():
if "version" in igmp_key:
tmp_igmp_interface["openconfig-network-instance:config"]["openconfig-network-instance:version"] = igmp_value
if "query-interval" in igmp_key:
tmp_igmp_interface["openconfig-network-instance:config"]["openconfig-network-instance:query-interval"] = igmp_value
if "access-group" in igmp_key:
tmp_igmp_interface["openconfig-network-instance:config"]["openconfig-network-instance:filter-prefixes"] = igmp_value
if value.get("vrf", {}).get("forwarding", {}):
vrf_name = value["vrf"]["forwarding"]
if igmp_protocol_by_networkinstance.get(vrf_name) is None:
igmp_protocol_by_networkinstance[vrf_name] = {}
tmp_igmp_protocol_instance = copy.deepcopy(igmp_protocol_instance)
igmp_protocol_by_networkinstance.update({vrf_name : tmp_igmp_protocol_instance})
else:
vrf_name = "default"
if igmp_protocol_by_networkinstance.get(vrf_name) is None:
igmp_protocol_by_networkinstance[vrf_name] = {}
tmp_igmp_protocol_instance = copy.deepcopy(igmp_protocol_instance)
igmp_protocol_by_networkinstance.update({vrf_name : tmp_igmp_protocol_instance})
igmp_protocol_by_networkinstance[vrf_name]["openconfig-network-instance:igmp"]["openconfig-network-instance:interfaces"]["openconfig-network-instance:interface"].append(tmp_igmp_interface)
del config_leftover["tailf-ned-cisco-ios:interface"][interface_type][nso_index]["ip"]["igmp"]
if "multicast-routing" in config_leftover.get("tailf-ned-cisco-ios:ip", {}):
del config_leftover["tailf-ned-cisco-ios:ip"]["multicast-routing"]
for instance_name, network_instance in igmp_protocol_by_networkinstance.items():
index = 0
for oc_name in openconfig_network_instances["openconfig-network-instance:network-instances"]["openconfig-network-instance:network-instance"]:
for oc_instance, oc_instance_name in oc_name.items():
if oc_instance_name == instance_name:
openconfig_network_instances["openconfig-network-instance:network-instances"]["openconfig-network-instance:network-instance"][index]["openconfig-network-instance:protocols"]["openconfig-network-instance:protocol"].append(network_instance)
index += 1
def configure_cgmp_network_instance(config_before, config_leftover):
"""
Translates NSO XE NED to MDD OpenConfig Network Instance for IP multicast and interface CGMP configuration
"""
cgmp_protocol_by_networkinstance = {}
cgmp_protocol_instance = {
"openconfig-network-instance:identifier": "CGMP",
"openconfig-network-instance:name": "CGMP",
"openconfig-network-instance:config": {
"openconfig-network-instance:identifier": "CGMP",
"openconfig-network-instance:name": "CGMP",
"openconfig-network-instance:enabled": True,
"openconfig-network-instance:default-metric": 1
},
"openconfig-network-instance:cgmp": {
"openconfig-network-instance:interfaces": {
"openconfig-network-instance:interface": [
]
}
}
}
cgmp_interface = {
"openconfig-network-instance:interface-id": "",
"openconfig-network-instance:config": {
"openconfig-network-instance:enabled": "",
"openconfig-network-instance:interface-id": "",
"openconfig-network-instance:cgmp-options": "NOT_APPLICABLE",
},
"openconfig-network-instance:interface-ref": {
"openconfig-network-instance:config": {
"openconfig-network-instance:interface": "",
"openconfig-network-instance:subinterface": ""
}
}
}
for interface_type in config_before.get("tailf-ned-cisco-ios:interface", {}):
for nso_index, value in enumerate(config_before["tailf-ned-cisco-ios:interface"][interface_type]):
tmp_cgmp_interface = copy.deepcopy(cgmp_interface)
if type(value.get("ip", {}).get("cgmp", '')) is dict:
int_num = str(value['name']).split(".")[0]
subint_num = 0
if "." in str(value['name']):
subint_num = value['name'].split(".")[1]
tmp_cgmp_interface["openconfig-network-instance:interface-id"] = int_num
tmp_cgmp_interface["openconfig-network-instance:config"]["openconfig-network-instance:enabled"] = True
tmp_cgmp_interface["openconfig-network-instance:config"]["openconfig-network-instance:interface-id"] = int_num
tmp_cgmp_interface["openconfig-network-instance:interface-ref"]["openconfig-network-instance:config"]["openconfig-network-instance:interface"] = interface_type + int_num
tmp_cgmp_interface["openconfig-network-instance:interface-ref"]["openconfig-network-instance:config"]["openconfig-network-instance:subinterface"] = subint_num
if value.get("vrf", {}).get("forwarding", {}):
vrf_name = value["vrf"]["forwarding"]
if cgmp_protocol_by_networkinstance.get(vrf_name) is None:
cgmp_protocol_by_networkinstance[vrf_name] = {}
tmp_cgmp_protocol_instance = copy.deepcopy(cgmp_protocol_instance)
cgmp_protocol_by_networkinstance.update({vrf_name : tmp_cgmp_protocol_instance})
else:
vrf_name = "default"
if cgmp_protocol_by_networkinstance.get(vrf_name) is None:
cgmp_protocol_by_networkinstance[vrf_name] = {}
tmp_cgmp_protocol_instance = copy.deepcopy(cgmp_protocol_instance)
cgmp_protocol_by_networkinstance.update({vrf_name : tmp_cgmp_protocol_instance})
cgmp_protocol_by_networkinstance[vrf_name]["openconfig-network-instance:cgmp"]["openconfig-network-instance:interfaces"]["openconfig-network-instance:interface"].append(tmp_cgmp_interface)
del config_leftover["tailf-ned-cisco-ios:interface"][interface_type][nso_index]["ip"]["cgmp"]
if "multicast-routing" in config_leftover.get("tailf-ned-cisco-ios:ip", {}):
del config_leftover["tailf-ned-cisco-ios:ip"]["multicast-routing"]
for instance_name, network_instance in cgmp_protocol_by_networkinstance.items():
index = 0
for oc_name in openconfig_network_instances["openconfig-network-instance:network-instances"]["openconfig-network-instance:network-instance"]:
for oc_instance, oc_instance_name in oc_name.items():
if oc_instance_name == instance_name:
openconfig_network_instances["openconfig-network-instance:network-instances"]["openconfig-network-instance:network-instance"][index]["openconfig-network-instance:protocols"]["openconfig-network-instance:protocol"].append(network_instance)
index += 1
def process_rd_rt(temp_vrf, vrf, vrf_index, config_leftover):
if "rd" in vrf:
temp_vrf["openconfig-network-instance:config"][
"openconfig-network-instance:route-distinguisher"] = vrf["rd"]
temp_vrf["openconfig-network-instance:config"][
"openconfig-network-instance-ext:route-targets-import"] = []
temp_vrf["openconfig-network-instance:config"][
"openconfig-network-instance-ext:route-targets-export"] = []
# RD is required to create RTs
if "route-target" in vrf:
process_rt(temp_vrf, vrf, "import")
process_rt(temp_vrf, vrf, "export")
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"][vrf_index]["route-target"]
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"][vrf_index]["rd"]
# IPv4 RT import and export policies
temp_policies = {
"openconfig-network-instance:inter-instance-policies": {
"openconfig-network-instance:apply-policy": {
"openconfig-network-instance:config": {
"openconfig-network-instance:export-policy": [],
"openconfig-network-instance:import-policy": []}}}}
if vrf.get("address-family", {}).get("ipv4", {}).get("import", {}).get("ipv4", {}).get("unicast", {}).get(
"map"):
temp_policies["openconfig-network-instance:inter-instance-policies"][
"openconfig-network-instance:apply-policy"]["openconfig-network-instance:config"][
"openconfig-network-instance:import-policy"].append(
vrf.get("address-family", {}).get("ipv4", {}).get("import", {}).get("ipv4", {}).get("unicast", {}).get(
"map"))
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"][vrf_index]["address-family"]["ipv4"]["import"]
if vrf.get("address-family", {}).get("ipv4", {}).get("export", {}).get("map"):
temp_policies["openconfig-network-instance:inter-instance-policies"][
"openconfig-network-instance:apply-policy"]["openconfig-network-instance:config"][
"openconfig-network-instance:export-policy"].append(
vrf.get("address-family", {}).get("ipv4", {}).get("export", {}).get("map"))
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"][vrf_index]["address-family"]["ipv4"]["export"]
if "ipv4" in vrf.get("address-family", {}) and len(vrf.get("address-family", {}).get("ipv4", {"1": "1"})) == 0:
del config_leftover["tailf-ned-cisco-ios:vrf"]["definition"][vrf_index]["address-family"]["ipv4"]
temp_vrf.update(temp_policies)
# TODO IPv6 RT import and export policies
def process_rt(temp_vrf, vrf, rt_type):
for rt in vrf["route-target"].get(rt_type, []):
if "asn-ip" in rt:
temp_vrf["openconfig-network-instance:config"][
f"openconfig-network-instance-ext:route-targets-{rt_type}"].append(rt["asn-ip"])
def cleanup_null_ospf_leftovers(config_leftover):
ospf_leftover = config_leftover.get("tailf-ned-cisco-ios:router", {}).get("ospf", [])
updated_ospf_list = []
for ospf_index in range(len(ospf_leftover)):
cleanup_network_statements(ospf_leftover[ospf_index])
cleanup_neighbors(ospf_leftover[ospf_index])
cleanup_traffic_area(ospf_leftover[ospf_index])
cleanup_virtual_link(ospf_leftover[ospf_index])
if len(ospf_leftover[ospf_index]) > 0:
updated_ospf_list.append(ospf_leftover[ospf_index])
if len(updated_ospf_list) > 0:
config_leftover.get("tailf-ned-cisco-ios:router", {})["ospf"] = updated_ospf_list
elif "ospf" in config_leftover.get("tailf-ned-cisco-ios:router", {}):
del config_leftover["tailf-ned-cisco-ios:router"]["ospf"]
def cleanup_network_statements(ospf_leftover):
if "network" in ospf_leftover:
del ospf_leftover["network"]
def cleanup_neighbors(ospf_leftover):
if "neighbor" in ospf_leftover:
del ospf_leftover["neighbor"]
def cleanup_virtual_link(ospf_leftover):
if len(ospf_leftover.get("area", [])) < 1:
return
for area in ospf_leftover["area"]:
updated_virtual_link_list = []
for virtual_link in area.get("virtual-link", []):
if virtual_link:
updated_virtual_link_list.append(virtual_link)
if len(updated_virtual_link_list) > 0:
area["virtual-link"] = updated_virtual_link_list
elif "virtual-link" in area:
del area["virtual-link"]
def cleanup_traffic_area(ospf_leftover):
if not "mpls" in ospf_leftover:
return
updated_traffic_area_list = []
for area_item in ospf_leftover["mpls"].get("traffic-eng", {}).get("area", []):
if area_item:
updated_traffic_area_list.append(area_item)
if len(updated_traffic_area_list) > 0:
ospf_leftover["mpls"]["traffic-eng"]["area"] = updated_traffic_area_list
elif "area" in ospf_leftover["mpls"].get("traffic-eng", {}):
del ospf_leftover["mpls"]["traffic-eng"]["area"]
def cleanup_null_static_route_leftovers(config_leftover):
if "route" in config_leftover.get("tailf-ned-cisco-ios:ip", {}):
cleanup_static_routes(config_leftover["tailf-ned-cisco-ios:ip"]["route"])
cleanup_vrf_null_leftover_static_routes(config_leftover)
if "route" in config_leftover.get("tailf-ned-cisco-ios:ip", {}) and len(
config_leftover["tailf-ned-cisco-ios:ip"]["route"]) == 0:
del config_leftover["tailf-ned-cisco-ios:ip"]["route"]
def cleanup_vrf_null_leftover_static_routes(config_leftover):
if len(config_leftover.get("tailf-ned-cisco-ios:ip", {"route": {}}).get("route", {}).get("vrf", [])) > 0:
updated_vrf_list = []
for vrf in config_leftover["tailf-ned-cisco-ios:ip"]["route"]["vrf"]:
cleanup_static_routes(vrf)
if len(vrf) > 0:
updated_vrf_list.append(vrf)
if len(updated_vrf_list) > 0:
config_leftover["tailf-ned-cisco-ios:ip"]["route"]["vrf"] = updated_vrf_list
else:
del config_leftover["tailf-ned-cisco-ios:ip"]["route"]["vrf"]
def cleanup_static_routes(leftover_route):
if common_xe.IP_FORWARDING_LIST in leftover_route:
updated_ip_forwarding_list_leftover = get_updated_configs(leftover_route[common_xe.IP_FORWARDING_LIST])
if len(updated_ip_forwarding_list_leftover) > 0:
leftover_route[common_xe.IP_FORWARDING_LIST] = updated_ip_forwarding_list_leftover
elif common_xe.IP_FORWARDING_LIST in leftover_route:
del leftover_route[common_xe.IP_FORWARDING_LIST]
if common_xe.INTF_LIST in leftover_route:
updated_intf_list_leftover = get_updated_configs(leftover_route[common_xe.INTF_LIST])
if len(updated_intf_list_leftover) > 0:
leftover_route[common_xe.INTF_LIST] = updated_intf_list_leftover
elif common_xe.INTF_LIST in leftover_route:
del leftover_route[common_xe.INTF_LIST]
if common_xe.IP_INTF_FORWARDING_LIST in leftover_route:
updated_ip_intf_forwarding_list_leftover = get_updated_configs(
leftover_route[common_xe.IP_INTF_FORWARDING_LIST])
if len(updated_ip_intf_forwarding_list_leftover) > 0:
leftover_route[common_xe.IP_INTF_FORWARDING_LIST] = updated_ip_intf_forwarding_list_leftover
elif common_xe.IP_INTF_FORWARDING_LIST in leftover_route:
del leftover_route[common_xe.IP_INTF_FORWARDING_LIST]
if "name" in leftover_route and len(leftover_route) < 2:
del leftover_route["name"]
def get_updated_configs(list_leftover):
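"""Return the list with translated (null) entries removed."""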
updated_static_list = []
for item in list_leftover:
if item:
updated_static_list.append(item)
return updated_static_list
def cleanup_null_bgp_leftovers(config_before, config_leftover):
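"""Prune translated BGP neighbors and peers from the leftover configuration, using the original config as a guide."""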
bgp_before_list = config_before.get("tailf-ned-cisco-ios:router", {"bgp": []}).get("bgp")
bgp_leftover_list = config_leftover.get("tailf-ned-cisco-ios:router", {"bgp": []}).get("bgp")
if bgp_leftover_list is None or len(bgp_leftover_list) == 0:
return
bgp_before = bgp_before_list[0]
bgp_leftover = bgp_leftover_list[0]
clean_up_default_neighbors_and_peers(bgp_before, bgp_leftover)
clean_up_vrf_neighbors_and_peers(bgp_before.get("address-family", {}).get("with-vrf", {}),
bgp_leftover.get("address-family", {}).get("with-vrf", {}).get("ipv4", []))
if bgp_leftover is not None and bgp_leftover.get("bgp") is not None:
del bgp_leftover["bgp"]
# if bgp_leftover != None and len(bgp_leftover["bgp"]) == 0:
# del bgp_leftover["bgp"]
# if bgp_leftover.get("address-family", {}).get("ipv4") != None:
# check_delete_protocol_leftovers(bgp_leftover, "ipv4")
# if bgp_leftover.get("address-family") != None:
# check_delete_protocol_leftovers(bgp_leftover, "vpnv4")
# if bgp_leftover.get("address-family") != None:
# pass
def clean_up_default_neighbors_and_peers(bgp_before, bgp_leftover):
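"""Prune translated neighbors and peers from the global BGP instance and its ipv4/vpnv4 unicast address families."""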
delete_peers_and_neighbors(bgp_leftover)
updated_ipv4_list = []
updated_vpnv4_list = []
for ipv4_index, afi_ipv4 in enumerate(bgp_before.get("address-family", {}).get("ipv4", [])):
if afi_ipv4.get("af") == "unicast":
delete_peers_and_neighbors(bgp_leftover["address-family"]["ipv4"][ipv4_index])
if (bgp_leftover["address-family"]["ipv4"][ipv4_index]
and len(bgp_leftover["address-family"]["ipv4"][ipv4_index]) > 0):
updated_ipv4_list.append(bgp_leftover["address-family"]["ipv4"][ipv4_index])
for vpnv4_index, afi_vpnv4 in enumerate(bgp_before.get("address-family", {}).get("vpnv4", [])):
if afi_vpnv4.get("af") == "unicast":
delete_peers_and_neighbors(bgp_leftover["address-family"]["vpnv4"][vpnv4_index])
if len(bgp_leftover["address-family"]["vpnv4"][vpnv4_index]) > 0:
updated_vpnv4_list.append(bgp_leftover["address-family"]["vpnv4"][vpnv4_index])
# Device may not be using MP-BGP
if bgp_before.get("address-family", {}).get("ipv4"):
bgp_leftover["address-family"]["ipv4"] = updated_ipv4_list
if bgp_before.get("address-family", {}).get("vpnv4"):
bgp_leftover["address-family"]["vpnv4"] = updated_vpnv4_list
def clean_up_vrf_neighbors_and_peers(afi_vrf, afi_vrf_leftover):
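"""Prune translated neighbors and peers from each per-VRF IPv4 unicast address family in the leftover config."""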
for index, afi_ipv4 in enumerate(afi_vrf.get("ipv4", [])):
if afi_ipv4.get("af") == "unicast":
updated_vrf_list = []
for vrf_index, afi_ipv4_vrf in enumerate(afi_ipv4.get("vrf", [])):
afi_vrf_ipv4_leftover = afi_vrf_leftover[index]["vrf"][vrf_index]
delete_peers_and_neighbors(afi_vrf_ipv4_leftover)
if len(afi_vrf_ipv4_leftover) == 0:
afi_vrf_leftover[index]["vrf"][vrf_index] = None
else:
updated_vrf_list.append(afi_vrf_ipv4_leftover)
afi_vrf_leftover[index]["vrf"] = updated_vrf_list
def delete_peers_and_neighbors(peer_neighbor_list_leftover):
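"""Remove translated (null) peers and neighbors from a leftover BGP container and delete the lists once empty."""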
is_peers_present = (peer_neighbor_list_leftover is not None
and peer_neighbor_list_leftover.get("neighbor-tag") is not None
and peer_neighbor_list_leftover["neighbor-tag"].get("neighbor") is not None)
is_neighbors_present = (peer_neighbor_list_leftover is not None
and peer_neighbor_list_leftover.get("neighbor") is not None)
remove_bgp_nulls(peer_neighbor_list_leftover, is_peers_present, is_neighbors_present)
if is_peers_present and len(peer_neighbor_list_leftover["neighbor-tag"]["neighbor"]) == 0:
del peer_neighbor_list_leftover["neighbor-tag"]
if is_neighbors_present and len(peer_neighbor_list_leftover["neighbor"]) == 0:
del peer_neighbor_list_leftover["neighbor"]
def remove_bgp_nulls(peer_neighbor_list_leftover, is_peers_present, is_neighbors_present):
updated_peers = []
updated_neighbors = []
if is_peers_present:
for peer in peer_neighbor_list_leftover["neighbor-tag"]["neighbor"]:
if peer is not None:
updated_peers.append(peer)
peer_neighbor_list_leftover["neighbor-tag"]["neighbor"] = updated_peers
if is_neighbors_present:
for neighbor in peer_neighbor_list_leftover["neighbor"]:
if neighbor is not None:
updated_neighbors.append(neighbor)
peer_neighbor_list_leftover["neighbor"] = updated_neighbors
def check_delete_protocol_leftovers(bgp_leftover, protocol):
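"""Delete the given address-family list from the leftover config when all of its entries are empty."""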
is_ipv4_empty = True
for ipv4_item in bgp_leftover.get("address-family", {}).get(protocol, []):
if ipv4_item is not None and len(ipv4_item) > 0:
is_ipv4_empty = False
if is_ipv4_empty:
del bgp_leftover["address-family"][protocol]
def main(before: dict, leftover: dict, translation_notes: list = []) -> dict:
"""
Translates NSO Device configurations to MDD OpenConfig configurations.
Requires environment variables:
NSO_URL: str
NSO_USERNAME: str
NSO_PASSWORD: str
NSO_DEVICE: str
TEST - If True, sends generated OC configuration to NSO Server: str
:param before: Original NSO Device configuration: dict
:param leftover: NSO Device configuration minus configs replaced with MDD OC: dict
:return: MDD Openconfig Network Instances configuration: dict
"""
xe_network_instances(before, leftover)
translation_notes += network_instances_notes
return openconfig_network_instances
if __name__ == "__main__":
sys.path.append("../../")
sys.path.append("../../../")
if (find_spec("package_nso_to_oc") is not None):
from package_nso_to_oc.xe import common_xe
from package_nso_to_oc.xe import xe_ospfv2
from package_nso_to_oc.xe import xe_static_route
from package_nso_to_oc.xe import xe_bgp
from package_nso_to_oc.xe import xe_mpls
from package_nso_to_oc import common
else:
import common_xe
import xe_ospfv2
import xe_static_route
import xe_bgp
import xe_mpls
import common
(config_before_dict, config_leftover_dict, interface_ip_dict) = common_xe.init_xe_configs()
main(config_before_dict, config_leftover_dict)
config_name = "_network_instances"
config_remaining_name = "_remaining_network_instances"
oc_name = "_openconfig_network_instances"
common.print_and_test_configs(
"xe1", config_before_dict, config_leftover_dict, openconfig_network_instances,
config_name, config_remaining_name, oc_name, network_instances_notes)
else:
# This is needed for now due to top level __init__.py. We need to determine if contents in __init__.py is still necessary.
if (find_spec("package_nso_to_oc") is not None):
from package_nso_to_oc.xe import common_xe
from package_nso_to_oc.xe import xe_ospfv2
from package_nso_to_oc.xe import xe_static_route
from package_nso_to_oc.xe import xe_bgp
from package_nso_to_oc.xe import xe_mpls
from package_nso_to_oc import common
else:
from xe import common_xe
from xe import xe_ospfv2
from xe import xe_static_route
from xe import xe_bgp
from xe import xe_mpls
import common
|
PypiClean
|
/foliantcontrib.includes-1.1.16-py3-none-any.whl/foliant/preprocessors/includes.py
|
import re
import urllib.error
import urllib.parse
import urllib.request
from shutil import rmtree
from io import StringIO
from hashlib import md5
from pathlib import Path
import socket
from subprocess import run, CalledProcessError, PIPE, STDOUT
from foliant.preprocessors.base import BasePreprocessor
from foliant.preprocessors import escapecode
from foliant.meta.tools import remove_meta
class Preprocessor(BasePreprocessor):
defaults = {
'recursive': True,
'stub_text': True,
'allow_failure': True,
'cache_dir': Path('.includescache'),
'aliases': {},
'extensions': ['md']
}
tags = 'include',
_heading_pattern = re.compile(
r'^(?P<hashes>\#{1,6})\s+(?P<content>.*\S+)(?P<tail>\s*)$',
flags=re.MULTILINE
)
_image_pattern = re.compile(r'\!\[(?P<caption>.*?)\]\((?P<path>((?!:\/\/).)+?)\)')
_tag_body_pattern = re.compile(
r'(\$(?P<repo>[^\#^\$]+)(\#(?P<revision>[^\$]+))?\$)?' +
r'(?P<path>[^\#]+)' +
r'(\#(?P<from_heading>[^:]*)(:(?P<to_heading>.+))?)?'
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cache_dir_path = self.project_path / self.options['cache_dir']
self._downloaded_dir_path = self._cache_dir_path / '_downloaded_content'
self.logger = self.logger.getChild('includes')
self.logger.debug(f'Preprocessor inited: {self.__dict__}')
def _find_file(
self,
file_name: str,
lookup_dir: Path
) -> Path or None:
'''Find a file in a directory by name. Check subdirectories recursively.
:param file_name: Name of the file
:lookup_dir: Starting directory
:returns: Path to the found file or None if the file was not found
:raises: FileNotFoundError
'''
self.logger.debug(f'Trying to find the file {file_name} inside the directory {lookup_dir}')
result = None
for item in lookup_dir.rglob('*'):
if item.name == file_name:
result = item
break
else:
raise FileNotFoundError(file_name)
self.logger.debug(f'File found: {result}')
return result
def create_full_link(self, repo_url: str, revision: str, path: str):
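'''Build a link to the included file's directory in the repository web UI (assumes a GitHub/GitLab-style URL layout).'''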
if repo_url.endswith('.git'):
repo_url = repo_url[:-4]
if revision:
full_repo_url = repo_url + '/tree/' + revision + '/' + path.rpartition('/')[0]
else:
full_repo_url = repo_url + '/-/blob/master/' + path.rpartition('/')[0]
return full_repo_url
def _download_file_from_url(self, url: str) -> Path:
'''Download file as the content of resource located at specified URL.
Place downloaded file into the cache directory with a unique name.
:param url: URL to get the included file content
:returns: Path to the downloaded file
'''
self.logger.debug(f'The included file content should be requested at the URL: {url}')
url_path = Path(urllib.parse.urlparse(url).path)
extra_stem = ''
extra_suffix = ''
if not url_path.stem:
extra_stem = 'content'
if not url_path.suffix:
extra_suffix = '.inc'
downloaded_file_path = (
self._downloaded_dir_path /
f'{md5(url.encode()).hexdigest()[:8]}_{url_path.stem}{extra_stem}{url_path.suffix}{extra_suffix}'
)
self.logger.debug(f'Downloaded file path: {downloaded_file_path}')
if not downloaded_file_path.exists():
self.logger.debug('Performing URL request')
try:
response = urllib.request.urlopen(url, timeout=2)
except (urllib.error.HTTPError, urllib.error.URLError) as error:
self.logger.error(f'Data is not retrieved with {error}\nURL: {url}')
except socket.timeout:
self.logger.error(f'socket timed out - URL {url}')
else:
charset = 'utf-8'
if response.headers['Content-Type']:
charset_match = re.search(r'(^|[\s;])charset=(?P<charset>[^\s;]+)', response.headers['Content-Type'])
if charset_match:
charset = charset_match.group('charset')
self.logger.debug(f'Detected source charset: {charset}')
downloaded_content = response.read().decode(charset)
self._downloaded_dir_path.mkdir(parents=True, exist_ok=True)
# Begin conversion block: rewrite relative links in the downloaded content as absolute repository links
dict_new_link = {}
regexp_find_link = re.compile(r'\[.+?\]\(.+?\)')
regexp_find_path = re.compile(r'\(.+?\)')
old_found_link = regexp_find_link.findall(downloaded_content)
for line in old_found_link:
exceptions_characters = re.findall(r'http|@|:', line)
if exceptions_characters:
continue
else:
relative_path = regexp_find_path.findall(line)
sub_relative_path = re.findall(r'\[.+?\]', line)
dict_new_link[line] = sub_relative_path[0] + '(' + url.rpartition('/')[0].replace('raw', 'blob') + '/' + relative_path[0].partition('(')[2]
for line in dict_new_link:
downloaded_content = downloaded_content.replace(line, dict_new_link[line])
# End of the conversion code block
with open(downloaded_file_path, 'w', encoding='utf8') as downloaded_file:
downloaded_file.write(downloaded_content)
else:
self.logger.debug('File found in cache, it was already downloaded at this run')
return downloaded_file_path
def _sync_repo(
self,
repo_url: str,
revision: str or None = None
) -> Path:
'''Clone a Git repository to the cache dir. If it has been cloned before, update it.
:param repo_url: Repository URL
:param revision: Revision: branch, commit hash, or tag
:returns: Path to the cloned repository
'''
repo_name = repo_url.split('/')[-1].rsplit('.', maxsplit=1)[0]
repo_path = (self._cache_dir_path / repo_name).resolve()
self.logger.debug(f'Synchronizing with repo; URL: {repo_url}, revision: {revision}')
try:
self.logger.debug(f'Cloning repo {repo_url} to {repo_path}')
run(
f'git clone {repo_url} {repo_path}',
shell=True,
check=True,
stdout=PIPE,
stderr=STDOUT
)
except CalledProcessError as exception:
if repo_path.exists():
self.logger.debug('Repo already cloned; pulling from remote')
try:
run(
'git pull',
cwd=repo_path,
shell=True,
check=True,
stdout=PIPE,
stderr=STDOUT
)
except CalledProcessError as exception:
self.logger.warning(str(exception))
else:
self.logger.error(str(exception))
if revision:
run(
f'git checkout {revision}',
cwd=repo_path,
shell=True,
check=True,
stdout=PIPE,
stderr=STDOUT
)
return repo_path
def _shift_headings(
self,
content: str,
shift: int
) -> str:
'''Shift Markdown headings in a string by a given value. The shift
can be positive or negative.
:param content: Markdown content
:param shift: Heading shift
:returns: Markdown content with headings shifted by ``shift``
'''
def _sub(heading):
new_heading_level = len(heading.group('hashes')) + shift
self.logger.debug(
f'Shift heading level to {new_heading_level}, heading content: {heading.group("content")}'
)
if new_heading_level <= 6:
return f'{"#" * new_heading_level} {heading.group("content")}{heading.group("tail")}'
else:
self.logger.debug('New heading level is out of range, using bold paragraph text instead of heading')
return f'**{heading.group("content")}**{heading.group("tail")}'
return self._heading_pattern.sub(_sub, content)
def _find_top_heading_level(
self,
content: str
) -> int:
'''Find the highest level heading (i.e. having the least '#'s)
in a Markdown string.
:param content: Markdown content
:returns: Maximum heading level detected; if no heading is found, 0 is returned
'''
result = float('inf')
for heading in self._heading_pattern.finditer(content):
heading_level = len(heading.group('hashes'))
if heading_level < result:
result = heading_level
self.logger.debug(f'Maximum heading level: {result}')
return result if result < float('inf') else 0
def _cut_from_position_to_position(
self,
content: str,
from_heading: str or None = None,
to_heading: str or None = None,
from_id: str or None = None,
to_id: str or None = None,
to_end: bool = False,
sethead: int or None = None,
nohead: bool = False
) -> str:
'''Cut part of Markdown string between two positions,
set internal heading level, and remove top heading.
Starting position may be defined by the heading content,
ID of the heading, ID of the anchor.
Ending position may be defined like the starting position,
and also as the end of the included content.
If only the starting position is defined, cut to the next heading
of the same level.
If neither starting nor ending position is defined,
the whole string is returned.
Heading shift and top heading elimination are optional.
:param content: Markdown content
:param from_heading: Starting heading
:param to_heading: Ending heading (will not be incuded in the output)
:param from_id: ID of starting heading or anchor;
this argument has higher priority than ``from_heading``
:param to_id: ID of ending heading (the heading itself will not be incuded in the output)
or anchor; this argument has higher priority than ``to_heading``
:param to_end: Flag that tells to cut up to the end of the included content;
this argument has higher priority than ``to_id``
:param sethead: Level of the topmost heading in the included content
:param nohead: Flag that tells to strip the starting heading from the included content
:returns: Part of the Markdown content between defined positions
with internal headings adjusted
'''
self.logger.debug(
'Cutting from position to position: ' +
f'from_heading: {from_heading}, to_heading: {to_heading}, ' +
f'from_id: {from_id}, to_id: {to_id}, ' +
f'to_end: {to_end}, ' +
f'sethead: {sethead}, nohead: {nohead}'
)
# First, cut the content from the starting position to the end
if from_id:
self.logger.debug('Starting point is defined by its ID')
from_identified_heading_pattern = re.compile(
r'^\#{1,6}\s+.*\S+\s+\{\#' + rf'{re.escape(from_id)}' + r'\}\s*$',
flags=re.MULTILINE
)
from_anchor_pattern = re.compile(
rf'(?:(?<!\<))\<anchor(?:\s(?:[^\<\>]*))?\>{re.escape(from_id)}<\/anchor\>'
)
if from_identified_heading_pattern.findall(content):
self.logger.debug('Starting heading with defined ID is found')
result = from_identified_heading_pattern.split(content)[1]
from_heading_line = from_identified_heading_pattern.findall(content)[0]
from_heading_level = len(self._heading_pattern.match(from_heading_line).group('hashes'))
self.logger.debug(f'Level of starting heading: {from_heading_level}')
elif from_anchor_pattern.findall(content):
self.logger.debug('Starting anchor with defined ID is found')
result = from_anchor_pattern.split(content)[1]
previous_content = from_anchor_pattern.split(content)[0]
from_heading_line = None
from_heading_level = None
for previous_heading_match in self._heading_pattern.finditer(previous_content):
from_heading_level = len(previous_heading_match.group('hashes'))
self.logger.debug(f'Level of starting heading: {from_heading_level}')
else:
self.logger.debug(
'Neither starting heading nor starting anchor is found, '
'skipping the included content'
)
return ''
elif from_heading:
self.logger.debug('Starting heading is defined by its content')
from_heading_pattern = re.compile(
r'^\#{1,6}\s+' + rf'{re.escape(from_heading)}\s*$',
flags=re.MULTILINE
)
if from_heading_pattern.findall(content):
self.logger.debug('Starting heading with defined content is found')
result = from_heading_pattern.split(content)[1]
from_heading_line = from_heading_pattern.findall(content)[0]
from_heading_level = len(self._heading_pattern.match(from_heading_line).group('hashes'))
self.logger.debug(f'Level of starting heading: {from_heading_level}')
else:
self.logger.debug('Starting heading is not found, skipping the included content')
return ''
else:
self.logger.debug('Starting point is not defined')
content_buffer = StringIO(content)
first_line = content_buffer.readline()
if self._heading_pattern.fullmatch(first_line):
self.logger.debug('The content starts with heading')
result = content_buffer.read()
from_heading_line = first_line
from_heading_level = len(self._heading_pattern.match(from_heading_line).group('hashes'))
else:
self.logger.debug('The content does not start with heading')
result = content
from_heading_line = None
from_heading_level = self._find_top_heading_level(content)
self.logger.debug(f'Topmost heading level: {from_heading_level}')
# After that, cut the result to the ending position
if to_end:
self.logger.debug('Ending point is defined as the end of the document')
elif to_id:
self.logger.debug('Ending point is defined by its ID')
to_identified_heading_pattern = re.compile(
r'^\#{1,6}\s+.*\S+\s+\{\#' + rf'{re.escape(to_id)}' + r'\}\s*$',
flags=re.MULTILINE
)
to_anchor_pattern = re.compile(
rf'(?:(?<!\<))\<anchor(?:\s(?:[^\<\>]*))?\>{re.escape(to_id)}<\/anchor\>'
)
if to_identified_heading_pattern.findall(result):
self.logger.debug('Ending heading with defined ID is found')
result = to_identified_heading_pattern.split(result)[0]
elif to_anchor_pattern.findall(result):
self.logger.debug('Ending anchor with defined ID is found')
result = to_anchor_pattern.split(result)[0]
else:
self.logger.debug('Neither ending heading nor ending anchor is found, cutting to the end')
elif to_heading:
self.logger.debug('Ending heading is defined by its content')
to_heading_pattern = re.compile(
r'^\#{1,6}\s+' + rf'{re.escape(to_heading)}\s*$',
flags=re.MULTILINE
)
if to_heading_pattern.findall(result):
self.logger.debug('Ending heading with defined content is found')
result = to_heading_pattern.split(result)[0]
else:
self.logger.debug('Ending heading is not found, cutting to the end')
else:
self.logger.debug('Ending point is not defined')
if from_id or from_heading:
self.logger.debug(
'Since starting point is defined, cutting to the next heading of the same level'
)
to_heading_pattern = re.compile(
rf'^\#{{1,{from_heading_level}}}\s+\S+.*$',
flags=re.MULTILINE
)
result = to_heading_pattern.split(result)[0]
else:
self.logger.debug(
'Since starting point is not defined, using the whole included content'
)
# Finally, take into account the options nohead and sethead
if not nohead and from_heading_line:
self.logger.debug(
'Since nohead option is not specified, and the included content starts with heading, ' +
'including starting heading into the output'
)
result = from_heading_line + result
if sethead:
if sethead > 0:
self.logger.debug(
'Since sethead option is specified, shifting headings levels in the included content'
)
result = self._shift_headings(
result,
sethead - from_heading_level
)
return result
def _adjust_image_paths(
self,
content: str,
markdown_file_path: Path
) -> str:
'''Locate images referenced in a Markdown string and replace their paths
with the absolute ones.
:param content: Markdown content
:param markdown_file_path: Path to the Markdown file containing the content
:returns: Markdown content with absolute image paths
'''
def _sub(image):
image_caption = image.group('caption')
image_path = (markdown_file_path.parent / Path(image.group('path'))).resolve()
self.logger.debug(
f'Updating image reference; user specified path: {image.group("path")}, ' +
f'absolute path: {image_path}, caption: {image_caption}'
)
return f''
return self._image_pattern.sub(_sub, content)
def _adjust_paths_in_tags_attributes(
self,
content: str,
modifier: str,
base_path: Path
) -> str:
'''Locate pseudo-XML tags in Markdown string. Replace the paths
that are specified as values of pseudo-XML tags attributes
preceded by modifiers (i.e. YAML tags such as ``!path``)
with absolute ones based on ``base_path``.
:param content: Markdown content
:param modifier: Modifier (i.e. YAML tag) that precedes an attribute value
:param base_path: Base path that the replaced paths must be relative to
:returns: Markdown content with absolute paths in attributes
of pseudo-XML tags
'''
def sub_tag(match):
def sub_path_attribute(match):
quote = match.group('quote')
modifier = match.group('modifier')
resolved_path = (base_path / match.group('path')).resolve()
adjusted_quoted_attribute_value = f'{quote}{modifier}{resolved_path}{quote}'
self.logger.debug(
'Updating path in tag attribute value; ' +
f'user specified value: {quote}{modifier}{match.group("path")}{quote}, ' +
f'adjusted value: {adjusted_quoted_attribute_value}'
)
return adjusted_quoted_attribute_value
path_attribute_pattern = re.compile(
r'''(?P<quote>'|")''' +
rf'(?P<modifier>\s*{re.escape(modifier)}\s+)' +
r'(?P<path>.+?)' +
r'(?P=quote)',
re.DOTALL
)
open_tag = path_attribute_pattern.sub(sub_path_attribute, match.group('open_tag'))
body = match.group('body')
closing_tag = match.group('closing_tag')
return f'{open_tag}{body}{closing_tag}'
tag_pattern = re.compile(
r'(?<!\<)(?P<open_tag><(?P<tag>\S+)(?:\s[^\<\>]*)?\>)'
r'(?P<body>.*?)'
r'(?P<closing_tag>\<\/(?P=tag)\>)',
re.DOTALL
)
return tag_pattern.sub(sub_tag, content)
def _get_src_file_path(
self,
markdown_file_path: Path
) -> Path:
'''Translate the path of Markdown file that is located inside the temporary working directory
into the path of the corresponding Markdown file that is located inside the source directory
of Foliant project.
:param markdown_file_path: Path to Markdown file that is located inside the temporary working directory
:returns: Mapping of Markdown file path to the source directory
'''
path_relative_to_working_dir = markdown_file_path.relative_to(self.working_dir.resolve())
self.logger.debug(
'Currently processed Markdown file path relative to working dir: ' +
f'{path_relative_to_working_dir}'
)
path_mapped_to_src_dir = (
self.project_path.resolve() /
self.config['src_dir'] /
path_relative_to_working_dir
)
self.logger.debug(
'Currently processed Markdown file path mapped to source dir: ' +
f'{path_mapped_to_src_dir}'
)
return path_mapped_to_src_dir
def _get_included_file_path(
self,
user_specified_path: str or Path,
current_processed_file_path: Path
) -> Path:
'''Resolve user specified path to the local included file.
:param user_specified_path: User specified string that represents
the path to a local file
:param current_processed_file_path: Path to the currently processed Markdown file
that contains include statements
:returns: Local path of the included file relative to the currently processed Markdown file
'''
self.logger.debug(f'Currently processed Markdown file: {current_processed_file_path}')
included_file_path = (current_processed_file_path.parent / user_specified_path).resolve()
self.logger.debug(f'User-specified included file path: {included_file_path}')
if (
self.working_dir.resolve() in current_processed_file_path.parents
and
self.working_dir.resolve() not in included_file_path.parents
):
self.logger.debug(
'Currently processed file is located inside the working dir, ' +
'but included file is located outside the working dir. ' +
'So currently processed file path should be rewritten with the path of corresponding file ' +
'that is located inside the source dir'
)
included_file_path = (
self._get_src_file_path(current_processed_file_path).parent / user_specified_path
).resolve()
else:
self.logger.debug(
'Using these paths without changes'
)
self.logger.debug(f'Finally, included file path: {included_file_path}')
return included_file_path
def _process_include(
self,
included_file_path: Path,
project_root_path: Path or None = None,
from_heading: str or None = None,
to_heading: str or None = None,
from_id: str or None = None,
to_id: str or None = None,
to_end: bool = False,
sethead: int or None = None,
nohead: bool = False,
include_link: str or None = None
) -> str:
'''Replace a local include statement with the file content. Necessary
adjustments are applied to the content: cut between certain headings,
strip the top heading, set heading level.
:param included_file_path: Path to the included file
:param project_root_path: Path to the “root” directory of Foliant project
that the currently processed Markdown file belongs to
:param from_heading: Include starting from this heading
:param to_heading: Include up to this heading (not including the heading itself)
:param from_id: Include starting from the heading or the anchor that has this ID
:param to_id: Include up to the heading or the anchor that has this ID
(not including the heading itself)
:param to_end: Flag that tells to cut to the end of document
:param sethead: Level of the topmost heading in the included content
:param nohead: Flag that tells to strip the starting heading from the included content
:returns: Included file content
'''
self.logger.debug(
f'Included file path: {included_file_path}, from heading: {from_heading}, ' +
f'to heading: {to_heading}, sethead: {sethead}, nohead: {nohead}'
)
if not included_file_path.exists():
if self.options['allow_failure']:
self.logger.error(f'The url or repo_url link is not correct, file not found: {included_file_path}')
path_error_link = Path(self._cache_dir_path/'_error_link').resolve()
if not Path(path_error_link).exists():
Path(path_error_link).mkdir()
path_error_file = open(path_error_link/included_file_path.name, 'w+')
if self.options['stub_text']:
path_error_file.write(f'The url or repo_url link is not correct, file not found: {included_file_path}')
path_error_file.close()
included_file_path=path_error_link/included_file_path.name
else:
self.logger.error(f'The url or repo_url link is not correct, file not found: {included_file_path}')
with open(included_file_path, encoding='utf8') as included_file:
included_content = included_file.read()
# Begin conversion block: rewrite relative links in the included content as absolute repository links
if include_link:
dict_new_link = {}
regexp_find_link = re.compile(r'\[.+?\]\(.+?\)')
regexp_find_path = re.compile(r'\(.+?\)')
old_found_link = regexp_find_link.findall(included_content)
for line in old_found_link:
relative_path = regexp_find_path.findall(line)
for ex_line in relative_path:
exceptions_characters = re.findall(r'https?://[^\s]+|@|:|\.png|\.jpeg|\.svg', ex_line)
if exceptions_characters:
continue
else:
sub_relative_path = re.findall(r'\[.+?\]', line)
dict_new_link[line] = sub_relative_path[0] + '(' + include_link.rpartition('/')[0].replace('raw', 'blob') + '/' + relative_path[0].partition('(')[2]
for line in dict_new_link:
included_content = included_content.replace(line, dict_new_link[line])
# End of the conversion code block
if self.config.get('escape_code', False):
if isinstance(self.config['escape_code'], dict):
escapecode_options = self.config['escape_code'].get('options', {})
else:
escapecode_options = {}
self.logger.debug(
'Since escape_code mode is on, applying the escapecode preprocessor ' +
'to the included file content'
)
included_content = escapecode.Preprocessor(
self.context,
self.logger,
self.quiet,
self.debug,
escapecode_options
).escape(included_content)
# Removing metadata from content before including
included_content = remove_meta(included_content)
included_content = self._cut_from_position_to_position(
included_content,
from_heading,
to_heading,
from_id,
to_id,
to_end,
sethead,
nohead
)
included_content = self._adjust_image_paths(included_content, included_file_path)
if project_root_path:
included_content = self._adjust_paths_in_tags_attributes(
included_content,
'!path',
project_root_path
)
included_content = self._adjust_paths_in_tags_attributes(
included_content,
'!project_path',
project_root_path
)
included_content = self._adjust_paths_in_tags_attributes(
included_content,
'!rel_path',
included_file_path.parent
)
return included_content
def process_includes(
self,
markdown_file_path: Path,
content: str,
project_root_path: Path or None = None,
sethead: int or None = None
) -> str:
'''Replace all include statements with the respective file contents.
:param markdown_file_path: Path to currently processed Markdown file
:param content: Markdown content
:param project_root_path: Path to the “root” directory of Foliant project
that the currently processed Markdown file belongs to
:param sethead: Level of the topmost heading in the content,
it may be set when the method is called recursively
:returns: Markdown content with resolved includes
'''
markdown_file_path = markdown_file_path.resolve()
self.logger.debug(f'Processing Markdown file: {markdown_file_path}')
processed_content = ''
include_statement_pattern = re.compile(
rf'((?<!\<)\<(?:{"|".join(self.tags)})(?:\s[^\<\>]*)?\>.*?\<\/(?:{"|".join(self.tags)})\>)',
flags=re.DOTALL
)
content_parts = include_statement_pattern.split(content)
for content_part in content_parts:
include_statement = self.pattern.fullmatch(content_part)
if include_statement:
current_project_root_path = project_root_path
body = self._tag_body_pattern.match(include_statement.group('body').strip())
options = self.get_options(include_statement.group('options'))
self.logger.debug(
f'Processing include statement; body: {body}, options: {options}, ' +
f'current project root path: {current_project_root_path}'
)
current_sethead = sethead
self.logger.debug(
f'Current sethead: {current_sethead}, ' +
f'user-specified sethead: {options.get("sethead")}'
)
if options.get('sethead'):
if current_sethead:
current_sethead += options['sethead'] - 1
else:
current_sethead = options['sethead']
self.logger.debug(f'Set new current sethead: {current_sethead}')
# If the tag body is not empty, the legacy syntax is expected:
#
# <include project_root="..." sethead="..." nohead="..." inline="...">
# ($repo_url#revision$path|src)#from_heading:to_heading
# </include>
#
# If the tag body is empty, the new syntax is expected:
#
# <include
# repo_url="..." revision="..." path="..." | url="..." | src="..."
# project_root="..."
# from_heading="..." to_heading="..."
# from_id="..." to_id="..."
# to_end="..."
# sethead="..." nohead="..."
# inline="..."
# wrap_code="..."
# code_language="..."
# ></include>
if body:
self.logger.debug('Using the legacy syntax rules')
if body.group('repo'):
self.logger.debug('File in Git repository referenced')
repo_from_alias = self.options['aliases'].get(body.group('repo'))
revision = None
if repo_from_alias:
self.logger.debug(f'Alias found: {body.group("repo")}, resolved as: {repo_from_alias}')
if '#' in repo_from_alias:
repo_url, revision = repo_from_alias.split('#', maxsplit=1)
else:
repo_url = repo_from_alias
else:
repo_url = body.group('repo')
if body.group('revision'):
revision = body.group('revision')
self.logger.debug(
f'Highest priority revision specified in the include statement: {revision}'
)
self.logger.debug(f'Repo URL: {repo_url}, revision: {revision}')
repo_path = self._sync_repo(repo_url, revision)
self.logger.debug(f'Local path of the repo: {repo_path}')
included_file_path = repo_path / body.group('path')
if included_file_path.name.startswith('^'):
included_file_path = self._find_file(
included_file_path.name[1:], included_file_path.parent
)
self.logger.debug(f'Resolved path to the included file: {included_file_path}')
current_project_root_path = (
repo_path / options.get('project_root', '')
).resolve()
self.logger.debug(f'Set new current project root path: {current_project_root_path}')
processed_content_part = self._process_include(
included_file_path=included_file_path,
project_root_path=current_project_root_path,
from_heading=body.group('from_heading'),
to_heading=body.group('to_heading'),
sethead=current_sethead,
nohead=options.get('nohead')
)
else:
self.logger.debug('Local file referenced')
included_file_path = self._get_included_file_path(body.group('path'), markdown_file_path)
if included_file_path.name.startswith('^'):
included_file_path = self._find_file(
included_file_path.name[1:], included_file_path.parent
)
self.logger.debug(f'Resolved path to the included file: {included_file_path}')
if options.get('project_root'):
current_project_root_path = (
markdown_file_path.parent / options.get('project_root')
).resolve()
self.logger.debug(f'Set new current project root path: {current_project_root_path}')
processed_content_part = self._process_include(
included_file_path=included_file_path,
project_root_path=current_project_root_path,
from_heading=body.group('from_heading'),
to_heading=body.group('to_heading'),
sethead=current_sethead,
nohead=options.get('nohead')
)
else: # if body
self.logger.debug('Using the new syntax rules')
if options.get('repo_url') and options.get('path'):
self.logger.debug('File in Git repository referenced')
repo_path = self._sync_repo(options.get('repo_url'), options.get('revision'))
self.logger.debug(f'Local path of the repo: {repo_path}')
included_file_path = repo_path / options['path']
self.logger.debug(f'Resolved path to the included file: {included_file_path}')
current_project_root_path = (
repo_path / options.get('project_root', '')
).resolve()
include_link = self.create_full_link(options.get('repo_url'), options.get('revision'),
options.get('path'))
self.logger.debug(f'Set new current project root path: {current_project_root_path}')
processed_content_part = self._process_include(
included_file_path=included_file_path,
project_root_path=current_project_root_path,
from_heading=options.get('from_heading'),
to_heading=options.get('to_heading'),
from_id=options.get('from_id'),
to_id=options.get('to_id'),
to_end=options.get('to_end'),
sethead=current_sethead,
nohead=options.get('nohead'),
include_link=include_link
)
elif options.get('url'):
self.logger.debug('File to get by URL referenced')
included_file_path = self._download_file_from_url(options['url'])
self.logger.debug(f'Resolved path to the included file: {included_file_path}')
if options.get('project_root'):
current_project_root_path = (
markdown_file_path.parent / options.get('project_root')
).resolve()
self.logger.debug(f'Set new current project root path: {current_project_root_path}')
processed_content_part = self._process_include(
included_file_path=included_file_path,
project_root_path=current_project_root_path,
from_heading=options.get('from_heading'),
to_heading=options.get('to_heading'),
from_id=options.get('from_id'),
to_id=options.get('to_id'),
to_end=options.get('to_end'),
sethead=current_sethead,
nohead=options.get('nohead')
)
elif options.get('src'):
self.logger.debug('Local file referenced')
included_file_path = self._get_included_file_path(options.get('src'), markdown_file_path)
self.logger.debug(f'Resolved path to the included file: {included_file_path}')
if options.get('project_root'):
current_project_root_path = (
markdown_file_path.parent / options.get('project_root')
).resolve()
self.logger.debug(f'Set new current project root path: {current_project_root_path}')
processed_content_part = self._process_include(
included_file_path=included_file_path,
project_root_path=current_project_root_path,
from_heading=options.get('from_heading'),
to_heading=options.get('to_heading'),
from_id=options.get('from_id'),
to_id=options.get('to_id'),
to_end=options.get('to_end'),
sethead=current_sethead,
nohead=options.get('nohead')
)
else:
self.logger.warning(
'Neither repo_url+path nor src specified, ignoring the include statement'
)
processed_content_part = ''
if self.options['recursive'] and self.pattern.search(processed_content_part):
self.logger.debug('Recursive call of include statements processing')
processed_content_part = self.process_includes(
included_file_path,
processed_content_part,
current_project_root_path,
current_sethead
)
wrap_code = options.get('wrap_code', '')
if wrap_code in ('triple_backticks', 'triple_tildas'):
if wrap_code == 'triple_backticks':
self.logger.debug('Wrapping included content as fence code block with triple backticks')
wrapper = '```'
elif wrap_code == 'triple_tildas':
self.logger.debug('Wrapping included content as fence code block with triple tildas')
wrapper = '~~~'
code_language = options.get('code_language', '')
if code_language:
self.logger.debug(f'Specifying code language: {code_language}')
else:
self.logger.debug('Do not specify code language')
if not processed_content_part.endswith('\n'):
processed_content_part += '\n'
processed_content_part = (
f'{wrapper}{code_language}' + '\n' + processed_content_part + wrapper + '\n'
)
elif wrap_code == 'single_backticks':
self.logger.debug('Wrapping included content as inline code with single backticks')
processed_content_part = '`' + processed_content_part + '`'
if options.get('inline'):
self.logger.debug(
'Processing included content part as inline, multiple lines will be stretched into one'
)
processed_content_part = re.sub(r'\s+', ' ', processed_content_part).strip()
else:
processed_content_part = content_part
processed_content += processed_content_part
return processed_content
def _get_source_files_extensions(self) -> list:
'''Get list of specified extensions from the ``extensions`` config param,
and convert it into list of glob patterns for each file type.
:returns: List of glob patters for each file type specified in config
'''
extensions_from_config = list(set(self.options['extensions']))
source_files_extensions = []
md_involved = False
for extension in extensions_from_config:
extension = extension.lstrip('.')
source_files_extensions.append(f'*.{extension}')
if extension == 'md':
md_involved = True
if not md_involved:
self.logger.warning(
"Markdown file extension 'md' is not mentioned in the extensions list! " +
"Didn’t you forget to put it there?"
)
return source_files_extensions
def apply(self):
self.logger.info('Applying preprocessor')
# Cleaning up downloads because the content of remote source may have modified
rmtree(self._downloaded_dir_path, ignore_errors=True)
source_files_extensions = self._get_source_files_extensions()
for source_files_extension in source_files_extensions:
for source_file_path in self.working_dir.rglob(source_files_extension):
with open(source_file_path, encoding='utf8') as source_file:
source_content = source_file.read()
processed_content = self.process_includes(
source_file_path,
source_content,
self.project_path.resolve()
)
if processed_content:
with open(source_file_path, 'w', encoding='utf8') as processed_file:
processed_file.write(processed_content)
self.logger.info('Preprocessor applied')
|
PypiClean
|
/api/entrypoint.py
|
import os
import sys
import signal
import re
import typing
from miniapp.api.base import ApiBase
from miniapp.api.configbase import ConfigBase
from miniapp.api.sso_utils import SSOSupport
from miniapp.comm.csrf import CSRF
from miniapp.comm.http import HandlerConfig
from miniapp.comm.rest import MiniREST
from miniapp.comm.session import DbSessionHandler
from miniapp.errs import GeneralError, ReportedException
from miniapp.utils.reloader import is_hot_reload_requested, run_with_reloader
from miniapp.utils.task_runner import TaskRunner
# default value for entry_point()'s add_http_headers__api parameter
HTTP_HEADERS_NO_CACHING = {"Cache-control": "no-store", "Pragma": "no-cache"}
ON_STOPPED = []
def on_stopped(handler: callable):
"""
Add a method to call when the application is stopped.
"""
ON_STOPPED.append(handler)
def call_on_stopped_handlers(logger: callable):
"""
Call all the on_stop handlers.
"""
for handler in ON_STOPPED:
try:
handler()
except Exception as err:
if not isinstance(err, GeneralError):
err = ReportedException(err)
logger(err.to_log())
def startup_logger(msg):
"""
This logger is only used until an API object can be constructed. Thereafter, the API is used for logging.
"""
print(msg)
sys.stdout.flush()
def generate_secure_http_headers(server_name: str=None, app_domains: (str, list)=None, script_domains: (str, list)=None, iframe_domains: (str, list)=None, dev_mode: bool=False):
"""
Put together a set of HTTP headers which enforce some best practice security settings.
:param server_name: Name to send in 'Server' header, instead of 'miniapp/version'.
:param app_domains: One or more hostnames (wildcards allowed), where the application and its related services are
hosted. All permissions are granted to these domains. Ex: "*.mydomain.com".
:param script_domains: Hostnames for all sources of javascript files, not including app_domains, which are included
automatically.
:param iframe_domains: All domains that host iframes which are allowed to be embedded in the application.
:param dev_mode: In developer mode, certain settings are omitted which are very unlikely to work in local
development environments.
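Example (illustrative values only):
headers = generate_secure_http_headers(
server_name="my-service",
app_domains=["*.example.com"],
script_domains=["cdn.example.net"],
dev_mode=True
)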
"""
def domain_list(dl: (str, list)) -> str:
""" stringify a list of allowed domains """
if not dl:
return ""
if isinstance(dl, str):
return dl
return " ".join(str(d or '') for d in dl)
headers = {
"X-Frame-Options": "sameorigin",
"Referrer-Policy": "strict-origin-when-cross-origin",
"X-Content-Type-Options": "nosniff",
"X-XSS-Protection": "1, mode=block",
}
if server_name:
headers["Server"] = server_name
if app_domains:
csp = [
f"default-src 'self' 'unsafe-eval' 'unsafe-inline' {domain_list(app_domains)}",
f"object-src 'self'",
f"script-src 'self' 'unsafe-inline' 'unsafe-eval' {domain_list(app_domains + script_domains)}",
f"img-src 'self' {domain_list(app_domains + ['data:'])}",
f"connect-src 'self' {domain_list(app_domains)}"
]
if iframe_domains:
csp.append(f"frame-src {iframe_domains}")
if not dev_mode:
csp.append(f"upgrade-insecure-requests")
csp.append(f"block-all-mixed-content")
headers["Content-Security-Policy"] = "; ".join(csp)
return headers
def web_root_contains_file(web_root, filename):
"""
Check whether a given file is available in the web root.
"""
if not web_root:
return
folders = [web_root] if isinstance(web_root, str) else web_root
for folder in folders:
if os.path.exists(os.path.join(folder, filename)):
return True
def entry_point(api_class: type, config_class: type=None, config_defaults: dict=None,
main_py: str="main.py", api_base_url: str="/api/v1", web_root: (str, dict)=None,
api_setup: typing.Callable[[ApiBase.__class__], None]=None,
bg_task_setup: typing.Callable[[ApiBase.__class__, TaskRunner.__class__], None]=None,
redirects: (dict, list)=None, rewrites: (dict, list)=None,
reload_source_root: (str, list)=None,
call_when_stopped: callable=None,
csrf_ignore_urls: (re.Pattern, str)=None, logged_in_paths: list=None, secure_cookies: bool=True,
add_http_headers: dict=None, add_http_headers__api: dict=None,
enable_api_test_page: str=None, use_subprocesses: bool=False, daemon_threads: bool=True):
"""
Entry point helper for microservices and application back-ends.
:param api_class: API - class to instantiate.
:param config_class: Configuration - class to instantiate, values will be filled in from environment variables.
:param config_defaults: Changes to default values for configuration. Environment variables override these.
:param main_py: Relative path to entry point. Used for HOT_RELOAD. Default is 'main.py'.
:param api_base_url: Where the API will be hosted. Default is /api/v1.
:param web_root: Path to web content, or a {} mapping URLs to web roots.
:param reload_source_root: Location of source folder(s) that should trigger 'hot reload'.
:param api_setup: Additional setup of API after it is instantiated.
:param bg_task_setup: Set up background tasks - supply a method that takes (api, task_runner) which calls
task_runner.add_task(). See TaskRunner.
:param redirects: Redirection rules. A {src: dst, ...} or a [(src, dst), ...]. See MiniREST.add_redirect().
:param rewrites: Rewrite rules. A {src: dst, ...} or a [(src, dst), ...]. See MiniREST.add_rewrite().
:param call_when_stopped: Method to call when server shuts down.
:param csrf_ignore_urls: A regex for URLs to omit from CSRF protection.
:param logged_in_paths: Experimental. A separate session cookie will be set for each listed path.
Default is ["/"].
:param secure_cookies: True to require HTTPS for session cookies. This should be True for
user-facing services and False for internal services. Will be changed to False if
config.development_features is set.
:param add_http_headers: HTTP headers added to all responses. You can use generate_secure_http_headers() to build
this value or omit it to not add any headers, or to allow the application or a proxy be in
charge of adding headers.
:param add_http_headers__api: HTTP headers added to REST responses. A default value that disables caching is used.
To inhibit this default, pass {}.
:param enable_api_test_page: To enable the built-in API test page, supply a path.
:param use_subprocesses: True to handle all requests in subprocesses (experimental),
False to handle them in threads.
:param daemon_threads: True - all requests terminate when the server is stopped (i.e. with SIGTERM),
False - requests are allowed to terminate when server is stopped.
:return: Does not return.
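Example (hypothetical minimal service; MyApi and MyConfig stand in for your own ApiBase/ConfigBase subclasses):
entry_point(
api_class=MyApi,
config_class=MyConfig,
web_root="web",
api_base_url="/api/v1",
enable_api_test_page="/api-test"
)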
"""
# defaults
if not config_class:
config_class = ConfigBase
if add_http_headers__api is None:
add_http_headers__api = HTTP_HEADERS_NO_CACHING
# register supplied clean-up function
if call_when_stopped:
on_stopped(call_when_stopped)
# hot reload requested?
if reload_source_root and is_hot_reload_requested() and "--no-reload" not in sys.argv:
startup_logger("RUNNING WITH HOT_RELOAD=1")
run_with_reloader(reload_source_root, [main_py, "--no-reload"], "python")
sys.exit(0)
# get configuration - note the precedence, from lowest to highest (higher overrides lower):
# 1) config_class's defaults
# 2) supplied 'config_defaults'
# 3) environment variables
# 4) command line
config = config_class(**(config_defaults or {}))
config.apply_environment()
config.apply_command_line()
dev_mode = hasattr(config, "development_features") and config.development_features
if dev_mode:
secure_cookies = False
# set up web server and API
logger = config.default_logger
the_db = config.get_db()
session_handler = DbSessionHandler(the_db) if the_db else None
handler_config = HandlerConfig(logged_in_paths=logged_in_paths, https_only=secure_cookies)
max_session_expire = 24*3600 if config.get_sso_config() else config.session_idle_expiration + 1
server = MiniREST(
config.port, logger=logger, session_handler=session_handler, handler_config=handler_config,
max_session_expire=max_session_expire
)
# custom http headers
if add_http_headers:
server.add_custom_headers(".*", add_http_headers)
if add_http_headers__api:
server.add_custom_headers(f"^{api_base_url}($|/.*)", add_http_headers__api)
# more server setup
if config.enable_csrf:
server.csrf = CSRF(csrf_ignore_urls)
server.track_session_activity()
# exit elegantly
def signal_handler(sig, frame):
call_on_stopped_handlers(logger)
server.shutdown(1) # haven't found a way to shut down connection threads yet, so using a short timeout
sys.exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
# instantiate and configure the API
api = api_class(config, server)
server.logger = lambda message, level, caller_detail=None: api.log(message, level, caller_detail or "WEB")
# - connect endpoints to server
api.configure_server(base_url=api_base_url)
if config.get_sso_config():
SSOSupport(api).setup()
# serve static content
if web_root:
if isinstance(web_root, str):
server.add_static_folder("/", web_root)
else:
for url, root in web_root.items():
server.add_static_folder(url, root)
if redirects is None and web_root_contains_file(web_root, "index.html"):
server.add_redirect("/", "/index.html")
# API test page
if enable_api_test_page:
if not enable_api_test_page.startswith("/"):
enable_api_test_page = "/" + enable_api_test_page
if not enable_api_test_page.endswith("/"):
enable_api_test_page += "/"
api_test_content = os.path.join(os.path.dirname(__file__), "../web/api_test")
server.add_static_folder(enable_api_test_page, api_test_content)
server.add_redirect(enable_api_test_page, enable_api_test_page + f"index.html?url={api_base_url}")
# redirects & rewrites
for src, dst in (redirects.items() if isinstance(redirects, dict) else redirects or []):
server.add_redirect(src, dst)
for src, dst in (rewrites.items() if isinstance(rewrites, dict) else rewrites or []):
server.add_rewrite(src, dst)
# more api setup
if api_setup:
# general setup
api_setup(api)
if bg_task_setup:
# set up background tasks
task_runner = TaskRunner()
bg_task_setup(api, task_runner)
task_runner.start()
on_stopped(task_runner.stop)
# ready to start up
version_info = ", ".join("%s=%s" % kv for kv in api.version().items())
api.log("starting on port %d, %s" % (config.port, version_info), caller_detail="STARTUP")
ext_url = config.get_external_url()
if ext_url:
api.log(f"url: {ext_url}", caller_detail="STARTUP")
if dev_mode:
api.log("RUNNING IN DEVELOPMENT MODE", caller_detail="STARTUP")
# start the server
server.start(
mode="subprocesses" if use_subprocesses else "threads",
wait_for_shutdown=True,
daemon_threads=daemon_threads
)
# shut down
call_on_stopped_handlers(logger)
sys.exit(0)
|
PypiClean
|
/dsin100days603v38-6.0.3.tar.gz/dsin100days603v38-6.0.3/notebook/static/components/MathJax/localization/sco/MathMenu.js
|
MathJax.Localization.addTranslation("sco","MathMenu",{version:"2.7.9",isLoaded:true,strings:{Show:"Shaw maths aes",MathMLcode:"MathML code",OriginalMathML:"Oreeginal MathML",TeXCommands:"TeX commauns",AsciiMathInput:"AsciiMathML input",Original:"Oreeginal form",ErrorMessage:"Mistak message",Annotation:"Annotation",TeX:"TeX",StarMath:"StarMath",Maple:"Maple",ContentMathML:"Content MathML",OpenMath:"OpenMath",texHints:"Shaw TeX hints in MathML",Settings:"Maths settins",ZoomTrigger:"Zuim trigger",Hover:"Hover",Click:"Clap",DoubleClick:"Dooble-clap",NoZoom:"Naw zuim",TriggerRequires:"Trigger needs:",Option:"Optie",Alt:"Alt",Command:"Commaun",Control:"Control",Shift:"Shift",ZoomFactor:"Zuim facter",Renderer:"Maths renderer",MPHandles:"Let MathPlayer haunle:",MenuEvents:"Menu events",MouseEvents:"Moose events",MenuAndMouse:"Moose n menu events",FontPrefs:"Font preferences",ForHTMLCSS:"Fer HTML-CSS:",Auto:"Aut\u00E6",TeXLocal:"TeX (local)",TeXWeb:"TeX (wab)",TeXImage:"TeX (eimage)",STIXLocal:"STIX (local)",STIXWeb:"STIX (wab)",AsanaMathWeb:"Asana Math (wab)",GyrePagellaWeb:"Gyre Pagella (wab)",GyreTermesWeb:"Gyre Termes (wab)",LatinModernWeb:"Latin Modern (wab)",NeoEulerWeb:"Neo Euler (wab)",ContextMenu:"Contextual menu",Browser:"Brouser",Scale:"Scale aw maths ...",Discoverable:"Heilicht oan hover",Locale:"Leid",LoadLocale:"Laid fae URL ...",About:"Aneat MathJax",Help:"MathJax heelp",localTeXfonts:"uisin local TeX fonts",webTeXfonts:"uisin wab TeX font",imagefonts:"uisin Eimage fonts",localSTIXfonts:"uisin local STIX fonts",webSVGfonts:"uisin wab SVG fonts",genericfonts:"uisin generic Unicode fonts",wofforotffonts:"WOFF or OTF fonts",eotffonts:"EOT fonts",svgfonts:"SVG fonts",WebkitNativeMMLWarning:"Yer brouser disna seem tae support MathML nateevelie, sae switchin tae MathML ootput micht cause the mathematics oan the page tae become onreadable",MSIENativeMMLWarning:"Internet Explorer needs the MathPlayer plug-in fer tae process MathML ootput.",OperaNativeMMLWarning:"Opera's support fer MathML is leemitit, sae switchin tae MathML ootput micht cause some expressions tae render puirlie.",SafariNativeMMLWarning:"Yer brouser's native MathML disna implement aw the features uised bi MathJax, sae some expressions michtna render properlie.",FirefoxNativeMMLWarning:"Yer brouser's native MathML disna implement aw the features uised bi MathJax, sae some expressions michtna render properlie.",MSIESVGWarning:"SVG isna implemented in Internet Explorer prior til IE9 or whan it's emulating IE8 or ablo. Switchin til SVG ootput will cause the mathematics tae no displey properlie.",LoadURL:"Laid owersetin data fae this URL:",BadURL:"The URL shid be fer ae JavaScript file that defines MathJax owersetin data. 
JavaScript file names shid end wi '.js'",BadData:"Failed tae laid owersetin data fae %1",SwitchAnyway:"Switch the renderer oniewas?\n\n(Press OK tae switch, CANCEL tae continue wi the current renderer)",ScaleMath:"Scale aw mathematics (compared til surroondin tex) bi",NonZeroScale:"The scale shidna be zero",PercentScale:"The scale shid be ae percentage (fer example 120%%)",IE8warning:"This will disable the MathJax menu n zuim features, but ye can Alt-Clap oan aen expression tae obtain the MathJax menu insteid.\n\nReallie want tae chynge the MathPlayer settins?",IE9warning:"The MathJax contextual menu will be disabled, but ye can Alt-Clap oan aen expression tae obtain the MathJax menu insteid.",NoOriginalForm:"Naw oreeginal form available",Close:"Claise",EqSource:"MathJax Equation Soorce"}});MathJax.Ajax.loadComplete("[MathJax]/localization/sco/MathMenu.js");
|
PypiClean
|
/expsolver-0.5.tar.gz/expsolver-0.5/README.md
|
### Table of Contents
1. [Installation](#installation)
2. [Project Motivation](#motivation)
3. [Algorithm](#algorithm)
4. [File Descriptions](#files)
5. [Inputs and Outputs](#inputoutput)
6. [Example code](#example)
7. [Limitations](#limitations)
8. [Improvements](#improvements)
9. [Licensing, Authors, and Acknowledgements](#licensing)
## Installation <a name="installation"></a>
The code was developed using <b>Python version 3.6.11.</b><br>
## Project Motivation<a name="motivation"></a>
<b>The intent is to solve multiple kinds of equations (expression = 0) efficiently and return: </b><br>
1. Roots
2. Local maxima
3. Local minima
## Algorithm <a name="algorithm"></a>
The basic logic consists of moving two brackets of x values, one in the negative and the other in the
positive direction, and checking for roots, maxima and minima. The pseudocode is as below; a short runnable sketch follows these lists. <br>
Determining order of equation: <br>
1. IF "**" in expression, find the highest value following the "**" as order
2. Else if "*x" in expression, order is 1
3. Else if "x" in expression, order is 1
4. Else order is zero
5. If any exception occurs in steps 1 through 4:
If "x" in expression, order = 10
else order = 0
The order of the expression is used to calculate the tolerances (xtol and ytol), since for higher orders
small changes in "x" can result in significant changes in the expression value.
Determining roots of equation: <br>
1. Find a semi-random starting "x" value as a function of order
2. create a list of "x" values (xlist) as [x, x+tol, x, x-tol]
3. Evaluate exp for xlist to obtain ylist
4. If ylist has a zero, append the x value to roots list
5. If ylist[0]*ylist[1] < 0, indicating that the root lies between xlist[0] and xlist[1],
add the arithmetic mean of xlist[0] and xlist[1] to roots
6. If ylist[2]*ylist[3] < 0, indicating that the root lies between xlist[2] and xlist[3],
add the arithmetic mean of xlist[2] and xlist[3] to roots
7. Else make xlist as [x=x+tol, x+tol=x+2*tol, x=x-tol, x-tol=x-2*tol]
8. Repeat steps 2 through 7 a fixed number of times (a large value, based on xtol)
9. Set xtol = xtol / order and repeat step 8 until exp(root) is within ytol of zero
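A minimal sketch of the two-bracket scan described in steps 2 through 7 (again illustrative; `scan_roots` and its signature are hypothetical, and the real solver adds the tolerance refinement of steps 8 and 9 on top):

```python
def scan_roots(f, x0, tol, steps=20000):
    """Walk one bracket to the right and one to the left of x0, recording sign changes as roots."""
    roots = []
    xr, xl = x0, x0
    for _ in range(steps):
        xlist = [xr, xr + tol, xl, xl - tol]
        ylist = [f(x) for x in xlist]
        if 0 in ylist:
            roots.append(xlist[ylist.index(0)])
        if ylist[0] * ylist[1] < 0:      # a root lies between xlist[0] and xlist[1]
            roots.append((xlist[0] + xlist[1]) / 2)
        if ylist[2] * ylist[3] < 0:      # a root lies between xlist[2] and xlist[3]
            roots.append((xlist[2] + xlist[3]) / 2)
        xr, xl = xr + tol, xl - tol      # shift both brackets outward (step 7)
    return roots
```

For example, `scan_roots(lambda x: x**3 - 2*x**2 - 3*x - 100, 0.0, 0.001, steps=20000)` brackets the real root of the cubic used in the example section near x ≈ 5.66.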
Determining maxima and minima: <br>
1. If the difference in ylist[0] and ylist[1] from ylist goes from negative to positive,
add xlist[0] to minima
2. If the difference in ylist[0] and ylist[1] from ylist goes from positive to negative,
add xlist[0] to maxima
3. If the difference in ylist[2] and ylist[3] from ylist goes from negative to positive,
add xlist[2] to minima
4. If the difference in ylist[2] and ylist[3] from ylist goes from positive to negative,
add xlist[2] to maxima
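In the solver this slope-sign test runs on both moving brackets during the same scan; for readability the sketch below applies the identical test along a single increasing grid of x values (an illustration, not the shipped code):

```python
def classify_extrema(f, xs, tol):
    """Classify slope-sign changes along an increasing grid of x values."""
    minima, maxima = [], []
    prev = f(xs[0] + tol) - f(xs[0])
    for x in xs[1:]:
        d = f(x + tol) - f(x)            # forward difference: sign of the local slope
        if prev < 0 < d:                 # slope flips from - to +  ->  local minimum
            minima.append(x)
        elif prev > 0 > d:               # slope flips from + to -  ->  local maximum
            maxima.append(x)
        prev = d
    return minima, maxima
```

On the cubic from the example section, a grid from -10 to 10 with tol = 0.01 locates the local maximum near -0.54 and the local minimum near 1.86, consistent with the results shown later in this README.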
## File Descriptions <a name="files"></a>
The important files include: <br>
1. expsolver.py: The "Solver" class
## Inputs and Outputs <a name="inputoutput"></a>
<b>Inputs:</b><br>
1. exp: the expression to be solved (for exp = 0), e.g. 2*x**2+3*x-100
<b>Solve:</b><br>
1. solve(): solves for "exp = 0"
<b>Outputs:</b><br>
1. get_order(): Integer order of equation "exp=0"
2. get_roots(): List of roots for "exp=0"
3. get_delta(): List of "0-exp(root)"
4. get_minima(): List of local minima
5. get_maxima(): List of local maxima
## Example Code<a name="example"></a>
<b>Code snippet:</b><br>
from expsolver import Solver as solver <br>
exp='x**3-2*x**2-3*x-100' # Expression to be solved <br>
solv_obj=solver(exp) # Solver object exp <br>
solv_obj.solve() # Solve exp <br>
<b>Results:</b><br>
solv_obj.get_order() <br>
2 <br>
solv_obj.get_maxima() <br>
[-0.5375000000000474] <br>
solv_obj.get_roots() <br>
[5.65625] <br>
solv_obj.get_delta() <br>
[-0.006256103515625] <br>
solv_obj.get_minima() <br>
[1.8624999999999483] <br>
## Limitations<a name="limitations"></a>
1. Currently cannot solve advanced functions from the math library
2. May be unable to compute very large roots
3. May return multiple values for roots / maxima / minima that lie too close to each other
## Improvements<a name="improvements"></a>
1. Can be modified to include functions from the math library
2. Efficiency can be improved by optimizing tolerances and the tolerance-based iteration logic
## Licensing, Authors, and Acknowledgements<a name="licensing"></a>
For licensing information (MIT) check the LICENSE file <br>
[Link to Github](https://github.com/kgraghav/expsolver/)
|
PypiClean
|
/cloudbender-0.16.3-py3-none-any.whl/cloudbender-0.16.3.dist-info/licenses/LICENSE.md
|
GNU Affero General Public License
=================================
_Version 3, 19 November 2007_
_Copyright © 2007 Free Software Foundation, Inc. <<http://fsf.org/>>_
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
## Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: **(1)** assert copyright on the software, and **(2)** offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
## TERMS AND CONDITIONS
### 0. Definitions
“This License” refers to version 3 of the GNU Affero General Public License.
“Copyright” also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
“The Program” refers to any copyrightable work licensed under this
License. Each licensee is addressed as “you”. “Licensees” and
“recipients” may be individuals or organizations.
To “modify” a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a “modified version” of the
earlier work or a work “based on” the earlier work.
A “covered work” means either the unmodified Program or a work based
on the Program.
To “propagate” a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To “convey” a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays “Appropriate Legal Notices”
to the extent that it includes a convenient and prominently visible
feature that **(1)** displays an appropriate copyright notice, and **(2)**
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
### 1. Source Code
The “source code” for a work means the preferred form of the work
for making modifications to it. “Object code” means any non-source
form of a work.
A “Standard Interface” means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The “System Libraries” of an executable work include anything, other
than the work as a whole, that **(a)** is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and **(b)** serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
“Major Component”, in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The “Corresponding Source” for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
### 2. Basic Permissions
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
### 3. Protecting Users' Legal Rights From Anti-Circumvention Law
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
### 4. Conveying Verbatim Copies
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
### 5. Conveying Modified Source Versions
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
* **a)** The work must carry prominent notices stating that you modified
it, and giving a relevant date.
* **b)** The work must carry prominent notices stating that it is
released under this License and any conditions added under section 7.
This requirement modifies the requirement in section 4 to
“keep intact all notices”.
* **c)** You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
* **d)** If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
“aggregate” if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
### 6. Conveying Non-Source Forms
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
* **a)** Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
* **b)** Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either **(1)** a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or **(2)** access to copy the
Corresponding Source from a network server at no charge.
* **c)** Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
* **d)** Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
* **e)** Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A “User Product” is either **(1)** a “consumer product”, which means any
tangible personal property which is normally used for personal, family,
or household purposes, or **(2)** anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, “normally used” refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
“Installation Information” for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
### 7. Additional Terms
“Additional permissions” are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
* **a)** Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
* **b)** Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
* **c)** Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
* **d)** Limiting the use for publicity purposes of names of licensors or
authors of the material; or
* **e)** Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
* **f)** Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered “further
restrictions” within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
### 8. Termination
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated **(a)**
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and **(b)** permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
### 9. Acceptance Not Required for Having Copies
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
### 10. Automatic Licensing of Downstream Recipients
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An “entity transaction” is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
### 11. Patents
A “contributor” is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's “contributor version”.
A contributor's “essential patent claims” are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, “control” includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a “patent license” is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To “grant” such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either **(1)** cause the Corresponding Source to be so
available, or **(2)** arrange to deprive yourself of the benefit of the
patent license for this particular work, or **(3)** arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. “Knowingly relying” means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is “discriminatory” if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license **(a)** in connection with copies of the covered work
conveyed by you (or copies made from those copies), or **(b)** primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
### 12. No Surrender of Others' Freedom
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
### 13. Remote Network Interaction; Use with the GNU General Public License
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
### 14. Revised Versions of this License
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License “or any later version” applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
### 15. Disclaimer of Warranty
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
### 16. Limitation of Liability
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
### 17. Interpretation of Sections 15 and 16
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
_END OF TERMS AND CONDITIONS_
## How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the “copyright” line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a “Source” link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a “copyright disclaimer” for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<<http://www.gnu.org/licenses/>>.
|
PypiClean
|
/sagemath-standard-10.0b0.tar.gz/sagemath-standard-10.0b0/sage/databases/conway.py
|
r"""
Frank Luebeck's tables of Conway polynomials over finite fields
"""
# ****************************************************************************
#
# Copyright (C) 2005-2006 William Stein <[email protected]>
# Copyright (C) 2010 Alexandru Ghitza
# Copyright (C) 2013 R. Andrew Ohana <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from collections.abc import Mapping
import pickle
from sage.features.databases import DatabaseConwayPolynomials
_conwaydict = None
class DictInMapping(Mapping):
def __init__(self, dict):
"""
Places dict into a non-mutable mapping.
TESTS::
sage: from sage.databases.conway import DictInMapping
sage: d = {}
sage: m = DictInMapping(d); m
{}
sage: d[0] = 1; m
{0: 1}
sage: m[2] = 3
Traceback (most recent call last):
...
TypeError: 'DictInMapping' object does not support item assignment
"""
self._store = dict
def __getitem__(self, key):
"""
TESTS::
sage: from sage.databases.conway import DictInMapping
sage: DictInMapping({'foo': 'bar'})['foo']
'bar'
"""
return self._store[key]
def __len__(self):
"""
TESTS::
sage: from sage.databases.conway import DictInMapping
sage: d = {}
sage: m = DictInMapping(d); len(m)
0
sage: d['foo'] = 'bar'; len(m)
1
"""
return len(self._store)
def __iter__(self):
"""
TESTS::
sage: from sage.databases.conway import DictInMapping
sage: next(iter(DictInMapping({'foo': 'bar'})))
'foo'
"""
return iter(self._store)
def __repr__(self):
"""
TESTS::
sage: from sage.databases.conway import DictInMapping
sage: DictInMapping({'foo': 'bar'})
{'foo': 'bar'}
"""
return repr(self._store)
class ConwayPolynomials(Mapping):
def __init__(self):
"""
Initialize the database.
TESTS::
sage: c = ConwayPolynomials()
sage: c
Frank Luebeck's database of Conway polynomials
"""
global _conwaydict
if _conwaydict is None:
_CONWAYDATA = DatabaseConwayPolynomials().absolute_filename()
with open(_CONWAYDATA, 'rb') as f:
_conwaydict = pickle.load(f)
self._store = _conwaydict
def __repr__(self):
"""
Return a description of this database.
TESTS::
sage: c = ConwayPolynomials()
sage: c.__repr__()
"Frank Luebeck's database of Conway polynomials"
"""
return "Frank Luebeck's database of Conway polynomials"
def __getitem__(self, key):
"""
If key is a pair of integers ``p,n``, return the Conway
polynomial of degree ``n`` over ``GF(p)``.
If key is an integer ``p``, return a non-mutable mapping
whose keys are the degrees of the polynomial values.
TESTS::
sage: c = ConwayPolynomials()
sage: c[60859]
{1: (60856, 1), 2: (3, 60854, 1),
3: (60856, 8, 0, 1), 4: (3, 32881, 3, 0, 1)}
sage: c[60869, 3]
(60867, 2, 0, 1)
"""
try:
return DictInMapping(self._store[key])
except KeyError as err:
try:
if isinstance(key, (tuple, list)):
if len(key) == 2:
return self._store[key[0]][key[1]]
except KeyError:
pass
raise err
def __len__(self):
"""
Return the number of polynomials in this database.
TESTS::
sage: c = ConwayPolynomials()
sage: len(c)
35352
"""
try:
return self._len
except AttributeError:
pass
self._len = sum(len(a) for a in self._store.values())
return self._len
def __iter__(self):
"""
Return an iterator over the keys of this database.
TESTS::
sage: c = ConwayPolynomials()
sage: itr = iter(c)
sage: next(itr) # random
(65537, 4)
"""
for a, b in self._store.items():
for c in b:
yield a, c
def polynomial(self, p, n):
"""
Return the Conway polynomial of degree ``n`` over ``GF(p)``,
or raise a RuntimeError if this polynomial is not in the
database.
.. NOTE::
See also the global function ``conway_polynomial`` for
a more user-friendly way of accessing the polynomial.
INPUT:
- ``p`` -- prime number
- ``n`` -- positive integer
OUTPUT:
List of Python int's giving the coefficients of the corresponding
Conway polynomial in ascending order of degree.
EXAMPLES::
sage: c = ConwayPolynomials()
sage: c.polynomial(3, 21)
(1, 2, 0, 2, 0, 1, 2, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
sage: c.polynomial(97, 128)
Traceback (most recent call last):
...
RuntimeError: Conway polynomial over F_97 of degree 128 not in database.
"""
try:
return self[p, n]
except KeyError:
raise RuntimeError("Conway polynomial over F_%s of degree %s not in database." % (p, n))
def has_polynomial(self, p, n):
"""
Return True if the database of Conway polynomials contains the
polynomial of degree ``n`` over ``GF(p)``.
INPUT:
- ``p`` -- prime number
- ``n`` -- positive integer
EXAMPLES::
sage: c = ConwayPolynomials()
sage: c.has_polynomial(97, 12)
True
sage: c.has_polynomial(60821, 5)
False
"""
return (p,n) in self
def primes(self):
"""
Return the list of prime numbers ``p`` for which the database of
Conway polynomials contains polynomials over ``GF(p)``.
EXAMPLES::
sage: c = ConwayPolynomials()
sage: P = c.primes()
sage: 2 in P
True
sage: next_prime(10^7) in P
False
"""
return self._store.keys()
def degrees(self, p):
"""
Return the list of integers ``n`` for which the database of Conway
polynomials contains the polynomial of degree ``n`` over ``GF(p)``.
EXAMPLES::
sage: c = ConwayPolynomials()
sage: c.degrees(60821)
[1, 2, 3, 4]
sage: c.degrees(next_prime(10^7))
[]
"""
if p not in self._store:
return []
return list(self._store[p])
def __reduce__(self):
"""
TESTS::
sage: c = ConwayPolynomials()
sage: loads(dumps(c)) == c
True
"""
return (ConwayPolynomials, ())
|
PypiClean
|
/qtop-0.9.20161222.tar.gz/qtop-0.9.20161222/qtop_py/yaml_parser.py
|
import os
import logging
## TODO: black sheep
def fix_config_list(config_list):
"""
transforms a list of the form ['a, b'] to ['a', 'b']
"""
if not config_list:
return []
t = config_list
item = t[0]
list_items = item.split(',')
return [nr.strip() for nr in list_items]
def get_line(fin, verbatim=False, SEPARATOR=None, DEF_INDENT=2):
"""
Yields a list per line read, of the following form:
[indentation_change, line up to first space, line after first space if exists]
Comment lines are omitted.
Lines where comments exist at the end are stripped off their comment.
Indentation is calculated with respect to the previous line.
1: line is further indented
0: same indentation
-1: line is unindent
Empty lines only return the indentation change.
Where the line splits depends on SEPARATOR (default is first space)
DEF_INDENT is how many spaces is an indent by default.
e.g. qtop config file uses 2 spaces, oarnodes_s_y uses 4
"""
indent = 0
indenter = {
0: 0,
DEF_INDENT / 2: 1,
DEF_INDENT: 1,
3 * int(float(DEF_INDENT) / 2): 2,
2 * DEF_INDENT: 2,
-DEF_INDENT / 2: -1,
-DEF_INDENT: -1,
-3 * int(float(DEF_INDENT) / 2): -2,
- 2 * DEF_INDENT: -2,
}
for line in fin:
if line.lstrip().startswith('#') or line.strip() == '---':
continue
elif ' #' in line and not line.endswith('#\n'):
line = line.split(' #', 1)[0]
prev_indent = indent
indent = len(line) - len(line.lstrip(' '))
diff = indent - prev_indent
try:
d_indent = indenter[diff]
except KeyError:
d_indent = diff
line = line.rstrip()
list_line = verbatim and [d_indent, line] or [d_indent] + line.split(None or SEPARATOR, 1)
if len(list_line) > 1:
if list_line[1].startswith(('"', "'")):
list_line[1] = list_line[1][1:-1]
else:
pass
yield list_line
def convert_dash_key_in_dict(d):
"""
takes a dict of the form {'-': [...]} and converts it to [...]
"""
try:
assert isinstance(d, dict)
except AssertionError:
return d # TODO: Maybe this should fail, not be muted
for key_out in d:
if not (isinstance(d[key_out], dict) or len(d[key_out]) == 1):
continue
try:
for key_in in d[key_out]:
if key_in == '-' and key_out != 'state':
d[key_out] = d[key_out][key_in]
# elif key_in == '-' and key_out == 'state':
# d[key_out] = eval(d[key_out])
# break
except TypeError:
return d
except IndexError:
continue
return d
def parse(fn, DEF_INDENT=2):
raw_key_values = {}
with open(fn, mode='r') as fin:
try:
assert os.stat(fn).st_size != 0
except AssertionError:
logging.critical('File %s is empty!! Exiting...\n' % fn)
raise
except IOError:
raise
logging.debug('File state before parse: %s' % fin)
get_lines = get_line(fin, DEF_INDENT=DEF_INDENT) # TODO: weird
line = next(get_lines)
while line:
block, line = read_yaml_config_block(line, fin, get_lines)
block = convert_dash_key_in_dict(block)
for k in block:
block[k] = convert_dash_key_in_dict(block[k])
raw_key_values.update(block)
logging.debug('File state after parse: %s' % fin)
a_dict = dict([(key, value) for key, value in raw_key_values.items()])
return a_dict
def read_yaml_config_block(line, fin, get_lines):
block = dict()
parent_container = block
open_containers = list()
open_containers.append(block)
# if len(line) > 1: # non-empty line
# key_value, parent_container = process_line(line, fin, get_lines, parent_container)
# for (k, v) in key_value.items():
# block[k] = v
while len(line) == 1: # skip empty lines
try:
line = next(get_lines)
except StopIteration: # EO(config)F
return {}, ''
while len(line) > 1: # as long as a blank line is not reached (i.e. block is not complete)
# if line[0] == 0 or (line[0] != 0 and line[1] == '-'): # same level
# key_value used below belongs to previous line. It will work for first block line because of short circuit logic
if line[0] == 0 \
or (line[0] == 1 and (key_value.keys()[0] == '-'))\
or (line[0] == -1 and line[1] == '-'): # same level or entry level
key_value, container = process_line(line, fin, get_lines, parent_container)
for k in key_value:
pass # assign dict's sole key to k
if parent_container == {} or '-' not in parent_container:
parent_container[k] = key_value[k]
elif '-' in parent_container and '-' not in key_value:
last_item = parent_container['-'].pop()
key_value.update(last_item)
parent_container['-'].append(key_value)
else:
parent_container.setdefault(k, []).extend(key_value[k]) # was waiting for a list, but a str came in!
if container == {}:
open_containers.append(container)
parent_container = open_containers[-1] # point towards latest container (key_value's value)
elif (line[0] == 1) or (line[0] > 1): # go down one level
key_value, container = process_line(line, fin, get_lines, parent_container)
for k in key_value:
pass
# if container == {}: # up parent container with new value
if parent_container == {}: # above it is a key waiting to be filled with values
parent_container[k] = key_value[k]
else:
parent_container.setdefault(k, []).append(key_value[k]) if isinstance(key_value[k], str) else \
parent_container.setdefault(k, []).extend(key_value[k])
if container == {}:
open_containers.append(container)
parent_container = open_containers[-1] # point towards latest container (key_value's value)
elif line[0] == -2 and line[1] == '-': # go up two levels
key_value, container = process_line(line, fin, get_lines, parent_container)
len(open_containers) > 1 and open_containers.pop() or None
for k in key_value:
pass # assign dict's sole key to k
if open_containers[-1].get('-'):
open_containers[-1].setdefault('-', []).extend(key_value[k])
else:
open_containers[-1][k] = key_value[k]
if container == {}:
open_containers.append(container)
parent_container = open_containers[-1] # point towards latest container (key_value's value)
else:
parent_container = open_containers[-1]
elif line[0] == -1: # go up one level
key_value, container = process_line(line, fin, get_lines, parent_container)
len(open_containers) > 1 and open_containers.pop() or None
for k in key_value:
pass # assign dict's sole key to k
if open_containers[-1].get('-'):
open_containers[-1].setdefault('-', []).extend(key_value[k])
else:
open_containers[-1][k] = key_value[k]
if container == {}:
open_containers.append(container)
parent_container = open_containers[-1] # point towards latest container (key_value's value)
else:
parent_container = open_containers[-1]
try:
line = next(get_lines)
except StopIteration:
return block, ''
else:
if line[-1] == '...':
return block, line
return block, line
def process_line(list_line, fin, get_lines, parent_container):
key = list_line[1]
if len(list_line) == 2: # key-only, so what's in the line following should be written in a new container
container = {}
return {key.rstrip(':'): container}, container
elif len(list_line) == 3:
container = list_line[2]
if container.endswith(':'): # key: '-' - testkey:
parent_key = key
key = container
new_container = {}
return {parent_key: [{key.rstrip(':'): new_container}]}, new_container #list
elif ': ' in container: # key: '-' - testkey: testvalue
parent_key = key
key, container = container.split(None, 1)
# container = [container[1:-1]] if container.startswith('[') else container
container = container[1:-1].split(', ') if container.startswith('[') else container
container = "" if container in ("''", '""') else container
if len(container) == 1 and isinstance(container, list) and isinstance(container[0], str):
try:
container = list(eval(container[0]))
except NameError:
pass
return {'-': [{key.rstrip(':'): container}]}, container #list
elif container.endswith('|'):
container = process_code(fin)
return {key.rstrip(':'): container}, parent_container
else: # simple value
if key == '-': # i.e. - testvalue
return {'-': [container]}, container # was parent_container******was :[container]}, container
else: # i.e. testkey: testvalue
container = [container[1:-1]] if container.startswith('[') else container #list
if len(container) == 1 and isinstance(container, list) and isinstance(container[0], str):
try:
container = list(eval(container[0]))
except NameError:
pass
elif container.startswith("'") and container.endswith("'"):
container = eval(container)
return {key.rstrip(':'): container}, container # was parent_container#str
else:
raise ValueError("Didn't anticipate that!")
def process_code(fin):
get_code = get_line(fin, verbatim=True)
# line = next(get_code)
line = next(get_code)
code = []
while line[0] > -1:
code.append(' ' + line[-1])
try:
line = next(get_code)
except StopIteration:
break
return '\n'.join([c.strip() for c in code]).strip()
def safe_load(fin, DEF_INDENT=2):
a_dict = parse(fin, DEF_INDENT)
logging.debug("YAML dict length: %s" % len(a_dict))
return a_dict
def load_all(fin):
list_of_dicts = []
get_lines = get_line(fin)
while True:
try:
line = next(get_lines)
except StopIteration:
break
block, line = read_yaml_config_block(line, fin, get_lines)
block = convert_dash_key_in_dict(block)
list_of_dicts.append(block)
return list_of_dicts
def get_yaml_key_part(config, scheduler, outermost_key):
"""
only return the list items of the yaml outermost_key if a yaml key subkey exists
(this signals a user-inserted value)
"""
# e.g. outermost_key = 'workernodes_matrix'
for part in config[outermost_key]:
part_name = [i for i in part][0]
part_options = part[part_name]
yaml_key = part_options.get('yaml_key')
# if no systems line exists, all systems are supported, and thus the current
systems = fix_config_list(part_options.get('systems', [scheduler]))
if yaml_key:
yield yaml_key, part_name, systems
|
PypiClean
|
/v2/model/list_publicips_response.py
|
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListPublicipsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'publicips': 'list[PublicipShowResp]'
}
attribute_map = {
'publicips': 'publicips'
}
def __init__(self, publicips=None):
"""ListPublicipsResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._publicips = None
self.discriminator = None
if publicips is not None:
self.publicips = publicips
@property
def publicips(self):
"""Gets the publicips of this ListPublicipsResponse.
弹性公网IP对象
:return: The publicips of this ListPublicipsResponse.
:rtype: list[PublicipShowResp]
"""
return self._publicips
@publicips.setter
def publicips(self, publicips):
"""Sets the publicips of this ListPublicipsResponse.
弹性公网IP对象
:param publicips: The publicips of this ListPublicipsResponse.
:type: list[PublicipShowResp]
"""
self._publicips = publicips
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListPublicipsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
PypiClean
|
/flask-journey-0.1.4.tar.gz/flask-journey-0.1.4/flask_journey/utils.py
|
import re
from functools import wraps
from flask import jsonify, request
from marshmallow import ValidationError, Schema
from furl import furl
from .exceptions import IncompatibleSchema, InvalidPath
def sanitize_path(path):
"""Performs sanitation of the path after validating
:param path: path to sanitize
:return: path
:raises:
- InvalidPath if the path doesn't start with a slash
"""
if path == '/': # Nothing to do, just return
return path
if path[:1] != '/':
raise InvalidPath('The path must start with a slash')
# Deduplicate slashes in path
path = re.sub(r'/+', '/', path)
# Strip trailing slashes and return
return path.rstrip('/')
def _validate_schema(obj):
"""Ensures the passed schema instance is compatible
:param obj: object to validate
:return: obj
:raises:
- IncompatibleSchema if the passed schema is of an incompatible type
"""
if obj is not None and not isinstance(obj, Schema):
raise IncompatibleSchema('Schema must be of type {0}'.format(Schema))
return obj
def route(bp, *args, **kwargs):
"""Journey route decorator
Enables simple serialization, deserialization and validation of Flask routes with the help of Marshmallow.
:param bp: :class:`flask.Blueprint` object
:param args: args to pass along to `Blueprint.route`
:param kwargs:
- :strict_slashes: Enable / disable strict slashes (default False)
- :validate: Enable / disable body/query validation (default True)
- :_query: Unmarshal Query string into this schema
- :_body: Unmarshal JSON body into this schema
- :marshal_with: Serialize the output with this schema
:raises:
- ValidationError if the query parameters or JSON body fails validation
"""
kwargs['strict_slashes'] = kwargs.pop('strict_slashes', False)
body = _validate_schema(kwargs.pop('_body', None))
query = _validate_schema(kwargs.pop('_query', None))
output = _validate_schema(kwargs.pop('marshal_with', None))
validate = kwargs.pop('validate', True)
def decorator(f):
@bp.route(*args, **kwargs)
@wraps(f)
def wrapper(*inner_args, **inner_kwargs):
"""If a schema (_body and/or _query) was supplied to the route decorator, the deserialized
:class`marshmallow.Schema` object is injected into the decorated function's kwargs."""
try:
if query is not None:
query.strict = validate
url = furl(request.url)
inner_kwargs['_query'] = query.load(data=url.args)
if body is not None:
body.strict = validate
json_data = request.get_json()
if json_data is None:
# Set json_data to empty dict if body is empty, so it gets picked up by the validator
json_data = {}
inner_kwargs['_body'] = body.load(data=json_data)
except ValidationError as err:
return jsonify(err.messages), 422
if output:
data = output.dump(f(*inner_args, **inner_kwargs))
return jsonify(data[0])
return f(*inner_args, **inner_kwargs)
return f
return decorator
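# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The blueprint,
# schemas and route below are hypothetical; they only make the decorator's
# _query / _body / marshal_with keywords concrete.
#
#   from flask import Blueprint
#   from marshmallow import Schema, fields
#
#   from flask_journey import route
#
#   bp = Blueprint('users', __name__)
#
#   class UserSchema(Schema):
#       id = fields.Int()
#       name = fields.Str(required=True)
#
#   class QuerySchema(Schema):
#       page = fields.Int()
#
#   @route(bp, '/users', methods=['POST'],
#          _body=UserSchema(), _query=QuerySchema(), marshal_with=UserSchema())
#   def create_user(_body=None, _query=None):
#       # _body and _query hold the deserialized (and, unless validate=False,
#       # validated) request body and query string
#       ...
# ---------------------------------------------------------------------------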
|
PypiClean
|
/prutils-0.0.13.tar.gz/prutils-0.0.13/README.md
|
# prutils
#### Introduction
Functional code that I tend to reuse across work and study projects is collected here, so it can be conveniently installed and used in multiple projects.
#### Software Architecture
#### Installation
#### Usage
#### Contributing
#### Gitee Features
### A compromise workaround for not being able to paste images into non-member Markdown documents in Youdao Cloud Notes
#### Requirement
* Non-member Markdown documents in Youdao Cloud Notes cannot have images pasted into them, which is inconvenient
#### Finding a solution
* Ordinary (non-Markdown) notes in Youdao Cloud Notes do allow pasting images; by sharing such a note and opening the share link, the image URLs can be obtained
* Markdown documents can embed images with the `![]()` syntax
#### Solution
* Create an ordinary note in Youdao Cloud Notes to serve as an image gallery
* Write a tool that reads the URL of the last image in that note
* Use that URL in the Markdown document
#### 工具安装
* 下载安装python
* 下载地址:https://www.python.org/ftp/python/3.8.1/python-3.8.1.exe
* 安装路径:C:\Python38\python.exe
* 添加C:\Python38和C:\Python38\Scripts路径到环境变量path
* 安装prutils
进cmd, 执行pip install --upgrade prutils==0.0.9
#### 工具使用
* 进cmd执行`pru_cmds ydtk {图片url}`,输出最后一张图片地址

|
PypiClean
|
/mitmproxy_lin_customization-5.2.2.1.tar.gz/mitmproxy_lin_customization-5.2.2.1/mitmproxy/websocket.py
|
import time
import queue
from typing import List, Optional
from wsproto.frame_protocol import CloseReason
from wsproto.frame_protocol import Opcode
from mitmproxy import flow
from mitmproxy.net import websockets
from mitmproxy.coretypes import serializable
from mitmproxy.utils import strutils, human
class WebSocketMessage(serializable.Serializable):
"""
A WebSocket message sent from one endpoint to the other.
"""
def __init__(
self, type: int, from_client: bool, content: bytes, timestamp: Optional[float]=None, killed: bool=False
) -> None:
self.type = Opcode(type) # type: ignore
"""indicates either TEXT or BINARY (from wsproto.frame_protocol.Opcode)."""
self.from_client = from_client
"""True if this messages was sent by the client."""
self.content = content
"""A byte-string representing the content of this message."""
self.timestamp: float = timestamp or time.time()
"""Timestamp of when this message was received or created."""
self.killed = killed
"""True if this messages was killed and should not be sent to the other endpoint."""
@classmethod
def from_state(cls, state):
return cls(*state)
def get_state(self):
return int(self.type), self.from_client, self.content, self.timestamp, self.killed
def set_state(self, state):
self.type, self.from_client, self.content, self.timestamp, self.killed = state
self.type = Opcode(self.type)  # replace bare int with the Opcode enum
def __repr__(self):
if self.type == Opcode.TEXT:
return "text message: {}".format(repr(self.content))
else:
return "binary message: {}".format(strutils.bytes_to_escaped_str(self.content))
def kill(self):
"""
Kill this message.
It will not be sent to the other endpoint. This has no effect in streaming mode.
"""
self.killed = True
class WebSocketFlow(flow.Flow):
"""
A WebSocketFlow is a simplified representation of a Websocket connection.
"""
def __init__(self, client_conn, server_conn, handshake_flow, live=None):
super().__init__("websocket", client_conn, server_conn, live)
self.messages: List[WebSocketMessage] = []
"""A list containing all WebSocketMessage's."""
self.close_sender = 'client'
"""'client' if the client initiated connection closing."""
self.close_code = CloseReason.NORMAL_CLOSURE
"""WebSocket close code."""
self.close_message = '(message missing)'
"""WebSocket close message."""
self.close_reason = 'unknown status code'
"""WebSocket close reason."""
self.stream = False
"""True of this connection is streaming directly to the other endpoint."""
self.handshake_flow = handshake_flow
"""The HTTP flow containing the initial WebSocket handshake."""
self.ended = False
"""True when the WebSocket connection has been closed."""
self._inject_messages_client = queue.Queue(maxsize=1)
self._inject_messages_server = queue.Queue(maxsize=1)
if handshake_flow:
self.client_key = websockets.get_client_key(handshake_flow.request.headers)
self.client_protocol = websockets.get_protocol(handshake_flow.request.headers)
self.client_extensions = websockets.get_extensions(handshake_flow.request.headers)
self.server_accept = websockets.get_server_accept(handshake_flow.response.headers)
self.server_protocol = websockets.get_protocol(handshake_flow.response.headers)
self.server_extensions = websockets.get_extensions(handshake_flow.response.headers)
else:
self.client_key = ''
self.client_protocol = ''
self.client_extensions = ''
self.server_accept = ''
self.server_protocol = ''
self.server_extensions = ''
_stateobject_attributes = flow.Flow._stateobject_attributes.copy()
# mypy doesn't support update with kwargs
_stateobject_attributes.update(dict(
messages=List[WebSocketMessage],
close_sender=str,
close_code=int,
close_message=str,
close_reason=str,
client_key=str,
client_protocol=str,
client_extensions=str,
server_accept=str,
server_protocol=str,
server_extensions=str,
# Do not include handshake_flow, to prevent recursive serialization!
# Since mitmproxy-console currently only displays HTTPFlows,
# dumping the handshake_flow will include the WebSocketFlow too.
))
def get_state(self):
d = super().get_state()
d['close_code'] = int(d['close_code']) # replace enum with bare int
return d
@classmethod
def from_state(cls, state):
f = cls(None, None, None)
f.set_state(state)
return f
def __repr__(self):
return "<WebSocketFlow ({} messages)>".format(len(self.messages))
def message_info(self, message: WebSocketMessage) -> str:
return "{client} {direction} WebSocket {type} message {direction} {server}{endpoint}".format(
type=message.type,
client=human.format_address(self.client_conn.address),
server=human.format_address(self.server_conn.address),
direction="->" if message.from_client else "<-",
endpoint=self.handshake_flow.request.path,
)
def inject_message(self, endpoint, payload):
"""
Inject and send a full WebSocket message to the remote endpoint.
This might corrupt your WebSocket connection! Be careful!
The endpoint needs to be either flow.client_conn or flow.server_conn.
If ``payload`` is of type ``bytes``, the message is flagged as binary.
If it is of type ``str``, it is encoded as UTF-8 and sent as a text message.
:param payload: The message body to send.
:type payload: ``bytes`` or ``str``
"""
if endpoint == self.client_conn:
self._inject_messages_client.put(payload)
elif endpoint == self.server_conn:
self._inject_messages_server.put(payload)
else:
raise ValueError('Invalid endpoint')
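# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of mitmproxy itself). A
# minimal addon using the classes above; in practice it would live in its own
# script and be loaded with `mitmproxy -s my_addon.py`. The class name is
# hypothetical; `websocket_message` is the addon event fired for each frame.
# ---------------------------------------------------------------------------
class _ExampleWebSocketAddon:
    def websocket_message(self, flow: WebSocketFlow):
        # The newest frame is the last entry in flow.messages.
        message = flow.messages[-1]
        print(flow.message_info(message))
        # Drop text frames containing "secret" instead of forwarding them.
        if message.type == Opcode.TEXT and b"secret" in message.content:
            message.kill()
        # To push an extra text frame towards the server, one could call:
        #   flow.inject_message(flow.server_conn, "observed by addon")

# In a standalone addon script this is how mitmproxy discovers the addon.
addons = [_ExampleWebSocketAddon()]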
|
PypiClean
|
/mayhem/examples/ms16_098_bsod.py
|
MF_POPUP = 0x0010
MF_STRING = 0x0000
MFS_ENABLED = 0x0000
MFT_STRING = 0x0000
MIIM_BITMAP = 0x0080
MIIM_ID = 0x0002
MIIM_STRING = 0x0040
MIIM_SUBMENU = 0x0004
HBMMENU_SYSTEM = 1
import binascii
import ctypes
import ctypes.wintypes as wintypes
import os
import platform
import random
import sys
import threading
import time
lib_path = os.path.split(__file__)[0]
lib_path = os.path.join(lib_path, '..')
lib_path = os.path.abspath(lib_path)
sys.path.insert(0, lib_path)
import mayhem
from mayhem.datatypes.windows import MENUITEMINFOW
from mayhem.datatypes.windows import UNICODE_STRING
from mayhem.exploit.windows import WindowsSyscall
from mayhem.exploit.windows import error_on_null
from mayhem.exploit.windows import print_handle
gdi32 = ctypes.windll.gdi32
kernel32 = ctypes.windll.kernel32
ntdll = ctypes.windll.ntdll
user32 = ctypes.windll.user32
syscall = WindowsSyscall()
def add_submenu_item(h_menu, name, w_id=None):
h_submenu = user32.CreatePopupMenu()
mi_info = MENUITEMINFOW()
mi_info.cbSize = ctypes.sizeof(MENUITEMINFOW)
mi_info.fMask = MIIM_STRING | MIIM_SUBMENU | MIIM_ID | MIIM_BITMAP
mi_info.fState = MFS_ENABLED
mi_info.hSubMenu = h_submenu
mi_info.wID = random.randint(0x10, 0xff) if w_id is None else w_id
mi_info.dwTypeData = name
mi_info.hbmpItem = HBMMENU_SYSTEM # (required to set nPosition to 1 in trigger)
item = UNICODE_STRING.from_string(name)
result = error_on_null(syscall.NtUserThunkedMenuItemInfo(
h_menu, # HMENU hMenu
0, # UINT nPosition
False, # BOOL fByPosition
True, # BOOL fInsert
ctypes.byref(mi_info), # LPMENUITEMINFOW lpmii
ctypes.byref(item) # PUNICODE_STRING pstrItem
))
print("NtUserThunkedMenuItemInfo submenu result: 0x{0:08x}".format(result))
return h_submenu
def add_menu_item(h_menu, name, w_id=None):
mi_info = MENUITEMINFOW()
mi_info.cbSize = ctypes.sizeof(MENUITEMINFOW)
mi_info.fMask = MIIM_STRING | MIIM_ID
mi_info.fType = MFT_STRING
mi_info.fState = MFS_ENABLED
mi_info.wID = random.randint(0x1000, 0xffff) if w_id is None else w_id
item = UNICODE_STRING.from_string(name)
result = error_on_null(syscall.NtUserThunkedMenuItemInfo(
h_menu, # HMENU hMenu
-1, # UINT nPosition
True, # BOOL fByPosition
True, # BOOL fInsert
ctypes.byref(mi_info), # LPMENUITEMINFOW lpmii
ctypes.byref(item) # PUNICODE_STRING pstrItem
))
print(" mi_info->wID = 0x{0:04x}".format(mi_info.wID))
return result
def trigger(h_menu, name, w_id, n_position, f_by_position):
mi_info = MENUITEMINFOW()
mi_info.cbSize = ctypes.sizeof(MENUITEMINFOW)
mi_info.fMask = MIIM_STRING | MIIM_ID
mi_info.fType = MFT_STRING
mi_info.fState = MFS_ENABLED
mi_info.wID = w_id
item = UNICODE_STRING.from_string(name)
result = error_on_null(syscall.NtUserThunkedMenuItemInfo(
h_menu, # HMENU hMenu
n_position, # UINT nPosition
f_by_position, # BOOL fByPosition
True, # BOOL fInsert
ctypes.byref(mi_info), # LPMENUITEMINFOW lpmii
ctypes.byref(item) # PUNICODE_STRING pstrItem
))
return result
def fill_menu(h_menu, base_idx=0x1000, count=7):
for idx in range(0, count):
print("[*] adding menu item #{0}".format(idx + 1))
time.sleep(0.25)
add_menu_item(h_menu, "menu item {0}".format(idx), w_id=(base_idx + idx))
return
def main():
print('**************************************************')
print('* CVE-2016-3308 / MS16-098 / ZDI-16-453 BSOD *')
print('* win32k!xxxInsertMenuItem Out-of-Bounds Access *')
print('* Spencer (@zeroSteiner) McIntyre *')
print('**************************************************')
if platform.architecture()[0] == '64bit':
print("[*] x86-64 syscall: 0x{0:016x}".format(syscall.address))
else:
print("[*] x86 syscall: 0x{0:08x}".format(syscall.address))
#raw_input("[*] PID: {0}, press enter to continue...".format(os.getpid()))
h_menu = user32.CreateMenu()
print("[*] h_menu: 0x{0:08x}".format(h_menu))
print_handle(h_menu)
h_submenu = add_submenu_item(h_menu, 'submenu', w_id=0x0123)
print("[*] h_submenu: 0x{0:08x}".format(h_submenu))
print_handle(h_submenu)
add_menu_item(h_submenu, 'subsubmenu-item', w_id=0x0001)
fill_menu(h_menu, base_idx=0x1001)
print("[+] triggering...")
time.sleep(0.5)
trigger(h_menu, 'sploit', w_id=0, n_position=0x0123, f_by_position=False)
return 0
main()
|
PypiClean
|
/geezlibs1-2.0.0-py3-none-any.whl/geezlibs/methods/messages/edit_message_media.py
|
import os
import re
import io
from typing import Union
import geezlibs
from geezlibs import raw
from geezlibs import types
from geezlibs import utils
from geezlibs.file_id import FileType
class EditMessageMedia:
async def edit_message_media(
self: "geezlibs.Client",
chat_id: Union[int, str],
message_id: int,
media: "types.InputMedia",
reply_markup: "types.InlineKeyboardMarkup" = None,
file_name: str = None
) -> "types.Message":
"""Edit animation, audio, document, photo or video messages.
If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise, the
message type can be changed arbitrarily.
.. include:: /_includes/usable-by/users-bots.rst
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Message identifier in the chat specified in chat_id.
media (:obj:`~geezlibs.types.InputMedia`):
One of the InputMedia objects describing an animation, audio, document, photo or video.
reply_markup (:obj:`~geezlibs.types.InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
file_name (``str``, *optional*):
File name of the media to be sent. Not applicable to photos.
Defaults to file's path basename.
Returns:
:obj:`~geezlibs.types.Message`: On success, the edited message is returned.
Example:
.. code-block:: python
from geezlibs.types import InputMediaPhoto, InputMediaVideo, InputMediaAudio
# Replace the current media with a local photo
await app.edit_message_media(chat_id, message_id,
InputMediaPhoto("new_photo.jpg"))
# Replace the current media with a local video
await app.edit_message_media(chat_id, message_id,
InputMediaVideo("new_video.mp4"))
# Replace the current media with a local audio
await app.edit_message_media(chat_id, message_id,
InputMediaAudio("new_audio.mp3"))
"""
caption = media.caption
parse_mode = media.parse_mode
message, entities = None, None
if caption is not None:
message, entities = (await self.parser.parse(caption, parse_mode)).values()
if isinstance(media, types.InputMediaPhoto):
if isinstance(media.media, io.BytesIO) or os.path.isfile(media.media):
media = await self.invoke(
raw.functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=raw.types.InputMediaUploadedPhoto(
file=await self.save_file(media.media)
)
)
)
media = raw.types.InputMediaPhoto(
id=raw.types.InputPhoto(
id=media.photo.id,
access_hash=media.photo.access_hash,
file_reference=media.photo.file_reference
)
)
elif re.match("^https?://", media.media):
media = raw.types.InputMediaPhotoExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, FileType.PHOTO)
elif isinstance(media, types.InputMediaVideo):
if isinstance(media.media, io.BytesIO) or os.path.isfile(media.media):
media = await self.invoke(
raw.functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=raw.types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "video/mp4",
thumb=await self.save_file(media.thumb),
file=await self.save_file(media.media),
attributes=[
raw.types.DocumentAttributeVideo(
supports_streaming=media.supports_streaming or None,
duration=media.duration,
w=media.width,
h=media.height
),
raw.types.DocumentAttributeFilename(
file_name=file_name or os.path.basename(media.media)
)
]
)
)
)
media = raw.types.InputMediaDocument(
id=raw.types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=media.document.file_reference
)
)
elif re.match("^https?://", media.media):
media = raw.types.InputMediaDocumentExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, FileType.VIDEO)
elif isinstance(media, types.InputMediaAudio):
if isinstance(media.media, io.BytesIO) or os.path.isfile(media.media):
media = await self.invoke(
raw.functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=raw.types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "audio/mpeg",
thumb=await self.save_file(media.thumb),
file=await self.save_file(media.media),
attributes=[
raw.types.DocumentAttributeAudio(
duration=media.duration,
performer=media.performer,
title=media.title
),
raw.types.DocumentAttributeFilename(
file_name=file_name or os.path.basename(media.media)
)
]
)
)
)
media = raw.types.InputMediaDocument(
id=raw.types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=media.document.file_reference
)
)
elif re.match("^https?://", media.media):
media = raw.types.InputMediaDocumentExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, FileType.AUDIO)
elif isinstance(media, types.InputMediaAnimation):
if isinstance(media.media, io.BytesIO) or os.path.isfile(media.media):
media = await self.invoke(
raw.functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=raw.types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "video/mp4",
thumb=await self.save_file(media.thumb),
file=await self.save_file(media.media),
attributes=[
raw.types.DocumentAttributeVideo(
supports_streaming=True,
duration=media.duration,
w=media.width,
h=media.height
),
raw.types.DocumentAttributeFilename(
file_name=file_name or os.path.basename(media.media)
),
raw.types.DocumentAttributeAnimated()
]
)
)
)
media = raw.types.InputMediaDocument(
id=raw.types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=media.document.file_reference
)
)
elif re.match("^https?://", media.media):
media = raw.types.InputMediaDocumentExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, FileType.ANIMATION)
elif isinstance(media, types.InputMediaDocument):
if isinstance(media.media, io.BytesIO) or os.path.isfile(media.media):
media = await self.invoke(
raw.functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=raw.types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "application/zip",
thumb=await self.save_file(media.thumb),
file=await self.save_file(media.media),
attributes=[
raw.types.DocumentAttributeFilename(
file_name=file_name or os.path.basename(media.media)
)
]
)
)
)
media = raw.types.InputMediaDocument(
id=raw.types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=media.document.file_reference
)
)
elif re.match("^https?://", media.media):
media = raw.types.InputMediaDocumentExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, FileType.DOCUMENT)
r = await self.invoke(
raw.functions.messages.EditMessage(
peer=await self.resolve_peer(chat_id),
id=message_id,
media=media,
reply_markup=await reply_markup.write(self) if reply_markup else None,
message=message,
entities=entities
)
)
for i in r.updates:
if isinstance(i, (raw.types.UpdateEditMessage, raw.types.UpdateEditChannelMessage)):
return await types.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats}
)
|
PypiClean
|
/pulumi_aws_native-0.75.1a1693503310.tar.gz/pulumi_aws_native-0.75.1a1693503310/pulumi_aws_native/elasticache/replication_group.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ReplicationGroupArgs', 'ReplicationGroup']
@pulumi.input_type
class ReplicationGroupArgs:
def __init__(__self__, *,
replication_group_description: pulumi.Input[str],
at_rest_encryption_enabled: Optional[pulumi.Input[bool]] = None,
auth_token: Optional[pulumi.Input[str]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
automatic_failover_enabled: Optional[pulumi.Input[bool]] = None,
cache_node_type: Optional[pulumi.Input[str]] = None,
cache_parameter_group_name: Optional[pulumi.Input[str]] = None,
cache_security_group_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cache_subnet_group_name: Optional[pulumi.Input[str]] = None,
cluster_mode: Optional[pulumi.Input[str]] = None,
configuration_end_point_address: Optional[pulumi.Input[str]] = None,
configuration_end_point_port: Optional[pulumi.Input[str]] = None,
data_tiering_enabled: Optional[pulumi.Input[bool]] = None,
engine: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
global_replication_group_id: Optional[pulumi.Input[str]] = None,
ip_discovery: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
log_delivery_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupLogDeliveryConfigurationRequestArgs']]]] = None,
multi_az_enabled: Optional[pulumi.Input[bool]] = None,
network_type: Optional[pulumi.Input[str]] = None,
node_group_configuration: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupNodeGroupConfigurationArgs']]]] = None,
notification_topic_arn: Optional[pulumi.Input[str]] = None,
num_cache_clusters: Optional[pulumi.Input[int]] = None,
num_node_groups: Optional[pulumi.Input[int]] = None,
port: Optional[pulumi.Input[int]] = None,
preferred_cache_cluster_azs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
primary_cluster_id: Optional[pulumi.Input[str]] = None,
primary_end_point_address: Optional[pulumi.Input[str]] = None,
primary_end_point_port: Optional[pulumi.Input[str]] = None,
read_end_point_addresses: Optional[pulumi.Input[str]] = None,
read_end_point_addresses_list: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
read_end_point_ports: Optional[pulumi.Input[str]] = None,
read_end_point_ports_list: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
reader_end_point_address: Optional[pulumi.Input[str]] = None,
reader_end_point_port: Optional[pulumi.Input[str]] = None,
replicas_per_node_group: Optional[pulumi.Input[int]] = None,
replication_group_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snapshot_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
snapshot_retention_limit: Optional[pulumi.Input[int]] = None,
snapshot_window: Optional[pulumi.Input[str]] = None,
snapshotting_cluster_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupTagArgs']]]] = None,
transit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
transit_encryption_mode: Optional[pulumi.Input[str]] = None,
user_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ReplicationGroup resource.
"""
pulumi.set(__self__, "replication_group_description", replication_group_description)
if at_rest_encryption_enabled is not None:
pulumi.set(__self__, "at_rest_encryption_enabled", at_rest_encryption_enabled)
if auth_token is not None:
pulumi.set(__self__, "auth_token", auth_token)
if auto_minor_version_upgrade is not None:
pulumi.set(__self__, "auto_minor_version_upgrade", auto_minor_version_upgrade)
if automatic_failover_enabled is not None:
pulumi.set(__self__, "automatic_failover_enabled", automatic_failover_enabled)
if cache_node_type is not None:
pulumi.set(__self__, "cache_node_type", cache_node_type)
if cache_parameter_group_name is not None:
pulumi.set(__self__, "cache_parameter_group_name", cache_parameter_group_name)
if cache_security_group_names is not None:
pulumi.set(__self__, "cache_security_group_names", cache_security_group_names)
if cache_subnet_group_name is not None:
pulumi.set(__self__, "cache_subnet_group_name", cache_subnet_group_name)
if cluster_mode is not None:
pulumi.set(__self__, "cluster_mode", cluster_mode)
if configuration_end_point_address is not None:
pulumi.set(__self__, "configuration_end_point_address", configuration_end_point_address)
if configuration_end_point_port is not None:
pulumi.set(__self__, "configuration_end_point_port", configuration_end_point_port)
if data_tiering_enabled is not None:
pulumi.set(__self__, "data_tiering_enabled", data_tiering_enabled)
if engine is not None:
pulumi.set(__self__, "engine", engine)
if engine_version is not None:
pulumi.set(__self__, "engine_version", engine_version)
if global_replication_group_id is not None:
pulumi.set(__self__, "global_replication_group_id", global_replication_group_id)
if ip_discovery is not None:
pulumi.set(__self__, "ip_discovery", ip_discovery)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if log_delivery_configurations is not None:
pulumi.set(__self__, "log_delivery_configurations", log_delivery_configurations)
if multi_az_enabled is not None:
pulumi.set(__self__, "multi_az_enabled", multi_az_enabled)
if network_type is not None:
pulumi.set(__self__, "network_type", network_type)
if node_group_configuration is not None:
pulumi.set(__self__, "node_group_configuration", node_group_configuration)
if notification_topic_arn is not None:
pulumi.set(__self__, "notification_topic_arn", notification_topic_arn)
if num_cache_clusters is not None:
pulumi.set(__self__, "num_cache_clusters", num_cache_clusters)
if num_node_groups is not None:
pulumi.set(__self__, "num_node_groups", num_node_groups)
if port is not None:
pulumi.set(__self__, "port", port)
if preferred_cache_cluster_azs is not None:
pulumi.set(__self__, "preferred_cache_cluster_azs", preferred_cache_cluster_azs)
if preferred_maintenance_window is not None:
pulumi.set(__self__, "preferred_maintenance_window", preferred_maintenance_window)
if primary_cluster_id is not None:
pulumi.set(__self__, "primary_cluster_id", primary_cluster_id)
if primary_end_point_address is not None:
pulumi.set(__self__, "primary_end_point_address", primary_end_point_address)
if primary_end_point_port is not None:
pulumi.set(__self__, "primary_end_point_port", primary_end_point_port)
if read_end_point_addresses is not None:
pulumi.set(__self__, "read_end_point_addresses", read_end_point_addresses)
if read_end_point_addresses_list is not None:
pulumi.set(__self__, "read_end_point_addresses_list", read_end_point_addresses_list)
if read_end_point_ports is not None:
pulumi.set(__self__, "read_end_point_ports", read_end_point_ports)
if read_end_point_ports_list is not None:
pulumi.set(__self__, "read_end_point_ports_list", read_end_point_ports_list)
if reader_end_point_address is not None:
pulumi.set(__self__, "reader_end_point_address", reader_end_point_address)
if reader_end_point_port is not None:
pulumi.set(__self__, "reader_end_point_port", reader_end_point_port)
if replicas_per_node_group is not None:
pulumi.set(__self__, "replicas_per_node_group", replicas_per_node_group)
if replication_group_id is not None:
pulumi.set(__self__, "replication_group_id", replication_group_id)
if security_group_ids is not None:
pulumi.set(__self__, "security_group_ids", security_group_ids)
if snapshot_arns is not None:
pulumi.set(__self__, "snapshot_arns", snapshot_arns)
if snapshot_name is not None:
pulumi.set(__self__, "snapshot_name", snapshot_name)
if snapshot_retention_limit is not None:
pulumi.set(__self__, "snapshot_retention_limit", snapshot_retention_limit)
if snapshot_window is not None:
pulumi.set(__self__, "snapshot_window", snapshot_window)
if snapshotting_cluster_id is not None:
pulumi.set(__self__, "snapshotting_cluster_id", snapshotting_cluster_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if transit_encryption_enabled is not None:
pulumi.set(__self__, "transit_encryption_enabled", transit_encryption_enabled)
if transit_encryption_mode is not None:
pulumi.set(__self__, "transit_encryption_mode", transit_encryption_mode)
if user_group_ids is not None:
pulumi.set(__self__, "user_group_ids", user_group_ids)
@property
@pulumi.getter(name="replicationGroupDescription")
def replication_group_description(self) -> pulumi.Input[str]:
return pulumi.get(self, "replication_group_description")
@replication_group_description.setter
def replication_group_description(self, value: pulumi.Input[str]):
pulumi.set(self, "replication_group_description", value)
@property
@pulumi.getter(name="atRestEncryptionEnabled")
def at_rest_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "at_rest_encryption_enabled")
@at_rest_encryption_enabled.setter
def at_rest_encryption_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "at_rest_encryption_enabled", value)
@property
@pulumi.getter(name="authToken")
def auth_token(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "auth_token")
@auth_token.setter
def auth_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_token", value)
@property
@pulumi.getter(name="autoMinorVersionUpgrade")
def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "auto_minor_version_upgrade")
@auto_minor_version_upgrade.setter
def auto_minor_version_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_minor_version_upgrade", value)
@property
@pulumi.getter(name="automaticFailoverEnabled")
def automatic_failover_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "automatic_failover_enabled")
@automatic_failover_enabled.setter
def automatic_failover_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automatic_failover_enabled", value)
@property
@pulumi.getter(name="cacheNodeType")
def cache_node_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cache_node_type")
@cache_node_type.setter
def cache_node_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cache_node_type", value)
@property
@pulumi.getter(name="cacheParameterGroupName")
def cache_parameter_group_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cache_parameter_group_name")
@cache_parameter_group_name.setter
def cache_parameter_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cache_parameter_group_name", value)
@property
@pulumi.getter(name="cacheSecurityGroupNames")
def cache_security_group_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "cache_security_group_names")
@cache_security_group_names.setter
def cache_security_group_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "cache_security_group_names", value)
@property
@pulumi.getter(name="cacheSubnetGroupName")
def cache_subnet_group_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cache_subnet_group_name")
@cache_subnet_group_name.setter
def cache_subnet_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cache_subnet_group_name", value)
@property
@pulumi.getter(name="clusterMode")
def cluster_mode(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cluster_mode")
@cluster_mode.setter
def cluster_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_mode", value)
@property
@pulumi.getter(name="configurationEndPointAddress")
def configuration_end_point_address(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "configuration_end_point_address")
@configuration_end_point_address.setter
def configuration_end_point_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "configuration_end_point_address", value)
@property
@pulumi.getter(name="configurationEndPointPort")
def configuration_end_point_port(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "configuration_end_point_port")
@configuration_end_point_port.setter
def configuration_end_point_port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "configuration_end_point_port", value)
@property
@pulumi.getter(name="dataTieringEnabled")
def data_tiering_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "data_tiering_enabled")
@data_tiering_enabled.setter
def data_tiering_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "data_tiering_enabled", value)
@property
@pulumi.getter
def engine(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "engine")
@engine.setter
def engine(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine", value)
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "engine_version")
@engine_version.setter
def engine_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine_version", value)
@property
@pulumi.getter(name="globalReplicationGroupId")
def global_replication_group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "global_replication_group_id")
@global_replication_group_id.setter
def global_replication_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "global_replication_group_id", value)
@property
@pulumi.getter(name="ipDiscovery")
def ip_discovery(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ip_discovery")
@ip_discovery.setter
def ip_discovery(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_discovery", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter(name="logDeliveryConfigurations")
def log_delivery_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupLogDeliveryConfigurationRequestArgs']]]]:
return pulumi.get(self, "log_delivery_configurations")
@log_delivery_configurations.setter
def log_delivery_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupLogDeliveryConfigurationRequestArgs']]]]):
pulumi.set(self, "log_delivery_configurations", value)
@property
@pulumi.getter(name="multiAzEnabled")
def multi_az_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "multi_az_enabled")
@multi_az_enabled.setter
def multi_az_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "multi_az_enabled", value)
@property
@pulumi.getter(name="networkType")
def network_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "network_type")
@network_type.setter
def network_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_type", value)
@property
@pulumi.getter(name="nodeGroupConfiguration")
def node_group_configuration(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupNodeGroupConfigurationArgs']]]]:
return pulumi.get(self, "node_group_configuration")
@node_group_configuration.setter
def node_group_configuration(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupNodeGroupConfigurationArgs']]]]):
pulumi.set(self, "node_group_configuration", value)
@property
@pulumi.getter(name="notificationTopicArn")
def notification_topic_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "notification_topic_arn")
@notification_topic_arn.setter
def notification_topic_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notification_topic_arn", value)
@property
@pulumi.getter(name="numCacheClusters")
def num_cache_clusters(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "num_cache_clusters")
@num_cache_clusters.setter
def num_cache_clusters(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "num_cache_clusters", value)
@property
@pulumi.getter(name="numNodeGroups")
def num_node_groups(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "num_node_groups")
@num_node_groups.setter
def num_node_groups(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "num_node_groups", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="preferredCacheClusterAzs")
def preferred_cache_cluster_azs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "preferred_cache_cluster_azs")
@preferred_cache_cluster_azs.setter
def preferred_cache_cluster_azs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "preferred_cache_cluster_azs", value)
@property
@pulumi.getter(name="preferredMaintenanceWindow")
def preferred_maintenance_window(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "preferred_maintenance_window")
@preferred_maintenance_window.setter
def preferred_maintenance_window(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "preferred_maintenance_window", value)
@property
@pulumi.getter(name="primaryClusterId")
def primary_cluster_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "primary_cluster_id")
@primary_cluster_id.setter
def primary_cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_cluster_id", value)
@property
@pulumi.getter(name="primaryEndPointAddress")
def primary_end_point_address(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "primary_end_point_address")
@primary_end_point_address.setter
def primary_end_point_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_end_point_address", value)
@property
@pulumi.getter(name="primaryEndPointPort")
def primary_end_point_port(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "primary_end_point_port")
@primary_end_point_port.setter
def primary_end_point_port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_end_point_port", value)
@property
@pulumi.getter(name="readEndPointAddresses")
def read_end_point_addresses(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "read_end_point_addresses")
@read_end_point_addresses.setter
def read_end_point_addresses(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "read_end_point_addresses", value)
@property
@pulumi.getter(name="readEndPointAddressesList")
def read_end_point_addresses_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "read_end_point_addresses_list")
@read_end_point_addresses_list.setter
def read_end_point_addresses_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "read_end_point_addresses_list", value)
@property
@pulumi.getter(name="readEndPointPorts")
def read_end_point_ports(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "read_end_point_ports")
@read_end_point_ports.setter
def read_end_point_ports(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "read_end_point_ports", value)
@property
@pulumi.getter(name="readEndPointPortsList")
def read_end_point_ports_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "read_end_point_ports_list")
@read_end_point_ports_list.setter
def read_end_point_ports_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "read_end_point_ports_list", value)
@property
@pulumi.getter(name="readerEndPointAddress")
def reader_end_point_address(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reader_end_point_address")
@reader_end_point_address.setter
def reader_end_point_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reader_end_point_address", value)
@property
@pulumi.getter(name="readerEndPointPort")
def reader_end_point_port(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reader_end_point_port")
@reader_end_point_port.setter
def reader_end_point_port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reader_end_point_port", value)
@property
@pulumi.getter(name="replicasPerNodeGroup")
def replicas_per_node_group(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "replicas_per_node_group")
@replicas_per_node_group.setter
def replicas_per_node_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replicas_per_node_group", value)
@property
@pulumi.getter(name="replicationGroupId")
def replication_group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "replication_group_id")
@replication_group_id.setter
def replication_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "replication_group_id", value)
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "security_group_ids")
@security_group_ids.setter
def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "security_group_ids", value)
@property
@pulumi.getter(name="snapshotArns")
def snapshot_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "snapshot_arns")
@snapshot_arns.setter
def snapshot_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "snapshot_arns", value)
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "snapshot_name")
@snapshot_name.setter
def snapshot_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_name", value)
@property
@pulumi.getter(name="snapshotRetentionLimit")
def snapshot_retention_limit(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "snapshot_retention_limit")
@snapshot_retention_limit.setter
def snapshot_retention_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "snapshot_retention_limit", value)
@property
@pulumi.getter(name="snapshotWindow")
def snapshot_window(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "snapshot_window")
@snapshot_window.setter
def snapshot_window(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_window", value)
@property
@pulumi.getter(name="snapshottingClusterId")
def snapshotting_cluster_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "snapshotting_cluster_id")
@snapshotting_cluster_id.setter
def snapshotting_cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshotting_cluster_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupTagArgs']]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationGroupTagArgs']]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="transitEncryptionEnabled")
def transit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "transit_encryption_enabled")
@transit_encryption_enabled.setter
def transit_encryption_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "transit_encryption_enabled", value)
@property
@pulumi.getter(name="transitEncryptionMode")
def transit_encryption_mode(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "transit_encryption_mode")
@transit_encryption_mode.setter
def transit_encryption_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "transit_encryption_mode", value)
@property
@pulumi.getter(name="userGroupIds")
def user_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "user_group_ids")
@user_group_ids.setter
def user_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "user_group_ids", value)
warnings.warn("""ReplicationGroup is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class ReplicationGroup(pulumi.CustomResource):
warnings.warn("""ReplicationGroup is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
at_rest_encryption_enabled: Optional[pulumi.Input[bool]] = None,
auth_token: Optional[pulumi.Input[str]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
automatic_failover_enabled: Optional[pulumi.Input[bool]] = None,
cache_node_type: Optional[pulumi.Input[str]] = None,
cache_parameter_group_name: Optional[pulumi.Input[str]] = None,
cache_security_group_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cache_subnet_group_name: Optional[pulumi.Input[str]] = None,
cluster_mode: Optional[pulumi.Input[str]] = None,
configuration_end_point_address: Optional[pulumi.Input[str]] = None,
configuration_end_point_port: Optional[pulumi.Input[str]] = None,
data_tiering_enabled: Optional[pulumi.Input[bool]] = None,
engine: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
global_replication_group_id: Optional[pulumi.Input[str]] = None,
ip_discovery: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
log_delivery_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReplicationGroupLogDeliveryConfigurationRequestArgs']]]]] = None,
multi_az_enabled: Optional[pulumi.Input[bool]] = None,
network_type: Optional[pulumi.Input[str]] = None,
node_group_configuration: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReplicationGroupNodeGroupConfigurationArgs']]]]] = None,
notification_topic_arn: Optional[pulumi.Input[str]] = None,
num_cache_clusters: Optional[pulumi.Input[int]] = None,
num_node_groups: Optional[pulumi.Input[int]] = None,
port: Optional[pulumi.Input[int]] = None,
preferred_cache_cluster_azs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
primary_cluster_id: Optional[pulumi.Input[str]] = None,
primary_end_point_address: Optional[pulumi.Input[str]] = None,
primary_end_point_port: Optional[pulumi.Input[str]] = None,
read_end_point_addresses: Optional[pulumi.Input[str]] = None,
read_end_point_addresses_list: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
read_end_point_ports: Optional[pulumi.Input[str]] = None,
read_end_point_ports_list: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
reader_end_point_address: Optional[pulumi.Input[str]] = None,
reader_end_point_port: Optional[pulumi.Input[str]] = None,
replicas_per_node_group: Optional[pulumi.Input[int]] = None,
replication_group_description: Optional[pulumi.Input[str]] = None,
replication_group_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snapshot_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
snapshot_retention_limit: Optional[pulumi.Input[int]] = None,
snapshot_window: Optional[pulumi.Input[str]] = None,
snapshotting_cluster_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReplicationGroupTagArgs']]]]] = None,
transit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
transit_encryption_mode: Optional[pulumi.Input[str]] = None,
user_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Resource Type definition for AWS::ElastiCache::ReplicationGroup
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ReplicationGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::ElastiCache::ReplicationGroup
:param str resource_name: The name of the resource.
:param ReplicationGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ReplicationGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
at_rest_encryption_enabled: Optional[pulumi.Input[bool]] = None,
auth_token: Optional[pulumi.Input[str]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
automatic_failover_enabled: Optional[pulumi.Input[bool]] = None,
cache_node_type: Optional[pulumi.Input[str]] = None,
cache_parameter_group_name: Optional[pulumi.Input[str]] = None,
cache_security_group_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cache_subnet_group_name: Optional[pulumi.Input[str]] = None,
cluster_mode: Optional[pulumi.Input[str]] = None,
configuration_end_point_address: Optional[pulumi.Input[str]] = None,
configuration_end_point_port: Optional[pulumi.Input[str]] = None,
data_tiering_enabled: Optional[pulumi.Input[bool]] = None,
engine: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
global_replication_group_id: Optional[pulumi.Input[str]] = None,
ip_discovery: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
log_delivery_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReplicationGroupLogDeliveryConfigurationRequestArgs']]]]] = None,
multi_az_enabled: Optional[pulumi.Input[bool]] = None,
network_type: Optional[pulumi.Input[str]] = None,
node_group_configuration: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReplicationGroupNodeGroupConfigurationArgs']]]]] = None,
notification_topic_arn: Optional[pulumi.Input[str]] = None,
num_cache_clusters: Optional[pulumi.Input[int]] = None,
num_node_groups: Optional[pulumi.Input[int]] = None,
port: Optional[pulumi.Input[int]] = None,
preferred_cache_cluster_azs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
primary_cluster_id: Optional[pulumi.Input[str]] = None,
primary_end_point_address: Optional[pulumi.Input[str]] = None,
primary_end_point_port: Optional[pulumi.Input[str]] = None,
read_end_point_addresses: Optional[pulumi.Input[str]] = None,
read_end_point_addresses_list: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
read_end_point_ports: Optional[pulumi.Input[str]] = None,
read_end_point_ports_list: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
reader_end_point_address: Optional[pulumi.Input[str]] = None,
reader_end_point_port: Optional[pulumi.Input[str]] = None,
replicas_per_node_group: Optional[pulumi.Input[int]] = None,
replication_group_description: Optional[pulumi.Input[str]] = None,
replication_group_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snapshot_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
snapshot_retention_limit: Optional[pulumi.Input[int]] = None,
snapshot_window: Optional[pulumi.Input[str]] = None,
snapshotting_cluster_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReplicationGroupTagArgs']]]]] = None,
transit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
transit_encryption_mode: Optional[pulumi.Input[str]] = None,
user_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
pulumi.log.warn("""ReplicationGroup is deprecated: ReplicationGroup is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ReplicationGroupArgs.__new__(ReplicationGroupArgs)
__props__.__dict__["at_rest_encryption_enabled"] = at_rest_encryption_enabled
__props__.__dict__["auth_token"] = auth_token
__props__.__dict__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
__props__.__dict__["automatic_failover_enabled"] = automatic_failover_enabled
__props__.__dict__["cache_node_type"] = cache_node_type
__props__.__dict__["cache_parameter_group_name"] = cache_parameter_group_name
__props__.__dict__["cache_security_group_names"] = cache_security_group_names
__props__.__dict__["cache_subnet_group_name"] = cache_subnet_group_name
__props__.__dict__["cluster_mode"] = cluster_mode
__props__.__dict__["configuration_end_point_address"] = configuration_end_point_address
__props__.__dict__["configuration_end_point_port"] = configuration_end_point_port
__props__.__dict__["data_tiering_enabled"] = data_tiering_enabled
__props__.__dict__["engine"] = engine
__props__.__dict__["engine_version"] = engine_version
__props__.__dict__["global_replication_group_id"] = global_replication_group_id
__props__.__dict__["ip_discovery"] = ip_discovery
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["log_delivery_configurations"] = log_delivery_configurations
__props__.__dict__["multi_az_enabled"] = multi_az_enabled
__props__.__dict__["network_type"] = network_type
__props__.__dict__["node_group_configuration"] = node_group_configuration
__props__.__dict__["notification_topic_arn"] = notification_topic_arn
__props__.__dict__["num_cache_clusters"] = num_cache_clusters
__props__.__dict__["num_node_groups"] = num_node_groups
__props__.__dict__["port"] = port
__props__.__dict__["preferred_cache_cluster_azs"] = preferred_cache_cluster_azs
__props__.__dict__["preferred_maintenance_window"] = preferred_maintenance_window
__props__.__dict__["primary_cluster_id"] = primary_cluster_id
__props__.__dict__["primary_end_point_address"] = primary_end_point_address
__props__.__dict__["primary_end_point_port"] = primary_end_point_port
__props__.__dict__["read_end_point_addresses"] = read_end_point_addresses
__props__.__dict__["read_end_point_addresses_list"] = read_end_point_addresses_list
__props__.__dict__["read_end_point_ports"] = read_end_point_ports
__props__.__dict__["read_end_point_ports_list"] = read_end_point_ports_list
__props__.__dict__["reader_end_point_address"] = reader_end_point_address
__props__.__dict__["reader_end_point_port"] = reader_end_point_port
__props__.__dict__["replicas_per_node_group"] = replicas_per_node_group
if replication_group_description is None and not opts.urn:
raise TypeError("Missing required property 'replication_group_description'")
__props__.__dict__["replication_group_description"] = replication_group_description
__props__.__dict__["replication_group_id"] = replication_group_id
__props__.__dict__["security_group_ids"] = security_group_ids
__props__.__dict__["snapshot_arns"] = snapshot_arns
__props__.__dict__["snapshot_name"] = snapshot_name
__props__.__dict__["snapshot_retention_limit"] = snapshot_retention_limit
__props__.__dict__["snapshot_window"] = snapshot_window
__props__.__dict__["snapshotting_cluster_id"] = snapshotting_cluster_id
__props__.__dict__["tags"] = tags
__props__.__dict__["transit_encryption_enabled"] = transit_encryption_enabled
__props__.__dict__["transit_encryption_mode"] = transit_encryption_mode
__props__.__dict__["user_group_ids"] = user_group_ids
super(ReplicationGroup, __self__).__init__(
'aws-native:elasticache:ReplicationGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ReplicationGroup':
"""
Get an existing ReplicationGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ReplicationGroupArgs.__new__(ReplicationGroupArgs)
__props__.__dict__["at_rest_encryption_enabled"] = None
__props__.__dict__["auth_token"] = None
__props__.__dict__["auto_minor_version_upgrade"] = None
__props__.__dict__["automatic_failover_enabled"] = None
__props__.__dict__["cache_node_type"] = None
__props__.__dict__["cache_parameter_group_name"] = None
__props__.__dict__["cache_security_group_names"] = None
__props__.__dict__["cache_subnet_group_name"] = None
__props__.__dict__["cluster_mode"] = None
__props__.__dict__["configuration_end_point_address"] = None
__props__.__dict__["configuration_end_point_port"] = None
__props__.__dict__["data_tiering_enabled"] = None
__props__.__dict__["engine"] = None
__props__.__dict__["engine_version"] = None
__props__.__dict__["global_replication_group_id"] = None
__props__.__dict__["ip_discovery"] = None
__props__.__dict__["kms_key_id"] = None
__props__.__dict__["log_delivery_configurations"] = None
__props__.__dict__["multi_az_enabled"] = None
__props__.__dict__["network_type"] = None
__props__.__dict__["node_group_configuration"] = None
__props__.__dict__["notification_topic_arn"] = None
__props__.__dict__["num_cache_clusters"] = None
__props__.__dict__["num_node_groups"] = None
__props__.__dict__["port"] = None
__props__.__dict__["preferred_cache_cluster_azs"] = None
__props__.__dict__["preferred_maintenance_window"] = None
__props__.__dict__["primary_cluster_id"] = None
__props__.__dict__["primary_end_point_address"] = None
__props__.__dict__["primary_end_point_port"] = None
__props__.__dict__["read_end_point_addresses"] = None
__props__.__dict__["read_end_point_addresses_list"] = None
__props__.__dict__["read_end_point_ports"] = None
__props__.__dict__["read_end_point_ports_list"] = None
__props__.__dict__["reader_end_point_address"] = None
__props__.__dict__["reader_end_point_port"] = None
__props__.__dict__["replicas_per_node_group"] = None
__props__.__dict__["replication_group_description"] = None
__props__.__dict__["replication_group_id"] = None
__props__.__dict__["security_group_ids"] = None
__props__.__dict__["snapshot_arns"] = None
__props__.__dict__["snapshot_name"] = None
__props__.__dict__["snapshot_retention_limit"] = None
__props__.__dict__["snapshot_window"] = None
__props__.__dict__["snapshotting_cluster_id"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["transit_encryption_enabled"] = None
__props__.__dict__["transit_encryption_mode"] = None
__props__.__dict__["user_group_ids"] = None
return ReplicationGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="atRestEncryptionEnabled")
def at_rest_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "at_rest_encryption_enabled")
@property
@pulumi.getter(name="authToken")
def auth_token(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "auth_token")
@property
@pulumi.getter(name="autoMinorVersionUpgrade")
def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "auto_minor_version_upgrade")
@property
@pulumi.getter(name="automaticFailoverEnabled")
def automatic_failover_enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "automatic_failover_enabled")
@property
@pulumi.getter(name="cacheNodeType")
def cache_node_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "cache_node_type")
@property
@pulumi.getter(name="cacheParameterGroupName")
def cache_parameter_group_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "cache_parameter_group_name")
@property
@pulumi.getter(name="cacheSecurityGroupNames")
def cache_security_group_names(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "cache_security_group_names")
@property
@pulumi.getter(name="cacheSubnetGroupName")
def cache_subnet_group_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "cache_subnet_group_name")
@property
@pulumi.getter(name="clusterMode")
def cluster_mode(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "cluster_mode")
@property
@pulumi.getter(name="configurationEndPointAddress")
def configuration_end_point_address(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "configuration_end_point_address")
@property
@pulumi.getter(name="configurationEndPointPort")
def configuration_end_point_port(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "configuration_end_point_port")
@property
@pulumi.getter(name="dataTieringEnabled")
def data_tiering_enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "data_tiering_enabled")
@property
@pulumi.getter
def engine(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "engine")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "engine_version")
@property
@pulumi.getter(name="globalReplicationGroupId")
def global_replication_group_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "global_replication_group_id")
@property
@pulumi.getter(name="ipDiscovery")
def ip_discovery(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "ip_discovery")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="logDeliveryConfigurations")
def log_delivery_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.ReplicationGroupLogDeliveryConfigurationRequest']]]:
return pulumi.get(self, "log_delivery_configurations")
@property
@pulumi.getter(name="multiAzEnabled")
def multi_az_enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "multi_az_enabled")
@property
@pulumi.getter(name="networkType")
def network_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "network_type")
@property
@pulumi.getter(name="nodeGroupConfiguration")
def node_group_configuration(self) -> pulumi.Output[Optional[Sequence['outputs.ReplicationGroupNodeGroupConfiguration']]]:
return pulumi.get(self, "node_group_configuration")
@property
@pulumi.getter(name="notificationTopicArn")
def notification_topic_arn(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "notification_topic_arn")
@property
@pulumi.getter(name="numCacheClusters")
def num_cache_clusters(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "num_cache_clusters")
@property
@pulumi.getter(name="numNodeGroups")
def num_node_groups(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "num_node_groups")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "port")
@property
@pulumi.getter(name="preferredCacheClusterAzs")
def preferred_cache_cluster_azs(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "preferred_cache_cluster_azs")
@property
@pulumi.getter(name="preferredMaintenanceWindow")
def preferred_maintenance_window(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "preferred_maintenance_window")
@property
@pulumi.getter(name="primaryClusterId")
def primary_cluster_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "primary_cluster_id")
@property
@pulumi.getter(name="primaryEndPointAddress")
def primary_end_point_address(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "primary_end_point_address")
@property
@pulumi.getter(name="primaryEndPointPort")
def primary_end_point_port(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "primary_end_point_port")
@property
@pulumi.getter(name="readEndPointAddresses")
def read_end_point_addresses(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "read_end_point_addresses")
@property
@pulumi.getter(name="readEndPointAddressesList")
def read_end_point_addresses_list(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "read_end_point_addresses_list")
@property
@pulumi.getter(name="readEndPointPorts")
def read_end_point_ports(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "read_end_point_ports")
@property
@pulumi.getter(name="readEndPointPortsList")
def read_end_point_ports_list(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "read_end_point_ports_list")
@property
@pulumi.getter(name="readerEndPointAddress")
def reader_end_point_address(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "reader_end_point_address")
@property
@pulumi.getter(name="readerEndPointPort")
def reader_end_point_port(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "reader_end_point_port")
@property
@pulumi.getter(name="replicasPerNodeGroup")
def replicas_per_node_group(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "replicas_per_node_group")
@property
@pulumi.getter(name="replicationGroupDescription")
def replication_group_description(self) -> pulumi.Output[str]:
return pulumi.get(self, "replication_group_description")
@property
@pulumi.getter(name="replicationGroupId")
def replication_group_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "replication_group_id")
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "security_group_ids")
@property
@pulumi.getter(name="snapshotArns")
def snapshot_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "snapshot_arns")
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "snapshot_name")
@property
@pulumi.getter(name="snapshotRetentionLimit")
def snapshot_retention_limit(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "snapshot_retention_limit")
@property
@pulumi.getter(name="snapshotWindow")
def snapshot_window(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "snapshot_window")
@property
@pulumi.getter(name="snapshottingClusterId")
def snapshotting_cluster_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "snapshotting_cluster_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.ReplicationGroupTag']]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="transitEncryptionEnabled")
def transit_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "transit_encryption_enabled")
@property
@pulumi.getter(name="transitEncryptionMode")
def transit_encryption_mode(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "transit_encryption_mode")
@property
@pulumi.getter(name="userGroupIds")
def user_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "user_group_ids")
|
PypiClean
|
/enpraxis.educommons-4.1.2.tar.gz/enpraxis.educommons-4.1.2/enpraxis/educommons/utilities/staticsiteutility.py
|
__author__ = '''Brent Lambert, David Ray, Jon Thomas'''
__version__ = '$ Revision 0.0 $'[11:-2]
import re
from urlparse import urlparse, urlunparse, urlsplit
from zope.interface import implements
from zope.component import getUtility, getMultiAdapter
from enpraxis.educommons.utilities.interfaces import IECUtility
from enpraxis.staticsite.utilities.staticsiteutility import StaticSiteUtility
from enpraxis.staticsite.utilities.interfaces import IStaticSiteUtility
class eduStaticSiteUtility(StaticSiteUtility):
""" Deploy a static site """
implements(IStaticSiteUtility)
def runDocumentFilters(self, portal, current, soup, ssprops):
self.filterBaseTag(soup, current)
self.filterIgnoredSections(soup, ssprops)
self.filterIgnoredPortlets(soup, ssprops)
self.filterIgnoredActions(soup, ssprops)
self.filterCSSLinks(soup, current)
self.filterIEFixesCSS(soup, current)
self.filterS5BaseUrl(soup, current)
self.filterBaseFilesLinks(soup, current, portal, ssprops)
self.filterImageFullscreenBackLink(soup, current)
self.filterCourseDownloadLink(soup, current, portal, ssprops)
self.filterAttributionLinks(soup, current, portal, ssprops)
self.filterCSSValidatorLink(soup, current, portal, ssprops)
self.filterBookmarkletsLinks(soup, current, portal, ssprops)
links = self.getDocumentLinks(soup)
for x in links:
orig = x['href']
x['href'] = self.filterDocumentLink(x['href'],
current,
portal,
ssprops.getProperty('views_to_add'),
ssprops.getProperty('non_html_views'))
#print ' %s => %s' %(orig, x['href'])
data = soup.prettify()
return self.filterPortalUrl(data, current)
def filterCourseDownloadLink(self, soup, current, portal, ssprops):
link = soup.find('dd', id='download_course')
if link:
href = link.a['href']
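            # Map the deployed static page URL back to its portal object:
            # strip a trailing index.html and any configured view suffixes
            # from the path, look the object up in the catalog, walk up to
            # the containing course, and point the download link at that
            # course's zip archive.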
result = current
hr = urlparse(current)
p = urlparse(portal.portal_url())
if p[1] == hr[1]:
h = hr[2].split('/')
if h[-1] == 'index.html':
h = h[:-1]
for view in ssprops.getProperty('views_to_add'):
if view in h[-1]:
h[-1] = h[-1].replace('-%s.html' % view, '')
result = portal.portal_catalog.searchResults(query={'path':'/'.join(h),}, id=h[-1])[0].getObject()
course = getUtility(IECUtility).FindECParent(result)
zip_url = '%s/%s.zip' % (course.absolute_url(), course.id)
link.a['href'] = zip_url
def filterAttributionLinks(self, soup, current, portal, ssprops):
if current == portal.portal_url():
current += '/index.html'
elif '.htm' not in current:
current += '.html'
ccite = soup.find(id='click_citation')
pcite = soup.find(id='print_citation')
scite = soup.find(id='skinless_citation')
portal_url = portal.portal_url()
deploy_url = ssprops.getProperty('deployment_url')
if ccite:
sstring = ccite['onclick']
pattern = re.compile( r"\b(http:\/\/).*\b\." )
nstring = pattern.sub('%s.' % current, sstring)
nstring = nstring.replace(portal_url, deploy_url)
ccite['onclick'] = nstring
if pcite:
sstring = pcite.contents[0]
pattern = re.compile( r"\b(http:\/\/).*\b\." )
nstring = pattern.sub('%s.' % current, sstring)
nstring = nstring.replace(portal_url, deploy_url)
pcite.contents[0].replaceWith(nstring)
if scite:
sstring = scite.span.contents[0]
pattern = re.compile( r"\b(http:\/\/).*\b\." )
nstring = pattern.sub('%s.' % current, sstring)
nstring = nstring.replace(portal_url, deploy_url)
scite.span.contents[0].replaceWith(nstring)
def filterBookmarkletsLinks(self, soup, current, portal, ssprops):
bookmarks = soup.find('span', id="toggledBookmarks")
if bookmarks:
links = bookmarks.findAll('a')
for link in links:
href = link['href']
parts = href.split('=')
index = 0
for part in parts:
if portal.portal_url() in part:
url_parts = part.split('&')
if len(url_parts) > 0:
if '.htm' not in current:
current += '.html'
url_parts[0] = current
newurl = '&'.join(url_parts)
else:
newurl = current
parts[index] = newurl
index += 1
newurl = '='.join(parts)
newurl = newurl.replace(portal.portal_url(), ssprops.getProperty('deployment_url'))
link['href'] = newurl
|
PypiClean
|
/jupyterlab_remote_contents-0.1.1.tar.gz/jupyterlab_remote_contents-0.1.1/node_modules/@lumino/algorithm/dist/index.js
|
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.lumino_algorithm = {}));
}(this, (function (exports) { 'use strict';
// Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
/*-----------------------------------------------------------------------------
| Copyright (c) 2014-2017, PhosphorJS Contributors
|
| Distributed under the terms of the BSD 3-Clause License.
|
| The full license is in the file LICENSE, distributed with this software.
|----------------------------------------------------------------------------*/
/**
* The namespace for array-specific algorithms.
*/
exports.ArrayExt = void 0;
(function (ArrayExt) {
/**
* Find the index of the first occurrence of a value in an array.
*
* @param array - The array-like object to search.
*
* @param value - The value to locate in the array. Values are
* compared using strict `===` equality.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The index of the first occurrence of the value, or `-1`
* if the value is not found.
*
* #### Notes
* If `stop < start` the search will wrap at the end of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start` or `stop` which is non-integral.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = ['one', 'two', 'three', 'four', 'one'];
* ArrayExt.firstIndexOf(data, 'red'); // -1
* ArrayExt.firstIndexOf(data, 'one'); // 0
* ArrayExt.firstIndexOf(data, 'one', 1); // 4
* ArrayExt.firstIndexOf(data, 'two', 2); // -1
* ArrayExt.firstIndexOf(data, 'two', 2, 1); // 1
* ```
*/
function firstIndexOf(array, value, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n === 0) {
return -1;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
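        // Compute the number of indices to visit; when `stop < start` the
        // search range wraps around the end of the array.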
var span;
if (stop < start) {
span = stop + 1 + (n - start);
}
else {
span = stop - start + 1;
}
for (var i = 0; i < span; ++i) {
var j = (start + i) % n;
if (array[j] === value) {
return j;
}
}
return -1;
}
ArrayExt.firstIndexOf = firstIndexOf;
/**
* Find the index of the last occurrence of a value in an array.
*
* @param array - The array-like object to search.
*
* @param value - The value to locate in the array. Values are
* compared using strict `===` equality.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The index of the last occurrence of the value, or `-1`
* if the value is not found.
*
* #### Notes
* If `start < stop` the search will wrap at the front of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start` or `stop` which is non-integral.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = ['one', 'two', 'three', 'four', 'one'];
* ArrayExt.lastIndexOf(data, 'red'); // -1
* ArrayExt.lastIndexOf(data, 'one'); // 4
* ArrayExt.lastIndexOf(data, 'one', 1); // 0
* ArrayExt.lastIndexOf(data, 'two', 0); // -1
* ArrayExt.lastIndexOf(data, 'two', 0, 1); // 1
* ```
*/
function lastIndexOf(array, value, start, stop) {
if (start === void 0) { start = -1; }
if (stop === void 0) { stop = 0; }
var n = array.length;
if (n === 0) {
return -1;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
var span;
if (start < stop) {
span = start + 1 + (n - stop);
}
else {
span = start - stop + 1;
}
for (var i = 0; i < span; ++i) {
var j = (start - i + n) % n;
if (array[j] === value) {
return j;
}
}
return -1;
}
ArrayExt.lastIndexOf = lastIndexOf;
/**
* Find the index of the first value which matches a predicate.
*
* @param array - The array-like object to search.
*
* @param fn - The predicate function to apply to the values.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The index of the first matching value, or `-1` if no
* matching value is found.
*
* #### Notes
* If `stop < start` the search will wrap at the end of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start` or `stop` which is non-integral.
*
* Modifying the length of the array while searching.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function isEven(value: number): boolean {
* return value % 2 === 0;
* }
*
* let data = [1, 2, 3, 4, 3, 2, 1];
* ArrayExt.findFirstIndex(data, isEven); // 1
* ArrayExt.findFirstIndex(data, isEven, 4); // 5
* ArrayExt.findFirstIndex(data, isEven, 6); // -1
* ArrayExt.findFirstIndex(data, isEven, 6, 5); // 1
* ```
*/
function findFirstIndex(array, fn, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n === 0) {
return -1;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
var span;
if (stop < start) {
span = stop + 1 + (n - start);
}
else {
span = stop - start + 1;
}
for (var i = 0; i < span; ++i) {
var j = (start + i) % n;
if (fn(array[j], j)) {
return j;
}
}
return -1;
}
ArrayExt.findFirstIndex = findFirstIndex;
/**
* Find the index of the last value which matches a predicate.
*
     * @param array - The array-like object to search.
*
* @param fn - The predicate function to apply to the values.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The index of the last matching value, or `-1` if no
* matching value is found.
*
* #### Notes
* If `start < stop` the search will wrap at the front of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start` or `stop` which is non-integral.
*
* Modifying the length of the array while searching.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function isEven(value: number): boolean {
* return value % 2 === 0;
* }
*
* let data = [1, 2, 3, 4, 3, 2, 1];
* ArrayExt.findLastIndex(data, isEven); // 5
* ArrayExt.findLastIndex(data, isEven, 4); // 3
* ArrayExt.findLastIndex(data, isEven, 0); // -1
* ArrayExt.findLastIndex(data, isEven, 0, 1); // 5
* ```
*/
function findLastIndex(array, fn, start, stop) {
if (start === void 0) { start = -1; }
if (stop === void 0) { stop = 0; }
var n = array.length;
if (n === 0) {
return -1;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
var d;
if (start < stop) {
d = start + 1 + (n - stop);
}
else {
d = start - stop + 1;
}
for (var i = 0; i < d; ++i) {
var j = (start - i + n) % n;
if (fn(array[j], j)) {
return j;
}
}
return -1;
}
ArrayExt.findLastIndex = findLastIndex;
/**
* Find the first value which matches a predicate.
*
* @param array - The array-like object to search.
*
* @param fn - The predicate function to apply to the values.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The first matching value, or `undefined` if no matching
* value is found.
*
* #### Notes
* If `stop < start` the search will wrap at the end of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start` or `stop` which is non-integral.
*
* Modifying the length of the array while searching.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function isEven(value: number): boolean {
* return value % 2 === 0;
* }
*
* let data = [1, 2, 3, 4, 3, 2, 1];
* ArrayExt.findFirstValue(data, isEven); // 2
* ArrayExt.findFirstValue(data, isEven, 2); // 4
* ArrayExt.findFirstValue(data, isEven, 6); // undefined
* ArrayExt.findFirstValue(data, isEven, 6, 5); // 2
* ```
*/
function findFirstValue(array, fn, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var index = findFirstIndex(array, fn, start, stop);
return index !== -1 ? array[index] : undefined;
}
ArrayExt.findFirstValue = findFirstValue;
/**
* Find the last value which matches a predicate.
*
     * @param array - The array-like object to search.
*
* @param fn - The predicate function to apply to the values.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The last matching value, or `undefined` if no matching
* value is found.
*
* #### Notes
* If `start < stop` the search will wrap at the front of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start` or `stop` which is non-integral.
*
* Modifying the length of the array while searching.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function isEven(value: number): boolean {
* return value % 2 === 0;
* }
*
* let data = [1, 2, 3, 4, 3, 2, 1];
* ArrayExt.findLastValue(data, isEven); // 2
* ArrayExt.findLastValue(data, isEven, 4); // 4
* ArrayExt.findLastValue(data, isEven, 0); // undefined
* ArrayExt.findLastValue(data, isEven, 0, 1); // 2
* ```
*/
function findLastValue(array, fn, start, stop) {
if (start === void 0) { start = -1; }
if (stop === void 0) { stop = 0; }
var index = findLastIndex(array, fn, start, stop);
return index !== -1 ? array[index] : undefined;
}
ArrayExt.findLastValue = findLastValue;
/**
* Find the index of the first element which compares `>=` to a value.
*
* @param array - The sorted array-like object to search.
*
* @param value - The value to locate in the array.
*
* @param fn - The 3-way comparison function to apply to the values.
* It should return `< 0` if an element is less than a value, `0` if
* an element is equal to a value, or `> 0` if an element is greater
* than a value.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The index of the first element which compares `>=` to the
* value, or `length` if there is no such element. If the computed
* index for `stop` is less than `start`, then the computed index
* for `start` is returned.
*
* #### Notes
* The array must already be sorted in ascending order according to
* the comparison function.
*
* #### Complexity
* Logarithmic.
*
* #### Undefined Behavior
* Searching a range which is not sorted in ascending order.
*
* A `start` or `stop` which is non-integral.
*
* Modifying the length of the array while searching.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function numberCmp(a: number, b: number): number {
* return a - b;
* }
*
* let data = [0, 3, 4, 7, 7, 9];
* ArrayExt.lowerBound(data, 0, numberCmp); // 0
* ArrayExt.lowerBound(data, 6, numberCmp); // 3
* ArrayExt.lowerBound(data, 7, numberCmp); // 3
* ArrayExt.lowerBound(data, -1, numberCmp); // 0
* ArrayExt.lowerBound(data, 10, numberCmp); // 6
* ```
*/
function lowerBound(array, value, fn, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n === 0) {
return 0;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
var begin = start;
var span = stop - start + 1;
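        // Binary search over [start, stop]: every element before `begin` is
        // known to compare `< value`.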
while (span > 0) {
var half = span >> 1;
var middle = begin + half;
if (fn(array[middle], value) < 0) {
begin = middle + 1;
span -= half + 1;
}
else {
span = half;
}
}
return begin;
}
ArrayExt.lowerBound = lowerBound;
/**
* Find the index of the first element which compares `>` than a value.
*
* @param array - The sorted array-like object to search.
*
* @param value - The value to locate in the array.
*
* @param fn - The 3-way comparison function to apply to the values.
* It should return `< 0` if an element is less than a value, `0` if
* an element is equal to a value, or `> 0` if an element is greater
* than a value.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The index of the first element which compares `>` than the
* value, or `length` if there is no such element. If the computed
* index for `stop` is less than `start`, then the computed index
* for `start` is returned.
*
* #### Notes
* The array must already be sorted in ascending order according to
* the comparison function.
*
* #### Complexity
* Logarithmic.
*
* #### Undefined Behavior
* Searching a range which is not sorted in ascending order.
*
* A `start` or `stop` which is non-integral.
*
* Modifying the length of the array while searching.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function numberCmp(a: number, b: number): number {
* return a - b;
* }
*
* let data = [0, 3, 4, 7, 7, 9];
* ArrayExt.upperBound(data, 0, numberCmp); // 1
* ArrayExt.upperBound(data, 6, numberCmp); // 3
* ArrayExt.upperBound(data, 7, numberCmp); // 5
* ArrayExt.upperBound(data, -1, numberCmp); // 0
* ArrayExt.upperBound(data, 10, numberCmp); // 6
* ```
*/
function upperBound(array, value, fn, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n === 0) {
return 0;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
var begin = start;
var span = stop - start + 1;
while (span > 0) {
var half = span >> 1;
var middle = begin + half;
if (fn(array[middle], value) > 0) {
span = half;
}
else {
begin = middle + 1;
span -= half + 1;
}
}
return begin;
}
ArrayExt.upperBound = upperBound;
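    /**
     * Illustrative note (not part of the library source): `lowerBound` and
     * `upperBound` together give the half-open range of elements equal to a
     * value in a sorted array, e.g. counting occurrences:
     *
     * ```typescript
     * import { ArrayExt } from '@lumino/algorithm';
     *
     * function numberCmp(a: number, b: number): number {
     *   return a - b;
     * }
     *
     * let data = [0, 3, 4, 7, 7, 9];
     * ArrayExt.upperBound(data, 7, numberCmp) -
     *   ArrayExt.lowerBound(data, 7, numberCmp); // 2
     * ```
     */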
/**
* Test whether two arrays are shallowly equal.
*
* @param a - The first array-like object to compare.
*
* @param b - The second array-like object to compare.
*
* @param fn - The comparison function to apply to the elements. It
* should return `true` if the elements are "equal". The default
* compares elements using strict `===` equality.
*
* @returns Whether the two arrays are shallowly equal.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* Modifying the length of the arrays while comparing.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let d1 = [0, 3, 4, 7, 7, 9];
* let d2 = [0, 3, 4, 7, 7, 9];
* let d3 = [42];
* ArrayExt.shallowEqual(d1, d2); // true
* ArrayExt.shallowEqual(d2, d3); // false
* ```
*/
function shallowEqual(a, b, fn) {
// Check for object identity first.
if (a === b) {
return true;
}
// Bail early if the lengths are different.
if (a.length !== b.length) {
return false;
}
// Compare each element for equality.
for (var i = 0, n = a.length; i < n; ++i) {
if (fn ? !fn(a[i], b[i]) : a[i] !== b[i]) {
return false;
}
}
        // The arrays are shallowly equal.
return true;
}
ArrayExt.shallowEqual = shallowEqual;
/**
* Create a slice of an array subject to an optional step.
*
* @param array - The array-like object of interest.
*
* @param options - The options for configuring the slice.
*
* @returns A new array with the specified values.
*
* @throws An exception if the slice `step` is `0`.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start`, `stop`, or `step` which is non-integral.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = [0, 3, 4, 7, 7, 9];
* ArrayExt.slice(data); // [0, 3, 4, 7, 7, 9]
* ArrayExt.slice(data, { start: 2 }); // [4, 7, 7, 9]
* ArrayExt.slice(data, { start: 0, stop: 4 }); // [0, 3, 4, 7]
* ArrayExt.slice(data, { step: 2 }); // [0, 4, 7]
* ArrayExt.slice(data, { step: -1 }); // [9, 7, 7, 4, 3, 0]
* ```
*/
function slice(array, options) {
if (options === void 0) { options = {}; }
// Extract the options.
var start = options.start, stop = options.stop, step = options.step;
// Set up the `step` value.
if (step === undefined) {
step = 1;
}
// Validate the step size.
if (step === 0) {
throw new Error('Slice `step` cannot be zero.');
}
// Look up the length of the array.
var n = array.length;
// Set up the `start` value.
if (start === undefined) {
start = step < 0 ? n - 1 : 0;
}
else if (start < 0) {
start = Math.max(start + n, step < 0 ? -1 : 0);
}
else if (start >= n) {
start = step < 0 ? n - 1 : n;
}
// Set up the `stop` value.
if (stop === undefined) {
stop = step < 0 ? -1 : n;
}
else if (stop < 0) {
stop = Math.max(stop + n, step < 0 ? -1 : 0);
}
else if (stop >= n) {
stop = step < 0 ? n - 1 : n;
}
// Compute the slice length.
var length;
if ((step < 0 && stop >= start) || (step > 0 && start >= stop)) {
length = 0;
}
else if (step < 0) {
length = Math.floor((stop - start + 1) / step + 1);
}
else {
length = Math.floor((stop - start - 1) / step + 1);
}
// Compute the sliced result.
var result = [];
for (var i = 0; i < length; ++i) {
result[i] = array[start + i * step];
}
// Return the result.
return result;
}
ArrayExt.slice = slice;
/**
* Move an element in an array from one index to another.
*
* @param array - The mutable array-like object of interest.
*
* @param fromIndex - The index of the element to move. Negative
* values are taken as an offset from the end of the array.
*
* @param toIndex - The target index of the element. Negative
* values are taken as an offset from the end of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `fromIndex` or `toIndex` which is non-integral.
*
* #### Example
* ```typescript
     * import { ArrayExt } from '@lumino/algorithm';
*
* let data = [0, 1, 2, 3, 4];
* ArrayExt.move(data, 1, 2); // [0, 2, 1, 3, 4]
* ArrayExt.move(data, 4, 2); // [0, 2, 4, 1, 3]
* ```
*/
function move(array, fromIndex, toIndex) {
var n = array.length;
if (n <= 1) {
return;
}
if (fromIndex < 0) {
fromIndex = Math.max(0, fromIndex + n);
}
else {
fromIndex = Math.min(fromIndex, n - 1);
}
if (toIndex < 0) {
toIndex = Math.max(0, toIndex + n);
}
else {
toIndex = Math.min(toIndex, n - 1);
}
if (fromIndex === toIndex) {
return;
}
var value = array[fromIndex];
var d = fromIndex < toIndex ? 1 : -1;
for (var i = fromIndex; i !== toIndex; i += d) {
array[i] = array[i + d];
}
array[toIndex] = value;
}
ArrayExt.move = move;
/**
* Reverse an array in-place.
*
* @param array - The mutable array-like object of interest.
*
* @param start - The index of the first element in the range to be
* reversed, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* reversed, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start` or `stop` index which is non-integral.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = [0, 1, 2, 3, 4];
* ArrayExt.reverse(data, 1, 3); // [0, 3, 2, 1, 4]
* ArrayExt.reverse(data, 3); // [0, 3, 2, 4, 1]
* ArrayExt.reverse(data); // [1, 4, 2, 3, 0]
* ```
*/
function reverse(array, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n <= 1) {
return;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
while (start < stop) {
var a = array[start];
var b = array[stop];
array[start++] = b;
array[stop--] = a;
}
}
ArrayExt.reverse = reverse;
/**
* Rotate the elements of an array in-place.
*
* @param array - The mutable array-like object of interest.
*
* @param delta - The amount of rotation to apply to the elements. A
* positive value will rotate the elements to the left. A negative
* value will rotate the elements to the right.
*
* @param start - The index of the first element in the range to be
* rotated, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* rotated, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `delta`, `start`, or `stop` which is non-integral.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = [0, 1, 2, 3, 4];
* ArrayExt.rotate(data, 2); // [2, 3, 4, 0, 1]
* ArrayExt.rotate(data, -2); // [0, 1, 2, 3, 4]
* ArrayExt.rotate(data, 10); // [0, 1, 2, 3, 4]
* ArrayExt.rotate(data, 9); // [4, 0, 1, 2, 3]
* ArrayExt.rotate(data, 2, 1, 3); // [4, 2, 0, 1, 3]
* ```
*/
function rotate(array, delta, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n <= 1) {
return;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
if (start >= stop) {
return;
}
var length = stop - start + 1;
if (delta > 0) {
delta = delta % length;
}
else if (delta < 0) {
delta = ((delta % length) + length) % length;
}
if (delta === 0) {
return;
}
var pivot = start + delta;
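        // Rotate with three reversals: reversing each half and then the whole
        // range shifts the elements in [start, stop] left by `delta` positions.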
reverse(array, start, pivot - 1);
reverse(array, pivot, stop);
reverse(array, start, stop);
}
ArrayExt.rotate = rotate;
/**
* Fill an array with a static value.
*
* @param array - The mutable array-like object to fill.
*
* @param value - The static value to use to fill the array.
*
* @param start - The index of the first element in the range to be
* filled, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* filled, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* #### Notes
* If `stop < start` the fill will wrap at the end of the array.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* A `start` or `stop` which is non-integral.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = ['one', 'two', 'three', 'four'];
* ArrayExt.fill(data, 'r'); // ['r', 'r', 'r', 'r']
* ArrayExt.fill(data, 'g', 1); // ['r', 'g', 'g', 'g']
* ArrayExt.fill(data, 'b', 2, 3); // ['r', 'g', 'b', 'b']
* ArrayExt.fill(data, 'z', 3, 1); // ['z', 'z', 'b', 'z']
* ```
*/
function fill(array, value, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n === 0) {
return;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
var span;
if (stop < start) {
span = stop + 1 + (n - start);
}
else {
span = stop - start + 1;
}
for (var i = 0; i < span; ++i) {
array[(start + i) % n] = value;
}
}
ArrayExt.fill = fill;
/**
* Insert a value into an array at a specific index.
*
* @param array - The array of interest.
*
* @param index - The index at which to insert the value. Negative
* values are taken as an offset from the end of the array.
*
* @param value - The value to set at the specified index.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* An `index` which is non-integral.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = [0, 1, 2];
* ArrayExt.insert(data, 0, -1); // [-1, 0, 1, 2]
* ArrayExt.insert(data, 2, 12); // [-1, 0, 12, 1, 2]
* ArrayExt.insert(data, -1, 7); // [-1, 0, 12, 1, 7, 2]
* ArrayExt.insert(data, 6, 19); // [-1, 0, 12, 1, 7, 2, 19]
* ```
*/
function insert(array, index, value) {
var n = array.length;
if (index < 0) {
index = Math.max(0, index + n);
}
else {
index = Math.min(index, n);
}
for (var i = n; i > index; --i) {
array[i] = array[i - 1];
}
array[index] = value;
}
ArrayExt.insert = insert;
/**
* Remove and return a value at a specific index in an array.
*
* @param array - The array of interest.
*
* @param index - The index of the value to remove. Negative values
* are taken as an offset from the end of the array.
*
* @returns The value at the specified index, or `undefined` if the
* index is out of range.
*
* #### Complexity
* Linear.
*
* #### Undefined Behavior
* An `index` which is non-integral.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = [0, 12, 23, 39, 14, 12, 75];
* ArrayExt.removeAt(data, 2); // 23
* ArrayExt.removeAt(data, -2); // 12
* ArrayExt.removeAt(data, 10); // undefined;
* ```
*/
function removeAt(array, index) {
var n = array.length;
if (index < 0) {
index += n;
}
if (index < 0 || index >= n) {
return undefined;
}
var value = array[index];
for (var i = index + 1; i < n; ++i) {
array[i - 1] = array[i];
}
array.length = n - 1;
return value;
}
ArrayExt.removeAt = removeAt;
/**
* Remove the first occurrence of a value from an array.
*
* @param array - The array of interest.
*
* @param value - The value to remove from the array. Values are
* compared using strict `===` equality.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The index of the removed value, or `-1` if the value
* is not contained in the array.
*
* #### Notes
* If `stop < start` the search will wrap at the end of the array.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = [0, 12, 23, 39, 14, 12, 75];
* ArrayExt.removeFirstOf(data, 12); // 1
* ArrayExt.removeFirstOf(data, 17); // -1
* ArrayExt.removeFirstOf(data, 39, 3); // -1
* ArrayExt.removeFirstOf(data, 39, 3, 2); // 2
* ```
*/
function removeFirstOf(array, value, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var index = firstIndexOf(array, value, start, stop);
if (index !== -1) {
removeAt(array, index);
}
return index;
}
ArrayExt.removeFirstOf = removeFirstOf;
/**
* Remove the last occurrence of a value from an array.
*
* @param array - The array of interest.
*
* @param value - The value to remove from the array. Values are
* compared using strict `===` equality.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The index of the removed value, or `-1` if the value
* is not contained in the array.
*
* #### Notes
     * If `start < stop` the search will wrap at the front of the array.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = [0, 12, 23, 39, 14, 12, 75];
* ArrayExt.removeLastOf(data, 12); // 5
* ArrayExt.removeLastOf(data, 17); // -1
* ArrayExt.removeLastOf(data, 39, 2); // -1
* ArrayExt.removeLastOf(data, 39, 2, 3); // 3
* ```
*/
function removeLastOf(array, value, start, stop) {
if (start === void 0) { start = -1; }
if (stop === void 0) { stop = 0; }
var index = lastIndexOf(array, value, start, stop);
if (index !== -1) {
removeAt(array, index);
}
return index;
}
ArrayExt.removeLastOf = removeLastOf;
/**
* Remove all occurrences of a value from an array.
*
* @param array - The array of interest.
*
* @param value - The value to remove from the array. Values are
* compared using strict `===` equality.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The number of elements removed from the array.
*
* #### Notes
* If `stop < start` the search will conceptually wrap at the end of
* the array, however the array will be traversed front-to-back.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* let data = [14, 12, 23, 39, 14, 12, 19, 14];
* ArrayExt.removeAllOf(data, 12); // 2
* ArrayExt.removeAllOf(data, 17); // 0
* ArrayExt.removeAllOf(data, 14, 1, 4); // 1
* ```
*/
function removeAllOf(array, value, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n === 0) {
return 0;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
var count = 0;
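        // Single-pass compaction: `count` tracks how many matching values have
        // been seen so far; each surviving element is shifted left by that amount.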
for (var i = 0; i < n; ++i) {
if (start <= stop && i >= start && i <= stop && array[i] === value) {
count++;
}
else if (stop < start &&
(i <= stop || i >= start) &&
array[i] === value) {
count++;
}
else if (count > 0) {
array[i - count] = array[i];
}
}
if (count > 0) {
array.length = n - count;
}
return count;
}
ArrayExt.removeAllOf = removeAllOf;
/**
* Remove the first occurrence of a value which matches a predicate.
*
* @param array - The array of interest.
*
* @param fn - The predicate function to apply to the values.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The removed `{ index, value }`, which will be `-1` and
* `undefined` if the value is not contained in the array.
*
* #### Notes
* If `stop < start` the search will wrap at the end of the array.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function isEven(value: number): boolean {
* return value % 2 === 0;
* }
*
* let data = [0, 12, 23, 39, 14, 12, 75];
* ArrayExt.removeFirstWhere(data, isEven); // { index: 0, value: 0 }
* ArrayExt.removeFirstWhere(data, isEven, 2); // { index: 3, value: 14 }
* ArrayExt.removeFirstWhere(data, isEven, 4); // { index: -1, value: undefined }
* ```
*/
function removeFirstWhere(array, fn, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var value;
var index = findFirstIndex(array, fn, start, stop);
if (index !== -1) {
value = removeAt(array, index);
}
return { index: index, value: value };
}
ArrayExt.removeFirstWhere = removeFirstWhere;
/**
* Remove the last occurrence of a value which matches a predicate.
*
* @param array - The array of interest.
*
* @param fn - The predicate function to apply to the values.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The removed `{ index, value }`, which will be `-1` and
* `undefined` if the value is not contained in the array.
*
* #### Notes
     * If `start < stop` the search will wrap at the front of the array.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function isEven(value: number): boolean {
* return value % 2 === 0;
* }
*
* let data = [0, 12, 23, 39, 14, 12, 75];
* ArrayExt.removeLastWhere(data, isEven); // { index: 5, value: 12 }
* ArrayExt.removeLastWhere(data, isEven, 2); // { index: 1, value: 12 }
* ArrayExt.removeLastWhere(data, isEven, 2, 1); // { index: -1, value: undefined }
* ```
*/
function removeLastWhere(array, fn, start, stop) {
if (start === void 0) { start = -1; }
if (stop === void 0) { stop = 0; }
var value;
var index = findLastIndex(array, fn, start, stop);
if (index !== -1) {
value = removeAt(array, index);
}
return { index: index, value: value };
}
ArrayExt.removeLastWhere = removeLastWhere;
/**
* Remove all occurrences of values which match a predicate.
*
* @param array - The array of interest.
*
* @param fn - The predicate function to apply to the values.
*
* @param start - The index of the first element in the range to be
* searched, inclusive. The default value is `0`. Negative values
* are taken as an offset from the end of the array.
*
* @param stop - The index of the last element in the range to be
* searched, inclusive. The default value is `-1`. Negative values
* are taken as an offset from the end of the array.
*
* @returns The number of elements removed from the array.
*
* #### Notes
* If `stop < start` the search will conceptually wrap at the end of
* the array, however the array will be traversed front-to-back.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { ArrayExt } from '@lumino/algorithm';
*
* function isEven(value: number): boolean {
* return value % 2 === 0;
* }
*
* function isNegative(value: number): boolean {
* return value < 0;
* }
*
* let data = [0, 12, -13, -9, 23, 39, 14, -15, 12, 75];
* ArrayExt.removeAllWhere(data, isEven); // 4
* ArrayExt.removeAllWhere(data, isNegative, 0, 3); // 2
* ```
*/
function removeAllWhere(array, fn, start, stop) {
if (start === void 0) { start = 0; }
if (stop === void 0) { stop = -1; }
var n = array.length;
if (n === 0) {
return 0;
}
if (start < 0) {
start = Math.max(0, start + n);
}
else {
start = Math.min(start, n - 1);
}
if (stop < 0) {
stop = Math.max(0, stop + n);
}
else {
stop = Math.min(stop, n - 1);
}
var count = 0;
for (var i = 0; i < n; ++i) {
if (start <= stop && i >= start && i <= stop && fn(array[i], i)) {
count++;
}
else if (stop < start && (i <= stop || i >= start) && fn(array[i], i)) {
count++;
}
else if (count > 0) {
array[i - count] = array[i];
}
}
if (count > 0) {
array.length = n - count;
}
return count;
}
ArrayExt.removeAllWhere = removeAllWhere;
})(exports.ArrayExt || (exports.ArrayExt = {}));
// Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
/*-----------------------------------------------------------------------------
| Copyright (c) 2014-2017, PhosphorJS Contributors
|
| Distributed under the terms of the BSD 3-Clause License.
|
| The full license is in the file LICENSE, distributed with this software.
|----------------------------------------------------------------------------*/
/**
* Create an iterator for an iterable object.
*
* @param object - The iterable or array-like object of interest.
*
* @returns A new iterator for the given object.
*
* #### Notes
* This function allows iteration algorithms to operate on user-defined
* iterable types and builtin array-like objects in a uniform fashion.
*/
function iter(object) {
var it;
if (typeof object.iter === 'function') {
it = object.iter();
}
else {
it = new ArrayIterator(object);
}
return it;
}
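/**
 * Example (added for illustration, in the style of the other doc examples):
 *
 * ```typescript
 * import { iter, toArray } from '@lumino/algorithm';
 *
 * let it = iter([1, 2, 3]);
 * it.next();    // 1
 * toArray(it);  // [2, 3]
 * ```
 */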
/**
* Create an iterator for the keys in an object.
*
* @param object - The object of interest.
*
* @returns A new iterator for the keys in the given object.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
 * import { each, iterKeys } from '@lumino/algorithm';
 *
 * let data = { one: 1, two: 2, three: 3 };
 *
 * each(iterKeys(data), key => { console.log(key); }); // 'one', 'two', 'three'
* ```
*/
function iterKeys(object) {
return new KeyIterator(object);
}
/**
* Create an iterator for the values in an object.
*
* @param object - The object of interest.
*
* @returns A new iterator for the values in the given object.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
 * import { each, iterValues } from '@lumino/algorithm';
 *
 * let data = { one: 1, two: 2, three: 3 };
 *
 * each(iterValues(data), value => { console.log(value); }); // 1, 2, 3
* ```
*/
function iterValues(object) {
return new ValueIterator(object);
}
/**
* Create an iterator for the items in an object.
*
* @param object - The object of interest.
*
* @returns A new iterator for the items in the given object.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
 * import { each, iterItems } from '@lumino/algorithm';
 *
 * let data = { one: 1, two: 2, three: 3 };
 *
 * each(iterItems(data), value => { console.log(value); }); // ['one', 1], ['two', 2], ['three', 3]
* ```
*/
function iterItems(object) {
return new ItemIterator(object);
}
/**
* Create an iterator for an iterator-like function.
*
* @param fn - A function which behaves like an iterator `next` method.
*
* @returns A new iterator for the given function.
*
* #### Notes
* The returned iterator **cannot** be cloned.
*
* #### Example
* ```typescript
* import { each, iterFn } from '@lumino/algorithm';
*
* let it = iterFn((() => {
* let i = 0;
* return () => i > 3 ? undefined : i++;
* })());
*
* each(it, v => { console.log(v); }); // 0, 1, 2, 3
* ```
*/
function iterFn(fn) {
return new FnIterator(fn);
}
/**
* Invoke a function for each value in an iterable.
*
* @param object - The iterable or array-like object of interest.
*
* @param fn - The callback function to invoke for each value.
*
* #### Notes
* Iteration can be terminated early by returning `false` from the
* callback function.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { each } from '@lumino/algorithm';
*
* let data = [5, 7, 0, -2, 9];
*
* each(data, value => { console.log(value); });
* ```
*/
function each(object, fn) {
var index = 0;
var it = iter(object);
var value;
while ((value = it.next()) !== undefined) {
if (fn(value, index++) === false) {
return;
}
}
}
/**
* Test whether all values in an iterable satisfy a predicate.
*
* @param object - The iterable or array-like object of interest.
*
* @param fn - The predicate function to invoke for each value.
*
* @returns `true` if all values pass the test, `false` otherwise.
*
* #### Notes
* Iteration terminates on the first `false` predicate result.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { every } from '@lumino/algorithm';
*
* let data = [5, 7, 1];
*
* every(data, value => value % 2 === 0); // false
* every(data, value => value % 2 === 1); // true
* ```
*/
function every(object, fn) {
var index = 0;
var it = iter(object);
var value;
while ((value = it.next()) !== undefined) {
if (!fn(value, index++)) {
return false;
}
}
return true;
}
/**
* Test whether any value in an iterable satisfies a predicate.
*
* @param object - The iterable or array-like object of interest.
*
* @param fn - The predicate function to invoke for each value.
*
* @returns `true` if any value passes the test, `false` otherwise.
*
* #### Notes
* Iteration terminates on the first `true` predicate result.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { some } from '@lumino/algorithm';
*
* let data = [5, 7, 1];
*
* some(data, value => value === 7); // true
* some(data, value => value === 3); // false
* ```
*/
function some(object, fn) {
var index = 0;
var it = iter(object);
var value;
while ((value = it.next()) !== undefined) {
if (fn(value, index++)) {
return true;
}
}
return false;
}
/**
* Create an array from an iterable of values.
*
* @param object - The iterable or array-like object of interest.
*
* @returns A new array of values from the given object.
*
* #### Example
* ```typescript
* import { iter, toArray } from '@lumino/algorithm';
*
* let data = [1, 2, 3, 4, 5, 6];
*
* let stream = iter(data);
*
* toArray(stream); // [1, 2, 3, 4, 5, 6];
* ```
*/
function toArray(object) {
var index = 0;
var result = [];
var it = iter(object);
var value;
while ((value = it.next()) !== undefined) {
result[index++] = value;
}
return result;
}
/**
* Create an object from an iterable of key/value pairs.
*
* @param object - The iterable or array-like object of interest.
*
* @returns A new object mapping keys to values.
*
* #### Example
* ```typescript
* import { toObject } from '@lumino/algorithm';
*
* let data = [['one', 1], ['two', 2], ['three', 3]];
*
* toObject(data); // { one: 1, two: 2, three: 3 }
* ```
*/
function toObject(object) {
var it = iter(object);
var pair;
var result = {};
while ((pair = it.next()) !== undefined) {
result[pair[0]] = pair[1];
}
return result;
}
/**
* An iterator for an array-like object.
*
* #### Notes
* This iterator can be used for any builtin JS array-like object.
*/
var ArrayIterator = /** @class */ (function () {
/**
* Construct a new array iterator.
*
* @param source - The array-like object of interest.
*/
function ArrayIterator(source) {
this._index = 0;
this._source = source;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
ArrayIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
ArrayIterator.prototype.clone = function () {
var result = new ArrayIterator(this._source);
result._index = this._index;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
ArrayIterator.prototype.next = function () {
if (this._index >= this._source.length) {
return undefined;
}
return this._source[this._index++];
};
return ArrayIterator;
}());
/**
* An iterator for the keys in an object.
*
* #### Notes
* This iterator can be used for any JS object.
*/
var KeyIterator = /** @class */ (function () {
/**
* Construct a new key iterator.
*
* @param source - The object of interest.
*
* @param keys - The keys to iterate, if known.
*/
function KeyIterator(source, keys) {
if (keys === void 0) { keys = Object.keys(source); }
this._index = 0;
this._source = source;
this._keys = keys;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
KeyIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
KeyIterator.prototype.clone = function () {
var result = new KeyIterator(this._source, this._keys);
result._index = this._index;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
KeyIterator.prototype.next = function () {
if (this._index >= this._keys.length) {
return undefined;
}
var key = this._keys[this._index++];
if (key in this._source) {
return key;
}
return this.next();
};
return KeyIterator;
}());
/**
* An iterator for the values in an object.
*
* #### Notes
* This iterator can be used for any JS object.
*/
var ValueIterator = /** @class */ (function () {
/**
* Construct a new value iterator.
*
* @param source - The object of interest.
*
* @param keys - The keys to iterate, if known.
*/
function ValueIterator(source, keys) {
if (keys === void 0) { keys = Object.keys(source); }
this._index = 0;
this._source = source;
this._keys = keys;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
ValueIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
ValueIterator.prototype.clone = function () {
var result = new ValueIterator(this._source, this._keys);
result._index = this._index;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
ValueIterator.prototype.next = function () {
if (this._index >= this._keys.length) {
return undefined;
}
var key = this._keys[this._index++];
if (key in this._source) {
return this._source[key];
}
return this.next();
};
return ValueIterator;
}());
/**
* An iterator for the items in an object.
*
* #### Notes
* This iterator can be used for any JS object.
*/
var ItemIterator = /** @class */ (function () {
/**
* Construct a new item iterator.
*
* @param source - The object of interest.
*
* @param keys - The keys to iterate, if known.
*/
function ItemIterator(source, keys) {
if (keys === void 0) { keys = Object.keys(source); }
this._index = 0;
this._source = source;
this._keys = keys;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
ItemIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
ItemIterator.prototype.clone = function () {
var result = new ItemIterator(this._source, this._keys);
result._index = this._index;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
ItemIterator.prototype.next = function () {
if (this._index >= this._keys.length) {
return undefined;
}
var key = this._keys[this._index++];
if (key in this._source) {
return [key, this._source[key]];
}
return this.next();
};
return ItemIterator;
}());
/**
* An iterator for an iterator-like function.
*/
var FnIterator = /** @class */ (function () {
/**
* Construct a new function iterator.
*
* @param fn - The iterator-like function of interest.
*/
function FnIterator(fn) {
this._fn = fn;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
FnIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
FnIterator.prototype.clone = function () {
throw new Error('An `FnIterator` cannot be cloned.');
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
FnIterator.prototype.next = function () {
return this._fn.call(undefined);
};
return FnIterator;
}());
// Copyright (c) Jupyter Development Team.
/**
* Chain together several iterables.
*
* @param objects - The iterable or array-like objects of interest.
*
* @returns An iterator which yields the values of the iterables
* in the order in which they are supplied.
*
* #### Example
* ```typescript
* import { chain, toArray } from '@lumino/algorithm';
*
* let data1 = [1, 2, 3];
* let data2 = [4, 5, 6];
*
* let stream = chain(data1, data2);
*
* toArray(stream); // [1, 2, 3, 4, 5, 6]
* ```
*/
function chain() {
var objects = [];
for (var _i = 0; _i < arguments.length; _i++) {
objects[_i] = arguments[_i];
}
return new ChainIterator(iter(objects.map(iter)));
}
/**
* An iterator which chains together several iterators.
*/
var ChainIterator = /** @class */ (function () {
/**
* Construct a new chain iterator.
*
* @param source - The iterator of iterators of interest.
*/
function ChainIterator(source) {
this._cloned = false;
this._source = source;
this._active = undefined;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
ChainIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
ChainIterator.prototype.clone = function () {
var result = new ChainIterator(this._source.clone());
result._active = this._active && this._active.clone();
result._cloned = true;
this._cloned = true;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
ChainIterator.prototype.next = function () {
if (this._active === undefined) {
var active = this._source.next();
if (active === undefined) {
return undefined;
}
this._active = this._cloned ? active.clone() : active;
}
var value = this._active.next();
if (value !== undefined) {
return value;
}
this._active = undefined;
return this.next();
};
return ChainIterator;
}());
/**
* Create an empty iterator.
*
* @returns A new iterator which yields nothing.
*
* #### Example
* ```typescript
* import { empty, toArray } from '@lumino/algorithm';
*
* let stream = empty<number>();
*
* toArray(stream); // []
* ```
*/
function empty() {
return new EmptyIterator();
}
/**
* An iterator which is always empty.
*/
var EmptyIterator = /** @class */ (function () {
function EmptyIterator() {
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
EmptyIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
EmptyIterator.prototype.clone = function () {
return new EmptyIterator();
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
EmptyIterator.prototype.next = function () {
return undefined;
};
return EmptyIterator;
}());
// Copyright (c) Jupyter Development Team.
/**
* Enumerate an iterable object.
*
* @param object - The iterable or array-like object of interest.
*
* @param start - The starting enum value. The default is `0`.
*
* @returns An iterator which yields the enumerated values.
*
* #### Example
* ```typescript
* import { enumerate, toArray } from '@lumino/algorithm';
*
* let data = ['foo', 'bar', 'baz'];
*
* let stream = enumerate(data, 1);
*
* toArray(stream); // [[1, 'foo'], [2, 'bar'], [3, 'baz']]
* ```
*/
function enumerate(object, start) {
if (start === void 0) { start = 0; }
return new EnumerateIterator(iter(object), start);
}
/**
* An iterator which enumerates the source values.
*/
var EnumerateIterator = /** @class */ (function () {
/**
* Construct a new enumerate iterator.
*
* @param source - The iterator of values of interest.
*
* @param start - The starting enum value.
*/
function EnumerateIterator(source, start) {
this._source = source;
this._index = start;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
EnumerateIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
EnumerateIterator.prototype.clone = function () {
return new EnumerateIterator(this._source.clone(), this._index);
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
EnumerateIterator.prototype.next = function () {
var value = this._source.next();
if (value === undefined) {
return undefined;
}
return [this._index++, value];
};
return EnumerateIterator;
}());
// Copyright (c) Jupyter Development Team.
/**
* Filter an iterable for values which pass a test.
*
* @param object - The iterable or array-like object of interest.
*
* @param fn - The predicate function to invoke for each value.
*
* @returns An iterator which yields the values which pass the test.
*
* #### Example
* ```typescript
* import { filter, toArray } from '@lumino/algorithm';
*
* let data = [1, 2, 3, 4, 5, 6];
*
* let stream = filter(data, value => value % 2 === 0);
*
* toArray(stream); // [2, 4, 6]
* ```
*/
function filter(object, fn) {
return new FilterIterator(iter(object), fn);
}
/**
* An iterator which yields values which pass a test.
*/
var FilterIterator = /** @class */ (function () {
/**
* Construct a new filter iterator.
*
* @param source - The iterator of values of interest.
*
* @param fn - The predicate function to invoke for each value.
*/
function FilterIterator(source, fn) {
this._index = 0;
this._source = source;
this._fn = fn;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
FilterIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
FilterIterator.prototype.clone = function () {
var result = new FilterIterator(this._source.clone(), this._fn);
result._index = this._index;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
FilterIterator.prototype.next = function () {
var fn = this._fn;
var it = this._source;
var value;
while ((value = it.next()) !== undefined) {
if (fn(value, this._index++)) {
return value;
}
}
return undefined;
};
return FilterIterator;
}());
// Copyright (c) Jupyter Development Team.
/**
* Find the first value in an iterable which matches a predicate.
*
* @param object - The iterable or array-like object to search.
*
* @param fn - The predicate function to apply to the values.
*
* @returns The first matching value, or `undefined` if no matching
* value is found.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { find } from '@lumino/algorithm';
*
* interface IAnimal { species: string, name: string };
*
* function isCat(value: IAnimal): boolean {
* return value.species === 'cat';
* }
*
* let data: IAnimal[] = [
* { species: 'dog', name: 'spot' },
* { species: 'cat', name: 'fluffy' },
* { species: 'alligator', name: 'pocho' }
* ];
*
* find(data, isCat).name; // 'fluffy'
* ```
*/
function find(object, fn) {
var index = 0;
var it = iter(object);
var value;
while ((value = it.next()) !== undefined) {
if (fn(value, index++)) {
return value;
}
}
return undefined;
}
/**
* Find the index of the first value which matches a predicate.
*
* @param object - The iterable or array-like object to search.
*
* @param fn - The predicate function to apply to the values.
*
* @returns The index of the first matching value, or `-1` if no
* matching value is found.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { findIndex } from '@lumino/algorithm';
*
* interface IAnimal { species: string, name: string };
*
* function isCat(value: IAnimal): boolean {
* return value.species === 'cat';
* }
*
* let data: IAnimal[] = [
* { species: 'dog', name: 'spot' },
* { species: 'cat', name: 'fluffy' },
* { species: 'alligator', name: 'pocho' }
* ];
*
* findIndex(data, isCat); // 1
* ```
*/
function findIndex(object, fn) {
var index = 0;
var it = iter(object);
var value;
while ((value = it.next()) !== undefined) {
if (fn(value, index++)) {
return index - 1;
}
}
return -1;
}
/**
* Find the minimum value in an iterable.
*
* @param object - The iterable or array-like object to search.
*
* @param fn - The 3-way comparison function to apply to the values.
* It should return `< 0` if the first value is less than the second.
* `0` if the values are equivalent, or `> 0` if the first value is
* greater than the second.
*
* @returns The minimum value in the iterable. If multiple values are
* equivalent to the minimum, the left-most value is returned. If
* the iterable is empty, this returns `undefined`.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { min } from '@lumino/algorithm';
*
* function numberCmp(a: number, b: number): number {
* return a - b;
* }
*
* min([7, 4, 0, 3, 9, 4], numberCmp); // 0
* ```
*/
function min(object, fn) {
var it = iter(object);
var value = it.next();
if (value === undefined) {
return undefined;
}
var result = value;
while ((value = it.next()) !== undefined) {
if (fn(value, result) < 0) {
result = value;
}
}
return result;
}
/**
* Find the maximum value in an iterable.
*
* @param object - The iterable or array-like object to search.
*
* @param fn - The 3-way comparison function to apply to the values.
* It should return `< 0` if the first value is less than the second.
* `0` if the values are equivalent, or `> 0` if the first value is
* greater than the second.
*
* @returns The maximum value in the iterable. If multiple values are
* equivalent to the maximum, the left-most value is returned. If
* the iterable is empty, this returns `undefined`.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { max } from '@lumino/algorithm';
*
* function numberCmp(a: number, b: number): number {
* return a - b;
* }
*
* max([7, 4, 0, 3, 9, 4], numberCmp); // 9
* ```
*/
function max(object, fn) {
var it = iter(object);
var value = it.next();
if (value === undefined) {
return undefined;
}
var result = value;
while ((value = it.next()) !== undefined) {
if (fn(value, result) > 0) {
result = value;
}
}
return result;
}
/**
* Find the minimum and maximum values in an iterable.
*
* @param object - The iterable or array-like object to search.
*
* @param fn - The 3-way comparison function to apply to the values.
* It should return `< 0` if the first value is less than the second.
* `0` if the values are equivalent, or `> 0` if the first value is
* greater than the second.
*
* @returns A 2-tuple of the `[min, max]` values in the iterable. If
* multiple values are equivalent, the left-most values are returned.
* If the iterable is empty, this returns `undefined`.
*
* #### Complexity
* Linear.
*
* #### Example
* ```typescript
* import { minmax } from '@lumino/algorithm';
*
* function numberCmp(a: number, b: number): number {
* return a - b;
* }
*
* minmax([7, 4, 0, 3, 9, 4], numberCmp); // [0, 9]
* ```
*/
function minmax(object, fn) {
var it = iter(object);
var value = it.next();
if (value === undefined) {
return undefined;
}
var vmin = value;
var vmax = value;
while ((value = it.next()) !== undefined) {
if (fn(value, vmin) < 0) {
vmin = value;
}
else if (fn(value, vmax) > 0) {
vmax = value;
}
}
return [vmin, vmax];
}
// Copyright (c) Jupyter Development Team.
/**
* Transform the values of an iterable with a mapping function.
*
* @param object - The iterable or array-like object of interest.
*
* @param fn - The mapping function to invoke for each value.
*
* @returns An iterator which yields the transformed values.
*
* #### Example
* ```typescript
* import { map, toArray } from '@lumino/algorithm';
*
* let data = [1, 2, 3];
*
* let stream = map(data, value => value * 2);
*
* toArray(stream); // [2, 4, 6]
* ```
*/
function map(object, fn) {
return new MapIterator(iter(object), fn);
}
/**
* An iterator which transforms values using a mapping function.
*/
var MapIterator = /** @class */ (function () {
/**
* Construct a new map iterator.
*
* @param source - The iterator of values of interest.
*
* @param fn - The mapping function to invoke for each value.
*/
function MapIterator(source, fn) {
this._index = 0;
this._source = source;
this._fn = fn;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
MapIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
MapIterator.prototype.clone = function () {
var result = new MapIterator(this._source.clone(), this._fn);
result._index = this._index;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
MapIterator.prototype.next = function () {
var value = this._source.next();
if (value === undefined) {
return undefined;
}
return this._fn.call(undefined, value, this._index++);
};
return MapIterator;
}());
/**
* Create an iterator of evenly spaced values.
*
* @param start - The starting value for the range, inclusive.
*
* @param stop - The stopping value for the range, exclusive.
*
* @param step - The distance between each value.
*
* @returns An iterator which produces evenly spaced values.
*
* #### Notes
* In the single argument form of `range(stop)`, `start` defaults to
* `0` and `step` defaults to `1`.
*
* In the two argument form of `range(start, stop)`, `step` defaults
* to `1`.
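 *
 * #### Example
 * A small illustration of the yielded values:
 *
 * ```typescript
 * import { range, toArray } from '@lumino/algorithm';
 *
 * toArray(range(4));        // [0, 1, 2, 3]
 * toArray(range(1, 4));     // [1, 2, 3]
 * toArray(range(0, 10, 2)); // [0, 2, 4, 6, 8]
 * ```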
*/
function range(start, stop, step) {
if (stop === undefined) {
return new RangeIterator(0, start, 1);
}
if (step === undefined) {
return new RangeIterator(start, stop, 1);
}
return new RangeIterator(start, stop, step);
}
/**
* An iterator which produces a range of evenly spaced values.
*/
var RangeIterator = /** @class */ (function () {
/**
* Construct a new range iterator.
*
* @param start - The starting value for the range, inclusive.
*
* @param stop - The stopping value for the range, exclusive.
*
* @param step - The distance between each value.
*/
function RangeIterator(start, stop, step) {
this._index = 0;
this._start = start;
this._stop = stop;
this._step = step;
this._length = Private.rangeLength(start, stop, step);
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
RangeIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
RangeIterator.prototype.clone = function () {
var result = new RangeIterator(this._start, this._stop, this._step);
result._index = this._index;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
RangeIterator.prototype.next = function () {
if (this._index >= this._length) {
return undefined;
}
return this._start + this._step * this._index++;
};
return RangeIterator;
}());
/**
* The namespace for the module implementation details.
*/
var Private;
(function (Private) {
/**
* Compute the effective length of a range.
*
* @param start - The starting value for the range, inclusive.
*
* @param stop - The stopping value for the range, exclusive.
*
* @param step - The distance between each value.
*
     * @returns The number of steps needed to traverse the range.
*/
function rangeLength(start, stop, step) {
if (step === 0) {
return Infinity;
}
if (start > stop && step > 0) {
return 0;
}
if (start < stop && step < 0) {
return 0;
}
return Math.ceil((stop - start) / step);
}
Private.rangeLength = rangeLength;
})(Private || (Private = {}));
// Copyright (c) Jupyter Development Team.
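/**
 * Summarize an iterable of values using a reducer function.
 *
 * @param object - The iterable or array-like object of interest.
 *
 * @param fn - The reducer function to invoke for each value. It is
 * passed the accumulated result, the current value, and its index.
 *
 * @param initial - The optional initial value for the reduction.
 *
 * @returns The final accumulated value.
 *
 * #### Notes
 * As with `Array#reduce`, an empty iterable with no initial value
 * throws, and a single value with no initial value is returned
 * without invoking the reducer.
 *
 * #### Example
 * ```typescript
 * import { reduce } from '@lumino/algorithm';
 *
 * reduce([1, 2, 3, 4], (a, v) => a + v);     // 10
 * reduce([1, 2, 3, 4], (a, v) => a + v, 10); // 20
 * ```
 */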
function reduce(object, fn, initial) {
    // Set up the iterator and fetch the first value.
var index = 0;
var it = iter(object);
var first = it.next();
// An empty iterator and no initial value is an error.
if (first === undefined && initial === undefined) {
throw new TypeError('Reduce of empty iterable with no initial value.');
}
// If the iterator is empty, return the initial value.
if (first === undefined) {
return initial;
}
// If the iterator has a single item and no initial value, the
// reducer is not invoked and the first item is the return value.
var second = it.next();
if (second === undefined && initial === undefined) {
return first;
}
    // If the iterator has a single item and an initial value is provided,
// the reducer is invoked and that result is the return value.
if (second === undefined) {
return fn(initial, first, index++);
}
    // Set up the initial accumulated value.
var accumulator;
if (initial === undefined) {
accumulator = fn(first, second, index++);
}
else {
accumulator = fn(fn(initial, first, index++), second, index++);
}
// Iterate the rest of the values, updating the accumulator.
var next;
while ((next = it.next()) !== undefined) {
accumulator = fn(accumulator, next, index++);
}
// Return the final accumulated value.
return accumulator;
}
/**
* Create an iterator which repeats a value a number of times.
*
* @param value - The value to repeat.
*
* @param count - The number of times to repeat the value.
*
* @returns A new iterator which repeats the specified value.
*
* #### Example
* ```typescript
* import { repeat, toArray } from '@lumino/algorithm';
*
* let stream = repeat(7, 3);
*
* toArray(stream); // [7, 7, 7]
* ```
*/
function repeat(value, count) {
return new RepeatIterator(value, count);
}
/**
* Create an iterator which yields a value a single time.
*
* @param value - The value to wrap in an iterator.
*
* @returns A new iterator which yields the value a single time.
*
* #### Example
* ```typescript
* import { once, toArray } from '@lumino/algorithm';
*
* let stream = once(7);
*
* toArray(stream); // [7]
* ```
*/
function once(value) {
return new RepeatIterator(value, 1);
}
/**
* An iterator which repeats a value a specified number of times.
*/
var RepeatIterator = /** @class */ (function () {
/**
* Construct a new repeat iterator.
*
* @param value - The value to repeat.
*
* @param count - The number of times to repeat the value.
*/
function RepeatIterator(value, count) {
this._value = value;
this._count = count;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
RepeatIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
RepeatIterator.prototype.clone = function () {
return new RepeatIterator(this._value, this._count);
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
RepeatIterator.prototype.next = function () {
if (this._count <= 0) {
return undefined;
}
this._count--;
return this._value;
};
return RepeatIterator;
}());
/**
* Create an iterator for a retroable object.
*
* @param object - The retroable or array-like object of interest.
*
* @returns An iterator which traverses the object's values in reverse.
*
* #### Example
* ```typescript
* import { retro, toArray } from '@lumino/algorithm';
*
* let data = [1, 2, 3, 4, 5, 6];
*
* let stream = retro(data);
*
* toArray(stream); // [6, 5, 4, 3, 2, 1]
* ```
*/
function retro(object) {
var it;
if (typeof object.retro === 'function') {
it = object.retro();
}
else {
it = new RetroArrayIterator(object);
}
return it;
}
/**
* An iterator which traverses an array-like object in reverse.
*
* #### Notes
* This iterator can be used for any builtin JS array-like object.
*/
var RetroArrayIterator = /** @class */ (function () {
/**
* Construct a new retro iterator.
*
* @param source - The array-like object of interest.
*/
function RetroArrayIterator(source) {
this._source = source;
this._index = source.length - 1;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
RetroArrayIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
RetroArrayIterator.prototype.clone = function () {
var result = new RetroArrayIterator(this._source);
result._index = this._index;
return result;
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
RetroArrayIterator.prototype.next = function () {
if (this._index < 0 || this._index >= this._source.length) {
return undefined;
}
return this._source[this._index--];
};
return RetroArrayIterator;
}());
// Copyright (c) Jupyter Development Team.
/**
* Topologically sort an iterable of edges.
*
* @param edges - The iterable or array-like object of edges to sort.
* An edge is represented as a 2-tuple of `[fromNode, toNode]`.
*
* @returns The topologically sorted array of nodes.
*
* #### Notes
* If a cycle is present in the graph, the cycle will be ignored and
* the return value will be only approximately sorted.
*
* #### Example
* ```typescript
* import { topologicSort } from '@lumino/algorithm';
*
* let data = [
* ['d', 'e'],
* ['c', 'd'],
* ['a', 'b'],
* ['b', 'c']
* ];
*
* topologicSort(data); // ['a', 'b', 'c', 'd', 'e']
* ```
*/
function topologicSort(edges) {
// Setup the shared sorting state.
var sorted = [];
var visited = new Set();
var graph = new Map();
// Add the edges to the graph.
each(edges, addEdge);
// Visit each node in the graph.
graph.forEach(function (v, k) {
visit(k);
});
// Return the sorted results.
return sorted;
// Add an edge to the graph.
function addEdge(edge) {
var fromNode = edge[0], toNode = edge[1];
var children = graph.get(toNode);
if (children) {
children.push(fromNode);
}
else {
graph.set(toNode, [fromNode]);
}
}
// Recursively visit the node.
function visit(node) {
if (visited.has(node)) {
return;
}
visited.add(node);
var children = graph.get(node);
if (children) {
children.forEach(visit);
}
sorted.push(node);
}
}
// Copyright (c) Jupyter Development Team.
/**
* Iterate over an iterable using a stepped increment.
*
* @param object - The iterable or array-like object of interest.
*
* @param step - The distance to step on each iteration. A value
* of less than `1` will behave the same as a value of `1`.
*
* @returns An iterator which traverses the iterable step-wise.
*
* #### Example
* ```typescript
* import { stride, toArray } from '@lumino/algorithm';
*
* let data = [1, 2, 3, 4, 5, 6];
*
* let stream = stride(data, 2);
*
* toArray(stream); // [1, 3, 5];
* ```
*/
function stride(object, step) {
return new StrideIterator(iter(object), step);
}
/**
* An iterator which traverses a source iterator step-wise.
*/
var StrideIterator = /** @class */ (function () {
/**
* Construct a new stride iterator.
*
* @param source - The iterator of values of interest.
*
* @param step - The distance to step on each iteration. A value
* of less than `1` will behave the same as a value of `1`.
*/
function StrideIterator(source, step) {
this._source = source;
this._step = step;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
StrideIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
StrideIterator.prototype.clone = function () {
return new StrideIterator(this._source.clone(), this._step);
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
StrideIterator.prototype.next = function () {
var value = this._source.next();
for (var n = this._step - 1; n > 0; --n) {
this._source.next();
}
return value;
};
return StrideIterator;
}());
// Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
/*-----------------------------------------------------------------------------
| Copyright (c) 2014-2017, PhosphorJS Contributors
|
| Distributed under the terms of the BSD 3-Clause License.
|
| The full license is in the file LICENSE, distributed with this software.
|----------------------------------------------------------------------------*/
/**
* The namespace for string-specific algorithms.
*/
exports.StringExt = void 0;
(function (StringExt) {
/**
* Find the indices of characters in a source text.
*
* @param source - The source text which should be searched.
*
* @param query - The characters to locate in the source text.
*
* @param start - The index to start the search.
*
* @returns The matched indices, or `null` if there is no match.
*
* #### Complexity
     * Linear on `source`.
*
* #### Notes
* In order for there to be a match, all of the characters in `query`
* **must** appear in `source` in the order given by `query`.
*
* Characters are matched using strict `===` equality.
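     *
     * #### Example
     * A small illustration of the matched indices:
     *
     * ```typescript
     * import { StringExt } from '@lumino/algorithm';
     *
     * StringExt.findIndices('foobar', 'fbr'); // [0, 3, 5]
     * StringExt.findIndices('foobar', 'x');   // null
     * ```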
*/
function findIndices(source, query, start) {
if (start === void 0) { start = 0; }
var indices = new Array(query.length);
for (var i = 0, j = start, n = query.length; i < n; ++i, ++j) {
j = source.indexOf(query[i], j);
if (j === -1) {
return null;
}
indices[i] = j;
}
return indices;
}
StringExt.findIndices = findIndices;
/**
* A string matcher which uses a sum-of-squares algorithm.
*
* @param source - The source text which should be searched.
*
* @param query - The characters to locate in the source text.
*
* @param start - The index to start the search.
*
* @returns The match result, or `null` if there is no match.
* A lower `score` represents a stronger match.
*
* #### Complexity
     * Linear on `source`.
*
* #### Notes
* This scoring algorithm uses a sum-of-squares approach to determine
* the score. In order for there to be a match, all of the characters
* in `query` **must** appear in `source` in order. The index of each
* matching character is squared and added to the score. This means
* that early and consecutive character matches are preferred, while
* late matches are heavily penalized.
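     *
     * #### Example
     * A small illustration of the score (sum of squared match indices):
     *
     * ```typescript
     * import { StringExt } from '@lumino/algorithm';
     *
     * StringExt.matchSumOfSquares('foobar', 'fbr');
     * // { score: 34, indices: [0, 3, 5] }  (0*0 + 3*3 + 5*5)
     * ```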
*/
function matchSumOfSquares(source, query, start) {
if (start === void 0) { start = 0; }
var indices = findIndices(source, query, start);
if (!indices) {
return null;
}
var score = 0;
for (var i = 0, n = indices.length; i < n; ++i) {
var j = indices[i] - start;
score += j * j;
}
return { score: score, indices: indices };
}
StringExt.matchSumOfSquares = matchSumOfSquares;
/**
* A string matcher which uses a sum-of-deltas algorithm.
*
* @param source - The source text which should be searched.
*
* @param query - The characters to locate in the source text.
*
* @param start - The index to start the search.
*
* @returns The match result, or `null` if there is no match.
* A lower `score` represents a stronger match.
*
* #### Complexity
     * Linear on `source`.
*
* #### Notes
* This scoring algorithm uses a sum-of-deltas approach to determine
* the score. In order for there to be a match, all of the characters
* in `query` **must** appear in `source` in order. The delta between
* the indices are summed to create the score. This means that groups
* of matched characters are preferred, while fragmented matches are
* penalized.
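     *
     * #### Example
     * A small illustration of the score (sum of gaps between matches):
     *
     * ```typescript
     * import { StringExt } from '@lumino/algorithm';
     *
     * StringExt.matchSumOfDeltas('foobar', 'fbr');
     * // { score: 3, indices: [0, 3, 5] }  (gaps of 0 + 2 + 1)
     * ```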
*/
function matchSumOfDeltas(source, query, start) {
if (start === void 0) { start = 0; }
var indices = findIndices(source, query, start);
if (!indices) {
return null;
}
var score = 0;
var last = start - 1;
for (var i = 0, n = indices.length; i < n; ++i) {
var j = indices[i];
score += j - last - 1;
last = j;
}
return { score: score, indices: indices };
}
StringExt.matchSumOfDeltas = matchSumOfDeltas;
/**
* Highlight the matched characters of a source text.
*
* @param source - The text which should be highlighted.
*
* @param indices - The indices of the matched characters. They must
* appear in increasing order and must be in bounds of the source.
*
* @param fn - The function to apply to the matched chunks.
*
* @returns An array of unmatched and highlighted chunks.
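     *
     * #### Example
     * A small illustration using a simple wrapping function:
     *
     * ```typescript
     * import { StringExt } from '@lumino/algorithm';
     *
     * StringExt.highlight('foobar', [0, 3, 5], c => '<mark>' + c + '</mark>');
     * // ['<mark>f</mark>', 'oo', '<mark>b</mark>', 'a', '<mark>r</mark>']
     * ```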
*/
function highlight(source, indices, fn) {
// Set up the result array.
var result = [];
// Set up the counter variables.
var k = 0;
var last = 0;
var n = indices.length;
        // Iterate over each index.
while (k < n) {
// Set up the chunk indices.
var i = indices[k];
var j = indices[k];
// Advance the right chunk index until it's non-contiguous.
while (++k < n && indices[k] === j + 1) {
j++;
}
// Extract the unmatched text.
if (last < i) {
result.push(source.slice(last, i));
}
// Extract and highlight the matched text.
if (i < j + 1) {
result.push(fn(source.slice(i, j + 1)));
}
// Update the last visited index.
last = j + 1;
}
// Extract any remaining unmatched text.
if (last < source.length) {
result.push(source.slice(last));
}
// Return the highlighted result.
return result;
}
StringExt.highlight = highlight;
/**
* A 3-way string comparison function.
*
* @param a - The first string of interest.
*
* @param b - The second string of interest.
*
* @returns `-1` if `a < b`, else `1` if `a > b`, else `0`.
*/
function cmp(a, b) {
return a < b ? -1 : a > b ? 1 : 0;
}
StringExt.cmp = cmp;
})(exports.StringExt || (exports.StringExt = {}));
// Copyright (c) Jupyter Development Team.
/**
* Take a fixed number of items from an iterable.
*
* @param object - The iterable or array-like object of interest.
*
* @param count - The number of items to take from the iterable.
*
* @returns An iterator which yields the specified number of items
* from the source iterable.
*
* #### Notes
* The returned iterator will exhaust early if the source iterable
* contains an insufficient number of items.
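 *
 * #### Example
 * A small illustration of taking the first items:
 *
 * ```typescript
 * import { take, toArray } from '@lumino/algorithm';
 *
 * let stream = take([5, 4, 3, 2, 1], 3);
 *
 * toArray(stream); // [5, 4, 3]
 * ```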
*/
function take(object, count) {
return new TakeIterator(iter(object), count);
}
/**
* An iterator which takes a fixed number of items from a source.
*/
var TakeIterator = /** @class */ (function () {
/**
* Construct a new take iterator.
*
* @param source - The iterator of interest.
*
* @param count - The number of items to take from the source.
*/
function TakeIterator(source, count) {
this._source = source;
this._count = count;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
TakeIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
TakeIterator.prototype.clone = function () {
return new TakeIterator(this._source.clone(), this._count);
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
TakeIterator.prototype.next = function () {
if (this._count <= 0) {
return undefined;
}
var value = this._source.next();
if (value === undefined) {
return undefined;
}
this._count--;
return value;
};
return TakeIterator;
}());
// Copyright (c) Jupyter Development Team.
/**
* Iterate several iterables in lockstep.
*
* @param objects - The iterable or array-like objects of interest.
*
* @returns An iterator which yields successive tuples of values where
* each value is taken in turn from the provided iterables. It will
* be as long as the shortest provided iterable.
*
* #### Example
* ```typescript
* import { zip, toArray } from '@lumino/algorithm';
*
* let data1 = [1, 2, 3];
* let data2 = [4, 5, 6];
*
* let stream = zip(data1, data2);
*
* toArray(stream); // [[1, 4], [2, 5], [3, 6]]
* ```
*/
function zip() {
var objects = [];
for (var _i = 0; _i < arguments.length; _i++) {
objects[_i] = arguments[_i];
}
return new ZipIterator(objects.map(iter));
}
/**
* An iterator which iterates several sources in lockstep.
*/
var ZipIterator = /** @class */ (function () {
/**
* Construct a new zip iterator.
*
* @param source - The iterators of interest.
*/
function ZipIterator(source) {
this._source = source;
}
/**
* Get an iterator over the object's values.
*
* @returns An iterator which yields the object's values.
*/
ZipIterator.prototype.iter = function () {
return this;
};
/**
* Create an independent clone of the iterator.
*
* @returns A new independent clone of the iterator.
*/
ZipIterator.prototype.clone = function () {
return new ZipIterator(this._source.map(function (it) { return it.clone(); }));
};
/**
* Get the next value from the iterator.
*
* @returns The next value from the iterator, or `undefined`.
*/
ZipIterator.prototype.next = function () {
var result = new Array(this._source.length);
for (var i = 0, n = this._source.length; i < n; ++i) {
var value = this._source[i].next();
if (value === undefined) {
return undefined;
}
result[i] = value;
}
return result;
};
return ZipIterator;
}());
exports.ArrayIterator = ArrayIterator;
exports.ChainIterator = ChainIterator;
exports.EmptyIterator = EmptyIterator;
exports.EnumerateIterator = EnumerateIterator;
exports.FilterIterator = FilterIterator;
exports.FnIterator = FnIterator;
exports.ItemIterator = ItemIterator;
exports.KeyIterator = KeyIterator;
exports.MapIterator = MapIterator;
exports.RangeIterator = RangeIterator;
exports.RepeatIterator = RepeatIterator;
exports.RetroArrayIterator = RetroArrayIterator;
exports.StrideIterator = StrideIterator;
exports.TakeIterator = TakeIterator;
exports.ValueIterator = ValueIterator;
exports.ZipIterator = ZipIterator;
exports.chain = chain;
exports.each = each;
exports.empty = empty;
exports.enumerate = enumerate;
exports.every = every;
exports.filter = filter;
exports.find = find;
exports.findIndex = findIndex;
exports.iter = iter;
exports.iterFn = iterFn;
exports.iterItems = iterItems;
exports.iterKeys = iterKeys;
exports.iterValues = iterValues;
exports.map = map;
exports.max = max;
exports.min = min;
exports.minmax = minmax;
exports.once = once;
exports.range = range;
exports.reduce = reduce;
exports.repeat = repeat;
exports.retro = retro;
exports.some = some;
exports.stride = stride;
exports.take = take;
exports.toArray = toArray;
exports.toObject = toObject;
exports.topologicSort = topologicSort;
exports.zip = zip;
Object.defineProperty(exports, '__esModule', { value: true });
})));
//# sourceMappingURL=index.js.map
|
PypiClean
|
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bcolz_daily_bars.py
|
from functools import partial
import warnings
from bcolz import carray, ctable
import logbook
import numpy as np
from numpy import (
array,
full,
iinfo,
nan,
)
from pandas import (
DatetimeIndex,
NaT,
read_csv,
to_datetime,
Timestamp,
)
from six import iteritems, viewkeys
from toolz import compose
from trading_calendars import get_calendar
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.data.bar_reader import (
NoDataAfterDate,
NoDataBeforeDate,
NoDataOnDate,
)
from zipline.utils.functional import apply
from zipline.utils.input_validation import expect_element
from zipline.utils.numpy_utils import iNaT, float64_dtype, uint32_dtype
from zipline.utils.memoize import lazyval
from zipline.utils.cli import maybe_show_progress
from ._equities import _compute_row_slices, _read_bcolz_data
logger = logbook.Logger('UsEquityPricing')
OHLC = frozenset(['open', 'high', 'low', 'close'])
US_EQUITY_PRICING_BCOLZ_COLUMNS = (
'open', 'high', 'low', 'close', 'volume', 'day', 'id'
)
UINT32_MAX = iinfo(np.uint32).max
def check_uint32_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError(
"Value %s from column '%s' is too large" % (value, colname)
)
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def winsorise_uint32(df, invalid_data_behavior, column, *columns):
"""Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
    column, *columns : iterable[str]
        The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out.
"""
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX
if invalid_data_behavior != 'ignore':
mask |= df[columns].isnull()
else:
# we are not going to generate a warning or error for this so just use
# nan_to_num
df[columns] = np.nan_to_num(df[columns])
mv = mask.values
if mv.any():
if invalid_data_behavior == 'raise':
raise ValueError(
'%d values out of bounds for uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
)
if invalid_data_behavior == 'warn':
warnings.warn(
'Ignoring %d values because they are out of bounds for'
' uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
stacklevel=3, # one extra frame for `expect_element`
)
df[mask] = 0
return df
class BcolzDailyBarWriter(object):
"""
Class capable of writing daily OHLCV data to disk in a format that can
    be read efficiently by BcolzDailyBarReader.
Parameters
----------
filename : str
The location at which we should write our output.
calendar : zipline.utils.calendar.trading_calendar
Calendar to use to compute asset calendar offsets.
start_session: pd.Timestamp
Midnight UTC session label.
end_session: pd.Timestamp
Midnight UTC session label.
See Also
--------
zipline.data.bcolz_daily_bars.BcolzDailyBarReader
"""
_csv_dtypes = {
'open': float64_dtype,
'high': float64_dtype,
'low': float64_dtype,
'close': float64_dtype,
'volume': float64_dtype,
}
def __init__(self, filename, calendar, start_session, end_session):
self._filename = filename
if start_session != end_session:
if not calendar.is_session(start_session):
raise ValueError(
"Start session %s is invalid!" % start_session
)
if not calendar.is_session(end_session):
raise ValueError(
"End session %s is invalid!" % end_session
)
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
@property
def progress_bar_message(self):
return "Merging daily equity files:"
def progress_bar_item_show_func(self, value):
return value if value is None else str(value[0])
def write(self,
data,
assets=None,
show_progress=False,
invalid_data_behavior='warn'):
"""
Parameters
----------
data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]
The data chunks to write. Each chunk should be a tuple of sid
and the data for that asset.
assets : set[int], optional
The assets that should be in ``data``. If this is provided
we will check ``data`` against the assets and provide better
progress information.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional
What to do when data is encountered that is outside the range of
a uint32.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
ctx = maybe_show_progress(
(
(sid, self.to_ctable(df, invalid_data_behavior))
for sid, df in data
),
show_progress=show_progress,
item_show_func=self.progress_bar_item_show_func,
label=self.progress_bar_message,
length=len(assets) if assets is not None else None,
)
with ctx as it:
return self._write_internal(it, assets)
def write_csvs(self,
asset_map,
show_progress=False,
invalid_data_behavior='warn'):
"""Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32.
"""
read = partial(
read_csv,
parse_dates=['day'],
index_col='day',
dtype=self._csv_dtypes,
)
return self.write(
((asset, read(path)) for asset, path in iteritems(asset_map)),
assets=viewkeys(asset_map),
show_progress=show_progress,
invalid_data_behavior=invalid_data_behavior,
)
def _write_internal(self, iterator, assets):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
k: carray(array([], dtype=uint32_dtype))
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
earliest_date = None
sessions = self._calendar.sessions_in_range(
self._start_session, self._end_session
)
if assets is not None:
@apply
def iterator(iterator=iterator, assets=set(assets)):
for asset_id, table in iterator:
if asset_id not in assets:
raise ValueError('unknown asset id %r' % asset_id)
yield asset_id, table
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == 'id':
# We know what the content of this column is, so don't
# bother reading it.
columns['id'].append(
full((nrows,), asset_id, dtype='uint32'),
)
continue
columns[column_name].append(table[column_name])
if earliest_date is None:
earliest_date = table["day"][0]
else:
earliest_date = min(earliest_date, table["day"][0])
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
table_day_to_session = compose(
self._calendar.minute_to_session_label,
partial(Timestamp, unit='s', tz='UTC'),
)
asset_first_day = table_day_to_session(table['day'][0])
asset_last_day = table_day_to_session(table['day'][-1])
asset_sessions = sessions[
sessions.slice_indexer(asset_first_day, asset_last_day)
]
if len(table) != len(asset_sessions):
msg = (
'Asset id: {}, Got {} rows for daily bars table with first day={}, last '
'day={}, expected {} rows.\n'
'Missing sessions: {}\n'
'Extra sessions: {}. Skipping it'.format(
asset_id,
len(table),
asset_first_day.date(),
asset_last_day.date(),
len(asset_sessions),
asset_sessions.difference(
to_datetime(
np.array(table['day']),
unit='s',
utc=True,
)
).tolist(),
to_datetime(
np.array(table['day']),
unit='s',
utc=True,
).difference(asset_sessions).tolist(),
)
)
logger.warning(msg)
continue
# Calculate the number of trading days between the first date
            # in the stored data and the first date of **this** asset. This
            # offset is used for output alignment by the reader.
calendar_offset[asset_key] = sessions.get_loc(asset_first_day)
# This writes the table to disk.
full_table = ctable(
columns=[
columns[colname]
for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS
],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=self._filename,
mode='w',
)
full_table.attrs['first_trading_day'] = (
earliest_date if earliest_date is not None else iNaT
)
full_table.attrs['first_row'] = first_row
full_table.attrs['last_row'] = last_row
full_table.attrs['calendar_offset'] = calendar_offset
full_table.attrs['calendar_name'] = self._calendar.name
full_table.attrs['start_session_ns'] = self._start_session.value
full_table.attrs['end_session_ns'] = self._end_session.value
full_table.flush()
return full_table
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def to_ctable(self, raw_data, invalid_data_behavior):
if isinstance(raw_data, ctable):
# we already have a ctable so do nothing
return raw_data
winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC)
processed = (raw_data[list(OHLC)] * 1000).round().astype('uint32')
dates = raw_data.index.values.astype('datetime64[s]')
check_uint32_safe(dates.max().view(np.int64), 'day')
processed['day'] = dates.astype('uint32')
processed['volume'] = raw_data.volume.astype('uint32')
return ctable.fromdataframe(processed)
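# A minimal usage sketch for the writer/reader pair in this module (the paths,
# asset id, and dates below are hypothetical, not part of the original code):
#
#   import pandas as pd
#   from trading_calendars import get_calendar
#
#   cal = get_calendar('NYSE')
#   start = pd.Timestamp('2020-01-02', tz='UTC')
#   end = pd.Timestamp('2020-12-31', tz='UTC')
#   writer = BcolzDailyBarWriter('/tmp/daily_bars.bcolz', cal, start, end)
#   table = writer.write_csvs({1: '/tmp/asset_1.csv'})
#   reader = BcolzDailyBarReader(table)
#   reader.get_value(1, start, 'close')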
class BcolzDailyBarReader(CurrencyAwareSessionBarReader):
"""
    Reader for raw pricing data written by BcolzDailyBarWriter.
Parameters
----------
table : bcolz.ctable
        The ctable containing the pricing data, with attrs corresponding to the
Attributes list below.
read_all_threshold : int
        The number of equities that determines the read strategy: below this
        threshold, data is read one carray slice per asset; above it, the data
        for all assets is pulled into memory and then indexed into that array
        for each day and asset pair. Used to tune the performance of reads
        when using a small or large number of equities.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
start_session_ns: int
Epoch ns of the first session used in this dataset.
end_session_ns: int
Epoch ns of the last session used in this dataset.
calendar_name: str
        String identifier of trading calendar used (e.g., "NYSE").
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
Notes
------
    A Bcolz CTable is composed of Columns and Attributes.
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
The table is built to represent a long time range of data, e.g. ten years
    of equity data, so the lengths of the asset blocks are not equal to each
    other. The blocks are clipped to the known start and end date of each asset
to cut down on the number of empty values that would need to be included to
make a regular/cubic dataset.
    When read across the open, high, low, close, and volume columns, rows with
    the same index represent the same asset and day.
See Also
--------
zipline.data.bcolz_daily_bars.BcolzDailyBarWriter
"""
def __init__(self, table, read_all_threshold=3000):
self._maybe_table_rootdir = table
# Cache of fully read np.array for the carrays in the daily bar table.
# raw_array does not use the same cache, but it could.
# Need to test keeping the entire array in memory for the course of a
# process first.
self._spot_cols = {}
self.PRICE_ADJUSTMENT_FACTOR = 0.001
self._read_all_threshold = read_all_threshold
@lazyval
def _table(self):
maybe_table_rootdir = self._maybe_table_rootdir
if isinstance(maybe_table_rootdir, ctable):
return maybe_table_rootdir
return ctable(rootdir=maybe_table_rootdir, mode='r')
@lazyval
def sessions(self):
if 'calendar' in self._table.attrs.attrs:
# backwards compatibility with old formats, will remove
return DatetimeIndex(self._table.attrs['calendar'], tz='UTC')
else:
cal = get_calendar(self._table.attrs['calendar_name'])
start_session_ns = self._table.attrs['start_session_ns']
start_session = Timestamp(start_session_ns, tz='UTC')
end_session_ns = self._table.attrs['end_session_ns']
end_session = Timestamp(end_session_ns, tz='UTC')
sessions = cal.sessions_in_range(start_session, end_session)
return sessions
@lazyval
def _first_rows(self):
return {
int(asset_id): start_index
for asset_id, start_index in iteritems(
self._table.attrs['first_row'],
)
}
@lazyval
def _last_rows(self):
return {
int(asset_id): end_index
for asset_id, end_index in iteritems(
self._table.attrs['last_row'],
)
}
@lazyval
def _calendar_offsets(self):
return {
int(id_): offset
for id_, offset in iteritems(
self._table.attrs['calendar_offset'],
)
}
@lazyval
def first_trading_day(self):
try:
return Timestamp(
self._table.attrs['first_trading_day'],
unit='s',
tz='UTC'
)
except KeyError:
return None
@lazyval
def trading_calendar(self):
if 'calendar_name' in self._table.attrs.attrs:
return get_calendar(self._table.attrs['calendar_name'])
else:
return None
@property
def last_available_dt(self):
return self.sessions[-1]
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
)
def load_raw_arrays(self, columns, start_date, end_date, assets):
start_idx = self._load_raw_arrays_date_to_index(start_date)
end_idx = self._load_raw_arrays_date_to_index(end_date)
first_rows, last_rows, offsets = self._compute_slices(
start_idx,
end_idx,
assets,
)
read_all = len(assets) > self._read_all_threshold
return _read_bcolz_data(
self._table,
(end_idx - start_idx + 1, len(assets)),
list(columns),
first_rows,
last_rows,
offsets,
read_all,
)
def _load_raw_arrays_date_to_index(self, date):
try:
return self.sessions.get_loc(date)
except KeyError:
raise NoDataOnDate(date)
def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
            The name of an OHLCV carray in the daily_bar_table
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname]
return col
def get_last_traded_dt(self, asset, day):
volumes = self._spot_col('volume')
search_day = day
while True:
try:
ix = self.sid_day_index(asset, search_day)
except NoDataBeforeDate:
return NaT
except NoDataAfterDate:
prev_day_ix = self.sessions.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self.sessions[prev_day_ix]
continue
except NoDataOnDate:
return NaT
if volumes[ix] != 0:
return search_day
prev_day_ix = self.sessions.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self.sessions[prev_day_ix]
else:
return NaT
def sid_day_index(self, sid, day):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
            Raises a NoDataOnDate exception if the given day for the given sid
            falls before or after the date range of the equity.
"""
try:
day_loc = self.sessions.get_loc(day)
except Exception:
raise NoDataOnDate("day={0} is outside of calendar={1}".format(
day, self.sessions))
offset = day_loc - self._calendar_offsets[sid]
if offset < 0:
raise NoDataBeforeDate(
"No data on or before day={0} for sid={1}".format(
day, sid))
ix = self._first_rows[sid] + offset
if ix > self._last_rows[sid]:
raise NoDataAfterDate(
"No data on or after day={0} for sid={1}".format(
day, sid))
return ix
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field, e.g. ('open', 'high', 'low', 'close', 'volume').
Returns
-------
float
            The spot price for ``field`` of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day for the given sid
            falls before or after the date range of the equity.
            Returns NaN if the day is within the date range, but the price is
0.
"""
ix = self.sid_day_index(sid, dt)
price = self._spot_col(field)[ix]
if field != 'volume':
if price == 0:
return nan
else:
return price * 0.001
else:
return price
def currency_codes(self, sids):
# XXX: This is pretty inefficient. This reader doesn't really support
        # currency codes, so we always either return USD or None if we don't
# know about the sid at all.
first_rows = self._first_rows
out = []
for sid in sids:
if sid in first_rows:
out.append('USD')
else:
out.append(None)
return np.array(out, dtype=object)
|
PypiClean
|
/dcos-deploy-0.4.0.tar.gz/dcos-deploy-0.4.0/dcosdeploy/modules/iam_groups.py
|
from ..base import ConfigurationException
from ..util import global_config
from ..util.output import echo
from ..adapters.bouncer import BouncerAdapter
from .iam_users import render_permissions
class IAMGroup:
def __init__(self, name, description, provider_type, permissions):
self.name = name
self.description = description
self.provider_type = provider_type
self.permissions = permissions
def parse_config(name, config, config_helper):
name = config.get("name")
if not name:
raise ConfigurationException("name is required for iam_group")
name = config_helper.render(name)
description = config.get("description")
if not description:
raise ConfigurationException("description is required for iam_group")
description = config_helper.render(description)
provider_type = config_helper.render(config.get("provider_type"))
permissions = render_permissions(config_helper, config.get("permissions", dict()))
return IAMGroup(name, description, provider_type, permissions)
class IAMGroupsManager:
def __init__(self):
self.bouncer = BouncerAdapter()
def deploy(self, config, dependencies_changed=False, force=False):
changed = False
existing_group = self.bouncer.get_group(config.name)
if existing_group is None:
echo("\tCreating group")
self.bouncer.create_group(config.name, config.description, config.provider_type)
changed = True
else:
if existing_group["description"] != config.description:
echo("\tGroup already exists. Updating description.")
self.bouncer.update_group(config.name, config.description)
else:
echo("\tGroup already exists.")
existing_permissions = self.bouncer.get_permissions_for_group(config.name)
existing_rids = self.bouncer.get_rids()
# Update permissions
echo("\tUpdating permissions")
for rid, actions in existing_permissions.items():
target_actions = config.permissions.get(rid, list())
for action in actions:
if action not in target_actions:
self.bouncer.remove_permission_from_group(config.name, rid, action)
changed = True
for rid, actions in config.permissions.items():
if rid not in existing_rids:
self.bouncer.create_permission(rid)
for action in actions:
if action not in existing_permissions.get(rid, list()):
self.bouncer.add_permission_to_group(config.name, rid, action)
changed = True
return changed
def dry_run(self, config, dependencies_changed=False):
existing_group = self.bouncer.get_group(config.name)
if existing_group is None:
echo("Would create group %s" % config.name)
return True
elif existing_group["description"] != config.description:
if global_config.debug:
echo("Would update description for group %s from '%s' to '%s'" % (config.name, existing_group["description"], config.description))
else:
echo("Would update description for group %s" % config.name)
# Check permissions
existing_rids = self.bouncer.get_rids()
existing_permissions = self.bouncer.get_permissions_for_group(config.name)
changes = False
for rid, actions in existing_permissions.items():
if rid not in config.permissions:
echo("Would remove permission %s completely from group %s" % (rid, config.name))
changes = True
else:
for action in actions:
if action not in config.permissions[rid]:
echo("Would remove permission %s %s from group %s" % (rid, action, config.name))
changes = True
for rid, actions in config.permissions.items():
if rid not in existing_rids:
echo("Would create permission %s" % rid)
for action in actions:
if action not in existing_permissions.get(rid, list()):
echo("Would add permission %s %s to group %s" % (rid, action, config.name))
changes = True
return changes
def delete(self, config, force=False):
echo("\tDeleting group")
self.bouncer.delete_group(config.name)
echo("\tDeletion complete.")
return True
def dry_delete(self, config):
if self.bouncer.get_group(config.name) is not None:
echo("Would delete group %s" % config.name)
return True
else:
return False
__config__ = IAMGroup
__manager__ = IAMGroupsManager
__config_name__ = "iam_group"
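# --- Usage sketch (illustrative only, not part of dcos-deploy) ---
# A hedged example of how the pieces above fit together. The raw config dict, the
# permission resource id and the ``config_helper`` object are assumptions made for
# illustration; in practice dcos-deploy derives them from the YAML configuration.
#
#   raw = {
#       "name": "ops-team",
#       "description": "Operations group",
#       "provider_type": "internal",
#       "permissions": {"dcos:adminrouter:ops:mesos": ["full"]},
#   }
#   group = parse_config("ops-team", raw, config_helper)
#   manager = IAMGroupsManager()
#   if manager.dry_run(group):   # prints the changes that would be made
#       manager.deploy(group)    # talks to the DC/OS IAM (bouncer) API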
|
PypiClean
|
/openlmi-tools-0.10.4.tar.gz/openlmi-tools-0.10.4/lmi/scripts/common/versioncheck/__init__.py
|
import functools
from pyparsing import ParseException
from lmi.scripts.common import Configuration
from lmi.scripts.common import errors
from lmi.scripts.common.versioncheck import parser
def cmp_profiles(fst, snd):
"""
Compare two profiles by their version.
:returns:
* -1 if the *fst* profile has lower version than *snd*
* 0 if their versions are equal
* 1 otherwise
:rtype: int
"""
fstver = fst.RegisteredVersion
sndver = snd.RegisteredVersion
if fstver == sndver:
return 0
return -1 if parser.cmp_version(fstver, sndver) else 1
def get_profile_version(conn, name, cache=None):
"""
Get version of registered profile on particular broker. Queries
``CIM_RegisteredProfile`` and ``CIM_RegisteredSubProfile``. The latter
comes in question only when ``CIM_RegisteredProfile`` does not yield any
matching result.
:param conn: Connection object.
:param string name: Name of the profile which must match value of *RegisteredName*
property.
:param dictionary cache: Optional cache where the result will be stored for
later use. This greatly speeds up evaluation of several expressions referring
to the same profiles or classes.
:returns: Version of matching profile found. If there were more of them,
the highest version will be returned. ``None`` will be returned when no matching
profile or subprofile is found.
:rtype: string
"""
if cache and (conn.uri, name) in cache:
return cache[(conn.uri, name)]
insts = conn.root.interop.wql('SELECT * FROM CIM_RegisteredProfile'
' WHERE RegisteredName=\"%s\"' % name)
regular = set(i for i in insts if i.classname.endswith('RegisteredProfile'))
if regular: # select instances of PG_RegisteredProfile if available
insts = regular
else: # otherwise fallback to PG_RegisteredSubProfile instances
insts = set(i for i in insts if i not in regular)
if not insts:
ret = None
else:
ret = sorted(insts, cmp=cmp_profiles)[-1].RegisteredVersion
if cache is not None:
cache[(conn.uri, name)] = ret
return ret
def get_class_version(conn, name, namespace=None, cache=None):
"""
Query broker for version of particular CIM class. Version is stored in
``Version`` qualifier of particular CIM class.
:param conn: Connection object.
:param string name: Name of class to query.
:param string namespace: Optional CIM namespace. Defaults to configured namespace.
:param dictionary cache: Optional cache used to speed up expression processing.
:returns: Version of CIM matching class. Empty string if class is registered but
is missing ``Version`` qualifier and ``None`` if it is not registered.
:rtype: string
"""
if namespace is None:
namespace = Configuration.get_instance().namespace
if cache and (conn.uri, namespace, name) in cache:
return cache[(conn.uri, namespace, name)]
ns = conn.get_namespace(namespace)
cls = getattr(ns, name, None)
if not cls:
ret = None
else:
quals = cls.wrapped_object.qualifiers
if 'Version' not in quals:
ret = ''
else:
ret = quals['Version'].value
if cache is not None:
cache[(conn.uri, namespace, name)] = ret
return ret
def eval_respl(expr, conn, namespace=None, cache=None):
"""
Evaluate LMIReSpL expression on particular broker.
:param string expr: Expression to evaluate.
:param conn: Connection object.
:param string namespace: Optional CIM namespace where CIM classes will be
searched.
:param dictionary cache: Optional cache speeding up evaluation.
:returns: ``True`` if requirements in expression are satisfied.
:rtype: boolean
"""
if namespace is None:
namespace = Configuration.get_instance().namespace
stack = []
pvget = functools.partial(get_profile_version, conn, cache=cache)
cvget = functools.partial(get_class_version, conn,
namespace=namespace, cache=cache)
pr = parser.bnf_parser(stack, pvget, cvget)
pr.parseString(expr, parseAll=True)
# Now evaluate starting non-terminal created on stack.
return stack[0]()
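# --- Usage sketch (illustrative only) ---
# A hedged example of evaluating a version requirement against a broker. ``conn``
# is assumed to be an LMIShell connection obtained elsewhere, and the expression
# below is only indicative of the LMIReSpL syntax, not taken from this module.
#
#   cache = {}
#   ok = eval_respl('class LMI_Account >= 0.5', conn, cache=cache)
#   # ``ok`` is True when the broker satisfies the (hypothetical) requirement above;
#   # the same ``cache`` dict can be reused across several expressions.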
|
PypiClean
|
/matxscript-1.8.1-py3-none-macosx_11_0_arm64.whl/matx/vision/tv_transforms/crop.py
|
from typing import Tuple, Union, Sequence, Dict, Any, List, overload
import numbers
import random
import sys
matx = sys.modules['matx']
from .. import ASYNC, BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT_101, BORDER_REFLECT
from .. import CropOp, PadWithBorderOp
from ._base import BaseInterfaceClass, BatchBaseClass
class CenterCrop(BaseInterfaceClass):
def __init__(self,
size: List[int],
device_id: int = -2,
sync: int = ASYNC) -> None:
super().__init__(device_id=device_id, sync=sync)
# check size
if len(size) == 1:
self._size: Tuple[int, int] = (size[0], size[0])
elif len(size) == 2:
self._size: Tuple[int, int] = (size[0], size[1])
else:
assert False, "Crop size value should be an integer or a list/tuple with length 2."
def __call__(self, device: Any, device_str: str, sync: int) -> Any:
return CenterCropImpl(device, device_str, self._size, sync)
class CenterCropImpl(BatchBaseClass):
def __init__(self,
device: Any,
device_str: str,
size: Tuple[int, int],
sync: int = ASYNC) -> None:
super().__init__()
self.size: Tuple[int, int] = size
self.device_str: str = device_str
self.sync: int = sync
fill = (0, 0, 0)
padding_mode = BORDER_CONSTANT
self.pad_op: PadWithBorderOp = PadWithBorderOp(device, fill, padding_mode)
self.crop_op: CropOp = CropOp(device)
self.name: str = "CenterCropImpl"
def get_crop_params(self, h: int, w: int) -> Tuple[int, int, int, int]:
th, tw = self.size
x = (w - tw) // 2
y = (h - th) // 2
return y, x, th, tw
def get_pad_params(self, h: int, w: int) -> Tuple[int, int, bool]:
th, tw = self.size
h_pad, w_pad = 0, 0
if th > h:
h_pad = int((1 + th - h) / 2)
if tw > w:
w_pad = int((1 + tw - w) / 2)
need_pad = (h_pad + w_pad) > 0
return h_pad, w_pad, need_pad
def _process_crop_op(self, imgs: List[matx.NDArray]) -> List[matx.NDArray]:
batch_size = len(imgs)
need_pad = False
h_pads, w_pads = [], []
padded_height, padded_width = [], []
for i in range(batch_size):
h, w = imgs[i].shape()[:2]
h_pad, w_pad, need_pad_tmp = self.get_pad_params(h, w)
need_pad |= need_pad_tmp
h_pads.append(h_pad)
w_pads.append(w_pad)
padded_height.append(h + 2 * h_pad)
padded_width.append(w + 2 * w_pad)
if need_pad:
imgs = self.pad_op(imgs, h_pads, h_pads, w_pads, w_pads, sync=self.sync)
crop_x, crop_y, crop_w, crop_h = [], [], [], []
for i in range(batch_size):
y, x, h, w = self.get_crop_params(padded_height[i], padded_width[i])
crop_x.append(x)
crop_y.append(y)
crop_w.append(w)
crop_h.append(h)
imgs = self.crop_op(imgs, crop_x, crop_y, crop_w, crop_h, sync=self.sync)
return imgs
def _process(self, imgs: List[matx.NDArray]) -> List[matx.NDArray]:
return self._process_crop_op(imgs)
def __repr__(self) -> str:
return self.name + '(size={0}, device={1}, sync={2})'.format(
self.size, self.device_str, self.sync)
class RandomCrop(BaseInterfaceClass):
def __init__(self,
size: List[int],
padding: List[int],
pad_if_needed: bool = False,
fill: List[int] = [0],
padding_mode: str = "constant",
device_id: int = -2,
sync: int = ASYNC) -> None:
super().__init__(device_id=device_id, sync=sync)
# check size
if len(size) == 1:
self._size: Tuple[int, int] = (size[0], size[0])
elif len(size) == 2:
self._size: Tuple[int, int] = (size[0], size[1])
else:
assert False, "Crop size value should be an integer or a list/tuple with length 2."
# check padding
if padding is None:
self._padding: Tuple[int, int, int, int] = (0, 0, 0, 0)
elif len(padding) == 1:
self._padding: Tuple[int, int, int, int] = (
padding[0], padding[0], padding[0], padding[0])
elif len(padding) == 2:
self._padding: Tuple[int, int, int, int] = (
padding[0], padding[1], padding[0], padding[1])
elif len(padding) == 4:
self._padding: Tuple[int, int, int, int] = (
padding[0], padding[1], padding[2], padding[3])
else:
assert False, "Padding must be None or a 1, 2 or 4 element tuple.."
self._pad_if_needed: bool = pad_if_needed
# check fill
if len(fill) == 1:
self._fill: Tuple[int, int, int] = (fill[0], fill[0], fill[0])
elif len(fill) == 3:
self._fill: Tuple[int, int, int] = (fill[0], fill[1], fill[2])
else:
assert False, "fill value should be a 1 or 3 element tuple."
# check padding_mode
assert padding_mode in ["constant", "edge", "reflect",
"symmetric"], "padding_mode should be constant, edge, reflect or symmetric."
self._padding_mode: str = padding_mode
def __call__(self, device: Any, device_str: str, sync: int) -> Any:
return RandomCropImpl(
device,
device_str,
self._size,
self._padding,
self._pad_if_needed,
self._fill,
self._padding_mode,
sync)
class RandomCropImpl(BatchBaseClass):
def __init__(self,
device: Any,
device_str: str,
size: Tuple[int, int],
padding: Tuple[int, int, int, int],
pad_if_needed: bool,
fill: Tuple[int, int, int],
padding_mode: str,
sync: int) -> None:
super().__init__()
self.device_str: str = device_str
self.size: Tuple[int, int] = size
self.padding: Tuple[int, int, int, int] = padding
self.pad_if_needed: bool = pad_if_needed
self.fill: Tuple[int, int, int] = fill
self.padding_mode: str = padding_mode
self.sync: int = sync
self.crop_op: CropOp = CropOp(device)
torch_padding_mode: Dict[str, str] = {
"constant": BORDER_CONSTANT,
"edge": BORDER_REPLICATE,
"reflect": BORDER_REFLECT_101,
"symmetric": BORDER_REFLECT
}
self.pad_op: PadWithBorderOp = PadWithBorderOp(
device, self.fill, torch_padding_mode[self.padding_mode])
self.name: str = "RandomCrop"
def get_crop_params(self, h: int, w: int) -> Tuple[int, int, int, int]:
th, tw = self.size
if w == tw and h == th:
return 0, 0, h, w
i = 0
j = 0
if h - th > 0:
i = random.randint(0, h - th)
else:
i = random.randint(h - th, 0)
if w - tw > 0:
j = random.randint(0, w - tw)
else:
j = random.randint(w - tw, 0)
return i, j, th, tw
def get_pad_params(self, h: int, w: int) -> Tuple[int, int, int, int, bool]:
left_p, top_p, right_p, bot_p = self.padding
if self.pad_if_needed and h + top_p + bot_p < self.size[0]:
h_pad = self.size[0] - h - top_p - bot_p
top_p += h_pad
bot_p += h_pad
if self.pad_if_needed and w + left_p + right_p < self.size[1]:
w_pad = self.size[1] - w - left_p - right_p
left_p += w_pad
right_p += w_pad
need_pad = (top_p + bot_p + left_p + right_p) > 0
return top_p, bot_p, left_p, right_p, need_pad
def _process_crop_op(self, imgs: List[matx.NDArray]) -> List[matx.NDArray]:
batch_size = len(imgs)
need_pad = False
top_pads, bot_pads, left_pads, right_pads = [], [], [], []
padded_height, padded_width = [], []
for i in range(batch_size):
h, w = imgs[i].shape()[:2]
top_p, bot_p, left_p, right_p, need_pad_tmp = self.get_pad_params(h, w)
need_pad |= need_pad_tmp
top_pads.append(top_p)
bot_pads.append(bot_p)
left_pads.append(left_p)
right_pads.append(right_p)
padded_height.append(h + top_p + bot_p)
padded_width.append(w + left_p + right_p)
if need_pad:
imgs = self.pad_op(imgs, top_pads, bot_pads, left_pads, right_pads, sync=self.sync)
crop_x, crop_y, crop_w, crop_h = [], [], [], []
for i in range(batch_size):
y, x, h, w = self.get_crop_params(padded_height[i], padded_width[i])
crop_x.append(x)
crop_y.append(y)
crop_w.append(w)
crop_h.append(h)
imgs = self.crop_op(imgs, crop_x, crop_y, crop_w, crop_h, sync=self.sync)
return imgs
def _process(self, imgs: List[matx.NDArray]) -> List[matx.NDArray]:
return self._process_crop_op(imgs)
def __repr__(self) -> str:
return self.name + "(size={0}, padding={1}, device={2}, sync={3})".format(
self.size, self.padding, self.device_str, self.sync)
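# --- Usage sketch (illustrative only, assumes a working matx + vision build) ---
# A hedged example of configuring the transforms above. How the interface object is
# bound to a device and then applied to a batch is handled by the surrounding
# tv_transforms machinery (BaseInterfaceClass / BatchBaseClass); the binding and
# call shown in the comments are assumptions for illustration.
#
#   center = CenterCrop(size=[224], device_id=0)                   # square 224x224 crop
#   random = RandomCrop(size=[224, 224], padding=[4], pad_if_needed=True,
#                       fill=[0], padding_mode="constant")
#   # impl = center(device, device_str, sync)    # binding done by the pipeline
#   # out_imgs = impl(list_of_matx_ndarrays)     # batch of HWC images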
|
PypiClean
|
/LZBEAT-0.13.1.tar.gz/LZBEAT-0.13.1/econml/metalearners/_metalearners.py
|
import numpy as np
import warnings
from .._cate_estimator import BaseCateEstimator, LinearCateEstimator, TreatmentExpansionMixin
from sklearn import clone
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.utils import check_array, check_X_y
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer
from ..utilities import (check_inputs, check_models, broadcast_unit_treatments, reshape_treatmentwise_effects,
inverse_onehot, transpose, _deprecate_positional)
from .._shap import _shap_explain_model_cate
class TLearner(TreatmentExpansionMixin, LinearCateEstimator):
"""Conditional mean regression estimator.
Parameters
----------
models : outcome estimators for both control units and treatment units
It can be a single estimator applied to all the control and treatment units or a tuple/list of
estimators with one estimator per treatment (including control).
Must implement `fit` and `predict` methods.
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
"""
def __init__(self, *,
models,
categories='auto'):
self.models = clone(models, safe=False)
self.categories = categories
super().__init__()
@BaseCateEstimator._wrap_fit
def fit(self, Y, T, *, X, inference=None):
"""Build an instance of TLearner.
Parameters
----------
Y : array-like, shape (n, ) or (n, d_y)
Outcome(s) for the treatment policy.
T : array-like, shape (n, ) or (n, 1)
Treatment policy. Only binary treatments are accepted as input.
T will be flattened if shape is (n, 1).
X : array-like, shape (n, d_x)
Feature vector that captures heterogeneity.
inference : string, :class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`)
Returns
-------
self : an instance of self.
"""
# Check inputs
Y, T, X, _ = check_inputs(Y, T, X, multi_output_T=False)
categories = self.categories
if categories != 'auto':
categories = [categories] # OneHotEncoder expects a 2D array with features per column
self.transformer = OneHotEncoder(categories=categories, sparse=False, drop='first')
T = self.transformer.fit_transform(T.reshape(-1, 1))
self._d_t = T.shape[1:]
T = inverse_onehot(T)
self.models = check_models(self.models, self._d_t[0] + 1)
for ind in range(self._d_t[0] + 1):
self.models[ind].fit(X[T == ind], Y[T == ind])
def const_marginal_effect(self, X):
"""Calculate the constant marignal treatment effect on a vector of features for each sample.
Parameters
----------
X : matrix, shape (m × d_x)
Matrix of features for each sample.
Returns
-------
τ_hat : matrix, shape (m, d_y, d_t)
Constant marginal CATE of each treatment on each outcome for each sample X[i].
Note that when Y is a vector rather than a 2-dimensional array,
the corresponding singleton dimensions in the output will be collapsed
"""
# Check inputs
X = check_array(X)
taus = []
for ind in range(self._d_t[0]):
taus.append(self.models[ind + 1].predict(X) - self.models[0].predict(X))
taus = np.column_stack(taus).reshape((-1,) + self._d_t + self._d_y) # shape as of m*d_t*d_y
if self._d_y:
taus = transpose(taus, (0, 2, 1)) # shape as of m*d_y*d_t
return taus
class SLearner(TreatmentExpansionMixin, LinearCateEstimator):
"""Conditional mean regression estimator where the treatment assignment is taken as a feature in the ML model.
Parameters
----------
overall_model : outcome estimator for all units
Model will be trained on X|T where '|' denotes concatenation.
Must implement `fit` and `predict` methods.
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
"""
def __init__(self, *,
overall_model,
categories='auto'):
self.overall_model = clone(overall_model, safe=False)
self.categories = categories
super().__init__()
@BaseCateEstimator._wrap_fit
def fit(self, Y, T, *, X=None, inference=None):
"""Build an instance of SLearner.
Parameters
----------
Y : array-like, shape (n, ) or (n, d_y)
Outcome(s) for the treatment policy.
T : array-like, shape (n, ) or (n, 1)
Treatment policy. Only binary treatments are accepted as input.
T will be flattened if shape is (n, 1).
X : array-like, shape (n, d_x), optional
Feature vector that captures heterogeneity.
inference: string, :class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`)
Returns
-------
self : an instance of self.
"""
# Check inputs
if X is None:
X = np.zeros((Y.shape[0], 1))
Y, T, X, _ = check_inputs(Y, T, X, multi_output_T=False)
categories = self.categories
if categories != 'auto':
categories = [categories] # OneHotEncoder expects a 2D array with features per column
self.transformer = OneHotEncoder(categories=categories, sparse=False, drop='first')
T = self.transformer.fit_transform(T.reshape(-1, 1))
self._d_t = (T.shape[1], )
# Note: unlike other Metalearners, we need the controls' encoded column for training
# Thus, we append the controls column before the one-hot-encoded T
# We might want to revisit, though, since it's linearly determined by the others
feat_arr = np.concatenate((X, 1 - np.sum(T, axis=1).reshape(-1, 1), T), axis=1)
self.overall_model.fit(feat_arr, Y)
def const_marginal_effect(self, X=None):
"""Calculate the constant marginal treatment effect on a vector of features for each sample.
Parameters
----------
X : matrix, shape (m × dₓ), optional
Matrix of features for each sample.
Returns
-------
τ_hat : matrix, shape (m, d_y, d_t)
Constant marginal CATE of each treatment on each outcome for each sample X[i].
Note that when Y is a vector rather than a 2-dimensional array,
the corresponding singleton dimensions in the output will be collapsed
"""
# Check inputs
if X is None:
X = np.zeros((1, 1))
X = check_array(X)
Xs, Ts = broadcast_unit_treatments(X, self._d_t[0] + 1)
feat_arr = np.concatenate((Xs, Ts), axis=1)
prediction = self.overall_model.predict(feat_arr).reshape((-1, self._d_t[0] + 1,) + self._d_y)
if self._d_y:
prediction = transpose(prediction, (0, 2, 1))
taus = (prediction - np.repeat(prediction[:, :, 0], self._d_t[0] + 1).reshape(prediction.shape))[:, :, 1:]
else:
taus = (prediction - np.repeat(prediction[:, 0], self._d_t[0] + 1).reshape(prediction.shape))[:, 1:]
return taus
class XLearner(TreatmentExpansionMixin, LinearCateEstimator):
"""Meta-algorithm proposed by Kunzel et al. that performs best in settings
where the number of units in one treatment arm is much larger than others.
Parameters
----------
models : outcome estimators for both control units and treatment units
It can be a single estimator applied to all the control and treatment units or a tuple/list of
estimators with one estimator per treatment (including control).
Must implement `fit` and `predict` methods.
cate_models : estimator for pseudo-treatment effects on control and treatments
It can be a single estimator applied to all the control and treatments or a tuple/list of
estimators with one estimator per treatment (including control).
If None, it will be same models as the outcome estimators.
Must implement `fit` and `predict` methods.
propensity_model : estimator for the propensity function
Must implement `fit` and `predict_proba` methods. The `fit` method must
be able to accept X and T, where T is a shape (n, ) array.
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
"""
def __init__(self, *,
models,
cate_models=None,
propensity_model=LogisticRegression(),
categories='auto'):
self.models = clone(models, safe=False)
self.cate_models = clone(cate_models, safe=False)
self.propensity_model = clone(propensity_model, safe=False)
self.categories = categories
super().__init__()
@BaseCateEstimator._wrap_fit
def fit(self, Y, T, *, X, inference=None):
"""Build an instance of XLearner.
Parameters
----------
Y : array-like, shape (n, ) or (n, d_y)
Outcome(s) for the treatment policy.
T : array-like, shape (n, ) or (n, 1)
Treatment policy. Only binary treatments are accepted as input.
T will be flattened if shape is (n, 1).
X : array-like, shape (n, d_x)
Feature vector that captures heterogeneity.
inference : string, :class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`)
Returns
-------
self : an instance of self.
"""
# Check inputs
Y, T, X, _ = check_inputs(Y, T, X, multi_output_T=False)
if Y.ndim == 2 and Y.shape[1] == 1:
Y = Y.flatten()
categories = self.categories
if categories != 'auto':
categories = [categories] # OneHotEncoder expects a 2D array with features per column
self.transformer = OneHotEncoder(categories=categories, sparse=False, drop='first')
T = self.transformer.fit_transform(T.reshape(-1, 1))
self._d_t = T.shape[1:]
T = inverse_onehot(T)
self.models = check_models(self.models, self._d_t[0] + 1)
if self.cate_models is None:
self.cate_models = [clone(model, safe=False) for model in self.models]
else:
self.cate_models = check_models(self.cate_models, self._d_t[0] + 1)
self.propensity_models = []
self.cate_treated_models = []
self.cate_controls_models = []
# Estimate response function
for ind in range(self._d_t[0] + 1):
self.models[ind].fit(X[T == ind], Y[T == ind])
for ind in range(self._d_t[0]):
self.cate_treated_models.append(clone(self.cate_models[ind + 1], safe=False))
self.cate_controls_models.append(clone(self.cate_models[0], safe=False))
self.propensity_models.append(clone(self.propensity_model, safe=False))
imputed_effect_on_controls = self.models[ind + 1].predict(X[T == 0]) - Y[T == 0]
imputed_effect_on_treated = Y[T == ind + 1] - self.models[0].predict(X[T == ind + 1])
self.cate_controls_models[ind].fit(X[T == 0], imputed_effect_on_controls)
self.cate_treated_models[ind].fit(X[T == ind + 1], imputed_effect_on_treated)
X_concat = np.concatenate((X[T == 0], X[T == ind + 1]), axis=0)
T_concat = np.concatenate((T[T == 0], T[T == ind + 1]), axis=0)
self.propensity_models[ind].fit(X_concat, T_concat)
def const_marginal_effect(self, X):
"""Calculate the constant marginal treatment effect on a vector of features for each sample.
Parameters
----------
X : matrix, shape (m × dₓ)
Matrix of features for each sample.
Returns
-------
τ_hat : matrix, shape (m, d_y, d_t)
Constant marginal CATE of each treatment on each outcome for each sample X[i].
Note that when Y is a vector rather than a 2-dimensional array,
the corresponding singleton dimensions in the output will be collapsed
"""
X = check_array(X)
m = X.shape[0]
taus = []
for ind in range(self._d_t[0]):
propensity_scores = self.propensity_models[ind].predict_proba(X)[:, 1:]
tau_hat = propensity_scores * self.cate_controls_models[ind].predict(X).reshape(m, -1) \
+ (1 - propensity_scores) * self.cate_treated_models[ind].predict(X).reshape(m, -1)
taus.append(tau_hat)
taus = np.column_stack(taus).reshape((-1,) + self._d_t + self._d_y) # shape as of m*d_t*d_y
if self._d_y:
taus = transpose(taus, (0, 2, 1)) # shape as of m*d_y*d_t
return taus
class DomainAdaptationLearner(TreatmentExpansionMixin, LinearCateEstimator):
"""Meta-algorithm that uses domain adaptation techniques to account for
covariate shift (selection bias) among the treatment arms.
Parameters
----------
models : outcome estimators for both control units and treatment units
It can be a single estimator applied to all the control and treatment units or a tuple/list of
estimators with one estimator per treatment (including control).
Must implement `fit` and `predict` methods.
The `fit` method must accept the `sample_weight` parameter.
final_models : estimators for pseudo-treatment effects for each treatment
It can be a single estimator applied to all the control and treatment units or a tuple/list of
estimators with one estimator per treatment (excluding control).
Must implement `fit` and `predict` methods.
propensity_model : estimator for the propensity function
Must implement `fit` and `predict_proba` methods. The `fit` method must
be able to accept X and T, where T is a shape (n, 1) array.
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
"""
def __init__(self, *,
models,
final_models,
propensity_model=LogisticRegression(),
categories='auto'):
self.models = clone(models, safe=False)
self.final_models = clone(final_models, safe=False)
self.propensity_model = clone(propensity_model, safe=False)
self.categories = categories
super().__init__()
@BaseCateEstimator._wrap_fit
def fit(self, Y, T, *, X, inference=None):
"""Build an instance of DomainAdaptationLearner.
Parameters
----------
Y : array-like, shape (n, ) or (n, d_y)
Outcome(s) for the treatment policy.
T : array-like, shape (n, ) or (n, 1)
Treatment policy. Only binary treatments are accepted as input.
T will be flattened if shape is (n, 1).
X : array-like, shape (n, d_x)
Feature vector that captures heterogeneity.
inference : string, :class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`)
Returns
-------
self : an instance of self.
"""
# Check inputs
Y, T, X, _ = check_inputs(Y, T, X, multi_output_T=False)
categories = self.categories
if categories != 'auto':
categories = [categories] # OneHotEncoder expects a 2D array with features per column
self.transformer = OneHotEncoder(categories=categories, sparse=False, drop='first')
T = self.transformer.fit_transform(T.reshape(-1, 1))
self._d_t = T.shape[1:]
T = inverse_onehot(T)
self.models = check_models(self.models, self._d_t[0] + 1)
self.final_models = check_models(self.final_models, self._d_t[0])
self.propensity_models = []
self.models_control = []
self.models_treated = []
for ind in range(self._d_t[0]):
self.models_control.append(clone(self.models[0], safe=False))
self.models_treated.append(clone(self.models[ind + 1], safe=False))
self.propensity_models.append(clone(self.propensity_model, safe=False))
X_concat = np.concatenate((X[T == 0], X[T == ind + 1]), axis=0)
T_concat = np.concatenate((T[T == 0], T[T == ind + 1]), axis=0)
self.propensity_models[ind].fit(X_concat, T_concat)
pro_scores = self.propensity_models[ind].predict_proba(X_concat)[:, 1]
# Train model on controls. Assign higher weight to units resembling
# treated units.
self._fit_weighted_pipeline(self.models_control[ind], X[T == 0], Y[T == 0],
sample_weight=pro_scores[T_concat == 0] / (1 - pro_scores[T_concat == 0]))
# Train model on the treated. Assign higher weight to units resembling
# control units.
self._fit_weighted_pipeline(self.models_treated[ind], X[T == ind + 1], Y[T == ind + 1],
sample_weight=(1 - pro_scores[T_concat == ind + 1]) /
pro_scores[T_concat == ind + 1])
imputed_effect_on_controls = self.models_treated[ind].predict(X[T == 0]) - Y[T == 0]
imputed_effect_on_treated = Y[T == ind + 1] - self.models_control[ind].predict(X[T == ind + 1])
imputed_effects_concat = np.concatenate((imputed_effect_on_controls, imputed_effect_on_treated), axis=0)
self.final_models[ind].fit(X_concat, imputed_effects_concat)
def const_marginal_effect(self, X):
"""Calculate the constant marginal treatment effect on a vector of features for each sample.
Parameters
----------
X : matrix, shape (m × dₓ)
Matrix of features for each sample.
Returns
-------
τ_hat : matrix, shape (m, d_y, d_t)
Constant marginal CATE of each treatment on each outcome for each sample X[i].
Note that when Y is a vector rather than a 2-dimensional array,
the corresponding singleton dimensions in the output will be collapsed
"""
X = check_array(X)
taus = []
for model in self.final_models:
taus.append(model.predict(X))
taus = np.column_stack(taus).reshape((-1,) + self._d_t + self._d_y) # shape as of m*d_t*d_y
if self._d_y:
taus = transpose(taus, (0, 2, 1)) # shape as of m*d_y*d_t
return taus
def _fit_weighted_pipeline(self, model_instance, X, y, sample_weight):
if not isinstance(model_instance, Pipeline):
model_instance.fit(X, y, sample_weight)
else:
last_step_name = model_instance.steps[-1][0]
model_instance.fit(X, y, **{"{0}__sample_weight".format(last_step_name): sample_weight})
def shap_values(self, X, *, feature_names=None, treatment_names=None, output_names=None, background_samples=100):
return _shap_explain_model_cate(self.const_marginal_effect, self.final_models, X, self._d_t, self._d_y,
featurizer=None,
feature_names=feature_names,
treatment_names=treatment_names,
output_names=output_names,
input_names=self._input_names,
background_samples=background_samples)
shap_values.__doc__ = LinearCateEstimator.shap_values.__doc__
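# --- Usage sketch (illustrative only, not part of the econml package) ---
# A minimal, hedged example of fitting the TLearner defined above on synthetic data
# with a binary treatment. The data-generating process and the choice of base
# learner are assumptions made purely for illustration.
if __name__ == "__main__":
    from sklearn.ensemble import GradientBoostingRegressor

    rng = np.random.default_rng(0)
    n = 2000
    X = rng.normal(size=(n, 3))
    T = rng.binomial(1, 0.5, size=n)                     # binary treatment
    tau = 1.0 + 0.5 * X[:, 0]                            # heterogeneous true effect
    Y = X[:, 1] + tau * T + rng.normal(scale=0.1, size=n)

    est = TLearner(models=GradientBoostingRegressor())
    est.fit(Y, T, X=X)
    print(est.effect(X[:5]))                             # estimated CATE for 5 samples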
|
PypiClean
|
/mosaik-householdsim-2.1.0.tar.gz/mosaik-householdsim-2.1.0/householdsim/model.py
|
import json
import arrow
DATE_FORMAT = ['YYYY-MM-DD HH:mm', 'YYYY-MM-DD HH:mm:ss']
"""Date format used to convert strings to dates."""
class HouseModel:
"""The HouseModel processes and prepares the load profiles and their
associated meta data to allow easier access to them.
"""
def __init__(self, data, lv_grid):
# Process meta data
assert next(data).startswith('# meta')
meta = json.loads(next(data))
self.start = arrow.get(meta['start_date'], DATE_FORMAT)
"""The start date of the profile data."""
self.resolution = meta['resolution']
"""The time resolution of the data in minutes."""
self.unit = meta['unit']
"""The unit used for the load profiles (e.g., *W*)."""
self.num_profiles = meta['num_profiles']
"""The number of load profiles in the file."""
# Obtain id lists
assert next(data).startswith('# id_list')
id_list_lines = []
for line in data:
if line.startswith('# attrs'):
break
id_list_lines.append(line)
id_lists = json.loads(''.join(id_list_lines))
self.node_ids = id_lists[lv_grid]
"""List of power grid node IDs for which to create houses."""
# Enable pre-processing of the data
self._data = self._get_line(data)
# Obtain static attributes and create list of house info dicts
attrs = {}
for attr, *vals in self._data:
if attr.startswith('# profiles'):
break
attrs[attr] = [int(val) for val in vals]
#: List of house info dicts
self.houses = [
{
'num': i + 1,
'node_id': n,
'num_hh': attrs['num_hh'][i % self.num_profiles],
'num_res': attrs['num_residents'][i % self.num_profiles],
} for i, n in enumerate(self.node_ids)
]
# Helpers for get()
self._last_date = None
self._cache = None
def get(self, minutes):
"""Get the current load for all houses for *minutes* minutes since
:attr:`start`.
If the model uses a 15min resolution and *minutes* is not a multiple of 15,
the next smaller multiple of 15 will be used. For example, if you
pass ``minutes=23``, you'll get the value for ``15``.
"""
# Trim "minutes" to multiples of "self.resolution"
# Example: res=15, minutes=40 -> minutes == 30
minutes = minutes // self.resolution * self.resolution
target_date = self.start.shift(minutes=minutes)
if target_date != self._last_date:
# If target date not already reached, search data until we find it:
for date, *values in self._data:
date = arrow.get(date, DATE_FORMAT)
if date == target_date:
# Found target date, cache results:
values = list(map(float, values))
self._cache = [values[i % self.num_profiles]
for i, _ in enumerate(self.houses)]
self._last_date = date
break
else:
# We've reached the end of our data file if the for loop
# normally finishes.
raise IndexError('Target date "%s" (%s minutes from start) '
'out of range.' % (target_date, minutes))
return self._cache
def get_delta(self, date):
"""Get the amount of minutes between *date* and :attr:`start`.
The date needs to be a string formatted like :data:`DATE_FORMAT`.
Raise a :exc:`ValueError` if *date* is earlier than :attr:`start`.
"""
date = arrow.get(date, DATE_FORMAT)
if date < self.start:
raise ValueError('date must be >= "%s".' %
self.start.format(DATE_FORMAT))
dt = date - self.start
minutes = (dt.days * 1440) + (dt.seconds // 60)
return minutes
def _get_line(self, iterator):
for line in iterator:
yield [item.strip() for item in line.split(',')]
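# --- Usage sketch (illustrative only, not part of mosaik-householdsim) ---
# A minimal, hedged example of the profile file layout the HouseModel expects,
# fed in as an in-memory iterator. The grid name, node ids and load values are
# made up purely for illustration.
if __name__ == "__main__":
    example = iter([
        '# meta',
        '{"start_date": "2020-01-01 00:00", "resolution": 15, "unit": "W", "num_profiles": 2}',
        '# id_list',
        '{"lv_grid_0": ["node_a", "node_b"]}',
        '# attrs',
        'num_hh,2,3',
        'num_residents,4,5',
        '# profiles',
        '2020-01-01 00:00,100.0,200.0',
        '2020-01-01 00:15,110.0,210.0',
    ])
    model = HouseModel(example, 'lv_grid_0')
    print(model.houses)      # two houses mapped onto node_a and node_b
    print(model.get(20))     # 20 min is trimmed to 15 -> values from the 00:15 row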
|
PypiClean
|
/bucket3-0.16.1.tar.gz/bucket3-0.16.1/README.rst
|
Bucket3
=======
bucket3 is a simple, blog-aware, static site generator written in Python. It reads your content and spits out a complete, static website suitable for serving with Apache or your favorite web server.
bucket3 aims to become a virtual "information bucket" where you throw
pieces of information (texts, images, audio, etc.) and have them presented in a nice
blog-like format.
Quick intro
===========
1. pip install bucket3
2. mkdir myblog
3. cd myblog; bucket3 init
4. Edit .bucket3/conf.yaml
5. cd posts; bucket3 new hello-world-1
6. Edit the file generated, and add some text.
7. bucket3 update
8. You should now have your whole blog under "html" (as defined in your conf file).
9. Upload the files under html/ to your server.
Examples
--------
Check out the source of http://www.bucket3.com/ at https://github.com/vrypan/www.bucket3.com
http://blog.vrypan.net/ is also generated using bucket3.
License
=======
bucket3 is distributed under the MIT LICENSE.
Copyright
=========
Panayotis Vryonis, http://www.vrypan.net/
See also
=========
If you are not familiar with the idea of a static HTML blog, visit https://github.com/mojombo/jekyll; they've done a much better job of explaining it! (The intro above is actually adapted from jekyll's README file.)
|
PypiClean
|
/pyplan-ide-0.31.37.tar.gz/pyplan-ide-0.31.37/pyplan/frontend/8_5ebd1b62aaf4d4019dde.js
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[8,282],{691:function(e,a,t){"use strict";(function(n,o){var r,i;r=[t(664),t(726)],void 0===(i=function(e,a){return n.Controller.extend({name:"dashboardManager",showDashboard:function(a,n,r,i,c,l){var s="dashboard-"+a,u=new e;u.existsTask(s)?(u.selectTask(s),i&&o(".mainTask[data-rel='"+s+"']").length>0&&o(".mainTask[data-rel='"+s+"']").trigger("refreshView")):(u.addSimpleTask(s,r),t.e(19).then((function(){var e=[t(868)];(function(e){(new e).render(a,n,c,l),u.selectTask(s)}).apply(null,e)})).catch(t.oe))},showEmbeddableDashboard:function(a,n,r,i,c,l){var s="dashboard-"+a,u=new e;__currentSession.fromEmbedded&&u.removeAllTask(),u.existsTask(s)?(u.selectTask(s),i&&o(".mainTask[data-rel='"+s+"']").length>0&&o(".mainTask[data-rel='"+s+"']").trigger("refreshView")):(u.addSimpleTask(s,r),Promise.all([t.e(19),t.e(251)]).then((function(){var e=[t(1777)];(function(e){(new e).render(a,n,c,l),u.selectTask(s)}).apply(null,e)})).catch(t.oe))},removeDashboardTaskFromHome:function(a){var t="dashboard-"+a,n=new e;n.existsTask(t)&&n.removeTask(t)},drawDashboardBars:function(e,a){this.drawToolbar(e,(function(){a()}))},drawBottombar:function(e,a){t.e(211).then((function(){var n=[t(1779)];(function(t){var n=new t;n.setElement(e),n.render(),null!=a&&a()}).apply(null,n)})).catch(t.oe)},drawToolbar:function(e,a){t.e(50).then((function(){var n=[t(871)];(function(t){(new t).show({el:e,positions:["left","right"],onLoad:a,className:"dockDashboardProperty"})}).apply(null,n)})).catch(t.oe)},drawMoreDashboard:function(e,a){t.e(250).then((function(){var e=[t(1780)];(function(e){var t=new e;t.setElement("body"),t.render(a)}).apply(null,e)})).catch(t.oe)},getDefaultContent:function(e,a){Promise.all([t.e(1),t.e(10),t.e(90)]).then((function(){var n=[t(872)];(function(t){var n=new t({model:e});a(n)}).apply(null,n)})).catch(t.oe)},getEmptyContent:function(e,a){Promise.all([t.e(1),t.e(91)]).then((function(){var n=[t(1394)];(function(t){var n=new t({model:e});a(n)}).apply(null,n)})).catch(t.oe)},getChartToolbar:function(e,a,n,o){var 
r;switch(e){case"linechart":r="line/lineToolbar";break;case"columnchart":case"columnchartstacked":case"columnchartpercent":case"barchart":case"barchartstacked":case"barchartpercent":r="columnAndBar/columnAndBarToolbar";break;case"areachart":case"areachartstacked":case"areachartpercent":r="area/areaToolbar";break;case"piechart":r="pie/pieToolbar";break;case"funnelchart":r="funnelToolbar/funnelToolbar";break;case"pyramidchart":r="pyramidToolbar/pyramidToolbar";break;case"gaugechart":r="gauge/gaugeToolbar";break;case"waterfallchart":r="waterfall/waterfallToolbar";break;case"scatterchart":r="scatter/scatterToolbar";break;case"table":r="table/tableToolbar";break;case"indexlist":r="index/indexToolbar";break;case"map":r="map/mapToolbar";break;case"indicator":r="indicator/indicatorToolbar";break;case"selector":r="selector/selectorToolbar";break;case"formnode":r="formnode/formnodeToolbar";break;case"nodetable":r="nodeTable/nodetableToolbar";break;case"button":r="button/buttonToolbar";break;case"analyticachart":r="analyticaChart/analyticaChartToolbar";break;case"objectItem":switch(a){case"texteditor":r="texteditor/texteditorToolbar";break;case"cubeviewer":r="cubeviewer/cubeviewerToolbar";break;case"diagramviewer":r="diagramViewer/diagramViewerToolbar";break;case"mapviewer":r="mapViewer/mapViewerToolbar";break;case"menuwidget":r="menuWidget/menuWidgetToolbar";break;case"dashboardcontainer_QUITAR_ESTO_PARA_MOSTRAR":r="dashboardContainer/dashboardContainerToolbar"}break;case"complexchart":r="complexchart/complexChartToolbar";break;default:r=!1}r?Promise.all([t.e(25),t.e(44),t.e(96)]).then((function(){var e=[t(1786)("./"+r)];(function(e){var a=new e({model:n});o(a)}).apply(null,e)})).catch(t.oe):Promise.all([t.e(25),t.e(44),t.e(318)]).then((function(){var e=[t(1798)];(function(e){var a=new e({model:n});o(a)}).apply(null,e)})).catch(t.oe)},getChartItemViewFromType:function(e,a){switch(e){case"empty":Promise.all([t.e(1),t.e(91)]).then((function(){var e=[t(1394)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"linechart":Promise.all([t.e(1),t.e(10),t.e(309)]).then((function(){var e=[t(1799)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"columnchart":Promise.all([t.e(1),t.e(10),t.e(90)]).then((function(){var e=[t(872)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"columnchartstacked":Promise.all([t.e(1),t.e(10),t.e(248)]).then((function(){var e=[t(1800)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"columnchartpercent":Promise.all([t.e(1),t.e(10),t.e(247)]).then((function(){var e=[t(1801)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"barchart":Promise.all([t.e(1),t.e(10),t.e(305)]).then((function(){var e=[t(907)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"barchartstacked":Promise.all([t.e(1),t.e(10),t.e(246)]).then((function(){var e=[t(1802)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"barchartpercent":Promise.all([t.e(1),t.e(10),t.e(245)]).then((function(){var e=[t(1803)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"areachart":Promise.all([t.e(1),t.e(10),t.e(304)]).then((function(){var e=[t(908)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"areachartstacked":Promise.all([t.e(1),t.e(10),t.e(244)]).then((function(){var e=[t(1804)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"areachartpercent":Promise.all([t.e(1),t.e(10),t.e(243)]).then((function(){var 
e=[t(1805)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"piechart":Promise.all([t.e(1),t.e(10),t.e(310)]).then((function(){var e=[t(1806)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"gaugechart":Promise.all([t.e(1),t.e(10),t.e(308)]).then((function(){var e=[t(1807)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"waterfallchart":Promise.all([t.e(1),t.e(10),t.e(313)]).then((function(){var e=[t(1808)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"scatterchart":Promise.all([t.e(1),t.e(10),t.e(312)]).then((function(){var e=[t(1809)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"complexchart":Promise.all([t.e(1),t.e(10),t.e(306)]).then((function(){var e=[t(1810)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"table":Promise.all([t.e(1),t.e(12),t.e(121)]).then((function(){var e=[t(1811)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"indexlist":Promise.all([t.e(1),t.e(148)]).then((function(){var e=[t(1812)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"map":Promise.all([t.e(37),t.e(1),t.e(12),t.e(57),t.e(210)]).then((function(){var e=[t(1813)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"indicator":Promise.all([t.e(80),t.e(1),t.e(209)]).then((function(){var e=[t(1816)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"selector":Promise.all([t.e(1),t.e(175)]).then((function(){var e=[t(1818)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"formnode":Promise.all([t.e(1),t.e(208)]).then((function(){var e=[t(1821)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"nodetable":Promise.all([t.e(1),t.e(15),t.e(28),t.e(249)]).then((function(){var e=[t(1822)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"button":Promise.all([t.e(1),t.e(207)]).then((function(){var e=[t(1832)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"funnelchart":Promise.all([t.e(1),t.e(10),t.e(307)]).then((function(){var e=[t(1834)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"pyramidchart":Promise.all([t.e(1),t.e(10),t.e(311)]).then((function(){var e=[t(1835)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"analyticachart":Promise.all([t.e(1),t.e(242)]).then((function(){var e=[t(1836)];(function(e){a(e)}).apply(null,e)})).catch(t.oe)}},getObjectItemViewFromType:function(e,a){switch(e){case"texteditor":Promise.all([t.e(1),t.e(61),t.e(317)]).then((function(){var e=[t(1837)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"cubeviewer":Promise.all([t.e(1),t.e(15),t.e(28),t.e(314)]).then((function(){var e=[t(1838)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"diagramviewer":Promise.all([t.e(1),t.e(12),t.e(17),t.e(315)]).then((function(){var e=[t(1839)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"mapviewer":Promise.all([t.e(37),t.e(1),t.e(12),t.e(58),t.e(316)]).then((function(){var e=[t(1840)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"inputform":Promise.all([t.e(1),t.e(15),t.e(29),t.e(226)]).then((function(){var e=[t(1846)];(function(e){a(e)}).apply(null,e)})).catch(t.oe);break;case"dashboardcontainer":Promise.all([t.e(1),t.e(252)]).then((function(){var e=[t(1849)];(function(e){a(e)}).apply(null,e)})).catch(t.oe)}},getFilterView:function(e){t.e(162).then((function(){var a=[t(1850)];(function(a){e(a)}).apply(null,a)})).catch(t.oe)},showCopyDashboard:function(e,a,n){t.e(188).then((function(){var o=[t(1851)];(function(t){(new 
t).render(e,a,n)}).apply(null,o)})).catch(t.oe)},showDashboardComments:function(e){t.e(136).then((function(){var a=[t(1852)];(function(a){var t=new a;t.setElement(e.parent),t.render(e)}).apply(null,a)})).catch(t.oe)},refreshAllOpenDashboards:function(){o(".mainTask.dashboardTask .btnRefresh").trigger("click")},showTimeFrameSetting:function(e){t.e(195).then((function(){var a=[t(1853)];(function(a){var t=new a(e);t.setElement(e.el),t.render()}).apply(null,a)})).catch(t.oe)},updatePrintReportProgress:function(e){t.e(47).then((function(){var a=[t(1170)];(function(a){new a(e).updatePrintReportProgress()}).apply(null,a)})).catch(t.oe)},updatePrintReportMessage:function(e){t.e(47).then((function(){var a=[t(1170)];(function(a){new a(e).updatePrintReportMessage()}).apply(null,a)})).catch(t.oe)},updatePrintReportComplete:function(e){t.e(47).then((function(){var a=[t(1170)];(function(a){new a(e).updatePrintReportComplete()}).apply(null,a)})).catch(t.oe)}})}.apply(a,r))||(e.exports=i)}).call(this,t(677),t(1))},726:function(e,a,t){"use strict";(function(n,o){var r,i=t(679);void 0===(r=function(){return n.Model.extend({defaults:{dashId:null,dashboardViewList:[],modifiedDash:!1,noQuery:[],styleLibraries:[],nodeOwner:"",extraOptions:void 0},resizeTimeOut:0,getDashboard:function(e,a){(0,i.send)("dashboardManager/by_id/".concat(e,"/"),null,null,(function(e){e&&(a(e),o("#summary").trigger("refresh"))}))},getNavigator:function(e,a){var t=e.reportId,n=void 0===t?null:t,o=e.dashboardId,r=void 0===o?null:o,c="";n?(c="?report_id=".concat(n),r&&(c+="&dashboard_id=".concat(r))):r&&(c="?dashboard_id=".concat(r)),(0,i.send)("reportManager/getNavigator/".concat(c),null,null,a)},calculateScaleFactor:function(e,a){if(this.getItemsModel().length>0)for(var t=this.getItemsModel(),n=0;n<t.length;n++)t[n].calculateScaleFactor(e,a)},updateSizes:function(e,a){if(this.getItemsModel().length>0){var t=300;a&&(t=1),clearTimeout(this.resizeTimeOut);var n=this.getItemsModel();this.resizeTimeOut=setTimeout((function(){var e;for(e=0;e<n.length;e++)n[e].baseUpdateSize(),n[e].updateSize()}),t)}},setStateModified:function(e){this.set({modifiedDash:e})},getStateModified:function(){return this.get("modifiedDash")},addItemToModel:function(e){this.get("dashboardViewList").push(e)},getItemsModel:function(){return this.get("dashboardViewList")},getItemModel:function(e){var a,t=this.getItemsModel();for(a=0;a<t.length;a++)if(t[a].tagId==e)return t[a]},countItemsModelByNodeId:function(e){for(var a=0,t=this.getItemsModel(),n=0;n<t.length;n++)t[n].currentNodeId&&e&&t[n].currentNodeId.toLowerCase()==e.toLowerCase()&&a++;return a},removeItemModel:function(e){var a,t=this.getItemsModel();for(a=0;a<t.length;a++)if(t[a].tagId==e.tagId){t.splice(a,1);break}this.set({dashboardViewList:t})},removeAllItemsModel:function(){this.set("dashboardViewList",[])},changeItemModel:function(e,a){this.removeItemModel(e),this.addItemToModel(a)},setNodeOwner:function(e){this.set("node",e)},getNodeOwner:function(){return this.get("node")},onFilterChange:function(e,a,t,n,o,r){if(o){var i=this.getItemModel(o);if(i&&i.isUnlinkedIndex(e))return void i.onFilterChange(e,a,t,n,!0)}var c=this.getItemsModel();if(c)for(var l=0;l<c.length;l++)c[l].onFilterChange(e,a,t,n,!1,r)},onFiltersChange:function(e){var a=this.getItemsModel();if(a)for(var t=0;t<a.length;t++)a[t].onFiltersChange(e)},synchronizeDrop:function(e,a,t,n,o,r,i){if(i){var c=this.getItemsModel();if(c)for(var 
l=0;l<c.length;l++)c[l].tagId&&c[l].tagId!=i&&c[l].onSynchronizeDrop(e,a,t,n,o,r)}},synchronizeLevel:function(e,a,t){if(t){var n=this.getItemsModel();if(n)for(var o=0;o<n.length;o++)n[o].tagId&&n[o].tagId!=t&&n[o].onSynchronizeLevel(e,a)}},getNodeFullData:function(e,a,t,n){var o={node:e};a&&(o.fromRow=1,o.toRow=a),(0,i.send)("dashboardManager/getNodeFullData/",o,{type:"GET"},t,n)},getNodeIndexes:function(e,a){(0,i.send)("dashboardManager/getNodeIndexes/",{node:e},null,(function(e){e&&a(e)}))},getIndexValues:function(e,a){(0,i.send)("dashboardManager/getIndexValues/",e,null,(function(e){e&&a(e)}))},getGeoDef:function(e,a){(0,i.send)("Dashboard/GetGeoDef/"+e,null,null,(function(e){e&&a(e)}))},evaluateNode:function(e,a,t,n,o,r,c,l,s,u,d,h,f,p,m){l||(l="sum");var b={node:e,dims:a,rows:t,columns:n,summaryBy:l,bottomTotal:d,rightTotal:h,timeFormat:f,timeFormatType:p,calendarType:m},g="evaluateNode";r&&(g="evaluateNodeDef"),s&&s>0&&u&&u>0&&(b.fromRow=s*(u-1)+1,b.toRow=s*u),(0,i.send)("dashboardManager/".concat(g,"/"),JSON.stringify(b),{type:"POST",contentType:"application/json;charset=utf-8"},o,c)},evaluateNodeForPivot:function(e,a,t,n){(0,i.send)("Dashboard/EvaluateNodeForPivot/",{node:e},{type:"GET"},(function(e){var r=e.value,i={success:function(e,t,n){a(e)},complete:function(){o("#mainLoading").hide(),o("#secondLoading").hide(),t()},dataType:"json",progress:function(e){if(e.lengthComputable){var a=e.loaded/e.total*100;n(a)}else try{var t=this.getResponseHeader("X-Content-Length");a=e.loaded/t*100;n(a)}catch(e){}}};o("#secondLoading").show(),o.ajax("".concat(__apiURL,"/scripts/download.aspx?name=").concat(r),i)}),t)},updateDefinition:function(e,a,t,n){var r=[];t&&o.each(t,(function(e,a){r.push({id:a})}));var c={dashboardId:e,definition:a,styles:t};(0,i.send)("dashboardManager/".concat(e,"/"),JSON.stringify(c),{type:"PATCH",contentType:"application/json;charset=utf-8",dataType:"text"},(function(e){null!=n&&n(e)}))},updateDashboardImage:function(e,a,t){var n={id:e,fileData:a};(0,i.send)("Dashboard/UpdateImage/",n,{type:"PUT"},(function(a){o("#summary").trigger("refresh"),o("#model-summary").trigger("refresh",{id:e}),a&&t&&t(a)}))},validateIndexes:function(e,a){var t=function(a){var t;for(t=0;t<e.length;t++)if(e[t].field==a)return e[t]},n=function(e){var a,n;for(a=0;a<e.length;a++)null!=(n=t(e[a].field))&&(e[a].name=n.name)};n(a.dims),n(a.rows),n(a.columns)},initializeDashboardQuery:function(e){this.set("noQuery",e),this._initNoQuery()},_initNoQuery:function(){var e=this.get("noQuery");e&&e.length>0&&o.each(e,(function(e,a){a.started=!1}))},isReadyForEvaluate:function(e,a){var t=!0,n=this.get("noQuery");return n&&n.length>0&&o.each(n,(function(n,o){o.node==e?o.started=!0:0==o.started&&o.rel&&o.rel.indexOf(a)>=0&&(t=!1)})),t},setNodeValueChanges:function(e,a){(0,i.send)("dashboardManager/pivotGrid/setCubeChanges/",JSON.stringify(e),{type:"POST",contentType:"application/json;charset=utf-8"},(function(e){a(e),o("body").trigger("pendingChanges",[!0])}))},isResultComputed:function(e,a){(0,i.send)("dashboardManager/isResultComputed/",JSON.stringify({nodes:e}),{type:"POST",contentType:"application/json;charset=utf-8"},(function(e){a(e)}))},reevaluateNodesNeeded:function(e){o(".dashboardTask").trigger("reevaluateNodesNeeded",[e])},reevaluateNodesNeededInThisDashboard:function(e){for(var a=[],t=this.getItemsModel(),n=0;n<t.length;n++){var r=t[n].getNodesOfView();r&&(a=a.concat(r))}a.length>0&&this.isResultComputed(a,(function(a){if(a&&a.length>0)for(var 
n=0;n<t.length;n++)e&&t[n].tagId==e||(a.indexOf(t[n].currentNodeId)>=0&&o(t[n].tagId).trigger("evaluateNodeFromCurrentResult"),t[n].needRefresh(a)&&t[n].refreshItemDash())}))},applyNumberFormat:function(e){for(var a=this.getItemsModel(),t=0;t<a.length;t++)a[t].currentNodeId==e&&a[t].applyNumberFormat()},getStyleLibrary:function(e){var a=this.get("styleLibraries");if(a)for(var t=0;t<a.length;t++)if(a[t].id==e)return a[t].definition;return[]},getStyleLibraries:function(){return this.get("styleLibraries")},setStyleLibraries:function(e){return this.set("styleLibraries",e)},refreshStyleLibraries:function(e){var a=this;t.e(86).then((function(){var n=[t(749)];(function(t){(new t).list(null,(function(t){a.set("styleLibraries",t),e(t)}))}).apply(null,n)})).catch(t.oe)},syncDrilldown:function(e,a,t){var n=this.getItemsModel();if(n)for(var o=0;o<n.length;o++)n[o].tagId!=t&&n[o].syncDrilldown(e,a,t)},syncDrillUp:function(e,a){var t=this.getItemsModel();if(t)for(var n=0;n<t.length;n++)t[n].tagId!=a&&t[n].syncDrillup(e,a)},syncShowHideLegend:function(e,a,t){},viewAsChart:function(e,a){(0,i.send)("Dashboard/viewAsChart/",e,{type:"POST"},a)}})}.apply(a,[]))||(e.exports=r)}).call(this,t(219),t(1))}}]);
|
PypiClean
|
/sportmonks_python_sdk-0.1.0-py3-none-any.whl/sportmonks/paths/version_sport_odds_inplay_fixtures_fixture_id/get.py
|
from dataclasses import dataclass
import typing_extensions
import urllib3
from sportmonks.request_before_hook import request_before_hook
import json
from urllib3._collections import HTTPHeaderDict
from sportmonks.api_response import AsyncGeneratorResponse
from sportmonks import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from sportmonks import schemas # noqa: F401
from sportmonks.model.sport_odds_in_play_by_fixture_id_response import SportOddsInPlayByFixtureIdResponse as SportOddsInPlayByFixtureIdResponseSchema
from sportmonks.type.sport_odds_in_play_by_fixture_id_response import SportOddsInPlayByFixtureIdResponse
from . import path
# Path params
VersionSchema = schemas.StrSchema
SportSchema = schemas.StrSchema
FixtureIdSchema = schemas.IntSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'fixtureId': typing.Union[FixtureIdSchema, decimal.Decimal, int, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
'version': typing.Union[VersionSchema, str, ],
'sport': typing.Union[SportSchema, str, ],
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_version = api_client.PathParameter(
name="version",
style=api_client.ParameterStyle.SIMPLE,
schema=VersionSchema,
)
request_path_sport = api_client.PathParameter(
name="sport",
style=api_client.ParameterStyle.SIMPLE,
schema=SportSchema,
)
request_path_fixture_id = api_client.PathParameter(
name="fixtureId",
style=api_client.ParameterStyle.SIMPLE,
schema=FixtureIdSchema,
required=True,
)
_auth = [
'apikeyAuth',
]
DateSchema = schemas.StrSchema
date_parameter = api_client.HeaderParameter(
name="Date",
style=api_client.ParameterStyle.SIMPLE,
schema=DateSchema,
)
ServerSchema = schemas.StrSchema
server_parameter = api_client.HeaderParameter(
name="Server",
style=api_client.ParameterStyle.SIMPLE,
schema=ServerSchema,
)
CacheControlSchema = schemas.StrSchema
cache_control_parameter = api_client.HeaderParameter(
name="Cache-Control",
style=api_client.ParameterStyle.SIMPLE,
schema=CacheControlSchema,
)
XRateLimitLimitSchema = schemas.IntSchema
x_rate_limit_limit_parameter = api_client.HeaderParameter(
name="X-RateLimit-Limit",
style=api_client.ParameterStyle.SIMPLE,
schema=XRateLimitLimitSchema,
)
XRateLimitRemainingSchema = schemas.IntSchema
x_rate_limit_remaining_parameter = api_client.HeaderParameter(
name="X-RateLimit-Remaining",
style=api_client.ParameterStyle.SIMPLE,
schema=XRateLimitRemainingSchema,
)
VarySchema = schemas.StrSchema
vary_parameter = api_client.HeaderParameter(
name="Vary",
style=api_client.ParameterStyle.SIMPLE,
schema=VarySchema,
)
ContentEncodingSchema = schemas.StrSchema
content_encoding_parameter = api_client.HeaderParameter(
name="Content-Encoding",
style=api_client.ParameterStyle.SIMPLE,
schema=ContentEncodingSchema,
)
XRobotsTagSchema = schemas.StrSchema
x_robots_tag_parameter = api_client.HeaderParameter(
name="X-Robots-Tag",
style=api_client.ParameterStyle.SIMPLE,
schema=XRobotsTagSchema,
)
ContentLengthSchema = schemas.IntSchema
content_length_parameter = api_client.HeaderParameter(
name="Content-Length",
style=api_client.ParameterStyle.SIMPLE,
schema=ContentLengthSchema,
)
SchemaFor200ResponseBodyApplicationJson = SportOddsInPlayByFixtureIdResponseSchema
ResponseHeadersFor200 = typing_extensions.TypedDict(
'ResponseHeadersFor200',
{
'Date': DateSchema,
'Server': ServerSchema,
'Cache-Control': CacheControlSchema,
'X-RateLimit-Limit': XRateLimitLimitSchema,
'X-RateLimit-Remaining': XRateLimitRemainingSchema,
'Vary': VarySchema,
'Content-Encoding': ContentEncodingSchema,
'X-Robots-Tag': XRobotsTagSchema,
'Content-Length': ContentLengthSchema,
}
)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
body: SportOddsInPlayByFixtureIdResponse
@dataclass
class ApiResponseFor200Async(api_client.AsyncApiResponse):
body: SportOddsInPlayByFixtureIdResponse
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
response_cls_async=ApiResponseFor200Async,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
headers=[
date_parameter,
server_parameter,
cache_control_parameter,
x_rate_limit_limit_parameter,
x_rate_limit_remaining_parameter,
vary_parameter,
content_encoding_parameter,
x_robots_tag_parameter,
content_length_parameter,
]
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
def _odds_in_play_by_fixture_id_mapped_args(
self,
fixture_id: int,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> api_client.MappedArgs:
args: api_client.MappedArgs = api_client.MappedArgs()
_path_params = {}
if version is not None:
_path_params["version"] = version
if sport is not None:
_path_params["sport"] = sport
if fixture_id is not None:
_path_params["fixtureId"] = fixture_id
args.path = _path_params
return args
async def _aodds_in_play_by_fixture_id_oapg(
self,
path_params: typing.Optional[dict] = {},
skip_deserialization: bool = True,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
) -> typing.Union[
ApiResponseFor200Async,
api_client.ApiResponseWithoutDeserializationAsync,
AsyncGeneratorResponse,
]:
"""
In-play by Fixture ID
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_version,
request_path_sport,
request_path_fixture_id,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
method = 'get'.upper()
request_before_hook(
resource_path=used_path,
method=method,
configuration=self.api_client.configuration,
auth_settings=_auth,
headers=_headers,
)
response = await self.api_client.async_call_api(
resource_path=used_path,
method=method,
headers=_headers,
auth_settings=_auth,
timeout=timeout,
)
if stream:
if not 200 <= response.http_response.status <= 299:
body = (await response.http_response.content.read()).decode("utf-8")
raise exceptions.ApiStreamingException(
status=response.http_response.status,
reason=response.http_response.reason,
body=body,
)
async def stream_iterator():
"""
iterates over response.http_response.content and closes connection once iteration has finished
"""
async for line in response.http_response.content:
if line == b'\r\n':
continue
yield line
response.http_response.close()
await response.session.close()
return AsyncGeneratorResponse(
content=stream_iterator(),
headers=response.http_response.headers,
status=response.http_response.status,
response=response.http_response
)
response_for_status = _status_code_to_response.get(str(response.http_response.status))
if response_for_status:
api_response = await response_for_status.deserialize_async(
response,
self.api_client.configuration,
skip_deserialization=skip_deserialization
)
else:
# If response data is JSON then deserialize for SDK consumer convenience
is_json = api_client.JSONDetector._content_type_is_json(response.http_response.headers.get('Content-Type', ''))
api_response = api_client.ApiResponseWithoutDeserializationAsync(
body=await response.http_response.json() if is_json else await response.http_response.text(),
response=response.http_response,
round_trip_time=response.round_trip_time,
status=response.http_response.status,
headers=response.http_response.headers,
)
if not 200 <= api_response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
# cleanup session / response
response.http_response.close()
await response.session.close()
return api_response
def _odds_in_play_by_fixture_id_oapg(
self,
path_params: typing.Optional[dict] = {},
skip_deserialization: bool = True,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]:
"""
In-play by Fixture ID
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_version,
request_path_sport,
request_path_fixture_id,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
method = 'get'.upper()
request_before_hook(
resource_path=used_path,
method=method,
configuration=self.api_client.configuration,
auth_settings=_auth,
headers=_headers,
)
response = self.api_client.call_api(
resource_path=used_path,
method=method,
headers=_headers,
auth_settings=_auth,
timeout=timeout,
)
response_for_status = _status_code_to_response.get(str(response.http_response.status))
if response_for_status:
api_response = response_for_status.deserialize(
response,
self.api_client.configuration,
skip_deserialization=skip_deserialization
)
else:
# If response data is JSON then deserialize for SDK consumer convenience
is_json = api_client.JSONDetector._content_type_is_json(response.http_response.headers.get('Content-Type', ''))
api_response = api_client.ApiResponseWithoutDeserialization(
body=json.loads(response.http_response.data) if is_json else response.http_response.data,
response=response.http_response,
round_trip_time=response.round_trip_time,
status=response.http_response.status,
headers=response.http_response.headers,
)
if not 200 <= api_response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class OddsInPlayByFixtureId(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
async def aodds_in_play_by_fixture_id(
self,
fixture_id: int,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> typing.Union[
ApiResponseFor200Async,
api_client.ApiResponseWithoutDeserializationAsync,
AsyncGeneratorResponse,
]:
args = self._odds_in_play_by_fixture_id_mapped_args(
fixture_id=fixture_id,
version=version,
sport=sport,
)
return await self._aodds_in_play_by_fixture_id_oapg(
path_params=args.path,
)
def odds_in_play_by_fixture_id(
self,
fixture_id: int,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]:
args = self._odds_in_play_by_fixture_id_mapped_args(
fixture_id=fixture_id,
version=version,
sport=sport,
)
return self._odds_in_play_by_fixture_id_oapg(
path_params=args.path,
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
async def aget(
self,
fixture_id: int,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> typing.Union[
ApiResponseFor200Async,
api_client.ApiResponseWithoutDeserializationAsync,
AsyncGeneratorResponse,
]:
args = self._odds_in_play_by_fixture_id_mapped_args(
fixture_id=fixture_id,
version=version,
sport=sport,
)
return await self._aodds_in_play_by_fixture_id_oapg(
path_params=args.path,
)
def get(
self,
fixture_id: int,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]:
args = self._odds_in_play_by_fixture_id_mapped_args(
fixture_id=fixture_id,
version=version,
sport=sport,
)
return self._odds_in_play_by_fixture_id_oapg(
path_params=args.path,
)
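# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's illustration, not part of the generated SDK):
# ApiForget above maps the GET verb onto the in-play-odds operation defined in
# this module. How the underlying api_client instance is configured (base URL,
# the 'apikeyAuth' credential) is an assumption and may differ in the real
# sportmonks package; the "v3"/"football" path values are likewise assumed.
def _example_fetch_inplay_odds(client, fixture_id: int):
    """Illustrative only: call the endpoint defined in this module."""
    # BaseApi subclasses are assumed to accept a configured ApiClient instance.
    endpoint = ApiForget(client)
    # 'version' and 'sport' are optional path parameters; 'fixture_id' is required.
    return endpoint.get(fixture_id=fixture_id, version="v3", sport="football")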
|
PypiClean
|
/ansible-8.3.0-py3-none-any.whl/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/facts/ospf_interfaces/ospf_interfaces.py
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils._text import to_bytes
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.six import string_types
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
generate_dict,
remove_empties,
)
from ansible_collections.junipernetworks.junos.plugins.module_utils.network.junos.argspec.ospf_interfaces.ospf_interfaces import (
Ospf_interfacesArgs,
)
from ansible_collections.junipernetworks.junos.plugins.module_utils.network.junos.utils.utils import (
_validate_config,
)
try:
from lxml import etree
HAS_LXML = True
except ImportError:
HAS_LXML = False
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError:
HAS_XMLTODICT = False
class Ospf_interfacesFacts(object):
"""The junos ospf_interfaces fact class"""
def __init__(self, module, subspec="config", options="options"):
self._module = module
self.argument_spec = Ospf_interfacesArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = generate_dict(facts_argument_spec)
self.router_id = ""
def get_connection(self, connection, config_filter):
"""
:param connection:
:param config_filter:
:return:
"""
return connection.get_configuration(filter=config_filter)
def populate_facts(self, connection, ansible_facts, data=None):
"""Populate the facts for ospf_interfaces
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if not HAS_LXML:
self._module.fail_json(msg="lxml is not installed.")
if not data:
config_filter = """
<configuration>
<protocols>
<ospf/>
</protocols>
<routing-options>
<router-id/>
</routing-options>
</configuration>
"""
data = self.get_connection(connection, config_filter)
if isinstance(data, string_types):
data = etree.fromstring(
to_bytes(data, errors="surrogate_then_replace"),
)
resources = data.xpath("configuration/protocols/ospf")
router_id_path = data.xpath("configuration/routing-options/router-id")
if router_id_path:
self.router_id = self._get_xml_dict(router_id_path.pop())
else:
self.router_id = ""
objs = []
for resource in resources:
if resource:
xml = self._get_xml_dict(resource)
objs = self.render_config(self.generated_spec, xml)
facts = {}
if objs:
facts["junos_ospf_interfaces"] = []
params = _validate_config(
self._module,
self.argument_spec,
{"config": objs},
redact=True,
)
for cfg in params["config"]:
facts["junos_ospf_interfaces"].append(remove_empties(cfg))
ansible_facts["ansible_network_resources"].update(facts)
return ansible_facts
def _get_xml_dict(self, xml_root):
if not HAS_XMLTODICT:
self._module.fail_json(msg=missing_required_lib("xmltodict"))
xml_dict = xmltodict.parse(
etree.tostring(xml_root),
dict_constructor=dict,
)
return xml_dict
def render_config(self, spec, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
ospf_interfaces_config = []
ospf = conf.get("ospf")
if ospf.get("area"):
areas = ospf.get("area")
if not isinstance(areas, list):
areas = [areas]
for area in areas:
rendered_area = {}
rendered_area["area_id"] = area.get("name")
rendered_area["interfaces"] = []
interfaces = area["interface"]
if not isinstance(interfaces, list):
interfaces = [interfaces]
for interface in interfaces:
interface_dict = {}
interface_dict["priority"] = interface.get("priority")
interface_dict["metric"] = interface.get("metric")
interface_dict["mtu"] = interface.get("mtu")
interface_dict["te_metric"] = interface.get("te-metric")
interface_dict["ipsec_sa"] = interface.get("ipsec-sa")
interface_dict["hello_interval"] = interface.get(
"hello-interval",
)
interface_dict["dead_interval"] = interface.get(
"dead-interval",
)
interface_dict["retransmit_interval"] = interface.get(
"retransmit-interval",
)
interface_dict["transit_delay"] = interface.get(
"transit-delay",
)
interface_dict["poll_interval"] = interface.get(
"poll-interval",
)
if "passive" in interface.keys():
interface_dict["passive"] = True
if "flood-reduction" in interface.keys():
interface_dict["flood_reduction"] = True
if "demand-circuit" in interface.keys():
interface_dict["demand_circuit"] = True
if "no-advertise-adjacency-segment" in interface.keys():
interface_dict["no_advertise_adjacency_segment"] = True
if "no-eligible-backup" in interface.keys():
interface_dict["no_eligible_backup"] = True
if "no-eligible-remote-backup" in interface.keys():
interface_dict["no_eligible_remote_backup"] = True
if "no-interface-state-traps" in interface.keys():
interface_dict["no_interface_state_traps"] = True
if "no-neighbor-down-notification" in interface.keys():
interface_dict["no_neighbor_down_notification"] = True
if "node-link-protection" in interface.keys():
interface_dict["node_link_protection"] = True
if "bandwidth-based-metrics" in interface.keys():
bandwidth_metrics = interface["bandwidth-based-metrics"].get("bandwidth")
if not isinstance(bandwidth_metrics, list):
bandwidth_metrics = [bandwidth_metrics]
interface_dict["bandwidth_based_metrics"] = []
for metric in bandwidth_metrics:
interface_dict["bandwidth_based_metrics"].append(
{
"metric": metric.get("metric"),
"bandwidth": metric.get("name"),
},
)
if "authentication" in interface.keys():
auth = interface["authentication"]
auth_dict = {}
if auth.get("simple-password"):
auth_dict["simple_password"] = auth.get(
"simple-password",
)
elif auth.get("md5"):
auth_dict["type"] = {"md5": []}
md5_list = auth.get("md5")
if not isinstance(md5_list, list):
md5_list = [md5_list]
for md5_auth in md5_list:
auth_dict["type"]["md5"].append(
{
"key_id": md5_auth.get("name"),
"key": md5_auth.get("key"),
},
)
interface_dict["authentication"] = auth_dict
rendered_area["interfaces"].append(interface_dict)
af = {}
conf = {}
areas = {}
address_family = []
af["afi"] = "ipv4"
areas["area_id"] = rendered_area["area_id"]
interface_dict["area"] = areas
af["processes"] = interface_dict
address_family.append(af)
conf["address_family"] = address_family
conf["name"] = interface.get("name")
if self.router_id:
conf["router_id"] = self.router_id["router-id"]
                    conf = remove_empties(conf)
ospf_interfaces_config.append(conf)
return ospf_interfaces_config
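# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's illustration, not part of the collection): the
# junos facts framework normally instantiates Ospf_interfacesFacts with an
# AnsibleModule and a netconf connection; the placeholder arguments below are
# assumptions used only to show the call shape of populate_facts().
def _example_gather_ospf_interfaces_facts(module, connection):
    """Illustrative only: drive the fact class defined in this module."""
    ansible_facts = {"ansible_network_resources": {}}
    # Passing data=None makes populate_facts fetch the <ospf/> subtree itself
    # via connection.get_configuration(); pre-fetched XML can be passed instead.
    return Ospf_interfacesFacts(module).populate_facts(connection, ansible_facts)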
|
PypiClean
|